diff --git a/bin/terraform.sh b/bin/terraform.sh index adabd7a..171b302 100755 --- a/bin/terraform.sh +++ b/bin/terraform.sh @@ -32,14 +32,15 @@ function usage() { cat < @@ -64,7 +65,13 @@ build_id (optional): component_name: - the name of the terraform component module in the components directory - + +storage_provider: + - the backend to use for state storage, defaults to aws s3 + - Options: + * aws + * gcloud + environment: - dev - test @@ -88,6 +95,34 @@ additional arguments: EOF }; +function storage_ls() { +case "${storage_provider}" in + aws) + aws s3 ls s3://${bucket}/${project}/${account_id}/${region}/${environment}/${dynamic_file_name} + ;; + gcloud) + gsutil ls gs://${bucket}/${project}/${account_id}/${region}/${environment}/${dynamic_file_name} + ;; + azurerm) + az storage blob list + ;; +esac +}; + +function storage_cp() { +case "${storage_provider}" in + aws) + aws s3 cp s3://${bucket}/${project}/${account_id}/${region}/${environment}/${dynamic_file_name} + ;; + gcloud) + gsutil cp gs://${bucket}/${project}/${account_id}/${region}/${environment}/${dynamic_file_name} + ;; + azurerm) + az storage blob copy start + ;; +esac +}; + ## # Test for GNU getopt ## @@ -101,8 +136,8 @@ fi ## readonly raw_arguments="${*}"; ARGS=$(getopt \ - -o hva:b:c:e:g:i:p:r: \ - -l "help,version,bootstrap,action:,bucket-prefix:,build-id:,component:,environment:,group:,project:,region:" \ + -o hva:b:c:e:g:i:p:r:s: \ + -l "help,version,bootstrap,action:,bucket-prefix:,build-id:,component:,environment:,group:,project:,region:,storage-provider:" \ -n "${0}" \ -- \ "$@"); @@ -124,6 +159,7 @@ declare action; declare bucket_prefix; declare build_id; declare project; +declare storage_provider; while true; do case "${1}" in @@ -184,6 +220,13 @@ while true; do shift; fi; ;; + -s|--storage-provider) + shift; + if [ -n "${1}" ]; then + storage_provider="${1}"; + shift; + fi; + ;; -p|--project) shift; if [ -n "${1}" ]; then @@ -194,7 +237,7 @@ while true; do --bootstrap) shift; 
bootstrap="true" - ;; + ;; --) shift; break; @@ -233,6 +276,29 @@ readonly region="${region_arg:-${AWS_DEFAULT_REGION}}"; [ -n "${project}" ] \ || error_and_die "Required argument -p/--project not specified"; +[ -n "${storage_provider}" ] \ + || storage_provider="aws"; + +if [ "${storage_provider}" == "aws" ]; then + storage_cp_cmd="aws s3 cp"; + storage_ls_cmd="aws s3 ls"; + verify_cmd="aws sts get-caller-identity --query Arn --output text"; + account_cmd="aws sts get-caller-identity --query Account --output text"; + storage_url="s3://" +elif [ "${storage_provider}" == "gcloud" ]; then + storage_cp_cmd="gsutil cp"; + storage_ls_cmd="gsutil ls"; + verify_cmd="gcloud config list --format value(core.account)"; + account_cmd="gcloud config list --format value(core.project)"; + storage_url="gs://" +elif [ "${storage_provider}" == "azurerm" ]; then + storage_cp_cmd="az storage blob copy start" ; + storage_ls_cmd="az storage blob list"; + verify_cmd="az ad signed-in-user show --query userPrincipalName --output tsv"; + account_cmd="az account list --query [?isDefault==\`true\`].name --output tsv"; + storage_url="azurerm://" +fi + # Bootstrapping is special if [ "${bootstrap}" == "true" ]; then [ -n "${component_arg}" ] \ @@ -251,40 +317,40 @@ else [ -n "${environment_arg}" ] \ || error_and_die "Required argument missing: -e/--environment"; readonly environment="${environment_arg}"; - + fi [ -n "${action}" ] \ || error_and_die "Required argument missing: -a/--action"; -# Validate AWS Credentials Available -iam_iron_man="$(aws sts get-caller-identity --query 'Arn' --output text)"; -if [ -n "${iam_iron_man}" ]; then - echo -e "AWS Credentials Found. Using ARN '${iam_iron_man}'"; +# Validate Credentials Available +verify="$(${verify_cmd})"; +if [ -n "${verify}" ]; then + echo -e "Credentials Found. Using '${verify}'"; else - error_and_die "No AWS Credentials Found. 
\"aws sts get-caller-identity --query 'Arn' --output text\" responded with ARN '${iam_iron_man}'"; + error_and_die "No Credentials Found. \"${verify_cmd}\" responded with '${verify}'"; fi; -# Query canonical AWS Account ID -aws_account_id="$(aws sts get-caller-identity --query 'Account' --output text)"; -if [ -n "${aws_account_id}" ]; then - echo -e "AWS Account ID: ${aws_account_id}"; +# Query canonical Account ID +account_id="$(${account_cmd})"; +if [ -n "${account_id}" ]; then + echo -e "Account ID: ${account_id}"; else - error_and_die "Couldn't determine AWS Account ID. \"aws sts get-caller-identity --query 'Account' --output text\" provided no output"; + error_and_die "Couldn't determine Account ID. \"${account_cmd}\" provided no output"; fi; -# Validate S3 bucket. Set default if undefined +# Validate bucket. Set default if undefined if [ -n "${bucket_prefix}" ]; then - readonly bucket="${bucket_prefix}-${aws_account_id}-${region}" - echo -e "Using S3 bucket s3://${bucket}"; + readonly bucket="${bucket_prefix}-${account_id}-${region}" + echo -e "Using bucket ${storage_url}${bucket}"; else - readonly bucket="${project}-terraformscaffold-${aws_account_id}-${region}"; - echo -e "No bucket prefix specified. Using S3 bucket s3://${bucket}"; + readonly bucket="${project}-terraformscaffold-${account_id}-${region}"; + echo -e "No bucket prefix specified. 
Using bucket ${storage_url}${bucket}"; fi; declare component_path; if [ "${bootstrap}" == "true" ]; then - component_path="${base_path}/bootstrap"; + component_path="${base_path}/bootstrap/${storage_provider}"; else component_path="${base_path}/components/${component}"; fi; @@ -374,7 +440,7 @@ if [ "${bootstrap}" == "true" ]; then tf_var_params+=" -var region=${region}"; tf_var_params+=" -var project=${project}"; tf_var_params+=" -var bucket_name=${bucket}"; - tf_var_params+=" -var aws_account_id=${aws_account_id}"; + tf_var_params+=" -var account_id=${account_id}"; else # Run pre.sh if [ -f "pre.sh" ]; then @@ -391,16 +457,16 @@ else declare -a secrets=(); readonly secrets_file_name="secret.tfvars.enc"; readonly secrets_file_path="build/${secrets_file_name}"; - aws s3 ls s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${secrets_file_name} >/dev/null 2>&1; + ${storage_ls_cmd} ${storage_url}${bucket}/${project}/${account_id}/${region}/${environment}/${secrets_file_name} >/dev/null 2>&1; if [ $? -eq 0 ]; then mkdir -p build; - aws s3 cp s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${secrets_file_name} ${secrets_file_path} \ - || error_and_die "S3 secrets file is present, but inaccessible. Ensure you have permission to read s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${secrets_file_name}"; + ${storage_cp_cmd} ${storage_url}${bucket}/${project}/${account_id}/${region}/${environment}/${secrets_file_name} ${secrets_file_path} \ + || error_and_die "S3 secrets file is present, but inaccessible. 
Ensure you have permission to read ${storage_url}${bucket}/${project}/${account_id}/${region}/${environment}/${secrets_file_name}"; if [ -f "${secrets_file_path}" ]; then secrets=($(aws kms decrypt --ciphertext-blob fileb://${secrets_file_path} --output text --query Plaintext | base64 --decode)); fi; fi; - + if [ -n "${secrets[0]}" ]; then secret_regex='^[A-Za-z0-9_-]+=.+$'; secret_count=1; @@ -415,7 +481,7 @@ else fi; done; fi; - + # Pull down additional dynamic plaintext tfvars file from S3 # Anti-pattern warning: Your variables should almost always be in source control. # There are a very few use cases where you need constant variability in input variables, @@ -424,27 +490,27 @@ else # Use this feature only if you're sure it's the right pattern for your use case. readonly dynamic_file_name="dynamic.tfvars"; readonly dynamic_file_path="build/${dynamic_file_name}"; - aws s3 ls s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${dynamic_file_name} >/dev/null 2>&1; + ${storage_ls_cmd} ${storage_url}${bucket}/${project}/${account_id}/${region}/${environment}/${dynamic_file_name} >/dev/null 2>&1; if [ $? -eq 0 ]; then - aws s3 cp s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${dynamic_file_name} ${dynamic_file_path} \ - || error_and_die "S3 tfvars file is present, but inaccessible. Ensure you have permission to read s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${dynamic_file_name}"; + ${storage_cp_cmd} ${storage_url}${bucket}/${project}/${account_id}/${region}/${environment}/${dynamic_file_name} ${dynamic_file_path} \ + || error_and_die "S3 tfvars file is present, but inaccessible. 
Ensure you have permission to read ${storage_url}${bucket}/${project}/${account_id}/${region}/${environment}/${dynamic_file_name}"; fi; - + # Use versions TFVAR files if exists readonly versions_file_name="versions_${region}_${environment}.tfvars"; readonly versions_file_path="${base_path}/etc/${versions_file_name}"; - + # Check environment name is a known environment # Could potentially support non-existent tfvars, but choosing not to. readonly env_file_path="${base_path}/etc/env_${region}_${environment}.tfvars"; if [ ! -f "${env_file_path}" ]; then error_and_die "Unknown environment. ${env_file_path} does not exist."; fi; - + # Check for presence of a global variables file, and use it if readable readonly global_vars_file_name="global.tfvars"; readonly global_vars_file_path="${base_path}/etc/${global_vars_file_name}"; - + # Check for presence of a region variables file, and use it if readable readonly region_vars_file_name="${region}.tfvars"; readonly region_vars_file_path="${base_path}/etc/${region_vars_file_name}"; @@ -454,10 +520,10 @@ else readonly group_vars_file_name="group_${group}.tfvars"; readonly group_vars_file_path="${base_path}/etc/${group_vars_file_name}"; fi; - + # Collect the paths of the variables files to use declare -a tf_var_file_paths; - + # Use Global and Region first, to allow potential for terraform to do the # honourable thing and override global and region settings with environment # specific ones; however we do not officially support the same variable @@ -477,14 +543,14 @@ else echo -e "[WARNING] Group \"${group}\" has been specified, but no group variables file is available at ${group_vars_file_path}"; fi; - + # We've already checked this is readable and its presence is mandatory tf_var_file_paths+=("${env_file_path}"); - + # If present and readable, use versions and dynamic variables too [ -f "${versions_file_path}" ] && tf_var_file_paths+=("${versions_file_path}"); [ -f "${dynamic_file_path}" ] &&
tf_var_file_paths+=("${dynamic_file_path}"); - + # Warn on duplication duplicate_variables="$(cat "${tf_var_file_paths[@]}" | sed -n -e 's/\(^[a-zA-Z0-9_\-]\+\)\s*=.*$/\1/p' | sort | uniq -d)"; [ -n "${duplicate_variables}" ] \ @@ -499,12 +565,12 @@ ${duplicate_variables} This could lead to unexpected behaviour. Overriding of variables has previously been unpredictable and is not currently supported, but it may work. - + Recent changes to terraform might give you useful overriding and map-merging functionality, please use with caution and report back on your successes & failures. ###################################################################"; - + # Build up the tfvars arguments for terraform command line for file_path in "${tf_var_file_paths[@]}"; do tf_var_params+=" -var-file=${file_path}"; @@ -532,21 +598,44 @@ declare backend_prefix; declare backend_filename; if [ "${bootstrap}" == "true" ]; then - backend_prefix="${project}/${aws_account_id}/${region}/bootstrap"; + backend_prefix="${project}/${account_id}/${region}/bootstrap"; backend_filename="bootstrap.tfstate"; else - backend_prefix="${project}/${aws_account_id}/${region}/${environment}"; + backend_prefix="${project}/${account_id}/${region}/${environment}"; backend_filename="${component_name}.tfstate"; fi; readonly backend_key="${backend_prefix}/${backend_filename}"; -readonly backend_config="terraform { - backend \"s3\" { - region = \"${region}\" - bucket = \"${bucket}\" - key = \"${backend_key}\" - } -}"; + +if [ ${storage_provider} == "gcloud" ]; then + readonly backend_config="terraform { + backend \"gcs\" { + project = \"${project}\" + region = \"${region}\" + bucket = \"${bucket}\" + prefix = \"${backend_key}\" + } + }"; + +elif [ ${storage_provider} == "azurerm" ]; then + readonly backend_config="terraform { + backend \"azurerm\" { + storage_account_name = \"${project}${region}tfstate\" + container_name = \"${bucket,,}\" + key = \"${backend_key}\" + resource_group_name = \"${bucket,,}\" + } + 
}"; + +else + readonly backend_config="terraform { + backend \"s3\" { + region = \"${region}\" + bucket = \"${bucket}\" + key = \"${backend_key}\" + } + }"; +fi # We're now all ready to go. All that's left is to: # * Write the backend config @@ -571,7 +660,7 @@ if [ "${bootstrap}" == "true" ]; then # For this exist check we could do many things, but we explicitly perform # an ls against the key we will be working with so as to not require # permissions to, for example, list all buckets, or the bucket root keyspace - aws s3 ls s3://${bucket}/${backend_prefix}/${backend_filename} >/dev/null 2>&1; + ${storage_command} ls ${storage_url}${bucket}/${backend_prefix}/${backend_filename} >/dev/null 2>&1; [ $? -eq 0 ] || bootstrapped="false"; fi; @@ -581,7 +670,7 @@ if [ "${bootstrapped}" == "true" ]; then # Nix the horrible hack on exit trap "rm -f $(pwd)/backend_terraformscaffold.tf" EXIT; - + # Configure remote state storage echo "Setting up S3 remote state from s3://${bucket}/${backend_key}"; # TODO: Add -upgrade to init when we drop support for <0.10 @@ -615,17 +704,17 @@ case "${action}" in || error_and_die "Terraform plan failed"; if [ -n "${build_id}" ]; then - aws s3 cp build/${plan_file_name} s3://${bucket}/${plan_file_remote_key} \ - || error_and_die "Plan file upload to S3 failed (s3://${bucket}/${plan_file_remote_key})"; + ${storage_cp_cmd} build/${plan_file_name} ${storage_url}${bucket}/${plan_file_remote_key} \ + || error_and_die "Plan file upload failed (${storage_url}${bucket}/${plan_file_remote_key})"; fi; exit ${status}; ;; 'graph') mkdir -p build || error_and_die "Failed to create output directory '$(pwd)/build'"; - terraform graph -draw-cycles | dot -Tpng > build/${project}-${aws_account_id}-${region}-${environment}.png \ + terraform graph -draw-cycles | dot -Tpng > build/${project}-${account_id}-${region}-${environment}.png \ || error_and_die "Terraform simple graph generation failed"; - terraform graph -draw-cycles -verbose | dot -Tpng > 
build/${project}-${aws_account_id}-${region}-${environment}-verbose.png \ + terraform graph -draw-cycles -verbose | dot -Tpng > build/${project}-${account_id}-${region}-${environment}-verbose.png \ || error_and_die "Terraform verbose graph generation failed"; exit 0; ;; @@ -644,7 +733,7 @@ case "${action}" in plan_file_name="${component_name}_${build_id}.tfplan"; plan_file_remote_key="${backend_prefix}/plans/${plan_file_name}"; - aws s3 cp s3://${bucket}/${plan_file_remote_key} build/${plan_file_name} \ + ${storage_cp_cmd} ${storage_url}${bucket}/${plan_file_remote_key} build/${plan_file_name} \ || error_and_die "Plan file download from S3 failed (s3://${bucket}/${plan_file_remote_key})"; apply_plan="build/${plan_file_name}"; diff --git a/bootstrap/.terraform-version b/bootstrap/.terraform-version index c112f0e..0521cad 100644 --- a/bootstrap/.terraform-version +++ b/bootstrap/.terraform-version @@ -1 +1 @@ -latest:^0.11 +0.11.10 diff --git a/bootstrap/outputs.tf b/bootstrap/aws/outputs.tf similarity index 100% rename from bootstrap/outputs.tf rename to bootstrap/aws/outputs.tf diff --git a/bootstrap/provider_aws.tf b/bootstrap/aws/provider_aws.tf similarity index 91% rename from bootstrap/provider_aws.tf rename to bootstrap/aws/provider_aws.tf index 10c9758..272199d 100644 --- a/bootstrap/provider_aws.tf +++ b/bootstrap/aws/provider_aws.tf @@ -7,6 +7,6 @@ provider "aws" { # specified in the environment variables. # This helps to prevent accidents. 
allowed_account_ids = [ - "${var.aws_account_id}", + "${var.account_id}", ] } diff --git a/bootstrap/s3_bucket.tf b/bootstrap/aws/s3_bucket.tf similarity index 89% rename from bootstrap/s3_bucket.tf rename to bootstrap/aws/s3_bucket.tf index 0ecb741..5731818 100644 --- a/bootstrap/s3_bucket.tf +++ b/bootstrap/aws/s3_bucket.tf @@ -30,10 +30,10 @@ resource "aws_s3_bucket" "bucket" { # This does not use default tag map merging because bootstrapping is special # You should use default tag map merging elsewhere tags { - "Name" = "Terraform Scaffold State File Bucket for account ${var.aws_account_id} in region ${var.region}" + "Name" = "Terraform Scaffold State File Bucket for account ${var.account_id} in region ${var.region}" "Environment" = "${var.environment}" "Project" = "${var.project}" "Component" = "${var.component}" - "Account" = "${var.aws_account_id}" + "Account" = "${var.account_id}" } } diff --git a/bootstrap/variables.tf b/bootstrap/aws/variables.tf similarity index 96% rename from bootstrap/variables.tf rename to bootstrap/aws/variables.tf index c0e26ec..25d72a1 100644 --- a/bootstrap/variables.tf +++ b/bootstrap/aws/variables.tf @@ -3,7 +3,7 @@ variable "project" { description = "The name of the Project we are bootstrapping terraformscaffold for" } -variable "aws_account_id" { +variable "account_id" { type = "string" description = "The AWS Account ID into which we are bootstrapping terraformscaffold" } diff --git a/bootstrap/azurerm/outputs.tf b/bootstrap/azurerm/outputs.tf new file mode 100644 index 0000000..e9939ce --- /dev/null +++ b/bootstrap/azurerm/outputs.tf @@ -0,0 +1,3 @@ +output "bucket_name" { + value = "${azurerm_storage_container.container.id}" +} diff --git a/bootstrap/azurerm/provider_azure.tf b/bootstrap/azurerm/provider_azure.tf new file mode 100644 index 0000000..a528d57 --- /dev/null +++ b/bootstrap/azurerm/provider_azure.tf @@ -0,0 +1 @@ +provider "azurerm" {} diff --git a/bootstrap/azurerm/storage_container.tf 
b/bootstrap/azurerm/storage_container.tf new file mode 100644 index 0000000..ec2baa6 --- /dev/null +++ b/bootstrap/azurerm/storage_container.tf @@ -0,0 +1,60 @@ +/* resource "aws_s3_bucket" "bucket" { + bucket = "${var.bucket_name}" + acl = "private" + + force_destroy = "false" + + versioning { + enabled = "true" + } + + lifecycle_rule { + prefix = "/" + enabled = "true" + + noncurrent_version_transition { + days = "30" + storage_class = "STANDARD_IA" + } + + noncurrent_version_transition { + days = "60" + storage_class = "GLACIER" + } + + noncurrent_version_expiration { + days = "90" + } + } + + # This does not use default tag map merging because bootstrapping is special + # You should use default tag map merging elsewhere + tags { + "Name" = "Terraform Scaffold State File Bucket for account ${var.aws_account_id} in region ${var.region}" + "Environment" = "${var.environment}" + "Project" = "${var.project}" + "Component" = "${var.component}" + "Account" = "${var.aws_account_id}" + } +} +*/ + +resource "azurerm_resource_group" "container" { + name = "${lower(var.bucket_name)}" + location = "${var.region}" +} + +resource "azurerm_storage_account" "container" { + name = "${var.project}${var.region}tfstate" + resource_group_name = "${azurerm_resource_group.container.name}" + location = "${var.region}" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_container" "container" { + name = "${lower(var.bucket_name)}" + resource_group_name = "${azurerm_resource_group.container.name}" + storage_account_name = "${azurerm_storage_account.container.name}" + container_access_type = "private" +} diff --git a/bootstrap/azurerm/variables.tf b/bootstrap/azurerm/variables.tf new file mode 100644 index 0000000..15f3ee3 --- /dev/null +++ b/bootstrap/azurerm/variables.tf @@ -0,0 +1,31 @@ +variable "project" { + type = "string" + description = "The name of the Project we are bootstrapping terraformscaffold for" +} + +variable "account_id" { + 
type = "string" + description = "The Azure Subscription ID into which we are bootstrapping terraformscaffold" +} + +variable "region" { + type = "string" + description = "The Azure Region into which we are bootstrapping terraformscaffold" +} + +variable "environment" { + type = "string" + description = "The name of the environment for the bootstrapping process; which is always bootstrap" + default = "bootstrap" +} + +variable "component" { + type = "string" + description = "The name of the component for the bootstrapping process; which is always bootstrap" + default = "bootstrap" +} + +variable "bucket_name" { + type = "string" + description = "The name to use for the terraformscaffold bucket" +} diff --git a/bootstrap/gcloud/gcp_bucket.tf b/bootstrap/gcloud/gcp_bucket.tf new file mode 100644 index 0000000..b253df3 --- /dev/null +++ b/bootstrap/gcloud/gcp_bucket.tf @@ -0,0 +1,22 @@ +resource "google_storage_bucket" "bucket" { + name = "${var.bucket_name}" + project = "${var.project}" + + location = "${var.region}" + storage_class = "REGIONAL" + + force_destroy = "false" + + versioning { enabled = "true" } + + + # This does not use default tag map merging because bootstrapping is special + # You should use default tag map merging elsewhere +# labels = { +# "Name" = "Terraform Scaffold State File Bucket for account ${var.account_id} in region ${var.region}" +# "Environment" = "${var.environment}" +# "Account" = "${var.account_id}" +# "Component" = "${var.component}" +# } + } + diff --git a/bootstrap/gcloud/outputs.tf b/bootstrap/gcloud/outputs.tf new file mode 100644 index 0000000..03a4ef6 --- /dev/null +++ b/bootstrap/gcloud/outputs.tf @@ -0,0 +1,3 @@ +output "bucket_name" { + value = "${google_storage_bucket.bucket.id}" +} diff --git a/bootstrap/gcloud/provider_gcp.tf b/bootstrap/gcloud/provider_gcp.tf new file mode 100755 index 0000000..5c9fa6f --- /dev/null +++ b/bootstrap/gcloud/provider_gcp.tf @@ -0,0 +1,3 @@ +provider "google" { + region = "${var.region}" +} 
diff --git a/bootstrap/gcloud/variables.tf b/bootstrap/gcloud/variables.tf new file mode 100644 index 0000000..25d72a1 --- /dev/null +++ b/bootstrap/gcloud/variables.tf @@ -0,0 +1,31 @@ +variable "project" { + type = "string" + description = "The name of the Project we are bootstrapping terraformscaffold for" +} + +variable "account_id" { + type = "string" + description = "The AWS Account ID into which we are bootstrapping terraformscaffold" +} + +variable "region" { + type = "string" + description = "The AWS Region into which we are bootstrapping terraformscaffold" +} + +variable "environment" { + type = "string" + description = "The name of the environment for the bootstrapping process; which is always bootstrap" + default = "bootstrap" +} + +variable "component" { + type = "string" + description = "The name of the component for the bootstrapping process; which is always bootstrap" + default = "bootstrap" +} + +variable "bucket_name" { + type = "string" + description = "The name to use for the terraformscaffold bucket" +}