diff --git a/.deployment/prod_environment.yml b/.deployment/prod_environment.yml index 007f7ab..d44b79f 100644 --- a/.deployment/prod_environment.yml +++ b/.deployment/prod_environment.yml @@ -19,9 +19,9 @@ dependencies: - pystac-client - planetary-computer - fastapi <0.108.0 #to avoid AssertionError bug w/ titiler - - paramiko + - smart_open - uvicorn - pip - pip: - - titiler.core - - python-multipart \ No newline at end of file + - titiler.core + - python-multipart diff --git a/.deployment/tofu/main.tf b/.deployment/tofu/main.tf index f266e9f..1a96044 100644 --- a/.deployment/tofu/main.tf +++ b/.deployment/tofu/main.tf @@ -27,29 +27,29 @@ provider "google" { } data "google_project" "project" {} - -# Get the one secret we need - ssh key -data "google_secret_manager_secret_version" "burn_sftp_ssh_keys" { - secret = "burn_sftp_ssh_keys" -} +data "aws_region" "current" {} +data "aws_caller_identity" "current" {} locals { - ssh_pairs = jsondecode(data.google_secret_manager_secret_version.burn_sftp_ssh_keys.secret_data) google_project_number = data.google_project.project.number + aws_account_id = data.aws_caller_identity.current.account_id + aws_region = data.aws_region.current.name + # oidc_provider_domain_url = "https://accounts.google.com" + oidc_provider_domain_url = "accounts.google.com" + gcp_cloud_run_client_id = "117526146749746854545" ## This is the ClientID of the cloud run instance, and can't be output from terraform! } - # Initialize the modules -module "sftp" { - source = "./modules/sftp" - ssh_pairs = local.ssh_pairs +module "static_io" { + source = "./modules/static_io" google_project_number = local.google_project_number + gcp_service_account_s3_email = module.burn_backend.gcp_service_account_s3_email + gcp_cloud_run_client_id = local.gcp_cloud_run_client_id + aws_account_id = local.aws_account_id + oidc_provider_domain_url = local.oidc_provider_domain_url } module "burn_backend" { source = "./modules/burn_backend" - ssh_pairs = local.ssh_pairs google_project_number = local.google_project_number - sftp_server_endpoint = module.sftp.sftp_server_endpoint - sftp_admin_username = module.sftp.sftp_admin_username } \ No newline at end of file diff --git a/.deployment/tofu/modules/burn_backend/main.tf b/.deployment/tofu/modules/burn_backend/main.tf index afb5011..7689b03 100644 --- a/.deployment/tofu/modules/burn_backend/main.tf +++ b/.deployment/tofu/modules/burn_backend/main.tf @@ -110,14 +110,6 @@ resource "google_cloud_run_v2_service" "tf-rest-burn-severity" { name = "ENV" value = "CLOUD" } - env { - name = "SFTP_SERVER_ENDPOINT" - value = var.sftp_server_endpoint - } - env { - name = "SFTP_ADMIN_USERNAME" - value = var.sftp_admin_username - } env { name = "CPL_VSIL_CURL_ALLOWED_EXTENSIONS" value = ".tif,.TIF,.tiff" @@ -225,6 +217,9 @@ resource "google_service_account_iam_binding" "workload_identity_user" { ] } +## TODO: Hardcoded project string and others - now that tofu outputs are set up, make more general +## Will be helpful as we move to other projects and environments + # Create the IAM service account for GitHub Actions resource "google_service_account" "github_actions" { account_id = "github-actions-service-account" @@ -237,7 +232,7 @@ resource "google_service_account" "burn-backend-service" { account_id = "burn-backend-service" display_name = "Cloud Run Service Account for burn backend" - description = "This service account is used by the Cloud Run service to access GCP Secrets Manager" + description = "This service
account is used by the Cloud Run service to access GCP Secrets Manager and authenticate with OIDC for AWS S3 access" project = "dse-nps" } @@ -253,6 +248,12 @@ resource "google_project_iam_member" "log_writer" { member = "serviceAccount:${google_service_account.burn-backend-service.email}" } +resource "google_project_iam_member" "oidc_token_creator" { + project = "dse-nps" + role = "roles/iam.serviceAccountTokenCreator" + member = "serviceAccount:${google_service_account.burn-backend-service.email}" +} + # Give the service account permissions to deploy to Cloud Run, and to Cloud Build, and to the Workload Identity Pool resource "google_project_iam_member" "run_admin" { project = "dse-nps" diff --git a/.deployment/tofu/modules/burn_backend/outputs.tf b/.deployment/tofu/modules/burn_backend/outputs.tf index e658c1e..21b978e 100644 --- a/.deployment/tofu/modules/burn_backend/outputs.tf +++ b/.deployment/tofu/modules/burn_backend/outputs.tf @@ -1,4 +1,14 @@ output "burn_backend_server_endpoint" { description = "The endpoint of the Cloud Run burn-backend service" value = google_cloud_run_v2_service.tf-rest-burn-severity.uri -} \ No newline at end of file +} + +output "burn_backend_server_uuid" { + description = "The UUID of the Cloud Run service" + value = google_cloud_run_v2_service.tf-rest-burn-severity.uid +} + +output "gcp_service_account_s3_email" { + description = "The email of the service account used by the backend service on GCP Cloud Run" + value = google_service_account.burn-backend-service.email +} diff --git a/.deployment/tofu/modules/burn_backend/variables.tf b/.deployment/tofu/modules/burn_backend/variables.tf index 3bb78e0..1c221ff 100644 --- a/.deployment/tofu/modules/burn_backend/variables.tf +++ b/.deployment/tofu/modules/burn_backend/variables.tf @@ -1,19 +1,4 @@ -variable "ssh_pairs" { - description = "SSH private/public key pairs for the normie and admin user" - type = any -} - variable "google_project_number" { description = "Google project number" type = string } - -variable "sftp_server_endpoint" { - description = "The endpoint of the SFTP server" - type = string -} - -variable "sftp_admin_username" { - description = "The username of the admin user" - type = string -} \ No newline at end of file diff --git a/.deployment/tofu/modules/sftp/main.tf b/.deployment/tofu/modules/sftp/main.tf deleted file mode 100644 index d2b3439..0000000 --- a/.deployment/tofu/modules/sftp/main.tf +++ /dev/null @@ -1,343 +0,0 @@ -### AWS ### - -# Get the AWS region for the current workspace -data "aws_region" "current" {} - -# Create the CloudWatch logs policy -data "aws_iam_policy_document" "cloudwatch_logs_policy" { - statement { - actions = [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:PutLogEvents", - "logs:DescribeLogStreams", - ] - - resources = ["arn:aws:logs:*:*:*"] - } -} - -data "aws_iam_policy_document" "cloudwatch_logs_role" { - statement { - actions = ["sts:AssumeRole"] - principals { - type = "Service" - identifiers = ["transfer.amazonaws.com"] - } - } -} - -resource "aws_iam_policy" "cloudwatch_logs_policy" { - name = "cloudwatch_logs_policy" - description = "CloudWatch Logs policy for AWS Transfer logging" - policy = data.aws_iam_policy_document.cloudwatch_logs_policy.json -} - -# Create the IAM role for CloudWatch logging -resource "aws_iam_role" "cloudwatch_logs_role" { - name = "cloudwatch_logs_role" - assume_role_policy = data.aws_iam_policy_document.cloudwatch_logs_role.json -} - -# Attach the CloudWatch logs policy to the new role -resource 
"aws_iam_role_policy_attachment" "cloudwatch_logs_policy_attachment" { - role = aws_iam_role.cloudwatch_logs_role.name - policy_arn = aws_iam_policy.cloudwatch_logs_policy.arn -} - -resource "aws_cloudwatch_log_group" "transfer_log_group" { - name = "/aws/transfer/${aws_transfer_server.tf-sftp-burn-severity.id}" - retention_in_days = 14 -} - -# # Create a security group for the Transfer Family server, to allow inbound traffic from GCP -# resource "aws_security_group" "sftp_sg" { -# name = "sftp_sg" -# description = "Allow inbound traffic from GCP Cloud Run service" -# vpc_id = aws_vpc.sftp_vpc.id - -# ingress { -# from_port = 22 # SFTP uses port 22 -# to_port = 22 -# protocol = "tcp" -# cidr_blocks = ["10.3.0.0/28"] # CIDR range of the GCP Cloud Run instance -# } - -# egress { -# from_port = 0 -# to_port = 0 -# protocol = "-1" -# cidr_blocks = ["0.0.0.0/0"] -# } -# } - -# Create a VPC endpoint for the Transfer Family server - -# resource "aws_vpc" "sftp_vpc" { -# cidr_block = "10.0.0.0/16" -# } - -# resource "aws_subnet" "sftp_subnet" { -# vpc_id = aws_vpc.sftp_vpc.id -# cidr_block = "10.0.1.0/24" -# } - -# resource "aws_vpc_endpoint" "sftp_endpoint" { -# vpc_id = aws_vpc.sftp_vpc.id -# service_name = "com.amazonaws.${data.aws_region.current.name}.transfer.server" -# vpc_endpoint_type = "Interface" -# subnet_ids = [aws_subnet.sftp_subnet.id] -# security_group_ids = [aws_security_group.sftp_sg.id] -# } - - -# First the server itself -resource "aws_transfer_server" "tf-sftp-burn-severity" { - identity_provider_type = "SERVICE_MANAGED" - protocols = ["SFTP"] - domain = "S3" - endpoint_type = "PUBLIC" - logging_role = aws_iam_role.cloudwatch_logs_role.arn -} - -# Then, the s3 bucket for the server -resource "aws_s3_bucket" "burn-severity-backend" { - bucket = "burn-severity-backend" -} - -resource "aws_s3_bucket_versioning" "burn-severity-backend" { - bucket = aws_s3_bucket.burn-severity-backend.id - versioning_configuration { - status = "Enabled" - } -} - -resource "aws_s3_bucket_cors_configuration" "burn_severity_backend_cors" { - bucket = aws_s3_bucket.burn-severity-backend.bucket - - cors_rule { - allowed_headers = ["*"] - allowed_methods = ["GET"] - allowed_origins = ["*"] - max_age_seconds = 3000 - } -} - -data "aws_iam_policy_document" "burn-severity-backend-policy" { - statement { - sid = "PublicReadGetObject" - effect = "Allow" - actions = ["s3:GetObject"] - resources = ["${aws_s3_bucket.burn-severity-backend.arn}/*"] - - principals { - type = "*" - identifiers = ["*"] - } - } -} - -resource "aws_s3_bucket_policy" "burn-severity-backend-policy" { - bucket = aws_s3_bucket.burn-severity-backend.id - policy = data.aws_iam_policy_document.burn-severity-backend-policy.json -} - -resource "aws_s3_bucket_ownership_controls" "burn-severity-backend" { - bucket = aws_s3_bucket.burn-severity-backend.id - rule { - object_ownership = "BucketOwnerPreferred" - } -} - -resource "aws_s3_bucket_public_access_block" "burn-severity-backend" { - bucket = aws_s3_bucket.burn-severity-backend.id - - block_public_acls = false - block_public_policy = false - ignore_public_acls = false - restrict_public_buckets = false -} - -resource "aws_s3_bucket_acl" "burn-severity-backend" { - depends_on = [ - aws_s3_bucket_ownership_controls.burn-severity-backend, - aws_s3_bucket_public_access_block.burn-severity-backend, - ] - - bucket = aws_s3_bucket.burn-severity-backend.id - acl = "public-read" -} - -resource "aws_s3_bucket_website_configuration" "burn-severity-backend" { - bucket = 
aws_s3_bucket.burn-severity-backend.id - index_document { - suffix = "index.html" - } - error_document { - key = "error.html" - } -} - -// Add the contents of ../assets to the bucket -resource "aws_s3_bucket_object" "assets" { - for_each = fileset("../assets", "**/*") - - bucket = aws_s3_bucket.burn-severity-backend.id - key = each.value - source = "../assets/${each.value}" -} - -# Then, the user for the server, allowing it access to Transfer Family - -data "aws_iam_policy_document" "assume_role" { - statement { - effect = "Allow" - - principals { - type = "Service" - identifiers = ["transfer.amazonaws.com"] - } - - actions = ["sts:AssumeRole"] - } -} - -resource "aws_iam_role" "admin" { - name = "tf-sftp-admin-iam-role" - assume_role_policy = data.aws_iam_policy_document.assume_role.json -} - -# Then, allow it to actually access the S3 assets themselves -# Define the policy - -data "aws_iam_policy_document" "s3_policy" { - statement { - sid = "ReadWriteS3" - effect = "Allow" - actions = [ - "s3:ListBucket", - ] - resources = [ - "arn:aws:s3:::burn-severity-backend", - ] - } - - statement { - effect = "Allow" - actions = [ - "s3:PutObject", - "s3:GetObject", - "s3:GetObjectTagging", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:GetObjectVersion", - "s3:GetObjectVersionTagging", - "s3:GetObjectACL", - "s3:PutObjectACL", - ] - resources = [ - "arn:aws:s3:::burn-severity-backend/*", - ] - } -} - -# Create the s3_policy -resource "aws_iam_policy" "s3_admin_policy" { - name = "s3_admin_policy" - description = "S3 policy for admin user" - policy = data.aws_iam_policy_document.s3_policy.json -} - -# Attach the policy to the role -resource "aws_iam_role_policy_attachment" "s3_policy_attachment" { - role = aws_iam_role.admin.name - policy_arn = aws_iam_policy.s3_admin_policy.arn -} - -# Add the necessary session policy to the user -data "aws_iam_policy_document" "session_policy" { - statement { - sid = "AllowListingOfUserFolder" - effect = "Allow" - actions = [ - "s3:ListBucket", - ] - resources = [ - "arn:aws:s3:::burn-severity-backend", - ] - condition { - test = "StringLike" - variable = "s3:prefix" - values = [ - "/public/*", - "/public", - "/" - ] - } - } - - statement { - sid = "HomeDirObjectAccess" - effect = "Allow" - actions = [ - "s3:PutObject", - "s3:GetObject", - "s3:DeleteObject", - "s3:GetObjectVersion", - ] - resources = [ - "arn:aws:s3:::burn-severity-backend/*", - ] - } -} - -# Finally, create the user within Transfer Family -resource "aws_transfer_user" "tf-sftp-burn-severity" { - server_id = aws_transfer_server.tf-sftp-burn-severity.id - user_name = "admin" - role = aws_iam_role.admin.arn - home_directory_mappings { - entry = "/" - target = "/burn-severity-backend/public" - } - home_directory_type = "LOGICAL" - policy = data.aws_iam_policy_document.session_policy.json -} - -resource "aws_transfer_ssh_key" "sftp_ssh_key_public" { - depends_on = [aws_transfer_user.tf-sftp-burn-severity] - server_id = aws_transfer_server.tf-sftp-burn-severity.id - user_name = "admin" - body = var.ssh_pairs["SSH_KEY_ADMIN_PUBLIC"] -} - - -## TODO [#4]: This is OIDC stuff, which is not yet working -# Set up STS to allow the GCP server to assume a role for AWS secrets - -# data "aws_iam_policy_document" "assume_role_policy" { -# statement { -# actions = ["sts:AssumeRoleWithWebIdentity"] -# effect = "Allow" - -# principals { -# type = "Federated" -# identifiers = ["arn:aws:iam::557418946771:oidc-provider/https://${var.google_project_number}"] -# } - -# condition { -# test = "StringEquals" -# 
variable = "https://${var.google_project_number}.svc.id.goog:sub" - -# values = [ -# "system:serviceaccount:${var.google_project_number}.svc.id.goog[default/${google_service_account.burn-backend-service.account_id}]" -# ] -# } -# } -# } - -# resource "aws_iam_role" "role" { -# name = "aws_secrets_access_role" -# assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json -# } - diff --git a/.deployment/tofu/modules/sftp/outputs.tf b/.deployment/tofu/modules/sftp/outputs.tf deleted file mode 100644 index e3c13bc..0000000 --- a/.deployment/tofu/modules/sftp/outputs.tf +++ /dev/null @@ -1,9 +0,0 @@ -output "sftp_server_endpoint" { - description = "The endpoint of the SFTP server" - value = aws_transfer_server.tf-sftp-burn-severity.endpoint -} - -output "sftp_admin_username" { - description = "The username of the SFTP admin user" - value = aws_transfer_user.tf-sftp-burn-severity.user_name -} \ No newline at end of file diff --git a/.deployment/tofu/modules/sftp/variables.tf b/.deployment/tofu/modules/sftp/variables.tf deleted file mode 100644 index 34157e7..0000000 --- a/.deployment/tofu/modules/sftp/variables.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "ssh_pairs" { - description = "SSH private/public key pairs for the normie and admin user" - type = any -} - -variable "google_project_number" { - description = "Google project number" - type = string -} \ No newline at end of file diff --git a/.deployment/tofu/modules/static_io/main.tf b/.deployment/tofu/modules/static_io/main.tf new file mode 100644 index 0000000..8acf01e --- /dev/null +++ b/.deployment/tofu/modules/static_io/main.tf @@ -0,0 +1,304 @@ +### AWS ### + +# Then, the s3 bucket for the server +resource "aws_s3_bucket" "burn-severity-backend" { + bucket = "burn-severity-backend" +} + +resource "aws_s3_bucket_versioning" "burn-severity-backend" { + bucket = aws_s3_bucket.burn-severity-backend.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket_cors_configuration" "burn_severity_backend_cors" { + bucket = aws_s3_bucket.burn-severity-backend.bucket + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["GET"] + allowed_origins = ["*"] + max_age_seconds = 3000 + } +} + +data "aws_iam_policy_document" "burn-severity-backend-policy" { + statement { + sid = "PublicReadGetObject" + effect = "Allow" + actions = ["s3:GetObject"] + resources = ["${aws_s3_bucket.burn-severity-backend.arn}/*"] + + principals { + type = "*" + identifiers = ["*"] + } + } +} + +resource "aws_s3_bucket_policy" "burn-severity-backend-policy" { + bucket = aws_s3_bucket.burn-severity-backend.id + policy = data.aws_iam_policy_document.burn-severity-backend-policy.json +} + +resource "aws_s3_bucket_ownership_controls" "burn-severity-backend" { + bucket = aws_s3_bucket.burn-severity-backend.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_public_access_block" "burn-severity-backend" { + bucket = aws_s3_bucket.burn-severity-backend.id + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket_acl" "burn-severity-backend" { + depends_on = [ + aws_s3_bucket_ownership_controls.burn-severity-backend, + aws_s3_bucket_public_access_block.burn-severity-backend, + ] + + bucket = aws_s3_bucket.burn-severity-backend.id + acl = "public-read" +} + +resource "aws_s3_bucket_website_configuration" "burn-severity-backend" { + bucket = aws_s3_bucket.burn-severity-backend.id + index_document { + suffix 
= "index.html" + } + error_document { + key = "error.html" + } +} + +// Add the contents of ../assets to the bucket +resource "aws_s3_bucket_object" "assets" { + for_each = fileset("../assets", "**/*") + + bucket = aws_s3_bucket.burn-severity-backend.id + key = each.value + source = "../assets/${each.value}" +} + +# Then, the user for the server, allowing it access to Transfer Family + +# data "aws_iam_policy_document" "assume_role" { +# statement { +# effect = "Allow" + +# principals { +# type = "Service" +# identifiers = ["transfer.amazonaws.com"] +# } + +# actions = ["sts:AssumeRole"] +# } +# } + +# resource "aws_iam_role" "admin" { +# name = "tf-sftp-admin-iam-role" +# assume_role_policy = data.aws_iam_policy_document.assume_role.json +# } + +# data "aws_iam_policy_document" "s3_policy" { +# statement { +# sid = "ReadWriteS3" +# effect = "Allow" +# actions = [ +# "s3:ListBucket", +# ] +# resources = [ +# "arn:aws:s3:::burn-severity-backend", +# ] +# } + +# statement { +# effect = "Allow" +# actions = [ +# "s3:PutObject", +# "s3:GetObject", +# "s3:GetObjectTagging", +# "s3:DeleteObject", +# "s3:DeleteObjectVersion", +# "s3:GetObjectVersion", +# "s3:GetObjectVersionTagging", +# "s3:GetObjectACL", +# "s3:PutObjectACL", +# ] +# resources = [ +# "arn:aws:s3:::burn-severity-backend/*", +# ] +# } +# } + +# # Create the s3_policy +# resource "aws_iam_policy" "s3_admin_policy" { +# name = "s3_admin_policy" +# description = "S3 policy for admin user" +# policy = data.aws_iam_policy_document.s3_policy.json +# } + +# # Attach the policy to the role +# resource "aws_iam_role_policy_attachment" "s3_policy_attachment" { +# role = aws_iam_role.admin.name +# policy_arn = aws_iam_policy.s3_admin_policy.arn +# } + +# # Add the necessary session policy to the user +# data "aws_iam_policy_document" "session_policy" { +# statement { +# sid = "AllowListingOfUserFolder" +# effect = "Allow" +# actions = [ +# "s3:ListBucket", +# ] +# resources = [ +# "arn:aws:s3:::burn-severity-backend", +# ] +# condition { +# test = "StringLike" +# variable = "s3:prefix" +# values = [ +# "/public/*", +# "/public", +# "/" +# ] +# } +# } + +# statement { +# sid = "HomeDirObjectAccess" +# effect = "Allow" +# actions = [ +# "s3:PutObject", +# "s3:GetObject", +# "s3:DeleteObject", +# "s3:GetObjectVersion", +# ] +# resources = [ +# "arn:aws:s3:::burn-severity-backend/*", +# ] +# } +# } + +# # Finally, create the user within Transfer Family +# resource "aws_transfer_user" "tf-sftp-burn-severity" { +# server_id = aws_transfer_server.tf-sftp-burn-severity.id +# user_name = "admin" +# role = aws_iam_role.admin.arn +# home_directory_mappings { +# entry = "/" +# target = "/burn-severity-backend/public" +# } +# home_directory_type = "LOGICAL" +# policy = data.aws_iam_policy_document.session_policy.json +# } + +# resource "aws_transfer_ssh_key" "sftp_ssh_key_public" { +# depends_on = [aws_transfer_user.tf-sftp-burn-severity] +# server_id = aws_transfer_server.tf-sftp-burn-severity.id +# user_name = "admin" +# body = var.ssh_pairs["SSH_KEY_ADMIN_PUBLIC"] +# } + + +## TODO [#4]: This is OIDC stuff, which is not yet working +# Set up STS to allow the GCP server to assume a role for AWS secrets + +# Defines who can assume the role. 
+# Confusing string mapping for the OIDC provider URL (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_aud) +# example paylod of our token looks like:/ + # { + # "aud": "sts.amazonaws.com", + # "azp": "117526146749746854545", + # "email": "burn-backend-service@dse-nps.iam.gserviceaccount.com", + # "email_verified": true, + # "exp": 1706551963, + # "iat": 1706548363, + # "iss": "https://accounts.google.com", + # "sub": "117526146749746854545" + # } +# AWS says: aud -> azp, oaud -> aud, sub -> sub + +data "aws_iam_policy_document" "oidc_assume_role_policy" { + statement { + actions = [ + "sts:AssumeRoleWithWebIdentity" + ] + effect = "Allow" + + principals { + type = "Federated" + # identifiers = ["arn:aws:iam::${var.aws_account_id}:oidc-provider/${var.oidc_provider_domain_url}"] + identifiers = ["accounts.google.com"] + } + + condition { + test = "StringEquals" + variable = "${var.oidc_provider_domain_url}:sub" + + values = [ + "${var.gcp_cloud_run_client_id}" + ] + } + + condition { + test = "StringEquals" + variable = "${var.oidc_provider_domain_url}:aud" + + values = [ + "${var.gcp_cloud_run_client_id}" + ] + } + + condition { + test = "StringEquals" + variable = "${var.oidc_provider_domain_url}:oaud" + + values = [ + "sts.amazonaws.com" + ] + } + } +} + +# Defines what actions can be done once the role is assumed. +data "aws_iam_policy_document" "session_policy" { + statement { + sid = "HomeDirObjectAccess" + effect = "Allow" + actions = [ + "s3:ListBucket", + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:GetObjectVersion", + ] + resources = [ + "arn:aws:s3:::burn-severity-backend/*", + ] + } +} + +# Create the IAM role with both the assume-role and the session policy. +resource "aws_iam_role" "aws_s3_from_gcp" { + name = "aws_s3_from_gcp" + assume_role_policy = data.aws_iam_policy_document.oidc_assume_role_policy.json + + # Inline policy for session + inline_policy { + name = "session_policy" + policy = data.aws_iam_policy_document.session_policy.json + } + + tags = { + project = "burn-severity-backend" + } +} diff --git a/.deployment/tofu/modules/static_io/outputs.tf b/.deployment/tofu/modules/static_io/outputs.tf new file mode 100644 index 0000000..3e9e743 --- /dev/null +++ b/.deployment/tofu/modules/static_io/outputs.tf @@ -0,0 +1,4 @@ +output "s3_from_gcp_arn" { + value = aws_iam_role.aws_s3_from_gcp.arn + description = "The ARN of the IAM role for S3 Access from GCP" +} diff --git a/.deployment/tofu/modules/static_io/variables.tf b/.deployment/tofu/modules/static_io/variables.tf new file mode 100644 index 0000000..09223ec --- /dev/null +++ b/.deployment/tofu/modules/static_io/variables.tf @@ -0,0 +1,24 @@ +variable "google_project_number" { + description = "Google project number" + type = string +} + +variable "gcp_service_account_s3_email" { + description = "Google service account email for GCP's access to S3" + type = string +} + +variable "aws_account_id" { + description = "AWS account ID" + type = string +} + +variable "oidc_provider_domain_url" { + description = "OIDC provider domain URL for GCP" + type = string +} + +variable "gcp_cloud_run_client_id" { + description = "GCP Cloud Run client id for burn backend service" + type = string +} \ No newline at end of file diff --git a/.deployment/tofu/outputs.tf b/.deployment/tofu/outputs.tf index 81078cf..d30b07e 100644 --- a/.deployment/tofu/outputs.tf +++ b/.deployment/tofu/outputs.tf @@ -1,14 +1,19 @@ -output "sftp_server_endpoint" { - description = "The endpoint of 
the SFTP server" - value = module.sftp.sftp_server_endpoint +output "gcp_cloud_run_endpoint" { + description = "The endpoint of the Cloud Run burn-backend service" + value = module.burn_backend.burn_backend_server_endpoint } -output "sftp_admin_username" { - description = "The username of the SFTP admin user" - value = module.sftp.sftp_admin_username +output "gcp_service_account_s3_email" { + description = "The email address of the Cloud Run burn-backend service account" + value = module.burn_backend.gcp_service_account_s3_email } -output "gcp_cloud_run_endpoint" { - description = "The endpoint of the Cloud Run burn-backend service" - value = module.burn_backend.burn_backend_server_endpoint +output "s3_from_gcp_arn" { + description = "The ARN of the IAM Role which allows GCP to access S3" + value = module.static_io.s3_from_gcp_arn +} + +output "gcp_cloud_run_uuid" { + description = "The UUID of the Cloud Run burn-backend service" + value = module.burn_backend.burn_backend_server_uuid } \ No newline at end of file diff --git a/.devcontainer/dev_environment.yml b/.devcontainer/dev_environment.yml index baefa34..123c184 100644 --- a/.devcontainer/dev_environment.yml +++ b/.devcontainer/dev_environment.yml @@ -27,8 +27,8 @@ dependencies: - planetary-computer - fastapi <0.108.0 #to avoid AssertionError bug w/ titiler - uvicorn - - paramiko - pip + - smart_open - pip: - - titiler.core - - python-multipart + - titiler.core + - python-multipart diff --git a/.devcontainer/prebuild/setup_opentofu.sh b/.devcontainer/prebuild/setup_opentofu.sh index 97d6f9c..5aef729 100755 --- a/.devcontainer/prebuild/setup_opentofu.sh +++ b/.devcontainer/prebuild/setup_opentofu.sh @@ -1,6 +1,6 @@ #!/bin/bash set -e -TOFU_VERSION="1.6.0-beta4" +TOFU_VERSION="1.6.1" OS="$(uname | tr '[:upper:]' '[:lower:]')" ARCH="$(uname -m | sed -e 's/aarch64/arm64/' -e 's/x86_64/amd64/')" TEMPDIR="$(mktemp -d)" diff --git a/.devcontainer/scripts/export_tofu_dotenv.sh b/.devcontainer/scripts/export_tofu_dotenv.sh index 3e2b526..ca0d995 100755 --- a/.devcontainer/scripts/export_tofu_dotenv.sh +++ b/.devcontainer/scripts/export_tofu_dotenv.sh @@ -2,8 +2,15 @@ cd /workspace/.deployment/tofu tofu init tofu refresh -export sftp_admin_username="$(tofu output sftp_admin_username)" -export sftp_server_endpoint="$(tofu output sftp_server_endpoint)" + +export gcp_cloud_run_endpoint="$(tofu output gcp_cloud_run_endpoint)" +export s3_from_gcp_arn="$(tofu output s3_from_gcp_arn)" + +# Remove quotes from the email to avoid issue with the impersonation below +export gcp_service_account_s3_email=$(tofu output gcp_service_account_s3_email | tr -d '"') + echo "# TOFU ENV VARS" >> /workspace/.devcontainer/.env -echo "SFTP_ADMIN_USERNAME=$sftp_admin_username" >> /workspace/.devcontainer/.env -echo "SFTP_SERVER_ENDPOINT=$sftp_server_endpoint" >> /workspace/.devcontainer/.env +echo "ENV=LOCAL" >> /workspace/.devcontainer/.env +echo "S3_FROM_GCP_ARN=$s3_from_gcp_arn" >> /workspace/.devcontainer/.env +echo "GCP_CLOUD_RUN_ENDPOINT=$gcp_cloud_run_endpoint" >> /workspace/.devcontainer/.env +echo "GCP_SERVICE_ACCOUNT_S3_EMAIL=$gcp_service_account_s3_email" >> /workspace/.devcontainer/.env \ No newline at end of file diff --git a/app.py b/app.py index 451916c..7cb7ac3 100644 --- a/app.py +++ b/app.py @@ -32,7 +32,7 @@ from titiler.core.errors import DEFAULT_STATUS_CODES, add_exception_handlers from src.lib.query_sentinel import Sentinel2Client -from src.util.sftp import SFTPClient +from src.util.cloud_static_io import CloudStaticIOClient from 
src.util.gcp_secrets import get_ssh_secret, get_mapbox_secret from src.util.ingest_burn_zip import ingest_esri_zip_file, shp_to_geojson from src.lib.titiler_algorithms import algorithms @@ -91,20 +91,10 @@ def check_dns(): ### DEPENDENCIES ### +def get_cloud_static_io_client(): + return CloudStaticIOClient('burn-severity-backend', "s3") -def get_sftp_client(): - SFTP_SERVER_ENDPOINT = os.getenv("SFTP_SERVER_ENDPOINT") - SFTP_ADMIN_USERNAME = os.getenv("SFTP_ADMIN_USERNAME") - SSH_SECRET = get_ssh_secret() - - logger.log_text(f"SFTP_SERVER_ENDPOINT: {SFTP_SERVER_ENDPOINT}") - logger.log_text(f"SFTP_ADMIN_USERNAME: {SFTP_ADMIN_USERNAME}") - logger.log_text(f"SSH_SECRET (trunc): {SSH_SECRET[:20]}") - - return SFTPClient(SFTP_SERVER_ENDPOINT, SFTP_ADMIN_USERNAME, SSH_SECRET) - - -def get_manifest(sfpt_client: SFTPClient = Depends(get_sftp_client)): +def get_manifest(sfpt_client: CloudStaticIOClient = Depends(get_cloud_static_io_client)): try: sfpt_client.connect() manifest = sfpt_client.get_manifest() @@ -118,20 +108,20 @@ def get_manifest(sfpt_client: SFTPClient = Depends(get_sftp_client)): ### API ENDPOINTS ### -@app.get("/api/query-satellite/available-cogs") -def available_cogs(sftp_client: SFTPClient = Depends(get_sftp_client)): - try: - sftp_client.update_available_cogs() - - response = { - "message": "updated available cogs", - "available_cogs": sftp_client.available_cogs, - } - logger.log_text(f"Available COGs updated: {sftp_client.available_cogs}") - return response, 200 - except Exception as e: - logger.log_text(f"Error: {e}") - return f"Error: {e}", 400 +# @app.get("/api/query-satellite/available-cogs") +# def available_cogs(cloud_static_io_client: CloudStaticIOClient = Depends(get_cloud_static_io_client)): +# try: +# cloud_static_io_client.update_available_cogs() + +# response = { +# "message": "updated available cogs", +# "available_cogs": cloud_static_io_client.available_cogs, +# } +# logger.log_text(f"Available COGs updated: {cloud_static_io_client.available_cogs}") +# return response, 200 +# except Exception as e: +# logger.log_text(f"Error: {e}") +# return f"Error: {e}", 400 class AnaylzeBurnPOSTBody(BaseModel): @@ -147,7 +137,7 @@ class AnaylzeBurnPOSTBody(BaseModel): # or something similar when the process is complete. Esp if the frontend remanins static. 
@app.post("/api/query-satellite/analyze-burn") def analyze_burn( - body: AnaylzeBurnPOSTBody, sftp_client: SFTPClient = Depends(get_sftp_client) + body: AnaylzeBurnPOSTBody, cloud_static_io_client: CloudStaticIOClient = Depends(get_cloud_static_io_client) ): geojson_boundary = json.loads(body.geojson) @@ -183,17 +173,17 @@ def analyze_burn( logger.log_text(f"Derived boundary for {fire_event_name}") # Upload the derived boundary - sftp_client.connect() + cloud_static_io_client.connect() with tempfile.NamedTemporaryFile(suffix=".geojson", delete=False) as tmp: tmp_geojson = tmp.name with open(tmp_geojson, "w") as f: f.write(geo_client.geojson_boundary.to_json()) - sftp_client.upload( + cloud_static_io_client.upload( source_local_path=tmp_geojson, - remote_path=f"{affiliation}/{fire_event_name}/boundary.geojson", + remote_path=f"public/{affiliation}/{fire_event_name}/boundary.geojson", ) - sftp_client.disconnect() + cloud_static_io_client.disconnect() # Return the derived boundary derived_boundary = geo_client.geojson_boundary.to_json() @@ -203,8 +193,7 @@ def analyze_burn( # but if not, should be refactored to use a context manager # save the cog to the FTP server - sftp_client.connect() - sftp_client.upload_fire_event( + cloud_static_io_client.upload_fire_event( metrics_stack=geo_client.metrics_stack, affiliation=affiliation, fire_event_name=fire_event_name, @@ -212,7 +201,6 @@ def analyze_burn( postfire_date_range=date_ranges["postfire"], derive_boundary=derive_boundary, ) - sftp_client.disconnect() logger.log_text(f"Cogs uploaded for {fire_event_name}") return JSONResponse( @@ -274,7 +262,7 @@ def get_ecoclass_info(ecoclassid: str = Query(...)): # refactor out the low level endpoints (/api) and rename others (this isn't really an `analysis` but it does compose a lot of logic like `analyze-burn`) @app.post("/api/query-soil/analyze-ecoclass") def analyze_ecoclass( - body: QuerySoilPOSTBody, sftp_client: SFTPClient = Depends(get_sftp_client) + body: QuerySoilPOSTBody, cloud_static_io_client: CloudStaticIOClient = Depends(get_cloud_static_io_client) ): fire_event_name = body.fire_event_name geojson = json.loads(body.geojson) @@ -344,12 +332,11 @@ def analyze_ecoclass( tmp_geojson_path = tmp.name with open(tmp_geojson_path, "w") as f: f.write(edit_ecoclass_geojson) - sftp_client.connect() - sftp_client.upload( + + cloud_static_io_client.upload( source_local_path=tmp_geojson_path, - remote_path=f"{affiliation}/{fire_event_name}/ecoclass_dominant_cover.geojson", + remote_path=f"public/{affiliation}/{fire_event_name}/ecoclass_dominant_cover.geojson", ) - sftp_client.disconnect() logger.log_text(f"Ecoclass GeoJSON uploaded for {fire_event_name}") return f"Ecoclass GeoJSON uploaded for {fire_event_name}", 200 @@ -364,7 +351,7 @@ async def upload_shapefile( fire_event_name: str = Form(...), affiliation: str = Form(...), file: UploadFile = File(...), - sftp_client: SFTPClient = Depends(get_sftp_client), + cloud_static_io_client: CloudStaticIOClient = Depends(get_cloud_static_io_client), ): try: # Read the file @@ -384,24 +371,20 @@ async def upload_shapefile( __shp_paths, geojson = valid_shp[0] # Upload the zip and a geojson to SFTP - sftp_client.connect() - - sftp_client.upload( + cloud_static_io_client.upload( source_local_path=tmp_zip, - remote_path=f"{affiliation}/{fire_event_name}/user_uploaded_{file.filename}", + remote_path=f"public/{affiliation}/{fire_event_name}/user_uploaded_{file.filename}", ) with tempfile.NamedTemporaryFile(suffix=".geojson", delete=False) as tmp: tmp_geojson = tmp.name 
with open(tmp_geojson, "w") as f: f.write(geojson) - sftp_client.upload( + cloud_static_io_client.upload( source_local_path=tmp_geojson, - remote_path=f"{affiliation}/{fire_event_name}/boundary.geojson", + remote_path=f"public/{affiliation}/{fire_event_name}/boundary.geojson", ) - sftp_client.disconnect() - return JSONResponse(status_code=200, content={"geojson": geojson}) except Exception as e: @@ -413,23 +396,17 @@ async def upload_drawn_aoi( fire_event_name: str = Form(...), affiliation: str = Form(...), geojson: str = Form(...), - sftp_client: SFTPClient = Depends(get_sftp_client), + cloud_static_io_client: CloudStaticIOClient = Depends(get_cloud_static_io_client), ): try: - # Upload the geojson to SFTP - sftp_client.connect() - with tempfile.NamedTemporaryFile(suffix=".geojson", delete=False) as tmp: tmp_geojson = tmp.name with open(tmp_geojson, "w") as f: f.write(geojson) - sftp_client.upload( + cloud_static_io_client.upload( source_local_path=tmp_geojson, - remote_path=f"{affiliation}/{fire_event_name}/boundary.geojson", + remote_path=f"public/{affiliation}/{fire_event_name}/boundary.geojson", ) - - sftp_client.disconnect() - return JSONResponse(status_code=200, content={"geojson": geojson}) except Exception as e: @@ -451,8 +428,10 @@ def serve_map( ): mapbox_token = get_mapbox_secret() - tileserver_endpoint = "https://tf-rest-burn-severity-ohi6r6qs2a-uc.a.run.app" - # tileserver_endpoint = 'http://localhost:5050' + tileserver_endpoint = os.getenv("GCP_CLOUD_RUN_ENDPOINT") + + ## TODO: Use Tofu Output to construct hardocded cog and geojson urls (in case we change s3 bucket name) + cog_url = f"https://burn-severity-backend.s3.us-east-2.amazonaws.com/public/{affiliation}/{fire_event_name}/{burn_metric}.tif" burn_boundary_geojson_url = f"https://burn-severity-backend.s3.us-east-2.amazonaws.com/public/{affiliation}/{fire_event_name}/boundary.geojson" ecoclass_geojson_url = f"https://burn-severity-backend.s3.us-east-2.amazonaws.com/public/{affiliation}/{fire_event_name}/ecoclass_dominant_cover.geojson" diff --git a/src/lib/query_sentinel.py b/src/lib/query_sentinel.py index 4db8d95..cbdcfc4 100644 --- a/src/lib/query_sentinel.py +++ b/src/lib/query_sentinel.py @@ -15,7 +15,7 @@ import os from .burn_severity import calc_burn_metrics, classify_burn from ..util.raster_to_poly import raster_mask_to_geojson -from src.util.sftp import SFTPClient +from src.util.cloud_static_io import CloudStaticIOClient SENTINEL2_PATH = "https://planetarycomputer.microsoft.com/api/stac/v1" diff --git a/src/util/sftp.py b/src/util/cloud_static_io.py similarity index 52% rename from src/util/sftp.py rename to src/util/cloud_static_io.py index 87ba1cd..25394e0 100644 --- a/src/util/sftp.py +++ b/src/util/cloud_static_io.py @@ -1,93 +1,124 @@ -import paramiko -from urllib.parse import urlparse -import io +import smart_open +import time import os -import tempfile -import logging import json import datetime import rasterio from rasterio.enums import Resampling import geopandas as gpd from google.cloud import logging as cloud_logging +import tempfile +import subprocess +import os +import boto3 +import google.auth +import requests +from google.auth.transport import requests as gcp_requests +from google.auth import impersonated_credentials, exceptions +class CloudStaticIOClient: + def __init__(self, bucket_name, provider): -# TODO [#9]: Convert to agnostic Boto client -# Use the slick smart-open library to handle S3 connections. 
This maintains the agnostic nature -# of sftp, not tied to any specific cloud provider, but is way more efficient than paramiko/sftp in terms of $$ -class SFTPClient: - def __init__(self, hostname, username, private_key, port=22): - """Constructor Method""" - self.connection = None - self.hostname = hostname - self.username = username - self.port = port - - private_key_file = io.StringIO(private_key) - self.private_key = paramiko.RSAKey.from_private_key(private_key_file) + self.env = os.environ.get("ENV") + self.role_arn = os.environ.get("S3_FROM_GCP_ARN") + self.service_account_email = os.environ.get("GCP_SERVICE_ACCOUNT_S3_EMAIL") + self.role_session_name = "burn-backend-session" - self.available_cogs = None + self.bucket_name = bucket_name # Set up logging logging_client = cloud_logging.Client(project="dse-nps") log_name = "burn-backend" self.logger = logging_client.logger(log_name) - # Route Paramiko logs to Google Cloud Logging - paramiko_logger = logging.getLogger("paramiko") - paramiko_logger.setLevel(logging.DEBUG) - paramiko_logger.addHandler( - cloud_logging.handlers.CloudLoggingHandler(logging_client, name=log_name) - ) + self.sts_client = boto3.client('sts') + + if provider == "s3": + self.prefix = f"s3://{self.bucket_name}" + else: + raise Exception(f"Provider {provider} not supported") + + self.iam_credentials = None + self.role_assumed_credentials = None + self.s3_session = None + self.validate_credentials() + + self.logger.log_text(f"Initialized CloudStaticIOClient for {self.bucket_name} with provider {provider}") + + def impersonate_service_account(self): + # Load the credentials of the user + source_credentials, project = google.auth.default() + + # Define the scopes of the impersonated credentials + target_scopes = ["https://www.googleapis.com/auth/cloud-platform"] - self.logger.log_text( - f"Initialized SFTPClient for {self.hostname} as {self.username}" + # Create the IAM credentials client for the impersonated service account + iam_credentials = impersonated_credentials.Credentials( + source_credentials=source_credentials, + target_principal=self.service_account_email, + target_scopes=target_scopes, + lifetime=3600 ) - def connect(self): - """Connects to the sftp server and returns the sftp connection object""" - try: - # Create SSH client - ssh_client = paramiko.SSHClient() - ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - - # Connect to the server - ssh_client.connect( - self.hostname, - port=self.port, - username=self.username, - pkey=self.private_key, + # Refresh the client + self.iam_credentials = iam_credentials + + def fetch_id_token(self, audience): + if not self.iam_credentials.valid: + # Refresh the credentials + self.iam_credentials.refresh(gcp_requests.Request()) + + # Make an authenticated HTTP request to the Google OAuth2 v1/token endpoint + url = f"https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/{self.service_account_email}:generateIdToken" + headers = {"Authorization": f"Bearer {self.iam_credentials.token}"} + body = {"audience": audience, "includeEmail": True} + response = requests.post(url, headers=headers, json=body) + + # Check the response + if response.status_code != 200: + raise exceptions.DefaultCredentialsError( + "Failed to fetch ID token: " + response.text ) - # Create SFTP client from SSH client - self.connection = ssh_client.open_sftp() + # Return the ID token + return response.json()["token"] - except Exception as err: - raise Exception(err) - finally: - print(f"Connected to {self.hostname} as {self.username}.") +
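The assume-role conditions in .deployment/tofu/modules/static_io/main.tf are evaluated against remapped claims of this token (accounts.google.com:aud is checked against the token's azp, :oaud against aud, and :sub against sub). A minimal debugging sketch for eyeballing those claims, assuming ENV=LOCAL with the variables written by export_tofu_dotenv.sh and that this module is importable as src.util.cloud_static_io; the token is decoded without signature verification, for inspection only:

```python
import base64
import json

from src.util.cloud_static_io import CloudStaticIOClient  # path assumed from this PR


def decode_jwt_payload(token: str) -> dict:
    """Decode a JWT payload without verifying the signature (debugging only)."""
    payload = token.split(".")[1]
    payload += "=" * (-len(payload) % 4)  # restore stripped base64 padding
    return json.loads(base64.urlsafe_b64decode(payload))


client = CloudStaticIOClient("burn-severity-backend", "s3")
claims = decode_jwt_payload(client.fetch_id_token(audience="sts.amazonaws.com"))

# azp and sub should equal the gcp_cloud_run_client_id local in tofu/main.tf,
# and aud should be "sts.amazonaws.com", matching the trust-policy conditions.
print(claims["azp"], claims["sub"], claims["aud"])
```

If azp or sub do not match the gcp_cloud_run_client_id local, AssumeRoleWithWebIdentity is denied before any S3 call is attempted.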
def validate_credentials(self): - def disconnect(self): - """Closes the sftp connection""" - self.connection.close() - print(f"Disconnected from host {self.hostname}") + if not self.role_assumed_credentials or (self.role_assumed_credentials['Expiration'].timestamp() - time.time() < 300): + oidc_token = None + request = gcp_requests.Request() - def listdir(self, remote_path): - """lists all the files and directories in the specified path and returns them""" - for obj in self.connection.listdir(remote_path): - yield obj + if self.env == 'LOCAL': + if not self.iam_credentials or self.iam_credentials.expired: + self.impersonate_service_account() + self.iam_credentials.refresh(request) - def listdir_attr(self, remote_path): - """lists all the files and directories (with their attributes) in the specified path and returns them""" - for attr in self.connection.listdir_attr(remote_path): - yield attr + oidc_token = self.fetch_id_token(audience="sts.amazonaws.com") + if not oidc_token: + raise ValueError("Failed to retrieve OIDC token") + + sts_response = self.sts_client.assume_role_with_web_identity( + RoleArn=self.role_arn, + RoleSessionName=self.role_session_name, + WebIdentityToken=oidc_token + ) + + self.role_assumed_credentials = sts_response['Credentials'] + + self.boto_session = boto3.Session( + aws_access_key_id=self.role_assumed_credentials['AccessKeyId'], + aws_secret_access_key=self.role_assumed_credentials['SecretAccessKey'], + aws_session_token=self.role_assumed_credentials['SessionToken'], + region_name='us-east-2' + ) def download(self, remote_path, target_local_path): """ - Downloads the file from remote sftp server to local. + Downloads the file from remote s3 server to local. Also, by default extracts the file to the specified target_local_path """ - + self.validate_credentials() try: # Create the target directory if it does not exist path, _ = os.path.split(target_local_path) @@ -97,43 +128,50 @@ def download(self, remote_path, target_local_path): except Exception as err: raise Exception(err) - # Download from remote sftp server to local - self.connection.get(remote_path, target_local_path) + # Download from remote s3 server to local + with smart_open.open( + f"{self.prefix}/{remote_path}", + "rb", + transport_params={"client": self.boto_session.client('s3')}, + ) as remote_file: + with open(target_local_path, "wb") as local_file: + local_file.write(remote_file.read()) except Exception as err: raise Exception(err) def upload(self, source_local_path, remote_path): """ - Uploads the source files from local to the sftp server. + Uploads the source files from local to the s3 server. 
""" - + self.validate_credentials() try: print( - f"uploading to {self.hostname} as {self.username} [(remote path: {remote_path});(source local path: {source_local_path})]" + f"uploading to {self.bucket_name} [(remote path: {remote_path});(source local path: {source_local_path})]" ) - # Upload file from local to SFTP - self.connection.put(source_local_path, remote_path) + # Upload file from local to S3 + with open(source_local_path, "rb") as local_file: + with smart_open.open( + f"{self.prefix}/{remote_path}", + "wb", + transport_params={"client": self.boto_session.client('s3')}, + ) as remote_file: + remote_file.write(local_file.read()) print("upload completed") except Exception as err: raise Exception(err) - def get_available_cogs(self): - """Lists all available COGs on the SFTP server""" - available_cogs = {} - for top_level_folder in self.connection.listdir(): - if not top_level_folder.endswith(".json"): - s3_file_path = f"{top_level_folder}/metrics.tif" - available_cogs[top_level_folder] = s3_file_path - - return available_cogs + def listdir(self, remote_path): + """lists all the files and directories in the specified path and returns them""" + for obj in self.connection.listdir(remote_path): + yield obj - def update_available_cogs(self): - self.connect() - self.available_cogs = self.get_available_cogs() - self.disconnect() + def listdir_attr(self, remote_path): + """lists all the files and directories (with their attributes) in the specified path and returns them""" + for attr in self.connection.listdir_attr(remote_path): + yield attr def upload_cogs( self, @@ -158,7 +196,7 @@ def upload_cogs( self.upload( source_local_path=local_cog_path, - remote_path=f"{affiliation}/{fire_event_name}/{band_name}.tif", + remote_path=f"public/{affiliation}/{fire_event_name}/{band_name}.tif", ) # Upload the difference between dNBR and RBR @@ -174,7 +212,7 @@ def upload_cogs( pct_change.rio.to_raster(local_cog_path, driver="GTiff") self.upload( source_local_path=local_cog_path, - remote_path=f"{affiliation}/{fire_event_name}/pct_change_dnbr_rbr.tif", + remote_path=f"public/{affiliation}/{fire_event_name}/pct_change_dnbr_rbr.tif", ) def update_manifest(