From 396ac9f4dcdf1617f5a9a788bfa5179ec8948377 Mon Sep 17 00:00:00 2001 From: Clinton Knight Date: Mon, 10 Jun 2024 10:27:27 -0400 Subject: [PATCH] Added driver for Google Cloud NetApp Volumes --- config/config.go | 1 + go.mod | 20 +- go.sum | 49 +- logging/defined_log_layers.go | 1 + persistent_store/passthrough.go | 2 + storage/backend.go | 7 +- storage/factory/factory.go | 2 + storage_drivers/azure/api/azure_discovery.go | 36 +- .../azure/api/azure_discovery_test.go | 72 +- storage_drivers/common.go | 21 + storage_drivers/common_test.go | 38 + storage_drivers/gcp/gcnvapi/gcnv.go | 1278 ++++++++++ storage_drivers/gcp/gcnvapi/gcnv_discovery.go | 419 ++++ storage_drivers/gcp/gcnvapi/gcnv_structs.go | 151 ++ storage_drivers/gcp/gcnvapi/types.go | 42 + storage_drivers/gcp/gcp_common.go | 45 + storage_drivers/gcp/gcp_cvs.go | 39 +- storage_drivers/gcp/gcp_gcnv.go | 2206 +++++++++++++++++ storage_drivers/ontap/ontap_common.go | 23 +- storage_drivers/ontap/ontap_common_test.go | 4 +- storage_drivers/ontap/ontap_nas.go | 2 +- storage_drivers/ontap/ontap_nas_flexgroup.go | 2 +- storage_drivers/ontap/ontap_san.go | 4 +- storage_drivers/ontap/ontap_san_nvme.go | 4 +- storage_drivers/types.go | 121 +- storage_drivers/types_test.go | 5 + utils/utils.go | 8 + utils/utils_test.go | 25 + 28 files changed, 4499 insertions(+), 128 deletions(-) create mode 100644 storage_drivers/gcp/gcnvapi/gcnv.go create mode 100644 storage_drivers/gcp/gcnvapi/gcnv_discovery.go create mode 100644 storage_drivers/gcp/gcnvapi/gcnv_structs.go create mode 100644 storage_drivers/gcp/gcnvapi/types.go create mode 100644 storage_drivers/gcp/gcp_common.go create mode 100644 storage_drivers/gcp/gcp_gcnv.go diff --git a/config/config.go b/config/config.go index 13ca0c228..ccde17c55 100644 --- a/config/config.go +++ b/config/config.go @@ -131,6 +131,7 @@ const ( AzureNASStorageDriverName = "azure-netapp-files" AzureNASBlockStorageDriverName = "azure-netapp-files-subvolume" GCPNFSStorageDriverName = "gcp-cvs" + 
GCNVNASStorageDriverName = "google-cloud-netapp-volumes" FakeStorageDriverName = "fake" /* REST frontend constants */ diff --git a/go.mod b/go.mod index c234cd8bf..0078422a0 100755 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/netapp/trident go 1.22.0 require ( + cloud.google.com/go/netapp v1.1.0 // https://pkg.go.dev/cloud.google.com/go/netapp github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/netapp/armnetapp/v7 v7.0.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0 @@ -54,7 +55,9 @@ require ( golang.org/x/sys v0.20.0 // github.com/golang/sys golang.org/x/text v0.15.0 // github.com/golang/text golang.org/x/time v0.5.0 // github.com/golang/time + google.golang.org/api v0.180.0 google.golang.org/grpc v1.64.0 // github.com/grpc/grpc-go + google.golang.org/protobuf v1.34.1 k8s.io/api v0.28.10 // github.com/kubernetes/api k8s.io/apiextensions-apiserver v0.28.10 // github.com/kubernetes/apiextensions-apiserver k8s.io/apimachinery v0.28.10 // github.com/kubernetes/apimachinery @@ -64,7 +67,11 @@ require ( ) require ( + cloud.google.com/go v0.112.2 // indirect + cloud.google.com/go/auth v0.4.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/longrunning v0.5.7 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 // indirect @@ -99,6 +106,7 @@ require ( github.com/elastic/go-windows v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.23.0 // indirect @@ -111,6 
+119,9 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -139,16 +150,19 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/sdk v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/term v0.20.0 // indirect golang.org/x/tools v0.21.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 39374fbf6..2a0470ae7 100755 --- a/go.sum +++ b/go.sum @@ -1,6 +1,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go 
v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= +cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= +cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg= +cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= +cloud.google.com/go/netapp v1.1.0 h1:kapWAE+ptbwuyoK/GkDD8f0/9gf0E+Kd+RR95S9vBVs= +cloud.google.com/go/netapp v1.1.0/go.mod h1:NSv6J5fRa/vvfZr/Y+vjkmLpaLyWJa/F92EI2PvA6EE= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= @@ -89,6 +99,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0= @@ -112,12 +123,15 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/ghodss/yaml v1.0.1-0.20220118164431-d8423dcdf344 h1:Arcl6UOIS/kgO2nW3A65HN+7CMjSDP/gofXL4CZt1V4= github.com/ghodss/yaml v1.0.1-0.20220118164431-d8423dcdf344/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -153,6 +167,7 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v5 v5.2.1 
h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -177,6 +192,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -185,9 +201,15 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid 
v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -295,6 +317,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -302,6 +326,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= @@ -315,6 +342,12 @@ github.com/zcalusic/sysinfo v1.1.0 h1:79Hqn8h4poVz6T57/4ezXbT5ZkZbZm7u1YU1C4paMy github.com/zcalusic/sysinfo v1.1.0/go.mod h1:NX+qYnWGtJVPV0yWldff9uppNKU4h40hJIRPf/pGLv4= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= @@ -349,6 +382,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= @@ -402,17 +436,24 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.180.0 h1:M2D87Yo0rGBPWpo1orwfCLehUUL6E7/TYe5gvMQWDh4= +google.golang.org/api v0.180.0/go.mod h1:51AiyoEg1MJPSZ9zvklA8VnRILPXxn1iVen9v25XHAE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 h1:W5Xj/70xIA4x60O/IFyXivR5MGqblAb8R3w26pnD6No= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= @@ -425,8 +466,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.33.0 
h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/logging/defined_log_layers.go b/logging/defined_log_layers.go index 1478522c6..31096d8b1 100644 --- a/logging/defined_log_layers.go +++ b/logging/defined_log_layers.go @@ -22,6 +22,7 @@ const ( LogLayerANFSubvolumeDriver = LogLayer(AzureNASBlockStorageDriverName) LogLayerSolidfireDriver = LogLayer(SolidfireSANStorageDriverName) LogLayerGCPNASDriver = LogLayer(GCPNFSStorageDriverName) + LogLayerGCNVNASDriver = LogLayer(GCNVNASStorageDriverName) LogLayerOntapNASDriver = LogLayer(OntapNASStorageDriverName) LogLayerOntapNASFlexgroupDriver = LogLayer(OntapNASFlexGroupStorageDriverName) LogLayerOntapNASQtreeDriver = LogLayer(OntapNASQtreeStorageDriverName) diff --git a/persistent_store/passthrough.go b/persistent_store/passthrough.go index 34f698ea2..8abbe9ec5 100644 --- a/persistent_store/passthrough.go +++ b/persistent_store/passthrough.go @@ -168,6 +168,8 @@ func (c *PassthroughClient) unmarshalConfig(ctx context.Context, fileContents [] configType = "azure_config" case config.GCPNFSStorageDriverName: configType = "gcp_config" + case config.GCNVNASStorageDriverName: + configType = "gcnv_config" case config.FakeStorageDriverName: configType = "fake_config" default: diff --git a/storage/backend.go b/storage/backend.go index 4375a4ab3..11f53c1bf 100644 --- a/storage/backend.go +++ b/storage/backend.go @@ -1151,6 +1151,7 @@ type 
PersistentStorageBackendConfig struct { SolidfireConfig *drivers.SolidfireStorageDriverConfig `json:"solidfire_config,omitempty"` AzureConfig *drivers.AzureNASStorageDriverConfig `json:"azure_config,omitempty"` GCPConfig *drivers.GCPNFSStorageDriverConfig `json:"gcp_config,omitempty"` + GCNVConfig *drivers.GCNVNASStorageDriverConfig `json:"gcnv_config,omitempty"` FakeStorageDriverConfig *drivers.FakeStorageDriverConfig `json:"fake_config,omitempty"` } @@ -1166,6 +1167,8 @@ func (psbc *PersistentStorageBackendConfig) GetDriverConfig() (drivers.DriverCon driverConfig = psbc.AzureConfig case psbc.GCPConfig != nil: driverConfig = psbc.GCPConfig + case psbc.GCNVConfig != nil: + driverConfig = psbc.GCNVConfig case psbc.FakeStorageDriverConfig != nil: driverConfig = psbc.FakeStorageDriverConfig default: @@ -1222,6 +1225,8 @@ func (p *BackendPersistent) MarshalConfig() (string, error) { bytes, err = json.Marshal(p.Config.AzureConfig) case p.Config.GCPConfig != nil: bytes, err = json.Marshal(p.Config.GCPConfig) + case p.Config.GCNVConfig != nil: + bytes, err = json.Marshal(p.Config.GCNVConfig) case p.Config.FakeStorageDriverConfig != nil: bytes, err = json.Marshal(p.Config.FakeStorageDriverConfig) default: @@ -1267,7 +1272,7 @@ func (p *BackendPersistent) ExtractBackendSecrets( // Check if user-provided credentials field is set if backendSecretName, backendSecretType, err := p.GetBackendCredentials(); err != nil { - Log().Errorf("Could not determined if backend credentials field exist; %v", err) + Log().Errorf("Could not determine if backend credentials field exist; %v", err) return nil, nil, usingTridentSecretName, err } else if backendSecretName != "" { if backendSecretName == secretName { diff --git a/storage/factory/factory.go b/storage/factory/factory.go index 0b3fdd29b..42976dd64 100644 --- a/storage/factory/factory.go +++ b/storage/factory/factory.go @@ -145,6 +145,8 @@ func GetStorageDriver(driverName, driverProtocol string) (storage.Driver, error) storageDriver = 
&azure.NASBlockStorageDriver{} case config.GCPNFSStorageDriverName: storageDriver = &gcp.NFSStorageDriver{} + case config.GCNVNASStorageDriverName: + storageDriver = &gcp.NASStorageDriver{} case config.FakeStorageDriverName: storageDriver = &fake.StorageDriver{} default: diff --git a/storage_drivers/azure/api/azure_discovery.go b/storage_drivers/azure/api/azure_discovery.go index 69d1e5d54..a2cfba955 100644 --- a/storage_drivers/azure/api/azure_discovery.go +++ b/storage_drivers/azure/api/azure_discovery.go @@ -25,12 +25,12 @@ import ( ) const ( - PServiceLevel = "serviceLevel" - PVirtualNetwork = "virtualNetwork" - PSubnet = "subnet" - PResourceGroups = "resourceGroups" - PNetappAccounts = "netappAccounts" - PCapacityPools = "capacityPools" + serviceLevel = "serviceLevel" + virtualNetwork = "virtualNetwork" + subnet = "subnet" + resourceGroups = "resourceGroups" + netappAccounts = "netappAccounts" + capacityPools = "capacityPools" DefaultMaxCacheAge = 10 * time.Minute ) @@ -244,7 +244,7 @@ func (c Client) checkForUnsatisfiedPools(ctx context.Context) (discoveryErrors [ for sPoolName, sPool := range c.sdkClient.AzureResources.StoragePoolMap { // Find all capacity pools that work for this storage pool - cPools := c.CapacityPoolsForStoragePool(ctx, sPool, sPool.InternalAttributes()[PServiceLevel]) + cPools := c.CapacityPoolsForStoragePool(ctx, sPool, sPool.InternalAttributes()[serviceLevel]) if len(cPools) == 0 { @@ -279,7 +279,7 @@ func (c Client) checkForNonexistentResourceGroups(ctx context.Context) (anyMisma } // Find any resource group value in this storage pool that doesn't match known resource groups - for _, configRG := range utils.SplitString(ctx, sPool.InternalAttributes()[PResourceGroups], ",") { + for _, configRG := range utils.SplitString(ctx, sPool.InternalAttributes()[resourceGroups], ",") { if !utils.StringInSlice(configRG, rgNames) { anyMismatches = true @@ -307,7 +307,7 @@ func (c Client) checkForNonexistentNetAppAccounts(ctx context.Context) 
(anyMisma } // Find any netapp account value in this storage pool that doesn't match known netapp accounts - for _, configNA := range utils.SplitString(ctx, sPool.InternalAttributes()[PNetappAccounts], ",") { + for _, configNA := range utils.SplitString(ctx, sPool.InternalAttributes()[netappAccounts], ",") { if !utils.StringInSlice(configNA, naNames) { anyMismatches = true @@ -335,7 +335,7 @@ func (c Client) checkForNonexistentCapacityPools(ctx context.Context) (anyMismat } // Find any capacity pools value in this storage pool that doesn't match known capacity pools - for _, configCP := range utils.SplitString(ctx, sPool.InternalAttributes()[PCapacityPools], ",") { + for _, configCP := range utils.SplitString(ctx, sPool.InternalAttributes()[capacityPools], ",") { if !utils.StringInSlice(configCP, cpNames) { anyMismatches = true @@ -363,7 +363,7 @@ func (c Client) checkForNonexistentVirtualNetworks(ctx context.Context) (anyMism } // Find any virtual network value in this storage pool that doesn't match known virtual networks - configVN := sPool.InternalAttributes()[PVirtualNetwork] + configVN := sPool.InternalAttributes()[virtualNetwork] if configVN != "" && !utils.StringInSlice(configVN, vnNames) { anyMismatches = true @@ -390,7 +390,7 @@ func (c Client) checkForNonexistentSubnets(ctx context.Context) (anyMismatches b } // Find any subnet value in this storage pool that doesn't match known subnets - configSN := sPool.InternalAttributes()[PSubnet] + configSN := sPool.InternalAttributes()[subnet] if configSN != "" && !utils.StringInSlice(configSN, snNames) { anyMismatches = true @@ -832,7 +832,7 @@ func (c Client) CapacityPoolsForStoragePool( } // If resource groups were specified, filter out non-matching capacity pools - rgList := utils.SplitString(ctx, sPool.InternalAttributes()[PResourceGroups], ",") + rgList := utils.SplitString(ctx, sPool.InternalAttributes()[resourceGroups], ",") if len(rgList) > 0 { for cPoolFullName, cPool := range c.sdkClient.CapacityPoolMap 
{ if !utils.SliceContainsString(rgList, cPool.ResourceGroup) { @@ -844,7 +844,7 @@ func (c Client) CapacityPoolsForStoragePool( } // If netapp accounts were specified, filter out non-matching capacity pools - naList := utils.SplitString(ctx, sPool.InternalAttributes()[PNetappAccounts], ",") + naList := utils.SplitString(ctx, sPool.InternalAttributes()[netappAccounts], ",") if len(naList) > 0 { for cPoolFullName, cPool := range c.sdkClient.CapacityPoolMap { naName := cPool.NetAppAccount @@ -858,7 +858,7 @@ func (c Client) CapacityPoolsForStoragePool( } // If capacity pools were specified, filter out non-matching capacity pools - cpList := utils.SplitString(ctx, sPool.InternalAttributes()[PCapacityPools], ",") + cpList := utils.SplitString(ctx, sPool.InternalAttributes()[capacityPools], ",") if len(cpList) > 0 { for cPoolFullName, cPool := range c.sdkClient.CapacityPoolMap { if !utils.SliceContainsString(cpList, cPool.Name) && !utils.SliceContainsString(cpList, cPoolFullName) { @@ -940,7 +940,7 @@ func (c Client) SubnetsForStoragePool(ctx context.Context, sPool storage.Pool) [ } // If resource groups were specified, filter out non-matching subnets - rgList := utils.SplitString(ctx, sPool.InternalAttributes()[PResourceGroups], ",") + rgList := utils.SplitString(ctx, sPool.InternalAttributes()[resourceGroups], ",") if len(rgList) > 0 { for subnetFullName, subnet := range c.sdkClient.SubnetMap { if !utils.SliceContainsString(rgList, subnet.ResourceGroup) { @@ -952,7 +952,7 @@ func (c Client) SubnetsForStoragePool(ctx context.Context, sPool storage.Pool) [ } // If virtual network was specified, filter out non-matching subnets - vn := sPool.InternalAttributes()[PVirtualNetwork] + vn := sPool.InternalAttributes()[virtualNetwork] if vn != "" { for subnetFullName, subnet := range c.sdkClient.SubnetMap { vnName := subnet.VirtualNetwork @@ -966,7 +966,7 @@ func (c Client) SubnetsForStoragePool(ctx context.Context, sPool storage.Pool) [ } // If subnet was specified, filter out 
non-matching capacity subnets - sn := sPool.InternalAttributes()[PSubnet] + sn := sPool.InternalAttributes()[subnet] if sn != "" { for subnetFullName, subnet := range c.sdkClient.SubnetMap { if sn != subnet.Name && sn != subnetFullName { diff --git a/storage_drivers/azure/api/azure_discovery_test.go b/storage_drivers/azure/api/azure_discovery_test.go index a52bff652..a8cd2f6f1 100644 --- a/storage_drivers/azure/api/azure_discovery_test.go +++ b/storage_drivers/azure/api/azure_discovery_test.go @@ -299,9 +299,9 @@ func TestCheckForUnsatisfiedPools_NoPools(t *testing.T) { func TestCheckForUnsatisfiedPools_EmptyPools(t *testing.T) { sPool1 := storage.NewStoragePool(nil, "pool1") - sPool1.InternalAttributes()[PCapacityPools] = "" + sPool1.InternalAttributes()[capacityPools] = "" sPool2 := storage.NewStoragePool(nil, "pool2") - sPool2.InternalAttributes()[PCapacityPools] = "" + sPool2.InternalAttributes()[capacityPools] = "" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool1": sPool1, "pool2": sPool2} @@ -313,9 +313,9 @@ func TestCheckForUnsatisfiedPools_EmptyPools(t *testing.T) { func TestCheckForUnsatisfiedPools_ValidPools(t *testing.T) { sPool1 := storage.NewStoragePool(nil, "pool1") - sPool1.InternalAttributes()[PCapacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2,CP3" + sPool1.InternalAttributes()[capacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2,CP3" sPool2 := storage.NewStoragePool(nil, "pool2") - sPool2.InternalAttributes()[PCapacityPools] = "CP1,CP2" + sPool2.InternalAttributes()[capacityPools] = "CP1,CP2" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool1": sPool1, "pool2": sPool2} @@ -327,9 +327,9 @@ func TestCheckForUnsatisfiedPools_ValidPools(t *testing.T) { func TestCheckForUnsatisfiedPools_OneInvalidPool(t *testing.T) { sPool1 := storage.NewStoragePool(nil, "pool1") - sPool1.InternalAttributes()[PCapacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2,CP4" + sPool1.InternalAttributes()[capacityPools] = 
"RG1/NA1/CP1,RG1/NA1/CP2,CP4" sPool2 := storage.NewStoragePool(nil, "pool2") - sPool2.InternalAttributes()[PCapacityPools] = "CP4" + sPool2.InternalAttributes()[capacityPools] = "CP4" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool1": sPool1, "pool2": sPool2} @@ -341,12 +341,12 @@ func TestCheckForUnsatisfiedPools_OneInvalidPool(t *testing.T) { func TestCheckForUnsatisfiedPools_TwoInvalidPools(t *testing.T) { sPool1 := storage.NewStoragePool(nil, "pool1") - sPool1.InternalAttributes()[PServiceLevel] = ServiceLevelUltra - sPool1.InternalAttributes()[PCapacityPools] = "CP2" + sPool1.InternalAttributes()[serviceLevel] = ServiceLevelUltra + sPool1.InternalAttributes()[capacityPools] = "CP2" sPool2 := storage.NewStoragePool(nil, "pool2") - sPool2.InternalAttributes()[PResourceGroups] = "RG1" - sPool2.InternalAttributes()[PNetappAccounts] = "NA1" - sPool2.InternalAttributes()[PCapacityPools] = "CP4" + sPool2.InternalAttributes()[resourceGroups] = "RG1" + sPool2.InternalAttributes()[netappAccounts] = "NA1" + sPool2.InternalAttributes()[capacityPools] = "CP4" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool1": sPool1, "pool2": sPool2} @@ -367,7 +367,7 @@ func TestCheckForNonexistentResourceGroups_NoPools(t *testing.T) { func TestCheckForNonexistentResourceGroups_Empty(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PResourceGroups] = "" + sPool.InternalAttributes()[resourceGroups] = "" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -379,7 +379,7 @@ func TestCheckForNonexistentResourceGroups_Empty(t *testing.T) { func TestCheckForNonexistentResourceGroups_OK(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PResourceGroups] = "RG1,RG2" + sPool.InternalAttributes()[resourceGroups] = "RG1,RG2" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ 
-391,7 +391,7 @@ func TestCheckForNonexistentResourceGroups_OK(t *testing.T) { func TestCheckForNonexistentResourceGroups_Missing(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PResourceGroups] = "RG1,RG2,RG3" + sPool.InternalAttributes()[resourceGroups] = "RG1,RG2,RG3" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -412,7 +412,7 @@ func TestCheckForNonexistentNetAppAccounts_NoPools(t *testing.T) { func TestCheckForNonexistentNetAppAccounts_Empty(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PNetappAccounts] = "" + sPool.InternalAttributes()[netappAccounts] = "" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -424,7 +424,7 @@ func TestCheckForNonexistentNetAppAccounts_Empty(t *testing.T) { func TestCheckForNonexistentNetAppAccounts_OK(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PNetappAccounts] = "RG1/NA1,RG2/NA1,NA2" + sPool.InternalAttributes()[netappAccounts] = "RG1/NA1,RG2/NA1,NA2" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -436,7 +436,7 @@ func TestCheckForNonexistentNetAppAccounts_OK(t *testing.T) { func TestCheckForNonexistentNetAppAccounts_Missing(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PNetappAccounts] = "RG1/NA1,RG2/NA1,NA3" + sPool.InternalAttributes()[netappAccounts] = "RG1/NA1,RG2/NA1,NA3" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -457,7 +457,7 @@ func TestCheckForNonexistentCapacityPools_NoPools(t *testing.T) { func TestCheckForNonexistentCapacityPools_Empty(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PCapacityPools] = "" + sPool.InternalAttributes()[capacityPools] = "" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = 
map[string]storage.Pool{"pool": sPool} @@ -469,7 +469,7 @@ func TestCheckForNonexistentCapacityPools_Empty(t *testing.T) { func TestCheckForNonexistentCapacityPools_OK(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PCapacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2,CP3" + sPool.InternalAttributes()[capacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2,CP3" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -481,7 +481,7 @@ func TestCheckForNonexistentCapacityPools_OK(t *testing.T) { func TestCheckForNonexistentCapacityPools_Missing(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PCapacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2,CP4" + sPool.InternalAttributes()[capacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2,CP4" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -502,7 +502,7 @@ func TestCheckForNonexistentVirtualNetworks_NoPools(t *testing.T) { func TestCheckForNonexistentVirtualNetworks_Empty(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PVirtualNetwork] = "" + sPool.InternalAttributes()[virtualNetwork] = "" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -514,7 +514,7 @@ func TestCheckForNonexistentVirtualNetworks_Empty(t *testing.T) { func TestCheckForNonexistentVirtualNetworks_OK(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PVirtualNetwork] = "RG1/VN1" + sPool.InternalAttributes()[virtualNetwork] = "RG1/VN1" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -526,7 +526,7 @@ func TestCheckForNonexistentVirtualNetworks_OK(t *testing.T) { func TestCheckForNonexistentVirtualNetworks_Missing(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PVirtualNetwork] = "VN4" + sPool.InternalAttributes()[virtualNetwork] = "VN4" 
sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -547,7 +547,7 @@ func TestCheckForNonexistentSubnets_NoPools(t *testing.T) { func TestCheckForNonexistentSubnets_Empty(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PSubnet] = "" + sPool.InternalAttributes()[subnet] = "" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -559,7 +559,7 @@ func TestCheckForNonexistentSubnets_Empty(t *testing.T) { func TestCheckForNonexistentSubnets_OK(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PSubnet] = "RG1/VN2/SN3" + sPool.InternalAttributes()[subnet] = "RG1/VN2/SN3" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -571,7 +571,7 @@ func TestCheckForNonexistentSubnets_OK(t *testing.T) { func TestCheckForNonexistentSubnets_Missing(t *testing.T) { sPool := storage.NewStoragePool(nil, "pool") - sPool.InternalAttributes()[PSubnet] = "RG1/VN2/SN4" + sPool.InternalAttributes()[subnet] = "RG1/VN2/SN4" sdk := getFakeSDK() sdk.sdkClient.StoragePoolMap = map[string]storage.Pool{"pool": sPool} @@ -654,11 +654,11 @@ func TestCapacityPoolsForStoragePools(t *testing.T) { RG2_NA2_CP3 := sdk.capacityPool("RG2/NA2/CP3") sPool1 := storage.NewStoragePool(nil, "testPool1") - sPool1.InternalAttributes()[PCapacityPools] = "CP3" + sPool1.InternalAttributes()[capacityPools] = "CP3" sdk.sdkClient.StoragePoolMap[sPool1.Name()] = sPool1 sPool2 := storage.NewStoragePool(nil, "testPool2") - sPool2.InternalAttributes()[PCapacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2" + sPool2.InternalAttributes()[capacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2" sdk.sdkClient.StoragePoolMap[sPool2.Name()] = sPool2 expected := []*CapacityPool{RG1_NA1_CP1, RG1_NA1_CP2, RG2_NA2_CP3} @@ -810,9 +810,9 @@ func TestCapacityPoolsForStoragePool(t *testing.T) { for _, test := range tests { - sPool.InternalAttributes()[PResourceGroups] 
= test.resourceGroups - sPool.InternalAttributes()[PNetappAccounts] = test.netappAccounts - sPool.InternalAttributes()[PCapacityPools] = test.capacityPools + sPool.InternalAttributes()[resourceGroups] = test.resourceGroups + sPool.InternalAttributes()[netappAccounts] = test.netappAccounts + sPool.InternalAttributes()[capacityPools] = test.capacityPools cPools := sdk.CapacityPoolsForStoragePool(context.TODO(), sPool, test.serviceLevel) @@ -834,13 +834,13 @@ func TestEnsureVolumeInValidCapacityPool(t *testing.T) { assert.Nil(t, sdk.EnsureVolumeInValidCapacityPool(context.TODO(), volume), "result not nil") sPool1 := storage.NewStoragePool(nil, "testPool1") - sPool1.InternalAttributes()[PCapacityPools] = "CP3" + sPool1.InternalAttributes()[capacityPools] = "CP3" sdk.sdkClient.StoragePoolMap[sPool1.Name()] = sPool1 assert.NotNil(t, sdk.EnsureVolumeInValidCapacityPool(context.TODO(), volume), "result nil") sPool2 := storage.NewStoragePool(nil, "testPool2") - sPool2.InternalAttributes()[PCapacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2" + sPool2.InternalAttributes()[capacityPools] = "RG1/NA1/CP1,RG1/NA1/CP2" sdk.sdkClient.StoragePoolMap[sPool2.Name()] = sPool2 assert.Nil(t, sdk.EnsureVolumeInValidCapacityPool(context.TODO(), volume), "result not nil") @@ -957,9 +957,9 @@ func TestSubnetsForStoragePoolAndFilteredSubnetMap(t *testing.T) { for _, test := range tests { - sPool.InternalAttributes()[PResourceGroups] = test.resourceGroups - sPool.InternalAttributes()[PVirtualNetwork] = test.virtualNetwork - sPool.InternalAttributes()[PSubnet] = test.subnet + sPool.InternalAttributes()[resourceGroups] = test.resourceGroups + sPool.InternalAttributes()[virtualNetwork] = test.virtualNetwork + sPool.InternalAttributes()[subnet] = test.subnet subnets := sdk.SubnetsForStoragePool(context.TODO(), sPool) subnet := sdk.RandomSubnetForStoragePool(context.TODO(), sPool) diff --git a/storage_drivers/common.go b/storage_drivers/common.go index 2a67d435a..c6d09ee9a 100644 --- 
a/storage_drivers/common.go +++ b/storage_drivers/common.go @@ -214,6 +214,27 @@ func CheckMinVolumeSize(requestedSizeBytes, minVolumeSizeBytes uint64) error { return nil } +// CalculateVolumeSizeBytes calculates the size of a volume taking into account the snapshot reserve +func CalculateVolumeSizeBytes( + ctx context.Context, volume string, requestedSizeBytes uint64, snapshotReserve int, +) uint64 { + snapReserveDivisor := 1.0 - (float64(snapshotReserve) / 100.0) + + sizeWithSnapReserve := float64(requestedSizeBytes) / snapReserveDivisor + + volumeSizeBytes := uint64(sizeWithSnapReserve) + + Logc(ctx).WithFields(LogFields{ + "volume": volume, + "snapReserveDivisor": snapReserveDivisor, + "requestedSize": requestedSizeBytes, + "sizeWithSnapReserve": sizeWithSnapReserve, + "volumeSizeBytes": volumeSizeBytes, + }).Debug("Calculated optimal size for volume with snapshot reserve.") + + return volumeSizeBytes +} + // Clone will create a copy of the source object and store it into the destination object (which must be a pointer) func Clone(ctx context.Context, source, destination interface{}) { if reflect.TypeOf(destination).Kind() != reflect.Ptr { diff --git a/storage_drivers/common_test.go b/storage_drivers/common_test.go index d15f88164..b3af09ba8 100644 --- a/storage_drivers/common_test.go +++ b/storage_drivers/common_test.go @@ -397,6 +397,44 @@ func TestCheckMinVolumeSize(t *testing.T) { } } +func TestCalculateVolumeSizeBytes(t *testing.T) { + ctx := context.TODO() + + tests := []struct { + requestedSizeBytes uint64 + snapshotReserve int + expectedSizeBytes uint64 + }{ + { + requestedSizeBytes: 1000000000, + snapshotReserve: 0, + expectedSizeBytes: 1000000000, + }, + { + requestedSizeBytes: 1000000000, + snapshotReserve: 10, + expectedSizeBytes: 1111111111, + }, + { + requestedSizeBytes: 1000000000, + snapshotReserve: 50, + expectedSizeBytes: 2000000000, + }, + { + requestedSizeBytes: 1000000000, + snapshotReserve: 90, + expectedSizeBytes: 10000000000, + }, + } + 
+ for i, test := range tests { + t.Run(fmt.Sprintf("CalculateVolumeSizeBytes: %d", i), func(t *testing.T) { + actual := CalculateVolumeSizeBytes(ctx, "", test.requestedSizeBytes, test.snapshotReserve) + assert.Equal(t, test.expectedSizeBytes, actual, "incorrect volume size") + }) + } +} + func TestClone(t *testing.T) { type test struct { source interface{} diff --git a/storage_drivers/gcp/gcnvapi/gcnv.go b/storage_drivers/gcp/gcnvapi/gcnv.go new file mode 100644 index 000000000..e94cb5b9d --- /dev/null +++ b/storage_drivers/gcp/gcnvapi/gcnv.go @@ -0,0 +1,1278 @@ +// Copyright 2024 NetApp, Inc. All Rights Reserved. + +// Package gcnvapi provides a high-level interface to the Google Cloud NetApp Volumes SDK +package gcnvapi + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + "time" + + netapp "cloud.google.com/go/netapp/apiv1" + "cloud.google.com/go/netapp/apiv1/netapppb" + "github.com/cenkalti/backoff/v4" + "golang.org/x/oauth2/google" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/fieldmaskpb" + + . "github.com/netapp/trident/logging" + "github.com/netapp/trident/storage" + drivers "github.com/netapp/trident/storage_drivers" + "github.com/netapp/trident/utils" + "github.com/netapp/trident/utils/errors" +) + +const ( + VolumeCreateTimeout = 10 * time.Second + SnapshotTimeout = 240 * time.Second // Snapshotter sidecar has a timeout of 5 minutes. Stay under that! 
+ DefaultTimeout = 120 * time.Second + MaxLabelLength = 63 + MaxLabelCount = 64 + DefaultSDKTimeout = 30 * time.Second + PaginationLimit = 100 +) + +var ( + capacityPoolNameRegex = regexp.MustCompile(`^projects/(?P[^/]+)/locations/(?P[^/]+)/storagePools/(?P[^/]+)$`) + volumeNameRegex = regexp.MustCompile(`^projects/(?P[^/]+)/locations/(?P[^/]+)/volumes/(?P[^/]+)$`) + snapshotNameRegex = regexp.MustCompile(`^projects/(?P[^/]+)/locations/(?P[^/]+)/volumes/(?P[^/]+)/snapshots/(?P[^/]+)$`) + networkNameRegex = regexp.MustCompile(`^projects/(?P[^/]+)/global/networks/(?P[^/]+)$`) +) + +// ClientConfig holds configuration data for the API driver object. +type ClientConfig struct { + StorageDriverName string + + // GCP project number + ProjectNumber string + + // GCP CVS API authentication parameters + APIKey *drivers.GCPPrivateKey + + // GCP region + Location string + + // URL for accessing the API via an HTTP/HTTPS proxy + ProxyURL string + + // Options + DebugTraceFlags map[string]bool + SDKTimeout time.Duration // Timeout applied to all calls to the GCNV SDK + MaxCacheAge time.Duration // The oldest data we should expect in the cached resources +} + +type GCNVClient struct { + gcnv *netapp.Client + GCNVResources +} + +// Client encapsulates connection details. +type Client struct { + config *ClientConfig + sdkClient *GCNVClient +} + +func createGCNVClient(ctx context.Context, config *ClientConfig) (*netapp.Client, error) { + if config.APIKey != nil { + keyBytes, jsonErr := json.Marshal(config.APIKey) + if jsonErr != nil { + return nil, jsonErr + } + + creds, credsErr := google.CredentialsFromJSON(ctx, keyBytes, netapp.DefaultAuthScopes()...) + if credsErr != nil { + return nil, credsErr + } + + return netapp.NewClient(ctx, option.WithCredentials(creds)) + } else { + return nil, errors.New("apiKey in config must be specified") + } +} + +// NewDriver is a factory method for creating a new SDK interface. 
+func NewDriver(ctx context.Context, config *ClientConfig) (GCNV, error) { + gcnvClient, err := createGCNVClient(ctx, config) + if err != nil { + return nil, err + } + + return Client{ + config: config, + sdkClient: &GCNVClient{ + gcnv: gcnvClient, + }, + }, nil +} + +// Init runs startup logic after allocating the driver resources. +func (c Client) Init(ctx context.Context, pools map[string]storage.Pool) error { + // Map vpools to backend + c.registerStoragePools(pools) + + // Find out what we have to work with in GCNV + return c.RefreshGCNVResources(ctx) +} + +// RegisterStoragePool makes a note of pools defined by the driver for later mapping. +func (c Client) registerStoragePools(sPools map[string]storage.Pool) { + c.sdkClient.GCNVResources.StoragePoolMap = make(map[string]storage.Pool) + + for _, sPool := range sPools { + c.sdkClient.GCNVResources.StoragePoolMap[sPool.Name()] = sPool + } +} + +// /////////////////////////////////////////////////////////////////////////////// +// Functions to create & parse GCNV resource names +// /////////////////////////////////////////////////////////////////////////////// + +// createBaseID creates the base GCNV-style ID for a project & location. +func (c Client) createBaseID() string { + return fmt.Sprintf("projects/%s/locations/%s", c.config.ProjectNumber, c.config.Location) +} + +// createCapacityPoolID creates the GCNV-style ID for a capacity pool. +func (c Client) createCapacityPoolID(capacityPool string) string { + return fmt.Sprintf("projects/%s/locations/%s/storagePools/%s", + c.config.ProjectNumber, c.config.Location, capacityPool) +} + +// parseCapacityPoolID parses the GCNV-style full name for a capacity pool. 
+func parseCapacityPoolID(fullName string) (projectNumber, location, capacityPool string, err error) { + match := capacityPoolNameRegex.FindStringSubmatch(fullName) + + if match == nil { + err = fmt.Errorf("capacity pool name %s is invalid", fullName) + return + } + + paramsMap := make(map[string]string) + for i, name := range capacityPoolNameRegex.SubexpNames() { + if i > 0 && i <= len(match) { + paramsMap[name] = match[i] + } + } + + projectNumber = paramsMap["projectNumber"] + location = paramsMap["location"] + capacityPool = paramsMap["capacityPool"] + + return +} + +// createVolumeID creates the GCNV-style ID for a volume. +func (c Client) createVolumeID(volume string) string { + return fmt.Sprintf("projects/%s/locations/%s/volumes/%s", c.config.ProjectNumber, c.config.Location, volume) +} + +// parseVolumeID parses the GCNV-style full name for a volume. +func parseVolumeID(fullName string) (projectNumber, location, volume string, err error) { + match := volumeNameRegex.FindStringSubmatch(fullName) + + if match == nil { + err = fmt.Errorf("volume name %s is invalid", fullName) + return + } + + paramsMap := make(map[string]string) + for i, name := range volumeNameRegex.SubexpNames() { + if i > 0 && i <= len(match) { + paramsMap[name] = match[i] + } + } + + projectNumber = paramsMap["projectNumber"] + location = paramsMap["location"] + volume = paramsMap["volume"] + + return +} + +// createSnapshotID creates the GCNV-style ID for a snapshot. +func (c Client) createSnapshotID(volume, snapshot string) string { + return fmt.Sprintf("projects/%s/locations/%s/volumes/%s/snapshots/%s", + c.config.ProjectNumber, c.config.Location, volume, snapshot) +} + +// parseSnapshotID parses the GCNV-style full name for a snapshot. 
+func parseSnapshotID(fullName string) (projectNumber, location, volume, snapshot string, err error) { + match := snapshotNameRegex.FindStringSubmatch(fullName) + + if match == nil { + err = fmt.Errorf("snapshot name %s is invalid", fullName) + return + } + + paramsMap := make(map[string]string) + for i, name := range snapshotNameRegex.SubexpNames() { + if i > 0 && i <= len(match) { + paramsMap[name] = match[i] + } + } + + projectNumber = paramsMap["projectNumber"] + location = paramsMap["location"] + volume = paramsMap["volume"] + snapshot = paramsMap["snapshot"] + + return +} + +// createNetworkID creates the GCNV-style ID for a network. +func (c Client) createNetworkID(network string) string { + return fmt.Sprintf("projects/%s/global/networks/%s", c.config.ProjectNumber, network) +} + +// parseNetworkID parses the GCNV-style full name for a network. +func parseNetworkID(fullName string) (projectNumber, network string, err error) { + match := networkNameRegex.FindStringSubmatch(fullName) + + if match == nil { + err = fmt.Errorf("network name %s is invalid", fullName) + return + } + + paramsMap := make(map[string]string) + for i, name := range networkNameRegex.SubexpNames() { + if i > 0 && i <= len(match) { + paramsMap[name] = match[i] + } + } + + projectNumber = paramsMap["projectNumber"] + network = paramsMap["network"] + + return +} + +// /////////////////////////////////////////////////////////////////////////////// +// Functions to convert between GCNV SDK & internal volume structs +// /////////////////////////////////////////////////////////////////////////////// + +// exportPolicyExport turns an internal ExportPolicy into something consumable by the SDK. 
+func exportPolicyExport(exportPolicy *ExportPolicy) *netapppb.ExportPolicy { + gcnvRules := make([]*netapppb.SimpleExportPolicyRule, 0) + + for _, rule := range exportPolicy.Rules { + + allowedClients := rule.AllowedClients + accessType := GCNVAccessTypeFromVolumeAccessType(rule.AccessType) + nfsv3 := rule.Nfsv3 + nfsv4 := rule.Nfsv4 + + gcnvRule := netapppb.SimpleExportPolicyRule{ + AllowedClients: &allowedClients, + AccessType: &accessType, + Nfsv3: &nfsv3, + Nfsv4: &nfsv4, + } + + gcnvRules = append(gcnvRules, &gcnvRule) + } + + return &netapppb.ExportPolicy{ + Rules: gcnvRules, + } +} + +// exportPolicyImport turns an SDK ExportPolicy into an internal one. +func (c Client) exportPolicyImport(gcnvExportPolicy *netapppb.ExportPolicy) *ExportPolicy { + rules := make([]ExportRule, 0) + + if gcnvExportPolicy == nil || len(gcnvExportPolicy.Rules) == 0 { + return &ExportPolicy{Rules: rules} + } + + for index, gcnvRule := range gcnvExportPolicy.Rules { + rules = append(rules, ExportRule{ + AllowedClients: DerefString(gcnvRule.AllowedClients), + Nfsv3: DerefBool(gcnvRule.Nfsv3), + Nfsv4: DerefBool(gcnvRule.Nfsv4), + RuleIndex: int32(index), + AccessType: VolumeAccessTypeFromGCNVAccessType(DerefAccessType(gcnvRule.AccessType)), + }) + } + + return &ExportPolicy{Rules: rules} +} + +// newVolumeFromGCNVVolume creates a new internal Volume struct from a GCNV volume. 
+func (c Client) newVolumeFromGCNVVolume(ctx context.Context, volume *netapppb.Volume) (*Volume, error) { + if volume == nil { + return nil, errors.New("nil volume") + } + + _, location, volumeName, err := parseVolumeID(volume.Name) + if err != nil { + return nil, err + } + + _, network, err := parseNetworkID(volume.Network) + if err != nil { + return nil, err + } + + var protocolTypes []string + for _, gcnvProtocolType := range volume.Protocols { + protocolTypes = append(protocolTypes, VolumeProtocolFromGCNVProtocol(gcnvProtocolType)) + } + + return &Volume{ + Name: volumeName, + CreationToken: volume.ShareName, + FullName: volume.Name, + Location: location, + State: VolumeStateFromGCNVState(volume.State), + CapacityPool: volume.StoragePool, + NetworkName: network, + NetworkFullName: volume.Network, + ServiceLevel: ServiceLevelFromCapacityPool(c.capacityPool(volume.StoragePool)), + SizeBytes: volume.CapacityGib * int64(1073741824), + ExportPolicy: c.exportPolicyImport(volume.ExportPolicy), + ProtocolTypes: protocolTypes, + MountTargets: c.getMountTargetsFromVolume(ctx, volume), + UnixPermissions: volume.UnixPermissions, + Labels: volume.Labels, + SnapshotReserve: int64(volume.SnapReserve), + SnapshotDirectory: volume.SnapshotDirectory, + SecurityStyle: VolumeSecurityStyleFromGCNVSecurityStyle(volume.SecurityStyle), + }, nil +} + +// getMountTargetsFromVolume extracts the mount targets from a GCNV volume. 
+func (c Client) getMountTargetsFromVolume(ctx context.Context, volume *netapppb.Volume) []MountTarget { + mounts := make([]MountTarget, 0) + + if len(volume.MountOptions) == 0 { + Logc(ctx).Tracef("Volume %s has no mount targets.", volume.Name) + return mounts + } + + for _, gcnvMountTarget := range volume.MountOptions { + mounts = append(mounts, MountTarget{ + Export: gcnvMountTarget.Export, + ExportPath: gcnvMountTarget.ExportFull, + Protocol: VolumeProtocolFromGCNVProtocol(gcnvMountTarget.Protocol), + }) + } + + return mounts +} + +// /////////////////////////////////////////////////////////////////////////////// +// Functions to retrieve and manage volumes +// /////////////////////////////////////////////////////////////////////////////// + +// Volumes queries GCNV SDK for all volumes in the current location. +func (c Client) Volumes(ctx context.Context) (*[]*Volume, error) { + logFields := LogFields{ + "API": "GCNV.ListVolumes", + } + + var volumes []*Volume + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.ListVolumesRequest{ + Parent: c.createBaseID(), + PageSize: PaginationLimit, + } + it := c.sdkClient.gcnv.ListVolumes(sdkCtx, req) + for { + gcnvVolume, err := it.Next() + if errors.Is(err, iterator.Done) { + break + } + if err != nil { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithFields(logFields).WithError(err).Error("Could not read volumes.") + return nil, err + } + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithFields(logFields).WithError(err).Debug("Volume: %v.", gcnvVolume) + + volume, err := c.newVolumeFromGCNVVolume(ctx, gcnvVolume) + if err != nil { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). 
+ WithError(err).Warning("Skipping volume.") + continue + } + volumes = append(volumes, volume) + } + + return &volumes, nil +} + +// Volume uses a volume config record to fetch a volume by the most efficient means. +func (c Client) Volume(ctx context.Context, volConfig *storage.VolumeConfig) (*Volume, error) { + return c.VolumeByName(ctx, volConfig.InternalName) +} + +func (c Client) VolumeByName(ctx context.Context, name string) (*Volume, error) { + logFields := LogFields{ + "API": "GCNV.GetVolume", + "volume": name, + } + + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithFields(logFields).Trace("Fetching volume by name.") + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.GetVolumeRequest{ + Name: c.createVolumeID(name), + } + gcnvVolume, err := c.sdkClient.gcnv.GetVolume(sdkCtx, req) + if err != nil { + if IsGCNVNotFoundError(err) { + Logc(ctx).WithFields(logFields).Debug("Volume not found.") + return nil, errors.WrapWithNotFoundError(err, "volume '%s' not found", name) + } + + Logc(ctx).WithFields(logFields).WithError(err).Error("Error fetching volume.") + return nil, err + } + + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithFields(logFields).Debug("Found volume by name.") + + return c.newVolumeFromGCNVVolume(ctx, gcnvVolume) +} + +// VolumeExists uses a volume config record to look for a Filesystem by the most efficient means. +func (c Client) VolumeExists(ctx context.Context, volConfig *storage.VolumeConfig) (bool, *Volume, error) { + volume, err := c.Volume(ctx, volConfig) + if err != nil { + if IsGCNVNotFoundError(err) { + return false, nil, nil + } else { + return false, nil, err + } + } + return true, volume, nil +} + +// WaitForVolumeState watches for a desired volume state and returns when that state is achieved. 
+func (c Client) WaitForVolumeState( + ctx context.Context, volume *Volume, desiredState string, abortStates []string, + maxElapsedTime time.Duration, +) (string, error) { + volumeState := "" + + checkVolumeState := func() error { + v, err := c.VolumeByName(ctx, volume.Name) + if err != nil { + + // There is no 'Deleted' state in GCNV -- the volume just vanishes. If we failed to query + // the volume info, and we're trying to transition to StateDeleted, and we get back a 404, + // then return success. Otherwise, log the error as usual. + if desiredState == VolumeStateDeleted && errors.IsNotFoundError(err) { + Logc(ctx).Debugf("Implied deletion for volume %s.", volume.Name) + volumeState = VolumeStateDeleted + return nil + } + if errors.Is(err, context.Canceled) { + return backoff.Permanent(err) + } + volumeState = "" + return fmt.Errorf("could not get volume status; %v", err) + } + + volumeState = v.State + + if v.State == desiredState { + return nil + } + + errMsg := fmt.Sprintf("volume state is %s, not %s", v.State, desiredState) + if desiredState == VolumeStateDeleted && v.State == VolumeStateDeleting { + err = errors.VolumeDeletingError(errMsg) + } else { + err = errors.New(errMsg) + } + + // Return a permanent error to stop retrying if we reached one of the abort states + if utils.SliceContainsString(abortStates, v.State) { + return backoff.Permanent(TerminalState(err)) + } + + return err + } + + stateNotify := func(err error, duration time.Duration) { + Logc(ctx).WithFields(LogFields{ + "increment": duration.Truncate(10 * time.Millisecond), + "message": err.Error(), + }).Debugf("Waiting for volume state.") + } + + stateBackoff := backoff.NewExponentialBackOff() + stateBackoff.MaxElapsedTime = maxElapsedTime + stateBackoff.MaxInterval = 5 * time.Second + stateBackoff.RandomizationFactor = 0.1 + stateBackoff.InitialInterval = 3 * time.Second + stateBackoff.Multiplier = 1.414 + + Logc(ctx).WithField("desiredState", desiredState).Info("Waiting for volume state.") 
+ + if err := backoff.RetryNotify(checkVolumeState, stateBackoff, stateNotify); err != nil { + if IsTerminalStateError(err) { + Logc(ctx).WithError(err).Error("Volume reached terminal state.") + } else { + Logc(ctx).Warningf("Volume state was not %s after %3.2f seconds.", + desiredState, stateBackoff.MaxElapsedTime.Seconds()) + } + return volumeState, err + } + + Logc(ctx).WithField("desiredState", desiredState).Debug("Desired volume state reached.") + + return volumeState, nil +} + +// CreateVolume creates a new volume. +func (c Client) CreateVolume(ctx context.Context, request *VolumeCreateRequest) (*Volume, error) { + var protocols []netapppb.Protocols + for _, protocolType := range request.ProtocolTypes { + protocols = append(protocols, GCNVProtocolFromVolumeProtocol(protocolType)) + } + + cPool := c.capacityPool(request.CapacityPool) + if cPool == nil { + return nil, fmt.Errorf("pool %s not found", request.CapacityPool) + } + + newVol := &netapppb.Volume{ + Name: request.Name, + ShareName: request.CreationToken, + StoragePool: request.CapacityPool, + CapacityGib: request.SizeBytes / 1073741824, + ExportPolicy: exportPolicyExport(request.ExportPolicy), + Protocols: protocols, + UnixPermissions: request.UnixPermissions, + Labels: request.Labels, + SnapshotDirectory: request.SnapshotDirectory, + SecurityStyle: GCNVSecurityStyleFromVolumeSecurityStyle(request.SecurityStyle), + } + + if request.SnapshotReserve != nil { + newVol.SnapReserve = float64(*request.SnapshotReserve) + } + + // Only set the snapshot ID if we are cloning + if request.SnapshotID != "" { + newVol.RestoreParameters = &netapppb.RestoreParameters{ + Source: &netapppb.RestoreParameters_SourceSnapshot{ + SourceSnapshot: request.SnapshotID, + }, + } + } + + Logc(ctx).WithFields(LogFields{ + "name": request.Name, + "creationToken": request.CreationToken, + "capacityPool": request.CapacityPool, + }).Debug("Issuing create request.") + + logFields := LogFields{ + "API": "GCNV.CreateVolume", + "volume": 
request.Name, + } + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.CreateVolumeRequest{ + Parent: c.createBaseID(), + VolumeId: request.Name, + Volume: newVol, + } + poller, err := c.sdkClient.gcnv.CreateVolume(sdkCtx, req) + if err != nil { + Logc(ctx).WithFields(logFields).WithError(err).Error("Error creating volume.") + return nil, err + } + + Logc(ctx).WithFields(logFields).Info("Volume create request issued.") + + if _, pollErr := poller.Poll(sdkCtx); pollErr != nil { + return nil, pollErr + } else { + // The volume doesn't exist yet, so forge the name & network IDs to enable conversion to a Volume struct + newVol.Name = c.createVolumeID(request.Name) + newVol.Network = cPool.NetworkFullName + return c.newVolumeFromGCNVVolume(ctx, newVol) + } +} + +// ModifyVolume updates attributes of a volume. +func (c Client) ModifyVolume( + ctx context.Context, volume *Volume, labels map[string]string, unixPermissions *string, + snapshotDirAccess *bool, _ *ExportRule, +) error { + logFields := LogFields{ + "API": "GCNV.UpdateVolume", + "volume": volume.Name, + } + + newVolume := &netapppb.Volume{ + Name: volume.FullName, + Labels: labels, + } + updateMask := &fieldmaskpb.FieldMask{ + Paths: []string{"labels"}, + } + + if unixPermissions != nil { + newVolume.UnixPermissions = *unixPermissions + updateMask.Paths = append(updateMask.Paths, "unix_permissions") + } + + if snapshotDirAccess != nil { + newVolume.SnapshotDirectory = *snapshotDirAccess + updateMask.Paths = append(updateMask.Paths, "snapshot_directory") + } + + Logc(ctx).WithFields(logFields).Debug("Modifying volume.") + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.UpdateVolumeRequest{ + Volume: newVolume, + UpdateMask: updateMask, + } + poller, err := c.sdkClient.gcnv.UpdateVolume(sdkCtx, req) + if err != nil { + Logc(ctx).WithFields(logFields).WithError(err).Error("Error modifying volume.") + 
return err + } + + Logc(ctx).WithFields(logFields).Info("Volume modify request issued.") + + waitCtx, waitCancel := context.WithTimeout(ctx, DefaultTimeout) + defer waitCancel() + if _, pollErr := poller.Wait(waitCtx); pollErr != nil { + Logc(ctx).WithFields(logFields).WithError(pollErr).Error("Error polling for volume modify result.") + return pollErr + } + + Logc(ctx).WithFields(logFields).Debug("Volume modified.") + + return nil +} + +// ResizeVolume sends a VolumePatch to update a volume's quota. +func (c Client) ResizeVolume(ctx context.Context, volume *Volume, newSizeBytes int64) error { + logFields := LogFields{ + "API": "GCNV.UpdateVolume", + "volume": volume.Name, + } + + newVolume := &netapppb.Volume{ + Name: volume.FullName, + CapacityGib: newSizeBytes / 1073741824, + } + updateMask := &fieldmaskpb.FieldMask{ + Paths: []string{"capacity_gib"}, + } + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.UpdateVolumeRequest{ + Volume: newVolume, + UpdateMask: updateMask, + } + poller, err := c.sdkClient.gcnv.UpdateVolume(sdkCtx, req) + if err != nil { + Logc(ctx).WithFields(logFields).WithError(err).Error("Error resizing volume.") + return err + } + + Logc(ctx).WithFields(logFields).Info("Volume resize request issued.") + + waitCtx, waitCancel := context.WithTimeout(ctx, DefaultTimeout) + defer waitCancel() + if _, pollErr := poller.Wait(waitCtx); pollErr != nil { + Logc(ctx).WithFields(logFields).WithError(pollErr).Error("Error polling for volume resize result.") + return pollErr + } + + Logc(ctx).WithFields(logFields).Debug("Volume resize complete.") + + return nil +} + +// DeleteVolume deletes a volume. 
+func (c Client) DeleteVolume(ctx context.Context, volume *Volume) error { + name := volume.Name + logFields := LogFields{ + "API": "GCNV.DeleteVolume", + "volume": name, + } + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.DeleteVolumeRequest{ + Name: c.createVolumeID(name), + Force: true, + } + _, err := c.sdkClient.gcnv.DeleteVolume(sdkCtx, req) + if err != nil { + if IsGCNVNotFoundError(err) { + Logc(ctx).WithFields(logFields).Info("Volume already deleted.") + return nil + } + + Logc(ctx).WithFields(logFields).WithError(err).Error("Error deleting volume.") + return err + } + + Logc(ctx).WithFields(logFields).Debug("Volume deleted.") + + return nil +} + +// /////////////////////////////////////////////////////////////////////////////// +// Functions to retrieve and manage snapshots +// /////////////////////////////////////////////////////////////////////////////// + +// newSnapshotFromGCNVSnapshot creates a new internal Snapshot struct from a GCNV snapshot. +func (c Client) newSnapshotFromGCNVSnapshot(_ context.Context, gcnvSnapshot *netapppb.Snapshot) (*Snapshot, error) { + _, location, volumeName, snapshotName, err := parseSnapshotID(gcnvSnapshot.Name) + if err != nil { + return nil, err + } + + snapshot := &Snapshot{ + Name: snapshotName, + FullName: gcnvSnapshot.Name, + Volume: volumeName, + Location: location, + State: SnapshotStateFromGCNVState(gcnvSnapshot.State), + Labels: gcnvSnapshot.Labels, + } + + if gcnvSnapshot.CreateTime != nil { + snapshot.Created = gcnvSnapshot.CreateTime.AsTime() + } + + return snapshot, nil +} + +// SnapshotsForVolume returns a list of snapshots on a volume. 
+func (c Client) SnapshotsForVolume(ctx context.Context, volume *Volume) (*[]*Snapshot, error) { + logFields := LogFields{ + "API": "GCNV.ListSnapshots", + "volume": volume.Name, + } + + var snapshots []*Snapshot + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.ListSnapshotsRequest{ + Parent: c.createVolumeID(volume.Name), + PageSize: PaginationLimit, + } + it := c.sdkClient.gcnv.ListSnapshots(sdkCtx, req) + for { + gcnvSnapshot, err := it.Next() + if errors.Is(err, iterator.Done) { + break + } + if err != nil { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithFields(logFields).WithError(err).Error("Could not read snapshots.") + return nil, err + } + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithFields(logFields).WithError(err).Debug("Snapshot: %v.", gcnvSnapshot) + + snapshot, err := c.newSnapshotFromGCNVSnapshot(ctx, gcnvSnapshot) + if err != nil { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithError(err).Warning("Skipping snapshot.") + continue + } + snapshots = append(snapshots, snapshot) + } + + Logc(ctx).WithFields(logFields).Debug("Read snapshots from volume.") + + return &snapshots, nil +} + +// SnapshotForVolume fetches a specific snapshot on a volume by its name. +func (c Client) SnapshotForVolume( + ctx context.Context, volume *Volume, snapshotName string, +) (*Snapshot, error) { + logFields := LogFields{ + "API": "GCNV.GetSnapshot", + "volume": volume.Name, + "snapshot": snapshotName, + } + + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). 
+ WithFields(logFields).Trace("Fetching snapshot by name.") + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.GetSnapshotRequest{ + Name: c.createSnapshotID(volume.Name, snapshotName), + } + gcnvSnapshot, err := c.sdkClient.gcnv.GetSnapshot(sdkCtx, req) + if err != nil { + if IsGCNVNotFoundError(err) { + Logc(ctx).WithFields(logFields).Debug("Snapshot not found.") + return nil, errors.WrapWithNotFoundError(err, "snapshot '%s' not found", snapshotName) + } + + Logc(ctx).WithFields(logFields).WithError(err).Error("Error fetching snapshot.") + return nil, err + } + + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithFields(logFields).Debug("Found snapshot by name.") + + return c.newSnapshotFromGCNVSnapshot(ctx, gcnvSnapshot) +} + +// WaitForSnapshotState waits for a desired snapshot state and returns once that state is achieved. +func (c Client) WaitForSnapshotState( + ctx context.Context, snapshot *Snapshot, volume *Volume, desiredState string, abortStates []string, + maxElapsedTime time.Duration, +) error { + checkSnapshotState := func() error { + s, err := c.SnapshotForVolume(ctx, volume, snapshot.Name) + if err != nil { + + // There is no 'Deleted' state in GCNV -- the snapshot just vanishes. If we failed to query + // the snapshot info, and we're trying to transition to StateDeleted, and we get back a 404, + // then return success. Otherwise, log the error as usual. 
+ if desiredState == SnapshotStateDeleted && errors.IsNotFoundError(err) { + Logc(ctx).Debugf("Implied deletion for snapshot %s.", snapshot.Name) + return nil + } + if errors.Is(err, context.Canceled) { + return backoff.Permanent(err) + } + return fmt.Errorf("could not get snapshot status; %v", err) + } + + if s.State == desiredState { + return nil + } + + err = fmt.Errorf("snapshot state is %s, not %s", s.State, desiredState) + + // Return a permanent error to stop retrying if we reached one of the abort states + if utils.SliceContainsString(abortStates, s.State) { + return backoff.Permanent(TerminalState(err)) + } + + return err + } + + stateNotify := func(err error, duration time.Duration) { + Logc(ctx).WithFields(LogFields{ + "increment": duration.Truncate(10 * time.Millisecond), + "message": err.Error(), + }).Debugf("Waiting for snapshot state.") + } + + stateBackoff := backoff.NewExponentialBackOff() + stateBackoff.MaxElapsedTime = maxElapsedTime + stateBackoff.MaxInterval = 5 * time.Second + stateBackoff.RandomizationFactor = 0.1 + stateBackoff.InitialInterval = 3 * time.Second + stateBackoff.Multiplier = 1.414 + + Logc(ctx).WithField("desiredState", desiredState).Info("Waiting for snapshot state.") + + if err := backoff.RetryNotify(checkSnapshotState, stateBackoff, stateNotify); err != nil { + if IsTerminalStateError(err) { + Logc(ctx).WithError(err).Error("Snapshot reached terminal state.") + } else { + Logc(ctx).Warningf("Snapshot state was not %s after %3.2f seconds.", + desiredState, stateBackoff.MaxElapsedTime.Seconds()) + } + return err + } + + Logc(ctx).WithField("desiredState", desiredState).Debugf("Desired snapshot state reached.") + + return nil +} + +// CreateSnapshot creates a new snapshot. 
+func (c Client) CreateSnapshot(ctx context.Context, volume *Volume, snapshotName string) (*Snapshot, error) { + newSnapshot := &netapppb.Snapshot{} + + logFields := LogFields{ + "API": "GCNV.CreateSnapshot", + "volume": volume.Name, + "snapshot": snapshotName, + } + + Logc(ctx).WithFields(logFields).Debug("Issuing snapshot create request.") + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.CreateSnapshotRequest{ + Parent: volume.FullName, + Snapshot: newSnapshot, + SnapshotId: snapshotName, + } + poller, err := c.sdkClient.gcnv.CreateSnapshot(sdkCtx, req) + if err != nil { + Logc(ctx).WithFields(logFields).WithError(err).Error("Error creating snapshot.") + return nil, err + } + + Logc(ctx).WithFields(logFields).Info("Snapshot create request issued.") + + if _, pollErr := poller.Poll(sdkCtx); pollErr != nil { + return nil, pollErr + } else { + // The snapshot doesn't exist yet, so forge the name ID to enable conversion to a Snapshot struct + newSnapshot.Name = c.createSnapshotID(volume.Name, snapshotName) + return c.newSnapshotFromGCNVSnapshot(ctx, newSnapshot) + } +} + +// RestoreSnapshot restores a volume to a snapshot. 
+func (c Client) RestoreSnapshot(ctx context.Context, volume *Volume, snapshot *Snapshot) error { + logFields := LogFields{ + "API": "GCNV.RevertVolume", + "volume": volume.Name, + "snapshot": snapshot.Name, + } + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.RevertVolumeRequest{ + Name: volume.FullName, + SnapshotId: snapshot.Name, + } + poller, err := c.sdkClient.gcnv.RevertVolume(sdkCtx, req) + if err != nil { + if IsGCNVNotFoundError(err) { + Logc(ctx).WithFields(logFields).Info("Volume or snapshot not found.") + return err + } + + Logc(ctx).WithFields(logFields).WithError(err).Error("Error reverting volume to snapshot.") + return err + } + + waitCtx, waitCancel := context.WithTimeout(ctx, DefaultTimeout) + defer waitCancel() + if _, pollErr := poller.Wait(waitCtx); pollErr != nil { + Logc(ctx).WithFields(logFields).WithError(pollErr).Error("Error polling for volume revert to snapshot result.") + return pollErr + } + + Logc(ctx).WithFields(logFields).Debug("Volume reverted to snapshot.") + + return nil +} + +// DeleteSnapshot deletes a snapshot. 
+func (c Client) DeleteSnapshot(ctx context.Context, volume *Volume, snapshot *Snapshot) error { + logFields := LogFields{ + "API": "GCNV.DeleteSnapshot", + "volume": volume.Name, + "snapshot": snapshot.Name, + } + + sdkCtx, sdkCancel := context.WithTimeout(ctx, c.config.SDKTimeout) + defer sdkCancel() + req := &netapppb.DeleteSnapshotRequest{ + Name: c.createSnapshotID(volume.Name, snapshot.Name), + } + _, err := c.sdkClient.gcnv.DeleteSnapshot(sdkCtx, req) + if err != nil { + if IsGCNVNotFoundError(err) { + Logc(ctx).WithFields(logFields).Info("Snapshot already deleted.") + return nil + } + + Logc(ctx).WithFields(logFields).WithError(err).Error("Error deleting snapshot.") + return err + } + + Logc(ctx).WithFields(logFields).Debug("Snapshot deleted.") + + return nil +} + +// /////////////////////////////////////////////////////////////////////////////// +// Miscellaneous utility functions and error types +// /////////////////////////////////////////////////////////////////////////////// + +func ServiceLevelFromCapacityPool(capacityPool *CapacityPool) string { + if capacityPool == nil { + return ServiceLevelUnspecified + } + return capacityPool.ServiceLevel +} + +// ServiceLevelFromGCNVServiceLevel converts GCNV service level to string +func ServiceLevelFromGCNVServiceLevel(serviceLevel netapppb.ServiceLevel) string { + switch serviceLevel { + default: + fallthrough + case netapppb.ServiceLevel_SERVICE_LEVEL_UNSPECIFIED: + return ServiceLevelUnspecified + case netapppb.ServiceLevel_FLEX: + return ServiceLevelFlex + case netapppb.ServiceLevel_STANDARD: + return ServiceLevelStandard + case netapppb.ServiceLevel_PREMIUM: + return ServiceLevelPremium + case netapppb.ServiceLevel_EXTREME: + return ServiceLevelExtreme + } +} + +// StoragePoolStateFromGCNVState converts GCNV storage pool state to string +func StoragePoolStateFromGCNVState(state netapppb.StoragePool_State) string { + switch state { + default: + fallthrough + case netapppb.StoragePool_STATE_UNSPECIFIED: + 
return StoragePoolStateUnspecified + case netapppb.StoragePool_READY: + return StoragePoolStateReady + case netapppb.StoragePool_CREATING: + return StoragePoolStateCreating + case netapppb.StoragePool_DELETING: + return StoragePoolStateDeleting + case netapppb.StoragePool_UPDATING: + return StoragePoolStateUpdating + case netapppb.StoragePool_RESTORING: + return StoragePoolStateRestoring + case netapppb.StoragePool_DISABLED: + return StoragePoolStateDisabled + case netapppb.StoragePool_ERROR: + return StoragePoolStateError + } +} + +// VolumeStateFromGCNVState converts GCNV volume state to string +func VolumeStateFromGCNVState(state netapppb.Volume_State) string { + switch state { + default: + fallthrough + case netapppb.Volume_STATE_UNSPECIFIED: + return VolumeStateUnspecified + case netapppb.Volume_READY: + return VolumeStateReady + case netapppb.Volume_CREATING: + return VolumeStateCreating + case netapppb.Volume_DELETING: + return VolumeStateDeleting + case netapppb.Volume_UPDATING: + return VolumeStateUpdating + case netapppb.Volume_RESTORING: + return VolumeStateRestoring + case netapppb.Volume_DISABLED: + return VolumeStateDisabled + case netapppb.Volume_ERROR: + return VolumeStateError + } +} + +// VolumeSecurityStyleFromGCNVSecurityStyle converts GCNV volume security style to string +func VolumeSecurityStyleFromGCNVSecurityStyle(state netapppb.SecurityStyle) string { + switch state { + default: + fallthrough + case netapppb.SecurityStyle_SECURITY_STYLE_UNSPECIFIED: + return SecurityStyleUnspecified + case netapppb.SecurityStyle_NTFS: + return SecurityStyleNTFS + case netapppb.SecurityStyle_UNIX: + return SecurityStyleUnix + } +} + +// GCNVSecurityStyleFromVolumeSecurityStyle converts string to GCNV volume security style +func GCNVSecurityStyleFromVolumeSecurityStyle(state string) netapppb.SecurityStyle { + switch state { + default: + fallthrough + case SecurityStyleUnspecified: + return netapppb.SecurityStyle_SECURITY_STYLE_UNSPECIFIED + case 
SecurityStyleNTFS: + return netapppb.SecurityStyle_NTFS + case SecurityStyleUnix: + return netapppb.SecurityStyle_UNIX + } +} + +// VolumeAccessTypeFromGCNVAccessType converts GCNV volume access type to string +func VolumeAccessTypeFromGCNVAccessType(accessType netapppb.AccessType) string { + switch accessType { + default: + fallthrough + case netapppb.AccessType_ACCESS_TYPE_UNSPECIFIED: + return AccessTypeUnspecified + case netapppb.AccessType_READ_ONLY: + return AccessTypeReadOnly + case netapppb.AccessType_READ_WRITE: + return AccessTypeReadWrite + case netapppb.AccessType_READ_NONE: + return AccessTypeReadNone + } +} + +// GCNVAccessTypeFromVolumeAccessType converts string to GCNV volume access type +func GCNVAccessTypeFromVolumeAccessType(accessType string) netapppb.AccessType { + switch accessType { + default: + fallthrough + case AccessTypeUnspecified: + return netapppb.AccessType_ACCESS_TYPE_UNSPECIFIED + case AccessTypeReadOnly: + return netapppb.AccessType_READ_ONLY + case AccessTypeReadWrite: + return netapppb.AccessType_READ_WRITE + case AccessTypeReadNone: + return netapppb.AccessType_READ_NONE + } +} + +// VolumeProtocolFromGCNVProtocol converts GCNV protocol type to string +func VolumeProtocolFromGCNVProtocol(protocol netapppb.Protocols) string { + switch protocol { + default: + fallthrough + case netapppb.Protocols_PROTOCOLS_UNSPECIFIED: + return ProtocolTypeUnknown + case netapppb.Protocols_NFSV3: + return ProtocolTypeNFSv3 + case netapppb.Protocols_NFSV4: + return ProtocolTypeNFSv41 + case netapppb.Protocols_SMB: + return ProtocolTypeSMB + } +} + +// GCNVProtocolFromVolumeProtocol converts string to GCNV protocol type +func GCNVProtocolFromVolumeProtocol(protocol string) netapppb.Protocols { + switch protocol { + default: + fallthrough + case ProtocolTypeUnknown: + return netapppb.Protocols_PROTOCOLS_UNSPECIFIED + case ProtocolTypeNFSv3: + return netapppb.Protocols_NFSV3 + case ProtocolTypeNFSv41: + return netapppb.Protocols_NFSV4 + case 
ProtocolTypeSMB: + return netapppb.Protocols_SMB + } +} + +// SnapshotStateFromGCNVState converts GCNV snapshot state to string +func SnapshotStateFromGCNVState(state netapppb.Snapshot_State) string { + switch state { + default: + fallthrough + case netapppb.Snapshot_STATE_UNSPECIFIED: + return SnapshotStateUnspecified + case netapppb.Snapshot_READY: + return SnapshotStateReady + case netapppb.Snapshot_CREATING: + return SnapshotStateCreating + case netapppb.Snapshot_DELETING: + return SnapshotStateDeleting + case netapppb.Snapshot_UPDATING: + return SnapshotStateUpdating + case netapppb.Snapshot_DISABLED: + return SnapshotStateDisabled + case netapppb.Snapshot_ERROR: + return SnapshotStateError + } +} + +// IsGCNVNotFoundError checks whether an error returned from the GCNV SDK contains a 404 (Not Found) error. +func IsGCNVNotFoundError(err error) bool { + if err == nil { + return false + } + + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return true + } + + return false +} + +// IsGCNVTooManyRequestsError checks whether an error returned from the GCNV SDK contains a 429 (Too Many Requests) error. +func IsGCNVTooManyRequestsError(err error) bool { + if err == nil { + return false + } + + if s, ok := status.FromError(err); ok && s.Code() == codes.ResourceExhausted { + return true + } + + return false +} + +// DerefString accepts a string pointer and returns the value of the string, or "" if the pointer is nil. +func DerefString(s *string) string { + if s != nil { + return *s + } + return "" +} + +// DerefBool accepts a bool pointer and returns the value of the bool, or false if the pointer is nil. +func DerefBool(b *bool) bool { + if b != nil { + return *b + } + return false +} + +func DerefAccessType(at *netapppb.AccessType) netapppb.AccessType { + if at != nil { + return *at + } + return netapppb.AccessType_ACCESS_TYPE_UNSPECIFIED +} + +// TerminalStateError signals that the object is in a terminal state. 
This is used to stop waiting on +// an object to change state. +type TerminalStateError struct { + Err error +} + +func (e *TerminalStateError) Error() string { + return e.Err.Error() +} + +// TerminalState wraps the given err in a *TerminalStateError. +func TerminalState(err error) *TerminalStateError { + return &TerminalStateError{ + Err: err, + } +} + +func IsTerminalStateError(err error) bool { + if err == nil { + return false + } + var terminalStateError *TerminalStateError + ok := errors.As(err, &terminalStateError) + return ok +} diff --git a/storage_drivers/gcp/gcnvapi/gcnv_discovery.go b/storage_drivers/gcp/gcnvapi/gcnv_discovery.go new file mode 100644 index 000000000..025e8d72b --- /dev/null +++ b/storage_drivers/gcp/gcnvapi/gcnv_discovery.go @@ -0,0 +1,419 @@ +// Copyright 2024 NetApp, Inc. All Rights Reserved. + +// Package gcnvapi provides a high-level interface to the Google Cloud NetApp Volumes SDK +package gcnvapi + +import ( + "context" + "fmt" + "math/rand" + "strings" + "time" + + "cloud.google.com/go/netapp/apiv1/netapppb" + "github.com/cenkalti/backoff/v4" + "go.uber.org/multierr" + "google.golang.org/api/iterator" + + . "github.com/netapp/trident/logging" + "github.com/netapp/trident/storage" + "github.com/netapp/trident/utils" + "github.com/netapp/trident/utils/errors" +) + +const ( + serviceLevel = "serviceLevel" + network = "network" + capacityPools = "capacityPools" + DefaultMaxCacheAge = 10 * time.Minute +) + +// /////////////////////////////////////////////////////////////////////////////// +// Top level discovery functions +// /////////////////////////////////////////////////////////////////////////////// + +// RefreshGCNVResources refreshes the cache of discovered GCNV resources and validates +// them against our known storage pools. 
+func (c Client) RefreshGCNVResources(ctx context.Context) error { + // Check if it is time to update the cache + if time.Now().Before(c.sdkClient.GCNVResources.lastUpdateTime.Add(c.config.MaxCacheAge)) { + Logc(ctx).Debugf("Cached resources not yet %v old, skipping refresh.", c.config.MaxCacheAge) + return nil + } + + // (re-)Discover what we have to work with in GCNV + Logc(ctx).Debugf("Discovering GCNV resources.") + discoveryErr := multierr.Combine(c.DiscoverGCNVResources(ctx)) + + // This is noisy, hide it behind api tracing. + c.dumpGCNVResources(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]) + + // Warn about anything in the config that doesn't match any discovered resources + c.checkForNonexistentCapacityPools(ctx) + c.checkForNonexistentNetworks(ctx) + + // Return errors for any storage pool that cannot be satisfied by discovered resources + poolErrors := multierr.Combine(c.checkForUnsatisfiedPools(ctx)...) + discoveryErr = multierr.Combine(discoveryErr, poolErrors) + + return discoveryErr +} + +// DiscoverGCNVResources rediscovers the GCNV resources we care about and updates the cache. +func (c Client) DiscoverGCNVResources(ctx context.Context) (returnError error) { + // Start from scratch each time we are called. + newCapacityPoolMap := make(map[string]*CapacityPool) + + defer func() { + if returnError != nil { + Logc(ctx).WithError(returnError).Debug("Discovery error, not retaining any discovered resources.") + return + } + + // Swap the newly discovered resources into the cache only if discovery succeeded. 
+ c.sdkClient.GCNVResources.CapacityPoolMap = newCapacityPoolMap + c.sdkClient.GCNVResources.lastUpdateTime = time.Now() + + Logc(ctx).Debug("Switched to newly discovered resources.") + }() + + // Discover capacity pools + cPools, returnError := c.discoverCapacityPoolsWithRetry(ctx) + if returnError != nil { + return + } + + // Update maps with all data from discovered capacity pools + for _, cPool := range *cPools { + newCapacityPoolMap[cPool.FullName] = cPool + } + + // Detect the lack of any resources: can occur when no connectivity, etc. + // Would like a better way of proactively finding out there is something wrong + // at a very basic level. (Reproduce this by turning off your network!) + numCapacityPools := len(newCapacityPoolMap) + + if numCapacityPools == 0 { + return errors.New("no GCNV storage pools discovered; volume provisioning may fail until corrected") + } + + Logc(ctx).WithFields(LogFields{ + "capacityPools": numCapacityPools, + }).Info("Discovered GCNV resources.") + + return +} + +// dumpGCNVResources writes a hierarchical representation of discovered resources to the log. +func (c Client) dumpGCNVResources(ctx context.Context, driverName string, discoveryTraceEnabled bool) { + Logd(ctx, driverName, discoveryTraceEnabled).Tracef("Discovered GCNV Resources:") + + for _, cp := range c.sdkClient.GCNVResources.CapacityPoolMap { + Logd(ctx, driverName, discoveryTraceEnabled).Tracef("CPool: %s, [%s, %s]", + cp.Name, cp.ServiceLevel, cp.NetworkName) + } +} + +// checkForUnsatisfiedPools returns one or more errors if one or more configured storage pools +// are satisfied by no capacity pools. 
+func (c Client) checkForUnsatisfiedPools(ctx context.Context) (discoveryErrors []error) { + // Ensure every storage pool matches one or more capacity pools + for sPoolName, sPool := range c.sdkClient.GCNVResources.StoragePoolMap { + + // Find all capacity pools that work for this storage pool + cPools := c.CapacityPoolsForStoragePool(ctx, sPool, sPool.InternalAttributes()[serviceLevel]) + + if len(cPools) == 0 { + + err := fmt.Errorf("no GCNV storage pools found for Trident pool %s", sPoolName) + Logc(ctx).WithError(err).Error("Discovery error.") + discoveryErrors = append(discoveryErrors, err) + + } else { + + cPoolFullNames := make([]string, 0) + for _, cPool := range cPools { + cPoolFullNames = append(cPoolFullNames, cPool.FullName) + } + + // Print the mapping in the logs so we see it after each discovery refresh. + Logc(ctx).Debugf("Storage pool %s mapped to capacity pools %v.", sPoolName, cPoolFullNames) + } + } + + return +} + +// checkForNonexistentCapacityPools logs warnings if any configured capacity pools do not +// match discovered capacity pools in the resource cache. 
+func (c Client) checkForNonexistentCapacityPools(ctx context.Context) (anyMismatches bool) { + for sPoolName, sPool := range c.sdkClient.GCNVResources.StoragePoolMap { + + // Build list of capacity pool names + cpNames := make([]string, 0) + for _, cacheCP := range c.sdkClient.GCNVResources.CapacityPoolMap { + cpNames = append(cpNames, cacheCP.Name) + cpNames = append(cpNames, cacheCP.FullName) + } + + // Find any capacity pools value in this storage pool that doesn't match known capacity pools + for _, configCP := range utils.SplitString(ctx, sPool.InternalAttributes()[capacityPools], ",") { + if !utils.StringInSlice(configCP, cpNames) { + anyMismatches = true + + Logc(ctx).WithFields(LogFields{ + "pool": sPoolName, + "capacityPool": configCP, + }).Warning("Capacity pool referenced in pool not found.") + } + } + } + + return +} + +// checkForNonexistentNetworks logs warnings if any configured networks do not +// match discovered virtual networks in the resource cache. +func (c Client) checkForNonexistentNetworks(ctx context.Context) (anyMismatches bool) { + for sPoolName, sPool := range c.sdkClient.GCNVResources.StoragePoolMap { + + // Build list of short and long capacity network names + networkNames := make([]string, 0) + for _, cPool := range c.sdkClient.GCNVResources.CapacityPoolMap { + networkNames = append(networkNames, cPool.NetworkName) + networkNames = append(networkNames, cPool.NetworkFullName) + } + + // Find any network value in this storage pool that doesn't match the pool's network + configNetwork := sPool.InternalAttributes()[network] + if configNetwork != "" && !utils.StringInSlice(configNetwork, networkNames) { + anyMismatches = true + + Logc(ctx).WithFields(LogFields{ + "pool": sPoolName, + "network": configNetwork, + }).Warning("Network referenced in pool not found.") + } + } + + return +} + +// /////////////////////////////////////////////////////////////////////////////// +// Internal functions to do discovery via the GCNV SDK +// 
/////////////////////////////////////////////////////////////////////////////// + +// discoverCapacityPoolsWithRetry queries GCNV SDK for all capacity pools in the current location, +// retrying if the API request is throttled. +func (c Client) discoverCapacityPoolsWithRetry(ctx context.Context) (pools *[]*CapacityPool, err error) { + discover := func() error { + if pools, err = c.discoverCapacityPools(ctx); err != nil && IsGCNVTooManyRequestsError(err) { + return err + } + return backoff.Permanent(err) + } + + notify := func(err error, duration time.Duration) { + Logc(ctx).WithFields(LogFields{ + "increment": duration.Truncate(10 * time.Millisecond), + }).Debugf("Retrying capacity pools query.") + } + + expBackoff := backoff.NewExponentialBackOff() + expBackoff.MaxElapsedTime = DefaultSDKTimeout + expBackoff.MaxInterval = 5 * time.Second + expBackoff.RandomizationFactor = 0.1 + expBackoff.InitialInterval = 5 * time.Second + expBackoff.Multiplier = 1 + + err = backoff.RetryNotify(discover, expBackoff, notify) + + return +} + +// discoverCapacityPools queries GCNV SDK for all capacity pools in the current location. +func (c Client) discoverCapacityPools(ctx context.Context) (*[]*CapacityPool, error) { + logFields := LogFields{ + "API": "GCNV.ListStoragePools", + } + + var pools []*CapacityPool + + req := &netapppb.ListStoragePoolsRequest{ + Parent: fmt.Sprintf("projects/%s/locations/%s", c.config.ProjectNumber, c.config.Location), + PageSize: PaginationLimit, + } + it := c.sdkClient.gcnv.ListStoragePools(ctx, req) + for { + pool, err := it.Next() + if errors.Is(err, iterator.Done) { + break + } + if err != nil { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithFields(logFields).WithError(err).Error("Could not read pools.") + return nil, err + } + + _, location, capacityPool, err := parseCapacityPoolID(pool.Name) + if err != nil { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). 
+ WithFields(logFields).WithError(err).Warning("Skipping pool.") + } + + _, network, err := parseNetworkID(pool.Network) + if err != nil { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["api"]). + WithFields(logFields).WithError(err).Warning("Skipping pool.") + } + + pools = append(pools, &CapacityPool{ + Name: capacityPool, + FullName: pool.Name, + Location: location, + ServiceLevel: ServiceLevelFromGCNVServiceLevel(pool.ServiceLevel), + State: StoragePoolStateFromGCNVState(pool.State), + NetworkName: network, + NetworkFullName: pool.Network, + }) + } + + return &pools, nil +} + +// /////////////////////////////////////////////////////////////////////////////// +// API functions to match/search capacity pools +// /////////////////////////////////////////////////////////////////////////////// + +// CapacityPools returns a list of all discovered GCNV capacity pools. +func (c Client) CapacityPools() *[]*CapacityPool { + var cPools []*CapacityPool + + for _, cPool := range c.sdkClient.GCNVResources.CapacityPoolMap { + cPools = append(cPools, cPool) + } + + return &cPools +} + +// capacityPool returns a single discovered capacity pool by its short name. +func (c Client) capacityPool(cPoolName string) *CapacityPool { + for _, cPool := range c.sdkClient.GCNVResources.CapacityPoolMap { + if cPool.Name == cPoolName { + return cPool + } + } + return nil +} + +// CapacityPoolsForStoragePools returns all discovered capacity pools matching all known storage pools, +// regardless of service levels. 
+func (c Client) CapacityPoolsForStoragePools(ctx context.Context) []*CapacityPool { + // This map deduplicates cPools from multiple storage pools + cPoolMap := make(map[*CapacityPool]bool) + + // Build deduplicated map of cPools + for _, sPool := range c.sdkClient.StoragePoolMap { + for _, cPool := range c.CapacityPoolsForStoragePool(ctx, sPool, "") { + cPoolMap[cPool] = true + } + } + + // Copy keys into a list of deduplicated cPools + cPools := make([]*CapacityPool, 0) + + for cPool := range cPoolMap { + cPools = append(cPools, cPool) + } + + return cPools +} + +// CapacityPoolsForStoragePool returns all discovered capacity pools matching the specified +// storage pool and service level. The pools are shuffled to enable easier random selection. +func (c Client) CapacityPoolsForStoragePool( + ctx context.Context, sPool storage.Pool, serviceLevel string, +) []*CapacityPool { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["discovery"]).WithField("storagePool", sPool.Name()). 
+ Tracef("Determining capacity pools for storage pool.") + + // This map tracks which capacity pools have passed the filters + filteredCapacityPoolMap := make(map[string]bool) + + // Start with all capacity pools marked as passing the filters + for cPoolFullName := range c.sdkClient.CapacityPoolMap { + filteredCapacityPoolMap[cPoolFullName] = true + } + + // If capacity pools were specified, filter out non-matching capacity pools + cpList := utils.SplitString(ctx, sPool.InternalAttributes()[capacityPools], ",") + if len(cpList) > 0 { + for cPoolFullName, cPool := range c.sdkClient.CapacityPoolMap { + if !utils.SliceContainsString(cpList, cPool.Name) && !utils.SliceContainsString(cpList, cPoolFullName) { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["discovery"]).Tracef("Ignoring capacity pool %s, not in capacity pools [%s].", + cPoolFullName, cpList) + filteredCapacityPoolMap[cPoolFullName] = false + } + } + } + + // If networks were specified, filter out non-matching capacity pools + network := sPool.InternalAttributes()[network] + if network != "" { + for cPoolFullName, cPool := range c.sdkClient.CapacityPoolMap { + if network != cPool.NetworkName && network != cPool.NetworkFullName { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["discovery"]).Tracef("Ignoring capacity pool %s, not in capacity pools [%s].", + cPoolFullName, cpList) + filteredCapacityPoolMap[cPoolFullName] = false + } + } + } + + // Filter out pools with non-matching service levels + if serviceLevel != "" { + for cPoolFullName, cPool := range c.sdkClient.CapacityPoolMap { + if !strings.EqualFold(cPool.ServiceLevel, serviceLevel) { + Logd(ctx, c.config.StorageDriverName, c.config.DebugTraceFlags["discovery"]).Tracef("Ignoring capacity pool %s, not service level %s.", + cPoolFullName, serviceLevel) + filteredCapacityPoolMap[cPoolFullName] = false + } + } + } + + // Build list of all capacity pools that have passed all filters + cPools := make([]*CapacityPool, 0) 
+ for cPoolFullName, match := range filteredCapacityPoolMap { + if match { + cPools = append(cPools, c.sdkClient.CapacityPoolMap[cPoolFullName]) + } + } + + // Shuffle the pools + rand.Shuffle(len(cPools), func(i, j int) { cPools[i], cPools[j] = cPools[j], cPools[i] }) + + return cPools +} + +// EnsureVolumeInValidCapacityPool checks whether the specified volume exists in any capacity pool that is +// referenced by the backend config. It returns nil if so, or if no capacity pools are named in the config. +func (c Client) EnsureVolumeInValidCapacityPool(ctx context.Context, volume *Volume) error { + // Get a list of all capacity pools referenced in the config + allCapacityPools := c.CapacityPoolsForStoragePools(ctx) + + // If we aren't restricting capacity pools, any capacity pool is OK + if len(allCapacityPools) == 0 { + return nil + } + + // Always match by capacity pool full name + cPoolFullName := c.createCapacityPoolID(volume.CapacityPool) + + for _, cPool := range allCapacityPools { + if cPoolFullName == cPool.FullName { + return nil + } + } + + return errors.NotFoundError("volume %s is part of another storage pool not referenced "+ + "by this backend", volume.Name) +} diff --git a/storage_drivers/gcp/gcnvapi/gcnv_structs.go b/storage_drivers/gcp/gcnvapi/gcnv_structs.go new file mode 100644 index 000000000..8cfa2866a --- /dev/null +++ b/storage_drivers/gcp/gcnvapi/gcnv_structs.go @@ -0,0 +1,151 @@ +// Copyright 2024 NetApp, Inc. All Rights Reserved. 
+ +// Package gcnvapi provides a high-level interface to the Google Cloud NetApp Volumes SDK +package gcnvapi + +import ( + "time" + + "github.com/netapp/trident/storage" +) + +const ( + ProtocolTypeUnknown = "Unknown" + ProtocolTypeNFSPrefix = "NFSv" + ProtocolTypeNFSv3 = ProtocolTypeNFSPrefix + "3" + ProtocolTypeNFSv41 = ProtocolTypeNFSPrefix + "4.1" + ProtocolTypeSMB = "SMB" + + ServiceLevelUnspecified = "Unspecified" + ServiceLevelFlex = "Flex" + ServiceLevelStandard = "Standard" + ServiceLevelPremium = "Premium" + ServiceLevelExtreme = "Extreme" + + StoragePoolStateUnspecified = "Unspecified" + StoragePoolStateReady = "Ready" + StoragePoolStateCreating = "Creating" + StoragePoolStateDeleting = "Deleting" + StoragePoolStateDeleted = "NoSuchState" + StoragePoolStateUpdating = "Updating" + StoragePoolStateRestoring = "Restoring" + StoragePoolStateDisabled = "Disabled" + StoragePoolStateError = "Error" + + VolumeStateUnspecified = "Unspecified" + VolumeStateReady = "Ready" + VolumeStateCreating = "Creating" + VolumeStateDeleting = "Deleting" + VolumeStateDeleted = "NoSuchState" + VolumeStateUpdating = "Updating" + VolumeStateRestoring = "Restoring" + VolumeStateDisabled = "Disabled" + VolumeStateError = "Error" + + SnapshotStateUnspecified = "Unspecified" + SnapshotStateReady = "Ready" + SnapshotStateCreating = "Creating" + SnapshotStateDeleting = "Deleting" + SnapshotStateDeleted = "NoSuchState" + SnapshotStateUpdating = "Updating" + SnapshotStateDisabled = "Disabled" + SnapshotStateError = "Error" + + SecurityStyleUnspecified = "Unspecified" + SecurityStyleNTFS = "NTFS" + SecurityStyleUnix = "Unix" + + AccessTypeUnspecified = "Unspecified" + AccessTypeReadOnly = "ReadOnly" + AccessTypeReadWrite = "ReadWrite" + AccessTypeReadNone = "ReadNone" +) + +// GCNVResources is the toplevel cache for the set of things we discover about our GCNV environment. 
+type GCNVResources struct { + CapacityPoolMap map[string]*CapacityPool + StoragePoolMap map[string]storage.Pool + lastUpdateTime time.Time +} + +// CapacityPool records details of a discovered GCNV storage pool. +type CapacityPool struct { + Name string + FullName string + Location string + ServiceLevel string + State string + NetworkName string + NetworkFullName string +} + +// Volume records details of a discovered GCNV volume. +type Volume struct { + Name string + CreationToken string + FullName string + Location string + State string + CapacityPool string + NetworkName string + NetworkFullName string + ServiceLevel string + SizeBytes int64 + ExportPolicy *ExportPolicy + ProtocolTypes []string + MountTargets []MountTarget + UnixPermissions string + Labels map[string]string + SnapshotReserve int64 + SnapshotDirectory bool + SecurityStyle string +} + +// VolumeCreateRequest embodies all the details of a volume to be created. +type VolumeCreateRequest struct { + Name string + CreationToken string + CapacityPool string + SizeBytes int64 + ExportPolicy *ExportPolicy + ProtocolTypes []string + UnixPermissions string + Labels map[string]string + SnapshotReserve *int64 + SnapshotDirectory bool + SecurityStyle string + SnapshotID string +} + +// ExportPolicy records details of a discovered GCNV volume export policy. +type ExportPolicy struct { + Rules []ExportRule +} + +// ExportRule records details of a discovered GCNV volume export policy rule. +type ExportRule struct { + AllowedClients string + SMB bool + Nfsv3 bool + Nfsv4 bool + RuleIndex int32 + AccessType string +} + +// MountTarget records details of a discovered GCNV volume mount target. +type MountTarget struct { + Export string + ExportPath string + Protocol string +} + +// Snapshot records details of a discovered GCNV snapshot. 
+type Snapshot struct { + Name string + FullName string + Volume string + Location string + State string + Created time.Time + Labels map[string]string +} diff --git a/storage_drivers/gcp/gcnvapi/types.go b/storage_drivers/gcp/gcnvapi/types.go new file mode 100644 index 000000000..f1dce6195 --- /dev/null +++ b/storage_drivers/gcp/gcnvapi/types.go @@ -0,0 +1,42 @@ +// Copyright 2024 NetApp, Inc. All Rights Reserved. + +// Package gcnvapi provides a high-level interface to the Google Cloud NetApp Volumes SDK +package gcnvapi + +import ( + "context" + "time" + + "github.com/netapp/trident/storage" +) + +//go:generate mockgen -package mock_api -destination=../../../mocks/mock_storage_drivers/mock_gcp/mock_gcnvapi.go github.com/netapp/trident/storage_drivers/gcp/gcnvapi GCNVAPI + +type GCNV interface { + Init(context.Context, map[string]storage.Pool) error + + RefreshGCNVResources(context.Context) error + DiscoverGCNVResources(context.Context) error + + CapacityPools() *[]*CapacityPool + CapacityPoolsForStoragePools(context.Context) []*CapacityPool + CapacityPoolsForStoragePool(context.Context, storage.Pool, string) []*CapacityPool + EnsureVolumeInValidCapacityPool(context.Context, *Volume) error + + Volumes(context.Context) (*[]*Volume, error) + Volume(context.Context, *storage.VolumeConfig) (*Volume, error) + VolumeByName(context.Context, string) (*Volume, error) + VolumeExists(context.Context, *storage.VolumeConfig) (bool, *Volume, error) + WaitForVolumeState(context.Context, *Volume, string, []string, time.Duration) (string, error) + CreateVolume(context.Context, *VolumeCreateRequest) (*Volume, error) + ModifyVolume(context.Context, *Volume, map[string]string, *string, *bool, *ExportRule) error + ResizeVolume(context.Context, *Volume, int64) error + DeleteVolume(context.Context, *Volume) error + + SnapshotsForVolume(context.Context, *Volume) (*[]*Snapshot, error) + SnapshotForVolume(context.Context, *Volume, string) (*Snapshot, error) + 
WaitForSnapshotState(context.Context, *Snapshot, *Volume, string, []string, time.Duration) error + CreateSnapshot(context.Context, *Volume, string) (*Snapshot, error) + RestoreSnapshot(context.Context, *Volume, *Snapshot) error + DeleteSnapshot(context.Context, *Volume, *Snapshot) error +} diff --git a/storage_drivers/gcp/gcp_common.go b/storage_drivers/gcp/gcp_common.go new file mode 100644 index 000000000..5de010c3d --- /dev/null +++ b/storage_drivers/gcp/gcp_common.go @@ -0,0 +1,45 @@ +// Copyright 2022 NetApp, Inc. All Rights Reserved. + +package gcp + +import ( + tridentconfig "github.com/netapp/trident/config" + drivers "github.com/netapp/trident/storage_drivers" + "github.com/netapp/trident/storage_drivers/gcp/api" +) + +const ( + MinimumVolumeSizeBytes = uint64(1073741824) // 1 GiB + + defaultNfsMountOptions = "nfsvers=3" + defaultSecurityStyle = "unix" + defaultSnapshotDir = "false" + defaultSnapshotReserve = "" + defaultUnixPermissions = "0777" + defaultStorageClass = api.StorageClassHardware + defaultLimitVolumeSize = "" + defaultExportRule = "0.0.0.0/0" + + // Constants for internal pool attributes + + Size = "size" + ServiceLevel = "serviceLevel" + SnapshotDir = "snapshotDir" + SnapshotReserve = "snapshotReserve" + ExportRule = "exportRule" + Network = "network" + Region = "region" + Zone = "zone" + StorageClass = "storageClass" + UnixPermissions = "unixPermissions" + StoragePools = "storagePools" + + // Topology label names + topologyZoneLabel = drivers.TopologyLabelPrefix + "/" + Zone + topologyRegionLabel = drivers.TopologyLabelPrefix + "/" + Region +) + +type Telemetry struct { + tridentconfig.Telemetry + Plugin string `json:"plugin"` +} diff --git a/storage_drivers/gcp/gcp_cvs.go b/storage_drivers/gcp/gcp_cvs.go index 9bd616332..efebe3327 100644 --- a/storage_drivers/gcp/gcp_cvs.go +++ b/storage_drivers/gcp/gcp_cvs.go @@ -30,40 +30,14 @@ import ( ) const ( - MinimumVolumeSizeBytes = uint64(1073741824) // 1 GiB MinimumCVSVolumeSizeBytesHW = 
uint64(107374182400) // 100 GiB MaximumVolumesPerStoragePool = 50 MinimumAPIVersion = "1.4.0" MinimumSDEVersion = "2023.1.2" - defaultHWServiceLevel = api.UserServiceLevel1 - defaultSWServiceLevel = api.PoolServiceLevel1 - defaultNfsMountOptions = "-o nfsvers=3" - defaultSecurityStyle = "unix" - defaultSnapshotDir = "false" - defaultSnapshotReserve = "" - defaultUnixPermissions = "0777" - defaultStorageClass = api.StorageClassHardware - defaultLimitVolumeSize = "" - defaultExportRule = "0.0.0.0/0" - defaultNetwork = "default" - - // Constants for internal pool attributes - Size = "size" - ServiceLevel = "serviceLevel" - SnapshotDir = "snapshotDir" - SnapshotReserve = "snapshotReserve" - ExportRule = "exportRule" - Network = "network" - Region = "region" - Zone = "zone" - StorageClass = "storageClass" - UnixPermissions = "unixPermissions" - StoragePools = "storagePools" - - // Topology label names - topologyZoneLabel = drivers.TopologyLabelPrefix + "/" + Zone - topologyRegionLabel = drivers.TopologyLabelPrefix + "/" + Region + defaultHWServiceLevel = api.UserServiceLevel1 + defaultSWServiceLevel = api.PoolServiceLevel1 + defaultCVSNetwork = "default" // discovery debug log constant discovery = "discovery" @@ -84,11 +58,6 @@ type NFSStorageDriver struct { volumeCreateTimeout time.Duration } -type Telemetry struct { - tridentconfig.Telemetry - Plugin string `json:"plugin"` -} - func (d *NFSStorageDriver) GetConfig() *drivers.GCPNFSStorageDriverConfig { return &d.Config } @@ -306,7 +275,7 @@ func (d *NFSStorageDriver) populateConfigurationDefaults( } if config.Network == "" { - config.Network = defaultNetwork + config.Network = defaultCVSNetwork } // VolumeCreateTimeoutSeconds is the timeout value in seconds. diff --git a/storage_drivers/gcp/gcp_gcnv.go b/storage_drivers/gcp/gcp_gcnv.go new file mode 100644 index 000000000..04993aaa0 --- /dev/null +++ b/storage_drivers/gcp/gcp_gcnv.go @@ -0,0 +1,2206 @@ +// Copyright 2024 NetApp, Inc. All Rights Reserved. 
+
+package gcp
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+
+	"github.com/RoaringBitmap/roaring"
+	"github.com/google/uuid"
+	"go.uber.org/multierr"
+
+	tridentconfig "github.com/netapp/trident/config"
+	. "github.com/netapp/trident/logging"
+	"github.com/netapp/trident/storage"
+	sa "github.com/netapp/trident/storage_attribute"
+	drivers "github.com/netapp/trident/storage_drivers"
+	"github.com/netapp/trident/storage_drivers/gcp/gcnvapi"
+	"github.com/netapp/trident/utils"
+	"github.com/netapp/trident/utils/errors"
+)
+
+const (
+	MinimumGCNVVolumeSizeBytesSW = uint64(1073741824)   // 1 GiB
+	MinimumGCNVVolumeSizeBytesHW = uint64(107374182400) // 100 GiB
+
+	// Constants for internal pool attributes
+
+	CapacityPools = "capacityPools"
+
+	nfsVersion3  = "3"
+	nfsVersion4  = "4"
+	nfsVersion41 = "4.1"
+)
+
+var (
+	supportedNFSVersions     = []string{nfsVersion3, nfsVersion4, nfsVersion41}
+	storagePrefixRegex       = regexp.MustCompile(`^$|^[a-zA-Z][a-zA-Z-]*$`)
+	volumeNameRegex          = regexp.MustCompile(`^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`)
+	volumeCreationTokenRegex = regexp.MustCompile(`^[a-z]([a-z0-9-]{0,78}[a-z0-9])?$`)
+	gcpLabelRegex            = regexp.MustCompile(`[^-_a-z0-9\p{L}]`)
+	csiRegex                 = regexp.MustCompile(`^pvc-[\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12}$`)
+	nfsMountPathRegex        = regexp.MustCompile(`^(?P<server>.+):/(?P<export>.+)$`)
+)
+
+// NASStorageDriver is for storage provisioning using the Google Cloud NetApp Volumes service.
+type NASStorageDriver struct {
+	initialized         bool
+	Config              drivers.GCNVNASStorageDriverConfig
+	API                 gcnvapi.GCNV
+	telemetry           *Telemetry
+	pools               map[string]storage.Pool
+	volumeCreateTimeout time.Duration
+}
+
+// Name returns the name of this driver.
+func (d *NASStorageDriver) Name() string {
+	return tridentconfig.GCNVNASStorageDriverName
+}
+
+// defaultBackendName returns the default name of the backend managed by this driver instance.
+func (d *NASStorageDriver) defaultBackendName() string { + return fmt.Sprintf("%s_%s", strings.Replace(d.Name(), "-", "", -1), d.Config.APIKey.PrivateKeyID[0:5]) +} + +// BackendName returns the name of the backend managed by this driver instance. +func (d *NASStorageDriver) BackendName() string { + if d.Config.BackendName != "" { + return d.Config.BackendName + } else { + // Use the old naming scheme if no name is specified + return d.defaultBackendName() + } +} + +// poolName constructs the name of the pool reported by this driver instance. +func (d *NASStorageDriver) poolName(name string) string { + return fmt.Sprintf("%s_%s", d.BackendName(), strings.Replace(name, "-", "", -1)) +} + +// validateVolumeName checks that the name of a new volume matches the requirements of a GCNV volume name. +func (d *NASStorageDriver) validateVolumeName(name string) error { + if !volumeNameRegex.MatchString(name) { + return fmt.Errorf("volume name '%s' is not allowed; it must be 1-63 characters long, "+ + "begin with a letter, not end with a hyphen, and contain only letters, digits, and hyphens", name) + } + return nil +} + +// validateCreationToken checks that the creation token of a new volume matches the requirements of a creation token. +func (d *NASStorageDriver) validateCreationToken(name string) error { + if !volumeCreationTokenRegex.MatchString(name) { + return fmt.Errorf("volume internal name '%s' is not allowed; it must be 1-80 characters long, "+ + "begin with a letter, not end with a hyphen, and contain only letters, digits, and hyphens", name) + } + return nil +} + +// defaultCreateTimeout sets the driver timeout for volume create/delete operations. Docker gets more time, since +// it doesn't have a mechanism to retry. 
+func (d *NASStorageDriver) defaultCreateTimeout() time.Duration { + switch d.Config.DriverContext { + case tridentconfig.ContextDocker: + return tridentconfig.DockerCreateTimeout + default: + return gcnvapi.VolumeCreateTimeout + } +} + +// defaultTimeout controls the driver timeout for most workflows. +func (d *NASStorageDriver) defaultTimeout() time.Duration { + switch d.Config.DriverContext { + case tridentconfig.ContextDocker: + return tridentconfig.DockerDefaultTimeout + default: + return gcnvapi.DefaultTimeout + } +} + +// Initialize initializes this driver from the provided config. +func (d *NASStorageDriver) Initialize( + ctx context.Context, context tridentconfig.DriverContext, configJSON string, + commonConfig *drivers.CommonStorageDriverConfig, backendSecret map[string]string, backendUUID string, +) error { + fields := LogFields{"Method": "Initialize", "Type": "NASStorageDriver"} + Logd(ctx, commonConfig.StorageDriverName, commonConfig.DebugTraceFlags["method"]).WithFields(fields). + Trace(">>>> Initialize") + defer Logd(ctx, commonConfig.StorageDriverName, commonConfig.DebugTraceFlags["method"]).WithFields(fields). + Trace("<<<< Initialize") + + commonConfig.DriverContext = context + + // Initialize the driver's CommonStorageDriverConfig + d.Config.CommonStorageDriverConfig = commonConfig + + // Parse the config + config, err := d.initializeGCNVConfig(ctx, configJSON, commonConfig, backendSecret) + if err != nil { + return fmt.Errorf("error initializing %s driver. %v", d.Name(), err) + } + d.Config = *config + + if err = d.populateConfigurationDefaults(ctx, &d.Config); err != nil { + return fmt.Errorf("could not populate configuration defaults: %v", err) + } + + d.initializeStoragePools(ctx) + d.initializeTelemetry(ctx, backendUUID) + + // Unit tests mock the API layer, so we only use the real API interface if it doesn't already exist. 
+ if d.API == nil { + if d.API, err = d.initializeGCNVAPIClient(ctx, &d.Config); err != nil { + return fmt.Errorf("error validating %s GCNV API. %v", d.Name(), err) + } + } + + if err = d.validate(ctx); err != nil { + return fmt.Errorf("error validating %s driver. %v", d.Name(), err) + } + + // Identify non-overlapping storage backend pools on the driver backend. + pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + + Logc(ctx).WithFields(LogFields{ + "StoragePrefix": *config.StoragePrefix, + "Size": config.Size, + "ServiceLevel": config.ServiceLevel, + "NfsMountOptions": config.NFSMountOptions, + "LimitVolumeSize": config.LimitVolumeSize, + "ExportRule": config.ExportRule, + "VolumeCreateTimeoutSeconds": config.VolumeCreateTimeout, + }) + + d.initialized = true + return nil +} + +// Initialized returns whether this driver has been initialized (and not terminated). +func (d *NASStorageDriver) Initialized() bool { + return d.initialized +} + +// Terminate stops the driver prior to its being unloaded. 
+func (d *NASStorageDriver) Terminate(ctx context.Context, _ string) { + fields := LogFields{"Method": "Terminate", "Type": "NASStorageDriver"} + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Terminate") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Terminate") + + d.initialized = false +} + +// populateConfigurationDefaults fills in default values for configuration settings if not supplied in the config file +func (d *NASStorageDriver) populateConfigurationDefaults( + ctx context.Context, config *drivers.GCNVNASStorageDriverConfig, +) error { + fields := LogFields{"Method": "populateConfigurationDefaults", "Type": "NASStorageDriver"} + Logd(ctx, config.StorageDriverName, config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> populateConfigurationDefaults") + defer Logd(ctx, config.StorageDriverName, config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< populateConfigurationDefaults") + + if config.StoragePrefix == nil { + defaultPrefix := drivers.GetDefaultStoragePrefix(config.DriverContext) + defaultPrefix = strings.Replace(defaultPrefix, "_", "-", -1) + config.StoragePrefix = &defaultPrefix + } + + if config.Size == "" { + config.Size = drivers.DefaultVolumeSize + } + + if config.ServiceLevel == "" { + config.ServiceLevel = gcnvapi.ServiceLevelStandard + } + + if config.StorageClass == "" { + config.StorageClass = defaultStorageClass + } + + if config.NFSMountOptions == "" { + config.NFSMountOptions = defaultNfsMountOptions + } + + if config.SnapshotDir != "" { + // Set the snapshotDir provided in the config + snapDirFormatted, err := utils.GetFormattedBool(config.SnapshotDir) + if err != nil { + Logc(ctx).WithError(err).Errorf("Invalid boolean value for snapshotDir: %v.", config.SnapshotDir) + return fmt.Errorf("invalid boolean value for snapshotDir: %v", err) + } + config.SnapshotDir = snapDirFormatted + } + + if config.SnapshotReserve == "" { + 
config.SnapshotReserve = defaultSnapshotReserve + } + + if config.UnixPermissions == "" { + config.UnixPermissions = defaultUnixPermissions + } + + if config.LimitVolumeSize == "" { + config.LimitVolumeSize = defaultLimitVolumeSize + } + + if config.ExportRule == "" { + config.ExportRule = defaultExportRule + } + + if config.NASType == "" { + config.NASType = sa.NFS + } + + // VolumeCreateTimeoutSeconds is the timeout value in seconds. + volumeCreateTimeout := d.defaultCreateTimeout() + if config.VolumeCreateTimeout != "" { + i, err := strconv.ParseUint(d.Config.VolumeCreateTimeout, 10, 64) + if err != nil { + Logc(ctx).WithField("interval", d.Config.VolumeCreateTimeout).Errorf( + "Invalid volume create timeout period. %v", err) + return err + } + volumeCreateTimeout = time.Duration(i) * time.Second + } + d.volumeCreateTimeout = volumeCreateTimeout + + Logc(ctx).WithFields(LogFields{ + "StoragePrefix": *config.StoragePrefix, + "Size": config.Size, + "ServiceLevel": config.ServiceLevel, + "NFSMountOptions": config.NFSMountOptions, + "SnapshotDir": config.SnapshotDir, + "SnapshotReserve": config.SnapshotReserve, + "LimitVolumeSize": config.LimitVolumeSize, + "ExportRule": config.ExportRule, + "NetworkName": config.Network, + "VolumeCreateTimeoutSeconds": config.VolumeCreateTimeout, + }).Debugf("Configuration defaults") + + return nil +} + +// initializeStoragePools defines the pools reported to Trident, whether physical or virtual. 
+func (d *NASStorageDriver) initializeStoragePools(ctx context.Context) { + d.pools = make(map[string]storage.Pool) + + if len(d.Config.Storage) == 0 { + + Logc(ctx).Debug("No vpools defined, reporting single pool.") + + // No vpools defined, so report region/zone as a single pool + pool := storage.NewStoragePool(nil, d.poolName("pool")) + + pool.Attributes()[sa.BackendType] = sa.NewStringOffer(d.Name()) + pool.Attributes()[sa.Snapshots] = sa.NewBoolOffer(true) + pool.Attributes()[sa.Clones] = sa.NewBoolOffer(true) + pool.Attributes()[sa.Encryption] = sa.NewBoolOffer(true) + pool.Attributes()[sa.Replication] = sa.NewBoolOffer(false) + pool.Attributes()[sa.Labels] = sa.NewLabelOffer(d.Config.Labels) + pool.Attributes()[sa.NASType] = sa.NewStringOffer(d.Config.NASType) + + if d.Config.Region != "" { + pool.Attributes()[sa.Region] = sa.NewStringOffer(d.Config.Region) + } + if d.Config.Zone != "" { + pool.Attributes()[sa.Zone] = sa.NewStringOffer(d.Config.Zone) + } + + pool.InternalAttributes()[Size] = d.Config.Size + pool.InternalAttributes()[UnixPermissions] = d.Config.UnixPermissions + pool.InternalAttributes()[ServiceLevel] = utils.Title(d.Config.ServiceLevel) + pool.InternalAttributes()[SnapshotDir] = d.Config.SnapshotDir + pool.InternalAttributes()[SnapshotReserve] = d.Config.SnapshotReserve + pool.InternalAttributes()[ExportRule] = d.Config.ExportRule + pool.InternalAttributes()[Network] = d.Config.Network + pool.InternalAttributes()[CapacityPools] = strings.Join(d.Config.StoragePools, ",") + + pool.SetSupportedTopologies(d.Config.SupportedTopologies) + + d.pools[pool.Name()] = pool + } else { + + Logc(ctx).Debug("One or more vpools defined.") + + // Report a pool for each virtual pool in the config + for index, vpool := range d.Config.Storage { + + region := d.Config.Region + if vpool.Region != "" { + region = vpool.Region + } + + zone := d.Config.Zone + if vpool.Zone != "" { + zone = vpool.Zone + } + + size := d.Config.Size + if vpool.Size != "" { + size = 
vpool.Size + } + + unixPermissions := d.Config.UnixPermissions + if vpool.UnixPermissions != "" { + unixPermissions = vpool.UnixPermissions + } + + supportedTopologies := d.Config.SupportedTopologies + if vpool.SupportedTopologies != nil { + supportedTopologies = vpool.SupportedTopologies + } + + capacityPools := d.Config.StoragePools + if vpool.StoragePools != nil { + capacityPools = vpool.StoragePools + } + + serviceLevel := d.Config.ServiceLevel + if vpool.ServiceLevel != "" { + serviceLevel = vpool.ServiceLevel + } + + snapshotDir := d.Config.SnapshotDir + if vpool.SnapshotDir != "" { + snapshotDir = vpool.SnapshotDir + } + + snapshotReserve := d.Config.SnapshotReserve + if vpool.SnapshotReserve != "" { + snapshotReserve = vpool.SnapshotReserve + } + + exportRule := d.Config.ExportRule + if vpool.ExportRule != "" { + exportRule = vpool.ExportRule + } + + network := d.Config.Network + if vpool.Network != "" { + network = vpool.Network + } + + pool := storage.NewStoragePool(nil, d.poolName(fmt.Sprintf("pool_%d", index))) + + pool.Attributes()[sa.BackendType] = sa.NewStringOffer(d.Name()) + pool.Attributes()[sa.Snapshots] = sa.NewBoolOffer(true) + pool.Attributes()[sa.Clones] = sa.NewBoolOffer(true) + pool.Attributes()[sa.Encryption] = sa.NewBoolOffer(true) + pool.Attributes()[sa.Replication] = sa.NewBoolOffer(false) + pool.Attributes()[sa.Labels] = sa.NewLabelOffer(d.Config.Labels, vpool.Labels) + pool.Attributes()[sa.NASType] = sa.NewStringOffer(d.Config.NASType) + + if region != "" { + pool.Attributes()[sa.Region] = sa.NewStringOffer(region) + } + if zone != "" { + pool.Attributes()[sa.Zone] = sa.NewStringOffer(zone) + } + + pool.InternalAttributes()[Size] = size + pool.InternalAttributes()[UnixPermissions] = unixPermissions + pool.InternalAttributes()[ServiceLevel] = utils.Title(serviceLevel) + pool.InternalAttributes()[SnapshotDir] = snapshotDir + pool.InternalAttributes()[SnapshotReserve] = snapshotReserve + pool.InternalAttributes()[ExportRule] = exportRule 
+ pool.InternalAttributes()[Network] = network + pool.InternalAttributes()[CapacityPools] = strings.Join(capacityPools, ",") + + pool.SetSupportedTopologies(supportedTopologies) + + d.pools[pool.Name()] = pool + } + } + + return +} + +// initializeTelemetry assembles all the telemetry data to be used as volume labels. +func (d *NASStorageDriver) initializeTelemetry(_ context.Context, backendUUID string) { + telemetry := tridentconfig.OrchestratorTelemetry + telemetry.TridentBackendUUID = backendUUID + d.telemetry = &Telemetry{ + Telemetry: telemetry, + Plugin: d.Name(), + } +} + +// initializeGCNVConfig parses the GCNV config, mixing in the specified common config. +func (d *NASStorageDriver) initializeGCNVConfig( + ctx context.Context, configJSON string, commonConfig *drivers.CommonStorageDriverConfig, + backendSecret map[string]string, +) (*drivers.GCNVNASStorageDriverConfig, error) { + fields := LogFields{"Method": "initializeGCNVConfig", "Type": "NASStorageDriver"} + Logd(ctx, commonConfig.StorageDriverName, commonConfig.DebugTraceFlags["method"]).WithFields(fields).Trace( + ">>>> initializeGCNVConfig") + defer Logd(ctx, commonConfig.StorageDriverName, commonConfig.DebugTraceFlags["method"]).WithFields(fields).Trace( + "<<<< initializeGCNVConfig") + + config := &drivers.GCNVNASStorageDriverConfig{} + config.CommonStorageDriverConfig = commonConfig + + // decode configJSON into GCNVNASStorageDriverConfig object + err := json.Unmarshal([]byte(configJSON), &config) + if err != nil { + return nil, fmt.Errorf("could not decode JSON configuration. %v", err) + } + + // Inject secret if not empty + if len(backendSecret) != 0 { + err = config.InjectSecrets(backendSecret) + if err != nil { + return nil, fmt.Errorf("could not inject backend secret; err: %v", err) + } + } + + return config, nil +} + +// initializeGCNVAPIClient returns a GCNV API client. 
+func (d *NASStorageDriver) initializeGCNVAPIClient( + ctx context.Context, config *drivers.GCNVNASStorageDriverConfig, +) (gcnvapi.GCNV, error) { + fields := LogFields{"Method": "initializeGCNVAPIClient", "Type": "NASStorageDriver"} + Logd(ctx, config.StorageDriverName, config.DebugTraceFlags["method"]).WithFields(fields).Trace( + ">>>> initializeGCNVAPIClient") + defer Logd(ctx, config.StorageDriverName, config.DebugTraceFlags["method"]).WithFields(fields).Trace( + "<<<< initializeGCNVAPIClient") + + sdkTimeout := gcnvapi.DefaultSDKTimeout + if config.SDKTimeout != "" { + if i, parseErr := strconv.ParseUint(d.Config.SDKTimeout, 10, 64); parseErr != nil { + Logc(ctx).WithField("interval", d.Config.SDKTimeout).WithError(parseErr).Error( + "Invalid value for SDK timeout.") + return nil, parseErr + } else { + sdkTimeout = time.Duration(i) * time.Second + } + } + + maxCacheAge := gcnvapi.DefaultMaxCacheAge + if config.MaxCacheAge != "" { + if i, parseErr := strconv.ParseUint(d.Config.MaxCacheAge, 10, 64); parseErr != nil { + Logc(ctx).WithField("interval", d.Config.MaxCacheAge).WithError(parseErr).Error( + "Invalid value for max cache age.") + return nil, parseErr + } else { + maxCacheAge = time.Duration(i) * time.Second + } + } + + gcnv, err := gcnvapi.NewDriver(ctx, &gcnvapi.ClientConfig{ + ProjectNumber: config.ProjectNumber, + Location: config.Location, + APIKey: &config.APIKey, + DebugTraceFlags: config.DebugTraceFlags, + SDKTimeout: sdkTimeout, + MaxCacheAge: maxCacheAge, + }) + if err != nil { + return nil, err + } + + // The storage pools should already be set up by this point. We register the pools with the + // API layer to enable matching of storage pools with discovered GCNV resources. + if err = gcnv.Init(ctx, d.pools); err != nil { + return nil, err + } + + return gcnv, nil +} + +// validate ensures the driver configuration and execution environment are valid and working. 
+func (d *NASStorageDriver) validate(ctx context.Context) error { + fields := LogFields{"Method": "validate", "Type": "NASStorageDriver"} + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> validate") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< validate") + + // Ensure storage prefix is compatible with cloud service + if err := validateGCNVStoragePrefix(*d.Config.StoragePrefix); err != nil { + return err + } + + // Validate pool-level attributes + for poolName, pool := range d.pools { + + // Validate service level (it is allowed to be blank) + serviceLevel := pool.InternalAttributes()[ServiceLevel] + switch serviceLevel { + case gcnvapi.ServiceLevelFlex, gcnvapi.ServiceLevelStandard, + gcnvapi.ServiceLevelPremium, gcnvapi.ServiceLevelExtreme, "": + break + default: + return fmt.Errorf("invalid service level in pool %s: %s", + poolName, pool.InternalAttributes()[ServiceLevel]) + } + + // Validate export rules + for _, rule := range strings.Split(pool.InternalAttributes()[ExportRule], ",") { + ipAddr := net.ParseIP(rule) + _, netAddr, _ := net.ParseCIDR(rule) + if ipAddr == nil && netAddr == nil { + return fmt.Errorf("invalid address/CIDR for exportRule in pool %s: %s", poolName, rule) + } + } + + // Validate snapshot dir + if pool.InternalAttributes()[SnapshotDir] != "" { + if _, err := strconv.ParseBool(pool.InternalAttributes()[SnapshotDir]); err != nil { + return fmt.Errorf("invalid boolean value for snapshotDir in pool %s; %v", poolName, err) + } + } + + // Validate snapshot reserve + if pool.InternalAttributes()[SnapshotReserve] != "" { + snapshotReserve, err := strconv.ParseInt(pool.InternalAttributes()[SnapshotReserve], 10, 0) + if err != nil { + return fmt.Errorf("invalid value for snapshotReserve in pool %s: %v", poolName, err) + } + if snapshotReserve < 0 || snapshotReserve > 90 { + return fmt.Errorf("invalid value for snapshotReserve in pool %s: %s", + poolName, 
pool.InternalAttributes()[SnapshotReserve]) + } + } + + // Validate unix permissions + if pool.InternalAttributes()[UnixPermissions] != "" { + if err := utils.ValidateOctalUnixPermissions(pool.InternalAttributes()[UnixPermissions]); err != nil { + return fmt.Errorf("invalid value for unixPermissions in pool %s; %v", poolName, err) + } + } + + // Validate default size + if _, err := utils.ConvertSizeToBytes(pool.InternalAttributes()[Size]); err != nil { + return fmt.Errorf("invalid value for default volume size in pool %s; %v", poolName, err) + } + } + + return nil +} + +// Create creates a new volume. +func (d *NASStorageDriver) Create( + ctx context.Context, volConfig *storage.VolumeConfig, storagePool storage.Pool, volAttributes map[string]sa.Request, +) error { + name := volConfig.InternalName + + fields := LogFields{ + "Method": "Create", + "Type": "NASStorageDriver", + "name": name, + "attrs": volAttributes, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Create") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Create") + + // Update resource cache as needed + if err := d.API.RefreshGCNVResources(ctx); err != nil { + return fmt.Errorf("could not update GCNV resource cache; %v", err) + } + + // Make sure we got a valid name + if err := d.validateVolumeName(volConfig.Name); err != nil { + return err + } + + // Make sure we got a valid creation token + if err := d.validateCreationToken(name); err != nil { + return err + } + + // Get the pool since most default values are pool-specific + if storagePool == nil { + return errors.New("pool not specified") + } + pool, ok := d.pools[storagePool.Name()] + if !ok { + return fmt.Errorf("pool %s does not exist", storagePool.Name()) + } + + // If the volume already exists, bail out + volumeExists, extantVolume, err := d.API.VolumeExists(ctx, volConfig) + if err != nil { + return fmt.Errorf("error checking for existing volume %s; %v", 
name, err) + } + if volumeExists { + if extantVolume.State == gcnvapi.VolumeStateCreating { + // This is a retry and the volume still isn't ready, so no need to wait further. + return errors.VolumeCreatingError( + fmt.Sprintf("volume state is still %s, not %s", gcnvapi.VolumeStateCreating, gcnvapi.VolumeStateReady)) + } + + Logc(ctx).WithFields(LogFields{ + "name": name, + "state": extantVolume.State, + }).Warning("Volume already exists.") + + return drivers.NewVolumeExistsError(name) + } + + // Take service level from volume config first (handles Docker case), then from pool. + // Service level should not be empty at this point due to application of config defaults. + // The service level is needed to select the minimum allowable volume size. + serviceLevel := utils.Title(volConfig.ServiceLevel) + if serviceLevel == "" { + serviceLevel = pool.InternalAttributes()[ServiceLevel] + } + + minimumGCNVVolumeSizeBytes := MinimumGCNVVolumeSizeBytesHW + if serviceLevel == gcnvapi.ServiceLevelFlex { + minimumGCNVVolumeSizeBytes = MinimumGCNVVolumeSizeBytesSW + } + + // Take snapshot reserve from volume config first (handles Docker case), then from pool + snapshotReserve := volConfig.SnapshotReserve + if snapshotReserve == "" { + snapshotReserve = pool.InternalAttributes()[SnapshotReserve] + } + var snapshotReservePtr *int64 + var snapshotReserveInt int64 + if snapshotReserve != "" { + snapshotReserveInt, err = strconv.ParseInt(snapshotReserve, 10, 64) + if err != nil { + return fmt.Errorf("invalid value for snapshotReserve: %v", err) + } + snapshotReservePtr = &snapshotReserveInt + } + + // Determine volume size in bytes + requestedSize, err := utils.ConvertSizeToBytes(volConfig.Size) + if err != nil { + return fmt.Errorf("could not convert volume size %s; %v", volConfig.Size, err) + } + sizeBytes, err := strconv.ParseUint(requestedSize, 10, 64) + if err != nil { + return fmt.Errorf("%v is an invalid volume size; %v", volConfig.Size, err) + } + if sizeBytes == 0 { + 
defaultSize, _ := utils.ConvertSizeToBytes(pool.InternalAttributes()[Size]) + sizeBytes, _ = strconv.ParseUint(defaultSize, 10, 64) + } + if err = drivers.CheckMinVolumeSize(sizeBytes, MinimumVolumeSizeBytes); err != nil { + return err + } + + if sizeBytes < minimumGCNVVolumeSizeBytes { + + Logc(ctx).WithFields(LogFields{ + "name": name, + "requestedSize": sizeBytes, + "minimumSize": minimumGCNVVolumeSizeBytes, + }).Warningf("Requested size is too small. Setting volume size to the minimum allowable.") + + sizeBytes = minimumGCNVVolumeSizeBytes + } + + // Get the volume size based on the snapshot reserve + sizeWithReserveBytes := drivers.CalculateVolumeSizeBytes(ctx, name, sizeBytes, int(snapshotReserveInt)) + + _, _, err = drivers.CheckVolumeSizeLimits(ctx, sizeWithReserveBytes, d.Config.CommonStorageDriverConfig) + if err != nil { + return err + } + + // Determine mount options (volume config wins, followed by backend config) + mountOptions := d.Config.NFSMountOptions + if volConfig.MountOptions != "" { + mountOptions = volConfig.MountOptions + } + + // Take unix permissions from volume config first (handles Docker case & PVC annotations), then from pool + unixPermissions := volConfig.UnixPermissions + if unixPermissions == "" { + unixPermissions = pool.InternalAttributes()[UnixPermissions] + } + + // Determine protocol from mount options + var protocolTypes []string + var smbAccess, nfsV3Access, nfsV41Access bool + var apiExportRule gcnvapi.ExportRule + var exportPolicy *gcnvapi.ExportPolicy + var nfsVersion string + + if d.Config.NASType == sa.SMB { + protocolTypes = []string{gcnvapi.ProtocolTypeSMB} + } else { + nfsVersion, err = utils.GetNFSVersionFromMountOptions(mountOptions, "", supportedNFSVersions) + if err != nil { + return err + } + switch nfsVersion { + case nfsVersion3: + nfsV3Access = true + protocolTypes = []string{gcnvapi.ProtocolTypeNFSv3} + case nfsVersion4: + fallthrough + case nfsVersion41: + nfsV41Access = true + protocolTypes = 
[]string{gcnvapi.ProtocolTypeNFSv41} + case "": + nfsV3Access = true + nfsV41Access = true + protocolTypes = []string{gcnvapi.ProtocolTypeNFSv3, gcnvapi.ProtocolTypeNFSv41} + } + + apiExportRule = gcnvapi.ExportRule{ + AllowedClients: pool.InternalAttributes()[ExportRule], + SMB: smbAccess, + Nfsv3: nfsV3Access, + Nfsv4: nfsV41Access, + RuleIndex: 1, + AccessType: gcnvapi.AccessTypeReadWrite, + } + + exportPolicy = &gcnvapi.ExportPolicy{ + Rules: []gcnvapi.ExportRule{apiExportRule}, + } + } + + // Set snapshot directory from volume config first (handles Docker case), then from pool + // If none is set, set it based on mountOption by default; for nfsv3 => false, nfsv4/4.1 => true + snapshotDir := volConfig.SnapshotDir + if snapshotDir == "" { + snapshotDir = pool.InternalAttributes()[SnapshotDir] + // If snapshot directory is not set at pool level, then set default value based on mount option + if snapshotDir == "" { + if strings.HasPrefix(nfsVersion, "3") { + snapshotDir = "false" + } else { + snapshotDir = "true" + } + } + } + snapshotDirBool, err := strconv.ParseBool(snapshotDir) + if err != nil { + return fmt.Errorf("invalid value for snapshotDir; %v", err) + } + + labels := d.getTelemetryLabels(ctx) + for k, v := range pool.GetLabels(ctx, "") { + if key, keyOK := d.fixGCPLabelKey(k); keyOK { + labels[key] = d.fixGCPLabelValue(v) + } + if len(labels) > gcnvapi.MaxLabelCount { + break + } + } + + // Update config to reflect values used to create volume + volConfig.Size = strconv.FormatUint(sizeBytes, 10) // requested size, not including reserve + volConfig.ServiceLevel = serviceLevel + volConfig.SnapshotDir = snapshotDir + volConfig.SnapshotReserve = snapshotReserve + volConfig.UnixPermissions = unixPermissions + + // Find matching capacity pools + cPools := d.API.CapacityPoolsForStoragePool(ctx, pool, serviceLevel) + if len(cPools) == 0 { + return fmt.Errorf("no GCNV storage pools found for Trident pool %s", pool.Name()) + } + + createErrors := 
multierr.Combine() + + // Try each capacity pool until one works + for _, cPool := range cPools { + + if d.Config.NASType == sa.SMB { + Logc(ctx).WithFields(LogFields{ + "capacityPool": cPool.Name, + "creationToken": name, + "size": sizeWithReserveBytes, + "serviceLevel": serviceLevel, + "snapshotDir": snapshotDirBool, + "snapshotReserve": snapshotReserve, + "protocolTypes": protocolTypes, + }).Debug("Creating volume.") + } else { + Logc(ctx).WithFields(LogFields{ + "capacityPool": cPool.Name, + "creationToken": name, + "size": sizeWithReserveBytes, + "unixPermissions": unixPermissions, + "serviceLevel": serviceLevel, + "snapshotDir": snapshotDirBool, + "snapshotReserve": snapshotReserve, + "protocolTypes": protocolTypes, + "exportPolicy": fmt.Sprintf("%+v", exportPolicy), + }).Debug("Creating volume.") + } + + createRequest := &gcnvapi.VolumeCreateRequest{ + Name: volConfig.Name, + CreationToken: name, + CapacityPool: cPool.Name, + SizeBytes: int64(sizeWithReserveBytes), + ExportPolicy: exportPolicy, + ProtocolTypes: protocolTypes, + UnixPermissions: unixPermissions, + Labels: labels, + SnapshotReserve: snapshotReservePtr, + SnapshotDirectory: snapshotDirBool, + SecurityStyle: gcnvapi.SecurityStyleUnix, + } + + // Add unix permissions and export policy fields only to NFS volume + if d.Config.NASType == sa.NFS { + createRequest.UnixPermissions = unixPermissions + createRequest.ExportPolicy = exportPolicy + } + + // Create the volume + volume, createErr := d.API.CreateVolume(ctx, createRequest) + if createErr != nil { + errMessage := fmt.Sprintf("GCNV pool %s; error creating volume %s: %v", cPool.Name, name, createErr) + Logc(ctx).Error(errMessage) + createErrors = multierr.Combine(createErrors, fmt.Errorf(errMessage)) + continue + } + + // Always save the full GCP ID + volConfig.InternalID = volume.FullName + + // Wait for creation to complete so that the mount targets are available + return d.waitForVolumeCreate(ctx, volume) + } + + return createErrors +} + +// 
CreateClone clones an existing volume. If a snapshot is not specified, one is created. +func (d *NASStorageDriver) CreateClone( + ctx context.Context, sourceVolConfig, cloneVolConfig *storage.VolumeConfig, storagePool storage.Pool, +) error { + name := cloneVolConfig.InternalName + source := cloneVolConfig.CloneSourceVolumeInternal + snapshot := cloneVolConfig.CloneSourceSnapshotInternal + + fields := LogFields{ + "Method": "CreateClone", + "Type": "NASStorageDriver", + "name": name, + "source": source, + "snapshot": snapshot, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> CreateClone") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< CreateClone") + + // Update resource cache as needed + if err := d.API.RefreshGCNVResources(ctx); err != nil { + return fmt.Errorf("could not update GCNV resource cache; %v", err) + } + + // ensure new volume doesn't exist, fail if so + // get source volume, fail if nonexistent or if wrong region + // if snapshot specified, read snapshots from source, fail if nonexistent + // if no snap specified, create one, fail if error + // create volume from snapshot + + // Make sure we got a valid name + if err := d.validateVolumeName(cloneVolConfig.Name); err != nil { + return err + } + + // Make sure we got a valid creation token + if err := d.validateCreationToken(name); err != nil { + return err + } + + // Get the source volume + sourceVolume, err := d.API.Volume(ctx, sourceVolConfig) + if err != nil { + return fmt.Errorf("could not find source volume; %v", err) + } + + // If the volume already exists, bail out + volumeExists, extantVolume, err := d.API.VolumeExists(ctx, cloneVolConfig) + if err != nil { + return fmt.Errorf("error checking for existing volume %s; %v", name, err) + } + if volumeExists { + if extantVolume.State == gcnvapi.VolumeStateCreating { + // This is a retry and the volume still isn't ready, so no need to wait further. 
+ return errors.VolumeCreatingError( + fmt.Sprintf("volume state is still %s, not %s", gcnvapi.VolumeStateCreating, gcnvapi.VolumeStateReady)) + } + return drivers.NewVolumeExistsError(name) + } + + var sourceSnapshot *gcnvapi.Snapshot + + if snapshot != "" { + + // Get the source snapshot + sourceSnapshot, err = d.API.SnapshotForVolume(ctx, sourceVolume, snapshot) + if err != nil { + return fmt.Errorf("could not find source snapshot; %v", err) + } + + // Ensure snapshot is in a usable state + if sourceSnapshot.State != gcnvapi.SnapshotStateReady { + return fmt.Errorf("source snapshot state is %s, it must be %s", + sourceSnapshot.State, gcnvapi.SnapshotStateReady) + } + + Logc(ctx).WithFields(LogFields{ + "snapshot": snapshot, + "source": sourceVolume.Name, + }).Debug("Found source snapshot.") + + } else { + + // No source snapshot specified, so create one + snapName := "snap-" + strings.ToLower(time.Now().UTC().Format(storage.SnapshotNameFormat)) + + Logc(ctx).WithFields(LogFields{ + "snapshot": snapName, + "source": sourceVolume.Name, + }).Debug("Creating source snapshot.") + + sourceSnapshot, err = d.API.CreateSnapshot(ctx, sourceVolume, snapName) + if err != nil { + return fmt.Errorf("could not create source snapshot; %v", err) + } + + // Wait for snapshot creation to complete + err = d.API.WaitForSnapshotState(ctx, sourceSnapshot, sourceVolume, gcnvapi.SnapshotStateReady, + []string{gcnvapi.SnapshotStateError}, gcnvapi.SnapshotTimeout) + if err != nil { + return err + } + + // Re-fetch the snapshot to populate the properties after create has completed + sourceSnapshot, err = d.API.SnapshotForVolume(ctx, sourceVolume, snapName) + if err != nil { + return fmt.Errorf("could not retrieve newly-created snapshot") + } + + Logc(ctx).WithFields(LogFields{ + "snapshot": sourceSnapshot.Name, + "source": sourceVolume.Name, + }).Debug("Created source snapshot.") + } + + // If RO clone is requested, don't create the volume on GCNV backend and return nil + if 
cloneVolConfig.ReadOnlyClone { + // Return error if snapshot directory is not enabled for RO clone + if !sourceVolume.SnapshotDirectory { + return fmt.Errorf("snapshot directory access is set to %t and readOnly clone is set to %t ", + sourceVolume.SnapshotDirectory, cloneVolConfig.ReadOnlyClone) + } + return nil + } + + var labels map[string]string + labels = d.updateTelemetryLabels(ctx, sourceVolume) + + if storage.IsStoragePoolUnset(storagePool) { + // Set the base label + storagePoolTemp := &storage.StoragePool{} + storagePoolTemp.SetAttributes(map[string]sa.Offer{ + sa.Labels: sa.NewLabelOffer(d.Config.Labels), + }) + for k, v := range storagePoolTemp.GetLabels(ctx, "") { + if key, keyOK := d.fixGCPLabelKey(k); keyOK { + labels[key] = d.fixGCPLabelValue(v) + } + if len(labels) > gcnvapi.MaxLabelCount { + break + } + } + } + + Logc(ctx).WithFields(LogFields{ + "creationToken": name, + "sourceVolume": sourceVolume.CreationToken, + "sourceSnapshot": sourceSnapshot.Name, + "unixPermissions": sourceVolume.UnixPermissions, + "snapshotReserve": sourceVolume.SnapshotReserve, + }).Debug("Cloning volume.") + + createRequest := &gcnvapi.VolumeCreateRequest{ + Name: cloneVolConfig.Name, + CreationToken: name, + CapacityPool: sourceVolume.CapacityPool, + SizeBytes: sourceVolume.SizeBytes, + ProtocolTypes: sourceVolume.ProtocolTypes, + Labels: labels, + SnapshotReserve: utils.Ptr(sourceVolume.SnapshotReserve), + SnapshotDirectory: sourceVolume.SnapshotDirectory, + SecurityStyle: sourceVolume.SecurityStyle, + SnapshotID: sourceSnapshot.FullName, + } + + // Add unix permissions and export policy fields only to NFS volume + if d.Config.NASType == sa.NFS { + createRequest.ExportPolicy = sourceVolume.ExportPolicy + createRequest.UnixPermissions = sourceVolume.UnixPermissions + } + + // Clone the volume + clone, err := d.API.CreateVolume(ctx, createRequest) + if err != nil { + return err + } + + // Always save the full GCP ID + cloneVolConfig.InternalID = clone.FullName + + // 
Wait for creation to complete so that the mount targets are available + return d.waitForVolumeCreate(ctx, clone) +} + +// Import finds an existing volume and makes it available for containers. If ImportNotManaged is false, the +// volume is fully brought under Trident's management. +func (d *NASStorageDriver) Import(ctx context.Context, volConfig *storage.VolumeConfig, originalName string) error { + fields := LogFields{ + "Method": "Import", + "Type": "NASStorageDriver", + "originalName": originalName, + "newName": volConfig.InternalName, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Import") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Import") + + // Update resource cache as needed + if err := d.API.RefreshGCNVResources(ctx); err != nil { + return fmt.Errorf("could not update GCNV resource cache; %v", err) + } + + // Get the volume + volume, err := d.API.VolumeByName(ctx, originalName) + if err != nil { + return fmt.Errorf("could not find volume %s; %v", originalName, err) + } + + // Don't allow import for dual-protocol volume. 
+ // For dual-protocol volume the ProtocolTypes has two values [NFSv3, CIFS] + if d.isDualProtocolVolume(volume) { + return fmt.Errorf("trident doesn't support importing a dual-protocol volume '%s'", originalName) + } + + // Ensure the volume may be imported by a capacity pool managed by this backend + if err = d.API.EnsureVolumeInValidCapacityPool(ctx, volume); err != nil { + return err + } + + // Get the volume size + volConfig.Size = strconv.FormatInt(volume.SizeBytes, 10) + + Logc(ctx).WithFields(LogFields{ + "creationToken": volume.CreationToken, + "managed": !volConfig.ImportNotManaged, + "state": volume.State, + "capacityPool": volume.CapacityPool, + "sizeBytes": volume.SizeBytes, + }).Debug("Found volume to import.") + + var snapshotDirAccess bool + // Modify the volume if Trident will manage its lifecycle + if !volConfig.ImportNotManaged { + if volConfig.SnapshotDir != "" { + if snapshotDirAccess, err = strconv.ParseBool(volConfig.SnapshotDir); err != nil { + return fmt.Errorf("could not import volume %s, snapshot directory access is set to %s", + originalName, volConfig.SnapshotDir) + } + } + + // Update the volume labels + labels := d.updateTelemetryLabels(ctx, volume) + + /*if d.Config.NASType == sa.SMB && volume.ProtocolTypes[0] == gcnvapi.ProtocolTypeSMB { + if err = d.API.ModifyVolume(ctx, volume, labels, nil, &snapshotDirAccess, &modifiedExportRule); err != nil { + Logc(ctx).WithField("originalName", originalName).WithError(err).Error( + "Could not import volume, volume modify failed.") + return fmt.Errorf("could not import volume %s, volume modify failed; %v", originalName, err) + } + + Logc(ctx).WithFields(LogFields{ + "name": volume.Name, + "creationToken": volume.CreationToken, + "labels": labels, + }).Info("Volume modified.") + + } else*/if d.Config.NASType == sa.NFS && (volume.ProtocolTypes[0] == gcnvapi.ProtocolTypeNFSv3 || volume. + ProtocolTypes[0] == gcnvapi.ProtocolTypeNFSv41) { + // Update volume unix permissions. 
Permissions specified in a PVC annotation take precedence + // over the backend's unixPermissions config. + unixPermissions := volConfig.UnixPermissions + if unixPermissions == "" { + unixPermissions = d.Config.UnixPermissions + } + if unixPermissions == "" { + unixPermissions = volume.UnixPermissions + } + if unixPermissions != "" { + if err = utils.ValidateOctalUnixPermissions(unixPermissions); err != nil { + return fmt.Errorf("could not import volume %s; %v", originalName, err) + } + } + + err = d.API.ModifyVolume(ctx, volume, labels, &unixPermissions, &snapshotDirAccess, nil) + if err != nil { + Logc(ctx).WithField("originalName", originalName).WithError(err).Error( + "Could not import volume, volume modify failed.") + return fmt.Errorf("could not import volume %s, volume modify failed; %v", originalName, err) + } + + Logc(ctx).WithFields(LogFields{ + "name": volume.Name, + "creationToken": volume.CreationToken, + "labels": labels, + "unixPermissions": unixPermissions, + }).Info("Volume modified.") + } else { + return fmt.Errorf("could not import volume '%s' due to backend and volume mismatch", originalName) + } + + if _, err = d.API.WaitForVolumeState( + ctx, volume, gcnvapi.VolumeStateReady, []string{gcnvapi.VolumeStateError}, d.defaultTimeout()); err != nil { + return fmt.Errorf("could not import volume %s; %v", originalName, err) + } + } + + // The GCNV creation token cannot be changed, so use it as the internal name + volConfig.InternalName = originalName + + // Always save the full GCP ID + volConfig.InternalID = volume.FullName + + return nil +} + +// Rename changes the name of a volume. Not supported by this driver. 
+func (d *NASStorageDriver) Rename(ctx context.Context, name, newName string) error { + fields := LogFields{ + "Method": "Rename", + "Type": "NASStorageDriver", + "name": name, + "newName": newName, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Rename") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Rename") + + // Rename is only needed for the import workflow, and we aren't currently renaming the + // GCNV volume when importing, so do nothing here lest we set the volume name incorrectly + // during an import failure cleanup. + return nil +} + +// getTelemetryLabels builds the standard telemetry labels that are set on each volume. +func (d *NASStorageDriver) getTelemetryLabels(_ context.Context) map[string]string { + return map[string]string{ + "version": d.fixGCPLabelValue(d.telemetry.TridentVersion), + "backend_uuid": d.fixGCPLabelValue(d.telemetry.TridentBackendUUID), + "platform": d.fixGCPLabelValue(d.telemetry.Platform), + "platform_version": d.fixGCPLabelValue(d.telemetry.PlatformVersion), + "plugin": d.fixGCPLabelValue(d.telemetry.Plugin), + } +} + +// updateTelemetryLabels updates a volume's labels to include the standard telemetry labels. +func (d *NASStorageDriver) updateTelemetryLabels(ctx context.Context, volume *gcnvapi.Volume) map[string]string { + if volume.Labels == nil { + volume.Labels = make(map[string]string) + } + + newLabels := volume.Labels + for k, v := range d.getTelemetryLabels(ctx) { + newLabels[k] = v + } + return newLabels +} + +// fixGCPLabelKey accepts a label key and modifies it to satisfy GCP label key rules, or returns +// false if not possible. 
+func (d *NASStorageDriver) fixGCPLabelKey(s string) (string, bool) { + // Check if the string is empty + if len(s) == 0 { + return "", false + } + + // Convert the string to lowercase + s = strings.ToLower(s) + + // Replace all disallowed characters with underscores + s = gcpLabelRegex.ReplaceAllStringFunc(s, func(m string) string { + return strings.Repeat("_", len(m)) + }) + + // Check if the first character is a lowercase letter + if !unicode.IsLower(rune(s[0])) { + return "", false + } + + // Shorten the string to a maximum of 63 characters + s = utils.ShortenString(s, gcnvapi.MaxLabelLength) + + return s, true +} + +// fixGCPLabelValue accepts a label value and modifies it to satisfy GCP label value rules. +func (d *NASStorageDriver) fixGCPLabelValue(s string) string { + // Convert the string to lowercase + s = strings.ToLower(s) + + // Replace all disallowed characters with underscores + s = gcpLabelRegex.ReplaceAllStringFunc(s, func(m string) string { + return strings.Repeat("_", len(m)) + }) + + // Shorten the string to a maximum of 63 characters + s = utils.ShortenString(s, gcnvapi.MaxLabelLength) + + return s +} + +// waitForVolumeCreate waits for volume creation to complete by reaching the Available state. If the +// volume reaches a terminal state (Error), the volume is deleted. If the wait times out and the volume +// is still creating, a VolumeCreatingError is returned so the caller may try again. 
+func (d *NASStorageDriver) waitForVolumeCreate(ctx context.Context, volume *gcnvapi.Volume) error { + state, err := d.API.WaitForVolumeState( + ctx, volume, gcnvapi.VolumeStateReady, []string{gcnvapi.VolumeStateError}, d.volumeCreateTimeout) + if err != nil { + + logFields := LogFields{"volume": volume.Name} + + switch state { + + case gcnvapi.VolumeStateUnspecified, gcnvapi.VolumeStateCreating: + Logc(ctx).WithFields(logFields).Debugf("Volume is in %s state.", state) + return errors.VolumeCreatingError(err.Error()) + + case gcnvapi.VolumeStateDeleting: + // Wait for deletion to complete + _, errDelete := d.API.WaitForVolumeState( + ctx, volume, gcnvapi.VolumeStateDeleted, []string{gcnvapi.VolumeStateError}, d.defaultTimeout()) + if errDelete != nil { + Logc(ctx).WithFields(logFields).WithError(errDelete).Error( + "Volume could not be cleaned up and must be manually deleted.") + } + return errDelete + + case gcnvapi.VolumeStateError: + // Delete a failed volume + errDelete := d.API.DeleteVolume(ctx, volume) + if errDelete != nil { + Logc(ctx).WithFields(logFields).WithError(errDelete).Error( + "Volume could not be cleaned up and must be manually deleted.") + return errDelete + } else { + Logc(ctx).WithField("volume", volume.Name).Info("Volume deleted.") + } + + default: + Logc(ctx).WithFields(logFields).Errorf("unexpected volume state %s found for volume", state) + } + } + + return nil +} + +// Destroy deletes a volume. 
+func (d *NASStorageDriver) Destroy(ctx context.Context, volConfig *storage.VolumeConfig) error { + name := volConfig.InternalName + + fields := LogFields{ + "Method": "Destroy", + "Type": "NASStorageDriver", + "name": name, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Destroy") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Destroy") + + // Update resource cache as needed + if err := d.API.RefreshGCNVResources(ctx); err != nil { + return fmt.Errorf("could not update GCNV resource cache; %v", err) + } + + // If volume doesn't exist, return success + volumeExists, extantVolume, err := d.API.VolumeExists(ctx, volConfig) + if err != nil { + return fmt.Errorf("error checking for existing volume %s; %v", name, err) + } + if !volumeExists { + Logc(ctx).WithField("volume", name).Warn("Volume already deleted.") + return nil + } else if extantVolume.State == gcnvapi.VolumeStateDeleting { + // This is a retry, so give it more time before giving up again. + _, err = d.API.WaitForVolumeState( + ctx, extantVolume, gcnvapi.VolumeStateDeleted, []string{gcnvapi.VolumeStateError}, d.volumeCreateTimeout) + return err + } + + // Delete the volume + if err = d.API.DeleteVolume(ctx, extantVolume); err != nil { + return err + } + + Logc(ctx).WithField("volume", extantVolume.Name).Info("Volume deleted.") + + // Wait for deletion to complete + _, err = d.API.WaitForVolumeState(ctx, extantVolume, gcnvapi.VolumeStateDeleted, []string{gcnvapi.VolumeStateError}, d.defaultTimeout()) + return err +} + +// Publish the volume to the host specified in publishInfo. This method may or may not be running on the host +// where the volume will be mounted, so it should limit itself to updating access rules, initiator groups, etc. +// that require some host identity (but not locality) as well as storage controller API access. 
+func (d *NASStorageDriver) Publish( + ctx context.Context, volConfig *storage.VolumeConfig, publishInfo *utils.VolumePublishInfo, +) error { + var volume *gcnvapi.Volume + var err error + + name := volConfig.InternalName + fields := LogFields{ + "Method": "Publish", + "Type": "NASStorageDriver", + "name": name, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Publish") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Publish") + + // Update resource cache as needed + if err := d.API.RefreshGCNVResources(ctx); err != nil { + return fmt.Errorf("could not update GCNV resource cache; %v", err) + } + + // If it's a RO clone, get source volume to populate publish info + if volConfig.ReadOnlyClone { + volume, err = d.API.VolumeByName(ctx, volConfig.CloneSourceVolumeInternal) + if err != nil { + return fmt.Errorf("could not find volume %s; %v", name, err) + } + } else { + // Get the volume + volume, err = d.API.Volume(ctx, volConfig) + if err != nil { + return fmt.Errorf("could not find volume %s; %v", name, err) + } + } + + if len(volume.MountTargets) == 0 { + return fmt.Errorf("volume %s has no mount targets", name) + } + + // Determine mount options (volume config wins, followed by backend config) + mountOptions := d.Config.NFSMountOptions + if volConfig.MountOptions != "" { + mountOptions = volConfig.MountOptions + } + + // Add required fields for attaching SMB volume + if d.Config.NASType == sa.SMB { + // publishInfo.SMBPath = volConfig.AccessInfo.SMBPath + // publishInfo.SMBServer = (volume.MountTargets)[0].ServerFqdn + // publishInfo.FilesystemType = sa.SMB + } else { + protocol := "" + nfsVersion, versionErr := utils.GetNFSVersionFromMountOptions(mountOptions, "", supportedNFSVersions) + if versionErr != nil { + return versionErr + } + switch nfsVersion { + case nfsVersion3: + protocol = gcnvapi.ProtocolTypeNFSv3 + case nfsVersion4: + fallthrough + case nfsVersion41: + 
protocol = gcnvapi.ProtocolTypeNFSv41 + default: + // No preference, use first listed NFS mount target + } + + server, share, exportErr := d.nfsExportComponentsForProtocol(volume, protocol) + if exportErr != nil { + return exportErr + } + + // Add fields needed by Attach + publishInfo.NfsPath = "/" + share + publishInfo.NfsServerIP = server + publishInfo.FilesystemType = sa.NFS + publishInfo.MountOptions = mountOptions + } + + return nil +} + +// CanSnapshot determines whether a snapshot as specified in the provided snapshot config may be taken. +func (d *NASStorageDriver) CanSnapshot(_ context.Context, _ *storage.SnapshotConfig, _ *storage.VolumeConfig) error { + return nil +} + +// GetSnapshot returns a snapshot of a volume, or an error if it does not exist. +func (d *NASStorageDriver) GetSnapshot( + ctx context.Context, snapConfig *storage.SnapshotConfig, volConfig *storage.VolumeConfig, +) (*storage.Snapshot, error) { + internalSnapName := snapConfig.InternalName + internalVolName := snapConfig.VolumeInternalName + fields := LogFields{ + "Method": "GetSnapshot", + "Type": "NASStorageDriver", + "snapshotName": internalSnapName, + "volumeName": internalVolName, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> GetSnapshot") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< GetSnapshot") + + // Update resource cache as needed + if err := d.API.RefreshGCNVResources(ctx); err != nil { + return nil, fmt.Errorf("could not update GCNV resource cache; %v", err) + } + + // Get the volume + volumeExists, extantVolume, err := d.API.VolumeExists(ctx, volConfig) + if err != nil { + return nil, fmt.Errorf("error checking for existing volume %s; %v", internalVolName, err) + } + if !volumeExists { + // The GCNV volume is backed by ONTAP, so if the volume doesn't exist, neither does the snapshot. 
+ + Logc(ctx).WithFields(LogFields{ + "snapshotName": internalSnapName, + "volumeName": internalVolName, + }).Debug("Volume for snapshot not found.") + + return nil, nil + } + + // Get the snapshot + snapshot, err := d.API.SnapshotForVolume(ctx, extantVolume, internalSnapName) + if err != nil { + if errors.IsNotFoundError(err) { + return nil, nil + } + return nil, fmt.Errorf("could not check for existing snapshot; %v", err) + } + + if snapshot.State != gcnvapi.SnapshotStateReady { + return nil, fmt.Errorf("snapshot %s state is %s", internalSnapName, snapshot.State) + } + + created := snapshot.Created.UTC().Format(utils.TimestampFormat) + + Logc(ctx).WithFields(LogFields{ + "snapshotName": internalSnapName, + "volumeName": internalVolName, + "created": created, + }).Debug("Found snapshot.") + + return &storage.Snapshot{ + Config: snapConfig, + Created: created, + SizeBytes: 0, + State: storage.SnapshotStateOnline, + }, nil +} + +// GetSnapshots returns the list of snapshots associated with the specified volume. 
+func (d *NASStorageDriver) GetSnapshots( + ctx context.Context, volConfig *storage.VolumeConfig, +) ([]*storage.Snapshot, error) { + internalVolName := volConfig.InternalName + fields := LogFields{ + "Method": "GetSnapshots", + "Type": "NASStorageDriver", + "volumeName": internalVolName, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> GetSnapshots") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< GetSnapshots") + + // Update resource cache as needed + if err := d.API.RefreshGCNVResources(ctx); err != nil { + return nil, fmt.Errorf("could not update GCNV resource cache; %v", err) + } + + // Get the volume + volume, err := d.API.Volume(ctx, volConfig) + if err != nil { + return nil, fmt.Errorf("could not find volume %s; %v", internalVolName, err) + } + + snapshots, err := d.API.SnapshotsForVolume(ctx, volume) + if err != nil { + return nil, err + } + + snapshotList := make([]*storage.Snapshot, 0) + + for _, snapshot := range *snapshots { + + // Filter out snapshots in an unavailable state + if snapshot.State != gcnvapi.SnapshotStateReady { + continue + } + + snapshotList = append(snapshotList, &storage.Snapshot{ + Config: &storage.SnapshotConfig{ + Version: tridentconfig.OrchestratorAPIVersion, + Name: snapshot.Name, + InternalName: snapshot.Name, + VolumeName: volConfig.Name, + VolumeInternalName: volConfig.InternalName, + }, + Created: snapshot.Created.UTC().Format(utils.TimestampFormat), + SizeBytes: 0, + State: storage.SnapshotStateOnline, + }) + } + + return snapshotList, nil +} + +// CreateSnapshot creates a snapshot for the given volume. 
+func (d *NASStorageDriver) CreateSnapshot( + ctx context.Context, snapConfig *storage.SnapshotConfig, volConfig *storage.VolumeConfig, +) (*storage.Snapshot, error) { + internalSnapName := snapConfig.InternalName + internalVolName := snapConfig.VolumeInternalName + fields := LogFields{ + "Method": "CreateSnapshot", + "Type": "NASStorageDriver", + "snapshotName": internalSnapName, + "volumeName": internalVolName, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> CreateSnapshot") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< CreateSnapshot") + + // Update resource cache as needed + if err := d.API.RefreshGCNVResources(ctx); err != nil { + return nil, fmt.Errorf("could not update GCNV resource cache; %v", err) + } + + // Check if volume exists + volumeExists, sourceVolume, err := d.API.VolumeExists(ctx, volConfig) + if err != nil { + return nil, fmt.Errorf("error checking for existing volume %s; %v", internalVolName, err) + } + if !volumeExists { + return nil, fmt.Errorf("volume %s does not exist", internalVolName) + } + + // Create the snapshot + snapshot, err := d.API.CreateSnapshot(ctx, sourceVolume, internalSnapName) + if err != nil { + return nil, fmt.Errorf("could not create snapshot; %v", err) + } + + // Wait for snapshot creation to complete + err = d.API.WaitForSnapshotState( + ctx, snapshot, sourceVolume, gcnvapi.SnapshotStateReady, []string{gcnvapi.SnapshotStateError}, gcnvapi.SnapshotTimeout) + if err != nil { + return nil, err + } + + Logc(ctx).WithFields(LogFields{ + "snapshotName": snapConfig.InternalName, + "volumeName": snapConfig.VolumeInternalName, + }).Info("Snapshot created.") + + return &storage.Snapshot{ + Config: snapConfig, + Created: snapshot.Created.UTC().Format(utils.TimestampFormat), + SizeBytes: 0, + State: storage.SnapshotStateOnline, + }, nil +} + +// RestoreSnapshot restores a volume (in place) from a snapshot. 
+func (d *NASStorageDriver) RestoreSnapshot( + ctx context.Context, snapConfig *storage.SnapshotConfig, volConfig *storage.VolumeConfig, +) error { + internalSnapName := snapConfig.InternalName + internalVolName := snapConfig.VolumeInternalName + fields := LogFields{ + "Method": "RestoreSnapshot", + "Type": "NASStorageDriver", + "snapshotName": internalSnapName, + "volumeName": internalVolName, + } + Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> RestoreSnapshot") + defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< RestoreSnapshot") + + // Update resource cache as needed + if err := d.API.RefreshGCNVResources(ctx); err != nil { + return fmt.Errorf("could not update GCNV resource cache; %v", err) + } + + // Get the volume + volume, err := d.API.Volume(ctx, volConfig) + if err != nil { + return fmt.Errorf("could not find volume %s; %v", internalVolName, err) + } + + // Get the snapshot + snapshot, err := d.API.SnapshotForVolume(ctx, volume, internalSnapName) + if err != nil { + return fmt.Errorf("unable to find snapshot %s: %v", internalSnapName, err) + } + + // Do the restore + if err = d.API.RestoreSnapshot(ctx, volume, snapshot); err != nil { + return err + } + + // Wait for snapshot deletion to complete + _, err = d.API.WaitForVolumeState(ctx, volume, gcnvapi.VolumeStateReady, + []string{gcnvapi.VolumeStateError, gcnvapi.VolumeStateDeleting, gcnvapi.VolumeStateDeleted}, gcnvapi.DefaultSDKTimeout, + ) + return err +} + +// DeleteSnapshot deletes a snapshot of a volume. 
+func (d *NASStorageDriver) DeleteSnapshot(
+	ctx context.Context, snapConfig *storage.SnapshotConfig, volConfig *storage.VolumeConfig,
+) error {
+	internalSnapName := snapConfig.InternalName
+	internalVolName := snapConfig.VolumeInternalName
+	fields := LogFields{
+		"Method":       "DeleteSnapshot",
+		"Type":         "NASStorageDriver",
+		"snapshotName": internalSnapName,
+		"volumeName":   internalVolName,
+	}
+	Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> DeleteSnapshot")
+	defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< DeleteSnapshot")
+
+	// Update resource cache as needed
+	if err := d.API.RefreshGCNVResources(ctx); err != nil {
+		return fmt.Errorf("could not update GCNV resource cache; %v", err)
+	}
+
+	// Get the volume; deletion is idempotent, so a missing volume is success, not an error.
+	volumeExists, extantVolume, err := d.API.VolumeExists(ctx, volConfig)
+	if err != nil {
+		return fmt.Errorf("error checking for existing volume %s; %v", internalVolName, err)
+	}
+	if !volumeExists {
+		// The GCNV volume is backed by ONTAP, so if the volume doesn't exist, neither does the snapshot.
+
+		Logc(ctx).WithFields(LogFields{
+			"snapshotName": internalSnapName,
+			"volumeName":   internalVolName,
+		}).Debug("Volume for snapshot not found.")
+
+		return nil
+	}
+
+	snapshot, err := d.API.SnapshotForVolume(ctx, extantVolume, internalSnapName)
+	if err != nil {
+		// If the snapshot is already gone, return success
+		if errors.IsNotFoundError(err) {
+			return nil
+		}
+		return fmt.Errorf("unable to find snapshot %s; %v", internalSnapName, err)
+	}
+
+	if err = d.API.DeleteSnapshot(ctx, extantVolume, snapshot); err != nil {
+		return err
+	}
+
+	// Wait for snapshot deletion to complete
+	return d.API.WaitForSnapshotState(
+		ctx, snapshot, extantVolume, gcnvapi.SnapshotStateDeleted, []string{gcnvapi.SnapshotStateError}, gcnvapi.SnapshotTimeout,
+	)
+}
+
+// List returns the list of volumes associated with this backend.
+func (d *NASStorageDriver) List(ctx context.Context) ([]string, error) {
+	fields := LogFields{"Method": "List", "Type": "NASStorageDriver"}
+	Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> List")
+	defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< List")
+
+	// Update resource cache as needed
+	if err := d.API.RefreshGCNVResources(ctx); err != nil {
+		return nil, fmt.Errorf("could not update GCNV resource cache; %v", err)
+	}
+
+	volumes, err := d.API.Volumes(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	prefix := *d.Config.StoragePrefix
+	volumeNames := make([]string, 0)
+
+	for _, volume := range *volumes {
+
+		// Filter out volumes in an unavailable state
+		switch volume.State {
+		case gcnvapi.VolumeStateDeleting, gcnvapi.VolumeStateError, gcnvapi.VolumeStateDisabled:
+			continue
+		}
+
+		// Filter out volumes without the prefix (pass all if prefix is empty)
+		if !strings.HasPrefix(volume.CreationToken, prefix) {
+			continue
+		}
+
+		// Return the name with the storage prefix stripped off.
+		volumeName := volume.CreationToken[len(prefix):]
+		volumeNames = append(volumeNames, volumeName)
+	}
+
+	return volumeNames, nil
+}
+
+// Get tests for the existence of a volume.
+func (d *NASStorageDriver) Get(ctx context.Context, name string) error {
+	fields := LogFields{"Method": "Get", "Type": "NASStorageDriver"}
+	Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Get")
+	defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Get")
+
+	// Update resource cache as needed
+	if err := d.API.RefreshGCNVResources(ctx); err != nil {
+		return fmt.Errorf("could not update GCNV resource cache; %v", err)
+	}
+
+	// Existence check only; the volume itself is discarded.
+	if _, err := d.API.VolumeByName(ctx, name); err != nil {
+		return fmt.Errorf("could not get volume %s; %v", name, err)
+	}
+
+	return nil
+}
+
+// Resize increases a volume's quota.
+func (d *NASStorageDriver) Resize(ctx context.Context, volConfig *storage.VolumeConfig, sizeBytes uint64) error {
+	name := volConfig.InternalName
+	fields := LogFields{
+		"Method":    "Resize",
+		"Type":      "NASStorageDriver",
+		"name":      name,
+		"sizeBytes": sizeBytes,
+	}
+	Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Resize")
+	defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Resize")
+
+	// Update resource cache as needed
+	if err := d.API.RefreshGCNVResources(ctx); err != nil {
+		return fmt.Errorf("could not update GCNV resource cache; %v", err)
+	}
+
+	// Get the volume
+	volume, err := d.API.Volume(ctx, volConfig)
+	if err != nil {
+		return fmt.Errorf("could not find volume %s; %v", name, err)
+	}
+
+	// If the volume state isn't ready, return an error
+	if volume.State != gcnvapi.VolumeStateReady {
+		return fmt.Errorf("volume %s state is %s, not %s", name, volume.State, gcnvapi.VolumeStateReady)
+	}
+
+	// Include the snapshot reserve in the new size
+	sizeWithReserveBytes := drivers.CalculateVolumeSizeBytes(ctx, name, sizeBytes, int(volume.SnapshotReserve))
+
+	// If the volume is already the requested size, there's nothing to do
+	if int64(sizeWithReserveBytes) == volume.SizeBytes {
+		return nil
+	}
+
+	// Make sure we're not shrinking the volume
+	if int64(sizeWithReserveBytes) < volume.SizeBytes {
+		return fmt.Errorf("requested size %d is less than existing volume size %d",
+			sizeWithReserveBytes, volume.SizeBytes)
+	}
+
+	// Make sure the request isn't above the configured maximum volume size (if any)
+	_, _, err = drivers.CheckVolumeSizeLimits(ctx, sizeWithReserveBytes, d.Config.CommonStorageDriverConfig)
+	if err != nil {
+		return err
+	}
+
+	// Resize the volume
+	if err = d.API.ResizeVolume(ctx, volume, int64(sizeWithReserveBytes)); err != nil {
+		return err
+	}
+
+	volConfig.Size = strconv.FormatUint(sizeBytes, 10) // requested size, not including reserve
+	return nil
+}
+
+// GetStorageBackendSpecs retrieves storage capabilities and register pools with specified backend.
+func (d *NASStorageDriver) GetStorageBackendSpecs(_ context.Context, backend storage.Backend) error {
+	backend.SetName(d.BackendName())
+
+	for _, pool := range d.pools {
+		pool.SetBackend(backend)
+		backend.AddStoragePool(pool)
+	}
+
+	return nil
+}
+
+// CreatePrepare is called prior to volume creation. Currently its only role is to create the internal volume name.
+func (d *NASStorageDriver) CreatePrepare(ctx context.Context, volConfig *storage.VolumeConfig, pool storage.Pool) {
+	volConfig.InternalName = d.GetInternalVolumeName(ctx, volConfig, pool)
+}
+
+// GetStorageBackendPhysicalPoolNames retrieves storage backend physical pools.
+// This driver reports no physical pools, so the result is always empty.
+func (d *NASStorageDriver) GetStorageBackendPhysicalPoolNames(context.Context) []string {
+	return []string{}
+}
+
+// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend.
+func (d *NASStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.GCNVNASStorageBackendPool {
+	fields := LogFields{"Method": "getStorageBackendPools", "Type": "NASStorageDriver"}
+	Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools")
+	defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools")
+
+	// For this driver, a discrete storage pool is composed of the following:
+	// 1. Project number
+	// 2. Location
+	// 3. Storage pool - contains at least one pool depending on the backend configuration.
+	cPools := d.API.CapacityPoolsForStoragePools(ctx)
+	backendPools := make([]drivers.GCNVNASStorageBackendPool, 0, len(cPools))
+	for _, cPool := range cPools {
+		backendPool := drivers.GCNVNASStorageBackendPool{
+			ProjectNumber: d.Config.ProjectNumber,
+			Location:      cPool.Location,
+			StoragePool:   cPool.Name,
+		}
+		backendPools = append(backendPools, backendPool)
+	}
+	return backendPools
+}
+
+// GetInternalVolumeName accepts the name of a volume being created and returns what the internal name
+// should be, depending on backend requirements and Trident's operating context.
+func (d *NASStorageDriver) GetInternalVolumeName(
+	ctx context.Context, volConfig *storage.VolumeConfig, pool storage.Pool,
+) string {
+	if tridentconfig.UsingPassthroughStore {
+		// With a passthrough store, the name mapping must remain reversible
+		return *d.Config.StoragePrefix + volConfig.Name
+	} else if csiRegex.MatchString(volConfig.Name) {
+		// If the name is from CSI (i.e. contains a UUID), just use it as-is
+		Logc(ctx).WithField("volumeInternal", volConfig.Name).Debug("Using volume name as internal name.")
+		return volConfig.Name
+	} else {
+		// Cloud volumes have strict limits on volume mount paths, so for cloud
+		// infrastructure like Trident, the simplest approach is to generate a
+		// UUID-based name with a prefix that won't exceed the length limit.
+		return "gcnv-" + uuid.NewString()
+	}
+}
+
+// CreateFollowup is called after volume creation and sets the access info in the volume config.
+func (d *NASStorageDriver) CreateFollowup(ctx context.Context, volConfig *storage.VolumeConfig) error {
+	var volume *gcnvapi.Volume
+	var err error
+
+	name := volConfig.InternalName
+	fields := LogFields{
+		"Method": "CreateFollowup",
+		"Type":   "NASStorageDriver",
+		"name":   name,
+	}
+	Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> CreateFollowup")
+	defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< CreateFollowup")
+
+	// Update resource cache as needed
+	if err := d.API.RefreshGCNVResources(ctx); err != nil {
+		return fmt.Errorf("could not update GCNV resource cache; %v", err)
+	}
+
+	// If it's a RO clone, get source volume to populate access details
+	if volConfig.ReadOnlyClone {
+		volume, err = d.API.VolumeByName(ctx, volConfig.CloneSourceVolumeInternal)
+		if err != nil {
+			return fmt.Errorf("could not find volume %s; %v", name, err)
+		}
+	} else {
+		// Get the volume
+		volume, err = d.API.Volume(ctx, volConfig)
+		if err != nil {
+			return fmt.Errorf("could not find volume %s; %v", name, err)
+		}
+	}
+
+	// Ensure volume is in a good state
+	if volume.State != gcnvapi.VolumeStateReady {
+		return fmt.Errorf("volume %s is in %s state, not %s", name, volume.State, gcnvapi.VolumeStateReady)
+	}
+
+	if len(volume.MountTargets) == 0 {
+		return fmt.Errorf("volume %s has no mount targets", volConfig.InternalName)
+	}
+
+	// Set the mount target based on the NASType
+	// NOTE(review): the SMB branch is entirely commented out, so no access info is set
+	// for SMB volumes — confirm whether SMB support is intentionally deferred.
+	if d.Config.NASType == sa.SMB {
+		// volConfig.AccessInfo.SMBPath = constructVolumeAccessPath(volConfig, volume, sa.SMB)
+		// volConfig.AccessInfo.SMBServer = (volume.MountTargets)[0].ServerFqdn
+		// volConfig.FileSystem = sa.SMB
+	} else {
+		server, share, exportErr := d.nfsExportComponentsForProtocol(volume, "")
+		if exportErr != nil {
+			return exportErr
+		}
+
+		volConfig.AccessInfo.NfsPath = "/" + share
+		volConfig.AccessInfo.NfsServerIP = server
+		volConfig.FileSystem = sa.NFS
+	}
+
+	return nil
+}
+
+// GetProtocol returns the protocol supported by this driver (File).
+func (d *NASStorageDriver) GetProtocol(context.Context) tridentconfig.Protocol {
+	return tridentconfig.File
+}
+
+// StoreConfig adds this backend's config to the persistent config struct, as needed by Trident's persistence layer.
+func (d *NASStorageDriver) StoreConfig(_ context.Context, b *storage.PersistentStorageBackendConfig) {
+	drivers.SanitizeCommonStorageDriverConfig(d.Config.CommonStorageDriverConfig)
+	b.GCNVConfig = &d.Config
+}
+
+// GetExternalConfig returns a clone of this backend's config, sanitized for external consumption.
+func (d *NASStorageDriver) GetExternalConfig(ctx context.Context) interface{} {
+	// Clone the config so we don't risk altering the original
+	var cloneConfig drivers.GCNVNASStorageDriverConfig
+	drivers.Clone(ctx, d.Config, &cloneConfig)
+
+	// Redact every field of the API key before the config leaves the driver.
+	cloneConfig.APIKey = drivers.GCPPrivateKey{
+		Type:                    utils.REDACTED,
+		ProjectID:               utils.REDACTED,
+		PrivateKeyID:            utils.REDACTED,
+		PrivateKey:              utils.REDACTED,
+		ClientEmail:             utils.REDACTED,
+		ClientID:                utils.REDACTED,
+		AuthURI:                 utils.REDACTED,
+		TokenURI:                utils.REDACTED,
+		AuthProviderX509CertURL: utils.REDACTED,
+		ClientX509CertURL:       utils.REDACTED,
+	}
+	cloneConfig.Credentials = map[string]string{
+		drivers.KeyName: utils.REDACTED,
+		drivers.KeyType: utils.REDACTED,
+	} // redact the credentials
+
+	return cloneConfig
+}
+
+// GetVolumeForImport queries the storage backend for all relevant info about
+// a single container volume managed by this driver and returns a VolumeExternal
+// representation of the volume. For this driver, volumeID is the name used when
+// creating the volume.
+func (d *NASStorageDriver) GetVolumeForImport(ctx context.Context, volumeID string) (*storage.VolumeExternal, error) {
+	// NOTE(review): unlike the sibling methods, this one has no method-trace logging —
+	// confirm whether that is intentional.
+
+	// Update resource cache as needed
+	if err := d.API.RefreshGCNVResources(ctx); err != nil {
+		return nil, fmt.Errorf("could not update GCNV resource cache; %v", err)
+	}
+
+	filesystem, err := d.API.VolumeByName(ctx, volumeID)
+	if err != nil {
+		return nil, err
+	}
+
+	return d.getVolumeExternal(filesystem), nil
+}
+
+// GetVolumeExternalWrappers queries the storage backend for all relevant info about
+// container volumes managed by this driver. It then writes a VolumeExternal
+// representation of each volume to the supplied channel, closing the channel
+// when finished.
+func (d *NASStorageDriver) GetVolumeExternalWrappers(ctx context.Context, channel chan *storage.VolumeExternalWrapper) {
+	fields := LogFields{"Method": "GetVolumeExternalWrappers", "Type": "NASStorageDriver"}
+	Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> GetVolumeExternalWrappers")
+	defer Logd(ctx, d.Name(),
+		d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< GetVolumeExternalWrappers")
+
+	// Let the caller know we're done by closing the channel
+	defer close(channel)
+
+	// Update resource cache as needed
+	if err := d.API.RefreshGCNVResources(ctx); err != nil {
+		channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err}
+		return
+	}
+
+	// Get all volumes
+	volumes, err := d.API.Volumes(ctx)
+	if err != nil {
+		channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err}
+		return
+	}
+
+	prefix := *d.Config.StoragePrefix
+
+	// Convert all volumes to VolumeExternal and write them to the channel
+	for _, volume := range *volumes {
+
+		// Filter out volumes in an unavailable state
+		switch volume.State {
+		case gcnvapi.VolumeStateDeleting, gcnvapi.VolumeStateError, gcnvapi.VolumeStateDisabled:
+			continue
+		}
+
+		// Filter out volumes without the prefix (pass all if prefix is empty)
+		if !strings.HasPrefix(volume.CreationToken, prefix) {
+			continue
+		}
+
+		channel <- &storage.VolumeExternalWrapper{Volume: d.getVolumeExternal(volume), Error: nil}
+	}
+}
+
+// getVolumeExternal is a private method that accepts info about a volume
+// as returned by the storage backend and formats it as a VolumeExternal
+// object.
+func (d *NASStorageDriver) getVolumeExternal(volumeAttrs *gcnvapi.Volume) *storage.VolumeExternal {
+	// NOTE(review): the prefix is trimmed from Name here, while List trims it from
+	// CreationToken — confirm the two fields agree for GCNV volumes.
+	internalName := volumeAttrs.Name
+	name := internalName
+	if strings.HasPrefix(internalName, *d.Config.StoragePrefix) {
+		name = internalName[len(*d.Config.StoragePrefix):]
+	}
+
+	volumeConfig := &storage.VolumeConfig{
+		Version:         tridentconfig.OrchestratorAPIVersion,
+		Name:            name,
+		InternalName:    volumeAttrs.CreationToken,
+		Size:            strconv.FormatInt(volumeAttrs.SizeBytes, 10),
+		Protocol:        tridentconfig.File,
+		SnapshotPolicy:  "",
+		ExportPolicy:    "",
+		SnapshotDir:     strconv.FormatBool(volumeAttrs.SnapshotDirectory),
+		UnixPermissions: volumeAttrs.UnixPermissions,
+		StorageClass:    "",
+		AccessMode:      tridentconfig.ReadWriteMany,
+		AccessInfo:      utils.VolumeAccessInfo{},
+		BlockSize:       "",
+		FileSystem:      "",
+		ServiceLevel:    volumeAttrs.ServiceLevel,
+	}
+
+	return &storage.VolumeExternal{
+		Config: volumeConfig,
+		Pool:   drivers.UnsetPool,
+	}
+}
+
+// String implements stringer interface for the NASStorageDriver driver.
+func (d *NASStorageDriver) String() string {
+	return utils.ToStringRedacted(d, []string{"SDK"}, d.GetExternalConfig(context.Background()))
+}
+
+// GoString implements GoStringer interface for the NASStorageDriver driver.
+func (d *NASStorageDriver) GoString() string {
+	return d.String()
+}
+
+// GetUpdateType returns a bitmap populated with updates to the driver.
+func (d *NASStorageDriver) GetUpdateType(_ context.Context, driverOrig storage.Driver) *roaring.Bitmap {
+	bitmap := roaring.New()
+	dOrig, ok := driverOrig.(*NASStorageDriver)
+	if !ok {
+		// Comparing against a different driver type is an invalid update.
+		bitmap.Add(storage.InvalidUpdate)
+		return bitmap
+	}
+
+	if !reflect.DeepEqual(d.Config.StoragePrefix, dOrig.Config.StoragePrefix) {
+		bitmap.Add(storage.PrefixChange)
+	}
+
+	if !drivers.AreSameCredentials(d.Config.Credentials, dOrig.Config.Credentials) {
+		bitmap.Add(storage.CredentialsChange)
+	}
+
+	return bitmap
+}
+
+// ReconcileNodeAccess updates a per-backend export policy to match the set of Kubernetes cluster
+// nodes. Not supported by this driver.
+func (d *NASStorageDriver) ReconcileNodeAccess(ctx context.Context, _ []*utils.Node, _, _ string) error {
+	fields := LogFields{
+		"Method": "ReconcileNodeAccess",
+		"Type":   "NASStorageDriver",
+	}
+	Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> ReconcileNodeAccess")
+	defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< ReconcileNodeAccess")
+
+	return nil
+}
+
+// validateGCNVStoragePrefix ensures the storage prefix is valid.
+func validateGCNVStoragePrefix(storagePrefix string) error {
+	if !storagePrefixRegex.MatchString(storagePrefix) {
+		return fmt.Errorf("storage prefix may only contain letters and hyphens and must begin with a letter")
+	}
+	return nil
+}
+
+// GetCommonConfig returns driver's CommonConfig.
+func (d *NASStorageDriver) GetCommonConfig(context.Context) *drivers.CommonStorageDriverConfig {
+	return d.Config.CommonStorageDriverConfig
+}
+
+// nfsExportComponentsForProtocol returns the NFS server and share for the given protocol;
+// an empty protocol matches no mount target in the first pass and relies on the NFS
+// fallback below.
+func (d *NASStorageDriver) nfsExportComponentsForProtocol(
+	volume *gcnvapi.Volume, protocol string,
+) (server, share string, err error) {
+	switch protocol {
+	case gcnvapi.ProtocolTypeNFSv3, gcnvapi.ProtocolTypeNFSv41, "":
+		// First find matching protocol
+		for _, mountTarget := range volume.MountTargets {
+			if mountTarget.Protocol == protocol {
+				return
d.parseNFSExport(mountTarget.ExportPath) + } + } + // Fall back to any NFS mount + for _, mountTarget := range volume.MountTargets { + if utils.SliceContainsString( + []string{gcnvapi.ProtocolTypeNFSv3, gcnvapi.ProtocolTypeNFSv41}, mountTarget.Protocol) { + return d.parseNFSExport(mountTarget.ExportPath) + } + } + return "", "", fmt.Errorf("no NFS mount target found on volume %s", volume.Name) + default: + return "", "", fmt.Errorf("invalid NFS protocol (%s)", protocol) + } +} + +func (d *NASStorageDriver) parseNFSExport(export string) (server, share string, err error) { + match := nfsMountPathRegex.FindStringSubmatch(export) + + if match == nil { + err = fmt.Errorf("NFS export path %s is invalid", export) + return + } + + paramsMap := make(map[string]string) + for i, name := range nfsMountPathRegex.SubexpNames() { + if i > 0 && i <= len(match) { + paramsMap[name] = match[i] + } + } + + server = paramsMap["server"] + share = paramsMap["share"] + + return +} + +func (d *NASStorageDriver) isDualProtocolVolume(volume *gcnvapi.Volume) bool { + var nfs, smb bool + + for _, protocol := range volume.ProtocolTypes { + switch protocol { + case gcnvapi.ProtocolTypeNFSv3, gcnvapi.ProtocolTypeNFSv41: + nfs = true + case gcnvapi.ProtocolTypeSMB: + smb = true + } + } + return nfs && smb +} diff --git a/storage_drivers/ontap/ontap_common.go b/storage_drivers/ontap/ontap_common.go index 08bb13200..3b8b9705a 100644 --- a/storage_drivers/ontap/ontap_common.go +++ b/storage_drivers/ontap/ontap_common.go @@ -511,7 +511,7 @@ func resizeValidation( } // Ensure the final effective volume size is larger than the current volume size - newFlexvolSize := calculateFlexvolSizeBytes(ctx, name, requestedSizeBytes, snapshotReserveInt) + newFlexvolSize := drivers.CalculateVolumeSizeBytes(ctx, name, requestedSizeBytes, snapshotReserveInt) if newFlexvolSize < volSizeBytes { return 0, errors.UnsupportedCapacityRangeError(fmt.Errorf("effective volume size %d including any "+ "snapshot reserve is less 
than the existing volume size %d", newFlexvolSize, volSizeBytes)) @@ -2850,27 +2850,6 @@ func calculateFlexvolEconomySizeBytes( return flexvolSizeBytes } -// calculateFlexvolSizeBytes calculates the size of the Flexvol taking into account the snapshot reserve -func calculateFlexvolSizeBytes( - ctx context.Context, flexvol string, requestedSizeBytes uint64, snapshotReserve int, -) uint64 { - snapReserveDivisor := 1.0 - (float64(snapshotReserve) / 100.0) - - sizeWithSnapReserve := float64(requestedSizeBytes) / snapReserveDivisor - - flexvolSizeBytes := uint64(sizeWithSnapReserve) - - Logc(ctx).WithFields(LogFields{ - "flexvol": flexvol, - "snapReserveDivisor": snapReserveDivisor, - "requestedSize": requestedSizeBytes, - "sizeWithSnapReserve": sizeWithSnapReserve, - "flexvolSizeBytes": flexvolSizeBytes, - }).Debug("Calculated optimal size for Flexvol with snapshot reserve.") - - return flexvolSizeBytes -} - type GetVolumeInfoFunc func(ctx context.Context, volumeName string) (volume *api.Volume, err error) // getSnapshotReserveFromOntap takes a volume name and retrieves the snapshot policy and snapshot reserve diff --git a/storage_drivers/ontap/ontap_common_test.go b/storage_drivers/ontap/ontap_common_test.go index 7b9f59db3..269d76e9d 100644 --- a/storage_drivers/ontap/ontap_common_test.go +++ b/storage_drivers/ontap/ontap_common_test.go @@ -2435,7 +2435,7 @@ func TestConstructOntapNASQTreeVolumePath(t *testing.T) { "test_share", "flex-vol", &storage.VolumeConfig{ - Name: "volmeConfig", + Name: "volumeConfig", InternalName: "trident_pvc_vol", CloneSourceVolumeInternal: "cloneSourceInternal", CloneSourceSnapshot: "sourceSnapShot", @@ -2448,7 +2448,7 @@ func TestConstructOntapNASQTreeVolumePath(t *testing.T) { "", "flex-vol", &storage.VolumeConfig{ - Name: "volmeConfig", + Name: "volumeConfig", InternalName: "trident_pvc_vol", CloneSourceVolumeInternal: "cloneSourceInternal", CloneSourceSnapshot: "sourceSnapShot", diff --git a/storage_drivers/ontap/ontap_nas.go 
b/storage_drivers/ontap/ontap_nas.go index eca681158..84ca1bc06 100644 --- a/storage_drivers/ontap/ontap_nas.go +++ b/storage_drivers/ontap/ontap_nas.go @@ -278,7 +278,7 @@ func (d *NASStorageDriver) Create( sizeBytes = GetVolumeSize(sizeBytes, storagePool.InternalAttributes()[Size]) // Get the flexvol size based on the snapshot reserve - flexvolSize := calculateFlexvolSizeBytes(ctx, name, sizeBytes, snapshotReserveInt) + flexvolSize := drivers.CalculateVolumeSizeBytes(ctx, name, sizeBytes, snapshotReserveInt) size := strconv.FormatUint(flexvolSize, 10) diff --git a/storage_drivers/ontap/ontap_nas_flexgroup.go b/storage_drivers/ontap/ontap_nas_flexgroup.go index 5674ecc47..eafa576c7 100644 --- a/storage_drivers/ontap/ontap_nas_flexgroup.go +++ b/storage_drivers/ontap/ontap_nas_flexgroup.go @@ -574,7 +574,7 @@ func (d *NASFlexGroupStorageDriver) Create( return fmt.Errorf("%v is an invalid volume size: %v", volConfig.Size, err) } // get the flexgroup size based on the snapshot reserve - flexgroupSize := calculateFlexvolSizeBytes(ctx, name, sizeBytes, snapshotReserveInt) + flexgroupSize := drivers.CalculateVolumeSizeBytes(ctx, name, sizeBytes, snapshotReserveInt) sizeBytes = GetVolumeSize(flexgroupSize, storagePool.InternalAttributes()[Size]) if err != nil { return err diff --git a/storage_drivers/ontap/ontap_san.go b/storage_drivers/ontap/ontap_san.go index 091a5bce7..6d0d22056 100644 --- a/storage_drivers/ontap/ontap_san.go +++ b/storage_drivers/ontap/ontap_san.go @@ -345,7 +345,7 @@ func (d *SANStorageDriver) Create( lunSize := strconv.FormatUint(lunSizeBytes, 10) // Get the flexvol size based on the snapshot reserve - flexvolSize := calculateFlexvolSizeBytes(ctx, name, lunSizeBytes, snapshotReserveInt) + flexvolSize := drivers.CalculateVolumeSizeBytes(ctx, name, lunSizeBytes, snapshotReserveInt) // Add extra 10% to the Flexvol to account for LUN metadata flexvolBufferSize := uint64(LUNMetadataBufferMultiplier * float64(flexvolSize)) @@ -1349,7 +1349,7 @@ func (d 
*SANStorageDriver) Resize( Logc(ctx).WithField("name", name).Errorf("Could not get the snapshot reserve percentage for volume") } - newFlexvolSize := calculateFlexvolSizeBytes(ctx, name, requestedSizeBytes, snapshotReserveInt) + newFlexvolSize := drivers.CalculateVolumeSizeBytes(ctx, name, requestedSizeBytes, snapshotReserveInt) newFlexvolSize = uint64(LUNMetadataBufferMultiplier * float64(newFlexvolSize)) sameLUNSize := utils.VolumeSizeWithinTolerance(int64(requestedSizeBytes), int64(currentLunSize), diff --git a/storage_drivers/ontap/ontap_san_nvme.go b/storage_drivers/ontap/ontap_san_nvme.go index 2c4aabe1a..2daabd7d7 100644 --- a/storage_drivers/ontap/ontap_san_nvme.go +++ b/storage_drivers/ontap/ontap_san_nvme.go @@ -295,7 +295,7 @@ func (d *NVMeStorageDriver) Create( namespaceSizeBytes := GetVolumeSize(requestedSizeBytes, storagePool.InternalAttributes()[Size]) namespaceSize := strconv.FormatUint(namespaceSizeBytes, 10) // Get the FlexVol size based on the snapshot reserve. - flexVolSize := calculateFlexvolSizeBytes(ctx, name, namespaceSizeBytes, snapshotReserveInt) + flexVolSize := drivers.CalculateVolumeSizeBytes(ctx, name, namespaceSizeBytes, snapshotReserveInt) // Add extra 10% to the FlexVol to account for Namespace metadata. 
flexVolBufferSize := uint64(LUNMetadataBufferMultiplier * float64(flexVolSize)) @@ -1301,7 +1301,7 @@ func (d *NVMeStorageDriver) Resize( Logc(ctx).WithField("name", name).Errorf("Could not get the snapshot reserve percentage for volume.") } - newFlexVolSize := calculateFlexvolSizeBytes(ctx, name, requestedSizeBytes, snapshotReserveInt) + newFlexVolSize := drivers.CalculateVolumeSizeBytes(ctx, name, requestedSizeBytes, snapshotReserveInt) newFlexVolSize = uint64(LUNMetadataBufferMultiplier * float64(newFlexVolSize)) sameNamespaceSize := utils.VolumeSizeWithinTolerance(int64(requestedSizeBytes), int64(nsSizeBytes), diff --git a/storage_drivers/types.go b/storage_drivers/types.go index d6876c815..2a03e0273 100644 --- a/storage_drivers/types.go +++ b/storage_drivers/types.go @@ -52,6 +52,8 @@ func GetDriverConfigByName(driverName string) (DriverConfig, error) { storageDriverConfig = &AzureNASStorageDriverConfig{} case trident.GCPNFSStorageDriverName: storageDriverConfig = &GCPNFSStorageDriverConfig{} + case trident.GCNVNASStorageDriverName: + storageDriverConfig = &GCNVNASStorageDriverConfig{} case trident.FakeStorageDriverName: storageDriverConfig = &FakeStorageDriverConfig{} default: @@ -166,7 +168,8 @@ type AWSConfig struct { // within a backend. 
type StorageBackendPool interface { OntapFlexGroupStorageBackendPool | OntapStorageBackendPool | OntapEconomyStorageBackendPool | - ANFStorageBackendPool | ANFSubvolumeStorageBackendPool | SolidfireStorageBackendPool | GCPNFSStorageBackendPool + ANFStorageBackendPool | ANFSubvolumeStorageBackendPool | SolidfireStorageBackendPool | + GCPNFSStorageBackendPool | GCNVNASStorageBackendPool } // OntapFlexGroupStorageBackendPool is a non-overlapping section of an ONTAP flexgroup backend that may be used for @@ -708,6 +711,122 @@ func (d GCPNFSStorageDriverConfig) SpecOnlyValidation() error { return nil } +type GCNVNASStorageDriverConfig struct { + *CommonStorageDriverConfig + ProjectNumber string `json:"projectNumber"` + Location string `json:"location"` + APIKey GCPPrivateKey `json:"apiKey"` + NFSMountOptions string `json:"nfsMountOptions"` + VolumeCreateTimeout string `json:"volumeCreateTimeout"` + SDKTimeout string `json:"sdkTimeout"` + MaxCacheAge string `json:"maxCacheAge"` + NASType string `json:"nasType"` + GCNVNASStorageDriverPool + Storage []GCNVNASStorageDriverPool `json:"storage"` +} + +type GCNVNASStorageDriverPool struct { + Labels map[string]string `json:"labels"` + Region string `json:"region"` + Zone string `json:"zone"` + ServiceLevel string `json:"serviceLevel"` + StorageClass string `json:"storageClass"` + StoragePools []string `json:"storagePools"` + Network string `json:"network"` + SupportedTopologies []map[string]string `json:"supportedTopologies"` + GCNVNASStorageDriverConfigDefaults `json:"defaults"` +} + +// GCNVNASStorageBackendPool is a non-overlapping section of a GCNV backend that may be used for provisioning storage. 
+type GCNVNASStorageBackendPool struct {
+	ProjectNumber string `json:"projectNumber"`
+	Location      string `json:"location"`
+	StoragePool   string `json:"storagePool"`
+}
+
+type GCNVNASStorageDriverConfigDefaults struct {
+	ExportRule      string `json:"exportRule"`
+	SnapshotDir     string `json:"snapshotDir"`
+	SnapshotReserve string `json:"snapshotReserve"`
+	UnixPermissions string `json:"unixPermissions"`
+	CommonStorageDriverConfigDefaults
+}
+
+// Implement Stringer interface for the GCNVNASStorageDriverConfig driver
+func (d GCNVNASStorageDriverConfig) String() string {
+	return utils.ToStringRedacted(&d, []string{"ProjectNumber", "HostProjectNumber", "APIKey"}, nil)
+}
+
+// Implement GoStringer interface for the GCNVNASStorageDriverConfig driver
+func (d GCNVNASStorageDriverConfig) GoString() string {
+	return d.String()
+}
+
+// InjectSecrets function replaces sensitive fields in the config with the field values in the map
+func (d *GCNVNASStorageDriverConfig) InjectSecrets(secretMap map[string]string) error {
+	// NOTE: When the backend secrets are read in the CRD persistence layer they are converted to lower-case.
+
+	var ok bool
+	if d.APIKey.PrivateKey, ok = secretMap[strings.ToLower("Private_Key")]; !ok {
+		return injectionError("Private_Key")
+	}
+	if d.APIKey.PrivateKeyID, ok = secretMap[strings.ToLower("Private_Key_ID")]; !ok {
+		return injectionError("Private_Key_ID")
+	}
+
+	return nil
+}
+
+// ExtractSecrets function builds a map of any sensitive fields it contains (credentials, etc.),
+// and returns the map.
+func (d *GCNVNASStorageDriverConfig) ExtractSecrets() map[string]string {
+	secretMap := make(map[string]string)
+
+	secretMap["Private_Key"] = d.APIKey.PrivateKey
+	secretMap["Private_Key_ID"] = d.APIKey.PrivateKeyID
+
+	return secretMap
+}
+
+// ResetSecrets function removes sensitive fields it contains (credentials, etc.)
+func (d *GCNVNASStorageDriverConfig) ResetSecrets() {
+	d.APIKey.PrivateKey = ""
+	d.APIKey.PrivateKeyID = ""
+}
+
+// HideSensitiveWithSecretName function replaces sensitive fields it contains (credentials, etc.),
+// with secretName.
+func (d *GCNVNASStorageDriverConfig) HideSensitiveWithSecretName(secretName string) {
+	d.APIKey.PrivateKey = secretName
+	d.APIKey.PrivateKeyID = secretName
+}
+
+// GetAndHideSensitive function builds a map of any sensitive fields it contains (credentials, etc.),
+// replaces those fields with secretName and returns the map.
+func (d *GCNVNASStorageDriverConfig) GetAndHideSensitive(secretName string) map[string]string {
+	secretMap := d.ExtractSecrets()
+	d.HideSensitiveWithSecretName(secretName)
+
+	return secretMap
+}
+
+// CheckForCRDControllerForbiddenAttributes checks config for the keys forbidden by CRD controller and returns them
+func (d GCNVNASStorageDriverConfig) CheckForCRDControllerForbiddenAttributes() []string {
+	return checkMapContainsAttributes(d.ExtractSecrets())
+}
+
+func (d GCNVNASStorageDriverConfig) SpecOnlyValidation() error {
+	if forbiddenList := d.CheckForCRDControllerForbiddenAttributes(); len(forbiddenList) > 0 {
+		return fmt.Errorf("input contains forbidden attributes: %v", forbiddenList)
+	}
+
+	if !d.HasCredentials() {
+		return fmt.Errorf("input is missing the credentials field")
+	}
+
+	return nil
+}
+
 type FakeStorageDriverConfig struct {
 	*CommonStorageDriverConfig
 	Protocol trident.Protocol `json:"protocol"`
diff --git a/storage_drivers/types_test.go b/storage_drivers/types_test.go
index 8ebe83bc9..ea955ac52 100644
--- a/storage_drivers/types_test.go
+++ b/storage_drivers/types_test.go
@@ -61,6 +61,11 @@ func TestGetDriverConfigByName(t *testing.T) {
 			expectedDriverConfig: &GCPNFSStorageDriverConfig{},
 			errorExpected:        false,
 		},
+		{
+			driverName:           config.GCNVNASStorageDriverName,
+			expectedDriverConfig: &GCNVNASStorageDriverConfig{},
+			errorExpected:        false,
+		},
+		{
+			driverName: 
config.FakeStorageDriverName, expectedDriverConfig: &FakeStorageDriverConfig{}, diff --git a/utils/utils.go b/utils/utils.go index 41c08437e..c5ce9f6ab 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -1116,3 +1116,11 @@ func ReplaceMultilineYAMLTag(originalYAML, tag, tagText string) string { return originalYAML } + +// ShortenString returns the specified string, shortened by dropping characters on the right side to the given limit. +func ShortenString(s string, maxLength int) string { + if len(s) > maxLength { + return s[:maxLength] + } + return s +} diff --git a/utils/utils_test.go b/utils/utils_test.go index 1bb4bff5b..de6ab0d39 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -638,6 +638,7 @@ func TestGetNFSVersionFromMountOptions(t *testing.T) { }{ // Positive tests {"", defaultVersion, supportedVersions, defaultVersion, false}, + {"", "", supportedVersions, "", false}, {"", defaultVersion, nil, defaultVersion, false}, {"vers=3", defaultVersion, supportedVersions, defaultVersion, false}, {"tcp, vers=3", defaultVersion, supportedVersions, defaultVersion, false}, @@ -1668,3 +1669,27 @@ func TestGetFormattedValidBool(t *testing.T) { }) } } + +func TestShortenString(t *testing.T) { + Log().Debug("Running TestShortenString...") + + type TestData struct { + Input string + Length int + Output string + } + + data := []TestData{ + {"", 0, ""}, + {"", 1, ""}, + {"text", 10, "text"}, + {"text", 3, "tex"}, + {" text ", 10, " text "}, + {"a123456789b123456789c123456789d123456789e123456789f123456789g123456789", 63, "a123456789b123456789c123456789d123456789e123456789f123456789g12"}, + } + + for _, d := range data { + result := ShortenString(d.Input, d.Length) + assert.Equal(t, d.Output, result) + } +}