diff --git a/.github/workflows/build-and-push-to-ghcr.yml b/.github/workflows/build-and-push-to-ghcr.yml index a5587ee..1a18c26 100644 --- a/.github/workflows/build-and-push-to-ghcr.yml +++ b/.github/workflows/build-and-push-to-ghcr.yml @@ -10,6 +10,11 @@ on: jobs: build_image_on_push: + permissions: + packages: write + security-events: write + actions: read + contents: read uses: ./.github/workflows/imagetoghcr-on-push.yaml with: image_name: "ionos-exporter" diff --git a/Dockerfile b/Dockerfile index 9dfda21..31ab7aa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,8 @@ COPY go.sum . RUN go mod download +RUN go mod tidy + FROM build_deps AS build COPY . . diff --git a/Documentation/arch_diagramm_io_exp.xml b/Documentation/arch_diagramm_io_exp.xml new file mode 100644 index 0000000..87bfaff --- /dev/null +++ b/Documentation/arch_diagramm_io_exp.xml @@ -0,0 +1,184 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Documentation/ionos_exporter_sequenzdiagram_postgres.drawio b/Documentation/ionos_exporter_sequenzdiagram_postgres.drawio new file mode 100644 index 0000000..745870c --- /dev/null +++ b/Documentation/ionos_exporter_sequenzdiagram_postgres.drawio @@ -0,0 +1,380 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Documentation/ionos_exporter_sequenzdiagram_s3.drawio b/Documentation/ionos_exporter_sequenzdiagram_s3.drawio new file mode 100644 index 0000000..39a28ae --- /dev/null +++ b/Documentation/ionos_exporter_sequenzdiagram_s3.drawio @@ -0,0 +1,423 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/charts/ionos-exporter/config.yaml b/charts/ionos-exporter/config.yaml new file mode 100644 index 0000000..8c3947b --- /dev/null +++ b/charts/ionos-exporter/config.yaml @@ -0,0 +1,37 @@ +metrics: +- name: ionos_dbaas_postgres_transactions:rate2m + description: Per-second average rate of SQL transactions (that have been committed), calculated over the last 2 minutes. + type: gauge +- name: ionos_dbaas_postgres_connections_count + description: Number of connections per instance and state. active, disabled, fastpath function call, idle, idle in transaction, idle in transaction (aborted). 
+ type: gauge +- name: ionos_dbaas_postgres_cpu_rate5m + description: The average CPU utilization over the past 5 minutes. + type: gauge +- name: ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m + description: The rate of disk I/O time, in seconds, over a five-minute period. Provides insight into performance of a disk, as high values may indicate that the disk is being overused or is experiencing performance issues. + type: gauge +- name: ionos_dbaas_postgres_instance_count + description: Desired number of instances. The number of currently ready and running instances may be different. ionos_dbaas_postgres_role provides information about running instances split by role. + type: gauge +- name: ionos_dbaas_postgres_load5 + description: Linux load average for the last 5 minutes. This metric is represented as a number between 0 and 1 (can be greater than 1 on multicore machines), where 0 indicates that the CPU core is idle and 1 indicates that the CPU core is fully utilized. Higher values may indicate that the system is experiencing performance issues or is approaching capacity. + type: gauge +- name: ionos_dbaas_postgres_memory_available_bytes + description: Available memory in bytes. + type: gauge +- name: ionos_dbaas_postgres_memory_total_bytes + description: Total memory of the underlying machine in bytes. Some of it is used for our management and monitoring tools and not available to PostgreSQL. During horizontal scaling you might see different values for each instance. + type: gauge +- name: ionos_dbaas_postgres_role + description: Current role of the instance. Provides whether an instance is currently "master" or "replica". + type: gauge +- name: ionos_dbaas_postgres_storage_available_bytes + description: Free available disk space per instance in bytes. + type: gauge +- name: ionos_dbaas_postgres_storage_total_bytes + description: Total disk space per instance in bytes. During horizontal scaling you might see different values for each instance. 
+ type: gauge +- name: ionos_dbaas_postgres_user_tables_idx_scan + description: Number of index scans per table/schema. + type: gauge \ No newline at end of file diff --git a/charts/ionos-exporter/templates/ConfigMap.yaml b/charts/ionos-exporter/templates/ConfigMap.yaml new file mode 100644 index 0000000..bab9f0b --- /dev/null +++ b/charts/ionos-exporter/templates/ConfigMap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ionos-exporter-config +data: + config.yaml: |- + {{ .Files.Get "config.yaml" | nindent 4 }} \ No newline at end of file diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index 497c1ab..cf04c2b 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -35,6 +35,8 @@ spec: - name: metrics containerPort: {{ .Values.containerPort }} protocol: TCP + args: + - "-config=/etc/ionos-exporter/config.yaml" livenessProbe: httpGet: path: /metrics @@ -46,20 +48,43 @@ spec: resources: {{- toYaml .Values.resources | nindent 12 }} env: + {{- if .Values.ionos.s3.enabled }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ .Values.ionos.credentials.secret_name }} + key: {{ .Values.ionos.s3.credentials.access_key }} + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.ionos.credentials.secret_name }} + key: {{ .Values.ionos.s3.credentials.secret_key }} + {{- end }} - name: IONOS_USERNAME valueFrom: secretKeyRef: - name: {{ .Values.ionos_credentials_secret_name }} - key: {{ .Values.ionos_credentials_username_key }} + name: {{ .Values.ionos.credentials.secret_name }} + key: {{ .Values.ionos.credentials.username_key }} - name: IONOS_PASSWORD valueFrom: secretKeyRef: - name: {{ .Values.ionos_credentials_secret_name }} - key: {{ .Values.ionos_credentials_password_key }} + name: {{ .Values.ionos.credentials.secret_name }} + key: {{ .Values.ionos.credentials.password_key }} + - name: 
IONOS_EXPORTER_S3_ENABLED + value: {{ .Values.ionos.s3.enabled | quote }} - name: IONOS_EXPORTER_APPLICATION_CONTAINER_PORT value: {{ .Values.containerPort | quote }} - name: IONOS_EXPORTER_API_CYCLE value: {{ .Values.ionosApiCycle | quote }} + volumeMounts: + - name: config-volume + readOnly: true + mountPath: /etc/ionos-exporter/config.yaml + subPath: config.yaml + volumes: + - name: config-volume + configMap: + name: ionos-exporter-config {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/charts/ionos-exporter/values.yaml b/charts/ionos-exporter/values.yaml index 25f983a..6e33fe3 100644 --- a/charts/ionos-exporter/values.yaml +++ b/charts/ionos-exporter/values.yaml @@ -10,9 +10,18 @@ image: imagePullSecrets: [] # credentials -ionos_credentials_secret_name: "ionos-exporter-credentials" -ionos_credentials_username_key: "username" -ionos_credentials_password_key: "password" +# ionos_credentials_secret_token: "ionos-exporter-token" +# ionos_credentials_token_key: "token" +ionos: + credentials: + secret_name: "ionos-exporter-credentials" + username_key: "username" + password_key: "password" + s3: + enabled: false + credentials: + secret_key: "secretKey" + access_key: "accessKey" service: type: ClusterIP diff --git a/env b/env new file mode 100644 index 0000000..b936298 --- /dev/null +++ b/env @@ -0,0 +1 @@ +IONOS_TOKEN="" \ No newline at end of file diff --git a/go.mod b/go.mod index df02995..624f546 100644 --- a/go.mod +++ b/go.mod @@ -3,21 +3,28 @@ module ionos-exporter go 1.20 require ( + github.com/aws/aws-sdk-go v1.52.0 + github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2 github.com/ionos-cloud/sdk-go/v6 v6.1.9 + github.com/joho/godotenv v1.5.1 github.com/prometheus/client_golang v1.16.0 + gopkg.in/yaml.v2 v2.4.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + 
github.com/kr/text v0.2.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect - golang.org/x/net v0.15.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sys v0.12.0 // indirect + golang.org/x/sys v0.21.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/go.sum b/go.sum index c5e63d3..4749301 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,11 @@ +github.com/aws/aws-sdk-go v1.52.0 h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI= +github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -11,10 +15,23 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 
h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2 h1:AaKbci+kVS6/k43VwJwmXxCJ7pzj9jwuOPqO8Wd5560= +github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2/go.mod h1:nmJEwuRX65A5/PxwvdFW0XrV+N6WFYnMV1TiIafAwz4= github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= @@ -23,18 +40,22 @@ github.com/prometheus/common v0.42.0 
h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.14.0 
h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= @@ -43,3 +64,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/internal/helper.go b/internal/helper.go index a2900a9..05318d0 100644 --- a/internal/helper.go +++ b/internal/helper.go @@ -2,9 +2,26 @@ package internal import ( "fmt" + "io/ioutil" + "log" "os" + + aws "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + "gopkg.in/yaml.v2" ) +type Config struct { + Metrics []MetricConfig `yaml:"metrics"` +} + +type MetricConfig struct { + Name string `yaml:"name"` + Description string `yaml:"description"` + Type string `yaml:"type"` +} + func GetEnv(key string, fallback string) string { value, ok := os.LookupEnv(key) if !ok { @@ -20,3 +37,35 @@ func GetEnv(key string, fallback string) string { } } } + +func GetHeadBucket(client *s3.S3, bucketName string) error { + input := &s3.HeadBucketInput{ + Bucket: 
aws.String(bucketName), + } + _, err := client.HeadBucket(input) + if err != nil { + if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 { + log.Printf("Skipping bucket %s due to Forbidden error: %v\n", bucketName, err) + return err + } + log.Printf("Problem getting the location for bucket %s: %v\n", bucketName, err) + return err + } + log.Printf("Bucket %s exists and is accessible\n", bucketName) + return nil +} + +func LoadConfig(filename string) (*Config, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + var config Config + err = yaml.Unmarshal(data, &config) + if err != nil { + return nil, err + } + + return &config, nil +} diff --git a/internal/ionos.go b/internal/ionos.go deleted file mode 100644 index 0fdd76a..0000000 --- a/internal/ionos.go +++ /dev/null @@ -1,110 +0,0 @@ -package internal - -import ( - "context" - "fmt" - "log" - "os" - "sync" - "time" - - ionoscloud "github.com/ionos-cloud/sdk-go/v6" -) - -var ( - CoresTotal int32 = 0 - RamTotal int32 = 0 - ServerTotal int32 = 0 - DataCenters int32 = 0 - IonosDatacenters = make(map[string]IonosDCResources) //Key is the name of the datacenter - depth int32 = 1 //Controls the detail depth of the response objects. 
-) - -type IonosDCResources struct { - Cores int32 // Amount of CPU cores in the whole DC, regardless whether it is a VM or Kubernetscluster - Ram int32 // Amount of RAM in the whole DC, regardless whether it is a VM or Kubernetscluster - Servers int32 // Amount of servers in the whole DC - DCId string // UUID od the datacenter -} - -func CollectResources(m *sync.RWMutex, cycletime int32) { - configuration := ionoscloud.NewConfigurationFromEnv() - apiClient := ionoscloud.NewAPIClient(configuration) - for { - datacenters, resp, err := apiClient.DataCentersApi.DatacentersGet(context.Background()).Depth(depth).Execute() - if err != nil { - fmt.Fprintf(os.Stderr, "Error when calling `DataCentersApi.DatacentersGet``: %v\n", err) - fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) - os.Exit(1) - } - newIonosDatacenters := make(map[string]IonosDCResources) - for _, datacenter := range *datacenters.Items { - var ( - coresTotalDC int32 = 0 - ramTotalDC int32 = 0 - serverTotalDC int32 = 0 - ) - servers, resp, err := apiClient.ServersApi.DatacentersServersGet(context.Background(), *datacenter.Id).Depth(depth).Execute() - if err != nil { - fmt.Fprintf(os.Stderr, "Error when calling `ServersApi.DatacentersServersGet``: %v\n", err) - fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) - } - serverTotalDC = int32(len(*servers.Items)) - for _, server := range *servers.Items { - coresTotalDC += *server.Properties.Cores - ramTotalDC += *server.Properties.Ram - } - newIonosDatacenters[*datacenter.Properties.Name] = IonosDCResources{ - DCId: *datacenter.Id, - Cores: coresTotalDC, - Ram: ramTotalDC, - Servers: serverTotalDC, - } - } - m.Lock() - IonosDatacenters = newIonosDatacenters - m.Unlock() - CalculateDCTotals(m) - time.Sleep(time.Duration(cycletime) * time.Second) - } -} -func CalculateDCTotals(m *sync.RWMutex) { - var ( - serverTotal int32 - ramTotal int32 - coresTotal int32 - datacentersTotal int32 - ) - m.RLock() - for _, dcResources := range IonosDatacenters { - 
serverTotal += dcResources.Servers - ramTotal += dcResources.Ram - coresTotal += dcResources.Cores - } - datacentersTotal = int32(len(IonosDatacenters)) - m.RUnlock() - m.Lock() - ServerTotal = serverTotal - RamTotal = ramTotal - CoresTotal = coresTotal - DataCenters = datacentersTotal - m.Unlock() -} -func PrintDCResources(m *sync.RWMutex) { - m.RLock() - defer m.RUnlock() - for dcName, dcResources := range IonosDatacenters { - fmt.Fprintf(os.Stdout, "%s:\n - UUID: %s\n", dcName, dcResources.DCId) - fmt.Fprintf(os.Stdout, " - Servers: %d\n", dcResources.Servers) - fmt.Fprintf(os.Stdout, " - Cores: %d\n", dcResources.Cores) - fmt.Fprintf(os.Stdout, " - Ram: %d GB\n", dcResources.Ram/1024) - } -} -func PrintDCTotals(m *sync.RWMutex) { - m.RLock() - defer m.RUnlock() - log.Printf("Total - Datacenters: %d\n", DataCenters) - log.Printf("Total - Servers: %d\n", ServerTotal) - log.Printf("Total - Cores: %d\n", CoresTotal) - log.Printf("Total - Ram: %d GB\n", RamTotal/1024) -} diff --git a/internal/ionos_collector.go b/internal/ionos_collector.go new file mode 100644 index 0000000..cd9069f --- /dev/null +++ b/internal/ionos_collector.go @@ -0,0 +1,190 @@ +package internal + +import ( + "os" + "sync" + + //"time" + + "github.com/prometheus/client_golang/prometheus" +) + +// Define a struct for you collector that contains pointers +// to prometheus descriptors for each metric you wish to expose. +// Note you can also include fields of other types if they provide utility +// but we just won't be exposing them as metrics. 
+type ionosCollector struct { + mutex *sync.RWMutex + coresMetric *prometheus.GaugeVec + ramMetric *prometheus.GaugeVec + serverMetric *prometheus.GaugeVec + dcCoresMetric *prometheus.GaugeVec + dcRamMetric *prometheus.GaugeVec + dcServerMetric *prometheus.GaugeVec + dcDCMetric *prometheus.GaugeVec + nlbsMetric *prometheus.GaugeVec + albsMetric *prometheus.GaugeVec + natsMetric *prometheus.GaugeVec + dcDCNLBMetric *prometheus.GaugeVec + dcDCALBMetric *prometheus.GaugeVec + dcDCNATMetric *prometheus.GaugeVec + dcNLBRulesMetric *prometheus.GaugeVec + dcALBRulesMetric *prometheus.GaugeVec + dcTotalIpsMetric prometheus.Gauge + apiFailuresMetric prometheus.Counter +} + +// You must create a constructor for you collector that +// initializes every descriptor and returns a pointer to the collector +func NewIonosCollector(m *sync.RWMutex) *ionosCollector { + return &ionosCollector{ + mutex: m, + coresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dc_cores_amount", + Help: "Shows the number of currently active cores in an IONOS datacenter", + }, []string{"datacenter"}), + ramMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dc_ram_gb", + Help: "Shows the number of currently active RAM in an IONOS datacenter", + }, []string{"datacenter"}), + serverMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dc_server_amount", + Help: "Shows the number of currently active servers in an IONOS datacenter", + }, []string{"datacenter"}), + dcCoresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_cores_amount", + Help: "Shows the number of currently active cores of an IONOS account", + }, []string{"account"}), + dcRamMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_ram_gb", + Help: "Shows the number of currently active RAM of an IONOS account", + }, []string{"account"}), + dcServerMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_server_amount", + Help: "Shows the 
number of currently active servers of an IONOS account", + }, []string{"account"}), + dcDCMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_datacenter_amount", + Help: "Shows the number of datacenters of an IONOS account", + }, []string{"account"}), + nlbsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_networkloadbalancer_amount", + Help: "Shows the number of active Network Loadbalancers in an IONOS datacenter", + }, []string{"datacenter", "nlb_name", "nlb_rules_name"}), + albsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_applicationloadbalancer_amount", + Help: "Shows the number of active Application Loadbalancers in an IONOS datacenter", + }, []string{"datacenter", "alb_name", "alb_rules_name"}), + natsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_nat_gateways_amount", + Help: "Shows the number of NAT Gateways in an IONOS datacenter", + }, []string{"datacenter"}), + dcDCNLBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_networkloadbalancer_amount", + Help: "Shows the total number of Network Loadbalancers in IONOS Account", + }, []string{"account"}), + dcDCALBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_applicationbalancer_amount", + Help: "Shows the total number of Application Loadbalancers in IONOS Account", + }, []string{"account"}), + dcDCNATMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_nat_gateways_amount", + Help: "Shows the total number of NAT Gateways in IONOS Account", + }, []string{"account"}), + dcNLBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_number_of_nlb_rules", + Help: "Shows the total number of NLB Rules in IONOS Account", + }, []string{"nlb_rules"}), + dcALBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_nmumber_of_alb_rules", + Help: "Shows the total number of ALB Rules in IONOS Account", + }, 
[]string{"alb_rules"}), + dcTotalIpsMetric: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "ionos_total_number_of_ips", + Help: "Shows the number of Ips in a IONOS", + }), + apiFailuresMetric: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ionos_api_failures_total", + Help: "Total number of failed API calls", + }), + } +} + +// Each and every collector must implement the Describe function. +// It essentially writes all descriptors to the prometheus desc channel. +// func (collector *ionosCollector) Describe(ch chan<- *prometheus.Desc) { +func (collector *ionosCollector) Describe(ch chan<- *prometheus.Desc) { + + //Update this section with the each metric you create for a given collector + collector.coresMetric.Describe(ch) + collector.ramMetric.Describe(ch) + collector.serverMetric.Describe(ch) + collector.dcCoresMetric.Describe(ch) + collector.dcRamMetric.Describe(ch) + collector.dcServerMetric.Describe(ch) + collector.dcDCMetric.Describe(ch) + collector.nlbsMetric.Describe(ch) + collector.albsMetric.Describe(ch) + collector.natsMetric.Describe(ch) + collector.dcDCNLBMetric.Describe(ch) + collector.dcDCALBMetric.Describe(ch) + collector.dcDCNATMetric.Describe(ch) + collector.dcALBRulesMetric.Describe(ch) + collector.dcNLBRulesMetric.Describe(ch) + collector.dcTotalIpsMetric.Describe(ch) + collector.apiFailuresMetric.Describe(ch) +} + +// Collect implements required collect function for all promehteus collectors +func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { + + //Implement logic here to determine proper metric value to return to prometheus + //for each descriptor or call other functions that do so. 
+ account := os.Getenv("IONOS_ACCOUNT") + collector.mutex.RLock() + defer collector.mutex.RUnlock() + + // Reset metrics in case a datacenter was removed + collector.coresMetric.Reset() + collector.ramMetric.Reset() + collector.serverMetric.Reset() + collector.albsMetric.Reset() + collector.natsMetric.Reset() + collector.nlbsMetric.Reset() + // fmt.Println("Here are the metrics in ionosCollector", IonosDatacenters) + for dcName, dcResources := range IonosDatacenters { + //Write latest value for each metric in the prometheus metric channel. + collector.coresMetric.WithLabelValues(dcName).Set(float64(dcResources.Cores)) + collector.ramMetric.WithLabelValues(dcName).Set(float64(dcResources.Ram / 1024)) // MB -> GB + collector.serverMetric.WithLabelValues(dcName).Set(float64(dcResources.Servers)) + collector.nlbsMetric.WithLabelValues(dcName, dcResources.NLBName, dcResources.NLBRuleName).Set(float64(dcResources.NLBs)) + collector.albsMetric.WithLabelValues(dcName, dcResources.ALBName, dcResources.ALBRuleName).Set(float64(dcResources.ALBs)) + collector.natsMetric.WithLabelValues(dcName).Set(float64(dcResources.NATs)) + collector.dcTotalIpsMetric.Set(float64(dcResources.TotalIPs)) + collector.apiFailuresMetric.Add(float64(dcResources.TotalAPICallFailures)) + + } + + collector.dcCoresMetric.WithLabelValues(account).Set(float64(CoresTotal)) + collector.dcRamMetric.WithLabelValues(account).Set(float64(RamTotal / 1024)) // MB -> GB + collector.dcServerMetric.WithLabelValues(account).Set(float64(ServerTotal)) + collector.dcDCMetric.WithLabelValues(account).Set(float64(DataCenters)) + + collector.coresMetric.Collect(ch) + collector.ramMetric.Collect(ch) + collector.serverMetric.Collect(ch) + collector.dcCoresMetric.Collect(ch) + collector.dcRamMetric.Collect(ch) + collector.dcServerMetric.Collect(ch) + collector.dcDCMetric.Collect(ch) + collector.nlbsMetric.Collect(ch) + collector.albsMetric.Collect(ch) + collector.natsMetric.Collect(ch) + collector.dcDCNLBMetric.Collect(ch) + 
collector.dcDCALBMetric.Collect(ch) + collector.dcDCNATMetric.Collect(ch) + collector.dcNLBRulesMetric.Collect(ch) + collector.dcALBRulesMetric.Collect(ch) + collector.dcTotalIpsMetric.Collect(ch) + collector.apiFailuresMetric.Collect(ch) +} diff --git a/internal/ionos_scraper.go b/internal/ionos_scraper.go new file mode 100644 index 0000000..41f1e44 --- /dev/null +++ b/internal/ionos_scraper.go @@ -0,0 +1,408 @@ +package internal + +import ( + "context" + "fmt" + "log" + "os" + "sync" + "time" + + ionoscloud "github.com/ionos-cloud/sdk-go/v6" + "github.com/joho/godotenv" +) + +var ( + CoresTotal int32 = 0 + RamTotal int32 = 0 + ServerTotal int32 = 0 + DataCenters int32 = 0 + IonosDatacenters = make(map[string]IonosDCResources) //Key is the name of the datacenter + depth int32 = 1 +) + +type IonosDCResources struct { + Cores int32 // Amount of CPU cores in the whole DC, regardless whether it is a VM or Kubernetscluster + Ram int32 // Amount of RAM in the whole DC, regardless whether it is a VM or Kubernetscluster + Servers int32 // Amount of servers in the whole DC + DCId string // UUID od the datacenter + NLBs int32 //Number of Networkloadbalancers + ALBs int32 //Number of Applicationloadbalanceers + NATs int32 //Number of NAT Gateways + NLBRules int32 //Number of NLB Rules + ALBRules int32 //Number of ALB Rueles + ALBName string //ALB Name + NLBName string //NLB Name + NLBRuleName string //Rule name of NLB + ALBRuleName string //Rule name of ALB + IPName string //IP Name + TotalIPs int32 //Number of total IP-s + TotalAPICallFailures int32 +} + +func CollectResources(m *sync.RWMutex, envFile string, cycletime int32) { + + err := godotenv.Load(envFile) + if err != nil { + fmt.Println("Error loading .env file (optional)") + } + + cfgENV := ionoscloud.NewConfigurationFromEnv() + + cfgENV.Debug = false + apiClient := ionoscloud.NewAPIClient(cfgENV) + + totalAPICallFailures := 0 + for { + datacenters, resp, err := 
apiClient.DataCentersApi.DatacentersGet(context.Background()).Depth(depth).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling `DataCentersApi.DatacentersGet``: %v\n", err) + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + totalAPICallFailures++ + continue + } + newIonosDatacenters := make(map[string]IonosDCResources) + for _, datacenter := range *datacenters.Items { + var ( + coresTotalDC int32 = 0 + ramTotalDC int32 = 0 + serverTotalDC int32 = 0 + nlbTotalDC int32 = 0 + nlbTotalRulesDC int32 = 0 + albTotalRulesDC int32 = 0 + albTotalDC int32 = 0 + natTotalDC int32 = 0 + albNames string + nlbNames string + albRuleNames string + nlbRuleNames string + totalIPs int32 = 0 + totalAPICallFailures int32 = 0 + ) + servers, resp, err := apiClient.ServersApi.DatacentersServersGet(context.Background(), *datacenter.Id).Depth(depth).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling `ServersApi.DatacentersServersGet``: %v\n", err) + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + totalAPICallFailures++ + continue + } + + albList, err := fetchApplicationLoadbalancers(apiClient, &datacenter) + if err != nil { + fmt.Printf("Error retrieving ALBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } + nlbList, err := fetchNetworkLoadBalancers(apiClient, &datacenter) + if err != nil { + fmt.Printf("Error retrieving NLBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } + natList, err := fetchNATGateways(apiClient, &datacenter) + if err != nil { + fmt.Printf("Error retrieving NATs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } + ipBlocks, err := fetchIPBlocks(apiClient) + if err != nil { + fmt.Printf("Error retrieving IPs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } + + totalIPs = processIPBlocks(ipBlocks) + nlbNames, nlbTotalRulesDC = processNetworkLoadBalancers(nlbList) + albNames, albTotalRulesDC = 
processApplicationLoadBalancers(albList) + + nlbTotalDC = int32(len(*nlbList.Items)) + albTotalDC = int32(len(*albList.Items)) + natTotalDC = int32(len(*natList.Items)) + serverTotalDC = int32(len(*servers.Items)) + + for _, server := range *servers.Items { + coresTotalDC += *server.Properties.Cores + ramTotalDC += *server.Properties.Ram + } + + newIonosDatacenters[*datacenter.Properties.Name] = IonosDCResources{ + DCId: *datacenter.Id, + Cores: coresTotalDC, + Ram: ramTotalDC, + Servers: serverTotalDC, + NLBs: nlbTotalDC, + ALBs: albTotalDC, + NATs: natTotalDC, + NLBRules: nlbTotalRulesDC, + ALBRules: albTotalRulesDC, + ALBName: albNames, + NLBName: nlbNames, + ALBRuleName: albRuleNames, + NLBRuleName: nlbRuleNames, + TotalIPs: totalIPs, + TotalAPICallFailures: totalAPICallFailures, + } + + } + + m.Lock() + IonosDatacenters = newIonosDatacenters + m.Unlock() + CalculateDCTotals(m) + time.Sleep(time.Duration(cycletime) * time.Second) + } +} + +func CalculateDCTotals(m *sync.RWMutex) { + var ( + serverTotal int32 + ramTotal int32 + coresTotal int32 + datacentersTotal int32 + ) + m.RLock() + for _, dcResources := range IonosDatacenters { + serverTotal += dcResources.Servers + ramTotal += dcResources.Ram + coresTotal += dcResources.Cores + } + datacentersTotal = int32(len(IonosDatacenters)) + m.RUnlock() + m.Lock() + ServerTotal = serverTotal + RamTotal = ramTotal + CoresTotal = coresTotal + DataCenters = datacentersTotal + m.Unlock() +} +func PrintDCResources(m *sync.RWMutex) { + m.RLock() + defer m.RUnlock() + for dcName, dcResources := range IonosDatacenters { + fmt.Fprintf(os.Stdout, "%s:\n - UUID: %s\n", dcName, dcResources.DCId) + fmt.Fprintf(os.Stdout, " - Servers: %d\n", dcResources.Servers) + fmt.Fprintf(os.Stdout, "%s:\n - Cores: %d\n", dcName, dcResources.Cores) + fmt.Fprintf(os.Stdout, " - Ram: %d GB\n", dcResources.Ram/1024) + } +} +func PrintDCTotals(m *sync.RWMutex) { + m.RLock() + defer m.RUnlock() + log.Printf("Total - Datacenters: %d\n", DataCenters) 
+ log.Printf("Total - Servers: %d\n", ServerTotal) + log.Printf("Total - Cores: %d\n", CoresTotal) + log.Printf("Total - Ram: %d GB\n", RamTotal/1024) +} + +/* +Retrieves a list of NAT Gateways which are associated with specific datanceter using the ionoscloud API Client + +Parameters: +apiClient: An instance of APIClient for making API Requests +datacenter Pointer to an ionoscloud.Datacenter object representing the target datacenter. + +Returns: +- *ionoscloud.NatGateways: A pointer to ionoscloud.NatGateways which has NAT List or an error if it fails +If successful, it returns a pointer to the fetched NATs, otherwise it returns nil and an error message. +*/ +func fetchNATGateways(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Datacenter) (*ionoscloud.NatGateways, error) { + datacenterId := *datacenter.Id + natList, resp, err := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), datacenterId).Depth(2).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling NATGateways API: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if natList.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + return &natList, nil +} + +/* +Retrieves a list of Network Load Balancers (NLB) which are associated with specific datanceter using the ionoscloud API Client + +Parameters: +apiClient: An instance of APIClient for making API Requests +datacenter Pointer to an ionoscloud.Datacenter object representing the target datacenter. + +Returns: +- *ionoscloud.NetworkLoadBalancers: A pointer to ionoscloud.ApplicationLoadbalancers which has ALB List or an error if it fails +If successful, it returns a pointer to the fetched ALBs, otherwise it returns nil and an error message. 
+*/ +func fetchNetworkLoadBalancers(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Datacenter) (*ionoscloud.NetworkLoadBalancers, error) { + datacenterId := *datacenter.Id + nlbList, resp, err := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), datacenterId).Depth(2).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling NetworkLoadbalancers API: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if nlbList.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + + return &nlbList, nil +} + +/* +retrievers a list of IP Blocks from ionoscloud API + +Parameters: + - apiClient: An instance of ionoscloud.APIClient + +Returns: + +- pointer to ionoscloud.IpBlocks containing the fetched IP blocks, or nil if there are no items +in the resource. +- error: An error if there was an issue making the API call or if no IP blocks were found. +*/ +func fetchIPBlocks(apiClient *ionoscloud.APIClient) (*ionoscloud.IpBlocks, error) { + ipBlocks, resp, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(2).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling IPBlocks API: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if ipBlocks.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + + return &ipBlocks, nil +} + +/* +Retrieves a list of Application Load Balancers (ALB) which are associated with specific datanceter using the ionoscloud API Client + +Parameters: +apiClient: An instance of APIClient for making API Requests +datacenter Pointer to an ionoscloud.Datacenter object representing the target datacenter. 
+ +Returns: +- *ionoscloud.ApplicationLoadBalancers: A pointer to ionoscloud.ApplicationLoadbalancers which has ALB List or an error if it fails +If successful, it returns a pointer to the fetched ALBs, otherwise it returns nil and an error message. +*/ +func fetchApplicationLoadbalancers(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Datacenter) (*ionoscloud.ApplicationLoadBalancers, error) { + datacenterId := *datacenter.Id + albList, resp, err := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), datacenterId).Depth(2).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling ApplicationLoadBalancers API: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if albList.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + + return &albList, nil +} + +/* +Calculates total number of IP addresses from a list of IP Blocks + +Parameters: +- ipBlocks: A pointer to ionoscloud.IpBlocks containing a list of IP blocks to process. + +Returns: +- The total number of Ip addresses summed from all IP Blocks +*/ +func processIPBlocks(ipBlocks *ionoscloud.IpBlocks) int32 { + + var totalIPs int32 + + for _, ips := range *ipBlocks.Items { + if ips.Properties != nil && ips.Properties.Size != nil { + totalIPs += *ips.Properties.Size + } else { + fmt.Println("Ip Properties or Ip Properties Size is nil") + } + } + return totalIPs +} + +/* +process a list of Network Load Balancers to extract information about NLB names +and total forwarding rules across all NLBs. + +Parameter: + - a pointer to the NetworkLoadbalaners containig a list of NLBs to process + +Returns: + - string: names of loadbalancers + - int32: total number of forwarding rules + +If any NLB or its associated forwarding rules are nil, they are skipped during processing. 
+*/ +func processNetworkLoadBalancers(nlbList *ionoscloud.NetworkLoadBalancers) (string, int32) { + var ( + nlbNames string + nlbTotalRulesDC int32 + ) + + for _, nlb := range *nlbList.Items { + if nlb.Properties != nil && nlb.Properties.Name != nil { + nlbNames = *nlb.Properties.Name + } + nlbForwardingRules := nlb.Entities.Forwardingrules + if nlbForwardingRules != nil && nlbForwardingRules.Items != nil { + nlbTotalRulesDC = int32(len(*nlbForwardingRules.Items)) + for _, rule := range *nlbForwardingRules.Items { + if rule.Properties != nil && rule.Properties.Name != nil { + nlbNames = *rule.Properties.Name + } + } + } + } + return nlbNames, nlbTotalRulesDC +} + +/* +process a list of Application Load Balancers ALBs to extract information about ALB names and +total forwarding rules across al ALBs + +Parameters: + - a pointer to ApplicationLoadBalancers containing a list of ALBs to process + +Returns: + - string: names of application loadbalancers + - int32: total number of forwarding rules +*/ +func processApplicationLoadBalancers(albList *ionoscloud.ApplicationLoadBalancers) (string, int32) { + var ( + albNames string + albTotalRulesDC int32 + ) + + for _, alb := range *albList.Items { + if alb.Properties != nil && alb.Properties.Name != nil { + albNames = *alb.Properties.Name + } + albForwardingRules := alb.Entities.Forwardingrules + if albForwardingRules != nil && albForwardingRules.Items != nil { + albTotalRulesDC = int32(len(*albForwardingRules.Items)) + for _, rule := range *albForwardingRules.Items { + if rule.Properties != nil && rule.Properties.Name != nil { + albNames = *rule.Properties.Name + } + } + } + } + return albNames, albTotalRulesDC +} diff --git a/internal/postgres_collector.go b/internal/postgres_collector.go new file mode 100644 index 0000000..f9715a1 --- /dev/null +++ b/internal/postgres_collector.go @@ -0,0 +1,157 @@ +package internal + +import ( + "fmt" + "strconv" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +type 
postgresCollector struct { + mutex *sync.RWMutex + postgresTotalRamMetric *prometheus.GaugeVec + postgresTotalCPUMetric *prometheus.GaugeVec + postgresTotalStorageMetric *prometheus.GaugeVec + postgresTransactionRateMetric *prometheus.GaugeVec + postgresTotalStorageBytesMetric *prometheus.GaugeVec + postgresAvailableStorageBytesMetric *prometheus.GaugeVec + postgresDiskIOMetric *prometheus.GaugeVec + postgresCpuRateMetric *prometheus.GaugeVec + postgresLoadMetric *prometheus.GaugeVec + postgresTotalMemoryAvailableBytes *prometheus.GaugeVec +} + +func NewPostgresCollector(m *sync.RWMutex) *postgresCollector { + return &postgresCollector{ + mutex: m, + postgresTotalRamMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_ram_in_cluster", + Help: "Gives the total ammount of allocated RAM in cluster", + }, []string{"cluster", "owner", "db"}), + postgresTotalCPUMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_cpu_in_cluster", + Help: "Gives a total amount of CPU Cores in Cluster", + }, []string{"cluster", "owner", "db"}), + postgresTotalStorageMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_storage_in_cluster", + Help: "Gives a total amount of Storage in Cluster", + }, []string{"cluster", "owner", "db"}), + postgresTransactionRateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_transactions:rate2m", + Help: "Gives a Transaction Rate in postgres cluster in 2m", + }, []string{"cluster"}), + postgresTotalStorageBytesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_storage_metric", + Help: "Gives a Total Storage Metric in Bytes", + }, []string{"cluster"}), + postgresAvailableStorageBytesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_available_storage_metric", + Help: "Gives a Available Storage Metric in Bytes", + }, []string{"cluster"}), + 
postgresCpuRateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgress_cpu_rate5m", + Help: "Gives a CPU Rate (Average Utilization) over the past 5 Minutes", + }, []string{"cluster"}), + postgresDiskIOMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m", + Help: "The rate of disk I/O time, in seconds, over a five-minute period.", + }, []string{"cluster"}), + postgresLoadMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_load5", + Help: "Linux load average for the last 5 minutes.", + }, []string{"cluster"}), + postgresTotalMemoryAvailableBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_memory_available_bytes", + Help: "Available memory in bytes", + }, []string{"cluster"}), + } +} + +func (collector *postgresCollector) Describe(ch chan<- *prometheus.Desc) { + collector.postgresTotalCPUMetric.Describe(ch) + collector.postgresTotalRamMetric.Describe(ch) + collector.postgresTotalStorageMetric.Describe(ch) + collector.postgresTransactionRateMetric.Describe(ch) + collector.postgresTotalStorageBytesMetric.Describe(ch) + collector.postgresAvailableStorageBytesMetric.Describe(ch) + collector.postgresCpuRateMetric.Describe(ch) + collector.postgresDiskIOMetric.Describe(ch) + collector.postgresLoadMetric.Describe(ch) + collector.postgresTotalMemoryAvailableBytes.Describe(ch) +} + +func (collector *postgresCollector) Collect(ch chan<- prometheus.Metric) { + collector.mutex.RLock() + defer collector.mutex.RUnlock() + + metricsMutex.Lock() + collector.postgresTotalCPUMetric.Reset() + collector.postgresTotalRamMetric.Reset() + collector.postgresTotalStorageMetric.Reset() + metricsMutex.Unlock() + + for postgresName, postgresResources := range IonosPostgresClusters { + + for _, telemetry := range postgresResources.Telemetry { + for _, value := range telemetry.Values { + if len(value) != 2 { + fmt.Printf("Unexpected value 
length: %v\n", value) + continue + } + metricValue, ok := value[1].(float64) + if !ok { + strValue, ok := value[1].(string) + if !ok { + fmt.Printf("Unexpected type for metric %s value: %v\n", telemetry.Values, value[1]) + continue + } + + var err error + metricValue, err = strconv.ParseFloat(strValue, 64) + if err != nil { + fmt.Printf("Failed to parse metric value: %v\n", err) + continue + } + } + switch telemetry.Metric["__name__"] { + case "ionos_dbaas_postgres_transactions:rate2m": + collector.postgresTransactionRateMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_storage_total_bytes": + collector.postgresTotalStorageBytesMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_storage_available_bytes": + collector.postgresAvailableStorageBytesMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_cpu_rate5m": + collector.postgresCpuRateMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m": + collector.postgresDiskIOMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_load5": + collector.postgresLoadMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_memory_available_bytes": + collector.postgresTotalMemoryAvailableBytes.WithLabelValues(postgresName).Set(float64(metricValue)) + default: + // fmt.Printf("Unrecognised metric: %s\n", telemetry.Metric["__name__"]) + continue + } + } + } + + for _, dbName := range postgresResources.DatabaseNames { + + collector.postgresTotalCPUMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.CPU)) + collector.postgresTotalRamMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.RAM)) + collector.postgresTotalStorageMetric.WithLabelValues(postgresName, 
postgresResources.Owner, dbName).Set(float64(postgresResources.Storage)) + } + + } + collector.postgresTotalCPUMetric.Collect(ch) + collector.postgresTotalRamMetric.Collect(ch) + collector.postgresTotalStorageMetric.Collect(ch) + collector.postgresTransactionRateMetric.Collect(ch) + collector.postgresTotalStorageBytesMetric.Collect(ch) + collector.postgresAvailableStorageBytesMetric.Collect(ch) + collector.postgresCpuRateMetric.Collect(ch) + collector.postgresDiskIOMetric.Collect(ch) + collector.postgresLoadMetric.Collect(ch) + collector.postgresTotalMemoryAvailableBytes.Collect(ch) +} diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go new file mode 100644 index 0000000..b2991e3 --- /dev/null +++ b/internal/postgres_scraper.go @@ -0,0 +1,222 @@ +package internal + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "sync" + "time" + + psql "github.com/ionos-cloud/sdk-go-dbaas-postgres" + "github.com/joho/godotenv" +) + +type IonosPostgresResources struct { + ClusterName string + CPU int32 + RAM int32 + Storage int32 + Owner string + DatabaseNames []string + Telemetry []TelemetryMetric +} + +type TelemetryMetric struct { + Metric map[string]string `json:"metric"` + Values [][]interface{} `json:"values"` +} + +type TelemetryResponse struct { + Status string `json:"status"` + Data struct { + ResultType string `json:"resultType"` + Result []TelemetryMetric `json:"result"` + } `json:"data"` +} + +var ( + ClusterCoresTotal int32 = 0 + ClusterRamTotal int32 = 0 + ClusterTotal int32 = 0 + IonosPostgresClusters = make(map[string]IonosPostgresResources) +) + +func PostgresCollectResources(m *sync.RWMutex, configPath, envFile string, cycletime int32) { + err := godotenv.Load(envFile) + if err != nil { + fmt.Println("Error loading .env file (optional)") + } + + cfgENV := psql.NewConfigurationFromEnv() + apiClient := psql.NewAPIClient(cfgENV) + + config, err := LoadConfig(configPath) + if err != nil { + fmt.Println("Failed to load 
config:", err) + } + for { + processCluster(apiClient, m, config.Metrics) + time.Sleep(time.Duration(cycletime) * time.Second) + } +} + +func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []MetricConfig) { + datacenters, err := fetchClusters(apiClient) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch clusters: %v\n", err) + } + if datacenters == nil || datacenters.Items == nil { + fmt.Fprintf(os.Stderr, "datacenters or datacenters Items are nil\n") + return + } + newIonosPostgresResources := make(map[string]IonosPostgresResources) + + for _, clusters := range *datacenters.Items { + if clusters.Id == nil || clusters.Properties == nil { + fmt.Fprintf(os.Stderr, "Cluster or Cluster Properties are nil\n") + continue + } + clusterName := clusters.Properties.DisplayName + if clusterName == nil { + fmt.Fprintf(os.Stderr, "Cluster name is nil\n") + continue + } + databaseNames, err := fetchDatabases(apiClient, *clusters.Id) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch databases for cluster %s: %v\n", *clusters.Properties.DisplayName, err) + continue + } + databaseOwner, err := fetchOwner(apiClient, *clusters.Id) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch owner for database %s: %v\n", *clusters.Properties.DisplayName, err) + continue + } + + telemetryData := make([]TelemetryMetric, 0) + + for _, metricConfig := range metrics { + telemetryResp, err := fetchTelemetryMetrics(os.Getenv("IONOS_TOKEN"), fmt.Sprintf("%s{postgres_cluster=\"%s\"}", metricConfig.Name, *clusters.Id)) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch telemetry metrics for cluster %s: %v\n", *clusters.Id, err) + continue + } + telemetryData = append(telemetryData, telemetryResp.Data.Result...) 
+ } + + newIonosPostgresResources[*clusters.Properties.DisplayName] = IonosPostgresResources{ + ClusterName: *clusters.Properties.DisplayName, + CPU: *clusters.Properties.Cores, + RAM: *clusters.Properties.Ram, + Storage: *clusters.Properties.StorageSize, + DatabaseNames: databaseNames, + Owner: databaseOwner, + Telemetry: telemetryData, + } + } + m.Lock() + IonosPostgresClusters = newIonosPostgresResources + m.Unlock() + +} + +func fetchClusters(apiClient *psql.APIClient) (*psql.ClusterList, error) { + datacenters, resp, err := apiClient.ClustersApi.ClustersGet(context.Background()).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling ClustersApi: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if datacenters.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + + return &datacenters, nil +} + +func fetchDatabases(apiClient *psql.APIClient, clusterID string) ([]string, error) { + databases, resp, err := apiClient.DatabasesApi.DatabasesList(context.Background(), clusterID).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling DatabasesApi: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if databases.Items == nil { + return nil, fmt.Errorf("no databases found for cluster %s", clusterID) + } + + var databaseNames []string + + for _, db := range *databases.Items { + if db.Properties != nil && db.Properties.Name != nil { + databaseNames = append(databaseNames, *db.Properties.Name) + } + } + return databaseNames, nil +} + +func fetchOwner(apiClient *psql.APIClient, clusterID string) (string, error) { + databases, resp, err := apiClient.DatabasesApi.DatabasesList(context.Background(), clusterID).Execute() + if err != nil { + 
fmt.Fprintf(os.Stderr, "Error when calling DatabasesApi: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return "", err + } + + if databases.Items == nil { + return "", fmt.Errorf("no databases found for cluster %s", clusterID) + } + var owner = "" + for _, db := range *databases.Items { + if db.Properties != nil && db.Properties.Owner != nil { + owner = *db.Properties.Owner + } + } + return owner, nil +} + +func fetchTelemetryMetrics(apiToken, query string) (*TelemetryResponse, error) { + req, err := http.NewRequest("GET", "https://dcd.ionos.com/telemetry/api/v1/query_range", nil) + if err != nil { + return nil, err + } + + q := req.URL.Query() + q.Add("query", query) + q.Add("start", time.Now().Add(-time.Hour).Format(time.RFC3339)) + q.Add("end", time.Now().Format(time.RFC3339)) + q.Add("step", "60") + req.URL.RawQuery = q.Encode() + + req.Header.Set("Authorization", "Bearer "+apiToken) + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var telemetryResp TelemetryResponse + if err := json.NewDecoder(resp.Body).Decode(&telemetryResp); err != nil { + fmt.Printf("Fialed to decode json response: %v\n", err) + return nil, err + } + + return &telemetryResp, nil +} diff --git a/internal/prometheus.go b/internal/prometheus.go index 2171211..3268cc0 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -10,117 +10,36 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// Define a struct for you collector that contains pointers -// to prometheus descriptors for each metric you wish to expose. -// Note you can also include fields of other types if they provide utility -// but we just won't be exposing them as metrics. 
-type ionosCollector struct { - mutex *sync.RWMutex - coresMetric *prometheus.GaugeVec - ramMetric *prometheus.GaugeVec - serverMetric *prometheus.GaugeVec - dcCoresMetric *prometheus.GaugeVec - dcRamMetric *prometheus.GaugeVec - dcServerMetric *prometheus.GaugeVec - dcDCMetric *prometheus.GaugeVec -} - -var mutex *sync.RWMutex +// var mutex *sync.RWMutex -// You must create a constructor for you collector that -// initializes every descriptor and returns a pointer to the collector -func newIonosCollector(m *sync.RWMutex) *ionosCollector { - mutex = m - return &ionosCollector{ - mutex: m, - coresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dc_cores_amount", - Help: "Shows the number of currently active cores in an IONOS datacenter", - }, []string{"datacenter"}), - ramMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dc_ram_gb", - Help: "Shows the number of currently active RAM in an IONOS datacenter", - }, []string{"datacenter"}), - serverMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dc_server_amount", - Help: "Shows the number of currently active servers in an IONOS datacenter", - }, []string{"datacenter"}), - dcCoresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_cores_amount", - Help: "Shows the number of currently active cores of an IONOS account", - }, []string{"account"}), - dcRamMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_ram_gb", - Help: "Shows the number of currently active RAM of an IONOS account", - }, []string{"account"}), - dcServerMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_server_amount", - Help: "Shows the number of currently active servers of an IONOS account", - }, []string{"account"}), - dcDCMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_datacenter_amount", - Help: "Shows the number of datacenters of an IONOS account", - }, []string{"account"}), - } +func (collector 
*ionosCollector) GetMutex() *sync.RWMutex { + return collector.mutex } -// Each and every collector must implement the Describe function. -// It essentially writes all descriptors to the prometheus desc channel. -func (collector *ionosCollector) Describe(ch chan<- *prometheus.Desc) { - - //Update this section with the each metric you create for a given collector - collector.coresMetric.Describe(ch) - collector.ramMetric.Describe(ch) - collector.serverMetric.Describe(ch) - collector.dcCoresMetric.Describe(ch) - collector.dcRamMetric.Describe(ch) - collector.dcServerMetric.Describe(ch) - collector.dcDCMetric.Describe(ch) +func (collector *s3Collector) GetMutex() *sync.RWMutex { + return collector.mutex } -// Collect implements required collect function for all promehteus collectors -func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { - - //Implement logic here to determine proper metric value to return to prometheus - //for each descriptor or call other functions that do so. - collector.mutex.RLock() - defer collector.mutex.RUnlock() - - // Reset metrics in case a datacenter was removed - collector.coresMetric.Reset() - collector.ramMetric.Reset() - collector.serverMetric.Reset() - for dcName, dcResources := range IonosDatacenters { - //Write latest value for each metric in the prometheus metric channel. 
- collector.coresMetric.WithLabelValues(dcName).Set(float64(dcResources.Cores)) - collector.ramMetric.WithLabelValues(dcName).Set(float64(dcResources.Ram / 1024)) // MB -> GB - collector.serverMetric.WithLabelValues(dcName).Set(float64(dcResources.Servers)) - } - collector.dcCoresMetric.WithLabelValues("SVS").Set(float64(CoresTotal)) - collector.dcRamMetric.WithLabelValues("SVS").Set(float64(RamTotal / 1024)) // MB -> GB - collector.dcServerMetric.WithLabelValues("SVS").Set(float64(ServerTotal)) - collector.dcDCMetric.WithLabelValues("SVS").Set(float64(DataCenters)) - - collector.coresMetric.Collect(ch) - collector.ramMetric.Collect(ch) - collector.serverMetric.Collect(ch) - collector.dcCoresMetric.Collect(ch) - collector.dcRamMetric.Collect(ch) - collector.dcServerMetric.Collect(ch) - collector.dcDCMetric.Collect(ch) -} -func (collector *ionosCollector) GetMutex() *sync.RWMutex { +func (collector *postgresCollector) GetMutex() *sync.RWMutex { return collector.mutex } func StartPrometheus(m *sync.RWMutex) { - ic := newIonosCollector(m) - prometheus.MustRegister(ic) - prometheus.MustRegister(httpRequestsTotal) - + dcMutex := &sync.RWMutex{} + s3Mutex := &sync.RWMutex{} + pgMutex := &sync.RWMutex{} + + ionosCollector := NewIonosCollector(dcMutex) + s3Collector := NewS3Collector(s3Mutex) + pgCollector := NewPostgresCollector(pgMutex) + + prometheus.MustRegister(ionosCollector) + prometheus.MustRegister(s3Collector) + prometheus.MustRegister(pgCollector) + prometheus.MustRegister(HttpRequestsTotal) } -var httpRequestsTotal = prometheus.NewCounterVec( +var HttpRequestsTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "http_requests_total", Help: "Total number of HTTP requests", @@ -130,8 +49,8 @@ var httpRequestsTotal = prometheus.NewCounterVec( ) func HealthCheck(w http.ResponseWriter, r *http.Request) { - PrintDCTotals(mutex) - httpRequestsTotal.WithLabelValues("/healthcheck", r.Method).Inc() + // PrintDCTotals(mutex) + 
HttpRequestsTotal.WithLabelValues("/healthcheck", r.Method).Inc() w.WriteHeader(http.StatusOK) io.WriteString(w, "OK") } diff --git a/internal/s3_collector.go b/internal/s3_collector.go new file mode 100644 index 0000000..18e31d1 --- /dev/null +++ b/internal/s3_collector.go @@ -0,0 +1,182 @@ +package internal + +import ( + "sync" + + //"time" + + "github.com/prometheus/client_golang/prometheus" +) + +type s3Collector struct { + mutex *sync.RWMutex + s3TotalGetRequestSizeMetric *prometheus.GaugeVec + s3TotalGetResponseSizeMetric *prometheus.GaugeVec + s3TotalPutRequestSizeMetric *prometheus.GaugeVec + s3TotalPutResponseSizeMetric *prometheus.GaugeVec + s3TotalPostRequestSizeMetric *prometheus.GaugeVec + s3TotalPostResponseSizeMetric *prometheus.GaugeVec + s3TotalHeadRequestSizeMetric *prometheus.GaugeVec + s3TotalHeadResponseSizeMetric *prometheus.GaugeVec + s3TotalNumberOfGetRequestsMetric *prometheus.GaugeVec + s3TotalNumberOfPutRequestsMetric *prometheus.GaugeVec + s3TotalNumberOfPostRequestsMetric *prometheus.GaugeVec + s3TotalNumberOfHeadRequestsMetric *prometheus.GaugeVec +} + +func NewS3Collector(m *sync.RWMutex) *s3Collector { + return &s3Collector{ + mutex: m, + s3TotalGetRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_get_request_size_in_bytes", + Help: "Gives the total size of s3 GET Request in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalGetResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_get_response_size_in_bytes", + Help: "Gives the total size of s3 GET Response in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalPutRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_put_request_size_in_bytes", + Help: "Gives the total size of s3 PUT Request in Bytes in one Bucket", + }, []string{"bucket", "method", 
"region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalPutResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_put_response_size_in_bytes", + Help: "Gives the total size of s3 PUT Response in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalPostRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_post_request_size_in_bytes", + Help: "Gives the total size of s3 POST Request in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalPostResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_post_response_size_in_bytes", + Help: "Gives the total size of s3 POST Response in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalHeadRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_head_request_size_in_bytes", + Help: "Gives the total size of s3 HEAD Request in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalHeadResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_head_response_size_in_bytes", + Help: "Gives the total size of s3 HEAD Response in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalNumberOfGetRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_get_requests", + Help: "Gives the total number of S3 GET HTTP Requests in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalNumberOfPutRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_put_requests", + Help: "Gives the total number of S3 PUT HTTP Requests in one Bucket", + 
}, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalNumberOfPostRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_post_requests", + Help: "Gives the total number of S3 Post Requests in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalNumberOfHeadRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_head_requests", + Help: "Gives the total number of S3 HEAD HTTP Requests in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + } +} + +func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { + collector.s3TotalGetRequestSizeMetric.Describe(ch) + collector.s3TotalGetResponseSizeMetric.Describe(ch) + collector.s3TotalPutRequestSizeMetric.Describe(ch) + collector.s3TotalPutResponseSizeMetric.Describe(ch) + collector.s3TotalPostRequestSizeMetric.Describe(ch) + collector.s3TotalPostResponseSizeMetric.Describe(ch) + collector.s3TotalHeadRequestSizeMetric.Describe(ch) + collector.s3TotalHeadResponseSizeMetric.Describe(ch) + collector.s3TotalNumberOfGetRequestsMetric.Describe(ch) + collector.s3TotalNumberOfPutRequestsMetric.Describe(ch) + collector.s3TotalNumberOfPostRequestsMetric.Describe(ch) + collector.s3TotalNumberOfHeadRequestsMetric.Describe(ch) + +} + +func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { + + collector.mutex.RLock() + defer collector.mutex.RUnlock() + + metricsMutex.Lock() + collector.s3TotalGetRequestSizeMetric.Reset() + collector.s3TotalGetResponseSizeMetric.Reset() + collector.s3TotalPutRequestSizeMetric.Reset() + collector.s3TotalPutResponseSizeMetric.Reset() + collector.s3TotalPostRequestSizeMetric.Reset() + collector.s3TotalPostResponseSizeMetric.Reset() + collector.s3TotalHeadRequestSizeMetric.Reset() + collector.s3TotalHeadResponseSizeMetric.Reset() + 
collector.s3TotalNumberOfGetRequestsMetric.Reset() + collector.s3TotalNumberOfPutRequestsMetric.Reset() + collector.s3TotalNumberOfPostRequestsMetric.Reset() + collector.s3TotalNumberOfHeadRequestsMetric.Reset() + + defer metricsMutex.Unlock() + for s3Name, s3Resources := range IonosS3Buckets { + region := s3Resources.Regions + owner := s3Resources.Owner + tags := TagsForPrometheus[s3Name] + // if !ok { + // // fmt.Printf("No tags found for bucket %s\n", s3Name) + // } + //tags of buckets change to tags you have defined on s3 buckets + enviroment := tags["Enviroment"] + namespace := tags["Namespace"] + tenant := tags["Tenant"] + for method, requestSize := range s3Resources.RequestSizes { + + switch method { + case MethodGET: + collector.s3TotalGetRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) + case MethodPOST: + collector.s3TotalPostRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) + case MethodHEAD: + collector.s3TotalHeadRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) + case MethodPUT: + collector.s3TotalPutRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) + } + + } + for method, responseSize := range s3Resources.ResponseSizes { + + switch method { + case MethodGET: + collector.s3TotalGetResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodPOST: + collector.s3TotalPostResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodHEAD: + collector.s3TotalHeadResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodPUT: + 
collector.s3TotalPutResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + } + } + + for method, responseSize := range s3Resources.Methods { + switch method { + case MethodGET: + collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodPOST: + collector.s3TotalNumberOfPostRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodHEAD: + collector.s3TotalNumberOfHeadRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodPUT: + collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + } + } + } + + collector.s3TotalGetRequestSizeMetric.Collect(ch) + collector.s3TotalGetResponseSizeMetric.Collect(ch) + collector.s3TotalPutRequestSizeMetric.Collect(ch) + collector.s3TotalPutResponseSizeMetric.Collect(ch) + collector.s3TotalPostRequestSizeMetric.Collect(ch) + collector.s3TotalPostResponseSizeMetric.Collect(ch) + collector.s3TotalHeadRequestSizeMetric.Collect(ch) + collector.s3TotalHeadResponseSizeMetric.Collect(ch) + collector.s3TotalNumberOfGetRequestsMetric.Collect(ch) + collector.s3TotalNumberOfPutRequestsMetric.Collect(ch) + collector.s3TotalNumberOfPostRequestsMetric.Collect(ch) + collector.s3TotalNumberOfHeadRequestsMetric.Collect(ch) +} diff --git a/internal/s3_scraper.go b/internal/s3_scraper.go new file mode 100644 index 0000000..e4b90a9 --- /dev/null +++ b/internal/s3_scraper.go @@ -0,0 +1,316 @@ +package internal + +import ( + "bufio" + "fmt" + "io" + "log" + "os" + "regexp" + "strconv" + "sync" + "time" + + aws "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + 
"github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +type EndpointConfig struct { + Region string + AccessKey string + SecretKey string + Endpoint string +} + +var ( + IonosS3Buckets = make(map[string]Metrics) + TagsForPrometheus = make(map[string]map[string]string) + metricsMutex sync.Mutex +) + +type Metrics struct { + Methods map[string]int32 + RequestSizes map[string]int64 + ResponseSizes map[string]int64 + Regions string + Owner string +} + +const ( + MethodGET = "GET" + MethodPUT = "PUT" + MethodPOST = "POST" + MethodHEAD = "HEAD" + objectPerPage = 1000 + maxConcurrent = 10 +) + +func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S3, error) { + sess, err := session.NewSession(&aws.Config{ + Region: aws.String(region), + Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""), + Endpoint: aws.String(endpoint), + }) + if err != nil { + log.Printf("Error establishing session with AWS S3 Endpoint: %v", err) + return nil, fmt.Errorf("error establishing session with AWS S3 Endpoint: %s", err) + } + return s3.New(sess), nil +} + +func S3CollectResources(m *sync.RWMutex, cycletime int32) { + secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY") + accessKey := os.Getenv("AWS_ACCESS_KEY_ID") + + if accessKey == "" || secretKey == "" { + log.Println("AWS credentials are not set in the environment variables.") + return + } + endpoints := map[string]EndpointConfig{ + "eu-central-2": { + Region: "eu-central-2", + AccessKey: accessKey, + SecretKey: secretKey, + Endpoint: "https://s3-eu-central-2.ionoscloud.com", + }, + "de": { + Region: "de", + AccessKey: accessKey, + SecretKey: secretKey, + Endpoint: "https://s3-eu-central-1.ionoscloud.com", + }, + } + semaphore := make(chan struct{}, maxConcurrent) + for { + var wg sync.WaitGroup + for _, endpoint := range endpoints { + + if _, exists := IonosS3Buckets[endpoint.Endpoint]; exists { + continue + } + client, err := createS3ServiceClient(endpoint.Region, 
accessKey, secretKey, endpoint.Endpoint) + + if err != nil { + fmt.Printf("Error creating service client for endpoint %s: %v\n", endpoint, err) + continue + } + fmt.Println("Using service client for endpoint:", endpoint) + + result, err := client.ListBuckets(nil) + + if err != nil { + fmt.Println("Error while Listing Buckets", err) + continue + } + + for _, bucket := range result.Buckets { + bucketName := *bucket.Name + if _, exists := IonosS3Buckets[bucketName]; !exists { + metrics := Metrics{ + Methods: make(map[string]int32), + RequestSizes: make(map[string]int64), + ResponseSizes: make(map[string]int64), + Regions: "", + } + IonosS3Buckets[bucketName] = metrics + } + wg.Add(1) + fmt.Println("Processing Bucket: ", bucketName) + go func(client *s3.S3, bucketName string) { + defer wg.Done() + defer func() { + if r := recover(); r != nil { + log.Printf("Recovered in goroutine: %v", r) + } + }() + if err := GetHeadBucket(client, bucketName); err != nil { + if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 { + return + } + log.Println("Error checking the bucket head:", err) + return + } + semaphore <- struct{}{} + defer func() { + <-semaphore + }() + processBucket(client, bucketName) + }(client, bucketName) + } + + } + wg.Wait() + time.Sleep(time.Duration(cycletime) * time.Second) + } + +} + +func processBucket(client *s3.S3, bucketName string) { + + var wg sync.WaitGroup + logEntryRegex := regexp.MustCompile(`(GET|PUT|HEAD|POST) \/[^"]*" \d+ \S+ (\d+|-) (\d+|-) \d+ (\d+|-)`) + semaphore := make(chan struct{}, maxConcurrent) + + getBucketTags(client, bucketName) + metrics := Metrics{ + Methods: make(map[string]int32), + RequestSizes: make(map[string]int64), + ResponseSizes: make(map[string]int64), + Regions: "", + Owner: "", + } + metrics.Regions = *client.Config.Region + + continuationToken := "" + + getAclInput := &s3.GetBucketAclInput{ + Bucket: aws.String(bucketName), + } + getAclOutput, err := client.GetBucketAcl(getAclInput) + if 
err != nil { + log.Printf("Error retrieving ACL for bucket %s: %v\n", bucketName, err) + return + } + if len(*getAclOutput.Owner.DisplayName) > 0 { + metrics.Owner = *getAclOutput.Owner.DisplayName + } else { + metrics.Owner = "Unknown" + } + + for { + + objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ + Bucket: aws.String(bucketName), + Prefix: aws.String("logs/"), + ContinuationToken: aws.String(continuationToken), + MaxKeys: aws.Int64(objectPerPage), + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case "NoSuchBucket": + log.Printf("bucket %s does not exist\n", bucketName) + default: + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "AccessDenied" { + log.Println("Bucket not in current endpoint skipping") + } + } + fmt.Printf("error listing objects in bucket %s: %s\n", bucketName, aerr.Message()) + } + } + return + } + if len(objectList.Contents) == 0 { + log.Printf("bucket %s does not contain any objects with the 'logs/' prefix\n", bucketName) + return + } + for _, object := range objectList.Contents { + wg.Add(1) + semaphore <- struct{}{} + go func(object *s3.Object) { + defer wg.Done() + defer func() { <-semaphore }() + processObject(client, bucketName, object, logEntryRegex, &metrics) + }(object) + } + if !aws.BoolValue(objectList.IsTruncated) { + break + } + continuationToken = *objectList.NextContinuationToken + } + wg.Wait() + metricsMutex.Lock() + IonosS3Buckets[bucketName] = metrics + metricsMutex.Unlock() +} + +func getBucketTags(client *s3.S3, bucketName string) { + tagsOutput, err := client.GetBucketTagging(&s3.GetBucketTaggingInput{ + Bucket: aws.String(bucketName), + }) + + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case s3.ErrCodeNoSuchBucket: + log.Printf("Bucket %s does not exist\n", bucketName) + return + case "NoSuchTagSet": + log.Printf("No tags set for Bucket %s\n", bucketName) + return + default: + log.Printf("Error retrieving 
tags in false endpoint for bucket %s: %s\n", bucketName, aerr.Message()) + return + } + } else { + log.Printf("Error retrieving tags for bucket %s: %s\n", bucketName, err.Error()) + return + } + } + tags := make(map[string]string) + for _, tag := range tagsOutput.TagSet { + tags[*tag.Key] = *tag.Value + } + + metricsMutex.Lock() + TagsForPrometheus[bucketName] = tags + metricsMutex.Unlock() +} + +func processObject(client *s3.S3, bucketName string, object *s3.Object, logEntryRegex *regexp.Regexp, metrics *Metrics) { + downloadInput := &s3.GetObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(*object.Key), + } + result, err := client.GetObject(downloadInput) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "AccessDenied" { + log.Printf("Access Denied error for object %s in bucket %s\n", *object.Key, bucketName) + return + } + log.Println("Error downloading object", err) + return + } + defer result.Body.Close() + + reader := bufio.NewReader(result.Body) + for { + line, err := reader.ReadBytes('\n') + if err != nil { + if err != io.EOF { + log.Println("Problem reading the body", err) + } + break + } + processLine(line, logEntryRegex, metrics) + } +} + +func processLine(line []byte, logEntryRegex *regexp.Regexp, metrics *Metrics) { + matches := logEntryRegex.FindAllStringSubmatch(string(line), -1) + for _, match := range matches { + metricsMutex.Lock() + method := match[1] + requestSizeStr := match[3] + responseSizeStr := match[2] + + if requestSizeStr != "-" { + requestSize, err := strconv.ParseInt(requestSizeStr, 10, 64) + if err == nil { + metrics.RequestSizes[method] += requestSize + } + } + if responseSizeStr != "-" { + responseSize, err := strconv.ParseInt(responseSizeStr, 10, 64) + if err == nil { + metrics.ResponseSizes[method] += responseSize + } + } + metrics.Methods[method]++ + metricsMutex.Unlock() + } +} diff --git a/main.go b/main.go index 2e23ac3..4bd1b49 100644 --- a/main.go +++ b/main.go @@ -1,9 +1,11 @@ 
package main

import (
	"flag"
	"ionos-exporter/internal"
	"log"
	"net/http"
	"os"
	"strconv"
	"sync"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	m               = &sync.RWMutex{} // Mutex to sync access to the Datacenter map
	exporterPort    string            // Port to be used for exposing the metrics
	ionos_api_cycle int32             // Cycle time in seconds to query the IONOS API for changes, not the Prometheus scraping interval
)

// main wires up the exporter: parses flags, starts the background collectors
// (IONOS datacenter, optional S3, Postgres) and serves /metrics and
// /healthcheck on the configured port.
func main() {
	configPath := flag.String("config", "/etc/ionos-exporter/config.yaml", "Path to configuration file")
	envFile := flag.String("env", "", "Path to env file (optional)")
	flag.Parse()

	// Warn (but continue) when the config file is missing. The original code
	// only performed this check when -env was set, although the message
	// concerns the config file — that gate looked unintended.
	if _, err := os.Stat(*configPath); os.IsNotExist(err) {
		log.Printf("Warning: config file not found at %s, continuing without it", *configPath)
	}

	exporterPort = internal.GetEnv("IONOS_EXPORTER_APPLICATION_CONTAINER_PORT", "9100")
	if cycletime, err := strconv.ParseInt(internal.GetEnv("IONOS_EXPORTER_API_CYCLE", "200"), 10, 32); err != nil {
		log.Fatal("Cannot convert IONOS_API_CYCLE to int")
	} else {
		ionos_api_cycle = int32(cycletime)
	}

	go internal.CollectResources(m, *envFile, ionos_api_cycle)

	// Parse the enable flag first and fail loudly on a malformed value. The
	// original checked err only inside `if s3_enabled == true`, a branch that
	// a parse error (which yields false) could never reach — so bad input
	// silently disabled the S3 collector.
	s3Enabled, err := strconv.ParseBool(internal.GetEnv("IONOS_EXPORTER_S3_ENABLED", "false"))
	if err != nil {
		log.Fatal("Cannot convert IONOS_EXPORTER_S3_ENABLED value to bool")
	}
	if s3Enabled {
		go internal.S3CollectResources(m, ionos_api_cycle)
	}

	go internal.PostgresCollectResources(m, *configPath, *envFile, ionos_api_cycle)

	internal.PrintDCResources(m)
	internal.StartPrometheus(m)
	http.Handle("/metrics", promhttp.Handler())
	http.Handle("/healthcheck", http.HandlerFunc(internal.HealthCheck))
	log.Fatal(http.ListenAndServe(":"+exporterPort, nil))
}