From 75cbc310fa704e3c55274878144dc7f7241c1fc4 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 2 May 2024 14:40:46 +0200 Subject: [PATCH 01/55] implementation for s3 metrics and ipmetrics --- internal/ipcollector.go | 58 +++++++++++ internal/prometheus.go | 176 +++++++++++++++++++++++++++++-- internal/s3collector.go | 226 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 454 insertions(+), 6 deletions(-) create mode 100644 internal/ipcollector.go create mode 100644 internal/s3collector.go diff --git a/internal/ipcollector.go b/internal/ipcollector.go new file mode 100644 index 0000000..7a4525d --- /dev/null +++ b/internal/ipcollector.go @@ -0,0 +1,58 @@ +package internal + +import ( + "context" + "fmt" + "os" + + ionoscloud "github.com/ionos-cloud/sdk-go/v6" + "github.com/joho/godotenv" +) + +var ( + ipName string + totalIPs int32 + IonosIPs = make(map[string]IonosIPResources) +) + +type IonosIPResources struct { + IPName string + TotalIPs int32 +} + +func IPCollectResources(apiClient *ionoscloud.APIClient) { + file, _ := os.Create("Ipsoutput.txt") + + defer file.Close() + + oldStdout := os.Stdout + defer func() { os.Stdout = oldStdout }() + os.Stdout = file + + err := godotenv.Load(".env") + if err != nil { + fmt.Println("Error loading .env file") + } + + newIonosIPResources := make(map[string]IonosIPResources) + // newIonosIPResources := make(map[string]IonosIPResources) + ipBlocks, _, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(3).Execute() + + if err != nil { + fmt.Println("Problem with the API Client") + } + + totalIPs = 0 + for _, ips := range *ipBlocks.Items { + totalIPs += *ips.Properties.Size + fmt.Println("Hey this is the size of IPs", totalIPs) + + newIonosIPResources[*ips.Properties.Name] = IonosIPResources{ + + TotalIPs: totalIPs, + } + } + + fmt.Println("Heyo") + +} diff --git a/internal/prometheus.go b/internal/prometheus.go index 2171211..e243f88 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ 
-3,6 +3,7 @@ package internal import ( "io" "net/http" + "os" "sync" //"time" @@ -25,14 +26,73 @@ type ionosCollector struct { dcDCMetric *prometheus.GaugeVec } +type lbCollector struct { + mutex *sync.RWMutex + nlbsMetric *prometheus.GaugeVec + albsMetric *prometheus.GaugeVec + natsMetric *prometheus.GaugeVec + dcDCNLBMetric *prometheus.GaugeVec + dcDCALBMetric *prometheus.GaugeVec + dcDCNATMetric *prometheus.GaugeVec + dcNLBRulesMetric *prometheus.GaugeVec + dcALBRulesMetric *prometheus.GaugeVec +} + +type s3Collector struct { + mutex *sync.RWMutex + s3TotalGetMethodSizeMetric *prometheus.GaugeVec + s3TotalPutMethodSizeMetric *prometheus.GaugeVec + s3TotalNumberOfGetRequestsMetric *prometheus.GaugeVec + s3TotalNumberOfPutRequestsMetric *prometheus.GaugeVec +} + var mutex *sync.RWMutex +func newLBCollector(m *sync.RWMutex) *lbCollector { + mutex = m + return &lbCollector{ + mutex: &sync.RWMutex{}, + nlbsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_networkloadbalancer_amount", + Help: "Shows the number of active Network Loadbalancers in an IONOS datacenter", + }, []string{"datacenter", "nlb_name", "nlb_rules_name"}), + albsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_applicationloadbalancer_amount", + Help: "Shows the number of active Application Loadbalancers in an IONOS datacenter", + }, []string{"datacenter", "alb_name", "alb_rules_name"}), + natsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_nat_gateways_amount", + Help: "Shows the number of NAT Gateways in an IONOS datacenter", + }, []string{"datacenter"}), + dcDCNLBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_networkloadbalancer_amount", + Help: "Shows the total number of Network Loadbalancers in IONOS Account", + }, []string{"account"}), + dcDCALBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_applicationbalancer_amount", + Help: "Shows the total number of Application Loadbalancers in 
IONOS Account", + }, []string{"account"}), + dcDCNATMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_nat_gateways_amount", + Help: "Shows the total number of NAT Gateways in IONOS Account", + }, []string{"account"}), + dcNLBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_number_of_nlb_rules", + Help: "Shows the total number of NLB Rules in IONOS Account", + }, []string{"nlb_rules"}), + dcALBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_nmumber_of_alb_rules", + Help: "Shows the total number of ALB Rules in IONOS Account", + }, []string{"alb_rules"}), + } +} + // You must create a constructor for you collector that // initializes every descriptor and returns a pointer to the collector func newIonosCollector(m *sync.RWMutex) *ionosCollector { mutex = m return &ionosCollector{ - mutex: m, + mutex: &sync.RWMutex{}, coresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dc_cores_amount", Help: "Shows the number of currently active cores in an IONOS datacenter", @@ -64,6 +124,96 @@ func newIonosCollector(m *sync.RWMutex) *ionosCollector { } } +func newS3Collector(m *sync.RWMutex) *s3Collector { + mutex = m + return &s3Collector{ + mutex: &sync.RWMutex{}, + s3TotalGetMethodSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_size_of_get_requests_in_bytes", + Help: "Gives the total size of s3 GET HTTP Request in Bytes", + }, []string{"bucket_name"}), + s3TotalPutMethodSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_size_of_put_requests_in_bytes", + Help: "Gives the total size of s3 PUT HTTP Request in Bytes", + }, []string{"bucket_name"}), + s3TotalNumberOfGetRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_get_requests", + Help: "Gives the total number of S3 GET HTTP Requests", + }, []string{"bucket_name"}), + s3TotalNumberOfPutRequestsMetric: 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_put_requests", + Help: "Gives the total number of S3 PUT HTTP Requests", + }, []string{"bucket_name"}), + } +} + +func (collector *lbCollector) Describe(ch chan<- *prometheus.Desc) { + + collector.nlbsMetric.Describe(ch) + collector.albsMetric.Describe(ch) + collector.natsMetric.Describe(ch) + collector.dcDCNLBMetric.Describe(ch) + collector.dcDCALBMetric.Describe(ch) + collector.dcDCNATMetric.Describe(ch) + collector.dcALBRulesMetric.Describe(ch) + collector.dcNLBRulesMetric.Describe(ch) + +} + +func (collector *lbCollector) Collect(ch chan<- prometheus.Metric) { + collector.mutex.RLock() + defer collector.mutex.RUnlock() + + collector.albsMetric.Reset() + collector.natsMetric.Reset() + collector.nlbsMetric.Reset() + for lbName, lbResources := range IonosLoadbalancers { + collector.nlbsMetric.WithLabelValues(lbName, lbResources.NLBName, lbResources.NLBRuleName).Set(float64(lbResources.NLBs)) + collector.albsMetric.WithLabelValues(lbName, lbResources.ALBName, lbResources.ALBRuleName).Set(float64(lbResources.ALBs)) + collector.natsMetric.WithLabelValues(lbName).Set(float64(lbResources.NATs)) + } + + collector.nlbsMetric.Collect(ch) + collector.albsMetric.Collect(ch) + collector.natsMetric.Collect(ch) + collector.dcDCNLBMetric.Collect(ch) + collector.dcDCALBMetric.Collect(ch) + collector.dcDCNATMetric.Collect(ch) + collector.dcNLBRulesMetric.Collect(ch) + collector.dcALBRulesMetric.Collect(ch) +} + +func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { + collector.s3TotalGetMethodSizeMetric.Describe(ch) + collector.s3TotalPutMethodSizeMetric.Describe(ch) + collector.s3TotalNumberOfGetRequestsMetric.Describe(ch) + collector.s3TotalNumberOfPutRequestsMetric.Describe(ch) + +} +func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { + collector.mutex.RLock() + defer collector.mutex.RUnlock() + + collector.s3TotalGetMethodSizeMetric.Reset() + 
collector.s3TotalPutMethodSizeMetric.Reset() + collector.s3TotalNumberOfGetRequestsMetric.Reset() + collector.s3TotalNumberOfPutRequestsMetric.Reset() + + for s3Name, s3Resources := range IonosS3Buckets { + collector.s3TotalGetMethodSizeMetric.WithLabelValues(s3Name).Set(float64(s3Resources.TotalGetMethodSize)) + collector.s3TotalPutMethodSizeMetric.WithLabelValues(s3Name).Set(float64(s3Resources.TotalPutMethodSize)) + collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name).Set(float64(s3Resources.GetMethods)) + collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name).Set(float64(s3Resources.PutMethods)) + + } + + collector.s3TotalGetMethodSizeMetric.Collect(ch) + collector.s3TotalPutMethodSizeMetric.Collect(ch) + collector.s3TotalNumberOfGetRequestsMetric.Collect(ch) + collector.s3TotalNumberOfPutRequestsMetric.Collect(ch) + +} + // Each and every collector must implement the Describe function. // It essentially writes all descriptors to the prometheus desc channel. func (collector *ionosCollector) Describe(ch chan<- *prometheus.Desc) { @@ -83,6 +233,7 @@ func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { //Implement logic here to determine proper metric value to return to prometheus //for each descriptor or call other functions that do so. 
+ account := os.Getenv("IONOS_ACCOUNT") collector.mutex.RLock() defer collector.mutex.RUnlock() @@ -95,11 +246,13 @@ func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { collector.coresMetric.WithLabelValues(dcName).Set(float64(dcResources.Cores)) collector.ramMetric.WithLabelValues(dcName).Set(float64(dcResources.Ram / 1024)) // MB -> GB collector.serverMetric.WithLabelValues(dcName).Set(float64(dcResources.Servers)) + } - collector.dcCoresMetric.WithLabelValues("SVS").Set(float64(CoresTotal)) - collector.dcRamMetric.WithLabelValues("SVS").Set(float64(RamTotal / 1024)) // MB -> GB - collector.dcServerMetric.WithLabelValues("SVS").Set(float64(ServerTotal)) - collector.dcDCMetric.WithLabelValues("SVS").Set(float64(DataCenters)) + + collector.dcCoresMetric.WithLabelValues(account).Set(float64(CoresTotal)) + collector.dcRamMetric.WithLabelValues(account).Set(float64(RamTotal / 1024)) // MB -> GB + collector.dcServerMetric.WithLabelValues(account).Set(float64(ServerTotal)) + collector.dcDCMetric.WithLabelValues(account).Set(float64(DataCenters)) collector.coresMetric.Collect(ch) collector.ramMetric.Collect(ch) @@ -113,11 +266,22 @@ func (collector *ionosCollector) GetMutex() *sync.RWMutex { return collector.mutex } +func (collector *s3Collector) GetMutex() *sync.RWMutex { + return collector.mutex +} + +func (collector *lbCollector) GetMutex() *sync.RWMutex { + return collector.mutex +} + func StartPrometheus(m *sync.RWMutex) { ic := newIonosCollector(m) + s3c := newS3Collector(m) + lbc := newLBCollector(m) prometheus.MustRegister(ic) + prometheus.MustRegister(s3c) + prometheus.MustRegister(lbc) prometheus.MustRegister(httpRequestsTotal) - } var httpRequestsTotal = prometheus.NewCounterVec( diff --git a/internal/s3collector.go b/internal/s3collector.go new file mode 100644 index 0000000..ef95556 --- /dev/null +++ b/internal/s3collector.go @@ -0,0 +1,226 @@ +package internal + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + aws 
"github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +var ( + TotalGetMethods int32 = 0 + TotalGetMethodSize int64 = 0 + TotalPutMethodSize int64 = 0 + TotalPutMethods int32 = 0 + IonosS3Buckets = make(map[string]IonosS3Resources) +) + +type IonosS3Resources struct { + Name string + GetMethods int32 + PutMethods int32 + HeadMethods int32 + PostMethods int32 + TotalGetMethodSize int32 + TotalPutMethodSize int32 +} + +func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S3, error) { + sess, err := session.NewSession(&aws.Config{ + Region: aws.String(region), + Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""), + Endpoint: aws.String(endpoint), + }) + if err != nil { + return nil, fmt.Errorf("error establishing session with AWS S3 Endpoint: %s", err) + } + return s3.New(sess), nil +} + +func S3CollectResources(m *sync.RWMutex, cycletime int32) { + // accessKey := os.Getenv("IONOS_ACCESS_KEY") + // secretKey := os.Getenv("IONOS_SECRET_KEY") + + file, _ := os.Create("S3ioutput.txt") + defer file.Close() + + oldStdout := os.Stdout + defer func() { os.Stdout = oldStdout }() + os.Stdout = file + //TODO YAML konfiguration + // Define endpoint configurations + endpoints := map[string]struct { + Region, AccessKey, SecretKey, Endpoint string + }{ + "de": {"de", "", "", "https://s3-eu-central-1.ionoscloud.com"}, + "eu-central-2": {"eu-central-2", "", "", "https://s3-eu-central-2.ionoscloud.com"}, + // Add more endpoints as needed + } + + bucketCounts := make(map[string]struct { + totalGetMethods int32 + totalPutMethods int32 + totalGetMethodSize int64 + totalPutMethodSize int64 + }) + newS3IonosResources := make(map[string]IonosS3Resources) + serviceClients := make(map[string]*s3.S3) + + // var totalLineCount int = 0 + // Create service clients for each endpoint + + for { + for endpoint, config := range endpoints { + if 
_, exists := serviceClients[endpoint]; exists { + continue + } + client, err := createS3ServiceClient(config.Region, config.AccessKey, config.SecretKey, config.Endpoint) + if err != nil { + fmt.Printf("Error creating service client for endpoint %s: %v\n", endpoint, err) + continue + } + serviceClients[endpoint] = client + } + for endpoint, client := range serviceClients { + fmt.Println("Using service client for endpoint: %s\n", endpoint) + // serviceClient := s3.New(sess) + + result, err := client.ListBuckets(nil) + if err != nil { + fmt.Println("Problem with the Listing of the Buckets") + } + + for _, buckets := range result.Buckets { + var ( + totalGetMethods int32 = 0 + totalPutMethods int32 = 0 + totalGetMethodSize int64 = 0 + totalPutMethodSize int64 = 0 + ) + + if HasLogsFolder(client, *buckets.Name) { + + objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ + Bucket: aws.String(*buckets.Name), + }) + if err != nil { + fmt.Println("Could not use the service client to list objects") + continue + } + for _, object := range objectList.Contents { + // fmt.Println(aws.StringValue(object.Key)) + // fmt.Println("This is the object size", *object.Size) + //limitieren von Menge von Logs + downloadInput := &s3.GetObjectInput{ + Bucket: aws.String(*buckets.Name), + Key: aws.String(*object.Key), + } + + result, err := client.GetObject(downloadInput) + if err != nil { + fmt.Println("Error downloading object", err) + continue + } + defer result.Body.Close() + + logContent, err := io.ReadAll(result.Body) + if err != nil { + fmt.Println("Error reading log content:", err) + continue + } + // lines := strings.Split(string(logContent), "\n") + // lineCount := len(lines) + // totalLineCount += lineCount + + // fmt.Printf("Number of lines in %s/%s: %d\n", *buckets.Name, *object.Key, lineCount) + + // fmt.Println("These are the log lines", logLines) + fields := strings.Fields(string(logContent)) + + // bucketID := fields[0] + // bucketName := fields[1] + bucketMethod := 
fields[9] + // bucketResponseCode := fields[12] + // bucketMethodSize := fields[14] + + sizeStrGet := fields[14] + size, err := strconv.ParseInt(sizeStrGet, 10, 64) + + if err != nil { + fmt.Println("Error parsing PUT size:", err) + continue + } + + switch bucketMethod { + case "\"GET": + totalGetMethods++ + totalGetMethodSize += size + case "\"PUT": + totalPutMethods++ + totalPutMethodSize += size + default: + } + + // fmt.Printf("Log Line: %s, HTTP Method: %s\n", *object.Key, totalGetMethodSize) + // fmt.Printf("Log Line: %s, HTTP Method: %s\n", *object.Key, bucketMethod) + } + bucketCounts[*buckets.Name] = struct { + totalGetMethods int32 + totalPutMethods int32 + totalGetMethodSize int64 + totalPutMethodSize int64 + }{ + totalGetMethods: totalGetMethods, + totalPutMethods: totalPutMethods, + totalGetMethodSize: totalGetMethodSize, + totalPutMethodSize: totalPutMethodSize, + } + fmt.Println("This is the bucket Name", *buckets.Name) + + } + + } + + for bucketName, counts := range bucketCounts { + newS3IonosResources[bucketName] = IonosS3Resources{ + Name: bucketName, + GetMethods: counts.totalGetMethods, + PutMethods: counts.totalPutMethods, + TotalGetMethodSize: int32(counts.totalGetMethodSize), + TotalPutMethodSize: int32(counts.totalPutMethodSize), + } + } + IonosS3Buckets = newS3IonosResources + } + } + // CalculateS3Totals(m) + // time.Sleep(time.Duration(cycletime) * time.Second) +} + +// func CalculateS3Totals(m *sync.RWMutex) { +// var ( +// getMethodTotal int32 +// putMethodTotal int32 +// getMethodSizeTotal int64 +// putMethodSizeTotal int64 +// ) +// for _, s3Resources := range IonosS3Buckets { +// getMethodTotal += s3Resources.GetMethods +// putMethodTotal += s3Resources.PutMethods +// getMethodSizeTotal += int64(s3Resources.TotalGetMethodSize) +// putMethodSizeTotal += int64(s3Resources.TotalPutMethodSize) +// } +// TotalGetMethods = getMethodTotal + +// fmt.Println("Get method inside a calculate totals program", TotalGetMethods) +// TotalPutMethods 
= putMethodTotal +// TotalGetMethodSize = getMethodSizeTotal +// TotalPutMethodSize = putMethodSizeTotal +// } From 3043a1e1ceddfd7fd0a8b84a4c713ef4590bfe19 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 2 May 2024 14:53:16 +0200 Subject: [PATCH 02/55] initial push --- internal/helper.go | 32 ++++++++++++++++++++++++++++++++ internal/ionos.go | 43 +++++++++++++++++++++++++++++++++++++++---- main.go | 11 +++++++++-- 3 files changed, 80 insertions(+), 6 deletions(-) diff --git a/internal/helper.go b/internal/helper.go index a2900a9..89f8633 100644 --- a/internal/helper.go +++ b/internal/helper.go @@ -3,6 +3,11 @@ package internal import ( "fmt" "os" + + aws "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" ) func GetEnv(key string, fallback string) string { @@ -20,3 +25,30 @@ func GetEnv(key string, fallback string) string { } } } + +func NewS3ServiceClient() (*s3.S3, error) { + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("eu-central-2"), + Credentials: credentials.NewStaticCredentials("00e556b6437d8a8d1776", "LbypY0AmotQCDDckTz+cAPFI7l0eQvSFeQ1WxKtw", ""), + Endpoint: aws.String("https://s3-eu-central-2.ionoscloud.com"), + }) + + if err != nil { + return nil, err + } + return s3.New(sess), nil +} + +func HasLogsFolder(client *s3.S3, bucketName string) bool { + result, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ + Bucket: aws.String(bucketName), + Prefix: aws.String("logs/"), + }) + + if err != nil { + fmt.Println("Error listing objects in bucket: ", err) + return false + } + + return len(result.Contents) > 0 +} diff --git a/internal/ionos.go b/internal/ionos.go index 0fdd76a..9dd6d27 100644 --- a/internal/ionos.go +++ b/internal/ionos.go @@ -9,6 +9,7 @@ import ( "time" ionoscloud "github.com/ionos-cloud/sdk-go/v6" + "github.com/joho/godotenv" ) var ( @@ -17,7 +18,7 @@ var ( ServerTotal int32 = 0 DataCenters int32 = 0 
IonosDatacenters = make(map[string]IonosDCResources) //Key is the name of the datacenter - depth int32 = 1 //Controls the detail depth of the response objects. + depth int32 = 1 ) type IonosDCResources struct { @@ -28,8 +29,28 @@ type IonosDCResources struct { } func CollectResources(m *sync.RWMutex, cycletime int32) { - configuration := ionoscloud.NewConfigurationFromEnv() - apiClient := ionoscloud.NewAPIClient(configuration) + + file, _ := os.Create("ionosoutput.txt") + + defer file.Close() + + oldStdout := os.Stdout + defer func() { os.Stdout = oldStdout }() + os.Stdout = file + + err := godotenv.Load(".env") + if err != nil { + fmt.Println("Error loading .env file") + } + // username := os.Getenv("IONOS_USERNAME") + // password := os.Getenv("IONOS_PASSWORD") + // cfg := ionoscloud.NewConfiguration(username, password, "", "") + cfgENV := ionoscloud.NewConfigurationFromEnv() + + // cfg.Debug = true + cfgENV.Debug = true + apiClient := ionoscloud.NewAPIClient(cfgENV) + for { datacenters, resp, err := apiClient.DataCentersApi.DatacentersGet(context.Background()).Depth(depth).Execute() if err != nil { @@ -37,6 +58,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) os.Exit(1) } + fmt.Println("DATACENTER", datacenters) newIonosDatacenters := make(map[string]IonosDCResources) for _, datacenter := range *datacenters.Items { var ( @@ -45,29 +67,37 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { serverTotalDC int32 = 0 ) servers, resp, err := apiClient.ServersApi.DatacentersServersGet(context.Background(), *datacenter.Id).Depth(depth).Execute() + //fmt.Println("SERVERS", servers) if err != nil { fmt.Fprintf(os.Stderr, "Error when calling `ServersApi.DatacentersServersGet``: %v\n", err) fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) } serverTotalDC = int32(len(*servers.Items)) + for _, server := range *servers.Items { coresTotalDC += *server.Properties.Cores ramTotalDC += 
*server.Properties.Ram } + newIonosDatacenters[*datacenter.Properties.Name] = IonosDCResources{ DCId: *datacenter.Id, Cores: coresTotalDC, Ram: ramTotalDC, Servers: serverTotalDC, } + } + m.Lock() IonosDatacenters = newIonosDatacenters + LoadbalancerCollector(apiClient) + IPCollectResources(apiClient) m.Unlock() CalculateDCTotals(m) time.Sleep(time.Duration(cycletime) * time.Second) } } + func CalculateDCTotals(m *sync.RWMutex) { var ( serverTotal int32 @@ -96,7 +126,7 @@ func PrintDCResources(m *sync.RWMutex) { for dcName, dcResources := range IonosDatacenters { fmt.Fprintf(os.Stdout, "%s:\n - UUID: %s\n", dcName, dcResources.DCId) fmt.Fprintf(os.Stdout, " - Servers: %d\n", dcResources.Servers) - fmt.Fprintf(os.Stdout, " - Cores: %d\n", dcResources.Cores) + fmt.Fprintf(os.Stdout, "%s:\n - Cores: %d\n", dcName, dcResources.Cores) fmt.Fprintf(os.Stdout, " - Ram: %d GB\n", dcResources.Ram/1024) } } @@ -108,3 +138,8 @@ func PrintDCTotals(m *sync.RWMutex) { log.Printf("Total - Cores: %d\n", CoresTotal) log.Printf("Total - Ram: %d GB\n", RamTotal/1024) } + +//problemen mit ionos log bucket konnte nicht testen richtig +//noch problemen mit aktuallisierung von log data wenn welche geloescht werden +//problem sa paralelizacijom. 
logove mogu kalkulisati kako treba +//ali ne tako brzo diff --git a/main.go b/main.go index 2e23ac3..57c4e01 100644 --- a/main.go +++ b/main.go @@ -17,15 +17,22 @@ var ( ) func main() { + //internal.CollectResources(mutex, ionos_api_cycle) + //internal.BasicAuthExample() exporterPort = internal.GetEnv("IONOS_EXPORTER_APPLICATION_CONTAINER_PORT", "9100") - if cycletime, err := strconv.ParseInt(internal.GetEnv("IONOS_EXPORTER_API_CYCLE", "900"), 10, 32); err != nil { + if cycletime, err := strconv.ParseInt(internal.GetEnv("IONOS_EXPORTER_API_CYCLE", "300"), 10, 32); err != nil { log.Fatal("Cannot convert IONOS_API_CYCLE to int") } else { ionos_api_cycle = int32(cycletime) } - go internal.CollectResources(mutex, ionos_api_cycle) + // internal.IPCollectResources() + // go internal.CollectResources(mutex, ionos_api_cycle) + go internal.S3CollectResources(mutex, ionos_api_cycle) + + //internal.PrintDCResources(mutex) internal.StartPrometheus(mutex) http.Handle("/metrics", promhttp.Handler()) http.Handle("/healthcheck", http.HandlerFunc(internal.HealthCheck)) log.Fatal(http.ListenAndServe(":"+exporterPort, nil)) + } From 367c7d4a9d5901365e4d41d406915bfffffdc739 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 2 May 2024 16:35:57 +0200 Subject: [PATCH 03/55] fixed problems with crashing while using go routine --- internal/ionos.go | 1 + internal/lbcollector.go | 107 ++++++++++++++++++++ internal/s3collector.go | 216 +++++++++++++++++++--------------------- main.go | 4 +- 4 files changed, 210 insertions(+), 118 deletions(-) create mode 100644 internal/lbcollector.go diff --git a/internal/ionos.go b/internal/ionos.go index 9dd6d27..6766d30 100644 --- a/internal/ionos.go +++ b/internal/ionos.go @@ -92,6 +92,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { IonosDatacenters = newIonosDatacenters LoadbalancerCollector(apiClient) IPCollectResources(apiClient) + S3CollectResources() m.Unlock() CalculateDCTotals(m) time.Sleep(time.Duration(cycletime) * time.Second) 
diff --git a/internal/lbcollector.go b/internal/lbcollector.go new file mode 100644 index 0000000..45c5c21 --- /dev/null +++ b/internal/lbcollector.go @@ -0,0 +1,107 @@ +package internal + +import ( + "context" + "fmt" + "os" + + ionoscloud "github.com/ionos-cloud/sdk-go/v6" +) + +var ( + nlbNames string + albNames string + nlbTotalRulesDC int32 + nlbRuleNames string + albTotalRulesDC int32 + albRuleNames string + + IonosLoadbalancers = make(map[string]IonosLBResources) +) + +type IonosLBResources struct { + NLBs int32 + ALBs int32 + NATs int32 + NLBRules int32 + ALBRules int32 + ALBName string + NLBName string + NLBRuleName string + ALBRuleName string +} + +func LoadbalancerCollector(apiClient *ionoscloud.APIClient) { + fmt.Println("Hey this is the Loadbalancer Collector") + + file, _ := os.Create("LoadBalancerOutput.txt") + + defer file.Close() + + oldStdout := os.Stdout + defer func() { os.Stdout = oldStdout }() + os.Stdout = file + datacenter, _, _ := apiClient.DataCentersApi.DatacentersGet(context.Background()).Depth(3).Execute() + + newIonosLBResources := make(map[string]IonosLBResources) + for _, datacenter := range *datacenter.Items { + + var ( + nlbTotalDC int32 = 0 + nlbTotalRulesDC int32 = 0 + albTotalRulesDC int32 = 0 + albTotalDC int32 = 0 + natTotalDC int32 = 0 + albNames string + nlbNames string + albRuleNames string + nlbRuleNames string + ) + + fmt.Println("These are the datacenter Names", datacenter.Properties.Name) + albList, _, _ := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + nlbList, _, _ := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + natList, _, _ := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), *datacenter.Id).Depth(3).Execute() + + for _, nlbRulesAndLabels := range *nlbList.Items { + + nlbNames = *nlbRulesAndLabels.Properties.Name + 
nlbForwardingRules := nlbRulesAndLabels.Entities.Forwardingrules + nlbTotalRulesDC = int32(len(*nlbForwardingRules.Items)) + + for _, ruleItems := range *nlbForwardingRules.Items { + nlbRuleNames = *ruleItems.Properties.Name + } + } + + for _, albRulesAndLabels := range *albList.Items { + + albNames = *albRulesAndLabels.Properties.Name + forwardingRules := albRulesAndLabels.Entities.Forwardingrules + albTotalRulesDC = int32(len(*forwardingRules.Items)) + + for _, ruleItems := range *forwardingRules.Items { + for _, ruleName := range *ruleItems.Properties.HttpRules { + albRuleNames = *ruleName.Name + } + } + + } + nlbTotalDC = int32(len(*nlbList.Items)) + albTotalDC = int32(len(*albList.Items)) + natTotalDC = int32(len(*natList.Items)) + + newIonosLBResources[*datacenter.Properties.Name] = IonosLBResources{ + NLBs: nlbTotalDC, + ALBs: albTotalDC, + NATs: natTotalDC, + NLBRules: nlbTotalRulesDC, + ALBRules: albTotalRulesDC, + ALBName: albNames, + NLBName: nlbNames, + ALBRuleName: albRuleNames, + NLBRuleName: nlbRuleNames, + } + } + IonosLoadbalancers = newIonosLBResources +} diff --git a/internal/s3collector.go b/internal/s3collector.go index ef95556..8025786 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -6,7 +6,6 @@ import ( "os" "strconv" "strings" - "sync" aws "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" @@ -44,7 +43,7 @@ func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S return s3.New(sess), nil } -func S3CollectResources(m *sync.RWMutex, cycletime int32) { +func S3CollectResources() { // accessKey := os.Getenv("IONOS_ACCESS_KEY") // secretKey := os.Getenv("IONOS_SECRET_KEY") @@ -59,8 +58,8 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { endpoints := map[string]struct { Region, AccessKey, SecretKey, Endpoint string }{ - "de": {"de", "", "", "https://s3-eu-central-1.ionoscloud.com"}, - "eu-central-2": {"eu-central-2", "", "", 
"https://s3-eu-central-2.ionoscloud.com"}, + "de": {"de", "00e556b6437d8a8d1776", "LbypY0AmotQCDDckTz+cAPFI7l0eQvSFeQ1WxKtw", "https://s3-eu-central-1.ionoscloud.com"}, + "eu-central-2": {"eu-central-2", "00e556b6437d8a8d1776", "LbypY0AmotQCDDckTz+cAPFI7l0eQvSFeQ1WxKtw", "https://s3-eu-central-2.ionoscloud.com"}, // Add more endpoints as needed } @@ -76,134 +75,119 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { // var totalLineCount int = 0 // Create service clients for each endpoint - for { - for endpoint, config := range endpoints { - if _, exists := serviceClients[endpoint]; exists { - continue - } - client, err := createS3ServiceClient(config.Region, config.AccessKey, config.SecretKey, config.Endpoint) - if err != nil { - fmt.Printf("Error creating service client for endpoint %s: %v\n", endpoint, err) - continue - } - serviceClients[endpoint] = client + for endpoint, config := range endpoints { + if _, exists := serviceClients[endpoint]; exists { + continue + } + client, err := createS3ServiceClient(config.Region, config.AccessKey, config.SecretKey, config.Endpoint) + if err != nil { + fmt.Printf("Error creating service client for endpoint %s: %v\n", endpoint, err) + continue } - for endpoint, client := range serviceClients { - fmt.Println("Using service client for endpoint: %s\n", endpoint) - // serviceClient := s3.New(sess) + serviceClients[endpoint] = client - result, err := client.ListBuckets(nil) + fmt.Println("Using service client for endpoint: %s\n", endpoint) + // serviceClient := s3.New(sess) + + result, err := client.ListBuckets(nil) + if err != nil { + fmt.Println("Problem with the Listing of the Buckets") + } + + for _, buckets := range result.Buckets { + var ( + totalGetMethods int32 = 0 + totalPutMethods int32 = 0 + totalGetMethodSize int64 = 0 + totalPutMethodSize int64 = 0 + ) + + objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ + Bucket: aws.String(*buckets.Name), + Prefix: aws.String("logs/"), + }) if err != nil 
{ - fmt.Println("Problem with the Listing of the Buckets") + fmt.Println("Could not use the service client to list objects") + continue + } + if len(objectList.Contents) == 0 { + continue } + for _, object := range objectList.Contents { + downloadInput := &s3.GetObjectInput{ + Bucket: aws.String(*buckets.Name), + Key: aws.String(*object.Key), + } - for _, buckets := range result.Buckets { - var ( - totalGetMethods int32 = 0 - totalPutMethods int32 = 0 - totalGetMethodSize int64 = 0 - totalPutMethodSize int64 = 0 - ) - - if HasLogsFolder(client, *buckets.Name) { - - objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ - Bucket: aws.String(*buckets.Name), - }) - if err != nil { - fmt.Println("Could not use the service client to list objects") - continue - } - for _, object := range objectList.Contents { - // fmt.Println(aws.StringValue(object.Key)) - // fmt.Println("This is the object size", *object.Size) - //limitieren von Menge von Logs - downloadInput := &s3.GetObjectInput{ - Bucket: aws.String(*buckets.Name), - Key: aws.String(*object.Key), - } - - result, err := client.GetObject(downloadInput) - if err != nil { - fmt.Println("Error downloading object", err) - continue - } - defer result.Body.Close() - - logContent, err := io.ReadAll(result.Body) - if err != nil { - fmt.Println("Error reading log content:", err) - continue - } - // lines := strings.Split(string(logContent), "\n") - // lineCount := len(lines) - // totalLineCount += lineCount - - // fmt.Printf("Number of lines in %s/%s: %d\n", *buckets.Name, *object.Key, lineCount) - - // fmt.Println("These are the log lines", logLines) - fields := strings.Fields(string(logContent)) - - // bucketID := fields[0] - // bucketName := fields[1] - bucketMethod := fields[9] - // bucketResponseCode := fields[12] - // bucketMethodSize := fields[14] - - sizeStrGet := fields[14] - size, err := strconv.ParseInt(sizeStrGet, 10, 64) - - if err != nil { - fmt.Println("Error parsing PUT size:", err) - continue - } - - 
switch bucketMethod { - case "\"GET": - totalGetMethods++ - totalGetMethodSize += size - case "\"PUT": - totalPutMethods++ - totalPutMethodSize += size - default: - } - - // fmt.Printf("Log Line: %s, HTTP Method: %s\n", *object.Key, totalGetMethodSize) - // fmt.Printf("Log Line: %s, HTTP Method: %s\n", *object.Key, bucketMethod) - } - bucketCounts[*buckets.Name] = struct { - totalGetMethods int32 - totalPutMethods int32 - totalGetMethodSize int64 - totalPutMethodSize int64 - }{ - totalGetMethods: totalGetMethods, - totalPutMethods: totalPutMethods, - totalGetMethodSize: totalGetMethodSize, - totalPutMethodSize: totalPutMethodSize, - } - fmt.Println("This is the bucket Name", *buckets.Name) + result, err := client.GetObject(downloadInput) + if err != nil { + fmt.Println("Error downloading object", err) + continue + } + defer result.Body.Close() + logContent, err := io.ReadAll(result.Body) + if err != nil { + fmt.Println("Error reading log content:", err) + continue } + fields := strings.Fields(string(logContent)) - } + bucketMethod := fields[9] - for bucketName, counts := range bucketCounts { - newS3IonosResources[bucketName] = IonosS3Resources{ - Name: bucketName, - GetMethods: counts.totalGetMethods, - PutMethods: counts.totalPutMethods, - TotalGetMethodSize: int32(counts.totalGetMethodSize), - TotalPutMethodSize: int32(counts.totalPutMethodSize), + sizeStrGet := fields[14] + sizeStrPut := fields[16] + if bucketMethod == "PUT" { + fmt.Println("This si the PUT Method") + } + sizeGet, err := strconv.ParseInt(sizeStrGet, 10, 64) + sizePut, err := strconv.ParseInt(sizeStrPut, 10, 64) + + if err != nil { + fmt.Println("Error parsing PUT size:", err) + continue + } + switch bucketMethod { + case "\"GET": + totalGetMethods++ + totalGetMethodSize += sizeGet + case "\"PUT": + totalPutMethods++ + totalPutMethodSize += sizePut + default: } } - IonosS3Buckets = newS3IonosResources + bucketCounts[*buckets.Name] = struct { + totalGetMethods int32 + totalPutMethods int32 + 
totalGetMethodSize int64 + totalPutMethodSize int64 + }{ + totalGetMethods: totalGetMethods, + totalPutMethods: totalPutMethods, + totalGetMethodSize: totalGetMethodSize, + totalPutMethodSize: totalPutMethodSize, + } + fmt.Println("This is the bucket Name", *buckets.Name) + } + + } + + for bucketName, counts := range bucketCounts { + newS3IonosResources[bucketName] = IonosS3Resources{ + Name: bucketName, + GetMethods: counts.totalGetMethods, + PutMethods: counts.totalPutMethods, + TotalGetMethodSize: int32(counts.totalGetMethodSize), + TotalPutMethodSize: int32(counts.totalPutMethodSize), } } - // CalculateS3Totals(m) + IonosS3Buckets = newS3IonosResources // time.Sleep(time.Duration(cycletime) * time.Second) } +// CalculateS3Totals(m) + // func CalculateS3Totals(m *sync.RWMutex) { // var ( // getMethodTotal int32 diff --git a/main.go b/main.go index 57c4e01..c3ee762 100644 --- a/main.go +++ b/main.go @@ -26,8 +26,8 @@ func main() { ionos_api_cycle = int32(cycletime) } // internal.IPCollectResources() - // go internal.CollectResources(mutex, ionos_api_cycle) - go internal.S3CollectResources(mutex, ionos_api_cycle) + go internal.CollectResources(mutex, ionos_api_cycle) + // go internal.S3CollectResources(mutex, ionos_api_cycle) //internal.PrintDCResources(mutex) internal.StartPrometheus(mutex) From 672a5d71fa215d1c711afa1ff690797fe0581e47 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 2 May 2024 16:39:21 +0200 Subject: [PATCH 04/55] x --- internal/s3collector.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/s3collector.go b/internal/s3collector.go index 8025786..53e282a 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -58,8 +58,8 @@ func S3CollectResources() { endpoints := map[string]struct { Region, AccessKey, SecretKey, Endpoint string }{ - "de": {"de", "00e556b6437d8a8d1776", "LbypY0AmotQCDDckTz+cAPFI7l0eQvSFeQ1WxKtw", "https://s3-eu-central-1.ionoscloud.com"}, - "eu-central-2": {"eu-central-2", 
"00e556b6437d8a8d1776", "LbypY0AmotQCDDckTz+cAPFI7l0eQvSFeQ1WxKtw", "https://s3-eu-central-2.ionoscloud.com"}, + "de": {"de", "", "+", "https://s3-eu-central-1.ionoscloud.com"}, + "eu-central-2": {"eu-central-2", "", "+", "https://s3-eu-central-2.ionoscloud.com"}, // Add more endpoints as needed } From b07829efb4d1d6e506a31a7296c81402bbb37610 Mon Sep 17 00:00:00 2001 From: efidoris Date: Mon, 6 May 2024 16:32:09 +0200 Subject: [PATCH 05/55] Wait groups work in progress --- go.mod | 8 +- go.sum | 17 +++ internal/ionos.go | 2 +- internal/prometheus.go | 31 ++--- internal/s3collector.go | 277 ++++++++++++++++++++++------------------ main.go | 13 +- 6 files changed, 200 insertions(+), 148 deletions(-) diff --git a/go.mod b/go.mod index df02995..54f0350 100644 --- a/go.mod +++ b/go.mod @@ -8,16 +8,20 @@ require ( ) require ( + github.com/aws/aws-lambda-go v1.47.0 // indirect + github.com/aws/aws-sdk-go v1.52.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/joho/godotenv v1.5.1 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect - golang.org/x/net v0.15.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sys v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/go.sum b/go.sum index c5e63d3..025f26a 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,12 @@ +github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= +github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= +github.com/aws/aws-sdk-go v1.52.0 
h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI= +github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -13,8 +18,14 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang 
v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= @@ -23,16 +34,20 @@ github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -43,3 +58,5 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/internal/ionos.go b/internal/ionos.go index 6766d30..2e60ebd 100644 --- a/internal/ionos.go +++ b/internal/ionos.go @@ -92,7 +92,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { IonosDatacenters = newIonosDatacenters LoadbalancerCollector(apiClient) IPCollectResources(apiClient) - S3CollectResources() + // S3CollectResources() m.Unlock() CalculateDCTotals(m) time.Sleep(time.Duration(cycletime) * time.Second) diff --git a/internal/prometheus.go b/internal/prometheus.go index e243f88..2d06ec0 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -46,12 +46,11 @@ type s3Collector struct { s3TotalNumberOfPutRequestsMetric *prometheus.GaugeVec } -var mutex *sync.RWMutex +// var mutex *sync.RWMutex func newLBCollector(m *sync.RWMutex) *lbCollector { - mutex = m return &lbCollector{ - mutex: &sync.RWMutex{}, + mutex: m, nlbsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_networkloadbalancer_amount", Help: "Shows the number of active Network Loadbalancers in an IONOS datacenter", @@ -90,9 +89,8 @@ func newLBCollector(m *sync.RWMutex) *lbCollector { // You must create a constructor for you collector that // initializes every descriptor and returns a pointer to the collector func newIonosCollector(m *sync.RWMutex) *ionosCollector { - mutex = m return &ionosCollector{ - 
mutex: &sync.RWMutex{}, + mutex: m, coresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dc_cores_amount", Help: "Shows the number of currently active cores in an IONOS datacenter", @@ -125,9 +123,8 @@ func newIonosCollector(m *sync.RWMutex) *ionosCollector { } func newS3Collector(m *sync.RWMutex) *s3Collector { - mutex = m return &s3Collector{ - mutex: &sync.RWMutex{}, + mutex: m, s3TotalGetMethodSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_size_of_get_requests_in_bytes", Help: "Gives the total size of s3 GET HTTP Request in Bytes", @@ -194,24 +191,22 @@ func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { collector.mutex.RLock() defer collector.mutex.RUnlock() - collector.s3TotalGetMethodSizeMetric.Reset() - collector.s3TotalPutMethodSizeMetric.Reset() - collector.s3TotalNumberOfGetRequestsMetric.Reset() - collector.s3TotalNumberOfPutRequestsMetric.Reset() - for s3Name, s3Resources := range IonosS3Buckets { + collector.s3TotalGetMethodSizeMetric.Reset() + collector.s3TotalPutMethodSizeMetric.Reset() + collector.s3TotalNumberOfGetRequestsMetric.Reset() + collector.s3TotalNumberOfPutRequestsMetric.Reset() collector.s3TotalGetMethodSizeMetric.WithLabelValues(s3Name).Set(float64(s3Resources.TotalGetMethodSize)) collector.s3TotalPutMethodSizeMetric.WithLabelValues(s3Name).Set(float64(s3Resources.TotalPutMethodSize)) collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name).Set(float64(s3Resources.GetMethods)) collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name).Set(float64(s3Resources.PutMethods)) + collector.s3TotalGetMethodSizeMetric.Collect(ch) + collector.s3TotalPutMethodSizeMetric.Collect(ch) + collector.s3TotalNumberOfGetRequestsMetric.Collect(ch) + collector.s3TotalNumberOfPutRequestsMetric.Collect(ch) } - collector.s3TotalGetMethodSizeMetric.Collect(ch) - collector.s3TotalPutMethodSizeMetric.Collect(ch) - collector.s3TotalNumberOfGetRequestsMetric.Collect(ch) - 
collector.s3TotalNumberOfPutRequestsMetric.Collect(ch) - } // Each and every collector must implement the Describe function. @@ -294,7 +289,7 @@ var httpRequestsTotal = prometheus.NewCounterVec( ) func HealthCheck(w http.ResponseWriter, r *http.Request) { - PrintDCTotals(mutex) + // PrintDCTotals(mutex) httpRequestsTotal.WithLabelValues("/healthcheck", r.Method).Inc() w.WriteHeader(http.StatusOK) io.WriteString(w, "OK") diff --git a/internal/s3collector.go b/internal/s3collector.go index 53e282a..7c472ae 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -6,8 +6,11 @@ import ( "os" "strconv" "strings" + "sync" + "time" aws "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" @@ -31,6 +34,11 @@ type IonosS3Resources struct { TotalPutMethodSize int32 } +const ( + objectPerPage = 100 + maxConcurrent = 10 +) + func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S3, error) { sess, err := session.NewSession(&aws.Config{ Region: aws.String(region), @@ -43,7 +51,7 @@ func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S return s3.New(sess), nil } -func S3CollectResources() { +func S3CollectResources(m *sync.RWMutex, cycletime int32) { // accessKey := os.Getenv("IONOS_ACCESS_KEY") // secretKey := os.Getenv("IONOS_SECRET_KEY") @@ -60,151 +68,178 @@ func S3CollectResources() { }{ "de": {"de", "", "+", "https://s3-eu-central-1.ionoscloud.com"}, "eu-central-2": {"eu-central-2", "", "+", "https://s3-eu-central-2.ionoscloud.com"}, + // Add more endpoints as needed } - bucketCounts := make(map[string]struct { - totalGetMethods int32 - totalPutMethods int32 - totalGetMethodSize int64 - totalPutMethodSize int64 - }) - newS3IonosResources := make(map[string]IonosS3Resources) - serviceClients := make(map[string]*s3.S3) + var wg sync.WaitGroup + semaphore := make(chan 
struct{}, maxConcurrent) - // var totalLineCount int = 0 - // Create service clients for each endpoint + for { + for endpoint, config := range endpoints { + if _, exists := IonosS3Buckets[endpoint]; exists { + continue + } + + client, err := createS3ServiceClient(config.Region, config.AccessKey, config.SecretKey, config.Endpoint) + + if err != nil { + fmt.Printf("Erropr creating service client for endpoint %s: %v\n", endpoint, err) + continue + } + + fmt.Println("Using service client for endpoint:", endpoint) + + result, err := client.ListBuckets(nil) + + if err != nil { + fmt.Println("Error while Listing Buckets", err) + continue + } + + wg.Add(len(result.Buckets)) + + for _, bucket := range result.Buckets { + semaphore <- struct{}{} + go func(bucketName string) { + defer func() { + <-semaphore + wg.Done() + }() + + processBucket(client, bucketName) + }(*bucket.Name) + } - for endpoint, config := range endpoints { - if _, exists := serviceClients[endpoint]; exists { - continue - } - client, err := createS3ServiceClient(config.Region, config.AccessKey, config.SecretKey, config.Endpoint) - if err != nil { - fmt.Printf("Error creating service client for endpoint %s: %v\n", endpoint, err) - continue } - serviceClients[endpoint] = client + wg.Wait() + CalculateS3Totals(m) + fmt.Println("This is end of before sleep") - fmt.Println("Using service client for endpoint: %s\n", endpoint) - // serviceClient := s3.New(sess) + time.Sleep(time.Duration(cycletime) * time.Second) + } + +} + +func processBucket(client *s3.S3, bucketName string) { + var ( + totalGetMethods int32 = 0 + totalPutMethods int32 = 0 + totalGetMethodSize int64 = 0 + totalPutMethodSize int64 = 0 + ) + + continuationToken := "" + + for { + objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ + Bucket: aws.String(bucketName), + Prefix: aws.String("logs/"), + ContinuationToken: aws.String(continuationToken), + MaxKeys: aws.Int64(objectPerPage), + }) - result, err := client.ListBuckets(nil) if err != 
nil { - fmt.Println("Problem with the Listing of the Buckets") + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case "NoSuchBucket": + fmt.Printf("bucket %s does not exist\n", bucketName) + default: + fmt.Printf("error listing objects in bucket %s: %s\n", bucketName, aerr.Message()) + } + } + return + } + + if len(objectList.Contents) == 0 { + fmt.Printf("bucket %s does not contain any objects with the 'logs/' prefix\n", bucketName) + return } - for _, buckets := range result.Buckets { - var ( - totalGetMethods int32 = 0 - totalPutMethods int32 = 0 - totalGetMethodSize int64 = 0 - totalPutMethodSize int64 = 0 - ) - - objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ - Bucket: aws.String(*buckets.Name), - Prefix: aws.String("logs/"), - }) + for _, object := range objectList.Contents { + downloadInput := &s3.GetObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(*object.Key), + } + + result, err := client.GetObject(downloadInput) + if err != nil { - fmt.Println("Could not use the service client to list objects") + fmt.Println("Error downloading object", err) continue } - if len(objectList.Contents) == 0 { + defer result.Body.Close() + + logContent, err := io.ReadAll(result.Body) + + if err != nil { + fmt.Println("Error reading log content:", err) continue } - for _, object := range objectList.Contents { - downloadInput := &s3.GetObjectInput{ - Bucket: aws.String(*buckets.Name), - Key: aws.String(*object.Key), - } - - result, err := client.GetObject(downloadInput) - if err != nil { - fmt.Println("Error downloading object", err) - continue - } - defer result.Body.Close() - logContent, err := io.ReadAll(result.Body) - if err != nil { - fmt.Println("Error reading log content:", err) - continue - } - fields := strings.Fields(string(logContent)) + fields := strings.Fields(string(logContent)) + bucketMethod := fields[9] - bucketMethod := fields[9] + sizeStrGet := fields[14] + sizeStrPut := fields[16] + sizeGet, err := 
strconv.ParseInt(sizeStrGet, 10, 64) - sizeStrGet := fields[14] - sizeStrPut := fields[16] - if bucketMethod == "PUT" { - fmt.Println("This si the PUT Method") - } - sizeGet, err := strconv.ParseInt(sizeStrGet, 10, 64) - sizePut, err := strconv.ParseInt(sizeStrPut, 10, 64) + if err != nil { + fmt.Println("Error parsing GET size:", err) + continue + } + sizePut, err := strconv.ParseInt(sizeStrPut, 10, 64) - if err != nil { - fmt.Println("Error parsing PUT size:", err) - continue - } - switch bucketMethod { - case "\"GET": - totalGetMethods++ - totalGetMethodSize += sizeGet - case "\"PUT": - totalPutMethods++ - totalPutMethodSize += sizePut - default: - } + if err != nil { + fmt.Println("Error parsing PUT size:", err) + continue } - bucketCounts[*buckets.Name] = struct { - totalGetMethods int32 - totalPutMethods int32 - totalGetMethodSize int64 - totalPutMethodSize int64 - }{ - totalGetMethods: totalGetMethods, - totalPutMethods: totalPutMethods, - totalGetMethodSize: totalGetMethodSize, - totalPutMethodSize: totalPutMethodSize, + + switch bucketMethod { + case "\"GET": + totalGetMethods++ + totalGetMethodSize += sizeGet + case "\"PUT": + totalPutMethods++ + totalPutMethodSize += sizePut + default: } - fmt.Println("This is the bucket Name", *buckets.Name) } - } - - for bucketName, counts := range bucketCounts { - newS3IonosResources[bucketName] = IonosS3Resources{ - Name: bucketName, - GetMethods: counts.totalGetMethods, - PutMethods: counts.totalPutMethods, - TotalGetMethodSize: int32(counts.totalGetMethodSize), - TotalPutMethodSize: int32(counts.totalPutMethodSize), + if !aws.BoolValue(objectList.IsTruncated) { + break } + + continuationToken = *objectList.NextContinuationToken + + } + IonosS3Buckets[bucketName] = IonosS3Resources{ + Name: bucketName, + GetMethods: totalGetMethods, + PutMethods: totalPutMethods, + TotalGetMethodSize: int32(totalGetMethodSize), + TotalPutMethodSize: int32(totalPutMethodSize), } - IonosS3Buckets = newS3IonosResources - // 
time.Sleep(time.Duration(cycletime) * time.Second) } -// CalculateS3Totals(m) - -// func CalculateS3Totals(m *sync.RWMutex) { -// var ( -// getMethodTotal int32 -// putMethodTotal int32 -// getMethodSizeTotal int64 -// putMethodSizeTotal int64 -// ) -// for _, s3Resources := range IonosS3Buckets { -// getMethodTotal += s3Resources.GetMethods -// putMethodTotal += s3Resources.PutMethods -// getMethodSizeTotal += int64(s3Resources.TotalGetMethodSize) -// putMethodSizeTotal += int64(s3Resources.TotalPutMethodSize) -// } -// TotalGetMethods = getMethodTotal - -// fmt.Println("Get method inside a calculate totals program", TotalGetMethods) -// TotalPutMethods = putMethodTotal -// TotalGetMethodSize = getMethodSizeTotal -// TotalPutMethodSize = putMethodSizeTotal -// } +func CalculateS3Totals(m *sync.RWMutex) { + var ( + getMethodTotal int32 + putMethodTotal int32 + getMethodSizeTotal int64 + putMethodSizeTotal int64 + ) + for _, s3Resources := range IonosS3Buckets { + getMethodTotal += s3Resources.GetMethods + putMethodTotal += s3Resources.PutMethods + getMethodSizeTotal += int64(s3Resources.TotalGetMethodSize) + putMethodSizeTotal += int64(s3Resources.TotalPutMethodSize) + } + m.Lock() + defer m.Unlock() + + TotalGetMethods = getMethodTotal + TotalPutMethods = putMethodTotal + TotalGetMethodSize = getMethodSizeTotal + TotalPutMethodSize = putMethodSizeTotal +} diff --git a/main.go b/main.go index c3ee762..647ce15 100644 --- a/main.go +++ b/main.go @@ -11,9 +11,10 @@ import ( ) var ( - mutex = &sync.RWMutex{} // Mutex to sync access to the Daatcenter map - exporterPort string // Port to be used for exposing the metrics - ionos_api_cycle int32 // Cycle time in seconds to query the IONOS API for changes, not th ePrometheus scraping intervall + dcMutex = &sync.RWMutex{} // Mutex to sync access to the Datacenter map + s3Mutex = &sync.RWMutex{} + exporterPort string // Port to be used for exposing the metrics + ionos_api_cycle int32 // Cycle time in seconds to query the 
IONOS API for changes, not th ePrometheus scraping intervall ) func main() { @@ -26,11 +27,11 @@ func main() { ionos_api_cycle = int32(cycletime) } // internal.IPCollectResources() - go internal.CollectResources(mutex, ionos_api_cycle) - // go internal.S3CollectResources(mutex, ionos_api_cycle) + go internal.CollectResources(dcMutex, ionos_api_cycle) + go internal.S3CollectResources(s3Mutex, ionos_api_cycle) //internal.PrintDCResources(mutex) - internal.StartPrometheus(mutex) + internal.StartPrometheus(dcMutex) http.Handle("/metrics", promhttp.Handler()) http.Handle("/healthcheck", http.HandlerFunc(internal.HealthCheck)) log.Fatal(http.ListenAndServe(":"+exporterPort, nil)) From f3568dda8af8c995bdd0082eaac8aa08de0abb1b Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 23 May 2024 12:26:14 +0200 Subject: [PATCH 06/55] implemented concurrency for s3 log parsing, fixed bugs, implemented all CRUD methods metrics --- internal/ionos.go | 10 +- internal/ipcollector.go | 13 +- internal/lbcollector.go | 15 +-- internal/prometheus.go | 166 +++++++++++++++++++++----- internal/s3collector.go | 254 ++++++++++++++++++++++------------------ main.go | 3 +- 6 files changed, 292 insertions(+), 169 deletions(-) diff --git a/internal/ionos.go b/internal/ionos.go index 2e60ebd..4cc7703 100644 --- a/internal/ionos.go +++ b/internal/ionos.go @@ -30,13 +30,13 @@ type IonosDCResources struct { func CollectResources(m *sync.RWMutex, cycletime int32) { - file, _ := os.Create("ionosoutput.txt") + // file, _ := os.Create("ionosoutput.txt") - defer file.Close() + // defer file.Close() - oldStdout := os.Stdout - defer func() { os.Stdout = oldStdout }() - os.Stdout = file + // oldStdout := os.Stdout + // defer func() { os.Stdout = oldStdout }() + // os.Stdout = file err := godotenv.Load(".env") if err != nil { diff --git a/internal/ipcollector.go b/internal/ipcollector.go index 7a4525d..1dfdcc4 100644 --- a/internal/ipcollector.go +++ b/internal/ipcollector.go @@ -3,7 +3,6 @@ package internal 
import ( "context" "fmt" - "os" ionoscloud "github.com/ionos-cloud/sdk-go/v6" "github.com/joho/godotenv" @@ -21,13 +20,13 @@ type IonosIPResources struct { } func IPCollectResources(apiClient *ionoscloud.APIClient) { - file, _ := os.Create("Ipsoutput.txt") + // file, _ := os.Create("Ipsoutput.txt") - defer file.Close() + // defer file.Close() - oldStdout := os.Stdout - defer func() { os.Stdout = oldStdout }() - os.Stdout = file + // oldStdout := os.Stdout + // defer func() { os.Stdout = oldStdout }() + // os.Stdout = file err := godotenv.Load(".env") if err != nil { @@ -53,6 +52,6 @@ func IPCollectResources(apiClient *ionoscloud.APIClient) { } } - fmt.Println("Heyo") + // fmt.Println("Heyo") } diff --git a/internal/lbcollector.go b/internal/lbcollector.go index 45c5c21..b874147 100644 --- a/internal/lbcollector.go +++ b/internal/lbcollector.go @@ -2,8 +2,6 @@ package internal import ( "context" - "fmt" - "os" ionoscloud "github.com/ionos-cloud/sdk-go/v6" ) @@ -32,15 +30,15 @@ type IonosLBResources struct { } func LoadbalancerCollector(apiClient *ionoscloud.APIClient) { - fmt.Println("Hey this is the Loadbalancer Collector") + // fmt.Println("Hey this is the Loadbalancer Collector") - file, _ := os.Create("LoadBalancerOutput.txt") + // file, _ := os.Create("LoadBalancerOutput.txt") - defer file.Close() + // defer file.Close() - oldStdout := os.Stdout - defer func() { os.Stdout = oldStdout }() - os.Stdout = file + // oldStdout := os.Stdout + // defer func() { os.Stdout = oldStdout }() + // os.Stdout = file datacenter, _, _ := apiClient.DataCentersApi.DatacentersGet(context.Background()).Depth(3).Execute() newIonosLBResources := make(map[string]IonosLBResources) @@ -58,7 +56,6 @@ func LoadbalancerCollector(apiClient *ionoscloud.APIClient) { nlbRuleNames string ) - fmt.Println("These are the datacenter Names", datacenter.Properties.Name) albList, _, _ := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), 
*datacenter.Id).Depth(3).Execute() nlbList, _, _ := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() natList, _, _ := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), *datacenter.Id).Depth(3).Execute() diff --git a/internal/prometheus.go b/internal/prometheus.go index 2d06ec0..f496d54 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -39,11 +39,19 @@ type lbCollector struct { } type s3Collector struct { - mutex *sync.RWMutex - s3TotalGetMethodSizeMetric *prometheus.GaugeVec - s3TotalPutMethodSizeMetric *prometheus.GaugeVec - s3TotalNumberOfGetRequestsMetric *prometheus.GaugeVec - s3TotalNumberOfPutRequestsMetric *prometheus.GaugeVec + mutex *sync.RWMutex + s3TotalGetRequestSizeMetric *prometheus.GaugeVec + s3TotalGetResponseSizeMetric *prometheus.GaugeVec + s3TotalPutRequestSizeMetric *prometheus.GaugeVec + s3TotalPutResponseSizeMetric *prometheus.GaugeVec + s3TotalPostRequestSizeMetric *prometheus.GaugeVec + s3TotalPostResponseSizeMetric *prometheus.GaugeVec + s3TotalHeadRequestSizeMetric *prometheus.GaugeVec + s3TotalHeadResponseSizeMetric *prometheus.GaugeVec + s3TotalNumberOfGetRequestsMetric *prometheus.GaugeVec + s3TotalNumberOfPutRequestsMetric *prometheus.GaugeVec + s3TotalNumberOfPostRequestsMetric *prometheus.GaugeVec + s3TotalNumberOfHeadRequestsMetric *prometheus.GaugeVec } // var mutex *sync.RWMutex @@ -122,25 +130,60 @@ func newIonosCollector(m *sync.RWMutex) *ionosCollector { } } +// maybe I should not define for every single one of methods an metric +// but like export the method_name also and just say total_request_size.. +// and it will show it for that method? 
func newS3Collector(m *sync.RWMutex) *s3Collector { return &s3Collector{ mutex: m, - s3TotalGetMethodSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_size_of_get_requests_in_bytes", - Help: "Gives the total size of s3 GET HTTP Request in Bytes", - }, []string{"bucket_name"}), - s3TotalPutMethodSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_size_of_put_requests_in_bytes", - Help: "Gives the total size of s3 PUT HTTP Request in Bytes", - }, []string{"bucket_name"}), + s3TotalGetRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_get_request_size_in_bytes", + Help: "Gives the total size of s3 GET Request in Bytes in one Bucket", + }, []string{"bucket_name", "method_name"}), + s3TotalGetResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_get_response_size_in_bytes", + Help: "Gives the total size of s3 GET Response in Bytes in one Bucket", + }, []string{"bucket_name", "method_name"}), + s3TotalPutRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_put_request_size_in_bytes", + Help: "Gives the total size of s3 PUT Request in Bytes in one Bucket", + }, []string{"bucket_name", "method_name"}), + s3TotalPutResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_put_response_size_in_bytes", + Help: "Gives the total size of s3 PUT Response in Bytes in one Bucket", + }, []string{"bucket_name", "method_name"}), + s3TotalPostRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_post_request_size_in_bytes", + Help: "Gives the total size of s3 POST Request in Bytes in one Bucket", + }, []string{"bucket_name", "method_name"}), + s3TotalPostResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_post_response_size_in_bytes", + Help: "Gives the total size of s3 POST Response in Bytes in one Bucket", + }, []string{"bucket_name", "method_name"}), + 
s3TotalHeadRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_head_request_size_in_bytes", + Help: "Gives the total size of s3 HEAD Request in Bytes in one Bucket", + }, []string{"bucket_name", "method_name"}), + s3TotalHeadResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_head_response_size_in_bytes", + Help: "Gives the total size of s3 HEAD Response in Bytes in one Bucket", + }, []string{"bucket_name", "method_name"}), s3TotalNumberOfGetRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_get_requests", - Help: "Gives the total number of S3 GET HTTP Requests", - }, []string{"bucket_name"}), + Help: "Gives the total number of S3 GET HTTP Requests in one Bucket", + }, []string{"bucket_name", "method_name"}), s3TotalNumberOfPutRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_put_requests", - Help: "Gives the total number of S3 PUT HTTP Requests", - }, []string{"bucket_name"}), + Help: "Gives the total number of S3 PUT HTTP Requests in one Bucket", + }, []string{"bucket_name", "method_name"}), + s3TotalNumberOfPostRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_post_requests", + Help: "Gives the total number of S3 Post Requests in one Bucket", + }, []string{"bucket_name", "method_name"}), + s3TotalNumberOfHeadRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_head_requests", + Help: "Gives the total number of S3 HEAD HTTP Requests in one Bucket", + }, []string{"bucket_name", "method_name"}), } } @@ -181,32 +224,91 @@ func (collector *lbCollector) Collect(ch chan<- prometheus.Metric) { } func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { - collector.s3TotalGetMethodSizeMetric.Describe(ch) - collector.s3TotalPutMethodSizeMetric.Describe(ch) + collector.s3TotalGetRequestSizeMetric.Describe(ch) + 
collector.s3TotalGetResponseSizeMetric.Describe(ch) + collector.s3TotalPutRequestSizeMetric.Describe(ch) + collector.s3TotalPutResponseSizeMetric.Describe(ch) + collector.s3TotalPostRequestSizeMetric.Describe(ch) + collector.s3TotalPostResponseSizeMetric.Describe(ch) + collector.s3TotalHeadRequestSizeMetric.Describe(ch) + collector.s3TotalHeadResponseSizeMetric.Describe(ch) collector.s3TotalNumberOfGetRequestsMetric.Describe(ch) collector.s3TotalNumberOfPutRequestsMetric.Describe(ch) + collector.s3TotalNumberOfPostRequestsMetric.Describe(ch) + collector.s3TotalNumberOfHeadRequestsMetric.Describe(ch) } func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { collector.mutex.RLock() defer collector.mutex.RUnlock() + collector.s3TotalGetRequestSizeMetric.Reset() + collector.s3TotalGetResponseSizeMetric.Reset() + collector.s3TotalPutRequestSizeMetric.Reset() + collector.s3TotalPutResponseSizeMetric.Reset() + collector.s3TotalPostRequestSizeMetric.Reset() + collector.s3TotalPostResponseSizeMetric.Reset() + collector.s3TotalHeadRequestSizeMetric.Reset() + collector.s3TotalHeadResponseSizeMetric.Reset() + collector.s3TotalNumberOfGetRequestsMetric.Reset() + collector.s3TotalNumberOfPutRequestsMetric.Reset() + collector.s3TotalNumberOfPostRequestsMetric.Reset() + collector.s3TotalNumberOfHeadRequestsMetric.Reset() + for s3Name, s3Resources := range IonosS3Buckets { - collector.s3TotalGetMethodSizeMetric.Reset() - collector.s3TotalPutMethodSizeMetric.Reset() - collector.s3TotalNumberOfGetRequestsMetric.Reset() - collector.s3TotalNumberOfPutRequestsMetric.Reset() - collector.s3TotalGetMethodSizeMetric.WithLabelValues(s3Name).Set(float64(s3Resources.TotalGetMethodSize)) - collector.s3TotalPutMethodSizeMetric.WithLabelValues(s3Name).Set(float64(s3Resources.TotalPutMethodSize)) - collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name).Set(float64(s3Resources.GetMethods)) - 
collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name).Set(float64(s3Resources.PutMethods)) - - collector.s3TotalGetMethodSizeMetric.Collect(ch) - collector.s3TotalPutMethodSizeMetric.Collect(ch) - collector.s3TotalNumberOfGetRequestsMetric.Collect(ch) - collector.s3TotalNumberOfPutRequestsMetric.Collect(ch) + for method, requestSize := range s3Resources.RequestSizes { + switch method { + case MethodGET: + collector.s3TotalGetRequestSizeMetric.WithLabelValues(s3Name, method).Set(float64(requestSize)) + case MethodPOST: + collector.s3TotalPostRequestSizeMetric.WithLabelValues(s3Name, method).Set(float64(requestSize)) + case MethodHEAD: + collector.s3TotalHeadRequestSizeMetric.WithLabelValues(s3Name, method).Set(float64(requestSize)) + case MethodPUT: + collector.s3TotalPutRequestSizeMetric.WithLabelValues(s3Name, method).Set(float64(requestSize)) + } + + } + for method, responseSize := range s3Resources.ResponseSizes { + switch method { + case MethodGET: + collector.s3TotalGetResponseSizeMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + case MethodPOST: + collector.s3TotalPostResponseSizeMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + case MethodHEAD: + collector.s3TotalHeadResponseSizeMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + case MethodPUT: + collector.s3TotalPutResponseSizeMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + } + } + + for method, responseSize := range s3Resources.Methods { + switch method { + case MethodGET: + collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + case MethodPOST: + collector.s3TotalNumberOfPostRequestsMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + case MethodHEAD: + collector.s3TotalNumberOfHeadRequestsMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + case MethodPUT: + collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name, 
method).Set(float64(responseSize)) + } + } } + collector.s3TotalGetRequestSizeMetric.Collect(ch) + collector.s3TotalGetResponseSizeMetric.Collect(ch) + collector.s3TotalPutRequestSizeMetric.Collect(ch) + collector.s3TotalPutResponseSizeMetric.Collect(ch) + collector.s3TotalPostRequestSizeMetric.Collect(ch) + collector.s3TotalPostResponseSizeMetric.Collect(ch) + collector.s3TotalHeadRequestSizeMetric.Collect(ch) + collector.s3TotalHeadResponseSizeMetric.Collect(ch) + collector.s3TotalNumberOfGetRequestsMetric.Collect(ch) + collector.s3TotalNumberOfPutRequestsMetric.Collect(ch) + collector.s3TotalNumberOfPostRequestsMetric.Collect(ch) + collector.s3TotalNumberOfHeadRequestsMetric.Collect(ch) + } // Each and every collector must implement the Describe function. diff --git a/internal/s3collector.go b/internal/s3collector.go index 7c472ae..b8077e6 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "os" + "regexp" "strconv" "strings" "sync" @@ -16,29 +17,40 @@ import ( "github.com/aws/aws-sdk-go/service/s3" ) +type EndpointConfig struct { + Region string + AccessKey string + SecretKey string + Endpoint string +} + var ( - TotalGetMethods int32 = 0 - TotalGetMethodSize int64 = 0 - TotalPutMethodSize int64 = 0 - TotalPutMethods int32 = 0 - IonosS3Buckets = make(map[string]IonosS3Resources) + // Global totals + TotalMetrics = Metrics{} + // IonosS3Buckets + IonosS3Buckets = make(map[string]Metrics) ) -type IonosS3Resources struct { - Name string - GetMethods int32 - PutMethods int32 - HeadMethods int32 - PostMethods int32 - TotalGetMethodSize int32 - TotalPutMethodSize int32 +type Metrics struct { + Methods map[string]int32 + RequestSizes map[string]int64 + ResponseSizes map[string]int64 } +const ( + MethodGET = "GET" + MethodPUT = "PUT" + MethodPOST = "POST" + MethodHEAD = "HEAD" +) + const ( objectPerPage = 100 maxConcurrent = 10 ) +var metricsMutex sync.Mutex + func createS3ServiceClient(region, accessKey, secretKey, 
endpoint string) (*s3.S3, error) { sess, err := session.NewSession(&aws.Config{ Region: aws.String(region), @@ -52,42 +64,48 @@ func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S } func S3CollectResources(m *sync.RWMutex, cycletime int32) { - // accessKey := os.Getenv("IONOS_ACCESS_KEY") - // secretKey := os.Getenv("IONOS_SECRET_KEY") - + accessKey := os.Getenv("AWS_ACCESS_KEY_ID") + secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY") file, _ := os.Create("S3ioutput.txt") defer file.Close() oldStdout := os.Stdout defer func() { os.Stdout = oldStdout }() os.Stdout = file - //TODO YAML konfiguration - // Define endpoint configurations - endpoints := map[string]struct { - Region, AccessKey, SecretKey, Endpoint string - }{ - "de": {"de", "", "+", "https://s3-eu-central-1.ionoscloud.com"}, - "eu-central-2": {"eu-central-2", "", "+", "https://s3-eu-central-2.ionoscloud.com"}, - - // Add more endpoints as needed + fmt.Println("ACESSEKEY", accessKey) + if accessKey == "" || secretKey == "" { + fmt.Println("AWS credentials are not set in the environment variables.") + return + } + endpoints := map[string]EndpointConfig{ + "de": { + Region: "de", + AccessKey: accessKey, + SecretKey: secretKey, + Endpoint: "https://s3-eu-central-1.ionoscloud.com", + }, + "eu-central-2": { + Region: "eu-central-2", + AccessKey: accessKey, + SecretKey: secretKey, + Endpoint: "https://s3-eu-central-2.ionoscloud.com", + }, } - var wg sync.WaitGroup semaphore := make(chan struct{}, maxConcurrent) - for { + var wg sync.WaitGroup for endpoint, config := range endpoints { + if _, exists := IonosS3Buckets[endpoint]; exists { continue } - client, err := createS3ServiceClient(config.Region, config.AccessKey, config.SecretKey, config.Endpoint) if err != nil { fmt.Printf("Erropr creating service client for endpoint %s: %v\n", endpoint, err) continue } - fmt.Println("Using service client for endpoint:", endpoint) result, err := client.ListBuckets(nil) @@ -97,40 +115,56 @@ func 
S3CollectResources(m *sync.RWMutex, cycletime int32) { continue } - wg.Add(len(result.Buckets)) - for _, bucket := range result.Buckets { + bucketName := *bucket.Name + wg.Add(1) + if err := GetHeadBucket(client, bucketName); err != nil { + if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 { + wg.Done() + continue + } + fmt.Println("Error checking the bucket head:", err) + wg.Done() + continue + } + semaphore <- struct{}{} go func(bucketName string) { defer func() { <-semaphore wg.Done() }() - processBucket(client, bucketName) }(*bucket.Name) + // wg.Wait() //when we want sequentiel here wait for bucket to finish } } + fmt.Println("Before the wait") wg.Wait() - CalculateS3Totals(m) - fmt.Println("This is end of before sleep") - + fmt.Println("After the wait") + fmt.Println("This is before sleep") time.Sleep(time.Duration(cycletime) * time.Second) } } func processBucket(client *s3.S3, bucketName string) { - var ( - totalGetMethods int32 = 0 - totalPutMethods int32 = 0 - totalGetMethodSize int64 = 0 - totalPutMethodSize int64 = 0 - ) + // var logEntryRegex = regexp.MustCompile(`(?)(GET|PUT|HEAD|POST) .+? 
(\d+) (\d+)`) + // var logEntryRegex = regexp.MustCompile(`(\w+) \/[^"]*" \d+ \S+ (\d+) - \d+ (\d+)`) + var logEntryRegex = regexp.MustCompile(`(GET|PUT|HEAD|POST) \/[^"]*" \d+ \S+ (\d+|-) (\d+|-) \d+ (\d+|-)`) - continuationToken := "" + // fmt.Println("Regex Pattern:", logEntryRegex.String()) + metrics := Metrics{ + Methods: make(map[string]int32), + RequestSizes: make(map[string]int64), + ResponseSizes: make(map[string]int64), + } + + semaphore := make(chan struct{}, maxConcurrent) + var wg sync.WaitGroup + continuationToken := "" for { objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ Bucket: aws.String(bucketName), @@ -145,6 +179,11 @@ func processBucket(client *s3.S3, bucketName string) { case "NoSuchBucket": fmt.Printf("bucket %s does not exist\n", bucketName) default: + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "AccessDenied" { + fmt.Println("ACCESS DENIED") + } + } fmt.Printf("error listing objects in bucket %s: %s\n", bucketName, aerr.Message()) } } @@ -157,89 +196,74 @@ func processBucket(client *s3.S3, bucketName string) { } for _, object := range objectList.Contents { - downloadInput := &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(*object.Key), - } - - result, err := client.GetObject(downloadInput) - - if err != nil { - fmt.Println("Error downloading object", err) - continue - } - defer result.Body.Close() - - logContent, err := io.ReadAll(result.Body) - - if err != nil { - fmt.Println("Error reading log content:", err) - continue - } + wg.Add(1) + semaphore <- struct{}{} + go func(bucketNme, objectkey string) { + defer func() { + <-semaphore + wg.Done() + }() + + downloadInput := &s3.GetObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(*object.Key), + } - fields := strings.Fields(string(logContent)) - bucketMethod := fields[9] + result, err := client.GetObject(downloadInput) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "AccessDenied" 
{ + fmt.Printf("Access Denied error for object %s in bucket %s\n", *object.Key, bucketName) + return + } + } + fmt.Println("Error downloading object", err) + return + } + defer result.Body.Close() - sizeStrGet := fields[14] - sizeStrPut := fields[16] - sizeGet, err := strconv.ParseInt(sizeStrGet, 10, 64) + logContent, err := io.ReadAll(result.Body) - if err != nil { - fmt.Println("Error parsing GET size:", err) - continue - } - sizePut, err := strconv.ParseInt(sizeStrPut, 10, 64) + logLine := strings.Fields(string(logContent)) - if err != nil { - fmt.Println("Error parsing PUT size:", err) - continue - } + if err != nil { + fmt.Println("Problem reading the body", err) + } - switch bucketMethod { - case "\"GET": - totalGetMethods++ - totalGetMethodSize += sizeGet - case "\"PUT": - totalPutMethods++ - totalPutMethodSize += sizePut - default: - } + matches := logEntryRegex.FindAllStringSubmatch(string(logContent), -1) + fmt.Println("Matches:", matches) + for _, match := range matches { + method := match[1] + + requestSizeStr := match[2] + requestSize, err := strconv.ParseInt(requestSizeStr, 10, 64) + if err != nil { + fmt.Printf("Error parsing size : %v", err) + } + + responseSizeStr := match[3] + responseSize, err := strconv.ParseInt(responseSizeStr, 10, 64) + if err != nil { + fmt.Printf("Error parsing size: %v", err) + } + + metricsMutex.Lock() + // fmt.Println("Log line", logLine) + + metrics.Methods[method]++ + metrics.RequestSizes[method] += requestSize + metrics.ResponseSizes[method] += responseSize + metricsMutex.Unlock() + } + }(bucketName, *object.Key) } if !aws.BoolValue(objectList.IsTruncated) { break } - continuationToken = *objectList.NextContinuationToken - - } - IonosS3Buckets[bucketName] = IonosS3Resources{ - Name: bucketName, - GetMethods: totalGetMethods, - PutMethods: totalPutMethods, - TotalGetMethodSize: int32(totalGetMethodSize), - TotalPutMethodSize: int32(totalPutMethodSize), - } -} - -func CalculateS3Totals(m *sync.RWMutex) { - var ( - 
getMethodTotal int32 - putMethodTotal int32 - getMethodSizeTotal int64 - putMethodSizeTotal int64 - ) - for _, s3Resources := range IonosS3Buckets { - getMethodTotal += s3Resources.GetMethods - putMethodTotal += s3Resources.PutMethods - getMethodSizeTotal += int64(s3Resources.TotalGetMethodSize) - putMethodSizeTotal += int64(s3Resources.TotalPutMethodSize) } - m.Lock() - defer m.Unlock() - - TotalGetMethods = getMethodTotal - TotalPutMethods = putMethodTotal - TotalGetMethodSize = getMethodSizeTotal - TotalPutMethodSize = putMethodSizeTotal + wg.Wait() + IonosS3Buckets[bucketName] = metrics } diff --git a/main.go b/main.go index 647ce15..993a225 100644 --- a/main.go +++ b/main.go @@ -27,8 +27,9 @@ func main() { ionos_api_cycle = int32(cycletime) } // internal.IPCollectResources() - go internal.CollectResources(dcMutex, ionos_api_cycle) + // go internal.CollectResources(dcMutex, ionos_api_cycle) go internal.S3CollectResources(s3Mutex, ionos_api_cycle) + // internal.ListObjects() //internal.PrintDCResources(mutex) internal.StartPrometheus(dcMutex) From 3a3e559a59821addb89ba3ce9aa0526d025b9ef1 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 23 May 2024 14:26:27 +0200 Subject: [PATCH 07/55] fixing github action --- .github/workflows/build-and-push-to-ghcr.yml | 5 +++++ charts/ionos-exporter/templates/deployment.yaml | 15 +++++++++++++++ charts/ionos-exporter/values.yaml | 2 ++ 3 files changed, 22 insertions(+) diff --git a/.github/workflows/build-and-push-to-ghcr.yml b/.github/workflows/build-and-push-to-ghcr.yml index a5587ee..1a18c26 100644 --- a/.github/workflows/build-and-push-to-ghcr.yml +++ b/.github/workflows/build-and-push-to-ghcr.yml @@ -10,6 +10,11 @@ on: jobs: build_image_on_push: + permissions: + packages: write + security-events: write + actions: read + contents: read uses: ./.github/workflows/imagetoghcr-on-push.yaml with: image_name: "ionos-exporter" diff --git a/charts/ionos-exporter/templates/deployment.yaml 
b/charts/ionos-exporter/templates/deployment.yaml index 497c1ab..c918ae9 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -46,6 +46,21 @@ spec: resources: {{- toYaml .Values.resources | nindent 12 }} env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws-credentials-secret + key: accessKey + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-credentials-secret + key: secretKey + - name: IONOS_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.ionos_credentials_secret_token }} + key: {{ .Values.ionos_credentials_token_key }} - name: IONOS_USERNAME valueFrom: secretKeyRef: diff --git a/charts/ionos-exporter/values.yaml b/charts/ionos-exporter/values.yaml index 25f983a..da2861c 100644 --- a/charts/ionos-exporter/values.yaml +++ b/charts/ionos-exporter/values.yaml @@ -10,6 +10,8 @@ image: imagePullSecrets: [] # credentials +ionos_credentials_secret_token: "ionos-exporter-token" +ionos_credentials_token_key: "token" ionos_credentials_secret_name: "ionos-exporter-credentials" ionos_credentials_username_key: "username" ionos_credentials_password_key: "password" From 2c2356489287bc660cd00a6fa665448464d0e071 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 23 May 2024 14:33:18 +0200 Subject: [PATCH 08/55] moved GetHeadBucket from local file to helper.go --- internal/helper.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/internal/helper.go b/internal/helper.go index 89f8633..3a212db 100644 --- a/internal/helper.go +++ b/internal/helper.go @@ -2,9 +2,11 @@ package internal import ( "fmt" + "log" "os" aws "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" @@ -52,3 +54,20 @@ func HasLogsFolder(client *s3.S3, bucketName string) bool { return len(result.Contents) > 0 } + +func GetHeadBucket(client 
*s3.S3, bucketName string) error { + input := &s3.HeadBucketInput{ + Bucket: aws.String(bucketName), + } + _, err := client.HeadBucket(input) + if err != nil { + if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 { + log.Printf("Skipping bucket %s due to Forbidden error: %v\n", bucketName, err) + return err + } + log.Printf("Problem getting the location for bucket %s: %v\n", bucketName, err) + return err + } + log.Printf("Bucket %s exists and is accessible\n", bucketName) + return nil +} From 2222a26a50f92758a2b889fbcb15358692767e5f Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 23 May 2024 14:36:26 +0200 Subject: [PATCH 09/55] fixed declared and not used error --- internal/s3collector.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/s3collector.go b/internal/s3collector.go index b8077e6..2b4777b 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -6,7 +6,6 @@ import ( "os" "regexp" "strconv" - "strings" "sync" "time" @@ -225,7 +224,7 @@ func processBucket(client *s3.S3, bucketName string) { logContent, err := io.ReadAll(result.Body) - logLine := strings.Fields(string(logContent)) + // logLine := strings.Fields(string(logContent)) if err != nil { fmt.Println("Problem reading the body", err) From cf7f08b87645570f0a02878832e9f0bd152c218a Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 23 May 2024 14:41:09 +0200 Subject: [PATCH 10/55] added go mod tidy for installing dependencies --- Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Dockerfile b/Dockerfile index 9dfda21..31ab7aa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,8 @@ COPY go.sum . RUN go mod download +RUN go mod tidy + FROM build_deps AS build COPY . . 
From 1bc7e187731238f8e5ed996b855829505f3cf671 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 23 May 2024 16:34:11 +0200 Subject: [PATCH 11/55] removed some of log Statements and added secret management for s3 --- charts/ionos-exporter/templates/deployment.yaml | 13 ++++--------- charts/ionos-exporter/values.yaml | 6 ++++-- internal/s3collector.go | 1 - 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index c918ae9..2882c62 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -49,18 +49,13 @@ spec: - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: - name: aws-credentials-secret - key: accessKey + name: {{ .Values.ionos_credentials_secret_name }} + key: {{ .Values.ionos_s3_credentials_access_key }} - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: - name: aws-credentials-secret - key: secretKey - - name: IONOS_TOKEN - valueFrom: - secretKeyRef: - name: {{ .Values.ionos_credentials_secret_token }} - key: {{ .Values.ionos_credentials_token_key }} + name: {{ .Values.ionos_credentials_secret_name }} + key: {{ .Values.ionos_s3_credentials_secret_key }} - name: IONOS_USERNAME valueFrom: secretKeyRef: diff --git a/charts/ionos-exporter/values.yaml b/charts/ionos-exporter/values.yaml index da2861c..b55b3bc 100644 --- a/charts/ionos-exporter/values.yaml +++ b/charts/ionos-exporter/values.yaml @@ -10,11 +10,13 @@ image: imagePullSecrets: [] # credentials -ionos_credentials_secret_token: "ionos-exporter-token" -ionos_credentials_token_key: "token" +# ionos_credentials_secret_token: "ionos-exporter-token" +# ionos_credentials_token_key: "token" ionos_credentials_secret_name: "ionos-exporter-credentials" ionos_credentials_username_key: "username" ionos_credentials_password_key: "password" +ionos_s3_credentials_secret_key: "secretKey" +ionos_s3_credentials_access_key: "accessKey" service: type: 
ClusterIP diff --git a/internal/s3collector.go b/internal/s3collector.go index 2b4777b..da19e8d 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -71,7 +71,6 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { oldStdout := os.Stdout defer func() { os.Stdout = oldStdout }() os.Stdout = file - fmt.Println("ACESSEKEY", accessKey) if accessKey == "" || secretKey == "" { fmt.Println("AWS credentials are not set in the environment variables.") return From b1b2614291b25a7308d824cd8bb2223111e77c19 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 24 May 2024 08:50:45 +0200 Subject: [PATCH 12/55] resotred CollectResources func in main --- main.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/main.go b/main.go index 993a225..502e270 100644 --- a/main.go +++ b/main.go @@ -18,18 +18,14 @@ var ( ) func main() { - //internal.CollectResources(mutex, ionos_api_cycle) - //internal.BasicAuthExample() exporterPort = internal.GetEnv("IONOS_EXPORTER_APPLICATION_CONTAINER_PORT", "9100") if cycletime, err := strconv.ParseInt(internal.GetEnv("IONOS_EXPORTER_API_CYCLE", "300"), 10, 32); err != nil { log.Fatal("Cannot convert IONOS_API_CYCLE to int") } else { ionos_api_cycle = int32(cycletime) } - // internal.IPCollectResources() - // go internal.CollectResources(dcMutex, ionos_api_cycle) + go internal.CollectResources(dcMutex, ionos_api_cycle) go internal.S3CollectResources(s3Mutex, ionos_api_cycle) - // internal.ListObjects() //internal.PrintDCResources(mutex) internal.StartPrometheus(dcMutex) From f7abeccbb75fbfedde4a300a4e3d38bece2a129c Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 24 May 2024 10:40:51 +0200 Subject: [PATCH 13/55] reverted api cycle to 900 --- charts/ionos-exporter/templates/deployment.yaml | 2 +- main.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index 2882c62..08c720e 100644 --- 
a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -16,7 +16,7 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} labels: - {{- include "ionos-exporter.selectorLabels" . | nindent 8 }} + {{- include "ionos-exporter.selectorLabel" . | nindent 8 }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: diff --git a/main.go b/main.go index 502e270..9a35c36 100644 --- a/main.go +++ b/main.go @@ -19,7 +19,7 @@ var ( func main() { exporterPort = internal.GetEnv("IONOS_EXPORTER_APPLICATION_CONTAINER_PORT", "9100") - if cycletime, err := strconv.ParseInt(internal.GetEnv("IONOS_EXPORTER_API_CYCLE", "300"), 10, 32); err != nil { + if cycletime, err := strconv.ParseInt(internal.GetEnv("IONOS_EXPORTER_API_CYCLE", "900"), 10, 32); err != nil { log.Fatal("Cannot convert IONOS_API_CYCLE to int") } else { ionos_api_cycle = int32(cycletime) From 73d98ac69b82a5c9e115f70b691a14fbbba83030 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 24 May 2024 13:57:58 +0200 Subject: [PATCH 14/55] removed logging to a file --- internal/s3collector.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/s3collector.go b/internal/s3collector.go index da19e8d..6212704 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -65,12 +65,12 @@ func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S func S3CollectResources(m *sync.RWMutex, cycletime int32) { accessKey := os.Getenv("AWS_ACCESS_KEY_ID") secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY") - file, _ := os.Create("S3ioutput.txt") - defer file.Close() + // file, _ := os.Create("S3ioutput.txt") + // defer file.Close() - oldStdout := os.Stdout - defer func() { os.Stdout = oldStdout }() - os.Stdout = file + // oldStdout := os.Stdout + // defer func() { os.Stdout = oldStdout }() + // os.Stdout = file if accessKey == "" || secretKey == "" { fmt.Println("AWS credentials are not set in the environment variables.") return 
From 8ea6e5d6fa27cf6fff1346e8fb73ba424a15ce91 Mon Sep 17 00:00:00 2001 From: efidoris Date: Tue, 28 May 2024 15:50:37 +0200 Subject: [PATCH 15/55] added new labels removed some comments --- internal/ionos.go | 2 +- internal/ipcollector.go | 12 -------- internal/prometheus.go | 50 ++++++++++++++++---------------- internal/s3collector.go | 63 +++++++++++++++++++++++++---------------- 4 files changed, 66 insertions(+), 61 deletions(-) diff --git a/internal/ionos.go b/internal/ionos.go index 4cc7703..65c663e 100644 --- a/internal/ionos.go +++ b/internal/ionos.go @@ -48,7 +48,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { cfgENV := ionoscloud.NewConfigurationFromEnv() // cfg.Debug = true - cfgENV.Debug = true + cfgENV.Debug = false apiClient := ionoscloud.NewAPIClient(cfgENV) for { diff --git a/internal/ipcollector.go b/internal/ipcollector.go index 1dfdcc4..bc6cbfa 100644 --- a/internal/ipcollector.go +++ b/internal/ipcollector.go @@ -20,21 +20,12 @@ type IonosIPResources struct { } func IPCollectResources(apiClient *ionoscloud.APIClient) { - // file, _ := os.Create("Ipsoutput.txt") - - // defer file.Close() - - // oldStdout := os.Stdout - // defer func() { os.Stdout = oldStdout }() - // os.Stdout = file - err := godotenv.Load(".env") if err != nil { fmt.Println("Error loading .env file") } newIonosIPResources := make(map[string]IonosIPResources) - // newIonosIPResources := make(map[string]IonosIPResources) ipBlocks, _, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(3).Execute() if err != nil { @@ -44,7 +35,6 @@ func IPCollectResources(apiClient *ionoscloud.APIClient) { totalIPs = 0 for _, ips := range *ipBlocks.Items { totalIPs += *ips.Properties.Size - fmt.Println("Hey this is the size of IPs", totalIPs) newIonosIPResources[*ips.Properties.Name] = IonosIPResources{ @@ -52,6 +42,4 @@ func IPCollectResources(apiClient *ionoscloud.APIClient) { } } - // fmt.Println("Heyo") - } diff --git a/internal/prometheus.go 
b/internal/prometheus.go index f496d54..52ef4e5 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -139,51 +139,51 @@ func newS3Collector(m *sync.RWMutex) *s3Collector { s3TotalGetRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_get_request_size_in_bytes", Help: "Gives the total size of s3 GET Request in Bytes in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalGetResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_get_response_size_in_bytes", Help: "Gives the total size of s3 GET Response in Bytes in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalPutRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_put_request_size_in_bytes", Help: "Gives the total size of s3 PUT Request in Bytes in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalPutResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_put_response_size_in_bytes", Help: "Gives the total size of s3 PUT Response in Bytes in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalPostRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_post_request_size_in_bytes", Help: "Gives the total size of s3 POST Request in Bytes in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalPostResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_post_response_size_in_bytes", Help: "Gives the total size of s3 POST Response in Bytes in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalHeadRequestSizeMetric: 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_head_request_size_in_bytes", Help: "Gives the total size of s3 HEAD Request in Bytes in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalHeadResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_head_response_size_in_bytes", Help: "Gives the total size of s3 HEAD Response in Bytes in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalNumberOfGetRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_get_requests", Help: "Gives the total number of S3 GET HTTP Requests in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalNumberOfPutRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_put_requests", Help: "Gives the total number of S3 PUT HTTP Requests in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalNumberOfPostRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_post_requests", Help: "Gives the total number of S3 Post Requests in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), s3TotalNumberOfHeadRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_head_requests", Help: "Gives the total number of S3 HEAD HTTP Requests in one Bucket", - }, []string{"bucket_name", "method_name"}), + }, []string{"bucket", "method", "region", "owner"}), } } @@ -256,42 +256,44 @@ func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { collector.s3TotalNumberOfHeadRequestsMetric.Reset() for s3Name, s3Resources := range IonosS3Buckets { + region := s3Resources.Regions + owner := s3Resources.Owner for method, 
requestSize := range s3Resources.RequestSizes { switch method { case MethodGET: - collector.s3TotalGetRequestSizeMetric.WithLabelValues(s3Name, method).Set(float64(requestSize)) + collector.s3TotalGetRequestSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(requestSize)) case MethodPOST: - collector.s3TotalPostRequestSizeMetric.WithLabelValues(s3Name, method).Set(float64(requestSize)) + collector.s3TotalPostRequestSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(requestSize)) case MethodHEAD: - collector.s3TotalHeadRequestSizeMetric.WithLabelValues(s3Name, method).Set(float64(requestSize)) + collector.s3TotalHeadRequestSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(requestSize)) case MethodPUT: - collector.s3TotalPutRequestSizeMetric.WithLabelValues(s3Name, method).Set(float64(requestSize)) + collector.s3TotalPutRequestSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(requestSize)) } } for method, responseSize := range s3Resources.ResponseSizes { switch method { case MethodGET: - collector.s3TotalGetResponseSizeMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + collector.s3TotalGetResponseSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) case MethodPOST: - collector.s3TotalPostResponseSizeMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + collector.s3TotalPostResponseSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) case MethodHEAD: - collector.s3TotalHeadResponseSizeMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + collector.s3TotalHeadResponseSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) case MethodPUT: - collector.s3TotalPutResponseSizeMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + collector.s3TotalPutResponseSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) } } for 
method, responseSize := range s3Resources.Methods { switch method { case MethodGET: - collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) case MethodPOST: - collector.s3TotalNumberOfPostRequestsMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + collector.s3TotalNumberOfPostRequestsMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) case MethodHEAD: - collector.s3TotalNumberOfHeadRequestsMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + collector.s3TotalNumberOfHeadRequestsMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) case MethodPUT: - collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name, method).Set(float64(responseSize)) + collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) } } } diff --git a/internal/s3collector.go b/internal/s3collector.go index 6212704..a2df25a 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -3,6 +3,7 @@ package internal import ( "fmt" "io" + "log" "os" "regexp" "strconv" @@ -34,6 +35,8 @@ type Metrics struct { Methods map[string]int32 RequestSizes map[string]int64 ResponseSizes map[string]int64 + Regions string + Owner string } const ( @@ -44,6 +47,7 @@ const ( ) const ( + //pagination 100 objects are on one page in a bucket objectPerPage = 100 maxConcurrent = 10 ) @@ -101,7 +105,7 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { client, err := createS3ServiceClient(config.Region, config.AccessKey, config.SecretKey, config.Endpoint) if err != nil { - fmt.Printf("Erropr creating service client for endpoint %s: %v\n", endpoint, err) + fmt.Printf("Error creating service client for endpoint %s: %v\n", endpoint, err) continue } fmt.Println("Using service client for 
endpoint:", endpoint) @@ -134,14 +138,11 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { }() processBucket(client, bucketName) }(*bucket.Name) - // wg.Wait() //when we want sequentiel here wait for bucket to finish + // wg.Wait() //when we want sequential parsing we ca wait here for bucket to finish } } - fmt.Println("Before the wait") wg.Wait() - fmt.Println("After the wait") - fmt.Println("This is before sleep") time.Sleep(time.Duration(cycletime) * time.Second) } @@ -158,11 +159,29 @@ func processBucket(client *s3.S3, bucketName string) { Methods: make(map[string]int32), RequestSizes: make(map[string]int64), ResponseSizes: make(map[string]int64), + Regions: "", + Owner: "", } + metrics.Regions = *client.Config.Region semaphore := make(chan struct{}, maxConcurrent) var wg sync.WaitGroup continuationToken := "" + //owner + getAclInput := &s3.GetBucketAclInput{ + Bucket: aws.String(bucketName), + } + getAclOutput, err := client.GetBucketAcl(getAclInput) + if err != nil { + log.Printf("Error retrieving ACL for bucket %s: %v\n", bucketName, err) + return + } + + if len(*getAclOutput.Owner.DisplayName) > 0 { + metrics.Owner = *getAclOutput.Owner.DisplayName + } else { + metrics.Owner = "Unknown" + } for { objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ Bucket: aws.String(bucketName), @@ -223,35 +242,31 @@ func processBucket(client *s3.S3, bucketName string) { logContent, err := io.ReadAll(result.Body) - // logLine := strings.Fields(string(logContent)) - if err != nil { fmt.Println("Problem reading the body", err) } - matches := logEntryRegex.FindAllStringSubmatch(string(logContent), -1) - fmt.Println("Matches:", matches) for _, match := range matches { method := match[1] - - requestSizeStr := match[2] - requestSize, err := strconv.ParseInt(requestSizeStr, 10, 64) - if err != nil { - fmt.Printf("Error parsing size : %v", err) + requestSizeStr := match[3] + responseSizeStr := match[2] + metricsMutex.Lock() + if requestSizeStr != "-" { + 
requestSize, err := strconv.ParseInt(requestSizeStr, 10, 64) + if err != nil { + fmt.Printf("Error parsing size: %v", err) + } + metrics.RequestSizes[method] += requestSize } - - responseSizeStr := match[3] - responseSize, err := strconv.ParseInt(responseSizeStr, 10, 64) - if err != nil { - fmt.Printf("Error parsing size: %v", err) + if responseSizeStr != "-" { + responseSize, err := strconv.ParseInt(responseSizeStr, 10, 64) + if err != nil { + fmt.Printf("Error parsing size: %v", err) + } + metrics.ResponseSizes[method] += responseSize } - metricsMutex.Lock() - // fmt.Println("Log line", logLine) - metrics.Methods[method]++ - metrics.RequestSizes[method] += requestSize - metrics.ResponseSizes[method] += responseSize metricsMutex.Unlock() } }(bucketName, *object.Key) From 3511721e571af68fc884a83a8063771e9536cb50 Mon Sep 17 00:00:00 2001 From: simoncolincap Date: Wed, 29 May 2024 07:42:01 +0000 Subject: [PATCH 16/55] Fix typo --- charts/ionos-exporter/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index 08c720e..2882c62 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -16,7 +16,7 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} labels: - {{- include "ionos-exporter.selectorLabel" . | nindent 8 }} + {{- include "ionos-exporter.selectorLabels" . 
| nindent 8 }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: From 0850549f614da8a179b42c9b9fca17b01ec13860 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 31 May 2024 11:48:49 +0200 Subject: [PATCH 17/55] fixed scraping inconsistencies of metrics and added error handling for lbcollector --- internal/ionos.go | 10 ++------ internal/lbcollector.go | 54 +++++++++++++++++++++++++++++------------ internal/prometheus.go | 7 ++++++ internal/s3collector.go | 20 +++++++-------- 4 files changed, 57 insertions(+), 34 deletions(-) diff --git a/internal/ionos.go b/internal/ionos.go index 65c663e..56bb75a 100644 --- a/internal/ionos.go +++ b/internal/ionos.go @@ -58,7 +58,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) os.Exit(1) } - fmt.Println("DATACENTER", datacenters) + // fmt.Println("DATACENTER", datacenters) newIonosDatacenters := make(map[string]IonosDCResources) for _, datacenter := range *datacenters.Items { var ( @@ -90,10 +90,9 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { m.Lock() IonosDatacenters = newIonosDatacenters + m.Unlock() LoadbalancerCollector(apiClient) IPCollectResources(apiClient) - // S3CollectResources() - m.Unlock() CalculateDCTotals(m) time.Sleep(time.Duration(cycletime) * time.Second) } @@ -139,8 +138,3 @@ func PrintDCTotals(m *sync.RWMutex) { log.Printf("Total - Cores: %d\n", CoresTotal) log.Printf("Total - Ram: %d GB\n", RamTotal/1024) } - -//problemen mit ionos log bucket konnte nicht testen richtig -//noch problemen mit aktuallisierung von log data wenn welche geloescht werden -//problem sa paralelizacijom. 
logove mogu kalkulisati kako treba -//ali ne tako brzo diff --git a/internal/lbcollector.go b/internal/lbcollector.go index b874147..32ee3e7 100644 --- a/internal/lbcollector.go +++ b/internal/lbcollector.go @@ -2,6 +2,7 @@ package internal import ( "context" + "fmt" ionoscloud "github.com/ionos-cloud/sdk-go/v6" ) @@ -56,33 +57,56 @@ func LoadbalancerCollector(apiClient *ionoscloud.APIClient) { nlbRuleNames string ) - albList, _, _ := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() - nlbList, _, _ := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + albList, _, err := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + if err != nil { + fmt.Printf("Error retrieving ALBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } + nlbList, _, err := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + if err != nil { + fmt.Printf("Error retrieving NLBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } natList, _, _ := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), *datacenter.Id).Depth(3).Execute() + if err != nil { + fmt.Printf("Error retrieving NATs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } for _, nlbRulesAndLabels := range *nlbList.Items { + if nlbRulesAndLabels.Properties != nil && nlbRulesAndLabels.Properties.Name != nil { + nlbNames = *nlbRulesAndLabels.Properties.Name + } - nlbNames = *nlbRulesAndLabels.Properties.Name nlbForwardingRules := nlbRulesAndLabels.Entities.Forwardingrules - nlbTotalRulesDC = int32(len(*nlbForwardingRules.Items)) - - for _, ruleItems := range *nlbForwardingRules.Items { - nlbRuleNames = *ruleItems.Properties.Name + if 
nlbForwardingRules != nil && nlbForwardingRules.Items != nil { + nlbTotalRulesDC = int32(len(*nlbForwardingRules.Items)) + for _, ruleItems := range *nlbForwardingRules.Items { + if ruleItems.Properties != nil && ruleItems.Properties.Name != nil { + nlbRuleNames = *ruleItems.Properties.Name + } + } } } for _, albRulesAndLabels := range *albList.Items { - - albNames = *albRulesAndLabels.Properties.Name + if albRulesAndLabels.Properties != nil && albRulesAndLabels.Properties.Name != nil { + albNames = *albRulesAndLabels.Properties.Name + } forwardingRules := albRulesAndLabels.Entities.Forwardingrules - albTotalRulesDC = int32(len(*forwardingRules.Items)) - - for _, ruleItems := range *forwardingRules.Items { - for _, ruleName := range *ruleItems.Properties.HttpRules { - albRuleNames = *ruleName.Name + if forwardingRules != nil && forwardingRules.Items != nil { + albTotalRulesDC = int32(len(*forwardingRules.Items)) + + for _, ruleItems := range *forwardingRules.Items { + if ruleItems.Properties != nil && ruleItems.Properties.HttpRules != nil { + for _, ruleName := range *ruleItems.Properties.HttpRules { + if ruleName.Name != nil { + albRuleNames = *ruleName.Name + } + } + } } } - } nlbTotalDC = int32(len(*nlbList.Items)) albTotalDC = int32(len(*albList.Items)) diff --git a/internal/prometheus.go b/internal/prometheus.go index 52ef4e5..925a05a 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -238,10 +238,13 @@ func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { collector.s3TotalNumberOfHeadRequestsMetric.Describe(ch) } + func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { collector.mutex.RLock() defer collector.mutex.RUnlock() + // fmt.Println("Here are the Metrics inside a s3Collector in Prometheus.go before reset", IonosS3Buckets) + metricsMutex.Lock() collector.s3TotalGetRequestSizeMetric.Reset() collector.s3TotalGetResponseSizeMetric.Reset() collector.s3TotalPutRequestSizeMetric.Reset() @@ -254,8 +257,11 @@ func 
(collector *s3Collector) Collect(ch chan<- prometheus.Metric) { collector.s3TotalNumberOfPutRequestsMetric.Reset() collector.s3TotalNumberOfPostRequestsMetric.Reset() collector.s3TotalNumberOfHeadRequestsMetric.Reset() + defer metricsMutex.Unlock() for s3Name, s3Resources := range IonosS3Buckets { + // fmt.Println("Collecting metrics for bucket:", s3Name) + // fmt.Printf("Request Sizes: %v, Response Sizes: %v, Methods: %v\n", s3Resources.RequestSizes, s3Resources.ResponseSizes, s3Resources.Methods) region := s3Resources.Regions owner := s3Resources.Owner for method, requestSize := range s3Resources.RequestSizes { @@ -340,6 +346,7 @@ func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { collector.coresMetric.Reset() collector.ramMetric.Reset() collector.serverMetric.Reset() + // fmt.Println("Here are the metrics in ionosCollector", IonosDatacenters) for dcName, dcResources := range IonosDatacenters { //Write latest value for each metric in the prometheus metric channel. 
collector.coresMetric.WithLabelValues(dcName).Set(float64(dcResources.Cores)) diff --git a/internal/s3collector.go b/internal/s3collector.go index a2df25a..052b597 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -25,9 +25,6 @@ type EndpointConfig struct { } var ( - // Global totals - TotalMetrics = Metrics{} - // IonosS3Buckets IonosS3Buckets = make(map[string]Metrics) ) @@ -47,8 +44,7 @@ const ( ) const ( - //pagination 100 objects are on one page in a bucket - objectPerPage = 100 + objectPerPage = 1000 maxConcurrent = 10 ) @@ -138,7 +134,7 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { }() processBucket(client, bucketName) }(*bucket.Name) - // wg.Wait() //when we want sequential parsing we ca wait here for bucket to finish + //wg.Wait() //when we want sequential parsing we ca wait here for bucket to finish } } @@ -153,8 +149,6 @@ func processBucket(client *s3.S3, bucketName string) { // var logEntryRegex = regexp.MustCompile(`(\w+) \/[^"]*" \d+ \S+ (\d+) - \d+ (\d+)`) var logEntryRegex = regexp.MustCompile(`(GET|PUT|HEAD|POST) \/[^"]*" \d+ \S+ (\d+|-) (\d+|-) \d+ (\d+|-)`) - // fmt.Println("Regex Pattern:", logEntryRegex.String()) - metrics := Metrics{ Methods: make(map[string]int32), RequestSizes: make(map[string]int64), @@ -213,6 +207,7 @@ func processBucket(client *s3.S3, bucketName string) { } for _, object := range objectList.Contents { + objectKey := *object.Key wg.Add(1) semaphore <- struct{}{} go func(bucketNme, objectkey string) { @@ -223,7 +218,7 @@ func processBucket(client *s3.S3, bucketName string) { downloadInput := &s3.GetObjectInput{ Bucket: aws.String(bucketName), - Key: aws.String(*object.Key), + Key: aws.String(objectKey), } result, err := client.GetObject(downloadInput) @@ -231,7 +226,7 @@ func processBucket(client *s3.S3, bucketName string) { if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "AccessDenied" { - fmt.Printf("Access Denied error for object %s in bucket %s\n", 
*object.Key, bucketName) + fmt.Printf("Access Denied error for object %s in bucket %s\n", objectKey, bucketName) return } } @@ -247,10 +242,10 @@ func processBucket(client *s3.S3, bucketName string) { } matches := logEntryRegex.FindAllStringSubmatch(string(logContent), -1) for _, match := range matches { + metricsMutex.Lock() method := match[1] requestSizeStr := match[3] responseSizeStr := match[2] - metricsMutex.Lock() if requestSizeStr != "-" { requestSize, err := strconv.ParseInt(requestSizeStr, 10, 64) if err != nil { @@ -275,8 +270,11 @@ func processBucket(client *s3.S3, bucketName string) { if !aws.BoolValue(objectList.IsTruncated) { break } + continuationToken = *objectList.NextContinuationToken } wg.Wait() + metricsMutex.Lock() IonosS3Buckets[bucketName] = metrics + metricsMutex.Unlock() } From 3b434661d9d964c368d4cb36c64175b88a1e8d0e Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 13 Jun 2024 11:46:02 +0200 Subject: [PATCH 18/55] added scraping of bucket tags and pushing them to prometheus --- go.mod | 7 +- internal/helper.go | 74 ++++++++++--- internal/ionos.go | 132 ++++++++++++++++++----- internal/ipcollector.go | 45 -------- internal/lbcollector.go | 128 ---------------------- internal/prometheus.go | 229 +++++++++++++++++++--------------------- internal/s3collector.go | 142 ++++++++++++++++++------- main.go | 2 +- 8 files changed, 382 insertions(+), 377 deletions(-) delete mode 100644 internal/ipcollector.go delete mode 100644 internal/lbcollector.go diff --git a/go.mod b/go.mod index 54f0350..93cff85 100644 --- a/go.mod +++ b/go.mod @@ -3,25 +3,24 @@ module ionos-exporter go 1.20 require ( + github.com/aws/aws-sdk-go v1.52.0 github.com/ionos-cloud/sdk-go/v6 v6.1.9 + github.com/joho/godotenv v1.5.1 github.com/prometheus/client_golang v1.16.0 ) require ( - github.com/aws/aws-lambda-go v1.47.0 // indirect - github.com/aws/aws-sdk-go v1.52.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect 
github.com/golang/protobuf v1.5.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/joho/godotenv v1.5.1 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sys v0.13.0 // indirect + golang.org/x/sys v0.21.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/internal/helper.go b/internal/helper.go index 3a212db..dd2c26b 100644 --- a/internal/helper.go +++ b/internal/helper.go @@ -7,8 +7,6 @@ import ( aws "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" ) @@ -28,19 +26,6 @@ func GetEnv(key string, fallback string) string { } } -func NewS3ServiceClient() (*s3.S3, error) { - sess, err := session.NewSession(&aws.Config{ - Region: aws.String("eu-central-2"), - Credentials: credentials.NewStaticCredentials("00e556b6437d8a8d1776", "LbypY0AmotQCDDckTz+cAPFI7l0eQvSFeQ1WxKtw", ""), - Endpoint: aws.String("https://s3-eu-central-2.ionoscloud.com"), - }) - - if err != nil { - return nil, err - } - return s3.New(sess), nil -} - func HasLogsFolder(client *s3.S3, bucketName string) bool { result, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ Bucket: aws.String(bucketName), @@ -71,3 +56,62 @@ func GetHeadBucket(client *s3.S3, bucketName string) error { log.Printf("Bucket %s exists and is accessible\n", bucketName) return nil } + +func addTagsToBucket(client *s3.S3, bucketName string) { + tags := []*s3.Tag{} + + switch bucketName { + case "nbc-bucket01-logs": + tags = []*s3.Tag{ + { + Key: aws.String("Tenant"), + Value: aws.String("Niedersachsen"), + }, + } + case 
"dbp-test-bucketlogs": + tags = []*s3.Tag{ + { + Key: aws.String("Tenant"), + Value: aws.String("Brandenburg"), + }, + } + case "dbp-test4logbucket": + tags = []*s3.Tag{ + { + Key: aws.String("Tenant"), + Value: aws.String("Thueringen"), + }, + } + case "dbp-test5-logbucket": + tags = []*s3.Tag{ + { + Key: aws.String("Tenant"), + Value: aws.String("HPIBosscloud"), + }, + } + default: + tags = []*s3.Tag{ + + { + Key: aws.String("Enviroment"), + Value: aws.String("Production"), + }, + { + Key: aws.String("Namespace"), + Value: aws.String("Some Namespace"), + }, + } + } + input := &s3.PutBucketTaggingInput{ + Bucket: aws.String(bucketName), + Tagging: &s3.Tagging{ + TagSet: tags, + }, + } + _, err := client.PutBucketTagging(input) + if err != nil { + log.Printf("Error adding tags to bucket %s: %v\n", bucketName, err) + } else { + fmt.Printf("Successfully added tags to bucekt %s\n", bucketName) + } +} diff --git a/internal/ionos.go b/internal/ionos.go index 56bb75a..743a21a 100644 --- a/internal/ionos.go +++ b/internal/ionos.go @@ -22,29 +22,29 @@ var ( ) type IonosDCResources struct { - Cores int32 // Amount of CPU cores in the whole DC, regardless whether it is a VM or Kubernetscluster - Ram int32 // Amount of RAM in the whole DC, regardless whether it is a VM or Kubernetscluster - Servers int32 // Amount of servers in the whole DC - DCId string // UUID od the datacenter + Cores int32 // Amount of CPU cores in the whole DC, regardless whether it is a VM or Kubernetscluster + Ram int32 // Amount of RAM in the whole DC, regardless whether it is a VM or Kubernetscluster + Servers int32 // Amount of servers in the whole DC + DCId string // UUID od the datacenter + NLBs int32 + ALBs int32 + NATs int32 + NLBRules int32 + ALBRules int32 + ALBName string + NLBName string + NLBRuleName string + ALBRuleName string + IPName string + TotalIPs int32 } func CollectResources(m *sync.RWMutex, cycletime int32) { - // file, _ := os.Create("ionosoutput.txt") - - // defer file.Close() - 
- // oldStdout := os.Stdout - // defer func() { os.Stdout = oldStdout }() - // os.Stdout = file - err := godotenv.Load(".env") if err != nil { fmt.Println("Error loading .env file") } - // username := os.Getenv("IONOS_USERNAME") - // password := os.Getenv("IONOS_PASSWORD") - // cfg := ionoscloud.NewConfiguration(username, password, "", "") cfgENV := ionoscloud.NewConfigurationFromEnv() // cfg.Debug = true @@ -62,16 +62,89 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { newIonosDatacenters := make(map[string]IonosDCResources) for _, datacenter := range *datacenters.Items { var ( - coresTotalDC int32 = 0 - ramTotalDC int32 = 0 - serverTotalDC int32 = 0 + coresTotalDC int32 = 0 + ramTotalDC int32 = 0 + serverTotalDC int32 = 0 + nlbTotalDC int32 = 0 + nlbTotalRulesDC int32 = 0 + albTotalRulesDC int32 = 0 + albTotalDC int32 = 0 + natTotalDC int32 = 0 + albNames string + nlbNames string + albRuleNames string + nlbRuleNames string + totalIPs int32 = 0 ) servers, resp, err := apiClient.ServersApi.DatacentersServersGet(context.Background(), *datacenter.Id).Depth(depth).Execute() - //fmt.Println("SERVERS", servers) if err != nil { fmt.Fprintf(os.Stderr, "Error when calling `ServersApi.DatacentersServersGet``: %v\n", err) fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) } + albList, _, err := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + if err != nil { + fmt.Printf("Error retrieving ALBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } + nlbList, _, err := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + if err != nil { + fmt.Printf("Error retrieving NLBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } + natList, _, _ := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), *datacenter.Id).Depth(3).Execute() + if err 
!= nil { + fmt.Printf("Error retrieving NATs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + continue + } + + ipBlocks, _, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(3).Execute() + + if err != nil { + fmt.Println("Problem with the API Client") + } + + for _, ips := range *ipBlocks.Items { + totalIPs += *ips.Properties.Size + } + + for _, nlbRulesAndLabels := range *nlbList.Items { + if nlbRulesAndLabels.Properties != nil && nlbRulesAndLabels.Properties.Name != nil { + nlbNames = *nlbRulesAndLabels.Properties.Name + } + + nlbForwardingRules := nlbRulesAndLabels.Entities.Forwardingrules + if nlbForwardingRules != nil && nlbForwardingRules.Items != nil { + nlbTotalRulesDC = int32(len(*nlbForwardingRules.Items)) + for _, ruleItems := range *nlbForwardingRules.Items { + if ruleItems.Properties != nil && ruleItems.Properties.Name != nil { + nlbRuleNames = *ruleItems.Properties.Name + } + } + } + } + + for _, albRulesAndLabels := range *albList.Items { + if albRulesAndLabels.Properties != nil && albRulesAndLabels.Properties.Name != nil { + albNames = *albRulesAndLabels.Properties.Name + } + forwardingRules := albRulesAndLabels.Entities.Forwardingrules + if forwardingRules != nil && forwardingRules.Items != nil { + albTotalRulesDC = int32(len(*forwardingRules.Items)) + + for _, ruleItems := range *forwardingRules.Items { + if ruleItems.Properties != nil && ruleItems.Properties.HttpRules != nil { + for _, ruleName := range *ruleItems.Properties.HttpRules { + if ruleName.Name != nil { + albRuleNames = *ruleName.Name + } + } + } + } + } + } + nlbTotalDC = int32(len(*nlbList.Items)) + albTotalDC = int32(len(*albList.Items)) + natTotalDC = int32(len(*natList.Items)) serverTotalDC = int32(len(*servers.Items)) for _, server := range *servers.Items { @@ -80,10 +153,20 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { } newIonosDatacenters[*datacenter.Properties.Name] = IonosDCResources{ - DCId: *datacenter.Id, - Cores: 
coresTotalDC, - Ram: ramTotalDC, - Servers: serverTotalDC, + DCId: *datacenter.Id, + Cores: coresTotalDC, + Ram: ramTotalDC, + Servers: serverTotalDC, + NLBs: nlbTotalDC, + ALBs: albTotalDC, + NATs: natTotalDC, + NLBRules: nlbTotalRulesDC, + ALBRules: albTotalRulesDC, + ALBName: albNames, + NLBName: nlbNames, + ALBRuleName: albRuleNames, + NLBRuleName: nlbRuleNames, + TotalIPs: totalIPs, } } @@ -91,8 +174,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { m.Lock() IonosDatacenters = newIonosDatacenters m.Unlock() - LoadbalancerCollector(apiClient) - IPCollectResources(apiClient) + // IPCollectResources(apiClient) CalculateDCTotals(m) time.Sleep(time.Duration(cycletime) * time.Second) } diff --git a/internal/ipcollector.go b/internal/ipcollector.go deleted file mode 100644 index bc6cbfa..0000000 --- a/internal/ipcollector.go +++ /dev/null @@ -1,45 +0,0 @@ -package internal - -import ( - "context" - "fmt" - - ionoscloud "github.com/ionos-cloud/sdk-go/v6" - "github.com/joho/godotenv" -) - -var ( - ipName string - totalIPs int32 - IonosIPs = make(map[string]IonosIPResources) -) - -type IonosIPResources struct { - IPName string - TotalIPs int32 -} - -func IPCollectResources(apiClient *ionoscloud.APIClient) { - err := godotenv.Load(".env") - if err != nil { - fmt.Println("Error loading .env file") - } - - newIonosIPResources := make(map[string]IonosIPResources) - ipBlocks, _, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(3).Execute() - - if err != nil { - fmt.Println("Problem with the API Client") - } - - totalIPs = 0 - for _, ips := range *ipBlocks.Items { - totalIPs += *ips.Properties.Size - - newIonosIPResources[*ips.Properties.Name] = IonosIPResources{ - - TotalIPs: totalIPs, - } - } - -} diff --git a/internal/lbcollector.go b/internal/lbcollector.go deleted file mode 100644 index 32ee3e7..0000000 --- a/internal/lbcollector.go +++ /dev/null @@ -1,128 +0,0 @@ -package internal - -import ( - "context" - "fmt" - - ionoscloud 
"github.com/ionos-cloud/sdk-go/v6" -) - -var ( - nlbNames string - albNames string - nlbTotalRulesDC int32 - nlbRuleNames string - albTotalRulesDC int32 - albRuleNames string - - IonosLoadbalancers = make(map[string]IonosLBResources) -) - -type IonosLBResources struct { - NLBs int32 - ALBs int32 - NATs int32 - NLBRules int32 - ALBRules int32 - ALBName string - NLBName string - NLBRuleName string - ALBRuleName string -} - -func LoadbalancerCollector(apiClient *ionoscloud.APIClient) { - // fmt.Println("Hey this is the Loadbalancer Collector") - - // file, _ := os.Create("LoadBalancerOutput.txt") - - // defer file.Close() - - // oldStdout := os.Stdout - // defer func() { os.Stdout = oldStdout }() - // os.Stdout = file - datacenter, _, _ := apiClient.DataCentersApi.DatacentersGet(context.Background()).Depth(3).Execute() - - newIonosLBResources := make(map[string]IonosLBResources) - for _, datacenter := range *datacenter.Items { - - var ( - nlbTotalDC int32 = 0 - nlbTotalRulesDC int32 = 0 - albTotalRulesDC int32 = 0 - albTotalDC int32 = 0 - natTotalDC int32 = 0 - albNames string - nlbNames string - albRuleNames string - nlbRuleNames string - ) - - albList, _, err := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() - if err != nil { - fmt.Printf("Error retrieving ALBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) - continue - } - nlbList, _, err := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() - if err != nil { - fmt.Printf("Error retrieving NLBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) - continue - } - natList, _, _ := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), *datacenter.Id).Depth(3).Execute() - if err != nil { - fmt.Printf("Error retrieving NATs for datacenter %s: %v\n", *datacenter.Properties.Name, err) - continue - } - - for _, 
nlbRulesAndLabels := range *nlbList.Items { - if nlbRulesAndLabels.Properties != nil && nlbRulesAndLabels.Properties.Name != nil { - nlbNames = *nlbRulesAndLabels.Properties.Name - } - - nlbForwardingRules := nlbRulesAndLabels.Entities.Forwardingrules - if nlbForwardingRules != nil && nlbForwardingRules.Items != nil { - nlbTotalRulesDC = int32(len(*nlbForwardingRules.Items)) - for _, ruleItems := range *nlbForwardingRules.Items { - if ruleItems.Properties != nil && ruleItems.Properties.Name != nil { - nlbRuleNames = *ruleItems.Properties.Name - } - } - } - } - - for _, albRulesAndLabels := range *albList.Items { - if albRulesAndLabels.Properties != nil && albRulesAndLabels.Properties.Name != nil { - albNames = *albRulesAndLabels.Properties.Name - } - forwardingRules := albRulesAndLabels.Entities.Forwardingrules - if forwardingRules != nil && forwardingRules.Items != nil { - albTotalRulesDC = int32(len(*forwardingRules.Items)) - - for _, ruleItems := range *forwardingRules.Items { - if ruleItems.Properties != nil && ruleItems.Properties.HttpRules != nil { - for _, ruleName := range *ruleItems.Properties.HttpRules { - if ruleName.Name != nil { - albRuleNames = *ruleName.Name - } - } - } - } - } - } - nlbTotalDC = int32(len(*nlbList.Items)) - albTotalDC = int32(len(*albList.Items)) - natTotalDC = int32(len(*natList.Items)) - - newIonosLBResources[*datacenter.Properties.Name] = IonosLBResources{ - NLBs: nlbTotalDC, - ALBs: albTotalDC, - NATs: natTotalDC, - NLBRules: nlbTotalRulesDC, - ALBRules: albTotalRulesDC, - ALBName: albNames, - NLBName: nlbNames, - ALBRuleName: albRuleNames, - NLBRuleName: nlbRuleNames, - } - } - IonosLoadbalancers = newIonosLBResources -} diff --git a/internal/prometheus.go b/internal/prometheus.go index 925a05a..0f8e451 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -1,6 +1,7 @@ package internal import ( + "fmt" "io" "net/http" "os" @@ -16,18 +17,14 @@ import ( // Note you can also include fields of other types if they 
provide utility // but we just won't be exposing them as metrics. type ionosCollector struct { - mutex *sync.RWMutex - coresMetric *prometheus.GaugeVec - ramMetric *prometheus.GaugeVec - serverMetric *prometheus.GaugeVec - dcCoresMetric *prometheus.GaugeVec - dcRamMetric *prometheus.GaugeVec - dcServerMetric *prometheus.GaugeVec - dcDCMetric *prometheus.GaugeVec -} - -type lbCollector struct { mutex *sync.RWMutex + coresMetric *prometheus.GaugeVec + ramMetric *prometheus.GaugeVec + serverMetric *prometheus.GaugeVec + dcCoresMetric *prometheus.GaugeVec + dcRamMetric *prometheus.GaugeVec + dcServerMetric *prometheus.GaugeVec + dcDCMetric *prometheus.GaugeVec nlbsMetric *prometheus.GaugeVec albsMetric *prometheus.GaugeVec natsMetric *prometheus.GaugeVec @@ -36,6 +33,7 @@ type lbCollector struct { dcDCNATMetric *prometheus.GaugeVec dcNLBRulesMetric *prometheus.GaugeVec dcALBRulesMetric *prometheus.GaugeVec + dcTotalIpsMetric *prometheus.GaugeVec } type s3Collector struct { @@ -56,44 +54,6 @@ type s3Collector struct { // var mutex *sync.RWMutex -func newLBCollector(m *sync.RWMutex) *lbCollector { - return &lbCollector{ - mutex: m, - nlbsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_networkloadbalancer_amount", - Help: "Shows the number of active Network Loadbalancers in an IONOS datacenter", - }, []string{"datacenter", "nlb_name", "nlb_rules_name"}), - albsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_applicationloadbalancer_amount", - Help: "Shows the number of active Application Loadbalancers in an IONOS datacenter", - }, []string{"datacenter", "alb_name", "alb_rules_name"}), - natsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_nat_gateways_amount", - Help: "Shows the number of NAT Gateways in an IONOS datacenter", - }, []string{"datacenter"}), - dcDCNLBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_networkloadbalancer_amount", - Help: "Shows the total number of Network 
Loadbalancers in IONOS Account", - }, []string{"account"}), - dcDCALBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_applicationbalancer_amount", - Help: "Shows the total number of Application Loadbalancers in IONOS Account", - }, []string{"account"}), - dcDCNATMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_nat_gateways_amount", - Help: "Shows the total number of NAT Gateways in IONOS Account", - }, []string{"account"}), - dcNLBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_number_of_nlb_rules", - Help: "Shows the total number of NLB Rules in IONOS Account", - }, []string{"nlb_rules"}), - dcALBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_nmumber_of_alb_rules", - Help: "Shows the total number of ALB Rules in IONOS Account", - }, []string{"alb_rules"}), - } -} - // You must create a constructor for you collector that // initializes every descriptor and returns a pointer to the collector func newIonosCollector(m *sync.RWMutex) *ionosCollector { @@ -127,102 +87,100 @@ func newIonosCollector(m *sync.RWMutex) *ionosCollector { Name: "ionos_total_datacenter_amount", Help: "Shows the number of datacenters of an IONOS account", }, []string{"account"}), + nlbsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_networkloadbalancer_amount", + Help: "Shows the number of active Network Loadbalancers in an IONOS datacenter", + }, []string{"datacenter", "nlb_name", "nlb_rules_name"}), + albsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_applicationloadbalancer_amount", + Help: "Shows the number of active Application Loadbalancers in an IONOS datacenter", + }, []string{"datacenter", "alb_name", "alb_rules_name"}), + natsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_nat_gateways_amount", + Help: "Shows the number of NAT Gateways in an IONOS datacenter", + }, []string{"datacenter"}), + dcDCNLBMetric: 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_networkloadbalancer_amount", + Help: "Shows the total number of Network Loadbalancers in IONOS Account", + }, []string{"account"}), + dcDCALBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_applicationbalancer_amount", + Help: "Shows the total number of Application Loadbalancers in IONOS Account", + }, []string{"account"}), + dcDCNATMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_nat_gateways_amount", + Help: "Shows the total number of NAT Gateways in IONOS Account", + }, []string{"account"}), + dcNLBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_number_of_nlb_rules", + Help: "Shows the total number of NLB Rules in IONOS Account", + }, []string{"nlb_rules"}), + dcALBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_nmumber_of_alb_rules", + Help: "Shows the total number of ALB Rules in IONOS Account", + }, []string{"alb_rules"}), + dcTotalIpsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_number_of_ips", + Help: "Shows the number of Ips in a IONOS", + }, []string{"ip_amount"}), } } -// maybe I should not define for every single one of methods an metric -// but like export the method_name also and just say total_request_size.. -// and it will show it for that method? 
+// s3collector func returns all the metrics as gauges func newS3Collector(m *sync.RWMutex) *s3Collector { return &s3Collector{ mutex: m, s3TotalGetRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_get_request_size_in_bytes", Help: "Gives the total size of s3 GET Request in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalGetResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_get_response_size_in_bytes", Help: "Gives the total size of s3 GET Response in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalPutRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_put_request_size_in_bytes", Help: "Gives the total size of s3 PUT Request in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalPutResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_put_response_size_in_bytes", Help: "Gives the total size of s3 PUT Response in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalPostRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_post_request_size_in_bytes", Help: "Gives the total size of s3 POST Request in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalPostResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_post_response_size_in_bytes", Help: "Gives the total size of s3 POST Response in 
Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalHeadRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_head_request_size_in_bytes", Help: "Gives the total size of s3 HEAD Request in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalHeadResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_head_response_size_in_bytes", Help: "Gives the total size of s3 HEAD Response in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalNumberOfGetRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_get_requests", Help: "Gives the total number of S3 GET HTTP Requests in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalNumberOfPutRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_put_requests", Help: "Gives the total number of S3 PUT HTTP Requests in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalNumberOfPostRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_post_requests", Help: "Gives the total number of S3 Post Requests in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), s3TotalNumberOfHeadRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "s3_total_number_of_head_requests", Help: "Gives the 
total number of S3 HEAD HTTP Requests in one Bucket", - }, []string{"bucket", "method", "region", "owner"}), + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), } } -func (collector *lbCollector) Describe(ch chan<- *prometheus.Desc) { - - collector.nlbsMetric.Describe(ch) - collector.albsMetric.Describe(ch) - collector.natsMetric.Describe(ch) - collector.dcDCNLBMetric.Describe(ch) - collector.dcDCALBMetric.Describe(ch) - collector.dcDCNATMetric.Describe(ch) - collector.dcALBRulesMetric.Describe(ch) - collector.dcNLBRulesMetric.Describe(ch) - -} - -func (collector *lbCollector) Collect(ch chan<- prometheus.Metric) { - collector.mutex.RLock() - defer collector.mutex.RUnlock() - - collector.albsMetric.Reset() - collector.natsMetric.Reset() - collector.nlbsMetric.Reset() - for lbName, lbResources := range IonosLoadbalancers { - collector.nlbsMetric.WithLabelValues(lbName, lbResources.NLBName, lbResources.NLBRuleName).Set(float64(lbResources.NLBs)) - collector.albsMetric.WithLabelValues(lbName, lbResources.ALBName, lbResources.ALBRuleName).Set(float64(lbResources.ALBs)) - collector.natsMetric.WithLabelValues(lbName).Set(float64(lbResources.NATs)) - } - - collector.nlbsMetric.Collect(ch) - collector.albsMetric.Collect(ch) - collector.natsMetric.Collect(ch) - collector.dcDCNLBMetric.Collect(ch) - collector.dcDCALBMetric.Collect(ch) - collector.dcDCNATMetric.Collect(ch) - collector.dcNLBRulesMetric.Collect(ch) - collector.dcALBRulesMetric.Collect(ch) -} - func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { collector.s3TotalGetRequestSizeMetric.Describe(ch) collector.s3TotalGetResponseSizeMetric.Describe(ch) @@ -243,7 +201,6 @@ func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { collector.mutex.RLock() defer collector.mutex.RUnlock() - // fmt.Println("Here are the Metrics inside a s3Collector in Prometheus.go before reset", IonosS3Buckets) metricsMutex.Lock() 
collector.s3TotalGetRequestSizeMetric.Reset() collector.s3TotalGetResponseSizeMetric.Reset() @@ -257,49 +214,59 @@ func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { collector.s3TotalNumberOfPutRequestsMetric.Reset() collector.s3TotalNumberOfPostRequestsMetric.Reset() collector.s3TotalNumberOfHeadRequestsMetric.Reset() + defer metricsMutex.Unlock() for s3Name, s3Resources := range IonosS3Buckets { - // fmt.Println("Collecting metrics for bucket:", s3Name) - // fmt.Printf("Request Sizes: %v, Response Sizes: %v, Methods: %v\n", s3Resources.RequestSizes, s3Resources.ResponseSizes, s3Resources.Methods) + region := s3Resources.Regions owner := s3Resources.Owner + tags, ok := TagsForPrometheus[s3Name] + if !ok { + fmt.Printf("No tags found for bucket %s\n", s3Name) + continue + } + + enviroment := tags["Enviroment"] + namespace := tags["Namespace"] + tenant := tags["Tenant"] + for method, requestSize := range s3Resources.RequestSizes { switch method { case MethodGET: - collector.s3TotalGetRequestSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(requestSize)) + collector.s3TotalGetRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) case MethodPOST: - collector.s3TotalPostRequestSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(requestSize)) + collector.s3TotalPostRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) case MethodHEAD: - collector.s3TotalHeadRequestSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(requestSize)) + collector.s3TotalHeadRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) case MethodPUT: - collector.s3TotalPutRequestSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(requestSize)) + collector.s3TotalPutRequestSizeMetric.WithLabelValues(s3Name, 
method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) } } for method, responseSize := range s3Resources.ResponseSizes { switch method { case MethodGET: - collector.s3TotalGetResponseSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) + collector.s3TotalGetResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) case MethodPOST: - collector.s3TotalPostResponseSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) + collector.s3TotalPostResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) case MethodHEAD: - collector.s3TotalHeadResponseSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) + collector.s3TotalHeadResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) case MethodPUT: - collector.s3TotalPutResponseSizeMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) + collector.s3TotalPutResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) } } for method, responseSize := range s3Resources.Methods { switch method { case MethodGET: - collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) + collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) case MethodPOST: - collector.s3TotalNumberOfPostRequestsMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) + collector.s3TotalNumberOfPostRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) case MethodHEAD: - 
collector.s3TotalNumberOfHeadRequestsMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) + collector.s3TotalNumberOfHeadRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) case MethodPUT: - collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name, method, region, owner).Set(float64(responseSize)) + collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) } } } @@ -316,7 +283,6 @@ func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { collector.s3TotalNumberOfPutRequestsMetric.Collect(ch) collector.s3TotalNumberOfPostRequestsMetric.Collect(ch) collector.s3TotalNumberOfHeadRequestsMetric.Collect(ch) - } // Each and every collector must implement the Describe function. @@ -331,6 +297,15 @@ func (collector *ionosCollector) Describe(ch chan<- *prometheus.Desc) { collector.dcRamMetric.Describe(ch) collector.dcServerMetric.Describe(ch) collector.dcDCMetric.Describe(ch) + collector.nlbsMetric.Describe(ch) + collector.albsMetric.Describe(ch) + collector.natsMetric.Describe(ch) + collector.dcDCNLBMetric.Describe(ch) + collector.dcDCALBMetric.Describe(ch) + collector.dcDCNATMetric.Describe(ch) + collector.dcALBRulesMetric.Describe(ch) + collector.dcNLBRulesMetric.Describe(ch) + collector.dcTotalIpsMetric.Describe(ch) } // Collect implements required collect function for all promehteus collectors @@ -346,12 +321,20 @@ func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { collector.coresMetric.Reset() collector.ramMetric.Reset() collector.serverMetric.Reset() + collector.albsMetric.Reset() + collector.natsMetric.Reset() + collector.nlbsMetric.Reset() + collector.dcTotalIpsMetric.Reset() // fmt.Println("Here are the metrics in ionosCollector", IonosDatacenters) for dcName, dcResources := range IonosDatacenters { //Write latest value for each metric in 
the prometheus metric channel. collector.coresMetric.WithLabelValues(dcName).Set(float64(dcResources.Cores)) collector.ramMetric.WithLabelValues(dcName).Set(float64(dcResources.Ram / 1024)) // MB -> GB collector.serverMetric.WithLabelValues(dcName).Set(float64(dcResources.Servers)) + collector.nlbsMetric.WithLabelValues(dcName, dcResources.NLBName, dcResources.NLBRuleName).Set(float64(dcResources.NLBs)) + collector.albsMetric.WithLabelValues(dcName, dcResources.ALBName, dcResources.ALBRuleName).Set(float64(dcResources.ALBs)) + collector.natsMetric.WithLabelValues(dcName).Set(float64(dcResources.NATs)) + collector.dcTotalIpsMetric.WithLabelValues(dcName).Set(float64(dcResources.TotalIPs)) } @@ -367,6 +350,15 @@ func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { collector.dcRamMetric.Collect(ch) collector.dcServerMetric.Collect(ch) collector.dcDCMetric.Collect(ch) + collector.nlbsMetric.Collect(ch) + collector.albsMetric.Collect(ch) + collector.natsMetric.Collect(ch) + collector.dcDCNLBMetric.Collect(ch) + collector.dcDCALBMetric.Collect(ch) + collector.dcDCNATMetric.Collect(ch) + collector.dcNLBRulesMetric.Collect(ch) + collector.dcALBRulesMetric.Collect(ch) + collector.dcTotalIpsMetric.Collect(ch) } func (collector *ionosCollector) GetMutex() *sync.RWMutex { return collector.mutex @@ -376,17 +368,16 @@ func (collector *s3Collector) GetMutex() *sync.RWMutex { return collector.mutex } -func (collector *lbCollector) GetMutex() *sync.RWMutex { +func (collector *CustomCollector) GetMutex() *sync.RWMutex { return collector.mutex } func StartPrometheus(m *sync.RWMutex) { + ic := newIonosCollector(m) s3c := newS3Collector(m) - lbc := newLBCollector(m) prometheus.MustRegister(ic) prometheus.MustRegister(s3c) - prometheus.MustRegister(lbc) prometheus.MustRegister(httpRequestsTotal) } diff --git a/internal/s3collector.go b/internal/s3collector.go index 052b597..42eea6a 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -26,8 +26,12 @@ 
type EndpointConfig struct { var ( IonosS3Buckets = make(map[string]Metrics) + //map of maps for bucket tags stores tags for every bucket + //one bucket can have more tags. + TagsForPrometheus = make(map[string]map[string]string) ) +// object for Metrics type Metrics struct { Methods map[string]int32 RequestSizes map[string]int64 @@ -43,8 +47,9 @@ const ( MethodHEAD = "HEAD" ) +// how many objects to scan per page const ( - objectPerPage = 1000 + objectPerPage = 100 maxConcurrent = 10 ) @@ -76,18 +81,18 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { return } endpoints := map[string]EndpointConfig{ - "de": { - Region: "de", - AccessKey: accessKey, - SecretKey: secretKey, - Endpoint: "https://s3-eu-central-1.ionoscloud.com", - }, "eu-central-2": { Region: "eu-central-2", AccessKey: accessKey, SecretKey: secretKey, Endpoint: "https://s3-eu-central-2.ionoscloud.com", }, + "de": { + Region: "de", + AccessKey: accessKey, + SecretKey: secretKey, + Endpoint: "https://s3-eu-central-1.ionoscloud.com", + }, } semaphore := make(chan struct{}, maxConcurrent) @@ -115,26 +120,34 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { for _, bucket := range result.Buckets { bucketName := *bucket.Name - wg.Add(1) - if err := GetHeadBucket(client, bucketName); err != nil { - if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 { - wg.Done() - continue + if _, exists := IonosS3Buckets[bucketName]; !exists { + //check if exists if not initialise + metrics := Metrics{ + Methods: make(map[string]int32), + RequestSizes: make(map[string]int64), + ResponseSizes: make(map[string]int64), + Regions: config.Region, } - fmt.Println("Error checking the bucket head:", err) - wg.Done() - continue - } + IonosS3Buckets[bucketName] = metrics - semaphore <- struct{}{} - go func(bucketName string) { + } + wg.Add(1) + go func(client *s3.S3, bucketName string) { + defer wg.Done() + getBucketTags(client, bucketName) + if err := GetHeadBucket(client, 
bucketName); err != nil { + if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 { + return + } + fmt.Println("Error checking the bucket head:", err) + return + } + semaphore <- struct{}{} defer func() { <-semaphore - wg.Done() }() processBucket(client, bucketName) - }(*bucket.Name) - //wg.Wait() //when we want sequential parsing we ca wait here for bucket to finish + }(client, bucketName) } } @@ -144,10 +157,16 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { } +/* +function for processing buckets getting the Traffic of all the operations +and their sizes. +*/ func processBucket(client *s3.S3, bucketName string) { - // var logEntryRegex = regexp.MustCompile(`(?)(GET|PUT|HEAD|POST) .+? (\d+) (\d+)`) - // var logEntryRegex = regexp.MustCompile(`(\w+) \/[^"]*" \d+ \S+ (\d+) - \d+ (\d+)`) + + var wg sync.WaitGroup var logEntryRegex = regexp.MustCompile(`(GET|PUT|HEAD|POST) \/[^"]*" \d+ \S+ (\d+|-) (\d+|-) \d+ (\d+|-)`) + semaphore := make(chan struct{}, maxConcurrent) + continuationToken := "" metrics := Metrics{ Methods: make(map[string]int32), @@ -156,12 +175,9 @@ func processBucket(client *s3.S3, bucketName string) { Regions: "", Owner: "", } - metrics.Regions = *client.Config.Region - semaphore := make(chan struct{}, maxConcurrent) - var wg sync.WaitGroup - continuationToken := "" - //owner + + //getting owner getAclInput := &s3.GetBucketAclInput{ Bucket: aws.String(bucketName), } @@ -170,20 +186,23 @@ func processBucket(client *s3.S3, bucketName string) { log.Printf("Error retrieving ACL for bucket %s: %v\n", bucketName, err) return } - if len(*getAclOutput.Owner.DisplayName) > 0 { metrics.Owner = *getAclOutput.Owner.DisplayName } else { metrics.Owner = "Unknown" } + //main loop for { + + //get all objects in a bucket use max keys defined in global scope and go through + //the pages of a bucket objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ Bucket: aws.String(bucketName), Prefix: aws.String("logs/"), 
ContinuationToken: aws.String(continuationToken), MaxKeys: aws.Int64(objectPerPage), }) - + //error handling if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { @@ -192,7 +211,7 @@ func processBucket(client *s3.S3, bucketName string) { default: if awserr, ok := err.(awserr.Error); ok { if awserr.Code() == "AccessDenied" { - fmt.Println("ACCESS DENIED") + fmt.Println("Bucket not in current endpoint skipping") } } fmt.Printf("error listing objects in bucket %s: %s\n", bucketName, aerr.Message()) @@ -200,13 +219,15 @@ func processBucket(client *s3.S3, bucketName string) { } return } - + //check if the bucket has any objects in logs folder if len(objectList.Contents) == 0 { fmt.Printf("bucket %s does not contain any objects with the 'logs/' prefix\n", bucketName) return } - + //iterate through those objects and check the input of logs + //here we are using concurrency for _, object := range objectList.Contents { + objectKey := *object.Key wg.Add(1) semaphore <- struct{}{} @@ -215,14 +236,11 @@ func processBucket(client *s3.S3, bucketName string) { <-semaphore wg.Done() }() - downloadInput := &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(objectKey), } - result, err := client.GetObject(downloadInput) - if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "AccessDenied" { @@ -234,18 +252,21 @@ func processBucket(client *s3.S3, bucketName string) { return } defer result.Body.Close() - logContent, err := io.ReadAll(result.Body) - if err != nil { fmt.Println("Problem reading the body", err) } + //check for matches using regex we are checkign for GET, PUT, POST, HEAD + //and their response/request size matches := logEntryRegex.FindAllStringSubmatch(string(logContent), -1) + for _, match := range matches { metricsMutex.Lock() + method := match[1] requestSizeStr := match[3] responseSizeStr := match[2] + if requestSizeStr != "-" { requestSize, err := strconv.ParseInt(requestSizeStr, 10, 64) if err != nil { 
@@ -266,15 +287,56 @@ func processBucket(client *s3.S3, bucketName string) { } }(bucketName, *object.Key) } - + //if there is no more pages break the loop if !aws.BoolValue(objectList.IsTruncated) { break } - + //go to next page continuationToken = *objectList.NextContinuationToken } wg.Wait() + //make it thread safe with a mutex metricsMutex.Lock() IonosS3Buckets[bucketName] = metrics metricsMutex.Unlock() } + +/* +function for getting bucket Tags, takes two parameters, the service client +and the bucket name, then it checks for tags using the aws sdk GetBucketTagging +no return value it saves everything to map of maps for Tags which is sent +to prometheus +*/ +func getBucketTags(client *s3.S3, bucketName string) { + tagsOutput, err := client.GetBucketTagging(&s3.GetBucketTaggingInput{ + Bucket: aws.String(bucketName), + }) + + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case s3.ErrCodeNoSuchBucket: + fmt.Printf("Bucket %s does not exist\n", bucketName) + return + case "NoSuchTagSet": + fmt.Printf("No tags set for Bucket %s\n", bucketName) + return + default: + fmt.Printf("Error retrieving tags in false endpoint for bucket %s: %s\n", bucketName, aerr.Message()) + return + } + } else { + fmt.Printf("Error retrieving tags for bucket %s: %s\n", bucketName, err.Error()) + return + } + } + tags := make(map[string]string) + for _, tag := range tagsOutput.TagSet { + tags[*tag.Key] = *tag.Value + } + + metricsMutex.Lock() + TagsForPrometheus[bucketName] = tags + metricsMutex.Unlock() + +} diff --git a/main.go b/main.go index 9a35c36..8d8febf 100644 --- a/main.go +++ b/main.go @@ -19,7 +19,7 @@ var ( func main() { exporterPort = internal.GetEnv("IONOS_EXPORTER_APPLICATION_CONTAINER_PORT", "9100") - if cycletime, err := strconv.ParseInt(internal.GetEnv("IONOS_EXPORTER_API_CYCLE", "900"), 10, 32); err != nil { + if cycletime, err := strconv.ParseInt(internal.GetEnv("IONOS_EXPORTER_API_CYCLE", "200"), 10, 32); err != nil { 
log.Fatal("Cannot convert IONOS_API_CYCLE to int") } else { ionos_api_cycle = int32(cycletime) From eb303803da72bb4fbeaeebc7557c2da81ccd1dde Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 13 Jun 2024 11:53:52 +0200 Subject: [PATCH 19/55] added git.sum file --- go.sum | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/go.sum b/go.sum index 025f26a..277ce17 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,3 @@ -github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= -github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go v1.52.0 h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI= github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -20,11 +18,13 @@ github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+ github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= @@ -37,19 +37,17 @@ github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPH github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.13.0 
h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= @@ -60,3 +58,4 @@ google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cn google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= From c09f3afc97f14225becc91f40c132c642432e136 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 13 Jun 2024 11:57:53 +0200 Subject: [PATCH 20/55] removed customcollector.go --- internal/prometheus.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/internal/prometheus.go b/internal/prometheus.go index 0f8e451..61b05fd 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -368,10 +368,6 @@ func (collector *s3Collector) GetMutex() *sync.RWMutex { return collector.mutex } -func (collector *CustomCollector) GetMutex() *sync.RWMutex { - return collector.mutex -} - func StartPrometheus(m *sync.RWMutex) { ic := newIonosCollector(m) From 283b5a11e2eadb7db0cea59d2bd11e1ba7e1407c Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 21 Jun 2024 14:39:49 +0200 Subject: [PATCH 21/55] added error handling for number of IPs --- internal/ionos.go | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/internal/ionos.go b/internal/ionos.go index 743a21a..24433f4 100644 --- a/internal/ionos.go +++ b/internal/ionos.go @@ -80,31 +80,38 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { if err != nil { fmt.Fprintf(os.Stderr, "Error when 
calling `ServersApi.DatacentersServersGet``: %v\n", err) fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + continue } - albList, _, err := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + albList, resp, err := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() if err != nil { fmt.Printf("Error retrieving ALBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) continue } - nlbList, _, err := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + nlbList, resp, err := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() if err != nil { fmt.Printf("Error retrieving NLBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) continue } - natList, _, _ := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), *datacenter.Id).Depth(3).Execute() + natList, _, err := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), *datacenter.Id).Depth(3).Execute() if err != nil { fmt.Printf("Error retrieving NATs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) continue } - ipBlocks, _, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(3).Execute() - + ipBlocks, resp, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(3).Execute() if err != nil { - fmt.Println("Problem with the API Client") + fmt.Printf("Error retrieving IPs for datacenter %s: %v\n", *datacenter.Properties.Name, err) + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + continue } for _, ips := range *ipBlocks.Items { - 
totalIPs += *ips.Properties.Size + if ips.Properties != nil && ips.Properties.Size != nil { + totalIPs += *ips.Properties.Size + } } for _, nlbRulesAndLabels := range *nlbList.Items { @@ -174,7 +181,6 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { m.Lock() IonosDatacenters = newIonosDatacenters m.Unlock() - // IPCollectResources(apiClient) CalculateDCTotals(m) time.Sleep(time.Duration(cycletime) * time.Second) } From 8a1f92df920a8a84e2fe125837e19a7b36cc9eb5 Mon Sep 17 00:00:00 2001 From: efidoris Date: Mon, 24 Jun 2024 15:21:46 +0200 Subject: [PATCH 22/55] Added logger instead of print and changed number of ips from GaugeVec to Gauge --- internal/helper.go | 112 ++++++++++++++++++++-------------------- internal/prometheus.go | 11 ++-- internal/s3collector.go | 29 ++++++----- 3 files changed, 76 insertions(+), 76 deletions(-) diff --git a/internal/helper.go b/internal/helper.go index dd2c26b..56464db 100644 --- a/internal/helper.go +++ b/internal/helper.go @@ -57,61 +57,61 @@ func GetHeadBucket(client *s3.S3, bucketName string) error { return nil } -func addTagsToBucket(client *s3.S3, bucketName string) { - tags := []*s3.Tag{} +// func addTagsToBucket(client *s3.S3, bucketName string) { +// tags := []*s3.Tag{} - switch bucketName { - case "nbc-bucket01-logs": - tags = []*s3.Tag{ - { - Key: aws.String("Tenant"), - Value: aws.String("Niedersachsen"), - }, - } - case "dbp-test-bucketlogs": - tags = []*s3.Tag{ - { - Key: aws.String("Tenant"), - Value: aws.String("Brandenburg"), - }, - } - case "dbp-test4logbucket": - tags = []*s3.Tag{ - { - Key: aws.String("Tenant"), - Value: aws.String("Thueringen"), - }, - } - case "dbp-test5-logbucket": - tags = []*s3.Tag{ - { - Key: aws.String("Tenant"), - Value: aws.String("HPIBosscloud"), - }, - } - default: - tags = []*s3.Tag{ +// switch bucketName { +// case "nbc-bucket01-logs": +// tags = []*s3.Tag{ +// { +// Key: aws.String("Tenant"), +// Value: aws.String("Niedersachsen"), +// }, +// } +// case 
"dbp-test-bucketlogs": +// tags = []*s3.Tag{ +// { +// Key: aws.String("Tenant"), +// Value: aws.String("Brandenburg"), +// }, +// } +// case "dbp-test4logbucket": +// tags = []*s3.Tag{ +// { +// Key: aws.String("Tenant"), +// Value: aws.String("Thueringen"), +// }, +// } +// case "dbp-test5-logbucket": +// tags = []*s3.Tag{ +// { +// Key: aws.String("Tenant"), +// Value: aws.String("HPIBosscloud"), +// }, +// } +// default: +// tags = []*s3.Tag{ - { - Key: aws.String("Enviroment"), - Value: aws.String("Production"), - }, - { - Key: aws.String("Namespace"), - Value: aws.String("Some Namespace"), - }, - } - } - input := &s3.PutBucketTaggingInput{ - Bucket: aws.String(bucketName), - Tagging: &s3.Tagging{ - TagSet: tags, - }, - } - _, err := client.PutBucketTagging(input) - if err != nil { - log.Printf("Error adding tags to bucket %s: %v\n", bucketName, err) - } else { - fmt.Printf("Successfully added tags to bucekt %s\n", bucketName) - } -} +// { +// Key: aws.String("Enviroment"), +// Value: aws.String("Production"), +// }, +// { +// Key: aws.String("Namespace"), +// Value: aws.String("Some Namespace"), +// }, +// } +// } +// input := &s3.PutBucketTaggingInput{ +// Bucket: aws.String(bucketName), +// Tagging: &s3.Tagging{ +// TagSet: tags, +// }, +// } +// _, err := client.PutBucketTagging(input) +// if err != nil { +// log.Printf("Error adding tags to bucket %s: %v\n", bucketName, err) +// } else { +// fmt.Printf("Successfully added tags to bucekt %s\n", bucketName) +// } +// } diff --git a/internal/prometheus.go b/internal/prometheus.go index 61b05fd..e917a17 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -33,7 +33,7 @@ type ionosCollector struct { dcDCNATMetric *prometheus.GaugeVec dcNLBRulesMetric *prometheus.GaugeVec dcALBRulesMetric *prometheus.GaugeVec - dcTotalIpsMetric *prometheus.GaugeVec + dcTotalIpsMetric prometheus.Gauge } type s3Collector struct { @@ -119,10 +119,10 @@ func newIonosCollector(m *sync.RWMutex) *ionosCollector { Name: 
"ionos_total_nmumber_of_alb_rules", Help: "Shows the total number of ALB Rules in IONOS Account", }, []string{"alb_rules"}), - dcTotalIpsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + dcTotalIpsMetric: prometheus.NewGauge(prometheus.GaugeOpts{ Name: "ionos_total_number_of_ips", Help: "Shows the number of Ips in a IONOS", - }, []string{"ip_amount"}), + }), } } @@ -226,7 +226,7 @@ func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { fmt.Printf("No tags found for bucket %s\n", s3Name) continue } - + //tags of buckets change to tags you have defined on s3 buckets enviroment := tags["Enviroment"] namespace := tags["Namespace"] tenant := tags["Tenant"] @@ -324,7 +324,6 @@ func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { collector.albsMetric.Reset() collector.natsMetric.Reset() collector.nlbsMetric.Reset() - collector.dcTotalIpsMetric.Reset() // fmt.Println("Here are the metrics in ionosCollector", IonosDatacenters) for dcName, dcResources := range IonosDatacenters { //Write latest value for each metric in the prometheus metric channel. 
@@ -334,7 +333,7 @@ func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { collector.nlbsMetric.WithLabelValues(dcName, dcResources.NLBName, dcResources.NLBRuleName).Set(float64(dcResources.NLBs)) collector.albsMetric.WithLabelValues(dcName, dcResources.ALBName, dcResources.ALBRuleName).Set(float64(dcResources.ALBs)) collector.natsMetric.WithLabelValues(dcName).Set(float64(dcResources.NATs)) - collector.dcTotalIpsMetric.WithLabelValues(dcName).Set(float64(dcResources.TotalIPs)) + collector.dcTotalIpsMetric.Set(float64(dcResources.TotalIPs)) } diff --git a/internal/s3collector.go b/internal/s3collector.go index 42eea6a..43791c7 100644 --- a/internal/s3collector.go +++ b/internal/s3collector.go @@ -62,6 +62,7 @@ func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S Endpoint: aws.String(endpoint), }) if err != nil { + log.Printf("Error establishing session with AWS S3 Endpoint: %v", err) return nil, fmt.Errorf("error establishing session with AWS S3 Endpoint: %s", err) } return s3.New(sess), nil @@ -77,7 +78,7 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { // defer func() { os.Stdout = oldStdout }() // os.Stdout = file if accessKey == "" || secretKey == "" { - fmt.Println("AWS credentials are not set in the environment variables.") + log.Println("AWS credentials are nto set in the enviroment variables.") return } endpoints := map[string]EndpointConfig{ @@ -139,7 +140,7 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 { return } - fmt.Println("Error checking the bucket head:", err) + log.Println("Error checking the bucket head:", err) return } semaphore <- struct{}{} @@ -207,11 +208,11 @@ func processBucket(client *s3.S3, bucketName string) { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case "NoSuchBucket": - fmt.Printf("bucket %s does not exist\n", bucketName) + log.Printf("bucket %s does not exist\n", 
bucketName) default: if awserr, ok := err.(awserr.Error); ok { if awserr.Code() == "AccessDenied" { - fmt.Println("Bucket not in current endpoint skipping") + log.Println("Bucket not in current endpoint skipping") } } fmt.Printf("error listing objects in bucket %s: %s\n", bucketName, aerr.Message()) @@ -221,7 +222,7 @@ func processBucket(client *s3.S3, bucketName string) { } //check if the bucket has any objects in logs folder if len(objectList.Contents) == 0 { - fmt.Printf("bucket %s does not contain any objects with the 'logs/' prefix\n", bucketName) + log.Printf("bucket %s does not contain any objects with the 'logs/' prefix\n", bucketName) return } //iterate through those objects and check the input of logs @@ -244,17 +245,17 @@ func processBucket(client *s3.S3, bucketName string) { if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "AccessDenied" { - fmt.Printf("Access Denied error for object %s in bucket %s\n", objectKey, bucketName) + log.Printf("Access Denied error for object %s in bucket %s\n", objectKey, bucketName) return } } - fmt.Println("Error downloading object", err) + log.Println("Error downloading object", err) return } defer result.Body.Close() logContent, err := io.ReadAll(result.Body) if err != nil { - fmt.Println("Problem reading the body", err) + log.Println("Problem reading the body", err) } //check for matches using regex we are checkign for GET, PUT, POST, HEAD //and their response/request size @@ -270,14 +271,14 @@ func processBucket(client *s3.S3, bucketName string) { if requestSizeStr != "-" { requestSize, err := strconv.ParseInt(requestSizeStr, 10, 64) if err != nil { - fmt.Printf("Error parsing size: %v", err) + log.Printf("Error parsing size: %v", err) } metrics.RequestSizes[method] += requestSize } if responseSizeStr != "-" { responseSize, err := strconv.ParseInt(responseSizeStr, 10, 64) if err != nil { - fmt.Printf("Error parsing size: %v", err) + log.Printf("Error parsing size: %v", err) } 
metrics.ResponseSizes[method] += responseSize } @@ -316,17 +317,17 @@ func getBucketTags(client *s3.S3, bucketName string) { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeNoSuchBucket: - fmt.Printf("Bucket %s does not exist\n", bucketName) + log.Printf("Bucket %s does not exist\n", bucketName) return case "NoSuchTagSet": - fmt.Printf("No tags set for Bucket %s\n", bucketName) + log.Printf("No tags set for Bucket %s\n", bucketName) return default: - fmt.Printf("Error retrieving tags in false endpoint for bucket %s: %s\n", bucketName, aerr.Message()) + log.Printf("Error retrieving tags in false endpoint for bucket %s: %s\n", bucketName, aerr.Message()) return } } else { - fmt.Printf("Error retrieving tags for bucket %s: %s\n", bucketName, err.Error()) + log.Printf("Error retrieving tags for bucket %s: %s\n", bucketName, err.Error()) return } } From 007e640c2fdd901f8469de824f6ef87fea2feadd Mon Sep 17 00:00:00 2001 From: efidoris Date: Tue, 2 Jul 2024 16:21:02 +0200 Subject: [PATCH 23/55] Added postgress cluster metrics and telemetry metrics --- config.yaml | 50 ++++++++ go.mod | 6 + go.sum | 11 ++ internal/postgres-test.go | 258 ++++++++++++++++++++++++++++++++++++++ internal/prometheus.go | 155 +++++++++++++++++++++++ main.go | 3 + 6 files changed, 483 insertions(+) create mode 100644 config.yaml create mode 100644 internal/postgres-test.go diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..dafe662 --- /dev/null +++ b/config.yaml @@ -0,0 +1,50 @@ +tenants: + - name: spsh + operations: + - clusters + - databases + - name: dbildungs + operations: + - clusters + + +metrics: + - name: ionos_dbaas_postgres_transactions:rate2m + description: Per-second average rate of SQL transactions (that have been committed), calculated over the last 2 minutes. + type: gauge + - name: ionos_dbaas_postgres_connections_count + description: Number of connections per instance and state. 
active, disabled, fastpath function call, idle, idle in transaction, idle in transaction (aborted). + type: gauge + - name: ionos_dbaas_postgres_cpu_rate5m + description: The average CPU utilization over the past 5 minutes. + type: gauge + - name: ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m + description: The rate of disk I/O time, in seconds, over a five-minute period. Provides insight into performance of a disk, as high values may indicate that the disk is being overused or is experiencing performance issues. + type: gauge + - name: ionos_dbaas_postgres_instance_count + description: Desired number of instances. The number of currently ready and running instances may be different. ionos_dbaas_postgres_role provides information about running instances split by role. + type: gauge + - name: ionos_dbaas_postgres_load5 + description: Linux load average for the last 5 minutes. This metric is represented as a number between 0 and 1 (can be greater than 1 on multicore machines), where 0 indicates that the CPU core is idle and 1 indicates that the CPU core is fully utilized. Higher values may indicate that the system is experiencing performance issues or is approaching capacity. + type: gauge + - name: ionos_dbaas_postgres_memory_available_bytes + description: Available memory in bytes. + type: gauge + - name: ionos_dbaas_postgres_memory_total_bytes + description: Total memory of the underlying machine in bytes. Some of it is used for our management and monitoring tools and not available to PostgreSQL. During horizontal scaling you might see different values for each instance. + type: gauge + - name: ionos_dbaas_postgres_role + description: Current role of the instance. Provides whether an instance is currently "master" or "replica". + type: gauge + - name: ionos_dbaas_postgres_storage_available_bytes + description: Free available disk space per instance in bytes. 
+ type: gauge + - name: ionos_dbaas_postgres_storage_total_bytes + description: Total disk space per instance in bytes. During horizontal scaling you might see different values for each instance. + type: gauge + - name: ionos_dbaas_postgres_user_tables_idx_scan + description: Number of index scans per table/schema. + type: gauge + - name: ionos_dbaas_postgres_user_tables_seq_scan + description: Number of sequential scans per table/schema. A high number of sequential scans may indicate that an index should be added to improve performance. + type: gauge \ No newline at end of file diff --git a/go.mod b/go.mod index 93cff85..9e25c9f 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,9 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/ionos-cloud/sdk-go-bundle/products/dbaas/psql v0.1.0 // indirect + github.com/ionos-cloud/sdk-go-bundle/shared v0.1.0 // indirect + github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/prometheus/client_model v0.3.0 // indirect @@ -23,4 +26,7 @@ require ( golang.org/x/sys v0.21.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect + gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 277ce17..f4a3b3b 100644 --- a/go.sum +++ b/go.sum @@ -14,6 +14,12 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 
h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/ionos-cloud/sdk-go-bundle/products/dbaas/psql v0.1.0 h1:s/FZdHaews1vIRJYbJUvaO+Y9yYIiJ1z+s1HLMcCii0= +github.com/ionos-cloud/sdk-go-bundle/products/dbaas/psql v0.1.0/go.mod h1:8qebNZf4GeZm64BDUqeunDDw/jIQqhry6RyIhEqIHJE= +github.com/ionos-cloud/sdk-go-bundle/shared v0.1.0 h1:1psLbSn+i/wK0Samy8XqLxsqEnhTshz3Py1lbp5F/ao= +github.com/ionos-cloud/sdk-go-bundle/shared v0.1.0/go.mod h1:hDQ5fhujYFaYr1fD8bmmf4rlCKAsqKwczuBS0Z5PNBQ= +github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2 h1:AaKbci+kVS6/k43VwJwmXxCJ7pzj9jwuOPqO8Wd5560= +github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2/go.mod h1:nmJEwuRX65A5/PxwvdFW0XrV+N6WFYnMV1TiIafAwz4= github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -56,6 +62,11 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 
v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/internal/postgres-test.go b/internal/postgres-test.go new file mode 100644 index 0000000..4d11cae --- /dev/null +++ b/internal/postgres-test.go @@ -0,0 +1,258 @@ +package internal + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "sync" + "time" + + psql "github.com/ionos-cloud/sdk-go-dbaas-postgres" + "github.com/joho/godotenv" + "gopkg.in/yaml.v2" +) + +type Tenant struct { + Name string `yaml:"name"` + Operations []string `yaml:"operations"` +} + +type IonosPostgresResources struct { + ClusterName string + CPU int32 + RAM int32 + Storage int32 + Owner string + DatabaseNames []string + Telemetry []TelemetryMetric +} + +type TelemetryMetric struct { + Metric map[string]string `json:"metric"` + Values [][]interface{} `json:"values"` +} + +type TelemetryResponse struct { + Status string `json:status` + Data struct { + ResultType string `json:"resultType` + Result []TelemetryMetric `json:"result"` + } `json:"data"` +} + +var ( + ClusterCoresTotal int32 = 0 + ClusterRamTotal int32 = 0 + ClusterTotal int32 = 0 + IonosPostgresClusters = make(map[string]IonosPostgresResources) +) + +type Config struct { + Tenants []Tenant `yaml:"tenants"` + Metrics []Metric `yaml:"metrics"` +} + +type Metric struct { + Name string `yaml:"name"` + Description string `yaml:"description"` + Type string `yaml:"type"` +} + +func loadConfig(filename string) (*Config, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + var config Config + err = yaml.Unmarshal(data, &config) + if err != nil { + return nil, err + } + + return &config, nil +} + +func Testpsql(m *sync.RWMutex, cycletime int32) { + err := godotenv.Load(".env") + if err != nil { + fmt.Println("Error loading .env file") + } + cfgENV := psql.NewConfigurationFromEnv() + + apiClient := 
psql.NewAPIClient(cfgENV) + + config, err := loadConfig("config.yaml") + if err != nil { + log.Fatalf("Failed to load config: %v", err) + } + + for { + var wg sync.WaitGroup + for _, tenant := range config.Tenants { + wg.Add(1) + go func(tenant Tenant) { + defer wg.Done() + processCluster(apiClient, m, config.Metrics) + }(tenant) + } + wg.Wait() + time.Sleep(time.Duration(cycletime) * time.Second) + } +} + +func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric) { + datacenters, err := fetchClusters(apiClient) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch clusters: %v\n", err) + } + newIonosPostgresResources := make(map[string]IonosPostgresResources) + + for _, clusters := range *datacenters.Items { + databaseNames, err := fetchDatabases(apiClient, *clusters.Id) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch databases for cluster %s: %v\n", *clusters.Properties.DisplayName, err) + continue + } + databaseOwner, err := fetchOwner(apiClient, *clusters.Id) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch owner for database %s: %v\n", *clusters.Properties.DisplayName, err) + continue + } + + telemetryData := make([]TelemetryMetric, 0) + + for _, metricConfig := range metrics { + telemetryResp, err := fetchTelemetryMetrics(os.Getenv("IONOS_TOKEN"), fmt.Sprintf("%s{postgres_cluster=\"%s\"}", metricConfig.Name, *clusters.Id), *clusters.Id) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch telemetry metrics for cluster %s: %v\n", *clusters.Id, err) + continue + } + telemetryData = append(telemetryData, telemetryResp.Data.Result...) 
+ } + + // fmt.Printf("Here are the database names %v", databaseNames) + newIonosPostgresResources[*clusters.Properties.DisplayName] = IonosPostgresResources{ + ClusterName: *clusters.Properties.DisplayName, + CPU: *clusters.Properties.Cores, + RAM: *clusters.Properties.Ram, + Storage: *clusters.Properties.StorageSize, + DatabaseNames: databaseNames, + Owner: databaseOwner, + Telemetry: telemetryData, + } + } + // clusterName := "spsh-dev-schulportal" + m.Lock() + // fmt.Printf("Here is the telemetryData for cluster '%s': %v\n", clusterName, newIonosPostgresResources[clusterName].Telemetry) + // fmt.Printf("Here is the map %v", newIonosPostgresResources["telemetryData"]) + IonosPostgresClusters = newIonosPostgresResources + m.Unlock() + +} + +func fetchClusters(apiClient *psql.APIClient) (*psql.ClusterList, error) { + datacenters, resp, err := apiClient.ClustersApi.ClustersGet(context.Background()).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling ClustersApi: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if datacenters.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + + return &datacenters, nil +} + +func fetchDatabases(apiClient *psql.APIClient, clusterID string) ([]string, error) { + databases, resp, err := apiClient.DatabasesApi.DatabasesList(context.Background(), clusterID).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling DatabasesApi: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if databases.Items == nil { + return nil, fmt.Errorf("no databases found for cluster %s", clusterID) + } + + var databaseNames []string + + for _, db := range *databases.Items { + if db.Properties != nil && db.Properties.Name != nil { + 
databaseNames = append(databaseNames, *db.Properties.Name) + } + } + return databaseNames, nil +} + +func fetchOwner(apiClient *psql.APIClient, clusterID string) (string, error) { + databases, resp, err := apiClient.DatabasesApi.DatabasesList(context.Background(), clusterID).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling DatabasesApi: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return "", err + } + + if databases.Items == nil { + return "", fmt.Errorf("no databases found for cluster %s", clusterID) + } + var owner = "" + for _, db := range *databases.Items { + if db.Properties != nil && db.Properties.Name != nil { + owner = *db.Properties.Owner + } + } + return owner, nil +} + +func fetchTelemetryMetrics(apiToken, query, clusterID string) (*TelemetryResponse, error) { + req, err := http.NewRequest("GET", "https://dcd.ionos.com/telemetry/api/v1/query_range", nil) + if err != nil { + return nil, err + } + + q := req.URL.Query() + q.Add("query", query) + q.Add("start", time.Now().Add(-time.Hour).Format(time.RFC3339)) + q.Add("end", time.Now().Format(time.RFC3339)) + q.Add("step", "60") + req.URL.RawQuery = q.Encode() + + req.Header.Set("Authorization", "Bearer "+apiToken) + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var telemetryResp TelemetryResponse + if err := json.NewDecoder(resp.Body).Decode(&telemetryResp); err != nil { + fmt.Printf("Fialed to decode json response: %v\n", err) + return nil, err + } + + // fmt.Printf("Telemetry Response: %+v\n", telemetryResp) + + return &telemetryResp, nil +} diff --git a/internal/prometheus.go b/internal/prometheus.go index e917a17..c3009c2 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -5,6 +5,7 @@ import ( "io" "net/http" "os" + "strconv" "sync" //"time" @@ -36,6 +37,20 @@ type 
ionosCollector struct { dcTotalIpsMetric prometheus.Gauge } +type postgresCollector struct { + mutex *sync.RWMutex + postgresTotalRamMetric *prometheus.GaugeVec + postgresTotalCPUMetric *prometheus.GaugeVec + postgresTotalStorageMetric *prometheus.GaugeVec + postgresTransactionRateMetric *prometheus.GaugeVec + postgresTotalStorageBytesMetric *prometheus.GaugeVec + postgresAvailableStorageBytesMetric *prometheus.GaugeVec + postgresDiskIOMetric *prometheus.GaugeVec + postgresCpuRateMetric *prometheus.GaugeVec + postgresLoadMetric *prometheus.GaugeVec + postgresTotalMemoryAvailableBytes *prometheus.GaugeVec +} + type s3Collector struct { mutex *sync.RWMutex s3TotalGetRequestSizeMetric *prometheus.GaugeVec @@ -181,6 +196,140 @@ func newS3Collector(m *sync.RWMutex) *s3Collector { } } +func newPostgresCollector(m *sync.RWMutex) *postgresCollector { + return &postgresCollector{ + mutex: m, + postgresTotalRamMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_ram_in_cluster", + Help: "Gives the total ammount of allocated RAM in cluster", + }, []string{"clusterName", "owner", "db"}), + postgresTotalCPUMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_cpu_in_cluster", + Help: "Gives a total amount of CPU Cores in Cluster", + }, []string{"clusterName", "owner", "db"}), + postgresTotalStorageMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_storage_in_cluster", + Help: "Gives a total amount of Storage in Cluster", + }, []string{"clusterName", "owner", "db"}), + postgresTransactionRateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_transactions:rate2m", + Help: "Gives a Transaction Rate in postgres cluster in 2m", + }, []string{"clusterName"}), + postgresTotalStorageBytesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_storage_metric", + Help: "Gives a Total Storage Metric in Bytes", + }, 
[]string{"clusterName"}), + postgresAvailableStorageBytesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_available_storage_metric", + Help: "Gives a Available Storage Metric in Bytes", + }, []string{"clusterName"}), + postgresCpuRateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgress_cpu_rate5m", + Help: "Gives a CPU Rate (Average Utilization) over the past 5 Minutes", + }, []string{"clusterName"}), + postgresDiskIOMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m", + Help: "The rate of disk I/O time, in seconds, over a five-minute period.", + }, []string{"clusterName"}), + postgresLoadMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_load5", + Help: "Linux load average for the last 5 minutes.", + }, []string{"clusterName"}), + postgresTotalMemoryAvailableBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_memory_available_bytes", + Help: "Available memory in bytes", + }, []string{"clusterName"}), + } +} + +func (collector *postgresCollector) Describe(ch chan<- *prometheus.Desc) { + collector.postgresTotalCPUMetric.Describe(ch) + collector.postgresTotalRamMetric.Describe(ch) + collector.postgresTotalStorageMetric.Describe(ch) + collector.postgresTransactionRateMetric.Describe(ch) + collector.postgresTotalStorageBytesMetric.Describe(ch) + collector.postgresAvailableStorageBytesMetric.Describe(ch) + collector.postgresCpuRateMetric.Describe(ch) + collector.postgresDiskIOMetric.Describe(ch) + collector.postgresLoadMetric.Describe(ch) + collector.postgresTotalMemoryAvailableBytes.Describe(ch) +} + +func (collector *postgresCollector) Collect(ch chan<- prometheus.Metric) { + collector.mutex.RLock() + defer collector.mutex.RUnlock() + + metricsMutex.Lock() + collector.postgresTotalCPUMetric.Reset() + collector.postgresTotalRamMetric.Reset() + 
collector.postgresTotalStorageMetric.Reset() + metricsMutex.Unlock() + + for postgresName, postgresResources := range IonosPostgresClusters { + + for _, telemetry := range postgresResources.Telemetry { + for _, value := range telemetry.Values { + if len(value) != 2 { + fmt.Printf("Unexpected value length: %v\n", value) + continue + } + metricValue, ok := value[1].(float64) + if !ok { + strValue, ok := value[1].(string) + if !ok { + fmt.Printf("Unexpected type for metric %s value: %v\n", telemetry.Values, value[1]) + continue + } + + var err error + metricValue, err = strconv.ParseFloat(strValue, 64) + if err != nil { + fmt.Printf("Failed to parse metric value: %v\n", err) + continue + } + } + // fmt.Println("Telemetry Metric", telemetry.Metric) + switch telemetry.Metric["__name__"] { + case "ionos_dbaas_postgres_transactions:rate2m": + collector.postgresTransactionRateMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_storage_total_bytes": + collector.postgresTotalStorageBytesMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_storage_available_bytes": + collector.postgresAvailableStorageBytesMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_cpu_rate5m": + collector.postgresCpuRateMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m": + collector.postgresDiskIOMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_load5": + collector.postgresLoadMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_memory_available_bytes": + collector.postgresTotalMemoryAvailableBytes.WithLabelValues(postgresName).Set(float64(metricValue)) + default: + // fmt.Printf("Unrecognised metric: %s\n", telemetry.Metric["__name__"]) + continue + } + } + } + + for _, dbName := range postgresResources.DatabaseNames { + + 
collector.postgresTotalCPUMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.CPU)) + collector.postgresTotalRamMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.RAM)) + collector.postgresTotalStorageMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.Storage)) + } + + } + collector.postgresTotalCPUMetric.Collect(ch) + collector.postgresTotalRamMetric.Collect(ch) + collector.postgresTotalStorageMetric.Collect(ch) + collector.postgresTransactionRateMetric.Collect(ch) + collector.postgresTotalStorageBytesMetric.Collect(ch) + collector.postgresAvailableStorageBytesMetric.Collect(ch) + collector.postgresCpuRateMetric.Collect(ch) + collector.postgresDiskIOMetric.Collect(ch) + collector.postgresLoadMetric.Collect(ch) + collector.postgresTotalMemoryAvailableBytes.Collect(ch) +} func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { collector.s3TotalGetRequestSizeMetric.Describe(ch) collector.s3TotalGetResponseSizeMetric.Describe(ch) @@ -367,12 +516,18 @@ func (collector *s3Collector) GetMutex() *sync.RWMutex { return collector.mutex } +func (collector *postgresCollector) GetMutex() *sync.RWMutex { + return collector.mutex +} + func StartPrometheus(m *sync.RWMutex) { ic := newIonosCollector(m) s3c := newS3Collector(m) + pc := newPostgresCollector(m) prometheus.MustRegister(ic) prometheus.MustRegister(s3c) + prometheus.MustRegister(pc) prometheus.MustRegister(httpRequestsTotal) } diff --git a/main.go b/main.go index 8d8febf..4753f8c 100644 --- a/main.go +++ b/main.go @@ -13,6 +13,7 @@ import ( var ( dcMutex = &sync.RWMutex{} // Mutex to sync access to the Datacenter map s3Mutex = &sync.RWMutex{} + pgMutex = &sync.RWMutex{} exporterPort string // Port to be used for exposing the metrics ionos_api_cycle int32 // Cycle time in seconds to query the IONOS API for changes, not th ePrometheus scraping intervall ) @@ -26,6 
+27,8 @@ func main() { } go internal.CollectResources(dcMutex, ionos_api_cycle) go internal.S3CollectResources(s3Mutex, ionos_api_cycle) + // internal.PgGet() + go internal.Testpsql(pgMutex, ionos_api_cycle) //internal.PrintDCResources(mutex) internal.StartPrometheus(dcMutex) From 298524d3d27ac9888a5e17479ecfa8ab332668c5 Mon Sep 17 00:00:00 2001 From: efidoris Date: Wed, 3 Jul 2024 16:51:20 +0200 Subject: [PATCH 24/55] refactoring prometheus.go@ --- internal/helper.go | 59 -- internal/ionos_collector.go | 182 ++++++ internal/{ionos.go => ionos_scraper.go} | 193 +++++-- internal/postgres_collector.go | 160 ++++++ .../{postgres-test.go => postgres_scraper.go} | 16 +- internal/prometheus.go | 522 +----------------- internal/s3_collector.go | 184 ++++++ internal/{s3collector.go => s3_scraper.go} | 3 +- main.go | 20 +- 9 files changed, 711 insertions(+), 628 deletions(-) create mode 100644 internal/ionos_collector.go rename internal/{ionos.go => ionos_scraper.go} (52%) create mode 100644 internal/postgres_collector.go rename internal/{postgres-test.go => postgres_scraper.go} (95%) create mode 100644 internal/s3_collector.go rename internal/{s3collector.go => s3_scraper.go} (99%) diff --git a/internal/helper.go b/internal/helper.go index 56464db..73ff613 100644 --- a/internal/helper.go +++ b/internal/helper.go @@ -56,62 +56,3 @@ func GetHeadBucket(client *s3.S3, bucketName string) error { log.Printf("Bucket %s exists and is accessible\n", bucketName) return nil } - -// func addTagsToBucket(client *s3.S3, bucketName string) { -// tags := []*s3.Tag{} - -// switch bucketName { -// case "nbc-bucket01-logs": -// tags = []*s3.Tag{ -// { -// Key: aws.String("Tenant"), -// Value: aws.String("Niedersachsen"), -// }, -// } -// case "dbp-test-bucketlogs": -// tags = []*s3.Tag{ -// { -// Key: aws.String("Tenant"), -// Value: aws.String("Brandenburg"), -// }, -// } -// case "dbp-test4logbucket": -// tags = []*s3.Tag{ -// { -// Key: aws.String("Tenant"), -// Value: 
aws.String("Thueringen"), -// }, -// } -// case "dbp-test5-logbucket": -// tags = []*s3.Tag{ -// { -// Key: aws.String("Tenant"), -// Value: aws.String("HPIBosscloud"), -// }, -// } -// default: -// tags = []*s3.Tag{ - -// { -// Key: aws.String("Enviroment"), -// Value: aws.String("Production"), -// }, -// { -// Key: aws.String("Namespace"), -// Value: aws.String("Some Namespace"), -// }, -// } -// } -// input := &s3.PutBucketTaggingInput{ -// Bucket: aws.String(bucketName), -// Tagging: &s3.Tagging{ -// TagSet: tags, -// }, -// } -// _, err := client.PutBucketTagging(input) -// if err != nil { -// log.Printf("Error adding tags to bucket %s: %v\n", bucketName, err) -// } else { -// fmt.Printf("Successfully added tags to bucekt %s\n", bucketName) -// } -// } diff --git a/internal/ionos_collector.go b/internal/ionos_collector.go new file mode 100644 index 0000000..4407fcf --- /dev/null +++ b/internal/ionos_collector.go @@ -0,0 +1,182 @@ +package internal + +import ( + "os" + "sync" + + //"time" + + "github.com/prometheus/client_golang/prometheus" +) + +// Define a struct for you collector that contains pointers +// to prometheus descriptors for each metric you wish to expose. +// Note you can also include fields of other types if they provide utility +// but we just won't be exposing them as metrics. 
+type ionosCollector struct { + mutex *sync.RWMutex + coresMetric *prometheus.GaugeVec + ramMetric *prometheus.GaugeVec + serverMetric *prometheus.GaugeVec + dcCoresMetric *prometheus.GaugeVec + dcRamMetric *prometheus.GaugeVec + dcServerMetric *prometheus.GaugeVec + dcDCMetric *prometheus.GaugeVec + nlbsMetric *prometheus.GaugeVec + albsMetric *prometheus.GaugeVec + natsMetric *prometheus.GaugeVec + dcDCNLBMetric *prometheus.GaugeVec + dcDCALBMetric *prometheus.GaugeVec + dcDCNATMetric *prometheus.GaugeVec + dcNLBRulesMetric *prometheus.GaugeVec + dcALBRulesMetric *prometheus.GaugeVec + dcTotalIpsMetric prometheus.Gauge +} + +// You must create a constructor for you collector that +// initializes every descriptor and returns a pointer to the collector +func NewIonosCollector(m *sync.RWMutex) *ionosCollector { + return &ionosCollector{ + mutex: m, + coresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dc_cores_amount", + Help: "Shows the number of currently active cores in an IONOS datacenter", + }, []string{"datacenter"}), + ramMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dc_ram_gb", + Help: "Shows the number of currently active RAM in an IONOS datacenter", + }, []string{"datacenter"}), + serverMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dc_server_amount", + Help: "Shows the number of currently active servers in an IONOS datacenter", + }, []string{"datacenter"}), + dcCoresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_cores_amount", + Help: "Shows the number of currently active cores of an IONOS account", + }, []string{"account"}), + dcRamMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_ram_gb", + Help: "Shows the number of currently active RAM of an IONOS account", + }, []string{"account"}), + dcServerMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_server_amount", + Help: "Shows the number of currently active servers of 
an IONOS account", + }, []string{"account"}), + dcDCMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_datacenter_amount", + Help: "Shows the number of datacenters of an IONOS account", + }, []string{"account"}), + nlbsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_networkloadbalancer_amount", + Help: "Shows the number of active Network Loadbalancers in an IONOS datacenter", + }, []string{"datacenter", "nlb_name", "nlb_rules_name"}), + albsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_applicationloadbalancer_amount", + Help: "Shows the number of active Application Loadbalancers in an IONOS datacenter", + }, []string{"datacenter", "alb_name", "alb_rules_name"}), + natsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_nat_gateways_amount", + Help: "Shows the number of NAT Gateways in an IONOS datacenter", + }, []string{"datacenter"}), + dcDCNLBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_networkloadbalancer_amount", + Help: "Shows the total number of Network Loadbalancers in IONOS Account", + }, []string{"account"}), + dcDCALBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_applicationbalancer_amount", + Help: "Shows the total number of Application Loadbalancers in IONOS Account", + }, []string{"account"}), + dcDCNATMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_nat_gateways_amount", + Help: "Shows the total number of NAT Gateways in IONOS Account", + }, []string{"account"}), + dcNLBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_number_of_nlb_rules", + Help: "Shows the total number of NLB Rules in IONOS Account", + }, []string{"nlb_rules"}), + dcALBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_total_nmumber_of_alb_rules", + Help: "Shows the total number of ALB Rules in IONOS Account", + }, []string{"alb_rules"}), + dcTotalIpsMetric: 
prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "ionos_total_number_of_ips", + Help: "Shows the number of Ips in a IONOS", + }), + } +} + +// Each and every collector must implement the Describe function. +// It essentially writes all descriptors to the prometheus desc channel. +// func (collector *ionosCollector) Describe(ch chan<- *prometheus.Desc) { +func (collector *ionosCollector) Describe(ch chan<- *prometheus.Desc) { + + //Update this section with the each metric you create for a given collector + collector.coresMetric.Describe(ch) + collector.ramMetric.Describe(ch) + collector.serverMetric.Describe(ch) + collector.dcCoresMetric.Describe(ch) + collector.dcRamMetric.Describe(ch) + collector.dcServerMetric.Describe(ch) + collector.dcDCMetric.Describe(ch) + collector.nlbsMetric.Describe(ch) + collector.albsMetric.Describe(ch) + collector.natsMetric.Describe(ch) + collector.dcDCNLBMetric.Describe(ch) + collector.dcDCALBMetric.Describe(ch) + collector.dcDCNATMetric.Describe(ch) + collector.dcALBRulesMetric.Describe(ch) + collector.dcNLBRulesMetric.Describe(ch) + collector.dcTotalIpsMetric.Describe(ch) +} + +// Collect implements required collect function for all promehteus collectors +func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { + + //Implement logic here to determine proper metric value to return to prometheus + //for each descriptor or call other functions that do so. + account := os.Getenv("IONOS_ACCOUNT") + collector.mutex.RLock() + defer collector.mutex.RUnlock() + + // Reset metrics in case a datacenter was removed + collector.coresMetric.Reset() + collector.ramMetric.Reset() + collector.serverMetric.Reset() + collector.albsMetric.Reset() + collector.natsMetric.Reset() + collector.nlbsMetric.Reset() + // fmt.Println("Here are the metrics in ionosCollector", IonosDatacenters) + for dcName, dcResources := range IonosDatacenters { + //Write latest value for each metric in the prometheus metric channel. 
+ collector.coresMetric.WithLabelValues(dcName).Set(float64(dcResources.Cores)) + collector.ramMetric.WithLabelValues(dcName).Set(float64(dcResources.Ram / 1024)) // MB -> GB + collector.serverMetric.WithLabelValues(dcName).Set(float64(dcResources.Servers)) + collector.nlbsMetric.WithLabelValues(dcName, dcResources.NLBName, dcResources.NLBRuleName).Set(float64(dcResources.NLBs)) + collector.albsMetric.WithLabelValues(dcName, dcResources.ALBName, dcResources.ALBRuleName).Set(float64(dcResources.ALBs)) + collector.natsMetric.WithLabelValues(dcName).Set(float64(dcResources.NATs)) + collector.dcTotalIpsMetric.Set(float64(dcResources.TotalIPs)) + + } + + collector.dcCoresMetric.WithLabelValues(account).Set(float64(CoresTotal)) + collector.dcRamMetric.WithLabelValues(account).Set(float64(RamTotal / 1024)) // MB -> GB + collector.dcServerMetric.WithLabelValues(account).Set(float64(ServerTotal)) + collector.dcDCMetric.WithLabelValues(account).Set(float64(DataCenters)) + + collector.coresMetric.Collect(ch) + collector.ramMetric.Collect(ch) + collector.serverMetric.Collect(ch) + collector.dcCoresMetric.Collect(ch) + collector.dcRamMetric.Collect(ch) + collector.dcServerMetric.Collect(ch) + collector.dcDCMetric.Collect(ch) + collector.nlbsMetric.Collect(ch) + collector.albsMetric.Collect(ch) + collector.natsMetric.Collect(ch) + collector.dcDCNLBMetric.Collect(ch) + collector.dcDCALBMetric.Collect(ch) + collector.dcDCNATMetric.Collect(ch) + collector.dcNLBRulesMetric.Collect(ch) + collector.dcALBRulesMetric.Collect(ch) + collector.dcTotalIpsMetric.Collect(ch) +} diff --git a/internal/ionos.go b/internal/ionos_scraper.go similarity index 52% rename from internal/ionos.go rename to internal/ionos_scraper.go index 24433f4..33cf101 100644 --- a/internal/ionos.go +++ b/internal/ionos_scraper.go @@ -82,73 +82,32 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) continue } - albList, resp, err := 
apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + + albList, err := fetchApplicationLoadbalancers(apiClient, &datacenter) if err != nil { fmt.Printf("Error retrieving ALBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) - fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) continue } - nlbList, resp, err := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), *datacenter.Id).Depth(3).Execute() + nlbList, err := fetchNetworkLoadBalancers(apiClient, &datacenter) if err != nil { fmt.Printf("Error retrieving NLBs for datacenter %s: %v\n", *datacenter.Properties.Name, err) - fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) continue } - natList, _, err := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), *datacenter.Id).Depth(3).Execute() + natList, err := fetchNATGateways(apiClient, &datacenter) if err != nil { fmt.Printf("Error retrieving NATs for datacenter %s: %v\n", *datacenter.Properties.Name, err) - fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) continue } - - ipBlocks, resp, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(3).Execute() + ipBlocks, err := fetchIPBlocks(apiClient) if err != nil { fmt.Printf("Error retrieving IPs for datacenter %s: %v\n", *datacenter.Properties.Name, err) - fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) continue } - for _, ips := range *ipBlocks.Items { - if ips.Properties != nil && ips.Properties.Size != nil { - totalIPs += *ips.Properties.Size - } - } - - for _, nlbRulesAndLabels := range *nlbList.Items { - if nlbRulesAndLabels.Properties != nil && nlbRulesAndLabels.Properties.Name != nil { - nlbNames = *nlbRulesAndLabels.Properties.Name - } + totalIPs = processIPBlocks(ipBlocks) + nlbNames, nlbTotalRulesDC = processNetworkLoadBalancers(nlbList) + albNames, albTotalRulesDC = processApplicationLoadBalancers(albList) - 
nlbForwardingRules := nlbRulesAndLabels.Entities.Forwardingrules - if nlbForwardingRules != nil && nlbForwardingRules.Items != nil { - nlbTotalRulesDC = int32(len(*nlbForwardingRules.Items)) - for _, ruleItems := range *nlbForwardingRules.Items { - if ruleItems.Properties != nil && ruleItems.Properties.Name != nil { - nlbRuleNames = *ruleItems.Properties.Name - } - } - } - } - - for _, albRulesAndLabels := range *albList.Items { - if albRulesAndLabels.Properties != nil && albRulesAndLabels.Properties.Name != nil { - albNames = *albRulesAndLabels.Properties.Name - } - forwardingRules := albRulesAndLabels.Entities.Forwardingrules - if forwardingRules != nil && forwardingRules.Items != nil { - albTotalRulesDC = int32(len(*forwardingRules.Items)) - - for _, ruleItems := range *forwardingRules.Items { - if ruleItems.Properties != nil && ruleItems.Properties.HttpRules != nil { - for _, ruleName := range *ruleItems.Properties.HttpRules { - if ruleName.Name != nil { - albRuleNames = *ruleName.Name - } - } - } - } - } - } nlbTotalDC = int32(len(*nlbList.Items)) albTotalDC = int32(len(*albList.Items)) natTotalDC = int32(len(*natList.Items)) @@ -181,7 +140,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { m.Lock() IonosDatacenters = newIonosDatacenters m.Unlock() - CalculateDCTotals(m) + // CalculateDCTotals(m) time.Sleep(time.Duration(cycletime) * time.Second) } } @@ -226,3 +185,137 @@ func PrintDCTotals(m *sync.RWMutex) { log.Printf("Total - Cores: %d\n", CoresTotal) log.Printf("Total - Ram: %d GB\n", RamTotal/1024) } + +func fetchNATGateways(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Datacenter) (*ionoscloud.NatGateways, error) { + datacenterId := *datacenter.Id + natList, resp, err := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), datacenterId).Depth(2).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling NATGateways API: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: 
%v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if natList.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + return &natList, nil +} + +func fetchNetworkLoadBalancers(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Datacenter) (*ionoscloud.NetworkLoadBalancers, error) { + datacenterId := *datacenter.Id + nlbList, resp, err := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), datacenterId).Depth(2).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling NetworkLoadbalancers API: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if nlbList.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + + return &nlbList, nil +} + +func fetchIPBlocks(apiClient *ionoscloud.APIClient) (*ionoscloud.IpBlocks, error) { + ipBlocks, resp, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(2).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling IPBlocks API: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if ipBlocks.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + + return &ipBlocks, nil +} + +func fetchApplicationLoadbalancers(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Datacenter) (*ionoscloud.ApplicationLoadBalancers, error) { + datacenterId := *datacenter.Id + albList, resp, err := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), datacenterId).Depth(2).Execute() + if err != nil { + fmt.Fprintf(os.Stderr, "Error when calling ApplicationLoadBalancers API: %v\n", err) + if resp != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: 
%v\n", resp) + } else { + fmt.Fprintf(os.Stderr, "No HTTP response received\n") + } + return nil, err + } + + if albList.Items == nil { + return nil, fmt.Errorf("no items in resource") + } + + return &albList, nil +} + +func processIPBlocks(ipBlocks *ionoscloud.IpBlocks) int32 { + var totalIPs int32 + for _, ips := range *ipBlocks.Items { + if ips.Properties != nil && ips.Properties.Size != nil { + totalIPs += *ips.Properties.Size + } + } + return totalIPs +} + +func processNetworkLoadBalancers(nlbList *ionoscloud.NetworkLoadBalancers) (string, int32) { + var ( + nlbNames string + nlbTotalRulesDC int32 + ) + + for _, nlb := range *nlbList.Items { + if nlb.Properties != nil && nlb.Properties.Name != nil { + nlbNames = *nlb.Properties.Name + } + nlbForwardingRules := nlb.Entities.Forwardingrules + if nlbForwardingRules != nil && nlbForwardingRules.Items != nil { + nlbTotalRulesDC = int32(len(*nlbForwardingRules.Items)) + for _, rule := range *nlbForwardingRules.Items { + if rule.Properties != nil && rule.Properties.Name != nil { + nlbNames = *rule.Properties.Name + } + } + } + } + return nlbNames, nlbTotalRulesDC +} + +func processApplicationLoadBalancers(albList *ionoscloud.ApplicationLoadBalancers) (string, int32) { + var ( + albNames string + albTotalRulesDC int32 + ) + + for _, alb := range *albList.Items { + if alb.Properties != nil && alb.Properties.Name != nil { + albNames = *alb.Properties.Name + } + albForwardingRules := alb.Entities.Forwardingrules + if albForwardingRules != nil && albForwardingRules.Items != nil { + albTotalRulesDC = int32(len(*albForwardingRules.Items)) + for _, rule := range *albForwardingRules.Items { + if rule.Properties != nil && rule.Properties.Name != nil { + albNames = *rule.Properties.Name + } + } + } + } + return albNames, albTotalRulesDC +} diff --git a/internal/postgres_collector.go b/internal/postgres_collector.go new file mode 100644 index 0000000..38e55af --- /dev/null +++ b/internal/postgres_collector.go @@ -0,0 +1,160 @@ 
+package internal + +import ( + "fmt" + "strconv" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +//"time" + +type postgresCollector struct { + mutex *sync.RWMutex + postgresTotalRamMetric *prometheus.GaugeVec + postgresTotalCPUMetric *prometheus.GaugeVec + postgresTotalStorageMetric *prometheus.GaugeVec + postgresTransactionRateMetric *prometheus.GaugeVec + postgresTotalStorageBytesMetric *prometheus.GaugeVec + postgresAvailableStorageBytesMetric *prometheus.GaugeVec + postgresDiskIOMetric *prometheus.GaugeVec + postgresCpuRateMetric *prometheus.GaugeVec + postgresLoadMetric *prometheus.GaugeVec + postgresTotalMemoryAvailableBytes *prometheus.GaugeVec +} + +func NewPostgresCollector(m *sync.RWMutex) *postgresCollector { + return &postgresCollector{ + mutex: m, + postgresTotalRamMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_ram_in_cluster", + Help: "Gives the total ammount of allocated RAM in cluster", + }, []string{"clusterName", "owner", "db"}), + postgresTotalCPUMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_cpu_in_cluster", + Help: "Gives a total amount of CPU Cores in Cluster", + }, []string{"clusterName", "owner", "db"}), + postgresTotalStorageMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_storage_in_cluster", + Help: "Gives a total amount of Storage in Cluster", + }, []string{"clusterName", "owner", "db"}), + postgresTransactionRateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_transactions:rate2m", + Help: "Gives a Transaction Rate in postgres cluster in 2m", + }, []string{"clusterName"}), + postgresTotalStorageBytesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_total_storage_metric", + Help: "Gives a Total Storage Metric in Bytes", + }, []string{"clusterName"}), + postgresAvailableStorageBytesMetric: 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_available_storage_metric", + Help: "Gives a Available Storage Metric in Bytes", + }, []string{"clusterName"}), + postgresCpuRateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgress_cpu_rate5m", + Help: "Gives a CPU Rate (Average Utilization) over the past 5 Minutes", + }, []string{"clusterName"}), + postgresDiskIOMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m", + Help: "The rate of disk I/O time, in seconds, over a five-minute period.", + }, []string{"clusterName"}), + postgresLoadMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_load5", + Help: "Linux load average for the last 5 minutes.", + }, []string{"clusterName"}), + postgresTotalMemoryAvailableBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ionos_dbaas_postgres_memory_available_bytes", + Help: "Available memory in bytes", + }, []string{"clusterName"}), + } +} + +func (collector *postgresCollector) Describe(ch chan<- *prometheus.Desc) { + collector.postgresTotalCPUMetric.Describe(ch) + collector.postgresTotalRamMetric.Describe(ch) + collector.postgresTotalStorageMetric.Describe(ch) + collector.postgresTransactionRateMetric.Describe(ch) + collector.postgresTotalStorageBytesMetric.Describe(ch) + collector.postgresAvailableStorageBytesMetric.Describe(ch) + collector.postgresCpuRateMetric.Describe(ch) + collector.postgresDiskIOMetric.Describe(ch) + collector.postgresLoadMetric.Describe(ch) + collector.postgresTotalMemoryAvailableBytes.Describe(ch) +} + +func (collector *postgresCollector) Collect(ch chan<- prometheus.Metric) { + collector.mutex.RLock() + defer collector.mutex.RUnlock() + + metricsMutex.Lock() + collector.postgresTotalCPUMetric.Reset() + collector.postgresTotalRamMetric.Reset() + collector.postgresTotalStorageMetric.Reset() + metricsMutex.Unlock() + + for postgresName, 
postgresResources := range IonosPostgresClusters { + + for _, telemetry := range postgresResources.Telemetry { + for _, value := range telemetry.Values { + if len(value) != 2 { + fmt.Printf("Unexpected value length: %v\n", value) + continue + } + metricValue, ok := value[1].(float64) + if !ok { + strValue, ok := value[1].(string) + if !ok { + fmt.Printf("Unexpected type for metric %s value: %v\n", telemetry.Values, value[1]) + continue + } + + var err error + metricValue, err = strconv.ParseFloat(strValue, 64) + if err != nil { + fmt.Printf("Failed to parse metric value: %v\n", err) + continue + } + } + // fmt.Println("Telemetry Metric", telemetry.Metric) + switch telemetry.Metric["__name__"] { + case "ionos_dbaas_postgres_transactions:rate2m": + collector.postgresTransactionRateMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_storage_total_bytes": + collector.postgresTotalStorageBytesMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_storage_available_bytes": + collector.postgresAvailableStorageBytesMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_cpu_rate5m": + collector.postgresCpuRateMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m": + collector.postgresDiskIOMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_load5": + collector.postgresLoadMetric.WithLabelValues(postgresName).Set(float64(metricValue)) + case "ionos_dbaas_postgres_memory_available_bytes": + collector.postgresTotalMemoryAvailableBytes.WithLabelValues(postgresName).Set(float64(metricValue)) + default: + // fmt.Printf("Unrecognised metric: %s\n", telemetry.Metric["__name__"]) + continue + } + } + } + + for _, dbName := range postgresResources.DatabaseNames { + + collector.postgresTotalCPUMetric.WithLabelValues(postgresName, postgresResources.Owner, 
dbName).Set(float64(postgresResources.CPU)) + collector.postgresTotalRamMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.RAM)) + collector.postgresTotalStorageMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.Storage)) + } + + } + collector.postgresTotalCPUMetric.Collect(ch) + collector.postgresTotalRamMetric.Collect(ch) + collector.postgresTotalStorageMetric.Collect(ch) + collector.postgresTransactionRateMetric.Collect(ch) + collector.postgresTotalStorageBytesMetric.Collect(ch) + collector.postgresAvailableStorageBytesMetric.Collect(ch) + collector.postgresCpuRateMetric.Collect(ch) + collector.postgresDiskIOMetric.Collect(ch) + collector.postgresLoadMetric.Collect(ch) + collector.postgresTotalMemoryAvailableBytes.Collect(ch) +} diff --git a/internal/postgres-test.go b/internal/postgres_scraper.go similarity index 95% rename from internal/postgres-test.go rename to internal/postgres_scraper.go index 4d11cae..73ed48d 100644 --- a/internal/postgres-test.go +++ b/internal/postgres_scraper.go @@ -77,15 +77,13 @@ func loadConfig(filename string) (*Config, error) { return &config, nil } -func Testpsql(m *sync.RWMutex, cycletime int32) { +func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { err := godotenv.Load(".env") if err != nil { fmt.Println("Error loading .env file") } cfgENV := psql.NewConfigurationFromEnv() - apiClient := psql.NewAPIClient(cfgENV) - config, err := loadConfig("config.yaml") if err != nil { log.Fatalf("Failed to load config: %v", err) @@ -113,6 +111,15 @@ func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric newIonosPostgresResources := make(map[string]IonosPostgresResources) for _, clusters := range *datacenters.Items { + if clusters.Id == nil || clusters.Properties == nil { + fmt.Fprintf(os.Stderr, "Cluster or Cluster Properties are nil\n") + continue + } + clusterName := clusters.Properties.DisplayName + if 
clusterName == nil { + fmt.Fprintf(os.Stderr, "Cluster name is nil\n") + continue + } databaseNames, err := fetchDatabases(apiClient, *clusters.Id) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fetch databases for cluster %s: %v\n", *clusters.Properties.DisplayName, err) @@ -146,10 +153,7 @@ func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric Telemetry: telemetryData, } } - // clusterName := "spsh-dev-schulportal" m.Lock() - // fmt.Printf("Here is the telemetryData for cluster '%s': %v\n", clusterName, newIonosPostgresResources[clusterName].Telemetry) - // fmt.Printf("Here is the map %v", newIonosPostgresResources["telemetryData"]) IonosPostgresClusters = newIonosPostgresResources m.Unlock() diff --git a/internal/prometheus.go b/internal/prometheus.go index c3009c2..4722930 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -1,11 +1,8 @@ package internal import ( - "fmt" "io" "net/http" - "os" - "strconv" "sync" //"time" @@ -13,501 +10,8 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// Define a struct for you collector that contains pointers -// to prometheus descriptors for each metric you wish to expose. -// Note you can also include fields of other types if they provide utility -// but we just won't be exposing them as metrics. 
-type ionosCollector struct { - mutex *sync.RWMutex - coresMetric *prometheus.GaugeVec - ramMetric *prometheus.GaugeVec - serverMetric *prometheus.GaugeVec - dcCoresMetric *prometheus.GaugeVec - dcRamMetric *prometheus.GaugeVec - dcServerMetric *prometheus.GaugeVec - dcDCMetric *prometheus.GaugeVec - nlbsMetric *prometheus.GaugeVec - albsMetric *prometheus.GaugeVec - natsMetric *prometheus.GaugeVec - dcDCNLBMetric *prometheus.GaugeVec - dcDCALBMetric *prometheus.GaugeVec - dcDCNATMetric *prometheus.GaugeVec - dcNLBRulesMetric *prometheus.GaugeVec - dcALBRulesMetric *prometheus.GaugeVec - dcTotalIpsMetric prometheus.Gauge -} - -type postgresCollector struct { - mutex *sync.RWMutex - postgresTotalRamMetric *prometheus.GaugeVec - postgresTotalCPUMetric *prometheus.GaugeVec - postgresTotalStorageMetric *prometheus.GaugeVec - postgresTransactionRateMetric *prometheus.GaugeVec - postgresTotalStorageBytesMetric *prometheus.GaugeVec - postgresAvailableStorageBytesMetric *prometheus.GaugeVec - postgresDiskIOMetric *prometheus.GaugeVec - postgresCpuRateMetric *prometheus.GaugeVec - postgresLoadMetric *prometheus.GaugeVec - postgresTotalMemoryAvailableBytes *prometheus.GaugeVec -} - -type s3Collector struct { - mutex *sync.RWMutex - s3TotalGetRequestSizeMetric *prometheus.GaugeVec - s3TotalGetResponseSizeMetric *prometheus.GaugeVec - s3TotalPutRequestSizeMetric *prometheus.GaugeVec - s3TotalPutResponseSizeMetric *prometheus.GaugeVec - s3TotalPostRequestSizeMetric *prometheus.GaugeVec - s3TotalPostResponseSizeMetric *prometheus.GaugeVec - s3TotalHeadRequestSizeMetric *prometheus.GaugeVec - s3TotalHeadResponseSizeMetric *prometheus.GaugeVec - s3TotalNumberOfGetRequestsMetric *prometheus.GaugeVec - s3TotalNumberOfPutRequestsMetric *prometheus.GaugeVec - s3TotalNumberOfPostRequestsMetric *prometheus.GaugeVec - s3TotalNumberOfHeadRequestsMetric *prometheus.GaugeVec -} - // var mutex *sync.RWMutex -// You must create a constructor for you collector that -// initializes every 
descriptor and returns a pointer to the collector -func newIonosCollector(m *sync.RWMutex) *ionosCollector { - return &ionosCollector{ - mutex: m, - coresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dc_cores_amount", - Help: "Shows the number of currently active cores in an IONOS datacenter", - }, []string{"datacenter"}), - ramMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dc_ram_gb", - Help: "Shows the number of currently active RAM in an IONOS datacenter", - }, []string{"datacenter"}), - serverMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dc_server_amount", - Help: "Shows the number of currently active servers in an IONOS datacenter", - }, []string{"datacenter"}), - dcCoresMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_cores_amount", - Help: "Shows the number of currently active cores of an IONOS account", - }, []string{"account"}), - dcRamMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_ram_gb", - Help: "Shows the number of currently active RAM of an IONOS account", - }, []string{"account"}), - dcServerMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_server_amount", - Help: "Shows the number of currently active servers of an IONOS account", - }, []string{"account"}), - dcDCMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_datacenter_amount", - Help: "Shows the number of datacenters of an IONOS account", - }, []string{"account"}), - nlbsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_networkloadbalancer_amount", - Help: "Shows the number of active Network Loadbalancers in an IONOS datacenter", - }, []string{"datacenter", "nlb_name", "nlb_rules_name"}), - albsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_applicationloadbalancer_amount", - Help: "Shows the number of active Application Loadbalancers in an IONOS datacenter", - }, []string{"datacenter", 
"alb_name", "alb_rules_name"}), - natsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_nat_gateways_amount", - Help: "Shows the number of NAT Gateways in an IONOS datacenter", - }, []string{"datacenter"}), - dcDCNLBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_networkloadbalancer_amount", - Help: "Shows the total number of Network Loadbalancers in IONOS Account", - }, []string{"account"}), - dcDCALBMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_applicationbalancer_amount", - Help: "Shows the total number of Application Loadbalancers in IONOS Account", - }, []string{"account"}), - dcDCNATMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_nat_gateways_amount", - Help: "Shows the total number of NAT Gateways in IONOS Account", - }, []string{"account"}), - dcNLBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_number_of_nlb_rules", - Help: "Shows the total number of NLB Rules in IONOS Account", - }, []string{"nlb_rules"}), - dcALBRulesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_total_nmumber_of_alb_rules", - Help: "Shows the total number of ALB Rules in IONOS Account", - }, []string{"alb_rules"}), - dcTotalIpsMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "ionos_total_number_of_ips", - Help: "Shows the number of Ips in a IONOS", - }), - } -} - -// s3collector func returns all the metrics as gauges -func newS3Collector(m *sync.RWMutex) *s3Collector { - return &s3Collector{ - mutex: m, - s3TotalGetRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_get_request_size_in_bytes", - Help: "Gives the total size of s3 GET Request in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalGetResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_get_response_size_in_bytes", - Help: "Gives the total 
size of s3 GET Response in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalPutRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_put_request_size_in_bytes", - Help: "Gives the total size of s3 PUT Request in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalPutResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_put_response_size_in_bytes", - Help: "Gives the total size of s3 PUT Response in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalPostRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_post_request_size_in_bytes", - Help: "Gives the total size of s3 POST Request in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalPostResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_post_response_size_in_bytes", - Help: "Gives the total size of s3 POST Response in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalHeadRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_head_request_size_in_bytes", - Help: "Gives the total size of s3 HEAD Request in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalHeadResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_head_response_size_in_bytes", - Help: "Gives the total size of s3 HEAD Response in Bytes in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalNumberOfGetRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: 
"s3_total_number_of_get_requests", - Help: "Gives the total number of S3 GET HTTP Requests in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalNumberOfPutRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_number_of_put_requests", - Help: "Gives the total number of S3 PUT HTTP Requests in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalNumberOfPostRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_number_of_post_requests", - Help: "Gives the total number of S3 Post Requests in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - s3TotalNumberOfHeadRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "s3_total_number_of_head_requests", - Help: "Gives the total number of S3 HEAD HTTP Requests in one Bucket", - }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), - } -} - -func newPostgresCollector(m *sync.RWMutex) *postgresCollector { - return &postgresCollector{ - mutex: m, - postgresTotalRamMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgres_total_ram_in_cluster", - Help: "Gives the total ammount of allocated RAM in cluster", - }, []string{"clusterName", "owner", "db"}), - postgresTotalCPUMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgres_total_cpu_in_cluster", - Help: "Gives a total amount of CPU Cores in Cluster", - }, []string{"clusterName", "owner", "db"}), - postgresTotalStorageMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgres_total_storage_in_cluster", - Help: "Gives a total amount of Storage in Cluster", - }, []string{"clusterName", "owner", "db"}), - postgresTransactionRateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgres_transactions:rate2m", 
- Help: "Gives a Transaction Rate in postgres cluster in 2m", - }, []string{"clusterName"}), - postgresTotalStorageBytesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgres_total_storage_metric", - Help: "Gives a Total Storage Metric in Bytes", - }, []string{"clusterName"}), - postgresAvailableStorageBytesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgres_available_storage_metric", - Help: "Gives a Available Storage Metric in Bytes", - }, []string{"clusterName"}), - postgresCpuRateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgress_cpu_rate5m", - Help: "Gives a CPU Rate (Average Utilization) over the past 5 Minutes", - }, []string{"clusterName"}), - postgresDiskIOMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m", - Help: "The rate of disk I/O time, in seconds, over a five-minute period.", - }, []string{"clusterName"}), - postgresLoadMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgres_load5", - Help: "Linux load average for the last 5 minutes.", - }, []string{"clusterName"}), - postgresTotalMemoryAvailableBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ionos_dbaas_postgres_memory_available_bytes", - Help: "Available memory in bytes", - }, []string{"clusterName"}), - } -} - -func (collector *postgresCollector) Describe(ch chan<- *prometheus.Desc) { - collector.postgresTotalCPUMetric.Describe(ch) - collector.postgresTotalRamMetric.Describe(ch) - collector.postgresTotalStorageMetric.Describe(ch) - collector.postgresTransactionRateMetric.Describe(ch) - collector.postgresTotalStorageBytesMetric.Describe(ch) - collector.postgresAvailableStorageBytesMetric.Describe(ch) - collector.postgresCpuRateMetric.Describe(ch) - collector.postgresDiskIOMetric.Describe(ch) - collector.postgresLoadMetric.Describe(ch) - collector.postgresTotalMemoryAvailableBytes.Describe(ch) -} - 
-func (collector *postgresCollector) Collect(ch chan<- prometheus.Metric) { - collector.mutex.RLock() - defer collector.mutex.RUnlock() - - metricsMutex.Lock() - collector.postgresTotalCPUMetric.Reset() - collector.postgresTotalRamMetric.Reset() - collector.postgresTotalStorageMetric.Reset() - metricsMutex.Unlock() - - for postgresName, postgresResources := range IonosPostgresClusters { - - for _, telemetry := range postgresResources.Telemetry { - for _, value := range telemetry.Values { - if len(value) != 2 { - fmt.Printf("Unexpected value length: %v\n", value) - continue - } - metricValue, ok := value[1].(float64) - if !ok { - strValue, ok := value[1].(string) - if !ok { - fmt.Printf("Unexpected type for metric %s value: %v\n", telemetry.Values, value[1]) - continue - } - - var err error - metricValue, err = strconv.ParseFloat(strValue, 64) - if err != nil { - fmt.Printf("Failed to parse metric value: %v\n", err) - continue - } - } - // fmt.Println("Telemetry Metric", telemetry.Metric) - switch telemetry.Metric["__name__"] { - case "ionos_dbaas_postgres_transactions:rate2m": - collector.postgresTransactionRateMetric.WithLabelValues(postgresName).Set(float64(metricValue)) - case "ionos_dbaas_postgres_storage_total_bytes": - collector.postgresTotalStorageBytesMetric.WithLabelValues(postgresName).Set(float64(metricValue)) - case "ionos_dbaas_postgres_storage_available_bytes": - collector.postgresAvailableStorageBytesMetric.WithLabelValues(postgresName).Set(float64(metricValue)) - case "ionos_dbaas_postgres_cpu_rate5m": - collector.postgresCpuRateMetric.WithLabelValues(postgresName).Set(float64(metricValue)) - case "ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m": - collector.postgresDiskIOMetric.WithLabelValues(postgresName).Set(float64(metricValue)) - case "ionos_dbaas_postgres_load5": - collector.postgresLoadMetric.WithLabelValues(postgresName).Set(float64(metricValue)) - case "ionos_dbaas_postgres_memory_available_bytes": - 
collector.postgresTotalMemoryAvailableBytes.WithLabelValues(postgresName).Set(float64(metricValue)) - default: - // fmt.Printf("Unrecognised metric: %s\n", telemetry.Metric["__name__"]) - continue - } - } - } - - for _, dbName := range postgresResources.DatabaseNames { - - collector.postgresTotalCPUMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.CPU)) - collector.postgresTotalRamMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.RAM)) - collector.postgresTotalStorageMetric.WithLabelValues(postgresName, postgresResources.Owner, dbName).Set(float64(postgresResources.Storage)) - } - - } - collector.postgresTotalCPUMetric.Collect(ch) - collector.postgresTotalRamMetric.Collect(ch) - collector.postgresTotalStorageMetric.Collect(ch) - collector.postgresTransactionRateMetric.Collect(ch) - collector.postgresTotalStorageBytesMetric.Collect(ch) - collector.postgresAvailableStorageBytesMetric.Collect(ch) - collector.postgresCpuRateMetric.Collect(ch) - collector.postgresDiskIOMetric.Collect(ch) - collector.postgresLoadMetric.Collect(ch) - collector.postgresTotalMemoryAvailableBytes.Collect(ch) -} -func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { - collector.s3TotalGetRequestSizeMetric.Describe(ch) - collector.s3TotalGetResponseSizeMetric.Describe(ch) - collector.s3TotalPutRequestSizeMetric.Describe(ch) - collector.s3TotalPutResponseSizeMetric.Describe(ch) - collector.s3TotalPostRequestSizeMetric.Describe(ch) - collector.s3TotalPostResponseSizeMetric.Describe(ch) - collector.s3TotalHeadRequestSizeMetric.Describe(ch) - collector.s3TotalHeadResponseSizeMetric.Describe(ch) - collector.s3TotalNumberOfGetRequestsMetric.Describe(ch) - collector.s3TotalNumberOfPutRequestsMetric.Describe(ch) - collector.s3TotalNumberOfPostRequestsMetric.Describe(ch) - collector.s3TotalNumberOfHeadRequestsMetric.Describe(ch) - -} - -func (collector *s3Collector) Collect(ch chan<- 
prometheus.Metric) { - collector.mutex.RLock() - defer collector.mutex.RUnlock() - - metricsMutex.Lock() - collector.s3TotalGetRequestSizeMetric.Reset() - collector.s3TotalGetResponseSizeMetric.Reset() - collector.s3TotalPutRequestSizeMetric.Reset() - collector.s3TotalPutResponseSizeMetric.Reset() - collector.s3TotalPostRequestSizeMetric.Reset() - collector.s3TotalPostResponseSizeMetric.Reset() - collector.s3TotalHeadRequestSizeMetric.Reset() - collector.s3TotalHeadResponseSizeMetric.Reset() - collector.s3TotalNumberOfGetRequestsMetric.Reset() - collector.s3TotalNumberOfPutRequestsMetric.Reset() - collector.s3TotalNumberOfPostRequestsMetric.Reset() - collector.s3TotalNumberOfHeadRequestsMetric.Reset() - - defer metricsMutex.Unlock() - - for s3Name, s3Resources := range IonosS3Buckets { - - region := s3Resources.Regions - owner := s3Resources.Owner - tags, ok := TagsForPrometheus[s3Name] - if !ok { - fmt.Printf("No tags found for bucket %s\n", s3Name) - continue - } - //tags of buckets change to tags you have defined on s3 buckets - enviroment := tags["Enviroment"] - namespace := tags["Namespace"] - tenant := tags["Tenant"] - - for method, requestSize := range s3Resources.RequestSizes { - switch method { - case MethodGET: - collector.s3TotalGetRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) - case MethodPOST: - collector.s3TotalPostRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) - case MethodHEAD: - collector.s3TotalHeadRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) - case MethodPUT: - collector.s3TotalPutRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) - } - - } - for method, responseSize := range s3Resources.ResponseSizes { - switch method { - case MethodGET: - 
collector.s3TotalGetResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) - case MethodPOST: - collector.s3TotalPostResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) - case MethodHEAD: - collector.s3TotalHeadResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) - case MethodPUT: - collector.s3TotalPutResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) - } - } - - for method, responseSize := range s3Resources.Methods { - switch method { - case MethodGET: - collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) - case MethodPOST: - collector.s3TotalNumberOfPostRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) - case MethodHEAD: - collector.s3TotalNumberOfHeadRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) - case MethodPUT: - collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) - } - } - } - - collector.s3TotalGetRequestSizeMetric.Collect(ch) - collector.s3TotalGetResponseSizeMetric.Collect(ch) - collector.s3TotalPutRequestSizeMetric.Collect(ch) - collector.s3TotalPutResponseSizeMetric.Collect(ch) - collector.s3TotalPostRequestSizeMetric.Collect(ch) - collector.s3TotalPostResponseSizeMetric.Collect(ch) - collector.s3TotalHeadRequestSizeMetric.Collect(ch) - collector.s3TotalHeadResponseSizeMetric.Collect(ch) - collector.s3TotalNumberOfGetRequestsMetric.Collect(ch) - collector.s3TotalNumberOfPutRequestsMetric.Collect(ch) - 
collector.s3TotalNumberOfPostRequestsMetric.Collect(ch) - collector.s3TotalNumberOfHeadRequestsMetric.Collect(ch) -} - -// Each and every collector must implement the Describe function. -// It essentially writes all descriptors to the prometheus desc channel. -func (collector *ionosCollector) Describe(ch chan<- *prometheus.Desc) { - - //Update this section with the each metric you create for a given collector - collector.coresMetric.Describe(ch) - collector.ramMetric.Describe(ch) - collector.serverMetric.Describe(ch) - collector.dcCoresMetric.Describe(ch) - collector.dcRamMetric.Describe(ch) - collector.dcServerMetric.Describe(ch) - collector.dcDCMetric.Describe(ch) - collector.nlbsMetric.Describe(ch) - collector.albsMetric.Describe(ch) - collector.natsMetric.Describe(ch) - collector.dcDCNLBMetric.Describe(ch) - collector.dcDCALBMetric.Describe(ch) - collector.dcDCNATMetric.Describe(ch) - collector.dcALBRulesMetric.Describe(ch) - collector.dcNLBRulesMetric.Describe(ch) - collector.dcTotalIpsMetric.Describe(ch) -} - -// Collect implements required collect function for all promehteus collectors -func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { - - //Implement logic here to determine proper metric value to return to prometheus - //for each descriptor or call other functions that do so. - account := os.Getenv("IONOS_ACCOUNT") - collector.mutex.RLock() - defer collector.mutex.RUnlock() - - // Reset metrics in case a datacenter was removed - collector.coresMetric.Reset() - collector.ramMetric.Reset() - collector.serverMetric.Reset() - collector.albsMetric.Reset() - collector.natsMetric.Reset() - collector.nlbsMetric.Reset() - // fmt.Println("Here are the metrics in ionosCollector", IonosDatacenters) - for dcName, dcResources := range IonosDatacenters { - //Write latest value for each metric in the prometheus metric channel. 
- collector.coresMetric.WithLabelValues(dcName).Set(float64(dcResources.Cores)) - collector.ramMetric.WithLabelValues(dcName).Set(float64(dcResources.Ram / 1024)) // MB -> GB - collector.serverMetric.WithLabelValues(dcName).Set(float64(dcResources.Servers)) - collector.nlbsMetric.WithLabelValues(dcName, dcResources.NLBName, dcResources.NLBRuleName).Set(float64(dcResources.NLBs)) - collector.albsMetric.WithLabelValues(dcName, dcResources.ALBName, dcResources.ALBRuleName).Set(float64(dcResources.ALBs)) - collector.natsMetric.WithLabelValues(dcName).Set(float64(dcResources.NATs)) - collector.dcTotalIpsMetric.Set(float64(dcResources.TotalIPs)) - - } - - collector.dcCoresMetric.WithLabelValues(account).Set(float64(CoresTotal)) - collector.dcRamMetric.WithLabelValues(account).Set(float64(RamTotal / 1024)) // MB -> GB - collector.dcServerMetric.WithLabelValues(account).Set(float64(ServerTotal)) - collector.dcDCMetric.WithLabelValues(account).Set(float64(DataCenters)) - - collector.coresMetric.Collect(ch) - collector.ramMetric.Collect(ch) - collector.serverMetric.Collect(ch) - collector.dcCoresMetric.Collect(ch) - collector.dcRamMetric.Collect(ch) - collector.dcServerMetric.Collect(ch) - collector.dcDCMetric.Collect(ch) - collector.nlbsMetric.Collect(ch) - collector.albsMetric.Collect(ch) - collector.natsMetric.Collect(ch) - collector.dcDCNLBMetric.Collect(ch) - collector.dcDCALBMetric.Collect(ch) - collector.dcDCNATMetric.Collect(ch) - collector.dcNLBRulesMetric.Collect(ch) - collector.dcALBRulesMetric.Collect(ch) - collector.dcTotalIpsMetric.Collect(ch) -} func (collector *ionosCollector) GetMutex() *sync.RWMutex { return collector.mutex } @@ -520,18 +24,22 @@ func (collector *postgresCollector) GetMutex() *sync.RWMutex { return collector.mutex } -func StartPrometheus(m *sync.RWMutex) { +// func StartPrometheus(m *sync.RWMutex) { +// dcMutex := &sync.RWMutex{} +// s3Mutex := &sync.RWMutex{} +// pgMutex := &sync.RWMutex{} - ic := newIonosCollector(m) - s3c := 
newS3Collector(m) - pc := newPostgresCollector(m) - prometheus.MustRegister(ic) - prometheus.MustRegister(s3c) - prometheus.MustRegister(pc) - prometheus.MustRegister(httpRequestsTotal) -} +// ionosCollector := NewIonosCollector(dcMutex) +// s3Collector := NewS3Collector(s3Mutex) +// pgCollector := NewPostgresCollector(pgMutex) + +// prometheus.MustRegister(ionosCollector) +// prometheus.MustRegister(s3Collector) +// prometheus.MustRegister(pgCollector) +// prometheus.MustRegister(HttpRequestsTotal) +// } -var httpRequestsTotal = prometheus.NewCounterVec( +var HttpRequestsTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "http_requests_total", Help: "Total number of HTTP requests", @@ -542,7 +50,7 @@ var httpRequestsTotal = prometheus.NewCounterVec( func HealthCheck(w http.ResponseWriter, r *http.Request) { // PrintDCTotals(mutex) - httpRequestsTotal.WithLabelValues("/healthcheck", r.Method).Inc() + HttpRequestsTotal.WithLabelValues("/healthcheck", r.Method).Inc() w.WriteHeader(http.StatusOK) io.WriteString(w, "OK") } diff --git a/internal/s3_collector.go b/internal/s3_collector.go new file mode 100644 index 0000000..b70f5ef --- /dev/null +++ b/internal/s3_collector.go @@ -0,0 +1,184 @@ +package internal + +import ( + "fmt" + "sync" + + //"time" + + "github.com/prometheus/client_golang/prometheus" +) + +type s3Collector struct { + mutex *sync.RWMutex + s3TotalGetRequestSizeMetric *prometheus.GaugeVec + s3TotalGetResponseSizeMetric *prometheus.GaugeVec + s3TotalPutRequestSizeMetric *prometheus.GaugeVec + s3TotalPutResponseSizeMetric *prometheus.GaugeVec + s3TotalPostRequestSizeMetric *prometheus.GaugeVec + s3TotalPostResponseSizeMetric *prometheus.GaugeVec + s3TotalHeadRequestSizeMetric *prometheus.GaugeVec + s3TotalHeadResponseSizeMetric *prometheus.GaugeVec + s3TotalNumberOfGetRequestsMetric *prometheus.GaugeVec + s3TotalNumberOfPutRequestsMetric *prometheus.GaugeVec + s3TotalNumberOfPostRequestsMetric *prometheus.GaugeVec + 
s3TotalNumberOfHeadRequestsMetric *prometheus.GaugeVec +} + +func NewS3Collector(m *sync.RWMutex) *s3Collector { + return &s3Collector{ + mutex: m, + s3TotalGetRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_get_request_size_in_bytes", + Help: "Gives the total size of s3 GET Request in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalGetResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_get_response_size_in_bytes", + Help: "Gives the total size of s3 GET Response in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalPutRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_put_request_size_in_bytes", + Help: "Gives the total size of s3 PUT Request in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalPutResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_put_response_size_in_bytes", + Help: "Gives the total size of s3 PUT Response in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalPostRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_post_request_size_in_bytes", + Help: "Gives the total size of s3 POST Request in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalPostResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_post_response_size_in_bytes", + Help: "Gives the total size of s3 POST Response in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalHeadRequestSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: 
"s3_total_head_request_size_in_bytes", + Help: "Gives the total size of s3 HEAD Request in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalHeadResponseSizeMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_head_response_size_in_bytes", + Help: "Gives the total size of s3 HEAD Response in Bytes in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalNumberOfGetRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_get_requests", + Help: "Gives the total number of S3 GET HTTP Requests in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalNumberOfPutRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_put_requests", + Help: "Gives the total number of S3 PUT HTTP Requests in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalNumberOfPostRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_post_requests", + Help: "Gives the total number of S3 Post Requests in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + s3TotalNumberOfHeadRequestsMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s3_total_number_of_head_requests", + Help: "Gives the total number of S3 HEAD HTTP Requests in one Bucket", + }, []string{"bucket", "method", "region", "owner", "enviroment", "namespace", "tenant"}), + } +} + +func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { + collector.s3TotalGetRequestSizeMetric.Describe(ch) + collector.s3TotalGetResponseSizeMetric.Describe(ch) + collector.s3TotalPutRequestSizeMetric.Describe(ch) + collector.s3TotalPutResponseSizeMetric.Describe(ch) + collector.s3TotalPostRequestSizeMetric.Describe(ch) + 
collector.s3TotalPostResponseSizeMetric.Describe(ch) + collector.s3TotalHeadRequestSizeMetric.Describe(ch) + collector.s3TotalHeadResponseSizeMetric.Describe(ch) + collector.s3TotalNumberOfGetRequestsMetric.Describe(ch) + collector.s3TotalNumberOfPutRequestsMetric.Describe(ch) + collector.s3TotalNumberOfPostRequestsMetric.Describe(ch) + collector.s3TotalNumberOfHeadRequestsMetric.Describe(ch) + +} + +func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { + collector.mutex.RLock() + defer collector.mutex.RUnlock() + + metricsMutex.Lock() + collector.s3TotalGetRequestSizeMetric.Reset() + collector.s3TotalGetResponseSizeMetric.Reset() + collector.s3TotalPutRequestSizeMetric.Reset() + collector.s3TotalPutResponseSizeMetric.Reset() + collector.s3TotalPostRequestSizeMetric.Reset() + collector.s3TotalPostResponseSizeMetric.Reset() + collector.s3TotalHeadRequestSizeMetric.Reset() + collector.s3TotalHeadResponseSizeMetric.Reset() + collector.s3TotalNumberOfGetRequestsMetric.Reset() + collector.s3TotalNumberOfPutRequestsMetric.Reset() + collector.s3TotalNumberOfPostRequestsMetric.Reset() + collector.s3TotalNumberOfHeadRequestsMetric.Reset() + + defer metricsMutex.Unlock() + + for s3Name, s3Resources := range IonosS3Buckets { + + region := s3Resources.Regions + owner := s3Resources.Owner + tags, ok := TagsForPrometheus[s3Name] + if !ok { + fmt.Printf("No tags found for bucket %s\n", s3Name) + continue + } + //tags of buckets change to tags you have defined on s3 buckets + enviroment := tags["Enviroment"] + namespace := tags["Namespace"] + tenant := tags["Tenant"] + + for method, requestSize := range s3Resources.RequestSizes { + switch method { + case MethodGET: + collector.s3TotalGetRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) + case MethodPOST: + collector.s3TotalPostRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, 
tenant).Set(float64(requestSize)) + case MethodHEAD: + collector.s3TotalHeadRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) + case MethodPUT: + collector.s3TotalPutRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) + } + + } + for method, responseSize := range s3Resources.ResponseSizes { + switch method { + case MethodGET: + collector.s3TotalGetResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodPOST: + collector.s3TotalPostResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodHEAD: + collector.s3TotalHeadResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodPUT: + collector.s3TotalPutResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + } + } + + for method, responseSize := range s3Resources.Methods { + switch method { + case MethodGET: + collector.s3TotalNumberOfGetRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodPOST: + collector.s3TotalNumberOfPostRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodHEAD: + collector.s3TotalNumberOfHeadRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + case MethodPUT: + collector.s3TotalNumberOfPutRequestsMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) + } + } + } + + collector.s3TotalGetRequestSizeMetric.Collect(ch) + collector.s3TotalGetResponseSizeMetric.Collect(ch) + 
collector.s3TotalPutRequestSizeMetric.Collect(ch) + collector.s3TotalPutResponseSizeMetric.Collect(ch) + collector.s3TotalPostRequestSizeMetric.Collect(ch) + collector.s3TotalPostResponseSizeMetric.Collect(ch) + collector.s3TotalHeadRequestSizeMetric.Collect(ch) + collector.s3TotalHeadResponseSizeMetric.Collect(ch) + collector.s3TotalNumberOfGetRequestsMetric.Collect(ch) + collector.s3TotalNumberOfPutRequestsMetric.Collect(ch) + collector.s3TotalNumberOfPostRequestsMetric.Collect(ch) + collector.s3TotalNumberOfHeadRequestsMetric.Collect(ch) +} diff --git a/internal/s3collector.go b/internal/s3_scraper.go similarity index 99% rename from internal/s3collector.go rename to internal/s3_scraper.go index 43791c7..c31e900 100644 --- a/internal/s3collector.go +++ b/internal/s3_scraper.go @@ -130,12 +130,10 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { Regions: config.Region, } IonosS3Buckets[bucketName] = metrics - } wg.Add(1) go func(client *s3.S3, bucketName string) { defer wg.Done() - getBucketTags(client, bucketName) if err := GetHeadBucket(client, bucketName); err != nil { if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 { return @@ -143,6 +141,7 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { log.Println("Error checking the bucket head:", err) return } + getBucketTags(client, bucketName) semaphore <- struct{}{} defer func() { <-semaphore diff --git a/main.go b/main.go index 4753f8c..87955c7 100644 --- a/main.go +++ b/main.go @@ -7,6 +7,7 @@ import ( "strconv" "sync" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) @@ -27,13 +28,24 @@ func main() { } go internal.CollectResources(dcMutex, ionos_api_cycle) go internal.S3CollectResources(s3Mutex, ionos_api_cycle) - // internal.PgGet() - go internal.Testpsql(pgMutex, ionos_api_cycle) - + go internal.PostgresCollectResources(pgMutex, ionos_api_cycle) + startPrometheus() 
//internal.PrintDCResources(mutex) - internal.StartPrometheus(dcMutex) + // internal.StartPrometheus(dcMutex) http.Handle("/metrics", promhttp.Handler()) http.Handle("/healthcheck", http.HandlerFunc(internal.HealthCheck)) log.Fatal(http.ListenAndServe(":"+exporterPort, nil)) } + +func startPrometheus() { + // Initialize and register collectors + ionosCollector := internal.NewIonosCollector(dcMutex) + s3Collector := internal.NewS3Collector(s3Mutex) + pgCollector := internal.NewPostgresCollector(pgMutex) + + prometheus.MustRegister(ionosCollector) + prometheus.MustRegister(s3Collector) + prometheus.MustRegister(pgCollector) + prometheus.MustRegister(internal.HttpRequestsTotal) +} From f063b1fdcd1db24232a39860202c6b42cca1efda Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 4 Jul 2024 17:25:08 +0200 Subject: [PATCH 25/55] Bug for when bucket doesnt have tags wont be pushed to prometheus fixed --- internal/postgres_collector.go | 1 - internal/postgres_scraper.go | 2 -- internal/prometheus.go | 28 ++++++++++++++-------------- internal/s3_collector.go | 8 ++------ internal/s3_scraper.go | 23 ++++++++++++++++------- main.go | 32 +++++++++----------------------- 6 files changed, 41 insertions(+), 53 deletions(-) diff --git a/internal/postgres_collector.go b/internal/postgres_collector.go index 38e55af..26c2165 100644 --- a/internal/postgres_collector.go +++ b/internal/postgres_collector.go @@ -116,7 +116,6 @@ func (collector *postgresCollector) Collect(ch chan<- prometheus.Metric) { continue } } - // fmt.Println("Telemetry Metric", telemetry.Metric) switch telemetry.Metric["__name__"] { case "ionos_dbaas_postgres_transactions:rate2m": collector.postgresTransactionRateMetric.WithLabelValues(postgresName).Set(float64(metricValue)) diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 73ed48d..0adf040 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -256,7 +256,5 @@ func fetchTelemetryMetrics(apiToken, query, clusterID 
string) (*TelemetryRespons return nil, err } - // fmt.Printf("Telemetry Response: %+v\n", telemetryResp) - return &telemetryResp, nil } diff --git a/internal/prometheus.go b/internal/prometheus.go index 4722930..3268cc0 100644 --- a/internal/prometheus.go +++ b/internal/prometheus.go @@ -24,20 +24,20 @@ func (collector *postgresCollector) GetMutex() *sync.RWMutex { return collector.mutex } -// func StartPrometheus(m *sync.RWMutex) { -// dcMutex := &sync.RWMutex{} -// s3Mutex := &sync.RWMutex{} -// pgMutex := &sync.RWMutex{} - -// ionosCollector := NewIonosCollector(dcMutex) -// s3Collector := NewS3Collector(s3Mutex) -// pgCollector := NewPostgresCollector(pgMutex) - -// prometheus.MustRegister(ionosCollector) -// prometheus.MustRegister(s3Collector) -// prometheus.MustRegister(pgCollector) -// prometheus.MustRegister(HttpRequestsTotal) -// } +func StartPrometheus(m *sync.RWMutex) { + dcMutex := &sync.RWMutex{} + s3Mutex := &sync.RWMutex{} + pgMutex := &sync.RWMutex{} + + ionosCollector := NewIonosCollector(dcMutex) + s3Collector := NewS3Collector(s3Mutex) + pgCollector := NewPostgresCollector(pgMutex) + + prometheus.MustRegister(ionosCollector) + prometheus.MustRegister(s3Collector) + prometheus.MustRegister(pgCollector) + prometheus.MustRegister(HttpRequestsTotal) +} var HttpRequestsTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ diff --git a/internal/s3_collector.go b/internal/s3_collector.go index b70f5ef..6243991 100644 --- a/internal/s3_collector.go +++ b/internal/s3_collector.go @@ -96,8 +96,6 @@ func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { } func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { - collector.mutex.RLock() - defer collector.mutex.RUnlock() metricsMutex.Lock() collector.s3TotalGetRequestSizeMetric.Reset() @@ -114,22 +112,19 @@ func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { collector.s3TotalNumberOfHeadRequestsMetric.Reset() defer metricsMutex.Unlock() - for s3Name, 
s3Resources := range IonosS3Buckets { - region := s3Resources.Regions owner := s3Resources.Owner tags, ok := TagsForPrometheus[s3Name] if !ok { fmt.Printf("No tags found for bucket %s\n", s3Name) - continue } //tags of buckets change to tags you have defined on s3 buckets enviroment := tags["Enviroment"] namespace := tags["Namespace"] tenant := tags["Tenant"] - for method, requestSize := range s3Resources.RequestSizes { + switch method { case MethodGET: collector.s3TotalGetRequestSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(requestSize)) @@ -143,6 +138,7 @@ func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { } for method, responseSize := range s3Resources.ResponseSizes { + switch method { case MethodGET: collector.s3TotalGetResponseSizeMetric.WithLabelValues(s3Name, method, region, owner, enviroment, namespace, tenant).Set(float64(responseSize)) diff --git a/internal/s3_scraper.go b/internal/s3_scraper.go index c31e900..c9e7540 100644 --- a/internal/s3_scraper.go +++ b/internal/s3_scraper.go @@ -49,7 +49,7 @@ const ( // how many objects to scan per page const ( - objectPerPage = 100 + objectPerPage = 1000 maxConcurrent = 10 ) @@ -95,7 +95,7 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { Endpoint: "https://s3-eu-central-1.ionoscloud.com", }, } - + //buffered channel that is like a semaphore semaphore := make(chan struct{}, maxConcurrent) for { var wg sync.WaitGroup @@ -132,8 +132,14 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { IonosS3Buckets[bucketName] = metrics } wg.Add(1) + fmt.Println("Processing Bucket: ", bucketName) go func(client *s3.S3, bucketName string) { defer wg.Done() + defer func() { + if r := recover(); r != nil { + log.Printf("Recovered in goroutine: %v", r) + } + }() if err := GetHeadBucket(client, bucketName); err != nil { if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 { return @@ -141,8 +147,9 @@ func 
S3CollectResources(m *sync.RWMutex, cycletime int32) { log.Println("Error checking the bucket head:", err) return } - getBucketTags(client, bucketName) + //acquiring slot in semaphore blocking if the buffer is full semaphore <- struct{}{} + //release the semaphore when the goroutine completes defer func() { <-semaphore }() @@ -166,8 +173,8 @@ func processBucket(client *s3.S3, bucketName string) { var wg sync.WaitGroup var logEntryRegex = regexp.MustCompile(`(GET|PUT|HEAD|POST) \/[^"]*" \d+ \S+ (\d+|-) (\d+|-) \d+ (\d+|-)`) semaphore := make(chan struct{}, maxConcurrent) - continuationToken := "" + getBucketTags(client, bucketName) metrics := Metrics{ Methods: make(map[string]int32), RequestSizes: make(map[string]int64), @@ -177,6 +184,8 @@ func processBucket(client *s3.S3, bucketName string) { } metrics.Regions = *client.Config.Region + continuationToken := "" + //getting owner getAclInput := &s3.GetBucketAclInput{ Bucket: aws.String(bucketName), @@ -191,11 +200,10 @@ func processBucket(client *s3.S3, bucketName string) { } else { metrics.Owner = "Unknown" } + //main loop for { - //get all objects in a bucket use max keys defined in global scope and go through - //the pages of a bucket objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ Bucket: aws.String(bucketName), Prefix: aws.String("logs/"), @@ -219,7 +227,6 @@ func processBucket(client *s3.S3, bucketName string) { } return } - //check if the bucket has any objects in logs folder if len(objectList.Contents) == 0 { log.Printf("bucket %s does not contain any objects with the 'logs/' prefix\n", bucketName) return @@ -297,7 +304,9 @@ func processBucket(client *s3.S3, bucketName string) { wg.Wait() //make it thread safe with a mutex metricsMutex.Lock() + fmt.Println("METRICS", IonosS3Buckets) IonosS3Buckets[bucketName] = metrics + fmt.Println("METRICS", IonosS3Buckets) metricsMutex.Unlock() } diff --git a/main.go b/main.go index 87955c7..f29087c 100644 --- a/main.go +++ b/main.go @@ -7,16 +7,13 @@ 
import ( "strconv" "sync" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) var ( - dcMutex = &sync.RWMutex{} // Mutex to sync access to the Datacenter map - s3Mutex = &sync.RWMutex{} - pgMutex = &sync.RWMutex{} - exporterPort string // Port to be used for exposing the metrics - ionos_api_cycle int32 // Cycle time in seconds to query the IONOS API for changes, not th ePrometheus scraping intervall + m = &sync.RWMutex{} // Mutex to sync access to the Datacenter map + exporterPort string // Port to be used for exposing the metrics + ionos_api_cycle int32 // Cycle time in seconds to query the IONOS API for changes, not th ePrometheus scraping intervall ) func main() { @@ -26,26 +23,15 @@ func main() { } else { ionos_api_cycle = int32(cycletime) } - go internal.CollectResources(dcMutex, ionos_api_cycle) - go internal.S3CollectResources(s3Mutex, ionos_api_cycle) - go internal.PostgresCollectResources(pgMutex, ionos_api_cycle) - startPrometheus() + go internal.CollectResources(m, ionos_api_cycle) + go internal.S3CollectResources(m, ionos_api_cycle) + go internal.PostgresCollectResources(m, ionos_api_cycle) + + // startPrometheus() //internal.PrintDCResources(mutex) - // internal.StartPrometheus(dcMutex) + internal.StartPrometheus(m) http.Handle("/metrics", promhttp.Handler()) http.Handle("/healthcheck", http.HandlerFunc(internal.HealthCheck)) log.Fatal(http.ListenAndServe(":"+exporterPort, nil)) } - -func startPrometheus() { - // Initialize and register collectors - ionosCollector := internal.NewIonosCollector(dcMutex) - s3Collector := internal.NewS3Collector(s3Mutex) - pgCollector := internal.NewPostgresCollector(pgMutex) - - prometheus.MustRegister(ionosCollector) - prometheus.MustRegister(s3Collector) - prometheus.MustRegister(pgCollector) - prometheus.MustRegister(internal.HttpRequestsTotal) -} From 663ba850b4a665fbb5f176d08d16213c96eef9d2 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 4 Jul 2024 
17:29:48 +0200 Subject: [PATCH 26/55] Added Config.yaml --- config.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/config.yaml b/config.yaml index dafe662..4289afd 100644 --- a/config.yaml +++ b/config.yaml @@ -44,7 +44,4 @@ metrics: type: gauge - name: ionos_dbaas_postgres_user_tables_idx_scan description: Number of index scans per table/schema. - type: gauge - - name: ionos_dbaas_postgres_user_tables_seq_scan - description: Number of sequential scans per table/schema. A high number of sequential scans may indicate that an index should be added to improve performance. type: gauge \ No newline at end of file From 675ba614fc7254a79ea133b9518ceb19d400e5b4 Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 4 Jul 2024 17:39:46 +0200 Subject: [PATCH 27/55] temp removal of postgress collection before i configure the deployment.yaml --- internal/postgres_scraper.go | 158 +++++++++++++++++------------------ main.go | 2 +- 2 files changed, 79 insertions(+), 81 deletions(-) diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 0adf040..d6200aa 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -5,14 +5,12 @@ import ( "encoding/json" "fmt" "io/ioutil" - "log" "net/http" "os" "sync" "time" psql "github.com/ionos-cloud/sdk-go-dbaas-postgres" - "github.com/joho/godotenv" "gopkg.in/yaml.v2" ) @@ -78,84 +76,84 @@ func loadConfig(filename string) (*Config, error) { } func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { - err := godotenv.Load(".env") - if err != nil { - fmt.Println("Error loading .env file") - } - cfgENV := psql.NewConfigurationFromEnv() - apiClient := psql.NewAPIClient(cfgENV) - config, err := loadConfig("config.yaml") - if err != nil { - log.Fatalf("Failed to load config: %v", err) - } - - for { - var wg sync.WaitGroup - for _, tenant := range config.Tenants { - wg.Add(1) - go func(tenant Tenant) { - defer wg.Done() - processCluster(apiClient, m, config.Metrics) - }(tenant) - } - wg.Wait() 
- time.Sleep(time.Duration(cycletime) * time.Second) - } -} - -func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric) { - datacenters, err := fetchClusters(apiClient) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fetch clusters: %v\n", err) - } - newIonosPostgresResources := make(map[string]IonosPostgresResources) - - for _, clusters := range *datacenters.Items { - if clusters.Id == nil || clusters.Properties == nil { - fmt.Fprintf(os.Stderr, "Cluster or Cluster Properties are nil\n") - continue - } - clusterName := clusters.Properties.DisplayName - if clusterName == nil { - fmt.Fprintf(os.Stderr, "Cluster name is nil\n") - continue - } - databaseNames, err := fetchDatabases(apiClient, *clusters.Id) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fetch databases for cluster %s: %v\n", *clusters.Properties.DisplayName, err) - continue - } - databaseOwner, err := fetchOwner(apiClient, *clusters.Id) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fetch owner for database %s: %v\n", *clusters.Properties.DisplayName, err) - continue - } - - telemetryData := make([]TelemetryMetric, 0) - - for _, metricConfig := range metrics { - telemetryResp, err := fetchTelemetryMetrics(os.Getenv("IONOS_TOKEN"), fmt.Sprintf("%s{postgres_cluster=\"%s\"}", metricConfig.Name, *clusters.Id), *clusters.Id) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fetch telemetry metrics for cluster %s: %v\n", *clusters.Id, err) - continue - } - telemetryData = append(telemetryData, telemetryResp.Data.Result...) 
- } - - // fmt.Printf("Here are the database names %v", databaseNames) - newIonosPostgresResources[*clusters.Properties.DisplayName] = IonosPostgresResources{ - ClusterName: *clusters.Properties.DisplayName, - CPU: *clusters.Properties.Cores, - RAM: *clusters.Properties.Ram, - Storage: *clusters.Properties.StorageSize, - DatabaseNames: databaseNames, - Owner: databaseOwner, - Telemetry: telemetryData, - } - } - m.Lock() - IonosPostgresClusters = newIonosPostgresResources - m.Unlock() + // err := godotenv.Load(".env") + // if err != nil { + // fmt.Println("Error loading .env file") + // } + // cfgENV := psql.NewConfigurationFromEnv() + // apiClient := psql.NewAPIClient(cfgENV) + // // config, err := loadConfig("config.yaml") + // // if err != nil { + // // log.Fatalf("Failed to load config: %v", err) + // // } + + // for { + // var wg sync.WaitGroup + // for _, tenant := range config.Tenants { + // wg.Add(1) + // go func(tenant Tenant) { + // defer wg.Done() + // processCluster(apiClient, m, config.Metrics) + // }(tenant) + // } + // wg.Wait() + // time.Sleep(time.Duration(cycletime) * time.Second) + // } + // } + + // func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric) { + // datacenters, err := fetchClusters(apiClient) + // if err != nil { + // fmt.Fprintf(os.Stderr, "Failed to fetch clusters: %v\n", err) + // } + // newIonosPostgresResources := make(map[string]IonosPostgresResources) + + // for _, clusters := range *datacenters.Items { + // if clusters.Id == nil || clusters.Properties == nil { + // fmt.Fprintf(os.Stderr, "Cluster or Cluster Properties are nil\n") + // continue + // } + // clusterName := clusters.Properties.DisplayName + // if clusterName == nil { + // fmt.Fprintf(os.Stderr, "Cluster name is nil\n") + // continue + // } + // databaseNames, err := fetchDatabases(apiClient, *clusters.Id) + // if err != nil { + // fmt.Fprintf(os.Stderr, "Failed to fetch databases for cluster %s: %v\n", *clusters.Properties.DisplayName, 
err) + // continue + // } + // databaseOwner, err := fetchOwner(apiClient, *clusters.Id) + // if err != nil { + // fmt.Fprintf(os.Stderr, "Failed to fetch owner for database %s: %v\n", *clusters.Properties.DisplayName, err) + // continue + // } + + // telemetryData := make([]TelemetryMetric, 0) + + // for _, metricConfig := range metrics { + // telemetryResp, err := fetchTelemetryMetrics(os.Getenv("IONOS_TOKEN"), fmt.Sprintf("%s{postgres_cluster=\"%s\"}", metricConfig.Name, *clusters.Id), *clusters.Id) + // if err != nil { + // fmt.Fprintf(os.Stderr, "Failed to fetch telemetry metrics for cluster %s: %v\n", *clusters.Id, err) + // continue + // } + // telemetryData = append(telemetryData, telemetryResp.Data.Result...) + // } + + // // fmt.Printf("Here are the database names %v", databaseNames) + // newIonosPostgresResources[*clusters.Properties.DisplayName] = IonosPostgresResources{ + // ClusterName: *clusters.Properties.DisplayName, + // CPU: *clusters.Properties.Cores, + // RAM: *clusters.Properties.Ram, + // Storage: *clusters.Properties.StorageSize, + // DatabaseNames: databaseNames, + // Owner: databaseOwner, + // Telemetry: telemetryData, + // } + // } + // m.Lock() + // IonosPostgresClusters = newIonosPostgresResources + // m.Unlock() } diff --git a/main.go b/main.go index f29087c..e7ee3a4 100644 --- a/main.go +++ b/main.go @@ -25,7 +25,7 @@ func main() { } go internal.CollectResources(m, ionos_api_cycle) go internal.S3CollectResources(m, ionos_api_cycle) - go internal.PostgresCollectResources(m, ionos_api_cycle) + // go internal.PostgresCollectResources(m, ionos_api_cycle) // startPrometheus() //internal.PrintDCResources(mutex) From a2f725583a01bdf4aa680ceb7dc6f6b2aa910ff9 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 5 Jul 2024 12:35:23 +0200 Subject: [PATCH 28/55] s3 refactor and ConfigMap definition --- charts/ionos-exporter/ConfigMap.yaml | 7 + charts/ionos-exporter/config.yaml | 55 +++++++ .../ionos-exporter/templates/deployment.yaml | 5 + 
config.yaml | 47 ------ internal/postgres_scraper.go | 143 +++++++++--------- internal/s3_scraper.go | 108 +++++++------ main.go | 2 +- 7 files changed, 189 insertions(+), 178 deletions(-) create mode 100644 charts/ionos-exporter/ConfigMap.yaml create mode 100644 charts/ionos-exporter/config.yaml delete mode 100644 config.yaml diff --git a/charts/ionos-exporter/ConfigMap.yaml b/charts/ionos-exporter/ConfigMap.yaml new file mode 100644 index 0000000..c10fd17 --- /dev/null +++ b/charts/ionos-exporter/ConfigMap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ionos-exporter-config +data: + config.yaml: |- + {{ .Files.Get "charts/ionos-exporter/config.yaml" | nindent 4 }} \ No newline at end of file diff --git a/charts/ionos-exporter/config.yaml b/charts/ionos-exporter/config.yaml new file mode 100644 index 0000000..620e938 --- /dev/null +++ b/charts/ionos-exporter/config.yaml @@ -0,0 +1,55 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ionos-exporter-config + +data: + config.yaml: | + + tenants: + - name: spsh + operations: + - clusters + - databases + - name: dbildungs + operations: + - clusters + + + metrics: + - name: ionos_dbaas_postgres_transactions:rate2m + description: Per-second average rate of SQL transactions (that have been committed), calculated over the last 2 minutes. + type: gauge + - name: ionos_dbaas_postgres_connections_count + description: Number of connections per instance and state. active, disabled, fastpath function call, idle, idle in transaction, idle in transaction (aborted). + type: gauge + - name: ionos_dbaas_postgres_cpu_rate5m + description: The average CPU utilization over the past 5 minutes. + type: gauge + - name: ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m + description: The rate of disk I/O time, in seconds, over a five-minute period. Provides insight into performance of a disk, as high values may indicate that the disk is being overused or is experiencing performance issues. 
+ type: gauge + - name: ionos_dbaas_postgres_instance_count + description: Desired number of instances. The number of currently ready and running instances may be different. ionos_dbaas_postgres_role provides information about running instances split by role. + type: gauge + - name: ionos_dbaas_postgres_load5 + description: Linux load average for the last 5 minutes. This metric is represented as a number between 0 and 1 (can be greater than 1 on multicore machines), where 0 indicates that the CPU core is idle and 1 indicates that the CPU core is fully utilized. Higher values may indicate that the system is experiencing performance issues or is approaching capacity. + type: gauge + - name: ionos_dbaas_postgres_memory_available_bytes + description: Available memory in bytes. + type: gauge + - name: ionos_dbaas_postgres_memory_total_bytes + description: Total memory of the underlying machine in bytes. Some of it is used for our management and monitoring tools and not available to PostgreSQL. During horizontal scaling you might see different values for each instance. + type: gauge + - name: ionos_dbaas_postgres_role + description: Current role of the instance. Provides whether an instance is currently "master" or "replica". + type: gauge + - name: ionos_dbaas_postgres_storage_available_bytes + description: Free available disk space per instance in bytes. + type: gauge + - name: ionos_dbaas_postgres_storage_total_bytes + description: Total disk space per instance in bytes. During horizontal scaling you might see different values for each instance. + type: gauge + - name: ionos_dbaas_postgres_user_tables_idx_scan + description: Number of index scans per table/schema. 
+ type: gauge \ No newline at end of file diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index 2882c62..b6c3de5 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -46,6 +46,11 @@ spec: resources: {{- toYaml .Values.resources | nindent 12 }} env: + - name: CONFIG_CONTENT + valueFrom: + configMapKeyRef: + name: ionos-exporter-config + key: config.yaml - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: diff --git a/config.yaml b/config.yaml deleted file mode 100644 index 4289afd..0000000 --- a/config.yaml +++ /dev/null @@ -1,47 +0,0 @@ -tenants: - - name: spsh - operations: - - clusters - - databases - - name: dbildungs - operations: - - clusters - - -metrics: - - name: ionos_dbaas_postgres_transactions:rate2m - description: Per-second average rate of SQL transactions (that have been committed), calculated over the last 2 minutes. - type: gauge - - name: ionos_dbaas_postgres_connections_count - description: Number of connections per instance and state. active, disabled, fastpath function call, idle, idle in transaction, idle in transaction (aborted). - type: gauge - - name: ionos_dbaas_postgres_cpu_rate5m - description: The average CPU utilization over the past 5 minutes. - type: gauge - - name: ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m - description: The rate of disk I/O time, in seconds, over a five-minute period. Provides insight into performance of a disk, as high values may indicate that the disk is being overused or is experiencing performance issues. - type: gauge - - name: ionos_dbaas_postgres_instance_count - description: Desired number of instances. The number of currently ready and running instances may be different. ionos_dbaas_postgres_role provides information about running instances split by role. - type: gauge - - name: ionos_dbaas_postgres_load5 - description: Linux load average for the last 5 minutes. 
This metric is represented as a number between 0 and 1 (can be greater than 1 on multicore machines), where 0 indicates that the CPU core is idle and 1 indicates that the CPU core is fully utilized. Higher values may indicate that the system is experiencing performance issues or is approaching capacity. - type: gauge - - name: ionos_dbaas_postgres_memory_available_bytes - description: Available memory in bytes. - type: gauge - - name: ionos_dbaas_postgres_memory_total_bytes - description: Total memory of the underlying machine in bytes. Some of it is used for our management and monitoring tools and not available to PostgreSQL. During horizontal scaling you might see different values for each instance. - type: gauge - - name: ionos_dbaas_postgres_role - description: Current role of the instance. Provides whether an instance is currently "master" or "replica". - type: gauge - - name: ionos_dbaas_postgres_storage_available_bytes - description: Free available disk space per instance in bytes. - type: gauge - - name: ionos_dbaas_postgres_storage_total_bytes - description: Total disk space per instance in bytes. During horizontal scaling you might see different values for each instance. - type: gauge - - name: ionos_dbaas_postgres_user_tables_idx_scan - description: Number of index scans per table/schema. 
- type: gauge \ No newline at end of file diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index d6200aa..2d0fd2f 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "log" "net/http" "os" "sync" @@ -80,80 +81,76 @@ func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { // if err != nil { // fmt.Println("Error loading .env file") // } - // cfgENV := psql.NewConfigurationFromEnv() - // apiClient := psql.NewAPIClient(cfgENV) - // // config, err := loadConfig("config.yaml") - // // if err != nil { - // // log.Fatalf("Failed to load config: %v", err) - // // } - - // for { - // var wg sync.WaitGroup - // for _, tenant := range config.Tenants { - // wg.Add(1) - // go func(tenant Tenant) { - // defer wg.Done() - // processCluster(apiClient, m, config.Metrics) - // }(tenant) - // } - // wg.Wait() - // time.Sleep(time.Duration(cycletime) * time.Second) - // } - // } + cfgENV := psql.NewConfigurationFromEnv() + apiClient := psql.NewAPIClient(cfgENV) + config, err := loadConfig("./charts/ionos-exporter/config.yaml") + if err != nil { + log.Fatalf("Failed to load config: %v", err) + } + + for { + processCluster(apiClient, m, config.Metrics) + time.Sleep(time.Duration(cycletime) * time.Second) + } +} + +func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric) { + datacenters, err := fetchClusters(apiClient) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch clusters: %v\n", err) + } + if datacenters == nil || datacenters.Items == nil { + fmt.Fprintf(os.Stderr, "datacenters or datacenters Items are nil\n") + return + } + newIonosPostgresResources := make(map[string]IonosPostgresResources) - // func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric) { - // datacenters, err := fetchClusters(apiClient) - // if err != nil { - // fmt.Fprintf(os.Stderr, "Failed to fetch clusters: %v\n", err) - // } - // 
newIonosPostgresResources := make(map[string]IonosPostgresResources) - - // for _, clusters := range *datacenters.Items { - // if clusters.Id == nil || clusters.Properties == nil { - // fmt.Fprintf(os.Stderr, "Cluster or Cluster Properties are nil\n") - // continue - // } - // clusterName := clusters.Properties.DisplayName - // if clusterName == nil { - // fmt.Fprintf(os.Stderr, "Cluster name is nil\n") - // continue - // } - // databaseNames, err := fetchDatabases(apiClient, *clusters.Id) - // if err != nil { - // fmt.Fprintf(os.Stderr, "Failed to fetch databases for cluster %s: %v\n", *clusters.Properties.DisplayName, err) - // continue - // } - // databaseOwner, err := fetchOwner(apiClient, *clusters.Id) - // if err != nil { - // fmt.Fprintf(os.Stderr, "Failed to fetch owner for database %s: %v\n", *clusters.Properties.DisplayName, err) - // continue - // } - - // telemetryData := make([]TelemetryMetric, 0) - - // for _, metricConfig := range metrics { - // telemetryResp, err := fetchTelemetryMetrics(os.Getenv("IONOS_TOKEN"), fmt.Sprintf("%s{postgres_cluster=\"%s\"}", metricConfig.Name, *clusters.Id), *clusters.Id) - // if err != nil { - // fmt.Fprintf(os.Stderr, "Failed to fetch telemetry metrics for cluster %s: %v\n", *clusters.Id, err) - // continue - // } - // telemetryData = append(telemetryData, telemetryResp.Data.Result...) 
- // } - - // // fmt.Printf("Here are the database names %v", databaseNames) - // newIonosPostgresResources[*clusters.Properties.DisplayName] = IonosPostgresResources{ - // ClusterName: *clusters.Properties.DisplayName, - // CPU: *clusters.Properties.Cores, - // RAM: *clusters.Properties.Ram, - // Storage: *clusters.Properties.StorageSize, - // DatabaseNames: databaseNames, - // Owner: databaseOwner, - // Telemetry: telemetryData, - // } - // } - // m.Lock() - // IonosPostgresClusters = newIonosPostgresResources - // m.Unlock() + for _, clusters := range *datacenters.Items { + if clusters.Id == nil || clusters.Properties == nil { + fmt.Fprintf(os.Stderr, "Cluster or Cluster Properties are nil\n") + continue + } + clusterName := clusters.Properties.DisplayName + if clusterName == nil { + fmt.Fprintf(os.Stderr, "Cluster name is nil\n") + continue + } + databaseNames, err := fetchDatabases(apiClient, *clusters.Id) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch databases for cluster %s: %v\n", *clusters.Properties.DisplayName, err) + continue + } + databaseOwner, err := fetchOwner(apiClient, *clusters.Id) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch owner for database %s: %v\n", *clusters.Properties.DisplayName, err) + continue + } + + telemetryData := make([]TelemetryMetric, 0) + + for _, metricConfig := range metrics { + telemetryResp, err := fetchTelemetryMetrics(os.Getenv("IONOS_TOKEN"), fmt.Sprintf("%s{postgres_cluster=\"%s\"}", metricConfig.Name, *clusters.Id), *clusters.Id) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fetch telemetry metrics for cluster %s: %v\n", *clusters.Id, err) + continue + } + telemetryData = append(telemetryData, telemetryResp.Data.Result...) 
+ } + + // fmt.Printf("Here are the database names %v", databaseNames) + newIonosPostgresResources[*clusters.Properties.DisplayName] = IonosPostgresResources{ + ClusterName: *clusters.Properties.DisplayName, + CPU: *clusters.Properties.Cores, + RAM: *clusters.Properties.Ram, + Storage: *clusters.Properties.StorageSize, + DatabaseNames: databaseNames, + Owner: databaseOwner, + Telemetry: telemetryData, + } + } + m.Lock() + IonosPostgresClusters = newIonosPostgresResources + m.Unlock() } diff --git a/internal/s3_scraper.go b/internal/s3_scraper.go index c9e7540..4820b0c 100644 --- a/internal/s3_scraper.go +++ b/internal/s3_scraper.go @@ -234,65 +234,13 @@ func processBucket(client *s3.S3, bucketName string) { //iterate through those objects and check the input of logs //here we are using concurrency for _, object := range objectList.Contents { - - objectKey := *object.Key wg.Add(1) semaphore <- struct{}{} - go func(bucketNme, objectkey string) { - defer func() { - <-semaphore - wg.Done() - }() - downloadInput := &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - } - result, err := client.GetObject(downloadInput) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "AccessDenied" { - log.Printf("Access Denied error for object %s in bucket %s\n", objectKey, bucketName) - return - } - } - log.Println("Error downloading object", err) - return - } - defer result.Body.Close() - logContent, err := io.ReadAll(result.Body) - if err != nil { - log.Println("Problem reading the body", err) - } - //check for matches using regex we are checkign for GET, PUT, POST, HEAD - //and their response/request size - matches := logEntryRegex.FindAllStringSubmatch(string(logContent), -1) - - for _, match := range matches { - metricsMutex.Lock() - - method := match[1] - requestSizeStr := match[3] - responseSizeStr := match[2] - - if requestSizeStr != "-" { - requestSize, err := strconv.ParseInt(requestSizeStr, 10, 64) - if err 
!= nil { - log.Printf("Error parsing size: %v", err) - } - metrics.RequestSizes[method] += requestSize - } - if responseSizeStr != "-" { - responseSize, err := strconv.ParseInt(responseSizeStr, 10, 64) - if err != nil { - log.Printf("Error parsing size: %v", err) - } - metrics.ResponseSizes[method] += responseSize - } - - metrics.Methods[method]++ - metricsMutex.Unlock() - } - }(bucketName, *object.Key) + go func(object *s3.Object) { + defer wg.Done() + defer func() { <-semaphore }() + processObject(client, bucketName, object, logEntryRegex, &metrics) + }(object) } //if there is no more pages break the loop if !aws.BoolValue(objectList.IsTruncated) { @@ -349,3 +297,49 @@ func getBucketTags(client *s3.S3, bucketName string) { metricsMutex.Unlock() } + +func processObject(client *s3.S3, bucketName string, object *s3.Object, logEntryRegex *regexp.Regexp, metrics *Metrics) { + downloadInput := &s3.GetObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(*object.Key), + } + result, err := client.GetObject(downloadInput) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "AccessDenied" { + log.Printf("Access Denied error for object %s in bucket %s\n", *object.Key, bucketName) + return + } + log.Println("Error downloading object", err) + return + } + defer result.Body.Close() + + logContent, err := io.ReadAll(result.Body) + if err != nil { + log.Println("Problem reading the body", err) + return + } + matches := logEntryRegex.FindAllStringSubmatch(string(logContent), -1) + + for _, match := range matches { + metricsMutex.Lock() + method := match[1] + requestSizeStr := match[3] + responseSizeStr := match[2] + + if requestSizeStr != "-" { + requestSize, err := strconv.ParseInt(requestSizeStr, 10, 64) + if err == nil { + metrics.RequestSizes[method] += requestSize + } + } + if responseSizeStr != "-" { + responseSize, err := strconv.ParseInt(responseSizeStr, 10, 64) + if err == nil { + metrics.ResponseSizes[method] += responseSize + } + 
} + metrics.Methods[method]++ + metricsMutex.Unlock() + } +} diff --git a/main.go b/main.go index e7ee3a4..f29087c 100644 --- a/main.go +++ b/main.go @@ -25,7 +25,7 @@ func main() { } go internal.CollectResources(m, ionos_api_cycle) go internal.S3CollectResources(m, ionos_api_cycle) - // go internal.PostgresCollectResources(m, ionos_api_cycle) + go internal.PostgresCollectResources(m, ionos_api_cycle) // startPrometheus() //internal.PrintDCResources(mutex) From f184409604ac07645b87aee81a9d59622b76d707 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 5 Jul 2024 13:03:28 +0200 Subject: [PATCH 29/55] fixed configmap locatioN --- charts/ionos-exporter/config.yaml | 98 +++++++++---------- .../{ => templates}/ConfigMap.yaml | 0 2 files changed, 45 insertions(+), 53 deletions(-) rename charts/ionos-exporter/{ => templates}/ConfigMap.yaml (100%) diff --git a/charts/ionos-exporter/config.yaml b/charts/ionos-exporter/config.yaml index 620e938..861a089 100644 --- a/charts/ionos-exporter/config.yaml +++ b/charts/ionos-exporter/config.yaml @@ -1,55 +1,47 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: ionos-exporter-config +tenants: +- name: spsh + operations: + - clusters + - databases +- name: dbildungs + operations: + - clusters -data: - config.yaml: | - tenants: - - name: spsh - operations: - - clusters - - databases - - name: dbildungs - operations: - - clusters - - - metrics: - - name: ionos_dbaas_postgres_transactions:rate2m - description: Per-second average rate of SQL transactions (that have been committed), calculated over the last 2 minutes. - type: gauge - - name: ionos_dbaas_postgres_connections_count - description: Number of connections per instance and state. active, disabled, fastpath function call, idle, idle in transaction, idle in transaction (aborted). - type: gauge - - name: ionos_dbaas_postgres_cpu_rate5m - description: The average CPU utilization over the past 5 minutes. 
- type: gauge - - name: ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m - description: The rate of disk I/O time, in seconds, over a five-minute period. Provides insight into performance of a disk, as high values may indicate that the disk is being overused or is experiencing performance issues. - type: gauge - - name: ionos_dbaas_postgres_instance_count - description: Desired number of instances. The number of currently ready and running instances may be different. ionos_dbaas_postgres_role provides information about running instances split by role. - type: gauge - - name: ionos_dbaas_postgres_load5 - description: Linux load average for the last 5 minutes. This metric is represented as a number between 0 and 1 (can be greater than 1 on multicore machines), where 0 indicates that the CPU core is idle and 1 indicates that the CPU core is fully utilized. Higher values may indicate that the system is experiencing performance issues or is approaching capacity. - type: gauge - - name: ionos_dbaas_postgres_memory_available_bytes - description: Available memory in bytes. - type: gauge - - name: ionos_dbaas_postgres_memory_total_bytes - description: Total memory of the underlying machine in bytes. Some of it is used for our management and monitoring tools and not available to PostgreSQL. During horizontal scaling you might see different values for each instance. - type: gauge - - name: ionos_dbaas_postgres_role - description: Current role of the instance. Provides whether an instance is currently "master" or "replica". - type: gauge - - name: ionos_dbaas_postgres_storage_available_bytes - description: Free available disk space per instance in bytes. - type: gauge - - name: ionos_dbaas_postgres_storage_total_bytes - description: Total disk space per instance in bytes. During horizontal scaling you might see different values for each instance. - type: gauge - - name: ionos_dbaas_postgres_user_tables_idx_scan - description: Number of index scans per table/schema. 
- type: gauge \ No newline at end of file +metrics: +- name: ionos_dbaas_postgres_transactions:rate2m + description: Per-second average rate of SQL transactions (that have been committed), calculated over the last 2 minutes. + type: gauge +- name: ionos_dbaas_postgres_connections_count + description: Number of connections per instance and state. active, disabled, fastpath function call, idle, idle in transaction, idle in transaction (aborted). + type: gauge +- name: ionos_dbaas_postgres_cpu_rate5m + description: The average CPU utilization over the past 5 minutes. + type: gauge +- name: ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m + description: The rate of disk I/O time, in seconds, over a five-minute period. Provides insight into performance of a disk, as high values may indicate that the disk is being overused or is experiencing performance issues. + type: gauge +- name: ionos_dbaas_postgres_instance_count + description: Desired number of instances. The number of currently ready and running instances may be different. ionos_dbaas_postgres_role provides information about running instances split by role. + type: gauge +- name: ionos_dbaas_postgres_load5 + description: Linux load average for the last 5 minutes. This metric is represented as a number between 0 and 1 (can be greater than 1 on multicore machines), where 0 indicates that the CPU core is idle and 1 indicates that the CPU core is fully utilized. Higher values may indicate that the system is experiencing performance issues or is approaching capacity. + type: gauge +- name: ionos_dbaas_postgres_memory_available_bytes + description: Available memory in bytes. + type: gauge +- name: ionos_dbaas_postgres_memory_total_bytes + description: Total memory of the underlying machine in bytes. Some of it is used for our management and monitoring tools and not available to PostgreSQL. During horizontal scaling you might see different values for each instance. 
+ type: gauge +- name: ionos_dbaas_postgres_role + description: Current role of the instance. Provides whether an instance is currently "master" or "replica". + type: gauge +- name: ionos_dbaas_postgres_storage_available_bytes + description: Free available disk space per instance in bytes. + type: gauge +- name: ionos_dbaas_postgres_storage_total_bytes + description: Total disk space per instance in bytes. During horizontal scaling you might see different values for each instance. + type: gauge +- name: ionos_dbaas_postgres_user_tables_idx_scan + description: Number of index scans per table/schema. + type: gauge \ No newline at end of file diff --git a/charts/ionos-exporter/ConfigMap.yaml b/charts/ionos-exporter/templates/ConfigMap.yaml similarity index 100% rename from charts/ionos-exporter/ConfigMap.yaml rename to charts/ionos-exporter/templates/ConfigMap.yaml From 064855cb8bbe41b01125ae53343f31777b034ddd Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 5 Jul 2024 13:19:35 +0200 Subject: [PATCH 30/55] fixed syntax for helm deployment --- charts/ionos-exporter/templates/deployment.yaml | 9 +++++++++ internal/postgres_scraper.go | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index b6c3de5..0218f17 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -75,6 +75,15 @@ spec: value: {{ .Values.containerPort | quote }} - name: IONOS_EXPORTER_API_CYCLE value: {{ .Values.ionosApiCycle | quote }} + volumeMounts: + - name: config-volume + readOnly: true + mountPath: /etc/ionos-exporter/config.yaml + subPath: config.yaml + volumes: + - name: config-volume + configMap: + name: ionos-exporter-config {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 2d0fd2f..0f2d704 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -83,7 +83,7 @@ func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { // } cfgENV := psql.NewConfigurationFromEnv() apiClient := psql.NewAPIClient(cfgENV) - config, err := loadConfig("./charts/ionos-exporter/config.yaml") + config, err := loadConfig("/etc/ionos-exporter/config.yaml") if err != nil { log.Fatalf("Failed to load config: %v", err) } From 6ef9510ed778ddd7808f87c6b5c1826afa94121d Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 5 Jul 2024 14:03:43 +0200 Subject: [PATCH 31/55] removed print statements used for debugging --- charts/ionos-exporter/templates/ConfigMap.yaml | 2 +- charts/ionos-exporter/templates/deployment.yaml | 10 +++++----- internal/s3_collector.go | 9 ++++----- internal/s3_scraper.go | 2 -- 4 files changed, 10 insertions(+), 13 deletions(-) diff --git a/charts/ionos-exporter/templates/ConfigMap.yaml b/charts/ionos-exporter/templates/ConfigMap.yaml index c10fd17..bab9f0b 100644 --- a/charts/ionos-exporter/templates/ConfigMap.yaml +++ b/charts/ionos-exporter/templates/ConfigMap.yaml @@ -4,4 +4,4 @@ metadata: name: ionos-exporter-config data: config.yaml: |- - {{ .Files.Get "charts/ionos-exporter/config.yaml" | nindent 4 }} \ No newline at end of file + {{ .Files.Get "config.yaml" | nindent 4 }} \ No newline at end of file diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index 0218f17..c513f72 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -75,11 +75,11 @@ spec: value: {{ .Values.containerPort | quote }} - name: IONOS_EXPORTER_API_CYCLE value: {{ .Values.ionosApiCycle | quote }} - volumeMounts: - - name: config-volume - readOnly: true - mountPath: /etc/ionos-exporter/config.yaml - subPath: config.yaml + 
volumeMounts: + - name: config-volume + readOnly: true + mountPath: /etc/ionos-exporter/config.yaml + subPath: config.yaml volumes: - name: config-volume configMap: diff --git a/internal/s3_collector.go b/internal/s3_collector.go index 6243991..cc0130e 100644 --- a/internal/s3_collector.go +++ b/internal/s3_collector.go @@ -1,7 +1,6 @@ package internal import ( - "fmt" "sync" //"time" @@ -115,10 +114,10 @@ func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { for s3Name, s3Resources := range IonosS3Buckets { region := s3Resources.Regions owner := s3Resources.Owner - tags, ok := TagsForPrometheus[s3Name] - if !ok { - fmt.Printf("No tags found for bucket %s\n", s3Name) - } + tags := TagsForPrometheus[s3Name] + // if !ok { + // // fmt.Printf("No tags found for bucket %s\n", s3Name) + // } //tags of buckets change to tags you have defined on s3 buckets enviroment := tags["Enviroment"] namespace := tags["Namespace"] diff --git a/internal/s3_scraper.go b/internal/s3_scraper.go index 4820b0c..6e25780 100644 --- a/internal/s3_scraper.go +++ b/internal/s3_scraper.go @@ -252,9 +252,7 @@ func processBucket(client *s3.S3, bucketName string) { wg.Wait() //make it thread safe with a mutex metricsMutex.Lock() - fmt.Println("METRICS", IonosS3Buckets) IonosS3Buckets[bucketName] = metrics - fmt.Println("METRICS", IonosS3Buckets) metricsMutex.Unlock() } From dde6c497afe72caa97fa903eb0e2e14ba49b16b2 Mon Sep 17 00:00:00 2001 From: efidoris Date: Tue, 9 Jul 2024 14:44:57 +0200 Subject: [PATCH 32/55] syntax fixes and added comments to the ionos_scrapper functions --- charts/ionos-exporter/config.yaml | 41 ++++++++++++++- internal/ionos_scraper.go | 86 ++++++++++++++++++++++++++++++- internal/postgres_collector.go | 22 ++++---- internal/postgres_scraper.go | 11 ++-- internal/s3_collector.go | 3 ++ 5 files changed, 143 insertions(+), 20 deletions(-) diff --git a/charts/ionos-exporter/config.yaml b/charts/ionos-exporter/config.yaml index 861a089..c5a71d9 100644 --- 
a/charts/ionos-exporter/config.yaml +++ b/charts/ionos-exporter/config.yaml @@ -44,4 +44,43 @@ metrics: type: gauge - name: ionos_dbaas_postgres_user_tables_idx_scan description: Number of index scans per table/schema. - type: gauge \ No newline at end of file + type: gauge + + +queries: + - metric: "s3_total_get_response_size_in_bytes" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_get_request_size_in_bytes" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_put_request_size_in_bytes" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_put_response_size_in_bytes" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_post_request_size_in_bytes" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_post_response_size_in_bytes" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_head_request_size_in_bytes" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_head_response_size_in_bytes" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_number_of_get_requests" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_number_of_put_requests" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_number_of_post_requests" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" + - metric: "s3_total_number_of_head_requests" + source: "dbpinfra-dev-servicecenter-1" + timeframe: "1d" diff --git a/internal/ionos_scraper.go b/internal/ionos_scraper.go index 33cf101..01004dc 100644 --- a/internal/ionos_scraper.go +++ b/internal/ionos_scraper.go @@ -56,7 +56,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { if err != nil { fmt.Fprintf(os.Stderr, "Error when calling `DataCentersApi.DatacentersGet``: %v\n", err) fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) - os.Exit(1) + 
continue } // fmt.Println("DATACENTER", datacenters) newIonosDatacenters := make(map[string]IonosDCResources) @@ -80,7 +80,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { if err != nil { fmt.Fprintf(os.Stderr, "Error when calling `ServersApi.DatacentersServersGet``: %v\n", err) fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) - continue + os.Exit(1) } albList, err := fetchApplicationLoadbalancers(apiClient, &datacenter) @@ -186,6 +186,17 @@ func PrintDCTotals(m *sync.RWMutex) { log.Printf("Total - Ram: %d GB\n", RamTotal/1024) } +/* +Retrieves a list of NAT Gateways which are associated with specific datanceter using the ionoscloud API Client + +Parameters: +apiClient: An instance of APIClient for making API Requests +datacenter Pointer to an ionoscloud.Datacenter object representing the target datacenter. + +Returns: +- *ionoscloud.NatGateways: A pointer to ionoscloud.NatGateways which has NAT List or an error if it fails +If successful, it returns a pointer to the fetched NATs, otherwise it returns nil and an error message. +*/ func fetchNATGateways(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Datacenter) (*ionoscloud.NatGateways, error) { datacenterId := *datacenter.Id natList, resp, err := apiClient.NATGatewaysApi.DatacentersNatgatewaysGet(context.Background(), datacenterId).Depth(2).Execute() @@ -205,6 +216,17 @@ func fetchNATGateways(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Da return &natList, nil } +/* +Retrieves a list of Network Load Balancers (NLB) which are associated with specific datanceter using the ionoscloud API Client + +Parameters: +apiClient: An instance of APIClient for making API Requests +datacenter Pointer to an ionoscloud.Datacenter object representing the target datacenter. 
+ +Returns: +- *ionoscloud.NetworkLoadBalancers: A pointer to ionoscloud.ApplicationLoadbalancers which has ALB List or an error if it fails +If successful, it returns a pointer to the fetched ALBs, otherwise it returns nil and an error message. +*/ func fetchNetworkLoadBalancers(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Datacenter) (*ionoscloud.NetworkLoadBalancers, error) { datacenterId := *datacenter.Id nlbList, resp, err := apiClient.NetworkLoadBalancersApi.DatacentersNetworkloadbalancersGet(context.Background(), datacenterId).Depth(2).Execute() @@ -225,6 +247,18 @@ func fetchNetworkLoadBalancers(apiClient *ionoscloud.APIClient, datacenter *iono return &nlbList, nil } +/* +retrievers a list of IP Blocks from ionoscloud API + +Parameters: + - apiClient: An instance of ionoscloud.APIClient + +Returns: + +- pointer to ionoscloud.IpBlocks containing the fetched IP blocks, or nil if there are no items +in the resource. +- error: An error if there was an issue making the API call or if no IP blocks were found. +*/ func fetchIPBlocks(apiClient *ionoscloud.APIClient) (*ionoscloud.IpBlocks, error) { ipBlocks, resp, err := apiClient.IPBlocksApi.IpblocksGet(context.Background()).Depth(2).Execute() if err != nil { @@ -244,6 +278,17 @@ func fetchIPBlocks(apiClient *ionoscloud.APIClient) (*ionoscloud.IpBlocks, error return &ipBlocks, nil } +/* +Retrieves a list of Application Load Balancers (ALB) which are associated with specific datanceter using the ionoscloud API Client + +Parameters: +apiClient: An instance of APIClient for making API Requests +datacenter Pointer to an ionoscloud.Datacenter object representing the target datacenter. + +Returns: +- *ionoscloud.ApplicationLoadBalancers: A pointer to ionoscloud.ApplicationLoadbalancers which has ALB List or an error if it fails +If successful, it returns a pointer to the fetched ALBs, otherwise it returns nil and an error message. 
+*/ func fetchApplicationLoadbalancers(apiClient *ionoscloud.APIClient, datacenter *ionoscloud.Datacenter) (*ionoscloud.ApplicationLoadBalancers, error) { datacenterId := *datacenter.Id albList, resp, err := apiClient.ApplicationLoadBalancersApi.DatacentersApplicationloadbalancersGet(context.Background(), datacenterId).Depth(2).Execute() @@ -264,16 +309,42 @@ func fetchApplicationLoadbalancers(apiClient *ionoscloud.APIClient, datacenter * return &albList, nil } +/* +Calculates total number of IP addresses from a list of IP Blocks + +Parameters: +- ipBlocks: A pointer to ionoscloud.IpBlocks containing a list of IP blocks to process. + +Returns: +- The total number of Ip addresses summed from all IP Blocks +*/ func processIPBlocks(ipBlocks *ionoscloud.IpBlocks) int32 { + var totalIPs int32 + for _, ips := range *ipBlocks.Items { if ips.Properties != nil && ips.Properties.Size != nil { totalIPs += *ips.Properties.Size + } else { + fmt.Println("Ip Properties or Ip Properties Size is nil") } } return totalIPs } +/* +process a list of Network Load Balancers to extract information about NLB names +and total forwarding rules across all NLBs. + +Parameter: + - a pointer to the NetworkLoadbalaners containig a list of NLBs to process + +Returns: + - string: names of loadbalancers + - int32: total number of forwarding rules + +If any NLB or its associated forwarding rules are nil, they are skipped during processing. 
+*/ func processNetworkLoadBalancers(nlbList *ionoscloud.NetworkLoadBalancers) (string, int32) { var ( nlbNames string @@ -297,6 +368,17 @@ func processNetworkLoadBalancers(nlbList *ionoscloud.NetworkLoadBalancers) (stri return nlbNames, nlbTotalRulesDC } +/* +process a list of Application Load Balancers ALBs to extract information about ALB names and +total forwarding rules across al ALBs + +Parameters: + - a pointer to ApplicationLoadBalancers containing a list of ALBs to process + +Returns: + - string: names of application loadbalancers + - int32: total number of forwarding rules +*/ func processApplicationLoadBalancers(albList *ionoscloud.ApplicationLoadBalancers) (string, int32) { var ( albNames string diff --git a/internal/postgres_collector.go b/internal/postgres_collector.go index 26c2165..f9715a1 100644 --- a/internal/postgres_collector.go +++ b/internal/postgres_collector.go @@ -8,8 +8,6 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -//"time" - type postgresCollector struct { mutex *sync.RWMutex postgresTotalRamMetric *prometheus.GaugeVec @@ -30,43 +28,43 @@ func NewPostgresCollector(m *sync.RWMutex) *postgresCollector { postgresTotalRamMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgres_total_ram_in_cluster", Help: "Gives the total ammount of allocated RAM in cluster", - }, []string{"clusterName", "owner", "db"}), + }, []string{"cluster", "owner", "db"}), postgresTotalCPUMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgres_total_cpu_in_cluster", Help: "Gives a total amount of CPU Cores in Cluster", - }, []string{"clusterName", "owner", "db"}), + }, []string{"cluster", "owner", "db"}), postgresTotalStorageMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgres_total_storage_in_cluster", Help: "Gives a total amount of Storage in Cluster", - }, []string{"clusterName", "owner", "db"}), + }, []string{"cluster", "owner", "db"}), postgresTransactionRateMetric: 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgres_transactions:rate2m", Help: "Gives a Transaction Rate in postgres cluster in 2m", - }, []string{"clusterName"}), + }, []string{"cluster"}), postgresTotalStorageBytesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgres_total_storage_metric", Help: "Gives a Total Storage Metric in Bytes", - }, []string{"clusterName"}), + }, []string{"cluster"}), postgresAvailableStorageBytesMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgres_available_storage_metric", Help: "Gives a Available Storage Metric in Bytes", - }, []string{"clusterName"}), + }, []string{"cluster"}), postgresCpuRateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgress_cpu_rate5m", Help: "Gives a CPU Rate (Average Utilization) over the past 5 Minutes", - }, []string{"clusterName"}), + }, []string{"cluster"}), postgresDiskIOMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgres_disk_io_time_weighted_seconds_rate5m", Help: "The rate of disk I/O time, in seconds, over a five-minute period.", - }, []string{"clusterName"}), + }, []string{"cluster"}), postgresLoadMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgres_load5", Help: "Linux load average for the last 5 minutes.", - }, []string{"clusterName"}), + }, []string{"cluster"}), postgresTotalMemoryAvailableBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "ionos_dbaas_postgres_memory_available_bytes", Help: "Available memory in bytes", - }, []string{"clusterName"}), + }, []string{"cluster"}), } } diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 0f2d704..5cd322e 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -12,6 +12,7 @@ import ( "time" psql "github.com/ionos-cloud/sdk-go-dbaas-postgres" + "github.com/joho/godotenv" "gopkg.in/yaml.v2" ) @@ -77,13 +78,13 @@ func loadConfig(filename 
string) (*Config, error) { } func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { - // err := godotenv.Load(".env") - // if err != nil { - // fmt.Println("Error loading .env file") - // } + err := godotenv.Load(".env") + if err != nil { + fmt.Println("Error loading .env file") + } cfgENV := psql.NewConfigurationFromEnv() apiClient := psql.NewAPIClient(cfgENV) - config, err := loadConfig("/etc/ionos-exporter/config.yaml") + config, err := loadConfig("./charts/ionos-exporter/config.yaml") if err != nil { log.Fatalf("Failed to load config: %v", err) } diff --git a/internal/s3_collector.go b/internal/s3_collector.go index cc0130e..18e31d1 100644 --- a/internal/s3_collector.go +++ b/internal/s3_collector.go @@ -96,6 +96,9 @@ func (collector *s3Collector) Describe(ch chan<- *prometheus.Desc) { func (collector *s3Collector) Collect(ch chan<- prometheus.Metric) { + collector.mutex.RLock() + defer collector.mutex.RUnlock() + metricsMutex.Lock() collector.s3TotalGetRequestSizeMetric.Reset() collector.s3TotalGetResponseSizeMetric.Reset() From 38039d1152af66d0ba2d1697fc57665087b77f97 Mon Sep 17 00:00:00 2001 From: efidoris Date: Mon, 15 Jul 2024 08:58:56 +0200 Subject: [PATCH 33/55] fixed config path loader --- internal/postgres_scraper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 5cd322e..7f5a862 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -84,7 +84,7 @@ func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { } cfgENV := psql.NewConfigurationFromEnv() apiClient := psql.NewAPIClient(cfgENV) - config, err := loadConfig("./charts/ionos-exporter/config.yaml") + config, err := loadConfig("/etc/ionos-exporter/config.yaml") if err != nil { log.Fatalf("Failed to load config: %v", err) } From ede28cdc08c495e40abc015df638c1e90771139c Mon Sep 17 00:00:00 2001 From: efidoris Date: Thu, 1 Aug 2024 14:56:18 +0200 Subject: [PATCH 34/55] added 
documentation and removed redundant code --- Documentation/arch_diagramm_io_exp.xml | 184 ++++++++ Documentation/cost_cal_SQD.drawio | 237 ++++++++++ ...os_exporter_sequenzdiagram_postgres.drawio | 380 ++++++++++++++++ .../ionos_exporter_sequenzdiagram_s3.drawio | 423 ++++++++++++++++++ charts/ionos-exporter/config.yaml | 55 +-- internal/helper.go | 33 ++ internal/ionos_collector.go | 42 +- internal/ionos_scraper.go | 94 ++-- internal/postgres_scraper.go | 48 +- internal/s3_scraper.go | 59 +-- 10 files changed, 1367 insertions(+), 188 deletions(-) create mode 100644 Documentation/arch_diagramm_io_exp.xml create mode 100644 Documentation/cost_cal_SQD.drawio create mode 100644 Documentation/ionos_exporter_sequenzdiagram_postgres.drawio create mode 100644 Documentation/ionos_exporter_sequenzdiagram_s3.drawio diff --git a/Documentation/arch_diagramm_io_exp.xml b/Documentation/arch_diagramm_io_exp.xml new file mode 100644 index 0000000..87bfaff --- /dev/null +++ b/Documentation/arch_diagramm_io_exp.xml @@ -0,0 +1,184 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Documentation/cost_cal_SQD.drawio b/Documentation/cost_cal_SQD.drawio new file mode 100644 index 0000000..0db3412 --- /dev/null +++ b/Documentation/cost_cal_SQD.drawio @@ -0,0 +1,237 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Documentation/ionos_exporter_sequenzdiagram_postgres.drawio b/Documentation/ionos_exporter_sequenzdiagram_postgres.drawio new file mode 100644 index 0000000..745870c --- /dev/null +++ b/Documentation/ionos_exporter_sequenzdiagram_postgres.drawio @@ -0,0 +1,380 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Documentation/ionos_exporter_sequenzdiagram_s3.drawio b/Documentation/ionos_exporter_sequenzdiagram_s3.drawio new file mode 100644 index 0000000..39a28ae --- /dev/null +++ b/Documentation/ionos_exporter_sequenzdiagram_s3.drawio @@ -0,0 +1,423 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/charts/ionos-exporter/config.yaml b/charts/ionos-exporter/config.yaml index c5a71d9..4d58982 100644 --- a/charts/ionos-exporter/config.yaml +++ b/charts/ionos-exporter/config.yaml @@ -1,13 +1,3 @@ -tenants: -- name: spsh - operations: - - clusters - - databases -- name: dbildungs - operations: - - clusters - - metrics: - name: ionos_dbaas_postgres_transactions:rate2m description: Per-second average rate of SQL transactions (that have been committed), calculated over the last 2 minutes. @@ -46,41 +36,10 @@ metrics: description: Number of index scans per table/schema. type: gauge - -queries: - - metric: "s3_total_get_response_size_in_bytes" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_get_request_size_in_bytes" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_put_request_size_in_bytes" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_put_response_size_in_bytes" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_post_request_size_in_bytes" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_post_response_size_in_bytes" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_head_request_size_in_bytes" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_head_response_size_in_bytes" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_number_of_get_requests" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_number_of_put_requests" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" - - metric: "s3_total_number_of_post_requests" - source: "dbpinfra-dev-servicecenter-1" - 
timeframe: "1d" - - metric: "s3_total_number_of_head_requests" - source: "dbpinfra-dev-servicecenter-1" - timeframe: "1d" +endpoints: + - name: eu-central-2 + region: eu-central-2 + endpoint: https://s3-eu-central-2.ionoscloud.com + - name: de + region: de + endpoint: https://s3-eu-central-1.ionoscloud.com \ No newline at end of file diff --git a/internal/helper.go b/internal/helper.go index 73ff613..fed051b 100644 --- a/internal/helper.go +++ b/internal/helper.go @@ -2,14 +2,32 @@ package internal import ( "fmt" + "io/ioutil" "log" "os" aws "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" + "gopkg.in/yaml.v2" ) +type Config struct { + Tenants []TenantConfig `yaml:"tenants"` + Metrics []MetricConfig `yaml:"metrics"` + Endpoints []EndpointConfig `yaml:"endpoints"` +} + +type TenantConfig struct { + Name string `yaml:"name"` +} + +type MetricConfig struct { + Name string `yaml:"name"` + Description string `yaml:"description"` + Type string `yaml:"type"` +} + func GetEnv(key string, fallback string) string { value, ok := os.LookupEnv(key) if !ok { @@ -56,3 +74,18 @@ func GetHeadBucket(client *s3.S3, bucketName string) error { log.Printf("Bucket %s exists and is accessible\n", bucketName) return nil } + +func LoadConfig(filename string) (*Config, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + var config Config + err = yaml.Unmarshal(data, &config) + if err != nil { + return nil, err + } + + return &config, nil +} diff --git a/internal/ionos_collector.go b/internal/ionos_collector.go index 4407fcf..cd9069f 100644 --- a/internal/ionos_collector.go +++ b/internal/ionos_collector.go @@ -14,23 +14,24 @@ import ( // Note you can also include fields of other types if they provide utility // but we just won't be exposing them as metrics. 
type ionosCollector struct { - mutex *sync.RWMutex - coresMetric *prometheus.GaugeVec - ramMetric *prometheus.GaugeVec - serverMetric *prometheus.GaugeVec - dcCoresMetric *prometheus.GaugeVec - dcRamMetric *prometheus.GaugeVec - dcServerMetric *prometheus.GaugeVec - dcDCMetric *prometheus.GaugeVec - nlbsMetric *prometheus.GaugeVec - albsMetric *prometheus.GaugeVec - natsMetric *prometheus.GaugeVec - dcDCNLBMetric *prometheus.GaugeVec - dcDCALBMetric *prometheus.GaugeVec - dcDCNATMetric *prometheus.GaugeVec - dcNLBRulesMetric *prometheus.GaugeVec - dcALBRulesMetric *prometheus.GaugeVec - dcTotalIpsMetric prometheus.Gauge + mutex *sync.RWMutex + coresMetric *prometheus.GaugeVec + ramMetric *prometheus.GaugeVec + serverMetric *prometheus.GaugeVec + dcCoresMetric *prometheus.GaugeVec + dcRamMetric *prometheus.GaugeVec + dcServerMetric *prometheus.GaugeVec + dcDCMetric *prometheus.GaugeVec + nlbsMetric *prometheus.GaugeVec + albsMetric *prometheus.GaugeVec + natsMetric *prometheus.GaugeVec + dcDCNLBMetric *prometheus.GaugeVec + dcDCALBMetric *prometheus.GaugeVec + dcDCNATMetric *prometheus.GaugeVec + dcNLBRulesMetric *prometheus.GaugeVec + dcALBRulesMetric *prometheus.GaugeVec + dcTotalIpsMetric prometheus.Gauge + apiFailuresMetric prometheus.Counter } // You must create a constructor for you collector that @@ -102,6 +103,10 @@ func NewIonosCollector(m *sync.RWMutex) *ionosCollector { Name: "ionos_total_number_of_ips", Help: "Shows the number of Ips in a IONOS", }), + apiFailuresMetric: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ionos_api_failures_total", + Help: "Total number of failed API calls", + }), } } @@ -127,6 +132,7 @@ func (collector *ionosCollector) Describe(ch chan<- *prometheus.Desc) { collector.dcALBRulesMetric.Describe(ch) collector.dcNLBRulesMetric.Describe(ch) collector.dcTotalIpsMetric.Describe(ch) + collector.apiFailuresMetric.Describe(ch) } // Collect implements required collect function for all promehteus collectors @@ -155,6 +161,7 @@ 
func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { collector.albsMetric.WithLabelValues(dcName, dcResources.ALBName, dcResources.ALBRuleName).Set(float64(dcResources.ALBs)) collector.natsMetric.WithLabelValues(dcName).Set(float64(dcResources.NATs)) collector.dcTotalIpsMetric.Set(float64(dcResources.TotalIPs)) + collector.apiFailuresMetric.Add(float64(dcResources.TotalAPICallFailures)) } @@ -179,4 +186,5 @@ func (collector *ionosCollector) Collect(ch chan<- prometheus.Metric) { collector.dcNLBRulesMetric.Collect(ch) collector.dcALBRulesMetric.Collect(ch) collector.dcTotalIpsMetric.Collect(ch) + collector.apiFailuresMetric.Collect(ch) } diff --git a/internal/ionos_scraper.go b/internal/ionos_scraper.go index 01004dc..db3f0ed 100644 --- a/internal/ionos_scraper.go +++ b/internal/ionos_scraper.go @@ -22,21 +22,22 @@ var ( ) type IonosDCResources struct { - Cores int32 // Amount of CPU cores in the whole DC, regardless whether it is a VM or Kubernetscluster - Ram int32 // Amount of RAM in the whole DC, regardless whether it is a VM or Kubernetscluster - Servers int32 // Amount of servers in the whole DC - DCId string // UUID od the datacenter - NLBs int32 - ALBs int32 - NATs int32 - NLBRules int32 - ALBRules int32 - ALBName string - NLBName string - NLBRuleName string - ALBRuleName string - IPName string - TotalIPs int32 + Cores int32 // Amount of CPU cores in the whole DC, regardless whether it is a VM or Kubernetscluster + Ram int32 // Amount of RAM in the whole DC, regardless whether it is a VM or Kubernetscluster + Servers int32 // Amount of servers in the whole DC + DCId string // UUID od the datacenter + NLBs int32 //Number of Networkloadbalancers + ALBs int32 //Number of Applicationloadbalanceers + NATs int32 //Number of NAT Gateways + NLBRules int32 //Number of NLB Rules + ALBRules int32 //Number of ALB Rueles + ALBName string //ALB Name + NLBName string //NLB Name + NLBRuleName string //Rule name of NLB + ALBRuleName string //Rule name of 
ALB + IPName string //IP Name + TotalIPs int32 //Number of total IP-s + TotalAPICallFailures int32 } func CollectResources(m *sync.RWMutex, cycletime int32) { @@ -51,36 +52,40 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { cfgENV.Debug = false apiClient := ionoscloud.NewAPIClient(cfgENV) + totalAPICallFailures := 0 for { datacenters, resp, err := apiClient.DataCentersApi.DatacentersGet(context.Background()).Depth(depth).Execute() if err != nil { fmt.Fprintf(os.Stderr, "Error when calling `DataCentersApi.DatacentersGet``: %v\n", err) fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) + totalAPICallFailures++ continue } // fmt.Println("DATACENTER", datacenters) newIonosDatacenters := make(map[string]IonosDCResources) for _, datacenter := range *datacenters.Items { var ( - coresTotalDC int32 = 0 - ramTotalDC int32 = 0 - serverTotalDC int32 = 0 - nlbTotalDC int32 = 0 - nlbTotalRulesDC int32 = 0 - albTotalRulesDC int32 = 0 - albTotalDC int32 = 0 - natTotalDC int32 = 0 - albNames string - nlbNames string - albRuleNames string - nlbRuleNames string - totalIPs int32 = 0 + coresTotalDC int32 = 0 + ramTotalDC int32 = 0 + serverTotalDC int32 = 0 + nlbTotalDC int32 = 0 + nlbTotalRulesDC int32 = 0 + albTotalRulesDC int32 = 0 + albTotalDC int32 = 0 + natTotalDC int32 = 0 + albNames string + nlbNames string + albRuleNames string + nlbRuleNames string + totalIPs int32 = 0 + totalAPICallFailures int32 = 0 ) servers, resp, err := apiClient.ServersApi.DatacentersServersGet(context.Background(), *datacenter.Id).Depth(depth).Execute() if err != nil { fmt.Fprintf(os.Stderr, "Error when calling `ServersApi.DatacentersServersGet``: %v\n", err) fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", resp) - os.Exit(1) + totalAPICallFailures++ + continue } albList, err := fetchApplicationLoadbalancers(apiClient, &datacenter) @@ -119,20 +124,21 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { } newIonosDatacenters[*datacenter.Properties.Name] = IonosDCResources{ - 
DCId: *datacenter.Id, - Cores: coresTotalDC, - Ram: ramTotalDC, - Servers: serverTotalDC, - NLBs: nlbTotalDC, - ALBs: albTotalDC, - NATs: natTotalDC, - NLBRules: nlbTotalRulesDC, - ALBRules: albTotalRulesDC, - ALBName: albNames, - NLBName: nlbNames, - ALBRuleName: albRuleNames, - NLBRuleName: nlbRuleNames, - TotalIPs: totalIPs, + DCId: *datacenter.Id, + Cores: coresTotalDC, + Ram: ramTotalDC, + Servers: serverTotalDC, + NLBs: nlbTotalDC, + ALBs: albTotalDC, + NATs: natTotalDC, + NLBRules: nlbTotalRulesDC, + ALBRules: albTotalRulesDC, + ALBName: albNames, + NLBName: nlbNames, + ALBRuleName: albRuleNames, + NLBRuleName: nlbRuleNames, + TotalIPs: totalIPs, + TotalAPICallFailures: totalAPICallFailures, } } @@ -140,7 +146,7 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { m.Lock() IonosDatacenters = newIonosDatacenters m.Unlock() - // CalculateDCTotals(m) + CalculateDCTotals(m) time.Sleep(time.Duration(cycletime) * time.Second) } } diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 7f5a862..ef0f51e 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "log" "net/http" "os" @@ -13,14 +12,8 @@ import ( psql "github.com/ionos-cloud/sdk-go-dbaas-postgres" "github.com/joho/godotenv" - "gopkg.in/yaml.v2" ) -type Tenant struct { - Name string `yaml:"name"` - Operations []string `yaml:"operations"` -} - type IonosPostgresResources struct { ClusterName string CPU int32 @@ -37,9 +30,9 @@ type TelemetryMetric struct { } type TelemetryResponse struct { - Status string `json:status` + Status string `json:"status"` Data struct { - ResultType string `json:"resultType` + ResultType string `json:"resultType"` Result []TelemetryMetric `json:"result"` } `json:"data"` } @@ -51,32 +44,6 @@ var ( IonosPostgresClusters = make(map[string]IonosPostgresResources) ) -type Config struct { - Tenants []Tenant `yaml:"tenants"` - Metrics []Metric `yaml:"metrics"` -} - 
-type Metric struct { - Name string `yaml:"name"` - Description string `yaml:"description"` - Type string `yaml:"type"` -} - -func loadConfig(filename string) (*Config, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - - var config Config - err = yaml.Unmarshal(data, &config) - if err != nil { - return nil, err - } - - return &config, nil -} - func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { err := godotenv.Load(".env") if err != nil { @@ -84,18 +51,19 @@ func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { } cfgENV := psql.NewConfigurationFromEnv() apiClient := psql.NewAPIClient(cfgENV) - config, err := loadConfig("/etc/ionos-exporter/config.yaml") + config, err := LoadConfig("/etc/ionos-exporter/config.yaml") + // config, err := LoadConfig("./charts/ionos-exporter/config.yaml") if err != nil { log.Fatalf("Failed to load config: %v", err) } - + //tired to speed up the processing, ionos restricted number of requests for { processCluster(apiClient, m, config.Metrics) time.Sleep(time.Duration(cycletime) * time.Second) } } -func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric) { +func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []MetricConfig) { datacenters, err := fetchClusters(apiClient) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fetch clusters: %v\n", err) @@ -130,7 +98,7 @@ func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric telemetryData := make([]TelemetryMetric, 0) for _, metricConfig := range metrics { - telemetryResp, err := fetchTelemetryMetrics(os.Getenv("IONOS_TOKEN"), fmt.Sprintf("%s{postgres_cluster=\"%s\"}", metricConfig.Name, *clusters.Id), *clusters.Id) + telemetryResp, err := fetchTelemetryMetrics(os.Getenv("IONOS_TOKEN"), fmt.Sprintf("%s{postgres_cluster=\"%s\"}", metricConfig.Name, *clusters.Id)) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fetch telemetry metrics for cluster %s: %v\n", 
*clusters.Id, err) continue @@ -224,7 +192,7 @@ func fetchOwner(apiClient *psql.APIClient, clusterID string) (string, error) { return owner, nil } -func fetchTelemetryMetrics(apiToken, query, clusterID string) (*TelemetryResponse, error) { +func fetchTelemetryMetrics(apiToken, query string) (*TelemetryResponse, error) { req, err := http.NewRequest("GET", "https://dcd.ionos.com/telemetry/api/v1/query_range", nil) if err != nil { return nil, err diff --git a/internal/s3_scraper.go b/internal/s3_scraper.go index 6e25780..35677da 100644 --- a/internal/s3_scraper.go +++ b/internal/s3_scraper.go @@ -18,10 +18,9 @@ import ( ) type EndpointConfig struct { - Region string - AccessKey string - SecretKey string - Endpoint string + Name string `yaml:"name"` + Region string `yaml:"region"` + Endpoint string `yaml:"endpoint"` } var ( @@ -29,6 +28,7 @@ var ( //map of maps for bucket tags stores tags for every bucket //one bucket can have more tags. TagsForPrometheus = make(map[string]map[string]string) + metricsMutex sync.Mutex ) // object for Metrics @@ -41,20 +41,14 @@ type Metrics struct { } const ( - MethodGET = "GET" - MethodPUT = "PUT" - MethodPOST = "POST" - MethodHEAD = "HEAD" -) - -// how many objects to scan per page -const ( + MethodGET = "GET" + MethodPUT = "PUT" + MethodPOST = "POST" + MethodHEAD = "HEAD" objectPerPage = 1000 maxConcurrent = 10 ) -var metricsMutex sync.Mutex - func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S3, error) { sess, err := session.NewSession(&aws.Config{ Region: aws.String(region), @@ -69,42 +63,29 @@ func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S } func S3CollectResources(m *sync.RWMutex, cycletime int32) { - accessKey := os.Getenv("AWS_ACCESS_KEY_ID") + // config, err := LoadConfig("./charts/ionos-exporter/config.yaml") + config, err := LoadConfig("/etc/ionos-exporter/config.yaml") + if err != nil { + fmt.Println("Problem with loading the configuration yaml file", err) + } + 
// accessKey := config.Endpoints secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY") - // file, _ := os.Create("S3ioutput.txt") - // defer file.Close() + accessKey := os.Getenv("AWS_ACCESS_KEY_ID") - // oldStdout := os.Stdout - // defer func() { os.Stdout = oldStdout }() - // os.Stdout = file if accessKey == "" || secretKey == "" { log.Println("AWS credentials are nto set in the enviroment variables.") return } - endpoints := map[string]EndpointConfig{ - "eu-central-2": { - Region: "eu-central-2", - AccessKey: accessKey, - SecretKey: secretKey, - Endpoint: "https://s3-eu-central-2.ionoscloud.com", - }, - "de": { - Region: "de", - AccessKey: accessKey, - SecretKey: secretKey, - Endpoint: "https://s3-eu-central-1.ionoscloud.com", - }, - } - //buffered channel that is like a semaphore + //buffered channel that is a semaphore semaphore := make(chan struct{}, maxConcurrent) for { var wg sync.WaitGroup - for endpoint, config := range endpoints { + for _, endpoint := range config.Endpoints { - if _, exists := IonosS3Buckets[endpoint]; exists { + if _, exists := IonosS3Buckets[endpoint.Endpoint]; exists { continue } - client, err := createS3ServiceClient(config.Region, config.AccessKey, config.SecretKey, config.Endpoint) + client, err := createS3ServiceClient(endpoint.Region, accessKey, secretKey, endpoint.Endpoint) if err != nil { fmt.Printf("Error creating service client for endpoint %s: %v\n", endpoint, err) @@ -127,7 +108,7 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { Methods: make(map[string]int32), RequestSizes: make(map[string]int64), ResponseSizes: make(map[string]int64), - Regions: config.Region, + Regions: "", } IonosS3Buckets[bucketName] = metrics } From 9a7928923badd8fe4a2841a7783c6556d3f27f28 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 2 Aug 2024 09:19:30 +0200 Subject: [PATCH 35/55] removed local loading of .env --- internal/ionos_scraper.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git 
a/internal/ionos_scraper.go b/internal/ionos_scraper.go index db3f0ed..272ff4d 100644 --- a/internal/ionos_scraper.go +++ b/internal/ionos_scraper.go @@ -9,7 +9,6 @@ import ( "time" ionoscloud "github.com/ionos-cloud/sdk-go/v6" - "github.com/joho/godotenv" ) var ( @@ -42,10 +41,10 @@ type IonosDCResources struct { func CollectResources(m *sync.RWMutex, cycletime int32) { - err := godotenv.Load(".env") - if err != nil { - fmt.Println("Error loading .env file") - } + // err := godotenv.Load(".env") + // if err != nil { + // fmt.Println("Error loading .env file") + // } cfgENV := ionoscloud.NewConfigurationFromEnv() // cfg.Debug = true From 289725d97c75653c7d02056bddfc863e47963830 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 2 Aug 2024 09:22:49 +0200 Subject: [PATCH 36/55] removed .env in postgres_scraper --- internal/postgres_scraper.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index ef0f51e..191569f 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -11,7 +11,6 @@ import ( "time" psql "github.com/ionos-cloud/sdk-go-dbaas-postgres" - "github.com/joho/godotenv" ) type IonosPostgresResources struct { @@ -45,10 +44,10 @@ var ( ) func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { - err := godotenv.Load(".env") - if err != nil { - fmt.Println("Error loading .env file") - } + // err := godotenv.Load(".env") + // if err != nil { + // fmt.Println("Error loading .env file") + // } cfgENV := psql.NewConfigurationFromEnv() apiClient := psql.NewAPIClient(cfgENV) config, err := LoadConfig("/etc/ionos-exporter/config.yaml") From defffdaa64a630fe685470691a287ecb2e30be49 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 2 Aug 2024 09:46:54 +0200 Subject: [PATCH 37/55] debugging s3 function not loading in pods --- charts/ionos-exporter/config.yaml | 1 + internal/s3_scraper.go | 1 + 2 files changed, 2 insertions(+) diff --git 
a/charts/ionos-exporter/config.yaml b/charts/ionos-exporter/config.yaml index 4d58982..6941cc4 100644 --- a/charts/ionos-exporter/config.yaml +++ b/charts/ionos-exporter/config.yaml @@ -36,6 +36,7 @@ metrics: description: Number of index scans per table/schema. type: gauge + endpoints: - name: eu-central-2 region: eu-central-2 diff --git a/internal/s3_scraper.go b/internal/s3_scraper.go index 35677da..11fadd1 100644 --- a/internal/s3_scraper.go +++ b/internal/s3_scraper.go @@ -63,6 +63,7 @@ func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S } func S3CollectResources(m *sync.RWMutex, cycletime int32) { + fmt.Println("in S3CollectResources") // config, err := LoadConfig("./charts/ionos-exporter/config.yaml") config, err := LoadConfig("/etc/ionos-exporter/config.yaml") if err != nil { From 41e00880ffad9ba609195dabb3f3bd256ed417db Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 2 Aug 2024 09:53:52 +0200 Subject: [PATCH 38/55] ommited usage of config.yaml for s3 scraping --- internal/s3_scraper.go | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/internal/s3_scraper.go b/internal/s3_scraper.go index 11fadd1..98974b1 100644 --- a/internal/s3_scraper.go +++ b/internal/s3_scraper.go @@ -17,10 +17,17 @@ import ( "github.com/aws/aws-sdk-go/service/s3" ) +// type EndpointConfig struct { +// Name string `yaml:"name"` +// Region string `yaml:"region"` +// Endpoint string `yaml:"endpoint"` +// } + type EndpointConfig struct { - Name string `yaml:"name"` - Region string `yaml:"region"` - Endpoint string `yaml:"endpoint"` + Region string + AccessKey string + SecretKey string + Endpoint string } var ( @@ -65,10 +72,10 @@ func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S func S3CollectResources(m *sync.RWMutex, cycletime int32) { fmt.Println("in S3CollectResources") // config, err := LoadConfig("./charts/ionos-exporter/config.yaml") - config, err := 
LoadConfig("/etc/ionos-exporter/config.yaml") - if err != nil { - fmt.Println("Problem with loading the configuration yaml file", err) - } + // config, err := LoadConfig("/etc/ionos-exporter/config.yaml") + // if err != nil { + // fmt.Println("Problem with loading the configuration yaml file", err) + // } // accessKey := config.Endpoints secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY") accessKey := os.Getenv("AWS_ACCESS_KEY_ID") @@ -77,11 +84,25 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { log.Println("AWS credentials are nto set in the enviroment variables.") return } + endpoints := map[string]EndpointConfig{ + "eu-central-2": { + Region: "eu-central-2", + AccessKey: accessKey, + SecretKey: secretKey, + Endpoint: "https://s3-eu-central-2.ionoscloud.com", + }, + "de": { + Region: "de", + AccessKey: accessKey, + SecretKey: secretKey, + Endpoint: "https://s3-eu-central-1.ionoscloud.com", + }, + } //buffered channel that is a semaphore semaphore := make(chan struct{}, maxConcurrent) for { var wg sync.WaitGroup - for _, endpoint := range config.Endpoints { + for _, endpoint := range endpoints { if _, exists := IonosS3Buckets[endpoint.Endpoint]; exists { continue From 078fa1f313a9866a3581b9a3811bcbffbca90dc3 Mon Sep 17 00:00:00 2001 From: efidoris Date: Mon, 5 Aug 2024 11:13:33 +0200 Subject: [PATCH 39/55] syntax fixes, some commets removed --- charts/ionos-exporter/config.yaml | 11 +---------- internal/ionos_scraper.go | 2 +- internal/postgres_scraper.go | 8 +++++++- internal/s3_scraper.go | 13 ------------- main.go | 3 +-- 5 files changed, 10 insertions(+), 27 deletions(-) diff --git a/charts/ionos-exporter/config.yaml b/charts/ionos-exporter/config.yaml index 6941cc4..8c3947b 100644 --- a/charts/ionos-exporter/config.yaml +++ b/charts/ionos-exporter/config.yaml @@ -34,13 +34,4 @@ metrics: type: gauge - name: ionos_dbaas_postgres_user_tables_idx_scan description: Number of index scans per table/schema. 
- type: gauge - - -endpoints: - - name: eu-central-2 - region: eu-central-2 - endpoint: https://s3-eu-central-2.ionoscloud.com - - name: de - region: de - endpoint: https://s3-eu-central-1.ionoscloud.com \ No newline at end of file + type: gauge \ No newline at end of file diff --git a/internal/ionos_scraper.go b/internal/ionos_scraper.go index 272ff4d..50d2230 100644 --- a/internal/ionos_scraper.go +++ b/internal/ionos_scraper.go @@ -41,6 +41,7 @@ type IonosDCResources struct { func CollectResources(m *sync.RWMutex, cycletime int32) { + //for local testing // err := godotenv.Load(".env") // if err != nil { // fmt.Println("Error loading .env file") @@ -60,7 +61,6 @@ func CollectResources(m *sync.RWMutex, cycletime int32) { totalAPICallFailures++ continue } - // fmt.Println("DATACENTER", datacenters) newIonosDatacenters := make(map[string]IonosDCResources) for _, datacenter := range *datacenters.Items { var ( diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 191569f..04a953b 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -44,18 +44,24 @@ var ( ) func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { + //for local testing // err := godotenv.Load(".env") // if err != nil { // fmt.Println("Error loading .env file") // } cfgENV := psql.NewConfigurationFromEnv() apiClient := psql.NewAPIClient(cfgENV) + + //config has all metrics for postgres config, err := LoadConfig("/etc/ionos-exporter/config.yaml") + + //for local testing // config, err := LoadConfig("./charts/ionos-exporter/config.yaml") if err != nil { log.Fatalf("Failed to load config: %v", err) } - //tired to speed up the processing, ionos restricted number of requests + //tried to speed up the processing with concurrency, ionos restricted number of requests + //so many of them would not go through for { processCluster(apiClient, m, config.Metrics) time.Sleep(time.Duration(cycletime) * time.Second) diff --git a/internal/s3_scraper.go 
b/internal/s3_scraper.go index 98974b1..9b798c1 100644 --- a/internal/s3_scraper.go +++ b/internal/s3_scraper.go @@ -17,12 +17,6 @@ import ( "github.com/aws/aws-sdk-go/service/s3" ) -// type EndpointConfig struct { -// Name string `yaml:"name"` -// Region string `yaml:"region"` -// Endpoint string `yaml:"endpoint"` -// } - type EndpointConfig struct { Region string AccessKey string @@ -70,13 +64,6 @@ func createS3ServiceClient(region, accessKey, secretKey, endpoint string) (*s3.S } func S3CollectResources(m *sync.RWMutex, cycletime int32) { - fmt.Println("in S3CollectResources") - // config, err := LoadConfig("./charts/ionos-exporter/config.yaml") - // config, err := LoadConfig("/etc/ionos-exporter/config.yaml") - // if err != nil { - // fmt.Println("Problem with loading the configuration yaml file", err) - // } - // accessKey := config.Endpoints secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY") accessKey := os.Getenv("AWS_ACCESS_KEY_ID") diff --git a/main.go b/main.go index f29087c..822d56c 100644 --- a/main.go +++ b/main.go @@ -27,8 +27,7 @@ func main() { go internal.S3CollectResources(m, ionos_api_cycle) go internal.PostgresCollectResources(m, ionos_api_cycle) - // startPrometheus() - //internal.PrintDCResources(mutex) + internal.PrintDCResources(m) internal.StartPrometheus(m) http.Handle("/metrics", promhttp.Handler()) http.Handle("/healthcheck", http.HandlerFunc(internal.HealthCheck)) From 4e075c4e520b206f2367bb2a68ad2ea0aebe4b5a Mon Sep 17 00:00:00 2001 From: efidoris Date: Tue, 6 Aug 2024 11:18:44 +0200 Subject: [PATCH 40/55] added buffer to reading lines --- internal/s3_scraper.go | 51 ++++++++++++++---------------------------- 1 file changed, 17 insertions(+), 34 deletions(-) diff --git a/internal/s3_scraper.go b/internal/s3_scraper.go index 9b798c1..e4b90a9 100644 --- a/internal/s3_scraper.go +++ b/internal/s3_scraper.go @@ -1,6 +1,7 @@ package internal import ( + "bufio" "fmt" "io" "log" @@ -25,14 +26,11 @@ type EndpointConfig struct { } var ( - 
IonosS3Buckets = make(map[string]Metrics) - //map of maps for bucket tags stores tags for every bucket - //one bucket can have more tags. + IonosS3Buckets = make(map[string]Metrics) TagsForPrometheus = make(map[string]map[string]string) metricsMutex sync.Mutex ) -// object for Metrics type Metrics struct { Methods map[string]int32 RequestSizes map[string]int64 @@ -68,7 +66,7 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { accessKey := os.Getenv("AWS_ACCESS_KEY_ID") if accessKey == "" || secretKey == "" { - log.Println("AWS credentials are nto set in the enviroment variables.") + log.Println("AWS credentials are not set in the environment variables.") return } endpoints := map[string]EndpointConfig{ @@ -85,7 +83,6 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { Endpoint: "https://s3-eu-central-1.ionoscloud.com", }, } - //buffered channel that is a semaphore semaphore := make(chan struct{}, maxConcurrent) for { var wg sync.WaitGroup @@ -112,7 +109,6 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { for _, bucket := range result.Buckets { bucketName := *bucket.Name if _, exists := IonosS3Buckets[bucketName]; !exists { - //check if exists if not initialise metrics := Metrics{ Methods: make(map[string]int32), RequestSizes: make(map[string]int64), @@ -137,9 +133,7 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { log.Println("Error checking the bucket head:", err) return } - //acquiring slot in semaphore blocking if the buffer is full semaphore <- struct{}{} - //release the semaphore when the goroutine completes defer func() { <-semaphore }() @@ -154,14 +148,10 @@ func S3CollectResources(m *sync.RWMutex, cycletime int32) { } -/* -function for processing buckets getting the Traffic of all the operations -and their sizes. 
-*/ func processBucket(client *s3.S3, bucketName string) { var wg sync.WaitGroup - var logEntryRegex = regexp.MustCompile(`(GET|PUT|HEAD|POST) \/[^"]*" \d+ \S+ (\d+|-) (\d+|-) \d+ (\d+|-)`) + logEntryRegex := regexp.MustCompile(`(GET|PUT|HEAD|POST) \/[^"]*" \d+ \S+ (\d+|-) (\d+|-) \d+ (\d+|-)`) semaphore := make(chan struct{}, maxConcurrent) getBucketTags(client, bucketName) @@ -176,7 +166,6 @@ func processBucket(client *s3.S3, bucketName string) { continuationToken := "" - //getting owner getAclInput := &s3.GetBucketAclInput{ Bucket: aws.String(bucketName), } @@ -191,7 +180,6 @@ func processBucket(client *s3.S3, bucketName string) { metrics.Owner = "Unknown" } - //main loop for { objectList, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ @@ -200,7 +188,6 @@ func processBucket(client *s3.S3, bucketName string) { ContinuationToken: aws.String(continuationToken), MaxKeys: aws.Int64(objectPerPage), }) - //error handling if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { @@ -221,8 +208,6 @@ func processBucket(client *s3.S3, bucketName string) { log.Printf("bucket %s does not contain any objects with the 'logs/' prefix\n", bucketName) return } - //iterate through those objects and check the input of logs - //here we are using concurrency for _, object := range objectList.Contents { wg.Add(1) semaphore <- struct{}{} @@ -232,26 +217,17 @@ func processBucket(client *s3.S3, bucketName string) { processObject(client, bucketName, object, logEntryRegex, &metrics) }(object) } - //if there is no more pages break the loop if !aws.BoolValue(objectList.IsTruncated) { break } - //go to next page continuationToken = *objectList.NextContinuationToken } wg.Wait() - //make it thread safe with a mutex metricsMutex.Lock() IonosS3Buckets[bucketName] = metrics metricsMutex.Unlock() } -/* -function for getting bucket Tags, takes two parameters, the service client -and the bucket name, then it checks for tags using the aws sdk GetBucketTagging -no return value 
it saves everything to map of maps for Tags which is sent -to prometheus -*/ func getBucketTags(client *s3.S3, bucketName string) { tagsOutput, err := client.GetBucketTagging(&s3.GetBucketTaggingInput{ Bucket: aws.String(bucketName), @@ -283,7 +259,6 @@ func getBucketTags(client *s3.S3, bucketName string) { metricsMutex.Lock() TagsForPrometheus[bucketName] = tags metricsMutex.Unlock() - } func processObject(client *s3.S3, bucketName string, object *s3.Object, logEntryRegex *regexp.Regexp, metrics *Metrics) { @@ -302,13 +277,21 @@ func processObject(client *s3.S3, bucketName string, object *s3.Object, logEntry } defer result.Body.Close() - logContent, err := io.ReadAll(result.Body) - if err != nil { - log.Println("Problem reading the body", err) - return + reader := bufio.NewReader(result.Body) + for { + line, err := reader.ReadBytes('\n') + if err != nil { + if err != io.EOF { + log.Println("Problem reading the body", err) + } + break + } + processLine(line, logEntryRegex, metrics) } - matches := logEntryRegex.FindAllStringSubmatch(string(logContent), -1) +} +func processLine(line []byte, logEntryRegex *regexp.Regexp, metrics *Metrics) { + matches := logEntryRegex.FindAllStringSubmatch(string(line), -1) for _, match := range matches { metricsMutex.Lock() method := match[1] From 3bdbeadbd73dba95c4169cefc961f3f47477e94a Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 9 Aug 2024 11:47:47 +0200 Subject: [PATCH 41/55] added config changes and added env template file for running locally --- env | 1 + go.mod | 12 +++++------- go.sum | 21 ++++++++++----------- internal/helper.go | 22 +--------------------- internal/ionos_scraper.go | 14 +++++++------- internal/postgres_scraper.go | 22 ++++++++-------------- main.go | 16 ++++++++++++++-- 7 files changed, 46 insertions(+), 62 deletions(-) create mode 100644 env diff --git a/env b/env new file mode 100644 index 0000000..b936298 --- /dev/null +++ b/env @@ -0,0 +1 @@ +IONOS_TOKEN="" \ No newline at end of file diff --git
a/go.mod b/go.mod index 9e25c9f..624f546 100644 --- a/go.mod +++ b/go.mod @@ -4,29 +4,27 @@ go 1.20 require ( github.com/aws/aws-sdk-go v1.52.0 + github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2 github.com/ionos-cloud/sdk-go/v6 v6.1.9 github.com/joho/godotenv v1.5.1 github.com/prometheus/client_golang v1.16.0 + gopkg.in/yaml.v2 v2.4.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/ionos-cloud/sdk-go-bundle/products/dbaas/psql v0.1.0 // indirect - github.com/ionos-cloud/sdk-go-bundle/shared v0.1.0 // indirect - github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/kr/text v0.2.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect - golang.org/x/net v0.17.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.6.0 // indirect golang.org/x/sys v0.21.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect - gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect - gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index f4a3b3b..4749301 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -14,10 +15,6 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/ionos-cloud/sdk-go-bundle/products/dbaas/psql v0.1.0 h1:s/FZdHaews1vIRJYbJUvaO+Y9yYIiJ1z+s1HLMcCii0= -github.com/ionos-cloud/sdk-go-bundle/products/dbaas/psql v0.1.0/go.mod h1:8qebNZf4GeZm64BDUqeunDDw/jIQqhry6RyIhEqIHJE= -github.com/ionos-cloud/sdk-go-bundle/shared v0.1.0 h1:1psLbSn+i/wK0Samy8XqLxsqEnhTshz3Py1lbp5F/ao= -github.com/ionos-cloud/sdk-go-bundle/shared v0.1.0/go.mod h1:hDQ5fhujYFaYr1fD8bmmf4rlCKAsqKwczuBS0Z5PNBQ= github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2 h1:AaKbci+kVS6/k43VwJwmXxCJ7pzj9jwuOPqO8Wd5560= github.com/ionos-cloud/sdk-go-dbaas-postgres v1.1.2/go.mod h1:nmJEwuRX65A5/PxwvdFW0XrV+N6WFYnMV1TiIafAwz4= github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= @@ -28,6 +25,9 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -40,11 +40,13 @@ github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -53,7 +55,7 @@ golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= @@ -62,11 +64,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= -gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE= -gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/internal/helper.go b/internal/helper.go index fed051b..05318d0 100644 --- a/internal/helper.go +++ b/internal/helper.go @@ -13,13 +13,7 
@@ import ( ) type Config struct { - Tenants []TenantConfig `yaml:"tenants"` - Metrics []MetricConfig `yaml:"metrics"` - Endpoints []EndpointConfig `yaml:"endpoints"` -} - -type TenantConfig struct { - Name string `yaml:"name"` + Metrics []MetricConfig `yaml:"metrics"` } type MetricConfig struct { @@ -44,20 +38,6 @@ func GetEnv(key string, fallback string) string { } } -func HasLogsFolder(client *s3.S3, bucketName string) bool { - result, err := client.ListObjectsV2(&s3.ListObjectsV2Input{ - Bucket: aws.String(bucketName), - Prefix: aws.String("logs/"), - }) - - if err != nil { - fmt.Println("Error listing objects in bucket: ", err) - return false - } - - return len(result.Contents) > 0 -} - func GetHeadBucket(client *s3.S3, bucketName string) error { input := &s3.HeadBucketInput{ Bucket: aws.String(bucketName), diff --git a/internal/ionos_scraper.go b/internal/ionos_scraper.go index 50d2230..41f1e44 100644 --- a/internal/ionos_scraper.go +++ b/internal/ionos_scraper.go @@ -9,6 +9,7 @@ import ( "time" ionoscloud "github.com/ionos-cloud/sdk-go/v6" + "github.com/joho/godotenv" ) var ( @@ -39,16 +40,15 @@ type IonosDCResources struct { TotalAPICallFailures int32 } -func CollectResources(m *sync.RWMutex, cycletime int32) { +func CollectResources(m *sync.RWMutex, envFile string, cycletime int32) { + + err := godotenv.Load(envFile) + if err != nil { + fmt.Println("Error loading .env file (optional)") + } - //for local testing - // err := godotenv.Load(".env") - // if err != nil { - // fmt.Println("Error loading .env file") - // } cfgENV := ionoscloud.NewConfigurationFromEnv() - // cfg.Debug = true cfgENV.Debug = false apiClient := ionoscloud.NewAPIClient(cfgENV) diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 04a953b..899917c 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -11,6 +11,7 @@ import ( "time" psql "github.com/ionos-cloud/sdk-go-dbaas-postgres" + "github.com/joho/godotenv" ) type 
IonosPostgresResources struct { @@ -43,25 +44,19 @@ var ( IonosPostgresClusters = make(map[string]IonosPostgresResources) ) -func PostgresCollectResources(m *sync.RWMutex, cycletime int32) { - //for local testing - // err := godotenv.Load(".env") - // if err != nil { - // fmt.Println("Error loading .env file") - // } +func PostgresCollectResources(m *sync.RWMutex, configPath, envFile string, cycletime int32) { + err := godotenv.Load(envFile) + if err != nil { + fmt.Println("Error loading .env file (optional)") + } + cfgENV := psql.NewConfigurationFromEnv() apiClient := psql.NewAPIClient(cfgENV) - //config has all metrics for postgres - config, err := LoadConfig("/etc/ionos-exporter/config.yaml") - - //for local testing - // config, err := LoadConfig("./charts/ionos-exporter/config.yaml") + config, err := LoadConfig(configPath) if err != nil { log.Fatalf("Failed to load config: %v", err) } - //tried to speed up the processing with concurrency, ionos restricted number of requests - //so many of them would not go through for { processCluster(apiClient, m, config.Metrics) time.Sleep(time.Duration(cycletime) * time.Second) @@ -111,7 +106,6 @@ func processCluster(apiClient *psql.APIClient, m *sync.RWMutex, metrics []Metric telemetryData = append(telemetryData, telemetryResp.Data.Result...) 
} - // fmt.Printf("Here are the database names %v", databaseNames) newIonosPostgresResources[*clusters.Properties.DisplayName] = IonosPostgresResources{ ClusterName: *clusters.Properties.DisplayName, CPU: *clusters.Properties.Cores, diff --git a/main.go b/main.go index 822d56c..1a3bade 100644 --- a/main.go +++ b/main.go @@ -1,9 +1,12 @@ package main import ( + "flag" + "fmt" "ionos-exporter/internal" "log" "net/http" + "os" "strconv" "sync" @@ -17,15 +20,24 @@ var ( ) func main() { + configPath := flag.String("config", "", "Path to configuration file") + envFile := flag.String("env", "", "Path to env file (optional)") + flag.Parse() + if *configPath == "" { + fmt.Println("Error: Config path is required") + flag.Usage() + os.Exit(1) + } + exporterPort = internal.GetEnv("IONOS_EXPORTER_APPLICATION_CONTAINER_PORT", "9100") if cycletime, err := strconv.ParseInt(internal.GetEnv("IONOS_EXPORTER_API_CYCLE", "200"), 10, 32); err != nil { log.Fatal("Cannot convert IONOS_API_CYCLE to int") } else { ionos_api_cycle = int32(cycletime) } - go internal.CollectResources(m, ionos_api_cycle) + go internal.CollectResources(m, *envFile, ionos_api_cycle) go internal.S3CollectResources(m, ionos_api_cycle) - go internal.PostgresCollectResources(m, ionos_api_cycle) + go internal.PostgresCollectResources(m, *configPath, *envFile, ionos_api_cycle) internal.PrintDCResources(m) internal.StartPrometheus(m) From fbd32527890577bb97cffd258c537dc7239be7c6 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 9 Aug 2024 12:02:46 +0200 Subject: [PATCH 42/55] added path to config in deployment yaml --- charts/ionos-exporter/templates/deployment.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index c513f72..f569715 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -35,6 +35,8 @@ spec: - name: metrics containerPort: {{ 
.Values.containerPort }} protocol: TCP + args: + - "-config=/etc/ionos-exporter/config.yaml" livenessProbe: httpGet: path: /metrics From f52ed8072adceb9051cc15ec156d93672dbcadb6 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 9 Aug 2024 12:08:57 +0200 Subject: [PATCH 43/55] added default path to config.yaml --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index 1a3bade..0300657 100644 --- a/main.go +++ b/main.go @@ -20,7 +20,7 @@ var ( ) func main() { - configPath := flag.String("config", "", "Path to configuration file") + configPath := flag.String("config", "/etc/ionos-exporter/config.yaml", "Path to configuration file") envFile := flag.String("env", "", "Path to env file (optional)") flag.Parse() if *configPath == "" { From 9bf933026ea8a8f87d14419fc52c7a7c9117a16b Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 9 Aug 2024 12:17:23 +0200 Subject: [PATCH 44/55] removed default path to config and removed os.Exit when no config set --- main.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/main.go b/main.go index 0300657..bd4eb90 100644 --- a/main.go +++ b/main.go @@ -2,11 +2,9 @@ package main import ( "flag" - "fmt" "ionos-exporter/internal" "log" "net/http" - "os" "strconv" "sync" @@ -20,13 +18,11 @@ var ( ) func main() { - configPath := flag.String("config", "/etc/ionos-exporter/config.yaml", "Path to configuration file") + configPath := flag.String("config", "", "Path to configuration file") envFile := flag.String("env", "", "Path to env file (optional)") flag.Parse() if *configPath == "" { - fmt.Println("Error: Config path is required") flag.Usage() - os.Exit(1) } exporterPort = internal.GetEnv("IONOS_EXPORTER_APPLICATION_CONTAINER_PORT", "9100") From 5ec471d63fde9331ea7d8794d42c5239dbf99f1a Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 9 Aug 2024 12:24:31 +0200 Subject: [PATCH 45/55] trying gracefully skipping local setting of config yaml --- main.go | 7 +++++-- 1 file changed, 5 
insertions(+), 2 deletions(-) diff --git a/main.go b/main.go index bd4eb90..c02fba5 100644 --- a/main.go +++ b/main.go @@ -5,6 +5,7 @@ import ( "ionos-exporter/internal" "log" "net/http" + "os" "strconv" "sync" @@ -21,8 +22,10 @@ func main() { configPath := flag.String("config", "", "Path to configuration file") envFile := flag.String("env", "", "Path to env file (optional)") flag.Parse() - if *configPath == "" { - flag.Usage() + if *envFile != "" { + if _, err := os.Stat(*configPath); os.IsNotExist(err) { + log.Printf("Warning: config file not found at %s, continuing without it", *configPath) + } } exporterPort = internal.GetEnv("IONOS_EXPORTER_APPLICATION_CONTAINER_PORT", "9100") From ab7cdda452c03551a9e8e0853a983050a4f7b94f Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 9 Aug 2024 12:28:17 +0200 Subject: [PATCH 46/55] syntax fix --- internal/postgres_scraper.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 899917c..2d83d08 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "log" "net/http" "os" "sync" @@ -55,7 +54,7 @@ func PostgresCollectResources(m *sync.RWMutex, configPath, envFile string, cycle config, err := LoadConfig(configPath) if err != nil { - log.Fatalf("Failed to load config: %v", err) + fmt.Println("Failed to load config: %v", err) } for { processCluster(apiClient, m, config.Metrics) From 303846b5dc64ae6f4e7578465a6aa7c705f93d12 Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 9 Aug 2024 12:31:31 +0200 Subject: [PATCH 47/55] fixed nil dereference problem --- internal/postgres_scraper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index 2d83d08..dc9ab5d 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -54,7 +54,7 @@ func PostgresCollectResources(m *sync.RWMutex, 
configPath, envFile string, cycle config, err := LoadConfig(configPath) if err != nil { - fmt.Println("Failed to load config: %v", err) + fmt.Println("Failed to load config:", err) } for { processCluster(apiClient, m, config.Metrics) From cdef725bbf291bb24be49fd545988f31a85d85ea Mon Sep 17 00:00:00 2001 From: efidoris Date: Fri, 9 Aug 2024 12:32:26 +0200 Subject: [PATCH 48/55] added default path again --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index c02fba5..85b1b80 100644 --- a/main.go +++ b/main.go @@ -19,7 +19,7 @@ var ( ) func main() { - configPath := flag.String("config", "", "Path to configuration file") + configPath := flag.String("config", "/etc/ionos-exporter/config.yaml", "Path to configuration file") envFile := flag.String("env", "", "Path to env file (optional)") flag.Parse() if *envFile != "" { From f39fa28052ce9a18cd042948e533be601b497e80 Mon Sep 17 00:00:00 2001 From: efidoris Date: Wed, 14 Aug 2024 13:19:47 +0200 Subject: [PATCH 49/55] removed line configfile as environment variable only as parameter is now used --- charts/ionos-exporter/templates/deployment.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index f569715..71c556f 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -48,11 +48,6 @@ spec: resources: {{- toYaml .Values.resources | nindent 12 }} env: - - name: CONFIG_CONTENT - valueFrom: - configMapKeyRef: - name: ionos-exporter-config - key: config.yaml - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: From f256591e0f5e9262887a6494e325cacf32df4bc9 Mon Sep 17 00:00:00 2001 From: efidoris Date: Wed, 14 Aug 2024 13:25:25 +0200 Subject: [PATCH 50/55] Removed cost-calc sequence diagram from Documentation --- Documentation/cost_cal_SQD.drawio | 237 ------------------------------ 1 file changed, 237 deletions(-) delete 
mode 100644 Documentation/cost_cal_SQD.drawio diff --git a/Documentation/cost_cal_SQD.drawio b/Documentation/cost_cal_SQD.drawio deleted file mode 100644 index 0db3412..0000000 --- a/Documentation/cost_cal_SQD.drawio +++ /dev/null @@ -1,237 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - From f9e9fabd5c4caf6c27bf49d301abc5a319a4e4f3 Mon Sep 17 00:00:00 2001 From: Jannik Badenhop Date: Thu, 10 Oct 2024 12:31:33 +0000 Subject: [PATCH 51/55] make s3 part optional --- .../ionos-exporter/templates/deployment.yaml | 20 +++++++++++-------- charts/ionos-exporter/values.yaml | 15 +++++++++----- main.go | 7 ++++++- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index 71c556f..4f84abb 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -48,26 +48,30 @@ spec: resources: {{- toYaml .Values.resources | nindent 12 }} env: + {{ if .Values.ionos.s3.enabled -}} - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: - name: {{ .Values.ionos_credentials_secret_name }} - key: {{ .Values.ionos_s3_credentials_access_key }} + name: {{ .Values.ionos.credentials.secret_name }} + key: {{ .Values.ionos.s3.credentials.access_key }} - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: - name: {{ .Values.ionos_credentials_secret_name }} - key: {{ .Values.ionos_s3_credentials_secret_key }} + name: {{ .Values.ionos.credentials.secret_name }} + key: {{ .Values.ionos.s3.credentials.secret_key }} + 
{{ end -}} - name: IONOS_USERNAME valueFrom: secretKeyRef: - name: {{ .Values.ionos_credentials_secret_name }} - key: {{ .Values.ionos_credentials_username_key }} + name: {{ .Values.ionos.credentials.secret_name }} + key: {{ .Values.ionos.credentials.username_key }} - name: IONOS_PASSWORD valueFrom: secretKeyRef: - name: {{ .Values.ionos_credentials_secret_name }} - key: {{ .Values.ionos_credentials_password_key }} + name: {{ .Values.ionos.credentials.secret_name }} + key: {{ .Values.ionos.credentials.password_key }} + - name: IONOS_EXPORTER_S3_ENABLED + value: {{ .Values.ionos.s3.enabled }} - name: IONOS_EXPORTER_APPLICATION_CONTAINER_PORT value: {{ .Values.containerPort | quote }} - name: IONOS_EXPORTER_API_CYCLE diff --git a/charts/ionos-exporter/values.yaml b/charts/ionos-exporter/values.yaml index b55b3bc..6e33fe3 100644 --- a/charts/ionos-exporter/values.yaml +++ b/charts/ionos-exporter/values.yaml @@ -12,11 +12,16 @@ imagePullSecrets: [] # credentials # ionos_credentials_secret_token: "ionos-exporter-token" # ionos_credentials_token_key: "token" -ionos_credentials_secret_name: "ionos-exporter-credentials" -ionos_credentials_username_key: "username" -ionos_credentials_password_key: "password" -ionos_s3_credentials_secret_key: "secretKey" -ionos_s3_credentials_access_key: "accessKey" +ionos: + credentials: + secret_name: "ionos-exporter-credentials" + username_key: "username" + password_key: "password" + s3: + enabled: false + credentials: + secret_key: "secretKey" + access_key: "accessKey" service: type: ClusterIP diff --git a/main.go b/main.go index 85b1b80..4bd1b49 100644 --- a/main.go +++ b/main.go @@ -35,7 +35,12 @@ func main() { ionos_api_cycle = int32(cycletime) } go internal.CollectResources(m, *envFile, ionos_api_cycle) - go internal.S3CollectResources(m, ionos_api_cycle) + if s3_enabled, err := strconv.ParseBool(internal.GetEnv("IONOS_EXPORTER_S3_ENABLED", "false")); s3_enabled == true { + if err != nil { + log.Fatal("Cannot convert 
IONOS_EXPORTER_S3_ENABLED value to bool") + } + go internal.S3CollectResources(m, ionos_api_cycle) + } go internal.PostgresCollectResources(m, *configPath, *envFile, ionos_api_cycle) internal.PrintDCResources(m) From 7f03d99962f230421131b24595bf7fc9f8d16d8c Mon Sep 17 00:00:00 2001 From: Jannik Badenhop Date: Thu, 10 Oct 2024 12:31:50 +0000 Subject: [PATCH 52/55] Fix if, Name -> Owner --- internal/postgres_scraper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/postgres_scraper.go b/internal/postgres_scraper.go index dc9ab5d..b2991e3 100644 --- a/internal/postgres_scraper.go +++ b/internal/postgres_scraper.go @@ -183,7 +183,7 @@ func fetchOwner(apiClient *psql.APIClient, clusterID string) (string, error) { } var owner = "" for _, db := range *databases.Items { - if db.Properties != nil && db.Properties.Name != nil { + if db.Properties != nil && db.Properties.Owner != nil { owner = *db.Properties.Owner } } From d9986d3bbde100ed0051172b540d1b69af981733 Mon Sep 17 00:00:00 2001 From: Jannik Badenhop Date: Thu, 10 Oct 2024 12:52:13 +0000 Subject: [PATCH 53/55] increase minor version and reset patch version, since values and defaults change in a potentially breaking way --- charts/ionos-exporter/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/ionos-exporter/Chart.yaml b/charts/ionos-exporter/Chart.yaml index 63a41f4..1325aa7 100644 --- a/charts/ionos-exporter/Chart.yaml +++ b/charts/ionos-exporter/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.12 +version: 0.1.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.0.4" +appVersion: "0.1.0" From b06d005cb425b2614221aa73c9713be84d98dad4 Mon Sep 17 00:00:00 2001 From: Jannik Badenhop Date: Thu, 10 Oct 2024 13:10:14 +0000 Subject: [PATCH 54/55] revert version changes --- charts/ionos-exporter/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/ionos-exporter/Chart.yaml b/charts/ionos-exporter/Chart.yaml index 1325aa7..63a41f4 100644 --- a/charts/ionos-exporter/Chart.yaml +++ b/charts/ionos-exporter/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.0.12 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "0.1.0" +appVersion: "0.0.4" From 7c6aeba22e8c50ca68aea82eb192db633290bde1 Mon Sep 17 00:00:00 2001 From: Jannik Badenhop Date: Fri, 11 Oct 2024 09:26:12 +0000 Subject: [PATCH 55/55] fix quoting and whitespace issues --- charts/ionos-exporter/templates/deployment.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/ionos-exporter/templates/deployment.yaml b/charts/ionos-exporter/templates/deployment.yaml index 4f84abb..cf04c2b 100644 --- a/charts/ionos-exporter/templates/deployment.yaml +++ b/charts/ionos-exporter/templates/deployment.yaml @@ -48,7 +48,7 @@ spec: resources: {{- toYaml .Values.resources | nindent 12 }} env: - {{ if .Values.ionos.s3.enabled -}} + {{- if .Values.ionos.s3.enabled }} - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: @@ -59,7 +59,7 @@ spec: secretKeyRef: name: {{ .Values.ionos.credentials.secret_name }} key: {{ .Values.ionos.s3.credentials.secret_key }} - {{ end -}} + {{- end }} - name: IONOS_USERNAME valueFrom: secretKeyRef: @@ -71,7 +71,7 @@ spec: name: {{ .Values.ionos.credentials.secret_name }} key: {{ .Values.ionos.credentials.password_key }} - name: IONOS_EXPORTER_S3_ENABLED - value: {{ .Values.ionos.s3.enabled }} + value: {{ .Values.ionos.s3.enabled | quote }} - name: IONOS_EXPORTER_APPLICATION_CONTAINER_PORT value: {{ .Values.containerPort | quote }} - name: IONOS_EXPORTER_API_CYCLE