// main.go, forked from molu8bits/s3bucket_exporter
package main

import (
    "flag"
    "net/http"
    "os"
    "strconv"
    "sync"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    log "github.com/sirupsen/logrus"

    "github.com/tropnikovvl/s3bucket_exporter/controllers"
)
var (
    up = prometheus.NewDesc("s3_endpoint_up", "Connection to S3 successful", []string{"s3Endpoint", "s3Region"}, nil)

    listenPort       string
    logLevel         string
    scrapeInterval   string
    s3Endpoint       string
    s3BucketNames    string
    s3AccessKey      string
    s3SecretKey      string
    s3Region         string
    s3ForcePathStyle bool

    // updateMetrics writes the latest scrape result into this cache;
    // Collect reads it under metricsMutex, so HTTP scrapes never wait
    // on a live S3 call.
    metricsMutex  sync.RWMutex
    cachedMetrics controllers.S3Summary
    cachedError   error
)
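// envString returns the value of environment variable key, or def
// when the variable is unset or empty.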
func envString(key, def string) string {
    if x := os.Getenv(key); x != "" {
        return x
    }
    return def
}
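// envBool parses environment variable key with strconv.ParseBool
// (accepting forms such as "1", "true", "f"); any parse error,
// including an unset variable, yields def.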
func envBool(key string, def bool) bool {
    parsed, err := strconv.ParseBool(os.Getenv(key))
    if err != nil {
        return def
    }
    return parsed
}
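// init registers the command-line flags, each defaulting to its
// matching environment variable, so the exporter can be configured
// either way.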
func init() {
    flag.StringVar(&s3Endpoint, "s3_endpoint", envString("S3_ENDPOINT", ""), "S3_ENDPOINT - eg. myceph.com:7480")
    flag.StringVar(&s3AccessKey, "s3_access_key", envString("S3_ACCESS_KEY", ""), "S3_ACCESS_KEY - aws_access_key")
    flag.StringVar(&s3SecretKey, "s3_secret_key", envString("S3_SECRET_KEY", ""), "S3_SECRET_KEY - aws_secret_key")
    flag.StringVar(&s3BucketNames, "s3_bucket_names", envString("S3_BUCKET_NAMES", ""), "S3_BUCKET_NAMES")
    flag.StringVar(&s3Region, "s3_region", envString("S3_REGION", "us-east-1"), "S3_REGION")
    flag.StringVar(&listenPort, "listen_port", envString("LISTEN_PORT", ":9655"), "LISTEN_PORT - eg. ':9655'")
    flag.StringVar(&logLevel, "log_level", envString("LOG_LEVEL", "info"), "LOG_LEVEL")
    flag.StringVar(&scrapeInterval, "scrape_interval", envString("SCRAPE_INTERVAL", "5m"), "SCRAPE_INTERVAL - eg. 30s, 5m, 1h")
    flag.BoolVar(&s3ForcePathStyle, "s3_force_path_style", envBool("S3_FORCE_PATH_STYLE", false), "S3_FORCE_PATH_STYLE")
    flag.Parse()
}
// S3Collector implements prometheus.Collector on top of the cache
// maintained by updateMetrics.
type S3Collector struct{}

// Describe implements prometheus.Collector.
func (c S3Collector) Describe(ch chan<- *prometheus.Desc) {
    ch <- up
}
// Collect implements prometheus.Collector. It converts the cached
// scrape results into metrics under a read lock.
func (c S3Collector) Collect(ch chan<- prometheus.Metric) {
    metricsMutex.RLock()
    defer metricsMutex.RUnlock()

    s3Status := 0
    if cachedMetrics.S3Status {
        s3Status = 1
    }
    // The up gauge is exported whether or not the last poll failed.
    ch <- prometheus.MustNewConstMetric(up, prometheus.GaugeValue, float64(s3Status), s3Endpoint, s3Region)
    if cachedError != nil {
        log.Errorf("Cached error: %v", cachedError)
        return
    }
    log.Debugf("Cached S3 metrics %s: %+v", s3Endpoint, cachedMetrics)

    descS := prometheus.NewDesc("s3_total_size", "S3 Total Bucket Size", []string{"s3Endpoint", "s3Region"}, nil)
    descON := prometheus.NewDesc("s3_total_object_number", "S3 Total Object Number", []string{"s3Endpoint", "s3Region"}, nil)
    ch <- prometheus.MustNewConstMetric(descS, prometheus.GaugeValue, float64(cachedMetrics.S3Size), s3Endpoint, s3Region)
    ch <- prometheus.MustNewConstMetric(descON, prometheus.GaugeValue, float64(cachedMetrics.S3ObjectNumber), s3Endpoint, s3Region)

    // The per-bucket gauges share one descriptor pair instead of
    // allocating new descriptors on every loop iteration.
    descBucketS := prometheus.NewDesc("s3_bucket_size", "S3 Bucket Size", []string{"s3Endpoint", "s3Region", "bucketName"}, nil)
    descBucketON := prometheus.NewDesc("s3_bucket_object_number", "S3 Bucket Object Number", []string{"s3Endpoint", "s3Region", "bucketName"}, nil)
    for _, bucket := range cachedMetrics.S3Buckets {
        ch <- prometheus.MustNewConstMetric(descBucketS, prometheus.GaugeValue, float64(bucket.BucketSize), s3Endpoint, s3Region, bucket.BucketName)
        ch <- prometheus.MustNewConstMetric(descBucketON, prometheus.GaugeValue, float64(bucket.BucketObjectNumber), s3Endpoint, s3Region, bucket.BucketName)
    }
}
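// updateMetrics polls the S3 endpoint once per interval and stores
// the result in the shared cache, so Prometheus scrapes never block
// on an S3 round trip.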
func updateMetrics(interval time.Duration) {
    for {
        s3Conn := controllers.S3Conn{
            S3ConnEndpoint:       s3Endpoint,
            S3ConnAccessKey:      s3AccessKey,
            S3ConnSecretKey:      s3SecretKey,
            S3ConnForcePathStyle: s3ForcePathStyle,
            S3ConnRegion:         s3Region,
        }
        metrics, err := controllers.S3UsageInfo(s3Conn, s3BucketNames)

        metricsMutex.Lock()
        cachedMetrics = metrics
        cachedError = err
        metricsMutex.Unlock()

        if err != nil {
            log.Errorf("Failed to update S3 metrics: %v", err)
        } else {
            log.Debugf("Updated S3 metrics: %+v", metrics)
        }

        log.Debugf("Waiting for %v before updating metrics", interval)
        time.Sleep(interval)
    }
}
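// healthHandler answers liveness probes with a plain 200 OK.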
func healthHandler(w http.ResponseWriter, _ *http.Request) {
    w.WriteHeader(http.StatusOK)
    // The 200 status line is already on the wire at this point, so a
    // failed write can only be logged, not turned into an error response.
    if _, err := w.Write([]byte("OK")); err != nil {
        log.Errorf("Error writing health response: %v", err)
    }
}
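// main validates the configuration, starts the background poller,
// and serves /metrics and /health over HTTP.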
func main() {
    level, err := log.ParseLevel(logLevel)
    if err != nil {
        log.Fatalf("Invalid log level: %s", logLevel)
    }
    log.SetLevel(level)

    if s3AccessKey == "" || s3SecretKey == "" {
        log.Fatal("S3 access key and secret key are required")
    }

    interval, err := time.ParseDuration(scrapeInterval)
    if err != nil {
        log.Fatalf("Invalid scrape interval: %s", scrapeInterval)
    }

    go updateMetrics(interval)

    prometheus.MustRegister(S3Collector{})
    http.Handle("/metrics", promhttp.Handler())
    http.HandleFunc("/health", healthHandler)

    srv := &http.Server{
        Addr:         listenPort,
        ReadTimeout:  35 * time.Second,
        WriteTimeout: 35 * time.Second,
        IdleTimeout:  120 * time.Second,
    }

    log.Infof("Starting server on %s", listenPort)
    if s3BucketNames != "" {
        log.Infof("Monitoring buckets: %s in %s region", s3BucketNames, s3Region)
    } else {
        log.Infof("Monitoring all buckets in %s region", s3Region)
    }

    if err := srv.ListenAndServe(); err != nil {
        log.Fatalf("Server failed to start: %v", err)
    }
}
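// Example invocation (values are illustrative; each flag can also be
// set through its upper-case environment variable, eg. S3_ENDPOINT):
//
//	./s3bucket_exporter -s3_endpoint=myceph.com:7480 \
//	    -s3_access_key=AKIA... -s3_secret_key=secret \
//	    -s3_force_path_style -scrape_interval=1m -listen_port=:9655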