From e84b8f475d3ac2cf80ee4b11f9ff9e977532b57c Mon Sep 17 00:00:00 2001
From: kongfei605
Date: Sat, 21 Sep 2024 17:26:10 +0800
Subject: [PATCH] feat: apache exporter (#1059)

* feat: apache exporter

* chore: add license and readme
---
 agent/metrics_agent.go              |   1 +
 conf/input.apache/apache.toml       |   9 +
 inputs/apache/LICENSE               |  21 +
 inputs/apache/README.md             |  23 ++
 inputs/apache/apache.go             | 100 +++++
 inputs/apache/exporter/collector.go | 576 ++++++++++++++++++++++++++++
 6 files changed, 730 insertions(+)
 create mode 100644 conf/input.apache/apache.toml
 create mode 100644 inputs/apache/LICENSE
 create mode 100644 inputs/apache/README.md
 create mode 100644 inputs/apache/apache.go
 create mode 100644 inputs/apache/exporter/collector.go

diff --git a/agent/metrics_agent.go b/agent/metrics_agent.go
index 6f6edc01..16f0d69a 100644
--- a/agent/metrics_agent.go
+++ b/agent/metrics_agent.go
@@ -13,6 +13,7 @@ import (
 
 	// auto registry
 	_ "flashcat.cloud/categraf/inputs/aliyun"
+	_ "flashcat.cloud/categraf/inputs/apache"
 	_ "flashcat.cloud/categraf/inputs/appdynamics"
 	_ "flashcat.cloud/categraf/inputs/arp_packet"
 	_ "flashcat.cloud/categraf/inputs/bind"
diff --git a/conf/input.apache/apache.toml b/conf/input.apache/apache.toml
new file mode 100644
index 00000000..d552d7a8
--- /dev/null
+++ b/conf/input.apache/apache.toml
@@ -0,0 +1,9 @@
+[[instances]]
+
+# https://statuslist.app/apache/apache-status-page-simple-setup-guide/
+# scrape_uri = "http://localhost/server-status/?auto"
+# host_override = ""
+# insecure = false
+# custom_headers = {}
+# level: debug,info,warn,error
+# log_level = "info"
diff --git a/inputs/apache/LICENSE b/inputs/apache/LICENSE
new file mode 100644
index 00000000..ccc72afd
--- /dev/null
+++ b/inputs/apache/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 neezgee
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/inputs/apache/README.md b/inputs/apache/README.md
new file mode 100644
index 00000000..f414f6e8
--- /dev/null
+++ b/inputs/apache/README.md
@@ -0,0 +1,23 @@
+forked from [apache/README.md](https://github.com/Lusitaniae/apache_exporter/tree/master/README.md)
+
+```
+
+[[instances]]
+## How to set up the Apache server-status page: https://statuslist.app/apache/apache-status-page-simple-setup-guide/
+
+## URL of the Apache server-status page
+# scrape_uri = "http://localhost/server-status/?auto"
+
+## Override the Host header
+# host_override = ""
+
+## Skip HTTPS certificate verification
+# insecure = false
+
+## Custom request headers
+# custom_headers = {}
+
+## Log level
+# level: debug,info,warn,error
+# log_level = "info"
+```
\ No newline at end of file
diff --git a/inputs/apache/apache.go b/inputs/apache/apache.go
new file mode 100644
index 00000000..b10462b2
--- /dev/null
+++ b/inputs/apache/apache.go
@@ -0,0 +1,100 @@
+package apache
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/prometheus/common/promlog"
+
+	"flashcat.cloud/categraf/config"
+	"flashcat.cloud/categraf/inputs"
+	"flashcat.cloud/categraf/inputs/apache/exporter"
+	"flashcat.cloud/categraf/types"
+)
+
+const inputName = "apache"
+
+type Apache struct {
+	config.PluginConfig
+	Instances []*Instance `toml:"instances"`
+}
+
+type Instance struct {
+	config.InstanceConfig
+	LogLevel string `toml:"log_level"`
+	exporter.Config
+
+	e *exporter.Exporter
+}
+
+var _ inputs.Input = new(Apache)
+var _ inputs.SampleGatherer = new(Instance)
+var _ inputs.InstancesGetter = new(Apache)
+
+func init() {
+	inputs.Add(inputName, func() inputs.Input {
+		return &Apache{}
+	})
+}
+
+func (a *Apache) Clone() inputs.Input {
+	return &Apache{}
+}
+
+func (a *Apache) Name() string {
+	return inputName
+}
+
+func (a *Apache) GetInstances() []inputs.Instance {
+	ret := make([]inputs.Instance, len(a.Instances))
+	for i := 0; i < len(a.Instances); i++ {
+		ret[i] = a.Instances[i]
+	}
+	return ret
+}
+
+func (a *Apache) Drop() {
+
+	for _, i := range a.Instances {
+		if i == nil {
+			continue
+		}
+
+		if i.e != nil {
+			i.e.Close()
+		}
+	}
+}
+
+func (ins *Instance) Init() error {
+	if len(ins.ScrapeURI) == 0 {
+		return types.ErrInstancesEmpty
+	}
+
+	if len(ins.LogLevel) == 0 {
+		ins.LogLevel = "info"
+	}
+	promlogConfig := &promlog.Config{
+		Level: &promlog.AllowedLevel{},
+	}
+	promlogConfig.Level.Set(ins.LogLevel)
+	logger := promlog.New(promlogConfig)
+	e, err := exporter.New(logger, &ins.Config)
+
+	if err != nil {
+		return fmt.Errorf("could not instantiate apache exporter: %w", err)
+	}
+
+	ins.e = e
+	return nil
+
+}
+
+func (ins *Instance) Gather(slist *types.SampleList) {
+
+	// collect
+	err := inputs.Collect(ins.e, slist)
+	if err != nil {
+		log.Println("E! failed to collect metrics:", err)
+	}
+}
diff --git a/inputs/apache/exporter/collector.go b/inputs/apache/exporter/collector.go
new file mode 100644
index 00000000..964a8290
--- /dev/null
+++ b/inputs/apache/exporter/collector.go
@@ -0,0 +1,576 @@
+// Copyright (c) 2015 neezgee
+//
+// Licensed under the MIT license: https://opensource.org/licenses/MIT
+// Permission is granted to use, copy, modify, and redistribute the work.
+// Full license information available in the project LICENSE file.
+//
+
+package exporter
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/version"
+)
+
+const (
+	namespace = "apache"
+)
+
+var (
+	// Regular expressions for matching proxy balancer status lines.
+	reProxyBalName   = regexp.MustCompile(`ProxyBalancer\[\d+\]Name`)
+	reProxyBalWorker = regexp.MustCompile(`ProxyBalancer\[\d+\]Worker\[\d+\](\S+)`)
+)
+
+type Exporter struct {
+	URI           string
+	hostOverride  string
+	customHeaders map[string]string
+	mutex         sync.Mutex
+	client        *http.Client
+	userAgent     string
+
+	up                    *prometheus.Desc
+	scrapeFailures        prometheus.Counter
+	apacheVersion         *prometheus.Desc
+	apacheInfo            *prometheus.GaugeVec
+	generation            *prometheus.GaugeVec
+	load                  *prometheus.GaugeVec
+	accessesTotal         *prometheus.Desc
+	kBytesTotal           *prometheus.Desc
+	durationTotal         *prometheus.Desc
+	cpuTotal              *prometheus.Desc
+	cpuload               prometheus.Gauge
+	uptime                *prometheus.Desc
+	workers               *prometheus.GaugeVec
+	processes             *prometheus.GaugeVec
+	connections           *prometheus.GaugeVec
+	scoreboard            *prometheus.GaugeVec
+	proxyBalancerStatus   *prometheus.GaugeVec
+	proxyBalancerElected  *prometheus.Desc
+	proxyBalancerBusy     *prometheus.GaugeVec
+	proxyBalancerReqSize  *prometheus.Desc
+	proxyBalancerRespSize *prometheus.Desc
+	logger                log.Logger
+}
+
+type Config struct {
+	ScrapeURI     string            `toml:"scrape_uri"`
+	HostOverride  string            `toml:"host_override"`
+	Insecure      bool              `toml:"insecure"`
+	CustomHeaders map[string]string `toml:"custom_headers"`
+}
+
+func New(logger log.Logger, config *Config) (*Exporter, error) {
+	return &Exporter{
+		URI:           config.ScrapeURI,
+		hostOverride:  config.HostOverride,
+		customHeaders: config.CustomHeaders,
+		logger:        logger,
+		up: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "up"),
+			"Could the apache server be reached",
+			nil,
+			nil),
+		scrapeFailures: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: namespace,
+			Name:      "exporter_scrape_failures_total",
+			Help:      "Number of errors while scraping apache.",
+		}),
+		apacheVersion: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "version"),
+			"Apache server version",
+			nil,
+			nil),
+		apacheInfo: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "info",
+			Help:      "Apache version information",
+		},
+			[]string{"version", "mpm"},
+		),
+		generation: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "generation",
+			Help:      "Apache restart generation",
+		},
+			[]string{"type"},
+		),
+		load: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "load",
+			Help:      "Apache server load",
+		},
+			[]string{"interval"},
+		),
+		accessesTotal: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "accesses_total"),
+			"Current total apache accesses (*)",
+			nil,
+			nil),
+		kBytesTotal: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "sent_kilobytes_total"),
+			"Current total kbytes sent (*)",
+			nil,
+			nil),
+		durationTotal: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "duration_ms_total"),
+			"Total duration of all registered requests in ms",
+			nil,
+			nil),
+		cpuTotal: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "cpu_time_ms_total"),
+			"Apache CPU time",
+			[]string{"type"}, nil,
+		),
+		cpuload: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "cpuload",
+			Help:      "The current percentage CPU used by each worker and in total by all workers combined (*)",
+		}),
+		uptime: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "uptime_seconds_total"),
+			"Current uptime in seconds (*)",
+			nil,
+			nil),
+		workers: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "workers",
+			Help:      "Apache worker statuses",
+		},
+			[]string{"state"},
+		),
+		processes: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "processes",
+			Help:      "Apache process count",
+		},
+			[]string{"state"},
+		),
+		connections: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "connections",
+			Help:      "Apache connection statuses",
+		},
+			[]string{"state"},
+		),
+		scoreboard: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "scoreboard",
+			Help:      "Apache scoreboard statuses",
+		},
+			[]string{"state"},
+		),
+		proxyBalancerStatus: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "proxy_balancer_status",
+			Help:      "Apache Proxy Balancer Statuses",
+		},
+			[]string{"balancer", "worker", "status"},
+		),
+		proxyBalancerElected: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "proxy_balancer_accesses_total"),
+			"Apache Proxy Balancer Request Count",
+			[]string{"balancer", "worker"}, nil,
+		),
+		proxyBalancerBusy: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "proxy_balancer_busy",
+			Help:      "Apache Proxy Balancer Active Requests",
+		},
+			[]string{"balancer", "worker"},
+		),
+		proxyBalancerReqSize: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "proxy_balancer_request_kbytes_total"),
+			"Apache Proxy Balancer Request Size (KBytes)",
+			[]string{"balancer", "worker"}, nil,
+		),
+		proxyBalancerRespSize: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, "", "proxy_balancer_response_kbytes_total"),
+			"Apache Proxy Balancer Response Size (KBytes)",
+			[]string{"balancer", "worker"}, nil,
+		),
+		client: &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: &tls.Config{InsecureSkipVerify: config.Insecure},
+			},
+		},
+		userAgent: fmt.Sprintf("Prometheus-Apache-Exporter/%s", version.Version),
+	}, nil
+}
+
+// Describe implements Prometheus.Collector.
+func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
+	ch <- e.up
+	e.scrapeFailures.Describe(ch)
+	ch <- e.apacheVersion
+	e.apacheInfo.Describe(ch)
+	e.generation.Describe(ch)
+	e.load.Describe(ch)
+	ch <- e.accessesTotal
+	ch <- e.kBytesTotal
+	ch <- e.durationTotal
+	ch <- e.cpuTotal
+	e.cpuload.Describe(ch)
+	ch <- e.uptime
+	e.workers.Describe(ch)
+	e.processes.Describe(ch)
+	e.connections.Describe(ch)
+	e.scoreboard.Describe(ch)
+	e.proxyBalancerStatus.Describe(ch)
+	ch <- e.proxyBalancerElected
+	e.proxyBalancerBusy.Describe(ch)
+	ch <- e.proxyBalancerReqSize
+	ch <- e.proxyBalancerRespSize
+}
+
+// Split colon separated string into two fields
+func splitkv(s string) (string, string) {
+	if len(s) == 0 {
+		return s, s
+	}
+
+	slice := strings.SplitN(s, ":", 2)
+
+	if len(slice) == 1 {
+		return slice[0], ""
+	}
+
+	return strings.TrimSpace(slice[0]), strings.TrimSpace(slice[1])
+}
+
+var scoreboardLabelMap = map[string]string{
+	"_": "idle",
+	"S": "startup",
+	"R": "read",
+	"W": "reply",
+	"K": "keepalive",
+	"D": "dns",
+	"C": "closing",
+	"L": "logging",
+	"G": "graceful_stop",
+	"I": "idle_cleanup",
+	".": "open_slot",
+}
+
+func (e *Exporter) updateScoreboard(scoreboard string) {
+	e.scoreboard.Reset()
+	for _, v := range scoreboardLabelMap {
+		e.scoreboard.WithLabelValues(v)
+	}
+
+	for _, worker_status := range scoreboard {
+		s := string(worker_status)
+		label, ok := scoreboardLabelMap[s]
+		if !ok {
+			label = s
+		}
+		e.scoreboard.WithLabelValues(label).Inc()
+	}
+}
+func (e *Exporter) Close() {
+	if e.client != nil {
+		e.client.CloseIdleConnections()
+	}
+}
+func (e *Exporter) collect(ch chan<- prometheus.Metric) error {
+	req, err := http.NewRequest(http.MethodGet, e.URI, nil)
+	if err != nil {
+		return fmt.Errorf("error building scraping request: %w", err)
+	}
+
+	if e.hostOverride != "" {
+		req.Host = e.hostOverride
+	}
+
+	for k, v := range e.customHeaders {
+		req.Header.Add(k, v)
+	}
+
+	req.Header.Set("User-Agent", e.userAgent)
+	resp, err := e.client.Do(req)
+	if err != nil {
+		ch <- prometheus.MustNewConstMetric(e.up, prometheus.GaugeValue, 0)
+		return fmt.Errorf("error scraping Apache: %w", err)
+	}
+	ch <- prometheus.MustNewConstMetric(e.up, prometheus.GaugeValue, 1)
+
+	data, err := io.ReadAll(resp.Body)
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		if err != nil {
+			data = []byte(err.Error())
+		}
+		return fmt.Errorf("status %s (%d): %s", resp.Status, resp.StatusCode, data)
+	}
+
+	connectionInfo := false
+	version := "UNKNOWN"
+	mpm := "UNKNOWN"
+	balancerName := "UNKNOWN"
+	workerName := "UNKNOWN"
+	cpuUser := 0.0
+	cpuSystem := 0.0
+	cpuFound := false
+	e.proxyBalancerStatus.Reset()
+	e.proxyBalancerBusy.Reset()
+
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+
+	for scanner.Scan() {
+		key, v := splitkv(scanner.Text())
+		if err != nil {
+			continue
+		}
+
+		switch {
+		case key == "ServerVersion":
+			version = v
+			tmpstr := strings.Split(v, "/")[1]
+			tmpstr = strings.Split(tmpstr, " ")[0]
+			vparts := strings.Split(tmpstr, ".")
+			tmpstr = fmt.Sprintf("%s.%02s%03s", vparts[0], vparts[1], vparts[2])
+
+			val, err := strconv.ParseFloat(tmpstr, 64)
+			if err != nil {
+				return err
+			}
+			ch <- prometheus.MustNewConstMetric(e.apacheVersion, prometheus.GaugeValue, val)
+		case key == "ServerMPM":
+			mpm = v
+		case key == "ParentServerConfigGeneration":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.generation.WithLabelValues("config").Set(val)
+		case key == "ParentServerMPMGeneration":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.generation.WithLabelValues("mpm").Set(val)
+		case key == "Load1":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.load.WithLabelValues("1min").Set(val)
+		case key == "Load5":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.load.WithLabelValues("5min").Set(val)
+		case key == "Load15":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.load.WithLabelValues("15min").Set(val)
+		case key == "Total Accesses":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			ch <- prometheus.MustNewConstMetric(e.accessesTotal, prometheus.CounterValue, val)
+		case key == "Total kBytes":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			ch <- prometheus.MustNewConstMetric(e.kBytesTotal, prometheus.CounterValue, val)
+		case key == "Total Duration":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			ch <- prometheus.MustNewConstMetric(e.durationTotal, prometheus.CounterValue, val)
+		case key == "CPUUser":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+
+			cpuUser += val
+			cpuFound = true
+		case key == "CPUChildrenUser":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+
+			cpuUser += val
+			cpuFound = true
+		case key == "CPUSystem":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+
+			cpuSystem += val
+			cpuFound = true
+		case key == "CPUChildrenSystem":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+
+			cpuSystem += val
+			cpuFound = true
+		case key == "CPULoad":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.cpuload.Set(val)
+		case key == "Uptime":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			ch <- prometheus.MustNewConstMetric(e.uptime, prometheus.CounterValue, val)
+		case key == "BusyWorkers":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.workers.WithLabelValues("busy").Set(val)
+		case key == "IdleWorkers":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.workers.WithLabelValues("idle").Set(val)
+		case key == "Processes":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.processes.WithLabelValues("all").Set(val)
+		case key == "Stopping":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.processes.WithLabelValues("stopping").Set(val)
+		case key == "ConnsTotal":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.connections.WithLabelValues("total").Set(val)
+			connectionInfo = true
+		case key == "ConnsAsyncWriting":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.connections.WithLabelValues("writing").Set(val)
+			connectionInfo = true
+		case key == "ConnsAsyncKeepAlive":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.connections.WithLabelValues("keepalive").Set(val)
+			connectionInfo = true
+		case key == "ConnsAsyncClosing":
+			val, err := strconv.ParseFloat(v, 64)
+			if err != nil {
+				return err
+			}
+			e.connections.WithLabelValues("closing").Set(val)
+			connectionInfo = true
+		case key == "Scoreboard":
+			e.updateScoreboard(v)
+			e.scoreboard.Collect(ch)
+
+		//ProxyBalancer[0]Name: balancer://sid2021
+		//ProxyBalancer[0]Worker[0]Name: https://z-app-01:9143
+		//ProxyBalancer[0]Worker[0]Status: Init Ok
+		//ProxyBalancer[0]Worker[0]Elected: 5808
+		//...
+		case reProxyBalName.MatchString(key):
+			balancerName = v
+		case reProxyBalWorker.MatchString(key):
+			key := reProxyBalWorker.FindStringSubmatch(key)[1]
+			switch key {
+			case "Name":
+				workerName = v
+			case "Status":
+				e.proxyBalancerStatus.WithLabelValues(balancerName, workerName, v).Set(1)
+			case "Elected":
+				val, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return err
+				}
+				ch <- prometheus.MustNewConstMetric(e.proxyBalancerElected, prometheus.CounterValue, val, balancerName, workerName)
+			case "Busy":
+				val, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return err
+				}
+				e.proxyBalancerBusy.WithLabelValues(balancerName, workerName).Set(val)
+			case "Sent":
+				val, err := strconv.ParseFloat(strings.TrimRight(v, "kK"), 64)
+				if err != nil {
+					return err
+				}
+				ch <- prometheus.MustNewConstMetric(e.proxyBalancerReqSize, prometheus.CounterValue, val, balancerName, workerName)
+			case "Rcvd":
+				val, err := strconv.ParseFloat(strings.TrimRight(v, "kK"), 64)
+				if err != nil {
+					return err
+				}
+				ch <- prometheus.MustNewConstMetric(e.proxyBalancerRespSize, prometheus.CounterValue, val, balancerName, workerName)
+			}
+		}
+	}
+
+	if cpuFound {
+		ch <- prometheus.MustNewConstMetric(e.cpuTotal, prometheus.CounterValue, 1000*cpuUser, "user")
+		ch <- prometheus.MustNewConstMetric(e.cpuTotal, prometheus.CounterValue, 1000*cpuSystem, "system")
+	}
+
+	e.apacheInfo.WithLabelValues(version, mpm).Set(1)
+
+	e.apacheInfo.Collect(ch)
+	e.generation.Collect(ch)
+	e.load.Collect(ch)
+	e.cpuload.Collect(ch)
+	e.workers.Collect(ch)
+	e.processes.Collect(ch)
+	if connectionInfo {
+		e.connections.Collect(ch)
+	}
+
+	e.proxyBalancerStatus.Collect(ch)
+	e.proxyBalancerBusy.Collect(ch)
+
+	return nil
+}
+
+// Collect implements Prometheus.Collector.
+func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
+	e.mutex.Lock() // To protect metrics from concurrent collects.
+	defer e.mutex.Unlock()
+	if err := e.collect(ch); err != nil {
+		level.Error(e.logger).Log("msg", "Error scraping Apache:", "err", err)
+		e.scrapeFailures.Inc()
+		e.scrapeFailures.Collect(ch)
+	}
+}
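
For reference, the exporter package added by this patch can also be exercised on its own. The sketch below is not part of the patch: it assumes an Apache instance serving mod_status at localhost (metrics marked `(*)` in the help strings generally require `ExtendedStatus On` in the Apache configuration), and it registers the `Exporter` with a plain `prometheus.Registry` rather than going through the categraf agent. Metric names come from the descriptors defined in `collector.go`.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/promlog"

	"flashcat.cloud/categraf/inputs/apache/exporter"
)

func main() {
	// Logger setup mirrors what inputs/apache/apache.go does in Init().
	logger := promlog.New(&promlog.Config{})

	// The scrape URI is an assumption; adjust to your server-status endpoint.
	e, err := exporter.New(logger, &exporter.Config{
		ScrapeURI: "http://localhost/server-status/?auto",
	})
	if err != nil {
		panic(err)
	}
	defer e.Close()

	// Registering the Exporter and calling Gather triggers one scrape
	// via Exporter.Collect.
	reg := prometheus.NewRegistry()
	reg.MustRegister(e)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // e.g. apache_up, apache_accesses_total, ...
	}
}
```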