From 425556f62ef3739e908763258c961dcc3cb1305d Mon Sep 17 00:00:00 2001 From: wanjunlei Date: Mon, 30 Nov 2020 16:16:37 +0800 Subject: [PATCH 1/3] update helm chart Signed-off-by: wanjunlei --- helm/Chart.yaml | 4 ++-- helm/templates/clusterrolebindings.yaml | 2 +- helm/templates/clusterroles.yaml | 2 +- helm/templates/notificationmanagers.yaml | 26 +++++++++++++----------- helm/templates/rolebindings.yaml | 2 +- helm/templates/roles.yaml | 2 +- helm/templates/serviceaccount.yaml | 2 +- helm/templates/services.yaml | 2 +- helm/values.yaml | 16 +++++++-------- 9 files changed, 30 insertions(+), 28 deletions(-) diff --git a/helm/Chart.yaml b/helm/Chart.yaml index 36c9b983..e095d703 100644 --- a/helm/Chart.yaml +++ b/helm/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: 1.0.0 +appVersion: 0.1.0 description: Notification Manager manages notifications in multi-tenant K8s environment. It receives alerts or notifications from different senders and then send notifications to various tenant receivers based on alerts/notifications' tenant label like "namespace". name: notification-manager -version: 1.0.0 +version: 0.1.0 diff --git a/helm/templates/clusterrolebindings.yaml b/helm/templates/clusterrolebindings.yaml index 1a435e6f..aeac4bf2 100644 --- a/helm/templates/clusterrolebindings.yaml +++ b/helm/templates/clusterrolebindings.yaml @@ -24,4 +24,4 @@ subjects: - kind: ServiceAccount name: notification-manager-sa namespace: {{ .Release.Namespace }} - \ No newline at end of file + diff --git a/helm/templates/clusterroles.yaml b/helm/templates/clusterroles.yaml index 31dd62c2..ddab9fe6 100644 --- a/helm/templates/clusterroles.yaml +++ b/helm/templates/clusterroles.yaml @@ -104,4 +104,4 @@ rules: - subjectaccessreviews verbs: - create - \ No newline at end of file + diff --git a/helm/templates/notificationmanagers.yaml b/helm/templates/notificationmanagers.yaml index 165a8b30..2f21521a 100644 --- a/helm/templates/notificationmanagers.yaml +++ b/helm/templates/notificationmanagers.yaml @@ -5,26 +5,28 @@ metadata: app: notification-manager name: notification-manager spec: + {{- if .Values.notificationmanager.replicas }} replicas: {{ .Values.notificationmanager.replicas }} + {{- end}} image: {{ .Values.notificationmanager.image.repo }}:{{ .Values.notificationmanager.image.tag }} imagePullPolicy: {{ .Values.notificationmanager.image.pullPolicy }} serviceAccountName: notification-manager-sa portName: webhook - nodeSelector: + nodeSelector: {{- toYaml .Values.notificationmanager.nodeSelector | nindent 4 }} - affinity: + affinity: {{- toYaml .Values.notificationmanager.affinity | nindent 4 }} - tolerations: - {{- toYaml .Values.notificationmanager.tolerations | nindent 4 }} - resources: - {{- toYaml .Values.notificationmanager.resources | nindent 4 }} - receivers: - {{- toYaml .Values.notificationmanager.receivers | nindent 4 }} + tolerations: + {{- toYaml .Values.notificationmanager.tolerations | nindent 4 }} + resources: + {{- toYaml .Values.notificationmanager.resources | nindent 4 }} + receivers: + {{- toYaml .Values.notificationmanager.receivers | nindent 4 }} defaultConfigSelector: - {{- toYaml .Values.notificationmanager.defaultConfigSelector | nindent 4 }} + {{- toYaml .Values.notificationmanager.defaultConfigSelector | nindent 4 }} notificationManagerNamespaces: - {{- toYaml .Values.notificationmanager.notificationManagerNamespaces | nindent 4 }} + {{- toYaml .Values.notificationmanager.notificationManagerNamespaces | nindent 4 }} volumeMounts: - {{- toYaml 
.Values.notificationmanager.volumeMounts | nindent 4 }} + {{- toYaml .Values.notificationmanager.volumeMounts | nindent 4 }} volumes: - {{- toYaml .Values.notificationmanager.volumes | nindent 4 }} + {{- toYaml .Values.notificationmanager.volumes | nindent 4 }} diff --git a/helm/templates/rolebindings.yaml b/helm/templates/rolebindings.yaml index c6fda2f1..b724140f 100644 --- a/helm/templates/rolebindings.yaml +++ b/helm/templates/rolebindings.yaml @@ -10,4 +10,4 @@ subjects: - kind: ServiceAccount name: notification-manager-sa namespace: {{ .Release.Namespace }} - \ No newline at end of file + diff --git a/helm/templates/roles.yaml b/helm/templates/roles.yaml index e003dd28..6a3305a8 100644 --- a/helm/templates/roles.yaml +++ b/helm/templates/roles.yaml @@ -29,4 +29,4 @@ rules: - events verbs: - create - \ No newline at end of file + diff --git a/helm/templates/serviceaccount.yaml b/helm/templates/serviceaccount.yaml index 92f8e75c..f3f72e5b 100644 --- a/helm/templates/serviceaccount.yaml +++ b/helm/templates/serviceaccount.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: notification-manager-sa - \ No newline at end of file + diff --git a/helm/templates/services.yaml b/helm/templates/services.yaml index 1059ced1..f5663520 100644 --- a/helm/templates/services.yaml +++ b/helm/templates/services.yaml @@ -14,4 +14,4 @@ spec: control-plane: controller-manager sessionAffinity: None type: ClusterIP - \ No newline at end of file + diff --git a/helm/values.yaml b/helm/values.yaml index e6eada71..d5edb51b 100644 --- a/helm/values.yaml +++ b/helm/values.yaml @@ -9,10 +9,10 @@ operator: resources: limits: cpu: 50m - memory: 30Mi + memory: 50Mi requests: - cpu: 50m - memory: 30Mi + cpu: 5m + memory: 10Mi operator: image: repo: kubesphere/notification-manager-operator @@ -21,10 +21,10 @@ operator: resources: limits: cpu: 50m - memory: 30Mi + memory: 50Mi requests: - cpu: 50m - memory: 30Mi + cpu: 5m + memory: 20Mi nodeSelector: {} tolerations: [] affinity: {} @@ -39,9 +39,9 @@ notificationmanager: resources: limits: cpu: 500m - memory: 1Gi + memory: 500Mi requests: - cpu: 100m + cpu: 5m memory: 20Mi nodeSelector: {} tolerations: [] From 8d87d71973929b1ba1d357e23daef6fafa1b2ca8 Mon Sep 17 00:00:00 2001 From: wanjunlei Date: Tue, 1 Dec 2020 18:38:12 +0800 Subject: [PATCH 2/3] add adapter sink Signed-off-by: wanjunlei --- adapter/sink/Dockerfile | 29 ++ adapter/sink/Makefile | 24 ++ adapter/sink/README.md | 3 + adapter/sink/cmd/main.go | 380 ++++++++++++++++++++++++++ adapter/sink/cmd/types.go | 206 ++++++++++++++ adapter/sink/deploy/yaml/adapter.yaml | 69 +++++ adapter/sink/test/alert.json | 191 +++++++++++++ adapter/sink/test/samples/Dockerfile | 28 ++ adapter/sink/test/samples/main.go | 98 +++++++ adapter/sink/test/samples/socket.yaml | 43 +++ adapter/sink/test/send_alerts.sh | 2 + go.mod | 6 +- go.sum | 3 + 13 files changed, 1081 insertions(+), 1 deletion(-) create mode 100644 adapter/sink/Dockerfile create mode 100644 adapter/sink/Makefile create mode 100644 adapter/sink/README.md create mode 100644 adapter/sink/cmd/main.go create mode 100644 adapter/sink/cmd/types.go create mode 100644 adapter/sink/deploy/yaml/adapter.yaml create mode 100644 adapter/sink/test/alert.json create mode 100644 adapter/sink/test/samples/Dockerfile create mode 100644 adapter/sink/test/samples/main.go create mode 100644 adapter/sink/test/samples/socket.yaml create mode 100755 adapter/sink/test/send_alerts.sh diff --git a/adapter/sink/Dockerfile b/adapter/sink/Dockerfile new file mode 100644 index 
00000000..489dabd9 --- /dev/null +++ b/adapter/sink/Dockerfile @@ -0,0 +1,29 @@ +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by an Apache license +# that can be found in the LICENSE file. + +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by an Apache license +# that can be found in the LICENSE file. + +FROM golang:1.13 as notification-adapter + +COPY cmd/main.go / +COPY cmd/type.go / +WORKDIR / +ENV GOPROXY=https://goproxy.io +RUN CGO_ENABLED=0 GO111MODULE=on go build -i -ldflags '-w -s' -o notification-adapter main.go type.go + +FROM alpine:3.9 + +COPY --from=notification-adapter /notification-adapter /usr/local/bin/ + +RUN apk add --update ca-certificates && update-ca-certificates +RUN apk add curl +RUN adduser -D -g kubesphere -u 1002 kubesphere +RUN chown -R kubesphere:kubesphere /usr/local/bin/notification-adapter +RUN apk add libcap +RUN setcap 'CAP_NET_BIND_SERVICE=+ep' /usr/local/bin/notification-adapter + +USER kubesphere +CMD ["sh"]
diff --git a/adapter/sink/Makefile b/adapter/sink/Makefile new file mode 100644 index 00000000..50cb68ed --- /dev/null +++ b/adapter/sink/Makefile @@ -0,0 +1,24 @@ +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by an Apache license +# that can be found in the LICENSE file. + +IMG ?= kubespheredev/notification-adapter:v0.1.0 +AMD64 ?= -amd64 + +all: docker-build + +# Build the notification-adapter binary +notification-adapter: + go build -o notification-adapter cmd/main.go + +# Build and push the multi-arch docker image +docker-build: + docker buildx build --platform linux/amd64,linux/arm64 --push -f Dockerfile -t ${IMG} . + +# Build the docker image for amd64 +docker-build-amd64: + docker build -f Dockerfile -t ${IMG}${AMD64} . + +# Push the amd64 docker image +push-amd64: + docker push ${IMG}${AMD64}
diff --git a/adapter/sink/README.md b/adapter/sink/README.md new file mode 100644 index 00000000..319781d1 --- /dev/null +++ b/adapter/sink/README.md @@ -0,0 +1,3 @@ +# notification-adapter + +Notification-adapter receives notifications from Notification Manager and exports them over a socket. \ No newline at end of file
diff --git a/adapter/sink/cmd/main.go b/adapter/sink/cmd/main.go new file mode 100644 index 00000000..fad9bb06 --- /dev/null +++ b/adapter/sink/cmd/main.go @@ -0,0 +1,380 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package main + +import ( + "bytes" + "context" + "flag" + "fmt" + "github.com/emicklei/go-restful" + "github.com/golang/glog" + "github.com/prometheus/common/model" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/text/encoding/simplifiedchinese" + "golang.org/x/text/transform" + "io/ioutil" + "log" + "net" + "net/http" + "strconv" + "sync" + "time" +) + +const ( + TivoliAlertUnknown = 1 + TivoliAlertError = 3 + TivoliAlertCritical = 4 + + GoroutinesNumMax = 100 + WaitGoroutinesTimeout = 5 * time.Second + SendMessageTimeout = 5 * time.Second +) + +var ( + ip string + port int + goroutinesNum int + chanLen int + waitGoroutinesTimeout time.Duration + sendTimeout time.Duration + ch chan *Alert + si *Statistics + waitHandlerGroup sync.WaitGroup +) + +func main() { + + cmd := NewServerCommand() + + if err := cmd.Execute(); err != nil { + log.Fatalln(err) + } +} + +func AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&ip, "ip", "", "The ip of the socket server to send messages to") + fs.IntVar(&port, "port", 8080, "The port of the socket server") + fs.IntVar(&goroutinesNum, "goroutines-num", GoroutinesNumMax, "the max number of goroutines used to send alerts, default 100") + fs.IntVar(&chanLen, "channel-len", 1000, "the capacity of the alert channel, default 1000") + fs.DurationVar(&waitGoroutinesTimeout, "wait-timeout", WaitGoroutinesTimeout, "the time to wait for a free goroutine, default 5s") + fs.DurationVar(&sendTimeout, "send-timeout", SendMessageTimeout, "the timeout for sending a message, default 5s") +} + +func NewServerCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "notification-adapter", + Long: `A webhook that receives alerts from Notification Manager and forwards them to a socket server`, + RunE: func(cmd *cobra.Command, args []string) error { + return Run() + }, + } + AddFlags(cmd.Flags()) + cmd.Flags().AddGoFlagSet(flag.CommandLine) + + return cmd +} + +func Run() error { + + pflag.VisitAll(func(flag *pflag.Flag) { + glog.Errorf("FLAG: --%s=%q", flag.Name, flag.Value) + }) + + ch = make(chan *Alert, chanLen) + si = NewStatisticsInfo(ch) + + go work() + + return httpserver() +} + +func httpserver() error { + container := restful.NewContainer() + ws := new(restful.WebService) + ws.Path(""). + Consumes(restful.MIME_JSON).
+ Produces(restful.MIME_JSON) + ws.Route(ws.GET("/statistics/status").To(statisticsStatusGet)) + ws.Route(ws.PUT("/statistics/status").To(statisticsStatusUpdate)) + ws.Route(ws.GET("/statistics/freshTime").To(statisticsFreshTimeGet)) + ws.Route(ws.PUT("/statistics/freshTime").To(statisticsFreshTimeUpdate)) + ws.Route(ws.GET("/statistics/info").To(statisticsInfo)) + ws.Route(ws.POST("/alerts").To(handler)) + ws.Route(ws.GET("/readiness").To(readiness)) + ws.Route(ws.GET("/liveness").To(readiness)) + ws.Route(ws.GET("/preStop").To(preStop)) + + container.Add(ws) + + server := &http.Server{ + Addr: ":8080", + Handler: container, + } + + if err := server.ListenAndServe(); err != nil { + glog.Fatal(err) + } + + return nil +} + +func work() { + routinesChan := make(chan interface{}, goroutinesNum) + + for { + alert := <-ch + if alert == nil { + break + } + alert.PullTime = time.Now() + + if err := tryAdd(routinesChan, waitGoroutinesTimeout); err != nil { + alert.WaitRoutinesTimeout = true + si.StatisticsStep(alert) + glog.Error("get goroutines timeout") + continue + } + + go func() { + defer func() { + <-routinesChan + si.StatisticsStep(alert) + }() + + stopCh := make(chan interface{}, 1) + go func() { + sendMessage(alert) + close(stopCh) + }() + + if err := wait(stopCh, sendTimeout); err != nil { + alert.SendTimeout = true + glog.Error("send alert timeout") + return + } + + alert.SendTime = time.Now() + }() + } +} + +func tryAdd(ch chan interface{}, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + select { + case ch <- struct{}{}: + return nil + case <-ctx.Done(): + return fmt.Errorf("timeout") + } +} + +func wait(ch chan interface{}, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + select { + case <-ch: + return nil + case <-ctx.Done(): + return fmt.Errorf("timeout") + } +} + +func handler(req *restful.Request, resp *restful.Response) { + + waitHandlerGroup.Add(1) + defer waitHandlerGroup.Done() + + body, err := ioutil.ReadAll(req.Request.Body) + if err != nil { + glog.Errorf("read request body error, %s", err) + err := resp.WriteHeaderAndEntity(http.StatusBadRequest, "") + if err != nil { + glog.Errorf("response error %s", err) + } + return + } + + alerts, err := NewAlerts(body) + if err != nil { + err := resp.WriteHeaderAndEntity(http.StatusBadRequest, "") + if err != nil { + glog.Errorf("response error %s", err) + } + return + } + + for _, alert := range alerts { + alert.PushTime = time.Now() + ch <- alert + } + + err = resp.WriteHeaderAndEntity(http.StatusOK, "") + if err != nil { + glog.Errorf("response error %s", err) + } +} + +func sendMessage(alert *Alert) { + + msg := getMessage(alert) + if len(msg) == 0 { + return + } + + addr := fmt.Sprintf("%s:%d", ip, port) + conn, err := net.DialTimeout("tcp", addr, sendTimeout) + if err != nil { + glog.Errorf("connect to %s error, %s", addr, err.Error()) + return + } + defer func() { + if err := conn.Close(); err != nil { + glog.Errorf("close connect error, %s", err.Error()) + } + }() + + reader := transform.NewReader(bytes.NewReader([]byte(msg)), simplifiedchinese.GBK.NewEncoder()) + bs, err := ioutil.ReadAll(reader) + if err != nil { + glog.Errorf("transform msg error, %s", err.Error()) + return + } + + body := bs + size := 0 + for { + n, err := conn.Write(body) + if err != nil { + glog.Errorf("write error, %s", err.Error()) + return + } + + size = size + n + if size == len(bs) { + break + } + + body = bs[size:]
+ } + + return +} + +func getMessage(alert *Alert) string { + + level := TivoliAlertUnknown + if t, ok := alert.Labels["severity"]; ok { + switch t { + case "critical": + level = TivoliAlertCritical + case "error", "warning": + level = TivoliAlertError + } + } + + if level < TivoliAlertError { + return "" + } + + return fmt.Sprintf("%s#%d#%s#%s#%s#%s#%d\n", + alert.Labels["cluster"], + model.LabelsToSignature(alert.Labels), + alert.Labels["alertname"], + alert.Labels["namespace"], + alert.Annotations["message"], + alert.Annotations["summaryCn"], + level) +} + +// readiness handles the readiness and liveness probes +func readiness(_ *restful.Request, resp *restful.Response) { + + responseWithHeaderAndEntity(resp, http.StatusOK, "") +} + +// preStop waits for in-flight handlers to finish before shutdown +func preStop(_ *restful.Request, resp *restful.Response) { + + waitHandlerGroup.Wait() + glog.Error("msg handlers finished, closing the alert channel") + close(ch) + responseWithHeaderAndEntity(resp, http.StatusOK, "") + glog.Flush() +} + +// get the statistics fresh time +func statisticsFreshTimeGet(_ *restful.Request, resp *restful.Response) { + responseWithJson(resp, si.FreshTime) +} + +// set the statistics fresh time +func statisticsFreshTimeUpdate(req *restful.Request, resp *restful.Response) { + + s := req.QueryParameter("freshTime") + n, err := strconv.Atoi(s) + if err != nil { + responseWithHeaderAndEntity(resp, http.StatusBadRequest, "parameter error") + return + } + + si.SetFreshTime(time.Second * time.Duration(n)) + responseWithJson(resp, "Success") +} + +// get the statistics status +func statisticsStatusGet(_ *restful.Request, resp *restful.Response) { + responseWithJson(resp, si.Enable) +} + +// set the statistics status +func statisticsStatusUpdate(req *restful.Request, resp *restful.Response) { + + enable := req.QueryParameter("enable") + b, err := strconv.ParseBool(enable) + if err != nil { + responseWithHeaderAndEntity(resp, http.StatusBadRequest, "parameter error") + return + } + + si.Enable = b + responseWithJson(resp, "Success") +} + +// get the statistics info +func statisticsInfo(_ *restful.Request, resp *restful.Response) { + responseWithJson(resp, si.Print()) +} + +func responseWithJson(resp *restful.Response, value interface{}) { + e := resp.WriteAsJson(value) + if e != nil { + glog.Errorf("response error %s", e) + } +} + +func responseWithHeaderAndEntity(resp *restful.Response, status int, value interface{}) { + e := resp.WriteHeaderAndEntity(status, value) + if e != nil { + glog.Errorf("response error %s", e) + } +}
diff --git a/adapter/sink/cmd/types.go b/adapter/sink/cmd/types.go new file mode 100644 index 00000000..415d145b --- /dev/null +++ b/adapter/sink/cmd/types.go @@ -0,0 +1,206 @@ +package main + +import ( + "encoding/json" + "fmt" + "github.com/golang/glog" + "github.com/json-iterator/go" + "github.com/prometheus/alertmanager/template" + "sync" + "time" +) + +type Alert struct { + *template.Alert + *TraceInfo +} + +type TraceInfo struct { + //The time when the data was received + ReceivedTime time.Time + //The time when the data was pushed to the channel.
+ PushTime time.Time + //The time when the data was pulled from the channel + PullTime time.Time + //Whether the alert timed out waiting for a free goroutine + WaitRoutinesTimeout bool + //The time when the message was sent completely + SendTime time.Time + //Whether sending the message timed out + SendTimeout bool +} + +func NewAlerts(data []byte) ([]*Alert, error) { + + var d template.Data + + err := jsoniter.Unmarshal(data, &d) + if err != nil { + glog.Errorf("unmarshal failed with: %v, body is: %s", err, string(data)) + return nil, err + } + + var as []*Alert + for i := range d.Alerts { + for k, v := range d.CommonLabels { + d.Alerts[i].Labels[k] = v + } + alert := Alert{ + &d.Alerts[i], + &TraceInfo{ReceivedTime: time.Now()}, + } + as = append(as, &alert) + } + + return as, nil +} + +type Statistics struct { + ch chan *Alert + + Enable bool + FreshTime time.Duration + + wakeup chan int + + WaitToChanSum int64 + WaitToChanCount int + InChanSum int64 + InChanCount int + SendSum int64 + SendCount int + SendSuccessSum int64 + SendSuccessCount int + WaitGoroutinesTimeoutCount int + SendTimeoutCount int + + mu sync.Mutex +} + +func NewStatisticsInfo(ch chan *Alert) *Statistics { + info := &Statistics{ + ch: ch, + FreshTime: time.Minute * 5, + Enable: true, + } + + info.wakeup = make(chan int) + + go info.Watch() + + return info +} + +func (si *Statistics) Watch() { + + to := time.NewTimer(si.FreshTime) + for { + to.Reset(si.FreshTime) + select { + case <-si.wakeup: + case <-to.C: + si.Refresh() + } + } +} + +func (si *Statistics) StatisticsStep(a *Alert) { + + if !si.Enable || a == nil { + return + } + + si.mu.Lock() + defer si.mu.Unlock() + + if a.PushTime.Unix() > 0 { + si.WaitToChanSum += a.PushTime.UnixNano() - a.ReceivedTime.UnixNano() + si.WaitToChanCount++ + } + + if a.PullTime.Unix() > 0 { + si.InChanSum += a.PullTime.UnixNano() - a.PushTime.UnixNano() + si.InChanCount++ + } + + if a.SendTime.Unix() > 0 { + si.SendSum += a.SendTime.UnixNano() - a.PullTime.UnixNano() + si.SendCount++ + } + + if a.WaitRoutinesTimeout { + si.WaitGoroutinesTimeoutCount++ + } + + if a.SendTimeout { + si.SendTimeoutCount++ + } +} + +func (si *Statistics) SetFreshTime(t time.Duration) { + si.FreshTime = t + si.wakeup <- 1 +} + +func (si *Statistics) Refresh() { + + if !si.Enable { + return + } + + bs, err := json.Marshal(si.Print()) + if err != nil { + glog.Error(err) + } else { + glog.Error(string(bs)) + } + + si.mu.Lock() + defer si.mu.Unlock() + + si.WaitToChanSum = 0 + si.WaitToChanCount = 0 + si.InChanSum = 0 + si.InChanCount = 0 + si.SendSum = 0 + si.SendCount = 0 + si.WaitGoroutinesTimeoutCount = 0 + si.SendTimeoutCount = 0 +} + +func (si *Statistics) Print() map[string]string { + + si.mu.Lock() + defer si.mu.Unlock() + + m := make(map[string]string) + + if si.Enable { + + m["Length"] = fmt.Sprint(len(si.ch)) + + if si.WaitToChanCount > 0 { + m["WaitToChanSum"] = fmt.Sprintf("%.3fms", float64(si.WaitToChanSum)/1e6) + m["WaitToChanCount"] = fmt.Sprint(si.WaitToChanCount) + m["WaitToChanAverage"] = fmt.Sprintf("%.3fms", float64(si.WaitToChanSum)/float64(si.WaitToChanCount)/1e6) + } + if si.InChanCount > 0 { + m["InChanSum"] = fmt.Sprintf("%.3fms", float64(si.InChanSum)/1e6) + m["InChanCount"] = fmt.Sprint(si.InChanCount) + m["InChanAverage"] = fmt.Sprintf("%.3fms", float64(si.InChanSum)/float64(si.InChanCount)/1e6) + } + if si.SendCount > 0 { + m["SendSum"] = fmt.Sprintf("%.3fms", float64(si.SendSum)/1e6) + m["SendCount"] = fmt.Sprint(si.SendCount) + m["SendAverage"] = fmt.Sprintf("%.3fms", float64(si.SendSum)/float64(si.SendCount)/1e6) + } + if
si.WaitGoroutinesTimeoutCount > 0 { + m["WaitGoroutinesTimeoutCount"] = fmt.Sprint(si.WaitGoroutinesTimeoutCount) + } + if si.SendTimeoutCount > 0 { + m["MatchTimeoutCount"] = fmt.Sprint(si.SendTimeoutCount) + } + } + + return m +} diff --git a/adapter/sink/deploy/yaml/adapter.yaml b/adapter/sink/deploy/yaml/adapter.yaml new file mode 100644 index 00000000..66c7478e --- /dev/null +++ b/adapter/sink/deploy/yaml/adapter.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: notification-adapter + namespace: kubesphere-monitoring-system +spec: + replicas: 1 + selector: + matchLabels: + name: notification-adapter + template: + metadata: + labels: + name: notification-adapter + spec: + containers: + - name: notification-adapter + image: kubespheredev/notification-adapter:latest + command: + - notification-adapter + - --ip=socket-server.kubesphere-monitoring-system + imagePullPolicy: Always + volumeMounts: + - mountPath: /etc/localtime + name: host-time + lifecycle: + preStop: + httpGet: + path: /preStop + port: 8080 + scheme: HTTP + livenessProbe: + failureThreshold: 3 + httpGet: + path: /readiness + port: 8080 + scheme: HTTP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readiness + port: 8080 + scheme: HTTP + resources: + limits: + cpu: 200m + memory: 500Mi + requests: + cpu: 20m + memory: 50Mi + volumes: + - hostPath: + path: /etc/localtime + type: "" + name: host-time + +--- +apiVersion: v1 +kind: Service +metadata: + name: notification-adapter + namespace: kubesphere-monitoring-system +spec: + selector: + name: notification-adapter + ports: + - name: http + port: 8080 + targetPort: 8080 diff --git a/adapter/sink/test/alert.json b/adapter/sink/test/alert.json new file mode 100644 index 00000000..7151db1d --- /dev/null +++ b/adapter/sink/test/alert.json @@ -0,0 +1,191 @@ +{ + "receiver": "Default", + "status": "firing", + "alerts": [ + { + "status": "firing", + "labels": { + "alertname": "CPUThrottlingHigh", + "container": "calico-kube-controllers", + "namespace": "kube-system", + "pod": "calico-kube-controllers-67f5689d7-sqg89", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "annotations": { + "message": "30.21% throttling of CPU in namespace kube-system for container calico-kube-controllers in pod calico-kube-controllers-67f5689d7-sqg89.", + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "startsAt": "2020-03-21T05:45:44.782098546Z", + "endsAt": "0001-01-01T00:00:00Z", + "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", + "fingerprint": "83fb3d34d52108b0" + }, + { + "status": "firing", + "labels": { + "alertname": "CPUThrottlingHigh", + "container": "csi-qingcloud", + "namespace": "kube-system", + "pod": "csi-qingcloud-node-79kkf", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "annotations": { + "message": "39.64% throttling of CPU in namespace kube-system for container csi-qingcloud in pod csi-qingcloud-node-79kkf.", + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "startsAt": "2020-02-26T07:05:14.782098546Z", + "endsAt": 
"0001-01-01T00:00:00Z", + "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", + "fingerprint": "84d1dea123d5625e" + }, + { + "status": "firing", + "labels": { + "alertname": "CPUThrottlingHigh", + "container": "csi-qingcloud", + "namespace": "kube-system", + "pod": "csi-qingcloud-node-7x9xj", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "annotations": { + "message": "41.86% throttling of CPU in namespace kube-system for container csi-qingcloud in pod csi-qingcloud-node-7x9xj.", + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "startsAt": "2020-04-17T01:27:44.782098546Z", + "endsAt": "0001-01-01T00:00:00Z", + "generatorURL": "http://prometheus-k8s-1:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", + "fingerprint": "6ce3db5eb4926886" + }, + { + "status": "firing", + "labels": { + "alertname": "CPUThrottlingHigh", + "container": "csi-qingcloud", + "namespace": "kube-system", + "pod": "csi-qingcloud-node-fflql", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "annotations": { + "message": "37.5% throttling of CPU in namespace kube-system for container csi-qingcloud in pod csi-qingcloud-node-fflql.", + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "startsAt": "2020-02-26T07:04:44.782098546Z", + "endsAt": "0001-01-01T00:00:00Z", + "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", + "fingerprint": "7c7ffa680c387617" + }, + { + "status": "firing", + "labels": { + "alertname": "CPUThrottlingHigh", + "container": "csi-qingcloud", + "namespace": "kube-system", + "pod": "csi-qingcloud-node-gcjt2", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "annotations": { + "message": "30.51% throttling of CPU in namespace kube-system for container csi-qingcloud in pod csi-qingcloud-node-gcjt2.", + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "startsAt": "2020-04-17T00:53:14.782098546Z", + "endsAt": "0001-01-01T00:00:00Z", + "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", + "fingerprint": "70f57e4d878a83b0" + }, + { + "status": "firing", + "labels": { + "alertname": "CPUThrottlingHigh", + "container": "csi-qingcloud", + "namespace": 
"kube-system", + "pod": "csi-qingcloud-node-l5zwr", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "annotations": { + "message": "32.08% throttling of CPU in namespace kube-system for container csi-qingcloud in pod csi-qingcloud-node-l5zwr.", + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "startsAt": "2020-04-15T21:14:14.782098546Z", + "endsAt": "0001-01-01T00:00:00Z", + "generatorURL": "http://prometheus-k8s-1:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", + "fingerprint": "a390724364453510" + }, + { + "status": "firing", + "labels": { + "alertname": "CPUThrottlingHigh", + "container": "csi-qingcloud", + "namespace": "kube-system", + "pod": "csi-qingcloud-node-zxmj4", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "annotations": { + "message": "32.65% throttling of CPU in namespace kube-system for container csi-qingcloud in pod csi-qingcloud-node-zxmj4.", + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "startsAt": "2020-04-16T22:48:44.782098546Z", + "endsAt": "0001-01-01T00:00:00Z", + "generatorURL": "http://prometheus-k8s-1:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", + "fingerprint": "325cf83012fa4441" + }, + { + "status": "firing", + "labels": { + "alertname": "CPUThrottlingHigh", + "container": "csi-resizer", + "namespace": "kube-system", + "pod": "csi-qingcloud-controller-787679c7dd-lhg9p", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "annotations": { + "message": "28.18% throttling of CPU in namespace kube-system for container csi-resizer in pod csi-qingcloud-controller-787679c7dd-lhg9p.", + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "startsAt": "2020-04-16T19:55:14.782098546Z", + "endsAt": "0001-01-01T00:00:00Z", + "generatorURL": "http://prometheus-k8s-1:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", + "fingerprint": "b75868ccf5391023" + }, + { + "status": "firing", + "labels": { + "alertname": "CPUThrottlingHigh", + "container": "csi-snapshotter", + "namespace": "kube-system", + "pod": "csi-qingcloud-controller-787679c7dd-lhg9p", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "annotations": { + "message": "27.98% throttling of CPU in namespace kube-system for container csi-snapshotter in pod csi-qingcloud-controller-787679c7dd-lhg9p.", + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "startsAt": "2020-04-16T23:12:14.782098546Z", + 
"endsAt": "0001-01-01T00:00:00Z", + "generatorURL": "http://prometheus-k8s-1:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", + "fingerprint": "8d30069b6577cb0e" + } + ], + "groupLabels": { + "alertname": "CPUThrottlingHigh", + "namespace": "kube-system" + }, + "commonLabels": { + "alertname": "CPUThrottlingHigh", + "namespace": "kube-system", + "prometheus": "kubesphere-monitoring-system/k8s", + "severity": "warning" + }, + "commonAnnotations": { + "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" + }, + "externalURL": "http://alertmanager-main-1:9093" +} diff --git a/adapter/sink/test/samples/Dockerfile b/adapter/sink/test/samples/Dockerfile new file mode 100644 index 00000000..b17b602d --- /dev/null +++ b/adapter/sink/test/samples/Dockerfile @@ -0,0 +1,28 @@ +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by a Apache license +# that can be found in the LICENSE file. + +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by a Apache license +# that can be found in the LICENSE file. + +FROM golang:1.13 as socket-server + +COPY / / +WORKDIR / +ENV GOPROXY=https://goproxy.io +RUN CGO_ENABLED=0 GO111MODULE=on go build -i -ldflags '-w -s' -o socket-server main.go + +FROM alpine:3.9 + +COPY --from=socket-server /socket-server /usr/local/bin/ + +RUN apk add --update ca-certificates && update-ca-certificates +RUN apk add curl +RUN adduser -D -g kubesphere -u 1002 kubesphere +RUN chown -R kubesphere:kubesphere /usr/local/bin/socket-server +RUN apk add libcap +RUN setcap 'CAP_NET_BIND_SERVICE=+ep' /usr/local/bin/socket-server + +USER kubesphere +CMD ["sh"] diff --git a/adapter/sink/test/samples/main.go b/adapter/sink/test/samples/main.go new file mode 100644 index 00000000..b70aee4c --- /dev/null +++ b/adapter/sink/test/samples/main.go @@ -0,0 +1,98 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "bytes" + "flag" + "fmt" + "github.com/golang/glog" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/text/encoding/simplifiedchinese" + "golang.org/x/text/transform" + "io/ioutil" + "log" + "net" +) + +var ( + port int +) + +func AddFlags(fs *pflag.FlagSet) { + fs.IntVar(&port, "port", 8080, "Socket port") +} + +func main() { + cmd := newServerCommand() + + if err := cmd.Execute(); err != nil { + log.Fatalln(err) + } +} + +func newServerCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "socket-server", + Long: `A test socket server that receives messages from the notification adapter and prints them`, + RunE: func(cmd *cobra.Command, args []string) error { + return Run() + }, + } + AddFlags(cmd.Flags()) + cmd.Flags().AddGoFlagSet(flag.CommandLine) + + return cmd +} + +func Run() error { + + pflag.VisitAll(func(flag *pflag.Flag) { + glog.Errorf("FLAG: --%s=%q", flag.Name, flag.Value) + }) + + l, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%d", port)) + if err != nil { + return err + } + + for { + conn, err := l.Accept() + if err != nil { + fmt.Printf("accept error, %s\n", err.Error()) + continue + } + + go func() { + bs, err := ioutil.ReadAll(conn) + if err != nil { + fmt.Printf("read error, %s\n", err.Error()) + return + } + + reader := transform.NewReader(bytes.NewReader(bs), simplifiedchinese.GBK.NewDecoder()) + d, err := ioutil.ReadAll(reader) + if err != nil { + fmt.Printf("transform error, %s\n", err.Error()) + return + } + + fmt.Println(string(d)) + }() + } +}
diff --git a/adapter/sink/test/samples/socket.yaml b/adapter/sink/test/samples/socket.yaml new file mode 100644 index 00000000..351ecb90 --- /dev/null +++ b/adapter/sink/test/samples/socket.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: socket-server + namespace: kubesphere-monitoring-system +spec: + replicas: 1 + selector: + matchLabels: + name: socket-server + template: + metadata: + labels: + name: socket-server + spec: + containers: + - name: socket-server + image: kubespheredev/socket-server:latest + command: + - socket-server + imagePullPolicy: Always + volumeMounts: + - mountPath: /etc/localtime + name: host-time + volumes: + - hostPath: + path: /etc/localtime + type: "" + name: host-time + +--- +apiVersion: v1 +kind: Service +metadata: + name: socket-server + namespace: kubesphere-monitoring-system +spec: + selector: + name: socket-server + ports: + - name: http + port: 8080 + targetPort: 8080
diff --git a/adapter/sink/test/send_alerts.sh b/adapter/sink/test/send_alerts.sh new file mode 100755 index 00000000..56ba4395 --- /dev/null +++ b/adapter/sink/test/send_alerts.sh @@ -0,0 +1,2 @@ +#!/bin/bash +curl -XPOST -H 'Content-type':'application/json' -d @alert.json http://127.0.0.1:8080/alerts
diff --git a/go.mod b/go.mod index c3a0faaa..0d2b21fc 100644 --- a/go.mod +++ b/go.mod @@ -3,16 +3,20 @@ module github.com/kubesphere/notification-manager go 1.13 require ( - github.com/ghodss/yaml v1.0.0 + github.com/emicklei/go-restful v2.9.5+incompatible github.com/go-chi/chi v4.0.3+incompatible github.com/go-kit/kit v0.9.0 github.com/go-logr/logr v0.1.0 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/json-iterator/go v1.1.8 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 github.com/onsi/ginkgo v1.11.0 github.com/onsi/gomega v1.8.1 github.com/prometheus/alertmanager v0.20.0 github.com/prometheus/common v0.7.0 + github.com/spf13/cobra v0.0.5 + github.com/spf13/pflag v1.0.5 + golang.org/x/text v0.3.2
gopkg.in/alecthomas/kingpin.v2 v2.2.6 k8s.io/api v0.17.2 k8s.io/apimachinery v0.17.2 diff --git a/go.sum b/go.sum index e0ff8052..bf62afcf 100644 --- a/go.sum +++ b/go.sum @@ -76,6 +76,7 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= @@ -154,6 +155,7 @@ github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09Vjb github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= @@ -339,6 +341,7 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= From b1d87b127025cac8c6ec744b339c77b26e1fdbe7 Mon Sep 17 00:00:00 2001 From: wanjunlei Date: Thu, 3 Dec 2020 11:24:20 +0800 Subject: [PATCH 3/3] resolve conversation Signed-off-by: wanjunlei --- adapter/sink/Dockerfile | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/adapter/sink/Dockerfile b/adapter/sink/Dockerfile index 489dabd9..5ff05eac 100644 --- a/adapter/sink/Dockerfile +++ b/adapter/sink/Dockerfile @@ -9,21 +9,19 @@ FROM golang:1.13 as notification-adapter COPY cmd/main.go / -COPY cmd/type.go / +COPY cmd/types.go / WORKDIR / ENV GOPROXY=https://goproxy.io -RUN CGO_ENABLED=0 GO111MODULE=on go build -i -ldflags '-w -s' -o notification-adapter main.go type.go +RUN CGO_ENABLED=0 GO111MODULE=on go build -i -ldflags '-w -s' -o notification-adapter main.go types.go FROM alpine:3.9 COPY --from=notification-adapter /notification-adapter /usr/local/bin/ -RUN apk add --update 
ca-certificates && update-ca-certificates -RUN apk add curl -RUN adduser -D -g kubesphere -u 1002 kubesphere -RUN chown -R kubesphere:kubesphere /usr/local/bin/notification-adapter -RUN apk add libcap -RUN setcap 'CAP_NET_BIND_SERVICE=+ep' /usr/local/bin/notification-adapter +RUN adduser -D -g kubesphere -u 1002 kubesphere && \ + chown -R kubesphere:kubesphere /usr/local/bin/notification-adapter && \ + apk add libcap && \ + setcap 'CAP_NET_BIND_SERVICE=+ep' /usr/local/bin/notification-adapter USER kubesphere CMD ["sh"]
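---

A note on the adapter's wire format: getMessage in adapter/sink/cmd/main.go emits each alert of severity error or critical as one GBK-encoded, newline-terminated record of "#"-separated fields — cluster#signature#alertname#namespace#message#summaryCn#level. The minimal Go sketch below shows how a downstream consumer could decode that stream. It is illustrative only and not part of these patches: the TivoliMessage type and parseLine helper are hypothetical names, and the split assumes the message and summaryCn annotations contain no "#" of their own.

// tivoli_sink_reader.go - a minimal sketch of a consumer for the adapter's
// "#"-separated wire format (hypothetical helper, not part of these patches).
package main

import (
	"bufio"
	"fmt"
	"net"
	"strconv"
	"strings"

	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/transform"
)

// TivoliMessage mirrors the field order produced by getMessage:
// cluster#signature#alertname#namespace#message#summaryCn#level
type TivoliMessage struct {
	Cluster   string
	Signature uint64
	AlertName string
	Namespace string
	Message   string
	SummaryCn string
	Level     int
}

func parseLine(line string) (*TivoliMessage, error) {
	parts := strings.Split(strings.TrimRight(line, "\n"), "#")
	if len(parts) != 7 {
		// Holds only if the message/summaryCn annotations contain no "#".
		return nil, fmt.Errorf("expected 7 fields, got %d", len(parts))
	}
	sig, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("bad signature: %v", err)
	}
	level, err := strconv.Atoi(parts[6])
	if err != nil {
		return nil, fmt.Errorf("bad level: %v", err)
	}
	return &TivoliMessage{
		Cluster:   parts[0],
		Signature: sig,
		AlertName: parts[2],
		Namespace: parts[3],
		Message:   parts[4],
		SummaryCn: parts[5],
		Level:     level,
	}, nil
}

func main() {
	// Listen on any free port and point the adapter's --ip/--port here.
	l, err := net.Listen("tcp", ":9090")
	if err != nil {
		panic(err)
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			continue
		}
		go func(c net.Conn) {
			defer c.Close()
			// The adapter GBK-encodes the payload, so decode back to UTF-8.
			r := bufio.NewReader(transform.NewReader(c, simplifiedchinese.GBK.NewDecoder()))
			for {
				line, err := r.ReadString('\n')
				if err != nil {
					return
				}
				if msg, perr := parseLine(line); perr == nil {
					fmt.Printf("%s/%s level=%d: %s\n", msg.Namespace, msg.AlertName, msg.Level, msg.Message)
				}
			}
		}(conn)
	}
}

Running this listener and starting the adapter with --ip/--port pointing at it prints one decoded record per alert; severities below error never reach the socket because getMessage returns an empty message for them.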