diff --git a/config/crds/troubleshoot.sh_analyzers.yaml b/config/crds/troubleshoot.sh_analyzers.yaml index ae1e62ffe..982aaf093 100644 --- a/config/crds/troubleshoot.sh_analyzers.yaml +++ b/config/crds/troubleshoot.sh_analyzers.yaml @@ -1489,6 +1489,19 @@ spec: required: - outcomes type: object + velero: + properties: + annotations: + additionalProperties: + type: string + type: object + checkName: + type: string + exclude: + type: BoolString + strict: + type: BoolString + type: object weaveReport: properties: annotations: diff --git a/config/crds/troubleshoot.sh_preflights.yaml b/config/crds/troubleshoot.sh_preflights.yaml index fbb0d7b4b..949711ae7 100644 --- a/config/crds/troubleshoot.sh_preflights.yaml +++ b/config/crds/troubleshoot.sh_preflights.yaml @@ -1489,6 +1489,19 @@ spec: required: - outcomes type: object + velero: + properties: + annotations: + additionalProperties: + type: string + type: object + checkName: + type: string + exclude: + type: BoolString + strict: + type: BoolString + type: object weaveReport: properties: annotations: diff --git a/config/crds/troubleshoot.sh_supportbundles.yaml b/config/crds/troubleshoot.sh_supportbundles.yaml index c938845ae..405d62cc0 100644 --- a/config/crds/troubleshoot.sh_supportbundles.yaml +++ b/config/crds/troubleshoot.sh_supportbundles.yaml @@ -1520,6 +1520,19 @@ spec: required: - outcomes type: object + velero: + properties: + annotations: + additionalProperties: + type: string + type: object + checkName: + type: string + exclude: + type: BoolString + strict: + type: BoolString + type: object weaveReport: properties: annotations: diff --git a/go.mod b/go.mod index 5eb76f66b..15610291b 100644 --- a/go.mod +++ b/go.mod @@ -35,9 +35,11 @@ require ( github.com/spf13/viper v1.17.0 github.com/stretchr/testify v1.8.4 github.com/tj/go-spin v1.1.0 + github.com/vmware-tanzu/velero v1.12.0 go.opentelemetry.io/otel v1.19.0 go.opentelemetry.io/otel/sdk v1.19.0 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 + 
golang.org/x/mod v0.12.0 golang.org/x/sync v0.4.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.28.2 @@ -86,7 +88,6 @@ require ( github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/onsi/ginkgo v1.14.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/rubenv/sql-migrate v1.3.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -105,7 +106,6 @@ require ( go.opentelemetry.io/otel/metric v1.19.0 // indirect go.opentelemetry.io/otel/trace v1.19.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.12.0 // indirect golang.org/x/tools v0.13.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect @@ -123,7 +123,7 @@ require ( github.com/Microsoft/go-winio v0.6.0 // indirect github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect github.com/andybalholm/brotli v1.0.1 // indirect - github.com/aws/aws-sdk-go v1.44.198 // indirect + github.com/aws/aws-sdk-go v1.44.253 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/c9s/goprocinfo v0.0.0-20170724085704-0010a05ce49f // indirect diff --git a/go.sum b/go.sum index 180050de6..79bdb7d99 100644 --- a/go.sum +++ b/go.sum @@ -192,7 +192,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= -github.com/Azure/azure-sdk-for-go 
v56.3.0+incompatible h1:DmhwMrUIvpeoTDiWRDtNHqelNUd3Og8JCkrLHQK795c= +github.com/Azure/azure-sdk-for-go v67.2.0+incompatible h1:Uu/Ww6ernvPTrpq31kITVTIm/I5jlJ1wjtEH/bmSB2k= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= @@ -244,8 +244,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.44.198 h1:kgnvxQv4/kP5M0nbxBx0Ac0so9ndr9f8Ti0g+NmPQF8= -github.com/aws/aws-sdk-go v1.44.198/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.253 h1:iqDd0okcH4ShfFexz2zzf4VmeDFf6NOMm07pHnEb8iY= +github.com/aws/aws-sdk-go v1.44.253/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -800,19 +800,14 @@ github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nwaples/rardecode v1.1.2 h1:Cj0yZY6T1Zx1R7AhTbyGSALm44/Mmq+BAPc4B/p/d3M= github.com/nwaples/rardecode v1.1.2/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= 
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -1001,6 +996,8 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vladimirvivien/gexe v0.2.0 h1:nbdAQ6vbZ+ZNsolCgSVb9Fno60kzSuvtzVh6Ytqi/xY= github.com/vladimirvivien/gexe v0.2.0/go.mod h1:LHQL00w/7gDUKIak24n801ABp8C+ni6eBht9vGVst8w= +github.com/vmware-tanzu/velero v1.12.0 h1:gN8PbQMYOAMv8OYE3RkIvxr6s6IoMS0Daxc+IQN0X5U= +github.com/vmware-tanzu/velero v1.12.0/go.mod 
h1:rY2UfdC2K9je9jtjnSBsZr8Zmg8hzePjG2W00Oe/CT4= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1149,7 +1146,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1246,14 +1242,12 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1268,7 +1262,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/pkg/analyze/analyzer.go b/pkg/analyze/analyzer.go index 0d6f4e681..a95b12a2c 100644 --- a/pkg/analyze/analyzer.go +++ b/pkg/analyze/analyzer.go @@ -1,16 +1,20 @@ package analyzer import ( + "bufio" + "bytes" "context" 
"encoding/json" "fmt" "reflect" "strconv" + "strings" "github.com/pkg/errors" troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2" "github.com/replicatedhq/troubleshoot/pkg/constants" "github.com/replicatedhq/troubleshoot/pkg/multitype" + "github.com/replicatedhq/troubleshoot/pkg/redact" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -226,6 +230,8 @@ func getAnalyzer(analyzer *troubleshootv1beta2.Analyze) Analyzer { return &AnalyzeRedis{analyzer: analyzer.Redis} case analyzer.CephStatus != nil: return &AnalyzeCephStatus{analyzer: analyzer.CephStatus} + case analyzer.Velero != nil: + return &AnalyzeVelero{analyzer: analyzer.Velero} case analyzer.Longhorn != nil: return &AnalyzeLonghorn{analyzer: analyzer.Longhorn} case analyzer.RegistryImages != nil: @@ -265,3 +271,18 @@ func DedupAnalyzers(allAnalyzers []*troubleshootv1beta2.Analyze) []*troubleshoot } return finalAnalyzers } + +func stripRedactedLines(yaml []byte) []byte { + buf := bytes.NewBuffer(yaml) + scanner := bufio.NewScanner(buf) + + out := []byte{} + + for scanner.Scan() { + line := strings.ReplaceAll(scanner.Text(), redact.MASK_TEXT, "HIDDEN") + out = append(out, []byte(line)...) 
+ out = append(out, '\n') + } + + return out +} diff --git a/pkg/analyze/longhorn.go b/pkg/analyze/longhorn.go index a9e824452..09e28b2bf 100644 --- a/pkg/analyze/longhorn.go +++ b/pkg/analyze/longhorn.go @@ -1,19 +1,15 @@ package analyzer import ( - "bufio" - "bytes" "fmt" "path/filepath" "reflect" - "strings" "github.com/pkg/errors" troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2" "github.com/replicatedhq/troubleshoot/pkg/collect" longhornv1beta1 "github.com/replicatedhq/troubleshoot/pkg/longhorn/apis/longhorn/v1beta1" longhorntypes "github.com/replicatedhq/troubleshoot/pkg/longhorn/types" - "github.com/replicatedhq/troubleshoot/pkg/redact" "gopkg.in/yaml.v2" ) @@ -241,21 +237,6 @@ func analyzeLonghornEngine(engine *longhornv1beta1.Engine) *AnalyzeResult { return result } -func stripRedactedLines(yaml []byte) []byte { - buf := bytes.NewBuffer(yaml) - scanner := bufio.NewScanner(buf) - - out := []byte{} - - for scanner.Scan() { - line := strings.ReplaceAll(scanner.Text(), redact.MASK_TEXT, "HIDDEN") - out = append(out, []byte(line)...) - out = append(out, '\n') - } - - return out -} - func analyzeLonghornReplicaChecksums(volumeName string, checksums []map[string]string) *AnalyzeResult { result := &AnalyzeResult{ Title: fmt.Sprintf("Longhorn Volume Replica Corruption: %s", volumeName), diff --git a/pkg/analyze/types/restic_types.go b/pkg/analyze/types/restic_types.go new file mode 100644 index 000000000..2161e04e4 --- /dev/null +++ b/pkg/analyze/types/restic_types.go @@ -0,0 +1,62 @@ +package analyzer + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ResticRepositorySpec is the specification for a ResticRepository. +type ResticRepositorySpec struct { + // VolumeNamespace is the namespace this restic repository contains + // pod volume backups for. + VolumeNamespace string `json:"volumeNamespace"` + + // BackupStorageLocation is the name of the BackupStorageLocation + // that should contain this repository. 
+ BackupStorageLocation string `json:"backupStorageLocation"` + + // ResticIdentifier is the full restic-compatible string for identifying + // this repository. + ResticIdentifier string `json:"resticIdentifier"` + + // MaintenanceFrequency is how often maintenance should be run. + MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"` +} + +// ResticRepositoryPhase represents the lifecycle phase of a ResticRepository. +// +kubebuilder:validation:Enum=New;Ready;NotReady +type ResticRepositoryPhase string + +const ( + ResticRepositoryPhaseNew ResticRepositoryPhase = "New" + ResticRepositoryPhaseReady ResticRepositoryPhase = "Ready" + ResticRepositoryPhaseNotReady ResticRepositoryPhase = "NotReady" +) + +// ResticRepositoryStatus is the current status of a ResticRepository. +type ResticRepositoryStatus struct { + // Phase is the current state of the ResticRepository. + // +optional + Phase ResticRepositoryPhase `json:"phase,omitempty"` + + // Message is a message about the current status of the ResticRepository. + // +optional + Message string `json:"message,omitempty"` + + // LastMaintenanceTime is the last time maintenance was run. 
+ // +optional + // +nullable + LastMaintenanceTime *metav1.Time `json:"lastMaintenanceTime,omitempty"` +} + +type ResticRepository struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec ResticRepositorySpec `json:"spec,omitempty"` + + // +optional + Status ResticRepositoryStatus `json:"status,omitempty"` +} diff --git a/pkg/analyze/velero.go b/pkg/analyze/velero.go new file mode 100644 index 000000000..2512f5dfa --- /dev/null +++ b/pkg/analyze/velero.go @@ -0,0 +1,735 @@ +package analyzer + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + + appsV1 "k8s.io/api/apps/v1" + + restic_types "github.com/replicatedhq/troubleshoot/pkg/analyze/types" + "golang.org/x/mod/semver" + + "github.com/pkg/errors" + troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2" + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +type AnalyzeVelero struct { + analyzer *troubleshootv1beta2.VeleroAnalyze +} + +func (a *AnalyzeVelero) Title() string { + title := a.analyzer.CheckName + if title == "" { + title = "Velero" + } + + return title +} + +func (a *AnalyzeVelero) IsExcluded() (bool, error) { + return isExcluded(a.analyzer.Exclude) +} + +func (a *AnalyzeVelero) Analyze(getFile getCollectedFileContents, findFiles getChildCollectedFileContents) ([]*AnalyzeResult, error) { + results, err := a.veleroStatus(a.analyzer, getFile, findFiles) + if err != nil { + return nil, err + } + for i := range results { + results[i].Strict = a.analyzer.Strict.BoolOrDefaultFalse() + } + return results, nil +} + +func (a *AnalyzeVelero) veleroStatus(analyzer *troubleshootv1beta2.VeleroAnalyze, getFileContents getCollectedFileContents, findFiles getChildCollectedFileContents) ([]*AnalyzeResult, error) { + excludeFiles := []string{} + results := []*AnalyzeResult{} + + oldVeleroRepoType := false + veleroVersion, err := getVeleroVersion(excludeFiles, findFiles) + if err 
!= nil { + return nil, errors.Wrap(err, "Unable to find velero deployment") + } + + // Check if the version string is valid erer + if !semver.IsValid(veleroVersion) { + return nil, errors.Errorf("Invalid velero semver: %s", veleroVersion) + } + + // check if veleroVersion is less than 1.10.x + compareResult := semver.Compare(veleroVersion, "1.10.0") + if compareResult < 0 { + oldVeleroRepoType = true + } + + if oldVeleroRepoType == true { + // old velero (v1.9.x) has a BackupRepositoryTypeRestic + // get resticrepositories.velero.io + resticRepositoriesDir := GetVeleroResticRepositoriesDirectory() + resticRepositoriesGlob := filepath.Join(resticRepositoriesDir, "*.json") + resticRepositoriesJson, err := findFiles(resticRepositoriesGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero restic repositories files under %s", resticRepositoriesDir) + } + resticRepositories := []*restic_types.ResticRepository{} + for key, resticRepositoryJson := range resticRepositoriesJson { + var resticRepositoryArray []*restic_types.ResticRepository + err := json.Unmarshal(resticRepositoryJson, &resticRepositories) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal restic repository json from %s", key) + } + resticRepositories = append(resticRepositories, resticRepositoryArray...) + } + results = append(results, analyzeResticRepositories(resticRepositories)...) 
+ + } else { + + // velerov1.Version + // get backuprepositories.velero.io + backupRepositoriesDir := GetVeleroBackupRepositoriesDirectory() + backupRepositoriesGlob := filepath.Join(backupRepositoriesDir, "*.json") + backupRepositoriesJson, err := findFiles(backupRepositoriesGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero backup repositories files under %s", backupRepositoriesDir) + } + backupRepositories := []*velerov1.BackupRepository{} + for key, backupRepositoryJson := range backupRepositoriesJson { + var backupRepositoryArray []*velerov1.BackupRepository + err := json.Unmarshal(backupRepositoryJson, &backupRepositoryArray) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal backup repository json from %s", key) + } + backupRepositories = append(backupRepositories, backupRepositoryArray...) + } + results = append(results, analyzeBackupRepositories(backupRepositories)...) + + } + + // get backups.velero.io + backupsDir := GetVeleroBackupsDirectory() + backupsGlob := filepath.Join(backupsDir, "*.json") + veleroJSONs, err := findFiles(backupsGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero backup files") + } + backups := []*velerov1.Backup{} + for _, veleroJSON := range veleroJSONs { + if err != nil { + return nil, errors.Wrapf(err, "failed to read velero backup file %s", veleroJSON) + } + var veleroBackups []*velerov1.Backup + err = json.Unmarshal(veleroJSON, &veleroBackups) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal velero backup file %s", veleroJSON) + } + backups = append(backups, veleroBackups...) 
+ } + + // get backupstoragelocations.velero.io + backupStorageLocationsDir := GetVeleroBackupStorageLocationsDirectory() + backupStorageLocationsGlob := filepath.Join(backupStorageLocationsDir, "*.json") + backupStorageLocationsJson, err := findFiles(backupStorageLocationsGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero backup storage locations files under %s", backupStorageLocationsDir) + } + backupStorageLocations := []*velerov1.BackupStorageLocation{} + for key, backupStorageLocationJson := range backupStorageLocationsJson { + var backupStorageLocationArray []*velerov1.BackupStorageLocation + err := json.Unmarshal(backupStorageLocationJson, &backupStorageLocationArray) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal backup storage location json from %s", key) + } + backupStorageLocations = append(backupStorageLocations, backupStorageLocationArray...) + } + + // get deletebackuprequests.velero.io + deleteBackupRequestsDir := GetVeleroDeleteBackupRequestsDirectory() + deleteBackupRequestsGlob := filepath.Join(deleteBackupRequestsDir, "*.json") + deleteBackupRequestsJson, err := findFiles(deleteBackupRequestsGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero delete backup requests files under %s", deleteBackupRequestsDir) + } + deleteBackupRequests := []*velerov1.DeleteBackupRequest{} + for key, deleteBackupRequestJson := range deleteBackupRequestsJson { + var deleteBackupRequestArray []*velerov1.DeleteBackupRequest + err := json.Unmarshal(deleteBackupRequestJson, &deleteBackupRequestArray) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal delete backup request json from %s", key) + } + deleteBackupRequests = append(deleteBackupRequests, deleteBackupRequestArray...) 
+ } + + // get podvolumebackups.velero.io + podVolumeBackupsDir := GetVeleroPodVolumeBackupsDirectory() + podVolumeBackupsGlob := filepath.Join(podVolumeBackupsDir, "*.json") + podVolumeBackupsJson, err := findFiles(podVolumeBackupsGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero pod volume backups files under %s", podVolumeBackupsDir) + } + podVolumeBackups := []*velerov1.PodVolumeBackup{} + for key, podVolumeBackupJson := range podVolumeBackupsJson { + var podVolumeBackupArray []*velerov1.PodVolumeBackup + err := json.Unmarshal(podVolumeBackupJson, &podVolumeBackupArray) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal pod volume backup json from %s", key) + } + podVolumeBackups = append(podVolumeBackups, podVolumeBackupArray...) + } + + // get podvolumerestores.velero.io + podVolumeRestoresDir := GetVeleroPodVolumeRestoresDirectory() + podVolumeRestoresGlob := filepath.Join(podVolumeRestoresDir, "*.json") + podVolumeRestoresJson, err := findFiles(podVolumeRestoresGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero pod volume restores files under %s", podVolumeRestoresDir) + } + podVolumeRestores := []*velerov1.PodVolumeRestore{} + for key, podVolumeRestoreJson := range podVolumeRestoresJson { + var podVolumeRestoreArray []*velerov1.PodVolumeRestore + err := json.Unmarshal(podVolumeRestoreJson, &podVolumeRestoreArray) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal pod volume restore json from %s", key) + } + podVolumeRestores = append(podVolumeRestores, podVolumeRestoreArray...) 
+ } + + // get restores.velero.io + restoresDir := GetVeleroRestoresDirectory() + restoresGlob := filepath.Join(restoresDir, "*.json") + restoresJson, err := findFiles(restoresGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero restores files under %s", restoresDir) + } + restores := []*velerov1.Restore{} + for key, restoreJson := range restoresJson { + var restoreArray []*velerov1.Restore + err := json.Unmarshal(restoreJson, &restoreArray) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal restore json from %s", key) + } + restores = append(restores, restoreArray...) + } + + // get schedules.velero.io + schedulesDir := GetVeleroSchedulesDirectory() + schedulesGlob := filepath.Join(schedulesDir, "*.json") + schedulesJson, err := findFiles(schedulesGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero schedules files under %s", schedulesDir) + } + schedules := []*velerov1.Schedule{} + for key, scheduleJson := range schedulesJson { + var scheduleArray []*velerov1.Schedule + err := json.Unmarshal(scheduleJson, &scheduleArray) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal schedule json from %s", key) + } + schedules = append(schedules, scheduleArray...) 
+ } + + // get serverstatusrequests.velero.io + serverStatusRequestsDir := GetVeleroServerStatusRequestsDirectory() + serverStatusRequestsGlob := filepath.Join(serverStatusRequestsDir, "*.json") + serverStatusRequestsJson, err := findFiles(serverStatusRequestsGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero server status requests files under %s", serverStatusRequestsDir) + } + serverStatusRequests := []*velerov1.ServerStatusRequest{} + for key, serverStatusRequestJson := range serverStatusRequestsJson { + var serverStatusRequestArray []*velerov1.ServerStatusRequest + err := json.Unmarshal(serverStatusRequestJson, &serverStatusRequestArray) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal server status request json from %s", key) + } + serverStatusRequests = append(serverStatusRequests, serverStatusRequestArray...) + } + + // get volumesnapshotlocations.velero.io + volumeSnapshotLocationsDir := GetVeleroVolumeSnapshotLocationsDirectory() + volumeSnapshotLocationsGlob := filepath.Join(volumeSnapshotLocationsDir, "*.json") + volumeSnapshotLocationsJson, err := findFiles(volumeSnapshotLocationsGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero volume snapshot locations files under %s", volumeSnapshotLocationsDir) + } + volumeSnapshotLocations := []*velerov1.VolumeSnapshotLocation{} + for key, volumeSnapshotLocationJson := range volumeSnapshotLocationsJson { + var volumeSnapshotLocationArray []*velerov1.VolumeSnapshotLocation + err := json.Unmarshal(volumeSnapshotLocationJson, &volumeSnapshotLocationArray) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal volume snapshot location json from %s", key) + } + volumeSnapshotLocations = append(volumeSnapshotLocations, volumeSnapshotLocationArray...) 
+ } + + logsDir := GetVeleroLogsDirectory() + logsGlob := filepath.Join(logsDir, "node-agent*", "*.log") + nodeAgentlogs, err := findFiles(logsGlob, excludeFiles) + if err != nil { + return nil, errors.Wrapf(err, "failed to find velero logs files under %s", logsDir) + } + + veleroLogsGlob := filepath.Join(logsDir, "velero*", "*.log") + veleroLogs, err := findFiles(veleroLogsGlob, excludeFiles) + + results = append(results, analyzeLogs(nodeAgentlogs, "node-agent*")...) + results = append(results, analyzeLogs(veleroLogs, "velero*")...) + results = append(results, analyzeBackups(backups)...) + results = append(results, analyzeBackupStorageLocations(backupStorageLocations)...) + results = append(results, analyzeDeleteBackupRequests(deleteBackupRequests)...) + results = append(results, analyzePodVolumeBackups(podVolumeBackups)...) + results = append(results, analyzePodVolumeRestores(podVolumeRestores)...) + results = append(results, analyzeRestores(restores)...) + results = append(results, analyzeSchedules(schedules)...) + results = append(results, analyzeVolumeSnapshotLocations(volumeSnapshotLocations)...) 
+ + return aggregateResults(results), nil +} + +func analyzeBackupRepositories(backupRepositories []*velerov1.BackupRepository) []*AnalyzeResult { + results := []*AnalyzeResult{} + readyCount := 0 + backupRepositoriesResult := &AnalyzeResult{ + Title: "At least 1 Backup Repository configured", + } + if len(backupRepositories) == 0 { + backupRepositoriesResult.IsFail = true + backupRepositoriesResult.Message = "No backup repositories configured" + } else { + for _, backupRepository := range backupRepositories { + if backupRepository.Status.Phase != velerov1.BackupRepositoryPhaseReady { + result := &AnalyzeResult{ + Title: fmt.Sprintf("Backup Repository %s", backupRepository.Name), + } + result.Message = fmt.Sprintf("Backup Repository [%s] is in phase %s", backupRepository.Name, backupRepository.Status.Phase) + result.IsWarn = true + results = append(results, result) + } else { + readyCount++ + } + } + if readyCount > 0 { + backupRepositoriesResult.IsPass = true + backupRepositoriesResult.Message = fmt.Sprintf("Found %d backup repositories configured and %d Ready", len(backupRepositories), readyCount) + } else { + backupRepositoriesResult.IsWarn = true + backupRepositoriesResult.Message = fmt.Sprintf("Found %d configured backup repositories, but none are ready", len(backupRepositories)) + } + } + results = append(results, backupRepositoriesResult) + return results + +} + +func analyzeResticRepositories(resticRepositories []*restic_types.ResticRepository) []*AnalyzeResult { + results := []*AnalyzeResult{} + readyCount := 0 + resticRepositoriesResult := &AnalyzeResult{ + Title: "At least 1 Restic Repository configured", + } + if len(resticRepositories) == 0 { + resticRepositoriesResult.IsFail = true + resticRepositoriesResult.Message = "No restic repositories configured" + } else { + for _, resticRepository := range resticRepositories { + if resticRepository.Status.Phase != restic_types.ResticRepositoryPhaseReady { + result := &AnalyzeResult{ + Title: 
fmt.Sprintf("Restic Repository %s", resticRepository.GetName()), + } + result.Message = fmt.Sprintf("Restic Repository [%s] is in phase %s", resticRepository.Name, resticRepository.Status.Phase) + result.IsWarn = true + results = append(results, result) + } else { + readyCount++ + } + } + if readyCount > 0 { + resticRepositoriesResult.IsPass = true + resticRepositoriesResult.Message = fmt.Sprintf("Found %d restic repositories configured and %d Ready", len(resticRepositories), readyCount) + } else { + resticRepositoriesResult.IsWarn = true + resticRepositoriesResult.Message = fmt.Sprintf("Found %d configured restic repositories, but none are ready", len(resticRepositories)) + } + } + results = append(results, resticRepositoriesResult) + return results +} + +func analyzeBackups(backups []*velerov1.Backup) []*AnalyzeResult { + results := []*AnalyzeResult{} + + failedPhases := map[velerov1.BackupPhase]bool{ + velerov1.BackupPhaseFailed: true, + velerov1.BackupPhasePartiallyFailed: true, + velerov1.BackupPhaseFailedValidation: true, + velerov1.BackupPhaseFinalizingPartiallyFailed: true, + velerov1.BackupPhaseWaitingForPluginOperationsPartiallyFailed: true, + } + + for _, backup := range backups { + + if failedPhases[backup.Status.Phase] { + result := &AnalyzeResult{ + Title: fmt.Sprintf("Backup %s", backup.Name), + } + result.IsFail = true + result.Message = fmt.Sprintf("Backup %s phase is %s", backup.Name, backup.Status.Phase) + results = append(results, result) + + } + } + if len(backups) > 0 { + results = append(results, &AnalyzeResult{ + Title: "Velero Backups", + IsPass: true, + Message: fmt.Sprintf("Found %d backups", len(backups)), + }) + } + return results +} + +func analyzeBackupStorageLocations(backupStorageLocations []*velerov1.BackupStorageLocation) []*AnalyzeResult { + results := []*AnalyzeResult{} + availableCount := 0 + bslResult := &AnalyzeResult{ + Title: "At least 1 Backup Storage Location configured", + } + + if len(backupStorageLocations) == 0 { + 
bslResult.IsFail = true + bslResult.Message = "No backup storage locations configured" + } else { + for _, backupStorageLocation := range backupStorageLocations { + if backupStorageLocation.Status.Phase != velerov1.BackupStorageLocationPhaseAvailable { + result := &AnalyzeResult{ + Title: fmt.Sprintf("Backup Storage Location %s", backupStorageLocation.Name), + } + result.Message = fmt.Sprintf("Backup Storage Location [%s] is in phase %s", backupStorageLocation.Name, backupStorageLocation.Status.Phase) + result.IsWarn = true + results = append(results, result) + } else { + availableCount++ + } + } + if availableCount > 0 { + bslResult.IsPass = true + bslResult.Message = fmt.Sprintf("Found %d backup storage locations configured and %d Available", len(backupStorageLocations), availableCount) + } else { + bslResult.IsWarn = true + bslResult.Message = fmt.Sprintf("Found %d configured backup storage locations, but none are available", len(backupStorageLocations)) + } + } + results = append(results, bslResult) + + return results +} + +func analyzeDeleteBackupRequests(deleteBackupRequests []*velerov1.DeleteBackupRequest) []*AnalyzeResult { + results := []*AnalyzeResult{} + inProgressCount := 0 + if len(deleteBackupRequests) > 0 { + for _, deleteBackupRequest := range deleteBackupRequests { + if deleteBackupRequest.Status.Phase == velerov1.DeleteBackupRequestPhaseInProgress { + inProgressCount++ + } + } + if inProgressCount > 0 { + deleteBackupRequestsResult := &AnalyzeResult{ + Title: "Delete Backup Requests summary", + } + deleteBackupRequestsResult.IsWarn = true + deleteBackupRequestsResult.Message = fmt.Sprintf("Found %d delete backup requests in progress", inProgressCount) + results = append(results, deleteBackupRequestsResult) + } + } + + return results +} + +func analyzePodVolumeBackups(podVolumeBackups []*velerov1.PodVolumeBackup) []*AnalyzeResult { + results := []*AnalyzeResult{} + failures := 0 + if len(podVolumeBackups) > 0 { + for _, podVolumeBackup := range 
podVolumeBackups { + if podVolumeBackup.Status.Phase == velerov1.PodVolumeBackupPhaseFailed { + result := &AnalyzeResult{ + Title: fmt.Sprintf("Pod Volume Backup %s", podVolumeBackup.Name), + } + result.IsFail = true + result.Message = fmt.Sprintf("Pod Volume Backup %s phase is %s", podVolumeBackup.Name, podVolumeBackup.Status.Phase) + results = append(results, result) + failures++ + } + } + + if failures == 0 { + results = append(results, &AnalyzeResult{ + Title: "Pod Volume Backups", + IsPass: true, + Message: fmt.Sprintf("Found %d pod volume backups", len(podVolumeBackups)), + }) + } + } + + return results +} + +func analyzePodVolumeRestores(podVolumeRestores []*velerov1.PodVolumeRestore) []*AnalyzeResult { + results := []*AnalyzeResult{} + failures := 0 + + if len(podVolumeRestores) > 0 { + for _, podVolumeRestore := range podVolumeRestores { + if podVolumeRestore.Status.Phase == velerov1.PodVolumeRestorePhaseFailed { + result := &AnalyzeResult{ + Title: fmt.Sprintf("Pod Volume Restore %s", podVolumeRestore.Name), + } + result.IsFail = true + result.Message = fmt.Sprintf("Pod Volume Restore %s phase is %s", podVolumeRestore.Name, podVolumeRestore.Status.Phase) + results = append(results, result) + failures++ + } + } + if failures == 0 { + results = append(results, &AnalyzeResult{ + Title: "Pod Volume Restores", + IsPass: true, + Message: fmt.Sprintf("Found %d pod volume restores", len(podVolumeRestores)), + }) + } + } + return results +} + +func analyzeRestores(restores []*velerov1.Restore) []*AnalyzeResult { + results := []*AnalyzeResult{} + failures := 0 + + if len(restores) > 0 { + + failedPhases := map[velerov1.RestorePhase]bool{ + velerov1.RestorePhaseFailed: true, + velerov1.RestorePhasePartiallyFailed: true, + velerov1.RestorePhaseFailedValidation: true, + velerov1.RestorePhaseWaitingForPluginOperationsPartiallyFailed: true, + } + + for _, restore := range restores { + if failedPhases[restore.Status.Phase] { + result := &AnalyzeResult{ + Title: 
fmt.Sprintf("Restore %s", restore.Name), + } + result.IsFail = true + result.Message = fmt.Sprintf("Restore %s phase is %s", restore.Name, restore.Status.Phase) + results = append(results, result) + failures++ + } + } + if failures == 0 { + results = append(results, &AnalyzeResult{ + Title: "Velero Restores", + IsPass: true, + Message: fmt.Sprintf("Found %d restores", len(restores)), + }) + } + } + + return results +} + +func analyzeSchedules(schedules []*velerov1.Schedule) []*AnalyzeResult { + results := []*AnalyzeResult{} + failures := 0 + if len(schedules) > 0 { + for _, schedule := range schedules { + if schedule.Status.Phase == velerov1.SchedulePhaseFailedValidation { + result := &AnalyzeResult{ + Title: fmt.Sprintf("Schedule %s", schedule.Name), + } + result.IsFail = true + result.Message = fmt.Sprintf("Schedule %s phase is %s", schedule.Name, schedule.Status.Phase) + results = append(results, result) + failures++ + } + } + if failures == 0 { + results = append(results, &AnalyzeResult{ + Title: "Velero Schedules", + IsPass: true, + Message: fmt.Sprintf("Found %d schedules", len(schedules)), + }) + } + } + return results +} + +func analyzeVolumeSnapshotLocations(volumeSnapshotLocations []*velerov1.VolumeSnapshotLocation) []*AnalyzeResult { + results := []*AnalyzeResult{} + failures := 0 + if len(volumeSnapshotLocations) > 0 { + for _, volumeSnapshotLocation := range volumeSnapshotLocations { + if volumeSnapshotLocation.Status.Phase == velerov1.VolumeSnapshotLocationPhaseUnavailable { + result := &AnalyzeResult{ + Title: fmt.Sprintf("Volume Snapshot Location %s", volumeSnapshotLocation.Name), + } + result.IsFail = true + result.Message = fmt.Sprintf("Volume Snapshot Location %s phase is %s", volumeSnapshotLocation.Name, volumeSnapshotLocation.Status.Phase) + results = append(results, result) + failures++ + } + } + if failures == 0 { + results = append(results, &AnalyzeResult{ + Title: "Velero Volume Snapshot Locations", + IsPass: true, + Message: 
fmt.Sprintf("Found %d volume snapshot locations", len(volumeSnapshotLocations)), + }) + } + } + + return results +} + +func analyzeLogs(logs map[string][]byte, kind string) []*AnalyzeResult { + results := []*AnalyzeResult{} + if len(logs) > 0 { + for key, logBytes := range logs { + logContent := string(logBytes) + result := &AnalyzeResult{ + Title: fmt.Sprintf("Velero logs for pod [%s]", key), + } + if strings.Contains(logContent, "permission denied") { + result.IsWarn = true + result.Message = fmt.Sprintf("Found 'permission denied' in %s pod log file(s)", kind) + results = append(results, result) + continue + } + + if strings.Contains(logContent, "error") || strings.Contains(logContent, "panic") || strings.Contains(logContent, "fatal") { + result.IsWarn = true + result.Message = fmt.Sprintf("Found error|panic|fatal in %s pod log file(s)", kind) + results = append(results, result) + } + } + + results = append(results, &AnalyzeResult{ + Title: fmt.Sprintf("Velero Logs analysis for kind [%s]", kind), + IsPass: true, + Message: fmt.Sprintf("Found %d log files", len(logs)), + }) + } + return results +} + +func aggregateResults(results []*AnalyzeResult) []*AnalyzeResult { + out := []*AnalyzeResult{} + resultFailed := false + for _, result := range results { + if result.IsFail { + resultFailed = true + } + out = append(out, result) + } + if len(results) > 0 { + if resultFailed == false { + out = append(out, &AnalyzeResult{ + Title: "Velero Status", + IsPass: true, + Message: "Velero setup is healthy", + }) + } + if resultFailed == true { + out = append(out, &AnalyzeResult{ + Title: "Velero Status", + IsWarn: true, + Message: "Velero setup is not entirely healthy", + }) + } + } + + return out +} + +func getVeleroVersion(excludedFiles []string, findFiles getChildCollectedFileContents) (string, error) { + veleroDeploymentDir := "cluster-resources/deployments" + veleroVersion := "" + veleroDeploymentGlob := filepath.Join(veleroDeploymentDir, "velero.json") + 
veleroDeploymentJson, err := findFiles(veleroDeploymentGlob, excludedFiles) + if err != nil { + return "", errors.Wrapf(err, "failed to find velero deployment file under %s", veleroDeploymentDir) + } + var deploymentList *appsV1.DeploymentList + // should run only once + for key, veleroDeploymentJsonBytes := range veleroDeploymentJson { + err := json.Unmarshal(veleroDeploymentJsonBytes, &deploymentList) + if err != nil { + return "", errors.Wrapf(err, "failed to unmarshal velero deployment json from %s", key) + } + break + } + for _, deployment := range deploymentList.Items { + for _, container := range deployment.Spec.Template.Spec.Containers { + if container.Name == "velero" { + container_image := container.Image + veleroVersion = strings.Split(container_image, ":")[1] + return veleroVersion, nil + } + } + } + + return "", errors.Errorf("Unable to get velero version. Could not find velero container in deployment!") +} + +func GetVeleroBackupsDirectory() string { + return "cluster-resources/custom-resources/backups.velero.io" +} + +func GetVeleroBackupRepositoriesDirectory() string { + return "cluster-resources/custom-resources/backuprepositories.velero.io" +} + +func GetVeleroBackupStorageLocationsDirectory() string { + return "cluster-resources/custom-resources/backupstoragelocations.velero.io" +} + +func GetVeleroDeleteBackupRequestsDirectory() string { + return "cluster-resources/custom-resources/deletebackuprequests.velero.io" +} + +func GetVeleroDownloadRequestsDirectory() string { + return "cluster-resources/custom-resources/downloadrequests.velero.io" +} + +func GetVeleroLogsDirectory() string { + return "velero/logs" +} + +func GetVeleroPodVolumeBackupsDirectory() string { + return "cluster-resources/custom-resources/podvolumebackups.velero.io" +} + +func GetVeleroPodVolumeRestoresDirectory() string { + return "cluster-resources/custom-resources/podvolumerestores.velero.io" +} + +func GetVeleroRestoresDirectory() string { + return 
// GetVeleroSchedulesDirectory returns the support-bundle path holding the
// collected schedules.velero.io custom resources.
func GetVeleroSchedulesDirectory() string {
	return "cluster-resources/custom-resources/schedules.velero.io"
}

// GetVeleroServerStatusRequestsDirectory returns the support-bundle path
// holding the collected serverstatusrequests.velero.io custom resources.
func GetVeleroServerStatusRequestsDirectory() string {
	return "cluster-resources/custom-resources/serverstatusrequests.velero.io"
}

// GetVeleroVolumeSnapshotLocationsDirectory returns the support-bundle path
// holding the collected volumesnapshotlocations.velero.io custom resources.
func GetVeleroVolumeSnapshotLocationsDirectory() string {
	return "cluster-resources/custom-resources/volumesnapshotlocations.velero.io"
}

// GetVeleroResticRepositoriesDirectory returns the support-bundle path
// holding the collected resticrepositories.velero.io custom resources
// (pre-v1.10 Velero restic repositories).
func GetVeleroResticRepositoriesDirectory() string {
	return "cluster-resources/custom-resources/resticrepositories.velero.io"
}
backup repositories configured and 1 Ready", + IsPass: true, + }, + }, + }, + { + name: "2 backup repositories and 1 Ready", + args: args{ + backupRepositories: []*velerov1.BackupRepository{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "default-default-restic-245sd", + Namespace: "velero", + }, + Spec: velerov1.BackupRepositorySpec{ + BackupStorageLocation: "default", + VolumeNamespace: "velero", + }, + Status: velerov1.BackupRepositoryStatus{ + Phase: velerov1.BackupRepositoryPhaseReady, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "builders-default-restic-jdtd8", + Namespace: "velero", + }, + Spec: velerov1.BackupRepositorySpec{ + BackupStorageLocation: "builders-default-restic-jdtd8", + VolumeNamespace: "velero", + }, + Status: velerov1.BackupRepositoryStatus{ + Phase: velerov1.BackupRepositoryPhaseNotReady, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Backup Repository builders-default-restic-jdtd8", + Message: "Backup Repository [builders-default-restic-jdtd8] is in phase NotReady", + IsWarn: true, + }, + { + Title: "At least 1 Backup Repository configured", + Message: "Found 2 backup repositories configured and 1 Ready", + IsPass: true, + }, + }, + }, + { + name: "1 backup repository and none Ready", + args: args{ + backupRepositories: []*velerov1.BackupRepository{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "velero", + }, + Spec: velerov1.BackupRepositorySpec{ + BackupStorageLocation: "default", + VolumeNamespace: "velero", + }, + Status: velerov1.BackupRepositoryStatus{ + Phase: velerov1.BackupRepositoryPhaseNotReady, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Backup Repository default", + Message: "Backup Repository [default] is in phase NotReady", + IsWarn: true, + }, + { + Title: "At least 1 Backup Repository configured", + Message: "Found 1 configured backup repositories, but none are ready", + IsWarn: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + if got := analyzeBackupRepositories(tt.args.backupRepositories); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzeBackupRepositories() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_ResticRepositories(t *testing.T) { + type args struct { + resticRepositories []*restic_types.ResticRepository + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no restic repositories", + args: args{ + resticRepositories: []*restic_types.ResticRepository{}, + }, + want: []*AnalyzeResult{ + { + Title: "At least 1 Restic Repository configured", + Message: "No restic repositories configured", + IsFail: true, + }, + }, + }, + { + name: "1 restic repository and 1 Ready", + args: args{ + resticRepositories: []*restic_types.ResticRepository{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "default-default-restic-245sd", + Namespace: "velero", + }, + Spec: restic_types.ResticRepositorySpec{ + BackupStorageLocation: "default", + VolumeNamespace: "velero", + }, + Status: restic_types.ResticRepositoryStatus{ + Phase: restic_types.ResticRepositoryPhaseReady, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "At least 1 Restic Repository configured", + Message: "Found 1 restic repositories configured and 1 Ready", + IsPass: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := analyzeResticRepositories(tt.args.resticRepositories); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzeResticRepositories() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_Backups(t *testing.T) { + type args struct { + backups []*velerov1.Backup + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no backups", + args: args{ + backups: []*velerov1.Backup{}, + }, + want: []*AnalyzeResult{}, + }, + { + name: "1 backup and 1 Completed", + args: args{ + backups: []*velerov1.Backup{ + { + ObjectMeta: metav1.ObjectMeta{ + 
Name: "observability-backup", + Namespace: "velero", + }, + Spec: velerov1.BackupSpec{ + IncludedNamespaces: []string{"monitoring"}, + }, + Status: velerov1.BackupStatus{ + Phase: velerov1.BackupPhaseCompleted, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Velero Backups", + Message: "Found 1 backups", + IsPass: true, + }, + }, + }, + { + name: "1 backup and 1 Failed", + args: args{ + backups: []*velerov1.Backup{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "application-backup", + Namespace: "velero", + }, + Spec: velerov1.BackupSpec{ + IncludedNamespaces: []string{"shazam"}, + }, + Status: velerov1.BackupStatus{ + Phase: velerov1.BackupPhaseFailed, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Backup application-backup", + Message: "Backup application-backup phase is Failed", + IsFail: true, + }, + { + Title: "Velero Backups", + Message: "Found 1 backups", + IsPass: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := analyzeBackups(tt.args.backups); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzeBackups() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_BackupStorageLocations(t *testing.T) { + type args struct { + backupStorageLocations []*velerov1.BackupStorageLocation + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no backup storage locations", + args: args{ + backupStorageLocations: []*velerov1.BackupStorageLocation{}, + }, + want: []*AnalyzeResult{ + { + Title: "At least 1 Backup Storage Location configured", + Message: "No backup storage locations configured", + IsFail: true, + }, + }, + }, + { + name: "1 backup storage location and 1 Available", + args: args{ + backupStorageLocations: []*velerov1.BackupStorageLocation{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "velero", + }, + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "aws", + }, + Status: 
velerov1.BackupStorageLocationStatus{ + Phase: velerov1.BackupStorageLocationPhaseAvailable, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "At least 1 Backup Storage Location configured", + Message: "Found 1 backup storage locations configured and 1 Available", + IsPass: true, + }, + }, + }, + { + name: "1 backup storage location and 1 Unavailable", + args: args{ + backupStorageLocations: []*velerov1.BackupStorageLocation{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "velero", + }, + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "aws", + }, + Status: velerov1.BackupStorageLocationStatus{ + Phase: velerov1.BackupStorageLocationPhaseUnavailable, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Backup Storage Location default", + Message: "Backup Storage Location [default] is in phase Unavailable", + IsWarn: true, + }, + { + Title: "At least 1 Backup Storage Location configured", + Message: "Found 1 configured backup storage locations, but none are available", + IsWarn: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := analyzeBackupStorageLocations(tt.args.backupStorageLocations); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzeBackupStorageLocations() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_DeleteBackupRequests(t *testing.T) { + type args struct { + deleteBackupRequests []*velerov1.DeleteBackupRequest + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no backup delete requests", + args: args{ + deleteBackupRequests: []*velerov1.DeleteBackupRequest{}, + }, + want: []*AnalyzeResult{}, + }, + { + name: "backup delete requests completed", + args: args{ + deleteBackupRequests: []*velerov1.DeleteBackupRequest{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "observability-backup-20210308150016", + Namespace: "velero", + }, + Spec: velerov1.DeleteBackupRequestSpec{ + BackupName: 
"observability-backup", + }, + Status: velerov1.DeleteBackupRequestStatus{ + Phase: velerov1.DeleteBackupRequestPhaseProcessed, + }, + }, + }, + }, + want: []*AnalyzeResult{}, + }, + { + name: "backup delete requests summarize in progress", + args: args{ + deleteBackupRequests: []*velerov1.DeleteBackupRequest{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "observability-backup-20210308150016", + Namespace: "velero", + }, + Spec: velerov1.DeleteBackupRequestSpec{ + BackupName: "observability-backup", + }, + Status: velerov1.DeleteBackupRequestStatus{ + Phase: velerov1.DeleteBackupRequestPhaseInProgress, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Delete Backup Requests summary", + Message: "Found 1 delete backup requests in progress", + IsWarn: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := analyzeDeleteBackupRequests(tt.args.deleteBackupRequests); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzeDeleteBackupRequests() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_PodVolumeBackups(t *testing.T) { + type args struct { + podVolumeBackups []*velerov1.PodVolumeBackup + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no pod volume backups", + args: args{ + podVolumeBackups: []*velerov1.PodVolumeBackup{}, + }, + want: []*AnalyzeResult{}, + }, + { + name: "pod volume backups", + args: args{ + podVolumeBackups: []*velerov1.PodVolumeBackup{ + { + Spec: velerov1.PodVolumeBackupSpec{ + Node: "test-node-1", + Pod: corev1.ObjectReference{ + Kind: "Pod", + Name: "kotsadm-76ddbc96c4-fsr88", + Namespace: "default", + }, + Volume: "backup", + BackupStorageLocation: "default", + }, + Status: velerov1.PodVolumeBackupStatus{ + Phase: velerov1.PodVolumeBackupPhaseCompleted, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Pod Volume Backups", + Message: "Found 1 pod volume backups", + IsPass: true, + }, + }, + }, + } + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + if got := analyzePodVolumeBackups(tt.args.podVolumeBackups); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzePodVolumeBackups() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_PodVolumeRestores(t *testing.T) { + type args struct { + podVolumeRestores []*velerov1.PodVolumeRestore + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no pod volume restores", + args: args{ + podVolumeRestores: []*velerov1.PodVolumeRestore{}, + }, + want: []*AnalyzeResult{}, + }, + { + name: "pod volume restores - no failures", + args: args{ + podVolumeRestores: []*velerov1.PodVolumeRestore{ + { + Spec: velerov1.PodVolumeRestoreSpec{ + Pod: corev1.ObjectReference{ + Kind: "Pod", + Name: "kotsadm-76ddbc96c4-fsr88", + Namespace: "default", + }, + Volume: "backup", + BackupStorageLocation: "default", + }, + Status: velerov1.PodVolumeRestoreStatus{ + Phase: velerov1.PodVolumeRestorePhaseCompleted, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Pod Volume Restores", + Message: "Found 1 pod volume restores", + IsPass: true, + }, + }, + }, + { + name: "pod volume restores - failures", + args: args{ + podVolumeRestores: []*velerov1.PodVolumeRestore{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "observability-backup-20210308150016", + Namespace: "velero", + }, + Spec: velerov1.PodVolumeRestoreSpec{ + Pod: corev1.ObjectReference{ + Kind: "Pod", + Name: "kotsadm-76ddbc96c4-fsr88", + Namespace: "default", + }, + Volume: "backup", + BackupStorageLocation: "default", + }, + Status: velerov1.PodVolumeRestoreStatus{ + Phase: velerov1.PodVolumeRestorePhaseFailed, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Pod Volume Restore observability-backup-20210308150016", + Message: "Pod Volume Restore observability-backup-20210308150016 phase is Failed", + IsFail: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if 
got := analyzePodVolumeRestores(tt.args.podVolumeRestores); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzePodVolumeRestores() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_Restores(t *testing.T) { + type args struct { + restores []*velerov1.Restore + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no restores", + args: args{ + restores: []*velerov1.Restore{}, + }, + want: []*AnalyzeResult{}, + }, + { + name: "restores completed", + args: args{ + restores: []*velerov1.Restore{ + { + Spec: velerov1.RestoreSpec{ + BackupName: "observability-backup", + }, + Status: velerov1.RestoreStatus{ + Phase: velerov1.RestorePhaseCompleted, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Velero Restores", + Message: "Found 1 restores", + IsPass: true, + }, + }, + }, + { + name: "restores - failures", + args: args{ + restores: []*velerov1.Restore{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "observability-backup-20210308150016", + Namespace: "velero", + }, + Spec: velerov1.RestoreSpec{ + BackupName: "observability-backup", + }, + Status: velerov1.RestoreStatus{ + Phase: velerov1.RestorePhaseWaitingForPluginOperationsPartiallyFailed, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Restore observability-backup-20210308150016", + Message: "Restore observability-backup-20210308150016 phase is WaitingForPluginOperationsPartiallyFailed", + IsFail: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := analyzeRestores(tt.args.restores); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzeRestores() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_Schedules(t *testing.T) { + type args struct { + schedules []*velerov1.Schedule + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no schedules", + args: args{ + schedules: []*velerov1.Schedule{}, + }, + want: []*AnalyzeResult{}, 
+ }, + { + name: "schedules configured", + args: args{ + schedules: []*velerov1.Schedule{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "daily-backup", + Namespace: "velero", + }, + Spec: velerov1.ScheduleSpec{ + Schedule: "0 0 * * *", + Template: velerov1.BackupSpec{ + StorageLocation: "default", + IncludedNamespaces: []string{ + "default", + }, + IncludedResources: []string{ + "*", + }, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "velero", + }, + }, + }, + }, + Status: velerov1.ScheduleStatus{ + Phase: velerov1.SchedulePhaseEnabled, + LastBackup: &metav1.Time{ + Time: time.Date(2023, 3, 8, 15, 0, 16, 0, time.UTC), + }, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Velero Schedules", + Message: "Found 1 schedules", + IsPass: true, + }, + }, + }, + { + name: "schedules - failures", + args: args{ + schedules: []*velerov1.Schedule{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "daily-backup", + Namespace: "velero", + }, + Spec: velerov1.ScheduleSpec{ + Schedule: "0 0 * * *", + Template: velerov1.BackupSpec{ + StorageLocation: "default", + IncludedNamespaces: []string{ + "default", + }, + IncludedResources: []string{ + "*", + }, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "velero", + }, + }, + }, + }, + Status: velerov1.ScheduleStatus{ + Phase: velerov1.SchedulePhaseFailedValidation, + LastBackup: &metav1.Time{ + Time: time.Date(2023, 3, 8, 15, 0, 16, 0, time.UTC), + }, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Schedule daily-backup", + Message: "Schedule daily-backup phase is FailedValidation", + IsFail: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := analyzeSchedules(tt.args.schedules); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzeSchedules() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_VolumeSnapshotLocations(t *testing.T) { + type args struct { + 
volumeSnapshotLocations []*velerov1.VolumeSnapshotLocation + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no volume snapshot locations", + args: args{ + volumeSnapshotLocations: []*velerov1.VolumeSnapshotLocation{}, + }, + want: []*AnalyzeResult{}, + }, + { + name: "volume snapshot locations configured", + args: args{ + volumeSnapshotLocations: []*velerov1.VolumeSnapshotLocation{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "velero", + }, + Spec: velerov1.VolumeSnapshotLocationSpec{ + Provider: "aws", + Config: map[string]string{ + "region": "us-east-1", + }, + }, + Status: velerov1.VolumeSnapshotLocationStatus{ + Phase: velerov1.VolumeSnapshotLocationPhaseAvailable, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Velero Volume Snapshot Locations", + Message: "Found 1 volume snapshot locations", + IsPass: true, + }, + }, + }, + { + name: "volume snapshot locations - failures", + args: args{ + volumeSnapshotLocations: []*velerov1.VolumeSnapshotLocation{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "velero", + }, + Spec: velerov1.VolumeSnapshotLocationSpec{ + Provider: "aws", + Config: map[string]string{ + "region": "us-east-1", + }, + }, + Status: velerov1.VolumeSnapshotLocationStatus{ + Phase: velerov1.VolumeSnapshotLocationPhaseUnavailable, + }, + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "Volume Snapshot Location default", + Message: "Volume Snapshot Location default phase is Unavailable", + IsFail: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := analyzeVolumeSnapshotLocations(tt.args.volumeSnapshotLocations); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzeVolumeSnapshotLocations() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAnalyzeVelero_Logs(t *testing.T) { + type args struct { + logs map[string][]byte + kind string + } + tests := []struct { + name string + args args + 
want []*AnalyzeResult + }{ + { + name: "no logs", + args: args{ + logs: map[string][]byte{}, + kind: "node-agent*", + }, + want: []*AnalyzeResult{}, + }, + { + name: "logs - no errors in node-agent* pods", + args: args{ + logs: map[string][]byte{ + "node-agent-m6n9j": []byte("level=info msg=... backup=velero/sample-app controller=podvolumebacku"), + }, + kind: "node-agent*", + }, + want: []*AnalyzeResult{ + { + Title: "Velero Logs analysis for kind [node-agent*]", + Message: "Found 1 log files", + IsPass: true, + }, + }, + }, + { + name: "logs - no errors in velero* pods", + args: args{ + logs: map[string][]byte{ + "velero-788ff7c9dd-mslfl": []byte("level=info msg=BackupStorageLocations... controller=backup-storage-location"), + }, + kind: "velero*", + }, + want: []*AnalyzeResult{ + { + Title: "Velero Logs analysis for kind [velero*]", + Message: "Found 1 log files", + IsPass: true, + }, + }, + }, + { + name: "logs - errors in node-agent* pods", + args: args{ + logs: map[string][]byte{ + "node-agent-m6n9j": []byte("level=error msg=... 
backup=velero/sample-app controller=podvolumebacku"), + }, + kind: "node-agent*", + }, + want: []*AnalyzeResult{ + { + Title: "Velero logs for pod [node-agent-m6n9j]", + Message: "Found error|panic|fatal in node-agent* pod log file(s)", + IsWarn: true, + }, + { + Title: "Velero Logs analysis for kind [node-agent*]", + Message: "Found 1 log files", + IsPass: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := analyzeLogs(tt.args.logs, tt.args.kind); !reflect.DeepEqual(got, tt.want) { + t.Errorf("analyzeLogs() = %v, want %v", got, tt.want) + gotJSON, _ := json.MarshalIndent(got, "", " ") + wantJSON, _ := json.MarshalIndent(tt.want, "", " ") + t.Logf("\nGot: %s\nWant: %s", gotJSON, wantJSON) + } + }) + } +} + +func TestAnalyzeVelero_Results(t *testing.T) { + type args struct { + results []*AnalyzeResult + } + tests := []struct { + name string + args args + want []*AnalyzeResult + }{ + { + name: "no results", + args: args{ + results: []*AnalyzeResult{}, + }, + want: []*AnalyzeResult{}, + }, + { + name: "results - pass", + args: args{ + results: []*AnalyzeResult{ + { + Title: "random Velero CRD check", + IsPass: true, + Message: "CRD status is healthy", + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "random Velero CRD check", + IsPass: true, + Message: "CRD status is healthy", + }, + { + Title: "Velero Status", + IsPass: true, + Message: "Velero setup is healthy", + }, + }, + }, + { + name: "results - fail", + args: args{ + results: []*AnalyzeResult{ + { + Title: "random Velero CRD check failure", + IsFail: true, + Message: "CRD status - Failed", + }, + }, + }, + want: []*AnalyzeResult{ + { + Title: "random Velero CRD check failure", + IsFail: true, + Message: "CRD status - Failed", + }, + { + Title: "Velero Status", + IsWarn: true, + Message: "Velero setup is not entirely healthy", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := 
aggregateResults(tt.args.results); !reflect.DeepEqual(got, tt.want) { + t.Errorf("aggregateResults() = %v, want %v", got, tt.want) + gotJSON, _ := json.MarshalIndent(got, "", " ") + wantJSON, _ := json.MarshalIndent(tt.want, "", " ") + t.Logf("\nGot: %s\nWant: %s", gotJSON, wantJSON) + } + }) + } +} diff --git a/pkg/apis/troubleshoot/v1beta2/analyzer_shared.go b/pkg/apis/troubleshoot/v1beta2/analyzer_shared.go index 4cb171d30..62a2a2527 100644 --- a/pkg/apis/troubleshoot/v1beta2/analyzer_shared.go +++ b/pkg/apis/troubleshoot/v1beta2/analyzer_shared.go @@ -186,6 +186,10 @@ type CephStatusAnalyze struct { Namespace string `json:"namespace" yaml:"namespace"` } +type VeleroAnalyze struct { + AnalyzeMeta `json:",inline" yaml:",inline"` +} + type LonghornAnalyze struct { AnalyzeMeta `json:",inline" yaml:",inline"` Outcomes []*Outcome `json:"outcomes" yaml:"outcomes"` @@ -245,6 +249,7 @@ type Analyze struct { Mysql *DatabaseAnalyze `json:"mysql,omitempty" yaml:"mysql,omitempty"` Redis *DatabaseAnalyze `json:"redis,omitempty" yaml:"redis,omitempty"` CephStatus *CephStatusAnalyze `json:"cephStatus,omitempty" yaml:"cephStatus,omitempty"` + Velero *VeleroAnalyze `json:"velero,omitempty" yaml:"velero,omitempty"` Longhorn *LonghornAnalyze `json:"longhorn,omitempty" yaml:"longhorn,omitempty"` RegistryImages *RegistryImagesAnalyze `json:"registryImages,omitempty" yaml:"registryImages,omitempty"` WeaveReport *WeaveReportAnalyze `json:"weaveReport,omitempty" yaml:"weaveReport,omitempty"` diff --git a/pkg/apis/troubleshoot/v1beta2/zz_generated.deepcopy.go b/pkg/apis/troubleshoot/v1beta2/zz_generated.deepcopy.go index a5fe91e8d..9d388f7d6 100644 --- a/pkg/apis/troubleshoot/v1beta2/zz_generated.deepcopy.go +++ b/pkg/apis/troubleshoot/v1beta2/zz_generated.deepcopy.go @@ -169,6 +169,11 @@ func (in *Analyze) DeepCopyInto(out *Analyze) { *out = new(CephStatusAnalyze) (*in).DeepCopyInto(*out) } + if in.Velero != nil { + in, out := &in.Velero, &out.Velero + *out = new(VeleroAnalyze) + 
(*in).DeepCopyInto(*out) + } if in.Longhorn != nil { in, out := &in.Longhorn, &out.Longhorn *out = new(LonghornAnalyze) @@ -4654,6 +4659,22 @@ func (in *UDPPortStatusAnalyze) DeepCopy() *UDPPortStatusAnalyze { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VeleroAnalyze) DeepCopyInto(out *VeleroAnalyze) { + *out = *in + in.AnalyzeMeta.DeepCopyInto(&out.AnalyzeMeta) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VeleroAnalyze. +func (in *VeleroAnalyze) DeepCopy() *VeleroAnalyze { + if in == nil { + return nil + } + out := new(VeleroAnalyze) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WeaveReportAnalyze) DeepCopyInto(out *WeaveReportAnalyze) { *out = *in diff --git a/pkg/supportbundle/test/velero.yaml b/pkg/supportbundle/test/velero.yaml new file mode 100644 index 000000000..41d89c9e5 --- /dev/null +++ b/pkg/supportbundle/test/velero.yaml @@ -0,0 +1,11 @@ +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: + name: velero +spec: + collectors: + - logs: + namespace: velero + name: velero/logs + analyzers: + - velero: {} diff --git a/pkg/types/types.go b/pkg/types/types.go index 3809912c4..d710362a7 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -1,6 +1,8 @@ package types -import "fmt" +import ( + "fmt" +) type NotFoundError struct { Name string diff --git a/schemas/analyzer-troubleshoot-v1beta2.json b/schemas/analyzer-troubleshoot-v1beta2.json index 023101929..592a4c94e 100644 --- a/schemas/analyzer-troubleshoot-v1beta2.json +++ b/schemas/analyzer-troubleshoot-v1beta2.json @@ -2255,6 +2255,26 @@ } } }, + "velero": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "checkName": { + "type": "string" 
+ }, + "exclude": { + "oneOf": [{"type": "string"},{"type": "boolean"}] + }, + "strict": { + "oneOf": [{"type": "string"},{"type": "boolean"}] + } + } + }, "weaveReport": { "type": "object", "required": [ diff --git a/schemas/preflight-troubleshoot-v1beta2.json b/schemas/preflight-troubleshoot-v1beta2.json index 4e2ee222b..db19376bd 100644 --- a/schemas/preflight-troubleshoot-v1beta2.json +++ b/schemas/preflight-troubleshoot-v1beta2.json @@ -2255,6 +2255,26 @@ } } }, + "velero": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "checkName": { + "type": "string" + }, + "exclude": { + "oneOf": [{"type": "string"},{"type": "boolean"}] + }, + "strict": { + "oneOf": [{"type": "string"},{"type": "boolean"}] + } + } + }, "weaveReport": { "type": "object", "required": [ diff --git a/schemas/supportbundle-troubleshoot-v1beta2.json b/schemas/supportbundle-troubleshoot-v1beta2.json index c667cb53b..0fc868784 100644 --- a/schemas/supportbundle-troubleshoot-v1beta2.json +++ b/schemas/supportbundle-troubleshoot-v1beta2.json @@ -2301,6 +2301,26 @@ } } }, + "velero": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "checkName": { + "type": "string" + }, + "exclude": { + "oneOf": [{"type": "string"},{"type": "boolean"}] + }, + "strict": { + "oneOf": [{"type": "string"},{"type": "boolean"}] + } + } + }, "weaveReport": { "type": "object", "required": [