diff --git a/go.mod b/go.mod
index 1a2701df8..1888465a6 100644
--- a/go.mod
+++ b/go.mod
@@ -2,16 +2,38 @@ module github.com/uyuni-project/uyuni-tools
 
 go 1.21
 
+toolchain go1.21.1
+
 require (
 	github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2
 	github.com/briandowns/spinner v1.23.0
 	github.com/chai2010/gettext-go v1.0.2
 	github.com/spf13/cobra v1.8.0
+	k8s.io/api v0.29.7
+	k8s.io/apimachinery v0.29.7
+	k8s.io/cli-runtime v0.29.7
 )
 
 require (
-	github.com/creack/pty v1.1.17 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/creack/pty v1.1.18 // indirect
 	github.com/fatih/color v1.7.0 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+	github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	golang.org/x/net v0.23.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	k8s.io/client-go v0.29.7 // indirect
+	k8s.io/klog/v2 v2.110.1 // indirect
+	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
 )
 
 require (
@@ -30,9 +52,9 @@ require (
 	github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper v1.7.0
 	github.com/subosito/gotenv v1.2.0 // indirect
-	golang.org/x/sys v0.22.0 // indirect
-	golang.org/x/term v0.22.0
-	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/term v0.18.0
+	golang.org/x/text v0.14.0 // indirect
 	gopkg.in/ini.v1 v1.51.0 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 	gopkg.in/yaml.v2 v2.4.0
diff --git a/go.sum b/go.sum
index d4a59e229..1887571d4 100644
--- a/go.sum
+++ b/go.sum
@@ -11,6 +11,8 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -39,8 +41,9 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -55,10 +58,14 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -71,6 +78,12 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -108,19 +121,26 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
+github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
 github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -142,8 +162,14 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -166,6 +192,8 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c=
 github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w=
@@ -196,12 +224,15 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -213,6 +244,8 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -231,6 +264,8 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
 golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -245,6 +280,10 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -253,6 +292,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -266,19 +307,22 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
-golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -298,7 +342,13 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
 google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -322,9 +372,12 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
 gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
@@ -333,6 +386,7 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -342,4 +396,22 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+k8s.io/api v0.29.7 h1:Q2/thp7YYESgy0MGzxT9RvA/6doLJHBXSFH8GGLxSbc=
+k8s.io/api v0.29.7/go.mod h1:mPimdbyuIjwoLtBEVIGVUYb4BKOE+44XHt/n4IqKsLA=
+k8s.io/apimachinery v0.29.7 h1:ICXzya58Q7hyEEfnTrbmdfX1n1schSepX2KUfC2/ykc=
+k8s.io/apimachinery v0.29.7/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y=
+k8s.io/cli-runtime v0.29.7 h1:6IxyxaIm3N31+PKXb1K7Tpf+100mm9hd9HMMYWMH2QE=
+k8s.io/cli-runtime v0.29.7/go.mod h1:0pcclC4k3rkzYNAvw3zeiPNtg8Buv0orK+5MuhEKFBU=
+k8s.io/client-go v0.29.7 h1:vTtiFrGBKlcBhxaeZC4eDrqui1e108nsTyue/KU63IY=
+k8s.io/client-go v0.29.7/go.mod h1:69BvVqdRozgR/9TP45u/oO0tfrdbP+I8RqrcCJQshzg=
+k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
+k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
+k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
+k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/mgradm/cmd/inspect/kubernetes.go b/mgradm/cmd/inspect/kubernetes.go
index eaf363872..7631b460d 100644
--- a/mgradm/cmd/inspect/kubernetes.go
+++ b/mgradm/cmd/inspect/kubernetes.go
@@ -13,9 +13,8 @@ import (
 	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 
-	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
 	"github.com/uyuni-project/uyuni-tools/shared"
-	shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
 	"github.com/uyuni-project/uyuni-tools/shared/types"
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
@@ -32,11 +31,11 @@ func kuberneteInspect(
 		return utils.Errorf(err, L("failed to determine image"))
 	}
 
-	cnx := shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter)
+	cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter)
 	if len(serverImage) <= 0 {
 		log.Debug().Msg("Use deployed image")
-		serverImage, err = adm_utils.RunningImage(cnx)
+		serverImage, err = kubernetes.GetRunningImage("uyuni")
 		if err != nil {
 			return errors.New(L("failed to find the image of the currently running server container: %s"))
 		}
@@ -46,7 +45,14 @@ func kuberneteInspect(
 	if err != nil {
 		return utils.Errorf(err, L("failed retrieving namespace"))
 	}
-	inspectResult, err := shared_kubernetes.InspectKubernetes(namespace, serverImage, flags.Image.PullPolicy)
+
+	// Get the SCC credentials secret if it exists
+	pullSecret, err := kubernetes.GetRegistrySecret(namespace, &types.SCCCredentials{}, kubernetes.ServerApp)
+	if err != nil {
+		return err
+	}
+
+	inspectResult, err := kubernetes.InspectServer(namespace, serverImage, flags.Image.PullPolicy, pullSecret)
 	if err != nil {
 		return utils.Errorf(err, L("inspect command failed"))
 	}
diff --git a/mgradm/cmd/inspect/podman.go b/mgradm/cmd/inspect/podman.go
index 9202fb349..90f85c5ac 100644
--- a/mgradm/cmd/inspect/podman.go
+++ b/mgradm/cmd/inspect/podman.go
@@ -9,10 +9,8 @@ import (
 	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 
-	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
-	"github.com/uyuni-project/uyuni-tools/shared"
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/l10n" - shared_podman "github.com/uyuni-project/uyuni-tools/shared/podman" + "github.com/uyuni-project/uyuni-tools/shared/podman" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) @@ -31,13 +29,12 @@ func podmanInspect( if len(serverImage) <= 0 { log.Debug().Msg("Use deployed image") - cnx := shared.NewConnection("podman", shared_podman.ServerContainerName, "") - serverImage, err = adm_utils.RunningImage(cnx) + serverImage, err = podman.GetRunningImage(podman.ServerContainerName) if err != nil { return utils.Errorf(err, L("failed to find the image of the currently running server container")) } } - inspectResult, err := shared_podman.Inspect(serverImage, flags.Image.PullPolicy, flags.SCC) + inspectResult, err := podman.Inspect(serverImage, flags.Image.PullPolicy, flags.SCC) if err != nil { return utils.Errorf(err, L("inspect command failed")) } diff --git a/mgradm/cmd/install/kubernetes/kubernetes.go b/mgradm/cmd/install/kubernetes/kubernetes.go index dd3bca7a5..dad793305 100644 --- a/mgradm/cmd/install/kubernetes/kubernetes.go +++ b/mgradm/cmd/install/kubernetes/kubernetes.go @@ -10,18 +10,14 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) -type kubernetesInstallFlags struct { - shared.InstallFlags `mapstructure:",squash"` - Helm cmd_utils.HelmFlags -} - -func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetesInstallFlags]) *cobra.Command { +func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command { cmd := &cobra.Command{ Use: "kubernetes [fqdn]", Short: L("Install a new server on a kubernetes cluster"), @@ -37,10 +33,10 @@ NOTE: installing on a remote cluster is not supported yet! `), Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - var flags kubernetesInstallFlags + var flags kubernetes.KubernetesServerFlags flagsUpdater := func(v *viper.Viper) { - flags.InstallFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.InstallFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, @@ -48,6 +44,7 @@ NOTE: installing on a remote cluster is not supported yet! shared.AddInstallFlags(cmd) cmd_utils.AddHelmInstallFlag(cmd) + cmd_utils.AddVolumesFlags(cmd) return cmd } @@ -55,3 +52,13 @@ NOTE: installing on a remote cluster is not supported yet! 
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command { return newCmd(globalFlags, installForKubernetes) } + +func installForKubernetes( + _ *types.GlobalFlags, + flags *kubernetes.KubernetesServerFlags, + cmd *cobra.Command, + args []string, +) error { + flags.Installation.CheckParameters(cmd, "kubectl") + return kubernetes.Reconcile(flags, args[0]) +} diff --git a/mgradm/cmd/install/kubernetes/kubernetes_test.go b/mgradm/cmd/install/kubernetes/kubernetes_test.go index 84139cba1..003035381 100644 --- a/mgradm/cmd/install/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/install/kubernetes/kubernetes_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/spf13/cobra" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" "github.com/uyuni-project/uyuni-tools/shared/testutils" "github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -17,15 +18,17 @@ import ( func TestParamsParsing(t *testing.T) { args := flagstests.InstallFlagsTestArgs() - args = append(args, flagstests.ServerHelmFlagsTestArgs...) + args = append(args, flagstests.ServerKubernetesFlagsTestArgs...) + args = append(args, flagstests.VolumesFlagsTestExpected...) args = append(args, "srv.fq.dn") // Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *kubernetesInstallFlags, + tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, _ *cobra.Command, args []string, ) error { - flagstests.AssertInstallFlags(t, &flags.InstallFlags) - flagstests.AssertServerHelmFlags(t, &flags.Helm) + flagstests.AssertInstallFlags(t, &flags.ServerFlags) + flagstests.AssertServerKubernetesFlags(t, &flags.Kubernetes) + flagstests.AssertVolumesFlags(t, &flags.Volumes) testutils.AssertEquals(t, "Wrong FQDN", "srv.fq.dn", args[0]) return nil } diff --git a/mgradm/cmd/install/kubernetes/utils.go b/mgradm/cmd/install/kubernetes/utils.go deleted file mode 100644 index ff0b56514..000000000 --- a/mgradm/cmd/install/kubernetes/utils.go +++ /dev/null @@ -1,106 +0,0 @@ -// SPDX-FileCopyrightText: 2024 SUSE LLC -// -// SPDX-License-Identifier: Apache-2.0 - -//go:build !nok8s - -package kubernetes - -import ( - "fmt" - "os/exec" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - "github.com/spf13/cobra" - install_shared "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared" - "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" - adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - "github.com/uyuni-project/uyuni-tools/shared" - shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes" - . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/types" - shared_utils "github.com/uyuni-project/uyuni-tools/shared/utils" -) - -func installForKubernetes(_ *types.GlobalFlags, - flags *kubernetesInstallFlags, - cmd *cobra.Command, - args []string, -) error { - for _, binary := range []string{"kubectl", "helm"} { - if _, err := exec.LookPath(binary); err != nil { - return fmt.Errorf(L("install %s before running this command"), binary) - } - } - - flags.CheckParameters(cmd, "kubectl") - cnx := shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) - - fqdn := args[0] - - if err := shared_utils.IsValidFQDN(fqdn); err != nil { - return err - } - - helmArgs := []string{"--set", "timezone=" + flags.TZ} - if flags.Mirror != "" { - // TODO Handle claims for multi-node clusters - helmArgs = append(helmArgs, "--set", "mirror.hostPath="+flags.Mirror) - } - if flags.Debug.Java { - helmArgs = append(helmArgs, "--set", "exposeJavaDebug=true") - } - - // Check the kubernetes cluster setup - clusterInfos, err := shared_kubernetes.CheckCluster() - if err != nil { - return err - } - - // Deploy the SSL CA or server certificate - ca := types.SSLPair{} - sslArgs, err := kubernetes.DeployCertificate(&flags.Helm, &flags.SSL, "", &ca, clusterInfos.GetKubeconfig(), fqdn, - flags.Image.PullPolicy) - if err != nil { - return shared_utils.Errorf(err, L("cannot deploy certificate")) - } - helmArgs = append(helmArgs, sslArgs...) - - // Create a secret using SCC credentials if any are provided - helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Uyuni.Namespace, &flags.SCC) - if err != nil { - return err - } - - // Deploy Uyuni and wait for it to be up - if err := kubernetes.Deploy(cnx, flags.Image.Registry, &flags.Image, &flags.Helm, - clusterInfos, fqdn, flags.Debug.Java, false, helmArgs..., - ); err != nil { - return shared_utils.Errorf(err, L("cannot deploy uyuni")) - } - - // Create setup script + env variables and copy it to the container - envs := map[string]string{ - "NO_SSL": "Y", - } - - if err := install_shared.RunSetup(cnx, &flags.InstallFlags, args[0], envs); err != nil { - namespace, err := cnx.GetNamespace("") - if err != nil { - return shared_utils.Errorf(err, L("failed to stop service")) - } - if stopErr := shared_kubernetes.Stop(namespace, shared_kubernetes.ServerApp); stopErr != nil { - log.Error().Err(stopErr).Msg(L("failed to stop service")) - } - return err - } - - // The CA needs to be added to the database for Kickstart use. - err = adm_utils.ExecCommand(zerolog.DebugLevel, cnx, - "/usr/bin/rhn-ssl-dbstore", "--ca-cert=/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT") - if err != nil { - return shared_utils.Errorf(err, L("error storing the SSL CA certificate in database")) - } - return nil -} diff --git a/mgradm/cmd/install/podman/podman.go b/mgradm/cmd/install/podman/podman.go index 3e5b13036..5c11b5b83 100644 --- a/mgradm/cmd/install/podman/podman.go +++ b/mgradm/cmd/install/podman/podman.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared" + adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/podman" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -15,8 +16,8 @@ import ( ) type podmanInstallFlags struct { - shared.InstallFlags `mapstructure:",squash"` - Podman podman.PodmanFlags + adm_utils.ServerFlags `mapstructure:",squash"` + Podman podman.PodmanFlags } func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanInstallFlags]) *cobra.Command { @@ -33,8 +34,8 @@ NOTE: installing on a remote podman is not supported yet! RunE: func(cmd *cobra.Command, args []string) error { var flags podmanInstallFlags flagsUpdater := func(v *viper.Viper) { - flags.InstallFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.InstallFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/install/podman/podman_test.go b/mgradm/cmd/install/podman/podman_test.go index fff498ca1..a37b790bf 100644 --- a/mgradm/cmd/install/podman/podman_test.go +++ b/mgradm/cmd/install/podman/podman_test.go @@ -21,8 +21,10 @@ func TestParamsParsing(t *testing.T) { args = append(args, "srv.fq.dn") // Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *podmanInstallFlags, _ *cobra.Command, args []string) error { - flagstests.AssertInstallFlags(t, &flags.InstallFlags) + tester := func(_ *types.GlobalFlags, flags *podmanInstallFlags, + _ *cobra.Command, args []string, + ) error { + flagstests.AssertInstallFlags(t, &flags.ServerFlags) flagstests.AssertPodmanInstallFlags(t, &flags.Podman) testutils.AssertEquals(t, "Wrong FQDN", "srv.fq.dn", args[0]) return nil diff --git a/mgradm/cmd/install/podman/utils.go b/mgradm/cmd/install/podman/utils.go index fcf67769d..68f630058 100644 --- a/mgradm/cmd/install/podman/utils.go +++ b/mgradm/cmd/install/podman/utils.go @@ -7,15 +7,18 @@ package podman import ( "errors" "os/exec" + "path/filepath" + "strconv" "strings" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" - install_shared "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared" "github.com/uyuni-project/uyuni-tools/mgradm/shared/coco" "github.com/uyuni-project/uyuni-tools/mgradm/shared/hub" "github.com/uyuni-project/uyuni-tools/mgradm/shared/podman" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" "github.com/uyuni-project/uyuni-tools/shared" . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" shared_podman "github.com/uyuni-project/uyuni-tools/shared/podman" @@ -29,7 +32,9 @@ func waitForSystemStart( image string, flags *podmanInstallFlags, ) error { - err := podman.GenerateSystemdService(systemd, flags.TZ, image, flags.Debug.Java, flags.Mirror, flags.Podman.Args) + err := podman.GenerateSystemdService( + systemd, flags.Installation.TZ, image, flags.Installation.Debug.Java, flags.Mirror, flags.Podman.Args, + ) if err != nil { return err } @@ -55,7 +60,7 @@ func installForPodman( return err } - authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.SCC) + authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.Installation.SCC) if err != nil { return utils.Errorf(err, L("failed to login to registry.suse.com")) } @@ -67,7 +72,7 @@ func installForPodman( ) } - flags.CheckParameters(cmd, "podman") + flags.Installation.CheckParameters(cmd, "podman") if _, err := exec.LookPath("podman"); err != nil { return errors.New(L("install podman before running this command")) } @@ -93,26 +98,26 @@ func installForPodman( return utils.Errorf(err, L("cannot wait for system start")) } - caPassword := flags.SSL.Password - if flags.SSL.UseExisting() { + caPassword := flags.Installation.SSL.Password + if flags.Installation.SSL.UseExisting() { // We need to have a password for the generated CA, even though it will be thrown away after install caPassword = "dummy" } env := map[string]string{ - "CERT_O": flags.SSL.Org, - "CERT_OU": flags.SSL.OU, - "CERT_CITY": flags.SSL.City, - "CERT_STATE": flags.SSL.State, - "CERT_COUNTRY": flags.SSL.Country, - "CERT_EMAIL": flags.SSL.Email, - "CERT_CNAMES": strings.Join(append([]string{fqdn}, flags.SSL.Cnames...), ","), + "CERT_O": flags.Installation.SSL.Org, + "CERT_OU": flags.Installation.SSL.OU, + "CERT_CITY": flags.Installation.SSL.City, + "CERT_STATE": flags.Installation.SSL.State, + "CERT_COUNTRY": flags.Installation.SSL.Country, + "CERT_EMAIL": flags.Installation.SSL.Email, + "CERT_CNAMES": strings.Join(append([]string{fqdn}, flags.Installation.SSL.Cnames...), ","), "CERT_PASS": caPassword, } log.Info().Msg(L("Run setup command in the container")) - if err := install_shared.RunSetup(cnx, &flags.InstallFlags, fqdn, env); err != nil { + if err := runSetup(cnx, &flags.ServerFlags, fqdn, env); err != nil { if stopErr := systemd.StopService(shared_podman.ServerService); stopErr != nil { log.Error().Msgf(L("Failed to stop service: %v"), stopErr) } @@ -129,12 +134,12 @@ func installForPodman( if flags.Coco.Replicas > 0 { // This may need to be moved up later once more containers require DB access - if err := shared_podman.CreateDBSecrets(flags.DB.User, flags.DB.Password); err != nil { + if err := shared_podman.CreateDBSecrets(flags.Installation.DB.User, flags.Installation.DB.Password); err != nil { return err } if err := coco.SetupCocoContainer( systemd, authFile, flags.Image.Registry, flags.Coco, flags.Image, - flags.DB.Name, flags.DB.Port, + flags.Installation.DB.Name, flags.Installation.DB.Port, ); err != nil { return err } @@ -148,8 +153,10 @@ func installForPodman( } } - if flags.SSL.UseExisting() { - if err := podman.UpdateSSLCertificate(cnx, &flags.SSL.Ca, &flags.SSL.Server); err != nil { + if flags.Installation.SSL.UseExisting() { + if err := podman.UpdateSSLCertificate( + cnx, &flags.Installation.SSL.Ca, &flags.Installation.SSL.Server, + ); err != nil { return utils.Errorf(err, L("cannot update SSL certificate")) } } @@ -159,3 +166,121 @@ func installForPodman( } return nil } + 
+const setupName = "setup.sh" + +// runSetup execute the setup. +func runSetup(cnx *shared.Connection, flags *adm_utils.ServerFlags, fqdn string, env map[string]string) error { + tmpFolder, cleaner, err := generateSetupScript(&flags.Installation, fqdn, flags.Mirror, env) + if err != nil { + return err + } + defer cleaner() + + if err := cnx.Copy(filepath.Join(tmpFolder, setupName), "server:/tmp/setup.sh", "root", "root"); err != nil { + return utils.Errorf(err, L("cannot copy /tmp/setup.sh")) + } + + err = adm_utils.ExecCommand(zerolog.InfoLevel, cnx, "/tmp/setup.sh") + if err != nil { + return utils.Errorf(err, L("error running the setup script")) + } + if err := cnx.CopyCaCertificate(fqdn); err != nil { + return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates")) + } + + log.Info().Msgf(L("Server set up, login on https://%[1]s with %[2]s user"), fqdn, flags.Installation.Admin.Login) + return nil +} + +// generateSetupScript creates a temporary folder with the setup script to execute in the container. +// The script exports all the needed environment variables and calls uyuni's mgr-setup. +func generateSetupScript( + flags *adm_utils.InstallationFlags, + fqdn string, + mirror string, + extraEnv map[string]string, +) (string, func(), error) { + localHostValues := []string{ + "localhost", + "127.0.0.1", + "::1", + fqdn, + } + + localDB := utils.Contains(localHostValues, flags.DB.Host) + + dbHost := flags.DB.Host + reportdbHost := flags.ReportDB.Host + + if localDB { + dbHost = "localhost" + if reportdbHost == "" { + reportdbHost = "localhost" + } + } + env := map[string]string{ + "UYUNI_FQDN": fqdn, + "MANAGER_USER": flags.DB.User, + "MANAGER_PASS": flags.DB.Password, + "MANAGER_ADMIN_EMAIL": flags.Email, + "MANAGER_MAIL_FROM": flags.EmailFrom, + "MANAGER_ENABLE_TFTP": boolToString(flags.Tftp), + "LOCAL_DB": boolToString(localDB), + "MANAGER_DB_NAME": flags.DB.Name, + "MANAGER_DB_HOST": dbHost, + "MANAGER_DB_PORT": strconv.Itoa(flags.DB.Port), + "MANAGER_DB_PROTOCOL": flags.DB.Protocol, + "REPORT_DB_NAME": flags.ReportDB.Name, + "REPORT_DB_HOST": reportdbHost, + "REPORT_DB_PORT": strconv.Itoa(flags.ReportDB.Port), + "REPORT_DB_USER": flags.ReportDB.User, + "REPORT_DB_PASS": flags.ReportDB.Password, + "EXTERNALDB_ADMIN_USER": flags.DB.Admin.User, + "EXTERNALDB_ADMIN_PASS": flags.DB.Admin.Password, + "EXTERNALDB_PROVIDER": flags.DB.Provider, + "ISS_PARENT": flags.IssParent, + "ACTIVATE_SLP": "N", // Deprecated, will be removed soon + "SCC_USER": flags.SCC.User, + "SCC_PASS": flags.SCC.Password, + } + if mirror != "" { + env["MIRROR_PATH"] = "/mirror" + } + + // Add the extra environment variables + for key, value := range extraEnv { + env[key] = value + } + + scriptDir, cleaner, err := utils.TempDir() + if err != nil { + return "", nil, err + } + + dataTemplate := templates.MgrSetupScriptTemplateData{ + Env: env, + DebugJava: flags.Debug.Java, + OrgName: flags.Organization, + AdminLogin: flags.Admin.Login, + AdminPassword: strings.ReplaceAll(flags.Admin.Password, `"`, `\"`), + AdminFirstName: flags.Admin.FirstName, + AdminLastName: flags.Admin.LastName, + AdminEmail: flags.Admin.Email, + NoSSL: false, + } + + scriptPath := filepath.Join(scriptDir, setupName) + if err = utils.WriteTemplateToFile(dataTemplate, scriptPath, 0555, true); err != nil { + return "", cleaner, utils.Errorf(err, L("Failed to generate setup script")) + } + + return scriptDir, cleaner, nil +} + +func boolToString(value bool) string { + if value { + return "Y" + } + return "N" +} diff 
index 50d7534eb..0c01fc73c 100644
--- a/mgradm/cmd/install/shared/flags.go
+++ b/mgradm/cmd/install/shared/flags.go
@@ -5,114 +5,13 @@
 package shared
 
 import (
-	"fmt"
-	"net/mail"
-	"regexp"
-	"strings"
-
 	"github.com/spf13/cobra"
 	cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
-	apiTypes "github.com/uyuni-project/uyuni-tools/shared/api/types"
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
 	"github.com/uyuni-project/uyuni-tools/shared/ssl"
-	"github.com/uyuni-project/uyuni-tools/shared/types"
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
 )
 
-// DBFlags can store all values required to connect to a database.
-type DBFlags struct {
-	Host     string
-	Name     string
-	Port     int
-	User     string
-	Password string
-	Protocol string
-	Provider string
-	Admin    struct {
-		User     string
-		Password string
-	}
-}
-
-// DebugFlags contains information about enabled/disabled debug.
-type DebugFlags struct {
-	Java bool
-}
-
-// InstallFlags stores all the flags used by install command.
-type InstallFlags struct {
-	TZ           string
-	Email        string
-	EmailFrom    string
-	IssParent    string
-	Mirror       string
-	Tftp         bool
-	DB           DBFlags
-	ReportDB     DBFlags
-	SSL          cmd_utils.InstallSSLFlags
-	SCC          types.SCCCredentials
-	Debug        DebugFlags
-	Image        types.ImageFlags `mapstructure:",squash"`
-	Coco         cmd_utils.CocoFlags
-	HubXmlrpc    cmd_utils.HubXmlrpcFlags
-	Admin        apiTypes.User
-	Organization string
-}
-
-// idChecker verifies that the value is a valid identifier.
-func idChecker(value string) bool {
-	r := regexp.MustCompile(`^([[:alnum:]]|[._-])+$`)
-	if r.MatchString(value) {
-		return true
-	}
-	fmt.Println(L("Can only contain letters, digits . _ and -"))
-	return false
-}
-
-// emailChecker verifies that the value is a valid email address.
-func emailChecker(value string) bool {
-	address, err := mail.ParseAddress(value)
-	if err != nil || address.Name != "" || strings.ContainsAny(value, "<>") {
-		fmt.Println(L("Not a valid email address"))
-		return false
-	}
-	return true
-}
-
-// CheckParameters checks parameters for install command.
-func (flags *InstallFlags) CheckParameters(cmd *cobra.Command, command string) {
-	if flags.DB.Password == "" {
-		flags.DB.Password = utils.GetRandomBase64(30)
-	}
-
-	if flags.ReportDB.Password == "" {
-		flags.ReportDB.Password = utils.GetRandomBase64(30)
-	}
-
-	// Make sure we have all the required 3rd party flags or none
-	flags.SSL.CheckParameters()
-
-	// Since we use cert-manager for self-signed certificates on kubernetes we don't need password for it
-	if !flags.SSL.UseExisting() && command == "podman" {
-		utils.AskPasswordIfMissing(&flags.SSL.Password, cmd.Flag("ssl-password").Usage, 0, 0)
-	}
-
-	// Use the host timezone if the user didn't define one
-	if flags.TZ == "" {
-		flags.TZ = utils.GetLocalTimezone()
-	}
-
-	utils.AskIfMissing(&flags.Email, cmd.Flag("email").Usage, 1, 128, emailChecker)
-	utils.AskIfMissing(&flags.EmailFrom, cmd.Flag("emailfrom").Usage, 0, 0, emailChecker)
-
-	utils.AskIfMissing(&flags.Admin.Login, cmd.Flag("admin-login").Usage, 1, 64, idChecker)
-	utils.AskPasswordIfMissing(&flags.Admin.Password, cmd.Flag("admin-password").Usage, 5, 48)
-	utils.AskIfMissing(&flags.Organization, cmd.Flag("organization").Usage, 3, 128, nil)
-
-	flags.SSL.Email = flags.Email
-	flags.Admin.Email = flags.Email
-}
-
 // AddInspectFlags add flags to inspect command.
 func AddInspectFlags(cmd *cobra.Command) {
 	cmd_utils.AddSCCFlag(cmd)
diff --git a/mgradm/cmd/install/shared/shared.go b/mgradm/cmd/install/shared/shared.go
deleted file mode 100644
index 9a7659d1a..000000000
--- a/mgradm/cmd/install/shared/shared.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// SPDX-FileCopyrightText: 2024 SUSE LLC
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package shared
-
-import (
-	"errors"
-	"net/url"
-	"path/filepath"
-	"strconv"
-
-	"github.com/rs/zerolog"
-	"github.com/rs/zerolog/log"
-	"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
-	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
-	"github.com/uyuni-project/uyuni-tools/shared"
-	"github.com/uyuni-project/uyuni-tools/shared/api"
-	"github.com/uyuni-project/uyuni-tools/shared/api/org"
-	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
-	"github.com/uyuni-project/uyuni-tools/shared/utils"
-)
-
-const setupName = "setup.sh"
-
-// RunSetup execute the setup.
-func RunSetup(cnx *shared.Connection, flags *InstallFlags, fqdn string, env map[string]string) error {
-	// Containers should be running now, check storage if it is using volume from already configured server
-	preconfigured := false
-	if isServerConfigured(cnx) {
-		log.Warn().Msg(
-			L("Server appears to be already configured. Installation will continue, but installation options may be ignored."),
-		)
-		preconfigured = true
-	}
-
-	tmpFolder, cleaner, err := generateSetupScript(flags, fqdn, env)
-	if err != nil {
-		return err
-	}
-	defer cleaner()
-
-	if err := cnx.Copy(filepath.Join(tmpFolder, setupName), "server:/tmp/setup.sh", "root", "root"); err != nil {
-		return utils.Errorf(err, L("cannot copy /tmp/setup.sh"))
-	}
-
-	err = adm_utils.ExecCommand(zerolog.InfoLevel, cnx, "/tmp/setup.sh")
-	if err != nil && !preconfigured {
-		return utils.Errorf(err, L("error running the setup script"))
-	}
-	if err := cnx.CopyCaCertificate(fqdn); err != nil {
-		return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates"))
-	}
-
-	// Call the org.createFirst api if flags are passed
-	// This should not happen since the password is queried and enforced
-	if flags.Admin.Password != "" {
-		apiCnx := api.ConnectionDetails{
-			Server:   fqdn,
-			Insecure: false,
-			User:     flags.Admin.Login,
-			Password: flags.Admin.Password,
-		}
-
-		// Check if there is already admin user with given password and organization with same name
-		client, err := api.Init(&apiCnx)
-		if err != nil {
-			log.Error().Err(err).Msgf(L("unable to prepare API client"))
-		}
-		if err = client.Login(); err == nil {
-			if _, err := org.GetOrganizationDetails(&apiCnx, flags.Organization); err == nil {
-				log.Info().Msgf(L("Server organization already exists, reusing"))
-			} else {
-				log.Debug().Err(err).Msg("Error returned by server")
-				log.Warn().Msgf(L("Administration user already exists, but organization %s could not be found"), flags.Organization)
-			}
-		} else {
-			var connError *url.Error
-			if errors.As(err, &connError) {
-				// We were not able to connect to the server at all
-				return err
-			}
-			// We do not have any user existing, create one. CreateFirst skip user login
-			_, err := org.CreateFirst(&apiCnx, flags.Organization, &flags.Admin)
-			if err != nil {
-				if preconfigured {
-					log.Warn().Msgf(L("Administration user already exists, but provided credentials are not valid"))
-				} else {
-					return err
-				}
-			}
-		}
-	}
-
-	log.Info().Msgf(L("Server set up, login on https://%[1]s with %[2]s user"), fqdn, flags.Admin.Login)
-	return nil
-}
-
-// generateSetupScript creates a temporary folder with the setup script to execute in the container.
-// The script exports all the needed environment variables and calls uyuni's mgr-setup.
-// Podman or kubernetes-specific variables can be passed using extraEnv parameter.
-func generateSetupScript(flags *InstallFlags, fqdn string, extraEnv map[string]string) (string, func(), error) {
-	localHostValues := []string{
-		"localhost",
-		"127.0.0.1",
-		"::1",
-		fqdn,
-	}
-
-	localDB := utils.Contains(localHostValues, flags.DB.Host)
-
-	dbHost := flags.DB.Host
-	reportdbHost := flags.ReportDB.Host
-
-	if localDB {
-		dbHost = "localhost"
-		if reportdbHost == "" {
-			reportdbHost = "localhost"
-		}
-	}
-	env := map[string]string{
-		"UYUNI_FQDN":            fqdn,
-		"MANAGER_USER":          flags.DB.User,
-		"MANAGER_PASS":          flags.DB.Password,
-		"MANAGER_ADMIN_EMAIL":   flags.Email,
-		"MANAGER_MAIL_FROM":     flags.EmailFrom,
-		"MANAGER_ENABLE_TFTP":   boolToString(flags.Tftp),
-		"LOCAL_DB":              boolToString(localDB),
-		"MANAGER_DB_NAME":       flags.DB.Name,
-		"MANAGER_DB_HOST":       dbHost,
-		"MANAGER_DB_PORT":       strconv.Itoa(flags.DB.Port),
-		"MANAGER_DB_PROTOCOL":   flags.DB.Protocol,
-		"REPORT_DB_NAME":        flags.ReportDB.Name,
-		"REPORT_DB_HOST":        reportdbHost,
-		"REPORT_DB_PORT":        strconv.Itoa(flags.ReportDB.Port),
-		"REPORT_DB_USER":        flags.ReportDB.User,
-		"REPORT_DB_PASS":        flags.ReportDB.Password,
-		"EXTERNALDB_ADMIN_USER": flags.DB.Admin.User,
-		"EXTERNALDB_ADMIN_PASS": flags.DB.Admin.Password,
-		"EXTERNALDB_PROVIDER":   flags.DB.Provider,
-		"ISS_PARENT":            flags.IssParent,
-		"ACTIVATE_SLP":          "N", // Deprecated, will be removed soon
-		"SCC_USER":              flags.SCC.User,
-		"SCC_PASS":              flags.SCC.Password,
-	}
-	if flags.Mirror != "" {
-		env["MIRROR_PATH"] = "/mirror"
-	}
-
-	// Add the extra environment variables
-	for key, value := range extraEnv {
-		env[key] = value
-	}
-
-	scriptDir, cleaner, err := utils.TempDir()
-	if err != nil {
-		return "", nil, err
-	}
-
-	dataTemplate := templates.MgrSetupScriptTemplateData{
-		Env:       env,
-		DebugJava: flags.Debug.Java,
-	}
-
-	scriptPath := filepath.Join(scriptDir, setupName)
-	if err = utils.WriteTemplateToFile(dataTemplate, scriptPath, 0555, true); err != nil {
-		return "", cleaner, utils.Errorf(err, L("Failed to generate setup script"))
-	}
-
-	return scriptDir, cleaner, nil
-}
-
-func boolToString(value bool) string {
-	if value {
-		return "Y"
-	}
-	return "N"
-}
-
-func isServerConfigured(cnx *shared.Connection) bool {
-	return cnx.TestExistenceInPod("/root/.MANAGER_SETUP_COMPLETE")
-}
diff --git a/mgradm/cmd/migrate/kubernetes/dataExtractor.go b/mgradm/cmd/migrate/kubernetes/dataExtractor.go
new file mode 100644
index 000000000..330e0e243
--- /dev/null
+++ b/mgradm/cmd/migrate/kubernetes/dataExtractor.go
@@ -0,0 +1,76 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"errors"
+
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	"gopkg.in/yaml.v2"
+)
+
+// MigrationData represents the files and data extracted from the migration sync phase.
+type MigrationData struct {
+	CaKey      string
+	CaCert     string
+	Data       *utils.InspectResult
+	ServerCert string
+	ServerKey  string
+}
+
+func extractMigrationData(
+	namespace string,
+	image string,
+	pullPolicy string,
+	pullSecret string,
+	volume types.VolumeMount,
+) (*MigrationData, error) {
+	// Run a pod reading the extracted data files from the volume.
+	// The data are written as a YAML dictionary where the key is the file name and the value its content.
+	out, err := kubernetes.RunPodLogs(namespace, "uyuni-data-extractor", image,
+		pullPolicy, pullSecret, []types.VolumeMount{volume},
+		"sh", "-c",
+		"for f in /var/lib/uyuni-tools/*; do echo \"`basename $f`: |2\"; cat $f | sed 's/^/  /'; done",
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the content
+	files := make(map[string]string)
+	if err := yaml.Unmarshal(out, &files); err != nil {
+		return nil, utils.Errorf(err, L("failed to parse data extractor pod output"))
+	}
+
+	var result MigrationData
+	for file, content := range files {
+		if file == "RHN-ORG-PRIVATE-SSL-KEY" {
+			result.CaKey = content
+		} else if file == "RHN-ORG-TRUSTED-SSL-CERT" {
+			result.CaCert = content
+		} else if file == "spacewalk.crt" {
+			result.ServerCert = content
+		} else if file == "spacewalk.key" {
+			result.ServerKey = content
+		} else if file == "data" {
+			parsedData, err := utils.ReadInspectDataString[utils.InspectResult]([]byte(content))
+			if err != nil {
+				return nil, utils.Errorf(err, L("failed to parse migration data file"))
+			}
+			result.Data = parsedData
+		}
+	}
+
+	if result.Data == nil {
+		return nil, errors.New(L("found no data file after migration"))
+	}
+
+	return &result, nil
+}
diff --git a/mgradm/cmd/migrate/kubernetes/kubernetes.go b/mgradm/cmd/migrate/kubernetes/kubernetes.go
index 7ed5635fe..6829c9308 100644
--- a/mgradm/cmd/migrate/kubernetes/kubernetes.go
+++ b/mgradm/cmd/migrate/kubernetes/kubernetes.go
@@ -10,21 +10,15 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
 	"github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/shared"
+	"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
 	cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) -type kubernetesMigrateFlags struct { - shared.MigrateFlags `mapstructure:",squash"` - Helm cmd_utils.HelmFlags - SCC types.SCCCredentials - SSL types.SSLCertGenerationFlags -} - -func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetesMigrateFlags]) *cobra.Command { - migrateCmd := &cobra.Command{ +func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command { + cmd := &cobra.Command{ Use: "kubernetes [source server FQDN]", Short: L("Migrate a remote server to containers running on a kubernetes cluster"), Long: L(`Migrate a remote server to containers running on a kubernetes cluster @@ -32,32 +26,49 @@ func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetesMigr This migration command assumes a few things: * the SSH configuration for the source server is complete, including user and all needed options to connect to the machine, - * an SSH agent is started and the key to use to connect to the server is added to it, * kubectl and helm are installed locally, * a working kubectl configuration should be set to connect to the cluster to deploy to +The SSH parameters may be left empty if the target Kubernetes namespace contains: + * an uyuni-migration-config ConfigMap with config and known_hosts items, + * an uyuni-migration-key secret with key and key.pub items with a passwordless key. + When migrating a server with a automatically generated SSL Root CA certificate, the private key password will be required to convert it to RSA in a kubernetes secret. This is not needed if the source server does not have a generated SSL CA certificate. - -NOTE: migrating to a remote cluster is not supported yet! 
`), Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - var flags kubernetesMigrateFlags + var flags kubernetes.KubernetesServerFlags flagsUpdater := func(v *viper.Viper) { - flags.MigrateFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.MigrateFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, } - shared.AddMigrateFlags(migrateCmd) - cmd_utils.AddHelmInstallFlag(migrateCmd) - migrateCmd.Flags().String("ssl-password", "", L("SSL CA generated private key password")) + shared.AddMigrateFlags(cmd) + cmd_utils.AddHelmInstallFlag(cmd) + cmd_utils.AddVolumesFlags(cmd) + + cmd.Flags().String("ssl-password", "", L("SSL CA generated private key password")) + + cmd.Flags().String("ssh-key-public", "", L("Path to the SSH public key to use to connect to the source server")) + cmd.Flags().String("ssh-key-private", "", + L("Path to the passwordless SSH private key to use to connect to the source server"), + ) + cmd.Flags().String("ssh-knownhosts", "", L("Path to the SSH known_hosts file to use to connect to the source server")) + cmd.Flags().String("ssh-config", "", L("Path to the SSH configuration file to use to connect to the source server")) + + const sshGroupID = "ssh" + _ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: sshGroupID, Title: L("SSH Configuration Flags")}) + _ = utils.AddFlagToHelpGroupID(cmd, "ssh-key-public", sshGroupID) + _ = utils.AddFlagToHelpGroupID(cmd, "ssh-key-private", sshGroupID) + _ = utils.AddFlagToHelpGroupID(cmd, "ssh-knownhosts", sshGroupID) + _ = utils.AddFlagToHelpGroupID(cmd, "ssh-config", sshGroupID) - return migrateCmd + return cmd } // NewCommand for kubernetes migration. diff --git a/mgradm/cmd/migrate/kubernetes/kubernetes_test.go b/mgradm/cmd/migrate/kubernetes/kubernetes_test.go index 02b9412f9..da5ecc1c2 100644 --- a/mgradm/cmd/migrate/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/migrate/kubernetes/kubernetes_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/spf13/cobra" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" "github.com/uyuni-project/uyuni-tools/shared/testutils" "github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -20,6 +21,10 @@ func TestParamsParsing(t *testing.T) { "--prepare", "--user", "sudoer", "--ssl-password", "sslsecret", + "--ssh-key-public", "path/ssh.pub", + "--ssh-key-private", "path/ssh", + "--ssh-knownhosts", "path/known_hosts", + "--ssh-config", "path/config", "source.fq.dn", } @@ -29,22 +34,28 @@ func TestParamsParsing(t *testing.T) { args = append(args, flagstests.DBUpdateImageFlagTestArgs...) args = append(args, flagstests.CocoFlagsTestArgs...) args = append(args, flagstests.HubXmlrpcFlagsTestArgs...) - args = append(args, flagstests.ServerHelmFlagsTestArgs...) + args = append(args, flagstests.ServerKubernetesFlagsTestArgs...) + args = append(args, flagstests.VolumesFlagsTestExpected...) 
// Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *kubernetesMigrateFlags, + tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, _ *cobra.Command, args []string, ) error { - testutils.AssertTrue(t, "Prepare not set", flags.Prepare) + testutils.AssertTrue(t, "Prepare not set", flags.Migration.Prepare) flagstests.AssertMirrorFlag(t, flags.Mirror) - flagstests.AssertSCCFlag(t, &flags.SCC) + flagstests.AssertSCCFlag(t, &flags.Installation.SCC) flagstests.AssertImageFlag(t, &flags.Image) flagstests.AssertDBUpgradeImageFlag(t, &flags.DBUpgradeImage) flagstests.AssertCocoFlag(t, &flags.Coco) flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) - testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.User) - flagstests.AssertServerHelmFlags(t, &flags.Helm) - testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.SSL.Password) + testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.Migration.User) + flagstests.AssertServerKubernetesFlags(t, &flags.Kubernetes) + flagstests.AssertVolumesFlags(t, &flags.Volumes) + testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.Installation.SSL.Password) + testutils.AssertEquals(t, "Error parsing --ssh-key-public", "path/ssh.pub", flags.SSH.Key.Public) + testutils.AssertEquals(t, "Error parsing --ssh-key-private", "path/ssh", flags.SSH.Key.Private) + testutils.AssertEquals(t, "Error parsing --ssh-knownhosts", "path/known_hosts", flags.SSH.Knownhosts) + testutils.AssertEquals(t, "Error parsing --ssh-config", "path/config", flags.SSH.Config) testutils.AssertEquals(t, "Wrong FQDN", "source.fq.dn", args[0]) return nil } diff --git a/mgradm/cmd/migrate/kubernetes/migrationJob.go b/mgradm/cmd/migrate/kubernetes/migrationJob.go new file mode 100644 index 000000000..e445c2fe6 --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/migrationJob.go @@ -0,0 +1,120 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const migrationJobName = "uyuni-data-sync" + +// Prepares and starts the synchronization job. +// +// This assumes the SSH key is stored in an uyuni-migration-key secret +// and the SSH config in an uyuni-migration-ssh ConfigMap with config and known_hosts keys. 
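+//
+// For illustration only, such resources could be created beforehand with
+// plain kubectl from existing local SSH files (the names follow the constants
+// defined in ssh.go; this sketch is not part of the tool itself):
+//
+//	kubectl create secret generic uyuni-migration-key -n uyuni \
+//	    --from-file=key=$HOME/.ssh/id_rsa --from-file=key.pub=$HOME/.ssh/id_rsa.pub
+//	kubectl create configmap uyuni-migration-ssh -n uyuni \
+//	    --from-file=config=$HOME/.ssh/config --from-file=known_hosts=$HOME/.ssh/known_hosts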
+func startMigrationJob( + namespace string, + serverImage string, + pullPolicy string, + pullSecret string, + fqdn string, + user string, + prepare bool, + mounts []types.VolumeMount, +) (string, error) { + job, err := getMigrationJob( + namespace, + serverImage, + pullPolicy, + pullSecret, + mounts, + fqdn, + user, + prepare, + ) + if err != nil { + return "", err + } + + // Run the job + return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the migration job")) +} + +func getMigrationJob( + namespace string, + image string, + pullPolicy string, + pullSecret string, + mounts []types.VolumeMount, + sourceFqdn string, + user string, + prepare bool, +) (*batch.Job, error) { + // Add mount and volume for the uyuni-migration-key secret with key and key.pub items + keyMount := core.VolumeMount{Name: "ssh-key", MountPath: "/root/.ssh/id_rsa", SubPath: "id_rsa"} + pubKeyMount := core.VolumeMount{Name: "ssh-key", MountPath: "/root/.ssh/id_rsa.pub", SubPath: "id_rsa.pub"} + + keyVolume := kubernetes.CreateSecretVolume("ssh-key", "uyuni-migration-key") + var keyMode int32 = 0600 + keyVolume.VolumeSource.Secret.Items = []core.KeyToPath{ + {Key: "key", Path: "id_rsa", Mode: &keyMode}, + {Key: "key.pub", Path: "id_rsa.pub"}, + } + + // Add mounts and volume for the uyuni-migration-ssh config map + // We need one mount for each file using subPath to not have 2 mounts on the same folder + knownHostsMount := core.VolumeMount{Name: "ssh-conf", MountPath: "/root/.ssh/known_hosts", SubPath: "known_hosts"} + sshConfMount := core.VolumeMount{Name: "ssh-conf", MountPath: "/root/.ssh/config", SubPath: "config"} + sshVolume := kubernetes.CreateConfigVolume("ssh-conf", "uyuni-migration-ssh") + + // Prepare the script + scriptData := templates.MigrateScriptTemplateData{ + Volumes: utils.ServerVolumeMounts, + SourceFqdn: sourceFqdn, + User: user, + Kubernetes: true, + Prepare: prepare, + } + + job, err := kubernetes.GetScriptJob(namespace, migrationJobName, image, pullPolicy, pullSecret, mounts, scriptData) + if err != nil { + return nil, err + } + + // Append the extra volumes and mounts + volumeMounts := job.Spec.Template.Spec.Containers[0].VolumeMounts + volumes := job.Spec.Template.Spec.Volumes + + volumeMounts = append(volumeMounts, keyMount, pubKeyMount, knownHostsMount, sshConfMount) + volumes = append(volumes, keyVolume, sshVolume) + + job.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts + job.Spec.Template.Spec.Volumes = volumes + + initScript := `cp -a /etc/systemd/system/multi-user.target.wants/. /mnt/etc-systemd-multi` + + job.Spec.Template.Spec.InitContainers = []core.Container{ + { + Name: "init-volumes", + Image: image, + ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy), + Command: []string{"sh", "-c", initScript}, + VolumeMounts: []core.VolumeMount{ + {Name: "etc-systemd-multi", MountPath: "/mnt/etc-systemd-multi"}, + }, + }, + } + + return job, nil +} diff --git a/mgradm/cmd/migrate/kubernetes/ssh.go b/mgradm/cmd/migrate/kubernetes/ssh.go new file mode 100644 index 000000000..a6a48ea0f --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/ssh.go @@ -0,0 +1,166 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + sshSecretName = "uyuni-migration-key" + sshConfigName = "uyuni-migration-ssh" +) + +func checkSSH(namespace string, flags *adm_utils.SSHFlags) error { + if exists, err := checkSSHKey(namespace); err != nil { + return err + } else if !exists && flags.Key.Public != "" && flags.Key.Private != "" { + if err := createSSHSecret(namespace, flags.Key.Private, flags.Key.Public); err != nil { + return err + } + } else if !exists { + return errors.New(L("no SSH key found to use for migration")) + } + + if exists, err := checkSSHConfig(namespace); err != nil { + return err + } else if !exists && flags.Knownhosts != "" { + // The config may be empty, but not the known_hosts + if err := createSSHConfig(namespace, flags.Config, flags.Knownhosts); err != nil { + return err + } + } else if !exists { + return errors.New(L("no SSH known_hosts and configuration found to use for migration")) + } + + return nil +} + +func checkSSHKey(namespace string) (bool, error) { + exists := false + out, err := utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "secret", "-n", namespace, sshSecretName, "-o", "jsonpath={.data}", + ) + if err != nil { + if strings.Contains(err.Error(), "NotFound") { + log.Debug().Msg("Not found!") + // The secret was not found, it's not really an error + return exists, nil + } + return exists, utils.Errorf(err, L("failed to get %s SSH key secret"), sshSecretName) + } + exists = true + + var data map[string]string + if err := json.Unmarshal(out, &data); err != nil { + return exists, err + } + + for _, key := range []string{"key", "key.pub"} { + if value, ok := data[key]; !ok || value == "" { + return exists, fmt.Errorf(L("%[1]s secret misses the %[2]s value"), sshSecretName, key) + } + } + + return exists, nil +} + +func createSSHSecret(namespace string, keyPath string, pubKeyPath string) error { + keyContent, err := os.ReadFile(keyPath) + if err != nil { + return utils.Errorf(err, L("failed to read key file %s"), keyPath) + } + + pubContent, err := os.ReadFile(pubKeyPath) + if err != nil { + return utils.Errorf(err, L("failed to read public key file %s"), pubKeyPath) + } + + secret := core.Secret{ + TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: sshSecretName, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + // It seems serializing this object automatically transforms the secrets to base64. 
+		Data: map[string][]byte{
+			"key":     keyContent,
+			"key.pub": pubContent,
+		},
+	}
+
+	return kubernetes.Apply([]runtime.Object{&secret}, L("failed to create the SSH migration secret"))
+}
+
+func checkSSHConfig(namespace string) (bool, error) {
+	exists := false
+	out, err := utils.RunCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "cm", "-n", namespace, sshConfigName, "-o", "jsonpath={.data}",
+	)
+	if err != nil {
+		if strings.Contains(err.Error(), "NotFound") {
+			// The config map was not found, it's not really an error
+			return exists, nil
+		}
+		return exists, utils.Errorf(err, L("failed to get %s SSH ConfigMap"), sshConfigName)
+	}
+	exists = true
+
+	var data map[string]string
+	if err := json.Unmarshal(out, &data); err != nil {
+		return exists, utils.Errorf(err, L("failed to parse SSH ConfigMap data"))
+	}
+
+	// The known_hosts has to contain at least the entry for the source server.
+	if value, ok := data["known_hosts"]; !ok || value == "" {
+		return exists, fmt.Errorf(L("%[1]s ConfigMap misses the %[2]s value"), sshConfigName, "known_hosts")
+	}
+
+	// An empty config is not an error.
+	if _, ok := data["config"]; !ok {
+		return exists, fmt.Errorf(L("%[1]s ConfigMap misses the %[2]s value"), sshConfigName, "config")
+	}
+
+	return exists, nil
+}
+
+func createSSHConfig(namespace string, configPath string, knownhostsPath string) error {
+	configContent, err := os.ReadFile(configPath)
+	if err != nil {
+		return utils.Errorf(err, L("failed to read SSH config file %s"), configPath)
+	}
+
+	knownhostsContent, err := os.ReadFile(knownhostsPath)
+	if err != nil {
+		return utils.Errorf(err, L("failed to read SSH known_hosts file %s"), knownhostsPath)
+	}
+
+	configMap := core.ConfigMap{
+		TypeMeta:   meta.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
+		ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: sshConfigName},
+		Data: map[string]string{
+			"config":      string(configContent),
+			"known_hosts": string(knownhostsContent),
+		},
+	}
+	return kubernetes.Apply([]runtime.Object{&configMap}, L("failed to create the SSH migration ConfigMap"))
+}
diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go
index 22632156e..b3366c05d 100644
--- a/mgradm/cmd/migrate/kubernetes/utils.go
+++ b/mgradm/cmd/migrate/kubernetes/utils.go
@@ -7,40 +7,35 @@
 package kubernetes
 
 import (
-	"encoding/base64"
-	"fmt"
-	"os/exec"
+	"os"
 	"path"
 
-	"github.com/rs/zerolog"
-	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
-	migration_shared "github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/shared"
 	"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
-	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
-	"github.com/uyuni-project/uyuni-tools/shared"
 	shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes"
 	.
"github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/ssl" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) +const migrationDataPvcName = "migration-data" + func migrateToKubernetes( _ *types.GlobalFlags, - flags *kubernetesMigrateFlags, + flags *kubernetes.KubernetesServerFlags, _ *cobra.Command, args []string, ) error { - for _, binary := range []string{"kubectl", "helm"} { - if _, err := exec.LookPath(binary); err != nil { - return fmt.Errorf(L("install %s before running this command"), binary) - } + namespace := flags.Kubernetes.Uyuni.Namespace + + // Create the namespace if not present + if err := kubernetes.CreateNamespace(namespace); err != nil { + return err } - cnx := shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) - namespace, err := cnx.GetNamespace("") - if err != nil { - return utils.Errorf(err, L("failed retrieving namespace")) + + // Check the for the required SSH key and configuration + if err := checkSSH(namespace, &flags.SSH); err != nil { + return err } serverImage, err := utils.ComputeImage(flags.Image.Registry, utils.DefaultTag, flags.Image) @@ -53,193 +48,103 @@ func migrateToKubernetes( return err } - // Find the SSH Socket and paths for the migration - sshAuthSocket := migration_shared.GetSSHAuthSocket() - sshConfigPath, sshKnownhostsPath := migration_shared.GetSSHPaths() + mounts := kubernetes.GetServerMounts() + mounts = kubernetes.TuneMounts(mounts, &flags.Volumes) - // Prepare the migration script and folder - scriptDir, cleaner, err := adm_utils.GenerateMigrationScript(fqdn, flags.User, true, flags.Prepare) - if err != nil { - return utils.Errorf(err, L("failed to generate migration script")) - } - - defer cleaner() + // Add a mount and volume for the extracted data + migrationDataVolume := types.VolumeMount{Name: migrationDataPvcName, MountPath: "/var/lib/uyuni-tools"} + migrationMounts := append(mounts, migrationDataVolume) - // We don't need the SSL certs at this point of the migration - clusterInfos, err := shared_kubernetes.CheckCluster() - if err != nil { + if err := shared_kubernetes.CreatePersistentVolumeClaims(namespace, migrationMounts); err != nil { return err } - kubeconfig := clusterInfos.GetKubeconfig() - - // Install Uyuni with generated CA cert: an empty struct means no 3rd party cert - helmArgs := []string{} // Create a secret using SCC credentials if any are provided - helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Uyuni.Namespace, &flags.SCC) + pullSecret, err := shared_kubernetes.GetRegistrySecret( + flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SCC, shared_kubernetes.ServerApp, + ) if err != nil { return err } - // Deploy for running migration command - migrationArgs := append(helmArgs, - "--set", "migration.ssh.agentSocket="+sshAuthSocket, - "--set", "migration.ssh.configPath="+sshConfigPath, - "--set", "migration.ssh.knownHostsPath="+sshKnownhostsPath, - "--set", "migration.dataPath="+scriptDir, + jobName, err := startMigrationJob( + namespace, + serverImage, + flags.Image.PullPolicy, + pullSecret, + fqdn, + flags.Migration.User, + flags.Migration.Prepare, + migrationMounts, ) - - if err := kubernetes.Deploy(cnx, flags.Image.Registry, &flags.Image, &flags.Helm, - clusterInfos, fqdn, false, flags.Prepare, migrationArgs...); err != nil { - return utils.Errorf(err, L("cannot run deploy")) - } - - // This is needed because folder with script needs to be mounted - // check the node before scaling 
-	nodeName, err := shared_kubernetes.GetNode(namespace, shared_kubernetes.ServerFilter)
 	if err != nil {
-		return utils.Errorf(err, L("cannot find node running uyuni"))
-	}
-	// Run the actual migration
-	if err := adm_utils.RunMigration(cnx, "migrate.sh"); err != nil {
-		return utils.Errorf(err, L("cannot run migration"))
+		return err
 	}
 
-	extractedData, err := utils.ReadInspectData[utils.InspectResult](path.Join(scriptDir, "data"))
-	if err != nil {
-		return utils.Errorf(err, L("cannot read data from container"))
+	// Wait forever for the job to finish: its duration depends on the amount of data to copy
+	if err := shared_kubernetes.WaitForJob(namespace, jobName, -1); err != nil {
+		return err
 	}
 
-	// After each command we want to scale to 0
-	err = shared_kubernetes.ReplicasTo(namespace, shared_kubernetes.ServerApp, 0)
+	// Read the extracted data from the migration volume
+	extractedData, err := extractMigrationData(
+		namespace, serverImage, flags.Image.PullPolicy, pullSecret, migrationDataVolume,
+	)
 	if err != nil {
-		return utils.Errorf(err, L("cannot set replicas to 0"))
+		return err
 	}
 
-	if flags.Prepare {
-		log.Info().Msg(L("Migration prepared. Run the 'migrate' command without '--prepare' to finish the migration."))
-		return nil
+	flags.Installation.TZ = extractedData.Data.Timezone
+	flags.Installation.Debug.Java = extractedData.Data.Debug
+	if extractedData.Data.HasHubXmlrpcAPI {
+		flags.HubXmlrpc.Replicas = 1
+		flags.HubXmlrpc.IsChanged = true
 	}
+	flags.Installation.DB.User = extractedData.Data.DBUser
+	flags.Installation.DB.Password = extractedData.Data.DBPassword
+	// TODO Are those two really needed in migration?
+	flags.Installation.DB.Name = extractedData.Data.DBName
+	flags.Installation.DB.Port = extractedData.Data.DBPort
 
-	defer func() {
-		// if something is running, we don't need to set replicas to 1
-		if _, err = shared_kubernetes.GetNode(namespace, shared_kubernetes.ServerFilter); err != nil {
-			err = shared_kubernetes.ReplicasTo(namespace, shared_kubernetes.ServerApp, 1)
-		}
-	}()
-
-	setupSSLArray, err := setupSSL(&flags.Helm, kubeconfig, scriptDir, flags.SSL.Password, flags.Image.PullPolicy)
+	sslDir, cleaner, err := utils.TempDir()
 	if err != nil {
-		return utils.Errorf(err, L("cannot setup SSL"))
-	}
-
-	helmArgs = append(helmArgs,
-		"--reset-values",
-		"--set", "timezone="+extractedData.Timezone,
-	)
-	if flags.Mirror != "" {
-		log.Warn().Msgf(L("The mirror data will not be migrated, ensure it is available at %s"), flags.Mirror)
-		// TODO Handle claims for multi-node clusters
-		helmArgs = append(helmArgs, "--set", "mirror.hostPath="+flags.Mirror)
+		return err
 	}
-	helmArgs = append(helmArgs, setupSSLArray...)
+	defer cleaner()
 
-	// Run uyuni upgrade using the new ssl certificate
-	if err = kubernetes.UyuniUpgrade(
-		serverImage, flags.Image.PullPolicy, &flags.Helm, kubeconfig, fqdn, clusterInfos.Ingress, helmArgs...,
+	// Extract the SSL data as files and pass them as arguments to share code with installation.
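+	// For instance, the CA certificate below lands in <sslDir>/ca.crt and
+	// flags.Installation.SSL.Ca.Root is pointed at that path, exactly as if
+	// the user had passed the file on an install command line.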
+ if err := writeToFile( + extractedData.CaCert, path.Join(sslDir, "ca.crt"), &flags.Installation.SSL.Ca.Root, ); err != nil { - return utils.Errorf(err, L("cannot upgrade helm chart to image %s using new SSL certificate"), serverImage) - } - - if err := shared_kubernetes.WaitForDeployment(namespace, "uyuni", "uyuni"); err != nil { - return utils.Errorf(err, L("cannot wait for deployment of %s"), serverImage) - } - - err = shared_kubernetes.ReplicasTo(namespace, shared_kubernetes.ServerApp, 0) - if err != nil { - return utils.Errorf(err, L("cannot set replicas to 0")) + return err } - oldPgVersion := extractedData.CurrentPgVersion - newPgVersion := extractedData.ImagePgVersion - - if oldPgVersion != newPgVersion { - if err := kubernetes.RunPgsqlVersionUpgrade(flags.Image.Registry, flags.Image, - flags.DBUpgradeImage, namespace, nodeName, oldPgVersion, newPgVersion, - ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL version upgrade script")) - } + // The CA key shouldn't be stored as a temporary file. + if extractedData.CaKey != "" { + flags.Installation.SSL.Ca.Key = extractedData.CaKey } - schemaUpdateRequired := oldPgVersion != newPgVersion - if err := kubernetes.RunPgsqlFinalizeScript( - serverImage, flags.Image.PullPolicy, namespace, nodeName, schemaUpdateRequired, true, + if err := writeToFile( + extractedData.ServerCert, path.Join(sslDir, "srv.crt"), &flags.Installation.SSL.Server.Cert, ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL finalisation script")) - } - - if err := kubernetes.RunPostUpgradeScript(serverImage, flags.Image.PullPolicy, namespace, nodeName); err != nil { - return utils.Errorf(err, L("cannot run post upgrade script")) + return err } - if err := kubernetes.UyuniUpgrade( - serverImage, flags.Image.PullPolicy, &flags.Helm, kubeconfig, fqdn, clusterInfos.Ingress, helmArgs..., + if err := writeToFile( + extractedData.ServerKey, path.Join(sslDir, "srv.key"), &flags.Installation.SSL.Server.Key, ); err != nil { - return utils.Errorf(err, L("cannot upgrade to image %s"), serverImage) - } - - if err := shared_kubernetes.WaitForDeployment(namespace, "uyuni", "uyuni"); err != nil { return err } - if err := cnx.CopyCaCertificate(fqdn); err != nil { - return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates")) - } - return nil + return kubernetes.Reconcile(flags, fqdn) } -// updateIssuer replaces the temporary SSL certificate issuer with the source server CA. -// Return additional helm args to use the SSL certificates. 
-func setupSSL( - helm *adm_utils.HelmFlags, - kubeconfig string, - scriptDir string, - password string, - pullPolicy string) ([]string, - error, -) { - caCert := path.Join(scriptDir, "RHN-ORG-TRUSTED-SSL-CERT") - caKey := path.Join(scriptDir, "RHN-ORG-PRIVATE-SSL-KEY") - - if utils.FileExists(caCert) && utils.FileExists(caKey) { - key := base64.StdEncoding.EncodeToString(ssl.GetRsaKey(caKey, password)) - - // Strip down the certificate text part - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "openssl", "x509", "-in", caCert) - if err != nil { - return []string{}, utils.Errorf(err, L("failed to strip text part from CA certificate")) - } - cert := base64.StdEncoding.EncodeToString(out) - ca := types.SSLPair{Cert: cert, Key: key} - - // An empty struct means no third party certificate - sslFlags := adm_utils.InstallSSLFlags{} - ret, err := kubernetes.DeployCertificate(helm, &sslFlags, cert, &ca, kubeconfig, "", pullPolicy) - if err != nil { - return []string{}, utils.Errorf(err, L("cannot deploy certificate")) +func writeToFile(content string, file string, flag *string) error { + if content != "" { + if err := os.WriteFile(file, []byte(content), 0600); err != nil { + return utils.Errorf(err, L("failed to write certificate to %s"), file) } - return ret, nil + *flag = file } - // Handle third party certificates and CA - sslFlags := adm_utils.InstallSSLFlags{ - Ca: types.CaChain{Root: caCert}, - Server: types.SSLPair{ - Key: path.Join(scriptDir, "spacewalk.key"), - Cert: path.Join(scriptDir, "spacewalk.crt"), - }, - } - if err := kubernetes.DeployExistingCertificate(helm, &sslFlags); err != nil { - return []string{}, nil - } - return []string{}, nil + return nil } diff --git a/mgradm/cmd/migrate/podman/podman.go b/mgradm/cmd/migrate/podman/podman.go index 73b4aeb27..90aa131b4 100644 --- a/mgradm/cmd/migrate/podman/podman.go +++ b/mgradm/cmd/migrate/podman/podman.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/shared" + cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" podman_utils "github.com/uyuni-project/uyuni-tools/shared/podman" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -15,9 +16,8 @@ import ( ) type podmanMigrateFlags struct { - shared.MigrateFlags `mapstructure:",squash"` - SCC types.SCCCredentials - Podman podman_utils.PodmanFlags + cmd_utils.ServerFlags `mapstructure:",squash"` + Podman podman_utils.PodmanFlags } func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanMigrateFlags]) *cobra.Command { @@ -38,8 +38,8 @@ NOTE: migrating to a remote podman is not supported yet! RunE: func(cmd *cobra.Command, args []string) error { var flags podmanMigrateFlags flagsUpdater := func(v *viper.Viper) { - flags.MigrateFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.MigrateFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/migrate/podman/podman_test.go b/mgradm/cmd/migrate/podman/podman_test.go index 61c2f62a5..a6b54a8e7 100644 --- a/mgradm/cmd/migrate/podman/podman_test.go +++ b/mgradm/cmd/migrate/podman/podman_test.go @@ -29,15 +29,17 @@ func TestParamsParsing(t *testing.T) { args = append(args, flagstests.PodmanFlagsTestArgs...) 
// Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *podmanMigrateFlags, _ *cobra.Command, args []string) error { - testutils.AssertTrue(t, "Prepare not set", flags.Prepare) + tester := func(_ *types.GlobalFlags, flags *podmanMigrateFlags, + _ *cobra.Command, args []string, + ) error { + testutils.AssertTrue(t, "Prepare not set", flags.Migration.Prepare) flagstests.AssertMirrorFlag(t, flags.Mirror) - flagstests.AssertSCCFlag(t, &flags.SCC) + flagstests.AssertSCCFlag(t, &flags.Installation.SCC) flagstests.AssertImageFlag(t, &flags.Image) flagstests.AssertDBUpgradeImageFlag(t, &flags.DBUpgradeImage) flagstests.AssertCocoFlag(t, &flags.Coco) flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) - testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.User) + testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.Migration.User) flagstests.AssertPodmanInstallFlags(t, &flags.Podman) testutils.AssertEquals(t, "Wrong FQDN", "source.fq.dn", args[0]) return nil diff --git a/mgradm/cmd/migrate/podman/utils.go b/mgradm/cmd/migrate/podman/utils.go index a25ed963e..93865eb89 100644 --- a/mgradm/cmd/migrate/podman/utils.go +++ b/mgradm/cmd/migrate/podman/utils.go @@ -49,7 +49,7 @@ func migrateToPodman( return err } - authFile, cleaner, err := podman_utils.PodmanLogin(hostData, flags.SCC) + authFile, cleaner, err := podman_utils.PodmanLogin(hostData, flags.Installation.SCC) if err != nil { return utils.Errorf(err, L("failed to login to registry.suse.com")) } @@ -71,12 +71,13 @@ func migrateToPodman( sshConfigPath, sshKnownhostsPath := migration_shared.GetSSHPaths() extractedData, err := podman.RunMigration( - preparedImage, sshAuthSocket, sshConfigPath, sshKnownhostsPath, sourceFqdn, flags.User, flags.Prepare, + preparedImage, sshAuthSocket, sshConfigPath, sshKnownhostsPath, sourceFqdn, + flags.Migration.User, flags.Migration.Prepare, ) if err != nil { return utils.Errorf(err, L("cannot run migration script")) } - if flags.Prepare { + if flags.Migration.Prepare { log.Info().Msg(L("Migration prepared. Run the 'migrate' command without '--prepare' to finish the migration.")) return nil } diff --git a/mgradm/cmd/migrate/shared/flags.go b/mgradm/cmd/migrate/shared/flags.go index 75b5e402d..7d1d0b561 100644 --- a/mgradm/cmd/migrate/shared/flags.go +++ b/mgradm/cmd/migrate/shared/flags.go @@ -8,21 +8,8 @@ import ( "github.com/spf13/cobra" "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/types" ) -// MigrateFlags represents flag required by migration command. -type MigrateFlags struct { - Prepare bool - Image types.ImageFlags `mapstructure:",squash"` - DBUpgradeImage types.ImageFlags `mapstructure:"dbupgrade"` - Coco utils.CocoFlags - User string - Mirror string - HubXmlrpc utils.HubXmlrpcFlags - SCC types.SCCCredentials -} - // AddMigrateFlags add migration flags to a command. 
func AddMigrateFlags(cmd *cobra.Command) {
 	cmd.Flags().Bool("prepare", false, L("Prepare the migration - copy the data without stopping the source server."))
diff --git a/mgradm/cmd/scale/podman.go b/mgradm/cmd/scale/podman.go
index 100d1c2f0..d40df4e33 100644
--- a/mgradm/cmd/scale/podman.go
+++ b/mgradm/cmd/scale/podman.go
@@ -31,7 +31,7 @@ func podmanScale(
 	}
 	if service == podman.HubXmlrpcService {
 		if newReplicas > 1 {
-			return errors.New(L("Multiple Hub XML-RPC container replicas are not currently supported."))
+			return errors.New(L("Multiple Hub XML-RPC API container replicas are not currently supported."))
 		}
 		return systemd.ScaleService(newReplicas, service)
 	}
diff --git a/mgradm/cmd/uninstall/kubernetes.go b/mgradm/cmd/uninstall/kubernetes.go
index 36816c666..5b24aa61c 100644
--- a/mgradm/cmd/uninstall/kubernetes.go
+++ b/mgradm/cmd/uninstall/kubernetes.go
@@ -7,9 +7,8 @@
 package uninstall
 
 import (
-	"fmt"
+	"strings"
 
-	"github.com/rs/zerolog"
 	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 	"github.com/uyuni-project/uyuni-tools/shared"
@@ -46,38 +45,26 @@ func uninstallForKubernetes(
 	if err != nil {
 		return err
 	}
-	if err := kubernetes.HelmUninstall(serverNamespace, kubeconfig, kubernetes.ServerApp, !flags.Force); err != nil {
-		return err
-	}
-	// Remove the remaining configmap and secrets
+	// Remove all Uyuni resources
 	if serverNamespace != "" {
-		_, err := utils.RunCmdOutput(zerolog.TraceLevel, "kubectl", "-n", serverNamespace, "get", "secret", "uyuni-ca")
-		caSecret := "uyuni-ca"
-		if err != nil {
-			caSecret = ""
+		objects := "job,deploy,svc,ingress,pvc,cm,secret"
+		if kubernetes.HasResource("ingressroutetcps") {
+			objects += ",middlewares,ingressroutetcps,ingressrouteudps"
 		}
+		if kubernetes.HasResource("issuers") {
+			objects += ",issuers,certificates"
+		}
+		deleteCmd := []string{
+			"kubectl", "delete", "-n", serverNamespace, objects,
+			"-l", kubernetes.AppLabel + "=" + kubernetes.ServerApp,
+		}
 
 		if !flags.Force {
-			log.Info().Msgf(L("Would run %s"), fmt.Sprintf("kubectl delete -n %s configmap uyuni-ca", serverNamespace))
-			log.Info().Msgf(L("Would run %s"),
-				fmt.Sprintf("kubectl delete -n %s secret uyuni-cert %s", serverNamespace, caSecret),
-			)
+			log.Info().Msgf(L("Would run %s"), strings.Join(deleteCmd, " "))
 		} else {
-			log.Info().Msgf(L("Running %s"), fmt.Sprintf("kubectl delete -n %s configmap uyuni-ca", serverNamespace))
-			if err := utils.RunCmd("kubectl", "delete", "-n", serverNamespace, "configmap", "uyuni-ca"); err != nil {
-				log.Info().Err(err).Msgf(L("Failed deleting config map"))
-			}
-
-			log.Info().Msgf(L("Running %s"), fmt.Sprintf("kubectl delete -n %s secret uyuni-cert %s", serverNamespace, caSecret))
-
-			args := []string{"delete", "-n", serverNamespace, "secret", "uyuni-cert"}
-			if caSecret != "" {
-				args = append(args, caSecret)
-			}
-			err := utils.RunCmd("kubectl", args...)
- if err != nil { - log.Info().Err(err).Msgf(L("Failed deleting secret")) + if err := utils.RunCmd(deleteCmd[0], deleteCmd[1:]...); err != nil { + return utils.Errorf(err, L("failed to delete server resources")) } } } @@ -87,15 +74,17 @@ func uninstallForKubernetes( // Since some storage plugins don't handle Delete policy, we may need to check for error events to avoid infinite loop // Uninstall cert-manager if we installed it - certManagerConnection := shared.NewConnection("kubectl", "", "") + certManagerConnection := shared.NewConnection("kubectl", "", "-linstalledby=mgradm") // TODO: re-add "-linstalledby=mgradm" filter once the label is added in helm release // mgradm/shared/kubernetes/certificates.go:124 was supposed to be addressing it certManagerNamespace, err := certManagerConnection.GetNamespace("cert-manager") if err != nil { return err } - if err := kubernetes.HelmUninstall(certManagerNamespace, kubeconfig, "cert-manager", !flags.Force); err != nil { - return err + if certManagerNamespace != "" { + if err := kubernetes.HelmUninstall(certManagerNamespace, kubeconfig, "cert-manager", !flags.Force); err != nil { + return err + } } // Remove the K3s Traefik config diff --git a/mgradm/cmd/upgrade/kubernetes/kubernetes.go b/mgradm/cmd/upgrade/kubernetes/kubernetes.go index 3b0aac6d6..bf4916b47 100644 --- a/mgradm/cmd/upgrade/kubernetes/kubernetes.go +++ b/mgradm/cmd/upgrade/kubernetes/kubernetes.go @@ -10,28 +10,24 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/upgrade/shared" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) -type kubernetesUpgradeFlags struct { - shared.UpgradeFlags `mapstructure:",squash"` - Helm cmd_utils.HelmFlags -} - -func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetesUpgradeFlags]) *cobra.Command { +func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command { upgradeCmd := &cobra.Command{ Use: "kubernetes", Short: L("Upgrade a local server on kubernetes"), Long: L("Upgrade a local server on kubernetes"), Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { - var flags kubernetesUpgradeFlags + var flags kubernetes.KubernetesServerFlags flagsUpdater := func(v *viper.Viper) { - flags.UpgradeFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.UpgradeFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go b/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go index 5fefb4602..c1c8844c9 100644 --- a/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/spf13/cobra" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" "github.com/uyuni-project/uyuni-tools/shared/testutils" "github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -23,10 +24,10 @@ func TestParamsParsing(t *testing.T) { args = append(args, 
flagstests.CocoFlagsTestArgs...) args = append(args, flagstests.HubXmlrpcFlagsTestArgs...) args = append(args, flagstests.SCCFlagTestArgs...) - args = append(args, flagstests.ServerHelmFlagsTestArgs...) + args = append(args, flagstests.ServerKubernetesFlagsTestArgs...) // Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *kubernetesUpgradeFlags, + tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, _ *cobra.Command, _ []string, ) error { flagstests.AssertImageFlag(t, &flags.Image) @@ -34,7 +35,7 @@ func TestParamsParsing(t *testing.T) { flagstests.AssertCocoFlag(t, &flags.Coco) flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) // TODO Assert SCC flags - flagstests.AssertServerHelmFlags(t, &flags.Helm) + flagstests.AssertServerKubernetesFlags(t, &flags.Kubernetes) return nil } diff --git a/mgradm/cmd/upgrade/kubernetes/utils.go b/mgradm/cmd/upgrade/kubernetes/utils.go index 6b7a05af1..96d89779b 100644 --- a/mgradm/cmd/upgrade/kubernetes/utils.go +++ b/mgradm/cmd/upgrade/kubernetes/utils.go @@ -13,10 +13,10 @@ import ( ) func upgradeKubernetes( - globalFlags *types.GlobalFlags, - flags *kubernetesUpgradeFlags, - cmd *cobra.Command, - args []string, + _ *types.GlobalFlags, + flags *kubernetes.KubernetesServerFlags, + _ *cobra.Command, + _ []string, ) error { - return kubernetes.Upgrade(globalFlags, &flags.Image, &flags.DBUpgradeImage, flags.Helm, cmd, args) + return kubernetes.Reconcile(flags, "") } diff --git a/mgradm/cmd/upgrade/podman/podman.go b/mgradm/cmd/upgrade/podman/podman.go index 72139d807..8cb5652c6 100644 --- a/mgradm/cmd/upgrade/podman/podman.go +++ b/mgradm/cmd/upgrade/podman/podman.go @@ -9,6 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/upgrade/shared" + cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/podman" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -16,9 +17,8 @@ import ( ) type podmanUpgradeFlags struct { - shared.UpgradeFlags `mapstructure:",squash"` - SCC types.SCCCredentials - Podman podman.PodmanFlags + cmd_utils.ServerFlags `mapstructure:",squash"` + Podman podman.PodmanFlags } func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanUpgradeFlags]) *cobra.Command { @@ -29,8 +29,8 @@ func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanUpgradeF RunE: func(cmd *cobra.Command, args []string) error { var flags podmanUpgradeFlags flagsUpdater := func(v *viper.Viper) { - flags.UpgradeFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.UpgradeFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/upgrade/podman/podman_test.go b/mgradm/cmd/upgrade/podman/podman_test.go index 42251f804..92cdf0c28 100644 --- a/mgradm/cmd/upgrade/podman/podman_test.go +++ b/mgradm/cmd/upgrade/podman/podman_test.go @@ -24,12 +24,14 @@ func TestParamsParsing(t *testing.T) { args = append(args, flagstests.PodmanFlagsTestArgs...) 
 // Test function asserting that the args are properly parsed
-	tester := func(_ *types.GlobalFlags, flags *podmanUpgradeFlags, _ *cobra.Command, _ []string) error {
+	tester := func(_ *types.GlobalFlags, flags *podmanUpgradeFlags,
+		_ *cobra.Command, _ []string,
+	) error {
 		flagstests.AssertImageFlag(t, &flags.Image)
 		flagstests.AssertDBUpgradeImageFlag(t, &flags.DBUpgradeImage)
 		flagstests.AssertCocoFlag(t, &flags.Coco)
 		flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc)
-		flagstests.AssertSCCFlag(t, &flags.SCC)
+		flagstests.AssertSCCFlag(t, &flags.ServerFlags.Installation.SCC)
 		flagstests.AssertPodmanInstallFlags(t, &flags.Podman)
 		return nil
 	}
diff --git a/mgradm/cmd/upgrade/podman/utils.go b/mgradm/cmd/upgrade/podman/utils.go
index 9fc192ff8..740f8b2bd 100644
--- a/mgradm/cmd/upgrade/podman/utils.go
+++ b/mgradm/cmd/upgrade/podman/utils.go
@@ -21,7 +21,7 @@ func upgradePodman(_ *types.GlobalFlags, flags *podmanUpgradeFlags, _ *cobra.Com
 		return err
 	}
 
-	authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.SCC)
+	authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.Installation.SCC)
 	if err != nil {
 		return utils.Errorf(err, L("failed to login to registry.suse.com"))
 	}
diff --git a/mgradm/cmd/upgrade/shared/flags.go b/mgradm/cmd/upgrade/shared/flags.go
index d11046cde..1d165e2fd 100644
--- a/mgradm/cmd/upgrade/shared/flags.go
+++ b/mgradm/cmd/upgrade/shared/flags.go
@@ -7,17 +7,8 @@
 package shared
 
 import (
 	"github.com/spf13/cobra"
 	"github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
-	"github.com/uyuni-project/uyuni-tools/shared/types"
 )
 
-// UpgradeFlags represents flags used for upgrading a server.
-type UpgradeFlags struct {
-	Image          types.ImageFlags `mapstructure:",squash"`
-	DBUpgradeImage types.ImageFlags `mapstructure:"dbupgrade"`
-	Coco           utils.CocoFlags
-	HubXmlrpc      utils.HubXmlrpcFlags
-}
-
 // AddUpgradeFlags adds upgrade flags to a command.
 func AddUpgradeFlags(cmd *cobra.Command) {
 	utils.AddImageFlag(cmd)
diff --git a/mgradm/shared/kubernetes/certificates.go b/mgradm/shared/kubernetes/certificates.go
index c8ecdfe9b..dedec1e3b 100644
--- a/mgradm/shared/kubernetes/certificates.go
+++ b/mgradm/shared/kubernetes/certificates.go
@@ -2,11 +2,17 @@
 //
 // SPDX-License-Identifier: Apache-2.0
 
+//go:build !nok8s
+
 package kubernetes
 
 import (
 	"encoding/base64"
+	"errors"
+	"fmt"
+	"os"
 	"path/filepath"
+	"strings"
 	"time"
 
 	"github.com/rs/zerolog"
@@ -15,22 +21,38 @@ import (
 	cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
 	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/ssl"
 	"github.com/uyuni-project/uyuni-tools/shared/types"
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
+
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
-func installTLSSecret(namespace string, serverCrt []byte, serverKey []byte, rootCaCrt []byte) error {
-	crdsDir, cleaner, err := utils.TempDir()
+// Helm annotation to add in order to use cert-manager's uyuni CA issuer, in JSON format.
+var ingressCertManagerAnnotation = fmt.Sprintf(
+	"ingressSSLAnnotations={\"cert-manager.io/issuer\": \"%s\"}",
+	kubernetes.CaIssuerName,
+)
+
+// DeployExistingCertificate deploys an existing server certificate and its CA.
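+//
+// A minimal usage sketch (namespace and file paths are illustrative):
+//
+//	sslFlags := cmd_utils.InstallSSLFlags{
+//		Ca:     types.CaChain{Root: "ca.crt"},
+//		Server: types.SSLPair{Cert: "srv.crt", Key: "srv.key"},
+//	}
+//	err := DeployExistingCertificate("uyuni", &sslFlags)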
+func DeployExistingCertificate(namespace string, sslFlags *cmd_utils.InstallSSLFlags) error {
+	// Deploy the SSL Certificate secret and CA configmap
+	serverCrt, rootCaCrt := ssl.OrderCas(&sslFlags.Ca, &sslFlags.Server)
+	serverKey := utils.ReadFile(sslFlags.Server.Key)
+
+	tempDir, cleaner, err := utils.TempDir()
 	if err != nil {
 		return err
 	}
 	defer cleaner()
 
-	secretPath := filepath.Join(crdsDir, "secret.yaml")
+	secretPath := filepath.Join(tempDir, "secret.yaml")
 	log.Info().Msg(L("Creating SSL server certificate secret"))
 	tlsSecretData := templates.TLSSecretTemplateData{
 		Namespace:   namespace,
-		Name:        "uyuni-cert",
+		Name:        CertSecretName,
 		Certificate: base64.StdEncoding.EncodeToString(serverCrt),
 		Key:         base64.StdEncoding.EncodeToString(serverKey),
 		RootCa:      base64.StdEncoding.EncodeToString(rootCaCrt),
@@ -44,79 +66,112 @@ func installTLSSecret(namespace string, serverCrt []byte, serverKey []byte, root
 		return utils.Errorf(err, L("Failed to create uyuni-crt TLS secret"))
 	}
 
-	createCaConfig(namespace, rootCaCrt)
-	return nil
+	// Copy the CA cert into uyuni-ca config map as the container shouldn't have the CA secret
+	return createCaConfig(namespace, rootCaCrt)
 }
 
-// Install cert-manager and its CRDs using helm in the cert-manager namespace if needed
-// and then create a self-signed CA and issuers.
-// Returns helm arguments to be added to use the issuer.
-func installSSLIssuers(helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_utils.InstallSSLFlags, rootCa string,
-	tlsCert *types.SSLPair, kubeconfig, fqdn string, imagePullPolicy string) ([]string, error) {
-	// Install cert-manager if needed
-	if err := installCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil {
-		return []string{}, utils.Errorf(err, L("cannot install cert manager"))
+// DeployReusedCa deploys an existing SSL CA using an already installed cert-manager.
+func DeployReusedCa(namespace string, ca *types.SSLPair) error {
+	log.Info().Msg(L("Creating cert-manager issuer for existing CA"))
+	tempDir, cleaner, err := utils.TempDir()
+	if err != nil {
+		return err
+	}
+	defer cleaner()
+
+	issuerPath := filepath.Join(tempDir, "issuer.yaml")
+
+	issuerData := templates.ReusedCaIssuerTemplateData{
+		Namespace:   namespace,
+		Key:         ca.Key,
+		Certificate: ca.Cert,
 	}
+	if err = utils.WriteTemplateToFile(issuerData, issuerPath, 0500, true); err != nil {
+		return utils.Errorf(err, L("failed to generate issuer definition"))
+	}
+
+	err = utils.RunCmd("kubectl", "apply", "-f", issuerPath)
+	if err != nil {
+		return utils.Errorf(err, L("Failed to create issuer"))
+	}
+
+	return nil
+}
+
+// DeployGeneratedCa deploys a new SSL CA using cert-manager.
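+//
+// A typical sequence, sketched under the assumption that the issuer is named
+// kubernetes.CaIssuerName (the name referenced by ingressCertManagerAnnotation):
+//
+//	if err := DeployGeneratedCa("uyuni", &sslFlags, fqdn); err != nil {
+//		return err
+//	}
+//	if err := waitForIssuer("uyuni", kubernetes.CaIssuerName); err != nil {
+//		return err
+//	}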
+func DeployGeneratedCa( + namespace string, + sslFlags *cmd_utils.InstallSSLFlags, + fqdn string, +) error { log.Info().Msg(L("Creating SSL certificate issuer")) - crdsDir, cleaner, err := utils.TempDir() + tempDir, err := os.MkdirTemp("", "mgradm-*") if err != nil { - return []string{}, err + return utils.Errorf(err, L("failed to create temporary directory")) } - defer cleaner() + defer os.RemoveAll(tempDir) - issuerPath := filepath.Join(crdsDir, "issuer.yaml") + issuerPath := filepath.Join(tempDir, "issuer.yaml") - issuerData := templates.IssuerTemplateData{ - Namespace: helmFlags.Uyuni.Namespace, - Country: sslFlags.Country, - State: sslFlags.State, - City: sslFlags.City, - Org: sslFlags.Org, - OrgUnit: sslFlags.OU, - Email: sslFlags.Email, - Fqdn: fqdn, - RootCa: rootCa, - Key: tlsCert.Key, - Certificate: tlsCert.Cert, + issuerData := templates.GeneratedCaIssuerTemplateData{ + Namespace: namespace, + Country: sslFlags.Country, + State: sslFlags.State, + City: sslFlags.City, + Org: sslFlags.Org, + OrgUnit: sslFlags.OU, + Email: sslFlags.Email, + Fqdn: fqdn, } if err = utils.WriteTemplateToFile(issuerData, issuerPath, 0500, true); err != nil { - return []string{}, utils.Errorf(err, L("failed to generate issuer definition")) + return utils.Errorf(err, L("failed to generate issuer definition")) } err = utils.RunCmd("kubectl", "apply", "-f", issuerPath) if err != nil { - log.Fatal().Err(err).Msg(L("Failed to create issuer")) + return utils.Errorf(err, L("Failed to create issuer")) } - // Wait for issuer to be ready + return nil +} + +// Wait for issuer to be ready. +func waitForIssuer(namespace string, name string) error { for i := 0; i < 60; i++ { - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "get", "-o=jsonpath={.status.conditions[*].type}", - "issuer", "uyuni-ca-issuer", "-n", issuerData.Namespace) + out, err := utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", + "-o=jsonpath={.status.conditions[*].type}", + "-n", namespace, + "issuer", name, + ) if err == nil && string(out) == "Ready" { - return []string{"--set-json", "ingressSSLAnnotations={\"cert-manager.io/issuer\": \"uyuni-ca-issuer\"}"}, nil + return nil } time.Sleep(1 * time.Second) } - log.Fatal().Msg(L("Issuer didn't turn ready after 60s")) - return []string{}, nil + return errors.New(L("Issuer didn't turn ready after 60s")) } -func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, imagePullPolicy string) error { - if !kubernetes.IsDeploymentReady("", "cert-manager") { +// InstallCertManager deploys the cert-manager helm chart with the CRDs. 
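+//
+// The effect is roughly this helm invocation (a sketch; chart, version and
+// namespace come from the CertManager flags):
+//
+//	helm install cert-manager <chart> -n <namespace> --version <version> \
+//	    --set crds.enabled=true --set crds.keep=true \
+//	    --set-json 'global.commonLabels={"installedby": "mgradm"}' \
+//	    --set image.pullPolicy=<policy>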
+func InstallCertManager(kubernetesFlags *cmd_utils.KubernetesFlags, kubeconfig string, imagePullPolicy string) error {
+	if ready, err := kubernetes.IsDeploymentReady("", "cert-manager"); err != nil {
+		return err
+	} else if !ready {
 		log.Info().Msg(L("Installing cert-manager"))
 		repo := ""
-		chart := helmFlags.CertManager.Chart
-		version := helmFlags.CertManager.Version
-		namespace := helmFlags.CertManager.Namespace
+		chart := kubernetesFlags.CertManager.Chart
+		version := kubernetesFlags.CertManager.Version
+		namespace := kubernetesFlags.CertManager.Namespace
 
 		args := []string{
 			"--set", "crds.enabled=true",
+			"--set", "crds.keep=true",
 			"--set-json", "global.commonLabels={\"installedby\": \"mgradm\"}",
-			"--set", "image.pullPolicy=" + kubernetes.GetPullPolicy(imagePullPolicy),
+			"--set", "image.pullPolicy=" + string(kubernetes.GetPullPolicy(imagePullPolicy)),
 		}
 
-		extraValues := helmFlags.CertManager.Values
+		extraValues := kubernetesFlags.CertManager.Values
 		if extraValues != "" {
 			args = append(args, "-f", extraValues)
 		}
@@ -135,7 +190,7 @@
 	}
 
 	// Wait for cert-manager to be ready
-	err := kubernetes.WaitForDeployment("", "cert-manager-webhook", "webhook")
+	err := kubernetes.WaitForDeployments("", "cert-manager-webhook")
 	if err != nil {
 		return utils.Errorf(err, L("cannot deploy"))
 	}
@@ -143,7 +198,7 @@
 	return nil
 }
 
-func extractCaCertToConfig(namespace string) {
+func extractCaCertToConfig(namespace string) error {
 	// TODO Replace with [trust-manager](https://cert-manager.io/docs/projects/trust-manager/) to automate this
 	const jsonPath = "-o=jsonpath={.data.ca\\.crt}"
@@ -155,25 +210,43 @@
 	log.Info().Msgf(L("CA cert: %s"), string(out))
 	if err == nil && len(out) > 0 {
 		log.Info().Msg(L("uyuni-ca configmap already exists, skipping extraction"))
-		return
+		return nil
 	}
 
-	out, err = utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "get", "secret", "uyuni-ca", jsonPath, "-n", namespace)
+	out, err = utils.RunCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "secret", "-n", namespace, "uyuni-ca", jsonPath,
+	)
 	if err != nil {
-		log.Fatal().Err(err).Msgf(L("Failed to get uyuni-ca certificate"))
+		return utils.Errorf(err, L("Failed to get uyuni-ca certificate"))
 	}
 
 	decoded, err := base64.StdEncoding.DecodeString(string(out))
 	if err != nil {
-		log.Fatal().Err(err).Msgf(L("Failed to base64 decode CA certificate"))
+		return utils.Errorf(err, L("Failed to base64 decode CA certificate"))
 	}
 
-	createCaConfig(namespace, decoded)
+	return createCaConfig(namespace, decoded)
 }
 
-func createCaConfig(namespace string, ca []byte) {
-	valueArg := "--from-literal=ca.crt=" + string(ca)
-	if err := utils.RunCmd("kubectl", "create", "configmap", "uyuni-ca", valueArg, "-n", namespace); err != nil {
-		log.Fatal().Err(err).Msg(L("Failed to create uyuni-ca config map from certificate"))
-	}
+func createCaConfig(namespace string, ca []byte) error {
+	configMap := core.ConfigMap{
+		TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
+		ObjectMeta: meta.ObjectMeta{
+			Namespace: namespace,
+			Name:      "uyuni-ca",
+			Labels:    kubernetes.GetLabels(kubernetes.ServerApp, ""),
+		},
+		Data: map[string]string{
+			"ca.crt": string(ca),
+		},
+	}
+	return kubernetes.Apply([]runtime.Object{&configMap}, L("failed to create the uyuni-ca ConfigMap"))
+}
+
+// HasIssuer returns true if the issuer is defined.
+//
+// False will be returned in case of errors or if the issuer resource doesn't exist on the cluster.
+func HasIssuer(namespace string, name string) bool {
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "issuer", "-n", namespace, name, "-o", "name")
+	return err == nil && strings.TrimSpace(string(out)) != ""
+}
diff --git a/mgradm/shared/kubernetes/certificates_test.go b/mgradm/shared/kubernetes/certificates_test.go
new file mode 100644
index 000000000..772d57eda
--- /dev/null
+++ b/mgradm/shared/kubernetes/certificates_test.go
@@ -0,0 +1,46 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/uyuni-project/uyuni-tools/shared/testutils"
+)
+
+func TestHasIssuer(t *testing.T) {
+	type testType struct {
+		out      string
+		err      error
+		expected bool
+	}
+
+	data := []testType{
+		{
+			out:      "issuer.cert-manager.io/someissuer\n",
+			err:      nil,
+			expected: true,
+		},
+		{
+			out:      "any error\n",
+			err:      errors.New("Any error"),
+			expected: false,
+		},
+	}
+
+	for i, test := range data {
+		runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) {
+			return []byte(test.out), test.err
+		}
+		testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i+1), test.expected,
+			HasIssuer("somens", "someissuer"),
+		)
+	}
+}
diff --git a/mgradm/shared/kubernetes/coco.go b/mgradm/shared/kubernetes/coco.go
new file mode 100644
index 000000000..e40ee6b64
--- /dev/null
+++ b/mgradm/shared/kubernetes/coco.go
@@ -0,0 +1,101 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"fmt"
+
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	apps "k8s.io/api/apps/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+const (
+	// CocoDeployName is the deployment name for confidential computing attestations.
+	CocoDeployName = "uyuni-coco-attestation"
+)
+
+// StartCocoDeployment installs the confidential computing deployment.
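+//
+// Example call (all values are illustrative; the database port and name
+// normally come from the installation flags):
+//
+//	err := StartCocoDeployment("uyuni", cocoImage, "IfNotPresent", pullSecret, 2, 5432, "susemanager")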
+func StartCocoDeployment( + namespace string, + image string, + pullPolicy string, + pullSecret string, + replicas int, + dbPort int, + dbName string, +) error { + deploy := getCocoDeployment(namespace, image, pullPolicy, pullSecret, int32(replicas), dbPort, dbName) + return kubernetes.Apply([]runtime.Object{deploy}, + L("failed to create confidential computing attestations deployment"), + ) +} + +func getCocoDeployment( + namespace string, + image string, + pullPolicy string, + pullSecret string, + replicas int32, + dbPort int, + dbName string, +) *apps.Deployment { + cnxURL := fmt.Sprintf("jdbc:postgresql://%s:%d/%s", utils.DBServiceName, dbPort, dbName) + deploy := &apps.Deployment{ + TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: CocoDeployName, + Namespace: namespace, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.CocoComponent), + }, + Spec: apps.DeploymentSpec{ + Replicas: &replicas, + Selector: &meta.LabelSelector{ + MatchLabels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.CocoComponent), + }, + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.CocoComponent), + }, + Spec: core.PodSpec{ + Containers: []core.Container{ + { + Name: "coco", + Image: image, + ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy), + Env: []core.EnvVar{ + {Name: "database_connection", Value: cnxURL}, + {Name: "database_user", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: DBSecret}, + Key: secretUsername, + }, + }}, + {Name: "database_password", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: DBSecret}, + Key: secretPassword, + }, + }}, + }, + }, + }, + }, + }, + }, + } + + if pullSecret != "" { + deploy.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}} + } + + return deploy +} diff --git a/mgradm/shared/kubernetes/db.go b/mgradm/shared/kubernetes/db.go new file mode 100644 index 000000000..da5ea7d69 --- /dev/null +++ b/mgradm/shared/kubernetes/db.go @@ -0,0 +1,55 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "strings" + + "github.com/rs/zerolog" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // DBSecret is the name of the database credentials secret. + DBSecret = "db-credentials" + // ReportdbSecret is the name of the report database credentials secret. + ReportdbSecret = "reportdb-credentials" + SCCSecret = "scc-credentials" + secretUsername = "username" + secretPassword = "password" +) + +// CreateBasicAuthSecret creates a secret of type basic-auth. 
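+//
+// For example, storing the report database credentials (variable names are
+// illustrative):
+//
+//	if err := CreateBasicAuthSecret("uyuni", ReportdbSecret, reportDBUser, reportDBPassword); err != nil {
+//		return err
+//	}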
+func CreateBasicAuthSecret(namespace string, name string, user string, password string) error {
+	// Check if the secret already exists
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "-n", namespace, "secret", name, "-o", "name")
+	if err == nil && strings.TrimSpace(string(out)) != "" {
+		return nil
+	}
+
+	// Create the secret
+	secret := core.Secret{
+		TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"},
+		ObjectMeta: meta.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+			Labels:    kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
+		},
+		// Serializing this object automatically encodes the Data values to base64.
+		Data: map[string][]byte{
+			secretUsername: []byte(user),
+			secretPassword: []byte(password),
+		},
+		Type: core.SecretTypeBasicAuth,
+	}
+
+	return kubernetes.Apply([]runtime.Object{&secret}, L("failed to create the secret"))
+}
diff --git a/mgradm/shared/kubernetes/dbFinalize.go b/mgradm/shared/kubernetes/dbFinalize.go
new file mode 100644
index 000000000..b096f9872
--- /dev/null
+++ b/mgradm/shared/kubernetes/dbFinalize.go
@@ -0,0 +1,64 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/rs/zerolog/log"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+
+	"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	batch "k8s.io/api/batch/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// DBFinalizeJobName is the name of the database finalization job.
+const DBFinalizeJobName = "uyuni-db-finalize"
+
+// StartDBFinalizeJob starts the database finalization job.
+func StartDBFinalizeJob(
+	namespace string,
+	serverImage string,
+	pullPolicy string,
+	pullSecret string,
+	schemaUpdateRequired bool,
+	migration bool,
+) (string, error) {
+	log.Info().Msg(L("Running database finalization, this could be long depending on the size of the database…"))
+	job, err := getDBFinalizeJob(namespace, serverImage, pullPolicy, pullSecret, schemaUpdateRequired, migration)
+	if err != nil {
+		return "", err
+	}
+
+	return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the database finalization job"))
+}
+
+func getDBFinalizeJob(
+	namespace string,
+	image string,
+	pullPolicy string,
+	pullSecret string,
+	schemaUpdateRequired bool,
+	migration bool,
+) (*batch.Job, error) {
+	mounts := []types.VolumeMount{
+		{MountPath: "/var/lib/pgsql", Name: "var-pgsql"},
+		{MountPath: "/etc/rhn", Name: "etc-rhn"},
+	}
+
+	// Prepare the script
+	scriptData := templates.FinalizePostgresTemplateData{
+		RunAutotune:     true,
+		RunReindex:      migration,
+		RunSchemaUpdate: schemaUpdateRequired,
+		Migration:       migration,
+		Kubernetes:      true,
+	}
+
+	return kubernetes.GetScriptJob(namespace, DBFinalizeJobName, image, pullPolicy, pullSecret, mounts, scriptData)
+}
diff --git a/mgradm/shared/kubernetes/dbUpgradeJob.go b/mgradm/shared/kubernetes/dbUpgradeJob.go
new file mode 100644
index 000000000..014aa98c3
--- /dev/null
+++ b/mgradm/shared/kubernetes/dbUpgradeJob.go
@@ -0,0 +1,79 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"fmt"
+
+	"github.com/rs/zerolog/log"
+	.
"github.com/uyuni-project/uyuni-tools/shared/l10n" + + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + batch "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DBUpgradeJobName is the name of the database upgrade job. +const DBUpgradeJobName = "uyuni-db-upgrade" + +// StartDBUpgradeJob starts the database upgrade job. +func StartDBUpgradeJob( + namespace string, + registry string, + image types.ImageFlags, + migrationImage types.ImageFlags, + pullSecret string, + oldPgsql string, + newPgsql string, +) (string, error) { + log.Info().Msgf(L("Upgrading PostgreSQL database from %[1]s to %[2]s…"), oldPgsql, newPgsql) + + var migrationImageURL string + var err error + if migrationImage.Name == "" { + imageName := fmt.Sprintf("-migration-%s-%s", oldPgsql, newPgsql) + migrationImageURL, err = utils.ComputeImage(registry, image.Tag, image, imageName) + } else { + migrationImageURL, err = utils.ComputeImage(registry, image.Tag, migrationImage) + } + if err != nil { + return "", utils.Errorf(err, L("failed to compute image URL")) + } + + log.Info().Msgf(L("Using database upgrade image %s"), migrationImageURL) + + job, err := getDBUpgradeJob(namespace, migrationImageURL, image.PullPolicy, pullSecret, oldPgsql, newPgsql) + if err != nil { + return "", err + } + + return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the database upgrade job")) +} + +func getDBUpgradeJob( + namespace string, + image string, + pullPolicy string, + pullSecret string, + oldPgsql string, + newPgsql string, +) (*batch.Job, error) { + mounts := []types.VolumeMount{ + {MountPath: "/var/lib/pgsql", Name: "var-pgsql"}, + } + + // Prepare the script + scriptData := templates.PostgreSQLVersionUpgradeTemplateData{ + OldVersion: oldPgsql, + NewVersion: newPgsql, + } + + return kubernetes.GetScriptJob(namespace, DBUpgradeJobName, image, pullPolicy, pullSecret, mounts, scriptData) +} diff --git a/mgradm/shared/kubernetes/deployment.go b/mgradm/shared/kubernetes/deployment.go new file mode 100644 index 000000000..1c2dfb348 --- /dev/null +++ b/mgradm/shared/kubernetes/deployment.go @@ -0,0 +1,344 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "strings" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/uyuni-project/uyuni-tools/shared/utils" + + cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// ServerDeployName is the name of the server deployment. +const ServerDeployName = "uyuni" + +// CreateServerDeployment creates a new deployment of the server. 
+func CreateServerDeployment(
+	namespace string,
+	serverImage string,
+	pullPolicy string,
+	timezone string,
+	debug bool,
+	mirrorPvName string,
+	pullSecret string,
+) error {
+	if mirrorPvName != "" {
+		// Create a PVC using the required mirror PV
+		if err := kubernetes.CreatePersistentVolumeClaimForVolume(namespace, mirrorPvName); err != nil {
+			return err
+		}
+	}
+
+	serverDeploy := GetServerDeployment(
+		namespace, serverImage, kubernetes.GetPullPolicy(pullPolicy), timezone, debug, mirrorPvName, pullSecret,
+	)
+
+	return kubernetes.Apply([]runtime.Object{serverDeploy}, L("failed to create the server deployment"))
+}
+
+// GetServerDeployment computes the deployment object for an Uyuni server.
+func GetServerDeployment(
+	namespace string,
+	image string,
+	pullPolicy core.PullPolicy,
+	timezone string,
+	debug bool,
+	mirrorPvName string,
+	pullSecret string,
+) *apps.Deployment {
+	var replicas int32 = 1
+
+	runMount, runVolume := kubernetes.CreateTmpfsMount("/run", "256Mi")
+	cgroupMount, cgroupVolume := kubernetes.CreateHostPathMount(
+		"/sys/fs/cgroup", "/sys/fs/cgroup", core.HostPathDirectory,
+	)
+
+	// Compute the needed ports
+	ports := utils.GetServerPorts(debug)
+
+	template := getServerPodTemplate(image, pullPolicy, timezone, pullSecret)
+
+	template.Spec.Volumes = append(template.Spec.Volumes, runVolume, cgroupVolume)
+	template.Spec.Containers[0].Ports = kubernetes.ConvertPortMaps(ports)
+	template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts,
+		runMount, cgroupMount,
+	)
+
+	if mirrorPvName != "" {
+		// Add a mount for the mirror
+		template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts,
+			core.VolumeMount{
+				Name:      mirrorPvName,
+				MountPath: "/mirror",
+			},
+		)
+
+		// Add the environment variable for the deployment to use the mirror.
+		// This doesn't make sense for migrations as the setup script is not executed.
+		template.Spec.Containers[0].Env = append(template.Spec.Containers[0].Env,
+			core.EnvVar{Name: "MIRROR_PATH", Value: "/mirror"},
+		)
+	}
+
+	template.Spec.Containers[0].Lifecycle = &core.Lifecycle{
+		PreStop: &core.LifecycleHandler{
+			Exec: &core.ExecAction{
+				Command: []string{"/bin/sh", "-c", "spacewalk-service stop && systemctl stop postgresql"},
+			},
+		},
+	}
+
+	template.Spec.Containers[0].ReadinessProbe = &core.Probe{
+		ProbeHandler: core.ProbeHandler{
+			HTTPGet: &core.HTTPGetAction{
+				Port: intstr.FromInt(80),
+				Path: "/rhn/manager/api/api/getVersion",
+			},
+		},
+		PeriodSeconds:    30,
+		TimeoutSeconds:   20,
+		FailureThreshold: 5,
+	}
+
+	template.Spec.Containers[0].LivenessProbe = &core.Probe{
+		ProbeHandler: core.ProbeHandler{
+			HTTPGet: &core.HTTPGetAction{
+				Port: intstr.FromInt(80),
+				Path: "/rhn/manager/api/api/getVersion",
+			},
+		},
+		InitialDelaySeconds: 60,
+		PeriodSeconds:       60,
+		TimeoutSeconds:      20,
+		FailureThreshold:    5,
+	}
+
+	deployment := apps.Deployment{
+		TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      ServerDeployName,
+			Namespace: namespace,
+			Labels:    kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
+		},
+		Spec: apps.DeploymentSpec{
+			Replicas: &replicas,
+			// As long as the container cannot scale, we need to stick to the recreate strategy
+			// or the newly deployed pods won't be ready.
+			Strategy: apps.DeploymentStrategy{Type: apps.RecreateDeploymentStrategyType},
+			Selector: &meta.LabelSelector{
+				MatchLabels: map[string]string{kubernetes.ComponentLabel: kubernetes.ServerComponent},
+			},
+			Template: template,
+		},
+	}
+
+	return &deployment
+}
+
+// getServerPodTemplate computes the pod template with the init container and the minimum viable volumes and mounts.
+// This is intended to be shared with the setup job.
+func getServerPodTemplate(
+	image string,
+	pullPolicy core.PullPolicy,
+	timezone string,
+	pullSecret string,
+) core.PodTemplateSpec {
+	envs := []core.EnvVar{
+		{Name: "TZ", Value: timezone},
+	}
+
+	mounts := GetServerMounts()
+
+	// Convert our mounts to Kubernetes objects
+	volumeMounts := kubernetes.ConvertVolumeMounts(mounts)
+
+	// The init mounts are the same mounts but in /mnt just for the init container populating the volumes
+	var initMounts []core.VolumeMount
+	for _, mount := range volumeMounts {
+		initMount := mount.DeepCopy()
+		initMount.MountPath = "/mnt" + initMount.MountPath
+		initMounts = append(initMounts, *initMount)
+	}
+
+	volumes := kubernetes.CreateVolumes(mounts)
+
+	caMount := core.VolumeMount{
+		Name:      "ca-cert",
+		MountPath: "/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT",
+		ReadOnly:  true,
+		SubPath:   "ca.crt",
+	}
+	tlsKeyMount := core.VolumeMount{Name: "tls-key", MountPath: "/etc/pki/spacewalk-tls"}
+
+	caVolume := kubernetes.CreateConfigVolume("ca-cert", "uyuni-ca")
+	tlsKeyVolume := kubernetes.CreateSecretVolume("tls-key", "uyuni-cert")
+	var keyMode int32 = 0400
+	tlsKeyVolume.VolumeSource.Secret.Items = []core.KeyToPath{
+		{Key: "tls.crt", Path: "spacewalk.crt"},
+		{Key: "tls.key", Path: "spacewalk.key", Mode: &keyMode},
+	}
+
+	initMounts = append(initMounts, tlsKeyMount)
+	volumeMounts = append(volumeMounts, caMount, tlsKeyMount)
+	volumes = append(volumes, caVolume, tlsKeyVolume)
+
+	template := core.PodTemplateSpec{
+		ObjectMeta: meta.ObjectMeta{
+			Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
+		},
+		Spec: core.PodSpec{
+			InitContainers: []core.Container{
+				{
+					Name:            "init-volumes",
+					Image:           image,
+					ImagePullPolicy: pullPolicy,
+					Command:         []string{"sh", "-x", "-c", initScript},
+					VolumeMounts:    initMounts,
+				},
+			},
+			Containers: []core.Container{
+				{
+					Name:            "uyuni",
+					Image:           image,
+					ImagePullPolicy: pullPolicy,
+					Env:             envs,
+					VolumeMounts:    volumeMounts,
+				},
+			},
+			Volumes: volumes,
+		},
+	}
+
+	if pullSecret != "" {
+		template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
+	}
+	return template
+}
+
+const initScript = `
+# Fill the empty volumes
+for vol in /var/lib/cobbler \
+	/var/lib/salt \
+	/var/lib/pgsql \
+	/var/cache \
+	/var/log \
+	/srv/salt \
+	/srv/www \
+	/srv/tftpboot \
+	/srv/formula_metadata \
+	/srv/pillar \
+	/srv/susemanager \
+	/srv/spacewalk \
+	/root \
+	/etc/apache2 \
+	/etc/rhn \
+	/etc/systemd/system/multi-user.target.wants \
+	/etc/systemd/system/sockets.target.wants \
+	/etc/salt \
+	/etc/tomcat \
+	/etc/cobbler \
+	/etc/sysconfig \
+	/etc/postfix \
+	/etc/sssd \
+	/etc/pki/tls
+do
+	chown --reference=$vol /mnt$vol;
+	chmod --reference=$vol /mnt$vol;
+	if [ -z "$(ls -A /mnt$vol)" ]; then
+		cp -a $vol/. /mnt$vol;
+		if [ "$vol" = "/srv/www" ]; then
+			ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /mnt$vol/RHN-ORG-TRUSTED-SSL-CERT;
+		fi
+
+		if [ "$vol" = "/etc/pki/tls" ]; then
+			ln -s /etc/pki/spacewalk-tls/spacewalk.crt /mnt/etc/pki/tls/certs/spacewalk.crt;
+			ln -s /etc/pki/spacewalk-tls/spacewalk.key /mnt/etc/pki/tls/private/spacewalk.key;
+		fi
+	fi
+
+	if [ "$vol" = "/etc/pki/tls" ]; then
+		cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/etc/pki/tls/private/pg-spacewalk.key;
+		chown postgres:postgres /mnt/etc/pki/tls/private/pg-spacewalk.key;
+	fi
+done
+`
+
+// GetServerMounts returns the volume mounts required for the server pod.
+func GetServerMounts() []types.VolumeMount {
+	// Filter out the duplicate mounts to avoid issues applying the jobs
+	serverMounts := utils.ServerVolumeMounts
+	mounts := []types.VolumeMount{}
+	mountsSet := map[string]types.VolumeMount{}
+	for _, mount := range serverMounts {
+		switch mount.Name {
+		// Skip mounts that are not PVCs
+		case "ca-cert", "tls-key":
+			continue
+		}
+		if _, exists := mountsSet[mount.Name]; !exists {
+			mounts = append(mounts, mount)
+			mountsSet[mount.Name] = mount
+		}
+	}
+
+	return mounts
+}
+
+// TuneMounts adjusts the server mounts with the size and storage class passed as parameters.
+func TuneMounts(mounts []types.VolumeMount, flags *cmd_utils.VolumesFlags) []types.VolumeMount {
+	tunedMounts := []types.VolumeMount{}
+	for _, mount := range mounts {
+		class := flags.Class
+		var volumeFlags *cmd_utils.VolumeFlags
+		switch mount.Name {
+		case "var-pgsql":
+			volumeFlags = &flags.Database
+		case "var-spacewalk":
+			volumeFlags = &flags.Packages
+		case "var-cache":
+			volumeFlags = &flags.Cache
+		case "srv-www":
+			volumeFlags = &flags.Www
+		}
+		if volumeFlags != nil {
+			if volumeFlags.Class != "" {
+				class = volumeFlags.Class
+			}
+			mount.Size = volumeFlags.Size
+		}
+		mount.Class = class
+		tunedMounts = append(tunedMounts, mount)
+	}
+	return tunedMounts
+}
+
+var runCmdOutput = utils.RunCmdOutput
+
+// getRunningServerImage extracts the main server container image from a running deployment.
+func getRunningServerImage(namespace string) string {
+	out, err := runCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "deploy", "-n", namespace, ServerDeployName,
+		"-o", "jsonpath={.spec.template.spec.containers[0].image}",
+	)
+	if err != nil {
+		// Errors could mean that the namespace or deployment doesn't exist, just return no image.
+ log.Debug().Err(err).Msg("failed to get the running server container image") + return "" + } + return strings.TrimSpace(string(out)) +} diff --git a/mgradm/shared/kubernetes/deployment_test.go b/mgradm/shared/kubernetes/deployment_test.go new file mode 100644 index 000000000..e4f5578e7 --- /dev/null +++ b/mgradm/shared/kubernetes/deployment_test.go @@ -0,0 +1,36 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "errors" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/uyuni-project/uyuni-tools/shared/testutils" +) + +func TestGetRunningServerImage(t *testing.T) { + type dataType struct { + err error + out string + expected string + } + data := []dataType{ + {nil, "registry.opensuse.org/uyuni/server:latest\n", "registry.opensuse.org/uyuni/server:latest"}, + {errors.New("deployment not found"), "", ""}, + } + + for i, test := range data { + runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) { + return []byte(test.out), test.err + } + actual := getRunningServerImage("myns") + testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i), test.expected, actual) + } +} diff --git a/mgradm/shared/kubernetes/flags.go b/mgradm/shared/kubernetes/flags.go new file mode 100644 index 000000000..5075def85 --- /dev/null +++ b/mgradm/shared/kubernetes/flags.go @@ -0,0 +1,18 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" + +// KubernetesServerFlags is the aggregation of all flags for install, upgrade and migrate. +type KubernetesServerFlags struct { + utils.ServerFlags `mapstructure:",squash"` + Kubernetes utils.KubernetesFlags + Volumes utils.VolumesFlags + // SSH defines the SSH configuration to use to connect to the source server to migrate. + SSH utils.SSHFlags +} diff --git a/mgradm/shared/kubernetes/hubApi.go b/mgradm/shared/kubernetes/hubApi.go new file mode 100644 index 000000000..93edfb814 --- /dev/null +++ b/mgradm/shared/kubernetes/hubApi.go @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "fmt" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // HubAPIDeployName is the deployment name of the Hub API. + HubAPIDeployName = "uyuni-hub-api" + hubAPIServiceName = "hub-api" +) + +// InstallHubAPI installs the Hub API deployment and service. +func InstallHubAPI(namespace string, image string, pullPolicy string, pullSecret string) error { + if err := startHubAPIDeployment(namespace, image, pullPolicy, pullSecret); err != nil { + return err + } + + if err := createHubAPIService(namespace); err != nil { + return err + } + + // TODO Do we want an ingress to use port 80 / 443 from the outside too? + // This would have an impact on the user's scripts. 
+	return nil
+}
+
+func startHubAPIDeployment(namespace string, image string, pullPolicy string, pullSecret string) error {
+	deploy := getHubAPIDeployment(namespace, image, pullPolicy, pullSecret)
+	return kubernetes.Apply([]runtime.Object{deploy}, L("failed to create the hub API deployment"))
+}
+
+func getHubAPIDeployment(namespace string, image string, pullPolicy string, pullSecret string) *apps.Deployment {
+	var replicas int32 = 1
+
+	deploy := &apps.Deployment{
+		TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      HubAPIDeployName,
+			Namespace: namespace,
+			Labels:    kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.HubAPIComponent),
+		},
+		Spec: apps.DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &meta.LabelSelector{
+				MatchLabels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.HubAPIComponent),
+			},
+			Template: core.PodTemplateSpec{
+				ObjectMeta: meta.ObjectMeta{
+					Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.HubAPIComponent),
+				},
+				Spec: core.PodSpec{
+					Containers: []core.Container{
+						{
+							Name:            "uyuni-hub-api",
+							Image:           image,
+							ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy),
+							Ports: []core.ContainerPort{
+								{
+									ContainerPort: int32(2830),
+								},
+							},
+							Env: []core.EnvVar{
+								{Name: "HUB_API_URL", Value: fmt.Sprintf("http://%s/rpc/api", utils.WebServiceName)},
+								{Name: "HUB_CONNECT_TIMEOUT", Value: "10"},
+								{Name: "HUB_REQUEST_TIMEOUT", Value: "10"},
+								{Name: "HUB_CONNECT_USING_SSL", Value: "false"},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	if pullSecret != "" {
+		deploy.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
+	}
+	return deploy
+}
+
+func createHubAPIService(namespace string) error {
+	svc := getService(namespace, kubernetes.ServerApp, kubernetes.HubAPIComponent, hubAPIServiceName, core.ProtocolTCP,
+		utils.NewPortMap(utils.HubAPIServiceName, "api", 2830, 2830),
+	)
+	return kubernetes.Apply([]runtime.Object{svc}, L("failed to create the hub API service"))
+}
diff --git a/mgradm/shared/kubernetes/ingress.go b/mgradm/shared/kubernetes/ingress.go
new file mode 100644
index 000000000..337e5c16c
--- /dev/null
+++ b/mgradm/shared/kubernetes/ingress.go
@@ -0,0 +1,206 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	net "k8s.io/api/networking/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CertSecretName is the name of the server SSL certificate secret to use.
+const CertSecretName = "uyuni-cert"
+
+const (
+	IngressNameSSL         = "uyuni-ingress-ssl"
+	IngressNameSSLRedirect = "uyuni-ingress-ssl-redirect"
+	IngressNameNoSSL       = "uyuni-ingress-nossl"
+)
+
+// CreateIngress creates the ingress definitions for the Uyuni server.
+//
+// fqdn is the fully qualified domain name associated with the Uyuni server.
+//
+// caIssuer is the name of the cert-manager issuer to use for the SSL routes.
+// It can be empty if cert-manager is not used.
+//
+// ingressName is one of traefik or nginx.
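+//
+// Call sketch (hypothetical FQDN; pass an empty caIssuer when third party
+// certificates are deployed instead of cert-manager ones):
+//
+//	err := CreateIngress("uyuni", "server.example.com", kubernetes.CaIssuerName, "traefik")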
+func CreateIngress(namespace string, fqdn string, caIssuer string, ingressName string) error { + ingresses := GetIngresses(namespace, fqdn, caIssuer, ingressName) + return kubernetes.Apply(ingresses, L("failed to create the ingresses")) +} + +// GetIngresses returns the ingress definitions to create based on the name of the ingress. +// If ingressName is neither nginx nor traefik, no ingress rules are returned. +func GetIngresses(namespace string, fqdn string, caIssuer string, ingressName string) []*net.Ingress { + ingresses := []*net.Ingress{} + if ingressName != "nginx" && ingressName != "traefik" { + return ingresses + } + + ingresses = append(ingresses, + getSSLIngress(namespace, fqdn, caIssuer, ingressName), + getNoSSLIngress(namespace, fqdn, ingressName), + ) + sslRedirectIngress := getSSLRedirectIngress(namespace, fqdn, ingressName) + if sslRedirectIngress != nil { + ingresses = append(ingresses, sslRedirectIngress) + } + return ingresses +} + +func getSSLIngress(namespace string, fqdn string, caIssuer string, ingressName string) *net.Ingress { + annotations := map[string]string{} + if caIssuer != "" { + annotations["cert-manager.io/issuer"] = caIssuer + } + if ingressName == "traefik" { + annotations["traefik.ingress.kubernetes.io/router.tls"] = "true" + annotations["traefik.ingress.kubernetes.io/router.tls.domains.n.main"] = fqdn + annotations["traefik.ingress.kubernetes.io/router.entrypoints"] = "websecure,web" + } + + ingress := net.Ingress{ + TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: IngressNameSSL, + Annotations: annotations, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + Spec: net.IngressSpec{ + TLS: []net.IngressTLS{ + {Hosts: []string{fqdn}, SecretName: CertSecretName}, + }, + Rules: []net.IngressRule{ + getIngressWebRule(fqdn), + }, + }, + } + + return &ingress +} + +func getSSLRedirectIngress(namespace string, fqdn string, ingressName string) *net.Ingress { + var ingress *net.Ingress + + // Nginx doesn't require a special ingress for the SSL redirection. 
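+	// (The ingress-nginx controller redirects HTTP to HTTPS by default once TLS
+	// is configured, which is why getNoSSLIngress explicitly disables the
+	// redirection for the exempted paths; traefik needs this dedicated route.)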
+ if ingressName == "traefik" { + ingress = &net.Ingress{ + TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: IngressNameSSLRedirect, + Annotations: map[string]string{ + "traefik.ingress.kubernetes.io/router.middlewares": "default-uyuni-https-redirect@kubernetescrd", + "traefik.ingress.kubernetes.io/router.entrypoints": "web", + }, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + Spec: net.IngressSpec{ + Rules: []net.IngressRule{ + getIngressWebRule(fqdn), + }, + }, + } + } + + return ingress +} + +var noSSLPaths = []string{ + "/pub", + "/rhn/([^/])+/DownloadFile", + "/(rhn/)?rpc/api", + "/rhn/errors", + "/rhn/ty/TinyUrl", + "/rhn/websocket", + "/rhn/metrics", + "/cobbler_api", + "/cblr", + "/httpboot", + "/images", + "/cobbler", + "/os-images", + "/tftp", + "/docs", +} + +func getNoSSLIngress(namespace string, fqdn string, ingressName string) *net.Ingress { + annotations := map[string]string{} + if ingressName == "nginx" { + annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false" + } + if ingressName == "traefik" { + annotations["traefik.ingress.kubernetes.io/router.tls"] = "false" + annotations["traefik.ingress.kubernetes.io/router.entrypoints"] = "web" + } + + pathType := net.PathTypePrefix + paths := []net.HTTPIngressPath{} + for _, noSSLPath := range noSSLPaths { + paths = append(paths, net.HTTPIngressPath{ + Backend: webServiceBackend, + Path: noSSLPath, + PathType: &pathType, + }) + } + + ingress := net.Ingress{ + TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: IngressNameNoSSL, + Annotations: annotations, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + Spec: net.IngressSpec{ + TLS: []net.IngressTLS{ + {Hosts: []string{fqdn}, SecretName: CertSecretName}, + }, + Rules: []net.IngressRule{ + { + Host: fqdn, + IngressRuleValue: net.IngressRuleValue{ + HTTP: &net.HTTPIngressRuleValue{Paths: paths}, + }, + }, + }, + }, + } + + return &ingress +} + +// build the ingress rule object catching all HTTP traffic. +func getIngressWebRule(fqdn string) net.IngressRule { + pathType := net.PathTypePrefix + + return net.IngressRule{ + Host: fqdn, + IngressRuleValue: net.IngressRuleValue{ + HTTP: &net.HTTPIngressRuleValue{ + Paths: []net.HTTPIngressPath{ + { + Backend: webServiceBackend, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + } +} + +var webServiceBackend net.IngressBackend = net.IngressBackend{ + Service: &net.IngressServiceBackend{ + Name: utils.WebServiceName, + Port: net.ServiceBackendPort{Number: 80}, + }, +} diff --git a/mgradm/shared/kubernetes/install.go b/mgradm/shared/kubernetes/install.go deleted file mode 100644 index 0fb54fdc6..000000000 --- a/mgradm/shared/kubernetes/install.go +++ /dev/null @@ -1,247 +0,0 @@ -// SPDX-FileCopyrightText: 2024 SUSE LLC -// -// SPDX-License-Identifier: Apache-2.0 - -package kubernetes - -import ( - "errors" - "fmt" - "os/exec" - - "github.com/rs/zerolog/log" - "github.com/spf13/cobra" - cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - "github.com/uyuni-project/uyuni-tools/shared" - "github.com/uyuni-project/uyuni-tools/shared/kubernetes" - . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/ssl" - "github.com/uyuni-project/uyuni-tools/shared/types" - "github.com/uyuni-project/uyuni-tools/shared/utils" -) - -// HelmAppName is the Helm application name. -const HelmAppName = "uyuni" - -// Deploy execute a deploy of a given image and helm to a cluster. -func Deploy( - cnx *shared.Connection, - registry string, - imageFlags *types.ImageFlags, - helmFlags *cmd_utils.HelmFlags, - clusterInfos *kubernetes.ClusterInfos, - fqdn string, - debug bool, - prepare bool, - helmArgs ...string, -) error { - // If installing on k3s, install the traefik helm config in manifests - isK3s := clusterInfos.IsK3s() - IsRke2 := clusterInfos.IsRke2() - if !prepare { - if isK3s { - InstallK3sTraefikConfig(debug) - } else if IsRke2 { - kubernetes.InstallRke2NginxConfig(utils.TCPPorts, utils.UDPPorts, helmFlags.Uyuni.Namespace) - } - } - - serverImage, err := utils.ComputeImage(registry, utils.DefaultTag, *imageFlags) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - - // Install the uyuni server helm chart - if err := UyuniUpgrade( - serverImage, imageFlags.PullPolicy, helmFlags, clusterInfos.GetKubeconfig(), fqdn, clusterInfos.Ingress, helmArgs..., - ); err != nil { - return utils.Errorf(err, L("cannot upgrade")) - } - - // Wait for the pod to be started - err = kubernetes.WaitForDeployment(helmFlags.Uyuni.Namespace, HelmAppName, "uyuni") - if err != nil { - return utils.Errorf(err, L("cannot deploy")) - } - return cnx.WaitForServer() -} - -// DeployCertificate executre a deploy a new certificate given an helm. -func DeployCertificate(helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_utils.InstallSSLFlags, rootCa string, - ca *types.SSLPair, kubeconfig string, fqdn string, imagePullPolicy string) ([]string, error) { - helmArgs := []string{} - if sslFlags.UseExisting() { - if err := DeployExistingCertificate(helmFlags, sslFlags); err != nil { - return helmArgs, err - } - } else { - // Install cert-manager and a self-signed issuer ready for use - issuerArgs, err := installSSLIssuers(helmFlags, sslFlags, rootCa, ca, kubeconfig, fqdn, imagePullPolicy) - if err != nil { - return []string{}, utils.Errorf(err, L("cannot install cert-manager and self-sign issuer")) - } - helmArgs = append(helmArgs, issuerArgs...) - - // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - extractCaCertToConfig(helmFlags.Uyuni.Namespace) - } - - return helmArgs, nil -} - -// DeployExistingCertificate execute a deploy of an existing certificate. -func DeployExistingCertificate( - helmFlags *cmd_utils.HelmFlags, - sslFlags *cmd_utils.InstallSSLFlags, -) error { - // Deploy the SSL Certificate secret and CA configmap - serverCrt, rootCaCrt := ssl.OrderCas(&sslFlags.Ca, &sslFlags.Server) - serverKey := utils.ReadFile(sslFlags.Server.Key) - if err := installTLSSecret(helmFlags.Uyuni.Namespace, serverCrt, serverKey, rootCaCrt); err != nil { - return err - } - - // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - extractCaCertToConfig(helmFlags.Uyuni.Namespace) - return nil -} - -// UyuniUpgrade runs an helm upgrade using images and helm configuration as parameters. 
-func UyuniUpgrade(serverImage string, pullPolicy string, helmFlags *cmd_utils.HelmFlags, kubeconfig string, - fqdn string, ingress string, helmArgs ...string) error { - log.Info().Msg(L("Installing Uyuni")) - - // The guessed ingress is passed before the user's value to let the user override it in case we got it wrong. - helmParams := []string{ - "--set", "ingress=" + ingress, - } - - extraValues := helmFlags.Uyuni.Values - if extraValues != "" { - helmParams = append(helmParams, "-f", extraValues) - } - - // The values computed from the command line need to be last to override what could be in the extras - helmParams = append(helmParams, - "--set", "images.server="+serverImage, - "--set", "pullPolicy="+kubernetes.GetPullPolicy(pullPolicy), - "--set", "fqdn="+fqdn) - - helmParams = append(helmParams, helmArgs...) - - namespace := helmFlags.Uyuni.Namespace - chart := helmFlags.Uyuni.Chart - version := helmFlags.Uyuni.Version - return kubernetes.HelmUpgrade(kubeconfig, namespace, true, "", HelmAppName, chart, version, helmParams...) -} - -// Upgrade will upgrade a server in a kubernetes cluster. -func Upgrade( - _ *types.GlobalFlags, - image *types.ImageFlags, - upgradeImage *types.ImageFlags, - helm cmd_utils.HelmFlags, - _ *cobra.Command, - _ []string, -) error { - for _, binary := range []string{"kubectl", "helm"} { - if _, err := exec.LookPath(binary); err != nil { - return fmt.Errorf(L("install %s before running this command"), binary) - } - } - - cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter) - namespace, err := cnx.GetNamespace("") - if err != nil { - return utils.Errorf(err, L("failed retrieving namespace")) - } - - serverImage, err := utils.ComputeImage(image.Registry, utils.DefaultTag, *image) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - - inspectedValues, err := kubernetes.InspectKubernetes(namespace, serverImage, image.PullPolicy) - if err != nil { - return utils.Errorf(err, L("cannot inspect kubernetes values")) - } - - err = cmd_utils.SanityCheck(cnx, inspectedValues, serverImage) - if err != nil { - return err - } - - fqdn := inspectedValues.Fqdn - if fqdn == "" { - return errors.New(L("inspect function did non return fqdn value")) - } - - clusterInfos, err := kubernetes.CheckCluster() - if err != nil { - return err - } - kubeconfig := clusterInfos.GetKubeconfig() - - // this is needed because folder with script needs to be mounted - // check the node before scaling down - nodeName, err := kubernetes.GetNode(namespace, kubernetes.ServerFilter) - if err != nil { - return utils.Errorf(err, L("cannot find node running uyuni")) - } - - err = kubernetes.ReplicasTo(namespace, kubernetes.ServerApp, 0) - if err != nil { - return utils.Errorf(err, L("cannot set replica to 0")) - } - - defer func() { - // if something is running, we don't need to set replicas to 1 - if _, err = kubernetes.GetNode(namespace, kubernetes.ServerFilter); err != nil { - err = kubernetes.ReplicasTo(namespace, kubernetes.ServerApp, 1) - } - }() - if inspectedValues.ImagePgVersion > inspectedValues.CurrentPgVersion { - log.Info().Msgf(L("Previous PostgreSQL is %[1]s, new one is %[2]s. 
Performing a DB version upgrade…"), - inspectedValues.CurrentPgVersion, inspectedValues.ImagePgVersion) - - if err := RunPgsqlVersionUpgrade(image.Registry, *image, *upgradeImage, nodeName, namespace, - inspectedValues.CurrentPgVersion, inspectedValues.ImagePgVersion, - ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL version upgrade script")) - } - } else if inspectedValues.ImagePgVersion == inspectedValues.CurrentPgVersion { - log.Info().Msgf(L("Upgrading to %s without changing PostgreSQL version"), inspectedValues.UyuniRelease) - } else { - return fmt.Errorf(L("trying to downgrade PostgreSQL from %[1]s to %[2]s"), - inspectedValues.CurrentPgVersion, inspectedValues.ImagePgVersion) - } - - schemaUpdateRequired := inspectedValues.CurrentPgVersion != inspectedValues.ImagePgVersion - if err := RunPgsqlFinalizeScript( - serverImage, image.PullPolicy, namespace, nodeName, schemaUpdateRequired, false, - ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL finalize script")) - } - - if err := RunPostUpgradeScript(serverImage, image.PullPolicy, namespace, nodeName); err != nil { - return utils.Errorf(err, L("cannot run post upgrade script")) - } - - helmArgs := []string{} - - // Get the registry secret name if any - pullSecret, err := kubernetes.GetDeploymentImagePullSecret(namespace, kubernetes.ServerFilter) - if err != nil { - return err - } - if pullSecret != "" { - helmArgs = append(helmArgs, "--set", "registrySecret="+pullSecret) - } - - err = UyuniUpgrade(serverImage, image.PullPolicy, &helm, kubeconfig, fqdn, clusterInfos.Ingress, helmArgs...) - if err != nil { - return utils.Errorf(err, L("cannot upgrade to image %s"), serverImage) - } - - return kubernetes.WaitForDeployment(namespace, "uyuni", "uyuni") -} diff --git a/mgradm/shared/kubernetes/k3s.go b/mgradm/shared/kubernetes/k3s.go deleted file mode 100644 index 2aceb20e7..000000000 --- a/mgradm/shared/kubernetes/k3s.go +++ /dev/null @@ -1,214 +0,0 @@ -// SPDX-FileCopyrightText: 2024 SUSE LLC -// -// SPDX-License-Identifier: Apache-2.0 - -package kubernetes - -import ( - "fmt" - - "github.com/rs/zerolog/log" - adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - "github.com/uyuni-project/uyuni-tools/shared/kubernetes" - . "github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/types" - "github.com/uyuni-project/uyuni-tools/shared/utils" -) - -// InstallK3sTraefikConfig installs the K3s Traefik configuration. -func InstallK3sTraefikConfig(debug bool) { - tcpPorts := []types.PortMap{} - tcpPorts = append(tcpPorts, utils.TCPPorts...) - if debug { - tcpPorts = append(tcpPorts, utils.DebugPorts...) - } - - kubernetes.InstallK3sTraefikConfig(tcpPorts, utils.UDPPorts) -} - -// RunPgsqlVersionUpgrade perform a PostgreSQL major upgrade. -func RunPgsqlVersionUpgrade( - registry string, - image types.ImageFlags, - upgradeImage types.ImageFlags, - namespace string, - nodeName string, - oldPgsql string, - newPgsql string, -) error { - scriptDir, cleaner, err := utils.TempDir() - if err != nil { - return err - } - defer cleaner() - if newPgsql > oldPgsql { - log.Info().Msgf( - L("Previous PostgreSQL is %[1]s, new one is %[2]s. 
Performing a DB version upgrade…"), - oldPgsql, newPgsql, - ) - - pgsqlVersionUpgradeContainer := "uyuni-upgrade-pgsql" - - upgradeImageURL := "" - if upgradeImage.Name == "" { - upgradeImageURL, err = utils.ComputeImage( - registry, image.Tag, image, fmt.Sprintf("-migration-%s-%s", oldPgsql, newPgsql), - ) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - } else { - upgradeImageURL, err = utils.ComputeImage(registry, image.Tag, upgradeImage) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - } - - log.Info().Msgf(L("Using database upgrade image %s"), upgradeImageURL) - pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql, true) - if err != nil { - return utils.Errorf(err, L("cannot generate PostgreSQL database version upgrade script")) - } - - // delete pending pod and then check the node, because in presence of more than a pod GetNode return is wrong - if err := kubernetes.DeletePod(namespace, pgsqlVersionUpgradeContainer, kubernetes.ServerFilter); err != nil { - return utils.Errorf(err, L("cannot delete %s"), pgsqlVersionUpgradeContainer) - } - - // generate deploy data - pgsqlVersioUpgradeDeployData := types.Deployment{ - APIVersion: "v1", - Spec: &types.Spec{ - RestartPolicy: "Never", - NodeName: nodeName, - Containers: []types.Container{ - { - Name: pgsqlVersionUpgradeContainer, - VolumeMounts: append(utils.PgsqlRequiredVolumeMounts, - types.VolumeMount{MountPath: "/var/lib/uyuni-tools", Name: "var-lib-uyuni-tools"}), - }, - }, - Volumes: append(utils.PgsqlRequiredVolumes, - types.Volume{Name: "var-lib-uyuni-tools", HostPath: &types.HostPath{Path: scriptDir, Type: "Directory"}}), - }, - } - - // transform deploy in JSON - overridePgsqlVersioUpgrade, err := kubernetes.GenerateOverrideDeployment(pgsqlVersioUpgradeDeployData) - if err != nil { - return err - } - - err = kubernetes.RunPod( - namespace, pgsqlVersionUpgradeContainer, kubernetes.ServerFilter, upgradeImageURL, image.PullPolicy, - "/var/lib/uyuni-tools/"+pgsqlVersionUpgradeScriptName, overridePgsqlVersioUpgrade, - ) - if err != nil { - return utils.Errorf(err, L("error running container %s"), pgsqlVersionUpgradeContainer) - } - } - return nil -} - -// RunPgsqlFinalizeScript run the script with all the action required to a db after upgrade. 
-func RunPgsqlFinalizeScript( - serverImage string, pullPolicy string, namespace string, nodeName string, schemaUpdateRequired bool, migration bool, -) error { - scriptDir, cleaner, err := utils.TempDir() - if err != nil { - return err - } - defer cleaner() - pgsqlFinalizeContainer := "uyuni-finalize-pgsql" - pgsqlFinalizeScriptName, err := adm_utils.GenerateFinalizePostgresScript( - scriptDir, true, schemaUpdateRequired, true, migration, true, - ) - if err != nil { - return utils.Errorf(err, L("cannot generate PostgreSQL finalization script")) - } - // delete pending pod and then check the node, because in presence of more than a pod GetNode return is wrong - if err := kubernetes.DeletePod(namespace, pgsqlFinalizeContainer, kubernetes.ServerFilter); err != nil { - return utils.Errorf(err, L("cannot delete %s"), pgsqlFinalizeContainer) - } - // generate deploy data - pgsqlFinalizeDeployData := types.Deployment{ - APIVersion: "v1", - Spec: &types.Spec{ - RestartPolicy: "Never", - NodeName: nodeName, - Containers: []types.Container{ - { - Name: pgsqlFinalizeContainer, - VolumeMounts: append(utils.PgsqlRequiredVolumeMounts, - types.VolumeMount{MountPath: "/var/lib/uyuni-tools", Name: "var-lib-uyuni-tools"}), - }, - }, - Volumes: append(utils.PgsqlRequiredVolumes, - types.Volume{Name: "var-lib-uyuni-tools", HostPath: &types.HostPath{Path: scriptDir, Type: "Directory"}}), - }, - } - // transform deploy data in JSON - overridePgsqlFinalize, err := kubernetes.GenerateOverrideDeployment(pgsqlFinalizeDeployData) - if err != nil { - return err - } - err = kubernetes.RunPod( - namespace, pgsqlFinalizeContainer, kubernetes.ServerFilter, serverImage, pullPolicy, - "/var/lib/uyuni-tools/"+pgsqlFinalizeScriptName, overridePgsqlFinalize, - ) - if err != nil { - return utils.Errorf(err, L("error running container %s"), pgsqlFinalizeContainer) - } - return nil -} - -// RunPostUpgradeScript run the script with the changes to apply after the upgrade. 
-func RunPostUpgradeScript(serverImage string, pullPolicy string, namespace string, nodeName string) error {
-	scriptDir, cleaner, err := utils.TempDir()
-	if err != nil {
-		return err
-	}
-	defer cleaner()
-	postUpgradeContainer := "uyuni-post-upgrade"
-	postUpgradeScriptName, err := adm_utils.GeneratePostUpgradeScript(scriptDir)
-	if err != nil {
-		return utils.Errorf(err, L("cannot generate PostgreSQL finalization script"))
-	}
-
-	// delete pending pod and then check the node, because in presence of more than a pod GetNode return is wrong
-	if err := kubernetes.DeletePod(namespace, postUpgradeContainer, kubernetes.ServerFilter); err != nil {
-		return utils.Errorf(err, L("cannot delete %s"), postUpgradeContainer)
-	}
-	// generate deploy data
-	postUpgradeDeployData := types.Deployment{
-		APIVersion: "v1",
-		Spec: &types.Spec{
-			RestartPolicy: "Never",
-			NodeName:      nodeName,
-			Containers: []types.Container{
-				{
-					Name: postUpgradeContainer,
-					VolumeMounts: append(utils.PgsqlRequiredVolumeMounts,
-						types.VolumeMount{MountPath: "/var/lib/uyuni-tools", Name: "var-lib-uyuni-tools"}),
-				},
-			},
-			Volumes: append(utils.PgsqlRequiredVolumes,
-				types.Volume{Name: "var-lib-uyuni-tools", HostPath: &types.HostPath{Path: scriptDir, Type: "Directory"}}),
-		},
-	}
-	// transform deploy data in JSON
-	overridePostUpgrade, err := kubernetes.GenerateOverrideDeployment(postUpgradeDeployData)
-	if err != nil {
-		return err
-	}
-
-	err = kubernetes.RunPod(
-		namespace, postUpgradeContainer, kubernetes.ServerFilter, serverImage, pullPolicy,
-		"/var/lib/uyuni-tools/"+postUpgradeScriptName, overridePostUpgrade,
-	)
-	if err != nil {
-		return utils.Errorf(err, L("error running container %s"), postUpgradeContainer)
-	}
-
-	return nil
-}
diff --git a/mgradm/shared/kubernetes/namespace.go b/mgradm/shared/kubernetes/namespace.go
new file mode 100644
index 000000000..108554be0
--- /dev/null
+++ b/mgradm/shared/kubernetes/namespace.go
@@ -0,0 +1,26 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// CreateNamespace creates a kubernetes namespace.
+func CreateNamespace(namespace string) error {
+	ns := core.Namespace{
+		TypeMeta: meta.TypeMeta{Kind: "Namespace", APIVersion: "v1"},
+		ObjectMeta: meta.ObjectMeta{
+			Name: namespace,
+		},
+	}
+	return kubernetes.Apply([]runtime.Object{&ns}, L("failed to create the namespace"))
+}
diff --git a/mgradm/shared/kubernetes/node.go b/mgradm/shared/kubernetes/node.go
new file mode 100644
index 000000000..11b860535
--- /dev/null
+++ b/mgradm/shared/kubernetes/node.go
@@ -0,0 +1,30 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+)
+
+// deployNodeConfig deploys configuration files on the node.
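+//
+// It only has an effect on k3s (traefik) and RKE2 (nginx) clusters; on any
+// other cluster it is a no-op.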
+func deployNodeConfig(
+	namespace string,
+	clusterInfos *kubernetes.ClusterInfos,
+	needsHub bool,
+	debug bool,
+) error {
+	// If installing on k3s, install the traefik helm config in manifests
+	isK3s := clusterInfos.IsK3s()
+	IsRke2 := clusterInfos.IsRke2()
+	ports := getPortList(needsHub, debug)
+	if isK3s {
+		return kubernetes.InstallK3sTraefikConfig(ports)
+	} else if IsRke2 {
+		return kubernetes.InstallRke2NginxConfig(ports, namespace)
+	}
+	return nil
+}
diff --git a/mgradm/shared/kubernetes/ports.go b/mgradm/shared/kubernetes/ports.go
new file mode 100644
index 000000000..d8f0ae6be
--- /dev/null
+++ b/mgradm/shared/kubernetes/ports.go
@@ -0,0 +1,22 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+)
+
+// getPortList returns the compiled list of TCP and UDP ports.
+func getPortList(hub bool, debug bool) []types.PortMap {
+	ports := utils.GetServerPorts(debug)
+	if hub {
+		ports = append(ports, utils.HubXmlrpcPorts...)
+	}
+
+	return ports
+}
diff --git a/mgradm/shared/kubernetes/postUpgradeJob.go b/mgradm/shared/kubernetes/postUpgradeJob.go
new file mode 100644
index 000000000..7d685514f
--- /dev/null
+++ b/mgradm/shared/kubernetes/postUpgradeJob.go
@@ -0,0 +1,39 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/rs/zerolog/log"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+
+	"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	batch "k8s.io/api/batch/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// PostUpgradeJobName is the name of the job applying the database changes after the upgrade.
+const PostUpgradeJobName = "uyuni-post-upgrade"
+
+// StartPostUpgradeJob starts the job applying the database changes after the upgrade.
+func StartPostUpgradeJob(namespace string, image string, pullPolicy string, pullSecret string) (string, error) {
+	log.Info().Msg(L("Performing post upgrade changes…"))
+
+	job, err := getPostUpgradeJob(namespace, image, pullPolicy, pullSecret)
+	if err != nil {
+		return "", err
+	}
+
+	return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the post upgrade job"))
+}
+
+func getPostUpgradeJob(namespace string, image string, pullPolicy string, pullSecret string) (*batch.Job, error) {
+	scriptData := templates.PostUpgradeTemplateData{}
+	mounts := GetServerMounts()
+
+	return kubernetes.GetScriptJob(namespace, PostUpgradeJobName, image, pullPolicy, pullSecret, mounts, scriptData)
+}
diff --git a/mgradm/shared/kubernetes/reconcile.go b/mgradm/shared/kubernetes/reconcile.go
new file mode 100644
index 000000000..c0920738f
--- /dev/null
+++ b/mgradm/shared/kubernetes/reconcile.go
@@ -0,0 +1,364 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+
+	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/ssl" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +// Reconcile upgrades, migrate or install the server. +func Reconcile(flags *KubernetesServerFlags, fqdn string) error { + if _, err := exec.LookPath("kubectl"); err != nil { + return errors.New(L("install kubectl before running this command")) + } + + namespace := flags.Kubernetes.Uyuni.Namespace + // Create the namespace if not present + if err := CreateNamespace(namespace); err != nil { + return err + } + + serverImage, err := utils.ComputeImage(flags.Image.Registry, utils.DefaultTag, flags.Image) + if err != nil { + return utils.Errorf(err, L("failed to compute image URL")) + } + + // Create a secret using SCC credentials if any are provided + pullSecret, err := kubernetes.GetRegistrySecret( + flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SCC, kubernetes.ServerApp, + ) + if err != nil { + return err + } + + // Do we have an existing deployment to upgrade? + // This can be freshly synchronized data from a migration or a running instance to upgrade. + hasDeployment := kubernetes.HasDeployment(namespace, kubernetes.ServerFilter) + + // TODO Looking for the volume is probably not safe enough: it could be prepared by the admin + // Better check the PVC as we create them in all cases. + hasDatabase := kubernetes.HasVolume(namespace, "var-pgsql") + isMigration := hasDatabase && !hasDeployment + + cocoReplicas := kubernetes.GetReplicas(namespace, CocoDeployName) + if cocoReplicas != 0 && !flags.Coco.IsChanged { + // Upgrade: detect the number of running coco replicas + flags.Coco.Replicas = cocoReplicas + } + + var inspectedData utils.ServerInspectData + if hasDatabase { + // Inspect the image and the existing volumes + data, err := kubernetes.InspectServer(namespace, serverImage, flags.Image.PullPolicy, pullSecret) + if err != nil { + return err + } + inspectedData = *data + + // Use the inspected DB port and name if not defined in the flags + if flags.Installation.DB.Port == 0 && data.DBPort != 0 { + flags.Installation.DB.Port = data.DBPort + } + + if flags.Installation.DB.Name == "" && data.DBName != "" { + flags.Installation.DB.Name = data.DBName + } + + // Do we have a running server deploy? which version is it? + // If there is no deployment / image, don't check the uyuni / SUMA upgrades + var runningData *utils.ServerInspectData + if runningImage := getRunningServerImage(namespace); runningImage != "" { + runningData, err = kubernetes.InspectServer(namespace, runningImage, "Never", pullSecret) + if err != nil { + return err + } + } + + // Run sanity checks for upgrade + if err := adm_utils.SanityCheck(runningData, &inspectedData, serverImage); err != nil { + return err + } + + // Get the fqdn from the inspected data if possible. Ignore difference with input value for now. + fqdn = inspectedData.Fqdn + + if hasDeployment { + // Scale down all deployments relying on the DB since it will be brought down during upgrade. 
+			if cocoReplicas > 0 {
+				if err := kubernetes.ReplicasTo(namespace, CocoDeployName, 0); err != nil {
+					return utils.Errorf(err, L("cannot set confidential computing containers replicas to 0"))
+				}
+			}
+
+			// Scale down server deployment if present to upgrade the DB
+			if err := kubernetes.ReplicasTo(namespace, ServerDeployName, 0); err != nil {
+				return utils.Errorf(err, L("cannot set server replicas to 0"))
+			}
+		}
+	}
+
+	// Don't check the FQDN too early or we may not have it in case of upgrade.
+	if err := utils.IsValidFQDN(fqdn); err != nil {
+		return err
+	}
+
+	mounts := GetServerMounts()
+	mounts = TuneMounts(mounts, &flags.Volumes)
+
+	if err := kubernetes.CreatePersistentVolumeClaims(namespace, mounts); err != nil {
+		return err
+	}
+
+	if hasDatabase {
+		oldPgVersion := inspectedData.CurrentPgVersion
+		newPgVersion := inspectedData.ImagePgVersion
+
+		// Run the DB Upgrade job if needed
+		if oldPgVersion < newPgVersion {
+			jobName, err := StartDBUpgradeJob(
+				namespace, flags.Image.Registry, flags.Image, flags.DBUpgradeImage, pullSecret,
+				oldPgVersion, newPgVersion,
+			)
+			if err != nil {
+				return err
+			}
+
+			// Wait forever for the job to finish: its duration depends on the amount of data to upgrade
+			if err := kubernetes.WaitForJob(namespace, jobName, -1); err != nil {
+				return err
+			}
+		} else if oldPgVersion > newPgVersion {
+			return fmt.Errorf(
+				L("downgrading database from PostgreSQL %[1]d to %[2]d is not supported"), oldPgVersion, newPgVersion)
+		}
+
+		// Run DB finalization job
+		schemaUpdateRequired := oldPgVersion != newPgVersion
+		jobName, err := StartDBFinalizeJob(
+			namespace, serverImage, flags.Image.PullPolicy, pullSecret, schemaUpdateRequired, isMigration,
+		)
+		if err != nil {
+			return err
+		}
+
+		// Wait forever for the job to finish: its duration depends on the amount of data to reindex
+		if err := kubernetes.WaitForJob(namespace, jobName, -1); err != nil {
+			return err
+		}
+
+		// Run the Post Upgrade job
+		jobName, err = StartPostUpgradeJob(namespace, serverImage, flags.Image.PullPolicy, pullSecret)
+		if err != nil {
+			return err
+		}
+
+		if err := kubernetes.WaitForJob(namespace, jobName, 60); err != nil {
+			return err
+		}
+	}
+
+	// Extract some data from the cluster to guess how to configure Uyuni.
+	clusterInfos, err := kubernetes.CheckCluster()
+	if err != nil {
+		return err
+	}
+
+	if replicas := kubernetes.GetReplicas(namespace, ServerDeployName); replicas > 0 && !flags.HubXmlrpc.IsChanged {
+		// Upgrade: detect the number of existing hub xmlrpc replicas
+		flags.HubXmlrpc.Replicas = replicas
+	}
+	needsHub := flags.HubXmlrpc.Replicas > 0
+
+	// Install the traefik / nginx config on the node
+	// This will never be done in an operator.
+	if err := deployNodeConfig(namespace, clusterInfos, needsHub, flags.Installation.Debug.Java); err != nil {
+		return err
+	}
+
+	// Deploy the SSL CA and server certificates
+	var caIssuer string
+	if flags.Installation.SSL.UseExisting() {
+		if err := DeployExistingCertificate(flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SSL); err != nil {
+			return err
+		}
+	} else if !HasIssuer(namespace, kubernetes.CaIssuerName) {
+		// cert-manager is not required for 3rd party certificates, only if we have the CA key.
+		// Note that in an operator we won't be able to install cert-manager and just wait for it to be installed.
+ kubeconfig := clusterInfos.GetKubeconfig() + + if err := InstallCertManager(&flags.Kubernetes, kubeconfig, flags.Image.PullPolicy); err != nil { + return utils.Errorf(err, L("cannot install cert manager")) + } + + if flags.Installation.SSL.UseMigratedCa() { + // Convert CA to RSA to use in a Kubernetes TLS secret. + // In an operator we would have to fail now if there is no SSL password as we cannot prompt it. + rootCA, err := os.ReadFile(flags.Installation.SSL.Ca.Root) + if err != nil { + return utils.Errorf(err, L("failed to read Root CA file")) + } + ca := types.SSLPair{ + Key: base64.StdEncoding.EncodeToString( + ssl.GetRsaKey(flags.Installation.SSL.Ca.Key, flags.Installation.SSL.Password), + ), + Cert: base64.StdEncoding.EncodeToString(ssl.StripTextFromCertificate(string(rootCA))), + } + + // Install the cert-manager issuers + if err := DeployReusedCa(namespace, &ca); err != nil { + return err + } + } else { + if err := DeployGeneratedCa(flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SSL, fqdn); err != nil { + return err + } + } + + // Wait for issuer to be ready + if err := waitForIssuer(flags.Kubernetes.Uyuni.Namespace, kubernetes.CaIssuerName); err != nil { + return err + } + + // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret + if err := extractCaCertToConfig(flags.Kubernetes.Uyuni.Namespace); err != nil { + return err + } + caIssuer = kubernetes.CaIssuerName + } + + // Create the Ingress routes before the deployments as those are triggering + // the creation of the uyuni-cert secret from cert-manager. + if err := CreateIngress(namespace, fqdn, caIssuer, clusterInfos.Ingress); err != nil { + return err + } + + // Wait for uyuni-cert secret to be ready + kubernetes.WaitForSecret(namespace, CertSecretName) + + // Create the services + if err := CreateServices(namespace, flags.Installation.Debug.Java); err != nil { + return err + } + + // Store the DB credentials in a secret. + if flags.Installation.DB.User != "" && flags.Installation.DB.Password != "" { + if err := CreateBasicAuthSecret( + namespace, DBSecret, flags.Installation.DB.User, flags.Installation.DB.Password, + ); err != nil { + return err + } + } + + if flags.Installation.ReportDB.User != "" && flags.Installation.ReportDB.Password != "" { + if err := CreateBasicAuthSecret( + namespace, ReportdbSecret, flags.Installation.ReportDB.User, flags.Installation.ReportDB.Password, + ); err != nil { + return err + } + } + + // This SCCSecret is used to mount the env variable in the setup job and is different from the + // pullSecret as it is of a different type: basic-auth vs docker. + if flags.Installation.SCC.User != "" && flags.Installation.SCC.Password != "" { + if err := CreateBasicAuthSecret( + namespace, SCCSecret, flags.Installation.SCC.User, flags.Installation.SCC.Password, + ); err != nil { + return err + } + } + + adminSecret := "admin-credentials" + if flags.Installation.Admin.Login != "" && flags.Installation.Admin.Password != "" { + if err := CreateBasicAuthSecret( + namespace, adminSecret, flags.Installation.Admin.Login, flags.Installation.Admin.Password, + ); err != nil { + return err + } + } + + // TODO For a migration or an upgrade this needs to be skipped + // Run the setup script. + // The script will be skipped if the server has already been setup. 
+	jobName, err := StartSetupJob(
+		namespace, serverImage, kubernetes.GetPullPolicy(flags.Image.PullPolicy), pullSecret,
+		flags.Volumes.Mirror, &flags.Installation, fqdn, adminSecret, DBSecret, ReportdbSecret, SCCSecret,
+	)
+	if err != nil {
+		return err
+	}
+
+	if err := kubernetes.WaitForJob(namespace, jobName, 120); err != nil {
+		return err
+	}
+
+	if clusterInfos.Ingress == "traefik" {
+		// Create the Traefik routes
+		if err := CreateTraefikRoutes(namespace, needsHub, flags.Installation.Debug.Java); err != nil {
+			return err
+		}
+	}
+
+	// Start the server
+	if err := CreateServerDeployment(
+		namespace, serverImage, flags.Image.PullPolicy, flags.Installation.TZ, flags.Installation.Debug.Java,
+		flags.Volumes.Mirror, pullSecret,
+	); err != nil {
+		return err
+	}
+
+	deploymentsStarting := []string{ServerDeployName}
+
+	// Start the Coco Deployments if requested.
+	if replicas := kubernetes.GetReplicas(namespace, CocoDeployName); replicas != 0 && !flags.Coco.IsChanged {
+		// Upgrade: detect the number of running coco replicas
+		flags.Coco.Replicas = replicas
+	}
+	if flags.Coco.Replicas > 0 {
+		cocoImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.Coco.Image)
+		if err != nil {
+			return err
+		}
+		if err := StartCocoDeployment(
+			namespace, cocoImage, flags.Image.PullPolicy, pullSecret, flags.Coco.Replicas,
+			flags.Installation.DB.Port, flags.Installation.DB.Name,
+		); err != nil {
+			return err
+		}
+		deploymentsStarting = append(deploymentsStarting, CocoDeployName)
+	}
+
+	// With an operator, the user would just change the custom resource to enable the feature.
+	if needsHub {
+		// Install Hub API deployment, service
+		hubAPIImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.HubXmlrpc.Image)
+		if err != nil {
+			return err
+		}
+		if err := InstallHubAPI(namespace, hubAPIImage, flags.Image.PullPolicy, pullSecret); err != nil {
+			return err
+		}
+		deploymentsStarting = append(deploymentsStarting, HubAPIDeployName)
+	}
+
+	// Wait for all the other deployments to be ready
+	if err := kubernetes.WaitForDeployments(namespace, deploymentsStarting...); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/mgradm/shared/kubernetes/services.go b/mgradm/shared/kubernetes/services.go
new file mode 100644
index 000000000..f5fb3b4a4
--- /dev/null
+++ b/mgradm/shared/kubernetes/services.go
@@ -0,0 +1,112 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/rs/zerolog"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// CreateServices creates the kubernetes services for the server.
+//
+// If debug is true, the Java debug ports will be exposed.
+func CreateServices(namespace string, debug bool) error {
+	services := GetServices(namespace, debug)
+	for _, svc := range services {
+		if !hasCustomService(namespace, svc.ObjectMeta.Name) {
+			if err := kubernetes.Apply([]*core.Service{svc}, L("failed to create the service")); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// GetServices creates the definitions of all the services of the server.
+//
+// If debug is true, the Java debug ports will be exposed.
+func GetServices(namespace string, debug bool) []*core.Service {
+	ports := utils.GetServerPorts(debug)
+	ports = append(ports, utils.DBPorts...)
+
+	servicesPorts := map[string][]types.PortMap{}
+	for _, port := range ports {
+		svcPorts := servicesPorts[port.Service]
+		if svcPorts == nil {
+			svcPorts = []types.PortMap{}
+		}
+		svcPorts = append(svcPorts, port)
+		servicesPorts[port.Service] = svcPorts
+	}
+
+	services := []*core.Service{}
+	for _, svcPorts := range servicesPorts {
+		protocol := core.ProtocolTCP
+		if svcPorts[0].Protocol == "udp" {
+			protocol = core.ProtocolUDP
+		}
+		services = append(services,
+			getService(namespace, kubernetes.ServerApp, kubernetes.ServerComponent, svcPorts[0].Service, protocol, svcPorts...),
+		)
+	}
+	return services
+}
+
+func getService(
+	namespace string,
+	app string,
+	component string,
+	name string,
+	protocol core.Protocol,
+	ports ...types.PortMap,
+) *core.Service {
+	// TODO make configurable to allow NodePort and maybe LoadBalancer for exposed services.
+	serviceType := core.ServiceTypeClusterIP
+
+	portObjs := []core.ServicePort{}
+	for _, port := range ports {
+		portObjs = append(portObjs, core.ServicePort{
+			Name:       port.Name,
+			Port:       int32(port.Exposed),
+			TargetPort: intstr.FromInt(port.Port),
+			Protocol:   protocol,
+		})
+	}
+
+	return &core.Service{
+		TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Service"},
+		ObjectMeta: meta.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+			Labels:    kubernetes.GetLabels(app, component),
+		},
+		Spec: core.ServiceSpec{
+			Ports:    portObjs,
+			Selector: map[string]string{kubernetes.ComponentLabel: component},
+			Type:     serviceType,
+		},
+	}
+}
+
+func hasCustomService(namespace string, name string) bool {
+	out, err := utils.RunCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "svc", "-n", namespace,
+		"-l", fmt.Sprintf("%s!=%s", kubernetes.AppLabel, kubernetes.ServerApp),
+		"-o", fmt.Sprintf("jsonpath={.items[?(@.metadata.name=='%s')].metadata.name}", name),
+	)
+	// Custom services don't have our app label!
+	return err == nil && strings.TrimSpace(string(out)) != ""
+}
diff --git a/mgradm/shared/kubernetes/setup.go b/mgradm/shared/kubernetes/setup.go
new file mode 100644
index 000000000..45c3d5c87
--- /dev/null
+++ b/mgradm/shared/kubernetes/setup.go
@@ -0,0 +1,219 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"strings"
+	"time"
+
+	"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
+	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	batch "k8s.io/api/batch/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const SetupJobName = "uyuni-setup"
+
+// StartSetupJob creates the job setting up the server.
+func StartSetupJob(
+	namespace string,
+	image string,
+	pullPolicy core.PullPolicy,
+	pullSecret string,
+	mirrorPvName string,
+	flags *adm_utils.InstallationFlags,
+	fqdn string,
+	adminSecret string,
+	dbSecret string,
+	reportdbSecret string,
+	sccSecret string,
+) (string, error) {
+	job, err := GetSetupJob(
+		namespace, image, pullPolicy, pullSecret, mirrorPvName, flags, fqdn,
+		adminSecret, dbSecret, reportdbSecret, sccSecret,
+	)
+	if err != nil {
+		return "", err
+	}
+	return job.ObjectMeta.Name, kubernetes.Apply([]*batch.Job{job}, L("failed to run the setup job"))
+}
+
+// GetSetupJob creates the job definition object for the setup.
+func GetSetupJob(
+	namespace string,
+	image string,
+	pullPolicy core.PullPolicy,
+	pullSecret string,
+	mirrorPvName string,
+	flags *adm_utils.InstallationFlags,
+	fqdn string,
+	adminSecret string,
+	dbSecret string,
+	reportdbSecret string,
+	sccSecret string,
+) (*batch.Job, error) {
+	var maxFailures int32 = 0
+	timestamp := time.Now().Format("20060102150405")
+
+	template := getServerPodTemplate(image, pullPolicy, flags.TZ, pullSecret)
+
+	script, err := generateSetupScript(flags)
+	if err != nil {
+		return nil, err
+	}
+
+	template.Spec.Containers[0].Name = "setup"
+	template.Spec.Containers[0].Command = []string{"sh", "-c", script}
+	template.Spec.RestartPolicy = core.RestartPolicyNever
+
+	optional := false
+
+	localHostValues := []string{
+		"localhost",
+		"127.0.0.1",
+		"::1",
+		fqdn,
+	}
+
+	localDB := "N"
+	if utils.Contains(localHostValues, flags.DB.Host) {
+		localDB = "Y"
+	}
+
+	// The DB and ReportDB ports are expected to be the standard PostgreSQL port (5432).
+	// When using an external database with a custom port the only solution is to access it using
+	// its IP address and a headless service with a custom EndpointSlice.
+	// If this is too big a constraint, we'll have to accept the port as a parameter too.
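
To make the workaround described in the comment above concrete, here is a hedged example (not part of this PR) of mapping an in-cluster database name to an external PostgreSQL listening on a custom port, using a selectorless Service plus a manually managed EndpointSlice. The service name, namespace and the 192.0.2.10:5433 endpoint are placeholders:

```yaml
# Illustrative only: expose an external PostgreSQL on a custom port under
# a stable in-cluster name "db". All names and addresses are placeholders.
apiVersion: v1
kind: Service
metadata:
  name: db
  namespace: uyuni
spec:                       # no selector: endpoints are managed manually
  ports:
    - name: postgresql
      port: 5432            # the standard port the setup job expects
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
  name: db-external
  namespace: uyuni
  labels:
    kubernetes.io/service-name: db   # ties the slice to the Service above
addressType: IPv4
ports:
  - name: postgresql
    port: 5433              # the custom port the external database uses
endpoints:
  - addresses:
      - 192.0.2.10
```
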
+ env := []core.EnvVar{ + {Name: "NO_SSL", Value: "Y"}, + {Name: "UYUNI_FQDN", Value: fqdn}, + {Name: "ADMIN_USER", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: adminSecret}, + Key: "username", + Optional: &optional, + }, + }}, + {Name: "ADMIN_PASS", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: adminSecret}, + Key: "password", + Optional: &optional, + }, + }}, + {Name: "MANAGER_USER", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: dbSecret}, + Key: "username", + Optional: &optional, + }, + }}, + {Name: "MANAGER_PASS", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: dbSecret}, + Key: "password", + Optional: &optional, + }, + }}, + {Name: "MANAGER_ADMIN_EMAIL", Value: flags.Email}, + {Name: "MANAGER_MAIL_FROM", Value: flags.EmailFrom}, + {Name: "MANAGER_ENABLE_TFTP", Value: "Y"}, + {Name: "LOCAL_DB", Value: localDB}, + {Name: "MANAGER_DB_NAME", Value: flags.DB.Name}, + {Name: "MANAGER_DB_HOST", Value: flags.DB.Host}, + {Name: "MANAGER_DB_PORT", Value: "5432"}, + {Name: "MANAGER_DB_PROTOCOL", Value: "tcp"}, + {Name: "REPORT_DB_NAME", Value: flags.ReportDB.Name}, + {Name: "REPORT_DB_HOST", Value: flags.ReportDB.Host}, + {Name: "REPORT_DB_PORT", Value: "5432"}, + {Name: "REPORT_DB_USER", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: reportdbSecret}, + Key: "username", + Optional: &optional, + }, + }}, + {Name: "REPORT_DB_PASS", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: reportdbSecret}, + Key: "password", + Optional: &optional, + }, + }}, + {Name: "ISS_PARENT", Value: flags.IssParent}, + {Name: "ACTIVATE_SLP", Value: "N"}, + // TODO EXTERNALDB_* variables are not passed yet: only for AWS and it probably doesn't make sense for kubernetes yet. 
+ } + + if sccSecret != "" { + env = append(env, + core.EnvVar{Name: "SCC_USER", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: sccSecret}, + Key: "username", + Optional: &optional, + }, + }}, + core.EnvVar{Name: "SCC_PASS", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: sccSecret}, + Key: "password", + Optional: &optional, + }, + }}, + ) + } + + if mirrorPvName != "" { + env = append(env, core.EnvVar{Name: "MIRROR_PATH", Value: "/mirror"}) + } + template.Spec.Containers[0].Env = env + + job := batch.Job{ + TypeMeta: meta.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: SetupJobName + "-" + timestamp, + Namespace: namespace, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + Spec: batch.JobSpec{ + Template: template, + BackoffLimit: &maxFailures, + }, + } + + if pullSecret != "" { + job.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}} + } + + return &job, nil +} + +func generateSetupScript(flags *adm_utils.InstallationFlags) (string, error) { + template := templates.MgrSetupScriptTemplateData{ + Env: map[string]string{}, + DebugJava: flags.Debug.Java, + OrgName: flags.Organization, + AdminLogin: "$ADMIN_USER", + AdminPassword: "$ADMIN_PASS", + AdminFirstName: flags.Admin.FirstName, + AdminLastName: flags.Admin.LastName, + AdminEmail: flags.Admin.Email, + NoSSL: true, + } + + // Prepare the script + scriptBuilder := new(strings.Builder) + if err := template.Render(scriptBuilder); err != nil { + return "", utils.Errorf(err, L("failed to render setup script")) + } + return scriptBuilder.String(), nil +} diff --git a/mgradm/shared/kubernetes/traefik.go b/mgradm/shared/kubernetes/traefik.go new file mode 100644 index 000000000..7577369fb --- /dev/null +++ b/mgradm/shared/kubernetes/traefik.go @@ -0,0 +1,127 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "fmt" + "html/template" + "io" + "os" + "path" + + "github.com/rs/zerolog" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + + . "github.com/uyuni-project/uyuni-tools/shared/l10n" +) + +// CreateTraefikRoutes creates the routes and middleware wiring the traefik endpoints to their service. 
+func CreateTraefikRoutes(namespace string, hub bool, debug bool) error {
+	routeTemplate := template.Must(template.New("ingressRoute").Parse(ingressRouteTemplate))
+
+	tempDir, cleaner, err := utils.TempDir()
+	if err != nil {
+		return err
+	}
+	defer cleaner()
+
+	filePath := path.Join(tempDir, "routes.yaml")
+	file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700)
+	if err != nil {
+		return utils.Errorf(err, L("failed to open %s for writing"), filePath)
+	}
+	defer file.Close()
+
+	// Write the SSL Redirect middleware
+	_, err = file.WriteString(fmt.Sprintf(`
+apiVersion: traefik.containo.us/v1alpha1
+kind: Middleware
+metadata:
+  name: uyuni-https-redirect
+  namespace: "%s"
+  labels:
+    %s: %s
+spec:
+  redirectScheme:
+    scheme: https
+    permanent: true
+`, namespace, kubernetes.AppLabel, kubernetes.ServerApp))
+	if err != nil {
+		return utils.Errorf(err, L("failed to write traefik middleware and routes to file"))
+	}
+
+	// Write the routes from the endpoint to the services
+	for _, endpoint := range getPortList(hub, debug) {
+		_, err := file.WriteString("---\n")
+		if err != nil {
+			return utils.Errorf(err, L("failed to write traefik middleware and routes to file"))
+		}
+		if err := getTraefikRoute(routeTemplate, file, namespace, endpoint); err != nil {
+			return err
+		}
+	}
+	if err := file.Close(); err != nil {
+		return utils.Errorf(err, L("failed to close traefik middleware and routes file"))
+	}
+
+	if _, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "apply", "-f", filePath); err != nil {
+		return utils.Errorf(err, L("failed to create traefik middleware and routes"))
+	}
+	return nil
+}
+
+func getTraefikRoute(t *template.Template, writer io.Writer, namespace string, endpoint types.PortMap) error {
+	endpointName := kubernetes.GetTraefikEndpointName(endpoint)
+	protocol := "TCP"
+	if endpoint.Protocol == "udp" {
+		protocol = "UDP"
+	}
+
+	data := routeData{
+		Name:      endpointName + "-route",
+		Namespace: namespace,
+		EndPoint:  endpointName,
+		Service:   endpoint.Service,
+		Port:      endpoint.Exposed,
+		Protocol:  protocol,
+	}
+	if err := t.Execute(writer, data); err != nil {
+		return utils.Errorf(err, L("failed to write traefik routes to file"))
+	}
+	return nil
+}
+
+type routeData struct {
+	Name      string
+	Namespace string
+	EndPoint  string
+	Service   string
+	Port      int
+	Protocol  string
+}
+
+const ingressRouteTemplate = `
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRoute{{ .Protocol }}
+metadata:
+  name: {{ .Name }}
+  namespace: "{{ .Namespace }}"
+  labels:
+    ` + kubernetes.AppLabel + ": " + kubernetes.ServerApp + `
+spec:
+  entryPoints:
+    - {{ .EndPoint }}
+  routes:
+    - services:
+        - name: {{ .Service }}
+          port: {{ .Port }}
+{{- if eq .Protocol "TCP" }}
+      match: ` + "HostSNI(`*`)" + `
+{{- end }}
+`
diff --git a/mgradm/shared/kubernetes/traefik_test.go b/mgradm/shared/kubernetes/traefik_test.go
new file mode 100644
index 000000000..ba9eee3b8
--- /dev/null
+++ b/mgradm/shared/kubernetes/traefik_test.go
@@ -0,0 +1,82 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"bytes"
+	"html/template"
+	"testing"
+
+	"github.com/uyuni-project/uyuni-tools/shared/testutils"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+)
+
+func TestGetTraefikRouteTCP(t *testing.T) {
+	routeTemplate := template.Must(template.New("ingressRoute").Parse(ingressRouteTemplate))
+
+	var buf bytes.Buffer
+	err := getTraefikRoute(routeTemplate, &buf, "foo", utils.NewPortMap("svcname", "port1", 123, 456))
+	if err != nil {
+		t.Errorf("Unexpected error: %s", err)
+	}
+
+	actual := buf.String()
+	expected := `
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRouteTCP
+metadata:
+  name: svcname-port1-route
+  namespace: "foo"
+  labels:
+    app.kubernetes.io/part-of: uyuni
+spec:
+  entryPoints:
+    - svcname-port1
+  routes:
+    - services:
+        - name: svcname
+          port: 123
+      match: ` + "HostSNI(`*`)\n"
+	testutils.AssertEquals(t, "Wrong traefik route generated", expected, actual)
+}
+
+func TestGetTraefikRouteUDP(t *testing.T) {
+	routeTemplate := template.Must(template.New("ingressRoute").Parse(ingressRouteTemplate))
+
+	var buf bytes.Buffer
+	err := getTraefikRoute(routeTemplate, &buf, "foo",
+		types.PortMap{
+			Service:  "svcname",
+			Name:     "port1",
+			Exposed:  123,
+			Port:     456,
+			Protocol: "udp",
+		})
+	if err != nil {
+		t.Errorf("Unexpected error: %s", err)
+	}
+
+	actual := buf.String()
+	expected := `
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRouteUDP
+metadata:
+  name: svcname-port1-route
+  namespace: "foo"
+  labels:
+    app.kubernetes.io/part-of: uyuni
+spec:
+  entryPoints:
+    - svcname-port1
+  routes:
+    - services:
+        - name: svcname
+          port: 123
+`
+	testutils.AssertEquals(t, "Wrong traefik route generated", expected, actual)
+}
diff --git a/mgradm/shared/podman/podman.go b/mgradm/shared/podman/podman.go
index 73a6d2adc..ebd2f69df 100644
--- a/mgradm/shared/podman/podman.go
+++ b/mgradm/shared/podman/podman.go
@@ -29,18 +29,9 @@ import (
 
 // GetExposedPorts returns the port exposed.
 func GetExposedPorts(debug bool) []types.PortMap {
-	ports := []types.PortMap{
-		utils.NewPortMap("https", 443, 443),
-		utils.NewPortMap("http", 80, 80),
-	}
-	ports = append(ports, utils.TCPPorts...)
+	ports := utils.GetServerPorts(debug)
+	ports = append(ports, utils.NewPortMap(utils.WebServiceName, "https", 443, 443))
 	ports = append(ports, utils.TCPPodmanPorts...)
-	ports = append(ports, utils.UDPPorts...)
-
-	if debug {
-		ports = append(ports, utils.DebugPorts...)
- } - return ports } @@ -56,7 +47,7 @@ func GenerateServerSystemdService(mirrorPath string, debug bool) error { ports := GetExposedPorts(debug) if _, err := exec.LookPath("csp-billing-adapter"); err == nil { - ports = append(ports, utils.NewPortMap("csp-billing", 18888, 18888)) + ports = append(ports, utils.NewPortMap("csp", "csp-billing", 18888, 18888)) args = append(args, "-e ISPAYG=1") } @@ -287,9 +278,7 @@ func RunPgsqlVersionUpgrade( log.Info().Msgf(L("Using database upgrade image %s"), preparedImage) - pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript( - scriptDir, oldPgsql, newPgsql, false, - ) + pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql) if err != nil { return utils.Errorf(err, L("cannot generate PostgreSQL database version upgrade script")) } @@ -383,9 +372,16 @@ func Upgrade( return utils.Errorf(err, L("cannot inspect podman values")) } - cnx := shared.NewConnection("podman", podman.ServerContainerName, "") + runningImage := podman.GetServiceImage(podman.ServerService) + var runningData *utils.ServerInspectData + if runningImage != "" { + runningData, err = Inspect(runningImage) + if err != nil { + return err + } + } - if err := adm_utils.SanityCheck(cnx, inspectedValues, preparedImage); err != nil { + if err := adm_utils.SanityCheck(runningData, inspectedValues, preparedImage); err != nil { return err } diff --git a/mgradm/shared/templates/issuerTemplate.go b/mgradm/shared/templates/issuerTemplate.go index 733c9531c..cddb86958 100644 --- a/mgradm/shared/templates/issuerTemplate.go +++ b/mgradm/shared/templates/issuerTemplate.go @@ -7,34 +7,18 @@ package templates import ( "io" "text/template" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" ) // Deploy self-signed issuer or CA Certificate and key. -const issuerTemplate = ` -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .Namespace }} - labels: - name: {{ .Namespace }} ---- -{{if and .Certificate .Key -}} -apiVersion: v1 -kind: Secret -type: kubernetes.io/tls -metadata: - name: uyuni-ca - namespace: {{ .Namespace }} -data: - ca.crt: {{ .RootCa }} - tls.crt: {{ .Certificate }} - tls.key: {{ .Key }} -{{- else }} -apiVersion: cert-manager.io/v1 +const generatedCaIssuerTemplate = `apiVersion: cert-manager.io/v1 kind: Issuer metadata: name: uyuni-issuer namespace: {{ .Namespace }} + labels: + app: ` + kubernetes.ServerApp + ` spec: selfSigned: {} --- @@ -43,6 +27,8 @@ kind: Certificate metadata: name: uyuni-ca namespace: {{ .Namespace }} + labels: + app: ` + kubernetes.ServerApp + ` spec: isCA: true {{- if or .Country .State .City .Org .OrgUnit }} @@ -78,36 +64,23 @@ spec: name: uyuni-issuer kind: Issuer group: cert-manager.io -{{- end }} --- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: uyuni-ca-issuer - namespace: {{ .Namespace }} -spec: - ca: - secretName: - uyuni-ca ` -// IssuerTemplateData represents information used to create issuer file. -type IssuerTemplateData struct { - Namespace string - Country string - State string - City string - Org string - OrgUnit string - Email string - Fqdn string - RootCa string - Certificate string - Key string +// GeneratedCaIssuerTemplateData is a template to render cert-manager issuers for a generated self-signed CA. +type GeneratedCaIssuerTemplateData struct { + Namespace string + Country string + State string + City string + Org string + OrgUnit string + Email string + Fqdn string } // Render creates issuer file. 
-func (data IssuerTemplateData) Render(wr io.Writer) error {
-	t := template.Must(template.New("issuer").Parse(issuerTemplate))
+func (data GeneratedCaIssuerTemplateData) Render(wr io.Writer) error {
+	t := template.Must(template.New("issuer").Parse(generatedCaIssuerTemplate + uyuniCaIssuer))
 	return t.Execute(wr, data)
 }
diff --git a/mgradm/shared/templates/mgrSetupScriptTemplate.go b/mgradm/shared/templates/mgrSetupScriptTemplate.go
index 2c17b5707..4909f12fe 100644
--- a/mgradm/shared/templates/mgrSetupScriptTemplate.go
+++ b/mgradm/shared/templates/mgrSetupScriptTemplate.go
@@ -11,6 +11,11 @@ import (
 
 //nolint:lll
 const mgrSetupScriptTemplate = `#!/bin/sh
+if test -e /root/.MANAGER_SETUP_COMPLETE; then
+    echo "Server appears to be already configured. Installation options may be ignored."
+    exit 0
+fi
+
 {{- range $name, $value := .Env }}
 export {{ $name }}='{{ $value }}'
 {{- end }}
@@ -24,15 +29,60 @@ echo 'JAVA_OPTS=" $JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=*:800
 /usr/lib/susemanager/bin/mgr-setup -s -n
 RESULT=$?
 
-# clean before leaving
-rm $0
+# The CA needs to be added to the database for Kickstart use.
+/usr/bin/rhn-ssl-dbstore --ca-cert=/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT
+
+echo "starting tomcat..."
+(su -s /usr/bin/sh -g tomcat -G www -G susemanager tomcat /usr/lib/tomcat/server start)&
+
+echo "starting apache2..."
+/usr/sbin/start_apache2 -k start
+
+if test -n "{{ .AdminPassword }}"; then
+    echo "Creating first user..."
+    {{ if .NoSSL }}
+    CURL_SCHEME="http"
+    {{ else }}
+    CURL_SCHEME="-k https"
+    {{ end }}
+
+    curl -o /tmp/curl-retry -s --retry 7 $CURL_SCHEME://localhost/rhn/newlogin/CreateFirstUser.do
+
+    HTTP_CODE=$(curl -o /dev/null -s -w %{http_code} $CURL_SCHEME://localhost/rhn/newlogin/CreateFirstUser.do)
+    if test "$HTTP_CODE" = "200"; then
+        echo "Creating administration user"
+        curl -s -o /tmp/curl_out \
+            -d "orgName={{ .OrgName }}" \
+            -d "adminLogin={{ .AdminLogin }}" \
+            -d "adminPassword={{ .AdminPassword }}" \
+            -d "firstName={{ .AdminFirstName }}" \
+            -d "lastName={{ .AdminLastName }}" \
+            -d "email={{ .AdminEmail }}" \
+            $CURL_SCHEME://localhost/rhn/manager/api/org/createFirst
+        if ! grep -q '^{"success":true' /tmp/curl_out ; then
+            echo "Failed to create the administration user"
+            cat /tmp/curl_out
+        fi
+        rm -f /tmp/curl_out
+    elif test "$HTTP_CODE" = "403"; then
+        echo "Administration user already exists, reusing"
+    fi
+fi
+
 exit $RESULT
 `
 
 // MgrSetupScriptTemplateData represents information used to create setup script.
 type MgrSetupScriptTemplateData struct {
-	Env       map[string]string
-	DebugJava bool
+	Env            map[string]string
+	NoSSL          bool
+	DebugJava      bool
+	AdminPassword  string
+	AdminLogin     string
+	AdminFirstName string
+	AdminLastName  string
+	AdminEmail     string
+	OrgName        string
 }
 
 // Render will create setup script.
diff --git a/mgradm/shared/templates/migrateScriptTemplate.go b/mgradm/shared/templates/migrateScriptTemplate.go
index 67abc912a..fbdf3c86b 100644
--- a/mgradm/shared/templates/migrateScriptTemplate.go
+++ b/mgradm/shared/templates/migrateScriptTemplate.go
@@ -126,8 +126,10 @@ echo "Extracting time zone..."
 $SSH {{ .SourceFqdn }} timedatectl show -p Timezone >/var/lib/uyuni-tools/data
 
 echo "Extracting postgresql versions..."
-echo "image_pg_version=$(rpm -qa --qf '%{VERSION}\n' 'name=postgresql[0-8][0-9]-server' | cut -d. -f1 | sort -n | tail -1)" >> /var/lib/uyuni-tools/data
-echo "current_pg_version=$(cat /var/lib/pgsql/data/PG_VERSION)" >> /var/lib/uyuni-tools/data
+image_pg_version=$(rpm -qa --qf '%{VERSION}\n' 'name=postgresql[0-8][0-9]-server' | cut -d. -f1 | sort -n | tail -1)
+current_pg_version=$(cat /var/lib/pgsql/data/PG_VERSION)
+echo "image_pg_version=$image_pg_version" >> /var/lib/uyuni-tools/data
+echo "current_pg_version=$current_pg_version" >> /var/lib/uyuni-tools/data
 
 grep '^db_user' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
 grep '^db_password' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
@@ -135,6 +137,7 @@ grep '^db_name' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
 grep '^db_port' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
 
 $SSH {{ .SourceFqdn }} sh -c "systemctl list-unit-files | grep hub-xmlrpc-api | grep -q active && echo has_hubxmlrpc=true || echo has_hubxmlrpc=false" >>/var/lib/uyuni-tools/data
+(test $($SSH {{ .SourceFqdn }} grep jdwp -r /etc/tomcat/conf.d/ /etc/rhn/taskomatic.conf | wc -l) -gt 0 && echo debug=true || echo debug=false) >>/var/lib/uyuni-tools/data
 
 echo "Altering configuration for domain resolution..."
 sed 's/report_db_host = {{ .SourceFqdn }}/report_db_host = localhost/' -i /etc/rhn/rhn.conf;
@@ -151,6 +154,9 @@ sed 's/--add-modules java.annotation,com.sun.xml.bind://' -i /etc/tomcat/conf.d/
 sed 's/-XX:-UseConcMarkSweepGC//' -i /etc/tomcat/conf.d/*
 test -f /etc/tomcat/conf.d/remote_debug.conf && sed 's/address=[^:]*:/address=*:/' -i /etc/tomcat/conf.d/remote_debug.conf
 
+# Alter rhn.conf to ensure mirror is set to /mirror if set at all
+sed 's/server.susemanager.fromdir =.*/server.susemanager.fromdir = \/mirror/' -i /etc/rhn/rhn.conf
+
 {{ if .Kubernetes }}
 echo 'server.no_ssl = 1' >> /etc/rhn/rhn.conf;
 echo "Extracting SSL certificate and authority"
@@ -180,7 +186,6 @@ if test "extractedSSL" != "1"; then
     # For third party certificates, the CA chain is in the certificate file.
     rsync -e "$SSH" --rsync-path='sudo rsync' -avz {{ .SourceFqdn }}:/etc/pki/tls/private/spacewalk.key /var/lib/uyuni-tools/
     rsync -e "$SSH" --rsync-path='sudo rsync' -avz {{ .SourceFqdn }}:/etc/pki/tls/certs/spacewalk.crt /var/lib/uyuni-tools/
-
 fi
 
 echo "Removing useless ssl-build folder..."
diff --git a/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go b/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go
index 39c63e740..723a6a5f8 100644
--- a/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go
+++ b/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go
@@ -13,6 +13,10 @@ import (
 const postgresFinalizeScriptTemplate = `#!/bin/bash
 set -e
 
+echo "Temporarily disable SSL in the postgresql configuration"
+cp /var/lib/pgsql/data/postgresql.conf /var/lib/pgsql/data/postgresql.conf.bak
+sed 's/^ssl/#ssl/' -i /var/lib/pgsql/data/postgresql.conf
+
 {{ if .Migration }}
 echo "Adding database access for other containers..."
 db_user=$(sed -n '/^db_user/{s/^.*=[ \t]\+\(.*\)$/\1/ ; p}' /etc/rhn/rhn.conf)
@@ -26,7 +30,7 @@ echo "Running smdba system-check autotuning..."
 smdba system-check autotuning
 {{ end }}
 echo "Starting Postgresql..."
-su -s /bin/bash - postgres -c "/usr/share/postgresql/postgresql-script start"
+HOME=/var/lib/pgsql PG_DATA=/var/lib/pgsql/data su -s /bin/bash -p postgres -c "/usr/share/postgresql/postgresql-script start"
 
 {{ if .RunReindex }}
 echo "Reindexing database. This may take a while, please do not cancel it!"
 database=$(sed -n "s/^\s*db_name\s*=\s*\([^ ]*\)\s*$/\1/p" /etc/rhn/rhn.conf)
@@ -46,7 +50,6 @@ UPDATE rhnKickstartableTree SET base_path = CONCAT('/srv/www/distributions/', ta
 from dist_map WHERE dist_map.base_path = rhnKickstartableTree.base_path;
 DROP TABLE dist_map;
 EOT
-{{ end }}
 
 echo "Schedule a system list update task..."
 spacewalk-sql --select-mode - </dev/null
 # Load locale for SUSE
 PGHOME=$(getent passwd postgres | cut -d ":" -f6)
-#. $PGHOME/.i18n 2>/dev/null # Load locale for Enterprise Linux
 if [ -z $POSTGRES_LANG ]; then
     POSTGRES_LANG="en_US.UTF-8"
     [ ! -z $LC_CTYPE ] && POSTGRES_LANG=$LC_CTYPE
@@ -45,8 +49,16 @@ echo "Running initdb using postgres user"
 echo "Any suggested command from the console should be run using postgres user"
 su -s /bin/bash - postgres -c "initdb -D /var/lib/pgsql/data --locale=$POSTGRES_LANG"
 echo "Successfully initialized new postgresql $NEW_VERSION database."
+
+echo "Temporarily disable SSL in the old postgresql configuration"
+cp /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf.bak
+sed 's/^ssl/#ssl/' -i /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf
+
 su -s /bin/bash - postgres -c "pg_upgrade --old-bindir=/usr/lib/postgresql$OLD_VERSION/bin --new-bindir=/usr/lib/postgresql$NEW_VERSION/bin --old-datadir=/var/lib/pgsql/data-pg$OLD_VERSION --new-datadir=/var/lib/pgsql/data $FAST_UPGRADE"
+
+echo "Enable SSL again"
+cp /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf.bak /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf
+
 cp /var/lib/pgsql/data-pg$OLD_VERSION/pg_hba.conf /var/lib/pgsql/data
 mv /var/lib/pgsql/data-pg$OLD_VERSION/pg_hba.conf /var/lib/pgsql/data-pg$OLD_VERSION/pg_hba.conf.migrated
 cp /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf /var/lib/pgsql/data/
@@ -58,7 +70,6 @@ echo "DONE"`
 type PostgreSQLVersionUpgradeTemplateData struct {
 	OldVersion string
 	NewVersion string
-	Kubernetes bool
 }
 
 // Render will create PostgreSQL upgrade script.
diff --git a/mgradm/shared/templates/reusedCaIssuerTemplate.go b/mgradm/shared/templates/reusedCaIssuerTemplate.go
new file mode 100644
index 000000000..b4f3edfa2
--- /dev/null
+++ b/mgradm/shared/templates/reusedCaIssuerTemplate.go
@@ -0,0 +1,52 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package templates
+
+import (
+	"io"
+	"text/template"
+
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+)
+
+const uyuniCaIssuer = `apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: ` + kubernetes.CaIssuerName + `
+  namespace: {{ .Namespace }}
+  labels:
+    app: ` + kubernetes.ServerApp + `
+spec:
+  ca:
+    secretName: uyuni-ca
+`
+
+const reusedCaIssuerTemplate = `apiVersion: v1
+kind: Secret
+type: kubernetes.io/tls
+metadata:
+  name: uyuni-ca
+  namespace: {{ .Namespace }}
+  labels:
+    app: ` + kubernetes.ServerApp + `
+data:
+  ca.crt: {{ .Certificate }}
+  tls.crt: {{ .Certificate }}
+  tls.key: {{ .Key }}
+---
+`
+
+// ReusedCaIssuerTemplateData is the data used to render a cert-manager issuer from an existing root CA.
+type ReusedCaIssuerTemplateData struct {
+	Namespace   string
+	Certificate string
+	Key         string
+}
+
+// Render creates issuer file.
+func (data ReusedCaIssuerTemplateData) Render(wr io.Writer) error { + t := template.Must(template.New("issuer").Parse(reusedCaIssuerTemplate + uyuniCaIssuer)) + return t.Execute(wr, data) +} diff --git a/mgradm/shared/templates/tlsSecret.go b/mgradm/shared/templates/tlsSecret.go index bd4ecd6d0..934ae5a62 100644 --- a/mgradm/shared/templates/tlsSecret.go +++ b/mgradm/shared/templates/tlsSecret.go @@ -7,6 +7,8 @@ package templates import ( "io" "text/template" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" ) // Deploy self-signed issuer or CA Certificate and key. @@ -16,6 +18,8 @@ type: kubernetes.io/tls metadata: name: {{ .Name }} namespace: {{ .Namespace }} + labels: + app: ` + kubernetes.ServerApp + ` data: ca.crt: {{ .RootCa }} tls.crt: {{ .Certificate }} diff --git a/mgradm/shared/utils/cmd_utils.go b/mgradm/shared/utils/cmd_utils.go index c8d617120..eeb806c9d 100644 --- a/mgradm/shared/utils/cmd_utils.go +++ b/mgradm/shared/utils/cmd_utils.go @@ -18,7 +18,12 @@ var defaultImage = path.Join(utils.DefaultRegistry, "server") // UseExisting return true if existing SSL Cert can be used. func (f *InstallSSLFlags) UseExisting() bool { - return f.Server.Cert != "" && f.Server.Key != "" && f.Ca.Root != "" + return f.Server.Cert != "" && f.Server.Key != "" && f.Ca.Root != "" && f.Ca.Key == "" +} + +// UseMigratedCa returns true if a migrated CA and key can be used. +func (f *InstallSSLFlags) UseMigratedCa() bool { + return f.Ca.Root != "" && f.Ca.Key != "" } // CheckParameters checks that all the required flags are passed if using 3rd party certificates. @@ -30,30 +35,57 @@ func (f *InstallSSLFlags) CheckParameters() { // AddHelmInstallFlag add Helm install flags to a command. func AddHelmInstallFlag(cmd *cobra.Command) { - defaultChart := fmt.Sprintf("oci://%s/server-helm", utils.DefaultHelmRegistry) - - cmd.Flags().String("helm-uyuni-namespace", "default", L("Kubernetes namespace where to install uyuni")) - cmd.Flags().String("helm-uyuni-chart", defaultChart, L("URL to the uyuni helm chart")) - cmd.Flags().String("helm-uyuni-version", "", L("Version of the uyuni helm chart")) - cmd.Flags().String("helm-uyuni-values", "", L("Path to a values YAML file to use for Uyuni helm install")) - cmd.Flags().String("helm-certmanager-namespace", "cert-manager", + cmd.Flags().String("kubernetes-uyuni-namespace", "default", L("Kubernetes namespace where to install uyuni")) + cmd.Flags().String("kubernetes-certmanager-namespace", "cert-manager", L("Kubernetes namespace where to install cert-manager"), ) - cmd.Flags().String("helm-certmanager-chart", "", + cmd.Flags().String("kubernetes-certmanager-chart", "", L("URL to the cert-manager helm chart. 
To be used for offline installations"), ) - cmd.Flags().String("helm-certmanager-version", "", L("Version of the cert-manager helm chart")) - cmd.Flags().String("helm-certmanager-values", "", L("Path to a values YAML file to use for cert-manager helm install")) + cmd.Flags().String("kubernetes-certmanager-version", "", L("Version of the cert-manager helm chart")) + cmd.Flags().String("kubernetes-certmanager-values", "", + L("Path to a values YAML file to use for cert-manager helm install"), + ) _ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "helm", Title: L("Helm Chart Flags")}) - _ = utils.AddFlagToHelpGroupID(cmd, "helm-uyuni-namespace", "helm") - _ = utils.AddFlagToHelpGroupID(cmd, "helm-uyuni-chart", "helm") - _ = utils.AddFlagToHelpGroupID(cmd, "helm-uyuni-version", "helm") - _ = utils.AddFlagToHelpGroupID(cmd, "helm-uyuni-values", "helm") - _ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-namespace", "helm") - _ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-chart", "helm") - _ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-version", "helm") - _ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-values", "helm") + _ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-uyuni-namespace", "helm") + _ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-namespace", "helm") + _ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-chart", "helm") + _ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-version", "helm") + _ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-values", "helm") +} + +const volumesFlagsGroupID = "volumes" + +// AddVolumesFlags adds the Kubernetes volumes configuration parameters to the command. +func AddVolumesFlags(cmd *cobra.Command) { + cmd.Flags().String("volumes-class", "", L("Default storage class for all the volumes")) + cmd.Flags().String("volumes-mirror", "", + L("PersistentVolume name to use as a mirror. Empty means no mirror is used"), + ) + + _ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: volumesFlagsGroupID, Title: L("Volumes Configuration Flags")}) + _ = utils.AddFlagToHelpGroupID(cmd, "volumes-class", volumesFlagsGroupID) + _ = utils.AddFlagToHelpGroupID(cmd, "volumes-mirror", volumesFlagsGroupID) + + addVolumeFlags(cmd, "database", "var-pgsql", "50Gi") + addVolumeFlags(cmd, "packages", "var-spacewalk", "100Gi") + addVolumeFlags(cmd, "www", "srv-www", "100Gi") + addVolumeFlags(cmd, "cache", "var-cache", "10Gi") +} + +func addVolumeFlags(cmd *cobra.Command, name string, volumeName string, size string) { + sizeName := fmt.Sprintf("volumes-%s-size", name) + cmd.Flags().String( + sizeName, size, fmt.Sprintf(L("Requested size for the %s volume"), volumeName), + ) + _ = utils.AddFlagToHelpGroupID(cmd, sizeName, volumesFlagsGroupID) + + className := fmt.Sprintf("volumes-%s-class", name) + cmd.Flags().String( + className, "", fmt.Sprintf(L("Requested storage class for the %s volume"), volumeName), + ) + _ = utils.AddFlagToHelpGroupID(cmd, className, volumesFlagsGroupID) } // AddContainerImageFlags add container image flags to command. diff --git a/mgradm/shared/utils/exec.go b/mgradm/shared/utils/exec.go index 93dad92e0..8ea2130b5 100644 --- a/mgradm/shared/utils/exec.go +++ b/mgradm/shared/utils/exec.go @@ -15,7 +15,6 @@ import ( "github.com/rs/zerolog/log" "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" "github.com/uyuni-project/uyuni-tools/shared" - "github.com/uyuni-project/uyuni-tools/shared/kubernetes" . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/utils" ) @@ -56,12 +55,10 @@ func GeneratePgsqlVersionUpgradeScript( scriptDir string, oldPgVersion string, newPgVersion string, - kubernetes bool, ) (string, error) { data := templates.PostgreSQLVersionUpgradeTemplateData{ OldVersion: oldPgVersion, NewVersion: newPgVersion, - Kubernetes: kubernetes, } scriptName := "pgsqlVersionUpgrade.sh" @@ -137,118 +134,70 @@ func GenerateMigrationScript(sourceFqdn string, user string, kubernetes bool, pr return scriptDir, cleaner, nil } -// RunningImage returns the image running in the current system. -func RunningImage(cnx *shared.Connection) (string, error) { - command, err := cnx.GetCommand() - - switch command { - case "podman": - args := []string{"ps", "--format", "{{.Image}}", "--noheading"} - image, err := utils.RunCmdOutput(zerolog.DebugLevel, "podman", args...) - if err != nil { - return "", err - } - return strings.Trim(string(image), "\n"), nil - - case "kubectl": - - // FIXME this will work until containers 0 is uyuni. Then jsonpath should be something like - // {.items[0].spec.containers[?(@.name=="` + containerName + `")].image but there are problems - // using RunCmdOutput with an arguments with round brackets - args := []string{"get", "pods", kubernetes.ServerFilter, "-o", "jsonpath={.items[0].spec.containers[0].image}"} - image, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", args...) - - log.Info().Msgf(L("Image is: %s"), image) - if err != nil { - return "", err - } - return strings.Trim(string(image), "\n"), nil - } - - return command, err -} - // SanityCheck verifies if an upgrade can be run. -func SanityCheck(cnx *shared.Connection, inspectedValues *utils.ServerInspectData, serverImage string) error { - isUyuni, err := isUyuni(cnx) - if err != nil { - return utils.Errorf(err, L("cannot check server release")) - } - isUyuniImage := inspectedValues.UyuniRelease != "" - isSumaImage := inspectedValues.SuseManagerRelease != "" - - if isUyuni && isSumaImage { - return fmt.Errorf( - L("currently SUSE Manager %s is installed, instead the image is Uyuni. Upgrade is not supported"), - inspectedValues.SuseManagerRelease, - ) - } - - if !isUyuni && isUyuniImage { - return fmt.Errorf( - L("currently Uyuni %s is installed, instead the image is SUSE Manager. Upgrade is not supported"), - inspectedValues.UyuniRelease, - ) - } - - if isUyuni { - cnxArgs := []string{"s/Uyuni release //g", "/etc/uyuni-release"} - currentUyuniRelease, err := cnx.Exec("sed", cnxArgs...) - if err != nil { - return utils.Errorf(err, L("failed to read current uyuni release")) - } - log.Debug().Msgf("Current release is %s", string(currentUyuniRelease)) - if !isUyuniImage { - return fmt.Errorf(L("cannot fetch release from image %s"), serverImage) - } - log.Debug().Msgf("Image %s is %s", serverImage, inspectedValues.UyuniRelease) - if utils.CompareVersion(inspectedValues.UyuniRelease, string(currentUyuniRelease)) < 0 { +func SanityCheck( + runningValues *utils.ServerInspectData, + inspectedValues *utils.ServerInspectData, + serverImage string, +) error { + // Skip the uyuni / SUSE Manager release checks if the runningValues is nil. 
+ if runningValues != nil { + isUyuni := runningValues.UyuniRelease != "" + isUyuniImage := inspectedValues.UyuniRelease != "" + isSumaImage := inspectedValues.SuseManagerRelease != "" + + if isUyuni && isSumaImage { return fmt.Errorf( - L("cannot downgrade from version %[1]s to %[2]s"), - string(currentUyuniRelease), inspectedValues.UyuniRelease, + L("currently SUSE Manager %s is installed, instead the image is Uyuni. Upgrade is not supported"), + inspectedValues.SuseManagerRelease, ) } - } else { - bCurrentSuseManagerRelease, err := cnx.Exec("sed", "s/.*(\\([0-9.]*\\)).*/\\1/g", "/etc/susemanager-release") - currentSuseManagerRelease := strings.TrimSuffix(string(bCurrentSuseManagerRelease), "\n") - if err != nil { - return utils.Errorf(err, L("failed to read current susemanager release")) - } - log.Debug().Msgf("Current release is %s", currentSuseManagerRelease) - if !isSumaImage { - return fmt.Errorf(L("cannot fetch release from image %s"), serverImage) - } - log.Debug().Msgf("Image %s is %s", serverImage, inspectedValues.SuseManagerRelease) - if utils.CompareVersion(inspectedValues.SuseManagerRelease, currentSuseManagerRelease) < 0 { + + if !isUyuni && isUyuniImage { return fmt.Errorf( - L("cannot downgrade from version %[1]s to %[2]s"), - currentSuseManagerRelease, inspectedValues.SuseManagerRelease, + L("currently Uyuni %s is installed, instead the image is SUSE Manager. Upgrade is not supported"), + inspectedValues.UyuniRelease, ) } + + if isUyuni { + currentUyuniRelease := runningValues.UyuniRelease + log.Debug().Msgf("Current release is %s", string(currentUyuniRelease)) + if !isUyuniImage { + return fmt.Errorf(L("cannot fetch release from image %s"), serverImage) + } + log.Debug().Msgf("Image %s is %s", serverImage, inspectedValues.UyuniRelease) + if utils.CompareVersion(inspectedValues.UyuniRelease, string(currentUyuniRelease)) < 0 { + return fmt.Errorf( + L("cannot downgrade from version %[1]s to %[2]s"), + string(currentUyuniRelease), inspectedValues.UyuniRelease, + ) + } + } else { + currentSuseManagerRelease := runningValues.SuseManagerRelease + log.Debug().Msgf("Current release is %s", currentSuseManagerRelease) + if !isSumaImage { + return fmt.Errorf(L("cannot fetch release from image %s"), serverImage) + } + log.Debug().Msgf("Image %s is %s", serverImage, inspectedValues.SuseManagerRelease) + if utils.CompareVersion(inspectedValues.SuseManagerRelease, currentSuseManagerRelease) < 0 { + return fmt.Errorf( + L("cannot downgrade from version %[1]s to %[2]s"), + currentSuseManagerRelease, inspectedValues.SuseManagerRelease, + ) + } + } } + // Perform PostgreSQL version checks. if inspectedValues.ImagePgVersion == "" { - return fmt.Errorf(L("cannot fetch postgresql version from %s"), serverImage) + return fmt.Errorf(L("cannot fetch PostgreSQL version from %s"), serverImage) } log.Debug().Msgf("Image %s has PostgreSQL %s", serverImage, inspectedValues.ImagePgVersion) if inspectedValues.CurrentPgVersion == "" { - return errors.New(L("posgresql is not installed in the current deployment")) + return errors.New(L("PostgreSQL is not installed in the current deployment")) } log.Debug().Msgf("Current deployment has PostgreSQL %s", inspectedValues.CurrentPgVersion) return nil } - -func isUyuni(cnx *shared.Connection) (bool, error) { - cnxArgs := []string{"/etc/uyuni-release"} - _, err := cnx.Exec("cat", cnxArgs...) - if err != nil { - cnxArgs := []string{"/etc/susemanager-release"} - _, err := cnx.Exec("cat", cnxArgs...) 
- if err != nil { - return false, errors.New(L("cannot find either /etc/uyuni-release or /etc/susemanagere-release")) - } - return false, nil - } - return true, nil -} diff --git a/mgradm/shared/utils/exec_test.go b/mgradm/shared/utils/exec_test.go new file mode 100644 index 000000000..b54f431d6 --- /dev/null +++ b/mgradm/shared/utils/exec_test.go @@ -0,0 +1,82 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "fmt" + "strings" + "testing" + + "github.com/uyuni-project/uyuni-tools/shared/testutils" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +func TestSanityCheck(t *testing.T) { + type dataType struct { + oldUyuniRelease string + oldSumaRelease string + oldPgsqlVersion string + newUyuniRelease string + newSumaRelease string + newPgsqlVersion string + errorPart string + } + data := []dataType{ + {"2024.07", "", "16", "2024.13", "", "17", ""}, + {"", "5.0.1", "16", "", "5.1.0", "17", ""}, + { + "2024.13", "", "17", "2024.07", "", "16", + "cannot downgrade", + }, + { + "", "5.1.0", "17", "", "5.0.1", "16", + "cannot downgrade", + }, + { + "2024.07", "", "16", "", "5.1.0", "17", + "Upgrade is not supported", + }, + { + "", "5.1.0", "17", "2024.07", "", "16", + "Upgrade is not supported", + }, + { + "2024.07", "", "16", "2024.13", "", "", + "cannot fetch PostgreSQL", + }, + { + "2024.07", "", "", "2024.13", "", "17", + "PostgreSQL is not installed", + }, + } + + for i, test := range data { + runningValues := utils.ServerInspectData{ + UyuniRelease: test.oldUyuniRelease, + SuseManagerRelease: test.oldSumaRelease, + } + newValues := utils.ServerInspectData{ + CommonInspectData: utils.CommonInspectData{ + CurrentPgVersion: test.oldPgsqlVersion, + ImagePgVersion: test.newPgsqlVersion, + }, + UyuniRelease: test.newUyuniRelease, + SuseManagerRelease: test.newSumaRelease, + } + err := SanityCheck(&runningValues, &newValues, "path/to/image") + if test.errorPart != "" { + if err != nil { + testutils.AssertTrue( + t, fmt.Sprintf("test %d: Unexpected error message: %s", i+1, err), + strings.Contains(err.Error(), test.errorPart), + ) + } else { + t.Errorf("test %d: expected an error, got none", i+1) + } + } else { + testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected error", i+1), nil, err) + } + } +} diff --git a/mgradm/shared/utils/flags.go b/mgradm/shared/utils/flags.go new file mode 100644 index 000000000..dee0770a0 --- /dev/null +++ b/mgradm/shared/utils/flags.go @@ -0,0 +1,138 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "fmt" + "net/mail" + "regexp" + "strings" + + "github.com/spf13/cobra" + apiTypes "github.com/uyuni-project/uyuni-tools/shared/api/types" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +// ServerFlags is a structure hosting the parameters for installation, migration and upgrade. +type ServerFlags struct { + Image types.ImageFlags `mapstructure:",squash"` + Coco CocoFlags + Mirror string + HubXmlrpc HubXmlrpcFlags + Migration MigrationFlags `mapstructure:",squash"` + Installation InstallationFlags `mapstructure:",squash"` + // DBUpgradeImage is the image to use to perform the database upgrade. + DBUpgradeImage types.ImageFlags `mapstructure:"dbupgrade"` +} + +// MigrationFlags contains the parameters that are used only for migration. 
+type MigrationFlags struct {
+	// Prepare defines whether to run the full migration or just the data synchronization.
+	Prepare bool
+	// User is the username to use to connect to the source server in a migration.
+	User string
+}
+
+// InstallationFlags contains the parameters that are used only for the installation of a new server.
+type InstallationFlags struct {
+	TZ           string
+	Email        string
+	EmailFrom    string
+	IssParent    string
+	Tftp         bool
+	DB           DBFlags
+	ReportDB     DBFlags
+	SSL          InstallSSLFlags
+	SCC          types.SCCCredentials
+	Debug        DebugFlags
+	Admin        apiTypes.User
+	Organization string
+}
+
+// CheckParameters checks parameters for install command.
+func (flags *InstallationFlags) CheckParameters(cmd *cobra.Command, command string) {
+	if flags.DB.Password == "" {
+		flags.DB.Password = utils.GetRandomBase64(30)
+	}
+
+	if flags.ReportDB.Password == "" {
+		flags.ReportDB.Password = utils.GetRandomBase64(30)
+	}
+
+	// Make sure we have all the required 3rd party flags or none
+	flags.SSL.CheckParameters()
+
+	// Since we use cert-manager for self-signed certificates on kubernetes we don't need a password for it
+	if !flags.SSL.UseExisting() && command == "podman" {
+		utils.AskPasswordIfMissing(&flags.SSL.Password, cmd.Flag("ssl-password").Usage, 0, 0)
+	}
+
+	// Use the host timezone if the user didn't define one
+	if flags.TZ == "" {
+		flags.TZ = utils.GetLocalTimezone()
+	}
+
+	utils.AskIfMissing(&flags.Email, cmd.Flag("email").Usage, 1, 128, emailChecker)
+	utils.AskIfMissing(&flags.EmailFrom, cmd.Flag("emailfrom").Usage, 0, 0, emailChecker)
+
+	utils.AskIfMissing(&flags.Admin.Login, cmd.Flag("admin-login").Usage, 1, 64, idChecker)
+	utils.AskPasswordIfMissing(&flags.Admin.Password, cmd.Flag("admin-password").Usage, 5, 48)
+	utils.AskIfMissing(&flags.Organization, cmd.Flag("organization").Usage, 3, 128, nil)
+
+	flags.SSL.Email = flags.Email
+	flags.Admin.Email = flags.Email
+}
+
+// DBFlags can store all values required to connect to a database.
+type DBFlags struct {
+	Host     string
+	Name     string
+	Port     int
+	User     string
+	Password string
+	Protocol string
+	Provider string
+	Admin    struct {
+		User     string
+		Password string
+	}
+}
+
+// DebugFlags contains information about enabled/disabled debug.
+type DebugFlags struct {
+	Java bool
+}
+
+// idChecker verifies that the value is a valid identifier.
+func idChecker(value string) bool {
+	r := regexp.MustCompile(`^([[:alnum:]]|[._-])+$`)
+	if r.MatchString(value) {
+		return true
+	}
+	fmt.Println(L("Can only contain letters, digits . _ and -"))
+	return false
+}
+
+// emailChecker verifies that the value is a valid email address.
+func emailChecker(value string) bool {
+	address, err := mail.ParseAddress(value)
+	if err != nil || address.Name != "" || strings.ContainsAny(value, "<>") {
+		fmt.Println(L("Not a valid email address"))
+		return false
+	}
+	return true
+}
+
+// SSHFlags is the structure holding the SSH configuration to use to connect to the source server to migrate.
+type SSHFlags struct {
+	Key struct {
+		Public  string
+		Private string
+	}
+	Knownhosts string
+	Config     string
+}
diff --git a/mgradm/cmd/install/shared/flags_test.go b/mgradm/shared/utils/flags_test.go
similarity index 98%
rename from mgradm/cmd/install/shared/flags_test.go
rename to mgradm/shared/utils/flags_test.go
index 2ee7d6d30..9f43f5c9b 100644
--- a/mgradm/cmd/install/shared/flags_test.go
+++ b/mgradm/shared/utils/flags_test.go
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: Apache-2.0
 
-package shared
+package utils
 
 import "testing"
 
diff --git a/mgradm/shared/utils/types.go b/mgradm/shared/utils/types.go
index b8c71bbea..77abb1cd2 100644
--- a/mgradm/shared/utils/types.go
+++ b/mgradm/shared/utils/types.go
@@ -15,8 +15,8 @@ type InstallSSLFlags struct {
 	Server types.SSLPair
 }
 
-// HelmFlags stores Uyuni and Cert Manager Helm information.
-type HelmFlags struct {
+// KubernetesFlags stores Uyuni and Cert Manager kubernetes specific parameters.
+type KubernetesFlags struct {
 	Uyuni       types.ChartFlags
 	CertManager types.ChartFlags
 }
@@ -34,3 +34,28 @@ type CocoFlags struct {
 	Image     types.ImageFlags `mapstructure:",squash"`
 	IsChanged bool
 }
+
+// VolumesFlags stores the persistent volume claims configuration.
+type VolumesFlags struct {
+	// Class is the default storage class for all the persistent volume claims.
+	Class string
+	// Database is the configuration of the var-pgsql volume.
+	Database VolumeFlags
+	// Packages is the configuration of the var-spacewalk volume containing the synchronized repositories.
+	Packages VolumeFlags
+	// Www is the configuration of the srv-www volume containing the images and distributions.
+	Www VolumeFlags
+	// Cache is the configuration of the var-cache volume.
+	Cache VolumeFlags
+	// Mirror is the PersistentVolume name to use in case of a mirror setup.
+	// An empty value means no mirror will be used.
+	Mirror string
+}
+
+// VolumeFlags is the configuration of one volume.
+type VolumeFlags struct {
+	// Size is the requested size of the volume using kubernetes values like '100Gi'.
+	Size string
+	// Class is the storage class of the volume.
+ Class string +} diff --git a/mgrpxy/cmd/install/kubernetes/utils.go b/mgrpxy/cmd/install/kubernetes/utils.go index 3d966ffc4..c65332986 100644 --- a/mgrpxy/cmd/install/kubernetes/utils.go +++ b/mgrpxy/cmd/install/kubernetes/utils.go @@ -49,15 +49,20 @@ func installForKubernetes(_ *types.GlobalFlags, // If installing on k3s, install the traefik helm config in manifests isK3s := clusterInfos.IsK3s() IsRke2 := clusterInfos.IsRke2() + ports := shared_utils.GetProxyPorts() if isK3s { - shared_kubernetes.InstallK3sTraefikConfig(shared_utils.ProxyTCPPorts, shared_utils.UDPPorts) + err = shared_kubernetes.InstallK3sTraefikConfig(ports) } else if IsRke2 { - shared_kubernetes.InstallRke2NginxConfig(shared_utils.ProxyTCPPorts, shared_utils.UDPPorts, - flags.Helm.Proxy.Namespace) + err = shared_kubernetes.InstallRke2NginxConfig(ports, flags.Helm.Proxy.Namespace) + } + if err != nil { + return err } helmArgs := []string{"--set", "ingress=" + clusterInfos.Ingress} - helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Proxy.Namespace, &flags.SCC) + helmArgs, err = shared_kubernetes.AddSCCSecret( + helmArgs, flags.Helm.Proxy.Namespace, &flags.SCC, shared_kubernetes.ProxyApp, + ) if err != nil { return err } diff --git a/mgrpxy/shared/kubernetes/deploy.go b/mgrpxy/shared/kubernetes/deploy.go index 4118932b6..2a037bcf4 100644 --- a/mgrpxy/shared/kubernetes/deploy.go +++ b/mgrpxy/shared/kubernetes/deploy.go @@ -86,7 +86,7 @@ func Deploy(imageFlags *utils.ProxyImageFlags, helmFlags *HelmFlags, configDir s "--set", "images.proxy-tftpd="+imageFlags.GetContainerImage("tftpd"), "--set", "repository="+imageFlags.Registry, "--set", "version="+imageFlags.Tag, - "--set", "pullPolicy="+kubernetes.GetPullPolicy(imageFlags.PullPolicy)) + "--set", "pullPolicy="+string(kubernetes.GetPullPolicy(imageFlags.PullPolicy))) helmParams = append(helmParams, helmArgs...) @@ -97,7 +97,7 @@ func Deploy(imageFlags *utils.ProxyImageFlags, helmFlags *HelmFlags, configDir s } // Wait for the pod to be started - return kubernetes.WaitForDeployment(helmFlags.Proxy.Namespace, helmAppName, "uyuni-proxy") + return kubernetes.WaitForDeployments(helmFlags.Proxy.Namespace, helmAppName) } func getSSHYaml(directory string) (string, error) { diff --git a/mgrpxy/shared/podman/podman.go b/mgrpxy/shared/podman/podman.go index d64c39951..56c0f6108 100644 --- a/mgrpxy/shared/podman/podman.go +++ b/mgrpxy/shared/podman/podman.go @@ -53,7 +53,7 @@ func GenerateSystemdService( ports := []types.PortMap{} ports = append(ports, shared_utils.ProxyTCPPorts...) ports = append(ports, shared_utils.ProxyPodmanPorts...) - ports = append(ports, shared_utils.UDPPorts...) + ports = append(ports, shared_utils.TftpPorts...) 
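
For context on the `NewPortMap` calls above, which now take the owning service as their first argument: the sketch below shows a `PortMap` shape consistent with the `types.PortMap` literal used in traefik_test.go. The constructor body is an assumption, not the repository's code.

```go
// Hedged sketch of the PortMap shape threaded through the podman and
// kubernetes code in this PR. Field names match the types.PortMap literal
// in traefik_test.go; the constructor body is an assumption.
package types

// PortMap associates an exposed port with the service that owns it.
type PortMap struct {
	Service  string // owning service, e.g. utils.WebServiceName
	Name     string // port name, e.g. "https"
	Exposed  int    // port exposed on the host or service
	Port     int    // port inside the container
	Protocol string // "tcp" (default) or "udp"
}

// NewPortMap mirrors the new four-argument constructor used above, as in
// NewPortMap(utils.WebServiceName, "https", 443, 443).
func NewPortMap(service string, name string, exposed int, port int) PortMap {
	return PortMap{Service: service, Name: name, Exposed: exposed, Port: port}
}
```
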
 	// Pod
 	dataPod := templates.PodTemplateData{
diff --git a/shared/connection.go b/shared/connection.go
index b28e6bd2a..c55939812 100644
--- a/shared/connection.go
+++ b/shared/connection.go
@@ -6,7 +6,6 @@ package shared
 
 import (
 	"bytes"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"os"
@@ -73,8 +72,8 @@ func (c *Connection) GetCommand() (string, error) {
 	if err == nil {
 		hasKubectl = true
 		if out, err := utils.RunCmdOutput(
-			zerolog.DebugLevel, "kubectl", "--request-timeout=30s", "get", "pod", c.kubernetesFilter, "-A",
-			"-o=jsonpath={.items[*].metadata.name}",
+			zerolog.DebugLevel, "kubectl", "--request-timeout=30s", "get", "deploy", c.kubernetesFilter,
+			"-A", "-o=jsonpath={.items[*].metadata.name}",
 		); err != nil {
 			log.Info().Msg(L("kubectl not configured to connect to a cluster, ignoring"))
 		} else if len(bytes.TrimSpace(out)) != 0 {
@@ -124,7 +123,7 @@ func (c *Connection) GetCommand() (string, error) {
 // GetNamespace finds the namespace of the running pod
 // appName is the name of the application to look for, if not provided it will be guessed based on the filter.
 // filters are additional filters to use to find the pod.
-func (c *Connection) GetNamespace(appName string, filters ...string) (string, error) {
+func (c *Connection) GetNamespace(appName string) (string, error) {
 	// skip if namespace is already set
 	if c.namespace != "" {
 		return c.namespace, nil
@@ -154,23 +153,17 @@ func (c *Connection) GetNamespace(appName string) (string, er
 		}
 	}
 
-	// retrieving namespace from helm release
-	clusterInfos, clusterInfosErr := kubernetes.CheckCluster()
-	if clusterInfosErr != nil {
-		return "", utils.Errorf(clusterInfosErr, L("failed to discover the cluster type"))
-	}
-
-	kubeconfig := clusterInfos.GetKubeconfig()
-	if !kubernetes.HasHelmRelease(appName, kubeconfig) {
-		return "", fmt.Errorf(L("no %s helm release installed on the cluster"), appName)
-	}
-
-	var namespaceErr error
-	c.namespace, namespaceErr = extractNamespaceFromConfig(appName, kubeconfig, filters...)
-	if namespaceErr != nil {
-		return "", utils.Errorf(namespaceErr, L("failed to find the %s deployment namespace"), appName)
+	// retrieving namespace from the first installed object we can find matching the filter.
+	// This assumes that the server or proxy has been installed only in one namespace
+	// with the current cluster credentials.
+	out, err := utils.RunCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "all", "-A", c.kubernetesFilter,
+		"-o", "jsonpath={.items[*].metadata.namespace}",
+	)
+	if err != nil {
+		return "", utils.Errorf(err, L("failed to guess namespace"))
 	}
-
+	c.namespace = strings.TrimSpace(strings.Split(string(out), " ")[0])
 	return c.namespace, nil
 }
@@ -384,6 +377,11 @@ func (c *Connection) TestExistenceInPod(dstpath string) bool {
 	case "podman":
 		commandArgs = append(commandArgs, "test", "-e", dstpath)
 	case "kubectl":
+		namespace, err := c.GetNamespace("")
+		if err != nil {
+			log.Fatal().Err(err).Msg(L("failed to detect the namespace"))
+		}
+		commandArgs = append(commandArgs, "-n", namespace)
 		commandArgs = append(commandArgs, "-c", "uyuni", "test", "-e", dstpath)
 	default:
 		log.Fatal().Msgf(L("unknown container kind: %s"), command)
@@ -524,33 +522,3 @@ func (c *Connection) RunSupportConfig(tmpDir string) ([]string, error) {
 	}
 	return files, nil
 }
-
-// extractNamespaceFromConfig extracts the namespace of a given application
-// from the Helm release information.
-func extractNamespaceFromConfig(appName string, kubeconfig string, filters ...string) (string, error) {
-	args := []string{}
-	if kubeconfig != "" {
-		args = append(args, "--kubeconfig", kubeconfig)
-	}
-	args = append(args, "list", "-aA", "-f", appName, "-o", "json")
-	args = append(args, filters...)
-
-	out, err := utils.RunCmdOutput(zerolog.DebugLevel, "helm", args...)
-	if err != nil {
-		return "", utils.Errorf(err, L("failed to detect %s's namespace using helm"), appName)
-	}
-
-	var data []releaseInfo
-	if err = json.Unmarshal(out, &data); err != nil {
-		return "", utils.Errorf(err, L("helm provided an invalid JSON output"))
-	}
-
-	if len(data) == 1 {
-		return data[0].Namespace, nil
-	}
-	return "", errors.New(L("found no or more than one deployment"))
-}
-
-type releaseInfo struct {
-	Namespace string `mapstructure:"namespace"`
-}
diff --git a/shared/kubernetes/apply.go b/shared/kubernetes/apply.go
new file mode 100644
index 000000000..e36bb6666
--- /dev/null
+++ b/shared/kubernetes/apply.go
@@ -0,0 +1,62 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"os"
+	"path"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/printers"
+)
+
+// Apply runs kubectl apply for the provided objects.
+//
+// The message should be a user-friendly localized message to provide in case of error.
+func Apply[T runtime.Object](objects []T, message string) error {
+	tempDir, cleaner, err := utils.TempDir()
+	if err != nil {
+		return err
+	}
+	defer cleaner()
+
+	// Serialize the objects to a temporary file and apply them in one kubectl call
+	definitionPath := path.Join(tempDir, "definition.yaml")
+	if err := YamlFile(objects, definitionPath); err != nil {
+		return err
+	}
+
+	if err := utils.RunCmdStdMapping(zerolog.DebugLevel, "kubectl", "apply", "-f", definitionPath); err != nil {
+		return utils.Errorf(err, message)
+	}
+	return nil
+}
+
+// YamlFile generates a YAML file from a list of kubernetes objects.
+func YamlFile[T runtime.Object](objects []T, path string) error {
+	printer := printers.YAMLPrinter{}
+	file, err := os.Create(path)
+	if err != nil {
+		return utils.Errorf(err, L("failed to create %s YAML file"), path)
+	}
+	defer func() {
+		if err := file.Close(); err != nil {
+			log.Error().Err(err).Msgf(L("failed to close %s YAML file"), path)
+		}
+	}()
+
+	for _, obj := range objects {
+		err = printer.PrintObj(obj, file)
+		if err != nil {
+			return utils.Errorf(err, L("failed to write the object to %s"), path)
+		}
+	}
+
+	return nil
+}
diff --git a/shared/kubernetes/converters.go b/shared/kubernetes/converters.go
new file mode 100644
index 000000000..946931b77
--- /dev/null
+++ b/shared/kubernetes/converters.go
@@ -0,0 +1,43 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	core "k8s.io/api/core/v1"
+)
+
+// ConvertVolumeMounts converts the internal volume mounts into Kubernetes' ones.
+func ConvertVolumeMounts(mounts []types.VolumeMount) []core.VolumeMount {
+	res := []core.VolumeMount{}
+
+	for _, mount := range mounts {
+		converted := core.VolumeMount{
+			Name:      mount.Name,
+			MountPath: mount.MountPath,
+		}
+		res = append(res, converted)
+	}
+
+	return res
+}
+
+// ConvertPortMaps converts the internal port maps to Kubernetes ContainerPorts.
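+//
+// Rough mapping sketch (field values assumed for illustration): a
+// types.PortMap{Name: "https", Exposed: 443, Protocol: "tcp"} becomes a
+// core.ContainerPort{ContainerPort: 443, Protocol: core.ProtocolTCP}.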
+func ConvertPortMaps(ports []types.PortMap) []core.ContainerPort { + res := []core.ContainerPort{} + + for _, port := range ports { + protocol := core.ProtocolTCP + if port.Protocol == "udp" { + protocol = core.ProtocolUDP + } + converted := core.ContainerPort{ + ContainerPort: int32(port.Exposed), + Protocol: protocol, + } + res = append(res, converted) + } + return res +} diff --git a/shared/kubernetes/deploy.go b/shared/kubernetes/deploy.go new file mode 100644 index 000000000..a69489942 --- /dev/null +++ b/shared/kubernetes/deploy.go @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "strconv" + "strings" + + "github.com/rs/zerolog" +) + +// HasDeployment returns true when a deployment matching the kubectl get filter is existing in the namespace. +func HasDeployment(namespace string, filter string) bool { + out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "deploy", "-n", namespace, filter, "-o", "name") + if err == nil && strings.TrimSpace(string(out)) != "" { + return true + } + return false +} + +// GetReplicas return the number of replicas of a deployment. +// +// If no such deployment exists, 0 will be returned as if there was a deployment scaled down to 0. +func GetReplicas(namespace string, name string) int { + out, err := runCmdOutput(zerolog.DebugLevel, + "kubectl", "get", "deploy", "-n", namespace, name, "-o", "jsonpath={.status.replicas}", + ) + if err != nil { + return 0 + } + replicas, err := strconv.Atoi(strings.TrimSpace(string(out))) + if err != nil { + return 0 + } + return replicas +} diff --git a/shared/kubernetes/deploy_test.go b/shared/kubernetes/deploy_test.go new file mode 100644 index 000000000..8e925b2ea --- /dev/null +++ b/shared/kubernetes/deploy_test.go @@ -0,0 +1,58 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "errors" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/uyuni-project/uyuni-tools/shared/testutils" +) + +func TestHasDeployment(t *testing.T) { + type dataType struct { + out string + err error + expected bool + } + + data := []dataType{ + {"deployment.apps/traefik\n", nil, true}, + {"\n", nil, false}, + {"Some error", errors.New("Some error"), false}, + } + + for i, test := range data { + runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) { + return []byte(test.out), test.err + } + testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i+1), test.expected, + HasDeployment("kube-system", "-lapp.kubernetes.io/name=traefik"), + ) + } +} + +func TestGetReplicas(t *testing.T) { + type dataType struct { + out string + err error + expected int + } + data := []dataType{ + {"2\n", nil, 2}, + {"no such deploy\n", errors.New("No such deploy"), 0}, + {"invalid output\n", nil, 0}, + } + + for i, test := range data { + runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) { + return []byte(test.out), test.err + } + testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i+1), + test.expected, GetReplicas("uyuni", "uyuni-hub-api")) + } +} diff --git a/shared/kubernetes/inspect.go b/shared/kubernetes/inspect.go new file mode 100644 index 000000000..b168faa50 --- /dev/null +++ b/shared/kubernetes/inspect.go @@ -0,0 +1,52 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +// InspectServer check values on a given image and deploy. +func InspectServer( + namespace string, + serverImage string, + pullPolicy string, + pullSecret string, +) (*utils.ServerInspectData, error) { + podName := "uyuni-image-inspector" + + tempDir, cleaner, err := utils.TempDir() + if err != nil { + return nil, err + } + defer cleaner() + inspector := utils.NewServerInspector(tempDir) + // We need the inspector to write to the pod's logs instead of a file + inspector.DataPath = "/dev/stdout" + script, err := inspector.GenerateScriptString() + if err != nil { + return nil, err + } + + out, err := RunPodLogs( + namespace, podName, serverImage, pullPolicy, pullSecret, + []types.VolumeMount{utils.EtcRhnVolumeMount, utils.VarPgsqlVolumeMount}, + "sh", "-c", script, + ) + if err != nil { + return nil, err + } + + // Parse the data + inspectedData, err := utils.ReadInspectDataString[utils.ServerInspectData]([]byte(out)) + if err != nil { + return nil, utils.Errorf(err, L("failed to parse the inspected data")) + } + return inspectedData, nil +} diff --git a/shared/kubernetes/job.go b/shared/kubernetes/job.go new file mode 100644 index 000000000..05a3a73e9 --- /dev/null +++ b/shared/kubernetes/job.go @@ -0,0 +1,78 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "strings" + "time" + + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetScriptJob prepares the definition of a kubernetes job running a shell script from a template. +// The name is suffixed with a time stamp to avoid collisions. 
+func GetScriptJob( + namespace string, + name string, + image string, + pullPolicy string, + pullSecret string, + mounts []types.VolumeMount, + template utils.Template, +) (*batch.Job, error) { + var maxFailures int32 + + // Convert our mounts to Kubernetes objects + volumeMounts := ConvertVolumeMounts(mounts) + volumes := CreateVolumes(mounts) + + // Prepare the script + scriptBuilder := new(strings.Builder) + if err := template.Render(scriptBuilder); err != nil { + return nil, err + } + + timestamp := time.Now().Format("20060102150405") + + // Create the job object running the script wrapped as a sh command + job := batch.Job{ + TypeMeta: meta.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: name + "-" + timestamp, + Namespace: namespace, + Labels: GetLabels(ServerApp, ""), + }, + Spec: batch.JobSpec{ + Template: core.PodTemplateSpec{ + Spec: core.PodSpec{ + Containers: []core.Container{ + { + Name: "runner", + Image: image, + ImagePullPolicy: GetPullPolicy(pullPolicy), + Command: []string{"sh", "-c", scriptBuilder.String()}, + VolumeMounts: volumeMounts, + }, + }, + Volumes: volumes, + RestartPolicy: core.RestartPolicyNever, + }, + }, + BackoffLimit: &maxFailures, + }, + } + + if pullSecret != "" { + job.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}} + } + + return &job, nil +} diff --git a/shared/kubernetes/k3s.go b/shared/kubernetes/k3s.go index c394415b3..c1ff3a480 100644 --- a/shared/kubernetes/k3s.go +++ b/shared/kubernetes/k3s.go @@ -5,10 +5,11 @@ package kubernetes import ( + "errors" "fmt" "os" - "os/exec" - "path" + "regexp" + "strconv" "time" "github.com/rs/zerolog" @@ -18,36 +19,76 @@ import ( "github.com/uyuni-project/uyuni-tools/shared/utils" ) -const k3sTraefikConfigPath = "/var/lib/rancher/k3s/server/manifests/k3s-traefik-config.yaml" +const k3sTraefikConfigPath = "/var/lib/rancher/k3s/server/manifests/uyuni-traefik-config.yaml" +const k3sTraefikMainConfigPath = "/var/lib/rancher/k3s/server/manifests/traefik.yaml" // InstallK3sTraefikConfig install K3s Traefik configuration. -func InstallK3sTraefikConfig(tcpPorts []types.PortMap, udpPorts []types.PortMap) { +func InstallK3sTraefikConfig(ports []types.PortMap) error { log.Info().Msg(L("Installing K3s Traefik configuration")) + endpoints := []types.PortMap{} + for _, port := range ports { + port.Name = GetTraefikEndpointName(port) + endpoints = append(endpoints, port) + } + version, err := getTraefikChartMajorVersion() + if err != nil { + return err + } + data := K3sTraefikConfigTemplateData{ - TCPPorts: tcpPorts, - UDPPorts: udpPorts, + Ports: endpoints, + ExposeBoolean: version < 27, } - if err := utils.WriteTemplateToFile(data, k3sTraefikConfigPath, 0600, false); err != nil { - log.Fatal().Err(err).Msgf(L("Failed to write K3s Traefik configuration")) + if err := utils.WriteTemplateToFile(data, k3sTraefikConfigPath, 0600, true); err != nil { + return utils.Errorf(err, L("Failed to write Traefik configuration")) } // Wait for traefik to be back - waitForTraefik() + return waitForTraefik() +} + +// GetTraefikEndpointName computes the traefik endpoint name from the service and port names. +// Those names should be less than 15 characters long. 
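+//
+// For example (values assumed), a types.PortMap with Service "uyuni-proxy-udp"
+// and Name "tftp" yields "uyuni-tftp", which fits the 15-character limit.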
+func GetTraefikEndpointName(portmap types.PortMap) string { + svc := shortenName(portmap.Service) + name := shortenName(portmap.Name) + if name != svc { + return fmt.Sprintf("%s-%s", svc, name) + } + return name } -func waitForTraefik() { +func shortenName(name string) string { + shorteningMap := map[string]string{ + "taskomatic": "tasko", + "metrics": "mtrx", + "postgresql": "pgsql", + "exporter": "xport", + "uyuni-proxy-tcp": "uyuni", + "uyuni-proxy-udp": "uyuni", + } + short := shorteningMap[name] + if short == "" { + short = name + } + return short +} + +func waitForTraefik() error { log.Info().Msg(L("Waiting for Traefik to be reloaded")) - for i := 0; i < 60; i++ { + for i := 0; i < 120; i++ { out, err := utils.RunCmdOutput(zerolog.TraceLevel, "kubectl", "get", "job", "-n", "kube-system", "-o", "jsonpath={.status.completionTime}", "helm-install-traefik") if err == nil { completionTime, err := time.Parse(time.RFC3339, string(out)) if err == nil && time.Since(completionTime).Seconds() < 60 { - break + return nil } } + time.Sleep(1 * time.Second) } + return errors.New(L("Failed to reload Traefik")) } // UninstallK3sTraefikConfig uninstall K3s Traefik configuration. @@ -60,7 +101,9 @@ func UninstallK3sTraefikConfig(dryRun bool) { log.Error().Err(err).Msg(L("failed to write empty traefik configuration")) } else { // Wait for traefik to be back - waitForTraefik() + if err := waitForTraefik(); err != nil { + log.Error().Err(err).Msg(L("failed to uninstall traefik configuration")) + } } } else { log.Info().Msg(L("Would reinstall Traefik without additionnal configuration")) @@ -70,72 +113,23 @@ func UninstallK3sTraefikConfig(dryRun bool) { utils.UninstallFile(k3sTraefikConfigPath, dryRun) } -// InspectKubernetes check values on a given image and deploy. 
-func InspectKubernetes(namespace string, serverImage string, pullPolicy string) (*utils.ServerInspectData, error) {
-	for _, binary := range []string{"kubectl", "helm"} {
-		if _, err := exec.LookPath(binary); err != nil {
-			return nil, fmt.Errorf(L("install %s before running this command"), binary)
-		}
-	}
-
-	scriptDir, cleaner, err := utils.TempDir()
-	if err != nil {
-		return nil, err
-	}
-	defer cleaner()
-
-	inspector := utils.NewServerInspector(scriptDir)
-	if err := inspector.GenerateScript(); err != nil {
-		return nil, err
-	}
-
-	command := path.Join(utils.InspectContainerDirectory, utils.InspectScriptFilename)
-
-	const podName = "inspector"
-
-	// delete pending pod and then check the node, because in presence of more than a pod GetNode return is wrong
-	if err := DeletePod(namespace, podName, ServerFilter); err != nil {
-		return nil, utils.Errorf(err, L("cannot delete %s"), podName)
-	}
-
-	// this is needed because folder with script needs to be mounted
-	nodeName, err := GetNode(namespace, ServerFilter)
+func getTraefikChartMajorVersion() (int, error) {
+	out, err := os.ReadFile(k3sTraefikMainConfigPath)
 	if err != nil {
-		return nil, utils.Errorf(err, L("cannot find node running uyuni"))
+		return 0, utils.Errorf(err, L("failed to read the traefik configuration"))
 	}
-
-	// generate deploy data
-	deployData := types.Deployment{
-		APIVersion: "v1",
-		Spec: &types.Spec{
-			RestartPolicy: "Never",
-			NodeName:      nodeName,
-			Containers: []types.Container{
-				{
-					Name: podName,
-					VolumeMounts: append(utils.PgsqlRequiredVolumeMounts,
-						types.VolumeMount{MountPath: "/var/lib/uyuni-tools", Name: "var-lib-uyuni-tools"}),
-					Image: serverImage,
-				},
-			},
-			Volumes: append(utils.PgsqlRequiredVolumes,
-				types.Volume{Name: "var-lib-uyuni-tools", HostPath: &types.HostPath{Path: scriptDir, Type: "Directory"}}),
-		},
+	matches := regexp.MustCompile(`traefik-([0-9]+)`).FindStringSubmatch(string(out))
+	if matches == nil {
+		return 0, errors.New(L("traefik configuration file doesn't contain the helm chart version"))
 	}
-	// transform deploy data in JSON
-	override, err := GenerateOverrideDeployment(deployData)
-	if err != nil {
-		return nil, err
-	}
-	err = RunPod(namespace, podName, ServerFilter, serverImage, pullPolicy, command, override)
-	if err != nil {
-		return nil, utils.Errorf(err, L("cannot run inspect pod"))
+	if len(matches) != 2 {
+		return 0, errors.New(L("failed to find traefik helm chart version"))
 	}
-	inspectResult, err := inspector.ReadInspectData()
+	majorVersion, err := strconv.Atoi(matches[1])
 	if err != nil {
-		return nil, utils.Errorf(err, L("cannot inspect data"))
+		return 0, utils.Errorf(err, L("failed to parse the traefik helm chart version"))
 	}
-	return inspectResult, err
+	return majorVersion, nil
 }
diff --git a/shared/kubernetes/k3sTraefikTemplate.go b/shared/kubernetes/k3sTraefikTemplate.go
index ac94f6acf..4528d20d8 100644
--- a/shared/kubernetes/k3sTraefikTemplate.go
+++ b/shared/kubernetes/k3sTraefikTemplate.go
@@ -19,26 +19,29 @@ metadata:
 spec:
   valuesContent: |-
     ports:
-{{- range .TCPPorts }}
-      {{ .Name }}:
-        port: {{ .Port }}
-        expose: true
-        exposedPort: {{ .Exposed }}
-        protocol: TCP
-{{- end }}
-{{- range .UDPPorts }}
+{{- range .Ports }}
       {{ .Name }}:
         port: {{ .Port }}
+  {{- if $.ExposeBoolean }}
         expose: true
+  {{- else }}
+        expose:
+          default: true
+  {{- end }}
         exposedPort: {{ .Exposed }}
+  {{- if eq .Protocol "udp" }}
         protocol: UDP
+  {{- else }}
+        protocol: TCP
+  {{- end }}
 {{- end }}
 `
 
 // K3sTraefikConfigTemplateData represents information used to create K3s Traefik helm chart.
type K3sTraefikConfigTemplateData struct { - TCPPorts []types.PortMap - UDPPorts []types.PortMap + Ports []types.PortMap + // Set to true before traefik chart v27 + ExposeBoolean bool } // Render will create the helm chart configuation for K3sTraefik. diff --git a/shared/kubernetes/k3s_test.go b/shared/kubernetes/k3s_test.go new file mode 100644 index 000000000..a7d0ed1a3 --- /dev/null +++ b/shared/kubernetes/k3s_test.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "testing" + + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +// Test that the generated endpoints are valid for traefik. +func Test_GetTraefikEndpointName(t *testing.T) { + ports := utils.GetServerPorts(true) + ports = append(ports, utils.HubXmlrpcPorts...) + ports = append(ports, utils.GetProxyPorts()...) + + for _, port := range ports { + actual := GetTraefikEndpointName(port) + // Traefik would fail if the name is longer than 15 characters + if len(actual) > 15 { + t.Errorf("Traefik endpoint name has more than 15 characters: %s", actual) + } + } +} diff --git a/shared/kubernetes/kubernetes.go b/shared/kubernetes/kubernetes.go index 6a8a6e7d4..168d4665e 100644 --- a/shared/kubernetes/kubernetes.go +++ b/shared/kubernetes/kubernetes.go @@ -8,7 +8,6 @@ import ( "encoding/base64" "fmt" "os" - "path" "strings" "github.com/rs/zerolog" @@ -16,6 +15,9 @@ import ( . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // ClusterInfos represent cluster information. @@ -101,7 +103,7 @@ func Restart(namespace string, app string) error { // Start starts the pod. func Start(namespace string, app string) error { // if something is running, we don't need to set replicas to 1 - if _, err := GetNode(namespace, "-lapp="+app); err != nil { + if _, err := GetNode(namespace, "-l"+AppLabel+"="+app); err != nil { return ReplicasTo(namespace, app, 1) } log.Debug().Msgf("Already running") @@ -154,7 +156,14 @@ func GetSecret(secretName string, filter string) (string, error) { } // createDockerSecret creates a secret of docker type to authenticate registries. -func createDockerSecret(namespace string, name string, registry string, username string, password string) error { +func createDockerSecret( + namespace string, + name string, + registry string, + username string, + password string, + appLabel string, +) error { authString := fmt.Sprintf("%s:%s", username, password) auth := base64.StdEncoding.EncodeToString([]byte(authString)) configjson := fmt.Sprintf( @@ -162,47 +171,51 @@ func createDockerSecret(namespace string, name string, registry string, username registry, username, password, auth, ) - secret := fmt.Sprintf(` -apiVersion: v1 -kind: Secret -type: kubernetes.io/dockerconfigjson -metadata: - namespace: %s - name: %s -data: - .dockerconfigjson: %s -`, namespace, name, base64.StdEncoding.EncodeToString([]byte(configjson))) - - tempDir, cleaner, err := utils.TempDir() - if err != nil { - return err - } - defer cleaner() + secret := core.Secret{ + TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: GetLabels(appLabel, ""), + }, + // It seems serializing this object automatically transforms the secrets to base64. 
+		Data: map[string][]byte{
+			".dockerconfigjson": []byte(configjson),
+		},
+		Type: core.SecretTypeDockerConfigJson,
+	}
+	return Apply([]runtime.Object{&secret}, fmt.Sprintf(L("failed to create the %s docker secret"), name))
+}
 
-	// Run the job
-	definitionPath := path.Join(tempDir, "definition.yaml")
-	if err := os.WriteFile(definitionPath, []byte(secret), 0600); err != nil {
-		return utils.Errorf(err, L("failed to write %s secret definition file"), name)
+// AddSCCSecret creates a secret holding the SCC credentials and adds it to the helm args.
+func AddSCCSecret(helmArgs []string, namespace string, scc *types.SCCCredentials, appLabel string) ([]string, error) {
+	secret, err := GetRegistrySecret(namespace, scc, appLabel)
+	if secret != "" {
+		helmArgs = append(helmArgs, "--set", "registrySecret="+secret)
 	}
+	return helmArgs, err
+}
+
+// GetRegistrySecret creates a docker secret holding the SCC credentials and returns the secret name.
+func GetRegistrySecret(namespace string, scc *types.SCCCredentials, appLabel string) (string, error) {
+	const secretName = "registry-credentials"
 
-	if err := utils.RunCmdStdMapping(zerolog.DebugLevel, "kubectl", "apply", "-f", definitionPath); err != nil {
-		return utils.Errorf(err, L("failed to define %s secret"), name)
+	// Return the existing secret if any.
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "-n", namespace, "secret", secretName, "-o", "name")
+	if err == nil && strings.TrimSpace(string(out)) != "" {
+		return secretName, nil
 	}
-	return nil
-}
 
-// AddSCCSecret creates a secret holding the SCC credentials and adds it to the helm args.
-func AddSCCSecret(helmArgs []string, namespace string, scc *types.SCCCredentials) ([]string, error) {
+	// Create the secret if SCC user and password are passed.
 	if scc.User != "" && scc.Password != "" {
-		secretName := "scc-credentials"
 		if err := createDockerSecret(
-			namespace, secretName, "registry.suse.com", scc.User, scc.Password,
+			namespace, secretName, "registry.suse.com", scc.User, scc.Password, appLabel,
 		); err != nil {
-			return helmArgs, err
+			return "", err
 		}
-		helmArgs = append(helmArgs, "--set", "registrySecret="+secretName)
+		return secretName, nil
 	}
-	return helmArgs, nil
+	return "", nil
 }
 
 // GetDeploymentImagePullSecret returns the name of the image pull secret of a deployment.
@@ -218,3 +231,11 @@ func GetDeploymentImagePullSecret(namespace string, filter string) (string, erro
 
 	return strings.TrimSpace(string(out)), nil
 }
+
+// HasResource checks if a resource is available on the cluster.
+func HasResource(name string) bool {
+	if err := utils.RunCmd("kubectl", "explain", name); err != nil {
+		return false
+	}
+	return true
+}
diff --git a/shared/kubernetes/pod.go b/shared/kubernetes/pod.go
new file mode 100644
index 000000000..f047e7671
--- /dev/null
+++ b/shared/kubernetes/pod.go
@@ -0,0 +1,103 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"fmt"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// RunPodLogs runs a pod, waits for it to finish and returns its logs.
+//
+// This should be used only to run very fast tasks.
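+//
+// Rough usage sketch (image and script are assumed inputs, mirroring the
+// InspectServer caller above):
+//
+//	out, err := RunPodLogs("uyuni", "uyuni-image-inspector", image, "IfNotPresent", "",
+//		[]types.VolumeMount{utils.EtcRhnVolumeMount}, "sh", "-c", script)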
+func RunPodLogs(
+	namespace string,
+	name string,
+	image string,
+	pullPolicy string,
+	pullSecret string,
+	volumesMounts []types.VolumeMount,
+	cmd ...string,
+) ([]byte, error) {
+	// Convert the internal mounts and volumes into Kubernetes objects
+	mounts := ConvertVolumeMounts(volumesMounts)
+	volumes := CreateVolumes(volumesMounts)
+
+	// Use a pod here since this is a very simple task reading out a file from a volume
+	pod := core.Pod{
+		TypeMeta: meta.TypeMeta{Kind: "Pod", APIVersion: "v1"},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels:    map[string]string{"app": name},
+		},
+		Spec: core.PodSpec{
+			Containers: []core.Container{
+				{
+					Name:            name,
+					Image:           image,
+					ImagePullPolicy: GetPullPolicy(pullPolicy),
+					Command:         cmd,
+					VolumeMounts:    mounts,
+				},
+			},
+			Volumes:       volumes,
+			RestartPolicy: core.RestartPolicyNever,
+		},
+	}
+
+	if pullSecret != "" {
+		pod.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
+	}
+
+	// Run the pod. Apply() already serializes the object to a temporary file
+	// and calls kubectl apply on it, so no second apply is needed.
+	if err := Apply(
+		[]runtime.Object{&pod}, fmt.Sprintf(L("failed to run the %s pod"), name),
+	); err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err := DeletePod(namespace, name, "-lapp="+name); err != nil {
+			log.Err(err).Msgf(L("failed to delete the %s pod"), name)
+		}
+	}()
+
+	if err := WaitForPod(namespace, name, 60); err != nil {
+		return nil, err
+	}
+
+	data, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "logs", "-n", namespace, name)
+	if err != nil {
+		return nil, utils.Errorf(err, L("failed to get the %s pod logs"), name)
+	}
+
+	return data, nil
+}
diff --git a/shared/kubernetes/pvc.go b/shared/kubernetes/pvc.go
new file mode 100644
index 000000000..b14736ccd
--- /dev/null
+++ b/shared/kubernetes/pvc.go
@@ -0,0 +1,275 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// CreatePersistentVolumeClaims creates all the PVCs described by the mounts.
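+//
+// Rough usage sketch (the mount definition is an assumption):
+//
+//	mounts := []types.VolumeMount{{Name: "var-pgsql", MountPath: "/var/lib/pgsql", Size: "50Gi"}}
+//	if err := CreatePersistentVolumeClaims("uyuni", mounts); err != nil {
+//		return err
+//	}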
+func CreatePersistentVolumeClaims(
+	namespace string,
+	mounts []types.VolumeMount,
+) error {
+	pvcs := GetPersistentVolumeClaims(
+		namespace,
+		"",
+		core.ReadWriteOnce,
+		false,
+		GetLabels(ServerApp, ""),
+		mounts,
+	)
+
+	for _, pvc := range pvcs {
+		if !hasPersistentVolumeClaim(pvc.ObjectMeta.Namespace, pvc.ObjectMeta.Name) {
+			if err := Apply(
+				[]*core.PersistentVolumeClaim{pvc},
+				fmt.Sprintf(L("failed to create %s persistent volume claim"), pvc.ObjectMeta.Name),
+			); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func hasPersistentVolumeClaim(namespace string, name string) bool {
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "pvc", "-n", namespace, name, "-o", "name")
+	return err == nil && strings.TrimSpace(string(out)) != ""
+}
+
+// Contains the data extracted from the PV to create the linked PVC for it.
+type pvData struct {
+	ClaimRef struct {
+		Name      string
+		Namespace string
+	}
+	StorageClass string
+	AccessModes  []core.PersistentVolumeAccessMode
+	Size         string
+}
+
+// CreatePersistentVolumeClaimForVolume creates a PVC bound to a specific Volume.
+func CreatePersistentVolumeClaimForVolume(
+	namespace string,
+	volumeName string,
+) error {
+	// Get the PV storage class, claimRef, access modes and size
+	out, err := utils.RunCmdOutput(zerolog.DebugLevel,
+		"kubectl", "get", "pv", volumeName, "-n", namespace,
+		"-o", `jsonpath={"{\"claimRef\": "}{.spec.claimRef}, "storageClass": "{.spec.storageClassName}", `+
+			`"accessModes": {.spec.accessModes}, "size": "{.spec.capacity.storage}{"\"}"}`,
+	)
+	if err != nil {
+		return err
+	}
+	var pv pvData
+	if err := json.Unmarshal(out, &pv); err != nil {
+		return utils.Errorf(err, L("failed to parse pv data"))
+	}
+
+	// Ensure the claimRef of the volume is for our PVC
+	if pv.ClaimRef.Name != volumeName || pv.ClaimRef.Namespace != namespace {
+		return fmt.Errorf(
+			L("the %[1]s volume should reference the %[2]s claim in %[3]s namespace"),
+			volumeName, volumeName, namespace,
+		)
+	}
+
+	// Create the PVC object
+	pvc := newPersistentVolumeClaim(namespace, volumeName, pv.StorageClass, pv.Size, pv.AccessModes, false)
+
+	return Apply([]runtime.Object{&pvc}, L("failed to create the persistent volume claim"))
+}
+
+// GetPersistentVolumeClaims creates the PVC objects matching a list of volume mounts.
+func GetPersistentVolumeClaims(
+	namespace string,
+	storageClass string,
+	accessMode core.PersistentVolumeAccessMode,
+	matchPvByLabel bool,
+	labels map[string]string,
+	mounts []types.VolumeMount,
+) []*core.PersistentVolumeClaim {
+	var claims []*core.PersistentVolumeClaim
+
+	for _, mount := range mounts {
+		size := mount.Size
+		if size == "" {
+			log.Warn().Msgf(L("no size defined for PersistentVolumeClaim %s, using 10Mi as default"), mount.Name)
+			size = "10Mi"
+		}
+		pv := newPersistentVolumeClaim(
+			namespace,
+			mount.Name,
+			storageClass,
+			size,
+			[]core.PersistentVolumeAccessMode{accessMode},
+			matchPvByLabel,
+		)
+		pv.ObjectMeta.SetLabels(labels)
+		claims = append(claims, &pv)
+	}
+
+	return claims
+}
+
+// Creates a PVC from a few common values.
+func newPersistentVolumeClaim( + namespace string, + name string, + storageClass string, + size string, + accessModes []core.PersistentVolumeAccessMode, + matchPvByLabel bool, +) core.PersistentVolumeClaim { + pvc := core.PersistentVolumeClaim{ + TypeMeta: v1.TypeMeta{ + APIVersion: "v1", + Kind: "PersistentVolumeClaim", + }, + ObjectMeta: v1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: core.PersistentVolumeClaimSpec{ + AccessModes: accessModes, + Resources: core.VolumeResourceRequirements{ + Requests: core.ResourceList{"storage": resource.MustParse(size)}, + }, + }, + } + + if storageClass != "" { + pvc.Spec.StorageClassName = &storageClass + } + + if matchPvByLabel { + pvc.Spec.Selector = &v1.LabelSelector{ + MatchLabels: map[string]string{"data": name}, + } + } + + return pvc +} + +func createMount(mountPath string) core.VolumeMount { + pattern := regexp.MustCompile("[^a-zA-Z]+") + name := strings.Trim(pattern.ReplaceAllString(mountPath, "-"), "-") + return core.VolumeMount{ + MountPath: mountPath, + Name: name, + } +} + +// CreateTmpfsMount creates a temporary volume and its mount. +func CreateTmpfsMount(mountPath string, size string) (core.VolumeMount, core.Volume) { + mount := createMount(mountPath) + + parsedSize := resource.MustParse(size) + volume := core.Volume{ + Name: mount.Name, + VolumeSource: core.VolumeSource{ + EmptyDir: &core.EmptyDirVolumeSource{ + Medium: core.StorageMediumMemory, + SizeLimit: &parsedSize, + }, + }, + } + return mount, volume +} + +// CreateHostPathMount creates the mount and volume for a host path. +// This is not secure and tied to the availability on the node, only use when needed. +func CreateHostPathMount( + mountPath string, + hostPath string, + sourceType core.HostPathType, +) (core.VolumeMount, core.Volume) { + mount := createMount(mountPath) + + volume := core.Volume{ + Name: mount.Name, + VolumeSource: core.VolumeSource{ + HostPath: &core.HostPathVolumeSource{ + Path: hostPath, + Type: &sourceType, + }, + }, + } + return mount, volume +} + +// CreateSecretMount creates the volume for a secret. +func CreateSecretVolume(name string, secretName string) core.Volume { + volume := core.Volume{ + Name: name, + VolumeSource: core.VolumeSource{ + Secret: &core.SecretVolumeSource{ + SecretName: secretName, + }, + }, + } + + return volume +} + +// CreateConfigVolume creates the volume for a ConfigMap. +func CreateConfigVolume(name string, configMapName string) core.Volume { + volume := core.Volume{ + Name: name, + VolumeSource: core.VolumeSource{ + ConfigMap: &core.ConfigMapVolumeSource{ + LocalObjectReference: core.LocalObjectReference{ + Name: configMapName, + }, + }, + }, + } + + return volume +} + +// CreateVolumes creates PVC-based volumes matching the internal volumes mounts. +func CreateVolumes(mounts []types.VolumeMount) []core.Volume { + volumes := []core.Volume{} + + for _, mount := range mounts { + volume := core.Volume{ + Name: mount.Name, + VolumeSource: core.VolumeSource{ + PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ + ClaimName: mount.Name, + }, + }, + } + volumes = append(volumes, volume) + } + + return volumes +} + +var runCmdOutput = utils.RunCmdOutput + +// HasVolume returns true if the pvcName persistent volume claim is bound. 
+func HasVolume(namespace string, pvcName string) bool {
+	out, err := runCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "pvc", "-n", namespace, pvcName, "-o", "jsonpath={.status.phase}",
+	)
+	if err != nil {
+		return false
+	}
+	return strings.TrimSpace(string(out)) == "Bound"
+}
diff --git a/shared/kubernetes/pvc_test.go b/shared/kubernetes/pvc_test.go
new file mode 100644
index 000000000..58879270c
--- /dev/null
+++ b/shared/kubernetes/pvc_test.go
@@ -0,0 +1,55 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/uyuni-project/uyuni-tools/shared/testutils"
+)
+
+func TestHasVolume(t *testing.T) {
+	type dataType struct {
+		err      error
+		out      string
+		expected bool
+	}
+	data := []dataType{
+		{nil, "Bound\n", true},
+		{nil, "Pending\n", false},
+		{errors.New("PVC not found"), "", false},
+	}
+
+	for i, test := range data {
+		runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) {
+			return []byte(test.out), test.err
+		}
+		actual := HasVolume("myns", "thepvc")
+		testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected output", i), test.expected, actual)
+	}
+}
+
+func TestHasPersistentVolumeClaim(t *testing.T) {
+	type dataType struct {
+		err      error
+		out      string
+		expected bool
+	}
+	data := []dataType{
+		{nil, "persistentvolumeclaim/var-pgsql\n", true},
+		{errors.New("PVC not found"), "", false},
+	}
+
+	for i, test := range data {
+		runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) {
+			return []byte(test.out), test.err
+		}
+		actual := hasPersistentVolumeClaim("myns", "thepvc")
+		testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected output", i), test.expected, actual)
+	}
+}
diff --git a/shared/kubernetes/rke2.go b/shared/kubernetes/rke2.go
index 9a1993a9a..f43bb323d 100644
--- a/shared/kubernetes/rke2.go
+++ b/shared/kubernetes/rke2.go
@@ -14,19 +14,29 @@ import (
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
 )
 
-const rke2NginxConfigPath = "/var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml"
+const rke2NginxConfigPath = "/var/lib/rancher/rke2/server/manifests/uyuni-ingress-nginx-config.yaml"
 
-// InstallRke2NginxConfig install Rke2 Nginx configuration.
-func InstallRke2NginxConfig(tcpPorts []types.PortMap, udpPorts []types.PortMap, namespace string) {
+// InstallRke2NginxConfig installs the RKE2 Nginx configuration.
+func InstallRke2NginxConfig(ports []types.PortMap, namespace string) error {
 	log.Info().Msg(L("Installing RKE2 Nginx configuration"))
 
+	tcpPorts := []types.PortMap{}
+	udpPorts := []types.PortMap{}
+	for _, port := range ports {
+		if port.Protocol == "udp" {
+			udpPorts = append(udpPorts, port)
+		} else {
+			tcpPorts = append(tcpPorts, port)
+		}
+	}
+
 	data := Rke2NginxConfigTemplateData{
 		Namespace: namespace,
 		TCPPorts:  tcpPorts,
 		UDPPorts:  udpPorts,
 	}
 
-	if err := utils.WriteTemplateToFile(data, rke2NginxConfigPath, 0600, false); err != nil {
-		log.Fatal().Err(err).Msgf(L("Failed to write Rke2 nginx configuration"))
+	if err := utils.WriteTemplateToFile(data, rke2NginxConfigPath, 0600, true); err != nil {
+		return utils.Errorf(err, L("Failed to write Rke2 nginx configuration"))
 	}
 
 	// Wait for the nginx controller to be back
@@ -40,6 +50,7 @@ func InstallRke2NginxConfig(tcpPorts []types.PortMap, udpPorts []types.PortMap,
 		}
 	}
+	return nil
 }
 
 // UninstallRke2NginxConfig uninstall Rke2 Nginx configuration.
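With this change both ingress installers take the unified port list and report errors to the caller instead of exiting. A minimal caller sketch under those assumptions, mirroring the dispatch in installForKubernetes above (clusterInfos and namespace are assumed inputs):

	// Sketch only, not part of the patch.
	func installIngressConfig(clusterInfos *kubernetes.ClusterInfos, namespace string) error {
		ports := utils.GetProxyPorts()
		var err error
		if clusterInfos.IsK3s() {
			err = kubernetes.InstallK3sTraefikConfig(ports)
		} else if clusterInfos.IsRke2() {
			err = kubernetes.InstallRke2NginxConfig(ports, namespace)
		}
		return err
	}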
diff --git a/shared/kubernetes/utils.go b/shared/kubernetes/utils.go
index 8c3f6cb84..07910d3d7 100644
--- a/shared/kubernetes/utils.go
+++ b/shared/kubernetes/utils.go
@@ -6,7 +6,6 @@ package kubernetes
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -17,92 +16,109 @@ import (
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
 	"github.com/uyuni-project/uyuni-tools/shared/types"
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
 )
 
-// ServerApp represent the server app name.
-const ServerApp = "uyuni"
+const (
+	// AppLabel is the app label name.
+	AppLabel = "app.kubernetes.io/part-of"
+	// ComponentLabel is the component label name.
+	ComponentLabel = "app.kubernetes.io/component"
+)
 
-// ServerFilter represents filter used to check server app.
-const ServerFilter = "-lapp=" + ServerApp
+const (
+	// ServerApp is the server app name.
+	ServerApp = "uyuni"
 
-// ProxyApp represnet the proxy app name.
-const ProxyApp = "uyuni-proxy"
+	// ProxyApp is the proxy app name.
+	ProxyApp = "uyuni-proxy"
+)
 
-// ProxyFilter represents filter used to check proxy app.
-const ProxyFilter = "-lapp=" + ProxyApp
+const (
+	// ServerComponent is the value of the component label for the server resources.
+	ServerComponent = "server"
+	// HubAPIComponent is the value of the component label for the Hub API resources.
+	HubAPIComponent = "hub-api"
+	// CocoComponent is the value of the component label for the confidential computing attestation resources.
+	CocoComponent = "coco"
+)
 
-// WaitForDeployment waits at most 60s for a kubernetes deployment to have at least one replica.
-// See [isDeploymentReady] for more details.
-func WaitForDeployment(namespace string, name string, appName string) error {
-	// Find the name of a replica pod
-	// Using the app label is a shortcut, not the 100% acurate way to get from deployment to pod
-	podName := ""
-	jsonpath := fmt.Sprintf("jsonpath={.items[?(@.metadata.labels.app==\"%s\")].metadata.name}", appName)
-	cmdArgs := []string{"get", "pod", "-o", jsonpath}
-	cmdArgs = addNamespace(cmdArgs, namespace)
+// ServerFilter represents the filter used to check the server app.
+const ServerFilter = "-l" + AppLabel + "=" + ServerApp
 
-	for i := 0; i < 60; i++ {
-		out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", cmdArgs...)
-		if err == nil {
-			podName = string(out)
-			break
-		}
-	}
+// ProxyFilter represents the filter used to check the proxy app.
+const ProxyFilter = "-l" + AppLabel + "=" + ProxyApp
 
-	// We need to wait for the image to be pulled as this can add quite some time
-	// Setting a timeout on this is very hard since it hightly depends on network speed and image size
-	// List the Pulled events from the pod as we may not see the Pulling if the image was already downloaded
-	err := WaitForPulledImage(namespace, podName)
-	if err != nil {
-		return utils.Errorf(err, L("failed to pull image"))
+// CaIssuerName is the name of the server CA issuer deployed if cert-manager is used.
+const CaIssuerName = "uyuni-ca-issuer"
+
+// GetLabels creates the label map with the app and component.
+// The component label may be an empty string to skip it.
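+//
+// For example, GetLabels(ServerApp, ServerComponent) returns
+// {"app.kubernetes.io/part-of": "uyuni", "app.kubernetes.io/component": "server"}.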
+func GetLabels(app string, component string) map[string]string {
+	labels := map[string]string{
+		AppLabel: app,
+	}
+	if component != "" {
+		labels[ComponentLabel] = component
+	}
+	return labels
+}
 
-	log.Info().Msgf(L("Waiting for %[1]s deployment to be ready in %[2]s namespace\n"), name, namespace)
-	// Wait for a replica to be ready
-	for i := 0; i < 120; i++ {
-		// TODO Look for pod failures
-		if IsDeploymentReady(namespace, name) {
-			return nil
+// WaitForDeployments waits for the given deployments to have at least one ready replica each.
+func WaitForDeployments(namespace string, names ...string) error {
+	log.Info().Msgf(
+		NL("Waiting for %[1]s deployment to be ready in %[2]s namespace\n",
+			"Waiting for %[1]s deployments to be ready in %[2]s namespace\n", len(names)),
+		strings.Join(names, ", "), namespace)
+
+	deploymentsStarting := names
+	// Wait forever for all deployments to be ready
+	for len(deploymentsStarting) > 0 {
+		starting := []string{}
+		for _, deploymentName := range deploymentsStarting {
+			ready, err := IsDeploymentReady(namespace, deploymentName)
+			if err != nil {
+				return err
+			}
+			if !ready {
+				starting = append(starting, deploymentName)
+			}
+		}
+		deploymentsStarting = starting
+		if len(deploymentsStarting) > 0 {
+			time.Sleep(1 * time.Second)
 		}
-		time.Sleep(1 * time.Second)
 	}
-	return fmt.Errorf(
-		L("failed to find a ready replica for deployment %[1]s in namespace %[2]s after 120s"), name, namespace,
-	)
+	return nil
 }
 
-// WaitForPulledImage wait that image is pulled.
-func WaitForPulledImage(namespace string, podName string) error {
-	log.Info().Msgf(L("Waiting for image of %[1]s pod in %[2]s namespace to be pulled"), podName, namespace)
-	pulledArgs := []string{"get", "event",
-		"-o", "jsonpath={.items[?(@.reason==\"Pulled\")].message}",
-		"--field-selector", "involvedObject.name=" + podName}
-
-	pulledArgs = addNamespace(pulledArgs, namespace)
-	failedArgs := []string{"get", "event",
-		"-o", "jsonpath={range .items[?(@.reason==\"Failed\")]}{.message}{\"\\n\"}{end}",
-		"--field-selector", "involvedObject.name=" + podName}
-	failedArgs = addNamespace(failedArgs, namespace)
+// WaitForRunningDeployment waits for a deployment to have at least one replica in running state.
+func WaitForRunningDeployment(namespace string, name string) error {
+	log.Info().Msgf(L("Waiting for %[1]s deployment to be started in %[2]s namespace\n"), name, namespace)
 	for {
-		// Look for events indicating an image pull issue
-		out, err := utils.RunCmdOutput(zerolog.TraceLevel, "kubectl", failedArgs...)
+		pods, err := getPodsForDeployment(namespace, name)
 		if err != nil {
-			return fmt.Errorf(L("failed to get failed events for pod %s"), podName)
-		}
-		lines := strings.Split(string(out), "\n")
-		for _, line := range lines {
-			if strings.HasPrefix(line, "Failed to pull image") {
-				return errors.New(L("failed to pull image"))
-			}
+			return err
 		}
-		// Has the image pull finished?
-		out, err = utils.RunCmdOutput(zerolog.TraceLevel, "kubectl", pulledArgs...)
- if err != nil { - return fmt.Errorf(L("failed to get events for pod %s"), podName) - } - if len(out) > 0 { - break + if len(pods) > 0 { + jsonPath := "jsonpath={.status.containerStatuses[*].state.running.startedAt}" + if len(pods) > 1 { + jsonPath = "jsonpath={.items[*].status.containerStatuses[*].state.running.startedAt}" + } + out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "get", "pod", "-n", namespace, + "-o", jsonPath, + strings.Join(pods, " "), + ) + if err != nil { + return utils.Errorf(err, L("failed to check if the deployment has running pods")) + } + if strings.TrimSpace(string(out)) != "" { + break + } + if err := hasAllPodsFailed(namespace, pods, name); err != nil { + return err + } } time.Sleep(1 * time.Second) } @@ -110,9 +126,9 @@ func WaitForPulledImage(namespace string, podName string) error { } // IsDeploymentReady returns true if a kubernetes deployment has at least one ready replica. -// The name can also be a filter parameter like -lapp=uyuni. +// // An empty namespace means searching through all the namespaces. -func IsDeploymentReady(namespace string, name string) bool { +func IsDeploymentReady(namespace string, name string) (bool, error) { jsonpath := fmt.Sprintf("jsonpath={.items[?(@.metadata.name==\"%s\")].status.readyReplicas}", name) args := []string{"get", "-o", jsonpath, "deploy"} args = addNamespace(args, namespace) @@ -121,10 +137,130 @@ func IsDeploymentReady(namespace string, name string) bool { // kubectl errors out if the deployment or namespace doesn't exist if err == nil { if replicas, _ := strconv.Atoi(string(out)); replicas > 0 { - return true + return true, nil } } - return false + + pods, err := getPodsForDeployment(namespace, name) + if err != nil { + return false, err + } + + if err := hasAllPodsFailed(namespace, pods, name); err != nil { + return false, err + } + + return false, nil +} + +func hasAllPodsFailed(namespace string, names []string, deployment string) error { + failedPods := 0 + for _, podName := range names { + if failed, err := isPodFailed(namespace, podName); err != nil { + return err + } else if failed { + failedPods = failedPods + 1 + } + } + if len(names) > 0 && failedPods == len(names) { + return fmt.Errorf(L("all the pods of %s deployment have a failure"), deployment) + } + return nil +} + +func getPodsForDeployment(namespace string, name string) ([]string, error) { + rs, err := getCurrentDeploymentReplicaSet(namespace, name) + if err != nil { + return []string{}, err + } + + // Check if all replica set pods have failed to start + return getPodsFromOwnerReference(namespace, rs) +} + +func getCurrentDeploymentReplicaSet(namespace string, name string) (string, error) { + // Get the replicasets matching the deployments and their revision as + // Kubernetes doesn't remove the old replicasets after update. + revisionPath := "{.metadata.annotations['deployment\\.kubernetes\\.io/revision']}" + rsArgs := []string{ + "get", "rs", "-o", + fmt.Sprintf( + "jsonpath={range .items[?(@.metadata.ownerReferences[0].name=='%s')]}{.metadata.name},%s {end}", + name, revisionPath, + ), + } + rsArgs = addNamespace(rsArgs, namespace) + out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", rsArgs...) 
+	if err != nil {
+		return "", utils.Errorf(err, L("failed to list ReplicaSets for deployment %s"), name)
+	}
+	replicasetsOut := strings.TrimSpace(string(out))
+	// No replica, no deployment
+	if replicasetsOut == "" {
+		return "", nil
+	}
+
+	// Get the current deployment revision to look for
+	out, err = runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "deploy", "-n", namespace, name,
+		"-o", "jsonpath="+revisionPath,
+	)
+	if err != nil {
+		return "", utils.Errorf(err, L("failed to get the %s deployment revision"), name)
+	}
+	revision := strings.TrimSpace(string(out))
+
+	replicasets := strings.Split(replicasetsOut, " ")
+	for _, rs := range replicasets {
+		data := strings.SplitN(rs, ",", 2)
+		if len(data) != 2 {
+			return "", fmt.Errorf(L("invalid replicaset response: %s"), replicasetsOut)
+		}
+		if data[1] == revision {
+			return data[0], nil
+		}
+	}
+	return "", nil
+}
+
+func getPodsFromOwnerReference(namespace string, owner string) ([]string, error) {
+	jsonpath := fmt.Sprintf("jsonpath={.items[?(@.metadata.ownerReferences[0].name=='%s')].metadata.name}", owner)
+	podArgs := []string{"get", "pod", "-o", jsonpath}
+	podArgs = addNamespace(podArgs, namespace)
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", podArgs...)
+	if err != nil {
+		return []string{}, utils.Errorf(err, L("failed to find pods for owner reference %s"), owner)
+	}
+
+	outStr := strings.TrimSpace(string(out))
+
+	pods := []string{}
+	if outStr != "" {
+		pods = strings.Split(outStr, " ")
+	}
+	return pods, nil
+}
+
+// isPodFailed checks if any of the containers of the pod are in BackOff state.
+//
+// An empty namespace means searching through all the namespaces.
+func isPodFailed(namespace string, name string) (bool, error) {
+	// If a container failed to pull the image its status will have waiting.reason = ImagePullBackOff
+	// If a container crashed its status will have waiting.reason = CrashLoopBackOff
+	filter := fmt.Sprintf(".items[?(@.metadata.name==\"%s\")]", name)
+	jsonpath := fmt.Sprintf("jsonpath={%[1]s.status.containerStatuses[*].state.waiting.reason}"+
+		"{%[1]s.status.initContainerStatuses[*].state.waiting.reason}", filter)
+	args := []string{"get", "pod", "-o", jsonpath}
+	args = addNamespace(args, namespace)
+
+	out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", args...)
+	if err != nil {
+		return true, utils.Errorf(err, L("failed to get the status of %s pod"), name)
+	}
+	statuses := string(out)
+	if strings.Contains(statuses, "CrashLoopBackOff") || strings.Contains(statuses, "ImagePullBackOff") {
+		return true, nil
+	}
+	return false, nil
+}
 
 // DeploymentStatus represents the kubernetes deployment status.
@@ -150,35 +286,22 @@ func GetDeploymentStatus(namespace string, name string) (*DeploymentStatus, erro
 	return &status, nil
 }
 
-// ReplicasTo set the replica for an app to the given value.
-// Scale the number of replicas of the server.
-func ReplicasTo(namespace string, app string, replica uint) error {
-	args := []string{"scale", "deploy", app, "--replicas"}
-	log.Debug().Msgf("Setting replicas for pod in %s to %d", app, replica)
-	args = append(args, fmt.Sprint(replica), "-n", namespace)
+// ReplicasTo sets the replicas for a deployment to the given value.
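+//
+// Rough usage sketch (namespace and deployment name assumed):
+//
+//	if err := ReplicasTo("uyuni", "uyuni", 0); err != nil {
+//		return err
++//	}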
+func ReplicasTo(namespace string, name string, replica uint) error { + args := []string{"scale", "-n", namespace, "deploy", name, "--replicas", strconv.FormatUint(uint64(replica), 10)} + log.Debug().Msgf("Setting replicas for deployment in %s to %d", name, replica) _, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", args...) if err != nil { return utils.Errorf(err, L("cannot run kubectl %s"), args) } - pods, err := GetPods(namespace, "-lapp="+app) - if err != nil { - return utils.Errorf(err, L("cannot get pods for %s"), app) + if err := waitForReplicas(namespace, name, replica); err != nil { + return err } - for _, pod := range pods { - if len(pod) > 0 { - err = waitForReplica(namespace, pod, replica) - if err != nil { - return utils.Errorf(err, L("replica to %d failed"), replica) - } - } - } - - log.Debug().Msgf("Replicas for pod in %s are now %d", app, replica) - - return err + log.Debug().Msgf("Replicas for %s deployment in %s are now %d", name, namespace, replica) + return nil } func isPodRunning(namespace string, podname string, filter string) (bool, error) { @@ -206,36 +329,11 @@ func GetPods(namespace string, filter string) (pods []string, err error) { return pods, err } -func waitForReplicaZero(namespace string, podname string) error { - waitSeconds := 120 - cmdArgs := []string{"get", "pod", podname, "-n", namespace} - - for i := 0; i < waitSeconds; i++ { - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", cmdArgs...) - /* Assume that if the command return an error at the first iteration, it's because it failed, - * next iteration because the pod was actually deleted - */ - if err != nil && i == 0 { - return utils.Errorf(err, L("cannot get pod informations %s"), podname) - } - outStr := strings.TrimSuffix(string(out), "\n") - if len(outStr) == 0 { - log.Debug().Msgf("Pod %s has been deleted", podname) - return nil - } - time.Sleep(1 * time.Second) - } - return fmt.Errorf(L("cannot set replicas for %s to zero"), podname) -} - -func waitForReplica(namespace string, podname string, replica uint) error { +func waitForReplicas(namespace string, name string, replicas uint) error { waitSeconds := 120 - log.Debug().Msgf("Checking replica for %s ready to %d", podname, replica) - if replica == 0 { - return waitForReplicaZero(namespace, podname) - } + log.Debug().Msgf("Checking replica for %s ready to %d", name, replicas) cmdArgs := []string{ - "get", "pod", podname, "-n", namespace, "--output=custom-columns=STATUS:.status.phase", "--no-headers", + "get", "deploy", name, "-n", namespace, "-o", "jsonpath={.status.readyReplicas}", "--no-headers", } for i := 0; i < waitSeconds; i++ { @@ -243,12 +341,18 @@ func waitForReplica(namespace string, podname string, replica uint) error { if err != nil { return utils.Errorf(err, L("cannot execute %s"), strings.Join(cmdArgs, string(" "))) } - outStr := strings.TrimSuffix(string(out), "\n") - if string(outStr) == "Running" { - log.Debug().Msgf("%s pod replica is now %d", podname, replica) - break + outStr := strings.TrimSpace(string(out)) + var readyReplicas uint64 + if outStr != "" { + var err error + readyReplicas, err = strconv.ParseUint(outStr, 10, 8) + if err != nil { + return utils.Errorf(err, L("invalid replicas result")) + } + } + if uint(readyReplicas) == replicas { + return nil } - log.Debug().Msgf("Pod %s replica is %s in %d seconds.", podname, string(out), i) time.Sleep(1 * time.Second) } return nil @@ -263,12 +367,12 @@ func addNamespace(args []string, namespace string) []string { return args } -// GetPullPolicy return 
pullpolicy in lower case, if exists. -func GetPullPolicy(name string) string { - policies := map[string]string{ - "always": "Always", - "never": "Never", - "ifnotpresent": "IfNotPresent", +// GetPullPolicy returns the kubernetes PullPolicy value, if exists. +func GetPullPolicy(name string) core.PullPolicy { + policies := map[string]core.PullPolicy{ + "always": core.PullAlways, + "never": core.PullNever, + "ifnotpresent": core.PullIfNotPresent, } policy := policies[strings.ToLower(name)] if policy == "" { @@ -287,7 +391,10 @@ func RunPod( command string, override ...string, ) error { - arguments := []string{"run", podname, "-n", namespace, "--image", image, "--image-pull-policy", pullPolicy, filter} + arguments := []string{ + "run", "--rm", "-n", namespace, "--attach", "--pod-running-timeout=3h", "--restart=Never", podname, + "--image", image, "--image-pull-policy", pullPolicy, filter, + } if len(override) > 0 { arguments = append(arguments, `--override-type=strategic`) @@ -303,14 +410,6 @@ func RunPod( return utils.Errorf(err, PL("The first placeholder is a command", "cannot run %[1]s using image %[2]s"), command, image) } - err = waitForPod(namespace, podname) - if err != nil { - return utils.Errorf(err, L("deleting pod %s. Status fails with error"), podname) - } - - defer func() { - err = DeletePod(namespace, podname, filter) - }() return nil } @@ -332,36 +431,6 @@ func DeletePod(namespace string, podname string, filter string) error { return nil } -func waitForPod(namespace string, podname string) error { - status := "Succeeded" - waitSeconds := 120 - log.Debug().Msgf( - "Checking status for %s pod. Waiting %s seconds until status is %s", - podname, strconv.Itoa(waitSeconds), status, - ) - cmdArgs := []string{ - "get", "pod", podname, "-n", namespace, "--output=custom-columns=STATUS:.status.phase", "--no-headers", - } - var err error - for i := 0; i < waitSeconds; i++ { - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", cmdArgs...) - outStr := strings.TrimSuffix(string(out), "\n") - if err != nil { - return utils.Errorf(err, L("cannot execute %s"), strings.Join(cmdArgs, string(" "))) - } - if strings.EqualFold(outStr, status) { - log.Debug().Msgf("%s pod status is %s", podname, status) - return nil - } - if strings.EqualFold(outStr, "Failed") { - return utils.Errorf(err, L("error during execution of %s"), strings.Join(cmdArgs, string(" "))) - } - log.Debug().Msgf("Pod %s status is %s for %d seconds.", podname, outStr, i) - time.Sleep(1 * time.Second) - } - return utils.Errorf(err, L("pod %[1]s status is not %[2]s in %[3]d seconds"), podname, status, waitSeconds) -} - // GetNode return the node where the app is running. func GetNode(namespace string, filter string) (string, error) { nodeName := "" @@ -389,3 +458,18 @@ func GenerateOverrideDeployment(deployData types.Deployment) (string, error) { } return string(ret), nil } + +// GetRunningImage returns the image of containerName for the server running in the current system. +func GetRunningImage(containerName string) (string, error) { + args := []string{ + "get", "pods", "-A", ServerFilter, + "-o", "jsonpath={.items[0].spec.containers[?(@.name=='" + containerName + "')].image}", + } + image, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", args...) 
+ + log.Debug().Msgf("%[1]s container image is: %[2]s", containerName, image) + if err != nil { + return "", err + } + return strings.Trim(string(image), "\n"), nil +} diff --git a/shared/kubernetes/utils_test.go b/shared/kubernetes/utils_test.go new file mode 100644 index 000000000..a4a25d5ca --- /dev/null +++ b/shared/kubernetes/utils_test.go @@ -0,0 +1,124 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/rs/zerolog" + "github.com/uyuni-project/uyuni-tools/shared/testutils" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +func TestGetCurrentDeploymentReplicaSet(t *testing.T) { + type testType struct { + rsOut string + rsErr error + revisionOut string + revisionErr error + expected string + expectedError bool + } + + testCases := []testType{ + { + rsOut: "uyuni-64d597fccf,1 uyuni-66f7677dc6,2\n", + rsErr: nil, + revisionOut: "2\n", + revisionErr: nil, + expected: "uyuni-66f7677dc6", + expectedError: false, + }, + { + rsOut: "uyuni-64d597fccf,1\n", + rsErr: nil, + revisionOut: "1\n", + revisionErr: nil, + expected: "uyuni-64d597fccf", + expectedError: false, + }, + { + rsOut: "\n", + rsErr: nil, + revisionOut: "not found\n", + revisionErr: errors.New("not found"), + expected: "", + expectedError: false, + }, + { + rsOut: "get rs error\n", + rsErr: errors.New("get rs error"), + revisionOut: "1\n", + revisionErr: nil, + expected: "", + expectedError: true, + }, + { + rsOut: "uyuni-64d597fccf,1\n", + rsErr: nil, + revisionOut: "get rev error\n", + revisionErr: errors.New("get rev error"), + expected: "", + expectedError: true, + }, + } + + for i, test := range testCases { + runCmdOutput = func(_ zerolog.Level, _ string, args ...string) ([]byte, error) { + if utils.Contains(args, "rs") { + return []byte(test.rsOut), test.rsErr + } + return []byte(test.revisionOut), test.revisionErr + } + actual, err := getCurrentDeploymentReplicaSet("uyunins", "uyuni") + caseMsg := fmt.Sprintf("test %d: ", i+1) + testutils.AssertEquals(t, fmt.Sprintf("%sunexpected error raised: %s", caseMsg, err), + test.expectedError, err != nil, + ) + testutils.AssertEquals(t, caseMsg+"unexpected result", test.expected, actual) + } +} + +func TestGetPodsFromOwnerReference(t *testing.T) { + type testType struct { + out string + err error + expected []string + } + + data := []testType{ + { + out: "pod1 pod2 pod3\n", + err: nil, + expected: []string{"pod1", "pod2", "pod3"}, + }, + { + out: "\n", + err: nil, + expected: []string{}, + }, + { + out: "error\n", + err: errors.New("some error"), + expected: []string{}, + }, + } + + for i, test := range data { + runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) { + return []byte(test.out), test.err + } + actual, err := getPodsFromOwnerReference("myns", "owner") + if test.err == nil { + testutils.AssertTrue(t, "Shouldn't have raise an error", err == nil) + } else { + testutils.AssertTrue(t, "Unexpected error raised", strings.Contains(err.Error(), test.err.Error())) + } + testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i+1), test.expected, actual) + } +} diff --git a/shared/kubernetes/waiters.go b/shared/kubernetes/waiters.go new file mode 100644 index 000000000..4d280f0ba --- /dev/null +++ b/shared/kubernetes/waiters.go @@ -0,0 +1,100 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "fmt" + "strings" + 
"time" + + "github.com/rs/zerolog" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +// WaitForSecret waits for a secret to be available. +func WaitForSecret(namespace string, secret string) { + for i := 0; ; i++ { + if err := utils.RunCmd("kubectl", "get", "-n", namespace, "secret", secret); err == nil { + break + } + time.Sleep(1 * time.Second) + } +} + +// WaitForJob waits for a job to be completed before timeout seconds. +// +// If the timeout value is 0 the job will be awaited for for ever. +func WaitForJob(namespace string, name string, timeout int) error { + for i := 0; ; i++ { + status, err := jobStatus(namespace, name) + if err != nil { + return err + } + if status == "error" { + return fmt.Errorf( + L("%[1]s job failed, run kubectl logs -n %[2]s --tail=-1 -ljob-name=%[1]s for details"), + name, namespace, + ) + } + if status == "success" { + return nil + } + + if timeout > 0 && i == timeout { + return fmt.Errorf(L("%[1]s job failed to complete within %[2]d seconds"), name, timeout) + } + time.Sleep(1 * time.Second) + } +} + +func jobStatus(namespace string, name string) (string, error) { + out, err := utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "job", "-n", namespace, name, + "-o", "jsonpath={.status.succeeded},{.status.failed}", + ) + if err != nil { + return "", utils.Errorf(err, L("failed to get %s job status"), name) + } + results := strings.SplitN(strings.TrimSpace(string(out)), ",", 2) + if len(results) != 2 { + return "", fmt.Errorf(L("invalid job status response: '%s'"), string(out)) + } + if results[0] == "1" { + return "success", nil + } else if results[1] == "1" { + return "error", nil + } + return "", nil +} + +// WaitForPod waits for a pod to complete before timeout seconds. +// +// If the timeout value is 0 the pod will be awaited for for ever. +func WaitForPod(namespace string, pod string, timeout int) error { + for i := 0; ; i++ { + out, err := utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "pod", "-n", namespace, pod, + "-o", "jsonpath={.status.containerStatuses[0].state.terminated.reason}", + ) + if err != nil { + return utils.Errorf(err, L("failed to get %s pod status"), pod) + } + status := strings.TrimSpace(string(out)) + if status != "" { + if status == "Completed" { + return nil + } + return fmt.Errorf(L("%[1]s pod failed with status %[2]s"), pod, status) + } + + if timeout > 0 && i == timeout { + return fmt.Errorf(L("%[1]s pod failed to complete within %[2]d seconds"), pod, timeout) + } + time.Sleep(1 * time.Second) + } +} diff --git a/shared/ssl/ssl.go b/shared/ssl/ssl.go index 730f4561d..cfc91b8d9 100644 --- a/shared/ssl/ssl.go +++ b/shared/ssl/ssl.go @@ -7,6 +7,7 @@ package ssl import ( "bytes" "errors" + "io" "os" "os/exec" "strings" @@ -240,14 +241,22 @@ func optionalFile(file string) { } } -// GetRsaKey converts an SSL key to RSA. -func GetRsaKey(keyPath string, password string) []byte { +// Converts an SSL key to RSA. 
+func GetRsaKey(keyContent string, password string) []byte { // Kubernetes only handles RSA private TLS keys, convert and strip password caPassword := password utils.AskPasswordIfMissing(&caPassword, L("Source server SSL CA private key password"), 0, 0) // Convert the key file to RSA format for kubectl to handle it - cmd := exec.Command("openssl", "rsa", "-in", keyPath, "-passin", "env:pass") + cmd := exec.Command("openssl", "rsa", "-passin", "env:pass") + stdin, err := cmd.StdinPipe() + if err != nil { + log.Fatal().Err(err).Msg(L("Failed to open openssl rsa process input stream")) + } + if _, err := io.WriteString(stdin, keyContent); err != nil { + log.Fatal().Err(err).Msg(L("Failed to write openssl key content to input stream")) + } + cmd.Env = append(cmd.Env, "pass="+caPassword) out, err := cmd.Output() if err != nil { @@ -255,3 +264,20 @@ func GetRsaKey(keyPath string, password string) []byte { } return out } + +// StripTextFromCertificate removes the optional text part of an x509 certificate. +func StripTextFromCertificate(certContent string) []byte { + cmd := exec.Command("openssl", "x509") + stdin, err := cmd.StdinPipe() + if err != nil { + log.Fatal().Err(err).Msg(L("Failed to open openssl x509 process input stream")) + } + if _, err := io.WriteString(stdin, certContent); err != nil { + log.Fatal().Err(err).Msg(L("Failed to write SSL certificate to input stream")) + } + out, err := cmd.Output() + if err != nil { + log.Fatal().Err(err).Msg(L("failed to strip text part from CA certificate")) + } + return out +} diff --git a/shared/ssl/ssl_test.go b/shared/ssl/ssl_test.go index b5d055f93..1d0b5342f 100644 --- a/shared/ssl/ssl_test.go +++ b/shared/ssl/ssl_test.go @@ -152,7 +152,8 @@ func TestOrderCasChain2(t *testing.T) { } func TestGetRsaKey(t *testing.T) { - actual := string(GetRsaKey("testdata/RootCA.key", "secret")) + key := testutils.ReadFile(t, "testdata/RootCA.key") + actual := string(GetRsaKey(key, "secret")) if !strings.HasPrefix(actual, "-----BEGIN PRIVATE KEY-----\nMIIEugIBADANBgkqhkiG9w0BAQEFAAS") || !strings.HasSuffix(actual, "DKY9SmW6QD+RJwbMc4M=\n-----END PRIVATE KEY-----\n") { t.Errorf("Unexpected generated RSA key: %s", actual) diff --git a/shared/testutils/flagstests/mgradm.go b/shared/testutils/flagstests/mgradm.go index d0b5ee6d0..0d0a741ea 100644 --- a/shared/testutils/flagstests/mgradm.go +++ b/shared/testutils/flagstests/mgradm.go @@ -12,24 +12,18 @@ import ( "github.com/uyuni-project/uyuni-tools/shared/types" ) -// ServerHelmFlagsTestArgs is the expected values for AssertHelmInstallFlags. -var ServerHelmFlagsTestArgs = []string{ - "--helm-uyuni-namespace", "uyunins", - "--helm-uyuni-chart", "oci://srv/uyuni", - "--helm-uyuni-version", "1.2.3", - "--helm-uyuni-values", "uyuni/values.yaml", - "--helm-certmanager-namespace", "certmanagerns", - "--helm-certmanager-chart", "oci://srv/certmanager", - "--helm-certmanager-version", "4.5.6", - "--helm-certmanager-values", "certmanager/values.yaml", +// ServerKubernetesFlagsTestArgs are the expected values for AssertServerKubernetesFlags. +var ServerKubernetesFlagsTestArgs = []string{ + "--kubernetes-uyuni-namespace", "uyunins", + "--kubernetes-certmanager-namespace", "certmanagerns", + "--kubernetes-certmanager-chart", "oci://srv/certmanager", + "--kubernetes-certmanager-version", "4.5.6", + "--kubernetes-certmanager-values", "certmanager/values.yaml", } -// AssertServerHelmFlags checks that all Helm flags are parsed correctly. 
-func AssertServerHelmFlags(t *testing.T, flags *utils.HelmFlags) {
+// AssertServerKubernetesFlags checks that all Kubernetes flags are parsed correctly.
+func AssertServerKubernetesFlags(t *testing.T, flags *utils.KubernetesFlags) {
 	testutils.AssertEquals(t, "Error parsing --helm-uyuni-namespace", "uyunins", flags.Uyuni.Namespace)
-	testutils.AssertEquals(t, "Error parsing --helm-uyuni-chart", "oci://srv/uyuni", flags.Uyuni.Chart)
-	testutils.AssertEquals(t, "Error parsing --helm-uyuni-version", "1.2.3", flags.Uyuni.Version)
-	testutils.AssertEquals(t, "Error parsing --helm-uyuni-values", "uyuni/values.yaml", flags.Uyuni.Values)
 	testutils.AssertEquals(t, "Error parsing --helm-certmanager-namespace", "certmanagerns",
 		flags.CertManager.Namespace,
 	)
@@ -42,6 +36,34 @@ func AssertServerHelmFlags(t *testing.T, flags *utils.HelmFlags) {
 	)
 }
 
+// VolumesFlagsTestExpected are the expected values for AssertVolumesFlags.
+var VolumesFlagsTestExpected = []string{
+	"--volumes-class", "MyStorageClass",
+	"--volumes-mirror", "mirror-pv",
+	"--volumes-database-size", "123Gi",
+	"--volumes-database-class", "dbclass",
+	"--volumes-packages-size", "456Gi",
+	"--volumes-packages-class", "pkgclass",
+	"--volumes-www-size", "123Mi",
+	"--volumes-www-class", "wwwclass",
+	"--volumes-cache-size", "789Gi",
+	"--volumes-cache-class", "cacheclass",
+}
+
+// AssertVolumesFlags checks that all the volumes flags are parsed correctly.
+func AssertVolumesFlags(t *testing.T, flags *utils.VolumesFlags) {
+	testutils.AssertEquals(t, "Error parsing --volumes-class", "MyStorageClass", flags.Class)
+	testutils.AssertEquals(t, "Error parsing --volumes-mirror", "mirror-pv", flags.Mirror)
+	testutils.AssertEquals(t, "Error parsing --volumes-database-size", "123Gi", flags.Database.Size)
+	testutils.AssertEquals(t, "Error parsing --volumes-database-class", "dbclass", flags.Database.Class)
+	testutils.AssertEquals(t, "Error parsing --volumes-packages-size", "456Gi", flags.Packages.Size)
+	testutils.AssertEquals(t, "Error parsing --volumes-packages-class", "pkgclass", flags.Packages.Class)
+	testutils.AssertEquals(t, "Error parsing --volumes-www-size", "123Mi", flags.Www.Size)
+	testutils.AssertEquals(t, "Error parsing --volumes-www-class", "wwwclass", flags.Www.Class)
+	testutils.AssertEquals(t, "Error parsing --volumes-cache-size", "789Gi", flags.Cache.Size)
+	testutils.AssertEquals(t, "Error parsing --volumes-cache-class", "cacheclass", flags.Cache.Class)
+}
+
 // ImageFlagsTestArgs is the expected values for AssertImageFlag.
 var ImageFlagsTestArgs = []string{
 	"--image", "path/to/image",
@@ -58,7 +80,7 @@ func AssertImageFlag(t *testing.T, flags *types.ImageFlags) {
 	testutils.AssertEquals(t, "Error parsing --pullPolicy", "never", flags.PullPolicy)
 }
 
-// DBUpdateImageFlagTestArgs is the expected values for AssertDbUpgradeImageFlag.
+// DBUpdateImageFlagTestArgs are the expected values for AssertDBUpgradeImageFlag.
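+// The values below exercise the --dbupgrade-image and --dbupgrade-tag flags.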
var DBUpdateImageFlagTestArgs = []string{ "--dbupgrade-image", "dbupgradeimg", "--dbupgrade-tag", "dbupgradetag", diff --git a/shared/testutils/flagstests/mgradm_install.go b/shared/testutils/flagstests/mgradm_install.go index 7cd401223..edfb76f47 100644 --- a/shared/testutils/flagstests/mgradm_install.go +++ b/shared/testutils/flagstests/mgradm_install.go @@ -7,7 +7,7 @@ package flagstests import ( "testing" - "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" "github.com/uyuni-project/uyuni-tools/shared/testutils" ) @@ -58,42 +58,47 @@ var InstallFlagsTestArgs = func() []string { } // AssertInstallFlags checks that all the install flags are parsed correctly. -func AssertInstallFlags(t *testing.T, flags *shared.InstallFlags) { - testutils.AssertEquals(t, "Error parsing --tz", "CEST", flags.TZ) - testutils.AssertEquals(t, "Error parsing --email", "admin@foo.bar", flags.Email) - testutils.AssertEquals(t, "Error parsing --emailfrom", "sender@foo.bar", flags.EmailFrom) - testutils.AssertEquals(t, "Error parsing --issParent", "parent.iss.com", flags.IssParent) - testutils.AssertEquals(t, "Error parsing --db-user", "dbuser", flags.DB.User) - testutils.AssertEquals(t, "Error parsing --db-password", "dbpass", flags.DB.Password) - testutils.AssertEquals(t, "Error parsing --db-name", "dbname", flags.DB.Name) - testutils.AssertEquals(t, "Error parsing --db-host", "dbhost", flags.DB.Host) - testutils.AssertEquals(t, "Error parsing --db-port", 1234, flags.DB.Port) - testutils.AssertEquals(t, "Error parsing --db-protocol", "dbprot", flags.DB.Protocol) - testutils.AssertEquals(t, "Error parsing --db-admin-user", "dbadmin", flags.DB.Admin.User) - testutils.AssertEquals(t, "Error parsing --db-admin-password", "dbadminpass", flags.DB.Admin.Password) - testutils.AssertEquals(t, "Error parsing --db-provider", "aws", flags.DB.Provider) - testutils.AssertEquals(t, "Error parsing --tftp", false, flags.Tftp) - testutils.AssertEquals(t, "Error parsing --reportdb-user", "reportdbuser", flags.ReportDB.User) - testutils.AssertEquals(t, "Error parsing --reportdb-password", "reportdbpass", flags.ReportDB.Password) - testutils.AssertEquals(t, "Error parsing --reportdb-name", "reportdbname", flags.ReportDB.Name) - testutils.AssertEquals(t, "Error parsing --reportdb-host", "reportdbhost", flags.ReportDB.Host) - testutils.AssertEquals(t, "Error parsing --reportdb-port", 5678, flags.ReportDB.Port) - AssertSSLGenerationFlags(t, &flags.SSL.SSLCertGenerationFlags) - testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.SSL.Password) +func AssertInstallFlags(t *testing.T, flags *utils.ServerFlags) { + testutils.AssertEquals(t, "Error parsing --tz", "CEST", flags.Installation.TZ) + testutils.AssertEquals(t, "Error parsing --email", "admin@foo.bar", flags.Installation.Email) + testutils.AssertEquals(t, "Error parsing --emailfrom", "sender@foo.bar", flags.Installation.EmailFrom) + testutils.AssertEquals(t, "Error parsing --issParent", "parent.iss.com", flags.Installation.IssParent) + testutils.AssertEquals(t, "Error parsing --db-user", "dbuser", flags.Installation.DB.User) + testutils.AssertEquals(t, "Error parsing --db-password", "dbpass", flags.Installation.DB.Password) + testutils.AssertEquals(t, "Error parsing --db-name", "dbname", flags.Installation.DB.Name) + testutils.AssertEquals(t, "Error parsing --db-host", "dbhost", flags.Installation.DB.Host) + testutils.AssertEquals(t, "Error parsing --db-port", 1234, 
flags.Installation.DB.Port) + testutils.AssertEquals(t, "Error parsing --db-protocol", "dbprot", flags.Installation.DB.Protocol) + testutils.AssertEquals(t, "Error parsing --db-admin-user", "dbadmin", flags.Installation.DB.Admin.User) + testutils.AssertEquals(t, "Error parsing --db-admin-password", "dbadminpass", flags.Installation.DB.Admin.Password) + testutils.AssertEquals(t, "Error parsing --db-provider", "aws", flags.Installation.DB.Provider) + testutils.AssertEquals(t, "Error parsing --tftp", false, flags.Installation.Tftp) + testutils.AssertEquals(t, "Error parsing --reportdb-user", "reportdbuser", flags.Installation.ReportDB.User) + testutils.AssertEquals(t, "Error parsing --reportdb-password", "reportdbpass", flags.Installation.ReportDB.Password) + testutils.AssertEquals(t, "Error parsing --reportdb-name", "reportdbname", flags.Installation.ReportDB.Name) + testutils.AssertEquals(t, "Error parsing --reportdb-host", "reportdbhost", flags.Installation.ReportDB.Host) + testutils.AssertEquals(t, "Error parsing --reportdb-port", 5678, flags.Installation.ReportDB.Port) + testutils.AssertEquals(t, "Error parsing --ssl-cname", []string{"cname1", "cname2"}, flags.Installation.SSL.Cnames) + testutils.AssertEquals(t, "Error parsing --ssl-country", "OS", flags.Installation.SSL.Country) + testutils.AssertEquals(t, "Error parsing --ssl-state", "sslstate", flags.Installation.SSL.State) + testutils.AssertEquals(t, "Error parsing --ssl-city", "sslcity", flags.Installation.SSL.City) + testutils.AssertEquals(t, "Error parsing --ssl-org", "sslorg", flags.Installation.SSL.Org) + testutils.AssertEquals(t, "Error parsing --ssl-ou", "sslou", flags.Installation.SSL.OU) + testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.Installation.SSL.Password) testutils.AssertEquals(t, "Error parsing --ssl-ca-intermediate", - []string{"path/inter1.crt", "path/inter2.crt"}, flags.SSL.Ca.Intermediate, + []string{"path/inter1.crt", "path/inter2.crt"}, flags.Installation.SSL.Ca.Intermediate, ) - testutils.AssertEquals(t, "Error parsing --ssl-ca-root", "path/root.crt", flags.SSL.Ca.Root) - testutils.AssertEquals(t, "Error parsing --ssl-server-cert", "path/srv.crt", flags.SSL.Server.Cert) - testutils.AssertEquals(t, "Error parsing --ssl-server-key", "path/srv.key", flags.SSL.Server.Key) - testutils.AssertTrue(t, "Error parsing --debug-java", flags.Debug.Java) - testutils.AssertEquals(t, "Error parsing --admin-login", "adminuser", flags.Admin.Login) - testutils.AssertEquals(t, "Error parsing --admin-password", "adminpass", flags.Admin.Password) - testutils.AssertEquals(t, "Error parsing --admin-firstName", "adminfirst", flags.Admin.FirstName) - testutils.AssertEquals(t, "Error parsing --admin-lastName", "adminlast", flags.Admin.LastName) - testutils.AssertEquals(t, "Error parsing --organization", "someorg", flags.Organization) + testutils.AssertEquals(t, "Error parsing --ssl-ca-root", "path/root.crt", flags.Installation.SSL.Ca.Root) + testutils.AssertEquals(t, "Error parsing --ssl-server-cert", "path/srv.crt", flags.Installation.SSL.Server.Cert) + testutils.AssertEquals(t, "Error parsing --ssl-server-key", "path/srv.key", flags.Installation.SSL.Server.Key) + testutils.AssertTrue(t, "Error parsing --debug-java", flags.Installation.Debug.Java) + testutils.AssertEquals(t, "Error parsing --admin-login", "adminuser", flags.Installation.Admin.Login) + testutils.AssertEquals(t, "Error parsing --admin-password", "adminpass", flags.Installation.Admin.Password) + testutils.AssertEquals(t, "Error parsing 
--admin-firstName", "adminfirst", flags.Installation.Admin.FirstName) + testutils.AssertEquals(t, "Error parsing --admin-lastName", "adminlast", flags.Installation.Admin.LastName) + testutils.AssertEquals(t, "Error parsing --organization", "someorg", flags.Installation.Organization) AssertMirrorFlag(t, flags.Mirror) - AssertSCCFlag(t, &flags.SCC) + AssertSCCFlag(t, &flags.Installation.SCC) AssertImageFlag(t, &flags.Image) AssertCocoFlag(t, &flags.Coco) AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) diff --git a/shared/types/deployment.go b/shared/types/deployment.go index c9563168e..164725d21 100644 --- a/shared/types/deployment.go +++ b/shared/types/deployment.go @@ -7,6 +7,8 @@ package types type VolumeMount struct { MountPath string `json:"mountPath,omitempty"` Name string `json:"name,omitempty"` + Size string `json:"size,omitempty"` + Class string `json:"class,omitempty"` } // Container type used for mapping pod definition structure. diff --git a/shared/types/networks.go b/shared/types/networks.go index 30e895e81..b94f18ab6 100644 --- a/shared/types/networks.go +++ b/shared/types/networks.go @@ -6,6 +6,7 @@ package types // PortMap describes a port. type PortMap struct { + Service string Name string Exposed int Port int diff --git a/shared/types/ssl.go b/shared/types/ssl.go index 5daf2bef8..f80b18371 100644 --- a/shared/types/ssl.go +++ b/shared/types/ssl.go @@ -20,6 +20,8 @@ type SSLCertGenerationFlags struct { type CaChain struct { Root string Intermediate []string + // Key is the CA key file in the case of a migration of a self-generate CA. + Key string } // SSLPair is a type for SSL Cert and Key. diff --git a/shared/utils/exec.go b/shared/utils/exec.go index 574cfc336..bed607f40 100644 --- a/shared/utils/exec.go +++ b/shared/utils/exec.go @@ -5,6 +5,8 @@ package utils import ( + "bytes" + "errors" "fmt" "os/exec" "strings" @@ -64,11 +66,18 @@ func RunCmdOutput(logLevel zerolog.Level, command string, args ...string) ([]byt s.Start() // Start the spinner } localLogger.Debug().Msgf("Running: %s %s", command, strings.Join(args, " ")) - output, err := exec.Command(command, args...).Output() + cmd := exec.Command(command, args...) + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + output, err := cmd.Output() if logLevel != zerolog.Disabled { s.Stop() } localLogger.Trace().Msgf("Command output: %s, error: %s", output, err) + message := strings.TrimSpace(errBuf.String()) + if message != "" { + err = errors.New(message) + } return output, err } diff --git a/shared/utils/inspector.go b/shared/utils/inspector.go index 5e4de9da9..78b21599e 100644 --- a/shared/utils/inspector.go +++ b/shared/utils/inspector.go @@ -8,6 +8,7 @@ import ( "bytes" "os" "path" + "strings" "github.com/rs/zerolog/log" "github.com/spf13/viper" @@ -46,6 +47,21 @@ type BaseInspector struct { Values []types.InspectData } +// GenerateScriptString creates the inspector script and returns it as a string. +func (i *BaseInspector) GenerateScriptString() (string, error) { + data := templates.InspectTemplateData{ + Param: i.Values, + OutputFile: i.GetDataPath(), + } + + scriptBuilder := new(strings.Builder) + if err := data.Render(scriptBuilder); err != nil { + return "", err + } + + return scriptBuilder.String(), nil +} + // GenerateScript is a common implementation for all inspectors. 
func (i *BaseInspector) GenerateScript() error {
 	log.Debug().Msgf("Generating inspect script in %s", i.GetScriptPath())
@@ -83,6 +99,13 @@ func ReadInspectData[T any](dataFile string) (*T, error) {
 		return nil, Errorf(err, L("cannot read file %s"), dataFile)
 	}
 
+	return ReadInspectDataString[T](data)
+}
+
+// ReadInspectDataString returns an unmarshalled object of type T from raw inspect data.
+//
+// This function is mostly meant for the inspector implementations, but can also be used directly.
+func ReadInspectDataString[T any](data []byte) (*T, error) {
 	viper.SetConfigType("env")
 	if err := viper.MergeConfig(bytes.NewBuffer(data)); err != nil {
 		return nil, Errorf(err, L("cannot read config"))
diff --git a/shared/utils/ports.go b/shared/utils/ports.go
index 6cd7e7402..01b2e16a5 100644
--- a/shared/utils/ports.go
+++ b/shared/utils/ports.go
@@ -4,47 +4,100 @@
 
-import "github.com/uyuni-project/uyuni-tools/shared/types"
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+)
+
+const (
+	// WebServiceName is the name of the server web service.
+	WebServiceName = "web"
+	// SaltServiceName is the name of the server salt service.
+	SaltServiceName = "salt"
+	// CobblerServiceName is the name of the server cobbler service.
+	CobblerServiceName = "cobbler"
+	// ReportdbServiceName is the name of the server report database service.
+	ReportdbServiceName = "reportdb"
+	// DBServiceName is the name of the server internal database service.
+	DBServiceName = "db"
+	// TaskoServiceName is the name of the server taskomatic service.
+	TaskoServiceName = "taskomatic"
+	// TftpServiceName is the name of the server tftp service.
+	TftpServiceName = "tftp"
+	// TomcatServiceName is the name of the server tomcat service.
+	TomcatServiceName = "tomcat"
+	// SearchServiceName is the name of the server search service.
+	SearchServiceName = "search"
+
+	// HubAPIServiceName is the name of the server hub API service.
+	HubAPIServiceName = "hub-api"
+
+	// ProxyTCPServiceName is the name of the proxy TCP service.
+	ProxyTCPServiceName = "uyuni-proxy-tcp"
+
+	// ProxyUDPServiceName is the name of the proxy UDP service.
+	ProxyUDPServiceName = "uyuni-proxy-udp"
+)
 
 // NewPortMap is a constructor for PortMap type.
-func NewPortMap(name string, exposed int, port int) types.PortMap {
+func NewPortMap(service string, name string, exposed int, port int) types.PortMap {
 	return types.PortMap{
+		Service: service,
 		Name:    name,
 		Exposed: exposed,
 		Port:    port,
 	}
 }
 
-// TCPPorts are the tcp ports required by the server
-// The port names should be less than 15 characters long and lowercased for traefik to eat them.
-var TCPPorts = []types.PortMap{
-	NewPortMap("postgres", 5432, 5432),
-	NewPortMap("salt-publish", 4505, 4505),
-	NewPortMap("salt-request", 4506, 4506),
-	NewPortMap("cobbler", 25151, 25151),
-	NewPortMap("psql-mtrx", 9187, 9187),
-	NewPortMap("tasko-jmx-mtrx", 5556, 5556),
-	NewPortMap("tomcat-jmx-mtrx", 5557, 5557),
-	NewPortMap("tasko-mtrx", 9800, 9800),
+// WebPorts is the list of ports for the server web service.
+var WebPorts = []types.PortMap{
+	NewPortMap(WebServiceName, "http", 80, 80),
 }
 
-// TCPPodmanPorts are the tcp ports required by the server on podman.
-var TCPPodmanPorts = []types.PortMap{
-	// TODO: Replace Node exporter with cAdvisor
-	NewPortMap("node-exporter", 9100, 9100),
+// ReportDBPorts is the list of ports for the server report db service.
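+// It exposes the same pgsql and exporter ports as the internal db service,
+// under the reportdb service name.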
+var ReportDBPorts = []types.PortMap{
+	NewPortMap(ReportdbServiceName, "pgsql", 5432, 5432),
+	NewPortMap(ReportdbServiceName, "exporter", 9187, 9187),
+}
+
+// DBPorts is the list of ports for the server internal db service.
+var DBPorts = []types.PortMap{
+	NewPortMap(DBServiceName, "pgsql", 5432, 5432),
+	NewPortMap(DBServiceName, "exporter", 9187, 9187),
 }
 
-// DebugPorts are the port used by dev for debugging applications.
-var DebugPorts = []types.PortMap{
-	// We can't expose on port 8000 since traefik already uses it
-	NewPortMap("tomcat-debug", 8003, 8003),
-	NewPortMap("tasko-debug", 8001, 8001),
-	NewPortMap("search-debug", 8002, 8002),
+// SaltPorts is the list of ports for the server salt service.
+var SaltPorts = []types.PortMap{
+	NewPortMap(SaltServiceName, "publish", 4505, 4505),
+	NewPortMap(SaltServiceName, "request", 4506, 4506),
 }
 
-// UDPPorts are the udp ports required by the server.
-var UDPPorts = []types.PortMap{
+// CobblerPorts is the list of ports for the server cobbler service.
+var CobblerPorts = []types.PortMap{
+	NewPortMap(CobblerServiceName, "cobbler", 25151, 25151),
+}
+
+// TaskoPorts is the list of ports for the server taskomatic service.
+var TaskoPorts = []types.PortMap{
+	NewPortMap(TaskoServiceName, "jmx", 5556, 5556),
+	NewPortMap(TaskoServiceName, "mtrx", 9800, 9800),
+	NewPortMap(TaskoServiceName, "debug", 8001, 8001),
+}
+
+// TomcatPorts is the list of ports for the server tomcat service.
+var TomcatPorts = []types.PortMap{
+	NewPortMap(TomcatServiceName, "jmx", 5557, 5557),
+	NewPortMap(TomcatServiceName, "debug", 8003, 8003),
+}
+
+// SearchPorts is the list of ports for the server search service.
+var SearchPorts = []types.PortMap{
+	NewPortMap(SearchServiceName, "debug", 8002, 8002),
+}
+
+// TftpPorts is the list of ports for the server tftp service.
+var TftpPorts = []types.PortMap{
 	{
+		Service:  TftpServiceName,
 		Name:     "tftp",
 		Exposed:  69,
 		Port:     69,
@@ -52,20 +105,67 @@ var UDPPorts = []types.PortMap{
 	},
 }
 
+// GetServerPorts returns all the server container ports.
+//
+// If debug is set to true, the debug ports are added to the list.
+func GetServerPorts(debug bool) []types.PortMap {
+	ports := []types.PortMap{}
+	ports = appendPorts(ports, debug, WebPorts...)
+	ports = appendPorts(ports, debug, ReportDBPorts...)
+	ports = appendPorts(ports, debug, SaltPorts...)
+	ports = appendPorts(ports, debug, CobblerPorts...)
+	ports = appendPorts(ports, debug, TaskoPorts...)
+	ports = appendPorts(ports, debug, TomcatPorts...)
+	ports = appendPorts(ports, debug, SearchPorts...)
+	ports = appendPorts(ports, debug, TftpPorts...)
+
+	return ports
+}
+
+func appendPorts(ports []types.PortMap, debug bool, newPorts ...types.PortMap) []types.PortMap {
+	for _, newPort := range newPorts {
+		// Debug ports are only included when debug is requested.
+		if debug || newPort.Name != "debug" {
+			ports = append(ports, newPort)
+		}
+	}
+	return ports
+}
+
+// TCPPodmanPorts are the tcp ports required by the server on podman.
+var TCPPodmanPorts = []types.PortMap{
+	// TODO: Replace Node exporter with cAdvisor
+	NewPortMap("tomcat", "node-exporter", 9100, 9100),
+}
+
 // HubXmlrpcPorts are the tcp ports required by the Hub XMLRPC API service.
 var HubXmlrpcPorts = []types.PortMap{
-	NewPortMap("hub-xmlrpc", 2830, 2830),
+	NewPortMap(HubAPIServiceName, "xmlrpc", 2830, 2830),
 }
 
 // ProxyTCPPorts are the tcp ports required by the proxy.
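+// Note that ssh is exposed as port 8022 on the host and mapped to port 22 in the container.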
var ProxyTCPPorts = []types.PortMap{ - NewPortMap("ssh", 8022, 22), - NewPortMap("salt-publish", 4505, 4505), - NewPortMap("salt-request", 4506, 4506), + NewPortMap(ProxyTCPServiceName, "ssh", 8022, 22), + NewPortMap(ProxyTCPServiceName, "publish", 4505, 4505), + NewPortMap(ProxyTCPServiceName, "request", 4506, 4506), } // ProxyPodmanPorts are the http/s ports required by the proxy. var ProxyPodmanPorts = []types.PortMap{ - NewPortMap("https", 443, 443), - NewPortMap("http", 80, 80), + NewPortMap(ProxyTCPServiceName, "https", 443, 443), + NewPortMap(ProxyTCPServiceName, "http", 80, 80), +} + +// GetProxyPorts returns all the proxy container ports. +func GetProxyPorts() []types.PortMap { + ports := []types.PortMap{} + ports = appendPorts(ports, false, ProxyTCPPorts...) + ports = appendPorts(ports, false, types.PortMap{ + Service: ProxyUDPServiceName, + Name: "tftp", + Exposed: 69, + Port: 69, + Protocol: "udp", + }) + + return ports } diff --git a/shared/utils/ports_test.go b/shared/utils/ports_test.go new file mode 100644 index 000000000..c5f1af20f --- /dev/null +++ b/shared/utils/ports_test.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "testing" + + "github.com/uyuni-project/uyuni-tools/shared/testutils" +) + +func TestGetServerPorts(t *testing.T) { + allPorts := len(WebPorts) + len(ReportDBPorts) + len(SaltPorts) + len(CobblerPorts) + + len(TaskoPorts) + len(TomcatPorts) + len(SearchPorts) + len(TftpPorts) + + ports := GetServerPorts(false) + testutils.AssertEquals(t, "Wrong number of ports without debug ones", allPorts-3, len(ports)) + + ports = GetServerPorts(true) + testutils.AssertEquals(t, "Wrong number of ports with debug ones", allPorts, len(ports)) +} diff --git a/shared/utils/utils.go b/shared/utils/utils.go index 968afb255..131286f07 100644 --- a/shared/utils/utils.go +++ b/shared/utils/utils.go @@ -46,6 +46,7 @@ type InspectResult struct { CommonInspectData `mapstructure:",squash"` Timezone string HasHubXmlrpcAPI bool `mapstructure:"has_hubxmlrpc"` + Debug bool `mapstructure:"debug"` } func checkValueSize(value string, min int, max int) bool { @@ -257,7 +258,7 @@ func GetLocalTimezone() string { if err != nil { log.Fatal().Err(err).Msgf(L("Failed to run %s"), "timedatectl show --value -p Timezone") } - return string(out) + return strings.TrimSpace(string(out)) } // IsEmptyDirectory return true if a given directory is empty. diff --git a/shared/utils/utils_test.go b/shared/utils/utils_test.go index bc21ece86..b7707275c 100644 --- a/shared/utils/utils_test.go +++ b/shared/utils/utils_test.go @@ -387,3 +387,9 @@ func TestSaveBinaryData(t *testing.T) { fmt.Sprintf("%v", data), fmt.Sprintf("%v", storedData), ) } + +func TestCompareVersion(t *testing.T) { + testutils.AssertTrue(t, "2024.07 is not inferior to 2024.13", CompareVersion("2024.07", "2024.13") < 0) + testutils.AssertTrue(t, "2024.13 is not superior to 2024.07", CompareVersion("2024.13", "2024.07") > 0) + testutils.AssertTrue(t, "2024.13 is not equal to 2024.13", CompareVersion("2024.13", "2024.13") == 0) +} diff --git a/shared/utils/volumes.go b/shared/utils/volumes.go index 4181a8bda..84a2a7993 100644 --- a/shared/utils/volumes.go +++ b/shared/utils/volumes.go @@ -6,98 +6,58 @@ package utils import "github.com/uyuni-project/uyuni-tools/shared/types" +// EtcRhnVolumeMount defines the /etc/rhn volume mount. 
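+//
+// This mount is shared between the PostgreSQL and server containers via
+// PgsqlRequiredVolumeMounts.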
+var EtcRhnVolumeMount = types.VolumeMount{MountPath: "/etc/rhn", Name: "etc-rhn", Size: "1Mi"} + +// VarPgsqlVolumeMount defines the /var/lib/pgsql volume mount. +var VarPgsqlVolumeMount = types.VolumeMount{MountPath: "/var/lib/pgsql", Name: "var-pgsql", Size: "50Gi"} + +// RootVolumeMount defines the /root volume mount. +var RootVolumeMount = types.VolumeMount{MountPath: "/root", Name: "root", Size: "1Mi"} + // PgsqlRequiredVolumeMounts represents volumes mount used by PostgreSQL. var PgsqlRequiredVolumeMounts = []types.VolumeMount{ - {MountPath: "/etc/pki/tls", Name: "etc-tls"}, - {MountPath: "/var/lib/pgsql", Name: "var-pgsql"}, - {MountPath: "/etc/rhn", Name: "etc-rhn"}, + {MountPath: "/etc/pki/tls", Name: "etc-tls", Size: "1Mi"}, + VarPgsqlVolumeMount, + EtcRhnVolumeMount, {MountPath: "/etc/pki/spacewalk-tls", Name: "tls-key"}, } -// PgsqlRequiredVolumes represents volumes used by PostgreSQL. -var PgsqlRequiredVolumes = []types.Volume{ - {Name: "etc-tls", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-tls"}}, - {Name: "var-pgsql", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-pgsql"}}, - {Name: "etc-rhn", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-rhn"}}, - {Name: "tls-key", - Secret: &types.Secret{ - SecretName: "uyuni-cert", Items: []types.SecretItem{ - {Key: "tls.crt", Path: "spacewalk.crt"}, - {Key: "tls.key", Path: "spacewalk.key"}, - }, - }, - }, +// etcServerVolumeMounts represents volumes mounted in /etc folder. +var etcServerVolumeMounts = []types.VolumeMount{ + {MountPath: "/etc/apache2", Name: "etc-apache2", Size: "1Mi"}, + {MountPath: "/etc/systemd/system/multi-user.target.wants", Name: "etc-systemd-multi", Size: "1Mi"}, + {MountPath: "/etc/systemd/system/sockets.target.wants", Name: "etc-systemd-sockets", Size: "1Mi"}, + {MountPath: "/etc/salt", Name: "etc-salt", Size: "1Mi"}, + {MountPath: "/etc/tomcat", Name: "etc-tomcat", Size: "1Mi"}, + {MountPath: "/etc/cobbler", Name: "etc-cobbler", Size: "1Mi"}, + {MountPath: "/etc/sysconfig", Name: "etc-sysconfig", Size: "20Mi"}, + {MountPath: "/etc/postfix", Name: "etc-postfix", Size: "1Mi"}, + {MountPath: "/etc/sssd", Name: "etc-sssd", Size: "1Mi"}, } -// EtcServerVolumeMounts represents volumes mounted in /etc folder. -var EtcServerVolumeMounts = []types.VolumeMount{ - {MountPath: "/etc/apache2", Name: "etc-apache2"}, - {MountPath: "/etc/systemd/system/multi-user.target.wants", Name: "etc-systemd-multi"}, - {MountPath: "/etc/systemd/system/sockets.target.wants", Name: "etc-systemd-sockets"}, - {MountPath: "/etc/salt", Name: "etc-salt"}, - {MountPath: "/etc/rhn", Name: "etc-rhn"}, - {MountPath: "/etc/tomcat", Name: "etc-tomcat"}, - {MountPath: "/etc/cobbler", Name: "etc-cobbler"}, - {MountPath: "/etc/sysconfig", Name: "etc-sysconfig"}, - {MountPath: "/etc/postfix", Name: "etc-postfix"}, - {MountPath: "/etc/sssd", Name: "etc-sssd"}, -} - -// EtcServerVolumes represents volumes used for configuration. 
-var EtcServerVolumes = []types.Volume{ - {Name: "etc-apache2", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-apache2"}}, - {Name: "etc-systemd-multi", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-systemd-multi"}}, - {Name: "etc-systemd-sockets", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-systemd-sockets"}}, - {Name: "etc-salt", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-salt"}}, - {Name: "etc-tomcat", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-tomcat"}}, - {Name: "etc-cobbler", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-cobbler"}}, - {Name: "etc-sysconfig", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-sysconfig"}}, - {Name: "etc-postfix", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-postfix"}}, - {Name: "etc-rhn", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-rhn"}}, - {Name: "etc-sssd", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-sssd"}}, -} - -var etcAndPgsqlVolumeMounts = append(PgsqlRequiredVolumeMounts, EtcServerVolumeMounts[:]...) -var etcAndPgsqlVolumes = append(PgsqlRequiredVolumes, EtcServerVolumes[:]...) +var etcAndPgsqlVolumeMounts = append(PgsqlRequiredVolumeMounts, etcServerVolumeMounts[:]...) // ServerVolumeMounts should match the volumes mapping from the container definition in both // the helm chart and the systemctl services definitions. var ServerVolumeMounts = append([]types.VolumeMount{ - {MountPath: "/var/lib/cobbler", Name: "var-cobbler"}, - {MountPath: "/var/lib/rhn/search", Name: "var-search"}, - {MountPath: "/var/lib/salt", Name: "var-salt"}, - {MountPath: "/var/cache", Name: "var-cache"}, - {MountPath: "/var/spacewalk", Name: "var-spacewalk"}, - {MountPath: "/var/log", Name: "var-log"}, - {MountPath: "/srv/salt", Name: "srv-salt"}, - {MountPath: "/srv/www/", Name: "srv-www"}, - {MountPath: "/srv/tftpboot", Name: "srv-tftpboot"}, - {MountPath: "/srv/formula_metadata", Name: "srv-formulametadata"}, - {MountPath: "/srv/pillar", Name: "srv-pillar"}, - {MountPath: "/srv/susemanager", Name: "srv-susemanager"}, - {MountPath: "/srv/spacewalk", Name: "srv-spacewalk"}, - {MountPath: "/root", Name: "root"}, - {MountPath: "/etc/pki/trust/anchors", Name: "ca-cert"}, + {MountPath: "/var/lib/cobbler", Name: "var-cobbler", Size: "10Mi"}, + {MountPath: "/var/lib/rhn/search", Name: "var-search", Size: "10Gi"}, + {MountPath: "/var/lib/salt", Name: "var-salt", Size: "10Mi"}, + {MountPath: "/var/cache", Name: "var-cache", Size: "10Gi"}, + {MountPath: "/var/spacewalk", Name: "var-spacewalk", Size: "100Gi"}, + {MountPath: "/var/log", Name: "var-log", Size: "2Gi"}, + {MountPath: "/srv/salt", Name: "srv-salt", Size: "10Mi"}, + {MountPath: "/srv/www/", Name: "srv-www", Size: "100Gi"}, + {MountPath: "/srv/tftpboot", Name: "srv-tftpboot", Size: "300Mi"}, + {MountPath: "/srv/formula_metadata", Name: "srv-formulametadata", Size: "10Mi"}, + {MountPath: "/srv/pillar", Name: "srv-pillar", Size: "10Mi"}, + {MountPath: "/srv/susemanager", Name: "srv-susemanager", Size: "1Mi"}, + {MountPath: "/srv/spacewalk", Name: "srv-spacewalk", Size: "10Mi"}, + RootVolumeMount, + {MountPath: "/etc/pki/trust/anchors/", Name: "ca-cert"}, }, etcAndPgsqlVolumeMounts[:]...) -// ServerVolumes match the volume with Persistent Volume Claim. 
-var ServerVolumes = append([]types.Volume{
-	{Name: "var-cobbler", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-cobbler"}},
-	{Name: "var-salt", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-salt"}},
-	{Name: "var-cache", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-cache"}},
-	{Name: "var-spacewalk", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-spacewalk"}},
-	{Name: "var-log", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-log"}},
-	{Name: "srv-salt", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-salt"}},
-	{Name: "srv-www", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-www"}},
-	{Name: "srv-tftpboot", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-tftpboot"}},
-	{Name: "srv-formulametadata", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-formulametadata"}},
-	{Name: "srv-pillar", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-pillar"}},
-	{Name: "srv-susemanager", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-susemanager"}},
-	{Name: "srv-spacewalk", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-spacewalk"}},
-	{Name: "root", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "root"}},
-	{Name: "ca-cert", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "ca-cert"}},
-}, etcAndPgsqlVolumes[:]...)
-
 // HubXmlrpcVolumeMounts represents volumes used by Hub Xmlrpc container.
 var HubXmlrpcVolumeMounts = []types.VolumeMount{
 	{MountPath: "/etc/pki/trust/anchors", Name: "ca-cert"},
diff --git a/uyuni-tools.changes.cbosdo.k8s-refactoring b/uyuni-tools.changes.cbosdo.k8s-refactoring
new file mode 100644
index 000000000..847c0c1df
--- /dev/null
+++ b/uyuni-tools.changes.cbosdo.k8s-refactoring
@@ -0,0 +1 @@
+- Stop using a helm chart to install the server
diff --git a/uyuni-tools.changes.nadvornik.hub-xmlrpc2 b/uyuni-tools.changes.nadvornik.hub-xmlrpc2
new file mode 100644
index 000000000..7202a3c2c
--- /dev/null
+++ b/uyuni-tools.changes.nadvornik.hub-xmlrpc2
@@ -0,0 +1 @@
+- Handle Hub XML-RPC during migration and upgrade and add Kubernetes support
diff --git a/uyuni-tools.spec b/uyuni-tools.spec
index 0a07f5d67..3d4b1bcfa 100644
--- a/uyuni-tools.spec
+++ b/uyuni-tools.spec
@@ -291,6 +291,13 @@ Fish command line completion support for %{name_ctl}.
 tar -zxf %{SOURCE1}
 
 %build
+%ifarch i386
+%if 0%{?debian}
+# Disable the CGO build for 32-bit Debian to avoid cross-compilation
+export CGO_ENABLED=0
+%endif
+%endif
+
 export GOFLAGS=-mod=vendor
 mkdir -p bin
 UTILS_PATH="%{provider_prefix}/shared/utils"