diff --git a/.github/workflows/backend.yaml b/.github/workflows/backend.yaml
index c1f23eb91d..7a0216af8e 100644
--- a/.github/workflows/backend.yaml
+++ b/.github/workflows/backend.yaml
@@ -155,7 +155,7 @@ jobs:
     strategy:
       fail-fast: true
       matrix:
-        flink: [ '1.14', '1.15', '1.16', '1.17', '1.18', '1.19', '1.20' ]
+        flink: [ '1.15', '1.16', '1.17', '1.18', '1.19', '1.20' ]
     runs-on: ubuntu-latest
     services:
       registry:
@@ -165,6 +165,9 @@
     steps:
       - name: Checkout
        uses: actions/checkout@v4
+      - name: Init Docker Network
+        run: |
+          docker network create -d bridge --subnet 172.28.0.0/16 --gateway 172.28.0.1 dinky_net
      - name: Download artifact
        uses: actions/download-artifact@v4
        with:
@@ -189,14 +192,22 @@
            FLINK_VERSION=${{ matrix.flink }}
          tags: |
            localhost:5000/dinky/dinky-test:flink
+      - name: Build Flink Image
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          file: ./e2e_test/docker-compose-env/FlinkDockerfile
+          # whether to docker push
+          push: true
+          build-args: |
+            FLINK_VERSION=${{ matrix.flink }}
+          tags: |
+            localhost:5000/dinky/flink:flink
      - name: Init Env Jar
        run: |
          wget -O e2e_test/docker-compose-env/dinky/mysql-connector-java-8.0.30.jar https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.30/mysql-connector-java-8.0.30.jar &&
          wget -O e2e_test/docker-compose-env/flink/flink-shaded-hadoop-2-uber-2.8.3-10.0.jar https://repo1.maven.org/maven2/org/apache/flink/flink-shaded-hadoop-2-uber/2.8.3-10.0/flink-shaded-hadoop-2-uber-2.8.3-10.0.jar &&
          wget -O e2e_test/docker-compose-env/dinky/javax.ws.rs-api-2.1.1.jar https://repo1.maven.org/maven2/javax/ws/rs/javax.ws.rs-api/2.1.1/javax.ws.rs-api-2.1.1.jar
-      - name: Init Docker Network
-        run: |
-          docker network create -d bridge dinky_net
      - name: Init Run Docker MySQL
        uses: hoverkraft-tech/compose-action@v2.0.2
        with:
@@ -209,16 +220,53 @@
        uses: hoverkraft-tech/compose-action@v2.0.2
        with:
          compose-file: ./e2e_test/docker-compose-env/hadoop/docker-compose.yml
-      - name: Replace Flink docker-compose yml
-        run: |
-          export FLINK_VERSION=${{ matrix.flink }} && envsubst < ./e2e_test/docker-compose-env/flink/docker-compose.yml > ./e2e_test/docker-compose-env/flink/docker-compose-${{ matrix.flink }}.yml
      - name: Init Run Docker Flink
        uses: hoverkraft-tech/compose-action@v2.0.2
        with:
-          compose-file: ./e2e_test/docker-compose-env/flink/docker-compose-${{ matrix.flink }}.yml
-
+          compose-file: ./e2e_test/docker-compose-env/flink/docker-compose.yml
+      # k8s env
+      - name: Init k3s
+        uses: nolar/setup-k3d-k3s@v1
+        with:
+          version: v1.25.16+k3s4
+          k3d-args: -s 1 --network dinky_net --api-port 172.28.0.1:6550
+      - name: Get k3s kube config
+        run: k3d kubeconfig get --all && mkdir ./kube && k3d kubeconfig get --all > ./kube/k3s.yaml && sed -i 's/0.0.0.0/172.28.0.1/g' ./kube/k3s.yaml
+      - name: Init k8s RBAC and namespace
+        run: |
+          kubectl create namespace dinky
+          kubectl create serviceaccount dinky -n dinky
+          kubectl create clusterrolebinding flink-role-binding-dinky --clusterrole=edit --serviceaccount=dinky:dinky
+      - name: Init k3s main images
+        run: |
+          docker exec k3d-k3s-default-server-0 crictl pull library/busybox:latest
+          docker exec k3d-k3s-default-server-0 crictl pull flink:${{ matrix.flink }}-scala_2.12-java8
+          docker pull localhost:5000/dinky/flink:flink
+          docker tag localhost:5000/dinky/flink:flink dinky/flink:flink
+          docker save -o flink.tar dinky/flink:flink
+          k3d images import ./flink.tar
+          rm -rf ./flink.tar
+      - name: Test k3s host
+        run: |
+          curl -k https://172.28.0.1:6550
      - name: Cp Flink Jar Deps
-        run: docker cp dinky:/opt/dinky/ ./dinky-release
+        run: |
+          docker cp dinky:/opt/dinky/ ./dinky-release
+          mv ./dinky-release/jar/dinky-app*.jar e2e_test/docker-compose-env/dinky/dinky-app.jar
+      - name: Run python http server
+        run: |
+          mkdir -p logs
+          ls e2e_test/docker-compose-env/dinky/
+          nohup python -m http.server -d e2e_test/docker-compose-env/dinky/ 9001 > ./logs/python_http.log &
      - name: Run Docker Python Script
        run: |
-          docker run -v ./dinky-release/extends/flink${{ matrix.flink }}:/flink/lib -v ./e2e_test/docker-compose-env/dinky/mysql-connector-java-8.0.30.jar:/flink/lib/mysql-connector-java-8.0.30.jar -v./e2e_test/docker-compose-env/flink/conf:/flink/conf -v ./dinky-release/jar:/dinky/jar -v./e2e_test/tools:/app -w /app --net dinky_net --rm --entrypoint /bin/bash python:3.9 -c 'pip install -r requirements.txt && python main.py dinky:8888'
+          docker run -v ./e2e_test/tools:/app -w /app -v ./kube:/kube -v ./e2e_test/docker-compose-env/dinky:/dinky/jar -v ./dinky-release/extends/flink${{ matrix.flink }}:/opt/flink/lib -v ./e2e_test/docker-compose-env/dinky/mysql-connector-java-8.0.30.jar:/opt/flink/lib/mysql-connector-java-8.0.30.jar --net dinky_net --rm --entrypoint /bin/bash python:3.9 -c 'pip install -r requirements.txt && python main.py dinky:8888 ${{ matrix.flink }}'
+      - name: Get k8s pods info and logs
+        if: ${{ always() }}
+        run: |
+          chmod -R 755 ./e2e_test/view_k8s_all_pod_logs.sh
+          ./e2e_test/view_k8s_all_pod_logs.sh dinky
+      - name: Get Python HttpServer log
+        if: ${{ always() }}
+        run: |
+          cat ./logs/python_http.log
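The fixed 172.28.0.0/16 subnet exists so that containers on dinky_net can reach the k3d API server exposed at the gateway address 172.28.0.1:6550. The sketch below is only an illustrative Python equivalent of the "Test k3s host" step (curl -k); the address and port are the ones hard-coded in the workflow above, not new configuration.

```python
# Minimal reachability probe, equivalent in spirit to `curl -k https://172.28.0.1:6550`.
import ssl
import urllib.error
import urllib.request


def probe_k3s(host: str = "172.28.0.1", port: int = 6550) -> int:
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE  # same effect as curl -k
    try:
        with urllib.request.urlopen(f"https://{host}:{port}", context=ctx) as resp:
            return resp.status
    except urllib.error.HTTPError as e:
        return e.code  # even 401/403 proves the API server is reachable


if __name__ == "__main__":
    print(probe_k3s())
```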
diff --git a/.github/workflows/docker_build.yaml b/.github/workflows/docker_build.yaml
index 0333ed3dd1..d1ee09b57e 100644
--- a/.github/workflows/docker_build.yaml
+++ b/.github/workflows/docker_build.yaml
@@ -23,14 +23,98 @@ on:
       dinky_version:
         description: 'dinky version'
         required: true
-      docker_space:
-        description: 'docker space(eg: dinky)'
-        required: true
 
 jobs:
-  build_releases:
-    name: build releases
+  build_front:
+    name: Build_NPM
     runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dorny/paths-filter@v2
+        id: filter
+        with:
+          filters: |
+            frontend:
+              - 'dinky-web/**'
+      - uses: actions/setup-node@v3
+        with:
+          node-version: 16
+      - name: Get npm cache directory
+        id: npm-cache-dir
+        run: |
+          echo "::set-output name=dir::$(npm config get cache)"
+      - uses: actions/cache@v3
+        id: npm-cache # use this to check for `cache-hit` ==> if: steps.npm-cache.outputs.cache-hit != 'true'
+        with:
+          path: |
+            ${{ steps.npm-cache-dir.outputs.dir }}
+            dinky-web/dist
+          key: ${{ runner.os }}-node-${{ hashFiles('dinky-web/**/package.json') }}
+          restore-keys: |
+            ${{ runner.os }}-node-
+      - name: Install Dependencies
+        run: cd dinky-web && npm install --no-audit --progress=false --legacy-peer-deps
+      - name: Npm Web Build
+        run: cd dinky-web && npm run build
+      - name: Upload artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: dinky-web
+          path: ./dinky-web/dist
+
+
+  build_release:
+    name: Build Release
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: true
+      matrix:
+        flink: [ '1.14', '1.15', '1.16', '1.17', '1.18', '1.19', '1.20' ]
+    env:
+      MAVEN_OPTS: -Xmx2G -Xms2G
+    steps:
+      - uses: actions/checkout@v3
+      # Maven build
+      - name: Set up JDK
+        uses: actions/setup-java@v2
+        with:
+          java-version: 8
+          distribution: 'adopt'
+      - name: Cache local Maven repository
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.m2/repository/*/*/*
+            !~/.m2/repository/org/apache/flink
+          key: ${{ runner.os }}-maven-${{ hashFiles('pom.xml') }}
+          restore-keys: |
+            ${{ runner.os }}-maven-
+      - name: Cache local Flink repository
+        uses: actions/cache@v3
+        with:
+          path: ~/.m2/repository/org/apache/flink
+          key: ${{ runner.os }}-${{ matrix.flink }}-maven-${{ hashFiles('pom.xml') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.flink }}-maven-
+      - name: Build and Package
+        run: |
+          ./mvnw -B clean install \
+               -Dmaven.test.skip=true \
+               -Dspotless.check.skip=true \
+               -Denforcer.skip=false \
+               -Dmaven.javadoc.skip=true \
+               -P prod,flink-single-version,flink-${{ matrix.flink }},maven-central \
+               --no-snapshot-updates
+      - name: Upload artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: dinky-release-${{ matrix.flink }}
+          path: ./build/dinky-release*.tar.gz
+
+  build_image:
+    name: build image
+    runs-on: ubuntu-latest
+    needs: [build_front,build_release]
     strategy:
       fail-fast: true
       matrix:
@@ -39,6 +123,18 @@ jobs:
       # git checkout the code
       - name: Checkout
         uses: actions/checkout@v4
+      - name: Download backend artifact
+        uses: actions/download-artifact@v4
+        with:
+          name: dinky-release-${{ matrix.flink }}
+          path: ./build
+      - name: Download front artifact
+        uses: actions/download-artifact@v4
+        with:
+          name: dinky-web
+          path: ./build/dist
+      - run: |
+          tree ./build
       # Set up QEMU; docker buildx below depends on it.
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
@@ -54,16 +150,28 @@
          # DOCKERHUB_TOKEN: created under Docker Hub => Account Setting => Security.
          username: ${{ secrets.DOCKER_IO_USER }}
          password: ${{ secrets.DOCKER_IO_PASS }}
+      # Log in to the Aliyun docker registry
+      - name: Login to Aliyun Docker
+        uses: docker/login-action@v3
+        with:
+          registry: registry.cn-hangzhou.aliyuncs.com
+          username: ${{ secrets.DOCKER_ALIYUN_USER }}
+          password: ${{ secrets.DOCKER_ALIYUN_PASS }}
      # Build the Docker image and push it to Docker Hub
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v5
        with:
+          platforms: linux/amd64,linux/arm64
          file: ./deploy/docker/Dockerfile
          # whether to docker push
          push: true
+          context: .
          build-args: |
            FLINK_VERSION=${{ matrix.flink }}
            DINKY_VERSION=${{ inputs.dinky_version }}
          tags: |
-            ${{inputs.docker_space}}/dinky-standalone-server:${{ inputs.dinky_version }}-flink${{ matrix.flink }}
+            dinkydocker/dinky-standalone-server:${{ inputs.dinky_version }}-flink${{ matrix.flink }}
+            registry.cn-hangzhou.aliyuncs.com/dinky/dinky-standalone-server:${{ inputs.dinky_version }}-flink${{ matrix.flink }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile
index d51347236c..370b929583 100755
--- a/deploy/docker/Dockerfile
+++ b/deploy/docker/Dockerfile
@@ -1,52 +1,22 @@
 ARG FLINK_VERSION
 ARG DINKY_VERSION
-FROM flink:${FLINK_VERSION}-scala_2.12-java8 as flink-base
-
-FROM node:18.15.0-alpine3.17 AS ui-build
-WORKDIR /build/
-
-ENV NODE_OPTIONS=--openssl-legacy-provider
-ENV UMI_ENV=production
-
-# package.json is added separately so dependency installation can make maximum use of the cache
-ADD ./dinky-web/package.json /build/package.json
-RUN npm install --legacy-peer-deps
-ADD ./dinky-web .
-RUN npm run build
-
-FROM maven:3.9-eclipse-temurin-8-alpine AS build
-WORKDIR /build/
-ARG FLINK_VERSION
-ARG DINKY_VERSION
-ENV FLINK_VERSION=${FLINK_VERSION}
-ENV DINKY_VERSION=${DINKY_VERSION}
-
-ADD . .
-COPY --from=ui-build /build/dist/ /build/dinky-web/dist/
-
-RUN mvn package -Dmaven.test.skip=true -P prod,flink-single-version,flink-${FLINK_VERSION},fast
-RUN mkdir release && \
-    tar -C release -xvf build/dinky-release-${FLINK_VERSION}-*.tar.gz && \
-    mv release/dinky-release-* release/dinky
-
-
-FROM eclipse-temurin:8-jre-jammy
-
+FROM flink:${FLINK_VERSION}-scala_2.12-java8
+RUN rm -f /opt/flink/lib/flink-table-planner-loader*.jar && cp /opt/flink/opt/flink-python*.jar /opt/flink/lib/ && cp /opt/flink/opt/flink-table-planner*.jar /opt/flink/lib/ 2>/dev/null || :
 ARG FLINK_VERSION
 ENV FLINK_VERSION=${FLINK_VERSION}
 ENV DINKY_HOME=/opt/dinky/
 ENV H2_DB=./tmp/db/h2
-WORKDIR /opt/dinky/
 USER root
-COPY --from=build /build/release/dinky /opt/dinky/
-COPY --from=flink-base /opt/flink/lib/*.jar /opt/dinky/extends/flink${FLINK_VERSION}/flink/
-RUN rm -f /opt/dinky/extends/flink${FLINK_VERSION}/flink/flink-table-planner-loader*.jar
+ADD build/dinky-release*.tar.gz /opt
+RUN mv /opt/dinky-release* /opt/dinky && ln -s /opt/flink/lib/* /opt/dinky/extends/flink${FLINK_VERSION}/ && mkdir /opt/dinky/config/static
+ADD build/dist/ /opt/dinky/config/static
+
+WORKDIR /opt/dinky/
-COPY --from=flink-base /opt/flink/opt/flink-table-planner*.jar /opt/dinky/extends/flink${FLINK_VERSION}/flink/
 
 RUN mkdir /opt/dinky/customJar && chmod -R 777 /opt/dinky/ && sed -i 's/-Xms512M -Xmx2048M -XX:PermSize=512M/-XX:+UseContainerSupport -XX:InitialRAMPercentage=70.0 -XX:MaxRAMPercentage=70.0/g' ./bin/auto.sh
diff --git a/dinky-admin/src/main/java/org/dinky/interceptor/PostgreSQLPrepareInterceptor.java b/dinky-admin/src/main/java/org/dinky/interceptor/PostgreSQLPrepareInterceptor.java
index 1ac29a1362..8f22cbdfc7 100644
--- a/dinky-admin/src/main/java/org/dinky/interceptor/PostgreSQLPrepareInterceptor.java
+++ b/dinky-admin/src/main/java/org/dinky/interceptor/PostgreSQLPrepareInterceptor.java
@@ -44,7 +44,12 @@ public Object intercept(final Invocation invocation) throws Throwable {
         BoundSql boundSql = statementHandler.getBoundSql();
         Field field = boundSql.getClass().getDeclaredField("sql");
         field.setAccessible(true);
-        field.set(boundSql, boundSql.getSql().replace("`", "\"").toLowerCase());
+        field.set(
+                boundSql,
+                boundSql.getSql()
+                        .replace("`", "\"")
+                        .replace("concat('%', ?, '%')", "concat('%', ?::text, '%')")
+                        .toLowerCase());
         return invocation.proceed();
     }
 
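The interceptor change above matters because PostgreSQL cannot infer the type of a bare `?` placeholder inside `concat()`, so MySQL-flavoured fuzzy-search SQL fails until the parameter carries an explicit `::text` cast. A minimal Python sketch of the same string rewrite (illustrative only, not the MyBatis interceptor itself):

```python
# Rewrite MySQL-style SQL so it also runs on PostgreSQL: backticks become double
# quotes and the LIKE-pattern parameter gets an explicit ::text cast.
def to_postgres(sql: str) -> str:
    return (
        sql.replace("`", '"')
           .replace("concat('%', ?, '%')", "concat('%', ?::text, '%')")
           .lower()
    )


print(to_postgres("SELECT * FROM `dinky_task` WHERE name LIKE concat('%', ?, '%')"))
# select * from "dinky_task" where name like concat('%', ?::text, '%')
```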
diff --git a/dinky-admin/src/main/java/org/dinky/service/impl/JobInstanceServiceImpl.java b/dinky-admin/src/main/java/org/dinky/service/impl/JobInstanceServiceImpl.java
index f06adb2a21..236942209e 100644
--- a/dinky-admin/src/main/java/org/dinky/service/impl/JobInstanceServiceImpl.java
+++ b/dinky-admin/src/main/java/org/dinky/service/impl/JobInstanceServiceImpl.java
@@ -41,7 +41,6 @@
 import org.dinky.data.model.mapping.ClusterInstanceMapping;
 import org.dinky.data.result.ProTableResult;
 import org.dinky.data.vo.task.JobInstanceVo;
-import org.dinky.executor.ExecutorConfig;
 import org.dinky.explainer.lineage.LineageBuilder;
 import org.dinky.explainer.lineage.LineageResult;
 import org.dinky.job.FlinkJobTask;
@@ -295,7 +294,7 @@ public void refreshJobByTaskIds(Integer... taskIds) {
     @Override
     public LineageResult getLineage(Integer id) {
         History history = getJobInfoDetail(id).getHistory();
-        return LineageBuilder.getColumnLineageByLogicalPlan(history.getStatement(), ExecutorConfig.DEFAULT);
+        return LineageBuilder.getColumnLineageByLogicalPlan(history.getStatement(), history.getConfigJson());
     }
 
     @Override
diff --git a/dinky-admin/src/main/java/org/dinky/service/impl/TaskServiceImpl.java b/dinky-admin/src/main/java/org/dinky/service/impl/TaskServiceImpl.java
index 652523a30d..8fa5e3dbe1 100644
--- a/dinky-admin/src/main/java/org/dinky/service/impl/TaskServiceImpl.java
+++ b/dinky-admin/src/main/java/org/dinky/service/impl/TaskServiceImpl.java
@@ -1101,10 +1101,10 @@ public List getUserTasks(Integer userId) {
     private Boolean hasTaskOperatePermission(Integer firstLevelOwner, List secondLevelOwners) {
         boolean isFirstLevelOwner = firstLevelOwner != null && firstLevelOwner == StpUtil.getLoginIdAsInt();
         if (TaskOwnerLockStrategyEnum.OWNER.equals(
-                SystemConfiguration.getInstances().getTaskOwnerLockStrategy())) {
+                SystemConfiguration.getInstances().getTaskOwnerLockStrategy().getValue())) {
             return isFirstLevelOwner;
         } else if (TaskOwnerLockStrategyEnum.OWNER_AND_MAINTAINER.equals(
-                SystemConfiguration.getInstances().getTaskOwnerLockStrategy())) {
+                SystemConfiguration.getInstances().getTaskOwnerLockStrategy().getValue())) {
             return isFirstLevelOwner
                     || (secondLevelOwners != null && secondLevelOwners.contains(StpUtil.getLoginIdAsInt()));
         }
diff --git a/dinky-core/src/main/java/org/dinky/api/FlinkAPI.java b/dinky-core/src/main/java/org/dinky/api/FlinkAPI.java
index 912f3f0ddd..4e7fcf0593 100644
--- a/dinky-core/src/main/java/org/dinky/api/FlinkAPI.java
+++ b/dinky-core/src/main/java/org/dinky/api/FlinkAPI.java
@@ -112,7 +112,7 @@ private String getResult(String route) {
     }
 
     private JsonNode post(String route, String body) {
-        String url = NetConstant.SLASH + route;
+        String url = address + NetConstant.SLASH + route;
         if (!address.startsWith(NetConstant.HTTP) && !address.startsWith(NetConstant.HTTPS)) {
             url = NetConstant.HTTP + url;
         }
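The FlinkAPI fix above is that POST requests were built without the JobManager address, so they never reached the cluster. A hedged Python sketch of the corrected URL construction (mirroring the Java change, not part of the PR):

```python
# The JobManager address prefixes the route; an http:// scheme is added only when
# the configured address does not already carry one.
def build_post_url(address: str, route: str) -> str:
    url = address + "/" + route
    if not address.startswith(("http://", "https://")):
        url = "http://" + url
    return url


assert build_post_url("jobmanager:8282", "jobs/overview") == "http://jobmanager:8282/jobs/overview"
assert build_post_url("https://jobmanager:8282", "jobs/overview") == "https://jobmanager:8282/jobs/overview"
```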
diff --git a/dinky-core/src/main/java/org/dinky/executor/VariableManager.java b/dinky-core/src/main/java/org/dinky/executor/VariableManager.java
index 97a3df295f..2f8b6d221c 100644
--- a/dinky-core/src/main/java/org/dinky/executor/VariableManager.java
+++ b/dinky-core/src/main/java/org/dinky/executor/VariableManager.java
@@ -134,25 +134,21 @@ public Object getVariable(String variableName) {
         checkArgument(
                 !StringUtils.isNullOrWhitespaceOnly(variableName),
                 "sql variable name or jexl key cannot be null or empty.");
-        try {
-            if (variables.containsKey(variableName)) {
-                return variables.get(variableName);
-            }
-            // load expression variable class
-            if (parseAndMatchExpressionVariable(variableName)) {
-                return ENGINE.eval(variableName, EngineContextHolder.getEngineContext(), null);
-            }
-            return null;
-        } catch (Exception e) {
-            String error = format(
-                    "The variable name or jexl key of sql \"${%s}\" does not exist.\n"
-                            + "Please follow the following methods to resolve the problem:\n"
-                            + "1. global variables are enabled ? \n"
-                            + "2. variable is exists ? it`s defined in sql ? or global variable is defined ? \n"
-                            + "3. If it is a custom function variable, please check whether the class is loaded correctly",
-                    variableName);
-            throw new BusException(error);
+        if (variables.containsKey(variableName)) {
+            return variables.get(variableName);
+        }
+        // load expression variable class
+        if (parseAndMatchExpressionVariable(variableName)) {
+            return ENGINE.eval(variableName, EngineContextHolder.getEngineContext(), null);
         }
+        String error = format(
+                "The variable name or jexl key of sql \"${%s}\" does not exist.\n"
+                        + "Please follow the following methods to resolve the problem:\n"
+                        + "1. global variables are enabled ? \n"
+                        + "2. variable is exists ? it`s defined in sql ? or global variable is defined ? \n"
+                        + "3. If it is a custom function variable, please check whether the class is loaded correctly",
+                variableName);
+        throw new BusException(error);
     }
 
     public boolean parseAndMatchExpressionVariable(String variableName) {
diff --git a/dinky-core/src/main/java/org/dinky/job/runner/JobPipelineRunner.java b/dinky-core/src/main/java/org/dinky/job/runner/JobPipelineRunner.java
index 15cbb8e689..c78c36d2d3 100644
--- a/dinky-core/src/main/java/org/dinky/job/runner/JobPipelineRunner.java
+++ b/dinky-core/src/main/java/org/dinky/job/runner/JobPipelineRunner.java
@@ -83,7 +83,6 @@ public void run(JobStatement jobStatement) throws Exception {
         } else {
             log.error(
                     "Only one pipeline job is executed. The statement has be skipped: " + jobStatement.getStatement());
-            return;
         }
     }
 
diff --git a/dinky-gateway/src/main/java/org/dinky/gateway/kubernetes/KubernetesGateway.java b/dinky-gateway/src/main/java/org/dinky/gateway/kubernetes/KubernetesGateway.java
index 2821583367..47cb9f075c 100644
--- a/dinky-gateway/src/main/java/org/dinky/gateway/kubernetes/KubernetesGateway.java
+++ b/dinky-gateway/src/main/java/org/dinky/gateway/kubernetes/KubernetesGateway.java
@@ -49,11 +49,11 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import alluxio.shaded.client.org.apache.commons.lang3.StringUtils;
 import cn.hutool.core.io.FileUtil;
 import cn.hutool.core.lang.Assert;
 import cn.hutool.core.text.StrFormatter;
 import cn.hutool.core.util.ReflectUtil;
+import cn.hutool.core.util.StrUtil;
 import io.fabric8.kubernetes.api.model.Pod;
 import lombok.Data;
 import lombok.EqualsAndHashCode;
@@ -150,7 +150,7 @@ protected void initConfig() {
     boolean isValidTaskName(String jobName) {
         String JOB_NAME_PATTERN = "^[a-z0-9][a-z0-9.-]*[a-z0-9]$";
         Pattern pattern = Pattern.compile(JOB_NAME_PATTERN);
-        if (StringUtils.isBlank(jobName)) {
+        if (StrUtil.isBlank(jobName)) {
            return false;
        }
        Matcher matcher = pattern.matcher(jobName);
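For context on `isValidTaskName` above: Kubernetes resource names must be lowercase alphanumerics, `.` or `-`, starting and ending with an alphanumeric, which presumably is also why task.py further down switches the generated task names from `mode.name` (e.g. `KUBERNETES_APPLICATION`) to `mode.value` (e.g. `kubernetes-application`). An illustrative Python check assumed equivalent to the Java regex:

```python
# Same pattern as KubernetesGateway.JOB_NAME_PATTERN.
import re

JOB_NAME_PATTERN = re.compile(r"^[a-z0-9][a-z0-9.-]*[a-z0-9]$")


def is_valid_task_name(job_name: str) -> bool:
    return bool(job_name) and JOB_NAME_PATTERN.match(job_name) is not None


assert is_valid_task_name("flink-sql-datagen-test-kubernetes-application")
assert not is_valid_task_name("flink-sql-datagen-test-KUBERNETES_APPLICATION")  # uppercase/underscore rejected
```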
diff --git a/dinky-web/src/pages/DataStudio/CenterTabContent/SqlTask/index.tsx b/dinky-web/src/pages/DataStudio/CenterTabContent/SqlTask/index.tsx
index 50b3200eec..9a16a0554e 100644
--- a/dinky-web/src/pages/DataStudio/CenterTabContent/SqlTask/index.tsx
+++ b/dinky-web/src/pages/DataStudio/CenterTabContent/SqlTask/index.tsx
@@ -237,6 +237,7 @@ export const SqlTask = memo((props: FlinkSqlProps & any) => {
           statement: sqlConvertForm?.initSqlStatement ?? ''
         });
         setOriginStatementValue(sqlConvertForm?.initSqlStatement ?? '');
+        updateCenterTab({ ...props.tabData, params: newParams });
         if (params?.statement && params?.statement !== sqlConvertForm?.initSqlStatement) {
           setDiff([
             { key: 'statement', server: sqlConvertForm?.initSqlStatement, cache: params.statement }
diff --git a/dinky-web/src/pages/RegCenter/Document/constans.ts b/dinky-web/src/pages/RegCenter/Document/constans.ts
index bdd66fdc3b..15f36bc33c 100644
--- a/dinky-web/src/pages/RegCenter/Document/constans.ts
+++ b/dinky-web/src/pages/RegCenter/Document/constans.ts
@@ -17,7 +17,7 @@
  *
  */
 
-import { l } from "@/utils/intl";
+import { l } from '@/utils/intl';
 
 export const DOCUMENT_CATEGORY_ENUMS = {
   Variable: { text: 'Variable', value: 'Variable' },
@@ -34,31 +34,46 @@
  */
 export const DOCUMENT_TYPE_ENUMS = {
   SQL_TEMPLATE: { text: l('rc.doc.type.codeSnippetOrTemplate'), value: 'SQL_TEMPLATE' },
-  FLINK_OPTIONS: { text: l( 'rc.doc.type.flinkParam'), value: 'FLINK_OPTIONS' },
-  FUN_UDF: { text: l( 'rc.doc.type.functionOrUDF'), value: 'FUN_UDF' },
-  OTHER: { text: l( 'rc.doc.type.other'), value: 'OTHER' }
+  FLINK_OPTIONS: { text: l('rc.doc.type.flinkParam'), value: 'FLINK_OPTIONS' },
+  FUN_UDF: { text: l('rc.doc.type.functionOrUDF'), value: 'FUN_UDF' },
+  OTHER: { text: l('rc.doc.type.other'), value: 'OTHER' }
 };
 
 export const DOCUMENT_FUNCTION_TYPE_ENUMS = {
-  COMPARE_FUNCTION: { text: l( 'rc.doc.function.type.compare'), value: 'COMPARE_FUNCTION' },
-  LOGICAL_FUNCTION: { text: l( 'rc.doc.function.type.logical'), value: 'LOGICAL_FUNCTION' },
-  ARITHMETIC_FUNCTIONS: { text: l( 'rc.doc.function.type.arithmetic'), value: 'ARITHMETIC_FUNCTIONS' },
-  STRING_FUNCTIONS: { text: l( 'rc.doc.function.type.string'), value: 'STRING_FUNCTIONS' },
-  TIME_FUNCTION: { text: l( 'rc.doc.function.type.time'), value: 'TIME_FUNCTION' },
-  CONDITIONAL_FUNCTION: { text: l( 'rc.doc.function.type.conditional'), value: 'CONDITIONAL_FUNCTION' },
-  TYPE_CONVER_FUNCTION: { text: l( 'rc.doc.function.type.typeConver'), value: 'TYPE_CONVER_FUNCTION' },
-  COLLECTION_FUNCTION: { text: l( 'rc.doc.function.type.collection'), value: 'COLLECTION_FUNCTION' },
+  COMPARE_FUNCTION: { text: l('rc.doc.function.type.compare'), value: 'COMPARE_FUNCTION' },
+  LOGICAL_FUNCTION: { text: l('rc.doc.function.type.logical'), value: 'LOGICAL_FUNCTION' },
+  ARITHMETIC_FUNCTIONS: {
+    text: l('rc.doc.function.type.arithmetic'),
+    value: 'ARITHMETIC_FUNCTIONS'
+  },
+  STRING_FUNCTIONS: { text: l('rc.doc.function.type.string'), value: 'STRING_FUNCTIONS' },
+  TIME_FUNCTION: { text: l('rc.doc.function.type.time'), value: 'TIME_FUNCTION' },
+  CONDITIONAL_FUNCTION: {
+    text: l('rc.doc.function.type.conditional'),
+    value: 'CONDITIONAL_FUNCTION'
+  },
+  TYPE_CONVER_FUNCTION: {
+    text: l('rc.doc.function.type.typeConver'),
+    value: 'TYPE_CONVER_FUNCTION'
+  },
+  COLLECTION_FUNCTION: { text: l('rc.doc.function.type.collection'), value: 'COLLECTION_FUNCTION' },
   VALUE_CONSTRUCTION_FUNCTION: {
-    text: l( 'rc.doc.function.type.valueConstruction'),
+    text: l('rc.doc.function.type.valueConstruction'),
     value: 'VALUE_CONSTRUCTION_FUNCTION Construction函数'
   },
-  VALUE_ACCESS_FUNCTION: { text: l( 'rc.doc.function.type.valueAccess'), value: 'VALUE_ACCESS_FUNCTION' },
-  GROUP_FUNCTION: { text:l( 'rc.doc.function.type.group'), value: 'GROUP_FUNCTION' },
-  HASH_FUNCTION: { text: l( 'rc.doc.function.type.hash'), value: 'HASH_FUNCTION' },
-  AGGREGATE_FUNCTION: { text:l( 'rc.doc.function.type.aggregate'), value: 'AGGREGATE_FUNCTION' },
-  COLUMN_FUNCTION: { text: l( 'rc.doc.function.type.column'), value: 'COLUMN_FUNCTION' },
-  TABLE_AGGREGATE_FUNCTION: { text: l( 'rc.doc.function.type.tableAggregate'), value: 'TABLE_AGGREGATE_FUNCTION' },
-  OTHER_FUNCTION: { text: l( 'rc.doc.function.type.other'), value: 'OTHER_FUNCTION' }
+  VALUE_ACCESS_FUNCTION: {
+    text: l('rc.doc.function.type.valueAccess'),
+    value: 'VALUE_ACCESS_FUNCTION'
+  },
+  GROUP_FUNCTION: { text: l('rc.doc.function.type.group'), value: 'GROUP_FUNCTION' },
+  HASH_FUNCTION: { text: l('rc.doc.function.type.hash'), value: 'HASH_FUNCTION' },
+  AGGREGATE_FUNCTION: { text: l('rc.doc.function.type.aggregate'), value: 'AGGREGATE_FUNCTION' },
+  COLUMN_FUNCTION: { text: l('rc.doc.function.type.column'), value: 'COLUMN_FUNCTION' },
+  TABLE_AGGREGATE_FUNCTION: {
+    text: l('rc.doc.function.type.tableAggregate'),
+    value: 'TABLE_AGGREGATE_FUNCTION'
+  },
+  OTHER_FUNCTION: { text: l('rc.doc.function.type.other'), value: 'OTHER_FUNCTION' }
 };
 
 /**
diff --git a/e2e_test/docker-compose-env/FlinkDockerfile b/e2e_test/docker-compose-env/FlinkDockerfile
new file mode 100644
index 0000000000..658b71054e
--- /dev/null
+++ b/e2e_test/docker-compose-env/FlinkDockerfile
@@ -0,0 +1,5 @@
+
+ARG FLINK_VERSION
+
+FROM flink:${FLINK_VERSION}-scala_2.12-java8 as flink-base
+RUN rm -f /opt/flink/lib/flink-table-planner-loader*.jar && cp /opt/flink/opt/flink-python*.jar /opt/flink/lib/ && cp /opt/flink/opt/flink-table-planner*.jar /opt/flink/lib/ 2>/dev/null || :
diff --git a/e2e_test/docker-compose-env/dinky/docker-compose.yml b/e2e_test/docker-compose-env/dinky/docker-compose.yml
index 5de79aa65a..7ac7eef790 100644
--- a/e2e_test/docker-compose-env/dinky/docker-compose.yml
+++ b/e2e_test/docker-compose-env/dinky/docker-compose.yml
@@ -17,7 +17,7 @@ services:
       - ./mysql-connector-java-8.0.30.jar:/opt/dinky/lib/mysql-connector-java-8.0.30.jar
       - ./javax.ws.rs-api-2.1.1.jar:/opt/dinky/lib/javax.ws.rs-api-2.1.1.jar
       - ../flink/flink-shaded-hadoop-2-uber-2.8.3-10.0.jar:/opt/dinky/lib/flink-shaded-hadoop-2-uber-2.8.3-10.0.jar
-      - ../flink/conf/log4j-console.properties:/flink/conf/log4j-console.properties
-      - ../hadoop:/flink/conf
+      - ../flink/conf/log4j-console.properties:/opt/flink/conf/log4j-console.properties
+      - ../hadoop:/opt/flink/conf
     networks:
       - dinky_net
diff --git a/e2e_test/docker-compose-env/flink/docker-compose.yml b/e2e_test/docker-compose-env/flink/docker-compose.yml
index cd300ec22a..c04c7b66e6 100644
--- a/e2e_test/docker-compose-env/flink/docker-compose.yml
+++ b/e2e_test/docker-compose-env/flink/docker-compose.yml
@@ -7,10 +7,14 @@ services:
     hostname: jobmanager
     container_name: jobmanager
     restart: always
-    image: flink:${FLINK_VERSION}-scala_2.12-java8
+    image: localhost:5000/dinky/flink:flink
     command: jobmanager
     environment:
       - HADOOP_CONF_DIR=/opt/flink/conf
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: jobmanager
+        parallelism.default: 1
     volumes:
       - ./conf:/opt/flink/conf
       - ./flink-shaded-hadoop-2-uber-2.8.3-10.0.jar:/opt/flink/lib/flink-shaded-hadoop-2-uber-2.8.3-10.0.jar
@@ -19,10 +23,15 @@ services:
   taskmanager:
     hostname: taskmanager
     container_name: taskmanager
-    image: flink:${FLINK_VERSION}-scala_2.12-java8
+    image: localhost:5000/dinky/flink:flink
     command: taskmanager
     environment:
       - HADOOP_CONF_DIR=/opt/flink/conf
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: jobmanager
+        taskmanager.numberOfTaskSlots: 20
+        parallelism.default: 1
     volumes:
       - ./conf:/opt/flink/conf
       - ./flink-shaded-hadoop-2-uber-2.8.3-10.0.jar:/opt/flink/lib/flink-shaded-hadoop-2-uber-2.8.3-10.0.jar
diff --git a/e2e_test/docker-compose-env/hadoop/hadoop.env b/e2e_test/docker-compose-env/hadoop/hadoop.env
index fe34f877a4..74998c92c1 100644
--- a/e2e_test/docker-compose-env/hadoop/hadoop.env
+++ b/e2e_test/docker-compose-env/hadoop/hadoop.env
@@ -26,8 +26,8 @@ YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
 YARN_CONF_yarn_timeline___service_hostname=historyserver
 YARN_CONF_mapreduce_map_output_compress=true
 YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
-YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
-YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
+YARN_CONF_yarn_nodemanager_resource_memory___mb=163840
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=80
 YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
 YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
 YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
@@ -40,4 +40,4 @@ MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m
 MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m
 MAPRED_CONF_yarn_app_mapreduce_am_env=HADOOP_MAPRED_HOME=/data/docker-compose/hadoop-3.2.1/
 MAPRED_CONF_mapreduce_map_env=HADOOP_MAPRED_HOME=/data/docker-compose/hadoop-3.2.1/
-MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/data/docker-compose/hadoop-3.2.1/
\ No newline at end of file
+MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/data/docker-compose/hadoop-3.2.1/
diff --git a/e2e_test/tools/config.py b/e2e_test/tools/config.py
index c006cd4d9c..45ca9935b1 100644
--- a/e2e_test/tools/config.py
+++ b/e2e_test/tools/config.py
@@ -1,9 +1,63 @@
+import sys
+from logger import log
+
+dinky_addr = sys.argv[1]
+flink_version = sys.argv[2]
+dinky_app_jar = 'dinky-app.jar'
+
 # standalone
 standalone_address = "jobmanager:8282"
 
 # yarn
-yarn_flink_lib="/flink/lib"
-yarn_flink_conf="/flink/conf"
-yarn_hadoop_conf="/flink/conf"
-yarn_dinky_app_jar="/dinky/jar"
+yarn_flink_lib = "/opt/flink/lib"
+yarn_flink_conf = "/opt/flink/conf"
+yarn_hadoop_conf = "/opt/flink/conf"
+yarn_dinky_app_jar = "/dinky/jar"
+
+podTemplate="""
+apiVersion: v1
+kind: Pod
+metadata:
+  name: jobmanager-pod-template
+spec:
+  initContainers:
+    - name: artifacts-fetcher-dinky
+      image: library/busybox:latest
+      imagePullPolicy: Never
+      # Use wget or other tools to get user jars from remote storage
+      command: [ 'wget', 'http://172.28.0.1:9001/dinky-app.jar', '-O', '/flink-usrlib/dinky-app.jar' ]
+      volumeMounts:
+        - mountPath: /flink-usrlib
+          name: flink-usrlib
+    - name: artifacts-fetcher-mysql
+      image: library/busybox:latest
+      imagePullPolicy: Never
+      # Use wget or other tools to get user jars from remote storage
+      command: [ 'wget', 'http://172.28.0.1:9001/mysql-connector-java-8.0.30.jar', '-O', '/flink-usrlib/mysql-connector-java-8.0.30.jar' ]
+      volumeMounts:
+        - mountPath: /flink-usrlib
+          name: flink-usrlib
+
+  containers:
+    # Do not change the main container name
+    - name: flink-main-container
+      resources:
+        requests:
+          ephemeral-storage: 2048Mi
+        limits:
+          ephemeral-storage: 2048Mi
+      volumeMounts:
+        - mountPath: /opt/flink/usrlib
+          name: flink-usrlib
+  volumes:
+    - name: flink-usrlib
+      emptyDir: { }
+
+"""
+log.info(f"""
+====================================================
+    all config dinky address: {dinky_addr}
+    flink version: {flink_version}
+====================================================
+""")
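The initContainers in the pod template above expect dinky-app.jar and the MySQL driver to be downloadable from the workflow's ad-hoc HTTP server (python -m http.server on 172.28.0.1:9001). A hedged pre-flight check one could run before the Kubernetes tests, using only those names and the address already hard-coded above:

```python
# HEAD each artifact the pod template fetches, to confirm the HTTP server is up.
import urllib.request

ARTIFACTS = ("dinky-app.jar", "mysql-connector-java-8.0.30.jar")


def check_artifacts(base: str = "http://172.28.0.1:9001") -> None:
    for name in ARTIFACTS:
        req = urllib.request.Request(f"{base}/{name}", method="HEAD")
        with urllib.request.urlopen(req, timeout=10) as resp:
            print(name, resp.status, resp.headers.get("Content-Length"))


if __name__ == "__main__":
    check_artifacts()
```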
diff --git a/e2e_test/tools/env.py b/e2e_test/tools/env.py
index d68e663a8a..0ce58b4f6c 100644
--- a/e2e_test/tools/env.py
+++ b/e2e_test/tools/env.py
@@ -1,3 +1,5 @@
+from typing import Optional
+
 from requests import Session
 import urllib.parse as urlparse
 from hdfs.client import Client
@@ -29,7 +31,20 @@ def addStandaloneCluster(session: Session) -> int:
     raise Exception(f"Cluster {name} not found")
 
 
-def addYarnCluster(session: Session) -> int:
+def addApplicationCluster(session: Session, params: dict) -> Optional[int]:
+    name = params['name']
+    test_connection_yarn_resp = session.post(url("api/clusterConfiguration/testConnect"), json=params)
+    assertRespOk(test_connection_yarn_resp, "Test yarn connectivity")
+    test_connection_yarn_resp = session.put(url("api/clusterConfiguration/saveOrUpdate"), json=params)
+    assertRespOk(test_connection_yarn_resp, "Add Yarn Application Cluster")
+    get_app_list = session.get(url(f"api/clusterConfiguration/list?keyword={name}"), json=params)
+    assertRespOk(get_app_list, "Get Yarn Application Cluster")
+    for data in get_app_list.json()["data"]:
+        if data["name"] == name:
+            return data['id']
+
+
+def addYarnCluster(session: Session) -> Optional[int]:
     client = Client("http://namenode:9870")
     flink_lib_path = yarn_flink_lib
     client.makedirs(flink_lib_path)
@@ -38,14 +53,9 @@ def addYarnCluster(session: Session) -> int:
         for file in files:
             filepath = os.path.join(root, file)
             client.upload(flink_lib_path + "/" + file, filepath)
-    jar_path = yarn_dinky_app_jar
-    client.makedirs(jar_path)
-    for root, dirs, files in os.walk(jar_path):
-        for file in files:
-            if file.endswith(".jar") and file.__contains__("dinky-app"):
-                filepath = os.path.join(root, file)
-                jar_path = filepath
-                client.upload(jar_path, filepath)
+    client.makedirs(yarn_dinky_app_jar)
+    dinky_app_hdfs_jar_path = yarn_dinky_app_jar + "/" + dinky_app_jar
+    client.upload(dinky_app_hdfs_jar_path, dinky_app_hdfs_jar_path)
     name = "yarn-test"
     params = {
         "type": "yarn-application",
@@ -67,17 +77,63 @@
             }
         },
         "appConfig": {
-            "userJarPath": "hdfs://" + jar_path
+            "userJarPath": "hdfs://" + dinky_app_hdfs_jar_path
         }
     }
     }
     log.info(f"Adding yarn application cluster, parameters:{params}")
-    test_connection_yarn_resp = session.post(url("api/clusterConfiguration/testConnect"), json=params)
-    assertRespOk(test_connection_yarn_resp, "Test yarn connectivity")
-    test_connection_yarn_resp = session.put(url("api/clusterConfiguration/saveOrUpdate"), json=params)
-    assertRespOk(test_connection_yarn_resp, "Add Yarn Application Cluster")
-    get_app_list = session.get(url(f"api/clusterConfiguration/list?keyword={name}"), json=params)
-    assertRespOk(get_app_list, "Get Yarn Application Cluster")
-    for data in get_app_list.json()["data"]:
-        if data["name"] == name:
-            return data['id']
+    return addApplicationCluster(session, params)
+
+
+def addK8sNativeCluster(session: Session) -> Optional[int]:
+    with open('/kube/k3s.yaml', 'r') as f:
+        kube_config_content = f.read()
+    name = "k8s-native-test"
+    params = {
+        "type": "kubernetes-application",
+        "name": name,
+        "enabled": True,
+        "config": {
+            "kubernetesConfig": {
+                "configuration": {
+                    "kubernetes.rest-service.exposed.type": "NodePort",
+                    "kubernetes.namespace": "dinky",
+                    "kubernetes.service-account": "dinky",
+                    "kubernetes.container.image": f"dinky/flink:flink"
+                },
+                "ingressConfig": {
+                    "kubernetes.ingress.enabled": False
+                },
+                "kubeConfig": kube_config_content,
+                "podTemplate": podTemplate,
+            },
+            "clusterConfig": {
+                "flinkConfigPath": "/opt/flink/conf"
+            },
+            "flinkConfig": {
+                "flinkConfigList": [
+                    {
+                        "name": "user.artifacts.raw-http-enabled",
+                        "value": "true"
+                    },
+                    {
+                        "name": "kubernetes.flink.conf.dir",
+                        "value": "/opt/flink/conf"
+                    },
+                    {
+                        "name": "kubernetes.container.image.pull-policy",
+                        "value": "Never"
+                    }
+                ],
+                "configuration": {
+                    "jobmanager.memory.process.size": "1024mb",
+                    "taskmanager.memory.process.size": "1024mb"
+                }
+            },
+            "appConfig": {
+                "userJarPath": "local:/opt/flink/usrlib/" + dinky_app_jar,
+            }
+        }
+    }
+    log.info(f"Adding k8s native application cluster, parameters:{params}")
+    return addApplicationCluster(session, params)
diff --git a/e2e_test/tools/httpUtil.py b/e2e_test/tools/httpUtil.py
index d491a3a462..a4c1f60203 100644
--- a/e2e_test/tools/httpUtil.py
+++ b/e2e_test/tools/httpUtil.py
@@ -1,10 +1,8 @@
-import sys
-from logger import log
-from requests import Response
 from json import JSONDecodeError
 
-dinky_addr = sys.argv[1]
-log.info(f"The address of the current request:{dinky_addr}")
+from requests import Response
+
+from config import dinky_addr
 
 
 def url(path: str):
diff --git a/e2e_test/tools/main.py b/e2e_test/tools/main.py
index 5f18e656d7..80c5f3a2f4 100644
--- a/e2e_test/tools/main.py
+++ b/e2e_test/tools/main.py
@@ -1,6 +1,6 @@
 import requests
 
-from env import addStandaloneCluster, addYarnCluster
+from env import addStandaloneCluster, addYarnCluster, addK8sNativeCluster
 from login import login
 from task import addCatalogue, Task
 
@@ -9,7 +9,8 @@
     login(session)
     clusterId = addStandaloneCluster(session)
     yarn_cluster_id = addYarnCluster(session)
+    k8s_native_cluster_id = addK8sNativeCluster(session)
     catalogue = addCatalogue(session, "flink-sql-task")
     sql = "DROP TABLE IF EXISTS source_table3;\r\nCREATE TABLE IF NOT EXISTS source_table3(\r\n--订单id\r\n`order_id` BIGINT,\r\n--产品\r\n\r\n`product` BIGINT,\r\n--金额\r\n`amount` BIGINT,\r\n\r\n--支付时间\r\n`order_time` as CAST(CURRENT_TIMESTAMP AS TIMESTAMP(3)), -- `在这里插入代码片`\r\n--WATERMARK\r\nWATERMARK FOR order_time AS order_time - INTERVAL '2' SECOND\r\n) WITH(\r\n'connector' = 'datagen',\r\n 'rows-per-second' = '1',\r\n 'fields.order_id.min' = '1',\r\n 'fields.order_id.max' = '2',\r\n 'fields.amount.min' = '1',\r\n 'fields.amount.max' = '10',\r\n 'fields.product.min' = '1',\r\n 'fields.product.max' = '2'\r\n);\r\n\r\n-- SELECT * FROM source_table3 LIMIT 10;\r\n\r\nDROP TABLE IF EXISTS sink_table5;\r\nCREATE TABLE IF NOT EXISTS sink_table5(\r\n--产品\r\n`product` BIGINT,\r\n--金额\r\n`amount` BIGINT,\r\n--支付时间\r\n`order_time` TIMESTAMP(3),\r\n--1分钟时间聚合总数\r\n`one_minute_sum` BIGINT\r\n) WITH(\r\n'connector'='print'\r\n);\r\n\r\nINSERT INTO sink_table5\r\nSELECT\r\nproduct,\r\namount,\r\norder_time,\r\nSUM(amount) OVER(\r\nPARTITION BY product\r\nORDER BY order_time\r\n-- 标识统计范围是1个 product 的最近 1 分钟的数据\r\nRANGE BETWEEN INTERVAL '1' MINUTE PRECEDING AND CURRENT ROW\r\n) as one_minute_sum\r\nFROM source_table3;"
-    flink_sql_datagen_test = Task(session, clusterId, yarn_cluster_id, catalogue.id, "flink-sql-datagen-test", sql)
+    flink_sql_datagen_test = Task(session, clusterId, yarn_cluster_id,k8s_native_cluster_id, catalogue.id, "flink-sql-datagen-test", sql)
     flink_sql_datagen_test.runFlinkTask(wait_time=10, is_async=True)
diff --git a/e2e_test/tools/task.py b/e2e_test/tools/task.py
index 07b52db7b3..b58d063030 100644
--- a/e2e_test/tools/task.py
+++ b/e2e_test/tools/task.py
@@ -19,18 +19,22 @@ class FlinkRunMode(Enum):
     LOCAL = "local"
     STANDALONE = "standalone"
     YARN_APPLICATION = "yarn-application"
+    KUBERNETES_APPLICATION = "kubernetes-application"
 
     @staticmethod
     def getAllMode():
-        return [FlinkRunMode.LOCAL, FlinkRunMode.STANDALONE, FlinkRunMode.YARN_APPLICATION]
+        # todo: local mode is excluded for now because the API can get stuck under concurrent execution
+        return [FlinkRunMode.STANDALONE, FlinkRunMode.YARN_APPLICATION, FlinkRunMode.KUBERNETES_APPLICATION]
 
 
 class Task:
-    def __init__(self, session: requests.Session, cluster_id: int, yarn_cluster_id: int, parent_id: int, name: str,
+    def __init__(self, session: requests.Session, cluster_id: int, yarn_cluster_id: int, k8s_native_cluster_id: int,
+                 parent_id: int, name: str,
                  statement):
         self.session = session
         self.cluster_id = cluster_id
         self.yarn_cluster_id = yarn_cluster_id
+        self.k8s_native_cluster_id = k8s_native_cluster_id
         self.parent_id = parent_id
         self.name = name
         self.statement = statement
@@ -69,6 +73,8 @@ def addTask(self, name: str, parent_id: int = 0, dialect: str = "FlinkSql",
             params["task"]["clusterId"] = self.cluster_id
         elif run_model == FlinkRunMode.YARN_APPLICATION:
             params["task"]["clusterConfigurationId"] = self.yarn_cluster_id
+        elif run_model == FlinkRunMode.KUBERNETES_APPLICATION:
+            params["task"]["clusterConfigurationId"] = self.k8s_native_cluster_id
         add_parent_dir_resp = session.put(url("api/catalogue/saveOrUpdateCatalogueAndTask"), json=params)
         assertRespOk(add_parent_dir_resp, "Create a task")
         get_all_tasks_resp = session.post(url("api/catalogue/getCatalogueTreeData"), json={
@@ -102,7 +108,7 @@ def runTask(self, taskId: int) -> int:
         assertRespOk(run_task_resp, "Run Task")
         return run_task_resp.json()['data']['jobInstanceId']
 
-    def runFlinkTask(self, modes: list[FlinkRunMode] = FlinkRunMode.getAllMode(), wait_time: int = 10,
+    def runFlinkTask(self, modes: list[FlinkRunMode] = FlinkRunMode.getAllMode(), wait_time: int = 20,
                      is_async: bool = False):
         name = self.name
         statement = self.statement
@@ -111,7 +117,7 @@ def runFlinkTask(self, modes: list[FlinkRunMode] = FlinkRunMode.getAllMode(), wa
             f"======================\nA Flink task is currently executed,name: {name}, statement: \n{statement}\n ======================")
 
         def taskFunc(mode: FlinkRunMode):
-            flink_task_name = name + "-" + mode.name
+            flink_task_name = name + "-" + mode.value
             task = self.addTask(flink_task_name, parent_id, "FlinkSql", statement, mode)
             job_instance_id = self.runTask(task.task_id)
             sleep(wait_time)
@@ -121,7 +127,7 @@ def taskFunc(mode: FlinkRunMode):
 
         if is_async:
             with concurrent.futures.ThreadPoolExecutor() as executor:
-                results = [executor.submit(taskFunc, model ) for model in modes]
+                results = [executor.submit(taskFunc, model) for model in modes]
                 for result in results:
                     result.result()
         else:
diff --git a/e2e_test/view_k8s_all_pod_logs.sh b/e2e_test/view_k8s_all_pod_logs.sh
new file mode 100644
index 0000000000..6b134a0108
--- /dev/null
+++ b/e2e_test/view_k8s_all_pod_logs.sh
@@ -0,0 +1,32 @@
+# Check that a namespace argument was provided
+if [ -z "$1" ]; then
+  echo "Usage: $0 "
+  exit 1
+fi
+
+NAMESPACE=$1
+
+# Get all pods in the given namespace
+PODS=$(kubectl get pods -n $NAMESPACE -o jsonpath='{.items[*].metadata.name}')
+
+for POD in $PODS; do
+  echo "================= Pod: $POD info ================="
+  kubectl describe pod $POD -n $NAMESPACE
+  echo "================= Init container logs ================="
+  INIT_CONTAINERS=$(kubectl get pod $POD -n $NAMESPACE -o jsonpath='{.spec.initContainers[*].name}')
+  for INIT_CONTAINER in $INIT_CONTAINERS; do
+    echo "Logs of init container: $INIT_CONTAINER"
+    kubectl logs -n $NAMESPACE $POD -c $INIT_CONTAINER --previous 2>&1
+    kubectl logs -n $NAMESPACE $POD -c $INIT_CONTAINER 2>&1
+    echo "----------------------------------------------------"
+  done
+  echo "================= Regular container logs ================="
+  CONTAINERS=$(kubectl get pod $POD -n $NAMESPACE -o jsonpath='{.spec.containers[*].name}')
+  for CONTAINER in $CONTAINERS; do
+    echo "Logs of container: $CONTAINER"
+    kubectl logs -n $NAMESPACE $POD -c $CONTAINER --previous 2>&1
+    kubectl logs -n $NAMESPACE $POD -c $CONTAINER 2>&1
+    echo "----------------------------------------------------"
+  done
+  echo "====================================================="
+done