diff --git a/README.md b/README.md index fc47b15..b57dc0f 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,6 @@ These examples showcase various applications and use cases of serverless archite - [Introduction](#introduction) - [Getting Started](#getting-started) -- [Examples](#examples) - [Contribution Guidelines](#contribution-guidelines) - [License](#license) @@ -21,17 +20,6 @@ The `serverless-workflow-examples` repository offers a wide range of examples th 2. **Navigate to Desired Example:** Check the folder structure to find the example that suits your needs. 3. **Follow Individual Example README:** Each example contains its README file with specific instructions and requirements. -### Examples - -This repository is structured into several categories for ease of navigation. Each category contains relevant serverless workflow examples: - -- [Basic Functions](./basic-functions) -- [API Integration](./api-integration) -- [Complex Workflows](./complex-workflows) -- [Custom Solutions](./custom-solutions) - -Find more details in the corresponding folders. - ### Contribution Guidelines We welcome contributions to this repository. If you have an example or an improvement, please follow our [Contribution Guidelines](./CONTRIBUTING.md). diff --git a/basic-functions/.dockerignore b/basic-functions/.dockerignore deleted file mode 100644 index 94810d0..0000000 --- a/basic-functions/.dockerignore +++ /dev/null @@ -1,5 +0,0 @@ -* -!target/*-runner -!target/*-runner.jar -!target/lib/* -!target/quarkus-app/* \ No newline at end of file diff --git a/basic-functions/.gitignore b/basic-functions/.gitignore deleted file mode 100644 index 693002a..0000000 --- a/basic-functions/.gitignore +++ /dev/null @@ -1,40 +0,0 @@ -#Maven -target/ -pom.xml.tag -pom.xml.releaseBackup -pom.xml.versionsBackup -release.properties -.flattened-pom.xml - -# Eclipse -.project -.classpath -.settings/ -bin/ - -# IntelliJ -.idea -*.ipr -*.iml -*.iws - -# NetBeans -nb-configuration.xml - -# Visual Studio Code -.vscode -.factorypath - -# OSX -.DS_Store - -# Vim -*.swp -*.swo - -# patch -*.orig -*.rej - -# Local environment -.env diff --git a/basic-functions/.mvn/wrapper/.gitignore b/basic-functions/.mvn/wrapper/.gitignore deleted file mode 100644 index e72f5e8..0000000 --- a/basic-functions/.mvn/wrapper/.gitignore +++ /dev/null @@ -1 +0,0 @@ -maven-wrapper.jar diff --git a/basic-functions/.mvn/wrapper/MavenWrapperDownloader.java b/basic-functions/.mvn/wrapper/MavenWrapperDownloader.java deleted file mode 100644 index 1708393..0000000 --- a/basic-functions/.mvn/wrapper/MavenWrapperDownloader.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import java.net.*; -import java.io.*; -import java.nio.channels.*; -import java.util.Properties; - -public class MavenWrapperDownloader -{ - private static final String WRAPPER_VERSION = "3.1.1"; - - /** - * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. - */ - private static final String DEFAULT_DOWNLOAD_URL = - "https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/" + WRAPPER_VERSION - + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; - - /** - * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to use instead of the - * default one. - */ - private static final String MAVEN_WRAPPER_PROPERTIES_PATH = ".mvn/wrapper/maven-wrapper.properties"; - - /** - * Path where the maven-wrapper.jar will be saved to. - */ - private static final String MAVEN_WRAPPER_JAR_PATH = ".mvn/wrapper/maven-wrapper.jar"; - - /** - * Name of the property which should be used to override the default download url for the wrapper. - */ - private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; - - public static void main( String args[] ) - { - System.out.println( "- Downloader started" ); - File baseDirectory = new File( args[0] ); - System.out.println( "- Using base directory: " + baseDirectory.getAbsolutePath() ); - - // If the maven-wrapper.properties exists, read it and check if it contains a custom - // wrapperUrl parameter. - File mavenWrapperPropertyFile = new File( baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH ); - String url = DEFAULT_DOWNLOAD_URL; - if ( mavenWrapperPropertyFile.exists() ) - { - FileInputStream mavenWrapperPropertyFileInputStream = null; - try - { - mavenWrapperPropertyFileInputStream = new FileInputStream( mavenWrapperPropertyFile ); - Properties mavenWrapperProperties = new Properties(); - mavenWrapperProperties.load( mavenWrapperPropertyFileInputStream ); - url = mavenWrapperProperties.getProperty( PROPERTY_NAME_WRAPPER_URL, url ); - } - catch ( IOException e ) - { - System.out.println( "- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'" ); - } - finally - { - try - { - if ( mavenWrapperPropertyFileInputStream != null ) - { - mavenWrapperPropertyFileInputStream.close(); - } - } - catch ( IOException e ) - { - // Ignore ... 
- } - } - } - System.out.println( "- Downloading from: " + url ); - - File outputFile = new File( baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH ); - if ( !outputFile.getParentFile().exists() ) - { - if ( !outputFile.getParentFile().mkdirs() ) - { - System.out.println( "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() - + "'" ); - } - } - System.out.println( "- Downloading to: " + outputFile.getAbsolutePath() ); - try - { - downloadFileFromURL( url, outputFile ); - System.out.println( "Done" ); - System.exit( 0 ); - } - catch ( Throwable e ) - { - System.out.println( "- Error downloading" ); - e.printStackTrace(); - System.exit( 1 ); - } - } - - private static void downloadFileFromURL( String urlString, File destination ) - throws Exception - { - if ( System.getenv( "MVNW_USERNAME" ) != null && System.getenv( "MVNW_PASSWORD" ) != null ) - { - String username = System.getenv( "MVNW_USERNAME" ); - char[] password = System.getenv( "MVNW_PASSWORD" ).toCharArray(); - Authenticator.setDefault( new Authenticator() - { - @Override - protected PasswordAuthentication getPasswordAuthentication() - { - return new PasswordAuthentication( username, password ); - } - } ); - } - URL website = new URL( urlString ); - ReadableByteChannel rbc; - rbc = Channels.newChannel( website.openStream() ); - FileOutputStream fos = new FileOutputStream( destination ); - fos.getChannel().transferFrom( rbc, 0, Long.MAX_VALUE ); - fos.close(); - rbc.close(); - } - -} diff --git a/basic-functions/.mvn/wrapper/maven-wrapper.properties b/basic-functions/.mvn/wrapper/maven-wrapper.properties deleted file mode 100644 index 61a2ef1..0000000 --- a/basic-functions/.mvn/wrapper/maven-wrapper.properties +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.6/apache-maven-3.8.6-bin.zip -wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.1/maven-wrapper-3.1.1.jar diff --git a/basic-functions/README.md b/basic-functions/README.md deleted file mode 100644 index 7efa1d3..0000000 --- a/basic-functions/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# Basic-functions - -This project is a basic example which uses: - -- Sample functions from Java internal -- Sample functions from the Quarkus application. -- Sample OpenAPI functions from GitHub using custom REST to be able to get the token from the user. 
-- A way to validate data using dataInputstream, so input validation can be done - -## Dependencies - -- GitHub Token with the following permissions: - - Read and Write access to code - - Read access to metadata -- Knative workflow plugin - -### Kogito - -To avoid issues with openapi, the pom.xml file is updated to use the snapshot -versions - -## How to run: - -Using knative workflow tool: - -``` -kn workflow quarkus run -``` - -And the application will run using docker in `localhost:8080`, the workflow can -be executed running the following command: - -The first time using this, it'll return some json, and a branch will be created -``` -curl \ - -H 'Content-Type:application/json' \ - -H 'Accept:application/json' \ - "http://localhost:8080/hello" \ - -d '{"github_token": "TOKEN_TO_BE_USED","branch": "newBranch", "org": "eloycoto", "repo": "dotfiles", "base_branch": "master" }' | jq . -``` - -Response: -``` -{ - "id": "9d523585-a874-406e-ae8f-010901031c8b", - "workflowdata": { - "github_token": "TOKEN_TO_BE_USED", - "branch": "newBranch", - "org": "eloycoto", - "repo": "dotfiles", - "base_branch": "master", - "message": "Hello World", - "ref": "refs/heads/newBranch", - "node_id": "MDM6UmVmOTg2NzA0MzpyZWZzL2hlYWRzL3Rlc3RUZXN0LWpvLW1lcmRh", - "url": "https://api.github.com/repos/eloycoto/dotfiles/git/refs/heads/org", - "object": { - "sha": "c4c7f4c46fff1ef11cd46ce6782f4bcbecdbf1b9", - "type": "commit", - "url": "https://api.github.com/repos/eloycoto/dotfiles/git/commits/c4c7f4c46fff1ef11cd46ce6782f4bcbecdbf1b9" - } - } -} -``` - diff --git a/basic-functions/mvnw b/basic-functions/mvnw deleted file mode 100755 index eaa3d30..0000000 --- a/basic-functions/mvnw +++ /dev/null @@ -1,316 +0,0 @@ -#!/bin/sh -# ---------------------------------------------------------------------------- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# ---------------------------------------------------------------------------- - -# ---------------------------------------------------------------------------- -# Maven Start Up Batch script -# -# Required ENV vars: -# ------------------ -# JAVA_HOME - location of a JDK home dir -# -# Optional ENV vars -# ----------------- -# M2_HOME - location of maven2's installed home dir -# MAVEN_OPTS - parameters passed to the Java VM when running Maven -# e.g. to debug Maven itself, use -# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -# MAVEN_SKIP_RC - flag to disable loading of mavenrc files -# ---------------------------------------------------------------------------- - -if [ -z "$MAVEN_SKIP_RC" ] ; then - - if [ -f /usr/local/etc/mavenrc ] ; then - . /usr/local/etc/mavenrc - fi - - if [ -f /etc/mavenrc ] ; then - . /etc/mavenrc - fi - - if [ -f "$HOME/.mavenrc" ] ; then - . 
"$HOME/.mavenrc" - fi - -fi - -# OS specific support. $var _must_ be set to either true or false. -cygwin=false; -darwin=false; -mingw=false -case "`uname`" in - CYGWIN*) cygwin=true ;; - MINGW*) mingw=true;; - Darwin*) darwin=true - # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home - # See https://developer.apple.com/library/mac/qa/qa1170/_index.html - if [ -z "$JAVA_HOME" ]; then - if [ -x "/usr/libexec/java_home" ]; then - export JAVA_HOME="`/usr/libexec/java_home`" - else - export JAVA_HOME="/Library/Java/Home" - fi - fi - ;; -esac - -if [ -z "$JAVA_HOME" ] ; then - if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=`java-config --jre-home` - fi -fi - -if [ -z "$M2_HOME" ] ; then - ## resolve links - $0 may be a link to maven's home - PRG="$0" - - # need this for relative symlinks - while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG="`dirname "$PRG"`/$link" - fi - done - - saveddir=`pwd` - - M2_HOME=`dirname "$PRG"`/.. - - # make it fully qualified - M2_HOME=`cd "$M2_HOME" && pwd` - - cd "$saveddir" - # echo Using m2 at $M2_HOME -fi - -# For Cygwin, ensure paths are in UNIX format before anything is touched -if $cygwin ; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --unix "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --unix "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --unix "$CLASSPATH"` -fi - -# For Mingw, ensure paths are in UNIX format before anything is touched -if $mingw ; then - [ -n "$M2_HOME" ] && - M2_HOME="`(cd "$M2_HOME"; pwd)`" - [ -n "$JAVA_HOME" ] && - JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" -fi - -if [ -z "$JAVA_HOME" ]; then - javaExecutable="`which javac`" - if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then - # readlink(1) is not available as standard on Solaris 10. - readLink=`which readlink` - if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then - if $darwin ; then - javaHome="`dirname \"$javaExecutable\"`" - javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" - else - javaExecutable="`readlink -f \"$javaExecutable\"`" - fi - javaHome="`dirname \"$javaExecutable\"`" - javaHome=`expr "$javaHome" : '\(.*\)/bin'` - JAVA_HOME="$javaHome" - export JAVA_HOME - fi - fi -fi - -if [ -z "$JAVACMD" ] ; then - if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - else - JAVACMD="`\\unset -f command; \\command -v java`" - fi -fi - -if [ ! -x "$JAVACMD" ] ; then - echo "Error: JAVA_HOME is not defined correctly." >&2 - echo " We cannot execute $JAVACMD" >&2 - exit 1 -fi - -if [ -z "$JAVA_HOME" ] ; then - echo "Warning: JAVA_HOME environment variable is not set." 
-fi - -CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher - -# traverses directory structure from process work directory to filesystem root -# first directory with .mvn subdirectory is considered project base directory -find_maven_basedir() { - - if [ -z "$1" ] - then - echo "Path not specified to find_maven_basedir" - return 1 - fi - - basedir="$1" - wdir="$1" - while [ "$wdir" != '/' ] ; do - if [ -d "$wdir"/.mvn ] ; then - basedir=$wdir - break - fi - # workaround for JBEAP-8937 (on Solaris 10/Sparc) - if [ -d "${wdir}" ]; then - wdir=`cd "$wdir/.."; pwd` - fi - # end of workaround - done - echo "${basedir}" -} - -# concatenates all lines of a file -concat_lines() { - if [ -f "$1" ]; then - echo "$(tr -s '\n' ' ' < "$1")" - fi -} - -BASE_DIR=`find_maven_basedir "$(pwd)"` -if [ -z "$BASE_DIR" ]; then - exit 1; -fi - -########################################################################################## -# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -# This allows using the maven wrapper in projects that prohibit checking in binary data. -########################################################################################## -if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found .mvn/wrapper/maven-wrapper.jar" - fi -else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." - fi - if [ -n "$MVNW_REPOURL" ]; then - jarUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.1.1/maven-wrapper-3.1.1.jar" - else - jarUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.1/maven-wrapper-3.1.1.jar" - fi - while IFS="=" read key value; do - case "$key" in (wrapperUrl) jarUrl="$value"; break ;; - esac - done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" - if [ "$MVNW_VERBOSE" = true ]; then - echo "Downloading from: $jarUrl" - fi - wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" - if $cygwin; then - wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` - fi - - if command -v wget > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found wget ... using wget" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - wget "$jarUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" - else - wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" - fi - elif command -v curl > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found curl ... using curl" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - curl -o "$wrapperJarPath" "$jarUrl" -f - else - curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f - fi - - else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Falling back to using Java to download" - fi - javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" - # For Cygwin, switch paths to Windows format before running javac - if $cygwin; then - javaClass=`cygpath --path --windows "$javaClass"` - fi - if [ -e "$javaClass" ]; then - if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Compiling MavenWrapperDownloader.java ..." 
- fi - # Compiling the Java class - ("$JAVA_HOME/bin/javac" "$javaClass") - fi - if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - # Running the downloader - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Running MavenWrapperDownloader.java ..." - fi - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") - fi - fi - fi -fi -########################################################################################## -# End of extension -########################################################################################## - -export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} -if [ "$MVNW_VERBOSE" = true ]; then - echo $MAVEN_PROJECTBASEDIR -fi -MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" - -# For Cygwin, switch paths to Windows format before running java -if $cygwin; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --path --windows "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --windows "$CLASSPATH"` - [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` -fi - -# Provide a "standardized" way to retrieve the CLI args that will -# work with both Windows and non-Windows executions. -MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" -export MAVEN_CMD_LINE_ARGS - -WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -exec "$JAVACMD" \ - $MAVEN_OPTS \ - $MAVEN_DEBUG_OPTS \ - -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.home=${M2_HOME}" \ - "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ - ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/basic-functions/mvnw.cmd b/basic-functions/mvnw.cmd deleted file mode 100644 index abb7c32..0000000 --- a/basic-functions/mvnw.cmd +++ /dev/null @@ -1,188 +0,0 @@ -@REM ---------------------------------------------------------------------------- -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM https://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM ---------------------------------------------------------------------------- - -@REM ---------------------------------------------------------------------------- -@REM Maven Start Up Batch script -@REM -@REM Required ENV vars: -@REM JAVA_HOME - location of a JDK home dir -@REM -@REM Optional ENV vars -@REM M2_HOME - location of maven2's installed home dir -@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending -@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven -@REM e.g. 
to debug Maven itself, use -@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files -@REM ---------------------------------------------------------------------------- - -@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' -@echo off -@REM set title of command window -title %0 -@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' -@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% - -@REM set %HOME% to equivalent of $HOME -if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") - -@REM Execute a user defined script before this one -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre -@REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* -if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* -:skipRcPre - -@setlocal - -set ERROR_CODE=0 - -@REM To isolate internal variables from possible post scripts, we use another setlocal -@setlocal - -@REM ==== START VALIDATION ==== -if not "%JAVA_HOME%" == "" goto OkJHome - -echo. -echo Error: JAVA_HOME not found in your environment. >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -:OkJHome -if exist "%JAVA_HOME%\bin\java.exe" goto init - -echo. -echo Error: JAVA_HOME is set to an invalid directory. >&2 -echo JAVA_HOME = "%JAVA_HOME%" >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -@REM ==== END VALIDATION ==== - -:init - -@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". -@REM Fallback to current working directory if not found. - -set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% -IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir - -set EXEC_DIR=%CD% -set WDIR=%EXEC_DIR% -:findBaseDir -IF EXIST "%WDIR%"\.mvn goto baseDirFound -cd .. -IF "%WDIR%"=="%CD%" goto baseDirNotFound -set WDIR=%CD% -goto findBaseDir - -:baseDirFound -set MAVEN_PROJECTBASEDIR=%WDIR% -cd "%EXEC_DIR%" -goto endDetectBaseDir - -:baseDirNotFound -set MAVEN_PROJECTBASEDIR=%EXEC_DIR% -cd "%EXEC_DIR%" - -:endDetectBaseDir - -IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig - -@setlocal EnableExtensions EnableDelayedExpansion -for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a -@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% - -:endReadAdditionalConfig - -SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" -set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" -set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.1/maven-wrapper-3.1.1.jar" - -FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( - IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B -) - -@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -@REM This allows using the maven wrapper in projects that prohibit checking in binary data. 
-if exist %WRAPPER_JAR% ( - if "%MVNW_VERBOSE%" == "true" ( - echo Found %WRAPPER_JAR% - ) -) else ( - if not "%MVNW_REPOURL%" == "" ( - SET DOWNLOAD_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.1.1/maven-wrapper-3.1.1.jar" - ) - if "%MVNW_VERBOSE%" == "true" ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... - echo Downloading from: %DOWNLOAD_URL% - ) - - powershell -Command "&{"^ - "$webclient = new-object System.Net.WebClient;"^ - "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ - "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ - "}"^ - "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ - "}" - if "%MVNW_VERBOSE%" == "true" ( - echo Finished downloading %WRAPPER_JAR% - ) -) -@REM End of extension - -@REM Provide a "standardized" way to retrieve the CLI args that will -@REM work with both Windows and non-Windows executions. -set MAVEN_CMD_LINE_ARGS=%* - -%MAVEN_JAVA_EXE% ^ - %JVM_CONFIG_MAVEN_PROPS% ^ - %MAVEN_OPTS% ^ - %MAVEN_DEBUG_OPTS% ^ - -classpath %WRAPPER_JAR% ^ - "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ - %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* -if ERRORLEVEL 1 goto error -goto end - -:error -set ERROR_CODE=1 - -:end -@endlocal & set ERROR_CODE=%ERROR_CODE% - -if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost -@REM check for post script, once with legacy .bat ending and once with .cmd ending -if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" -if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" -:skipRcPost - -@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' -if "%MAVEN_BATCH_PAUSE%"=="on" pause - -if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% - -cmd /C exit /B %ERROR_CODE% diff --git a/basic-functions/pom.xml b/basic-functions/pom.xml deleted file mode 100644 index 8f69649..0000000 --- a/basic-functions/pom.xml +++ /dev/null @@ -1,163 +0,0 @@ - - - 4.0.0 - org.acme - basic-functions - 1.0.0-SNAPSHOT - - 3.10.1 - 17 - UTF-8 - UTF-8 - quarkus-bom - io.quarkus.platform - 2.16.9.Final - true - 3.0.0-M7 - - - - - ${quarkus.platform.group-id} - ${quarkus.platform.artifact-id} - ${quarkus.platform.version} - pom - import - - - org.kie.kogito - kogito-bom - 2.0.0-SNAPSHOT - pom - import - - - - - - io.quarkus - quarkus-kubernetes - - - org.kie.kogito - kogito-quarkus-serverless-workflow - - - io.quarkus - quarkus-smallrye-health - - - org.kie.kogito - kogito-addons-quarkus-knative-eventing - - - org.kie.kogito - kogito-addons-quarkus-source-files - - - org.kie.kogito - kogito-quarkus-serverless-workflow-devui - - - org.kie.kogito - kogito-addons-quarkus-data-index-inmemory - - - io.quarkus - quarkus-resteasy-jackson - - - io.quarkus - quarkus-arc - - - io.quarkus - quarkus-junit5 - test - - - - - - jboss nexus - jboss nexus - https://repository.jboss.org/nexus/content/repositories/snapshots - - - Quarkus nexus - Quarkus nexus - https://s01.oss.sonatype.org/content/repositories/snapshots - - - - - - ${quarkus.platform.group-id} - quarkus-maven-plugin - ${quarkus.platform.version} - true - - - - build - generate-code - generate-code-tests - - - - - - maven-compiler-plugin - ${compiler-plugin.version} - - - -parameters - - - - - maven-surefire-plugin - ${surefire-plugin.version} - - - org.jboss.logmanager.LogManager - ${maven.home} - - - - - maven-failsafe-plugin - 
${surefire-plugin.version} - - - - integration-test - verify - - - - ${project.build.directory}/${project.build.finalName}-runner - org.jboss.logmanager.LogManager - ${maven.home} - - - - - - - - - - native - - - native - - - - false - native - - - - diff --git a/basic-functions/src/main/docker/Dockerfile.jvm b/basic-functions/src/main/docker/Dockerfile.jvm deleted file mode 100644 index 399bf7f..0000000 --- a/basic-functions/src/main/docker/Dockerfile.jvm +++ /dev/null @@ -1,93 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/basic-functions-jvm . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/basic-functions-jvm -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005) like this : EXPOSE 8080 5005 -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/basic-functions-jvm -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. -# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). 
-# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. (example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-17:1.15 - -ENV LANGUAGE='en_US:en' - - -# We make four distinct layers so if there are application changes the library layers can be re-used -COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ -COPY --chown=185 target/quarkus-app/*.jar /deployments/ -COPY --chown=185 target/quarkus-app/app/ /deployments/app/ -COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - diff --git a/basic-functions/src/main/docker/Dockerfile.legacy-jar b/basic-functions/src/main/docker/Dockerfile.legacy-jar deleted file mode 100644 index db7e861..0000000 --- a/basic-functions/src/main/docker/Dockerfile.legacy-jar +++ /dev/null @@ -1,89 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -Dquarkus.package.type=legacy-jar -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/basic-functions-legacy-jar . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/basic-functions-legacy-jar -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005) like this : EXPOSE 8080 5005 -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/basic-functions-legacy-jar -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. 
-# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. 
(example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. (example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-17:1.15 - -ENV LANGUAGE='en_US:en' - - -COPY target/lib/* /deployments/lib/ -COPY target/*-runner.jar /deployments/quarkus-run.jar - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" diff --git a/basic-functions/src/main/docker/Dockerfile.native b/basic-functions/src/main/docker/Dockerfile.native deleted file mode 100644 index 74772cb..0000000 --- a/basic-functions/src/main/docker/Dockerfile.native +++ /dev/null @@ -1,27 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# -# Before building the container image run: -# -# ./mvnw package -Pnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native -t quarkus/basic-functions . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/basic-functions -# -### -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.6 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -CMD ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/basic-functions/src/main/docker/Dockerfile.native-micro b/basic-functions/src/main/docker/Dockerfile.native-micro deleted file mode 100644 index 82091fc..0000000 --- a/basic-functions/src/main/docker/Dockerfile.native-micro +++ /dev/null @@ -1,30 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# It uses a micro base image, tuned for Quarkus native executables. -# It reduces the size of the resulting container image. -# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. -# -# Before building the container image run: -# -# ./mvnw package -Pnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/basic-functions . 
-# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/basic-functions -# -### -FROM quay.io/quarkus/quarkus-micro-image:2.0 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -CMD ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/basic-functions/src/main/java/dev/parodos/utils/Utils.java b/basic-functions/src/main/java/dev/parodos/utils/Utils.java deleted file mode 100644 index 230c435..0000000 --- a/basic-functions/src/main/java/dev/parodos/utils/Utils.java +++ /dev/null @@ -1,16 +0,0 @@ -package dev.parodos.utils; - - -import javax.enterprise.context.ApplicationScoped; - -@ApplicationScoped -public class Utils { - - public void isValidUser(int userID) { - System.out.println("Checking user ID: " + userID); - if (userID == 0) { - System.out.println("Failed with the userID" + userID); - throw new IllegalArgumentException("Invalid user ID"); - } - } -} diff --git a/basic-functions/src/main/resources/application.properties b/basic-functions/src/main/resources/application.properties deleted file mode 100644 index 6c36c89..0000000 --- a/basic-functions/src/main/resources/application.properties +++ /dev/null @@ -1 +0,0 @@ -quarkus.kubernetes-client.trust-certs=true diff --git a/basic-functions/src/main/resources/schemas/input.json b/basic-functions/src/main/resources/schemas/input.json deleted file mode 100644 index 37a24db..0000000 --- a/basic-functions/src/main/resources/schemas/input.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "$id": "classpath:/schema/input.json", - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Input", - "description": "schema for input description", - "type": "object", - "properties": { - "github_token": { - "description": "The token to use to authenticate with GitHub", - "type": "string" - }, - "branch": { - "description": "the branch to create in the repo", - "type": "string" - }, - "repo": { - "description": "the repo to be used", - "type": "string", - "examples": [ - "dotfiles" - ] - }, - "org": { - "description": "the org to be used", - "type": "string", - "examples": [ - "eloycoto" - ] - }, - "base_branch": { - "description": "the base branch to be used to send to move2kube", - "type": "string", - "default": "main" - } - }, - "required": ["github_token", "repo", "branch", "org"] -} diff --git a/basic-functions/src/main/resources/workflow.sw.yaml b/basic-functions/src/main/resources/workflow.sw.yaml deleted file mode 100644 index 3575199..0000000 --- a/basic-functions/src/main/resources/workflow.sw.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: BasicExample -version: '1.0' -specVersion: 0.8.0 -name: "Basic Example" -start: createBranchState -errors: -- name: InvalidArgumentException - code: java.lang.IllegalArgumentException -dataInputSchema: schemas/input.json -functions: -- name: isValidUser - type: custom - operation: service:java:dev.parodos.utils.Utils::isValidUser -- name: printMessage - type: custom - operation: sysout -- name: logInfo - type: custom - operation: sysout:INFO -- name: createBranch - type: custom - operation: rest:post:https://api.github.com:443/repos/{org}/{repo}/git/refs -- name: getBranch - type: custom - operation: rest:get:https://api.github.com:443/repos/{org}/{repo}/git/refs/heads/{base_branch} -states: -- name: createBranchState - type: operation - actions: - - name: getBaseBranch - functionRef: - refName: getBranch - arguments: - HEADER_Authorization: 
'"Bearer " + .github_token' - HEADER_Content-type: application/json - repo: ".repo" - org: ".org" - base_branch: ".base_branch" - - name: createBranch - functionRef: - refName: createBranch - arguments: - HEADER_Authorization: '"Bearer " + .github_token' - HEADER_Content-type: application/json - ref: '"refs/heads/" + .branch' - sha: ".object.sha" - repo: ".repo" - org: ".org" - - name: getURLforBranch - functionRef: - refName: logInfo - arguments: - message: '"URL is: " + .object.url' - onErrors: - - errorRef: InvalidArgumentException - transition: invalidBranch - end: true -- name: invalidBranch - type: operation - actions: - - name: InvalidBranchMessage - functionRef: - refName: printMessage - arguments: - message: Branch cannot be created - end: true diff --git a/deployment/kustomize/README.md b/deployment/kustomize/README.md deleted file mode 100644 index c93f80f..0000000 --- a/deployment/kustomize/README.md +++ /dev/null @@ -1,180 +0,0 @@ -Deploy SonataFlow services and workflows - -## Prerequisites -* An OpenShift, Minikube or OpenShift Local cluster -* You must be logged in to the cluster from the command line - -## Minikube - -```shell -minikube start --cpus 4 --memory 10240 --addons registry --addons metrics-server --insecure-registry "10.0.0.0/24" --insecure-registry "localhost:5000" -``` - -## Deploy PostgreSQL, Data Index, Jobs Service - -```shell -kustomize build kustomize/base/ | kubectl apply -f - -``` -You may get the following error: -```shell -error: unable to recognize "STDIN": no matches for kind "SonataFlowPlatform" in version "sonataflow.org/v1alpha08" -``` -In that case, simply re-run: -```shell -kustomize build kustomize/base/ | kubectl apply -f - -``` - -### Configure DB -The PostgreSQL database automatically runs the initialization script [V1.35.0__create_runtime_PostgreSQL.sql](base/V1.35.0__create_runtime_PostgreSQL.sql) in order to set up the schemas needed to enable workflow persistence. - -You can also execute it manually by following the steps below (optional): -1. Redirect the PostgreSQL service to your local host: -```bash -kubectl port-forward --namespace postgres svc/postgres-db-service 5432:5432 & -``` -2. Download the DDL archive for your version (here it's 1.44): https://repo1.maven.org/maven2/org/kie/kogito/kogito-ddl/ -3. Decompress the file -4. Create the schema for the Kogito runtime: -```bash -PGPASSWORD="sonataflow" psql --host 127.0.0.1 -U sonataflow -d sonataflow -p 5432 -a -f /postgresql/V1.35.0__create_runtime_PostgreSQL.sql -``` -Find the credentials and the DB name in the [postgres.properties](base/postgres.properties) file; the default values are used above. - -See https://sonataflow.org/serverlessworkflow/latest/persistence/postgresql-flyway-migration.html#manually-executing-scripts for more information about the migration. -## Deploy sample workflows -### Greeting workflow -```shell -kustomize build kustomize/workflows/sonataflow-greeting/ | kubectl apply -f - -``` -### Event with timeout -This sample waits at most 30 seconds for `event1`, then for `event2`, to be received. -```shell -kustomize build kustomize/workflows/sonataflow-event-timeout/ | kubectl apply -f - -``` - -## Testing the Sample Workflows -### Greeting workflow -Once the deployment above is complete, we need to get the route of the `greeting` workflow service: -* For OpenShift: -```shell -kubectl get route -``` - -* For k8s: -```shell -kubectl get svc -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -... -greeting ClusterIP 10.105.141.132 <none> 80/TCP 166m -...
- -kubectl port-forward svc/greeting 8080:80 & -``` - -Now that we have the route, execute the following command to trigger an execution. - -* Check if you get a response from the greeting workflow -```shell -curl -X POST -H 'Content-Type:application/json' -H 'Accept:application/json' -d '{"name": "SonataFlow", "language": "English"}' http://<route>/greeting -``` -* A sample response -```json -{"id":"bf05e03f-a996-4482-aff7-89aa4a173be9","workflowdata":{"name":"SonataFlow","language":"English","greeting":"Hello from JSON Workflow, "}} -``` -### Event with timeout -Once the deployment above is complete, we need to get the route of the `event-timeout` workflow service: -* For OpenShift: -```shell -kubectl get route -``` - -* For k8s: -```shell -kubectl get svc -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -... -event-timeout ClusterIP 10.110.66.233 <none> 80/TCP 62m -... - -kubectl port-forward svc/event-timeout 8081:80 & -``` - -Now that we have the route, execute the following command to trigger an execution. - -* Check if you get a response from the event-timeout workflow -```shell -curl -X POST -H 'Content-Type:application/json' -H 'Accept:application/json' -d '{}' http://<route>/event-timeout -``` -* A sample response -```json -{"id":"dda51c0b-01ec-4630-a494-8f25682be7d6","workflowdata":{}} -``` - -* You can check the running workflow instances by sending -```shell -curl -i -X GET -H 'Content-Type:application/json' -H 'Accept:application/json' -d '{}' 'http://localhost:8081/event-timeout' -``` -* A sample response -```json -[{"id":"dda51c0b-01ec-4630-a494-8f25682be7d6","workflowdata":{}}] -``` - -Then we need to send the `event1` cloud event: take the `id` from the previous response and replace it in the following command: -```shell -curl -i -X POST -H 'Content-Type: application/cloudevents+json' -d '{"datacontenttype": "application/json", "specversion":"1.0","id":"","source":"/local/curl","type":"event1_event_type","data": "{\"eventData\":\"Event1 sent from UI\"}", "kogitoprocrefid": "" }' http://localhost:8081/ -HTTP/1.1 202 Accepted -content-length: 0 -``` -In the above request, `id` can be any UUID and `kogitoprocrefid` must be the `id` of the previously started workflow. - -Then `event2` shall be sent: -```shell -curl -i -X POST -H 'Content-Type: application/cloudevents+json' -d '{"datacontenttype": "application/json", "specversion":"1.0","id":"","source":"/local/curl","type":"event2_event_type","data": "{\"eventData\":\"Event2 sent from UI\"}", "kogitoprocrefid": "" }' http://localhost:8081/ -HTTP/1.1 202 Accepted -content-length: 0 -``` - -In both requests, it is the `type` field that specifies which event is sent. - -If both requests are sent within 30 seconds each, we should see a message in the logs of the `event-timeout` pod: -```shell -kubectl logs -f event-timeout-76dc79855f-hj57m -... -INFO [org.kie.kog.ser.wor.act.SysoutAction] (kogito-event-executor-1) event-state-timeouts: dda51c0b-01ec-4630-a494-8f25682be7d6 has finalized. The event1 was received. -- The event2 was received. -... -``` - -If not, the log message indicates which event(s) were not received. - -Either after the 2 events have been received or after 1 minute (timeouts expired), you should no longer see the `id` of the workflow when listing the running instances. - -You can see the timeout expiration notification(s) being sent by the `JobService` in the logs of its pod: -```shell -kubectl logs -f jobs-service-bddc7ff9d-hpsrq -...
-# Trigger timeout for event1 -2023-09-25 14:48:50,642 jobs-service-bddc7ff9d-hpsrq INFO [org.kie.kogito.jobs.service.job.DelegateJob:-1] (vert.x-eventloop-thread-0) Executing for context JobDetails[id='0f6a5b02-d46e-45da-9008-f02173d76cad', correlationId='0f6a5b02-d46e-45da-9008-f02173d76cad', status=SCHEDULED, lastUpdate=null, retries=0, executionCounter=0, scheduledId='null', recipient=RecipientInstance{recipient=HttpRecipient{url='http://10.110.66.233:80/management/jobs/event-timeout/instances//timers/-1', method='POST', headers={processInstanceId=, nodeInstanceId=43effaa9-1ab5-4b61-9dd9-ba52c3d07221, processId=event-timeout, rootProcessId=null, rootProcessInstanceId=null, Content-Type=application/json}, queryParams={}, payload=org.kie.kogito.jobs.service.api.recipient.http.HttpRecipientJsonPayloadData@f06caf04} org.kie.kogito.jobs.service.api.recipient.http.HttpRecipient@6a6342fe}, trigger=org.kie.kogito.timer.impl.SimpleTimerTrigger@352c2edd, executionTimeout=null, executionTimeoutUnit=null] -2023-09-25 14:48:50,660 jobs-service-bddc7ff9d-hpsrq INFO [org.kie.kogito.jobs.service.job.DelegateJob:-1] (vert.x-eventloop-thread-0) Executed successfully with response JobExecutionResponse[message='null', code='200', timestamp=2023-09-25T14:48:50.659996Z[GMT], jobId='0f6a5b02-d46e-45da-9008-f02173d76cad'] -# Trigger timeout for event2 -2023-09-25 14:49:20,653 jobs-service-bddc7ff9d-hpsrq INFO [org.kie.kogito.jobs.service.job.DelegateJob:-1] (vert.x-eventloop-thread-0) Executing for context JobDetails[id='297b2578-bef8-4699-b175-1e032e57f87c', correlationId='297b2578-bef8-4699-b175-1e032e57f87c', status=SCHEDULED, lastUpdate=null, retries=0, executionCounter=0, scheduledId='null', recipient=RecipientInstance{recipient=HttpRecipient{url='http://10.110.66.233:80/management/jobs/event-timeout/instances//timers/-1', method='POST', headers={processInstanceId=, nodeInstanceId=69e1e801-0071-4250-84a9-79ccdf4834f1, processId=event-timeout, rootProcessId=, rootProcessInstanceId=, Content-Type=application/json}, queryParams={}, payload=org.kie.kogito.jobs.service.api.recipient.http.HttpRecipientJsonPayloadData@3652907c} org.kie.kogito.jobs.service.api.recipient.http.HttpRecipient@e96d7d8d}, trigger=org.kie.kogito.timer.impl.SimpleTimerTrigger@726fefbd, executionTimeout=null, executionTimeoutUnit=null] -2023-09-25 14:49:20,672 jobs-service-bddc7ff9d-hpsrq INFO [org.kie.kogito.jobs.service.job.DelegateJob:-1] (vert.x-eventloop-thread-0) Executed successfully with response JobExecutionResponse[message='null', code='200', timestamp=2023-09-25T14:49:20.672492Z[GMT], jobId='297b2578-bef8-4699-b175-1e032e57f87c'] -... -``` - -While the workflow is still active (the 2 events were not received and the timeouts did not expire yet), you can see the instance in the DB: -```shell -PGPASSWORD="sonataflow" psql --host 127.0.0.1 -U sonataflow -d sonataflow -p 5432 -sonataflow=# select id, process_id, version, process_version from process_instances ; - id | process_id | version | process_version ---------------------------------------+---------------+---------+----------------- - 150ed623-cf87-4b43-a1fe-11fe22434cfb | event-timeout | 0 | 0.0.1 -``` -The `payload` column was not queried on purpose. - -Once the workflow has terminated, the entry is removed from the DB. -## Delete the deployment -```shell -kustomize build kustomize/base/ | kubectl delete -f - -``` -If not all resources were deleted, repeat the last action one more time.
- diff --git a/deployment/kustomize/base/V1.35.0__create_runtime_PostgreSQL.sql b/deployment/kustomize/base/V1.35.0__create_runtime_PostgreSQL.sql deleted file mode 100644 index 892b673..0000000 --- a/deployment/kustomize/base/V1.35.0__create_runtime_PostgreSQL.sql +++ /dev/null @@ -1,23 +0,0 @@ --- To be used with kogito-addons-quarkus-persistence-jdbc for Quarkus or kogito-addons-springboot-persistence-jdbc for SpringBoot -CREATE TABLE process_instances -( - id character(36) NOT NULL, - payload bytea NOT NULL, - process_id character varying NOT NULL, - version bigint, - process_version character varying, - CONSTRAINT process_instances_pkey PRIMARY KEY (id) -); -CREATE INDEX idx_process_instances_process_id ON process_instances (process_id, id, process_version); - -CREATE TABLE correlation_instances -( - id character(36) NOT NULL, - encoded_correlation_id character varying(36) NOT NULL UNIQUE, - correlated_id character varying(36) NOT NULL, - correlation json NOT NULL, - version bigint, - CONSTRAINT correlation_instances_pkey PRIMARY KEY (id) -); -CREATE INDEX idx_correlation_instances_encoded_id ON correlation_instances (encoded_correlation_id); -CREATE INDEX idx_correlation_instances_correlated_id ON correlation_instances (correlated_id); diff --git a/deployment/kustomize/base/kustomization.yaml b/deployment/kustomize/base/kustomization.yaml deleted file mode 100644 index ba37b6d..0000000 --- a/deployment/kustomize/base/kustomization.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -secretGenerator: -- name: postgres-secrets - namespace: postgres - envs: - - postgres.properties - -- name: postgres-secrets - envs: - - postgres.properties - -resources: -- postgres.yaml -- ../sonataflow/ - -images: -- name: quay.io/kiegroup/kogito-serverless-operator - newTag: "1.44" -- name: quay.io/kiegroup/kogito-data-index-postgresql - newTag: "1.44.1" -- name: quay.io/kiegroup/kogito-jobs-service-postgresql - newTag: "1.44.1" -- name: postgres - newTag: "15" - -generatorOptions: - disableNameSuffixHash: true - -configMapGenerator: - - name: sonataflow-runtime-ddl - namespace: postgres - files: - - V1.35.0__create_runtime_PostgreSQL.sql \ No newline at end of file diff --git a/deployment/kustomize/base/postgres.properties b/deployment/kustomize/base/postgres.properties deleted file mode 100644 index e5b98e4..0000000 --- a/deployment/kustomize/base/postgres.properties +++ /dev/null @@ -1,4 +0,0 @@ -POSTGRES_USER=sonataflow -POSTGRES_PASSWORD=sonataflow -POSTGRES_DB=sonataflow -PGDATA=/var/lib/postgresql/data/mydata diff --git a/deployment/kustomize/base/postgres.yaml b/deployment/kustomize/base/postgres.yaml deleted file mode 100644 index 939f2dc..0000000 --- a/deployment/kustomize/base/postgres.yaml +++ /dev/null @@ -1,92 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: postgres ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: postgres-pvc - namespace: postgres - labels: - app: postgres-db -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi ---- -kind: StatefulSet -apiVersion: apps/v1 -metadata: - name: postgres-db - namespace: postgres - labels: - app: postgres-db -spec: - replicas: 1 - selector: - matchLabels: - app: postgres-db - serviceName: postgres-db-service - template: - metadata: - labels: - app: postgres-db - spec: - containers: - - name: postgres-db - image: postgres - imagePullPolicy: 'IfNotPresent' - ports: - - containerPort: 5432 - volumeMounts: - - name: postgres-storage - 
mountPath: dbfiles - - name: init-scripts - mountPath: "/docker-entrypoint-initdb.d" - readinessProbe: - exec: - command: ["pg_isready"] - initialDelaySeconds: 15 - timeoutSeconds: 2 - livenessProbe: - exec: - command: ["pg_isready"] - initialDelaySeconds: 15 - timeoutSeconds: 2 - envFrom: - - secretRef: - name: postgres-secrets - env: - - name: PGDATA - value: /var/lib/postgresql/data/dbfiles - - name: POSTGRES_DB - value: sonataflow - - name: POSTGRES_HOST_AUTH_METHOD - value: password - - name: POSTGRES_INITDB_ARGS - value: "-U sonataflow" - volumes: - - name: postgres-storage - persistentVolumeClaim: - claimName: postgres-pvc - - name: init-scripts - configMap: - name: sonataflow-runtime-ddl ---- -kind: Service -apiVersion: v1 -metadata: - name: postgres-db-service - namespace: postgres - labels: - app: postgres-db -spec: - selector: - app: postgres-db - ports: - - protocol: TCP - port: 5432 - targetPort: 5432 diff --git a/deployment/kustomize/sonataflow/addons/data-index/application.properties b/deployment/kustomize/sonataflow/addons/data-index/application.properties deleted file mode 100644 index c173c59..0000000 --- a/deployment/kustomize/sonataflow/addons/data-index/application.properties +++ /dev/null @@ -1,20 +0,0 @@ -quarkus.http.port=8080 -quarkus.http.cors=true -quarkus.http.cors.origins=/.*/ -quarkus.profile=http-events-support -#quarkus.log.category."org.kie.kogito.index".min-level=DEBUG -quarkus.log.category."org.kie.kogito.index".level=DEBUG - -quarkus.datasource.data_index.db-kind=postgresql -quarkus.datasource.jdbc.url=jdbc:postgresql://postgres-db-service.postgres:5432/sonataflow?currentSchema=data-index-service -quarkus.hibernate-orm.database.generation=update -quarkus.flyway.migrate-at-start=true -quarkus.flyway.table=data-index-flyway - -#kogito.data-index.quarkus_profile=http-events-support - -# Disable kafka client health check since the quarkus-http connector is being used instead. 
-quarkus.smallrye-health.check."io.quarkus.kafka.client.health.KafkaHealthCheck".enabled=false -#quarkus.kafka.devservices.enabled=false -#quarkus.kafka.health.enabled=false - diff --git a/deployment/kustomize/sonataflow/addons/data-index/data-index.yaml b/deployment/kustomize/sonataflow/addons/data-index/data-index.yaml deleted file mode 100644 index 5221736..0000000 --- a/deployment/kustomize/sonataflow/addons/data-index/data-index.yaml +++ /dev/null @@ -1,88 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: data-index -spec: - replicas: 1 - selector: - matchLabels: - app: data-index - template: - metadata: - labels: - app: data-index - spec: - containers: - - name: data-index - image: quay.io/kiegroup/kogito-data-index-postgresql:1.44.1 - resources: - requests: - memory: "256Mi" - cpu: "500m" - limits: - memory: "512Mi" - cpu: "1000m" - ports: - - containerPort: 8080 - env: - - name: QUARKUS_PROFILE - value: http-events-support - - name: KOGITO_DATA_INDEX_QUARKUS_PROFILE - value: http-events-support - - name: QUARKUS_DATASOURCE_USERNAME - valueFrom: - secretKeyRef: - name: postgres-secrets - key: POSTGRES_USER - - name: QUARKUS_DATASOURCE_PASSWORD - valueFrom: - secretKeyRef: - name: postgres-secrets - key: POSTGRES_PASSWORD - volumeMounts: - - name: application-config - mountPath: "/home/kogito/config" - livenessProbe: - failureThreshold: 3 - httpGet: - path: /q/health/live - port: 8080 - scheme: HTTP - initialDelaySeconds: 0 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 10 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /q/health/ready - port: 8080 - scheme: HTTP - initialDelaySeconds: 0 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 10 - volumes: - - name: application-config - configMap: - name: data-index-properties - initContainers: - - name: init-postgres - image: busybox:1.36 - imagePullPolicy: IfNotPresent - command: ['sh', '-c', 'until nc -vz postgres-db-service.postgres.svc.cluster.local 5432; do echo "Waiting for postgres server"; sleep 3; done;'] ---- -kind: Service -apiVersion: v1 -metadata: - name: data-index-service - labels: - app: data-index -spec: - selector: - app: data-index - ports: - - protocol: TCP - port: 80 - targetPort: 8080 - type: NodePort diff --git a/deployment/kustomize/sonataflow/addons/data-index/kustomization.yaml b/deployment/kustomize/sonataflow/addons/data-index/kustomization.yaml deleted file mode 100644 index 95fdda6..0000000 --- a/deployment/kustomize/sonataflow/addons/data-index/kustomization.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -generatorOptions: - disableNameSuffixHash: true - -configMapGenerator: -- name: data-index-properties - files: - - application.properties - -resources: -- data-index.yaml diff --git a/deployment/kustomize/sonataflow/addons/jobs-service/application.properties b/deployment/kustomize/sonataflow/addons/jobs-service/application.properties deleted file mode 100644 index b5a9b4b..0000000 --- a/deployment/kustomize/sonataflow/addons/jobs-service/application.properties +++ /dev/null @@ -1,18 +0,0 @@ -kogito.service.url=http://jobs-service-service - -quarkus.log.level=INFO -quarkus.datasource.db-kind=postgresql -quarkus.datasource.jdbc.url=jdbc:postgresql://postgres-db-service.postgres:5432/sonataflow?currentSchema=jobs-service -quarkus.datasource.reactive.url=postgresql://postgres-db-service.postgres:5432/sonataflow?search_path=jobs-service 
-quarkus.smallrye-health.check."io.quarkus.kafka.client.health.KafkaHealthCheck".enabled=false - -jobs-service.persistence=postgresql -#quarkus.reactive-messaging.auto-connector-attachment=false - -kogito.jobs-service.knative-events=false -# Enable the job status change events notification. -kogito.jobs-service.http.job-status-change-events=true -# Deliver the job status change events directly to the data-index-service. -mp.messaging.outgoing.kogito-job-service-job-status-events-http.url=http://data-index-service/jobs -# Disable the K_SINK injection health check since we knative eventing is not being used. -quarkus.smallrye-health.check."org.kie.kogito.jobs.service.messaging.http.health.knative.KSinkInjectionHealthCheck".enabled=false diff --git a/deployment/kustomize/sonataflow/addons/jobs-service/jobs-service.yaml b/deployment/kustomize/sonataflow/addons/jobs-service/jobs-service.yaml deleted file mode 100644 index 87f06fb..0000000 --- a/deployment/kustomize/sonataflow/addons/jobs-service/jobs-service.yaml +++ /dev/null @@ -1,87 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: jobs-service - labels: - app: jobs-service -spec: - replicas: 1 - selector: - matchLabels: - app: jobs-service - template: - metadata: - labels: - app: jobs-service - spec: - containers: - - name: jobs-service - image: quay.io/kiegroup/kogito-jobs-service-postgresql:1.44 - resources: - requests: - memory: "256Mi" - cpu: "500m" - limits: - memory: "512Mi" - cpu: "1000m" - ports: - - containerPort: 8080 - env: - - name: QUARKUS_DATASOURCE_USERNAME - valueFrom: - secretKeyRef: - name: postgres-secrets - key: POSTGRES_USER - - name: QUARKUS_DATASOURCE_PASSWORD - valueFrom: - secretKeyRef: - name: postgres-secrets - key: POSTGRES_PASSWORD - volumeMounts: - - name: application-config - mountPath: "/home/kogito/config" - livenessProbe: - failureThreshold: 3 - httpGet: - path: /q/health/live - port: 8080 - scheme: HTTP - initialDelaySeconds: 0 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 10 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /q/health/ready - port: 8080 - scheme: HTTP - initialDelaySeconds: 0 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 10 - volumes: - - name: application-config - configMap: - name: jobs-service-properties - initContainers: - - name: init-postgres - image: busybox:1.36 - imagePullPolicy: IfNotPresent - command: ['sh', '-c', 'until nc -vz postgres-db-service.postgres.svc.cluster.local 5432; do echo "Waiting for postgres server"; sleep 3; done;'] ---- -kind: Service -apiVersion: v1 -metadata: - name: jobs-service-service - labels: - app: jobs-service -spec: - selector: - app: jobs-service - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 8080 - type: NodePort diff --git a/deployment/kustomize/sonataflow/addons/jobs-service/kustomization.yaml b/deployment/kustomize/sonataflow/addons/jobs-service/kustomization.yaml deleted file mode 100644 index d7bd735..0000000 --- a/deployment/kustomize/sonataflow/addons/jobs-service/kustomization.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -generatorOptions: - disableNameSuffixHash: true - -configMapGenerator: -- name: jobs-service-properties - files: - - application.properties - -resources: -- jobs-service.yaml diff --git a/deployment/kustomize/sonataflow/addons/kustomization.yaml b/deployment/kustomize/sonataflow/addons/kustomization.yaml deleted file mode 100644 index be37ff1..0000000 --- 
a/deployment/kustomize/sonataflow/addons/kustomization.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- data-index/ -- jobs-service/ -- sonataflow-builder-config.yaml -- sonataflow-platform.yaml -- service-discovery-rbac.yaml diff --git a/deployment/kustomize/sonataflow/addons/service-discovery-rbac.yaml b/deployment/kustomize/sonataflow/addons/service-discovery-rbac.yaml deleted file mode 100644 index b5c41c4..0000000 --- a/deployment/kustomize/sonataflow/addons/service-discovery-rbac.yaml +++ /dev/null @@ -1,48 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: service-discovery-role -rules: -- apiGroups: - - "" - resources: - - pods - - services - verbs: - - get - - list -- apiGroups: - - apps - resources: - - deployments - - statefulsets - verbs: - - get - - list -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list -# Knative -- apiGroups: - - serving.knative.dev - resources: - - services - verbs: - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: service-discovery-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: service-discovery-role -subjects: -- kind: ServiceAccount - name: default diff --git a/deployment/kustomize/sonataflow/addons/sonataflow-builder-config.yaml b/deployment/kustomize/sonataflow/addons/sonataflow-builder-config.yaml deleted file mode 100644 index 2b601cd..0000000 --- a/deployment/kustomize/sonataflow/addons/sonataflow-builder-config.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -data: - DEFAULT_BUILDER_RESOURCE_NAME: Dockerfile - DEFAULT_WORKFLOW_EXTENSION: .sw.json - Dockerfile: "FROM quay.io/kiegroup/kogito-swf-builder-nightly:latest AS builder\n\n# - variables that can be overridden by the builder\n# To add a Quarkus extension - to your application\nARG QUARKUS_EXTENSIONS\n# Args to pass to the Quarkus CLI - add extension command\nARG QUARKUS_ADD_EXTENSION_ARGS\n\n# Copy from build context - to skeleton resources project\nCOPY --chmod=644 * ./resources/\n\nRUN /home/kogito/launch/build-app.sh - ./resources\n \n#=============================\n# Runtime Run\n#=============================\nFROM - registry.access.redhat.com/ubi8/openjdk-11:latest\n\nENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'\n - \ \n# We make four distinct layers so if there are application changes the library - layers can be re-used\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/lib/ - /deployments/lib/\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/*.jar - /deployments/\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/app/ - /deployments/app/\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/quarkus/ - /deployments/quarkus/\n\nEXPOSE 8080\nUSER 185\nENV AB_JOLOKIA_OFF=\"\"\nENV JAVA_OPTS=\"-Dquarkus.http.host=0.0.0.0 - -Djava.util.logging.manager=org.jboss.logmanager.LogManager\"\nENV JAVA_APP_JAR=\"/deployments/quarkus-run.jar\"\n" -kind: ConfigMap -metadata: - name: sonataflow-operator-builder-config diff --git a/deployment/kustomize/sonataflow/addons/sonataflow-platform.yaml b/deployment/kustomize/sonataflow/addons/sonataflow-platform.yaml deleted file mode 100644 index 19fef82..0000000 --- a/deployment/kustomize/sonataflow/addons/sonataflow-platform.yaml +++ /dev/null @@ -1,14 +0,0 @@ 
-apiVersion: sonataflow.org/v1alpha08 -kind: SonataFlowPlatform -metadata: - name: sonataflow-platform -spec: - build: - template: - buildArgs: - - name: QUARKUS_EXTENSIONS - # value: org.kie.kogito:kogito-addons-quarkus-jobs-knative-eventing:2.0.0-SNAPSHOT,org.kie.kogito:kogito-addons-quarkus-persistence-jdbc:2.0.0-SNAPSHOT,io.quarkus:quarkus-jdbc-postgresql:2.16.10.Final,io.quarkus:quarkus-agroal:2.16.10.Final - value: org.kie.kogito:kogito-addons-quarkus-jobs-management,org.kie.kogito:kogito-addons-quarkus-persistence-jdbc:2.0.0-SNAPSHOT,io.quarkus:quarkus-jdbc-postgresql:2.16.10.Final,io.quarkus:quarkus-agroal:2.16.10.Final - config: - strategyOptions: - KanikoBuildCacheEnabled: "true" diff --git a/deployment/kustomize/sonataflow/kustomization.yaml b/deployment/kustomize/sonataflow/kustomization.yaml deleted file mode 100644 index 19fa9a4..0000000 --- a/deployment/kustomize/sonataflow/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- sonataflow-operator.yaml -- addons/ diff --git a/deployment/kustomize/sonataflow/sonataflow-operator.yaml b/deployment/kustomize/sonataflow/sonataflow-operator.yaml deleted file mode 100644 index 671a950..0000000 --- a/deployment/kustomize/sonataflow/sonataflow-operator.yaml +++ /dev/null @@ -1,3626 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: sonataflow-operator-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: sonataflowbuilds.sonataflow.org -spec: - group: sonataflow.org - names: - kind: SonataFlowBuild - listKind: SonataFlowBuildList - plural: sonataflowbuilds - shortNames: - - sfb - - sfbuild - - sfbuilds - singular: sonataflowbuild - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.imageTag - name: Image - type: string - - jsonPath: .status.buildPhase - name: Phase - type: string - name: v1alpha08 - schema: - openAPIV3Schema: - description: SonataFlowBuild is an internal custom resource to control workflow - build instances in the target platform - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: SonataFlowBuildSpec an abstraction over the actual build - process performed by the platform. - properties: - arguments: - description: 'Arguments lists the command line arguments to send to - the internal builder command. Depending on the build method you - might set this attribute instead of BuildArgs. For example: ".spec.arguments=verbose=3". - Please see the SonataFlow guides.' - items: - type: string - type: array - buildArgs: - description: Optional build arguments that can be set to the internal - build (e.g. 
Docker ARG) - items: - description: EnvVar represents an environment variable present in - a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. If a variable cannot - be resolved, the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the - string literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists or - not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. Cannot - be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath is - written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed - resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envs: - description: Optional environment variables to add to the internal - build - items: - description: EnvVar represents an environment variable present in - a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. If a variable cannot - be resolved, the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the - string literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists or - not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. Cannot - be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath is - written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed - resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - resources: - description: Resources optional compute resource requirements for - the builder - properties: - claims: - description: "Claims lists the names of resources, defined in - spec.resourceClaims, that are used by this container. \n This - is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be set - for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims - of the Pod where this field is used. It makes that resource - available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - timeout: - description: Timeout defines the Build maximum execution duration. - The Build deadline is set to the Build start time plus the Timeout - duration. If the Build deadline is exceeded, the Build context is - canceled, and its phase set to BuildPhaseFailed. - format: duration - type: string - type: object - status: - description: SonataFlowBuildStatus defines the observed state of SonataFlowBuild - properties: - buildPhase: - description: Current phase of the build - type: string - error: - description: Last error found during build - type: string - imageTag: - description: The final image tag produced by this build instance - type: string - innerBuild: - description: InnerBuild is a reference to an internal build object, - which can be anything known only to internal builders. 
- type: object - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: sonataflowplatforms.sonataflow.org -spec: - group: sonataflow.org - names: - kind: SonataFlowPlatform - listKind: SonataFlowPlatformList - plural: sonataflowplatforms - shortNames: - - sfp - - sfplatform - - sfplatforms - singular: sonataflowplatform - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.cluster - name: Cluster - type: string - - jsonPath: .status.conditions[?(@.type=='Succeed')].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=='Succeed')].reason - name: Reason - type: string - name: v1alpha08 - schema: - openAPIV3Schema: - description: SonataFlowPlatform is the descriptor for the workflow platform - infrastructure. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: SonataFlowPlatformSpec defines the desired state of SonataFlowPlatform - properties: - build: - description: Attributes for building workflows in the target platform - properties: - config: - description: Describes the platform configuration for building - workflows. - properties: - baseImage: - description: a base image that can be used as base layer for - all images. It can be useful if you want to provide some - custom base image with further utility software - type: string - registry: - description: Registry the registry where to publish the built - image - properties: - address: - description: the URI to access - type: string - ca: - description: the configmap which stores the Certificate - Authority - type: string - insecure: - description: if the container registry is insecure (ie, - http only) - type: boolean - organization: - description: the registry organization - type: string - secret: - description: the secret where credentials are stored - type: string - type: object - strategy: - description: BuildStrategy to use to build workflows in the - platform. Usually, the operator elect the strategy based - on the platform. Note that this field might be read only - in certain scenarios. - type: string - strategyOptions: - additionalProperties: - type: string - description: BuildStrategyOptions additional options to add - to the build strategy. See https://sonataflow.org/serverlessworkflow/main/cloud/operator/build-and-deploy-workflows.html - type: object - timeout: - description: how much time to wait before time out the build - process - type: string - type: object - template: - description: Describes a build template for building workflows. - Base for the internal SonataFlowBuild resource. 
- properties: - arguments: - description: 'Arguments lists the command line arguments to - send to the internal builder command. Depending on the build - method you might set this attribute instead of BuildArgs. - For example: ".spec.arguments=verbose=3". Please see the - SonataFlow guides.' - items: - type: string - type: array - buildArgs: - description: Optional build arguments that can be set to the - internal build (e.g. Docker ARG) - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables - in the container and any service environment variables. - If a variable cannot be resolved, the reference in - the input string will be unchanged. Double $$ are - reduced to a single $, which allows for escaping the - $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce - the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the - variable exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or - its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: 'Selects a field of the pod: supports - metadata.name, metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envs: - description: Optional environment variables to add to the - internal build - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables - in the container and any service environment variables. - If a variable cannot be resolved, the reference in - the input string will be unchanged. Double $$ are - reduced to a single $, which allows for escaping the - $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce - the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the - variable exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or - its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: 'Selects a field of the pod: supports - metadata.name, metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - resources: - description: Resources optional compute resource requirements - for the builder - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - timeout: - description: Timeout defines the Build maximum execution duration. - The Build deadline is set to the Build start time plus the - Timeout duration. If the Build deadline is exceeded, the - Build context is canceled, and its phase set to BuildPhaseFailed. - format: duration - type: string - type: object - type: object - devMode: - description: Attributes for running workflows in devmode (immutable, - no build required) - properties: - baseImage: - description: Base image to run the Workflow in dev mode instead - of the operator's default. - type: string - type: object - type: object - status: - description: SonataFlowPlatformStatus defines the observed state of SonataFlowPlatform - properties: - cluster: - description: Cluster what kind of cluster you're running (ie, plain - Kubernetes or OpenShift) - enum: - - kubernetes - - openshift - type: string - conditions: - description: The latest available observations of a resource's current - state. - items: - description: Condition describes the common structure for conditions - in our types - properties: - lastUpdateTime: - description: The last time this condition was updated. - format: date-time - type: string - message: - description: A human-readable message indicating details about - the transition. 
- type: string - reason: - description: The reason for the condition's last transition. - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: Type condition for the given object - type: string - required: - - status - - type - type: object - type: array - info: - additionalProperties: - type: string - description: Info generic information related to the build - type: object - observedGeneration: - description: The generation observed by the deployment controller. - format: int64 - type: integer - version: - description: Version the operator version controlling this Platform - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: sonataflows.sonataflow.org -spec: - group: sonataflow.org - names: - kind: SonataFlow - listKind: SonataFlowList - plural: sonataflows - shortNames: - - sf - - workflow - - workflows - singular: sonataflow - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sonataflow\.org\/profile - name: Profile - type: string - - jsonPath: .metadata.annotations.sonataflow\.org\/version - name: Version - type: string - - jsonPath: .status.endpoint - name: URL - type: string - - jsonPath: .status.conditions[?(@.type=='Running')].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=='Running')].reason - name: Reason - type: string - name: v1alpha08 - schema: - openAPIV3Schema: - description: SonataFlow is the descriptor representation for a workflow application - based on the CNCF Serverless Workflow specification. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: SonataFlowSpec defines the desired state of SonataFlow - properties: - flow: - description: "Flow describes the contents of the Workflow definition - following the CNCF Serverless Workflow Specification. The attributes - not part of the flow are defined by the Custom Resource metadata - information, as follows: \n - Id, name, and key are replaced by - the Custom Resource's name. Must follow the Kubernetes naming patterns - (RFC1123). \n - Description can be added in the CR's annotation - field sonataflow.org/description \n - Version is also defined in - the CR's annotation, field sonataflow.org/version \n - SpecVersion - is in the CR's apiVersion, for example v1alpha08 means that it follows - the specification version 0.8." - properties: - annotations: - description: Annotations List of helpful terms describing the - workflows intended purpose, subject areas, or other important - qualities. 
- items: - type: string - type: array - auth: - description: Auth definitions can be used to define authentication - information that should be applied to resources defined in the - operation property of function definitions. It is not used as - authentication information for the function invocation, but - just to access the resource containing the function invocation - information. - x-kubernetes-preserve-unknown-fields: true - autoRetries: - description: AutoRetries If set to true, actions should automatically - be retried on unchecked errors. Default is false - type: boolean - constants: - additionalProperties: - description: RawMessage is a raw encoded JSON value. It implements - Marshaler and Unmarshaler and can be used to delay JSON decoding - or precompute a JSON encoding. - format: byte - type: string - description: Constants Workflow constants are used to define static, - and immutable, data which is available to Workflow Expressions. - type: object - dataInputSchema: - description: DataInputSchema URI of the JSON Schema used to validate - the workflow data input - properties: - failOnValidationErrors: - type: boolean - schema: - type: string - required: - - failOnValidationErrors - - schema - type: object - errors: - description: Defines checked errors that can be explicitly handled - during workflow execution. - items: - description: Error declaration for workflow definitions - properties: - code: - description: Code OnError code. Can be used in addition - to the name to help runtimes resolve to technical errors/exceptions. - Should not be defined if error is set to '*'. - type: string - description: - description: OnError description. - type: string - name: - description: Name Domain-specific error name. - type: string - required: - - name - type: object - type: array - events: - items: - description: Event used to define events and their correlations - properties: - correlation: - description: Define event correlation rules for this event. - Only used for consumed events. - items: - description: Correlation define event correlation rules - for an event. Only used for `consumed` events - properties: - contextAttributeName: - description: CloudEvent Extension Context Attribute - name - type: string - contextAttributeValue: - description: CloudEvent Extension Context Attribute - value - type: string - required: - - contextAttributeName - type: object - type: array - dataOnly: - description: If `true`, only the Event payload is accessible - to consuming Workflow states. If `false`, both event payload - and context attributes should be accessible. Defaults - to true. - type: boolean - kind: - default: consumed - description: Defines the CloudEvent as either 'consumed' - or 'produced' by the workflow. Defaults to `consumed`. - enum: - - consumed - - produced - type: string - metadata: - additionalProperties: - type: object - description: Metadata information - type: object - name: - description: Unique event name. - type: string - source: - description: CloudEvent source. - type: string - type: - description: CloudEvent type. - type: string - required: - - name - - type - type: object - type: array - functions: - items: - description: Function ... - properties: - authRef: - description: References an auth definition name to be used - to access to resource defined in the operation parameter. 
- type: string - metadata: - additionalProperties: - type: object - description: Metadata information - type: object - name: - description: Unique function name - type: string - operation: - description: If type is `rest`, #. - If type is `rpc`, ##. - If type is `expression`, defines the workflow expression. - If the type is `custom`, #. - type: string - type: - default: rest - description: Defines the function type. Is either `custom`, - `rest`, `rpc`, `expression`, `graphql`, `odata` or `asyncapi`. - Default is `rest`. - enum: - - rest - - rpc - - expression - - graphql - - odata - - asyncapi - - custom - type: string - required: - - name - - operation - type: object - type: array - keepActive: - description: If "true", workflow instances is not terminated when - there are no active execution paths. Instance can be terminated - with "terminate end definition" or reaching defined "workflowExecTimeout" - type: boolean - metadata: - description: Metadata custom information shared with the runtime. - x-kubernetes-preserve-unknown-fields: true - retries: - items: - description: Retry ... - properties: - delay: - description: Time delay between retry attempts (ISO 8601 - duration format) - type: string - increment: - description: Static value by which the delay increases during - each attempt (ISO 8601 time format) - type: string - jitter: - description: 'If float type, maximum amount of random time - added or subtracted from the delay between each retry - relative to total delay (between 0 and 1). If string type, - absolute maximum amount of random time added or subtracted - from the delay between each retry (ISO 8601 duration format) - TODO: make iso8601duration compatible this type' - properties: - floatVal: - type: number - strVal: - type: string - type: - description: Type represents the stored type of Float32OrString. - format: int64 - type: integer - type: object - maxAttempts: - anyOf: - - type: integer - - type: string - description: Maximum number of retry attempts. - x-kubernetes-int-or-string: true - maxDelay: - description: Maximum time delay between retry attempts (ISO - 8601 duration format) - type: string - multiplier: - description: Numeric value, if specified the delay between - retries is multiplied by this value. - properties: - floatVal: - type: number - strVal: - type: string - type: - description: Type represents the stored type of Float32OrString. - format: int64 - type: integer - type: object - name: - description: Unique retry strategy name - type: string - required: - - maxAttempts - - name - type: object - type: array - secrets: - description: Secrets allow you to access sensitive information, - such as passwords, OAuth tokens, ssh keys, etc, inside your - Workflow Expressions. - items: - type: string - type: array - start: - description: Workflow start definition. - x-kubernetes-preserve-unknown-fields: true - states: - items: - properties: - callbackState: - description: callbackState executes a function and waits - for callback event that indicates completion of the task. - properties: - action: - description: Defines the action to be executed. - properties: - actionDataFilter: - description: Filter the state data to select only - the data that can be used within function definition - arguments using its fromStateData property. Filter - the action results to select only the result data - that should be added/merged back into the state - data using its results property. 
Select the part - of state data which the action data results should - be added/merged to using the toStateData property. - properties: - fromStateData: - description: Workflow expression that filters - state data that can be used by the action. - type: string - results: - description: Workflow expression that filters - the actions data results. - type: string - toStateData: - description: Workflow expression that selects - a state data element to which the action results - should be added/merged into. If not specified - denotes the top-level state data element. - type: string - useResults: - description: If set to false, action data results - are not added/merged to state data. In this - case 'results' and 'toStateData' should be - ignored. Default is true. - type: boolean - type: object - condition: - description: Expression, if defined, must evaluate - to true for this action to be performed. If false, - action is disregarded. - type: string - eventRef: - description: References a 'trigger' and 'result' - reusable event definitions. - properties: - contextAttributes: - additionalProperties: - type: object - description: Add additional extension context - attributes to the produced event. - type: object - data: - description: If string type, an expression which - selects parts of the states data output to - become the data (payload) of the event referenced - by triggerEventRef. If object type, a custom - object to become the data (payload) of the - event referenced by triggerEventRef. - type: object - invoke: - default: sync - description: Specifies if the function should - be invoked sync or async. Default is sync. - enum: - - async - - sync - type: string - resultEventRef: - description: Reference to the unique name of - a 'consumed' event definition - type: string - resultEventTimeout: - description: Maximum amount of time (ISO 8601 - format) to wait for the result event. If not - defined it be set to the actionExecutionTimeout - type: string - triggerEventRef: - description: Reference to the unique name of - a 'produced' event definition, - type: string - required: - - resultEventRef - - triggerEventRef - type: object - functionRef: - description: References a reusable function definition. - properties: - arguments: - additionalProperties: - type: object - description: 'Arguments (inputs) to be passed - to the referenced function TODO: validate - it as required if function type is graphql' - type: object - invoke: - default: sync - description: Specifies if the function should - be invoked sync or async. Default is sync. - enum: - - async - - sync - type: string - refName: - description: Name of the referenced function. - type: string - selectionSet: - description: 'Used if function type is graphql. - String containing a valid GraphQL selection - set. TODO: validate it as required if function - type is graphql' - type: string - required: - - refName - type: object - id: - description: Defines Unique action identifier. - type: string - name: - description: Defines Unique action name. - type: string - nonRetryableErrors: - description: List of unique references to defined - workflow errors for which the action should not - be retried. Used only when `autoRetries` is set - to `true` - items: - type: string - type: array - retryRef: - description: References a defined workflow retry - definition. If not defined uses the default runtime - retry definition. 
- type: string - retryableErrors: - description: List of unique references to defined - workflow errors for which the action should be - retried. Used only when `autoRetries` is set to - `false` - items: - type: string - type: array - sleep: - description: Defines time period workflow execution - should sleep before / after function execution. - properties: - after: - description: Defines amount of time (ISO 8601 - duration format) to sleep after function/subflow - invocation. Does not apply if 'eventRef' is - defined. - type: string - before: - description: Defines amount of time (ISO 8601 - duration format) to sleep before function/subflow - invocation. Does not apply if 'eventRef' is - defined. - type: string - type: object - subFlowRef: - description: References a workflow to be invoked. - properties: - invoke: - default: sync - description: Specifies if the subflow should - be invoked sync or async. Defaults to sync. - enum: - - async - - sync - type: string - onParentComplete: - default: terminate - description: onParentComplete specifies how - subflow execution should behave when parent - workflow completes if invoke is 'async'. Defaults - to terminate. - enum: - - terminate - - continue - type: string - version: - description: Sub-workflow version - type: string - workflowId: - description: Sub-workflow unique id - type: string - required: - - workflowId - type: object - type: object - eventDataFilter: - description: Event data filter definition. - properties: - data: - description: Workflow expression that filters of - the event data (payload). - type: string - toStateData: - description: Workflow expression that selects a - state data element to which the action results - should be added/merged into. If not specified - denotes the top-level state data element - type: string - useData: - description: If set to false, event payload is not - added/merged to state data. In this case 'data' - and 'toStateData' should be ignored. Default is - true. - type: boolean - type: object - eventRef: - description: References a unique callback event name - in the defined workflow events. - type: string - timeouts: - description: Time period to wait for incoming events - (ISO 8601 format) - properties: - actionExecTimeout: - description: Default single actions definition execution - timeout (ISO 8601 duration format) - type: string - eventTimeout: - description: Default timeout for consuming defined - events (ISO 8601 duration format) - type: string - stateExecTimeout: - description: Default workflow state execution timeout - (ISO 8601 duration format) - properties: - single: - description: Single state execution timeout, - not including retries (ISO 8601 duration format) - type: string - total: - description: Total state execution timeout, - including retries (ISO 8601 duration format) - type: string - required: - - total - type: object - type: object - required: - - action - - eventRef - type: object - compensatedBy: - description: Unique Name of a workflow state which is responsible - for compensation of this state. - type: string - delayState: - description: delayState Causes the workflow execution to - delay for a specified duration. - properties: - timeDelay: - description: Amount of time (ISO 8601 format) to delay - type: string - required: - - timeDelay - type: object - end: - description: State end definition. - x-kubernetes-preserve-unknown-fields: true - eventState: - description: event states await one or more events and perform - actions when they are received. 
If defined as the workflow - starting state, the event state definition controls when - the workflow instances should be created. - properties: - exclusive: - default: true - description: If true consuming one of the defined events - causes its associated actions to be performed. If - false all the defined events must be consumed in order - for actions to be performed. Defaults to true. - type: boolean - onEvents: - description: Define the events to be consumed and optional - actions to be performed. - items: - description: OnEvents define which actions are be - performed for the one or more events. - properties: - actionMode: - default: sequential - description: Should actions be performed sequentially - or in parallel. Default is sequential. - enum: - - sequential - - parallel - type: string - actions: - description: Actions to be performed if expression - matches - items: - description: Action specify invocations of services - or other workflows during workflow execution. - properties: - actionDataFilter: - description: Filter the state data to select - only the data that can be used within - function definition arguments using its - fromStateData property. Filter the action - results to select only the result data - that should be added/merged back into - the state data using its results property. - Select the part of state data which the - action data results should be added/merged - to using the toStateData property. - properties: - fromStateData: - description: Workflow expression that - filters state data that can be used - by the action. - type: string - results: - description: Workflow expression that - filters the actions data results. - type: string - toStateData: - description: Workflow expression that - selects a state data element to which - the action results should be added/merged - into. If not specified denotes the - top-level state data element. - type: string - useResults: - description: If set to false, action - data results are not added/merged - to state data. In this case 'results' - and 'toStateData' should be ignored. - Default is true. - type: boolean - type: object - condition: - description: Expression, if defined, must - evaluate to true for this action to be - performed. If false, action is disregarded. - type: string - eventRef: - description: References a 'trigger' and - 'result' reusable event definitions. - properties: - contextAttributes: - additionalProperties: - type: object - description: Add additional extension - context attributes to the produced - event. - type: object - data: - description: If string type, an expression - which selects parts of the states - data output to become the data (payload) - of the event referenced by triggerEventRef. - If object type, a custom object to - become the data (payload) of the event - referenced by triggerEventRef. - type: object - invoke: - default: sync - description: Specifies if the function - should be invoked sync or async. Default - is sync. - enum: - - async - - sync - type: string - resultEventRef: - description: Reference to the unique - name of a 'consumed' event definition - type: string - resultEventTimeout: - description: Maximum amount of time - (ISO 8601 format) to wait for the - result event. 
If not defined it be - set to the actionExecutionTimeout - type: string - triggerEventRef: - description: Reference to the unique - name of a 'produced' event definition, - type: string - required: - - resultEventRef - - triggerEventRef - type: object - functionRef: - description: References a reusable function - definition. - properties: - arguments: - additionalProperties: - type: object - description: 'Arguments (inputs) to - be passed to the referenced function - TODO: validate it as required if function - type is graphql' - type: object - invoke: - default: sync - description: Specifies if the function - should be invoked sync or async. Default - is sync. - enum: - - async - - sync - type: string - refName: - description: Name of the referenced - function. - type: string - selectionSet: - description: 'Used if function type - is graphql. String containing a valid - GraphQL selection set. TODO: validate - it as required if function type is - graphql' - type: string - required: - - refName - type: object - id: - description: Defines Unique action identifier. - type: string - name: - description: Defines Unique action name. - type: string - nonRetryableErrors: - description: List of unique references to - defined workflow errors for which the - action should not be retried. Used only - when `autoRetries` is set to `true` - items: - type: string - type: array - retryRef: - description: References a defined workflow - retry definition. If not defined uses - the default runtime retry definition. - type: string - retryableErrors: - description: List of unique references to - defined workflow errors for which the - action should be retried. Used only when - `autoRetries` is set to `false` - items: - type: string - type: array - sleep: - description: Defines time period workflow - execution should sleep before / after - function execution. - properties: - after: - description: Defines amount of time - (ISO 8601 duration format) to sleep - after function/subflow invocation. - Does not apply if 'eventRef' is defined. - type: string - before: - description: Defines amount of time - (ISO 8601 duration format) to sleep - before function/subflow invocation. - Does not apply if 'eventRef' is defined. - type: string - type: object - subFlowRef: - description: References a workflow to be - invoked. - properties: - invoke: - default: sync - description: Specifies if the subflow - should be invoked sync or async. Defaults - to sync. - enum: - - async - - sync - type: string - onParentComplete: - default: terminate - description: onParentComplete specifies - how subflow execution should behave - when parent workflow completes if - invoke is 'async'. Defaults to terminate. - enum: - - terminate - - continue - type: string - version: - description: Sub-workflow version - type: string - workflowId: - description: Sub-workflow unique id - type: string - required: - - workflowId - type: object - type: object - type: array - eventDataFilter: - description: eventDataFilter defines the callback - event data filter definition - properties: - data: - description: Workflow expression that filters - of the event data (payload). - type: string - toStateData: - description: Workflow expression that selects - a state data element to which the action - results should be added/merged into. If - not specified denotes the top-level state - data element - type: string - useData: - description: If set to false, event payload - is not added/merged to state data. In this - case 'data' and 'toStateData' should be - ignored. 
Default is true. - type: boolean - type: object - eventRefs: - description: References one or more unique event - names in the defined workflow events. - items: - type: string - minItems: 1 - type: array - required: - - eventRefs - type: object - minItems: 1 - type: array - timeouts: - description: State specific timeouts. - properties: - actionExecTimeout: - description: Default single actions definition execution - timeout (ISO 8601 duration format) - type: string - eventTimeout: - description: Default timeout for consuming defined - events (ISO 8601 duration format) - type: string - stateExecTimeout: - description: Default workflow state execution timeout - (ISO 8601 duration format) - properties: - single: - description: Single state execution timeout, - not including retries (ISO 8601 duration format) - type: string - total: - description: Total state execution timeout, - including retries (ISO 8601 duration format) - type: string - required: - - total - type: object - type: object - required: - - onEvents - type: object - forEachState: - description: forEachState used to execute actions for each - element of a data set. - properties: - actions: - description: Actions to be executed for each of the - elements of inputCollection. - items: - description: Action specify invocations of services - or other workflows during workflow execution. - properties: - actionDataFilter: - description: Filter the state data to select only - the data that can be used within function definition - arguments using its fromStateData property. - Filter the action results to select only the - result data that should be added/merged back - into the state data using its results property. - Select the part of state data which the action - data results should be added/merged to using - the toStateData property. - properties: - fromStateData: - description: Workflow expression that filters - state data that can be used by the action. - type: string - results: - description: Workflow expression that filters - the actions data results. - type: string - toStateData: - description: Workflow expression that selects - a state data element to which the action - results should be added/merged into. If - not specified denotes the top-level state - data element. - type: string - useResults: - description: If set to false, action data - results are not added/merged to state data. - In this case 'results' and 'toStateData' - should be ignored. Default is true. - type: boolean - type: object - condition: - description: Expression, if defined, must evaluate - to true for this action to be performed. If - false, action is disregarded. - type: string - eventRef: - description: References a 'trigger' and 'result' - reusable event definitions. - properties: - contextAttributes: - additionalProperties: - type: object - description: Add additional extension context - attributes to the produced event. - type: object - data: - description: If string type, an expression - which selects parts of the states data output - to become the data (payload) of the event - referenced by triggerEventRef. If object - type, a custom object to become the data - (payload) of the event referenced by triggerEventRef. - type: object - invoke: - default: sync - description: Specifies if the function should - be invoked sync or async. Default is sync. 
- enum: - - async - - sync - type: string - resultEventRef: - description: Reference to the unique name - of a 'consumed' event definition - type: string - resultEventTimeout: - description: Maximum amount of time (ISO 8601 - format) to wait for the result event. If - not defined it be set to the actionExecutionTimeout - type: string - triggerEventRef: - description: Reference to the unique name - of a 'produced' event definition, - type: string - required: - - resultEventRef - - triggerEventRef - type: object - functionRef: - description: References a reusable function definition. - properties: - arguments: - additionalProperties: - type: object - description: 'Arguments (inputs) to be passed - to the referenced function TODO: validate - it as required if function type is graphql' - type: object - invoke: - default: sync - description: Specifies if the function should - be invoked sync or async. Default is sync. - enum: - - async - - sync - type: string - refName: - description: Name of the referenced function. - type: string - selectionSet: - description: 'Used if function type is graphql. - String containing a valid GraphQL selection - set. TODO: validate it as required if function - type is graphql' - type: string - required: - - refName - type: object - id: - description: Defines Unique action identifier. - type: string - name: - description: Defines Unique action name. - type: string - nonRetryableErrors: - description: List of unique references to defined - workflow errors for which the action should - not be retried. Used only when `autoRetries` - is set to `true` - items: - type: string - type: array - retryRef: - description: References a defined workflow retry - definition. If not defined uses the default - runtime retry definition. - type: string - retryableErrors: - description: List of unique references to defined - workflow errors for which the action should - be retried. Used only when `autoRetries` is - set to `false` - items: - type: string - type: array - sleep: - description: Defines time period workflow execution - should sleep before / after function execution. - properties: - after: - description: Defines amount of time (ISO 8601 - duration format) to sleep after function/subflow - invocation. Does not apply if 'eventRef' - is defined. - type: string - before: - description: Defines amount of time (ISO 8601 - duration format) to sleep before function/subflow - invocation. Does not apply if 'eventRef' - is defined. - type: string - type: object - subFlowRef: - description: References a workflow to be invoked. - properties: - invoke: - default: sync - description: Specifies if the subflow should - be invoked sync or async. Defaults to sync. - enum: - - async - - sync - type: string - onParentComplete: - default: terminate - description: onParentComplete specifies how - subflow execution should behave when parent - workflow completes if invoke is 'async'. - Defaults to terminate. - enum: - - terminate - - continue - type: string - version: - description: Sub-workflow version - type: string - workflowId: - description: Sub-workflow unique id - type: string - required: - - workflowId - type: object - type: object - minItems: 0 - type: array - batchSize: - anyOf: - - type: integer - - type: string - description: Specifies how many iterations may run in - parallel at the same time. Used if mode property is - set to parallel (default). If not specified, its value - should be the size of the inputCollection. 
- x-kubernetes-int-or-string: true - inputCollection: - description: Workflow expression selecting an array - element of the states' data. - type: string - iterationParam: - description: Name of the iteration parameter that can - be referenced in actions/workflow. For each parallel - iteration, this param should contain a unique element - of the inputCollection array. - type: string - mode: - default: parallel - description: Specifies how iterations are to be performed - (sequential or in parallel), defaults to parallel. - enum: - - sequential - - parallel - type: string - outputCollection: - description: Workflow expression specifying an array - element of the states data to add the results of each - iteration. - type: string - timeouts: - description: State specific timeout. - properties: - actionExecTimeout: - description: Default single actions definition execution - timeout (ISO 8601 duration format) - type: string - stateExecTimeout: - description: Default workflow state execution timeout - (ISO 8601 duration format) - properties: - single: - description: Single state execution timeout, - not including retries (ISO 8601 duration format) - type: string - total: - description: Total state execution timeout, - including retries (ISO 8601 duration format) - type: string - required: - - total - type: object - type: object - required: - - inputCollection - type: object - id: - description: Unique State id. - type: string - injectState: - description: injectState used to inject static data into - state data input. - properties: - data: - additionalProperties: - type: object - description: JSON object which can be set as state's - data input and can be manipulated via filter - minProperties: 1 - type: object - timeouts: - description: State specific timeouts - properties: - stateExecTimeout: - description: Default workflow state execution timeout - (ISO 8601 duration format) - properties: - single: - description: Single state execution timeout, - not including retries (ISO 8601 duration format) - type: string - total: - description: Total state execution timeout, - including retries (ISO 8601 duration format) - type: string - required: - - total - type: object - type: object - required: - - data - type: object - metadata: - additionalProperties: - type: object - description: Metadata information. - type: object - name: - description: State name. - type: string - onErrors: - description: States error handling and retries definitions. - items: - description: OnError ... - properties: - end: - description: End workflow execution in case of this - error. If retryRef is defined, this ends workflow - only if retries were unsuccessful. - x-kubernetes-preserve-unknown-fields: true - errorRef: - description: ErrorRef Reference to a unique workflow - error definition. Used of errorRefs is not used - type: string - errorRefs: - description: ErrorRefs References one or more workflow - error definitions. Used if errorRef is not used - items: - type: string - type: array - transition: - description: Transition to next state to handle the - error. If retryRef is defined, this transition is - taken only if retries were unsuccessful. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - operationState: - description: operationState defines a set of actions to - be performed in sequence or in parallel. - properties: - actionMode: - default: sequential - description: Specifies whether actions are performed - in sequence or in parallel, defaults to sequential. 
- enum: - - sequential - - parallel - type: string - actions: - description: Actions to be performed - items: - description: Action specify invocations of services - or other workflows during workflow execution. - properties: - actionDataFilter: - description: Filter the state data to select only - the data that can be used within function definition - arguments using its fromStateData property. - Filter the action results to select only the - result data that should be added/merged back - into the state data using its results property. - Select the part of state data which the action - data results should be added/merged to using - the toStateData property. - properties: - fromStateData: - description: Workflow expression that filters - state data that can be used by the action. - type: string - results: - description: Workflow expression that filters - the actions data results. - type: string - toStateData: - description: Workflow expression that selects - a state data element to which the action - results should be added/merged into. If - not specified denotes the top-level state - data element. - type: string - useResults: - description: If set to false, action data - results are not added/merged to state data. - In this case 'results' and 'toStateData' - should be ignored. Default is true. - type: boolean - type: object - condition: - description: Expression, if defined, must evaluate - to true for this action to be performed. If - false, action is disregarded. - type: string - eventRef: - description: References a 'trigger' and 'result' - reusable event definitions. - properties: - contextAttributes: - additionalProperties: - type: object - description: Add additional extension context - attributes to the produced event. - type: object - data: - description: If string type, an expression - which selects parts of the states data output - to become the data (payload) of the event - referenced by triggerEventRef. If object - type, a custom object to become the data - (payload) of the event referenced by triggerEventRef. - type: object - invoke: - default: sync - description: Specifies if the function should - be invoked sync or async. Default is sync. - enum: - - async - - sync - type: string - resultEventRef: - description: Reference to the unique name - of a 'consumed' event definition - type: string - resultEventTimeout: - description: Maximum amount of time (ISO 8601 - format) to wait for the result event. If - not defined it be set to the actionExecutionTimeout - type: string - triggerEventRef: - description: Reference to the unique name - of a 'produced' event definition, - type: string - required: - - resultEventRef - - triggerEventRef - type: object - functionRef: - description: References a reusable function definition. - properties: - arguments: - additionalProperties: - type: object - description: 'Arguments (inputs) to be passed - to the referenced function TODO: validate - it as required if function type is graphql' - type: object - invoke: - default: sync - description: Specifies if the function should - be invoked sync or async. Default is sync. - enum: - - async - - sync - type: string - refName: - description: Name of the referenced function. - type: string - selectionSet: - description: 'Used if function type is graphql. - String containing a valid GraphQL selection - set. TODO: validate it as required if function - type is graphql' - type: string - required: - - refName - type: object - id: - description: Defines Unique action identifier. 
- type: string - name: - description: Defines Unique action name. - type: string - nonRetryableErrors: - description: List of unique references to defined - workflow errors for which the action should - not be retried. Used only when `autoRetries` - is set to `true` - items: - type: string - type: array - retryRef: - description: References a defined workflow retry - definition. If not defined uses the default - runtime retry definition. - type: string - retryableErrors: - description: List of unique references to defined - workflow errors for which the action should - be retried. Used only when `autoRetries` is - set to `false` - items: - type: string - type: array - sleep: - description: Defines time period workflow execution - should sleep before / after function execution. - properties: - after: - description: Defines amount of time (ISO 8601 - duration format) to sleep after function/subflow - invocation. Does not apply if 'eventRef' - is defined. - type: string - before: - description: Defines amount of time (ISO 8601 - duration format) to sleep before function/subflow - invocation. Does not apply if 'eventRef' - is defined. - type: string - type: object - subFlowRef: - description: References a workflow to be invoked. - properties: - invoke: - default: sync - description: Specifies if the subflow should - be invoked sync or async. Defaults to sync. - enum: - - async - - sync - type: string - onParentComplete: - default: terminate - description: onParentComplete specifies how - subflow execution should behave when parent - workflow completes if invoke is 'async'. - Defaults to terminate. - enum: - - terminate - - continue - type: string - version: - description: Sub-workflow version - type: string - workflowId: - description: Sub-workflow unique id - type: string - required: - - workflowId - type: object - type: object - minItems: 0 - type: array - timeouts: - description: State specific timeouts - properties: - actionExecTimeout: - description: Default single actions definition execution - timeout (ISO 8601 duration format) - type: string - stateExecTimeout: - description: Defines workflow state execution timeout. - properties: - single: - description: Single state execution timeout, - not including retries (ISO 8601 duration format) - type: string - total: - description: Total state execution timeout, - including retries (ISO 8601 duration format) - type: string - required: - - total - type: object - type: object - required: - - actions - type: object - parallelState: - description: parallelState Consists of a number of states - that are executed in parallel. - properties: - branches: - description: List of branches for this parallel state. - items: - description: Branch Definition - properties: - actions: - description: Actions to be executed in this branch - items: - description: Action specify invocations of services - or other workflows during workflow execution. - properties: - actionDataFilter: - description: Filter the state data to select - only the data that can be used within - function definition arguments using its - fromStateData property. Filter the action - results to select only the result data - that should be added/merged back into - the state data using its results property. - Select the part of state data which the - action data results should be added/merged - to using the toStateData property. - properties: - fromStateData: - description: Workflow expression that - filters state data that can be used - by the action. 
- type: string - results: - description: Workflow expression that - filters the actions data results. - type: string - toStateData: - description: Workflow expression that - selects a state data element to which - the action results should be added/merged - into. If not specified denotes the - top-level state data element. - type: string - useResults: - description: If set to false, action - data results are not added/merged - to state data. In this case 'results' - and 'toStateData' should be ignored. - Default is true. - type: boolean - type: object - condition: - description: Expression, if defined, must - evaluate to true for this action to be - performed. If false, action is disregarded. - type: string - eventRef: - description: References a 'trigger' and - 'result' reusable event definitions. - properties: - contextAttributes: - additionalProperties: - type: object - description: Add additional extension - context attributes to the produced - event. - type: object - data: - description: If string type, an expression - which selects parts of the states - data output to become the data (payload) - of the event referenced by triggerEventRef. - If object type, a custom object to - become the data (payload) of the event - referenced by triggerEventRef. - type: object - invoke: - default: sync - description: Specifies if the function - should be invoked sync or async. Default - is sync. - enum: - - async - - sync - type: string - resultEventRef: - description: Reference to the unique - name of a 'consumed' event definition - type: string - resultEventTimeout: - description: Maximum amount of time - (ISO 8601 format) to wait for the - result event. If not defined it be - set to the actionExecutionTimeout - type: string - triggerEventRef: - description: Reference to the unique - name of a 'produced' event definition, - type: string - required: - - resultEventRef - - triggerEventRef - type: object - functionRef: - description: References a reusable function - definition. - properties: - arguments: - additionalProperties: - type: object - description: 'Arguments (inputs) to - be passed to the referenced function - TODO: validate it as required if function - type is graphql' - type: object - invoke: - default: sync - description: Specifies if the function - should be invoked sync or async. Default - is sync. - enum: - - async - - sync - type: string - refName: - description: Name of the referenced - function. - type: string - selectionSet: - description: 'Used if function type - is graphql. String containing a valid - GraphQL selection set. TODO: validate - it as required if function type is - graphql' - type: string - required: - - refName - type: object - id: - description: Defines Unique action identifier. - type: string - name: - description: Defines Unique action name. - type: string - nonRetryableErrors: - description: List of unique references to - defined workflow errors for which the - action should not be retried. Used only - when `autoRetries` is set to `true` - items: - type: string - type: array - retryRef: - description: References a defined workflow - retry definition. If not defined uses - the default runtime retry definition. - type: string - retryableErrors: - description: List of unique references to - defined workflow errors for which the - action should be retried. Used only when - `autoRetries` is set to `false` - items: - type: string - type: array - sleep: - description: Defines time period workflow - execution should sleep before / after - function execution. 
- properties: - after: - description: Defines amount of time - (ISO 8601 duration format) to sleep - after function/subflow invocation. - Does not apply if 'eventRef' is defined. - type: string - before: - description: Defines amount of time - (ISO 8601 duration format) to sleep - before function/subflow invocation. - Does not apply if 'eventRef' is defined. - type: string - type: object - subFlowRef: - description: References a workflow to be - invoked. - properties: - invoke: - default: sync - description: Specifies if the subflow - should be invoked sync or async. Defaults - to sync. - enum: - - async - - sync - type: string - onParentComplete: - default: terminate - description: onParentComplete specifies - how subflow execution should behave - when parent workflow completes if - invoke is 'async'. Defaults to terminate. - enum: - - terminate - - continue - type: string - version: - description: Sub-workflow version - type: string - workflowId: - description: Sub-workflow unique id - type: string - required: - - workflowId - type: object - type: object - minItems: 1 - type: array - name: - description: Branch name - type: string - timeouts: - description: Branch specific timeout settings - properties: - actionExecTimeout: - description: Single actions definition execution - timeout duration (ISO 8601 duration format) - type: string - branchExecTimeout: - description: Single branch execution timeout - duration (ISO 8601 duration format) - type: string - type: object - required: - - actions - - name - type: object - minItems: 1 - type: array - completionType: - default: allOf - description: Option types on how to complete branch - execution. Defaults to `allOf`. - enum: - - allOf - - atLeast - type: string - numCompleted: - anyOf: - - type: integer - - type: string - description: 'Used when branchCompletionType is set - to atLeast to specify the least number of branches - that must complete in order for the state to transition/end. - TODO: change this field to unmarshal result as int' - x-kubernetes-int-or-string: true - timeouts: - description: State specific timeouts - properties: - branchExecTimeout: - description: Default single branch execution timeout - (ISO 8601 duration format) - type: string - stateExecTimeout: - description: Default workflow state execution timeout - (ISO 8601 duration format) - properties: - single: - description: Single state execution timeout, - not including retries (ISO 8601 duration format) - type: string - total: - description: Total state execution timeout, - including retries (ISO 8601 duration format) - type: string - required: - - total - type: object - type: object - required: - - branches - type: object - sleepState: - description: sleepState suspends workflow execution for - a given time duration. - properties: - duration: - description: Duration (ISO 8601 duration format) to - sleep - type: string - timeouts: - description: Timeouts State specific timeouts - properties: - stateExecTimeout: - description: Default workflow state execution timeout - (ISO 8601 duration format) - properties: - single: - description: Single state execution timeout, - not including retries (ISO 8601 duration format) - type: string - total: - description: Total state execution timeout, - including retries (ISO 8601 duration format) - type: string - required: - - total - type: object - type: object - required: - - duration - type: object - stateDataFilter: - description: State data filter. 
- properties: - input: - description: Workflow expression to filter the state - data input - type: string - output: - description: Workflow expression that filters the state - data output - type: string - type: object - switchState: - description: 'switchState is workflow''s gateways: direct - transitions onf a workflow based on certain conditions.' - properties: - dataConditions: - description: Defines conditions evaluated against data - items: - description: DataCondition specify a data-based condition - statement which causes a transition to another workflow - state if evaluated to true. - properties: - condition: - description: Workflow expression evaluated against - state data. Must evaluate to true or false. - type: string - end: - description: TODO End or Transition needs to be - exclusive tag, one or another should be set. - Explicit transition to end - properties: - compensate: - description: If set to true, triggers workflow - compensation before workflow execution completes. - Default is false. - type: boolean - continueAs: - description: Defines that current workflow - execution should stop, and execution should - continue as a new workflow instance of the - provided id - properties: - data: - description: If string type, an expression - which selects parts of the states data - output to become the workflow data input - of continued execution. If object type, - a custom object to become the workflow - data input of the continued execution - type: object - version: - description: Version of the workflow to - continue execution as. - type: string - workflowExecTimeout: - description: WorkflowExecTimeout Workflow - execution timeout to be used by the - workflow continuing execution. Overwrites - any specific settings set by that workflow - properties: - duration: - default: unlimited - description: Workflow execution timeout - duration (ISO 8601 duration format). - If not specified should be 'unlimited'. - type: string - interrupt: - description: If false, workflow instance - is allowed to finish current execution. - If true, current workflow execution - is stopped immediately. Default - is false. - type: boolean - runBefore: - description: Name of a workflow state - to be executed before workflow instance - is terminated. - type: string - required: - - duration - type: object - workflowId: - description: Unique id of the workflow - to continue execution as. - type: string - required: - - workflowId - type: object - produceEvents: - description: Array of producedEvent definitions. - Defines events that should be produced. - items: - description: ProduceEvent Defines the event - (CloudEvent format) to be produced when - workflow execution completes or during - a workflow transitions. The eventRef property - must match the name of one of the defined - produced events in the events definition. - properties: - contextAttributes: - additionalProperties: - type: string - description: Add additional event extension - context attributes. - type: object - data: - description: If String, expression which - selects parts of the states data output - to become the data of the produced - event. If object a custom object to - become the data of produced event. - type: object - eventRef: - description: Reference to a defined - unique event name in the events definition - type: string - required: - - eventRef - type: object - type: array - terminate: - description: If true, completes all execution - flows in the given workflow instance. 
- type: boolean - type: object - metadata: - additionalProperties: - type: object - description: Metadata information. - type: object - name: - description: Data condition name. - type: string - transition: - description: Workflow transition if condition - is evaluated to true - properties: - compensate: - default: false - description: If set to true, triggers workflow - compensation before this transition is taken. - Default is false. - type: boolean - nextState: - description: Name of the state to transition - to next. - type: string - produceEvents: - description: Array of producedEvent definitions. - Events to be produced before the transition - takes place. - items: - description: ProduceEvent Defines the event - (CloudEvent format) to be produced when - workflow execution completes or during - a workflow transitions. The eventRef property - must match the name of one of the defined - produced events in the events definition. - properties: - contextAttributes: - additionalProperties: - type: string - description: Add additional event extension - context attributes. - type: object - data: - description: If String, expression which - selects parts of the states data output - to become the data of the produced - event. If object a custom object to - become the data of produced event. - type: object - eventRef: - description: Reference to a defined - unique event name in the events definition - type: string - required: - - eventRef - type: object - type: array - required: - - nextState - type: object - required: - - condition - - end - type: object - type: array - defaultCondition: - description: Default transition of the workflow if there - is no matching data conditions. Can include a transition - or end definition. - properties: - end: - description: If this state an end state - x-kubernetes-preserve-unknown-fields: true - transition: - description: Serverless workflow states can have - one or more incoming and outgoing transitions - (from/to other states). Each state can define - a transition definition that is used to determine - which state to transition to next. - x-kubernetes-preserve-unknown-fields: true - type: object - eventConditions: - description: Defines conditions evaluated against events. - items: - description: EventCondition specify events which the - switch state must wait for. - properties: - end: - description: TODO End or Transition needs to be - exclusive tag, one or another should be set. - Explicit transition to end - x-kubernetes-preserve-unknown-fields: true - eventDataFilter: - description: Event data filter definition. - properties: - data: - description: Workflow expression that filters - of the event data (payload). - type: string - toStateData: - description: Workflow expression that selects - a state data element to which the action - results should be added/merged into. If - not specified denotes the top-level state - data element - type: string - useData: - description: If set to false, event payload - is not added/merged to state data. In this - case 'data' and 'toStateData' should be - ignored. Default is true. - type: boolean - type: object - eventRef: - description: References a unique event name in - the defined workflow events. - type: string - metadata: - description: Metadata information. - x-kubernetes-preserve-unknown-fields: true - name: - description: Event condition name. 
- type: string - transition: - description: Workflow transition if condition - is evaluated to true - x-kubernetes-preserve-unknown-fields: true - required: - - eventRef - type: object - type: array - timeouts: - description: SwitchState specific timeouts - properties: - eventTimeout: - description: 'Specify the expire value to transitions - to defaultCondition. When event-based conditions - do not arrive. NOTE: this is only available for - EventConditions' - type: string - stateExecTimeout: - description: Default workflow state execution timeout - (ISO 8601 duration format) - properties: - single: - description: Single state execution timeout, - not including retries (ISO 8601 duration format) - type: string - total: - description: Total state execution timeout, - including retries (ISO 8601 duration format) - type: string - required: - - total - type: object - type: object - required: - - defaultCondition - type: object - transition: - description: Next transition of the workflow after the time - delay. - x-kubernetes-preserve-unknown-fields: true - type: - description: stateType can be any of delay, callback, event, - foreach, inject, operation, parallel, sleep, switch - enum: - - delay - - callback - - event - - foreach - - inject - - operation - - parallel - - sleep - - switch - type: string - usedForCompensation: - description: If true, this state is used to compensate another - state. Default is false. - type: boolean - required: - - name - - type - type: object - minItems: 1 - type: array - x-kubernetes-preserve-unknown-fields: true - timeouts: - description: Defines the workflow default timeout settings. - properties: - actionExecTimeout: - description: ActionExecTimeout Single actions definition execution - timeout duration (ISO 8601 duration format). - type: string - branchExecTimeout: - description: BranchExecTimeout Single branch execution timeout - duration (ISO 8601 duration format). - type: string - eventTimeout: - description: EventTimeout Timeout duration to wait for consuming - defined events (ISO 8601 duration format). - type: string - stateExecTimeout: - description: StateExecTimeout Total state execution timeout - (including retries) (ISO 8601 duration format). - properties: - single: - description: Single state execution timeout, not including - retries (ISO 8601 duration format) - type: string - total: - description: Total state execution timeout, including - retries (ISO 8601 duration format) - type: string - required: - - total - type: object - workflowExecTimeout: - description: WorkflowExecTimeout Workflow execution timeout - duration (ISO 8601 duration format). If not specified should - be 'unlimited'. - properties: - duration: - default: unlimited - description: Workflow execution timeout duration (ISO - 8601 duration format). If not specified should be 'unlimited'. - type: string - interrupt: - description: If false, workflow instance is allowed to - finish current execution. If true, current workflow - execution is stopped immediately. Default is false. - type: boolean - runBefore: - description: Name of a workflow state to be executed before - workflow instance is terminated. - type: string - required: - - duration - type: object - type: object - required: - - states - type: object - resources: - description: Resources workflow resources that are linked to this - workflow definition. For example, a collection of OpenAPI specification - files. 
- properties: - configMaps: - items: - description: ConfigMapWorkflowResource ConfigMap local reference - holding one or more workflow resources, such as OpenAPI files - that will be mounted in the workflow application. - properties: - configMap: - description: ConfigMap the given configMap name in the same - workflow context to find the resource - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - workflowPath: - description: WorkflowPath path relative to the workflow - application root file system within the pod (//src/main/resources). Starting trailing slashes will - be removed. - type: string - required: - - configMap - type: object - type: array - type: object - required: - - flow - type: object - status: - description: SonataFlowStatus defines the observed state of SonataFlow - properties: - address: - description: Address is used as a part of Addressable interface (status.address.url) - for knative - properties: - CACerts: - description: CACerts is the Certification Authority (CA) certificates - in PEM format according to https://www.rfc-editor.org/rfc/rfc7468. - type: string - name: - description: Name is the name of the address. - type: string - url: - type: string - type: object - conditions: - description: The latest available observations of a resource's current - state. - items: - description: Condition describes the common structure for conditions - in our types - properties: - lastUpdateTime: - description: The last time this condition was updated. - format: date-time - type: string - message: - description: A human-readable message indicating details about - the transition. - type: string - reason: - description: The reason for the condition's last transition. - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: Type condition for the given object - type: string - required: - - status - - type - type: object - type: array - endpoint: - description: Endpoint is an externally accessible URL of the workflow - type: string - lastTimeRecoverAttempt: - format: date-time - type: string - observedGeneration: - description: The generation observed by the deployment controller. 
- format: int64 - type: integer - recoverFailureAttempts: - description: keeps track of how many failure recovers a given workflow - had so far - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: sonataflow-operator-controller-manager - namespace: sonataflow-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: sonataflow-operator-leader-election-role - namespace: sonataflow-operator-system -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: sonataflow-operator-builder-manager-role -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - pods/exec - - services - - services/finalizers - - namespaces - - serviceaccounts - - persistentvolumeclaims - - secrets - - events - - deployments - - nodes - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - configmaps - - pods - - pods/exec - - services - - services/finalizers - - namespaces - - serviceaccounts - - persistentvolumeclaims - - secrets - - events - - deployments - - nodes - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - - rolebindings - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: sonataflow-operator-leases -rules: -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: sonataflow-operator-manager-role -rules: -- apiGroups: - - sonataflow.org - resources: - - sonataflowbuilds - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - sonataflow.org - resources: - - sonataflowbuilds/finalizers - verbs: - - update -- apiGroups: - - sonataflow.org - resources: - - sonataflowbuilds/status - verbs: - - get - - patch - - update -- apiGroups: - - sonataflow.org - resources: - - sonataflowplatforms - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - sonataflow.org - resources: - - sonataflowplatforms/finalizers - verbs: - - update -- apiGroups: - - sonataflow.org - resources: - - sonataflowplatforms/status - verbs: - - get - - patch - - update -- apiGroups: - - sonataflow.org - resources: - - sonataflows - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - sonataflow.org - resources: - - sonataflows/finalizers - verbs: - - update -- apiGroups: - - sonataflow.org - resources: - - sonataflows/status - verbs: - - get - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: sonataflow-operator-metrics-reader -rules: -- nonResourceURLs: - - /metrics - verbs: - - get ---- -apiVersion: 
rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: sonataflow-operator-openshift-manager-role -rules: -- apiGroups: - - route.openshift.io - resources: - - route - - routes - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - route.openshift.io - resources: - - route/finalizers - - routes/finalizers - verbs: - - get - - list - - create - - update - - delete - - deletecollection - - patch - - watch -- apiGroups: - - image.openshift.io - resources: - - imagestreams - - imagestreamtags - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - image.openshift.io - resources: - - imagestreams/finalizers - - imagestreamtags/finalizers - verbs: - - get - - list - - create - - update - - delete - - deletecollection - - patch - - watch -- apiGroups: - - build.openshift.io - resources: - - buildconfigs - - builds - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - build.openshift.io - resources: - - buildconfigs/finalizers - - builds/finalizers - verbs: - - get - - list - - create - - update - - delete - - deletecollection - - patch - - watch -- apiGroups: - - build.openshift.io - resources: - - buildconfigs/instantiatebinary - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: sonataflow-operator-proxy-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: sonataflow-operator-leader-election-rolebinding - namespace: sonataflow-operator-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: sonataflow-operator-leader-election-role -subjects: -- kind: ServiceAccount - name: sonataflow-operator-controller-manager - namespace: sonataflow-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: sonataflow-operator-builder-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: sonataflow-operator-builder-manager-role -subjects: -- kind: ServiceAccount - name: sonataflow-operator-controller-manager - namespace: sonataflow-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: sonataflow-operator-leases-binding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: sonataflow-operator-leases -subjects: -- kind: ServiceAccount - name: sonataflow-operator-controller-manager - namespace: sonataflow-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: sonataflow-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: sonataflow-operator-manager-role -subjects: -- kind: ServiceAccount - name: sonataflow-operator-controller-manager - namespace: sonataflow-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: sonataflow-operator-openshift-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: sonataflow-operator-openshift-manager-role -subjects: -- kind: ServiceAccount - name: sonataflow-operator-controller-manager - namespace: sonataflow-operator-system 
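# A quick sanity check (a sketch, assuming the default namespace and ServiceAccount
# name used in these manifests) that the sonataflow-operator-manager-role binding
# above grants the controller-manager access to SonataFlow resources:
#
#   kubectl auth can-i list sonataflows.sonataflow.org \
#     --as=system:serviceaccount:sonataflow-operator-system:sonataflow-operator-controller-manager
#
# A "yes" answer indicates the ClusterRoleBinding above was applied as expected;
# "list" on sonataflows is one of the verbs granted by sonataflow-operator-manager-role.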
---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: sonataflow-operator-proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: sonataflow-operator-proxy-role -subjects: -- kind: ServiceAccount - name: sonataflow-operator-controller-manager - namespace: sonataflow-operator-system ---- -apiVersion: v1 -data: - DEFAULT_BUILDER_RESOURCE_NAME: Dockerfile - DEFAULT_WORKFLOW_EXTENSION: .sw.json - Dockerfile: "FROM quay.io/kiegroup/kogito-swf-builder:1.44 AS builder\n\n# variables - that can be overridden by the builder\n# To add a Quarkus extension to your application\nARG - QUARKUS_EXTENSIONS\n# Args to pass to the Quarkus CLI add extension command\nARG - QUARKUS_ADD_EXTENSION_ARGS\n\n# Copy from build context to skeleton resources - project\nCOPY --chmod=644 * ./resources/\n\nRUN /home/kogito/launch/build-app.sh - ./resources\n \n#=============================\n# Runtime Run\n#=============================\nFROM - registry.access.redhat.com/ubi8/openjdk-11:latest\n\nENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'\n - \ \n# We make four distinct layers so if there are application changes the library - layers can be re-used\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/lib/ - /deployments/lib/\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/*.jar - /deployments/\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/app/ - /deployments/app/\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/quarkus/ - /deployments/quarkus/\n\nEXPOSE 8080\nUSER 185\nENV AB_JOLOKIA_OFF=\"\"\nENV JAVA_OPTS=\"-Dquarkus.http.host=0.0.0.0 - -Djava.util.logging.manager=org.jboss.logmanager.LogManager\"\nENV JAVA_APP_JAR=\"/deployments/quarkus-run.jar\"\n" -kind: ConfigMap -metadata: - name: sonataflow-operator-builder-config - namespace: sonataflow-operator-system ---- -apiVersion: v1 -data: - controller_manager_config.yaml: | - apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 - kind: ControllerManagerConfig - health: - healthProbeBindAddress: :8081 - metrics: - bindAddress: 127.0.0.1:8080 - webhook: - port: 9443 - leaderElection: - leaderElect: true - resourceName: 1be5e57d.kiegroup.org -kind: ConfigMap -metadata: - name: sonataflow-operator-manager-config - namespace: sonataflow-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - name: sonataflow-operator-controller-manager-metrics-service - namespace: sonataflow-operator-system -spec: - ports: - - name: https - port: 8443 - protocol: TCP - targetPort: https - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: sonataflow-operator-controller-manager - namespace: sonataflow-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: manager - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0 - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - protocol: TCP - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 
64Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - - args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 - - --leader-elect - - --v=0 - command: - - /usr/local/bin/manager - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/kiegroup/kogito-serverless-operator:1.44 - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - name: manager - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: sonataflow-operator-controller-manager - terminationGracePeriodSeconds: 10 diff --git a/deployment/kustomize/workflows/sonataflow-event-timeout/configmap_event-timeout-props.yaml b/deployment/kustomize/workflows/sonataflow-event-timeout/configmap_event-timeout-props.yaml deleted file mode 100644 index c2b29f7..0000000 --- a/deployment/kustomize/workflows/sonataflow-event-timeout/configmap_event-timeout-props.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: v1 -data: - application.properties: | - kogito.service.url=${kubernetes:services.v1/event-timeout} - quarkus.log.category."io.smallrye.reactive.messaging".level = DEBUG - quarkus.log.category."org.kie".level = DEBUG - quarkus.log.category."io.quarkus.reactivemessaging".level = DEBUG - quarkus.log.category."io.vertx".level = DEBUG - quarkus.datasource.jdbc.url=jdbc:postgresql://postgres-db-service.postgres:5432/sonataflow - quarkus.datasource.username=sonataflow - quarkus.datasource.password=sonataflow - kogito.jobs-service.url=${kubernetes:services.v1/jobs-service-service} - # Job Service kogito-addons-quarkus-jobs-knative-eventing configuration - mp.messaging.outgoing.kogito-job-service-job-request-events.connector=quarkus-http - mp.messaging.outgoing.kogito-job-service-job-request-events.url=http://jobs-service-service/v2/jobs/events - # Data Index configuration - mp.messaging.outgoing.kogito-processinstances-events.url=http://data-index-service/processes - mp.messaging.outgoing.kogito-usertaskinstances-events.url=http://data-index-service/tasks - mp.messaging.outgoing.kogito-variables-events.url=http://data-index-service/variables - - # Skip user tasks and variables events sending. 
- kogito.events.usertasks.enabled=false - kogito.events.variables.enabled=false -kind: ConfigMap -metadata: - labels: - app: event-timeout - name: event-timeout-props diff --git a/deployment/kustomize/workflows/sonataflow-event-timeout/kustomization.yaml b/deployment/kustomize/workflows/sonataflow-event-timeout/kustomization.yaml deleted file mode 100644 index 053db9b..0000000 --- a/deployment/kustomize/workflows/sonataflow-event-timeout/kustomization.yaml +++ /dev/null @@ -1,3 +0,0 @@ -resources: -- configmap_event-timeout-props.yaml -- sonataflow_event_timeout.yaml diff --git a/deployment/kustomize/workflows/sonataflow-event-timeout/sonataflow_event_timeout.yaml b/deployment/kustomize/workflows/sonataflow-event-timeout/sonataflow_event_timeout.yaml deleted file mode 100644 index 1ab8656..0000000 --- a/deployment/kustomize/workflows/sonataflow-event-timeout/sonataflow_event_timeout.yaml +++ /dev/null @@ -1,87 +0,0 @@ -apiVersion: sonataflow.org/v1alpha08 -kind: SonataFlow -metadata: - name: event-timeout - annotations: - sonataflow.org/description: Event timeout example on k8s! - sonataflow.org/version: 0.0.1 - sonataflow.org/profile: prod -spec: - flow: - start: PrintStartMessage - events: - - name: event1 - source: '' - type: event1_event_type - - name: event2 - source: '' - type: event2_event_type - functions: - - name: systemOut - type: custom - operation: sysout - timeouts: - eventTimeout: PT30S - states: - - name: PrintStartMessage - type: operation - actions: - - name: printSystemOut - functionRef: - refName: systemOut - arguments: - message: "${\"event-state-timeouts: \" + $WORKFLOW.instanceId + \" has started.\"}" - transition: WaitForEvent1 - - name: WaitForEvent1 - type: event - onEvents: - - eventRefs: [ event1 ] - eventDataFilter: - data: "${ \"The event1 was received.\" }" - toStateData: "${ .exitMessage1 }" - actions: - - name: printAfterEvent1 - functionRef: - refName: systemOut - arguments: - message: "${\"event-state-timeouts: \" + $WORKFLOW.instanceId + \" executing actions for event1.\"}" - - transition: WaitForEvent2 - - name: WaitForEvent2 - type: event - onEvents: - - eventRefs: [ event2 ] - eventDataFilter: - data: "${ \"The event2 was received.\" }" - toStateData: "${ .exitMessage2 }" - actions: - - name: printAfterEvent2 - functionRef: - refName: systemOut - arguments: - message: "${\"event-state-timeouts: \" + $WORKFLOW.instanceId + \" executing actions for event2.\"}" - transition: PrintExitMessage - - name: PrintExitMessage - type: operation - actions: - - name: printSystemOut - functionRef: - refName: systemOut - arguments: - message: "${\"event-state-timeouts: \" + $WORKFLOW.instanceId + \" has finalized. 
\" + if .exitMessage1 != null then .exitMessage1 else \"The event state did not receive event1, and the timeout has overdue\" end + \" -- \" + if .exitMessage2 != null then .exitMessage2 else \"The event state did not receive event2, and the timeout has overdue\" end }" - end: true - -# kind: Route -# apiVersion: route.openshift.io/v1 -# metadata: -# name: greeting -# labels: -# app: greeting -# spec: -# host: greeting-arhkp-kustomize.apps.rhdh-dev01.kni.syseng.devcluster.openshift.com -# to: -# kind: Service -# name: greeting -# port: -# targetPort: 8080 ---- diff --git a/deployment/kustomize/workflows/sonataflow-greeting/configmap_greeting-props.yaml b/deployment/kustomize/workflows/sonataflow-greeting/configmap_greeting-props.yaml deleted file mode 100644 index 851ddc9..0000000 --- a/deployment/kustomize/workflows/sonataflow-greeting/configmap_greeting-props.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -data: - application.properties: | - kogito.service.url=${kubernetes:services.v1/greeting} - quarkus.log.category."io.smallrye.reactive.messaging".level = DEBUG - quarkus.log.category."org.kie".level = DEBUG - quarkus.log.category."io.quarkus.reactivemessaging".level = DEBUG - quarkus.log.category."io.vertx".level = DEBUG - quarkus.datasource.jdbc.url=jdbc:postgresql://postgres-db-service.postgres:5432/sonataflow - quarkus.datasource.username=sonataflow - quarkus.datasource.password=sonataflow - kogito.jobs-service.url=${kubernetes:services.v1/jobs-service-service} -kind: ConfigMap -metadata: - labels: - app: greeting - name: greeting-props diff --git a/deployment/kustomize/workflows/sonataflow-greeting/kustomization.yaml b/deployment/kustomize/workflows/sonataflow-greeting/kustomization.yaml deleted file mode 100644 index ea3cf74..0000000 --- a/deployment/kustomize/workflows/sonataflow-greeting/kustomization.yaml +++ /dev/null @@ -1,3 +0,0 @@ -resources: -- configmap_greeting-props.yaml -- sonataflow_greeting.yaml diff --git a/deployment/kustomize/workflows/sonataflow-greeting/sonataflow_greeting.yaml b/deployment/kustomize/workflows/sonataflow-greeting/sonataflow_greeting.yaml deleted file mode 100644 index 464e7f3..0000000 --- a/deployment/kustomize/workflows/sonataflow-greeting/sonataflow_greeting.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: sonataflow.org/v1alpha08 -kind: SonataFlow -metadata: - name: greeting - annotations: - sonataflow.org/description: Greeting example on k8s! 
- sonataflow.org/version: 0.0.1 - sonataflow.org/profile: prod -spec: - flow: - start: ChooseOnLanguage - functions: - - name: greetFunction - type: custom - operation: sysout - states: - - name: ChooseOnLanguage - type: switch - dataConditions: - - condition: "${ .language == \"English\" }" - transition: GreetInEnglish - - condition: "${ .language == \"Spanish\" }" - transition: GreetInSpanish - defaultCondition: GreetInEnglish - - name: GreetInEnglish - type: inject - data: - greeting: "Hello from JSON Workflow, " - transition: GreetPerson - - name: GreetInSpanish - type: inject - data: - greeting: "Saludos desde JSON Workflow, " - transition: GreetPerson - - name: GreetPerson - type: operation - actions: - - name: greetAction - functionRef: - refName: greetFunction - arguments: - message: ".greeting+.name" - end: true -# kind: Route -# apiVersion: route.openshift.io/v1 -# metadata: -# name: greeting -# labels: -# app: greeting -# spec: -# host: greeting-arhkp-kustomize.apps.rhdh-dev01.kni.syseng.devcluster.openshift.com -# to: -# kind: Service -# name: greeting -# port: -# targetPort: 8080 ---- diff --git a/escalation-eda/README.md b/escalation-eda/README.md deleted file mode 100644 index 567f44c..0000000 --- a/escalation-eda/README.md +++ /dev/null @@ -1,159 +0,0 @@ -![Escalation CI](https://github.com/dmartinol/serverless-workflow-examples/actions/workflows/escalation-pipeline.yml/badge.svg) - -# Escalation workflow with Event Driven Architecture -## Use case -As a user I want to create a ticket to request the creation of a new namespace in an OpenShift cluster and inform the given -escalation manager in case the ticket is not completed in a given time. - -Working assumptions: -* Jira is the ticketing system -* The escalation manager is notified with an email from an external mailing service -* The application is deployed as a serverless workload in OpenShift using Helm charts -* The user workflow is implemented using the SonataFlow platform - -## Architectural components -![Escalation architecture][1] - -## Jira server -A ticketing service configured to create tickets and notify webhooks any time the tickets are updated. - -# Escalation workflow -A Serverless Workflow receiving the user request and then creating the ticket: once it is approved, it take care of provisioning the given namespace. - -See the [README][2] - -### Jira listener -A Java application configured to receive webhooks from the ticketing service, extract the relevant data and notify the Escalation workflow about the approval. - -See the [README][3] - -### Serverless infrastructure -It is made of the following components: -* `Red Hat Serverless Operator`, and basic `KnativeEventing` and `KnativeServing` instances -* An in-memory `Broker` receiving `CloudEvent`s from the `Jira listener` (linked using a `SinkBinding` instance) to the `Escalation workflow` - (using a `Trigger` instance) - -## Deploying the example -This is a two steps deployment: -1. [Deploy the serverless infrastrucure](#eda-infra-chart) (optional, if already availble) -2. [Deploy the escalation services](#escalation-eda-chart) - -### eda-infra chart -The [eda-infra][4] Helm creates the `Red Hat Serverless Operator`, and default instances of `KnativeEventing` and `KnativeServing`. -This chart requires a user with `cluster-admin` role. - -**Note**: as an alternative, you can provision the same resources manually, using the OpenShift UI console or the `oc` CLI command. 
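A rough sketch of that manual alternative, assuming the Serverless operator itself has already been installed (for example from OperatorHub in the console) and that `oc` is logged in with `cluster-admin`; the `operator.knative.dev/v1beta1` API version is an assumption and may differ across OpenShift Serverless releases:

```bash
# Create the target namespaces if the operator installation did not already do so
oc create namespace knative-eventing --dry-run=client -o yaml | oc apply -f -
oc create namespace knative-serving --dry-run=client -o yaml | oc apply -f -

# Default KnativeEventing and KnativeServing instances
oc apply -f - <<'EOF'
apiVersion: operator.knative.dev/v1beta1
kind: KnativeEventing
metadata:
  name: knative-eventing
  namespace: knative-eventing
---
apiVersion: operator.knative.dev/v1beta1
kind: KnativeServing
metadata:
  name: knative-serving
  namespace: knative-serving
EOF
```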
- -It also creates the needed [CRDs][5] according to the latest release of the -[OpenShift Serverless 1.30 operator](https://access.redhat.com/documentation/en-us/red_hat_openshift_serverless/1.30/html-single/about_serverless/index#new-features-1-30-0_serverless-release-notes). - -CRDs were downloaded from: -```bash -curl -LJO https://github.com/knative/operator/releases/download/knative-v1.9.6/operator.yaml -``` - -The following commands install, upgrade and delete the [eda-infra][6] -Helm chart in the `default` namespace with name `eda-infra`: -```bash -helm install -n default eda-infra helm/eda-infra --debug -helm status -n default eda-infra -helm upgrade -n default eda-infra helm/eda-infra --debug -helm uninstall -n default eda-infra --debug -``` - -After the initial installation, run the following commands to wait until the serverless infrastructure is ready: -```bash -> oc wait -n knative-eventing knativeeventing/knative-eventing --for=condition=Ready --timeout=5m -knativeeventing.operator.knative.dev/knative-eventing condition met -> oc wait -n knative-serving knativeserving/knative-serving --for=condition=Ready --timeout=5m -knativeserving.operator.knative.dev/knative-serving condition met -``` - -**Note**: the CRDs are not removed when the chart is uninstalled; see the [Helm docs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations). - -**Known issues**: after the uninstall command, the KnativeEventing and KnativeServing instances can remain in a terminating state, which also prevents the -associated namespaces from being deleted. Manually run this command to verify the status: -```bash -oc get knativeeventing,knativeserving --all-namespaces -``` -Then run this command to patch the instances so that they can eventually be deleted: -```bash -oc patch -n knative-eventing knativeeventing/knative-eventing -p '{"metadata":{"finalizers":null}}' --type=merge -oc patch -n knative-serving knativeserving/knative-serving -p '{"metadata":{"finalizers":null}}' --type=merge -``` - -### escalation-eda chart -The [escalation-eda][7] Helm chart creates all the services related to the deployment of the [Escalation workflow](#escalation-workflow) -and the [Jira listener](#jira-listener). - -This chart requires a user with `admin` role.
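Before installing the chart you need a values file with the mandatory settings; the full list of configurable properties follows in the table below. A minimal sketch of an `escalation-eda-values.yaml`, with placeholders that must be replaced with real values:

```bash
cat > escalation-eda-values.yaml <<'EOF'
escalationSwf:
  jira:
    url: https://<your-domain>.atlassian.net
    username: <jira user>
    apiToken: <jira api token>
    project: <jira project key>
    issueType: <jira issue type id>
  mailTrap:
    apiToken: <mailtrap api token>
    inboxId: <mailtrap inbox id>
  ocp:
    apiServerUrl: https://api.<cluster-domain>:6443
    apiServerToken: <openshift api token>
EOF
```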
- -Helm properties: - -| Property | Description | Mandatory | Default | -|----------|-------------|-----------|---------| -| `namespace.create` | Flag to create the target namespace | ❌ | `true` | -| `namespace.name` | Target namespace name | ❌ | `escalation` | -| `jiralistener.image` | Container image of the `Jira listener` application | ❌ | `quay.io/orchestrator/jira-listener-jvm` | -| `jiralistener.name` | The name of the `Jira listener` service (see [Troubleshooting the Duplicate Certificate Limit error][8]) | ❌ | `jira-listener` | -| `escalationSwf.name` | The name of the `Escalation SWF` service | ❌ | `escalation-swf` | -| `escalationSwf.image` | Container image of the `Escalation SWF` application | ❌ | `quay.io/orchestrator/escalation-swf:1.0` | -| `escalationSwf.jira.url` | The Jira server URL | ✅ | | -| `escalationSwf.jira.username` | The Jira server username | ✅ | | -| `escalationSwf.jira.apiToken` | The Jira API Token | ✅ | | -| `escalationSwf.jira.project` | The key of the Jira project where the escalation issue is created | ✅ | | -| `escalationSwf.jira.issueType` | The ID of the Jira issue type to be created | ✅ | | -| `escalationSwf.mailTrap.apiToken` | The MailTrap API Token | ✅ | | -| `escalationSwf.mailTrap.inboxId` | The ID of the MailTrap inbox | ✅ | | -| `escalationSwf.ocp.apiServerUrl` | The OpenShift API server URL | ✅ | | -| `escalationSwf.ocp.apiServerToken` | The OpenShift API server token | ✅ | | -| `escalationSwf.escalationTimeoutSeconds` | The time to wait (in seconds) before escalating | ❌ | `30` | -| `eventdisplay.enabled` | Flag to install the optional `event-display` application for debugging purposes | ❌ | `true` | -| `letsEncryptCertificate` | Flag to use the `Lets Encrypt` certificate to expose the `Jira listener` service as the webhook receiver | ❌ | `false` | - -The following commands install, upgrade and delete the [escalation-eda][7] Helm chart in the `default` namespace - with name `escalation-eda`, assuming you provided the mandatory values in a file `escalation-eda-values.yaml`: -```bash -helm install -n default escalation-eda helm/escalation-eda --debug -f ./escalation-eda-values.yaml -helm status -n default escalation-eda -helm upgrade -n default escalation-eda helm/escalation-eda --debug -f ./escalation-eda-values.yaml -helm uninstall -n default escalation-eda --debug -``` - -After the initial installation, run the following commands to wait until the services are ready: -```bash -> oc wait -n escalation ksvc -l app=jira-listener --for=condition=Ready --timeout=5m -service.serving.knative.dev/jira-listener condition met -> oc wait -n escalation ksvc -l app=escalation-swf --for=condition=Ready --timeout=5m -service.serving.knative.dev/escalation-swf condition met -``` - -#### Deploy using the Let's Encrypt certificate -To use the publicly-signed TLS certificate from [Let's Encrypt](https://letsencrypt.org/), set the following values in the custom values file: -```yaml -letsEncryptCertificate: true -jiralistener: - name: _YOUR_CUSTOM_NAME_ -``` - -#### Deploying on OpenShift sandbox -When deploying on the [OpenShift sandbox](https://developers.redhat.com/developer-sandbox), remember to manage the Helm chart in the user namespace, not in the `default` one: -```bash -SANDBOX_NS=$(oc project -q) -``` - -Then, set the following values in the custom values file: -```yaml -namespace: - create: false - name: -``` - -[1]: https://github.com/parodos-dev/serverless-workflow-examples/blob/main/escalation-eda/doc/arch.png -[2]:
https://github.com/parodos-dev/serverless-workflow-examples/blob/main/escalation-eda/escalation-swf/README.md -[3]: https://github.com/parodos-dev/serverless-workflow-examples/blob/main/escalation-eda/jira-listener/README.md -[4]: https://github.com/parodos-dev/serverless-workflow-examples/blob/main/escalation-eda/helm/eda-infra/Chart.yaml -[5]: https://github.com/parodos-dev/serverless-workflow-examples/blob/main/escalation-eda/helm/eda-infra/crds/operator.yaml -[6]: https://github.com/parodos-dev/serverless-workflow-examples/blob/main/escalation-eda/helm/eda-infra/Chart.yaml -[7]: https://github.com/parodos-dev/serverless-workflow-examples/blob/main/escalation-eda/helm/escalation-eda/Chart.yaml -[8]: https://github.com/parodos-dev/serverless-workflow-examples/blob/main/escalation-eda/jira-listener/README.md#troubleshooting-the-duplicate-certificate-limit-error diff --git a/escalation-eda/doc/arch.png b/escalation-eda/doc/arch.png deleted file mode 100644 index 56fbb40..0000000 Binary files a/escalation-eda/doc/arch.png and /dev/null differ diff --git a/escalation-eda/doc/webhook.png b/escalation-eda/doc/webhook.png deleted file mode 100644 index ee22765..0000000 Binary files a/escalation-eda/doc/webhook.png and /dev/null differ diff --git a/escalation-eda/escalation-swf/README.md b/escalation-eda/escalation-swf/README.md deleted file mode 100644 index cc86351..0000000 --- a/escalation-eda/escalation-swf/README.md +++ /dev/null @@ -1,209 +0,0 @@ -# Event Driven Escalation workflow -An escalation workflow integrated with a pluggable `Ticketing Service` orchestrated by -[SonataFlow](https://sonataflow.org/serverlessworkflow/latest/index.html) -and built on Event Driven Architecture (EDA). - -The initial implementation of the `Ticketing Service` is using [Atlassian JIRA](https://www.atlassian.com/software/jira). - -Email service is using [MailTrap Send email API](https://api-docs.mailtrap.io/docs/mailtrap-api-docs/bcf61cdc1547e-send-email-early-access). - -## Prerequisites -* Access to a Jira server (URL, user and [API token](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/)) -* [in-cluster deployment only] Access to an OpenShift cluster with `admin` Role -* An account to [MailTrap](https://mailtrap.io/home) with a [testing Inbox](https://mailtrap.io/inboxes) and an [API token](https://mailtrap.io/api-tokens) -* Available or Running [Janus IDP Backstage Notification Service](https://github.com/janus-idp/backstage-plugins/tree/4d4cb781ca9fc331a2c621583e9203f9e4585ee7), please note the following: - * This readme is made with Janus IDP version merged after Notification plugin's [PR #933](https://github.com/janus-idp/backstage-plugins/pull/933) - ```shell - git clone https://github.com/janus-idp/backstage-plugins.git - git checkout 4d4cb781ca9fc331a2c621583e9203f9e4585ee7 - cd /backstage-plugins - ``` - * Follow the [Notification Backend Plugin Readme](https://github.com/mareklibra/janus-idp-backstage-plugins/tree/c3ff659a0a2a9fba97b9e520568c93da09f150ae/plugins/notifications-backend) to configure the plugin and run. 
- ```shell - yarn start:backstage - ``` - * After Janus IDP successfully starts, make sure the following create-notification command (as given in the Notification Plugin Backend readme) works without errors - ```shell - curl -X POST http://localhost:7007/api/notifications/notifications -H "Content-Type: application/json" -H "notifications-secret: " -d '{"title":"my-title","origin":"my-origin","message":"message one","topic":"my-topic"}' - ``` - -## Escalation flow -The main escalation workflow is defined by the [ticketEscalation](./src/main/resources/ticketEscalation.sw.yaml) model: -* Create a ticket using the configured `Ticketing Service` subflow -* Wait until the `approvalEvent` is received - * If the waiting time exceeds the configured timeout, the error with `code: TimedOut` is handled to run the `Escalate` actions `SendNotification` and - `SendEmail`, which send the warning notification and/or email - to the escalation manager - * To ensure that an event coming during the escalation state is not lost, the `GetTicket` and `CheckTicketState` states are executed before returning - to the waiting state -* Only when the event is received or the current status is `Approved` is the workflow transitioned to the final state `CreateK8sNamespace` - -![SWF VIZ](./src/main/resources/ticketEscalation.svg) - -The generalized implementation delegates all the `Ticketing Service` requests (e.g., `CreateTicket` and `GetTicket`) to a subflow whose requirements are: -* The `id` must be `ticketingService` -* It must be packaged together with the main workflow -* The workflow data input must comply with the schema defined in [specs/subflow-input-schema.json](./src/main/resources/specs/subflow-input-schema.json), i.e. the subflow accepts a generic request with a `type` field that defines the kind of request and -returns a well-defined response: -```yaml -# Create request -request: - type: create - namespace: "" - parentId: "" - -# Expected response -type: create -ticketId: "" -ticket: "" -browseUrl: "" -``` -```yaml -# Get request -request: - type: get - ticketId: "" - ticket: "" - -# Expected response -type: get -ticketId: "" -ticket: "" -status: "" -``` - -The sample implementation using the Atlassian JIRA service is defined by the following diagram: -![SWF VIZ](./src/main/resources/jiraSwf.svg) - -**Note about the Jira implementation**: The value of the `.jiraIssue.fields.status.statusCategory.key` field is the one to be used to identify when the `done` status is reached; all the other -similar fields are subject to translation to the configured language and cannot be used for a consistent check. - -### Dependencies on latest SonataFlow artifacts -* This implementation depends on version `999-SNAPSHOT` of the SonataFlow platform artifacts, as you can see in the [pom.xml](./pom.xml). -```xml -999-SNAPSHOT -``` - -In order to build and execute the workflows, a specific reference to the `JBoss Public Repository Group` has been added, -following the instructions documented [here](https://openshift-knative.github.io/docs/docs/latest/serverless-logic/getting-started/create-your-first-workflow-service.html#proc-configuring-maven-rhbq). - -**These changes allow building and running the workflow but do not contain the required fixes to manage the timeout error. -Expect some runtime issues.** - -This section will be removed once the latest artifacts are finally released.
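To illustrate the subflow contract above, and assuming the service is running locally on port 8080 (SonataFlow exposes every workflow under its `id`, so the Jira implementation would answer on `/ticketingService`), a create request could be sent like this; the namespace and parent id are placeholder values:

```bash
curl -s -X POST "http://localhost:8080/ticketingService" \
  -H "Content-Type: application/json" \
  -d '{
        "request": {
          "type": "create",
          "namespace": "new-namespace",
          "parentId": "<parent workflow instance id>"
        }
      }'
```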
- -## Application configuration -Application properties can be initialized from environment variables before running the application. - -### Ticket escalation properties -| Environment variable | Description | Mandatory | Default value | -|-----------------------|-------------|-----------|---------------| -| `BACKSTAGE_NOTIFICATIONS_URL` | The Backstage Notification Service URL | ✅ | `http://localhost:7007/api/notifications/` | -| `MAILTRAP_URL` | The MailTrap API base URL | ❌ | `https://sandbox.api.mailtrap.io` | -| `MAILTRAP_API_TOKEN` | The MailTrap API Token | ✅ | | -| `MAILTRAP_INBOX_ID` | The ID of the MailTrap inbox | ✅ | | -| `MAILTRAP_SENDER_EMAIL` | The email address of the mail sender | ❌ | `escalation@company.com` | -| `OCP_API_SERVER_URL` | The OpenShift API Server URL | ✅ | | -| `OCP_API_SERVER_TOKEN`| The OpenShift API Server Token | ✅ | | -| `ESCALATION_TIMEOUT_SECONDS` | The ISO 8601 duration to wait before triggering the escalation request, after the issue has been created | ❌ | `PT60S` | - -### Jira Ticketing Service properties - -| Environment variable | Description | Mandatory | Default value | -|-----------------------|-------------|-----------|---------------| -| `JIRA_URL` | The Jira server URL | ✅ | | -| `JIRA_USERNAME` | The Jira server username | ✅ | | -| `JIRA_API_TOKEN` | The Jira API Token | ✅ | | -| `JIRA_PROJECT` | The key of the Jira project where the escalation issue is created | ❌ | `TEST` | -| `JIRA_ISSUE_TYPE` | The ID of the Jira issue type to be created | ✅ | | -| `JIRA_WORKFLOW_INSTANCE_ID_LABEL` | The name part of the Jira ticket label that contains the ID of the related SWF instance (e.g. `workflowInstanceId=123`) | ❌ | `workflowInstanceId` | -| `JIRA_WORKFLOW_NAME_LABEL` | The whole Jira ticket label that contains the name of the SWF (e.g. `workflowName=escalation`) | ❌ | `workflowName=escalation` | - -## How to run -You can run it locally as a Quarkus application or deploy it using the instructions in the [escalation-eda chart](../README.md#escalation-eda-chart): -```bash -mvn -Dquarkus.profile=jira clean quarkus:dev -``` - -**Note**: The `jira` profile is selected in the above command to load the properties provided in [application-jira.properties](./src/main/resources/application-jira.properties). - -Initialize the environment before running the test commands below.
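As a minimal sketch, the mandatory variables from the tables above can be exported before starting the application; the variable names come from the tables, while the values here are placeholders only:

```bash
# Ticket escalation properties
export BACKSTAGE_NOTIFICATIONS_URL="http://localhost:7007/api/notifications/"
export MAILTRAP_API_TOKEN="<mailtrap api token>"
export MAILTRAP_INBOX_ID="<mailtrap inbox id>"
export OCP_API_SERVER_URL="https://api.<cluster-domain>:6443"
export OCP_API_SERVER_TOKEN="<openshift api token>"

# Jira Ticketing Service properties
export JIRA_URL="https://<your-domain>.atlassian.net"
export JIRA_USERNAME="<jira user>"
export JIRA_API_TOKEN="<jira api token>"
export JIRA_ISSUE_TYPE="<jira issue type id>"

mvn -Dquarkus.profile=jira clean quarkus:dev
```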
- -For local runtime: -```bash -ESCALATION_SWF_URL="http://localhost:8080" -``` -Otherwise, in case of Knative environment: -```bash -ESCALATION_SWF_URL=$(oc get ksvc -n escalation escalation-swf -oyaml | yq '.status.url') -ESCALATION_SWF_URL="${ESCALATION_SWF_URL//\"/}" -``` - -Or, in case of regular deployment: -```bash -ESCALATION_SWF_URL=$(oc get route -n escalation escalation-swf -oyaml | yq '.status.ingress[0].host') -ESCALATION_SWF_URL="${ESCALATION_SWF_URL//\"/}" -``` - -Example of POST to trigger the flow (see input schema in [ticket-escalation-schema.json](./src/main/resources/specs/ticket-escalation-schema.json)): -```bash -export NAMESPACE=new-namespace -export MANAGER=manager@company.com -envsubst < input.json > data.json -SWF_INSTANCE_ID=$(curl -k -XPOST -H "Content-Type: application/json" "${ESCALATION_SWF_URL}/ticketEscalation" -d @data.json | jq '.id') -SWF_INSTANCE_ID="${SWF_INSTANCE_ID//\"/}" -echo $SWF_INSTANCE_ID -``` - -Where [input.json](./input.json) defines the input document as: -```json -{ - "namespace": "${NAMESPACE}", - "email": { - "manager": "${MANAGER}" - } -} -``` -To resume the pending instance, send a CloudEvent with: - -```bash -curl -k -X POST -H "Content-Type: application/cloudevents+json" -d "{ \ - \"specversion\": \"1.0\", \ - \"type\": \"dev.parodos.escalation\", \ - \"source\": \"jira.listener\", \ - \"id\": \"123456\", \ - \"time\": \"2023-10-10T12:00:00Z\", \ - \"kogitoprocrefid\": \"$SWF_INSTANCE_ID\", \ - \"data\": { \ - \"ticketId\": \"ES-6\", \ - \"workFlowInstanceId\":\"$SWF_INSTANCE_ID\", \ - \"workflowName\": \"escalation\", \ - \"status\": \"done\" \ - } \ - }" ${ESCALATION_SWF_URL} -``` -Tips: -* Visit [Workflow Instances](http://localhost:8080/q/dev/org.kie.kogito.kogito-quarkus-serverless-workflow-devui/workflowInstances) -* Visit (Data Index Query Service)[http://localhost:8080/q/graphql-ui/] - -## Building the containerized image -The application runs from a containerized image already avaliable as `quay.io/orchestrator/escalation-swf:1.0`. 
-You can build and publish your own image using: -```bash -mvn clean install -Pknative -docker tag quay.io/orchestrator/escalation-swf:1.0 quay.io/_YOUR_QUAY_ID_/jira-listener-jvm -docker push quay.io/_YOUR_QUAY_ID_/escalation-swf:1.0 -``` - -## Open Items -* Move the subflow to a separate folder and package the selected `Ticketing Service` implementation at deployment time -* Error handling: - * Define errors in the expected reponses - * Catch errors in the main workflow -* Prepare a non-Jira implementation - * Detailed dev guide of next implementations -* Restore disabled UT -* When in-cluster, connect the workflow to the deployed postgresql, data-index and jobs-service instead og the embedded services diff --git a/escalation-eda/escalation-swf/pom.xml b/escalation-eda/escalation-swf/pom.xml deleted file mode 100644 index fffe68c..0000000 --- a/escalation-eda/escalation-swf/pom.xml +++ /dev/null @@ -1,247 +0,0 @@ - - - 4.0.0 - dev.parodos - ticket-escalation - 1.0.0-SNAPSHOT - - 17 - 3.10.1 - kogito-bom - org.kie.kogito - 999-SNAPSHOT - UTF-8 - UTF-8 - quarkus-bom - io.quarkus.platform - 3.2.9.Final - true - 3.0.0-M7 - 0.1.3 - jib - - - - - ${quarkus.platform.group-id} - ${quarkus.platform.artifact-id} - ${quarkus.platform.version} - pom - import - - - ${kogito.bom.group-id} - ${kogito.bom.artifact-id} - ${kogito.bom.version} - pom - import - - - io.quarkiverse.embedded.postgresql - quarkus-embedded-postgresql - ${version.io.quarkiverse.embedded.postgresql} - - - - - - org.kie.kogito - kogito-quarkus-serverless-workflow - - - org.kie.kogito - kogito-quarkus-serverless-workflow-devui - - - org.kie.kogito - kogito-addons-quarkus-process-management - - - org.kie.kogito - kogito-addons-quarkus-source-files - - - io.quarkiverse.embedded.postgresql - quarkus-embedded-postgresql - - - io.quarkus - quarkus-jdbc-postgresql - - - io.quarkus - quarkus-agroal - - - org.kie.kogito - kogito-addons-quarkus-persistence-jdbc - - - org.kie.kogito - kogito-addons-quarkus-jobs-service-embedded - - - org.kie.kogito - kogito-addons-quarkus-data-index-inmemory - - - io.quarkus - quarkus-smallrye-openapi - - - io.quarkus - quarkus-resteasy-jackson - - - io.quarkus - quarkus-arc - - - io.quarkus - quarkus-openshift - - - io.quarkus - quarkus-kubernetes - - - io.quarkus - quarkus-container-image-jib - - - org.kie.kogito - kogito-addons-quarkus-knative-eventing - - - - io.quarkus - quarkus-junit5 - test - - - io.rest-assured - rest-assured - test - - - com.github.tomakehurst - wiremock - 3.0.1 - test - - - org.awaitility - awaitility - test - - - - - - ${quarkus.platform.group-id} - quarkus-maven-plugin - ${quarkus.platform.version} - true - - - - build - generate-code - generate-code-tests - - - - - - maven-compiler-plugin - ${compiler-plugin.version} - - - -parameters - - - - - maven-surefire-plugin - ${surefire-plugin.version} - - true - - org.jboss.logmanager.LogManager - ${maven.home} - - - - - maven-failsafe-plugin - ${surefire-plugin.version} - - - - integration-test - verify - - - - ${project.build.directory}/${project.build.finalName}-runner - org.jboss.logmanager.LogManager - ${maven.home} - - - - - - - - - - knative - - true - - workflow-with-persistence - false - - ${namespace} - ${deploy} - knative - jib - knative - escalation-swf - quay.io - orchestrator - escalation-swf - 1.0 - false - - - - native - - - native - - - - true - native - - - - - - - apache-repository-snapshots - https://repository.apache.org/content/repositories/snapshots/ - default - - true - - - true - daily - - - - diff --git 
a/escalation-eda/escalation-swf/src/main/resources/application-jira.properties b/escalation-eda/escalation-swf/src/main/resources/application-jira.properties deleted file mode 100644 index 3f59916..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/application-jira.properties +++ /dev/null @@ -1,21 +0,0 @@ -# The ID of the Jira issue type to be created (mandatory) -jira_issue_type=${JIRA_ISSUE_TYPE} -# The key of the Jira project where the escalation issue is created (mandatory) -jira_project=${JIRA_PROJECT} -# The name part of the Jira ticket label that contains the ID of the related SWF instance (e.g. `workflowInstanceId=123`) -jira_label_workflowInstanceId=${JIRA_WORKFLOW_INSTANCE_ID_LABEL:workflowInstanceId} -# The whole Jira ticket label that contains the name of the SWF (e.g. `workflowInstanceId=escalation`) -jira_label_workflowName=${JIRA_WORKFLOW_NAME_LABEL:workflowName=escalation} - -# Jira -quarkus.rest-client.jira_yaml.url=${JIRA_URL} -quarkus.openapi-generator.jira_yaml.auth.basicAuth.username=${JIRA_USERNAME} -quarkus.openapi-generator.jira_yaml.auth.basicAuth.password=${JIRA_API_TOKEN} - -%test.jira_issue_type=123456 -%test.jira_project=PRJ -%test.mailtrap_inbox_id=123456 - -%test.quarkus.rest-client.jira_yaml.url=http://localhost:8181 -%test.quarkus.openapi-generator.jira_yaml.auth.basicAuth.username=admin -%test.quarkus.openapi-generator.jira_yaml.auth.basicAuth.password=ABCDEF \ No newline at end of file diff --git a/escalation-eda/escalation-swf/src/main/resources/application.properties b/escalation-eda/escalation-swf/src/main/resources/application.properties deleted file mode 100644 index e61683d..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/application.properties +++ /dev/null @@ -1,57 +0,0 @@ -# Application properties -escalation_subflow_id=jiraSwf - -# The email address of the mail sender -sender_email=${MAILTRAP_SENDER_EMAIL:escalation@company.com} -# The ID of the MailTrap inbox (mandatory) -mailtrap_inbox_id=${MAILTRAP_INBOX_ID} -# The ISO 8601 duration format to wait before triggering the escalation request, after the issue has been created -timeout_seconds=${ESCALATION_TIMEOUT_SECONDS:PT60S} - -# OpenShift API Server -quarkus.rest-client.kube_yaml.url=${OCP_API_SERVER_URL} -quarkus.openapi-generator.kube_yaml.auth.BearerToken.bearer-token=${OCP_API_SERVER_TOKEN} -quarkus.tls.trust-all=true -quarkus.kubernetes-client.trust-certs=true - -# MailTrap service -quarkus.rest-client.mailtrap_yaml.url=${MAILTRAP_URL:https://sandbox.api.mailtrap.io} -quarkus.openapi-generator.mailtrap_yaml.auth.apiToken.api-key=${MAILTRAP_API_TOKEN} - -# Notifications service -quarkus.rest-client.notifications_yaml.url=${BACKSTAGE_NOTIFICATIONS_URL:http://localhost:7007/api/notifications/} - -#Quarkus -quarkus.http.host=0.0.0.0 -# This is to enable debugging of HTTP request -quarkus.log.category.\"org.apache.http\".level=DEBUG - -# Added -quarkus.http.port=8080 - -kogito.service.url=http://localhost:${quarkus.http.port} - -quarkus.kogito.devservices.enabled=false -quarkus.devservices.enabled=false -org.kie.kogito.addons.knative.eventing.health-enabled = false -quarkus.swagger-ui.always-include=true -quarkus.kogito.data-index.graphql.ui.always-include=true - -# Kogito runtime persistence configurations -kogito.persistence.type=jdbc -kogito.persistence.proto.marshaller=false -kogito.persistence.query.timeout.millis=10000 -quarkus.datasource.db-kind=postgresql -quarkus.flyway.migrate-at-start=true - 
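The properties above enable the embedded Data Index UI (`quarkus.kogito.data-index.graphql.ui.always-include=true`), which is handy for inspecting workflow instances while testing. A hedged sketch of querying it from the command line, assuming the standard Kogito Data Index GraphQL endpoint is served by the application at `/graphql`:

```bash
# List workflow instances known to the embedded Data Index
curl -s -X POST "http://localhost:8080/graphql" \
  -H "Content-Type: application/json" \
  -d '{"query": "{ ProcessInstances { id processId state } }"}' | jq .
```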
-mp.messaging.incoming.kogito_incoming_stream.connector=quarkus-http -mp.messaging.incoming.kogito_incoming_stream.path=/ -mp.messaging.incoming.kogito_incoming_stream.method=POST - -# Test properties -%test.quarkus.http.port=8081 -%test.quarkus.rest-client.kube_yaml.url=http://localhost:8282 -%test.quarkus.openapi-generator.kube_yaml.auth.BearerToken.bearer-token=ABCDEF -%test.quarkus.openapi-generator.mailtrap_yaml.auth.apiToken.api-key=ABCDEF -%test.quarkus.rest-client.mailtrap_yaml.url=http://localhost:8383 -%test.K_SINK=http://localhost:8080 \ No newline at end of file diff --git a/escalation-eda/escalation-swf/src/main/resources/jiraSwf.svg b/escalation-eda/escalation-swf/src/main/resources/jiraSwf.svg deleted file mode 100644 index 96939d4..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/jiraSwf.svg +++ /dev/null @@ -1 +0,0 @@ -StartSetupOnRequestCreateJiraIssueNormalizeCreateRe sponse EndGetJiraIssueNormalizeGetRespo nse EndUnmanagedEndAuthErrorEnd.request.ty... .request.ty... authError authError .request.ty... .request.ty... \ No newline at end of file diff --git a/escalation-eda/escalation-swf/src/main/resources/jiraSwf.sw.yaml b/escalation-eda/escalation-swf/src/main/resources/jiraSwf.sw.yaml deleted file mode 100644 index 2e6aee3..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/jiraSwf.sw.yaml +++ /dev/null @@ -1,112 +0,0 @@ -specVersion: "0.8" -# id: jiraSwf -id: ticketingService -name: Jira Ticketing Service -annotations: - - "workflow-type/infrastructure" -version: 0.0.1 -timeouts: - workflowExecTimeout: - duration: PT24H -start: Setup -dataInputSchema: - failOnValidationErrors: true - schema: specs/subflow-input-schema.json -errors: - - name: authError - code: '401' -functions: - - name: createJiraIssue - operation: specs/jira.yaml#createIssue - - name: getJiraIssue - operation: specs/jira.yaml#getIssue - - name: logInfo - type: custom - operation: "sysout:INFO" -states: - - name: Setup - type: operation - actions: - - name: "logInfo" - functionRef: - refName: "logInfo" - arguments: - message: "\"Invoking Jira SWF with: \\(.)\"" - transition: OnRequest - - name: OnRequest - type: switch - dataConditions: - - condition: '.request.type =="create"' - transition: CreateJiraIssue - - condition: '.request.type =="get"' - transition: GetJiraIssue - defaultCondition: - transition: Unmanaged - - name: CreateJiraIssue - type: operation - actions: - - name: Create Jira Issue - functionRef: - refName: createJiraIssue - arguments: - update: {} - fields: - summary: '"Request For New Namespace: " + .request.namespace' - labels: - - '$SECRET.jira_label_workflowInstanceId + "=" + .request.parentId' - - "$SECRET.jira_label_workflowName" - issuetype: - id: "$SECRET.jira_issue_type" - project: - key: "$SECRET.jira_project" - actionDataFilter: - toStateData: .jiraIssue - transition: NormalizeCreateResponse - onErrors: - - errorRef: authError - transition: AuthError - stateDataFilter: - output: ". 
+= { jiraBrowser: ((.jiraIssue.self | sub(\"rest/.*\"; \"browse/\")) + .jiraIssue.key) }" - - name: NormalizeCreateResponse - type: inject - data: {} - stateDataFilter: - output: "${ {type: \"create\", ticketId: .jiraIssue.key, ticket: .jiraIssue, browseUrl: ((.jiraIssue.self | sub(\"rest/.*\"; \"browse/\")) + .jiraIssue.key)} }" - end: true - - name: GetJiraIssue - type: operation - actions: - - functionRef: - refName: getJiraIssue - arguments: - issueIdOrKey: .request.ticketId - fields: status - actionDataFilter: - toStateData: .jiraIssue - transition: NormalizeGetResponse - - name: NormalizeGetResponse - type: inject - data: {} - stateDataFilter: - output: "${ { type: \"get\", ticketId: .jiraIssue.key, ticket: .jiraIssue, status: (if .jiraIssue.fields.status.statusCategory.key == \"done\" then \"Approved\" elif .jiraIssue.fields.status.statusCategory.key == \"new\" then \"Created\" elif .jiraIssue.fields.status.statusCategory.key == \"undefined\" then \"Unknown\" else \"Unknown\" end)} }" - end: true - - name: Unmanaged - type: operation - actions: - - name: "printAction" - functionRef: - refName: "logInfo" - arguments: - message: "\"Unmanaged request: \\(.)\"" - end: true - - name: AuthError - type: operation - actions: - - name: "printAction" - functionRef: - refName: "logInfo" - arguments: - message: "\"AuthError: \\(.)\"" - stateDataFilter: - output: "${ { type: \"get\", ticketId: .jiraIssue.key, ticket: .jiraIssue, status: (if .jiraIssue.fields.status.statusCategory.key == \"done\" then \"Approved\" elif .jiraIssue.fields.status.statusCategory.key == \"new\" then \"Created\" elif .jiraIssue.fields.status.statusCategory.key == \"undefined\" then \"Unknown\" else \"Unknown\" end)} }" - end: true \ No newline at end of file diff --git a/escalation-eda/escalation-swf/src/main/resources/specs/jira.yaml b/escalation-eda/escalation-swf/src/main/resources/specs/jira.yaml deleted file mode 100644 index 530f3c0..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/specs/jira.yaml +++ /dev/null @@ -1,332 +0,0 @@ -openapi: 3.0.3 -info: - title: The Jira Cloud platform REST API - description: Jira Cloud platform REST API documentation - termsOfService: http://atlassian.com/terms/ - contact: - email: ecosystem@atlassian.com - license: - name: Apache 2.0 - url: http://www.apache.org/licenses/LICENSE-2.0.html - version: 1001.0.0-SNAPSHOT -externalDocs: - description: Find out more about Atlassian products and services. - url: http://www.atlassian.com -servers: - - url: https://your-domain.atlassian.net -paths: - /rest/api/latest/issue: - post: - tags: - - Issues - summary: Create issue - description: |- - Creates an issue or, where the option to create subtasks is enabled in Jira, a subtask. A transition may be applied, to move the issue or subtask to a workflow step other than the default start step, and issue properties set. - - The content of the issue or subtask is defined using `update` and `fields`. The fields that can be set in the issue or subtask are determined using the [ Get create issue metadata](#api-rest-api-3-issue-createmeta-get). These are the same fields that appear on the issue's create screen. Note that the `description`, `environment`, and any `textarea` type custom fields (multi-line text fields) take Atlassian Document Format content. Single line custom fields (`textfield`) accept a string and don't handle Atlassian Document Format content. 
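The `NormalizeGetResponse` expression above maps Jira's `statusCategory.key` onto the statuses the main escalation workflow understands (`Approved`, `Created`, `Unknown`). The same mapping can be sanity-checked outside the workflow with plain `jq`; the payload below is a made-up minimal example:

```bash
echo '{"jiraIssue":{"key":"ES-6","fields":{"status":{"statusCategory":{"key":"done"}}}}}' | \
  jq '{ type: "get",
        ticketId: .jiraIssue.key,
        status: (if .jiraIssue.fields.status.statusCategory.key == "done" then "Approved"
                 elif .jiraIssue.fields.status.statusCategory.key == "new" then "Created"
                 else "Unknown" end) }'
# -> {"type":"get","ticketId":"ES-6","status":"Approved"}
```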
- - Creating a subtask differs from creating an issue as follows: - - * `issueType` must be set to a subtask issue type (use [ Get create issue metadata](#api-rest-api-3-issue-createmeta-get) to find subtask issue types). - * `parent` must contain the ID or key of the parent issue. - - In a next-gen project any issue may be made a child providing that the parent and child are members of the same project. - - **[Permissions](#permissions) required:** *Browse projects* and *Create issues* [project permissions](https://confluence.atlassian.com/x/yodKLg) for the project in which the issue or subtask is created. - operationId: createIssue - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/IssueUpdateDetails' - example: - fields: - assignee: - id: 5b109f2e9729b51b54dc274d - components: - - id: "10000" - customfield_10000: 09/Jun/19 - customfield_20000: 06/Jul/19 3:25 PM - customfield_30000: - - "10000" - - "10002" - customfield_40000: - content: - - content: - - text: Occurs on all orders - type: text - type: paragraph - type: doc - version: 1 - customfield_50000: - content: - - content: - - text: Could impact day-to-day work. - type: text - type: paragraph - type: doc - version: 1 - customfield_60000: jira-software-users - customfield_70000: - - jira-administrators - - jira-software-users - customfield_80000: - value: red - description: - content: - - content: - - text: Order entry fails when selecting supplier. - type: text - type: paragraph - type: doc - version: 1 - duedate: 2019-05-11 - environment: - content: - - content: - - text: UAT - type: text - type: paragraph - type: doc - version: 1 - fixVersions: - - id: "10001" - issuetype: - id: "10000" - labels: - - bugfix - - blitz_test - parent: - key: PROJ-123 - priority: - id: "20000" - project: - id: "10000" - reporter: - id: 5b10a2844c20165700ede21g - security: - id: "10000" - summary: Main order flow broken - timetracking: - originalEstimate: "10" - remainingEstimate: "5" - versions: - - id: "10000" - update: { } - required: true - responses: - "201": - description: Returned if the request is successful. - content: - application/json: - schema: - $ref: '#/components/schemas/CreatedIssue' - example: "{\"id\":\"10000\",\"key\":\"ED-24\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/10000\",\"transition\":{\"status\":200,\"errorCollection\":{\"errorMessages\":[],\"errors\":{}}}}" - "400": - description: |- - Returned if the request: - - * is missing required fields. - * contains invalid field values. - * contains fields that cannot be set for the issue type. - * is by a user who does not have the necessary permission. - * is to create a subtype in a project different that of the parent issue. - * is for a subtask when the option to create subtasks is disabled. - * is invalid for any other reason. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorCollection' - example: "{\"errorMessages\":[\"Field 'priority' is required\"],\"errors\":{}}" - "401": - description: Returned if the authentication credentials are incorrect or missing. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorCollection' - "403": - description: Returned if the user does not have the necessary permission. 
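For reference, the `createIssue` operation described above is what the `CreateJiraIssue` state calls; a roughly equivalent raw request, mirroring the fields the workflow sends and using the `JIRA_*` variables described earlier (issue type and project are placeholders), might look like this:

```bash
curl -s -u "$JIRA_USERNAME:$JIRA_API_TOKEN" \
  -X POST "$JIRA_URL/rest/api/latest/issue" \
  -H "Content-Type: application/json" \
  -d '{
        "update": {},
        "fields": {
          "summary": "Request For New Namespace: new-namespace",
          "labels": ["workflowInstanceId=<workflow instance id>", "workflowName=escalation"],
          "issuetype": {"id": "<jira issue type id>"},
          "project": {"key": "<jira project key>"}
        }
      }'
```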
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorCollection' - deprecated: false - security: - - basicAuth: [ ] - - /rest/api/3/issue/{issueIdOrKey}: - get: - tags: - - Issues - summary: Get issue - description: |- - Returns the details for an issue. - - The issue is identified by its ID or key, however, if the identifier doesn't match an issue, a case-insensitive search and check for moved issues is performed. If a matching issue is found its details are returned, a 302 or other redirect is **not** returned. The issue key returned in the response is the key of the issue found. - - This operation can be accessed anonymously. - - **[Permissions](#permissions) required:** - - * *Browse projects* [project permission](https://confluence.atlassian.com/x/yodKLg) for the project that the issue is in. - * If [issue-level security](https://confluence.atlassian.com/x/J4lKLg) is configured, issue-level security permission to view the issue. - operationId: getIssue - parameters: - - name: issueIdOrKey - in: path - description: The ID or key of the issue. - required: true - style: simple - explode: false - schema: - type: string - - name: fields - in: query - description: The ID or key of the issue. - required: false - schema: - type: string - default: status - - name: fieldsByKeys - in: query - description: Whether fields in `fields` are referenced by keys rather than IDs. This parameter is useful where fields have been added by a connect app and a field's key may differ from its ID. - required: false - style: form - explode: true - schema: - type: boolean - default: false - - name: expand - in: query - description: |- - Use [expand](#expansion) to include additional information about the issues in the response. This parameter accepts a comma-separated list. Expand options include: - - * `renderedFields` Returns field values rendered in HTML format. - * `names` Returns the display name of each field. - * `schema` Returns the schema describing a field type. - * `transitions` Returns all possible transitions for the issue. - * `editmeta` Returns information about how each field can be edited. - * `changelog` Returns a list of recent updates to an issue, sorted by date, starting from the most recent. - * `versionedRepresentations` Returns a JSON array for each version of a field's value, with the highest number representing the most recent version. Note: When included in the request, the `fields` parameter is ignored. - required: false - style: form - explode: true - schema: - type: string - - name: updateHistory - in: query - description: "Whether the project in which the issue is created is added to the user's **Recently viewed** project list, as shown under **Projects** in Jira. This also populates the [JQL issues search](#api-rest-api-3-search-get) `lastViewed` field." - required: false - style: form - explode: true - schema: - type: boolean - default: false - responses: - "200": - description: Returned if the request is successful. 
- content: - application/json: - schema: - $ref: '#/components/schemas/IssueBean' - example: "{\"id\":\"10002\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/10002\",\"key\":\"ED-1\",\"fields\":{\"watcher\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/EX-1/watchers\",\"isWatching\":false,\"watchCount\":1,\"watchers\":[{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false}]},\"attachment\":[{\"id\":10000,\"self\":\"https://your-domain.atlassian.net/rest/api/3/attachments/10000\",\"filename\":\"picture.jpg\",\"author\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"key\":\"\",\"accountId\":\"5b10a2844c20165700ede21g\",\"accountType\":\"atlassian\",\"name\":\"\",\"avatarUrls\":{\"48x48\":\"https://avatar-management--avatars.server-location.prod.public.atl-paas.net/initials/MK-5.png?size=48&s=48\",\"24x24\":\"https://avatar-management--avatars.server-location.prod.public.atl-paas.net/initials/MK-5.png?size=24&s=24\",\"16x16\":\"https://avatar-management--avatars.server-location.prod.public.atl-paas.net/initials/MK-5.png?size=16&s=16\",\"32x32\":\"https://avatar-management--avatars.server-location.prod.public.atl-paas.net/initials/MK-5.png?size=32&s=32\"},\"displayName\":\"Mia Krystof\",\"active\":false},\"created\":\"2023-06-06T06:40:34.248+0000\",\"size\":23123,\"mimeType\":\"image/jpeg\",\"content\":\"https://your-domain.atlassian.net/jira/rest/api/3/attachment/content/10000\",\"thumbnail\":\"https://your-domain.atlassian.net/jira/rest/api/3/attachment/thumbnail/10000\"}],\"sub-tasks\":[{\"id\":\"10000\",\"type\":{\"id\":\"10000\",\"name\":\"\",\"inward\":\"Parent\",\"outward\":\"Sub-task\"},\"outwardIssue\":{\"id\":\"10003\",\"key\":\"ED-2\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/ED-2\",\"fields\":{\"status\":{\"iconUrl\":\"https://your-domain.atlassian.net/images/icons/statuses/open.png\",\"name\":\"Open\"}}}}],\"description\":{\"type\":\"doc\",\"version\":1,\"content\":[{\"type\":\"paragraph\",\"content\":[{\"type\":\"text\",\"text\":\"Main order flow broken\"}]}]},\"project\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/project/EX\",\"id\":\"10000\",\"key\":\"EX\",\"name\":\"Example\",\"avatarUrls\":{\"48x48\":\"https://your-domain.atlassian.net/secure/projectavatar?size=large&pid=10000\",\"24x24\":\"https://your-domain.atlassian.net/secure/projectavatar?size=small&pid=10000\",\"16x16\":\"https://your-domain.atlassian.net/secure/projectavatar?size=xsmall&pid=10000\",\"32x32\":\"https://your-domain.atlassian.net/secure/projectavatar?size=medium&pid=10000\"},\"projectCategory\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/projectCategory/10000\",\"id\":\"10000\",\"name\":\"FIRST\",\"description\":\"First Project Category\"},\"simplified\":false,\"style\":\"classic\",\"insight\":{\"totalIssueCount\":100,\"lastIssueUpdateTime\":\"2023-06-06T06:40:28.659+0000\"}},\"comment\":[{\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/10010/comment/10000\",\"id\":\"10000\",\"author\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false},\"body\":{\"type\":\"doc\",\"version\":1,\"content\":[{\"type\":\"paragraph\",\"content\":[{\"type\":\"text\",\"text\":\"Lorem ipsum dolor sit amet, 
consectetur adipiscing elit. Pellentesque eget venenatis elit. Duis eu justo eget augue iaculis fermentum. Sed semper quam laoreet nisi egestas at posuere augue semper.\"}]}]},\"updateAuthor\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false},\"created\":\"2021-01-17T12:34:00.000+0000\",\"updated\":\"2021-01-18T23:45:00.000+0000\",\"visibility\":{\"type\":\"role\",\"value\":\"Administrators\",\"identifier\":\"Administrators\"}}],\"issuelinks\":[{\"id\":\"10001\",\"type\":{\"id\":\"10000\",\"name\":\"Dependent\",\"inward\":\"depends on\",\"outward\":\"is depended by\"},\"outwardIssue\":{\"id\":\"10004L\",\"key\":\"PR-2\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/PR-2\",\"fields\":{\"status\":{\"iconUrl\":\"https://your-domain.atlassian.net/images/icons/statuses/open.png\",\"name\":\"Open\"}}}},{\"id\":\"10002\",\"type\":{\"id\":\"10000\",\"name\":\"Dependent\",\"inward\":\"depends on\",\"outward\":\"is depended by\"},\"inwardIssue\":{\"id\":\"10004\",\"key\":\"PR-3\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/PR-3\",\"fields\":{\"status\":{\"iconUrl\":\"https://your-domain.atlassian.net/images/icons/statuses/open.png\",\"name\":\"Open\"}}}}],\"worklog\":[{\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/10010/worklog/10000\",\"author\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false},\"updateAuthor\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false},\"comment\":{\"type\":\"doc\",\"version\":1,\"content\":[{\"type\":\"paragraph\",\"content\":[{\"type\":\"text\",\"text\":\"I did some work here.\"}]}]},\"updated\":\"2021-01-18T23:45:00.000+0000\",\"visibility\":{\"type\":\"group\",\"value\":\"jira-developers\",\"identifier\":\"276f955c-63d7-42c8-9520-92d01dca0625\"},\"started\":\"2021-01-17T12:34:00.000+0000\",\"timeSpent\":\"3h 20m\",\"timeSpentSeconds\":12000,\"id\":\"100028\",\"issueId\":\"10002\"}],\"updated\":1,\"timetracking\":{\"originalEstimate\":\"10m\",\"remainingEstimate\":\"3m\",\"timeSpent\":\"6m\",\"originalEstimateSeconds\":600,\"remainingEstimateSeconds\":200,\"timeSpentSeconds\":400}}}" - "401": - description: Returned if the authentication credentials are incorrect or missing. - "404": - description: Returned if the issue is not found or the user does not have permission to view it. - deprecated: false - security: - - basicAuth: [ ] - -components: - schemas: - CreatedIssue: - type: object - properties: - id: - type: string - description: The ID of the created issue or subtask. - readOnly: true - key: - type: string - description: The key of the created issue or subtask. - readOnly: true - self: - type: string - description: The URL of the created issue or subtask. - readOnly: true - additionalProperties: false - description: Details about a created issue or subtask. - EntityProperty: - type: object - properties: - key: - type: string - description: The key of the property. Required on create and update. - value: - description: The value of the property. Required on create and update. 
- additionalProperties: false - description: >- - An entity property, for more information see [Entity - properties](https://developer.atlassian.com/cloud/jira/platform/jira-entity-properties/). - ErrorCollection: - type: object - properties: - errorMessages: - type: array - description: "The list of error messages produced by this operation. For example, \"input parameter 'key' must be provided\"" - items: - type: string - errors: - type: object - additionalProperties: - type: string - description: "The list of errors by parameter returned by the operation. For example,\"projectKey\": \"Project keys must start with an uppercase letter, followed by one or more uppercase alphanumeric characters.\"" - status: - type: integer - format: int32 - additionalProperties: false - description: Error messages from an operation. - IssueBean: - type: object - properties: - expand: - type: string - description: Expand options that include additional issue details in the response. - readOnly: true - xml: - attribute: true - fields: - type: object - additionalProperties: { } - id: - type: string - description: The ID of the issue. - readOnly: true - key: - type: string - description: The key of the issue. - readOnly: true - - additionalProperties: false - description: Details about an issue. - xml: - name: issue - - IssueUpdateDetails: - type: object - properties: - fields: - type: object - additionalProperties: { } - description: "List of issue screen fields to update, specifying the sub-field to update and its value for each field. This field provides a straightforward option when setting a sub-field. When multiple sub-fields or other operations are required, use `update`. Fields included in here cannot be included in `update`." - properties: - type: array - description: Details of issue properties to be add or update. - items: - $ref: '#/components/schemas/EntityProperty' - securitySchemes: - basicAuth: - type: http - description: You can access this resource via basic auth. - scheme: basic diff --git a/escalation-eda/escalation-swf/src/main/resources/specs/kube.yaml b/escalation-eda/escalation-swf/src/main/resources/specs/kube.yaml deleted file mode 100644 index dc0e46a..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/specs/kube.yaml +++ /dev/null @@ -1,516 +0,0 @@ - openapi: 3.0.0 - info: - title: Kubernetes - version: v1.27.3 - paths: - - /api/v1/namespaces: - get: - tags: - - core_v1 - description: list or watch objects of kind Namespace - operationId: listCoreV1Namespace - parameters: - - name: allowWatchBookmarks - in: query - description: allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. - schema: - type: boolean - uniqueItems: true - - name: continue - in: query - description: |- - The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - - This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. - schema: - type: string - uniqueItems: true - - name: fieldSelector - in: query - description: A selector to restrict the list of returned objects by their fields. Defaults to everything. - schema: - type: string - uniqueItems: true - - name: labelSelector - in: query - description: A selector to restrict the list of returned objects by their labels. Defaults to everything. - schema: - type: string - uniqueItems: true - - name: limit - in: query - description: |- - limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - - The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. - schema: - type: integer - uniqueItems: true - - name: resourceVersion - in: query - description: |- - resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - - Defaults to unset - schema: - type: string - uniqueItems: true - - name: resourceVersionMatch - in: query - description: |- - resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
- - Defaults to unset - schema: - type: string - uniqueItems: true - - name: sendInitialEvents - in: query - description: |- - `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic "Bookmark" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. - - When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan - is interpreted as "data at least as new as the provided `resourceVersion`" - and the bookmark event is send when the state is synced - to a `resourceVersion` at least as fresh as the one provided by the ListOptions. - If `resourceVersion` is unset, this is interpreted as "consistent read" and the - bookmark event is send when the state is synced at least to the moment - when request started being processed. - - `resourceVersionMatch` set to any other value or unset - Invalid error is returned. - - Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward compatibility reasons) and to false otherwise. - schema: - type: boolean - uniqueItems: true - - name: timeoutSeconds - in: query - description: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. - schema: - type: integer - uniqueItems: true - - name: watch - in: query - description: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. - schema: - type: boolean - uniqueItems: true - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - application/json;stream=watch: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - application/vnd.kubernetes.protobuf: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - application/vnd.kubernetes.protobuf;stream=watch: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - application/yaml: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - "401": - description: Unauthorized - x-kubernetes-action: list - x-kubernetes-group-version-kind: - group: "" - version: v1 - kind: Namespace - security: - - BearerToken: [] - post: - tags: - - core_v1 - description: create a Namespace - operationId: createCoreV1Namespace - parameters: - - name: dryRun - in: query - description: 'When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed' - schema: - type: string - uniqueItems: true - - name: fieldManager - in: query - description: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
- schema: - type: string - uniqueItems: true - - name: fieldValidation - in: query - description: 'fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.' - schema: - type: string - uniqueItems: true - requestBody: - content: - '*/*': - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/vnd.kubernetes.protobuf: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/yaml: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - "201": - description: Created - content: - application/json: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/vnd.kubernetes.protobuf: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/yaml: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - "202": - description: Accepted - content: - application/json: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/vnd.kubernetes.protobuf: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/yaml: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - "401": - description: Unauthorized - x-kubernetes-action: post - x-kubernetes-group-version-kind: - group: "" - version: v1 - kind: Namespace - security: - - BearerToken: [ ] - parameters: - - name: pretty - in: query - description: If 'true', then the output is pretty printed. - schema: - type: string - uniqueItems: true - - components: - schemas: - io.k8s.apimachinery.pkg.apis.meta.v1.Time: - description: Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. - type: string - format: date-time - io.k8s.api.core.v1.Namespace: - description: Namespace provides a scope for Names. Use of multiple namespaces is optional. - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - default: - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta' - spec: - description: 'Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - default: - allOf: - - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceSpec' - status: - description: 'Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - default: - allOf: - - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceStatus' - x-kubernetes-group-version-kind: - - group: "" - kind: Namespace - version: v1 - io.k8s.api.core.v1.NamespaceCondition: - description: NamespaceCondition contains details about state of namespace. - type: object - required: - - type - - status - properties: - lastTransitionTime: - default: - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time' - message: - type: string - reason: - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - default: "" - type: - description: Type of namespace controller condition. - type: string - default: "" - io.k8s.api.core.v1.NamespaceList: - description: NamespaceList is a list of Namespaces. - type: object - required: - - items - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - items: - description: 'Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: array - items: - default: - allOf: - - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - default: - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta' - x-kubernetes-group-version-kind: - - group: "" - kind: NamespaceList - version: v1 - io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta: - description: ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}. - type: object - properties: - continue: - description: continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. 
Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. - type: string - remainingItemCount: - description: remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact. - type: integer - format: int64 - resourceVersion: - description: 'String that identifies the server''s internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - selfLink: - description: 'Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.' - type: string - io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry: - description: ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to. - type: object - properties: - apiVersion: - description: APIVersion defines the version of this resource that this field set applies to. The format is "group/version" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted. - type: string - fieldsType: - description: 'FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: "FieldsV1"' - type: string - fieldsV1: - description: FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1' - manager: - description: Manager is an identifier of the workflow managing these fields. - type: string - operation: - description: Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'. - type: string - subresource: - description: Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource. - type: string - time: - description: Time is the timestamp of when the ManagedFields entry was added. 
The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over. - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time' - io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1: - description: |- - FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. - - Each key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set. - - The exact format is defined in sigs.k8s.io/structured-merge-diff - type: object - - io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta: - description: ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. - type: object - properties: - annotations: - description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations' - type: object - additionalProperties: - type: string - default: "" - creationTimestamp: - description: |- - CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. - - Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - default: - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time' - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. - type: integer - format: int64 - deletionTimestamp: - description: |- - DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. 
In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. - - Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time' - finalizers: - description: Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. - type: array - items: - type: string - default: "" - x-kubernetes-patch-strategy: merge - generateName: - description: |- - GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. - - If this field is specified and the generated name exists, the server will return a 409. - - Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency - type: string - generation: - description: A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. - type: integer - format: int64 - labels: - description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels' - type: object - additionalProperties: - type: string - default: "" - managedFields: - description: ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object. - type: array - items: - default: - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry' - name: - description: 'Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' - type: string - namespace: - description: |- - Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. - - Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces - type: string - ownerReferences: - description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. - type: array - items: - default: - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference' - x-kubernetes-patch-merge-key: uid - x-kubernetes-patch-strategy: merge - resourceVersion: - description: |- - An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. - - Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - selfLink: - description: 'Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.' - type: string - uid: - description: |- - UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. - - Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids - type: string - - io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference: - description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. - type: object - required: - - apiVersion - - kind - - name - - uid - properties: - apiVersion: - description: API version of the referent. - type: string - default: "" - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. 
- type: boolean - controller: - description: If true, this reference points to the managing controller. - type: boolean - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - default: "" - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' - type: string - default: "" - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' - type: string - default: "" - x-kubernetes-map-type: atomic - - io.k8s.api.core.v1.NamespaceSpec: - description: NamespaceSpec describes the attributes on a Namespace. - type: object - properties: - finalizers: - description: 'Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/' - type: array - items: - type: string - default: "" - io.k8s.api.core.v1.NamespaceStatus: - description: NamespaceStatus is information about the current status of a Namespace. - type: object - properties: - conditions: - description: Represents the latest available observations of a namespace's current state. - type: array - items: - default: - allOf: - - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceCondition' - x-kubernetes-patch-merge-key: type - x-kubernetes-patch-strategy: merge - phase: - description: |- - Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ - - Possible enum values: - - `"Active"` means the namespace is available for use in the system - - `"Terminating"` means the namespace is undergoing graceful termination - type: string - enum: - - Active - - Terminating - - securitySchemes: - BearerToken: - type: http - scheme: bearer - description: Bearer Token authentication diff --git a/escalation-eda/escalation-swf/src/main/resources/specs/mailtrap.yaml b/escalation-eda/escalation-swf/src/main/resources/specs/mailtrap.yaml deleted file mode 100644 index 79e4ba6..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/specs/mailtrap.yaml +++ /dev/null @@ -1,79 +0,0 @@ -openapi: 3.0.3 -info: - title: The MailTrap API (https://api-docs.mailtrap.io/) - version: v2 -servers: - - url: "https://sandbox.api.mailtrap.io" -paths: - /api/send/{inbox_id}: - post: - summary: Send email - operationId: sendEmail - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SendMailDetails' - required: true - parameters: - - name: inbox_id - in: path - required: true - schema: - type: integer - responses: - "200": - description: all good - content: - application/json: - schema: - $ref: '#/components/schemas/SendMailResponse' - security: - - apiToken: [] -components: - schemas: - SendMailDetails: - type: object - properties: - to: - type: array - items: - type: object - properties: - email: - type: string - name: - type: string - from: - type: object - properties: - email: - type: string - name: - type: string - subject: - type: string - readOnly: true - html: - type: string - readOnly: true - text: - type: string - readOnly: true - additionalProperties: false - SendMailResponse: - type: object - properties: - success: - type: boolean - readOnly: true - message_ids: - type: array - items: - type: string - additionalProperties: false - securitySchemes: - apiToken: - type: apiKey - in: 
header - name: Api-Token diff --git a/escalation-eda/escalation-swf/src/main/resources/specs/notifications.yaml b/escalation-eda/escalation-swf/src/main/resources/specs/notifications.yaml deleted file mode 100644 index 0123027..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/specs/notifications.yaml +++ /dev/null @@ -1,236 +0,0 @@ -openapi: 3.0.3 -info: - title: Notifications Plugin - OpenAPI Specs - description: |- - Notifications Plugin - OpenAPI Specs - version: 1.0.0 -tags: - - name: notifications - description: notifications plugin -servers: - - url: http://localhost:7007/api/notifications -paths: - /notifications: - post: - tags: - - notifications - summary: Create notification - description: Create notification - operationId: createNotification - requestBody: - description: Create a new notification - content: - application/json: - schema: - $ref: '#/components/schemas/CreateBody' - responses: - '200': - description: Successful operation - content: - application/json: - schema: - type: object - properties: - messageId: - type: string - example: bc9f19de-8b7b-49a8-9262-c5036a1ed35e - required: ['messageId'] - get: - tags: - - notifications - summary: Gets notifications - description: Gets notifications - operationId: getNotifications - parameters: - - name: pageSize - in: query - description: Page size of the result - required: false - schema: - type: integer - minimum: 0 - - name: pageNumber - in: query - description: Page number of the result - required: false - schema: - type: integer - minimum: 0 - - name: orderBy - in: query - description: order by field. e.g. created, origin. - required: false - schema: - type: string - enum: - - title - - message - - created - - topic - - origin - - name: orderByDirec - in: query - description: order ascending or descending - required: false - schema: - type: string - enum: - - asc - - desc - - name: containsText - in: query - description: Filter notifications whose either title or message contains the provided string - required: false - schema: - type: string - - name: createdAfter - in: query - description: Only notifications created after this timestamp will be included - required: false - schema: - type: string - format: date-time - - name: messageScope - in: query - description: retrieve either logged-in user messages, system messages or both - required: false - schema: - type: string - enum: - - all - - user - - system - - name: read - in: query - description: Notifications read or not - required: false - schema: - type: boolean - responses: - '200': - description: Successful operation - content: - application/json: - schema: - $ref: '#/components/schemas/Notifications' - /notifications/count: - get: - tags: - - notifications - summary: Get notifications count - description: Gets notifications count - operationId: getNotificationsCount - parameters: - - name: containsText - in: query - description: Filter notifications whose either title or message contains the provided string - required: false - schema: - type: string - - name: createdAfter - in: query - description: Only notifications created after this timestamp will be included - required: false - schema: - type: string - format: date-time - - name: messageScope - in: query - description: retrieve either logged-in user messages, system messages or both - required: false - schema: - type: string - enum: - - all - - user - - system - - name: read - in: query - description: Notifications read or not - required: false - schema: - type: boolean - responses: - '200': - 
description: Successful operation - content: - application/json: - schema: - type: object - properties: - count: - type: number - required: ['count'] - /notifications/read: - put: - tags: - - notifications - summary: Set notification as read/unread - description: Set notification as read/unread - operationId: setRead - parameters: - - name: messageId - in: query - description: The message ID - required: true - schema: - type: string - - name: read - in: query - description: read/unread - required: true - schema: - type: boolean - responses: - '200': - description: Successful operation -components: - schemas: - Notifications: - type: array - items: - $ref: '#/components/schemas/Notification' - Notification: - properties: - id: - type: string - created: - type: string - format: date-time - readByUser: - type: boolean - isSystem: - type: boolean - origin: - type: string - title: - type: string - message: - type: string - topic: - type: string - actions: - type: array - items: - $ref: '#/components/schemas/Action' - required: [id, created, readByUser, isSystem, origin, title, actions] - Action: - properties: - id: - type: string - title: - type: string - url: - type: string - required: [id, title, url] - CreateBody: - properties: - origin: - type: string - title: - type: string - message: - type: string - topic: - type: string - required: [origin, title] \ No newline at end of file diff --git a/escalation-eda/escalation-swf/src/main/resources/specs/subflow-input-schema.json b/escalation-eda/escalation-swf/src/main/resources/specs/subflow-input-schema.json deleted file mode 100644 index 0750ce9..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/specs/subflow-input-schema.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "request": { - "type": "object", - "properties": { - "type": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "ticketId": { - "type": "string" - }, - "ticket": { - "type": "object" - }, - "parentId": { - "type": "string" - } - }, - "required": [ - "type" - ] - } - }, - "required": [ - "request" - ] -} \ No newline at end of file diff --git a/escalation-eda/escalation-swf/src/main/resources/specs/ticket-escalation-schema.json b/escalation-eda/escalation-swf/src/main/resources/specs/ticket-escalation-schema.json deleted file mode 100644 index 158ed2f..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/specs/ticket-escalation-schema.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "namespace": { - "type": "string", - "description": "Name of the requested namespace" - }, - "email": { - "type": "object", - "description": "Optional email notification", - "properties": { - "manager": { - "type": "string", - "format": "email", - "description": "Email address of the escalation manager" - } - } - } - } -} \ No newline at end of file diff --git a/escalation-eda/escalation-swf/src/main/resources/ticketEscalation.svg b/escalation-eda/escalation-swf/src/main/resources/ticketEscalation.svg deleted file mode 100644 index 958c0c9..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/ticketEscalation.svg +++ /dev/null @@ -1 +0,0 @@ -[ticket escalation workflow diagram (SVG markup not reproduced): Start -> CreateTicket -> GetTicket -> CheckTicketState -> WaitForApprovalEvent -> Escalate -> CreateK8sNamespace -> End, with timeoutError and notAvailable error transitions]
\ No newline at end of file diff --git a/escalation-eda/escalation-swf/src/main/resources/ticketEscalation.sw.yaml b/escalation-eda/escalation-swf/src/main/resources/ticketEscalation.sw.yaml deleted file mode 100644 index d6b3e41..0000000 --- a/escalation-eda/escalation-swf/src/main/resources/ticketEscalation.sw.yaml +++ /dev/null @@ -1,134 +0,0 @@ -specVersion: "0.8" -id: ticketEscalation -name: Ticket escalation -annotations: - - "workflow-type/infrastructure" -version: 0.0.1 -timeouts: - workflowExecTimeout: - duration: PT24H -start: CreateTicket -dataInputSchema: - failOnValidationErrors: true - schema: specs/ticket-escalation-schema.json -errors: - - name: timeoutError - code: TimedOut - - name: notAvailable - code: '404' -functions: - - name: createNotification - operation: 'specs/notifications.yaml#createNotification' - - name: sendEmail - operation: 'specs/mailtrap.yaml#sendEmail' - - name: createK8sNamespace - operation: specs/kube.yaml#createCoreV1Namespace - - name: logInfo - type: custom - operation: "sysout:INFO" -events: - - name: approvalEvent - # TODO add property - source: ticket.listener - # TODO add property - type: dev.parodos.escalation -states: - - name: CreateTicket - type: operation - actions: - # - subFlowRef: $SECRET.escalation_subflow_id - - subFlowRef: ticketingService - actionDataFilter: - toStateData: .createResponse - stateDataFilter: - input: ". += { request: {type: \"create\", namespace: .namespace, parentId: $WORKFLOW.instanceId} }" - transition: WaitForApprovalEvent - - name: GetTicket - type: operation - actions: - # - subFlowRef: $SECRET.escalation_subflow_id - - subFlowRef: ticketingService - actionDataFilter: - toStateData: .getResponse - stateDataFilter: - input: ". += { request: { type: \"get\", ticketId: .createResponse.ticketId, ticket: .createResponse.ticket} }" - transition: CheckTicketState - - name: CheckTicketState - type: switch - dataConditions: - - condition: (.getResponse.status == "Approved") - transition: - nextState: CreateK8sNamespace - defaultCondition: - transition: WaitForApprovalEvent - - name: WaitForApprovalEvent - type: callback - action: - functionRef: - refName: logInfo - arguments: - message: "\"Waiting for approvalEvent: \\(.)\"" - eventRef: approvalEvent - timeouts: - eventTimeout: PT10S - # This is not working for now, waiting for a fix to https://issues.redhat.com/browse/KOGITO-9811 - # eventTimeout: $SECRET.timeout_seconds - onErrors: - - errorRef: timeoutError - transition: Escalate - transition: CreateK8sNamespace - - name: Escalate - type: parallel - branches: - - name: printAction - actions: - - name: printAction - functionRef: - refName: logInfo - arguments: - message: "\"Invoking escalation: \\(.)\"" - - name: createNotification - actions: - - name: createNotification - functionRef: - refName: createNotification - arguments: - title: '"ATTN: Escalation for ticket - " + .createResponse.ticketId' - message: '"Please manage escalation ticket " + .createResponse.ticketId + ""' - origin: "Escalation Workflow" - topic: "Escalation Workflow" - - name: sendEmail - actions: - - name: sendEmail - condition: .email != null and .email.manager != null - functionRef: - refName: sendEmail - arguments: - inbox_id: $SECRET.mailtrap_inbox_id | tonumber - to: - - email: .email.manager - name: Escalation Manager - from: - email: $SECRET.sender_email - name: Escalation service - subject: " \"Escalation ticket \" + .createResponse.ticketId " - html: '"Please manage escalation ticket " + .createResponse.ticketId
+ ""' - onErrors: - - errorRef: notAvailable - transition: GetTicket - transition: GetTicket - - name: CreateK8sNamespace - type: operation - actions: - - functionRef: - refName: createK8sNamespace - arguments: - apiVersion: v1 - kind: Namespace - metadata: - name: .namespace - actionDataFilter: - toStateData: .createdNamespace - stateDataFilter: - output: "{createdNamespace: .createdNamespace}" - end: true \ No newline at end of file diff --git a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/EscalationSwfTest.java b/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/EscalationSwfTest.java deleted file mode 100644 index 0a8865b..0000000 --- a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/EscalationSwfTest.java +++ /dev/null @@ -1,176 +0,0 @@ -package dev.parodos.escalationswf; - -import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; -import static com.github.tomakehurst.wiremock.client.WireMock.post; -import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.options; -import static io.restassured.RestAssured.given; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.awaitility.Awaitility.await; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import java.util.UUID; - -import javax.inject.Inject; - -import org.jboss.logging.Logger; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; -import org.kie.kogito.index.model.ProcessInstanceState; -import org.kie.kogito.index.storage.DataIndexStorageService; -import org.kie.kogito.persistence.api.StorageFetcher; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.github.tomakehurst.wiremock.WireMockServer; - -import dev.parodos.escalationswf.model.CreateResponse; -import dev.parodos.escalationswf.model.EscalationRequest; -import dev.parodos.escalationswf.model.JiraIssue; -import dev.parodos.escalationswf.model.Namespace; -import io.quarkus.test.junit.QuarkusTest; -import io.restassured.response.ExtractableResponse; -import io.restassured.response.Response; - -@QuarkusTest -public class EscalationSwfTest { - private static Logger logger = Logger.getLogger(EscalationSwfTest.class); - - private static WireMockServer jira; - private static WireMockServer openshift; - private static WireMockServer mailtrap; - - @Inject - DataIndexStorageService dataIndexService; - @Inject - static ObjectMapper mapper; - - @BeforeAll - public static void startMockServers() throws JsonProcessingException { - jira = new WireMockServer(options().port(8181)); - openshift = new WireMockServer(options().port(8282)); - mailtrap = new WireMockServer(options().port(8383)); - jira.start(); - JiraIssue jiraIssue = aJiraIssue(); - jira.stubFor(post("/rest/api/latest/issue").willReturn( - aResponse().withHeader("Content-Type", "application/json") - .withBody(new ObjectMapper().writeValueAsString(jiraIssue)) - .withStatus(201))); - - openshift.start(); - Namespace namespace = Namespace.of("NS"); - openshift.stubFor(post("/api/v1/namespaces").willReturn( - aResponse().withHeader("Content-Type", "application/json") - .withBody(new ObjectMapper().writeValueAsString(namespace)).withStatus(200))); - - mailtrap.start(); - mailtrap.stubFor(post("/").willReturn(aResponse().withBody("ok").withStatus(200))); - } - - 
@AfterAll - public static void stopMockServers() { - if (jira != null) { - jira.stop(); - } - if (openshift != null) { - openshift.stop(); - } - if (mailtrap != null) { - mailtrap.stop(); - } - } - - private EscalationRequest aRequest() { - return new EscalationRequest().setNamespace("NS").setManager("manager@company.com"); - } - - private static JiraIssue aJiraIssue() { - return new JiraIssue().setKey("PRJ-1") - .setSelf("https://your-domain.atlassian.net/rest/api/3/issue/10000"); - } - - @Test - @Disabled // Until the SF versioning issue is resolved - public void when_RequestIsApproved_ThenTheNamespaceIsCreated() { - CreateResponse createResponse = startRequest(); - String workflowInstanceId = createResponse.getId(); - org.kie.kogito.index.model.ProcessInstance processInstance = readCurrentState(workflowInstanceId); - assertTrue(isNodeCompleted("CreateJiraIssue", processInstance), "CreateJiraIssue is Completed"); - assertTrue(isNodeRunning("WaitForApprovalEvent", processInstance), "WaitForApprovalEvent is Running"); - sendCompletionCloudEvent(createResponse.getId()); - processInstance = readCurrentState(workflowInstanceId); - assertTrue(isNodeCompleted("CreateJiraIssue", processInstance), "CreateJiraIssue is Completed"); - assertTrue(nodeExists("Join-WaitForApprovalEvent", processInstance), "Join-WaitForApprovalEvent exists"); - assertTrue(isNodeCompleted("Join-WaitForApprovalEvent", processInstance), "Join-WaitForApprovalEvent is Completed"); - assertTrue(nodeExists("CreateK8sNamespace", processInstance), "CreateK8sNamespace exists"); - assertTrue(isNodeCompleted("CreateK8sNamespace", processInstance), "CreateK8sNamespace is Completed"); - assertEquals(ProcessInstanceState.COMPLETED, - ProcessInstanceState.fromStatus(processInstance.getState()), "SWF state is COMPLETED"); - } - - private CreateResponse startRequest() { - EscalationRequest aRequest = aRequest(); - logger.infof("Sending request %s", aRequest); - ExtractableResponse response = given() - .when().contentType("application/json") - .body(aRequest).post("/ticketEscalation") - .then() - .statusCode(201) - .extract(); - logger.infof("Response is %s", response.asPrettyString()); - CreateResponse createResponse = response.as(CreateResponse.class); - logger.infof("CreateResponse is %s", createResponse); - return createResponse; - } - - private void sendCompletionCloudEvent(String worflowInstanceId) { - given() - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-source", "jira.listener") - .header("ce-type", "dev.parodos.escalation") - .header("ce-kogitoprocrefid", worflowInstanceId) - .contentType("application/cloudevents+json") - .body("{\"event\": \"Closed ticket " + worflowInstanceId + "\"}") - .post("/") - .then() - .statusCode(202); - - await() - .atLeast(1, SECONDS) - .atMost(2, SECONDS); - } - - private org.kie.kogito.index.model.ProcessInstance readCurrentState(String worflowInstanceId) { - await() - .atLeast(1, SECONDS) - .atMost(2, SECONDS); - logger.infof("Reading status of %s", worflowInstanceId); - - StorageFetcher cache = dataIndexService - .getProcessInstanceStorage(); - org.kie.kogito.index.model.ProcessInstance processInstance = cache.get(worflowInstanceId); - logger.debugf("Current status is %s", processInstance); - - return processInstance; - } - - private boolean isNodeRunning(String nodeName, org.kie.kogito.index.model.ProcessInstance processInstance) { - return processInstance.getNodes().stream().filter( - n -> n.getName().equals(nodeName) && n.getEnter() != null && 
n.getExit() == null) - .findFirst().isPresent(); - } - - private boolean isNodeCompleted(String nodeName, org.kie.kogito.index.model.ProcessInstance processInstance) { - return processInstance.getNodes().stream().filter( - n -> n.getName().equals(nodeName) && n.getEnter() != null && n.getExit() != null) - .findFirst().isPresent(); - } - - private boolean nodeExists(String nodeName, org.kie.kogito.index.model.ProcessInstance processInstance) { - return processInstance.getNodes().stream().filter( - n -> n.getName().equals(nodeName)).findFirst().isPresent(); - } -} diff --git a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/CreateResponse.java b/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/CreateResponse.java deleted file mode 100644 index 313bb8b..0000000 --- a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/CreateResponse.java +++ /dev/null @@ -1,75 +0,0 @@ -package dev.parodos.escalationswf.model; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; - -@JsonIgnoreProperties(ignoreUnknown = true) -public class CreateResponse { - private String id; - private Workflowdata workflowdata; - - public static class Workflowdata { - private String namespace; - private String manager; - private JiraIssue jiraIssue; - private String jiraBrowser; - - public String getNamespace() { - return namespace; - } - - public void setNamespace(String namespace) { - this.namespace = namespace; - } - - public String getManager() { - return manager; - } - - public void setManager(String manager) { - this.manager = manager; - } - - public JiraIssue getJiraIssue() { - return jiraIssue; - } - - public void setJiraIssue(JiraIssue jiraIssue) { - this.jiraIssue = jiraIssue; - } - - public String getJiraBrowser() { - return jiraBrowser; - } - - public void setJiraBrowser(String jiraBrowser) { - this.jiraBrowser = jiraBrowser; - } - - @Override - public String toString() { - return "Workflowdata [namespace=" + namespace + ", manager=" + manager + ", jiraIssue=" + jiraIssue + "]"; - } - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public Workflowdata getWorkflowdata() { - return workflowdata; - } - - public void setWorkflowdata(Workflowdata workflowdata) { - this.workflowdata = workflowdata; - } - - @Override - public String toString() { - return "CreateResponse [id=" + id + ", workflowdata=" + workflowdata + "]"; - } - -} diff --git a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/EscalationRequest.java b/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/EscalationRequest.java deleted file mode 100644 index 22aaf6a..0000000 --- a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/EscalationRequest.java +++ /dev/null @@ -1,29 +0,0 @@ -package dev.parodos.escalationswf.model; - -public class EscalationRequest { - private String namespace; - private String manager; - - public String getNamespace() { - return namespace; - } - - public EscalationRequest setNamespace(String namespace) { - this.namespace = namespace; - return this; - } - - public String getManager() { - return manager; - } - - public EscalationRequest setManager(String manager) { - this.manager = manager; - return this; - } - - @Override - public String toString() { - return "EscalationRequest [namespace=" + namespace + ", manager=" + manager + "]"; - } -} diff --git 
a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/JiraIssue.java b/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/JiraIssue.java deleted file mode 100644 index c0602a5..0000000 --- a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/JiraIssue.java +++ /dev/null @@ -1,32 +0,0 @@ -package dev.parodos.escalationswf.model; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; - -@JsonIgnoreProperties(ignoreUnknown = true) -public class JiraIssue { - private String key; - private String self; - - public String getKey() { - return key; - } - - public JiraIssue setKey(String key) { - this.key = key; - return this; - } - - public String getSelf() { - return self; - } - - public JiraIssue setSelf(String self) { - this.self = self; - return this; - } - - @Override - public String toString() { - return "JiraIssue [key=" + key + ", self=" + self + "]"; - } -} diff --git a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/Namespace.java b/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/Namespace.java deleted file mode 100644 index 6e86592..0000000 --- a/escalation-eda/escalation-swf/src/test/java/dev/parodos/escalationswf/model/Namespace.java +++ /dev/null @@ -1,62 +0,0 @@ -package dev.parodos.escalationswf.model; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; - -@JsonIgnoreProperties(ignoreUnknown = true) -public class Namespace { - private String apiVersion = "v1"; - private String kind = "Namespace"; - private Metadata metadata = new Metadata(); - - public static Namespace of(String name) { - Namespace namespace = new Namespace(); - namespace.getMetadata().setName(name); - return namespace; - } - - public static class Metadata { - private String name; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - @Override - public String toString() { - return "Metadata [name=" + name + "]"; - } - } - - public String getApiVersion() { - return apiVersion; - } - - public void setApiVersion(String apiVersion) { - this.apiVersion = apiVersion; - } - - public String getKind() { - return kind; - } - - public void setKind(String kind) { - this.kind = kind; - } - - public Metadata getMetadata() { - return metadata; - } - - public void setMetadata(Metadata metadata) { - this.metadata = metadata; - } - - @Override - public String toString() { - return "Namespace [apiVersion=" + apiVersion + ", kind=" + kind + ", metadata=" + metadata + "]"; - } -} diff --git a/escalation-eda/helm/eda-infra/.helmignore b/escalation-eda/helm/eda-infra/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/escalation-eda/helm/eda-infra/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/escalation-eda/helm/eda-infra/Chart.yaml b/escalation-eda/helm/eda-infra/Chart.yaml deleted file mode 100644 index 5a6056c..0000000 --- a/escalation-eda/helm/eda-infra/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v2 -name: eda-infra -description: | - A Helm chart to install the serverless infrastructure: - * openshift-serverless namespace - * Red Hat Serverless operator - * default KnativeEventing instance in knative-eventing namespace - * default KnativeServing instance in knative-serving namespace -type: application -version: 0.1.0 -appVersion: "1.0.0" \ No newline at end of file diff --git a/escalation-eda/helm/eda-infra/crds/operator.yaml b/escalation-eda/helm/eda-infra/crds/operator.yaml deleted file mode 100644 index 72ed08e..0000000 --- a/escalation-eda/helm/eda-infra/crds/operator.yaml +++ /dev/null @@ -1,3192 +0,0 @@ ---- -# Copyright 2021 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: knativeeventings.operator.knative.dev - labels: - operator.knative.dev/release: "v1.9.6" - app.kubernetes.io/version: "1.9.6" - app.kubernetes.io/part-of: knative-operator -spec: - group: operator.knative.dev - versions: - - name: v1beta1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - description: Schema for the knativeeventings API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the desired state of KnativeEventing - properties: - additionalManifests: - description: A list of the additional eventing manifests, which will be installed by the operator - items: - properties: - URL: - description: The link of the additional manifest URL - type: string - type: object - type: array - config: - additionalProperties: - additionalProperties: - type: string - type: object - description: A means to override the corresponding entries in the upstream configmaps - type: object - defaultBrokerClass: - description: The default broker type to use for the brokers Knative creates. If no value is provided, MTChannelBasedBroker will be used. 
- type: string - high-availability: - description: Allows specification of HA control plane - properties: - replicas: - description: The number of replicas that HA parts of the control plane will be scaled to - minimum: 0 - type: integer - type: object - workloads: - description: A mapping of deployment or statefulset name to override - type: array - items: - type: object - properties: - name: - description: The name of the deployment - type: string - labels: - additionalProperties: - type: string - description: Labels overrides labels for the deployment and its template. - type: object - livenessProbes: - description: LivenessProbes overrides liveness probes for the containers. - items: - description: ProbesRequirementsOverride enables the user to override any container's env vars. - properties: - container: - description: The container name - type: string - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - required: - - container - type: object - type: array - annotations: - additionalProperties: - type: string - description: Annotations overrides labels for the deployment and its template. - type: object - env: - description: Env overrides env vars for the containers. - items: - properties: - container: - description: The container name - type: string - envVars: - description: The desired EnvVarRequirements - items: - description: EnvVar represents an environment variable present in a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. 
- type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its key must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - required: - - container - type: object - type: array - replicas: - description: The number of replicas that HA parts of the control plane will be scaled to - type: integer - minimum: 0 - nodeSelector: - additionalProperties: - type: string - description: NodeSelector overrides nodeSelector for the deployment. - type: object - readinessProbes: - description: ReadinessProbes overrides readiness probes for the containers. 
- items: - description: ProbesRequirementsOverride enables the user to override any container's env vars. - properties: - container: - description: The container name - type: string - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - required: - - container - type: object - type: array - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: If specified, the pod's topology spread constraints. - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. It''s the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It''s considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - version: - description: Version the cluster should be on. - type: string - volumeMounts: - description: VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. VolumeMounts specified will be appended to other VolumeMounts in the alertmanager container, that are generated as a result of StorageSpec objects. - items: - description: VolumeMount describes a mounting of a Volume within a container. - properties: - mountPath: - description: Path within the container at which the volume should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
- properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - resources: - description: If specified, the container's resources. - items: - description: The pod this Resource is used to specify the requests and limits for a certain container based on the name. - properties: - container: - description: The name of the container - type: string - limits: - properties: - cpu: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - memory: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - type: object - requests: - properties: - cpu: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - memory: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - type: object - type: object - type: array - deployments: - description: A mapping of deployment name to override - type: array - items: - type: object - properties: - name: - description: The name of the deployment - type: string - labels: - additionalProperties: - type: string - description: Labels overrides labels for the deployment and its template. - type: object - annotations: - additionalProperties: - type: string - description: Annotations overrides labels for the deployment and its template. - type: object - env: - description: Env overrides env vars for the containers. - items: - properties: - container: - description: The container name - type: string - envVars: - description: The desired EnvVarRequirements - items: - description: EnvVar represents an environment variable present in a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the ConfigMap or its key must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - required: - - container - type: object - type: array - livenessProbes: - description: LivenessProbes overrides liveness probes for the containers. - items: - description: ProbesRequirementsOverride enables the user to override any container's env vars. - properties: - container: - description: The container name - type: string - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. 
Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - required: - - container - type: object - type: array - replicas: - description: The number of replicas that HA parts of the control plane will be scaled to - type: integer - minimum: 0 - nodeSelector: - additionalProperties: - type: string - description: NodeSelector overrides nodeSelector for the deployment. - type: object - readinessProbes: - description: ReadinessProbes overrides readiness probes for the containers. - items: - description: ProbesRequirementsOverride enables the user to override any container's env vars. - properties: - container: - description: The container name - type: string - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - required: - - container - type: object - type: array - tolerations: - description: If specified, the pod's tolerations. 
- items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: If specified, the pod's topology spread constraints. - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. It''s the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It''s considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. 
- type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - resources: - description: If specified, the container's resources. - items: - description: The pod this Resource is used to specify the requests and limits for a certain container based on the name. - properties: - container: - description: The name of the container - type: string - limits: - properties: - cpu: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - memory: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - type: object - requests: - properties: - cpu: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - memory: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - type: object - type: object - type: array - services: - description: A mapping of service name to override - type: array - items: - type: object - properties: - name: - description: The name of the service - type: string - labels: - additionalProperties: - type: string - description: Labels overrides labels for the service - type: object - annotations: - additionalProperties: - type: string - description: Annotations overrides labels for the service - type: object - selector: - additionalProperties: - type: string - description: Selector overrides selector for the service - type: object - podDisruptionBudgets: - description: A mapping of podDisruptionBudget name to override - type: array - items: - type: object - properties: - name: - description: The name of the podDisruptionBudget - type: string - minAvailable: - anyOf: - - type: integer - - type: string - description: An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%". - x-kubernetes-int-or-string: true - source: - description: The source configuration for Knative Eventing - properties: - ceph: - description: Ceph settings - properties: - enabled: - type: boolean - type: object - github: - description: GitHub settings - properties: - enabled: - type: boolean - type: object - gitlab: - description: GitLab settings - properties: - enabled: - type: boolean - type: object - kafka: - description: Apache Kafka settings - properties: - enabled: - type: boolean - type: object - rabbitmq: - description: RabbitMQ settings - properties: - enabled: - type: boolean - type: object - redis: - description: Redis settings - properties: - enabled: - type: boolean - type: object - type: object - manifests: - description: A list of eventing manifests, which will be installed by the operator - items: - properties: - URL: - description: The link of the manifest URL - type: string - type: object - type: array - registry: - description: A means to override the corresponding deployment images in the upstream. 
This affects both apps/v1.Deployment and caching.internal.knative.dev/v1alpha1.Image. - properties: - default: - description: The default image reference template to use for all knative images. Takes the form of example-registry.io/custom/path/${NAME}:custom-tag - type: string - imagePullSecrets: - description: A list of secrets to be used when pulling the knative images. The secret must be created in the same namespace as the knative-eventing deployments, and not the namespace of this resource. - items: - properties: - name: - description: The name of the secret. - type: string - type: object - type: array - override: - additionalProperties: - type: string - description: A map of a container name or image name to the full image location of the individual knative image. - type: object - type: object - sinkBindingSelectionMode: - description: Specifies the selection mode for the sinkbinding webhook. If the value is `inclusion`, only namespaces/objects labelled as `bindings.knative.dev/include:true` will be considered. If `exclusion` is selected, only `bindings.knative.dev/exclude:true` label is checked and these will NOT be considered. The default is `exclusion`. - type: string - version: - description: The version of Knative Eventing to be installed - type: string - type: object - status: - properties: - conditions: - description: The latest available observations of a resource's current state. - items: - properties: - lastTransitionTime: - description: LastTransitionTime is the last time the condition transitioned from one status to another. We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic differences (all other things held constant). - type: string - message: - description: A human readable message indicating details about the transition. - type: string - reason: - description: The reason for the condition's last transition. - type: string - severity: - description: Severity with which to treat failures of this type of condition. When this is not specified, it defaults to Error. - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: Type of condition. - type: string - required: - - type - - status - type: object - type: array - manifests: - description: The list of eventing manifests, which have been installed by the operator - items: - type: string - type: array - observedGeneration: - description: The generation last processed by the controller - type: integer - version: - description: The version of the installed release - type: string - type: object - type: object - additionalPrinterColumns: - - jsonPath: .status.version - name: Version - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].reason - name: Reason - type: string - names: - kind: KnativeEventing - listKind: KnativeEventingList - plural: knativeeventings - singular: knativeeventing - scope: Namespaced - conversion: - strategy: Webhook - webhook: - conversionReviewVersions: ["v1beta1"] - clientConfig: - service: - name: operator-webhook - namespace: default - path: /resource-conversion - ---- -# Copyright 2021 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
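To make the KnativeEventing schema above easier to follow, here is a minimal sketch of a custom resource that exercises the `version`, `source`, `registry`, and `sinkBindingSelectionMode` fields it describes. This is illustrative only: the registry host, tag template, and pull-secret name are hypothetical placeholders, not values taken from this repository.

```yaml
# Illustrative sketch: field names follow the KnativeEventing CRD schema above;
# the registry host and secret name are hypothetical placeholders.
apiVersion: operator.knative.dev/v1beta1
kind: KnativeEventing
metadata:
  name: knative-eventing
  namespace: knative-eventing
spec:
  version: "1.9"                       # spec.version: the Knative Eventing release to install
  source:
    kafka:
      enabled: true                    # enable the Apache Kafka source component
  registry:
    default: example-registry.io/knative/${NAME}:custom-tag   # template applied to all knative images
    imagePullSecrets:
      - name: example-pull-secret      # secret must exist alongside the knative-eventing deployments
  sinkBindingSelectionMode: inclusion  # only namespaces/objects labelled bindings.knative.dev/include=true
```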
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: knativeservings.operator.knative.dev - labels: - operator.knative.dev/release: "v1.9.6" - app.kubernetes.io/version: "1.9.6" - app.kubernetes.io/part-of: knative-operator -spec: - group: operator.knative.dev - versions: - - name: v1beta1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - description: Schema for the knativeservings API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the desired state of KnativeServing - properties: - additionalManifests: - description: A list of the additional serving manifests, which will be installed by the operator - items: - properties: - URL: - description: The link of the additional manifest URL - type: string - type: object - type: array - config: - additionalProperties: - additionalProperties: - type: string - type: object - description: A means to override the corresponding entries in the upstream configmaps - type: object - controller-custom-certs: - description: Enabling the controller to trust registries with self-signed certificates - properties: - name: - description: The name of the ConfigMap or Secret - type: string - type: - description: One of ConfigMap or Secret - enum: - - ConfigMap - - Secret - - "" - type: string - type: object - high-availability: - description: Allows specification of HA control plane - properties: - replicas: - description: The number of replicas that HA parts of the control plane will be scaled to - minimum: 0 - type: integer - type: object - workloads: - description: A mapping of deployment or statefulset name to override - type: array - items: - type: object - properties: - name: - description: The name of the deployment - type: string - labels: - additionalProperties: - type: string - description: Labels overrides labels for the deployment and its template. - type: object - livenessProbes: - description: LivenessProbes overrides liveness probes for the containers. - items: - description: ProbesRequirementsOverride enables the user to override any container's env vars. - properties: - container: - description: The container name - type: string - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
- format: int32 - type: integer - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - required: - - container - type: object - type: array - annotations: - additionalProperties: - type: string - description: Annotations overrides labels for the deployment and its template. - type: object - env: - description: Env overrides env vars for the containers. - items: - properties: - container: - description: The container name - type: string - envVars: - description: The desired EnvVarRequirements - items: - description: EnvVar represents an environment variable present in a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the ConfigMap or its key must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - required: - - container - type: object - type: array - replicas: - description: The number of replicas that HA parts of the control plane will be scaled to - type: integer - minimum: 0 - nodeSelector: - additionalProperties: - type: string - description: NodeSelector overrides nodeSelector for the deployment. - type: object - readinessProbes: - description: ReadinessProbes overrides readiness probes for the containers. - items: - description: ProbesRequirementsOverride enables the user to override any container's env vars. - properties: - container: - description: The container name - type: string - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
- format: int32 - type: integer - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - required: - - container - type: object - type: array - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: If specified, the pod's topology spread constraints. - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. 
- type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. It''s the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It''s considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - version: - description: Version the cluster should be on. - type: string - volumeMounts: - description: VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. VolumeMounts specified will be appended to other VolumeMounts in the alertmanager container, that are generated as a result of StorageSpec objects. - items: - description: VolumeMount describes a mounting of a Volume within a container. - properties: - mountPath: - description: Path within the container at which the volume should be mounted. Must not contain ':'. 
- type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. 
co-locate this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
- items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - resources: - description: If specified, the container's resources. - items: - description: The pod this Resource is used to specify the requests and limits for a certain container based on the name. - properties: - container: - description: The name of the container - type: string - limits: - properties: - cpu: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - memory: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - type: object - requests: - properties: - cpu: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - memory: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - type: object - type: object - type: array - deployments: - description: A mapping of deployment name to override - type: array - items: - type: object - properties: - name: - description: The name of the deployment - type: string - labels: - additionalProperties: - type: string - description: Labels overrides labels for the deployment and its template. - type: object - annotations: - additionalProperties: - type: string - description: Annotations overrides labels for the deployment and its template. - type: object - env: - description: Env overrides env vars for the containers. - items: - properties: - container: - description: The container name - type: string - envVars: - description: The desired EnvVarRequirements - items: - description: EnvVar represents an environment variable present in a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. 
- type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its key must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - required: - - container - type: object - type: array - livenessProbes: - description: LivenessProbes overrides liveness probes for the containers. - items: - description: ProbesRequirementsOverride enables the user to override any container's env vars. - properties: - container: - description: The container name - type: string - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
- format: int32 - type: integer - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - required: - - container - type: object - type: array - replicas: - description: The number of replicas that HA parts of the control plane will be scaled to - type: integer - minimum: 0 - nodeSelector: - additionalProperties: - type: string - description: NodeSelector overrides nodeSelector for the deployment. - type: object - readinessProbes: - description: ReadinessProbes overrides readiness probes for the containers. - items: - description: ProbesRequirementsOverride enables the user to override any container's env vars. - properties: - container: - description: The container name - type: string - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. 
Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - required: - - container - type: object - type: array - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: If specified, the pod's topology spread constraints. - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. It''s the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It''s considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
- properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - resources: - description: If specified, the container's resources. - items: - description: The pod this Resource is used to specify the requests and limits for a certain container based on the name. - properties: - container: - description: The name of the container - type: string - limits: - properties: - cpu: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - memory: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - type: object - requests: - properties: - cpu: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - memory: - pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ - type: string - type: object - type: object - type: array - services: - description: A mapping of service name to override - type: array - items: - type: object - properties: - name: - description: The name of the service - type: string - labels: - additionalProperties: - type: string - description: Labels overrides labels for the service - type: object - annotations: - additionalProperties: - type: string - description: Annotations overrides labels for the service - type: object - selector: - additionalProperties: - type: string - description: Selector overrides selector for the service - type: object - podDisruptionBudgets: - description: A mapping of podDisruptionBudget name to override - type: array - items: - type: object - properties: - name: - description: The name of the podDisruptionBudget - type: string - minAvailable: - anyOf: - - type: integer - - type: string - description: An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%". - x-kubernetes-int-or-string: true - ingress: - description: The ingress configuration for Knative Serving - properties: - contour: - description: Contour settings - properties: - enabled: - type: boolean - type: object - istio: - description: Istio settings - properties: - enabled: - type: boolean - knative-ingress-gateway: - description: A means to override the knative-ingress-gateway - properties: - selector: - additionalProperties: - type: string - description: The selector for the ingress-gateway. - type: object - servers: - description: A list of server specifications. - items: - properties: - hosts: - description: One or more hosts exposed by this gateway. - items: - format: string - type: string - type: array - port: - properties: - name: - description: Label assigned to the port. - format: string - type: string - number: - description: A valid non-negative integer port number. - type: integer - target_port: - description: A valid non-negative integer target port number. 
- type: integer - protocol: - description: The protocol exposed on the port. - format: string - type: string - type: object - type: object - type: array - type: object - knative-local-gateway: - description: A means to override the knative-local-gateway - properties: - selector: - additionalProperties: - type: string - description: The selector for the ingress-gateway. - type: object - servers: - description: A list of server specifications. - items: - properties: - hosts: - description: One or more hosts exposed by this gateway. - items: - format: string - type: string - type: array - port: - properties: - name: - description: Label assigned to the port. - format: string - type: string - number: - description: A valid non-negative integer port number. - type: integer - target_port: - description: A valid non-negative integer target port number. - type: integer - protocol: - description: The protocol exposed on the port. - format: string - type: string - type: object - type: object - type: array - type: object - type: object - kourier: - description: Kourier settings - properties: - enabled: - type: boolean - service-type: - type: string - bootstrap-configmap: - type: string - type: object - type: object - security: - description: The security configuration for Knative Serving - properties: - securityGuard: - description: Security Guard settings - properties: - enabled: - type: boolean - type: object - type: object - manifests: - description: A list of serving manifests, which will be installed by the operator - items: - properties: - URL: - description: The link of the manifest URL - type: string - type: object - type: array - registry: - description: A means to override the corresponding deployment images in the upstream. This affects both apps/v1.Deployment and caching.internal.knative.dev/v1alpha1.Image. - properties: - default: - description: The default image reference template to use for all knative images. Takes the form of example-registry.io/custom/path/${NAME}:custom-tag - type: string - imagePullSecrets: - description: A list of secrets to be used when pulling the knative images. The secret must be created in the same namespace as the knative-serving deployments, and not the namespace of this resource. - items: - properties: - name: - description: The name of the secret. - type: string - type: object - type: array - override: - additionalProperties: - type: string - description: A map of a container name or image name to the full image location of the individual knative image. - type: object - type: object - version: - description: The version of Knative Serving to be installed - type: string - type: object - status: - description: Status defines the observed state of KnativeServing - properties: - conditions: - description: The latest available observations of a resource's current state. - items: - properties: - lastTransitionTime: - description: LastTransitionTime is the last time the condition transitioned from one status to another. We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic differences (all other things held constant). - type: string - message: - description: A human readable message indicating details about the transition. - type: string - reason: - description: The reason for the condition's last transition. - type: string - severity: - description: Severity with which to treat failures of this type of condition. When this is not specified, it defaults to Error. 
- type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: Type of condition. - type: string - required: - - type - - status - type: object - type: array - manifests: - description: The list of serving manifests, which have been installed by the operator - items: - type: string - type: array - observedGeneration: - description: The generation last processed by the controller - type: integer - version: - description: The version of the installed release - type: string - type: object - type: object - additionalPrinterColumns: - - jsonPath: .status.version - name: Version - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].reason - name: Reason - type: string - names: - kind: KnativeServing - listKind: KnativeServingList - plural: knativeservings - singular: knativeserving - scope: Namespaced - conversion: - strategy: Webhook - webhook: - conversionReviewVersions: ["v1beta1"] - clientConfig: - service: - name: operator-webhook - namespace: default - path: /resource-conversion diff --git a/escalation-eda/helm/eda-infra/templates/NOTES.txt b/escalation-eda/helm/eda-infra/templates/NOTES.txt deleted file mode 100644 index ef9c5f6..0000000 --- a/escalation-eda/helm/eda-infra/templates/NOTES.txt +++ /dev/null @@ -1,29 +0,0 @@ -{{ .Release.Name }} of {{ .Chart.Name }} installed - -Installed components: -======================= -{{- $serverlessOperatorInstalled := include "is-resource-installed" (list "operators.coreos.com/v1alpha1" "Subscription" "openshift-serverless" "serverless-operator" .Release.Name) }} -{{- $serverlessOperatorNamespaceInstalled := include "is-resource-installed" (list "v1" "Namespace" "" "openshift-serverless" .Release.Name) }} -{{- $knativeServingInstalled := include "is-resource-installed" (list "operator.knative.dev/v1beta1" "KnativeServing" "knative-serving" "knative-serving" .Release.Name) }} -{{- $knativeServingNamespaceInstalled := include "is-resource-installed" (list "v1" "Namespace" "" "knative-serving" .Release.Name) }} -{{- $knativeEventingInstalled := include "is-resource-installed" (list "operator.knative.dev/v1beta1" "KnativeEventing" "knative-eventing" "knative-eventing" .Release.Name) }} -{{- $knativeEventingNamespaceInstalled := include "is-resource-installed" (list "v1" "Namespace" "" "knative-eventing" .Release.Name) }} - -openshift-serverless Namespace: {{ $serverlessOperatorNamespaceInstalled }} -Red Hat Serverless Operator: {{ $serverlessOperatorInstalled }} -knative-serving Namespace: {{ $knativeServingNamespaceInstalled }} -KnativeServing in knative-serving namespace: {{ $knativeServingInstalled }} -knative-eventing Namespace: {{ $knativeEventingNamespaceInstalled }} -KnativeEventing in knative-eventing namespace: {{ $knativeEventingInstalled }} -{{/* Empty line */}} - -Run the following to verify the release status and export the configuration: - helm status {{ .Release.Name }} - helm get all {{ .Release.Name }} - -{{/* Empty line */}} - -Run the following to wait until the services are ready: - oc wait -n knative-eventing knativeeventing/knative-eventing --for=condition=Ready --timeout=5m - oc wait -n knative-serving knativeserving/knative-serving --for=condition=Ready --timeout=5m - diff --git a/escalation-eda/helm/eda-infra/templates/_helpers.tpl b/escalation-eda/helm/eda-infra/templates/_helpers.tpl deleted file mode 100644 index c96709f..0000000 --- 
a/escalation-eda/helm/eda-infra/templates/_helpers.tpl +++ /dev/null @@ -1,58 +0,0 @@ - -{{- define "resource-exists" -}} - {{- $api := index . 0 -}} - {{- $kind := index . 1 -}} - {{- $namespace := index . 2 -}} - {{- $name := index . 3 -}} - {{- $existingResource := lookup $api $kind $namespace $name }} - {{- if empty $existingResource }} - {{- "false" -}} - {{- else }} - {{- "true" -}} - {{- end }} -{{- end }} - -{{- define "unmanaged-resource-exists" -}} - {{- $api := index . 0 -}} - {{- $kind := index . 1 -}} - {{- $namespace := index . 2 -}} - {{- $name := index . 3 -}} - {{- $releaseName := index . 4 -}} - {{- $unmanagedSubscriptionExists := "true" -}} - {{- $existingOperator := lookup $api $kind $namespace $name -}} - {{- if empty $existingOperator -}} - {{- "false" -}} - {{- else -}} - {{- $isManagedResource := include "is-managed-resource" (list $existingOperator $releaseName) -}} - {{- if eq $isManagedResource "true" -}} - {{- "false" -}} - {{- else -}} - {{- "true" -}} - {{- end -}} - {{- end -}} -{{- end -}} - -{{- define "is-managed-resource" -}} - {{- $resource := index . 0 -}} - {{- $releaseName := index . 1 -}} - {{- $resourceReleaseName := dig "metadata" "annotations" (dict "meta.helm.sh/release-name" "NA") $resource -}} - {{- if eq (get $resourceReleaseName "meta.helm.sh/release-name") $releaseName -}} - {{- "true" -}} - {{- else -}} - {{- "false" -}} - {{- end -}} -{{- end -}} - -{{- define "is-resource-installed" -}} - {{- $api := index . 0 -}} - {{- $kind := index . 1 -}} - {{- $namespace := index . 2 -}} - {{- $name := index . 3 -}} - {{- $releaseName := index . 4 -}} - {{- $unmanagedResourceExists := include "unmanaged-resource-exists" (list $api $kind $namespace $name $releaseName) }} - {{- if eq $unmanagedResourceExists "false" }} - {{- "YES" -}} - {{- else -}} - {{- "NO" -}} - {{- end -}} -{{- end }} diff --git a/escalation-eda/helm/eda-infra/templates/knative.yaml b/escalation-eda/helm/eda-infra/templates/knative.yaml deleted file mode 100644 index 7ad15a2..0000000 --- a/escalation-eda/helm/eda-infra/templates/knative.yaml +++ /dev/null @@ -1,65 +0,0 @@ -{{- $unmanagedNamespaceExists := include "unmanaged-resource-exists" (list "v1" "Namespace" "" "openshift-serverless" .Release.Name) }} -{{- if eq $unmanagedNamespaceExists "false" }} -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-serverless -{{- end }} ---- -{{- $unmanagedNamespaceExists := include "unmanaged-resource-exists" (list "v1" "Namespace" "" "knative-serving" .Release.Name) }} -{{- if eq $unmanagedNamespaceExists "false" }} -apiVersion: v1 -kind: Namespace -metadata: - name: knative-serving -{{- end }} ---- -{{- $unmanagedNamespaceExists := include "unmanaged-resource-exists" (list "v1" "Namespace" "" "knative-eventing" .Release.Name) }} -{{- if eq $unmanagedNamespaceExists "false" }} -apiVersion: v1 -kind: Namespace -metadata: - name: knative-eventing -{{- end }} ---- -{{- $unmanagedSubscriptionExists := include "unmanaged-resource-exists" (list "operators.coreos.com/v1alpha1" "Subscription" "openshift-serverless" "serverless-operator" .Release.Name) }} -{{- if eq $unmanagedSubscriptionExists "false" }} -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: serverless-operator - namespace: openshift-serverless -spec: - channel: stable - installPlanApproval: Automatic - name: serverless-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ---- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: 
serverless-operator-group - namespace: openshift-serverless -spec: {} -{{- end }} ---- -{{- $unmanagedKnativeEventingExists := include "unmanaged-resource-exists" (list "operator.knative.dev/v1beta1" "KnativeEventing" "knative-eventing" "knative-eventing" .Release.Name) }} -{{- if eq $unmanagedKnativeEventingExists "false" }} -kind: KnativeEventing -apiVersion: operator.knative.dev/v1beta1 -metadata: - name: knative-eventing - namespace: knative-eventing -spec: {} -{{- end }} ---- -{{- $unmanagedKnativeServingExists := include "unmanaged-resource-exists" (list "operator.knative.dev/v1beta1" "KnativeServing" "knative-serving" "knative-serving" .Release.Name) }} -{{- if eq $unmanagedKnativeServingExists "false" }} -apiVersion: operator.knative.dev/v1beta1 -kind: KnativeServing -metadata: - name: knative-serving - namespace: knative-serving -spec: {} -{{- end }} diff --git a/escalation-eda/helm/eda-infra/values.yaml b/escalation-eda/helm/eda-infra/values.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/escalation-eda/helm/escalation-eda/.gitignore b/escalation-eda/helm/escalation-eda/.gitignore deleted file mode 100644 index 8b4bda0..0000000 --- a/escalation-eda/helm/escalation-eda/.gitignore +++ /dev/null @@ -1 +0,0 @@ -resources diff --git a/escalation-eda/helm/escalation-eda/.helmignore b/escalation-eda/helm/escalation-eda/.helmignore deleted file mode 100644 index 4779240..0000000 --- a/escalation-eda/helm/escalation-eda/.helmignore +++ /dev/null @@ -1,26 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ - -# CI script -prepare.sh diff --git a/escalation-eda/helm/escalation-eda/Chart.yaml b/escalation-eda/helm/escalation-eda/Chart.yaml deleted file mode 100644 index b0be114..0000000 --- a/escalation-eda/helm/escalation-eda/Chart.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v2 -name: escalation-eda -description: | - A Helm chart to install the escalation application(s): - * jira-listener Knative service - * (optional) event-display Knative service - * required Knative eventing resources (Trigger, SinkBinding and Broker) -type: application -version: "1.0.0" -appVersion: "1.0.0" diff --git a/escalation-eda/helm/escalation-eda/prepare.sh b/escalation-eda/helm/escalation-eda/prepare.sh deleted file mode 100755 index 1e4cb16..0000000 --- a/escalation-eda/helm/escalation-eda/prepare.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -mkdir -p resources -cp -r ../../escalation-swf/src/main/resources . - -# Logic to copy subflow and in case update the SWF.id field -# ...
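The eda-infra templates above create each Namespace, Subscription, and Knative custom resource only when the `unmanaged-resource-exists` helper finds no pre-existing copy owned outside the release; the helper is built on Helm's `lookup` function, which returns an empty result during a client-side render. A minimal sketch of how you might exercise those guards, assuming the chart is rendered from the repository root and using an illustrative release name:

```bash
# Client-side render: `lookup` returns nothing offline, so every guard
# evaluates to "false" and all guarded resources are emitted.
helm template eda-infra ./escalation-eda/helm/eda-infra

# On newer Helm releases (3.13+), a server-side dry run lets `lookup` query
# the cluster, so resources that already exist outside the release are skipped.
helm install eda-infra ./escalation-eda/helm/eda-infra --dry-run=server
```

Both commands only preview the rendered manifests; an actual `helm install` (or `helm upgrade --install`) is still required to create the resources.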
\ No newline at end of file diff --git a/escalation-eda/helm/escalation-eda/templates/NOTES.txt b/escalation-eda/helm/escalation-eda/templates/NOTES.txt deleted file mode 100644 index 30b3618..0000000 --- a/escalation-eda/helm/escalation-eda/templates/NOTES.txt +++ /dev/null @@ -1,37 +0,0 @@ -{{ .Release.Name }} release of chart {{ .Chart.Name }} installed in namespace {{ .Values.namespace.name }} - -Run the following to verify the release status and export the configuration: - helm status {{ .Release.Name }} - helm get all {{ .Release.Name }} - -{{/* Empty line */}} -Verify the status of the Knative eventing resources: - oc get -n {{ .Values.namespace.name }} trigger,broker,sinkbinding - -Verify the status of the Knative services: - oc get -n {{ .Values.namespace.name }} ksvc - -Run the following to wait until the services are ready: - oc wait -n {{ .Values.namespace.name }} ksvc -l app=jira-listener --for=condition=Ready --timeout=5m - oc wait -n {{ .Values.namespace.name }} ksvc -l app=escalationswf --for=condition=Ready --timeout=5m -{{- if .Values.eventdisplay.enabled }} - oc wait -n {{ .Values.namespace.name }} ksvc -l app=event-display --for=condition=Ready --timeout=5m -{{- end }} - -The URL to be used in the Jira webhook is: -{{- if .Values.letsEncryptCertificate }} - JIRA_LISTENER_URL=$(oc get route -n knative-serving-ingress {{ .Values.jiralistener.name }} -oyaml | yq '.status.ingress[0].host') - echo "https://${JIRA_LISTENER_URL//\"/}/webhook/jira" -{{- else }} - JIRA_LISTENER_URL=$(oc get ksvc jira-listener -oyaml | yq '.status.url') - echo "${JIRA_LISTENER_URL//\"/}/webhook/jira" -{{- end }} - -Log the applications using: - oc logs -f -l app=jira-listener -c jira-listener -n {{ .Values.namespace.name }} - oc logs -f -l app=escalation-swf -n {{ .Values.namespace.name }} -{{- if .Values.eventdisplay.enabled }} - oc logs -f -l app=event-display -c event-display -n {{ .Values.namespace.name }} -{{- end }} - - diff --git a/escalation-eda/helm/escalation-eda/templates/_helpers.tpl b/escalation-eda/helm/escalation-eda/templates/_helpers.tpl deleted file mode 100644 index e69de29..0000000 diff --git a/escalation-eda/helm/escalation-eda/templates/escalation.yaml b/escalation-eda/helm/escalation-eda/templates/escalation.yaml deleted file mode 100644 index 863094d..0000000 --- a/escalation-eda/helm/escalation-eda/templates/escalation.yaml +++ /dev/null @@ -1,169 +0,0 @@ -{{- if .Values.namespace.create }} -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .Values.namespace.name }} -{{- end }} ---- -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: {{ .Values.jiralistener.name }} - namespace: {{ .Values.namespace.name }} - labels: - app: jira-listener -{{- if .Values.letsEncryptCertificate }} - annotations: - serving.knative.openshift.io/disableRoute: "true" -{{- end }} -spec: - template: - metadata: - labels: - app: jira-listener - annotations: - autoscaling.knative.dev/min-scale: "1" - spec: - containers: - - image: {{ .Values.jiralistener.image }} - imagePullPolicy: Always - name: jira-listener - resources: - limits: - memory: 200Mi ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: escalation-config - namespace: {{ .Values.namespace.name }} -data: - JIRA_URL: {{ .Values.escalationSwf.jira.url}} - JIRA_PROJECT: {{ .Values.escalationSwf.jira.project}} - JIRA_ISSUE_TYPE: '{{ .Values.escalationSwf.jira.issueType}}' - ESCALATION_TIMEOUT_SECONDS: '{{ .Values.escalationSwf.escalationTimeoutSeconds}}' ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: {{ 
.Values.escalationSwf.name }}-props - namespace: {{ .Values.namespace.name }} -{{- $fileName := printf "resources/%s" .Values.escalationSwf.ticketingServiceProps }} -data: - application.properties: |- -{{ .Files.Get "resources/application.properties" | indent 4 }} -{{ .Files.Get $fileName | indent 4 }} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: escalation-specs - namespace: {{ .Values.namespace.name }} -data: -{{ (tpl (.Files.Glob "resources/specs/*").AsConfig .) | indent 2 }} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: escalation-subflow - namespace: {{ .Values.namespace.name }} -data: -# TODO Replace original id with ticketing-service? - ticketing-service.sw.yaml: |- -{{- $fileName := printf "resources/%s" .Values.escalationSwf.ticketingServiceSubflow }} -{{ .Files.Get $fileName | indent 4 }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: escalation-secret - namespace: {{ .Values.namespace.name }} -type: Opaque -stringData: - JIRA_USERNAME: {{ .Values.escalationSwf.jira.username}} - JIRA_API_TOKEN: {{ .Values.escalationSwf.jira.apiToken}} - MAILTRAP_API_TOKEN: {{ .Values.escalationSwf.mailTrap.apiToken}} - MAILTRAP_INBOX_ID: '{{ .Values.escalationSwf.mailTrap.inboxId}}' - OCP_API_SERVER_URL: {{ .Values.escalationSwf.ocp.apiServerUrl}} - OCP_API_SERVER_TOKEN: {{ .Values.escalationSwf.ocp.apiServerToken}} ---- -apiVersion: sonataflow.org/v1alpha08 -kind: SonataFlow -metadata: - name: {{ .Values.escalationSwf.name }} - namespace: {{ .Values.namespace.name }} - labels: - app: escalation-swf - annotations: - sonataflow.org/description: Escalation workflow - sonataflow.org/version: 0.0.1 - sonataflow.org/profile: {{ .Values.escalationSwf.profile }} -spec: - resources: - configMaps: - - configMap: - name: escalation-specs - workflowPath: specs - - configMap: - name: escalation-subflow - flow: -{{ .Files.Get "resources/ticketEscalation.sw.yaml" | indent 4 }} - podTemplate: - container: - envFrom: - - configMapRef: - name: escalation-config - - secretRef: - name: escalation-secret ---- -{{- if .Values.eventdisplay.enabled }} -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: event-display - namespace: {{ .Values.namespace.name }} - labels: - app: event-display -spec: - template: - metadata: - labels: - app: event-display - annotations: - autoscaling.knative.dev/min-scale: "1" - spec: - containers: - - image: gcr.io/knative-releases/knative.dev/eventing/cmd/event_display - name: event-display - resources: - limits: - memory: 200Mi -{{- end }} ---- -{{- if .Values.letsEncryptCertificate }} -{{- $cluster := lookup "config.openshift.io/v1" "Ingress" "" "cluster" }} -{{- $domain := "UNKNOWN" }} -{{- if hasKey $cluster "spec" }} - {{- if hasKey $cluster.spec "domain" }} - {{- $domain = $cluster.spec.domain}} - {{- end }} -{{- end }} - -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - annotations: - haproxy.router.openshift.io/timeout: 600s - kubernetes.io/tls-acme: "true" - name: {{ .Values.jiralistener.name }} - namespace: knative-serving-ingress -spec: - host: {{ .Values.jiralistener.name }}-{{ .Values.namespace.name }}.{{ $domain }} - port: - targetPort: http2 - to: - kind: Service - name: kourier - weight: 100 - wildcardPolicy: None -{{- end }} diff --git a/escalation-eda/helm/escalation-eda/templates/knative.yaml b/escalation-eda/helm/escalation-eda/templates/knative.yaml deleted file mode 100644 index 7228eae..0000000 --- a/escalation-eda/helm/escalation-eda/templates/knative.yaml +++ /dev/null @@ -1,55 +0,0 @@ ---- -apiVersion: 
eventing.knative.dev/v1 -kind: Broker -metadata: - name: ticket-events - namespace: {{ .Values.namespace.name }} ---- -apiVersion: eventing.knative.dev/v1 -kind: Trigger -metadata: - name: ticket-closed - namespace: {{ .Values.namespace.name }} -spec: - broker: ticket-events - filter: - attributes: - type: dev.parodos.escalation - subscriber: - ref: - apiVersion: serving.knative.dev/v1 - kind: Service - name: {{ .Values.escalationSwf.name }} ---- -{{- if .Values.eventdisplay.enabled }} -apiVersion: eventing.knative.dev/v1 -kind: Trigger -metadata: - name: event-display-trigger - namespace: {{ .Values.namespace.name }} -spec: - broker: ticket-events - subscriber: - ref: - apiVersion: serving.knative.dev/v1 - kind: Service - name: event-display -{{- end }} ---- -apiVersion: sources.knative.dev/v1 -kind: SinkBinding -metadata: - name: ticket-source - namespace: {{ .Values.namespace.name }} -spec: - subject: - apiVersion: serving.knative.dev/v1 - kind: Service - name: {{ .Values.jiralistener.name }} - sink: - ref: - apiVersion: eventing.knative.dev/v1 - kind: Broker - name: ticket-events - namespace: {{ .Values.namespace.name }} - diff --git a/escalation-eda/helm/escalation-eda/values.yaml b/escalation-eda/helm/escalation-eda/values.yaml deleted file mode 100644 index 4cc6a7e..0000000 --- a/escalation-eda/helm/escalation-eda/values.yaml +++ /dev/null @@ -1,29 +0,0 @@ -namespace: - create: true - name: escalation -letsEncryptCertificate: false -jiralistener: - # Override this name to overcome the Let's Encrypt rate limit as defined here: https://letsencrypt.org/docs/duplicate-certificate-limit/ - name: jira-listener - image: quay.io/orchestrator/jira-listener:1.0.0-SNAPSHOT -escalationSwf: - profile: dev - name: escalation-swf - ticketingServiceSubflow: jiraSwf.sw.yaml - ticketingServiceProps: application-jira.properties - # image: quay.io/orchestrator/escalation-swf:1.0 - jira: - url: null - username: null - apiToken: null - project: null - issueType: null - mailTrap: - apiToken: null - inboxId: null - ocp: - apiServerUrl: null - apiServerToken: null - escalationTimeoutSeconds: 30 -eventdisplay: - enabled: true diff --git a/escalation-eda/jira-listener/README.md b/escalation-eda/jira-listener/README.md deleted file mode 100644 index 5111fa7..0000000 --- a/escalation-eda/jira-listener/README.md +++ /dev/null @@ -1,155 +0,0 @@ -# Jira listener -An application to monitor Jira webhooks and send a CloudEvent to the configured event sink whenever they match these requirements: -* They refer to closed tickets (e.g. status category key is `done`) -* They contain the labels added by the `Escalation workflow` application, e.g.: - * `workflowInstanceId=` - * `workflowName=escalation` - -The generated event includes only the relevant data, e.g.: -```json -{ - "ticketId":"ES-3", - "workFlowInstanceId":"500", - "workflowName":"escalation", - "status":"done" -} -``` - -No events are generated for discarded webhooks. - -## Design notes - -### Externalized configuration -The following environment variables can modify the configuration properties: - -| Variable | Description | Default value | -|----------|-------------|---------------| -| CLOUD_EVENT_TYPE | The value of `ce-type` header in the generated `CloudEvent` | `dev.parodos.escalation` | -| CLOUD_EVENT_SOURCE | The value of `ce-source` header in the generated `CloudEvent` | `jira.listener` | -| WORKFLOW_INSTANCE_ID_LABEL | The name part of the Jira ticket label that contains the ID of the relates SWF instance (e.g. 
`workflowInstanceId=123`) | `workflowInstanceId` | -| WORKFLOW_NAME_LABEL | The name part of the Jira ticket label that contains the name of the SWF (e.g. `workflowName=escalation`) | `workflowName` | -| EXPECTED_WORKFLOW_NAME | The expected value part of the Jira ticket label that contains the name of the SWF (e.g. `workflowName=escalation`) | `escalation` | -| K_SINK | The URL where to POST the generated `CloudEvent` (usually injected by the `SinkBinding` resource) | - | - -### Event modeling -Instead of leveraging on the [Jira Java SDK](https://developer.atlassian.com/server/jira/platform/java-apis/), we used a simplified model of the relevant data, -defined in the [WebhookEvent](./src/main/java/dev/parodos/jiralistener/model/WebhookEvent.java) Java class. This way we can simplify the dependency stack -and also limit the risk of parsing failures due to unexpected changes in the payload format. - -Parsing was derived from the original example in [this Backstage repo](https://github.com/tiagodolphine/backstage/blob/eedfe494dd313a3ad6a484c0596ba12d6199c1a8/plugins/swf-backend/src/service/JiraService.ts#L66C19-L66C40) - -## Building and publishing the image -The application runs from a containerized image already avaliable at `quay.io/orchestrator/jira-listener-jvm`. -You can build and publish your own image using: -```bash -mvn clean package -docker build -f src/main/docker/Dockerfile.jvm -t quarkus/jira-listener-jvm . -docker tag quarkus/jira-listener-jvm quay.io/_YOUR_QUAY_ID_/jira-listener-jvm -docker push quay.io/_YOUR_QUAY_ID_/jira-listener-jvm -``` - -## Running in development environment -Use this command to run the example at `localhost:8080` with remote debugger enabled at `5005`: -```bash -mvn quarkus:dev -``` - -## Deploying as Knative service -Follow the instructions at [Deploying the example](../README.md#deploying-the-example) - -### SSL -If you enabled the automatic route creation in Knative services, you can probably hit this error if you try to `curl` to its `https` endpoint and -OpenShift is publishing a self signed certificate: -``` -curl failed to verify the legitimacy of the server and therefore could not -establish a secure connection to it. To learn more about this situation and -how to fix it, please visit the web page mentioned above. 
-``` - -Since "Jira Cloud's built-in webhooks can handle sending requests over SSL to hosts using publicly signed certificates", we need to disable the automatic Route -creation using the following annotation in the `jira-listener` service: -```yaml - annotations: - serving.knative.openshift.io/disableRoute: "true" -``` -Then, we use the [Let's Encrypt](https://letsencrypt.org/) service to leverage its free publicly-signed certificates, according to this -[Securing Jira Webhooks discussion](https://community.atlassian.com/t5/Jira-questions/Securing-Jira-Webhooks/qaq-p/1850259) - -The following procedure is not integrated with the provided Helm charts and comes from this [article](https://developer.ibm.com/tutorials/secure-red-hat-openshift-routes-with-lets-encrypt/): -```bash -oc new-project acme-operator -oc create -n acme-operator \ - -fhttps://raw.githubusercontent.com/tnozicka/openshift-acme/master/deploy/cluster-wide/{clusterrole,serviceaccount,issuer-letsencrypt-live,deployment}.yaml -oc create clusterrolebinding openshift-acme --clusterrole=openshift-acme --serviceaccount="$( oc project -q ):openshift-acme" --dry-run -o yaml | oc create -f - -``` - -The provided Helm chart instead generates a Route in `knative-serving-ingress` namespace with the proper annotation to expose the publicly-signed certificate, e.g.: -```yaml -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - annotations: - haproxy.router.openshift.io/timeout: 600s - kubernetes.io/tls-acme: "true" - name: jira-listener - namespace: knative-serving-ingress -... -``` - -Run the following to uninstall the Let's Encrypt operator: -```bash -oc delete clusterrolebinding openshift-acme -oc delete -n acme-operator \ - -fhttps://raw.githubusercontent.com/tnozicka/openshift-acme/master/deploy/cluster-wide/{clusterrole,serviceaccount,issuer-letsencrypt-live,deployment}.yaml -oc delete project acme-operator -``` - -## Testing with curl -Initialize the `JIRA_WEBHOOK_URL` variable in case of local development environment: -```bash -JIRA_WEBHOOK_URL="http://localhost:8080/webhook/jira" -``` -Otherwise, in case of Knative environment: -```bash -JIRA_LISTENER_URL=$(oc get route -n knative-serving-ingress jira-listener -oyaml | yq '.status.ingress[0].host') -JIRA_WEBHOOK_URL="https://${JIRA_LISTENER_URL//\"/}/webhook/jira" -``` - -Then, use one of the sample json documents in [src/test/resources](./src/test/resources/) to trigger the `/webhook/jira` endpoint: -```bash -curl -v -X POST -d @./src/test/resources/valid.json -H "Content-Type: application/json" -k "${JIRA_WEBHOOK_URL}" -curl -v -X POST -d @./src/test/resources/invalid.json -H "Content-Type: application/json" -k "${JIRA_WEBHOOK_URL}" -``` - -### Troubleshooting the Duplicate Certificate Limit error -`Let's Encrypt` allows 5 certificate requests per week for each unique set of hostnames requested for the certificate. - -The issue is detected when the `jira-listener` service is not receiving any webhook event, and the above `JIRA_WEBHOOK_URL` uses an `http` -protocol instead of the expected `https`. 
- -To overcome this issue, you can define a different name for the `jira-listener` service by setting the property `jiralistener.name` as in: -```bash -helm upgrade -n default escalation-eda helm/escalation-eda --set jiralistener.name=my-jira-listener --debug -``` - -### Troubleshooting the SAN short enough to fit in CN issue -Note that the created hostname cannot exceed the 64 characters as described in: [Let's Encrypt (NewOrder request did not include a SAN short enough to fit in CN)](https://support.cpanel.net/hc/en-us/articles/4405807056023-Let-s-Encrypt-NewOrder-request-did-not-include-a-SAN-short-enough-to-fit-in-CN-) ->This error occurs when attempting to request an SSL certificate from Let's Encrypt for a domain name longer than 64 characters - -## Configuring the Jira server -### API token -In case you need to interact with Jira server using the [REST APIs])https://developer.atlassian.com/server/jira/platform/rest-apis/, you need an API Token: -* [API Tokens](https://id.atlassian.com/manage-profile/security/api-tokens) -* [Basic auth for REST APIs](https://developer.atlassian.com/cloud/jira/platform/basic-auth-for-rest-apis/) - -### Webhook -If you use Jira Cloud, you can create the webhook at https://_YOUR_JIRA_/plugins/servlet/webhooks, then: -* Configure `Issue related event` of type `update` -* Use the value of `JIRA_WEBHOOK_URL` calculated before as the URL - -![Jira webhook](../doc/webhook.png) - -The webhook event format is exaplained in [Issue: Get issue](https://docs.atlassian.com/software/jira/docs/api/REST/9.11.0/#api/2/issue-getIssue), -see an [Example](https://jira.atlassian.com/rest/api/2/issue/JRA-2000) - -In case of issues receiving the events, you can troubleshoot using [RequestBin](https://requestbin.com/), see [How to collect data to troubleshoot WebHook failure in Jira](https://confluence.atlassian.com/jirakb/how-to-collect-data-to-troubleshoot-webhook-failure-in-jira-397083035.html) \ No newline at end of file diff --git a/escalation-eda/jira-listener/pom.xml b/escalation-eda/jira-listener/pom.xml deleted file mode 100644 index 8092b2f..0000000 --- a/escalation-eda/jira-listener/pom.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - 4.0.0 - jira-listener - - dev.parodos - swf-parent - 1.0.0-SNAPSHOT - ../../swf-parent - - - 2.5.0 - - - - io.quarkus - quarkus-arc - - - io.quarkus - quarkus-resteasy-reactive-jackson - - - io.quarkus - quarkus-rest-client-reactive - - - io.quarkus - quarkus-rest-client-reactive-jackson - - - io.quarkus - quarkus-smallrye-openapi - - - io.cloudevents - cloudevents-http-restful-ws-jakarta - ${cloudevents.version} - - - io.cloudevents - cloudevents-json-jackson - ${cloudevents.version} - - - org.projectlombok - lombok - 1.18.30 - provided - - - - io.quarkus - quarkus-junit5 - test - - - io.rest-assured - rest-assured - test - - - com.github.tomakehurst - wiremock - 3.0.1 - test - - - - - central - https://repo.maven.apache.org/maven2 - - - atlassian-maven - https://packages.atlassian.com/mvn/maven-atlassian-external - - - - - native - - - native - - - - false - native - - - - \ No newline at end of file diff --git a/escalation-eda/jira-listener/src/main/docker/Dockerfile.jvm b/escalation-eda/jira-listener/src/main/docker/Dockerfile.jvm deleted file mode 100644 index 05117e9..0000000 --- a/escalation-eda/jira-listener/src/main/docker/Dockerfile.jvm +++ /dev/null @@ -1,98 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# 
./mvnw package -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/jira-listener-jvm . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/jira-listener-jvm -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. -# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 -# when running the container -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/jira-listener-jvm -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. -# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. 
-# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. (example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-11:1.16 - -ENV LANGUAGE='en_US:en' - - -# We make four distinct layers so if there are application changes the library layers can be re-used -COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ -COPY --chown=185 target/quarkus-app/*.jar /deployments/ -COPY --chown=185 target/quarkus-app/app/ /deployments/app/ -COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ - -EXPOSE 8080 -USER 185 -ENV AB_JOLOKIA_OFF="" -ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - -ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] - diff --git a/escalation-eda/jira-listener/src/main/docker/Dockerfile.legacy-jar b/escalation-eda/jira-listener/src/main/docker/Dockerfile.legacy-jar deleted file mode 100644 index f99cb0f..0000000 --- a/escalation-eda/jira-listener/src/main/docker/Dockerfile.legacy-jar +++ /dev/null @@ -1,94 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -Dquarkus.package.type=legacy-jar -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/jira-listener-legacy-jar . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/jira-listener-legacy-jar -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. -# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 -# when running the container -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/jira-listener-legacy-jar -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. -# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. 
This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. 
(example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-11:1.16 - -ENV LANGUAGE='en_US:en' - - -COPY target/lib/* /deployments/lib/ -COPY target/*-runner.jar /deployments/quarkus-run.jar - -EXPOSE 8080 -USER 185 -ENV AB_JOLOKIA_OFF="" -ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - -ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] diff --git a/escalation-eda/jira-listener/src/main/docker/Dockerfile.native b/escalation-eda/jira-listener/src/main/docker/Dockerfile.native deleted file mode 100644 index 2f484c1..0000000 --- a/escalation-eda/jira-listener/src/main/docker/Dockerfile.native +++ /dev/null @@ -1,27 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# -# Before building the container image run: -# -# ./mvnw package -Dnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native -t quarkus/jira-listener . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/jira-listener -# -### -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/escalation-eda/jira-listener/src/main/docker/Dockerfile.native-micro b/escalation-eda/jira-listener/src/main/docker/Dockerfile.native-micro deleted file mode 100644 index 7aba981..0000000 --- a/escalation-eda/jira-listener/src/main/docker/Dockerfile.native-micro +++ /dev/null @@ -1,30 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# It uses a micro base image, tuned for Quarkus native executables. -# It reduces the size of the resulting container image. -# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. -# -# Before building the container image run: -# -# ./mvnw package -Dnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/jira-listener . 
-# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/jira-listener -# -### -FROM quay.io/quarkus/quarkus-micro-image:2.0 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/EventNotifier.java b/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/EventNotifier.java deleted file mode 100644 index d5ad81f..0000000 --- a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/EventNotifier.java +++ /dev/null @@ -1,18 +0,0 @@ -package dev.parodos.jiralistener; - -import org.eclipse.microprofile.rest.client.inject.RegisterRestClient; - -import io.cloudevents.CloudEvent; -import io.cloudevents.jackson.JsonFormat; -import jakarta.ws.rs.POST; -import jakarta.ws.rs.Path; -import jakarta.ws.rs.Produces; - -@Path("/") -@RegisterRestClient(configKey="ce-emitter") -public interface EventNotifier { - - @POST - @Produces(JsonFormat.CONTENT_TYPE) - void emit(CloudEvent event); -} diff --git a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/JiraListenerResource.java b/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/JiraListenerResource.java deleted file mode 100644 index 51d3921..0000000 --- a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/JiraListenerResource.java +++ /dev/null @@ -1,66 +0,0 @@ -package dev.parodos.jiralistener; - -import java.io.IOException; -import java.lang.System.Logger; -import java.lang.System.Logger.Level; -import java.util.Map; - -import com.fasterxml.jackson.databind.ObjectMapper; - -import dev.parodos.jiralistener.JiraListenerService.OnEventResponse; -import dev.parodos.jiralistener.model.JiraIssue; -import dev.parodos.jiralistener.model.WebhookEvent; -import jakarta.inject.Inject; -import jakarta.ws.rs.Consumes; -import jakarta.ws.rs.POST; -import jakarta.ws.rs.Path; -import jakarta.ws.rs.core.MediaType; -import jakarta.ws.rs.core.Response; -import jakarta.ws.rs.core.Response.Status; - -@Path("/") -public class JiraListenerResource { - private Logger logger = System.getLogger(JiraListenerResource.class.getName()); - - @Inject - ObjectMapper mapper; - - @Inject - JiraListenerService jiraListenerService; - - // Test endpoint used in dev mode when not specifying a K_SINK variable - @POST - @Consumes(MediaType.APPLICATION_JSON) - @Path("/") - public void test(Object any) { - logger.log(Level.INFO, "RECEIVED " + any); - } - - @POST - @Consumes(MediaType.APPLICATION_JSON) - @Path("/webhook/jira") - public Response onEvent(Map requestBody) { - logger.log(Level.INFO, "Received " + requestBody); - - try { - WebhookEvent webhookEvent = mapper.readValue(mapper.writeValueAsBytes(requestBody), WebhookEvent.class); - logger.log(Level.INFO, "Received " + webhookEvent); - if (webhookEvent.getIssue() == null) { - logger.log(Level.WARNING, "Discarded because of missing field: issue"); - return Response.noContent().build(); - } - JiraIssue jiraIssue = webhookEvent.getIssue(); - - OnEventResponse response = jiraListenerService.onEvent(jiraIssue); - if (response.eventAccepted) { - return Response.ok(response.jiraTicketEventData).build(); - } - return Response.noContent().build(); - } catch (IOException e) { - return Response - .status(Status.BAD_REQUEST.getStatusCode(), - "Not a valid webhook event for a Jira issue: " 
+ e.getMessage()) - .build(); - } - } -} \ No newline at end of file diff --git a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/JiraListenerService.java b/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/JiraListenerService.java deleted file mode 100644 index aaf4ec2..0000000 --- a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/JiraListenerService.java +++ /dev/null @@ -1,154 +0,0 @@ -package dev.parodos.jiralistener; - -import java.lang.System.Logger; -import java.lang.System.Logger.Level; -import java.net.URI; -import java.util.List; -import java.util.Optional; -import java.util.UUID; - -import org.eclipse.microprofile.config.inject.ConfigProperty; -import org.eclipse.microprofile.rest.client.inject.RestClient; - -import com.fasterxml.jackson.databind.ObjectMapper; - -import dev.parodos.jiralistener.model.JiraTicketEventData; -import dev.parodos.jiralistener.model.JiraIssue; -import dev.parodos.jiralistener.model.JiraIssue.StatusCategory; -import io.cloudevents.CloudEvent; -import io.cloudevents.core.builder.CloudEventBuilder; -import io.cloudevents.core.data.PojoCloudEventData; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import jakarta.ws.rs.core.MediaType; - -@ApplicationScoped -public class JiraListenerService { - @ConfigProperty(name = "cloudevent.type") - String cloudeventType; - @ConfigProperty(name = "cloudevent.source") - String cloudeventSource; - - @ConfigProperty(name = "jira.webhook.label.workflowInstanceId") - String workflowInstanceIdJiraLabel; - @ConfigProperty(name = "jira.webhook.label.workflowName") - String workflowNameJiraLabel; - @ConfigProperty(name = "escalation.workflowName") - String expectedWorkflowName; - - private Logger logger = System.getLogger(JiraListenerService.class.getName()); - - @Inject - @RestClient - EventNotifier eventNotifier; - - @Inject - ObjectMapper mapper; - - static class OnEventResponse { - boolean eventAccepted; - JiraTicketEventData jiraTicketEventData; - } - - OnEventResponse onEvent(JiraIssue jiraIssue) { - OnEventResponse response = new OnEventResponse(); - response.eventAccepted = false; - - Optional ticket = validateIsAClosedJiraIssue(jiraIssue); - if (ticket.isPresent()) { - logger.log(Level.INFO, "Created ticket " + ticket.get()); - CloudEvent newCloudEvent = CloudEventBuilder.v1() - .withDataContentType(MediaType.APPLICATION_JSON) - .withExtension("kogitoprocrefid", ticket.get().getWorkFlowInstanceId()) - .withId(UUID.randomUUID().toString()) - .withType(cloudeventType) - .withSource(URI.create(cloudeventSource)) - .withData(PojoCloudEventData.wrap(ticket.get(), - mapper::writeValueAsBytes)) - .build(); - - logger.log(Level.INFO, "Emitting " + newCloudEvent); - eventNotifier.emit(newCloudEvent); - response.eventAccepted = true; - response.jiraTicketEventData = ticket.get(); - } - - return response; - } - - private Optional validateIsAClosedJiraIssue(JiraIssue jiraIssue) { - Optional notaClosedJiraIssue = Optional.empty(); - String issueKey = jiraIssue.getKey(); - if (jiraIssue.getKey() != null) { - if (jiraIssue.getFields() == null) { - logger.log(Level.WARNING, "Discarded because of missing field: issue.fields"); - return notaClosedJiraIssue; - } - - if (jiraIssue.getFields().getLabels() == null) { - logger.log(Level.WARNING, String.format("Discarded because of missing field: issue.fields.labels")); - return notaClosedJiraIssue; - } - List labels = jiraIssue.getFields().getLabels(); - - Optional workflowInstanceIdLabel = 
labels.stream() - .filter(l -> l.startsWith(workflowInstanceIdJiraLabel + "=")).findFirst(); - if (workflowInstanceIdLabel.isEmpty()) { - logger.log(Level.INFO, - String.format("Discarded because no %s label found", workflowInstanceIdJiraLabel)); - return notaClosedJiraIssue; - } - String workflowInstanceId = workflowInstanceIdLabel.get().split("=")[1]; - - Optional workflowNameLabel = labels.stream() - .filter(l -> l.startsWith(workflowNameJiraLabel + "=")).findFirst(); - if (workflowNameLabel.isEmpty()) { - logger.log(Level.INFO, String.format("Discarded because no %s label found", workflowNameJiraLabel)); - return notaClosedJiraIssue; - } - String workflowName = workflowNameLabel.get().split("=")[1]; - if (!workflowName.equals(expectedWorkflowName)) { - logger.log(Level.INFO, - String.format("Discarded because label %s is not matching the expected value %s", - workflowNameLabel.get(), expectedWorkflowName)); - return notaClosedJiraIssue; - } - - if (jiraIssue.getFields().getStatus() == null) { - logger.log(Level.WARNING, String.format("Discarded because of missing field: issue.fields.status")); - return notaClosedJiraIssue; - } - JiraIssue.Status status = jiraIssue.getFields().getStatus(); - - if (status.getStatusCategory() == null) { - logger.log(Level.WARNING, - String.format("Discarded because of missing field: issue.fields.status.statusCategory")); - return notaClosedJiraIssue; - } - StatusCategory statusCategory = status.getStatusCategory(); - - if (statusCategory.getKey() == null) { - logger.log(Level.WARNING, - String.format("Discarded because of missing field: issue.fields.status.statusCategory.key")); - return notaClosedJiraIssue; - } - String statusCategoryKey = statusCategory.getKey(); - - logger.log(Level.INFO, - String.format("Received Jira issue %s with workflowInstanceId %s, workflowName %s and status %s", - issueKey, - workflowInstanceId, workflowName, statusCategoryKey)); - if (!statusCategoryKey.equals("done")) { - logger.log(Level.INFO, "Discarded because not a completed issue but " + statusCategoryKey); - return notaClosedJiraIssue; - } - - return Optional.of(JiraTicketEventData.builder().ticketId(issueKey) - .workFlowInstanceId(workflowInstanceId) - .workflowName(workflowName).status(statusCategoryKey).build()); - } else { - logger.log(Level.INFO, "Discarded because of missing field: key"); - return notaClosedJiraIssue; - } - } -} diff --git a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/model/JiraIssue.java b/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/model/JiraIssue.java deleted file mode 100644 index 462418c..0000000 --- a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/model/JiraIssue.java +++ /dev/null @@ -1,49 +0,0 @@ -package dev.parodos.jiralistener.model; - -import java.util.List; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonProperty; - -import lombok.Builder; -import lombok.Data; -import lombok.extern.jackson.Jacksonized; - -@Data -@Builder -@Jacksonized -@JsonIgnoreProperties(ignoreUnknown = true) -public class JiraIssue { - @JsonProperty(required = true) - private String key; - @JsonProperty(required = true) - private Fields fields; - - @Data - @Builder - @Jacksonized - @JsonIgnoreProperties(ignoreUnknown = true) - public static class Fields { - private List labels; - private Status status; - - } - - @Data - @Builder - @Jacksonized - @JsonIgnoreProperties(ignoreUnknown = true) - public static class Status { - private 
StatusCategory statusCategory; - - } - - @Data - @Builder - @Jacksonized - @JsonIgnoreProperties(ignoreUnknown = true) - public static class StatusCategory { - private String key; - - } -} diff --git a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/model/JiraTicketEventData.java b/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/model/JiraTicketEventData.java deleted file mode 100644 index ce25f97..0000000 --- a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/model/JiraTicketEventData.java +++ /dev/null @@ -1,15 +0,0 @@ -package dev.parodos.jiralistener.model; - -import lombok.Builder; -import lombok.Data; -import lombok.extern.jackson.Jacksonized; - -@Builder -@Data -@Jacksonized -public class JiraTicketEventData { - private String ticketId; - private String workFlowInstanceId; - private String workflowName; - private String status; -} diff --git a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/model/WebhookEvent.java b/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/model/WebhookEvent.java deleted file mode 100644 index 81b5cda..0000000 --- a/escalation-eda/jira-listener/src/main/java/dev/parodos/jiralistener/model/WebhookEvent.java +++ /dev/null @@ -1,19 +0,0 @@ -package dev.parodos.jiralistener.model; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; - -import lombok.Builder; -import lombok.Data; -import lombok.extern.jackson.Jacksonized; - -@Data -@Builder -@Jacksonized -@JsonIgnoreProperties(ignoreUnknown = true) -public class WebhookEvent { - private String timestamp; - private String webhookEvent; - private String issue_event_type_name; - - private JiraIssue issue; -} diff --git a/escalation-eda/jira-listener/src/main/resources/META-INF/resources/index.html b/escalation-eda/jira-listener/src/main/resources/META-INF/resources/index.html deleted file mode 100644 index 19fca5b..0000000 --- a/escalation-eda/jira-listener/src/main/resources/META-INF/resources/index.html +++ /dev/null @@ -1,280 +0,0 @@ - - - - - jira-listener - 1.0.0-SNAPSHOT - - - -
-    [stock Quarkus starter welcome page: "You just made a Quarkus application", links to the Dev UI, config/static-asset paths, and guide/IDE setup blurbs; the HTML markup is not preserved, so the stripped fragments are omitted]
- - diff --git a/escalation-eda/jira-listener/src/main/resources/application.properties b/escalation-eda/jira-listener/src/main/resources/application.properties deleted file mode 100644 index 54f291d..0000000 --- a/escalation-eda/jira-listener/src/main/resources/application.properties +++ /dev/null @@ -1,13 +0,0 @@ -cloudevent.type=${CLOUD_EVENT_TYPE:dev.parodos.escalation} -cloudevent.source=${CLOUD_EVENT_SOURCE:jira.listener} -jira.webhook.label.workflowInstanceId=${WORKFLOW_INSTANCE_ID_LABEL:workflowInstanceId} -jira.webhook.label.workflowName=${WORKFLOW_NAME_LABEL:workflowName} -escalation.workflowName=${EXPECTED_WORKFLOW_NAME:escalation} -quarkus.rest-client.ce-emitter.url=${K_SINK} -%dev.quarkus.rest-client.ce-emitter.url=${K_SINK:http://localhost:8080} -%test.quarkus.rest-client.ce-emitter.url=http://localhost:8181 -quarkus.rest-client.ce-emitter.scope=jakarta.inject.Singleton -# Uncomment to enable RestCLient logging (e.g. POST requests to the configured ce-emitter) -# quarkus.rest-client.logging.scope=all -# quarkus.rest-client.logging.body-limit=1000 -# quarkus.log.category."org.jboss.resteasy.reactive.client.logging".level=DEBUG diff --git a/escalation-eda/jira-listener/src/test/java/dev/parodos/jiralistener/JiraConstants.java b/escalation-eda/jira-listener/src/test/java/dev/parodos/jiralistener/JiraConstants.java deleted file mode 100644 index c4f581b..0000000 --- a/escalation-eda/jira-listener/src/test/java/dev/parodos/jiralistener/JiraConstants.java +++ /dev/null @@ -1,10 +0,0 @@ -package dev.parodos.jiralistener; - -public final class JiraConstants { - static final String ISSUE = "issue"; - static final String KEY = "key"; - static final String FIELDS = "fields"; - static final String LABELS = "labels"; - static final String STATUS = "status"; - static final String STATUS_CATEGORY = "statusCategory"; -} diff --git a/escalation-eda/jira-listener/src/test/java/dev/parodos/jiralistener/JiraListenerResourceIT.java b/escalation-eda/jira-listener/src/test/java/dev/parodos/jiralistener/JiraListenerResourceIT.java deleted file mode 100644 index 73ec9d6..0000000 --- a/escalation-eda/jira-listener/src/test/java/dev/parodos/jiralistener/JiraListenerResourceIT.java +++ /dev/null @@ -1,8 +0,0 @@ -package dev.parodos.jiralistener; - -import io.quarkus.test.junit.QuarkusIntegrationTest; - -@QuarkusIntegrationTest -public class JiraListenerResourceIT extends JiraListenerResourceTest { - // Execute the same tests but in packaged mode. 
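    // Note: with @QuarkusIntegrationTest the inherited tests run against the packaged application
    // (runner jar or native executable) over HTTP, rather than the in-process app started by @QuarkusTest.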
-} diff --git a/escalation-eda/jira-listener/src/test/java/dev/parodos/jiralistener/JiraListenerResourceTest.java b/escalation-eda/jira-listener/src/test/java/dev/parodos/jiralistener/JiraListenerResourceTest.java deleted file mode 100644 index de69b45..0000000 --- a/escalation-eda/jira-listener/src/test/java/dev/parodos/jiralistener/JiraListenerResourceTest.java +++ /dev/null @@ -1,232 +0,0 @@ -package dev.parodos.jiralistener; - -import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; -import static com.github.tomakehurst.wiremock.client.WireMock.post; -import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; -import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; -import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.options; -import static dev.parodos.jiralistener.JiraConstants.FIELDS; -import static dev.parodos.jiralistener.JiraConstants.ISSUE; -import static dev.parodos.jiralistener.JiraConstants.KEY; -import static dev.parodos.jiralistener.JiraConstants.LABELS; -import static dev.parodos.jiralistener.JiraConstants.STATUS; -import static dev.parodos.jiralistener.JiraConstants.STATUS_CATEGORY; -import static io.restassured.RestAssured.given; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.hasSize; -import static org.junit.jupiter.api.Assertions.assertEquals; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.eclipse.microprofile.config.inject.ConfigProperty; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -import com.fasterxml.jackson.core.exc.StreamReadException; -import com.fasterxml.jackson.databind.DatabindException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.github.tomakehurst.wiremock.WireMockServer; -import com.github.tomakehurst.wiremock.client.WireMock; -import com.github.tomakehurst.wiremock.stubbing.ServeEvent; -import com.google.common.collect.Lists; - -import dev.parodos.jiralistener.model.JiraTicketEventData; -import io.quarkus.test.junit.QuarkusTest; -import io.restassured.response.ExtractableResponse; -import io.restassured.response.Response; -import jakarta.inject.Inject; - -@QuarkusTest -public class JiraListenerResourceTest { - private static WireMockServer sink; - - @ConfigProperty(name = "cloudevent.type") - String cloudeventType; - @ConfigProperty(name = "cloudevent.source") - String cloudeventSource; - - @ConfigProperty(name = "jira.webhook.label.workflowInstanceId") - String workflowInstanceIdJiraLabel; - @ConfigProperty(name = "jira.webhook.label.workflowName") - String workflowNameJiraLabel; - @ConfigProperty(name = "escalation.workflowName") - String expectedWorkflowName; - - @Inject - ObjectMapper mapper; - - @BeforeAll - public static void startSink() { - sink = new WireMockServer(options().port(8181)); - sink.start(); - sink.stubFor(post("/").willReturn(aResponse().withBody("ok").withStatus(200))); - } - - @AfterAll - public static void stopSink() { - if (sink != null) { - sink.stop(); - } - } - - @BeforeEach - public void resetSink() { - sink.resetRequests(); - } - - private Map aClosedIssue() { - Map statusCategory = new HashMap(Map.of(KEY, "done")); - Map status = new HashMap(Map.of(STATUS_CATEGORY, statusCategory)); - List labels = new 
ArrayList<>(List.of(workflowInstanceIdJiraLabel + "=500", - workflowNameJiraLabel + "=" + expectedWorkflowName)); - Map fields = new HashMap(Map.of(LABELS, labels, STATUS, status)); - Map issue = new HashMap(Map.of(KEY, "PR-1", FIELDS, fields)); - return new HashMap(Map.of(ISSUE, issue)); - } - - @Test - public void when_jiraIssueIsClosed_onEvent_returnsClosedTicket() - throws StreamReadException, DatabindException, IOException { - Map webhookEvent = aClosedIssue(); - - String workflowInstanceId = "500"; - JiraTicketEventData closedTicket = JiraTicketEventData.builder().ticketId("PR-1") - .workFlowInstanceId(workflowInstanceId) - .workflowName("escalation").status("done").build(); - - ExtractableResponse response = given() - .when().contentType("application/json") - .body(webhookEvent).post("/webhook/jira") - .then() - .statusCode(200) - .extract(); - - assertEquals(response.as(JiraTicketEventData.class), closedTicket, "Returns JiraTicketEventData"); - sink.verify(1, postRequestedFor(urlEqualTo("/")) - .withHeader("ce-source", WireMock.equalTo(cloudeventSource)) - .withHeader("ce-type", WireMock.equalTo(cloudeventType)) - .withHeader("ce-kogitoprocrefid", WireMock.equalTo(workflowInstanceId))); - List allServeEvents = sink.getAllServeEvents(); - allServeEvents = Lists.reverse(allServeEvents); - assertThat(allServeEvents, hasSize(1)); - - ServeEvent event = allServeEvents.get(0); - System.out.println("Received event with headers " + event.getRequest().getAllHeaderKeys()); - JiraTicketEventData eventBody = mapper.readValue(event.getRequest().getBody(), JiraTicketEventData.class); - System.out.println("Received event with eventBody " + eventBody); - assertThat(event.getRequest().header("ce-source").values().get(0), - is(cloudeventSource)); - assertThat(event.getRequest().header("ce-type").values().get(0), - is(cloudeventType)); - assertThat(event.getRequest().header("ce-kogitoprocrefid").values().get(0), - is(workflowInstanceId)); - assertThat("Response body is equal to the request body", eventBody, is(closedTicket)); - } - - @Test - public void when_payloadIsInvalid_onEvent_returnsNoContent() - throws StreamReadException, DatabindException, IOException { - Map invalidIssue = Map.of("invalid", "any"); - validateNoContentRequest(invalidIssue); - - } - - @Test - public void when_jiraIssueHasNotAllRequiredFiels_onEvent_returnsNoContent() - throws StreamReadException, DatabindException, IOException { - Map webhookEvent = aClosedIssue(); - Map issue = (Map) webhookEvent.get(ISSUE); - ((List) ((Map) issue.get(FIELDS)).get(LABELS)).remove(0); - validateNoContentRequest(webhookEvent); - - webhookEvent = aClosedIssue(); - issue = (Map) webhookEvent.get(ISSUE); - ((List) ((Map) issue.get(FIELDS)).get(LABELS)).remove(1); - validateNoContentRequest(webhookEvent); - - webhookEvent = aClosedIssue(); - issue = (Map) webhookEvent.get(ISSUE); - ((Map) issue.get(FIELDS)).remove(LABELS); - validateNoContentRequest(webhookEvent); - - webhookEvent = aClosedIssue(); - issue = (Map) webhookEvent.get(ISSUE); - ((Map) ((Map) ((Map) issue.get(FIELDS)).get(STATUS)).get(STATUS_CATEGORY)) - .remove(KEY); - validateNoContentRequest(webhookEvent); - - webhookEvent = aClosedIssue(); - issue = (Map) webhookEvent.get(ISSUE); - ((Map) ((Map) issue.get(FIELDS)).get(STATUS)).remove(STATUS_CATEGORY); - validateNoContentRequest(webhookEvent); - - webhookEvent = aClosedIssue(); - issue = (Map) webhookEvent.get(ISSUE); - ((Map) issue.get(FIELDS)).remove(STATUS); - validateNoContentRequest(webhookEvent); - - webhookEvent = 
aClosedIssue(); - issue = (Map) webhookEvent.get(ISSUE); - issue.remove(FIELDS); - validateNoContentRequest(webhookEvent); - - webhookEvent = aClosedIssue(); - issue = (Map) webhookEvent.get(ISSUE); - issue.remove(KEY); - validateNoContentRequest(webhookEvent); - } - - @Test - public void when_jiraIssueIsNotClosed_onEvent_returnsNoContent() - throws StreamReadException, DatabindException, IOException { - Map webhookEvent = aClosedIssue(); - Map issue = (Map) webhookEvent.get(ISSUE); - ((Map) ((Map) ((Map) issue.get(FIELDS)).get(STATUS)).get(STATUS_CATEGORY)) - .put(KEY, - "undone"); - validateNoContentRequest(webhookEvent); - } - - @Test - public void when_jiraIssueHasWrongWorkflowName_onEvent_returnsNoContent() - throws StreamReadException, DatabindException, IOException { - Map webhookEvent = aClosedIssue(); - Map issue = (Map) webhookEvent.get(ISSUE); - Map fields = ((Map) issue.get(FIELDS)); - fields.put(LABELS, List.of(workflowInstanceIdJiraLabel + "=500", - workflowNameJiraLabel + "=invalidName")); - validateNoContentRequest(webhookEvent); - } - - @Test - public void when_jiraIssueHasWrongLabels_onEvent_returnsNoContent() - throws StreamReadException, DatabindException, IOException { - Map webhookEvent = aClosedIssue(); - Map issue = (Map) webhookEvent.get(ISSUE); - Map fields = ((Map) issue.get(FIELDS)); - fields.put(LABELS, List.of("anotherLabel")); - validateNoContentRequest(webhookEvent); - } - - private void validateNoContentRequest(Map issue) { - ExtractableResponse response = given() - .when().contentType("application/json") - .body(issue).post("/webhook/jira") - .then() - .statusCode(204) - .extract(); - - assertThat("Returns no content", response.asString(), is("")); - sink.verify(0, postRequestedFor(urlEqualTo("/"))); - List allServeEvents = sink.getAllServeEvents(); - allServeEvents = Lists.reverse(allServeEvents); - assertThat(allServeEvents, hasSize(0)); - } -} \ No newline at end of file diff --git a/escalation-eda/jira-listener/src/test/resources/invalid.json b/escalation-eda/jira-listener/src/test/resources/invalid.json deleted file mode 100644 index 5d79f73..0000000 --- a/escalation-eda/jira-listener/src/test/resources/invalid.json +++ /dev/null @@ -1,3 +0,0 @@ -{ -"a":"b" -} diff --git a/escalation-eda/jira-listener/src/test/resources/valid.json b/escalation-eda/jira-listener/src/test/resources/valid.json deleted file mode 100644 index 0a097fe..0000000 --- a/escalation-eda/jira-listener/src/test/resources/valid.json +++ /dev/null @@ -1,193 +0,0 @@ -{ - "timestamp": "1696423540500", - "webhookEvent": "jira:issue_updated", - "issue_event_type_name": "issue_generic", - "user": { - "self": "REDACTED", - "accountId": "REDACTED", - "avatarUrls": { - }, - "displayName": "REDACTED", - "active": "true", - "timeZone": "REDACTED", - "accountType": "atlassian" - }, - "issue": { - "id": "10088", - "self": "REDACTED", - "key": "ES-4", - "fields": { - "statuscategorychangedate": "2023-10-04T14:45:40.472+0200", - "issuetype": { - "self": "REDACTED", - "id": "10013", - "description": "Tasks track small, distinct pieces of work.", - "name": "Task", - "subtask": "false", - "avatarId": "10318", - "entityId": "REDACTED", - "hierarchyLevel": "0" - }, - "timespent": "null", - "customfield_10030": "null", - "customfield_10031": "null", - "project": { - "self": "REDACTED", - "id": "10003", - "key": "ES", - "name": "ESCALATION", - "projectTypeKey": "software", - "simplified": "true", - "avatarUrls": { - } - }, - "customfield_10032": "null", - "fixVersions": "[]", - "customfield_10033": "[]", 
- "customfield_10034": "null", - "aggregatetimespent": "null", - "customfield_10035": "null", - "resolution": "null", - "customfield_10027": "null", - "customfield_10028": "null", - "customfield_10029": "null", - "resolutiondate": "null", - "workratio": "-1", - "watches": { - "self": "REDACTED", - "watchCount": "1", - "isWatching": "true" - }, - "issuerestriction": { - "issuerestrictions": {}, - "shouldDisplay": "true" - }, - "lastViewed": "2023-10-04T14:30:35.075+0200", - "created": "2023-10-04T14:30:08.275+0200", - "customfield_10020": "null", - "customfield_10021": "null", - "customfield_10022": "null", - "priority": { - "self": "REDACTED", - "iconUrl": "REDACTED", - "name": "Medium", - "id": "3" - }, - "customfield_10023": "null", - "customfield_10024": "null", - "customfield_10025": "null", - "customfield_10026": "null", - "labels": [ - "workflowInstanceId=500", - "workflowName=escalation" - ], - "customfield_10016": "null", - "customfield_10017": "null", - "customfield_10018": { - "hasEpicLinkFieldDependency": "false", - "showField": "false", - "nonEditableReason": { - "reason": "PLUGIN_LICENSE_ERROR", - "message": "The Parent Link is only available to Jira Premium users." - } - }, - "customfield_10019": "0|i000en:", - "timeestimate": "null", - "aggregatetimeoriginalestimate": "null", - "versions": "[]", - "issuelinks": "[]", - "assignee": "null", - "updated": "2023-10-04T14:45:40.472+0200", - "status": { - "self": "REDACTED", - "description": "", - "iconUrl": "REDACTED", - "name": "In Progress", - "id": "10016", - "statusCategory": { - "self": "REDACTED", - "id": "4", - "key": "done", - "colorName": "yellow", - "name": "Complete" - } - }, - "components": "[]", - "timeoriginalestimate": "null", - "description": "null", - "customfield_10010": "null", - "customfield_10014": "null", - "timetracking": {}, - "customfield_10015": "null", - "customfield_10005": "null", - "customfield_10006": "null", - "customfield_10007": "null", - "security": "null", - "customfield_10008": "null", - "attachment": "[]", - "aggregatetimeestimate": "null", - "customfield_10009": "null", - "summary": "aaa", - "creator": { - "self": "REDACTED", - "accountId": "REDACTED", - "avatarUrls": { - }, - "displayName": "REDACTED", - "active": "true", - "timeZone": "REDACTED", - "accountType": "atlassian" - }, - "subtasks": "[]", - "customfield_10040": "null", - "customfield_10041": "null", - "customfield_10042": "null", - "reporter": { - "self": "REDACTED", - "accountId": "REDACTED", - "avatarUrls": { - }, - "displayName": "REDACTED", - "active": "true", - "timeZone": "REDACTED", - "accountType": "atlassian" - }, - "aggregateprogress": { - "progress": "0", - "total": "0" - }, - "customfield_10044": "[]", - "customfield_10001": "null", - "customfield_10002": "null", - "customfield_10003": "null", - "customfield_10004": "null", - "customfield_10038": "null", - "customfield_10039": "null", - "environment": "null", - "duedate": "null", - "progress": { - "progress": "0", - "total": "0" - }, - "votes": { - "self": "REDACTED", - "votes": "0", - "hasVoted": "false" - } - } - }, - "changelog": { - "id": "10163", - "items": [ - { - "field": "status", - "fieldtype": "jira", - "fieldId": "status", - "from": "10015", - "fromString": "To Do", - "to": "10016", - "toString": "In Progress" - } - ] - } -} \ No newline at end of file diff --git a/escalation-eda/pom.xml b/escalation-eda/pom.xml deleted file mode 100644 index 29f5466..0000000 --- a/escalation-eda/pom.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - 4.0.0 - dev.parodos - 
escalation-eda - 1.0.0-SNAPSHOT - pom - - jira-listener - escalation-swf - - \ No newline at end of file diff --git a/escalation-sn/README.md b/escalation-service-now/README.md similarity index 100% rename from escalation-sn/README.md rename to escalation-service-now/README.md diff --git a/escalation-sn/instance-setup/automated-script/readme.md b/escalation-service-now/instance-setup/automated-script/readme.md similarity index 100% rename from escalation-sn/instance-setup/automated-script/readme.md rename to escalation-service-now/instance-setup/automated-script/readme.md diff --git a/escalation-sn/instance-setup/automated-script/sn-instance-setup.sh b/escalation-service-now/instance-setup/automated-script/sn-instance-setup.sh similarity index 100% rename from escalation-sn/instance-setup/automated-script/sn-instance-setup.sh rename to escalation-service-now/instance-setup/automated-script/sn-instance-setup.sh diff --git a/escalation-sn/instance-setup/manual-self-guided/readme.md b/escalation-service-now/instance-setup/manual-self-guided/readme.md similarity index 100% rename from escalation-sn/instance-setup/manual-self-guided/readme.md rename to escalation-service-now/instance-setup/manual-self-guided/readme.md diff --git a/escalation-sn/instance-setup/readme.md b/escalation-service-now/instance-setup/readme.md similarity index 100% rename from escalation-sn/instance-setup/readme.md rename to escalation-service-now/instance-setup/readme.md diff --git a/escalation-sn/pom.xml b/escalation-service-now/pom.xml similarity index 100% rename from escalation-sn/pom.xml rename to escalation-service-now/pom.xml diff --git a/escalation-sn/src/main/resources/application-dev.properties b/escalation-service-now/src/main/resources/application-dev.properties similarity index 100% rename from escalation-sn/src/main/resources/application-dev.properties rename to escalation-service-now/src/main/resources/application-dev.properties diff --git a/escalation-sn/src/main/resources/application.properties b/escalation-service-now/src/main/resources/application.properties similarity index 100% rename from escalation-sn/src/main/resources/application.properties rename to escalation-service-now/src/main/resources/application.properties diff --git a/escalation-sn/src/main/resources/schemas/servicenow-escalation-schema.json b/escalation-service-now/src/main/resources/schemas/servicenow-escalation-schema.json similarity index 100% rename from escalation-sn/src/main/resources/schemas/servicenow-escalation-schema.json rename to escalation-service-now/src/main/resources/schemas/servicenow-escalation-schema.json diff --git a/escalation-sn/src/main/resources/serviceNowEscalation.sw.yaml b/escalation-service-now/src/main/resources/serviceNowEscalation.sw.yaml similarity index 100% rename from escalation-sn/src/main/resources/serviceNowEscalation.sw.yaml rename to escalation-service-now/src/main/resources/serviceNowEscalation.sw.yaml diff --git a/escalation-sn/src/main/resources/specs/servicenow.yaml b/escalation-service-now/src/main/resources/specs/servicenow.yaml similarity index 100% rename from escalation-sn/src/main/resources/specs/servicenow.yaml rename to escalation-service-now/src/main/resources/specs/servicenow.yaml diff --git a/escalation/README.md b/escalation/README.md deleted file mode 100644 index 6075a15..0000000 --- a/escalation/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# Simple escalation workflow -An escalation workflow integrated with Atlassian JIRA using 
[SonataFlow](https://sonataflow.org/serverlessworkflow/latest/index.html). - -Email service is using [MailTrap Send email API](https://api-docs.mailtrap.io/docs/mailtrap-api-docs/bcf61cdc1547e-send-email-early-access) API - -## Prerequisite -* Access to a Jira server (URL, user and [API token](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/)) -* Access to an OpenShift cluster with `admin` Role -* An account to [MailTrap](https://mailtrap.io/home) with a [testing Inbox](https://mailtrap.io/inboxes) and an [API token](https://mailtrap.io/api-tokens) - -## Escalation flow -![SWF VIZ](./src/main/resources/ticketEscalation.svg) - -**Note**: -The value of the `.jiraIssue.fields.status.statusCategory.key` field is the one to be used to identify when the `done` status is reached, all the other -similar fields are subject to translation to the configured language and cannot be used for a consistent check. - -## Application configuration -Application properties can be initialized from environment variables before running the application: - -| Environment variable | Description | Mandatory | Default value | -|-----------------------|-------------|-----------|---------------| -| `JIRA_URL` | The Jira server URL | ✅ | | -| `JIRA_USERNAME` | The Jira server username | ✅ | | -| `JIRA_API_TOKEN` | The Jira API Token | ✅ | | -| `JIRA_PROJECT` | The key of the Jira project where the escalation issue is created | ❌ | `TEST` | -| `JIRA_ISSUE_TYPE` | The ID of the Jira issue type to be created | ✅ | | -| `MAILTRAP_URL` | The MailTrail API Token| ❌ | `https://sandbox.api.mailtrap.io` | -| `MAILTRAP_API_TOKEN` | The MailTrail API Token| ✅ | | -| `MAILTRAP_INBOX_ID` | The ID of the MailTrap inbox | ✅ | | -| `MAILTRAP_SENDER_EMAIL` | The email address of the mail sender | ❌ | `escalation@company.com` | -| `OCP_API_SERVER_URL` | The OpensShift API Server URL | ✅ | | -| `OCP_API_SERVER_TOKEN`| The OpensShift API Server Token | ✅ | | -| `ESCALATION_TIMEOUT_SECONDS` | The number of seconds to wait before triggering the escalation request, after the issue has been created | ❌ | `60` | -| `POLLING_PERIODICITY`(1) | The polling periodicity of the issue state checker, according to ISO 8601 duration format | ❌ | `PT6S` | - -(1) This is still hardcoded as `PT5S` while waiting for a fix to [KOGITO-9811](https://issues.redhat.com/browse/KOGITO-9811) -## How to run - -```bash -mvn clean quarkus:dev -``` - -Example of POST to trigger the flow (see input schema in [ocp-onboarding-schema.json](./src/main/resources/ocp-onboarding-schema.json)): -```bash -curl -XPOST -H "Content-Type: application/json" http://localhost:8080/ticket-escalation -d '{"namespace": "_YOUR_NAMESPACE_", "manager": "_YOUR_EMAIL_"}' -``` - -Tips: -* Visit [Workflow Instances](http://localhost:8080/q/dev/org.kie.kogito.kogito-quarkus-serverless-workflow-devui/workflowInstances) -* Visit (Data Index Query Service)[http://localhost:8080/q/graphql-ui/] \ No newline at end of file diff --git a/escalation/pom.xml b/escalation/pom.xml deleted file mode 100644 index 28b6b4a..0000000 --- a/escalation/pom.xml +++ /dev/null @@ -1,199 +0,0 @@ - - - 4.0.0 - dev.parodos - ticket-escalation - 1.0.0-SNAPSHOT - - kogito-bom - org.kie.kogito - 1.43.0.Final - - 3.10.1 - 11 - UTF-8 - UTF-8 - quarkus-bom - io.quarkus.platform - 2.16.10.Final - true - 3.0.0-M7 - 0.0.8 - - - - - ${quarkus.platform.group-id} - ${quarkus.platform.artifact-id} - ${quarkus.platform.version} - pom - import - - - ${kogito.bom.group-id} - 
${kogito.bom.artifact-id} - ${kogito.bom.version} - pom - import - - - ${quarkus.platform.group-id} - quarkus-kogito-bom - ${quarkus.platform.version} - pom - import - - - io.quarkiverse.embedded.postgresql - quarkus-embedded-postgresql - ${version.io.quarkiverse.embedded.postgresql} - - - - - - - org.kie.kogito - kogito-quarkus-serverless-workflow - - - org.kie.kogito - kogito-quarkus-serverless-workflow-devui - - - org.kie.kogito - kogito-addons-quarkus-process-management - - - org.kie.kogito - kogito-addons-quarkus-source-files - - - - io.quarkiverse.embedded.postgresql - quarkus-embedded-postgresql - - - io.quarkus - quarkus-jdbc-postgresql - - - io.quarkus - quarkus-agroal - - - org.kie.kogito - kogito-addons-quarkus-persistence-jdbc - - - - - org.kie.kogito - kogito-addons-quarkus-jobs-service-embedded - - - - - org.kie.kogito - kogito-addons-quarkus-data-index-inmemory - - - - org.eclipse.angus - angus-mail - 2.0.1 - - - - io.quarkus - quarkus-smallrye-openapi - - - io.quarkus - quarkus-resteasy-jackson - - - io.quarkus - quarkus-arc - - - io.quarkus - quarkus-junit5 - test - - - - - - ${quarkus.platform.group-id} - quarkus-maven-plugin - ${quarkus.platform.version} - true - - - - build - generate-code - generate-code-tests - - - - - - maven-compiler-plugin - ${compiler-plugin.version} - - - -parameters - - - - - maven-surefire-plugin - ${surefire-plugin.version} - - - org.jboss.logmanager.LogManager - ${maven.home} - - - - - maven-failsafe-plugin - ${surefire-plugin.version} - - - - integration-test - verify - - - - ${project.build.directory}/${project.build.finalName}-runner - org.jboss.logmanager.LogManager - ${maven.home} - - - - - - - - - - native - - - native - - - - false - native - - - - \ No newline at end of file diff --git a/escalation/src/main/resources/application.properties b/escalation/src/main/resources/application.properties deleted file mode 100644 index 55dced1..0000000 --- a/escalation/src/main/resources/application.properties +++ /dev/null @@ -1,51 +0,0 @@ -# Application properties -# The ID of the Jira issue type to be created (mandatory) -jira_issue_type=${JIRA_ISSUE_TYPE} -# The key of the Jira project where the escalation issue is created (mandatory) -jira_project=${JIRA_PROJECT} -# The email address of the mail sender -sender_email=${MAILTRAP_SENDER_EMAIL:escalation@company.com} -# The ID of the MailTrap inbox (mandatory) -mailtrap_inbox_id=${MAILTRAP_INBOX_ID} -# The number of seconds to wait before triggering the escalation request, after the issue has been created -timeout_seconds=${ESCALATION_TIMEOUT_SECONDS:60} -# The polling periodicity of the issue state checker, according to ISO 8601 duration format -polling_periodicity=${POLLING_PERIODICITY:PT6S} - -# Jira -quarkus.rest-client.jira_yaml.url=${JIRA_URL} -quarkus.openapi-generator.jira_yaml.auth.basicAuth.username=${JIRA_USERNAME} -quarkus.openapi-generator.jira_yaml.auth.basicAuth.password=${JIRA_API_TOKEN} - -# OpenShift API Server -quarkus.rest-client.kube_yaml.url=${OCP_API_SERVER_URL} -quarkus.openapi-generator.kube_yaml.auth.BearerToken.bearer-token=${OCP_API_SERVER_TOKEN} -quarkus.tls.trust-all=true -quarkus.kubernetes-client.trust-certs=true - -# MailTrap service -quarkus.rest-client.mailtrap_yaml.url=${MAILTRAP_URL:https://sandbox.api.mailtrap.io} -quarkus.openapi-generator.mailtrap_yaml.auth.apiToken.api-key=${MAILTRAP_API_TOKEN} - -#Quarkus -quarkus.http.host=0.0.0.0 -# This is to enable debugging of HTTP request -quarkus.log.category.\"org.apache.http\".level=DEBUG - -# Added 
-quarkus.http.port=8080 - -kogito.service.url=http://localhost:${quarkus.http.port} - -quarkus.kogito.devservices.enabled=false -quarkus.devservices.enabled=false - -quarkus.swagger-ui.always-include=true -quarkus.kogito.data-index.graphql.ui.always-include=true - -# Kogito runtime persistence configurations -kogito.persistence.type=jdbc -kogito.persistence.proto.marshaller=false -kogito.persistence.query.timeout.millis=10000 -quarkus.datasource.db-kind=postgresql -quarkus.flyway.migrate-at-start=true diff --git a/escalation/src/main/resources/specs/jira.yaml b/escalation/src/main/resources/specs/jira.yaml deleted file mode 100644 index 530f3c0..0000000 --- a/escalation/src/main/resources/specs/jira.yaml +++ /dev/null @@ -1,332 +0,0 @@ -openapi: 3.0.3 -info: - title: The Jira Cloud platform REST API - description: Jira Cloud platform REST API documentation - termsOfService: http://atlassian.com/terms/ - contact: - email: ecosystem@atlassian.com - license: - name: Apache 2.0 - url: http://www.apache.org/licenses/LICENSE-2.0.html - version: 1001.0.0-SNAPSHOT -externalDocs: - description: Find out more about Atlassian products and services. - url: http://www.atlassian.com -servers: - - url: https://your-domain.atlassian.net -paths: - /rest/api/latest/issue: - post: - tags: - - Issues - summary: Create issue - description: |- - Creates an issue or, where the option to create subtasks is enabled in Jira, a subtask. A transition may be applied, to move the issue or subtask to a workflow step other than the default start step, and issue properties set. - - The content of the issue or subtask is defined using `update` and `fields`. The fields that can be set in the issue or subtask are determined using the [ Get create issue metadata](#api-rest-api-3-issue-createmeta-get). These are the same fields that appear on the issue's create screen. Note that the `description`, `environment`, and any `textarea` type custom fields (multi-line text fields) take Atlassian Document Format content. Single line custom fields (`textfield`) accept a string and don't handle Atlassian Document Format content. - - Creating a subtask differs from creating an issue as follows: - - * `issueType` must be set to a subtask issue type (use [ Get create issue metadata](#api-rest-api-3-issue-createmeta-get) to find subtask issue types). - * `parent` must contain the ID or key of the parent issue. - - In a next-gen project any issue may be made a child providing that the parent and child are members of the same project. - - **[Permissions](#permissions) required:** *Browse projects* and *Create issues* [project permissions](https://confluence.atlassian.com/x/yodKLg) for the project in which the issue or subtask is created. - operationId: createIssue - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/IssueUpdateDetails' - example: - fields: - assignee: - id: 5b109f2e9729b51b54dc274d - components: - - id: "10000" - customfield_10000: 09/Jun/19 - customfield_20000: 06/Jul/19 3:25 PM - customfield_30000: - - "10000" - - "10002" - customfield_40000: - content: - - content: - - text: Occurs on all orders - type: text - type: paragraph - type: doc - version: 1 - customfield_50000: - content: - - content: - - text: Could impact day-to-day work. 
- type: text - type: paragraph - type: doc - version: 1 - customfield_60000: jira-software-users - customfield_70000: - - jira-administrators - - jira-software-users - customfield_80000: - value: red - description: - content: - - content: - - text: Order entry fails when selecting supplier. - type: text - type: paragraph - type: doc - version: 1 - duedate: 2019-05-11 - environment: - content: - - content: - - text: UAT - type: text - type: paragraph - type: doc - version: 1 - fixVersions: - - id: "10001" - issuetype: - id: "10000" - labels: - - bugfix - - blitz_test - parent: - key: PROJ-123 - priority: - id: "20000" - project: - id: "10000" - reporter: - id: 5b10a2844c20165700ede21g - security: - id: "10000" - summary: Main order flow broken - timetracking: - originalEstimate: "10" - remainingEstimate: "5" - versions: - - id: "10000" - update: { } - required: true - responses: - "201": - description: Returned if the request is successful. - content: - application/json: - schema: - $ref: '#/components/schemas/CreatedIssue' - example: "{\"id\":\"10000\",\"key\":\"ED-24\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/10000\",\"transition\":{\"status\":200,\"errorCollection\":{\"errorMessages\":[],\"errors\":{}}}}" - "400": - description: |- - Returned if the request: - - * is missing required fields. - * contains invalid field values. - * contains fields that cannot be set for the issue type. - * is by a user who does not have the necessary permission. - * is to create a subtype in a project different that of the parent issue. - * is for a subtask when the option to create subtasks is disabled. - * is invalid for any other reason. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorCollection' - example: "{\"errorMessages\":[\"Field 'priority' is required\"],\"errors\":{}}" - "401": - description: Returned if the authentication credentials are incorrect or missing. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorCollection' - "403": - description: Returned if the user does not have the necessary permission. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorCollection' - deprecated: false - security: - - basicAuth: [ ] - - /rest/api/3/issue/{issueIdOrKey}: - get: - tags: - - Issues - summary: Get issue - description: |- - Returns the details for an issue. - - The issue is identified by its ID or key, however, if the identifier doesn't match an issue, a case-insensitive search and check for moved issues is performed. If a matching issue is found its details are returned, a 302 or other redirect is **not** returned. The issue key returned in the response is the key of the issue found. - - This operation can be accessed anonymously. - - **[Permissions](#permissions) required:** - - * *Browse projects* [project permission](https://confluence.atlassian.com/x/yodKLg) for the project that the issue is in. - * If [issue-level security](https://confluence.atlassian.com/x/J4lKLg) is configured, issue-level security permission to view the issue. - operationId: getIssue - parameters: - - name: issueIdOrKey - in: path - description: The ID or key of the issue. - required: true - style: simple - explode: false - schema: - type: string - - name: fields - in: query - description: The ID or key of the issue. - required: false - schema: - type: string - default: status - - name: fieldsByKeys - in: query - description: Whether fields in `fields` are referenced by keys rather than IDs. 
This parameter is useful where fields have been added by a connect app and a field's key may differ from its ID. - required: false - style: form - explode: true - schema: - type: boolean - default: false - - name: expand - in: query - description: |- - Use [expand](#expansion) to include additional information about the issues in the response. This parameter accepts a comma-separated list. Expand options include: - - * `renderedFields` Returns field values rendered in HTML format. - * `names` Returns the display name of each field. - * `schema` Returns the schema describing a field type. - * `transitions` Returns all possible transitions for the issue. - * `editmeta` Returns information about how each field can be edited. - * `changelog` Returns a list of recent updates to an issue, sorted by date, starting from the most recent. - * `versionedRepresentations` Returns a JSON array for each version of a field's value, with the highest number representing the most recent version. Note: When included in the request, the `fields` parameter is ignored. - required: false - style: form - explode: true - schema: - type: string - - name: updateHistory - in: query - description: "Whether the project in which the issue is created is added to the user's **Recently viewed** project list, as shown under **Projects** in Jira. This also populates the [JQL issues search](#api-rest-api-3-search-get) `lastViewed` field." - required: false - style: form - explode: true - schema: - type: boolean - default: false - responses: - "200": - description: Returned if the request is successful. - content: - application/json: - schema: - $ref: '#/components/schemas/IssueBean' - example: "{\"id\":\"10002\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/10002\",\"key\":\"ED-1\",\"fields\":{\"watcher\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/EX-1/watchers\",\"isWatching\":false,\"watchCount\":1,\"watchers\":[{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false}]},\"attachment\":[{\"id\":10000,\"self\":\"https://your-domain.atlassian.net/rest/api/3/attachments/10000\",\"filename\":\"picture.jpg\",\"author\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"key\":\"\",\"accountId\":\"5b10a2844c20165700ede21g\",\"accountType\":\"atlassian\",\"name\":\"\",\"avatarUrls\":{\"48x48\":\"https://avatar-management--avatars.server-location.prod.public.atl-paas.net/initials/MK-5.png?size=48&s=48\",\"24x24\":\"https://avatar-management--avatars.server-location.prod.public.atl-paas.net/initials/MK-5.png?size=24&s=24\",\"16x16\":\"https://avatar-management--avatars.server-location.prod.public.atl-paas.net/initials/MK-5.png?size=16&s=16\",\"32x32\":\"https://avatar-management--avatars.server-location.prod.public.atl-paas.net/initials/MK-5.png?size=32&s=32\"},\"displayName\":\"Mia 
Krystof\",\"active\":false},\"created\":\"2023-06-06T06:40:34.248+0000\",\"size\":23123,\"mimeType\":\"image/jpeg\",\"content\":\"https://your-domain.atlassian.net/jira/rest/api/3/attachment/content/10000\",\"thumbnail\":\"https://your-domain.atlassian.net/jira/rest/api/3/attachment/thumbnail/10000\"}],\"sub-tasks\":[{\"id\":\"10000\",\"type\":{\"id\":\"10000\",\"name\":\"\",\"inward\":\"Parent\",\"outward\":\"Sub-task\"},\"outwardIssue\":{\"id\":\"10003\",\"key\":\"ED-2\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/ED-2\",\"fields\":{\"status\":{\"iconUrl\":\"https://your-domain.atlassian.net/images/icons/statuses/open.png\",\"name\":\"Open\"}}}}],\"description\":{\"type\":\"doc\",\"version\":1,\"content\":[{\"type\":\"paragraph\",\"content\":[{\"type\":\"text\",\"text\":\"Main order flow broken\"}]}]},\"project\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/project/EX\",\"id\":\"10000\",\"key\":\"EX\",\"name\":\"Example\",\"avatarUrls\":{\"48x48\":\"https://your-domain.atlassian.net/secure/projectavatar?size=large&pid=10000\",\"24x24\":\"https://your-domain.atlassian.net/secure/projectavatar?size=small&pid=10000\",\"16x16\":\"https://your-domain.atlassian.net/secure/projectavatar?size=xsmall&pid=10000\",\"32x32\":\"https://your-domain.atlassian.net/secure/projectavatar?size=medium&pid=10000\"},\"projectCategory\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/projectCategory/10000\",\"id\":\"10000\",\"name\":\"FIRST\",\"description\":\"First Project Category\"},\"simplified\":false,\"style\":\"classic\",\"insight\":{\"totalIssueCount\":100,\"lastIssueUpdateTime\":\"2023-06-06T06:40:28.659+0000\"}},\"comment\":[{\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/10010/comment/10000\",\"id\":\"10000\",\"author\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false},\"body\":{\"type\":\"doc\",\"version\":1,\"content\":[{\"type\":\"paragraph\",\"content\":[{\"type\":\"text\",\"text\":\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eget venenatis elit. Duis eu justo eget augue iaculis fermentum. 
Sed semper quam laoreet nisi egestas at posuere augue semper.\"}]}]},\"updateAuthor\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false},\"created\":\"2021-01-17T12:34:00.000+0000\",\"updated\":\"2021-01-18T23:45:00.000+0000\",\"visibility\":{\"type\":\"role\",\"value\":\"Administrators\",\"identifier\":\"Administrators\"}}],\"issuelinks\":[{\"id\":\"10001\",\"type\":{\"id\":\"10000\",\"name\":\"Dependent\",\"inward\":\"depends on\",\"outward\":\"is depended by\"},\"outwardIssue\":{\"id\":\"10004L\",\"key\":\"PR-2\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/PR-2\",\"fields\":{\"status\":{\"iconUrl\":\"https://your-domain.atlassian.net/images/icons/statuses/open.png\",\"name\":\"Open\"}}}},{\"id\":\"10002\",\"type\":{\"id\":\"10000\",\"name\":\"Dependent\",\"inward\":\"depends on\",\"outward\":\"is depended by\"},\"inwardIssue\":{\"id\":\"10004\",\"key\":\"PR-3\",\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/PR-3\",\"fields\":{\"status\":{\"iconUrl\":\"https://your-domain.atlassian.net/images/icons/statuses/open.png\",\"name\":\"Open\"}}}}],\"worklog\":[{\"self\":\"https://your-domain.atlassian.net/rest/api/3/issue/10010/worklog/10000\",\"author\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false},\"updateAuthor\":{\"self\":\"https://your-domain.atlassian.net/rest/api/3/user?accountId=5b10a2844c20165700ede21g\",\"accountId\":\"5b10a2844c20165700ede21g\",\"displayName\":\"Mia Krystof\",\"active\":false},\"comment\":{\"type\":\"doc\",\"version\":1,\"content\":[{\"type\":\"paragraph\",\"content\":[{\"type\":\"text\",\"text\":\"I did some work here.\"}]}]},\"updated\":\"2021-01-18T23:45:00.000+0000\",\"visibility\":{\"type\":\"group\",\"value\":\"jira-developers\",\"identifier\":\"276f955c-63d7-42c8-9520-92d01dca0625\"},\"started\":\"2021-01-17T12:34:00.000+0000\",\"timeSpent\":\"3h 20m\",\"timeSpentSeconds\":12000,\"id\":\"100028\",\"issueId\":\"10002\"}],\"updated\":1,\"timetracking\":{\"originalEstimate\":\"10m\",\"remainingEstimate\":\"3m\",\"timeSpent\":\"6m\",\"originalEstimateSeconds\":600,\"remainingEstimateSeconds\":200,\"timeSpentSeconds\":400}}}" - "401": - description: Returned if the authentication credentials are incorrect or missing. - "404": - description: Returned if the issue is not found or the user does not have permission to view it. - deprecated: false - security: - - basicAuth: [ ] - -components: - schemas: - CreatedIssue: - type: object - properties: - id: - type: string - description: The ID of the created issue or subtask. - readOnly: true - key: - type: string - description: The key of the created issue or subtask. - readOnly: true - self: - type: string - description: The URL of the created issue or subtask. - readOnly: true - additionalProperties: false - description: Details about a created issue or subtask. - EntityProperty: - type: object - properties: - key: - type: string - description: The key of the property. Required on create and update. - value: - description: The value of the property. Required on create and update. - additionalProperties: false - description: >- - An entity property, for more information see [Entity - properties](https://developer.atlassian.com/cloud/jira/platform/jira-entity-properties/). 
- ErrorCollection: - type: object - properties: - errorMessages: - type: array - description: "The list of error messages produced by this operation. For example, \"input parameter 'key' must be provided\"" - items: - type: string - errors: - type: object - additionalProperties: - type: string - description: "The list of errors by parameter returned by the operation. For example,\"projectKey\": \"Project keys must start with an uppercase letter, followed by one or more uppercase alphanumeric characters.\"" - status: - type: integer - format: int32 - additionalProperties: false - description: Error messages from an operation. - IssueBean: - type: object - properties: - expand: - type: string - description: Expand options that include additional issue details in the response. - readOnly: true - xml: - attribute: true - fields: - type: object - additionalProperties: { } - id: - type: string - description: The ID of the issue. - readOnly: true - key: - type: string - description: The key of the issue. - readOnly: true - - additionalProperties: false - description: Details about an issue. - xml: - name: issue - - IssueUpdateDetails: - type: object - properties: - fields: - type: object - additionalProperties: { } - description: "List of issue screen fields to update, specifying the sub-field to update and its value for each field. This field provides a straightforward option when setting a sub-field. When multiple sub-fields or other operations are required, use `update`. Fields included in here cannot be included in `update`." - properties: - type: array - description: Details of issue properties to be add or update. - items: - $ref: '#/components/schemas/EntityProperty' - securitySchemes: - basicAuth: - type: http - description: You can access this resource via basic auth. - scheme: basic diff --git a/escalation/src/main/resources/specs/kube.yaml b/escalation/src/main/resources/specs/kube.yaml deleted file mode 100644 index 5418e59..0000000 --- a/escalation/src/main/resources/specs/kube.yaml +++ /dev/null @@ -1,516 +0,0 @@ - openapi: 3.0.0 - info: - title: Kubernetes - version: v1.27.3 - paths: - - /api/v1/namespaces: - get: - tags: - - core_v1 - description: list or watch objects of kind Namespace - operationId: listCoreV1Namespace - parameters: - - name: allowWatchBookmarks - in: query - description: allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. - schema: - type: boolean - uniqueItems: true - - name: continue - in: query - description: |- - The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - - This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. - schema: - type: string - uniqueItems: true - - name: fieldSelector - in: query - description: A selector to restrict the list of returned objects by their fields. Defaults to everything. - schema: - type: string - uniqueItems: true - - name: labelSelector - in: query - description: A selector to restrict the list of returned objects by their labels. Defaults to everything. - schema: - type: string - uniqueItems: true - - name: limit - in: query - description: |- - limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - - The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. - schema: - type: integer - uniqueItems: true - - name: resourceVersion - in: query - description: |- - resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - - Defaults to unset - schema: - type: string - uniqueItems: true - - name: resourceVersionMatch - in: query - description: |- - resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - - Defaults to unset - schema: - type: string - uniqueItems: true - - name: sendInitialEvents - in: query - description: |- - `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic "Bookmark" event will be sent. 
The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. - - When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan - is interpreted as "data at least as new as the provided `resourceVersion`" - and the bookmark event is send when the state is synced - to a `resourceVersion` at least as fresh as the one provided by the ListOptions. - If `resourceVersion` is unset, this is interpreted as "consistent read" and the - bookmark event is send when the state is synced at least to the moment - when request started being processed. - - `resourceVersionMatch` set to any other value or unset - Invalid error is returned. - - Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward compatibility reasons) and to false otherwise. - schema: - type: boolean - uniqueItems: true - - name: timeoutSeconds - in: query - description: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. - schema: - type: integer - uniqueItems: true - - name: watch - in: query - description: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. - schema: - type: boolean - uniqueItems: true - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - application/json;stream=watch: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - application/vnd.kubernetes.protobuf: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - application/vnd.kubernetes.protobuf;stream=watch: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - application/yaml: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceList' - "401": - description: Unauthorized - x-kubernetes-action: list - x-kubernetes-group-version-kind: - group: "" - version: v1 - kind: Namespace - security: - - BearerToken: [] - post: - tags: - - core_v1 - description: create a Namespace - operationId: createCoreV1Namespace - parameters: - - name: dryRun - in: query - description: 'When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed' - schema: - type: string - uniqueItems: true - - name: fieldManager - in: query - description: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. - schema: - type: string - uniqueItems: true - - name: fieldValidation - in: query - description: 'fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. 
This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.' - schema: - type: string - uniqueItems: true - requestBody: - content: - '*/*': - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/vnd.kubernetes.protobuf: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/yaml: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - "201": - description: Created - content: - application/json: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/vnd.kubernetes.protobuf: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/yaml: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - "202": - description: Accepted - content: - application/json: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/vnd.kubernetes.protobuf: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - application/yaml: - schema: - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - "401": - description: Unauthorized - x-kubernetes-action: post - x-kubernetes-group-version-kind: - group: "" - version: v1 - kind: Namespace - security: - - BearerToken: [ ] - parameters: - - name: pretty - in: query - description: If 'true', then the output is pretty printed. - schema: - type: string - uniqueItems: true - - components: - schemas: - io.k8s.apimachinery.pkg.apis.meta.v1.Time: - description: Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. - type: string - format: date-time - io.k8s.api.core.v1.Namespace: - description: Namespace provides a scope for Names. Use of multiple namespaces is optional. - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta' - spec: - description: 'Spec defines the behavior of the Namespace. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceSpec' - status: - description: 'Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceStatus' - x-kubernetes-group-version-kind: - - group: "" - kind: Namespace - version: v1 - io.k8s.api.core.v1.NamespaceCondition: - description: NamespaceCondition contains details about state of namespace. - type: object - required: - - type - - status - properties: - lastTransitionTime: - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time' - message: - type: string - reason: - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - default: "" - type: - description: Type of namespace controller condition. - type: string - default: "" - io.k8s.api.core.v1.NamespaceList: - description: NamespaceList is a list of Namespaces. - type: object - required: - - items - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - items: - description: 'Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: array - items: - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.api.core.v1.Namespace' - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta' - x-kubernetes-group-version-kind: - - group: "" - kind: NamespaceList - version: v1 - io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta: - description: ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}. - type: object - properties: - continue: - description: continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. 
- type: string - remainingItemCount: - description: remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact. - type: integer - format: int64 - resourceVersion: - description: 'String that identifies the server''s internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - selfLink: - description: 'Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.' - type: string - io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry: - description: ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to. - type: object - properties: - apiVersion: - description: APIVersion defines the version of this resource that this field set applies to. The format is "group/version" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted. - type: string - fieldsType: - description: 'FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: "FieldsV1"' - type: string - fieldsV1: - description: FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1' - manager: - description: Manager is an identifier of the workflow managing these fields. - type: string - operation: - description: Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'. - type: string - subresource: - description: Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource. - type: string - time: - description: Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over. 
- allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time' - io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1: - description: |- - FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. - - Each key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set. - - The exact format is defined in sigs.k8s.io/structured-merge-diff - type: object - - io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta: - description: ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. - type: object - properties: - annotations: - description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations' - type: object - additionalProperties: - type: string - default: "" - creationTimestamp: - description: |- - CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. - - Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time' - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. - type: integer - format: int64 - deletionTimestamp: - description: |- - DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. 
- - Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time' - finalizers: - description: Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. - type: array - items: - type: string - default: "" - x-kubernetes-patch-strategy: merge - generateName: - description: |- - GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. - - If this field is specified and the generated name exists, the server will return a 409. - - Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency - type: string - generation: - description: A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. - type: integer - format: int64 - labels: - description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels' - type: object - additionalProperties: - type: string - default: "" - managedFields: - description: ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object. - type: array - items: - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry' - name: - description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' - type: string - namespace: - description: |- - Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. - - Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces - type: string - ownerReferences: - description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. - type: array - items: - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference' - x-kubernetes-patch-merge-key: uid - x-kubernetes-patch-strategy: merge - resourceVersion: - description: |- - An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. - - Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - selfLink: - description: 'Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.' - type: string - uid: - description: |- - UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. - - Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids - type: string - - io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference: - description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. - type: object - required: - - apiVersion - - kind - - name - - uid - properties: - apiVersion: - description: API version of the referent. - type: string - default: "" - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. - type: boolean - controller: - description: If true, this reference points to the managing controller. - type: boolean - kind: - description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - default: "" - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' - type: string - default: "" - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' - type: string - default: "" - x-kubernetes-map-type: atomic - - io.k8s.api.core.v1.NamespaceSpec: - description: NamespaceSpec describes the attributes on a Namespace. - type: object - properties: - finalizers: - description: 'Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/' - type: array - items: - type: string - default: "" - io.k8s.api.core.v1.NamespaceStatus: - description: NamespaceStatus is information about the current status of a Namespace. - type: object - properties: - conditions: - description: Represents the latest available observations of a namespace's current state. - type: array - items: - default: { } - allOf: - - $ref: '#/components/schemas/io.k8s.api.core.v1.NamespaceCondition' - x-kubernetes-patch-merge-key: type - x-kubernetes-patch-strategy: merge - phase: - description: |- - Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ - - Possible enum values: - - `"Active"` means the namespace is available for use in the system - - `"Terminating"` means the namespace is undergoing graceful termination - type: string - enum: - - Active - - Terminating - - securitySchemes: - BearerToken: - type: http - scheme: bearer - description: Bearer Token authentication diff --git a/escalation/src/main/resources/specs/mailtrap.yaml b/escalation/src/main/resources/specs/mailtrap.yaml deleted file mode 100644 index 79e4ba6..0000000 --- a/escalation/src/main/resources/specs/mailtrap.yaml +++ /dev/null @@ -1,79 +0,0 @@ -openapi: 3.0.3 -info: - title: The MailTrap API (https://api-docs.mailtrap.io/) - version: v2 -servers: - - url: "https://sandbox.api.mailtrap.io" -paths: - /api/send/{inbox_id}: - post: - summary: Send email - operationId: sendEmail - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SendMailDetails' - required: true - parameters: - - name: inbox_id - in: path - required: true - schema: - type: integer - responses: - "200": - description: all good - content: - application/json: - schema: - $ref: '#/components/schemas/SendMailResponse' - security: - - apiToken: [] -components: - schemas: - SendMailDetails: - type: object - properties: - to: - type: array - items: - type: object - properties: - email: - type: string - name: - type: string - from: - type: object - properties: - email: - type: string - name: - type: string - subject: - type: string - readOnly: true - html: - type: string - readOnly: true - text: - type: string - readOnly: true - additionalProperties: false - SendMailResponse: - type: object - properties: - success: - type: boolean - readOnly: true - message_ids: - type: array - items: - type: string - additionalProperties: false - securitySchemes: - apiToken: - type: apiKey - in: header - name: Api-Token diff --git a/escalation/src/main/resources/ticket-escalation-schema.json b/escalation/src/main/resources/ticket-escalation-schema.json deleted file mode 100644 index 5d91d47..0000000 --- 
a/escalation/src/main/resources/ticket-escalation-schema.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "namespace": { - "type": "string", - "description": "Name of the requested namespace" - }, - "manager": { - "type": "string", - "format": "email", - "description": "Email address of the escalation manager" - } - }, - "required": [ - "namespace", - "manager" - ] -} \ No newline at end of file diff --git a/escalation/src/main/resources/ticketEscalation.svg b/escalation/src/main/resources/ticketEscalation.svg deleted file mode 100644 index a86f477..0000000 --- a/escalation/src/main/resources/ticketEscalation.svg +++ /dev/null @@ -1 +0,0 @@ -StartCreateJiraIssueInitJiraBrowserInitTimerGetJiraIssueGetJiraIssueResultTicketDoneEscalateCreateK8sNamespa ce End(.jiraIssue.f... (.jiraIssue.f... (.jiraIssue.f... (.jiraIssue.f... \ No newline at end of file diff --git a/escalation/src/main/resources/ticketEscalation.sw.yaml b/escalation/src/main/resources/ticketEscalation.sw.yaml deleted file mode 100644 index 826024a..0000000 --- a/escalation/src/main/resources/ticketEscalation.sw.yaml +++ /dev/null @@ -1,123 +0,0 @@ -specVersion: "0.8" -id: ticket-escalation -name: Ticket escalation -annotations: - - "workflow-type/infrastructure" -version: 0.0.1 -timeouts: - workflowExecTimeout: PT24H -start: CreateJiraIssue -dataInputSchema: ticket-escalation-schema.json -functions: - - name: sendEmail - operation: 'specs/mailtrap.yaml#sendEmail' - - name: getJiraIssue - operation: specs/jira.yaml#getIssue - - name: createJiraIssue - operation: specs/jira.yaml#createIssue - - name: createK8sNamespace - operation: specs/kube.yaml#createCoreV1Namespace - - name: logInfo - type: custom - operation: "sysout:INFO" - -states: - - name: CreateJiraIssue - type: operation - actions: - - functionRef: - refName: createJiraIssue - arguments: - update: {} - fields: - summary: '"Request For New Namespace: " + .namespace' - issuetype: - id: "$SECRET.jira_issue_type" - project: - key: "$SECRET.jira_project" - actionDataFilter: - toStateData: .jiraIssue - transition: InitJiraBrowser - - name: InitJiraBrowser - type: operation - actions: [] - transition: InitTimer - stateDataFilter: - output: ". += { jiraBrowser: ((.jiraIssue.self | sub(\"rest/.*\"; \"browse/\")) + .jiraIssue.key) }" - - name: InitTimer - type: operation - actions: [] - stateDataFilter: - output: ". += { timer: { triggered: false, startTime: now(), elapsedSeconds: 0 }}" - transition: GetJiraIssue - - name: GetJiraIssue - type: operation - actions: - - functionRef: - refName: getJiraIssue - arguments: - issueIdOrKey: .jiraIssue.key - fields: status - actionDataFilter: - toStateData: .jiraIssue - sleep: - before: PT5S - # This is not working for now, waiting for a fix to https://issues.redhat.com/browse/KOGITO-9811 - # before: $SECRET.polling_periodicity - transition: UpdateTimer - - name: UpdateTimer - type: operation - actions: [] - transition: TicketDone - stateDataFilter: - output: ". 
+= { timer: { triggered: .timer.triggered, startTime: .timer.startTime, elapsedSeconds: now() - .timer.startTime }}" - - name: TicketDone - type: switch - dataConditions: - - condition: (.jiraIssue.fields.status.statusCategory.key == "done") - transition: - nextState: CreateK8sNamespace - - condition: (.jiraIssue.fields.status.statusCategory.key != "done" and .timer.triggered == false and .timer.elapsedSeconds > ($SECRET.timeout_seconds | tonumber)) - transition: - nextState: Escalate - defaultCondition: - transition: GetJiraIssue - - name: Escalate - type: operation - actions: - - name: "printAction" - functionRef: - refName: "logInfo" - arguments: - message: "\"Escalate is \\(.)\"" - - name: "sendEmail" - functionRef: - refName: sendEmail - arguments: - inbox_id: $SECRET.mailtrap_inbox_id | tonumber - to: - - email: .manager - name: "Escalation Manager" - from: - email: $SECRET.sender_email - name: "Escalation service" - subject: " \"Escalation ticket \" + .jiraIssue.key " - html: '"Please manage escalation ticket " + .jiraIssue.key + ""' - transition: GetJiraIssue - stateDataFilter: - output: ". += { timer: { triggered: true, startTime: .timer.startTime, elapsedSeconds: 0 }}" - - name: CreateK8sNamespace - type: operation - actions: - - functionRef: - refName: createK8sNamespace - arguments: - apiVersion: v1 - kind: Namespace - metadata: - name: ".namespace" - actionDataFilter: - toStateData: .createdNamespace - stateDataFilter: - output: "{createdNamespace: .createdNamespace}" - end: true diff --git a/m2k/README.md b/m2k/README.md deleted file mode 100644 index 933b7ab..0000000 --- a/m2k/README.md +++ /dev/null @@ -1,270 +0,0 @@ -# m2k Project -Google doc version: https://docs.google.com/document/d/1lN8KT5u9vYag4N2DBg3a_G0bKarunM4k5d-CmX2GzNk/edit -## Context -This workflow is using https://move2kube.konveyor.io/ to migrate the existing code contained in a git repository to a K8s/OCP platform. - -Once the transformation is over, move2kube provides a zip file containing the transformed repo. - -### Design diagram -![sequence_diagram.svg](sequence_diagram.jpg) -![design.svg](design.svg) - -### Workflow -![m2k.svg](serverless-workflow-m2k/src/main/resources/m2k.svg) -## Install -### Data Index and Jobs Service -Follow the [README.md](..%2Fdeployment%2Fkustomize%2FREADME.md) - -## Breakdown -1. (prequisites) Create local K8s cluster with Knative support: https://knative.dev/docs/install/quickstart-install/#install-the-knative-cli -2. Install Move2Kube on local K8s cluster: https://artifacthub.io/packages/helm/move2kube/move2kube/0.3.0?modal=install -3. Run Backstage with notification plugin -4. Deploy generated files: Knative broker, triggers and sonataflow workflow service -4. Deploy Knative functions - -### Deploy -First, let's create our namespace -```bash -kubectl create ns m2k -``` -Should output -``` -namespace/m2k created -``` - -#### 1. 
Move2Kube
-move2kube needs to have the ssh keys in the `.ssh` folder in order to be able to clone the git repository over ssh:
-```bash
-kubectl create secret generic sshkeys --from-file=id_rsa=${HOME}/.ssh/id_rsa --from-file=id_rsa.pub=${HOME}/.ssh/id_rsa.pub
-```
-To run properly, a move2kube instance must be running in the cluster, or at least reachable from the cluster:
-```bash
-kubectl apply -f k8s/move2kube.yaml
-```
-Should output
-```
-deployment.apps/move2kube created
-service/move2kube-svc created
-```
-
-You can access it locally with port-forward:
-```bash
-kubectl port-forward svc/move2kube-svc 8080:8080 &
-```
-
-By default, the Knative function will use `http://move2kube-svc.default.svc.cluster.local:8080/api/v1` as the host to reach the move2kube instance.
-You can override this value by setting the environment variable `MOVE2KUBE_API`.
-#### 2. Backstage with notification plugin
-The workflow sends notifications to Backstage, so you need a running instance with the notification plugin.
-
-To do so, clone this repo https://github.com/mareklibra/janus-idp-backstage-plugins and check out the branch `flpath560`.
-Then run
-```bash
-yarn start:backstage
-```
-Wait a few minutes for everything to be ready, then check if the notification plugin is working:
-```bash
-curl -XGET http://localhost:7007/api/notifications/notifications&user=
-```
-You should get an empty notifications list:
-```
-[]
-```
-
-#### 3. Generated files
-First, make sure the environment and minikube are correctly configured to use the `knative` profile:
-```bash
-minikube profile knative
-```
-Should output
-```
-✅ minikube profile was successfully set to knative
-```
-Then
-```bash
-eval $(minikube docker-env)
-```
-
-Since we need to use `initContainers` in our Knative services, we have to tell Knative to enable that feature:
-```bash
- kubectl patch configmap/config-features \
-   -n knative-serving \
-   --type merge \
-   -p '{"data":{"kubernetes.podspec-init-containers": "enabled"}}'
- ```
-
-Then generate the `broker` (and other workflow-related Knative resources) by running the following command from `m2k/serverless-workflow-m2k`:
-```bash
-cd serverless-workflow-m2k
-mvn clean package -Pknative
-```
-Now you can apply the generated manifests located in `serverless-workflow-m2k/target/kubernetes` (you should still be in the `serverless-workflow-m2k` directory):
-
-First, let's create the Knative service, service account and role bindings needed for the workflow:
-```bash
-kubectl -n m2k apply -f serverless-workflow-m2k/target/kubernetes/knative.yml
-```
-Should output
-```
-Warning: Kubernetes default value is insecure, Knative may default this to secure in a future release: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation, spec.template.spec.containers[0].securityContext.capabilities, spec.template.spec.containers[0].securityContext.runAsNonRoot, spec.template.spec.containers[0].securityContext.seccompProfile
-service.serving.knative.dev/serverless-workflow-m2k created
-rolebinding.rbac.authorization.k8s.io/serverless-workflow-m2k-view created
-serviceaccount/serverless-workflow-m2k created
-```
-And then the broker and the triggers:
-```bash
-kubectl -n m2k apply -f serverless-workflow-m2k/target/kubernetes/kogito.yml
-```
-Should output
-```
-trigger.eventing.knative.dev/error-event-type-trigger-serverless-workflow-m2k created
-trigger.eventing.knative.dev/transformation-saved-event-type-trigger-serverless-workflow-m2k created
-broker.eventing.knative.dev/default created
-```
-
-You also need to fill in the
environment variables needed:
-
-/!\ You may need to change those values, especially the `image` /!\
-```bash
-kubectl -n m2k patch ksvc serverless-workflow-m2k --type merge -p '{
-  "spec":{
-    "template":{
-      "spec":{
-        "containers":[
-          {
-            "name":"serverless-workflow-m2k",
-            "imagePullPolicy": "Always",
-            "image":"quay.io/orchestrator/serverless-workflow-m2k:2.0.0-SNAPSHOT",
-            "env":[
-              {
-                "name":"MOVE2KUBE_URL",
-                "value":"http://move2kube-svc.default.svc.cluster.local:8080"
-              },
-              {
-                "name":"BROKER_URL",
-                "value":"http://broker-ingress.knative-eventing.svc.cluster.local/m2k/default"
-              }
-            ]
-          }
-        ]
-      }
-    }
-  }
-}'
-```
-This will create a new deployment; you then need to scale the previous one down to 0:
-```bash
- kubectl -n m2k scale deployment --replicas=0 serverless-workflow-m2k-00001-deployment
-```
-
-The docker image should have been generated as well:
-```bash
-docker images | grep serverless
-```
-Should output
-```
-quay.io/orchestrator/serverless-workflow-m2k   cd2e0498ee70   4 minutes ago   487MB
-```
-#### 4. M2K Knative functions and GC
-* [m2k-service.yaml](k8s/m2k-service.yaml) will deploy the Knative service that will spin up the functions when an event is received
-* [m2k-trigger.yaml](k8s/m2k-trigger.yaml) will deploy the triggers for the expected events, which the Knative service subscribes to and relies on to get started
-* [knative-gc.yaml](k8s%2Fknative-gc.yaml) will set up the GC to keep only 3 revisions in the cluster
-
-As we are using ssh keys to interact with the git repo (i.e. BitBucket), similarly to what we did when deploying the `move2kube` instance, we need to create secrets in the `m2k` namespace containing the keys:
-```bash
-kubectl create -n m2k secret generic sshkeys --from-file=id_rsa=${HOME}/.ssh/id_rsa --from-file=id_rsa.pub=${HOME}/.ssh/id_rsa.pub
-```
-* From the root folder of the project, first create the Knative services:
-```bash
-kubectl -n m2k apply -f k8s/m2k-service.yaml
-```
-Should output
-```
-service.serving.knative.dev/m2k-save-transformation-func created
-```
-Next, the Knative Garbage Collector:
-```bash
-kubectl apply -f k8s/knative-gc.yaml
-```
-Should output
-```
-configmap/config-gc configured
-```
-Finally, the triggers:
-```bash
-kubectl -n m2k apply -f k8s/m2k-trigger.yaml
-```
-Should output
-```
-trigger.eventing.knative.dev/m2k-save-transformation-event created
-```
-You will notice that the environment variable `EXPORTED_FUNC` is set for each Knative service: this variable defines which function is exposed by the service.
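If you want to double-check which function a given Knative service exposes, one option (a minimal sketch; the exact jsonpath expression is an assumption, adjust it to your manifests) is to read the `EXPORTED_FUNC` value straight from the service spec:
```bash
# Hypothetical check: print the EXPORTED_FUNC value configured on the
# m2k-save-transformation-func Knative service (assumes the variable is set
# on the first user container, as in k8s/m2k-service.yaml).
kubectl -n m2k get ksvc m2k-save-transformation-func \
  -o jsonpath='{.spec.template.spec.containers[0].env[?(@.name=="EXPORTED_FUNC")].value}'
# Expected output: saveTransformation
```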
-
-You should have something similar to:
-```bash
-kubectl -n m2k get ksvc
-```
-```
-NAME                           URL                                                                LATESTCREATED                     LATESTREADY                       READY   REASON
-m2k-save-transformation-func   http://m2k-save-transformation-func.m2k.10.110.165.153.sslip.io   m2k-save-transformation-func-v1   m2k-save-transformation-func-v1   True
-serverless-workflow-m2k        http://serverless-workflow-m2k.m2k.10.110.165.153.sslip.io        serverless-workflow-m2k-00002     serverless-workflow-m2k-00002     True
-```
-### Use it
-You should send the following request from within the K8s cluster; to do so, you can, for instance, run:
-```bash
-kubectl run fedora --rm --image=fedora -i --tty -- bash
-```
-
-Request an execution by sending the following request:
-```bash
-curl -X POST -H 'Content-Type: application/json' serverless-workflow-m2k.m2k.svc.cluster.local/m2k -d '{
-"repo": "https://bitbucket.org/",
-"sourceBranch": "master",
-"targetBranch": "mk2-swf",
-"token": "",
-"workspaceId": "816fea47-84e6-43b4-81c8-9a7462cf9e1e",
-"projectId": "fc411095-4b3c-499e-8590-7ac09d89d5fc",
-"notification": {
-  "user": "",
-  "group": ""
-}
-}'
-```
-Response:
-```json
-{"id":"185fd483-e765-420d-91c6-5ff3fefa0b05","workflowdata":{}}
-```
-Then you can monitor the Knative function pods being created:
-```bash
-Every 2.0s: kubectl -n m2k get pods                                  fedora: Fri Oct 13 11:33:22 2023
-
-NAME                                                          READY   STATUS    RESTARTS   AGE
-m2k-save-transformation-func-v1-deployment-545dc45cfc-rsdls   2/2     Running   0          23s
-serverless-workflow-m2k-00002-deployment-58fb774d6c-xxwg2     2/2     Running   0          55s
-```
-
-Then you can check your Move2Kube instance and BitBucket repo to see the outcomes.
-
-After ~2 minutes, the serverless deployment will be scaled down if there is no activity.
-
-If the workflow was waiting for an event, once the event is triggered and received, the deployment will be scaled up and the workflow will resume its execution at the waiting state, processing the received event.
-
-If the timeout expires while the workflow is down, the deployment will be scaled up and the timeout will be processed, since the jobs service sends an event.
-
-#### Debug/Tips
-
-* By default, Knative services have an `imagePullPolicy` set to `IfNotPresent`.
You can override that by setting `registries-skipping-tag-resolving: quay.io` in the configmap `config-deployment` located in the `knative-serving` namespace - ```bash - kubectl patch configmap/config-deployment \ - -n knative-serving \ - --type merge \ - -p '{"data":{"registries-skipping-tag-resolving":"quay.io"}}' - ``` -* You can use the Integration tests `SaveTransformationFunctionIT` to debug the code -* If there is a `SinkBinding` generated you need to patch it as the namespace of the broker is not correctly set: -```bash -kubectl patch SinkBinding/sb-serverless-workflow-m2k \ - -n m2k \ - --type merge \ - -p '{"spec": {"sink": {"ref": {"namespace": "m2k"}}}}' -``` diff --git a/m2k/design.svg b/m2k/design.svg deleted file mode 100644 index ec182ed..0000000 --- a/m2k/design.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/m2k/k8s/knative-gc.yaml b/m2k/k8s/knative-gc.yaml deleted file mode 100644 index 5bf0807..0000000 --- a/m2k/k8s/knative-gc.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: config-gc - namespace: knative-serving -data: - retain-since-create-time: "disabled" - retain-since-last-active-time: "disabled" - min-non-active-revisions: "0" - max-non-active-revisions: "3" diff --git a/m2k/k8s/m2k-service.yaml b/m2k/k8s/m2k-service.yaml deleted file mode 100644 index 19d891d..0000000 --- a/m2k/k8s/m2k-service.yaml +++ /dev/null @@ -1,54 +0,0 @@ -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: m2k-save-transformation-func -spec: - template: - metadata: - name: m2k-save-transformation-func-v1 - spec: - initContainers: - - name: volume-mount-hack - image: busybox - command: [ "sh", "-c", "cp /root/.ssh/id_rsa /etc/pre-install/. && chown 185 /etc/pre-install/id_rsa" ] - volumeMounts: - - name: ssh-priv-key - mountPath: "/root/.ssh/id_rsa" - subPath: id_rsa - readOnly: true - - name: pre-install - mountPath: /etc/pre-install - containers: - - image: quay.io/orchestrator/m2k-kfunc:2.0.0-SNAPSHOT - imagePullPolicy: Always - env: - - name: EXPORTED_FUNC - value: saveTransformation - - name: SSH_PRIV_KEY_PATH - value: /home/jboss/.ssh/id_rsa - name: user-container - volumeMounts: - - name: pre-install - readOnly: true - mountPath: "/home/jboss/.ssh/id_rsa" - subPath: id_rsa - - name: ssh-pub-key - readOnly: true - mountPath: "/home/jboss/.ssh/id_rsa.pub" - subPath: id_rsa.pub - - readinessProbe: - successThreshold: 1 - tcpSocket: - port: 0 - volumes: - - name: ssh-priv-key - secret: - secretName: sshkeys - defaultMode: 384 - - name: ssh-pub-key - secret: - secretName: sshkeys - - name: pre-install - emptyDir: { } - diff --git a/m2k/k8s/m2k-trigger.yaml b/m2k/k8s/m2k-trigger.yaml deleted file mode 100644 index babd8fe..0000000 --- a/m2k/k8s/m2k-trigger.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: eventing.knative.dev/v1 -kind: Trigger -metadata: - name: m2k-save-transformation-event -spec: - broker: default - filter: - attributes: - type: save-transformation - subscriber: - ref: - apiVersion: serving.knative.dev/v1 - kind: Service - name: m2k-save-transformation-func ---- \ No newline at end of file diff --git a/m2k/k8s/move2kube.yaml b/m2k/k8s/move2kube.yaml deleted file mode 100644 index 6414e47..0000000 --- a/m2k/k8s/move2kube.yaml +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: move2kube -spec: - selector: - matchLabels: - app: move2kube - template: - metadata: - labels: - app: move2kube - spec: - containers: - - name: move2kube - image: 
quay.io/orchestrator/move2kube-ui:latest - ports: - - containerPort: 8080 - env: - - name: SSH_AUTH_SOCK - value: /tmp/unix-socket - volumeMounts: - - name: ssh-priv-key - readOnly: true - mountPath: "/root/.ssh/id_rsa" - subPath: id_rsa - - name: ssh-pub-key - readOnly: true - mountPath: "/root/.ssh/id_rsa.pub" - subPath: id_rsa.pub - lifecycle: - postStart: - exec: - command: [ "/bin/sh", "-c", "ssh-agent -a /tmp/unix-socket && ssh-add" ] - volumes: - - name: ssh-priv-key - secret: - secretName: sshkeys - defaultMode: 384 - - name: ssh-pub-key - secret: - secretName: sshkeys ---- -apiVersion: v1 -kind: Service -metadata: - name: move2kube-svc -spec: - ports: - - port: 8080 - protocol: TCP - selector: - app: move2kube diff --git a/m2k/m2k-func/.dockerignore b/m2k/m2k-func/.dockerignore deleted file mode 100644 index 94810d0..0000000 --- a/m2k/m2k-func/.dockerignore +++ /dev/null @@ -1,5 +0,0 @@ -* -!target/*-runner -!target/*-runner.jar -!target/lib/* -!target/quarkus-app/* \ No newline at end of file diff --git a/m2k/m2k-func/.gitignore b/m2k/m2k-func/.gitignore deleted file mode 100644 index 073fbed..0000000 --- a/m2k/m2k-func/.gitignore +++ /dev/null @@ -1,40 +0,0 @@ -#Maven -target/ -pom.xml.tag -pom.xml.releaseBackup -pom.xml.versionsBackup -release.properties - -# Eclipse -.project -.classpath -.settings/ -bin/ - -# IntelliJ -.idea -*.ipr -*.iml -*.iws - -# NetBeans -nb-configuration.xml - -# Visual Studio Code -.vscode -.factorypath - -# OSX -.DS_Store - -# Vim -*.swp -*.swo - -# patch -*.orig -*.rej - -# Local environment -.env -.mvn \ No newline at end of file diff --git a/m2k/m2k-func/README.md b/m2k/m2k-func/README.md deleted file mode 100644 index 52e296f..0000000 --- a/m2k/m2k-func/README.md +++ /dev/null @@ -1,171 +0,0 @@ -# m2k-kfunc Project -This projects implements the Knative functions that will interact with Move2Kube instance and Github in order to prepare and save the transformations. - -* SaveTransformationOutput: - * Triggered by the event `save-transformation` - * This function will first retrieve the transformation output archive from the Move2Kube project - * Then it will create a new branch based on the provided input in the provided BitBucket repo - * Finally, it will un-archive the previously downloaded file, commit the change and push them to BitBucket using the token if provided, otherwise the ssh keys will be used - * Will send events: - * `transformation_saved` if success - * `error` if any error -## Integration tests -Those function will be tested by the integration tests of the whole project. But you can run manual ones! - -To run the integration test you need to be sure to have -1. A running move2kube instance: - -```bash -docker run --rm -it -p 8080:8080 quay.io/konveyor/move2kube-ui -``` -2. To have access to BitBucket - - -## Install -First, the `move2kube` API client has to be generated: -```bash -cd move2kubeAPI -make install -``` - -Then, run the following command to execute tests and install the project: -```bash -mvn clean install -``` -## Build image -To build the image, run: -```bash - docker build -t quay.io/orchestrator/m2k-kfunc:2.0.0-SNAPSHOT -f src/main/docker/Dockerfile.jvm . 
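# Note: k8s/m2k-service.yaml pulls quay.io/orchestrator/m2k-kfunc:2.0.0-SNAPSHOT
# with imagePullPolicy: Always, so if you build the image yourself it likely
# also needs to be pushed to a registry the cluster can pull from, e.g.
# (assuming you have push access to that repository):
# docker push quay.io/orchestrator/m2k-kfunc:2.0.0-SNAPSHOT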
-```
-
-## Run it
-### Prerequisites
-Make sure you have a local K8s cluster with Knative installed on it, see https://knative.dev/docs/install/quickstart-install/#run-the-knative-quickstart-plugin
-
-Make sure you have a running instance of move2kube reachable from the cluster:
-```bash
-kubectl run fedora --rm --image=fedora -i --tty -- bash
-
-curl -XGET 
-```
-
-### Deploy Knative functions to cluster
-
-* [m2k-service.yaml](k8s/m2k-service.yaml) will deploy 2 kservices that will spin up the functions when an event is received
-* [m2k-trigger.yaml](k8s/m2k-trigger.yaml) will deploy the triggers in order to subscribe to the events used by the Knative services
-
-First, create the roles:
-```bash
-kubectl apply -f k8s/m2k-role.yaml
-```
-Should output
-```
-role.rbac.authorization.k8s.io/service-discovery-role created
-rolebinding.rbac.authorization.k8s.io/serverless-workflow-m2k-service-discovery-role created
-rolebinding.rbac.authorization.k8s.io/service-discovery-rolebinding created
-```
-Then the Knative services:
-```bash
-kubectl -n m2k apply -f k8s/m2k-service.yaml
-```
-Should output
-```
-service.serving.knative.dev/m2k-save-transformation-func created
-```
-Finally, the triggers:
-```bash
-kubectl -n m2k apply -f k8s/m2k-trigger.yaml
-```
-Should output
-```
-trigger.eventing.knative.dev/m2k-save-transformation-event created
-```
-You will notice that the environment variable `EXPORTED_FUNC` is set for each Knative service: this variable defines which function is exposed by the service.
-
-To run properly, a move2kube instance must be running in the cluster, or at least be reachable from the cluster:
-```bash
-kubectl apply -f k8s/move2kube.yaml
-```
-Should output
-```
-deployment.apps/move2kube created
-service/move2kube-svc created
-```
-
-You can access it locally with port-forward:
-```bash
-kubectl port-forward svc/move2kube-svc 8080:8080 &
-```
-
-By default, the Knative function will use `http://move2kube-svc.default.svc.cluster.local:8080/api/v1` as the host to reach the move2kube instance.
-You can override this value by setting the environment variable `MOVE2KUBE_API`.
-
-You should have something similar to:
-```bash
-kubectl -n m2k get ksvc
-```
-Should output
-```
-NAME                           URL                                                                LATESTCREATED                     LATESTREADY                       READY   REASON
-m2k-save-transformation-func   http://m2k-save-transformation-func.m2k.10.110.165.153.sslip.io   m2k-save-transformation-func-v1   m2k-save-transformation-func-v1   True
-```
-### Use it
-You should send the following requests from within the K8s cluster; to do so, you could run:
-```bash
-kubectl run fedora --rm --image=fedora -i --tty -- bash
-```
-
-1. Go to `http:///` and create a new workspace and a new project inside this workspace.
-
-2. Create a plan by uploading an archive (i.e. a zip file) containing a git repo (see https://move2kube.konveyor.io/tutorials/ui for more details)
-3. Then start the transformation.
-You should be asked to answer some questions; once this is done, the transformation output should be generated.
-
-4.
To save a transformation output, send the following request from a place that can reach the broker deployed in the cluster: -```bash -curl -v "http://broker-ingress.knative-eventing.svc.cluster.local/m2k/default"\ - -X POST\ - -H "Ce-Id: 1234"\ - -H "Ce-Specversion: 1.0"\ - -H "Ce-Type: save-transformation"\ - -H "Ce-Source: curl"\ - -H "Content-Type: application/json"\ - -d '{"gitRepo": "", - "branch": "", - "token": "", - "workspaceId": "", - "projectId": "", - "transformId": "", - "workflowCallerId": "" - }' -``` -You should see a new pod created for the save transformation service: - -```bash -kubectl get pods -n m2k -``` -Should output -``` -NAME READY STATUS RESTARTS AGE -m2k-save-transformation-func-v1-deployment-76859dc76-h7856 2/2 Running 0 6s -``` - -After few minutes, the pods will automatically scale down if no new event is received. - -The URL `http://broker-ingress.knative-eventing.svc.cluster.local/m2k/default` is formatted as follow: `http://broker-ingress.knative-eventing.svc.cluster.local//`. If you were to change the namespace or the name of the broker, the URL should be updated accordingly. - -To get this URL, run -```bash -kubectl get broker -n m2k -``` -Should output -``` -NAME URL AGE READY REASON -default http://broker-ingress.knative-eventing.svc.cluster.local/m2k/default 107s True -``` - -## Related Guides - -- Funqy HTTP Binding ([guide](https://quarkus.io/guides/funqy-http)): HTTP Binding for Quarkus Funqy framework - diff --git a/m2k/m2k-func/move2kubeAPI/.gitignore b/m2k/m2k-func/move2kubeAPI/.gitignore deleted file mode 100644 index 4dac805..0000000 --- a/m2k/m2k-func/move2kubeAPI/.gitignore +++ /dev/null @@ -1 +0,0 @@ -java-client \ No newline at end of file diff --git a/m2k/m2k-func/move2kubeAPI/Makefile b/m2k/m2k-func/move2kubeAPI/Makefile deleted file mode 100644 index 1f37b5b..0000000 --- a/m2k/m2k-func/move2kubeAPI/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -USER=$(shell id -u $(shell whoami)) -GROUP=$(shell id -g $(shell whoami)) -VERSION="2.0.0" - -download-openapi: - curl https://raw.githubusercontent.com/konveyor/move2kube-api/main/assets/openapi.json -o openapi.json - -generate: download-openapi - rm -rf java-client - docker run --rm -u $(USER):$(GROUP) -v $(PWD):/tmp -e GENERATE_PERMISSIONS=true openapitools/openapi-generator-cli generate -i /tmp/openapi.json -g java -o /tmp/java-client --invoker-package dev.parodos.move2kube --model-package dev.parodos.move2kube.client.model --api-package dev.parodos.move2kube.api --group-id dev.parodos --artifact-id move2kube --artifact-version v$(VERSION) --library apache-httpclient - -install: download-openapi generate - cd java-client && mvn clean install diff --git a/m2k/m2k-func/move2kubeAPI/openapi.json b/m2k/m2k-func/move2kubeAPI/openapi.json deleted file mode 100644 index a4ecb3d..0000000 --- a/m2k/m2k-func/move2kubeAPI/openapi.json +++ /dev/null @@ -1,2614 +0,0 @@ -{ - "openapi": "3.0.1", - "info": { - "title": "Move2Kube API", - "description": "This is a documentation of the Move2Kube REST API.\nAll API calls expect the `Authorization: Bearer ` HTTP header unless specified otherwise.\nThe access token can be obtained in the same way as OAuth 2.0 using the token endpoint in the admin section.\n", - "contact": { - "email": "move2kube-dev@googlegroups.com" - }, - "license": { - "name": "Apache 2.0", - "url": "http://www.apache.org/licenses/LICENSE-2.0.html" - }, - "version": "v1.0.0" - }, - "externalDocs": { - "description": "Find out more about Swagger", - "url": "http://swagger.io" - }, - "servers": [ - { 
- "url": "/api/v1" - } - ], - "tags": [ - { - "name": "move2kube", - "description": "Helps migrate your app to Kubernetes.", - "externalDocs": { - "description": "Find out more", - "url": "https://move2kube.konveyor.io/" - } - } - ], - "security": [ - { - "bearerAuth": [] - } - ], - "paths": { - "/token": { - "post": { - "security": [ - { - "basicAuth": [] - } - ], - "tags": [ - "admin" - ], - "summary": "Get an access token using client ID and client secret (for use with trusted clients).", - "description": "Get an access token using client ID and client secret (for use with trusted clients).", - "operationId": "get-tokens", - "requestBody": { - "description": "Use `grant_type=client_credentials` in the body and set the header \n`Authorization: Basic base64(client_id + \":\" + client_secret)`\n", - "content": { - "application/x-www-form-urlencoded": { - "schema": { - "type": "object", - "required": [ - "grant_type" - ], - "properties": { - "grant_type": { - "type": "string", - "enum": [ - "client_credentials" - ] - } - } - } - } - } - }, - "responses": { - "200": { - "$ref": "#/components/responses/Token" - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get the access token." - } - } - } - }, - "/support": { - "get": { - "tags": [ - "support" - ], - "summary": "Returns some support information like CLI, API and UI version info.", - "description": "Returns some support information like CLI, API and UI version info.", - "operationId": "get-support-info", - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "cli_version": { - "type": "string", - "description": "The version, commit hash, etc. of the Move2Kube CLI tool being used.", - "example": "version: v0.3.0+unreleased\ngitCommit: 0ccc6c4c6ea8ccd8fb8f999d37f81cdf0fdf22e6\ngitTreeState: clean\ngoVersion: go1.18.1\nplatform: darwin/amd64" - }, - "api_version": { - "type": "string", - "description": "The version, commit hash, etc. of the Move2Kube API server being used.", - "example": "version: v0.1.0+unreleased\ngitCommit: d21d2503e136fd85d5b166d5899d4058083cf0ce\ngitTreeState: clean\ngoVersion: go1.18.1\nplatform: darwin/amd64" - }, - "ui_version": { - "type": "string", - "description": "The version, commit hash, etc. of the Move2Kube UI website being used.", - "example": "unknown" - }, - "docker": { - "type": "string", - "description": "Whether the docker socket '/var/run/docker.sock' is mounted when running as a container.", - "example": "docker socket is not mounted" - } - } - } - } - } - } - } - } - }, - "/workspaces": { - "get": { - "tags": [ - "workspaces" - ], - "summary": "Get all the workspaces you have access to.", - "description": "Get all the workspaces you have access to.", - "operationId": "get-workspaces", - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Workspace" - } - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get all the workspaces." - } - } - }, - "post": { - "tags": [ - "workspaces" - ], - "summary": "Create a new workspace. 
The ID will be generated by the server.", - "description": "Create a new workspace. The ID will be generated by the server.", - "operationId": "create-workspace", - "requestBody": { - "description": "The metadata of the workspace.\nLeave the ID blank, it will be generated.\nLeave the projects blank, projects are managed through a different set of endpoints.\n", - "content": { - "application/json": { - "example": { - "name": "Team 1 Workspace", - "description": "The workspace team 1 uses." - }, - "schema": { - "$ref": "#/components/schemas/Workspace" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "Created.", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "ID of the new workspace.", - "example": "work-1234" - } - } - } - } - } - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to create a new workspace." - } - }, - "x-codegen-request-body-name": "body" - } - }, - "/workspaces/{workspace-id}": { - "get": { - "tags": [ - "workspaces" - ], - "summary": "Get the workspace with the given ID.", - "description": "Get the workspace with the given ID.", - "operationId": "get-workspace", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace to get.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Workspace" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get this workspace." - }, - "404": { - "description": "Workspace not found." - } - } - }, - "put": { - "tags": [ - "workspaces" - ], - "summary": "Update a workspace. The workspace will be created if it doesn't exist.", - "description": "Update a workspace. The workspace will be created if it doesn't exist.", - "operationId": "update-workspace", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace to update.", - "required": true, - "example": "work-1", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "requestBody": { - "description": "The metadata of the workspace.\nLeave the ID blank, it will be generated.\nLeave the projects blank, projects are managed through a different set of endpoints.\n", - "content": { - "application/json": { - "example": { - "name": "Team 1 Workspace. Update Name.", - "description": "The workspace team 1 uses. Updated description." - }, - "schema": { - "$ref": "#/components/schemas/Workspace" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "Created." - }, - "204": { - "description": "Updated." - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to create/update this workspace." - }, - "404": { - "description": "Workspace not found." 
- } - }, - "x-codegen-request-body-name": "body" - }, - "delete": { - "tags": [ - "workspaces" - ], - "summary": "Delete an existing workspace.", - "description": "Delete an existing workspace.", - "operationId": "delete-workspace", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace to delete.", - "required": true, - "example": "work-1", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "204": { - "description": "Deleted." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to delete this workspace." - }, - "404": { - "description": "Workspace not found." - } - } - } - }, - "/workspaces/{workspace-id}/inputs": { - "post": { - "tags": [ - "workspace-inputs" - ], - "summary": "Create a new input for this workspace. All the projects in this workspace will be able to use it. The ID will be generated by the server.", - "description": "Create a new input for this workspace. All the projects in this workspace will be able to use it. The ID will be generated by the server.", - "operationId": "create-workspace-input", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace to create the input in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "requestBody": { - "description": "The metadata of the workspace input.\nLeave the ID blank, it will be generated.\n", - "content": { - "multipart/form-data": { - "schema": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "sources", - "customizations", - "configs" - ], - "description": "The type of the input." - }, - "description": { - "type": "string", - "description": "A description for the input." - }, - "file": { - "type": "string", - "format": "binary", - "description": "The actual content of the input file." - } - } - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "Created.", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "ID of the new workspace input.", - "example": "work-input-1234" - } - } - } - } - } - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to create an input for this project." 
- } - }, - "x-codegen-request-body-name": "body" - } - }, - "/workspaces/{workspace-id}/inputs/{input-id}": { - "get": { - "tags": [ - "workspace-inputs" - ], - "summary": "Get the input of the project with the given ID.", - "description": "Get the input of the project with the given ID.", - "operationId": "get-workspace-input", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "input-id", - "in": "path", - "description": "ID of the input to get.", - "required": true, - "example": "work-input-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/octet-stream": { - "schema": { - "type": "string", - "format": "binary" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get this project input." - }, - "404": { - "description": "Workspace, project or input not found." - } - } - }, - "delete": { - "tags": [ - "workspace-inputs" - ], - "summary": "Delete the input of the project.", - "description": "Delete the input of the project.", - "operationId": "delete-workspace-input", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "input-id", - "in": "path", - "description": "ID of the input to delete.", - "required": true, - "example": "work-input-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "204": { - "description": "Deleted." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to delete this project input." - }, - "404": { - "description": "Workspace, project or input not found." - } - }, - "x-codegen-request-body-name": "body" - } - }, - "/workspaces/{workspace-id}/projects": { - "get": { - "tags": [ - "projects" - ], - "summary": "Get all the projects you have access to in this workspace.", - "description": "Get all the projects you have access to in this workspace.", - "operationId": "get-projects", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace to get the projects from.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Project" - } - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get all the projects in this workspace." - } - } - }, - "post": { - "tags": [ - "projects" - ], - "summary": "Create a new project in this workspace. The ID will be generated by the server.", - "description": "Create a new project in this workspace. 
The ID will be generated by the server.", - "operationId": "create-project", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace to create the project in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "requestBody": { - "description": "The metadata of the project.\nLeave the ID blank, it will be generated.\n", - "content": { - "application/json": { - "example": { - "name": "My Web App 1", - "description": "Project to transform my web app 1 to run on K8s." - }, - "schema": { - "$ref": "#/components/schemas/Project" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "Created.", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "ID of the new project.", - "example": "proj-1234" - } - } - } - } - } - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to create a new project in this workspace." - } - }, - "x-codegen-request-body-name": "body" - } - }, - "/workspaces/{workspace-id}/projects/{project-id}": { - "get": { - "tags": [ - "projects" - ], - "summary": "Get the project with the given ID.", - "description": "Get the project with the given ID.", - "operationId": "get-project", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to get.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Project" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get this project." - }, - "404": { - "description": "Workspace or project not found." - } - } - }, - "put": { - "tags": [ - "projects" - ], - "summary": "Update a project. The project will be created if it doesn't exist.", - "description": "Update a project. The project will be created if it doesn't exist.", - "operationId": "update-project", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to update.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "requestBody": { - "description": "The metadata of the project.\nLeave the ID blank.\n", - "content": { - "application/json": { - "example": { - "name": "My Web App 1. Updated Name.", - "description": "Project to transform my web app 1. Updated description." - }, - "schema": { - "$ref": "#/components/schemas/Project" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "Created." - }, - "204": { - "description": "Updated." 
- }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to create/update this project." - }, - "404": { - "description": "Workspace or project not found." - } - }, - "x-codegen-request-body-name": "body" - }, - "delete": { - "tags": [ - "projects" - ], - "summary": "Delete an existing workspace.", - "description": "Delete an existing workspace.", - "operationId": "delete-project", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to delete.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "204": { - "description": "Deleted." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to delete this project." - }, - "404": { - "description": "Workspace or project not found." - } - } - } - }, - "/workspaces/{workspace-id}/projects/{project-id}/inputs": { - "post": { - "tags": [ - "project-inputs" - ], - "summary": "Create a new input for this project. The ID will be generated by the server.", - "description": "Create a new input for this project. The ID will be generated by the server.", - "operationId": "create-project-input", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to create the input in.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "requestBody": { - "description": "The metadata of the project input.\nLeave the ID blank, it will be generated.\n", - "content": { - "multipart/form-data": { - "schema": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "sources", - "customizations", - "configs", - "reference" - ], - "description": "The type of the input." - }, - "id": { - "type": "string", - "description": "If the input is of type 'reference', then this field indicates the id of the workspace input that it is referencing." - }, - "description": { - "type": "string", - "description": "A description for the input." - }, - "file": { - "type": "string", - "format": "binary", - "description": "The actual content of the input file." - } - } - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "Created.", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "ID of the new project input.", - "example": "proj-input-1234" - } - } - } - } - } - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to create an input for this project." 
- } - }, - "x-codegen-request-body-name": "body" - } - }, - "/workspaces/{workspace-id}/projects/{project-id}/inputs/{input-id}": { - "get": { - "tags": [ - "project-inputs" - ], - "summary": "Get the input of the project with the given ID.", - "description": "Get the input of the project with the given ID.", - "operationId": "get-project-input", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project the input is in.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "input-id", - "in": "path", - "description": "ID of the input to get.", - "required": true, - "example": "proj-input-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/octet-stream": { - "schema": { - "type": "string", - "format": "binary" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get this project input." - }, - "404": { - "description": "Workspace, project or input not found." - } - } - }, - "delete": { - "tags": [ - "project-inputs" - ], - "summary": "Delete the input of the project.", - "description": "Delete the input of the project.", - "operationId": "delete-project-input", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project the input is in.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "input-id", - "in": "path", - "description": "ID of the input to delete.", - "required": true, - "example": "proj-input-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "204": { - "description": "Deleted." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to delete this project input." - }, - "404": { - "description": "Workspace, project or input not found." 
- } - }, - "x-codegen-request-body-name": "body" - } - }, - "/workspaces/{workspace-id}/projects/{project-id}/plan": { - "post": { - "tags": [ - "plan" - ], - "summary": "Start planning on this project's inputs.", - "description": "Start planning on this project's inputs.", - "operationId": "start-planning", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to start planning in.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "remote-source", - "in": "query", - "description": "Remote source git URL from which get the source files.", - "required": false, - "example": "git+https://github.com/konveyor/move2kube", - "schema": { - "$ref": "#/components/schemas/RemoteSource" - } - } - ], - "responses": { - "202": { - "description": "Accepted." - }, - "400": { - "description": "Invalid format or validation error." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to start planning for this project." - }, - "404": { - "description": "Workspace or project not found." - } - }, - "x-codegen-request-body-name": "body" - }, - "get": { - "tags": [ - "plan" - ], - "summary": "Get the plan file.", - "description": "Get the plan file.", - "operationId": "get-plan", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to get the plan from.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Accepted.", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "plan": { - "type": "string", - "description": "The plan file in YAML format." - } - } - } - } - } - }, - "400": { - "description": "Invalid format or validation error." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to start planning for this project." - }, - "404": { - "description": "Workspace or project not found." - }, - "409": { - "description": "Conflict, because planning is already on-going for this project." 
- } - }, - "x-codegen-request-body-name": "body" - }, - "put": { - "tags": [ - "plan" - ], - "summary": "Update the plan for this project.", - "description": "Update the plan for this project.", - "operationId": "update-plan", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to update the plan for.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "plan": { - "type": "string", - "description": "The new plan file to use for this project." - } - } - } - } - } - }, - "responses": { - "204": { - "description": "Accepted." - }, - "400": { - "description": "Invalid format or validation error." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to update the plan for this project." - }, - "404": { - "description": "Workspace or project not found." - } - }, - "x-codegen-request-body-name": "body" - }, - "delete": { - "tags": [ - "plan" - ], - "summary": "Delete the current plan for the project.", - "description": "Delete the current plan for the project.", - "operationId": "delete-plan", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to delete the plan from.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "204": { - "description": "Deleted." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to delete the plan for this project." - }, - "404": { - "description": "Workspace or project not found." - } - } - } - }, - "/workspaces/{workspace-id}/projects/{project-id}/outputs": { - "post": { - "tags": [ - "project-outputs" - ], - "summary": "Start transformation for this project. Planning must be completed before this.", - "description": "Start transformation for this project. Planning must be completed before this.", - "operationId": "start-transformation", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to start the transformation for.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "skip-qa", - "in": "query", - "description": "Boolean to skip interactive QA.", - "required": false, - "example": "true", - "schema": { - "type": "boolean" - } - } - ], - "requestBody": { - "description": "A plan to use for the transformation. (Not required).\n", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "plan": { - "type": "string", - "description": "A plan to use for the transformation. (Not required)." 
- } - } - } - } - }, - "required": false - }, - "responses": { - "202": { - "description": "Accept", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "ID of the new project output.", - "example": "proj-output-1234" - }, - "name": { - "type": "string", - "description": "Name of the project output." - }, - "description": { - "type": "string", - "description": "Description of the project output." - }, - "timestamp": { - "type": "string", - "format": "date-time" - }, - "status": { - "type": "string", - "description": "The status of the transformation.", - "enum": [ - "transforming", - "done", - "error" - ] - } - } - } - } - } - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to start transformation for this project." - }, - "404": { - "description": "Workspace or project not found." - } - }, - "x-codegen-request-body-name": "body" - } - }, - "/workspaces/{workspace-id}/projects/{project-id}/outputs/{output-id}": { - "get": { - "tags": [ - "project-outputs" - ], - "summary": "Get the output of the project with the given ID.", - "description": "Get the output of the project with the given ID.", - "operationId": "get-project-output", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project the output is in.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "output-id", - "in": "path", - "description": "ID of the output to get.", - "required": true, - "example": "proj-output-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/octet-stream": { - "schema": { - "type": "string", - "format": "binary" - } - } - } - }, - "204": { - "description": "The transformation is still on-going." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get this project output." - }, - "404": { - "description": "Workspace, project or output not found." - } - } - }, - "delete": { - "tags": [ - "project-outputs" - ], - "summary": "Delete the output of the project.", - "description": "Delete the output of the project.", - "operationId": "delete-project-output", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project the output is in.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "output-id", - "in": "path", - "description": "ID of the output to delete.", - "required": true, - "example": "proj-output-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "204": { - "description": "Deleted." 
- }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to delete this project output." - }, - "404": { - "description": "Workspace, project or output not found." - } - }, - "x-codegen-request-body-name": "body" - } - }, - "/workspaces/{workspace-id}/projects/{project-id}/outputs/{output-id}/graph": { - "get": { - "tags": [ - "project-output-graphs" - ], - "summary": "Get the graph of the transformers used while creating the output with the given ID.", - "description": "Get the graph of the transformers used while creating the output with the given ID.", - "operationId": "get-project-output-graph", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project the output is in.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "output-id", - "in": "path", - "description": "ID of the output whose graph we should get.", - "required": true, - "example": "proj-output-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "type": "object" - } - } - } - }, - "204": { - "description": "The transformation is still on-going." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get this project output." - }, - "404": { - "description": "Workspace, project or output not found." - } - } - } - }, - "/workspaces/{workspace-id}/projects/{project-id}/outputs/{output-id}/problems/current": { - "get": { - "tags": [ - "qa" - ], - "summary": "Get the current question that needs to be answered for the transformation to proceed.", - "description": "Get the current question that needs to be answered for the transformation to proceed.", - "operationId": "get-current-question", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project the output is in.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "output-id", - "in": "path", - "description": "ID of the output whose transformation is on-going.", - "required": true, - "example": "proj-output-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "question": { - "type": "string", - "description": "A JSON encoded string of the question object." - } - } - } - } - } - }, - "204": { - "description": "All questions have finished." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get this project output." - }, - "404": { - "description": "Workspace, project or output not found. Might also be returned once the transformation has finished." 
- } - } - } - }, - "/workspaces/{workspace-id}/projects/{project-id}/outputs/{output-id}/problems/current/solution": { - "post": { - "tags": [ - "qa" - ], - "summary": "Post the answer to the current question for an on-going transformation given by the ID.", - "description": "Post the answer to the current question for an on-going transformation given by the ID.", - "operationId": "post-answer-to-question", - "parameters": [ - { - "name": "workspace-id", - "in": "path", - "description": "ID of the workspace the project is in.", - "required": true, - "example": "work-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "project-id", - "in": "path", - "description": "ID of the project to start the transformation for.", - "required": true, - "example": "proj-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "output-id", - "in": "path", - "description": "ID of the output whose transformation is on-going.", - "required": true, - "example": "proj-output-1234", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "requestBody": { - "description": "A plan to use for the transformation. (Not required).\n", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "solution": { - "type": "string", - "description": "A JSON encoded string containing the answer object." - } - } - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "Answer was accepted" - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to post an answer to the question for this project output." - }, - "404": { - "description": "Workspace, project, or output not found." - } - }, - "x-codegen-request-body-name": "body" - } - }, - "/roles": { - "get": { - "tags": [ - "roles" - ], - "summary": "Get all the roles.", - "description": "Get all the roles.", - "operationId": "get-roles", - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Role" - } - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get all the roles." - } - } - }, - "post": { - "tags": [ - "roles" - ], - "summary": "Create a new role.", - "description": "Create a new role.", - "operationId": "create-role", - "requestBody": { - "description": "The metadata of the role. Leave the ID blank, it will be generated.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Role" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "Created.", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "ID of the new role.", - "example": "role-1234" - } - } - } - } - } - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to create a new role." 
- } - }, - "x-codegen-request-body-name": "body" - } - }, - "/roles/{role-id}": { - "get": { - "tags": [ - "roles" - ], - "summary": "Get the role with the given ID.", - "description": "Get the role with the given ID.", - "operationId": "get-role", - "parameters": [ - { - "name": "role-id", - "in": "path", - "description": "ID of the role to get.", - "required": true, - "example": "team-7", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Role" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get this role." - }, - "404": { - "description": "Role not found." - } - } - }, - "put": { - "tags": [ - "roles" - ], - "summary": "Update a role. The role will be created if it doesn't exist.", - "description": "Update a role. The role will be created if it doesn't exist.", - "operationId": "update-role", - "parameters": [ - { - "name": "role-id", - "in": "path", - "description": "ID of the role to update.", - "required": true, - "example": "team-1", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "requestBody": { - "description": "The metadata of the role.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Role" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "Created." - }, - "204": { - "description": "Updated." - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to create/update this role." - }, - "404": { - "description": "Role not found." - } - }, - "x-codegen-request-body-name": "body" - }, - "delete": { - "tags": [ - "roles" - ], - "summary": "Delete an existing role", - "description": "Delete an existing role.", - "operationId": "delete-role", - "parameters": [ - { - "name": "role-id", - "in": "path", - "description": "ID of the role to delete.", - "required": true, - "example": "team-1", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "204": { - "description": "Deleted." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to delete this role." - }, - "404": { - "description": "Role not found." 
- } - } - } - }, - "/idps/{idp-id}/users/{user-id}/roles": { - "get": { - "tags": [ - "role-bindings" - ], - "summary": "Get all the roles for the given user.", - "description": "Get all the roles for the given user.", - "operationId": "get-roles-of-user", - "parameters": [ - { - "name": "idp-id", - "in": "path", - "description": "ID of the identity provider.", - "required": true, - "example": "idp-1", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "user-id", - "in": "path", - "description": "ID of the user as given by the identity provider.", - "required": true, - "example": "user-1", - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Success.", - "content": { - "application/json": { - "schema": { - "type": "array", - "description": "List of role IDs assigned to the user.", - "example": [ - "role-1", - "role-2" - ], - "items": { - "type": "string" - } - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to get the roles for this user." - }, - "404": { - "description": "User not found." - } - } - }, - "patch": { - "tags": [ - "role-bindings" - ], - "summary": "Update the roles of the given user.", - "description": "Update the roles of the given user.", - "operationId": "update-roles-of-user", - "parameters": [ - { - "name": "idp-id", - "in": "path", - "description": "ID of the identity provider.", - "required": true, - "example": "idp-1", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "user-id", - "in": "path", - "description": "ID of the user as given by the identity provider.", - "required": true, - "example": "user-1", - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "description": "Add, remove or overwrite the roles of the user.", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "op": { - "type": "string", - "description": "add: add these roles to the existing roles the user has. \nremove: remove these roles from the existing roles the user has. \noverwrite: completely overwrite the existing roles the user has with these roles.\n", - "enum": [ - "add", - "remove", - "overwrite" - ] - }, - "roles": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of role IDs", - "example": [ - "role-1", - "role-2" - ] - } - }, - "items": { - "type": "string" - } - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "Success." - }, - "400": { - "description": "Invalid format or validation error.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to update the roles for this user." - }, - "404": { - "description": "User not found." 
- } - }, - "x-codegen-request-body-name": "body" - } - }, - "/idps/{idp-id}/users/{user-id}/roles/{role-id}": { - "put": { - "tags": [ - "role-bindings" - ], - "summary": "Add a role to a user.", - "description": "Add a role to a user.", - "operationId": "add-role-to-user", - "parameters": [ - { - "name": "idp-id", - "in": "path", - "description": "ID of the identity provider.", - "required": true, - "example": "idp-1", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "user-id", - "in": "path", - "description": "ID of the user as given by the identity provider.", - "required": true, - "example": "user-1", - "schema": { - "type": "string" - } - }, - { - "name": "role-id", - "in": "path", - "description": "ID of the role to add to the user.", - "required": true, - "example": "team-7", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "201": { - "description": "Created." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to add this role to this user." - }, - "404": { - "description": "User not found." - } - } - }, - "delete": { - "tags": [ - "role-bindings" - ], - "summary": "Remove a role from a user.", - "description": "Remove a role from a user.", - "operationId": "remove-role-from-user", - "parameters": [ - { - "name": "idp-id", - "in": "path", - "description": "ID of the identity provider.", - "required": true, - "example": "idp-1", - "schema": { - "$ref": "#/components/schemas/ID" - } - }, - { - "name": "user-id", - "in": "path", - "description": "ID of the user as given by the identity provider.", - "required": true, - "example": "user-1", - "schema": { - "type": "string" - } - }, - { - "name": "role-id", - "in": "path", - "description": "ID of the role to remove from the user.", - "required": true, - "example": "team-7", - "schema": { - "$ref": "#/components/schemas/ID" - } - } - ], - "responses": { - "204": { - "description": "Deleted." - }, - "401": { - "$ref": "#/components/responses/UnauthorizedError" - }, - "403": { - "description": "Don't have authorization to remove this role from this user." - }, - "404": { - "description": "User not found." 
- } - } - } - } - }, - "components": { - "securitySchemes": { - "basicAuth": { - "type": "http", - "scheme": "basic" - }, - "bearerAuth": { - "type": "http", - "scheme": "bearer" - } - }, - "responses": { - "UnauthorizedError": { - "description": "Authorization header is missing or invalid.", - "headers": { - "WWW_Authenticate": { - "schema": { - "type": "string" - } - } - } - }, - "Token": { - "description": "The access token.", - "headers": { - "Cache-Control": { - "schema": { - "type": "string", - "enum": [ - "no-store" - ] - } - }, - "Pragma": { - "schema": { - "type": "string", - "enum": [ - "no-cache" - ] - } - } - }, - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "access_token": { - "type": "string", - "example": "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk" - }, - "token_type": { - "type": "string", - "example": "example" - }, - "expires_in": { - "type": "number", - "example": 3600 - } - } - } - } - } - } - }, - "schemas": { - "ID": { - "pattern": "^[a-zA-Z0-9-_]+$", - "type": "string", - "description": "A unique ID.", - "example": "id-1234" - }, - "RemoteSource": { - "pattern": "^git[+](https|ssh)://[a-zA-Z0-9]+([-.]{1}[a-zA-Z0-9]+)*[.][a-zA-Z]{2,5}(:[0-9]{1,5})?(\/.*)?$", - "type": "string", - "description": "A git URL.", - "example": "git+https://github.com/konveyor/move2kube" - }, - "Error": { - "required": [ - "error" - ], - "type": "object", - "properties": { - "error": { - "type": "object", - "required": [ - "description" - ], - "properties": { - "description": { - "type": "string", - "description": "A human readable error message.", - "example": "failed to create the workspace. Error: ..." - } - } - } - } - }, - "Project": { - "required": [ - "id", - "name" - ], - "type": "object", - "properties": { - "id": { - "type": "object", - "description": "A unique ID for the project.", - "example": "proj-1234", - "allOf": [ - { - "$ref": "#/components/schemas/ID" - } - ] - }, - "name": { - "type": "string", - "description": "A human readable name for the project.", - "example": "Project 23" - }, - "timestamp": { - "type": "string", - "format": "date-time" - }, - "outputs": { - "type": "object", - "additionalProperties": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "timestamp": { - "type": "string", - "format": "date-time" - }, - "status": { - "type": "string" - } - } - } - }, - "status": { - "type": "object", - "properties": { - "plan": { - "type": "boolean" - }, - "plan_error": { - "type": "boolean" - }, - "planning": { - "type": "boolean" - }, - "reference": { - "type": "boolean" - }, - "stale_plan": { - "type": "boolean" - }, - "sources": { - "type": "boolean" - }, - "outputs": { - "type": "boolean" - } - } - }, - "inputs": { - "type": "object", - "additionalProperties": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "timestamp": { - "type": "string", - "format": "date-time" - }, - "type": { - "type": "string" - }, - "normalized_name": { - "type": "string" - } - } - } - }, - "description": { - "type": "string", - "description": "A description about the project.", - "example": "This is one of the projects that team 1 is working on." 
- } - } - }, - "Workspace": { - "required": [ - "id", - "name" - ], - "type": "object", - "properties": { - "id": { - "type": "object", - "description": "A unique ID for the workspace.", - "example": "work-1234", - "allOf": [ - { - "$ref": "#/components/schemas/ID" - } - ] - }, - "name": { - "type": "string", - "description": "A human readable name for the workspace.", - "example": "Team 1 Workspace" - }, - "timestamp": { - "type": "string", - "format": "date-time" - }, - "project_ids": { - "type": "array", - "items": { - "type": "string" - } - }, - "inputs": { - "type": "object", - "additionalProperties": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "timestamp": { - "type": "string", - "format": "date-time" - }, - "type": { - "type": "string" - }, - "normalized_name": { - "type": "string" - } - } - } - }, - "description": { - "type": "string", - "description": "A description about the workspace.", - "example": "This is the workspace for all the projects of team 1." - }, - "projects": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Project" - } - } - } - }, - "Resource": { - "type": "string", - "description": "A resource is a URL path.", - "example": "/workspaces/work-1234/projects/proj-42" - }, - "Role": { - "required": [ - "id", - "name" - ], - "type": "object", - "properties": { - "id": { - "type": "object", - "description": "A unique ID for the role.", - "example": "team-1", - "allOf": [ - { - "$ref": "#/components/schemas/ID" - } - ] - }, - "name": { - "type": "string", - "description": "A human readable name for the role.", - "example": "Team 1" - }, - "description": { - "type": "string", - "description": "A description about the role", - "example": "A member of team 1." - }, - "rules": { - "type": "array", - "description": "The list of rules to apply for this role.\n", - "example": [ - { - "resources": [ - "/workspaces/work-7/.+", - "/workspaces/work-42/projects/.*", - "/workspaces/work-123/projects/proj-2" - ], - "verbs": [ - "all" - ] - }, - { - "resources": [ - "/workspaces/work-1234" - ], - "verbs": [ - "create-project", - "delete-project" - ] - } - ], - "items": { - "required": [ - "resources", - "verbs" - ], - "type": "object", - "properties": { - "resources": { - "type": "array", - "description": "List of resources. The elements of this list are Javascript ES6 Regex patterns.\nWhen a request for a protected resource is received these regexs are used to\nmatch against the resource URL.\n", - "items": { - "$ref": "#/components/schemas/Resource" - } - }, - "verbs": { - "type": "array", - "description": "List of allowed verbs. \nFor now the only supported verb is `all` which allows all actions on the resource.\n", - "items": { - "type": "string", - "example": "all" - } - } - }, - "description": "A rule is a list of resources and the list of allowed verbs for those resources." - } - } - } - } - } - } -} diff --git a/m2k/m2k-func/mvnw b/m2k/m2k-func/mvnw deleted file mode 100755 index 8a8fb22..0000000 --- a/m2k/m2k-func/mvnw +++ /dev/null @@ -1,316 +0,0 @@ -#!/bin/sh -# ---------------------------------------------------------------------------- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# ---------------------------------------------------------------------------- - -# ---------------------------------------------------------------------------- -# Maven Start Up Batch script -# -# Required ENV vars: -# ------------------ -# JAVA_HOME - location of a JDK home dir -# -# Optional ENV vars -# ----------------- -# M2_HOME - location of maven2's installed home dir -# MAVEN_OPTS - parameters passed to the Java VM when running Maven -# e.g. to debug Maven itself, use -# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -# MAVEN_SKIP_RC - flag to disable loading of mavenrc files -# ---------------------------------------------------------------------------- - -if [ -z "$MAVEN_SKIP_RC" ] ; then - - if [ -f /usr/local/etc/mavenrc ] ; then - . /usr/local/etc/mavenrc - fi - - if [ -f /etc/mavenrc ] ; then - . /etc/mavenrc - fi - - if [ -f "$HOME/.mavenrc" ] ; then - . "$HOME/.mavenrc" - fi - -fi - -# OS specific support. $var _must_ be set to either true or false. -cygwin=false; -darwin=false; -mingw=false -case "`uname`" in - CYGWIN*) cygwin=true ;; - MINGW*) mingw=true;; - Darwin*) darwin=true - # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home - # See https://developer.apple.com/library/mac/qa/qa1170/_index.html - if [ -z "$JAVA_HOME" ]; then - if [ -x "/usr/libexec/java_home" ]; then - export JAVA_HOME="`/usr/libexec/java_home`" - else - export JAVA_HOME="/Library/Java/Home" - fi - fi - ;; -esac - -if [ -z "$JAVA_HOME" ] ; then - if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=`java-config --jre-home` - fi -fi - -if [ -z "$M2_HOME" ] ; then - ## resolve links - $0 may be a link to maven's home - PRG="$0" - - # need this for relative symlinks - while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG="`dirname "$PRG"`/$link" - fi - done - - saveddir=`pwd` - - M2_HOME=`dirname "$PRG"`/.. - - # make it fully qualified - M2_HOME=`cd "$M2_HOME" && pwd` - - cd "$saveddir" - # echo Using m2 at $M2_HOME -fi - -# For Cygwin, ensure paths are in UNIX format before anything is touched -if $cygwin ; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --unix "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --unix "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --unix "$CLASSPATH"` -fi - -# For Mingw, ensure paths are in UNIX format before anything is touched -if $mingw ; then - [ -n "$M2_HOME" ] && - M2_HOME="`(cd "$M2_HOME"; pwd)`" - [ -n "$JAVA_HOME" ] && - JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" -fi - -if [ -z "$JAVA_HOME" ]; then - javaExecutable="`which javac`" - if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then - # readlink(1) is not available as standard on Solaris 10. - readLink=`which readlink` - if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then - if $darwin ; then - javaHome="`dirname \"$javaExecutable\"`" - javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" - else - javaExecutable="`readlink -f \"$javaExecutable\"`" - fi - javaHome="`dirname \"$javaExecutable\"`" - javaHome=`expr "$javaHome" : '\(.*\)/bin'` - JAVA_HOME="$javaHome" - export JAVA_HOME - fi - fi -fi - -if [ -z "$JAVACMD" ] ; then - if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - else - JAVACMD="`\\unset -f command; \\command -v java`" - fi -fi - -if [ ! -x "$JAVACMD" ] ; then - echo "Error: JAVA_HOME is not defined correctly." >&2 - echo " We cannot execute $JAVACMD" >&2 - exit 1 -fi - -if [ -z "$JAVA_HOME" ] ; then - echo "Warning: JAVA_HOME environment variable is not set." -fi - -CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher - -# traverses directory structure from process work directory to filesystem root -# first directory with .mvn subdirectory is considered project base directory -find_maven_basedir() { - - if [ -z "$1" ] - then - echo "Path not specified to find_maven_basedir" - return 1 - fi - - basedir="$1" - wdir="$1" - while [ "$wdir" != '/' ] ; do - if [ -d "$wdir"/.mvn ] ; then - basedir=$wdir - break - fi - # workaround for JBEAP-8937 (on Solaris 10/Sparc) - if [ -d "${wdir}" ]; then - wdir=`cd "$wdir/.."; pwd` - fi - # end of workaround - done - echo "${basedir}" -} - -# concatenates all lines of a file -concat_lines() { - if [ -f "$1" ]; then - echo "$(tr -s '\n' ' ' < "$1")" - fi -} - -BASE_DIR=`find_maven_basedir "$(pwd)"` -if [ -z "$BASE_DIR" ]; then - exit 1; -fi - -########################################################################################## -# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -# This allows using the maven wrapper in projects that prohibit checking in binary data. -########################################################################################## -if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found .mvn/wrapper/maven-wrapper.jar" - fi -else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." - fi - if [ -n "$MVNW_REPOURL" ]; then - jarUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar" - else - jarUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar" - fi - while IFS="=" read key value; do - case "$key" in (wrapperUrl) jarUrl="$value"; break ;; - esac - done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" - if [ "$MVNW_VERBOSE" = true ]; then - echo "Downloading from: $jarUrl" - fi - wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" - if $cygwin; then - wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` - fi - - if command -v wget > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found wget ... using wget" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - wget "$jarUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" - else - wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" - fi - elif command -v curl > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found curl ... 
using curl" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - curl -o "$wrapperJarPath" "$jarUrl" -f - else - curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f - fi - - else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Falling back to using Java to download" - fi - javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" - # For Cygwin, switch paths to Windows format before running javac - if $cygwin; then - javaClass=`cygpath --path --windows "$javaClass"` - fi - if [ -e "$javaClass" ]; then - if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Compiling MavenWrapperDownloader.java ..." - fi - # Compiling the Java class - ("$JAVA_HOME/bin/javac" "$javaClass") - fi - if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - # Running the downloader - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Running MavenWrapperDownloader.java ..." - fi - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") - fi - fi - fi -fi -########################################################################################## -# End of extension -########################################################################################## - -export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} -if [ "$MVNW_VERBOSE" = true ]; then - echo $MAVEN_PROJECTBASEDIR -fi -MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" - -# For Cygwin, switch paths to Windows format before running java -if $cygwin; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --path --windows "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --windows "$CLASSPATH"` - [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` -fi - -# Provide a "standardized" way to retrieve the CLI args that will -# work with both Windows and non-Windows executions. -MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" -export MAVEN_CMD_LINE_ARGS - -WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -exec "$JAVACMD" \ - $MAVEN_OPTS \ - $MAVEN_DEBUG_OPTS \ - -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.home=${M2_HOME}" \ - "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ - ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/m2k/m2k-func/mvnw.cmd b/m2k/m2k-func/mvnw.cmd deleted file mode 100644 index 1d8ab01..0000000 --- a/m2k/m2k-func/mvnw.cmd +++ /dev/null @@ -1,188 +0,0 @@ -@REM ---------------------------------------------------------------------------- -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM https://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. 
-@REM ---------------------------------------------------------------------------- - -@REM ---------------------------------------------------------------------------- -@REM Maven Start Up Batch script -@REM -@REM Required ENV vars: -@REM JAVA_HOME - location of a JDK home dir -@REM -@REM Optional ENV vars -@REM M2_HOME - location of maven2's installed home dir -@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending -@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven -@REM e.g. to debug Maven itself, use -@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files -@REM ---------------------------------------------------------------------------- - -@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' -@echo off -@REM set title of command window -title %0 -@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' -@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% - -@REM set %HOME% to equivalent of $HOME -if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") - -@REM Execute a user defined script before this one -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre -@REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* -if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* -:skipRcPre - -@setlocal - -set ERROR_CODE=0 - -@REM To isolate internal variables from possible post scripts, we use another setlocal -@setlocal - -@REM ==== START VALIDATION ==== -if not "%JAVA_HOME%" == "" goto OkJHome - -echo. -echo Error: JAVA_HOME not found in your environment. >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -:OkJHome -if exist "%JAVA_HOME%\bin\java.exe" goto init - -echo. -echo Error: JAVA_HOME is set to an invalid directory. >&2 -echo JAVA_HOME = "%JAVA_HOME%" >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -@REM ==== END VALIDATION ==== - -:init - -@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". -@REM Fallback to current working directory if not found. - -set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% -IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir - -set EXEC_DIR=%CD% -set WDIR=%EXEC_DIR% -:findBaseDir -IF EXIST "%WDIR%"\.mvn goto baseDirFound -cd .. -IF "%WDIR%"=="%CD%" goto baseDirNotFound -set WDIR=%CD% -goto findBaseDir - -:baseDirFound -set MAVEN_PROJECTBASEDIR=%WDIR% -cd "%EXEC_DIR%" -goto endDetectBaseDir - -:baseDirNotFound -set MAVEN_PROJECTBASEDIR=%EXEC_DIR% -cd "%EXEC_DIR%" - -:endDetectBaseDir - -IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig - -@setlocal EnableExtensions EnableDelayedExpansion -for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a -@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% - -:endReadAdditionalConfig - -SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" -set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" -set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar" - -FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( - IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B -) - -@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -@REM This allows using the maven wrapper in projects that prohibit checking in binary data. -if exist %WRAPPER_JAR% ( - if "%MVNW_VERBOSE%" == "true" ( - echo Found %WRAPPER_JAR% - ) -) else ( - if not "%MVNW_REPOURL%" == "" ( - SET DOWNLOAD_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar" - ) - if "%MVNW_VERBOSE%" == "true" ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... - echo Downloading from: %DOWNLOAD_URL% - ) - - powershell -Command "&{"^ - "$webclient = new-object System.Net.WebClient;"^ - "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ - "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ - "}"^ - "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ - "}" - if "%MVNW_VERBOSE%" == "true" ( - echo Finished downloading %WRAPPER_JAR% - ) -) -@REM End of extension - -@REM Provide a "standardized" way to retrieve the CLI args that will -@REM work with both Windows and non-Windows executions. 
-set MAVEN_CMD_LINE_ARGS=%* - -%MAVEN_JAVA_EXE% ^ - %JVM_CONFIG_MAVEN_PROPS% ^ - %MAVEN_OPTS% ^ - %MAVEN_DEBUG_OPTS% ^ - -classpath %WRAPPER_JAR% ^ - "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ - %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* -if ERRORLEVEL 1 goto error -goto end - -:error -set ERROR_CODE=1 - -:end -@endlocal & set ERROR_CODE=%ERROR_CODE% - -if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost -@REM check for post script, once with legacy .bat ending and once with .cmd ending -if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" -if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" -:skipRcPost - -@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' -if "%MAVEN_BATCH_PAUSE%"=="on" pause - -if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% - -cmd /C exit /B %ERROR_CODE% diff --git a/m2k/m2k-func/pom.xml b/m2k/m2k-func/pom.xml deleted file mode 100644 index 625bd23..0000000 --- a/m2k/m2k-func/pom.xml +++ /dev/null @@ -1,149 +0,0 @@ - - - 4.0.0 - m2k-func - - dev.parodos - swf-parent - 1.0.0-SNAPSHOT - ../../swf-parent - - - false - v2.0.0 - 6.7.0.202309050840-r - - - - io.quarkus - quarkus-funqy-http - - - io.quarkus - quarkus-arc - - - org.eclipse.jgit - org.eclipse.jgit - ${jgit-version} - - - org.eclipse.jgit - org.eclipse.jgit.archive - ${jgit-version} - - - org.eclipse.jgit - org.eclipse.jgit.ssh.jsch - ${jgit-version} - - - com.jcraft - jsch - - - - - com.github.mwiede - jsch - 0.2.9 - - - dev.parodos - move2kube - ${move2kube.version} - - - org.jboss.slf4j - slf4j-jboss-logmanager - - - io.quarkus - quarkus-junit5 - test - - - io.rest-assured - rest-assured - test - - - io.quarkus - quarkus-junit5-mockito - test - - - commons-io - commons-io - - - org.apache.commons - commons-compress - - - io.cloudevents - cloudevents-core - 2.3.0 - compile - - - io.cloudevents - cloudevents-json-jackson - 2.3.0 - compile - - - jakarta.inject - jakarta.inject-api - - - io.quarkus - quarkus-funqy-knative-events - - - org.apache.commons - commons-lang3 - - - io.quarkus - quarkus-rest-client-reactive-jackson - - - - - native - - - native - - - - - - maven-failsafe-plugin - ${surefire-plugin.version} - - - - integration-test - verify - - - - ${project.build.directory}/${project.build.finalName}-runner - org.jboss.logmanager.LogManager - ${maven.home} - - - - - - - - - native - - - - diff --git a/m2k/m2k-func/src/main/docker/Dockerfile.jvm b/m2k/m2k-func/src/main/docker/Dockerfile.jvm deleted file mode 100644 index 0ae79c1..0000000 --- a/m2k/m2k-func/src/main/docker/Dockerfile.jvm +++ /dev/null @@ -1,93 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/m2k-kfunc-jvm . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/m2k-kfunc-jvm -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005) like this : EXPOSE 8080 5005 -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/m2k-kfunc-jvm -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. 
-# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. 
(example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. (example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-17:1.11 - -ENV LANGUAGE='en_US:en' - - -# We make four distinct layers so if there are application changes the library layers can be re-used -COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ -COPY --chown=185 target/quarkus-app/*.jar /deployments/ -COPY --chown=185 target/quarkus-app/app/ /deployments/app/ -COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - diff --git a/m2k/m2k-func/src/main/docker/Dockerfile.legacy-jar b/m2k/m2k-func/src/main/docker/Dockerfile.legacy-jar deleted file mode 100644 index aa11bfa..0000000 --- a/m2k/m2k-func/src/main/docker/Dockerfile.legacy-jar +++ /dev/null @@ -1,89 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -Dquarkus.package.type=legacy-jar -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/m2k-kfunc-legacy-jar . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/m2k-kfunc-legacy-jar -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005) like this : EXPOSE 8080 5005 -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/m2k-kfunc-legacy-jar -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. -# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. 
You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. (example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-17:1.11 - -ENV LANGUAGE='en_US:en' - - -COPY target/lib/* /deployments/lib/ -COPY target/*-runner.jar /deployments/quarkus-run.jar - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" diff --git a/m2k/m2k-func/src/main/docker/Dockerfile.native b/m2k/m2k-func/src/main/docker/Dockerfile.native deleted file mode 100644 index 40bca79..0000000 --- a/m2k/m2k-func/src/main/docker/Dockerfile.native +++ /dev/null @@ -1,27 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# -# Before building the container image run: -# -# ./mvnw package -Pnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native -t quarkus/m2k-kfunc . 
-# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/m2k-kfunc -# -### -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -CMD ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/m2k/m2k-func/src/main/docker/Dockerfile.native-micro b/m2k/m2k-func/src/main/docker/Dockerfile.native-micro deleted file mode 100644 index 42080b8..0000000 --- a/m2k/m2k-func/src/main/docker/Dockerfile.native-micro +++ /dev/null @@ -1,30 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# It uses a micro base image, tuned for Quarkus native executables. -# It reduces the size of the resulting container image. -# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. -# -# Before building the container image run: -# -# ./mvnw package -Pnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/m2k-kfunc . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/m2k-kfunc -# -### -FROM quay.io/quarkus/quarkus-micro-image:1.0 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -CMD ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/m2k/m2k-func/src/main/java/dev/parodos/CloudEventsCustomizer.java b/m2k/m2k-func/src/main/java/dev/parodos/CloudEventsCustomizer.java deleted file mode 100644 index 443009f..0000000 --- a/m2k/m2k-func/src/main/java/dev/parodos/CloudEventsCustomizer.java +++ /dev/null @@ -1,20 +0,0 @@ -package dev.parodos; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; -import io.cloudevents.jackson.JsonFormat; -import io.quarkus.jackson.ObjectMapperCustomizer; -import jakarta.inject.Singleton; - -/** - * Ensure the registration of the CloudEvent jackson module according to the Quarkus suggested procedure. 
- */ -@Singleton -public class CloudEventsCustomizer implements ObjectMapperCustomizer { - - @Override - public void customize(ObjectMapper mapper) { - mapper.registerModule(JsonFormat.getCloudEventJacksonModule()); - mapper.registerModule(new JavaTimeModule()); - } -} diff --git a/m2k/m2k-func/src/main/java/dev/parodos/EventGenerator.java b/m2k/m2k-func/src/main/java/dev/parodos/EventGenerator.java deleted file mode 100644 index a8eb53e..0000000 --- a/m2k/m2k-func/src/main/java/dev/parodos/EventGenerator.java +++ /dev/null @@ -1,60 +0,0 @@ -package dev.parodos; - -import io.quarkus.funqy.knative.events.CloudEvent; -import io.quarkus.funqy.knative.events.CloudEventBuilder; -import org.eclipse.microprofile.config.ConfigProvider; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.time.OffsetDateTime; -import java.util.Map; -import java.util.UUID; - -public class EventGenerator { - - public static final String ERROR_EVENT = ConfigProvider.getConfig().getValue("error.event.name",String.class); - - public static final String TRANSFORMATION_SAVED_EVENT = ConfigProvider.getConfig().getValue("transformation-saved.event.name",String.class); - - public static CloudEvent createCloudEvent(String workflowId, EventPOJO data, String eventType, String source) { - return baseCloudEventBuilder(workflowId, eventType, source) - .build(data); - } - - public static CloudEvent createCloudEvent(String workflowId, String eventType, String source) { - return baseCloudEventBuilder(workflowId, eventType, source) - .build(new EventPOJO()); - } - - public static CloudEvent createTransformationSavedEvent(String workflowId, String source) { - return baseCloudEventBuilder(workflowId, TRANSFORMATION_SAVED_EVENT, source) - .build(new EventPOJO()); - } - - public static CloudEvent createErrorEvent(String workflowCallerId, String message, String source) { - return createCloudEvent(workflowCallerId, new EventPOJO().setError(message), ERROR_EVENT, source); - } - private static CloudEventBuilder baseCloudEventBuilder(String workflowId, String eventType, String source) { - return CloudEventBuilder.create() - .id(UUID.randomUUID().toString()) - .source(source) - .type(eventType) - .time(OffsetDateTime.now()) - .extensions(Map.of("kogitoprocrefid", workflowId)); - } - - - public static class EventPOJO { - public String error; - public String message; - - public EventPOJO setError(String error) { - this.error = error; - return this; - } - - public EventPOJO setMessage(String message) { - this.message = message; - return this; - } - } -} diff --git a/m2k/m2k-func/src/main/java/dev/parodos/SaveTransformationFunction.java b/m2k/m2k-func/src/main/java/dev/parodos/SaveTransformationFunction.java deleted file mode 100644 index 2e76cda..0000000 --- a/m2k/m2k-func/src/main/java/dev/parodos/SaveTransformationFunction.java +++ /dev/null @@ -1,213 +0,0 @@ -package dev.parodos; - -import dev.parodos.move2kube.ApiException; -import dev.parodos.service.FolderCreatorService; -import dev.parodos.service.GitService; -import dev.parodos.service.Move2KubeService; -import io.quarkus.funqy.Funq; -import io.quarkus.funqy.knative.events.CloudEvent; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.apache.commons.io.FileUtils; -import org.eclipse.jgit.api.Git; -import org.eclipse.jgit.api.errors.GitAPIException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; 
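A minimal sketch of the event-building pattern used by `EventGenerator` above, assuming a placeholder event type (the real value comes from the `transformation-saved.event.name` configuration property, which is not shown here); the `kogitoprocrefid` extension is what ties the emitted event back to the calling workflow instance:

```java
import io.quarkus.funqy.knative.events.CloudEvent;
import io.quarkus.funqy.knative.events.CloudEventBuilder;

import java.time.OffsetDateTime;
import java.util.Map;
import java.util.UUID;

public class CorrelatedEventSketch {

    // Mirrors EventGenerator: "transformation_saved" is a placeholder, since the actual
    // type is read from the transformation-saved.event.name property at runtime.
    static CloudEvent transformationSaved(String workflowCallerId) {
        return CloudEventBuilder.create()
                .id(UUID.randomUUID().toString())
                .source("save-transformation")
                .type("transformation_saved")
                .time(OffsetDateTime.now())
                // Correlation key: the workflow runtime matches this extension against the
                // id of the workflow instance waiting for the event.
                .extensions(Map.of("kogitoprocrefid", workflowCallerId))
                .build("done");
    }
}
```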
-import java.nio.file.Paths; -import java.util.Date; - -@ApplicationScoped -public class SaveTransformationFunction { - private static final Logger log = LoggerFactory.getLogger(SaveTransformationFunction.class); - - public static final String COMMIT_MESSAGE = "Move2Kube transformation"; - - public static final String SOURCE = "save-transformation"; - - public static final String[] MOVE2KUBE_OUTPUT_DIRECTORIES_TO_SAVE = new String[]{"source","deploy","scripts" }; - - @Inject - GitService gitService; - - @Inject - Move2KubeService move2KubeService; - - @Inject - FolderCreatorService folderCreatorService; - - - - @Funq("saveTransformation") - public CloudEvent saveTransformation(FunInput input) { - if (!input.validate()) { - log.error("One or multiple mandatory input field was missing; input: {}", input); - return EventGenerator.createErrorEvent(input.workflowCallerId, String.format("One or multiple mandatory input field was missing; input: %s", input), - SOURCE); - } - - Path transformationOutputPath; - try { - transformationOutputPath = move2KubeService.getTransformationOutput(input.workspaceId, input.projectId, input.transformId); - } catch (IllegalArgumentException | IOException | ApiException e) { - log.error("Error while retrieving transformation output", e); - return EventGenerator.createErrorEvent(input.workflowCallerId, String.format("Cannot get transformation output of transformation %s" + - " in workspace %s for project %s for repo %s; error: %s", - input.transformId, input.workspaceId, input.projectId, input.gitRepo, e), SOURCE); - } - - return pushTransformationToGitRepo(input, transformationOutputPath); - } - - - private CloudEvent pushTransformationToGitRepo(FunInput input, Path transformationOutputPath) { - try { - Path gitDir = folderCreatorService.createGitRepositoryLocalFolder(input.gitRepo, String.format("%s-%s_%d", input.branch, input.transformId, new Date().getTime())); - - try (Git clonedRepo = gitService.cloneRepo(input.gitRepo, input.branch, input.token, gitDir)) { - CloudEvent errorEvent = createBranch(input, clonedRepo); - if (errorEvent != null) return errorEvent; - - try { - moveTransformationOutputToBranchDirectory(transformationOutputPath, gitDir); - } catch (IOException e) { - log.error("Cannot move transformation output to local git repo " + - "(repo {}; transformation: {}; workspace: {}; project: {})", - input.gitRepo, input.transformId, input.workspaceId, input.projectId, e); - return EventGenerator.createErrorEvent(input.workflowCallerId, String.format("Cannot move transformation output to local git repo " + - "(repo %s; transformation: %s; workspace: %s; project: %s); error: %s", - input.gitRepo, input.transformId, input.workspaceId, input.projectId, e), SOURCE); - } - - return commitAndPush(input, clonedRepo); - - } catch (GitAPIException e) { - log.error("Cannot clone repo {}", input.gitRepo, e); - return EventGenerator.createErrorEvent(input.workflowCallerId, String.format("Cannot clone repo %s; error: %s", input.gitRepo, e), - SOURCE); - } - } catch (IOException e) { - log.error("Cannot create temp dir to clone repo {}", input.gitRepo, e); - return EventGenerator.createErrorEvent(input.workflowCallerId, String.format("Cannot create temp dir to clone repo %s; error: %s", input.gitRepo, e), - SOURCE); - } - - } - - public void moveTransformationOutputToBranchDirectory(Path transformationOutput, Path gitDirectory) throws IOException { - cleanCurrentGitFolder(gitDirectory); - log.info("Moving extracted files located in {} to git repo folder {}", 
transformationOutput, gitDirectory); - Path finalPath; - try (var dir = Files.newDirectoryStream(transformationOutput.resolve("output"), Files::isDirectory)) { - finalPath = dir.iterator().next(); - } - log.info("FinalPath is --->{} and GitPath is {}", finalPath, gitDirectory); - for(String directory: MOVE2KUBE_OUTPUT_DIRECTORIES_TO_SAVE) { - FileUtils.copyDirectory(finalPath.resolve(Paths.get(directory)).toFile(), - gitDirectory.resolve(directory).toFile()); - } - FileUtils.copyFile(finalPath.resolve(Paths.get("Readme.md")).toFile(), - gitDirectory.resolve("Readme.md").toFile()); - } - - private static void cleanCurrentGitFolder(Path gitDirectory) throws IOException { - try (var files = Files.walk(gitDirectory, 1)) { - files.forEach(path -> { - if (!path.equals(gitDirectory) && !path.toAbsolutePath().toString().contains(".git")) { - File f = path.toFile(); - if (f.isDirectory()) { - try { - FileUtils.deleteDirectory(f); - } catch (IOException e) { - log.error("Error while deleting directory {}", path, e); - } - } else { - try { - FileUtils.delete(f); - } catch (IOException e) { - log.error("Error while deleting file {}", path, e); - } - } - } - }); - } - } - - private CloudEvent createBranch(FunInput input, Git clonedRepo) { - try { - if (gitService.branchExists(clonedRepo, input.branch)) { - log.error("Branch {} already exists on repo {}", input.branch, input.gitRepo); - return EventGenerator.createErrorEvent(input.workflowCallerId, String.format("Branch '%s' already exists on repo %s", - input.branch, input.gitRepo), SOURCE); - } - gitService.createBranch(clonedRepo, input.branch); - } catch (GitAPIException e) { - log.error("Cannot create branch {} to remote repo {}", input.branch, input.gitRepo, e); - return EventGenerator.createErrorEvent(input.workflowCallerId, String.format("Cannot create branch '%s' on repo %s; error: %s", - input.branch, input.gitRepo, e), SOURCE); - } - return null; - } - - private CloudEvent commitAndPush(FunInput input, Git clonedRepo) { - try { - gitService.commit(clonedRepo, COMMIT_MESSAGE, "."); - } catch (GitAPIException e) { - log.error("Cannot commit to local repo {}", input.gitRepo, e); - return EventGenerator.createErrorEvent(input.workflowCallerId, String.format("Cannot commit to local repo %s; error: %s", input.gitRepo, e), - SOURCE); - } - log.info("Pushing commit to branch {} of repo {}", input.branch, input.gitRepo); - try { - gitService.push(clonedRepo, input.token); - } catch (GitAPIException | IOException e) { - log.error("Cannot push branch {} to remote repo {}", input.branch, input.gitRepo, e); - return EventGenerator.createErrorEvent(input.workflowCallerId, String.format("Cannot push branch %s to remote repo %s; error: %s", - input.branch, input.gitRepo, e), SOURCE); - } - - var event = EventGenerator.createTransformationSavedEvent(input.workflowCallerId, SOURCE); - log.info("Sending cloud event {} to workflow {}", event, input.workflowCallerId); - return event; - } - - - public static class FunInput { - public String gitRepo; - public String branch; - public String token; - - public String workspaceId; - public String projectId; - public String transformId; - - public String workflowCallerId; - - public boolean validate() { - return !((gitRepo == null || gitRepo.isBlank()) || - (branch == null || branch.isBlank()) || - (workspaceId == null || workspaceId.isBlank()) || - (projectId == null || projectId.isBlank()) || - (workflowCallerId == null || workflowCallerId.isBlank()) || - (transformId == null || transformId.isBlank())); - } - - 
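A minimal sketch of how the `FunInput` contract above might be populated, with illustrative values only; `validate()` treats every field except `token` as mandatory, and a blank token makes the Git operations fall back to the configured SSH key:

```java
import dev.parodos.SaveTransformationFunction;

public class FunInputSketch {
    public static void main(String[] args) {
        // Illustrative values only; every field except token is mandatory according to validate().
        SaveTransformationFunction.FunInput input = new SaveTransformationFunction.FunInput();
        input.gitRepo = "https://github.com/example/app.git"; // target repo for the transformation output
        input.branch = "m2k-output";                          // must not already exist on the remote
        input.token = "";                                     // optional: blank falls back to the ssh key
        input.workspaceId = "work-1234";                      // Move2Kube workspace
        input.projectId = "proj-1234";                        // Move2Kube project
        input.transformId = "transform-1";                    // transformation whose output gets pushed
        input.workflowCallerId = "wf-instance-42";            // workflow instance notified via kogitoprocrefid

        System.out.println(input.validate()); // true: ready to be passed to saveTransformation(...)
    }
}
```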
@Override - public String toString() { - return "FunInput{" + - "gitRepo='" + gitRepo + '\'' + - ", branch='" + branch + '\'' + - ", workspaceId='" + workspaceId + '\'' + - ", projectId='" + projectId + '\'' + - ", transformId='" + transformId + '\'' + - ", workflowCallerId='" + workflowCallerId + '\'' + - ", token='" + (token == null ? "" : "") + '\'' + - '}'; - } - } - -} diff --git a/m2k/m2k-func/src/main/java/dev/parodos/service/FolderCreatorService.java b/m2k/m2k-func/src/main/java/dev/parodos/service/FolderCreatorService.java deleted file mode 100644 index 9cad8fb..0000000 --- a/m2k/m2k-func/src/main/java/dev/parodos/service/FolderCreatorService.java +++ /dev/null @@ -1,13 +0,0 @@ -package dev.parodos.service; - -import java.io.IOException; -import java.nio.file.Path; - - -public interface FolderCreatorService { - - - Path createGitRepositoryLocalFolder(String gitRepo, String uniqueIdentifier) throws IOException; - - Path createMove2KubeTransformationFolder(String transformationId) throws IOException; -} diff --git a/m2k/m2k-func/src/main/java/dev/parodos/service/FolderCreatorServiceImpl.java b/m2k/m2k-func/src/main/java/dev/parodos/service/FolderCreatorServiceImpl.java deleted file mode 100644 index c2c228c..0000000 --- a/m2k/m2k-func/src/main/java/dev/parodos/service/FolderCreatorServiceImpl.java +++ /dev/null @@ -1,30 +0,0 @@ -package dev.parodos.service; - -import jakarta.enterprise.context.ApplicationScoped; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; - -@ApplicationScoped -public class FolderCreatorServiceImpl implements FolderCreatorService { - static final Logger log = LoggerFactory.getLogger(FolderCreatorServiceImpl.class); - - @Override - public Path createGitRepositoryLocalFolder(String gitRepo, String uniqueIdentifier) throws IOException { - String folder = String.format("local-git-transform-%s-%s", StringUtils.substringAfterLast(gitRepo, "/"), uniqueIdentifier); - log.info("Creating temp folder: {}", folder); - return Files.createTempDirectory(folder); - } - - @Override - public Path createMove2KubeTransformationFolder(String transformationId) throws IOException { - String folder = String.format("move2kube-transform-%s", transformationId); - log.info("Creating temp folder: {}", folder); - return Files.createTempDirectory(folder); - } - -} diff --git a/m2k/m2k-func/src/main/java/dev/parodos/service/GitService.java b/m2k/m2k-func/src/main/java/dev/parodos/service/GitService.java deleted file mode 100644 index 99cad56..0000000 --- a/m2k/m2k-func/src/main/java/dev/parodos/service/GitService.java +++ /dev/null @@ -1,25 +0,0 @@ -package dev.parodos.service; - -import org.eclipse.jgit.api.Git; -import org.eclipse.jgit.api.errors.GitAPIException; - -import java.io.IOException; -import java.nio.file.Path; - -public interface GitService { - - // Clone the git repository locally on the `targetDirectory` folder - Git cloneRepo(String repo, String branch, String token, Path targetDirectory) throws GitAPIException, IOException; - - // Clone then archive the git repository. The archive is saved as `archiveOutputPath`. 
- // The repository is locally persisted when cloned in the parent directory of `archiveOutputPath` - - void createBranch(Git repo, String branch) throws GitAPIException; - - void commit(Git repo, String commitMessage, String filePattern) throws GitAPIException; - - void push(Git repo, String token) throws GitAPIException, IOException; - - // Check is a branch exists on the repository based on the cloned git repository persisted in the directory `gitDir` - public boolean branchExists(Git repo, String branch) throws GitAPIException; -} diff --git a/m2k/m2k-func/src/main/java/dev/parodos/service/GitServiceImpl.java b/m2k/m2k-func/src/main/java/dev/parodos/service/GitServiceImpl.java deleted file mode 100644 index fd5b0ba..0000000 --- a/m2k/m2k-func/src/main/java/dev/parodos/service/GitServiceImpl.java +++ /dev/null @@ -1,135 +0,0 @@ -package dev.parodos.service; - -import com.jcraft.jsch.JSch; -import com.jcraft.jsch.JSchException; -import com.jcraft.jsch.Session; -import jakarta.enterprise.context.ApplicationScoped; -import org.eclipse.jgit.api.CloneCommand; -import org.eclipse.jgit.api.CommitCommand; -import org.eclipse.jgit.api.Git; -import org.eclipse.jgit.api.ListBranchCommand; -import org.eclipse.jgit.api.PushCommand; -import org.eclipse.jgit.api.TransportConfigCallback; -import org.eclipse.jgit.api.errors.GitAPIException; -import org.eclipse.jgit.api.errors.InvalidRemoteException; -import org.eclipse.jgit.transport.CredentialsProvider; -import org.eclipse.jgit.transport.SshTransport; -import org.eclipse.jgit.transport.Transport; -import org.eclipse.jgit.transport.UsernamePasswordCredentialsProvider; -import org.eclipse.jgit.transport.ssh.jsch.JschConfigSessionFactory; -import org.eclipse.jgit.transport.ssh.jsch.OpenSshConfig; -import org.eclipse.jgit.util.FS; -import org.eclipse.microprofile.config.ConfigProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.file.Path; - -@ApplicationScoped -public class GitServiceImpl implements GitService { - private static final Logger log = LoggerFactory.getLogger(GitServiceImpl.class); - public static final Path SSH_PRIV_KEY_PATH = Path.of(ConfigProvider.getConfig().getValue("ssh-priv-key-path", String.class)); - @Override - public Git cloneRepo(String repo, String branch, String token, Path targetDirectory) throws GitAPIException, IOException { - try { - if (repo.startsWith("ssh") && !repo.contains("@")) { - log.info("No user specified in ssh git url, using 'git' user"); - String[] protocolAndHost = repo.split("://"); - String repoWithGitUser = "git@" + protocolAndHost[1]; - repo = protocolAndHost[0] + "://" + repoWithGitUser; - } - CloneCommand cloneCommand = Git.cloneRepository().setURI(repo).setDirectory(targetDirectory.toFile()); - if (token != null && !token.isBlank()) { - log.info("Cloning repo {} in {} using token", repo, targetDirectory); - CredentialsProvider credentialsProvider = new UsernamePasswordCredentialsProvider(token, ""); - cloneCommand.setCredentialsProvider(credentialsProvider); - } else { - log.info("Cloning repo {} in {} using ssh keys {}", repo, targetDirectory, SSH_PRIV_KEY_PATH); - cloneCommand.setTransportConfigCallback(getTransport(SSH_PRIV_KEY_PATH)); - } - return cloneCommand.call(); - } catch (InvalidRemoteException e) { - log.error("remote repository server '{}' is not available", repo, e); - throw e; - } catch (GitAPIException e) { - log.error("Cannot clone repository: {}", repo, e); - throw e; - } catch (IOException e) { - log.error("Cannot set ssh 
transport: {}", repo, e); - throw e; - } - } - - @Override - public void createBranch(Git repo, String branch) throws GitAPIException { - log.info("Creating branch {} in repo {}", branch, repo.toString()); - repo.branchCreate().setName(branch).call(); - repo.checkout().setName(branch).call(); - } - - @Override - public boolean branchExists(Git repo, String branch) throws GitAPIException { - return repo.branchList() - .setListMode(ListBranchCommand.ListMode.ALL) - .call() - .stream() - .map(ref -> ref.getName()) - .anyMatch(branchName -> branchName.contains(branch)); - } - - @Override - public void commit(Git repo, String commitMessage, String filePattern) throws GitAPIException { - log.info("Committing files matching the pattern '{}' with message '{}' to repo {}", filePattern, commitMessage, repo); - repo.add().setUpdate(true).addFilepattern(filePattern).call(); - repo.add().addFilepattern(filePattern).call(); - CommitCommand commit = repo.commit().setMessage(commitMessage); - commit.setSign(Boolean.FALSE); - commit.call(); - } - - @Override - public void push(Git repo, String token) throws GitAPIException, IOException { - log.info("Pushing to repo {}", repo); - PushCommand pushCommand = repo.push().setForce(false).setRemote("origin"); - if (token != null && !token.isBlank()) { - log.info("Push using token"); - CredentialsProvider credentialsProvider = new UsernamePasswordCredentialsProvider(token, ""); - pushCommand.setCredentialsProvider(credentialsProvider); - } else { - log.info("Push using ssh key {}", SSH_PRIV_KEY_PATH); - pushCommand.setTransportConfigCallback(getTransport(SSH_PRIV_KEY_PATH)); - } - pushCommand.call(); - } - - public static TransportConfigCallback getTransport(Path sshKeyPath) throws IOException { - if (!sshKeyPath.toFile().exists()) { - throw new IOException("SSH key file at '%s' does not exists".formatted(sshKeyPath.toString())); - } - - var sshSessionFactory = new JschConfigSessionFactory() { - @Override - protected void configure(OpenSshConfig.Host host, Session session) { - session.setConfig("StrictHostKeyChecking", "no"); - session.setConfig("PreferredAuthentications", "publickey"); - } - - @Override - protected JSch createDefaultJSch(FS fs) throws JSchException { - JSch defaultJSch = super.createDefaultJSch(fs); - defaultJSch.removeAllIdentity(); - defaultJSch.addIdentity(sshKeyPath.toString()); - return defaultJSch; - } - }; - return new TransportConfigCallback() { - @Override - public void configure(Transport transport) { - SshTransport sshTransport = (SshTransport) transport; - sshTransport.setSshSessionFactory(sshSessionFactory); - - } - }; - } -} diff --git a/m2k/m2k-func/src/main/java/dev/parodos/service/Move2KubeService.java b/m2k/m2k-func/src/main/java/dev/parodos/service/Move2KubeService.java deleted file mode 100644 index a1b74c0..0000000 --- a/m2k/m2k-func/src/main/java/dev/parodos/service/Move2KubeService.java +++ /dev/null @@ -1,11 +0,0 @@ -package dev.parodos.service; - -import dev.parodos.move2kube.ApiException; - -import java.io.IOException; -import java.nio.file.Path; - -public interface Move2KubeService { - - public Path getTransformationOutput(String workspaceId, String projectId, String transformationId) throws IllegalArgumentException, IOException, ApiException; -} diff --git a/m2k/m2k-func/src/main/java/dev/parodos/service/Move2KubeServiceImpl.java b/m2k/m2k-func/src/main/java/dev/parodos/service/Move2KubeServiceImpl.java deleted file mode 100644 index 48349d1..0000000 --- 
a/m2k/m2k-func/src/main/java/dev/parodos/service/Move2KubeServiceImpl.java +++ /dev/null @@ -1,97 +0,0 @@ -package dev.parodos.service; - -import dev.parodos.move2kube.ApiClient; -import dev.parodos.move2kube.ApiException; -import dev.parodos.move2kube.api.PlanApi; -import dev.parodos.move2kube.api.ProjectInputsApi; -import dev.parodos.move2kube.api.ProjectOutputsApi; -import dev.parodos.move2kube.api.ProjectsApi; -import dev.parodos.move2kube.client.model.Project; -import dev.parodos.move2kube.client.model.ProjectOutputsValue; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.apache.commons.compress.archivers.zip.ZipArchiveEntry; -import org.apache.commons.compress.archivers.zip.ZipFile; -import org.eclipse.microprofile.config.inject.ConfigProperty; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.file.Path; -import java.util.Collections; - -@ApplicationScoped -public class Move2KubeServiceImpl implements Move2KubeService { - private static final Logger log = LoggerFactory.getLogger(Move2KubeServiceImpl.class); - - @Inject - FolderCreatorService folderCreatorService; - - @ConfigProperty(name = "move2kube.api") - String move2kubeApi; - - @Override - public Path getTransformationOutput(String workspaceId, String projectId, String transformationId) throws IllegalArgumentException, IOException, ApiException { - Path outputPath = folderCreatorService.createMove2KubeTransformationFolder(transformationId); - ApiClient client = new ApiClient(); - client.setBasePath(move2kubeApi); - ProjectOutputsApi output = new ProjectOutputsApi(client); - - waitForTransformationToBeDone(workspaceId, projectId, transformationId, client); - - log.info("Retrieving transformation {} output (Workspace: {}, project: {})", transformationId, workspaceId, projectId); - File file = output.getProjectOutput(workspaceId, projectId, transformationId); - if (file == null) { - log.error("Cannot get output file from transformation {} (Workspace: {}, project: {})", transformationId, workspaceId, projectId); - throw new FileNotFoundException(String.format("Cannot get output file from transformation %s (Workspace: %s, project: %s)", transformationId, workspaceId, projectId)); - } - - log.info("Extracting {} to {}", file.getAbsolutePath(), outputPath); - extractZipFile(file, outputPath); - - return outputPath; - } - - private void waitForTransformationToBeDone(String workspaceId, String projectId, String transformationId, ApiClient client) throws ApiException { - log.info("Waiting for transformation {} to be done", transformationId); - ProjectsApi project = new ProjectsApi(client); - Project res; - ProjectOutputsValue o; - do { - res = project.getProject(workspaceId, projectId); - o = res.getOutputs().get(transformationId); - if (o == null) { - log.error("Output is null for transformation {}", transformationId); - throw new IllegalArgumentException(String.format("Cannot get the project (%s) transformation (%s) output from the list", projectId, transformationId)); - } - log.info("Status of transformation Id {} is: {}", transformationId, o.getStatus()); - try { - Thread.sleep(5000L); - } catch (InterruptedException ignored) { - - } - } while (!o.getStatus().equals("done")); - } - - public static void extractZipFile(File zipFile, Path extractPath) throws IOException { - try (ZipFile zip = new 
ZipFile(zipFile)) { - for (ZipArchiveEntry entry : Collections.list(zip.getEntries())) { - File entryFile = new File(extractPath.toString() + "/" + entry.getName()); - if (entry.isDirectory()) { - entryFile.mkdirs(); - continue; - } - File parentDir = entryFile.getParentFile(); - if (parentDir != null && !parentDir.exists()) { - parentDir.mkdirs(); - } - try (FileOutputStream fos = new FileOutputStream(entryFile)) { - zip.getInputStream(entry).transferTo(fos); - } - } - } - } -} diff --git a/m2k/m2k-func/src/main/resources/META-INF/resources/index.html b/m2k/m2k-func/src/main/resources/META-INF/resources/index.html deleted file mode 100644 index e2fda0e..0000000 --- a/m2k/m2k-func/src/main/resources/META-INF/resources/index.html +++ /dev/null @@ -1,283 +0,0 @@
[283 deleted lines: the Quarkus-generated welcome page for m2k-kfunc 1.0.0-SNAPSHOT ("You just made a Quarkus application."), linking to the Dev UI and the Funqy HTTP starter (@Path: /greet, Funqy HTTP Binding extension), and pointing at src/main/resources/application.properties, src/main/resources/META-INF/resources/ and src/main/java; the HTML markup itself is not preserved in this dump.]
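A side note on the `extractZipFile` helper in the deleted `Move2KubeServiceImpl` above: it writes each archive entry to `extractPath + "/" + entry.getName()` without validating the resulting path, so an archive containing `../` segments could in principle write outside the target folder (the classic zip-slip issue). The sketch below is one hedged way to add that guard. It reuses the Apache Commons Compress and `java.nio.file` APIs already imported by that class, but the `SafeZipExtractor` class and `extract` method names are illustrative assumptions, not code from this repository.

```java
import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
import org.apache.commons.compress.archivers.zip.ZipFile;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Collections;

// Illustrative helper (not part of the repository): same extraction loop as the
// deleted extractZipFile, plus a check that every entry stays under extractPath.
public final class SafeZipExtractor {

    private SafeZipExtractor() {
        // utility class, no instances
    }

    public static void extract(File zipFile, Path extractPath) throws IOException {
        Path root = extractPath.normalize();
        try (ZipFile zip = new ZipFile(zipFile)) {
            for (ZipArchiveEntry entry : Collections.list(zip.getEntries())) {
                // Resolve the entry name against the target directory and normalize it;
                // a name containing ".." would escape 'root' and is rejected.
                Path target = root.resolve(entry.getName()).normalize();
                if (!target.startsWith(root)) {
                    throw new IOException("Blocked zip entry outside extraction dir: " + entry.getName());
                }
                if (entry.isDirectory()) {
                    Files.createDirectories(target);
                    continue;
                }
                Path parent = target.getParent();
                if (parent != null) {
                    Files.createDirectories(parent);
                }
                try (InputStream in = zip.getInputStream(entry)) {
                    Files.copy(in, target, StandardCopyOption.REPLACE_EXISTING);
                }
            }
        }
    }
}
```

If the original behaviour is preferred, the path check can simply be dropped; the rest of the loop mirrors the deleted implementation.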
- - diff --git a/m2k/m2k-func/src/main/resources/application.properties b/m2k/m2k-func/src/main/resources/application.properties deleted file mode 100644 index 6575c17..0000000 --- a/m2k/m2k-func/src/main/resources/application.properties +++ /dev/null @@ -1,18 +0,0 @@ -move2kube.api=${MOVE2KUBE_API:http://move2kube-svc.default.svc.cluster.local:8080/api/v1} -transformation-saved.event.name=transformation_saved -error.event.name=error -ssh-priv-key-path=${SSH_PRIV_KEY_PATH:/home/jboss/.ssh/id_rsa} -broker.url=${BROKER_URL:http://broker-ingress.knative-eventing.svc.cluster.local/m2k/default} -quarkus.rest-client.logging.scope=request-response -quarkus.rest-client.logging.body-limit=-1 -quarkus.log.category."org.jboss.resteasy.reactive.client.logging".level=DEBUG -# quarkus.log.level=DEBUG -# ref: https://quarkus.io/guides/funqy-knative-events -quarkus.funqy.knative-events.mapping.saveTransformation.trigger=save-transformation - -quarkus.funqy.export=${EXPORTED_FUNC} - - - - - diff --git a/m2k/m2k-func/src/test/java/dev/parodos/SaveTransformationFunctionIT.java b/m2k/m2k-func/src/test/java/dev/parodos/SaveTransformationFunctionIT.java deleted file mode 100644 index 9ab28d6..0000000 --- a/m2k/m2k-func/src/test/java/dev/parodos/SaveTransformationFunctionIT.java +++ /dev/null @@ -1,63 +0,0 @@ -package dev.parodos; - -import dev.parodos.move2kube.ApiException; -import io.quarkus.test.junit.QuarkusTest; -import io.quarkus.test.junit.QuarkusTestProfile; -import io.quarkus.test.junit.TestProfile; -import io.restassured.RestAssured; -import io.restassured.http.ContentType; -import org.eclipse.jgit.api.errors.GitAPIException; -import org.eclipse.microprofile.config.inject.ConfigProperty; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.util.Map; -import java.util.UUID; - -import static org.hamcrest.Matchers.containsString; - -@QuarkusTest -@TestProfile(SaveTransformationFunctionIT.OverridePropertiesTestProfile.class) -public class SaveTransformationFunctionIT { - @ConfigProperty(name = "transformation-saved.event.name") - private String transformationSavedEventName; - - public static class OverridePropertiesTestProfile implements QuarkusTestProfile { - - @Override - public Map getConfigOverrides() { - return Map.of( - "move2kube.api", "http://localhost:8080/api/v1" - ); - } - } - - @Test - @Disabled - // TODO: before each (or all?) 
create a workspace and a project in the local move2kube instance - public void testSaveTransformationOK() throws GitAPIException, IOException, ApiException { - UUID workflowCallerId = UUID.randomUUID(); - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"https://github.com/gabriel-farache/dotfiles\", " + - "\"branch\": \"m2k-test\"," + - " \"token\": \"\"," + - " \"workspaceId\": \"765a25fe-ab46-4aee-be7b-15d9b82da566\"," + - " \"projectId\": \"dd4a8dc9-bc61-4047-a9b0-88bf43306d55\"," + - " \"transformId\": \"b3350712-cac0-4a5c-a42d-355c5f9d1e5b\"," + - " \"workflowCallerId\": \"" + workflowCallerId + "\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", transformationSavedEventName) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(containsString("\"error\":null")); - } -} diff --git a/m2k/m2k-func/src/test/java/dev/parodos/SaveTransformationFunctionTest.java b/m2k/m2k-func/src/test/java/dev/parodos/SaveTransformationFunctionTest.java deleted file mode 100644 index a91d646..0000000 --- a/m2k/m2k-func/src/test/java/dev/parodos/SaveTransformationFunctionTest.java +++ /dev/null @@ -1,515 +0,0 @@ -package dev.parodos; - -import dev.parodos.move2kube.ApiException; -import dev.parodos.service.FolderCreatorService; -import dev.parodos.service.GitService; -import dev.parodos.service.Move2KubeService; -import dev.parodos.service.Move2KubeServiceImpl; -import io.quarkus.test.InjectMock; -import io.quarkus.test.junit.QuarkusTest; -import io.restassured.RestAssured; -import io.restassured.http.ContentType; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.filefilter.TrueFileFilter; -import org.eclipse.jgit.api.Git; -import org.eclipse.jgit.api.errors.GitAPIException; -import org.eclipse.jgit.api.errors.InvalidRemoteException; -import org.eclipse.microprofile.config.inject.ConfigProperty; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.UUID; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.not; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@QuarkusTest -public class SaveTransformationFunctionTest { - - @InjectMock - GitService gitServiceMock; - - @InjectMock - Move2KubeService move2KubeServiceMock; - - @InjectMock - FolderCreatorService folderCreatorService; - - @ConfigProperty(name = "transformation-saved.event.name") - private String transformationSavedEventName; - - Path transformOutputPath; - Path gitRepoLocalFolder; - - - public static final String TRANSFORMED_ZIP = "SaveTransformationFunctionTest/references/transformation_output.zip"; - - public static final String REFERENCE_FOLDER_ZIP = 
"SaveTransformationFunctionTest/references/expected_output.zip"; - - public static File REFERENCE_OUTPUT_UNZIP_PATH; - - ClassLoader classLoader = getClass().getClassLoader(); - - private Git git; - - @BeforeEach - public void setUp() throws GitAPIException, IOException { - File tmpDir = Files.createTempDirectory("gitRepoTest").toFile(); - git = Git.init().setDirectory(tmpDir).call(); - REFERENCE_OUTPUT_UNZIP_PATH = Files.createTempDirectory("refOutput").toFile(); - Move2KubeServiceImpl.extractZipFile(new File(classLoader.getResource(REFERENCE_FOLDER_ZIP).getFile()), REFERENCE_OUTPUT_UNZIP_PATH.toPath()); - } - - @AfterEach - public void tearDown() throws IOException { - git.getRepository().close(); - if (transformOutputPath != null) { - FileUtils.deleteDirectory(transformOutputPath.toFile()); - } - if (gitRepoLocalFolder != null) { - FileUtils.deleteDirectory(gitRepoLocalFolder.toFile()); - } - if (REFERENCE_OUTPUT_UNZIP_PATH != null) { - FileUtils.deleteDirectory(REFERENCE_OUTPUT_UNZIP_PATH); - } - transformOutputPath = null; - gitRepoLocalFolder = null; - REFERENCE_OUTPUT_UNZIP_PATH = null; - } - - @Test - public void testSaveTransformationIsWorkingWithToken() throws GitAPIException, IOException, ApiException, URISyntaxException { - UUID workflowCallerId = UUID.randomUUID(); - UUID transformId = UUID.randomUUID(); - transformOutputPath = Files.createTempDirectory(String.format("move2kube-transform-TEST-%s", transformId)); - gitRepoLocalFolder = Files.createTempDirectory(String.format("local-git-transform-TEST-%s", transformId)); - - URL transformedZip = classLoader.getResource(TRANSFORMED_ZIP); - Move2KubeServiceImpl.extractZipFile(new File(transformedZip.getFile()), transformOutputPath); - - when(folderCreatorService.createGitRepositoryLocalFolder(eq("gitRepo"), anyString())).thenReturn(gitRepoLocalFolder); - when(move2KubeServiceMock.getTransformationOutput(anyString(), anyString(), anyString())).thenReturn(transformOutputPath); - when(gitServiceMock.cloneRepo(anyString(), anyString(), anyString(), any())).thenReturn(git); - when(gitServiceMock.branchExists(any(), anyString())).thenReturn(false); - doNothing().when(gitServiceMock).createBranch(eq(git), anyString()); - doNothing().when(gitServiceMock).commit(eq(git), anyString(), anyString()); - doNothing().when(gitServiceMock).createBranch(eq(git), anyString()); - doNothing().when(gitServiceMock).push(eq(git), anyString()); - - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"gitRepo\", " + - "\"branch\": \"branch\"," + - " \"token\": \"token\"," + - " \"workspaceId\": \"workspaceId\"," + - " \"projectId\": \"projectId\"," + - " \"workflowCallerId\": \"" + workflowCallerId + "\"," + - " \"transformId\": \"" + transformId + "\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", transformationSavedEventName) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(containsString("\"error\":null")); - - verify(move2KubeServiceMock, times(1)).getTransformationOutput(anyString(), anyString(), anyString()); - verify(gitServiceMock, times(1)).cloneRepo(anyString(), anyString(), anyString(), any()); - verify(gitServiceMock, times(1)).createBranch(eq(git), anyString()); - verify(gitServiceMock, times(1)).branchExists(any(), anyString()); 
- verify(gitServiceMock, times(1)).commit(eq(git), anyString(), anyString()); - verify(gitServiceMock, times(1)).push(eq(git), anyString()); - - AssertFileMovedToGitLocalFolder(REFERENCE_OUTPUT_UNZIP_PATH.toPath()); - } - - @Test - public void testSaveTransformationIsWorkingWithoutToken() throws GitAPIException, IOException, ApiException, URISyntaxException { - UUID workflowCallerId = UUID.randomUUID(); - UUID transformId = UUID.randomUUID(); - transformOutputPath = Files.createTempDirectory(String.format("move2kube-transform-TEST-%s", transformId)); - gitRepoLocalFolder = Files.createTempDirectory(String.format("local-git-transform-TEST-%s", transformId)); - - URL transformedZip = classLoader.getResource(TRANSFORMED_ZIP); - Move2KubeServiceImpl.extractZipFile(new File(transformedZip.getFile()), transformOutputPath); - - when(folderCreatorService.createGitRepositoryLocalFolder(eq("gitRepo"), anyString())).thenReturn(gitRepoLocalFolder); - when(move2KubeServiceMock.getTransformationOutput(anyString(), anyString(), anyString())).thenReturn(transformOutputPath); - when(gitServiceMock.cloneRepo(anyString(), anyString(), eq(null), any())).thenReturn(git); - when(gitServiceMock.branchExists(any(), anyString())).thenReturn(false); - doNothing().when(gitServiceMock).createBranch(eq(git), anyString()); - doNothing().when(gitServiceMock).commit(eq(git), anyString(), anyString()); - doNothing().when(gitServiceMock).createBranch(eq(git), anyString()); - doNothing().when(gitServiceMock).push(eq(git), eq(null)); - - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"gitRepo\", " + - "\"branch\": \"branch\"," + - " \"workspaceId\": \"workspaceId\"," + - " \"projectId\": \"projectId\"," + - " \"workflowCallerId\": \"" + workflowCallerId + "\"," + - " \"transformId\": \"" + transformId + "\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", transformationSavedEventName) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(containsString("\"error\":null")); - - verify(move2KubeServiceMock, times(1)).getTransformationOutput(anyString(), anyString(), anyString()); - verify(gitServiceMock, times(1)).cloneRepo(anyString(), anyString(), eq(null), any()); - verify(gitServiceMock, times(1)).createBranch(eq(git), anyString()); - verify(gitServiceMock, times(1)).branchExists(any(), anyString()); - verify(gitServiceMock, times(1)).commit(eq(git), anyString(), anyString()); - verify(gitServiceMock, times(1)).push(eq(git), eq(null)); - - AssertFileMovedToGitLocalFolder(REFERENCE_OUTPUT_UNZIP_PATH.toPath()); - } - - @Test - public void testSaveTransformationIsFailingWhenRetrievingTransformationOutput() throws IOException, ApiException, GitAPIException { - UUID workflowCallerId = UUID.randomUUID(); - UUID transformId = UUID.randomUUID(); - transformOutputPath = Files.createTempDirectory(String.format("move2kube-transform-TEST-%s", transformId)); - gitRepoLocalFolder = Files.createTempDirectory(String.format("local-git-transform-TEST-%s", transformId)); - URL transformedZip = classLoader.getResource(TRANSFORMED_ZIP); - Move2KubeServiceImpl.extractZipFile(new File(transformedZip.getFile()), transformOutputPath); - - when(folderCreatorService.createGitRepositoryLocalFolder(eq("gitRepo"), 
anyString())).thenReturn(gitRepoLocalFolder); - when(move2KubeServiceMock.getTransformationOutput(anyString(), anyString(), anyString())).thenThrow(new IOException("Error while retrieving transformation output")); - - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"gitRepo\", " + - "\"branch\": \"branch\"," + - " \"token\": \"token\"," + - " \"workspaceId\": \"workspaceId\"," + - " \"projectId\": \"projectId\"," + - " \"workflowCallerId\": \"" + workflowCallerId + "\"," + - " \"transformId\": \"" + transformId + "\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", EventGenerator.ERROR_EVENT) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(not(containsString("\"error\":null"))); - - verify(move2KubeServiceMock, times(1)).getTransformationOutput(anyString(), anyString(), anyString()); - verify(gitServiceMock, times(0)).cloneRepo(anyString(), anyString(), anyString(), any()); - verify(gitServiceMock, times(0)).createBranch(eq(git), anyString()); - verify(gitServiceMock, times(0)).branchExists(any(), anyString()); - verify(gitServiceMock, times(0)).commit(eq(git), anyString(), anyString()); - verify(gitServiceMock, times(0)).push(eq(git), anyString()); - } - - @Test - public void testSaveTransformationGitCloneFail() throws GitAPIException, IOException, ApiException { - UUID workflowCallerId = UUID.randomUUID(); - UUID transformId = UUID.randomUUID(); - transformOutputPath = Files.createTempDirectory(String.format("move2kube-transform-TEST-%s", transformId)); - gitRepoLocalFolder = Files.createTempDirectory(String.format("local-git-transform-TEST-%s", transformId)); - URL transformedZip = classLoader.getResource(TRANSFORMED_ZIP); - Move2KubeServiceImpl.extractZipFile(new File(transformedZip.getFile()), transformOutputPath); - - when(folderCreatorService.createGitRepositoryLocalFolder(eq("gitRepo"), anyString())).thenReturn(gitRepoLocalFolder); - when(move2KubeServiceMock.getTransformationOutput(anyString(), anyString(), anyString())).thenReturn(transformOutputPath); - when(gitServiceMock.cloneRepo(anyString(), anyString(), anyString(), any())).thenThrow(new InvalidRemoteException("Error while cloning repo")); - - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"gitRepo\", " + - "\"branch\": \"branch\"," + - " \"token\": \"token\"," + - " \"workspaceId\": \"workspaceId\"," + - " \"projectId\": \"projectId\"," + - " \"workflowCallerId\": \"" + workflowCallerId + "\"," + - " \"transformId\": \"" + transformId + "\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", EventGenerator.ERROR_EVENT) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(not(containsString("\"error\":null"))); - - verify(move2KubeServiceMock, times(1)).getTransformationOutput(anyString(), anyString(), anyString()); - verify(gitServiceMock, times(1)).cloneRepo(anyString(), anyString(), anyString(), any()); - verify(gitServiceMock, times(0)).branchExists(any(), anyString()); - 
verify(gitServiceMock, times(0)).createBranch(eq(git), anyString()); - verify(gitServiceMock, times(0)).commit(eq(git), anyString(), anyString()); - verify(gitServiceMock, times(0)).push(eq(git), anyString()); - } - - @Test - public void testSaveTransformationBranchExists() throws GitAPIException, IOException, ApiException, URISyntaxException { - UUID workflowCallerId = UUID.randomUUID(); - UUID transformId = UUID.randomUUID(); - transformOutputPath = Files.createTempDirectory(String.format("move2kube-transform-TEST-%s", transformId)); - gitRepoLocalFolder = Files.createTempDirectory(String.format("local-git-transform-TEST-%s", transformId)); - URL transformedZip = classLoader.getResource(TRANSFORMED_ZIP); - Move2KubeServiceImpl.extractZipFile(new File(transformedZip.getFile()), transformOutputPath); - - when(folderCreatorService.createGitRepositoryLocalFolder(eq("gitRepo"), anyString())).thenReturn(gitRepoLocalFolder); - when(move2KubeServiceMock.getTransformationOutput(anyString(), anyString(), anyString())).thenReturn(transformOutputPath); - when(gitServiceMock.cloneRepo(anyString(), anyString(), anyString(), any())).thenReturn(git); - when(gitServiceMock.branchExists(any(), anyString())).thenReturn(true); - - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"gitRepo\", " + - "\"branch\": \"branch\"," + - " \"token\": \"token\"," + - " \"workspaceId\": \"workspaceId\"," + - " \"projectId\": \"projectId\"," + - " \"workflowCallerId\": \"" + workflowCallerId + "\"," + - " \"transformId\": \"" + transformId + "\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", EventGenerator.ERROR_EVENT) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(not(containsString("\"error\":null"))); - - verify(move2KubeServiceMock, times(1)).getTransformationOutput(anyString(), anyString(), anyString()); - verify(gitServiceMock, times(1)).cloneRepo(anyString(), anyString(), anyString(), any()); - verify(gitServiceMock, times(1)).branchExists(any(), anyString()); - verify(gitServiceMock, times(0)).createBranch(eq(git), anyString()); - verify(gitServiceMock, times(0)).commit(eq(git), anyString(), anyString()); - verify(gitServiceMock, times(0)).push(eq(git), anyString()); - } - - @Test - public void testSaveTransformationCreateBranchFail() throws GitAPIException, IOException, ApiException, URISyntaxException { - UUID workflowCallerId = UUID.randomUUID(); - UUID transformId = UUID.randomUUID(); - transformOutputPath = Files.createTempDirectory(String.format("move2kube-transform-TEST-%s", transformId)); - gitRepoLocalFolder = Files.createTempDirectory(String.format("local-git-transform-TEST-%s", transformId)); - URL transformedZip = classLoader.getResource(TRANSFORMED_ZIP); - Move2KubeServiceImpl.extractZipFile(new File(transformedZip.getFile()), transformOutputPath); - - when(folderCreatorService.createGitRepositoryLocalFolder(eq("gitRepo"), anyString())).thenReturn(gitRepoLocalFolder); - when(move2KubeServiceMock.getTransformationOutput(anyString(), anyString(), anyString())).thenReturn(transformOutputPath); - when(gitServiceMock.cloneRepo(anyString(), anyString(), anyString(), any())).thenReturn(git); - when(gitServiceMock.branchExists(any(), anyString())).thenReturn(false); - doThrow(new 
InvalidRemoteException("Error while creating new branch")).when(gitServiceMock).createBranch(eq(git), anyString()); - - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"gitRepo\", " + - "\"branch\": \"branch\"," + - " \"token\": \"token\"," + - " \"workspaceId\": \"workspaceId\"," + - " \"projectId\": \"projectId\"," + - " \"workflowCallerId\": \"" + workflowCallerId + "\"," + - " \"transformId\": \"" + transformId + "\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", EventGenerator.ERROR_EVENT) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(not(containsString("\"error\":null"))); - - verify(move2KubeServiceMock, times(1)).getTransformationOutput(anyString(), anyString(), anyString()); - verify(gitServiceMock, times(1)).cloneRepo(anyString(), anyString(), anyString(), any()); - verify(gitServiceMock, times(1)).branchExists(any(), anyString()); - verify(gitServiceMock, times(1)).createBranch(eq(git), anyString()); - verify(gitServiceMock, times(0)).commit(eq(git), anyString(), anyString()); - verify(gitServiceMock, times(0)).push(eq(git), anyString()); - } - - @Test - public void testSaveTransformationCommitChangesFail() throws GitAPIException, IOException, ApiException, URISyntaxException { - UUID workflowCallerId = UUID.randomUUID(); - UUID transformId = UUID.randomUUID(); - transformOutputPath = Files.createTempDirectory(String.format("move2kube-transform-TEST-%s", transformId)); - gitRepoLocalFolder = Files.createTempDirectory(String.format("local-git-transform-TEST-%s", transformId)); - URL transformedZip = classLoader.getResource(TRANSFORMED_ZIP); - Move2KubeServiceImpl.extractZipFile(new File(transformedZip.getFile()), transformOutputPath); - - when(folderCreatorService.createGitRepositoryLocalFolder(eq("gitRepo"), anyString())).thenReturn(gitRepoLocalFolder); - when(move2KubeServiceMock.getTransformationOutput(anyString(), anyString(), anyString())).thenReturn(transformOutputPath); - when(gitServiceMock.cloneRepo(anyString(), anyString(), anyString(), any())).thenReturn(git); - when(gitServiceMock.branchExists(any(), anyString())).thenReturn(false); - doNothing().when(gitServiceMock).createBranch(eq(git), anyString()); - doThrow(new InvalidRemoteException("Error while committing changes")).when(gitServiceMock).commit(eq(git), anyString(), anyString()); - - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"gitRepo\", " + - "\"branch\": \"branch\"," + - " \"token\": \"token\"," + - " \"workspaceId\": \"workspaceId\"," + - " \"projectId\": \"projectId\"," + - " \"workflowCallerId\": \"" + workflowCallerId + "\"," + - " \"transformId\": \"" + transformId + "\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", EventGenerator.ERROR_EVENT) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(not(containsString("\"error\":null"))); - - verify(move2KubeServiceMock, times(1)).getTransformationOutput(anyString(), anyString(), anyString()); - verify(gitServiceMock, 
times(1)).cloneRepo(anyString(), anyString(), anyString(), any()); - verify(gitServiceMock, times(1)).branchExists(any(), anyString()); - verify(gitServiceMock, times(1)).createBranch(eq(git), anyString()); - verify(gitServiceMock, times(1)).commit(eq(git), anyString(), anyString()); - verify(gitServiceMock, times(0)).push(eq(git), anyString()); - - AssertFileMovedToGitLocalFolder(REFERENCE_OUTPUT_UNZIP_PATH.toPath()); - } - - @Test - public void testSaveTransformationGitPushFails() throws GitAPIException, IOException, ApiException, URISyntaxException { - UUID workflowCallerId = UUID.randomUUID(); - UUID transformId = UUID.randomUUID(); - transformOutputPath = Files.createTempDirectory(String.format("move2kube-transform-TEST-%s", transformId)); - gitRepoLocalFolder = Files.createTempDirectory(String.format("local-git-transform-TEST-%s", transformId)); - URL transformedZip = classLoader.getResource(TRANSFORMED_ZIP); - Move2KubeServiceImpl.extractZipFile(new File(transformedZip.getFile()), transformOutputPath); - - when(folderCreatorService.createGitRepositoryLocalFolder(eq("gitRepo"), anyString())).thenReturn(gitRepoLocalFolder); - when(move2KubeServiceMock.getTransformationOutput(anyString(), anyString(), anyString())).thenReturn(transformOutputPath); - when(gitServiceMock.cloneRepo(anyString(), anyString(), anyString(), any())).thenReturn(git); - doNothing().when(gitServiceMock).commit(eq(git), anyString(), anyString()); - doNothing().when(gitServiceMock).createBranch(eq(git), anyString()); - doThrow(new InvalidRemoteException("Error while pushing to remote")).when(gitServiceMock).push(eq(git), anyString()); - - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"gitRepo\", " + - "\"branch\": \"branch\"," + - " \"token\": \"token\"," + - " \"workspaceId\": \"workspaceId\"," + - " \"projectId\": \"projectId\"," + - " \"workflowCallerId\": \"" + workflowCallerId + "\"," + - " \"transformId\": \"" + transformId + "\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", EventGenerator.ERROR_EVENT) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(not(containsString("\"error\":null"))); - - verify(move2KubeServiceMock, times(1)).getTransformationOutput(anyString(), anyString(), anyString()); - verify(gitServiceMock, times(1)).cloneRepo(anyString(), anyString(), anyString(), any()); - verify(gitServiceMock, times(1)).branchExists(any(), anyString()); - verify(gitServiceMock, times(1)).createBranch(eq(git), anyString()); - verify(gitServiceMock, times(1)).commit(eq(git), anyString(), anyString()); - verify(gitServiceMock, times(1)).push(eq(git), anyString()); - - AssertFileMovedToGitLocalFolder(REFERENCE_OUTPUT_UNZIP_PATH.toPath()); - } - - @Test - public void testSaveTransformationMissingInput() throws GitAPIException, IOException, ApiException { - UUID workflowCallerId = UUID.randomUUID(); - RestAssured.given().contentType("application/json") - .header("ce-specversion", "1.0") - .header("ce-id", UUID.randomUUID().toString()) - .header("ce-type", "save-transformation") - .header("ce-source", "test") - .body("{\"gitRepo\": \"gitRepo\", " + - "\"branch\": \"branch\"," + - " \"token\": \"token\"," + - " \"projectId\": \"projectId\"," + - " \"workflowCallerId\": \"" + workflowCallerId + 
"\"" + - "}") - .post("/") - .then() - .statusCode(200) - .contentType(ContentType.JSON) - .header("ce-type", EventGenerator.ERROR_EVENT) - .header("ce-kogitoprocrefid", workflowCallerId.toString()) - .header("ce-source", SaveTransformationFunction.SOURCE) - .body(not(containsString("\"error\":null"))); - - verify(move2KubeServiceMock, times(0)).getTransformationOutput(anyString(), anyString(), anyString()); - verify(gitServiceMock, times(0)).cloneRepo(anyString(), anyString(), anyString(), any()); - verify(gitServiceMock, times(0)).branchExists(any(), anyString()); - verify(gitServiceMock, times(0)).createBranch(eq(git), anyString()); - verify(gitServiceMock, times(0)).commit(eq(git), anyString(), anyString()); - verify(gitServiceMock, times(0)).push(eq(git), anyString()); - } - - private void AssertFileMovedToGitLocalFolder(Path localOutputRef) { - Path refDeploy = Path.of(localOutputRef.toString(), "/deploy"); - Path actualDeploy = Path.of(gitRepoLocalFolder.toString(), "/deploy"); - Path refScripts = Path.of(localOutputRef.toString(), "/scripts"); - Path actualScripts = Path.of(gitRepoLocalFolder.toString(), "/scripts"); - Path refSource = Path.of(localOutputRef.toString(), "/source"); - Path actualSource = Path.of(gitRepoLocalFolder.toString(), "/source"); - AssertDirsEqual(refDeploy.toFile(), actualDeploy.toFile()); - AssertDirsEqual(refScripts.toFile(), actualScripts.toFile()); - AssertDirsEqual(refSource.toFile(), actualSource.toFile()); - } - - public static void AssertDirsEqual(File ref, File actual) { - Assertions.assertArrayEquals( - FileUtils.listFilesAndDirs(ref, TrueFileFilter.TRUE, TrueFileFilter.TRUE).stream().map(File::getName).sorted().toArray(), - FileUtils.listFilesAndDirs(actual, TrueFileFilter.TRUE, TrueFileFilter.TRUE).stream().map(File::getName).sorted().toArray()); - } -} diff --git a/m2k/m2k-func/src/test/resources/SaveTransformationFunctionTest/references/expected_output.zip b/m2k/m2k-func/src/test/resources/SaveTransformationFunctionTest/references/expected_output.zip deleted file mode 100644 index 7000262..0000000 Binary files a/m2k/m2k-func/src/test/resources/SaveTransformationFunctionTest/references/expected_output.zip and /dev/null differ diff --git a/m2k/m2k-func/src/test/resources/SaveTransformationFunctionTest/references/transformation_output.zip b/m2k/m2k-func/src/test/resources/SaveTransformationFunctionTest/references/transformation_output.zip deleted file mode 100644 index be45603..0000000 Binary files a/m2k/m2k-func/src/test/resources/SaveTransformationFunctionTest/references/transformation_output.zip and /dev/null differ diff --git a/m2k/pom.xml b/m2k/pom.xml deleted file mode 100644 index 5f54cd2..0000000 --- a/m2k/pom.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - 4.0.0 - dev.parodos - m2k - 1.0.0-SNAPSHOT - pom - - quarkus-bom - io.quarkus.platform - 3.4.1 - - - - - ${quarkus.platform.group-id} - ${quarkus.platform.artifact-id} - ${quarkus.platform.version} - pom - import - - - - - m2k-func - serverless-workflow-m2k - - diff --git a/m2k/sequence_diagram.jpg b/m2k/sequence_diagram.jpg deleted file mode 100644 index 7e08596..0000000 Binary files a/m2k/sequence_diagram.jpg and /dev/null differ diff --git a/m2k/serverless-workflow-m2k/.dockerignore b/m2k/serverless-workflow-m2k/.dockerignore deleted file mode 100644 index 94810d0..0000000 --- a/m2k/serverless-workflow-m2k/.dockerignore +++ /dev/null @@ -1,5 +0,0 @@ -* -!target/*-runner -!target/*-runner.jar -!target/lib/* -!target/quarkus-app/* \ No newline at end of file diff --git 
a/m2k/serverless-workflow-m2k/.gitignore b/m2k/serverless-workflow-m2k/.gitignore deleted file mode 100644 index f3e0985..0000000 --- a/m2k/serverless-workflow-m2k/.gitignore +++ /dev/null @@ -1,44 +0,0 @@ -#Maven -target/ -pom.xml.tag -pom.xml.releaseBackup -pom.xml.versionsBackup -release.properties -.flattened-pom.xml - -# Eclipse -.project -.classpath -.settings/ -bin/ - -# IntelliJ -.idea -*.ipr -*.iml -*.iws - -# NetBeans -nb-configuration.xml - -# Visual Studio Code -.vscode -.factorypath - -# OSX -.DS_Store - -# Vim -*.swp -*.swo - -# patch -*.orig -*.rej - -# Local environment -.env - -# Plugin directory -/.quarkus/cli/plugins/ -src/main/resources/specs/shared \ No newline at end of file diff --git a/m2k/serverless-workflow-m2k/.mvn/wrapper/.gitignore b/m2k/serverless-workflow-m2k/.mvn/wrapper/.gitignore deleted file mode 100644 index e72f5e8..0000000 --- a/m2k/serverless-workflow-m2k/.mvn/wrapper/.gitignore +++ /dev/null @@ -1 +0,0 @@ -maven-wrapper.jar diff --git a/m2k/serverless-workflow-m2k/.mvn/wrapper/MavenWrapperDownloader.java b/m2k/serverless-workflow-m2k/.mvn/wrapper/MavenWrapperDownloader.java deleted file mode 100644 index 84d1e60..0000000 --- a/m2k/serverless-workflow-m2k/.mvn/wrapper/MavenWrapperDownloader.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import java.io.IOException; -import java.io.InputStream; -import java.net.Authenticator; -import java.net.PasswordAuthentication; -import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardCopyOption; - -public final class MavenWrapperDownloader -{ - private static final String WRAPPER_VERSION = "3.2.0"; - - private static final boolean VERBOSE = Boolean.parseBoolean( System.getenv( "MVNW_VERBOSE" ) ); - - public static void main( String[] args ) - { - log( "Apache Maven Wrapper Downloader " + WRAPPER_VERSION ); - - if ( args.length != 2 ) - { - System.err.println( " - ERROR wrapperUrl or wrapperJarPath parameter missing" ); - System.exit( 1 ); - } - - try - { - log( " - Downloader started" ); - final URL wrapperUrl = new URL( args[0] ); - final String jarPath = args[1].replace( "..", "" ); // Sanitize path - final Path wrapperJarPath = Paths.get( jarPath ).toAbsolutePath().normalize(); - downloadFileFromURL( wrapperUrl, wrapperJarPath ); - log( "Done" ); - } - catch ( IOException e ) - { - System.err.println( "- Error downloading: " + e.getMessage() ); - if ( VERBOSE ) - { - e.printStackTrace(); - } - System.exit( 1 ); - } - } - - private static void downloadFileFromURL( URL wrapperUrl, Path wrapperJarPath ) - throws IOException - { - log( " - Downloading to: " + wrapperJarPath ); - if ( System.getenv( "MVNW_USERNAME" ) != null && System.getenv( "MVNW_PASSWORD" ) != null ) - { - final String username = System.getenv( "MVNW_USERNAME" ); - final char[] password = System.getenv( "MVNW_PASSWORD" ).toCharArray(); - Authenticator.setDefault( new Authenticator() - { - @Override - protected PasswordAuthentication getPasswordAuthentication() - { - return new PasswordAuthentication( username, password ); - } - } ); - } - try ( InputStream inStream = wrapperUrl.openStream() ) - { - Files.copy( inStream, wrapperJarPath, StandardCopyOption.REPLACE_EXISTING ); - } - log( " - Downloader complete" ); - } - - private static void log( String msg ) - { - if ( VERBOSE ) - { - System.out.println( msg ); - } - } - -} diff --git a/m2k/serverless-workflow-m2k/.mvn/wrapper/maven-wrapper.properties b/m2k/serverless-workflow-m2k/.mvn/wrapper/maven-wrapper.properties deleted file mode 100644 index 6d3a566..0000000 --- a/m2k/serverless-workflow-m2k/.mvn/wrapper/maven-wrapper.properties +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.3/apache-maven-3.9.3-bin.zip -wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar diff --git a/m2k/serverless-workflow-m2k/README.md b/m2k/serverless-workflow-m2k/README.md deleted file mode 100644 index b13277f..0000000 --- a/m2k/serverless-workflow-m2k/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# serverless-workflow-m2k - -This project uses Quarkus, the Supersonic Subatomic Java Framework. - -If you want to learn more about Quarkus, please visit its website: https://quarkus.io/ . - -## Running the application in dev mode - -You can run your application in dev mode that enables live coding using: -```shell script -./mvnw compile quarkus:dev -``` - -> **_NOTE:_** Quarkus now ships with a Dev UI, which is available in dev mode only at http://localhost:8080/q/dev/. - -## Packaging and running the application - -The application can be packaged using: -```shell script -./mvnw package -``` -It produces the `quarkus-run.jar` file in the `target/quarkus-app/` directory. -Be aware that it’s not an _über-jar_ as the dependencies are copied into the `target/quarkus-app/lib/` directory. - -The application is now runnable using `java -jar target/quarkus-app/quarkus-run.jar`. - -If you want to build an _über-jar_, execute the following command: -```shell script -./mvnw package -Dquarkus.package.type=uber-jar -``` - -The application, packaged as an _über-jar_, is now runnable using `java -jar target/*-runner.jar`. - -## Creating a native executable - -You can create a native executable using: -```shell script -./mvnw package -Dnative -``` - -Or, if you don't have GraalVM installed, you can run the native executable build in a container using: -```shell script -./mvnw package -Dnative -Dquarkus.native.container-build=true -``` - -You can then execute your native executable with: `./target/serverless-workflow-m2k-1.0.0-SNAPSHOT-runner` - -If you want to learn more about building native executables, please consult https://quarkus.io/guides/maven-tooling. - -## Related Guides - -- Kogito - Serverless Workflow ([guide](https://quarkus.io/version/2.13/guides/kogito)): Add Kogito Serverless Workflows (SW) capabilities - Includes the Process engine capability -- SmallRye OpenAPI ([guide](https://quarkus.io/guides/openapi-swaggerui)): Document your REST APIs with OpenAPI - comes with Swagger UI diff --git a/m2k/serverless-workflow-m2k/mvnw b/m2k/serverless-workflow-m2k/mvnw deleted file mode 100755 index 8d937f4..0000000 --- a/m2k/serverless-workflow-m2k/mvnw +++ /dev/null @@ -1,308 +0,0 @@ -#!/bin/sh -# ---------------------------------------------------------------------------- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# ---------------------------------------------------------------------------- - -# ---------------------------------------------------------------------------- -# Apache Maven Wrapper startup batch script, version 3.2.0 -# -# Required ENV vars: -# ------------------ -# JAVA_HOME - location of a JDK home dir -# -# Optional ENV vars -# ----------------- -# MAVEN_OPTS - parameters passed to the Java VM when running Maven -# e.g. to debug Maven itself, use -# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -# MAVEN_SKIP_RC - flag to disable loading of mavenrc files -# ---------------------------------------------------------------------------- - -if [ -z "$MAVEN_SKIP_RC" ] ; then - - if [ -f /usr/local/etc/mavenrc ] ; then - . /usr/local/etc/mavenrc - fi - - if [ -f /etc/mavenrc ] ; then - . /etc/mavenrc - fi - - if [ -f "$HOME/.mavenrc" ] ; then - . "$HOME/.mavenrc" - fi - -fi - -# OS specific support. $var _must_ be set to either true or false. -cygwin=false; -darwin=false; -mingw=false -case "$(uname)" in - CYGWIN*) cygwin=true ;; - MINGW*) mingw=true;; - Darwin*) darwin=true - # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home - # See https://developer.apple.com/library/mac/qa/qa1170/_index.html - if [ -z "$JAVA_HOME" ]; then - if [ -x "/usr/libexec/java_home" ]; then - JAVA_HOME="$(/usr/libexec/java_home)"; export JAVA_HOME - else - JAVA_HOME="/Library/Java/Home"; export JAVA_HOME - fi - fi - ;; -esac - -if [ -z "$JAVA_HOME" ] ; then - if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=$(java-config --jre-home) - fi -fi - -# For Cygwin, ensure paths are in UNIX format before anything is touched -if $cygwin ; then - [ -n "$JAVA_HOME" ] && - JAVA_HOME=$(cygpath --unix "$JAVA_HOME") - [ -n "$CLASSPATH" ] && - CLASSPATH=$(cygpath --path --unix "$CLASSPATH") -fi - -# For Mingw, ensure paths are in UNIX format before anything is touched -if $mingw ; then - [ -n "$JAVA_HOME" ] && [ -d "$JAVA_HOME" ] && - JAVA_HOME="$(cd "$JAVA_HOME" || (echo "cannot cd into $JAVA_HOME."; exit 1); pwd)" -fi - -if [ -z "$JAVA_HOME" ]; then - javaExecutable="$(which javac)" - if [ -n "$javaExecutable" ] && ! [ "$(expr "\"$javaExecutable\"" : '\([^ ]*\)')" = "no" ]; then - # readlink(1) is not available as standard on Solaris 10. - readLink=$(which readlink) - if [ ! "$(expr "$readLink" : '\([^ ]*\)')" = "no" ]; then - if $darwin ; then - javaHome="$(dirname "\"$javaExecutable\"")" - javaExecutable="$(cd "\"$javaHome\"" && pwd -P)/javac" - else - javaExecutable="$(readlink -f "\"$javaExecutable\"")" - fi - javaHome="$(dirname "\"$javaExecutable\"")" - javaHome=$(expr "$javaHome" : '\(.*\)/bin') - JAVA_HOME="$javaHome" - export JAVA_HOME - fi - fi -fi - -if [ -z "$JAVACMD" ] ; then - if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - else - JAVACMD="$(\unset -f command 2>/dev/null; \command -v java)" - fi -fi - -if [ ! -x "$JAVACMD" ] ; then - echo "Error: JAVA_HOME is not defined correctly." >&2 - echo " We cannot execute $JAVACMD" >&2 - exit 1 -fi - -if [ -z "$JAVA_HOME" ] ; then - echo "Warning: JAVA_HOME environment variable is not set." 
-fi - -# traverses directory structure from process work directory to filesystem root -# first directory with .mvn subdirectory is considered project base directory -find_maven_basedir() { - if [ -z "$1" ] - then - echo "Path not specified to find_maven_basedir" - return 1 - fi - - basedir="$1" - wdir="$1" - while [ "$wdir" != '/' ] ; do - if [ -d "$wdir"/.mvn ] ; then - basedir=$wdir - break - fi - # workaround for JBEAP-8937 (on Solaris 10/Sparc) - if [ -d "${wdir}" ]; then - wdir=$(cd "$wdir/.." || exit 1; pwd) - fi - # end of workaround - done - printf '%s' "$(cd "$basedir" || exit 1; pwd)" -} - -# concatenates all lines of a file -concat_lines() { - if [ -f "$1" ]; then - # Remove \r in case we run on Windows within Git Bash - # and check out the repository with auto CRLF management - # enabled. Otherwise, we may read lines that are delimited with - # \r\n and produce $'-Xarg\r' rather than -Xarg due to word - # splitting rules. - tr -s '\r\n' ' ' < "$1" - fi -} - -log() { - if [ "$MVNW_VERBOSE" = true ]; then - printf '%s\n' "$1" - fi -} - -BASE_DIR=$(find_maven_basedir "$(dirname "$0")") -if [ -z "$BASE_DIR" ]; then - exit 1; -fi - -MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}; export MAVEN_PROJECTBASEDIR -log "$MAVEN_PROJECTBASEDIR" - -########################################################################################## -# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -# This allows using the maven wrapper in projects that prohibit checking in binary data. -########################################################################################## -wrapperJarPath="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" -if [ -r "$wrapperJarPath" ]; then - log "Found $wrapperJarPath" -else - log "Couldn't find $wrapperJarPath, downloading it ..." - - if [ -n "$MVNW_REPOURL" ]; then - wrapperUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" - else - wrapperUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" - fi - while IFS="=" read -r key value; do - # Remove '\r' from value to allow usage on windows as IFS does not consider '\r' as a separator ( considers space, tab, new line ('\n'), and custom '=' ) - safeValue=$(echo "$value" | tr -d '\r') - case "$key" in (wrapperUrl) wrapperUrl="$safeValue"; break ;; - esac - done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" - log "Downloading from: $wrapperUrl" - - if $cygwin; then - wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") - fi - - if command -v wget > /dev/null; then - log "Found wget ... using wget" - [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--quiet" - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - wget $QUIET "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" - else - wget $QUIET --http-user="$MVNW_USERNAME" --http-password="$MVNW_PASSWORD" "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" - fi - elif command -v curl > /dev/null; then - log "Found curl ... 
using curl" - [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--silent" - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - curl $QUIET -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" - else - curl $QUIET --user "$MVNW_USERNAME:$MVNW_PASSWORD" -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" - fi - else - log "Falling back to using Java to download" - javaSource="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.java" - javaClass="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.class" - # For Cygwin, switch paths to Windows format before running javac - if $cygwin; then - javaSource=$(cygpath --path --windows "$javaSource") - javaClass=$(cygpath --path --windows "$javaClass") - fi - if [ -e "$javaSource" ]; then - if [ ! -e "$javaClass" ]; then - log " - Compiling MavenWrapperDownloader.java ..." - ("$JAVA_HOME/bin/javac" "$javaSource") - fi - if [ -e "$javaClass" ]; then - log " - Running MavenWrapperDownloader.java ..." - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$wrapperUrl" "$wrapperJarPath") || rm -f "$wrapperJarPath" - fi - fi - fi -fi -########################################################################################## -# End of extension -########################################################################################## - -# If specified, validate the SHA-256 sum of the Maven wrapper jar file -wrapperSha256Sum="" -while IFS="=" read -r key value; do - case "$key" in (wrapperSha256Sum) wrapperSha256Sum=$value; break ;; - esac -done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" -if [ -n "$wrapperSha256Sum" ]; then - wrapperSha256Result=false - if command -v sha256sum > /dev/null; then - if echo "$wrapperSha256Sum $wrapperJarPath" | sha256sum -c > /dev/null 2>&1; then - wrapperSha256Result=true - fi - elif command -v shasum > /dev/null; then - if echo "$wrapperSha256Sum $wrapperJarPath" | shasum -a 256 -c > /dev/null 2>&1; then - wrapperSha256Result=true - fi - else - echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." - echo "Please install either command, or disable validation by removing 'wrapperSha256Sum' from your maven-wrapper.properties." - exit 1 - fi - if [ $wrapperSha256Result = false ]; then - echo "Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised." >&2 - echo "Investigate or delete $wrapperJarPath to attempt a clean download." >&2 - echo "If you updated your Maven version, you need to update the specified wrapperSha256Sum property." >&2 - exit 1 - fi -fi - -MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" - -# For Cygwin, switch paths to Windows format before running java -if $cygwin; then - [ -n "$JAVA_HOME" ] && - JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") - [ -n "$CLASSPATH" ] && - CLASSPATH=$(cygpath --path --windows "$CLASSPATH") - [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") -fi - -# Provide a "standardized" way to retrieve the CLI args that will -# work with both Windows and non-Windows executions. 
-MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $*" -export MAVEN_CMD_LINE_ARGS - -WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -# shellcheck disable=SC2086 # safe args -exec "$JAVACMD" \ - $MAVEN_OPTS \ - $MAVEN_DEBUG_OPTS \ - -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ - ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/m2k/serverless-workflow-m2k/mvnw.cmd b/m2k/serverless-workflow-m2k/mvnw.cmd deleted file mode 100644 index c4586b5..0000000 --- a/m2k/serverless-workflow-m2k/mvnw.cmd +++ /dev/null @@ -1,205 +0,0 @@ -@REM ---------------------------------------------------------------------------- -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM ---------------------------------------------------------------------------- - -@REM ---------------------------------------------------------------------------- -@REM Apache Maven Wrapper startup batch script, version 3.2.0 -@REM -@REM Required ENV vars: -@REM JAVA_HOME - location of a JDK home dir -@REM -@REM Optional ENV vars -@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending -@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven -@REM e.g. to debug Maven itself, use -@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files -@REM ---------------------------------------------------------------------------- - -@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' -@echo off -@REM set title of command window -title %0 -@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' -@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% - -@REM set %HOME% to equivalent of $HOME -if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") - -@REM Execute a user defined script before this one -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre -@REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* -if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* -:skipRcPre - -@setlocal - -set ERROR_CODE=0 - -@REM To isolate internal variables from possible post scripts, we use another setlocal -@setlocal - -@REM ==== START VALIDATION ==== -if not "%JAVA_HOME%" == "" goto OkJHome - -echo. -echo Error: JAVA_HOME not found in your environment. >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. 
-goto error - -:OkJHome -if exist "%JAVA_HOME%\bin\java.exe" goto init - -echo. -echo Error: JAVA_HOME is set to an invalid directory. >&2 -echo JAVA_HOME = "%JAVA_HOME%" >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -@REM ==== END VALIDATION ==== - -:init - -@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". -@REM Fallback to current working directory if not found. - -set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% -IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir - -set EXEC_DIR=%CD% -set WDIR=%EXEC_DIR% -:findBaseDir -IF EXIST "%WDIR%"\.mvn goto baseDirFound -cd .. -IF "%WDIR%"=="%CD%" goto baseDirNotFound -set WDIR=%CD% -goto findBaseDir - -:baseDirFound -set MAVEN_PROJECTBASEDIR=%WDIR% -cd "%EXEC_DIR%" -goto endDetectBaseDir - -:baseDirNotFound -set MAVEN_PROJECTBASEDIR=%EXEC_DIR% -cd "%EXEC_DIR%" - -:endDetectBaseDir - -IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig - -@setlocal EnableExtensions EnableDelayedExpansion -for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a -@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% - -:endReadAdditionalConfig - -SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" -set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" -set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -set WRAPPER_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" - -FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( - IF "%%A"=="wrapperUrl" SET WRAPPER_URL=%%B -) - -@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -@REM This allows using the maven wrapper in projects that prohibit checking in binary data. -if exist %WRAPPER_JAR% ( - if "%MVNW_VERBOSE%" == "true" ( - echo Found %WRAPPER_JAR% - ) -) else ( - if not "%MVNW_REPOURL%" == "" ( - SET WRAPPER_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" - ) - if "%MVNW_VERBOSE%" == "true" ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... 
- echo Downloading from: %WRAPPER_URL% - ) - - powershell -Command "&{"^ - "$webclient = new-object System.Net.WebClient;"^ - "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ - "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ - "}"^ - "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%WRAPPER_URL%', '%WRAPPER_JAR%')"^ - "}" - if "%MVNW_VERBOSE%" == "true" ( - echo Finished downloading %WRAPPER_JAR% - ) -) -@REM End of extension - -@REM If specified, validate the SHA-256 sum of the Maven wrapper jar file -SET WRAPPER_SHA_256_SUM="" -FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( - IF "%%A"=="wrapperSha256Sum" SET WRAPPER_SHA_256_SUM=%%B -) -IF NOT %WRAPPER_SHA_256_SUM%=="" ( - powershell -Command "&{"^ - "$hash = (Get-FileHash \"%WRAPPER_JAR%\" -Algorithm SHA256).Hash.ToLower();"^ - "If('%WRAPPER_SHA_256_SUM%' -ne $hash){"^ - " Write-Output 'Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised.';"^ - " Write-Output 'Investigate or delete %WRAPPER_JAR% to attempt a clean download.';"^ - " Write-Output 'If you updated your Maven version, you need to update the specified wrapperSha256Sum property.';"^ - " exit 1;"^ - "}"^ - "}" - if ERRORLEVEL 1 goto error -) - -@REM Provide a "standardized" way to retrieve the CLI args that will -@REM work with both Windows and non-Windows executions. -set MAVEN_CMD_LINE_ARGS=%* - -%MAVEN_JAVA_EXE% ^ - %JVM_CONFIG_MAVEN_PROPS% ^ - %MAVEN_OPTS% ^ - %MAVEN_DEBUG_OPTS% ^ - -classpath %WRAPPER_JAR% ^ - "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ - %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* -if ERRORLEVEL 1 goto error -goto end - -:error -set ERROR_CODE=1 - -:end -@endlocal & set ERROR_CODE=%ERROR_CODE% - -if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost -@REM check for post script, once with legacy .bat ending and once with .cmd ending -if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" -if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" -:skipRcPost - -@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' -if "%MAVEN_BATCH_PAUSE%"=="on" pause - -if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% - -cmd /C exit /B %ERROR_CODE% diff --git a/m2k/serverless-workflow-m2k/pom.xml b/m2k/serverless-workflow-m2k/pom.xml deleted file mode 100644 index 8cc71f2..0000000 --- a/m2k/serverless-workflow-m2k/pom.xml +++ /dev/null @@ -1,222 +0,0 @@ - - - 4.0.0 - serverless-workflow-m2k - - dev.parodos - swf-parent - 1.0.0-SNAPSHOT - ../../swf-parent - - - - - - io.quarkus - quarkus-arc - - - io.quarkus - quarkus-resteasy - - - io.quarkus - quarkus-resteasy-jackson - - - io.quarkus - quarkus-smallrye-openapi - - - io.quarkus - quarkus-smallrye-health - - - - - org.kie.kogito - kogito-quarkus-serverless-workflow - - - org.kie.kogito - kogito-addons-quarkus-kubernetes - - - org.kie.kogito - kogito-addons-quarkus-fabric8-kubernetes-service-catalog - - - org.kie.kogito - kogito-quarkus-serverless-workflow-devui - - - org.kie.kogito - kogito-addons-quarkus-source-files - - - io.quarkiverse.openapi.generator - quarkus-openapi-generator - 2.2.15 - - - org.kie.kogito - kogito-addons-quarkus-knative-serving - - - - org.kie.kogito - kogito-addons-quarkus-persistence-jdbc - - - io.quarkus - quarkus-jdbc-postgresql - - - io.quarkus - quarkus-agroal - 
- - - - io.quarkus - quarkus-junit5 - test - - - io.rest-assured - rest-assured - test - - - org.awaitility - awaitility - test - - - - - - maven-clean-plugin - 2.5 - - - auto-clean - initialize - - clean - - - true - - - src/main/resources/specs/shared - - - - - - - - maven-resources-plugin - 3.3.1 - - - copy-resources-shared-specs - - generate-resources - - copy-resources - - - src/main/resources/specs/shared - - - ../../shared_specs - - mailtrap.yaml - - - - - - - - - - - - native - - - native - - - - false - native - - - - knative - - true - - m2k - false - - ${namespace} - ${deploy} - knative - - - - org.kie.kogito - kogito-addons-quarkus-events-process - - - org.kie.kogito - kogito-addons-quarkus-knative-eventing - - - org.kie.kogito - kogito-addons-quarkus-jobs-management - - - org.kie.kogito - kogito-addons-quarkus-source-files - - - org.kie.kogito - kogito-quarkus-serverless-workflow-devui - - - io.quarkus - quarkus-kubernetes - - - io.quarkus - quarkus-container-image-jib - - - - - container - - - container - - - - true - container - - - - io.quarkus - quarkus-container-image-jib - - - - - diff --git a/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.jvm b/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.jvm deleted file mode 100644 index 1c74acf..0000000 --- a/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.jvm +++ /dev/null @@ -1,97 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/serverless-workflow-m2k-jvm . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/serverless-workflow-m2k-jvm -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. -# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 -# when running the container -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/serverless-workflow-m2k-jvm -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. -# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. 
-# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. 
(example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-17:1.16 - -ENV LANGUAGE='en_US:en' - - -# We make four distinct layers so if there are application changes the library layers can be re-used -COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ -COPY --chown=185 target/quarkus-app/*.jar /deployments/ -COPY --chown=185 target/quarkus-app/app/ /deployments/app/ -COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - -ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] - diff --git a/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.legacy-jar b/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.legacy-jar deleted file mode 100644 index 79f72dd..0000000 --- a/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.legacy-jar +++ /dev/null @@ -1,93 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -Dquarkus.package.type=legacy-jar -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/serverless-workflow-m2k-legacy-jar . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/serverless-workflow-m2k-legacy-jar -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. -# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 -# when running the container -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/serverless-workflow-m2k-legacy-jar -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. -# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. 
You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. (example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-17:1.16 - -ENV LANGUAGE='en_US:en' - - -COPY target/lib/* /deployments/lib/ -COPY target/*-runner.jar /deployments/quarkus-run.jar - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - -ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] diff --git a/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.native b/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.native deleted file mode 100644 index d01ef82..0000000 --- a/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.native +++ /dev/null @@ -1,27 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# -# Before building the container image run: -# -# ./mvnw package -Dnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native -t quarkus/serverless-workflow-m2k . 
-# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/serverless-workflow-m2k -# -### -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.native-micro b/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.native-micro deleted file mode 100644 index 86e00be..0000000 --- a/m2k/serverless-workflow-m2k/src/main/docker/Dockerfile.native-micro +++ /dev/null @@ -1,30 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# It uses a micro base image, tuned for Quarkus native executables. -# It reduces the size of the resulting container image. -# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. -# -# Before building the container image run: -# -# ./mvnw package -Dnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/serverless-workflow-m2k . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/serverless-workflow-m2k -# -### -FROM quay.io/quarkus/quarkus-micro-image:2.0 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/m2k/serverless-workflow-m2k/src/main/resources/application-knative.properties b/m2k/serverless-workflow-m2k/src/main/resources/application-knative.properties deleted file mode 100644 index 6524b78..0000000 --- a/m2k/serverless-workflow-m2k/src/main/resources/application-knative.properties +++ /dev/null @@ -1,23 +0,0 @@ -# Use the Kogito service discovery mechanism to get the current service url. -kogito.service.url=http://serverless-workflow-m2k.m2k.svc.cluster.local -# Skip user tasks and variables events sending. -kogito.events.usertasks.enabled=false -kogito.events.variables.enabled=false - - -# This enables Knative to fetch the image information on Minikube. -# You can change this property with -Pknative -Dquarkus.container-image.group from the command line. 
-quarkus.container-image.build=true -#quarkus.container-image.registry=dev.local -quarkus.container-image.group=orchestrator -quarkus.container-image.name=serverless-workflow-m2k -quarkus.container-image.tag=2.0.0-SNAPSHOT -quarkus.kubernetes.deployment-target=knative -quarkus.knative.image-pull-policy=always - -#Optional push to a registry -quarkus.container-image.registry=quay.io -quarkus.container-image.push=true - -quarkus.knative.min-scale=0 -quarkus.knative.max-scale=1 \ No newline at end of file diff --git a/m2k/serverless-workflow-m2k/src/main/resources/application.properties deleted file mode 100644 index 4f037a6..0000000 --- a/m2k/serverless-workflow-m2k/src/main/resources/application.properties +++ /dev/null @@ -1,41 +0,0 @@ -quarkus.http.port=8080 - -kogito.service.url=http://localhost:${quarkus.http.port} - -quarkus.kogito.devservices.enabled=false -quarkus.devservices.enabled=false - -quarkus.swagger-ui.always-include=true -quarkus.kogito.data-index.graphql.ui.always-include=true - - -# Kogito runtime persistence configurations -quarkus.datasource.jdbc.url=jdbc:postgresql://postgres-db-service.postgres:5432/sonataflow -quarkus.datasource.username=sonataflow -quarkus.datasource.password=sonataflow - -kogito.persistence.type=jdbc -kogito.persistence.proto.marshaller=false -kogito.persistence.query.timeout.millis=10000 -quarkus.datasource.db-kind=postgresql -#quarkus.flyway.migrate-at-start=true -#quarkus.flyway.baseline-on-migrate = true - -# Configuration for the incoming cloud events received by the serverless workflows. -mp.messaging.incoming.kogito_incoming_stream.connector=quarkus-http -mp.messaging.incoming.kogito_incoming_stream.path=/ -mp.messaging.incoming.kogito_incoming_stream.method=POST - -kogito.jobs-service.url=http://jobs-service-service.default.svc.cluster.local -# Job Service kogito-addons-quarkus-jobs-knative-eventing configuration -mp.messaging.outgoing.kogito-job-service-job-request-events.connector=quarkus-http -mp.messaging.outgoing.kogito-job-service-job-request-events.url=http://jobs-service-service.default.svc.cluster.local/v2/jobs/events -# Data Index configuration -mp.messaging.outgoing.kogito-processinstances-events.url=http://data-index-service.default.svc.cluster.local/processes -mp.messaging.outgoing.kogito-usertaskinstances-events.url=http://data-index-service.default.svc.cluster.local/tasks -mp.messaging.outgoing.kogito-variables-events.url=http://data-index-service.default.svc.cluster.local/variables - -move2kube_url=${MOVE2KUBE_URL:http://move2kube-svc.default.svc.cluster.local:8080} -quarkus.rest-client.move2kube_yaml.url=${MOVE2KUBE_URL:http://move2kube-svc.default.svc.cluster.local:8080} -broker_url=${BROKER_URL:http://broker-ingress.knative-eventing.svc.cluster.local/m2k/default} -quarkus.rest-client.notifications_yaml.url=${BACKSTAGE_NOTIFICATIONS_URL:http://host.minikube.internal:7007/api/notifications/} diff --git a/m2k/serverless-workflow-m2k/src/main/resources/m2k.svg deleted file mode 100644 index a4aeeb8..0000000 --- a/m2k/serverless-workflow-m2k/src/main/resources/m2k.svg +++ /dev/null @@ -1 +0,0 @@ -[SVG workflow diagram; only its labels are recoverable: Start, StartPlanning, GetPlanning, PlanRetrievedCheck, StartTransformation, PrintTransformationIdMessage, NotifyTransformationWaiting, SaveTransformationOutput, WaitForSaveTransformationCompletion, TransformationSaved, TransformationError, PrintExitErrorMessage, PrintExitSuccessMessage, End, plus the plan-check condition and errorEvent edge labels] \ No newline at end of file
diff --git a/m2k/serverless-workflow-m2k/src/main/resources/m2k.sw.yml deleted file mode 100644 index 3d6e2dd..0000000 --- a/m2k/serverless-workflow-m2k/src/main/resources/m2k.sw.yml +++ /dev/null @@ -1,180 +0,0 @@ -specVersion: "0.8" -id: m2k -version: '1.0' -name: Move2Kube workflow -description: Workflow to execute Move2Kube -dataInputSchema: schemas/input.json -start: StartPlanning -events: - - name: errorEvent - source: '' - type: error - - name: transformationSavedEvent - source: '' - type: transformation_saved -functions: - - name: systemOut - type: custom - operation: sysout - - name: startPlanning - type: rest - operation: specs/move2kube.yaml#start-planning - - name: getPlanning - type: rest - operation: specs/move2kube.yaml#get-plan - - name: startTransformation - type: rest - operation: specs/move2kube.yaml#start-transformation - - name: sendCloudEvent - type: custom - operation: rest:post:http://broker-ingress.knative-eventing.svc.cluster.local/m2k/default - - name: createNotification - operation: 'specs/notifications.yaml#createNotification' -states: - - name: StartPlanning - type: operation - actions: - - functionRef: - refName: startPlanning - arguments: - workspace-id: ".workspaceId" - project-id: ".projectId" - remote-source: "\"git+\" + .repo + \"@\" + .sourceBranch" - transition: GetPlanning - - name: GetPlanning - type: operation - actions: - - functionRef: - refName: getPlanning - arguments: - workspace-id: ".workspaceId" - project-id: ".projectId" - actionDataFilter: - toStateData: . - sleep: - before: PT2S - transition: PlanRetrievedCheck - - name: PlanRetrievedCheck - type: switch - dataConditions: - - condition: (has("plan") and .plan != "") - transition: - nextState: StartTransformation - defaultCondition: - transition: GetPlanning - - name: StartTransformation - type: operation - actions: - - functionRef: - refName: startTransformation - arguments: - workspace-id: ".workspaceId" - project-id: ".projectId" - plan: .plan - actionDataFilter: - results: .id - toStateData: .transformId - transition: PrintTransformationIdMessage - - name: PrintTransformationIdMessage - type: operation - actions: - - name: printSystemOut - functionRef: - refName: systemOut - arguments: - message: '${"m2k workflow: " + $WORKFLOW.instanceId + " transformation ID: " + .transformId }' - transition: NotifyTransformationWaiting - - name: NotifyTransformationWaiting - type: operation - actions: - - name: create - functionRef: - refName: createNotification - arguments: - title: " \"Transformation \" + .transformationId + \" waiting for Q&A\" " - message: '"Please go to your Move2Kube instance and answers to questions in order to continue the Move2Kube workflow"' - origin: "Move2Kube Workflow" - topic: "Move2Kube Workflow" - transition: SaveTransformationOutput - - name: SaveTransformationOutput - type: operation - actions: - - functionRef: - refName: sendCloudEvent - arguments: - HEADER_Ce-Id: $WORKFLOW.instanceId - HEADER_Ce-Specversion: "1.0" - HEADER_Ce-Type: "save-transformation" - HEADER_Ce-Source: "m2k_swf" - HEADER_Content-Type: "application/json" - gitRepo: .repo - branch: .targetBranch - token: .token - workspaceId: .workspaceId - projectId: .projectId - transformId: .transformId - workflowCallerId: $WORKFLOW.instanceId - transition: WaitForSaveTransformationCompletion - - name: WaitForSaveTransformationCompletion -
type: switch - eventConditions: - - eventRef: transformationSavedEvent - transition: TransformationSaved - - eventRef: errorEvent - transition: TransformationError - defaultCondition: - transition: TransformationError - timeouts: - eventTimeout: PT3M - - name: TransformationSaved - type: inject - data: - exitMessage: Transformation successful. - transition: PrintExitSuccessMessage - - name: TransformationError - type: inject - data: - exitMessage: '"Error while saving transformation output. If no context, it was due to timeout expiration"' - transition: PrintExitErrorMessage - - name: PrintExitErrorMessage - type: parallel - branches: - - name: printSystemOut - actions: - - name: printSystemOut - functionRef: - refName: systemOut - arguments: - message: '${"m2k workflow: " + $WORKFLOW.instanceId + " has finalized with error. Exit message: " + .exitMessage + " -- Context: " + .error }' - - name: createNotification - actions: - - name: createNotification - functionRef: - refName: createNotification - arguments: - title: '"Move2Kube workflow " + $WORKFLOW.instanceId + " failed"' - message: '"Move2Kube workflow " + $WORKFLOW.instanceId + " on workspace " + .workspaceId + " and project " + .projectId + " failed with exit message: "+ .exitMessage + "

Error: " + .error' - origin: "Move2Kube Workflow" - topic: "Move2Kube Workflow" - end: true - - name: PrintExitSuccessMessage - type: parallel - branches: - - name: printSystemOut - actions: - - name: printSystemOut - functionRef: - refName: systemOut - arguments: - message: '${"m2k workflow: " + $WORKFLOW.instanceId + " has finalized successfully}"' - - name: createNotification - actions: - - name: createNotification - functionRef: - refName: createNotification - arguments: - title: '"Move2Kube workflow " + $WORKFLOW.instanceId + " success"' - message: '"Move2Kube workflow " + $WORKFLOW.instanceId + " on workspace " + .workspaceId + " and project " + .projectId + " was successful"' - origin: "Move2Kube Worflow" - topic: "Move2Kube Workflow" - end: true diff --git a/m2k/serverless-workflow-m2k/src/main/resources/schemas/input.json b/m2k/serverless-workflow-m2k/src/main/resources/schemas/input.json deleted file mode 100644 index bf35c49..0000000 --- a/m2k/serverless-workflow-m2k/src/main/resources/schemas/input.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "$id": "classpath:/schema/input.json", - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Input", - "description": "schema for input description", - "type": "object", - "properties": { - "token": { - "description": "The token to use to authenticate with GitHub", - "type": "string" - }, - "sourceBranch": { - "description": "the branch to download the zip from", - "type": "string" - }, - "targetBranch": { - "description": "the branch to create in the repo", - "type": "string" - }, - "repo": { - "description": "the repo to be used", - "type": "string", - "examples": [ - "dotfiles" - ] - }, - "workspace-id": { - "description": "the ID of the Move2Kube workspace", - "type": "string" - }, - "project-id": { - "description": "the ID of the Move2Kube project", - "type": "string" - } - }, - "required": [ - "repo", - "sourceBranch", - "targetBranch", - "workspaceId", - "projectId" - ] -} diff --git a/m2k/serverless-workflow-m2k/src/main/resources/specs/move2kube.yaml b/m2k/serverless-workflow-m2k/src/main/resources/specs/move2kube.yaml deleted file mode 100644 index 859ae6d..0000000 --- a/m2k/serverless-workflow-m2k/src/main/resources/specs/move2kube.yaml +++ /dev/null @@ -1,1818 +0,0 @@ ---- -openapi: 3.0.1 -info: - title: Move2Kube API - description: | - This is a documentation of the Move2Kube REST API. - All API calls expect the `Authorization: Bearer ` HTTP header unless specified otherwise. - The access token can be obtained in the same way as OAuth 2.0 using the token endpoint in the admin section. - contact: - email: move2kube-dev@googlegroups.com - license: - name: Apache 2.0 - url: http://www.apache.org/licenses/LICENSE-2.0.html - version: v1.0.0 -externalDocs: - description: Find out more about Swagger - url: http://swagger.io -servers: - - url: "/api/v1" -tags: - - name: move2kube - description: Helps migrate your app to Kubernetes. - externalDocs: - description: Find out more - url: https://move2kube.konveyor.io/ -security: - - bearerAuth: [] -paths: - "/token": - post: - security: - - basicAuth: [] - tags: - - admin - summary: Get an access token using client ID and client secret (for use with - trusted clients). - description: Get an access token using client ID and client secret (for use - with trusted clients). 
- operationId: get-tokens - requestBody: - description: "Use `grant_type=client_credentials` in the body and set the - header \n`Authorization: Basic base64(client_id + \":\" + client_secret)`\n" - content: - application/x-www-form-urlencoded: - schema: - type: object - required: - - grant_type - properties: - grant_type: - type: string - enum: - - client_credentials - responses: - '200': - "$ref": "#/components/responses/Token" - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get the access token. - "/support": - get: - tags: - - support - summary: Returns some support information like CLI, API and UI version info. - description: Returns some support information like CLI, API and UI version info. - operationId: get-support-info - responses: - '200': - description: Success. - content: - application/json: - schema: - type: object - properties: - cli_version: - type: string - description: The version, commit hash, etc. of the Move2Kube CLI - tool being used. - example: |- - version: v0.3.0+unreleased - gitCommit: 0ccc6c4c6ea8ccd8fb8f999d37f81cdf0fdf22e6 - gitTreeState: clean - goVersion: go1.18.1 - platform: darwin/amd64 - api_version: - type: string - description: The version, commit hash, etc. of the Move2Kube API - server being used. - example: |- - version: v0.1.0+unreleased - gitCommit: d21d2503e136fd85d5b166d5899d4058083cf0ce - gitTreeState: clean - goVersion: go1.18.1 - platform: darwin/amd64 - ui_version: - type: string - description: The version, commit hash, etc. of the Move2Kube UI - website being used. - example: unknown - docker: - type: string - description: Whether the docker socket '/var/run/docker.sock' - is mounted when running as a container. - example: docker socket is not mounted - "/workspaces": - get: - tags: - - workspaces - summary: Get all the workspaces you have access to. - description: Get all the workspaces you have access to. - operationId: get-workspaces - responses: - '200': - description: Success. - content: - application/json: - schema: - type: array - items: - "$ref": "#/components/schemas/Workspace" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get all the workspaces. - post: - tags: - - workspaces - summary: Create a new workspace. The ID will be generated by the server. - description: Create a new workspace. The ID will be generated by the server. - operationId: create-workspace - requestBody: - description: | - The metadata of the workspace. - Leave the ID blank, it will be generated. - Leave the projects blank, projects are managed through a different set of endpoints. - content: - application/json: - example: - name: Team 1 Workspace - description: The workspace team 1 uses. - schema: - "$ref": "#/components/schemas/Workspace" - required: true - responses: - '201': - description: Created. - content: - application/json: - schema: - type: object - properties: - id: - type: string - description: ID of the new workspace. - example: work-1234 - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to create a new workspace. 
- x-codegen-request-body-name: body - "/api/v1/workspaces/{workspace-id}": - get: - tags: - - workspaces - summary: Get the workspace with the given ID. - description: Get the workspace with the given ID. - operationId: get-workspace - parameters: - - name: workspace-id - in: path - description: ID of the workspace to get. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Success. - content: - application/json: - schema: - "$ref": "#/components/schemas/Workspace" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get this workspace. - '404': - description: Workspace not found. - put: - tags: - - workspaces - summary: Update a workspace. The workspace will be created if it doesn't exist. - description: Update a workspace. The workspace will be created if it doesn't - exist. - operationId: update-workspace - parameters: - - name: workspace-id - in: path - description: ID of the workspace to update. - required: true - example: work-1 - schema: - "$ref": "#/components/schemas/ID" - requestBody: - description: | - The metadata of the workspace. - Leave the ID blank, it will be generated. - Leave the projects blank, projects are managed through a different set of endpoints. - content: - application/json: - example: - name: Team 1 Workspace. Update Name. - description: The workspace team 1 uses. Updated description. - schema: - "$ref": "#/components/schemas/Workspace" - required: true - responses: - '201': - description: Created. - '204': - description: Updated. - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to create/update this workspace. - '404': - description: Workspace not found. - x-codegen-request-body-name: body - delete: - tags: - - workspaces - summary: Delete an existing workspace. - description: Delete an existing workspace. - operationId: delete-workspace - parameters: - - name: workspace-id - in: path - description: ID of the workspace to delete. - required: true - example: work-1 - schema: - "$ref": "#/components/schemas/ID" - responses: - '204': - description: Deleted. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to delete this workspace. - '404': - description: Workspace not found. - "/api/v1/workspaces/{workspace-id}/inputs": - post: - tags: - - workspace-inputs - summary: Create a new input for this workspace. All the projects in this workspace - will be able to use it. The ID will be generated by the server. - description: Create a new input for this workspace. All the projects in this - workspace will be able to use it. The ID will be generated by the server. - operationId: create-workspace-input - parameters: - - name: workspace-id - in: path - description: ID of the workspace to create the input in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - requestBody: - description: | - The metadata of the workspace input. - Leave the ID blank, it will be generated. - content: - multipart/form-data: - schema: - type: object - properties: - type: - type: string - enum: - - sources - - customizations - - configs - description: The type of the input. - description: - type: string - description: A description for the input. 
- file: - type: string - format: binary - description: The actual content of the input file. - required: true - responses: - '201': - description: Created. - content: - application/json: - schema: - type: object - properties: - id: - type: string - description: ID of the new workspace input. - example: work-input-1234 - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to create an input for this project. - x-codegen-request-body-name: body - "/api/v1/workspaces/{workspace-id}/inputs/{input-id}": - get: - tags: - - workspace-inputs - summary: Get the input of the project with the given ID. - description: Get the input of the project with the given ID. - operationId: get-workspace-input - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: input-id - in: path - description: ID of the input to get. - required: true - example: work-input-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Success. - content: - application/octet-stream: - schema: - type: string - format: binary - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get this project input. - '404': - description: Workspace, project or input not found. - delete: - tags: - - workspace-inputs - summary: Delete the input of the project. - description: Delete the input of the project. - operationId: delete-workspace-input - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: input-id - in: path - description: ID of the input to delete. - required: true - example: work-input-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '204': - description: Deleted. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to delete this project input. - '404': - description: Workspace, project or input not found. - x-codegen-request-body-name: body - "/api/v1/workspaces/{workspace-id}/projects": - get: - tags: - - projects - summary: Get all the projects you have access to in this workspace. - description: Get all the projects you have access to in this workspace. - operationId: get-projects - parameters: - - name: workspace-id - in: path - description: ID of the workspace to get the projects from. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Success. - content: - application/json: - schema: - type: array - items: - "$ref": "#/components/schemas/Project" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get all the projects in this workspace. - post: - tags: - - projects - summary: Create a new project in this workspace. The ID will be generated by - the server. - description: Create a new project in this workspace. The ID will be generated - by the server. - operationId: create-project - parameters: - - name: workspace-id - in: path - description: ID of the workspace to create the project in. 
- required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - requestBody: - description: | - The metadata of the project. - Leave the ID blank, it will be generated. - content: - application/json: - example: - name: My Web App 1 - description: Project to transform my web app 1 to run on K8s. - schema: - "$ref": "#/components/schemas/Project" - required: true - responses: - '201': - description: Created. - content: - application/json: - schema: - type: object - properties: - id: - type: string - description: ID of the new project. - example: proj-1234 - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to create a new project in this workspace. - x-codegen-request-body-name: body - "/api/v1/workspaces/{workspace-id}/projects/{project-id}": - get: - tags: - - projects - summary: Get the project with the given ID. - description: Get the project with the given ID. - operationId: get-project - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to get. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Success. - content: - application/json: - schema: - "$ref": "#/components/schemas/Project" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get this project. - '404': - description: Workspace or project not found. - put: - tags: - - projects - summary: Update a project. The project will be created if it doesn't exist. - description: Update a project. The project will be created if it doesn't exist. - operationId: update-project - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to update. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - requestBody: - description: | - The metadata of the project. - Leave the ID blank. - content: - application/json: - example: - name: My Web App 1. Updated Name. - description: Project to transform my web app 1. Updated description. - schema: - "$ref": "#/components/schemas/Project" - required: true - responses: - '201': - description: Created. - '204': - description: Updated. - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to create/update this project. - '404': - description: Workspace or project not found. - x-codegen-request-body-name: body - delete: - tags: - - projects - summary: Delete an existing workspace. - description: Delete an existing workspace. - operationId: delete-project - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to delete. 
- required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '204': - description: Deleted. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to delete this project. - '404': - description: Workspace or project not found. - "/api/v1/workspaces/{workspace-id}/projects/{project-id}/inputs": - post: - tags: - - project-inputs - summary: Create a new input for this project. The ID will be generated by the - server. - description: Create a new input for this project. The ID will be generated by - the server. - operationId: create-project-input - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to create the input in. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - requestBody: - description: | - The metadata of the project input. - Leave the ID blank, it will be generated. - content: - multipart/form-data: - schema: - type: object - properties: - type: - type: string - enum: - - sources - - customizations - - configs - - reference - description: The type of the input. - id: - type: string - description: If the input is of type 'reference', then this field - indicates the id of the workspace input that it is referencing. - description: - type: string - description: A description for the input. - file: - type: string - format: binary - description: The actual content of the input file. - required: true - responses: - '201': - description: Created. - content: - application/json: - schema: - type: object - properties: - id: - type: string - description: ID of the new project input. - example: proj-input-1234 - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to create an input for this project. - x-codegen-request-body-name: body - "/api/v1/workspaces/{workspace-id}/projects/{project-id}/inputs/{input-id}": - get: - tags: - - project-inputs - summary: Get the input of the project with the given ID. - description: Get the input of the project with the given ID. - operationId: get-project-input - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project the input is in. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: input-id - in: path - description: ID of the input to get. - required: true - example: proj-input-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Success. - content: - application/octet-stream: - schema: - type: string - format: binary - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get this project input. - '404': - description: Workspace, project or input not found. - delete: - tags: - - project-inputs - summary: Delete the input of the project. - description: Delete the input of the project. 
- operationId: delete-project-input - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project the input is in. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: input-id - in: path - description: ID of the input to delete. - required: true - example: proj-input-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '204': - description: Deleted. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to delete this project input. - '404': - description: Workspace, project or input not found. - x-codegen-request-body-name: body - "/api/v1/workspaces/{workspace-id}/projects/{project-id}/plan": - post: - tags: - - plan - summary: Start planning on this project's inputs. - description: Start planning on this project's inputs. - operationId: start-planning - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to start planning in. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: remote-source - in: query - description: Remote source git URL from which get the source files. - required: false - example: git+https://github.com/konveyor/move2kube - schema: - "$ref": "#/components/schemas/RemoteSource" - responses: - '202': - description: Accepted. - '400': - description: Invalid format or validation error. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to start planning for this project. - '404': - description: Workspace or project not found. - x-codegen-request-body-name: body - get: - tags: - - plan - summary: Get the plan file. - description: Get the plan file. - operationId: get-plan - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to get the plan from. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Accepted. - content: - application/json: - schema: - type: object - properties: - plan: - type: string - description: The plan file in YAML format. - '400': - description: Invalid format or validation error. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to start planning for this project. - '404': - description: Workspace or project not found. - '409': - description: Conflict, because planning is already on-going for this project. - x-codegen-request-body-name: body - put: - tags: - - plan - summary: Update the plan for this project. - description: Update the plan for this project. - operationId: update-plan - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to update the plan for. 
- required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - plan: - type: string - description: The new plan file to use for this project. - responses: - '204': - description: Accepted. - '400': - description: Invalid format or validation error. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to update the plan for this project. - '404': - description: Workspace or project not found. - x-codegen-request-body-name: body - delete: - tags: - - plan - summary: Delete the current plan for the project. - description: Delete the current plan for the project. - operationId: delete-plan - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to delete the plan from. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '204': - description: Deleted. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to delete the plan for this project. - '404': - description: Workspace or project not found. - "/api/v1/workspaces/{workspace-id}/projects/{project-id}/outputs": - post: - tags: - - project-outputs - summary: Start transformation for this project. Planning must be completed before - this. - description: Start transformation for this project. Planning must be completed - before this. - operationId: start-transformation - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to start the transformation for. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: skip-qa - in: query - description: Boolean to skip interactive QA. - required: false - example: 'true' - schema: - type: boolean - requestBody: - description: 'A plan to use for the transformation. (Not required). - - ' - content: - application/json: - schema: - type: object - properties: - plan: - type: string - description: A plan to use for the transformation. (Not required). - required: false - responses: - '202': - description: Accept - content: - application/json: - schema: - type: object - properties: - id: - type: string - description: ID of the new project output. - example: proj-output-1234 - name: - type: string - description: Name of the project output. - description: - type: string - description: Description of the project output. - timestamp: - type: string - format: date-time - status: - type: string - description: The status of the transformation. - enum: - - transforming - - done - - error - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to start transformation for this project. - '404': - description: Workspace or project not found. 
- x-codegen-request-body-name: body - "/api/v1/workspaces/{workspace-id}/projects/{project-id}/outputs/{output-id}": - get: - tags: - - project-outputs - summary: Get the output of the project with the given ID. - description: Get the output of the project with the given ID. - operationId: get-project-output - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project the output is in. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: output-id - in: path - description: ID of the output to get. - required: true - example: proj-output-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Success. - content: - application/octet-stream: - schema: - type: string - format: binary - '204': - description: The transformation is still on-going. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get this project output. - '404': - description: Workspace, project or output not found. - delete: - tags: - - project-outputs - summary: Delete the output of the project. - description: Delete the output of the project. - operationId: delete-project-output - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project the output is in. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: output-id - in: path - description: ID of the output to delete. - required: true - example: proj-output-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '204': - description: Deleted. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to delete this project output. - '404': - description: Workspace, project or output not found. - x-codegen-request-body-name: body - "/api/v1/workspaces/{workspace-id}/projects/{project-id}/outputs/{output-id}/graph": - get: - tags: - - project-output-graphs - summary: Get the graph of the transformers used while creating the output with - the given ID. - description: Get the graph of the transformers used while creating the output - with the given ID. - operationId: get-project-output-graph - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project the output is in. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: output-id - in: path - description: ID of the output whose graph we should get. - required: true - example: proj-output-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Success. - content: - application/json: - schema: - type: object - '204': - description: The transformation is still on-going. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get this project output. - '404': - description: Workspace, project or output not found. 
- "/api/v1/workspaces/{workspace-id}/projects/{project-id}/outputs/{output-id}/problems/current": - get: - tags: - - qa - summary: Get the current question that needs to be answered for the transformation - to proceed. - description: Get the current question that needs to be answered for the transformation - to proceed. - operationId: get-current-question - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project the output is in. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: output-id - in: path - description: ID of the output whose transformation is on-going. - required: true - example: proj-output-1234 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Success. - content: - application/json: - schema: - type: object - properties: - question: - type: string - description: A JSON encoded string of the question object. - '204': - description: All questions have finished. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get this project output. - '404': - description: Workspace, project or output not found. Might also be returned - once the transformation has finished. - "/api/v1/workspaces/{workspace-id}/projects/{project-id}/outputs/{output-id}/problems/current/solution": - post: - tags: - - qa - summary: Post the answer to the current question for an on-going transformation - given by the ID. - description: Post the answer to the current question for an on-going transformation - given by the ID. - operationId: post-answer-to-question - parameters: - - name: workspace-id - in: path - description: ID of the workspace the project is in. - required: true - example: work-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: project-id - in: path - description: ID of the project to start the transformation for. - required: true - example: proj-1234 - schema: - "$ref": "#/components/schemas/ID" - - name: output-id - in: path - description: ID of the output whose transformation is on-going. - required: true - example: proj-output-1234 - schema: - "$ref": "#/components/schemas/ID" - requestBody: - description: 'A plan to use for the transformation. (Not required). - - ' - content: - application/json: - schema: - type: object - properties: - solution: - type: string - description: A JSON encoded string containing the answer object. - required: true - responses: - '204': - description: Answer was accepted - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to post an answer to the question - for this project output. - '404': - description: Workspace, project, or output not found. - x-codegen-request-body-name: body - "/roles": - get: - tags: - - roles - summary: Get all the roles. - description: Get all the roles. - operationId: get-roles - responses: - '200': - description: Success. - content: - application/json: - schema: - type: array - items: - "$ref": "#/components/schemas/Role" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get all the roles. 
- post: - tags: - - roles - summary: Create a new role. - description: Create a new role. - operationId: create-role - requestBody: - description: The metadata of the role. Leave the ID blank, it will be generated. - content: - application/json: - schema: - "$ref": "#/components/schemas/Role" - required: true - responses: - '201': - description: Created. - content: - application/json: - schema: - type: object - properties: - id: - type: string - description: ID of the new role. - example: role-1234 - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to create a new role. - x-codegen-request-body-name: body - "/roles/{role-id}": - get: - tags: - - roles - summary: Get the role with the given ID. - description: Get the role with the given ID. - operationId: get-role - parameters: - - name: role-id - in: path - description: ID of the role to get. - required: true - example: team-7 - schema: - "$ref": "#/components/schemas/ID" - responses: - '200': - description: Success. - content: - application/json: - schema: - "$ref": "#/components/schemas/Role" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get this role. - '404': - description: Role not found. - put: - tags: - - roles - summary: Update a role. The role will be created if it doesn't exist. - description: Update a role. The role will be created if it doesn't exist. - operationId: update-role - parameters: - - name: role-id - in: path - description: ID of the role to update. - required: true - example: team-1 - schema: - "$ref": "#/components/schemas/ID" - requestBody: - description: The metadata of the role. - content: - application/json: - schema: - "$ref": "#/components/schemas/Role" - required: true - responses: - '201': - description: Created. - '204': - description: Updated. - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to create/update this role. - '404': - description: Role not found. - x-codegen-request-body-name: body - delete: - tags: - - roles - summary: Delete an existing role - description: Delete an existing role. - operationId: delete-role - parameters: - - name: role-id - in: path - description: ID of the role to delete. - required: true - example: team-1 - schema: - "$ref": "#/components/schemas/ID" - responses: - '204': - description: Deleted. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to delete this role. - '404': - description: Role not found. - "/idps/{idp-id}/users/{user-id}/roles": - get: - tags: - - role-bindings - summary: Get all the roles for the given user. - description: Get all the roles for the given user. - operationId: get-roles-of-user - parameters: - - name: idp-id - in: path - description: ID of the identity provider. - required: true - example: idp-1 - schema: - "$ref": "#/components/schemas/ID" - - name: user-id - in: path - description: ID of the user as given by the identity provider. - required: true - example: user-1 - schema: - type: string - responses: - '200': - description: Success. 
- content: - application/json: - schema: - type: array - description: List of role IDs assigned to the user. - example: - - role-1 - - role-2 - items: - type: string - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to get the roles for this user. - '404': - description: User not found. - patch: - tags: - - role-bindings - summary: Update the roles of the given user. - description: Update the roles of the given user. - operationId: update-roles-of-user - parameters: - - name: idp-id - in: path - description: ID of the identity provider. - required: true - example: idp-1 - schema: - "$ref": "#/components/schemas/ID" - - name: user-id - in: path - description: ID of the user as given by the identity provider. - required: true - example: user-1 - schema: - type: string - requestBody: - description: Add, remove or overwrite the roles of the user. - content: - application/json: - schema: - type: object - properties: - op: - type: string - description: "add: add these roles to the existing roles the user - has. \nremove: remove these roles from the existing roles the - user has. \noverwrite: completely overwrite the existing roles - the user has with these roles.\n" - enum: - - add - - remove - - overwrite - roles: - type: array - items: - type: string - description: List of role IDs - example: - - role-1 - - role-2 - items: - type: string - required: true - responses: - '204': - description: Success. - '400': - description: Invalid format or validation error. - content: - application/json: - schema: - "$ref": "#/components/schemas/Error" - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to update the roles for this user. - '404': - description: User not found. - x-codegen-request-body-name: body - "/idps/{idp-id}/users/{user-id}/roles/{role-id}": - put: - tags: - - role-bindings - summary: Add a role to a user. - description: Add a role to a user. - operationId: add-role-to-user - parameters: - - name: idp-id - in: path - description: ID of the identity provider. - required: true - example: idp-1 - schema: - "$ref": "#/components/schemas/ID" - - name: user-id - in: path - description: ID of the user as given by the identity provider. - required: true - example: user-1 - schema: - type: string - - name: role-id - in: path - description: ID of the role to add to the user. - required: true - example: team-7 - schema: - "$ref": "#/components/schemas/ID" - responses: - '201': - description: Created. - '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to add this role to this user. - '404': - description: User not found. - delete: - tags: - - role-bindings - summary: Remove a role from a user. - description: Remove a role from a user. - operationId: remove-role-from-user - parameters: - - name: idp-id - in: path - description: ID of the identity provider. - required: true - example: idp-1 - schema: - "$ref": "#/components/schemas/ID" - - name: user-id - in: path - description: ID of the user as given by the identity provider. - required: true - example: user-1 - schema: - type: string - - name: role-id - in: path - description: ID of the role to remove from the user. - required: true - example: team-7 - schema: - "$ref": "#/components/schemas/ID" - responses: - '204': - description: Deleted. 
- '401': - "$ref": "#/components/responses/UnauthorizedError" - '403': - description: Don't have authorization to remove this role from this user. - '404': - description: User not found. -components: - securitySchemes: - basicAuth: - type: http - scheme: basic - bearerAuth: - type: http - scheme: bearer - responses: - UnauthorizedError: - description: Authorization header is missing or invalid. - headers: - WWW_Authenticate: - schema: - type: string - Token: - description: The access token. - headers: - Cache-Control: - schema: - type: string - enum: - - no-store - Pragma: - schema: - type: string - enum: - - no-cache - content: - application/json: - schema: - type: object - properties: - access_token: - type: string - example: eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk - token_type: - type: string - example: example - expires_in: - type: number - example: 3600 - schemas: - ID: - pattern: "^[a-zA-Z0-9-_]+$" - type: string - description: A unique ID. - example: id-1234 - RemoteSource: - pattern: "^git[+](https|ssh)://[a-zA-Z0-9]+([-.]{1}[a-zA-Z0-9]+)*[.][a-zA-Z]{2,5}(:[0-9]{1,5})?(/.*)?$" - type: string - description: A git URL. - example: git+https://github.com/konveyor/move2kube - Error: - required: - - error - type: object - properties: - error: - type: object - required: - - description - properties: - description: - type: string - description: A human readable error message. - example: 'failed to create the workspace. Error: ...' - Project: - required: - - id - - name - type: object - properties: - id: - type: object - description: A unique ID for the project. - example: proj-1234 - allOf: - - "$ref": "#/components/schemas/ID" - name: - type: string - description: A human readable name for the project. - example: Project 23 - timestamp: - type: string - format: date-time - outputs: - type: object - additionalProperties: - type: object - properties: - id: - type: string - format: uuid - name: - type: string - description: - type: string - timestamp: - type: string - format: date-time - status: - type: string - status: - type: object - properties: - plan: - type: boolean - plan_error: - type: boolean - planning: - type: boolean - reference: - type: boolean - stale_plan: - type: boolean - sources: - type: boolean - outputs: - type: boolean - inputs: - type: object - additionalProperties: - type: object - properties: - id: - type: string - name: - type: string - description: - type: string - timestamp: - type: string - format: date-time - type: - type: string - normalized_name: - type: string - description: - type: string - description: A description about the project. - example: This is one of the projects that team 1 is working on. - Workspace: - required: - - id - - name - type: object - properties: - id: - type: object - description: A unique ID for the workspace. - example: work-1234 - allOf: - - "$ref": "#/components/schemas/ID" - name: - type: string - description: A human readable name for the workspace. 
- example: Team 1 Workspace - timestamp: - type: string - format: date-time - project_ids: - type: array - items: - type: string - inputs: - type: object - additionalProperties: - type: object - properties: - id: - type: string - name: - type: string - description: - type: string - timestamp: - type: string - format: date-time - type: - type: string - normalized_name: - type: string - description: - type: string - description: A description about the workspace. - example: This is the workspace for all the projects of team 1. - projects: - type: array - items: - "$ref": "#/components/schemas/Project" - Resource: - type: string - description: A resource is a URL path. - example: "/api/v1/workspaces/work-1234/projects/proj-42" - Role: - required: - - id - - name - type: object - properties: - id: - type: object - description: A unique ID for the role. - example: team-1 - allOf: - - "$ref": "#/components/schemas/ID" - name: - type: string - description: A human readable name for the role. - example: Team 1 - description: - type: string - description: A description about the role - example: A member of team 1. - rules: - type: array - description: 'The list of rules to apply for this role. - - ' - example: - - resources: - - "/api/v1/workspaces/work-7/.+" - - "/api/v1/workspaces/work-42/projects/.*" - - "/api/v1/workspaces/work-123/projects/proj-2" - verbs: - - all - - resources: - - "/api/v1/workspaces/work-1234" - verbs: - - create-project - - delete-project - items: - required: - - resources - - verbs - type: object - properties: - resources: - type: array - description: | - List of resources. The elements of this list are Javascript ES6 Regex patterns. - When a request for a protected resource is received these regexs are used to - match against the resource URL. - items: - "$ref": "#/components/schemas/Resource" - verbs: - type: array - description: "List of allowed verbs. \nFor now the only supported - verb is `all` which allows all actions on the resource.\n" - items: - type: string - example: all - description: A rule is a list of resources and the list of allowed verbs - for those resources. 
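Note: the OpenAPI document removed above described a Move2Kube-style planning/transformation API (workspaces, projects, plan, outputs, QA, roles). For anyone who relied on that spec, here is a minimal client sketch of the plan lifecycle it documented: start planning, then poll until the plan is available. This is only an illustration of the removed contract, not code from this repository; the base URL `http://localhost:8080`, the bearer-token placeholder, and the reuse of the spec's `work-1234`/`proj-1234` example IDs are all assumptions.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PlanClientSketch {
    // Assumed values for illustration only; the removed spec does not fix a host or credentials.
    private static final String BASE = "http://localhost:8080/api/v1";
    private static final String TOKEN = "<bearer-token>";

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String planUrl = BASE + "/workspaces/work-1234/projects/proj-1234/plan";

        // POST .../plan starts planning (202 Accepted); the remote-source query parameter is optional.
        HttpRequest start = HttpRequest.newBuilder()
                .uri(URI.create(planUrl + "?remote-source=git%2Bhttps://github.com/konveyor/move2kube"))
                .header("Authorization", "Bearer " + TOKEN)
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        int startStatus = client.send(start, HttpResponse.BodyHandlers.discarding()).statusCode();
        System.out.println("start-planning: HTTP " + startStatus);

        // GET .../plan returns {"plan": "<YAML>"} with 200, or 409 while planning is still on-going.
        HttpRequest getPlan = HttpRequest.newBuilder()
                .uri(URI.create(planUrl))
                .header("Authorization", "Bearer " + TOKEN)
                .GET()
                .build();
        HttpResponse<String> res;
        do {
            Thread.sleep(2_000); // simple polling interval; not mandated by the spec
            res = client.send(getPlan, HttpResponse.BodyHandlers.ofString());
        } while (res.statusCode() == 409);
        System.out.println("plan response: HTTP " + res.statusCode() + " " + res.body());
    }
}
```

After the plan is ready, the same pattern applies to the other removed endpoints (POST `/outputs` to start a transformation, GET `/outputs/{output-id}` to download the result), differing only in the URL and, where applicable, a JSON request body.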
diff --git a/m2k/serverless-workflow-m2k/src/main/resources/specs/notifications.yaml b/m2k/serverless-workflow-m2k/src/main/resources/specs/notifications.yaml deleted file mode 100644 index f946790..0000000 --- a/m2k/serverless-workflow-m2k/src/main/resources/specs/notifications.yaml +++ /dev/null @@ -1,243 +0,0 @@ -openapi: 3.0.3 -info: - title: Notifications Plugin - OpenAPI Specs - description: |- - Notifications Plugin - OpenAPI Specs - version: 1.0.0 -tags: - - name: notifications - description: notifications plugin -servers: - - url: http://localhost:7007/api/notifications -paths: - /notifications: - post: - tags: - - notifications - summary: Create notification - description: Create notification - operationId: createNotification - parameters: - - name: notifications-secret - in: header - description: Notification secret - required: true - schema: - type: string - requestBody: - description: Create a new notification - content: - application/json: - schema: - $ref: '#/components/schemas/CreateBody' - responses: - '200': - description: Successful operation - content: - application/json: - schema: - type: object - properties: - messageId: - type: string - example: bc9f19de-8b7b-49a8-9262-c5036a1ed35e - required: ['messageId'] - get: - tags: - - notifications - summary: Gets notifications - description: Gets notifications - operationId: getNotifications - parameters: - - name: pageSize - in: query - description: Page size of the result - required: false - schema: - type: integer - minimum: 0 - - name: pageNumber - in: query - description: Page number of the result - required: false - schema: - type: integer - minimum: 0 - - name: orderBy - in: query - description: order by field. e.g. created, origin. - required: false - schema: - type: string - enum: - - title - - message - - created - - topic - - origin - - name: orderByDirec - in: query - description: order ascending or descending - required: false - schema: - type: string - enum: - - asc - - desc - - name: containsText - in: query - description: Filter notifications whose either title or message contains the provided string - required: false - schema: - type: string - - name: createdAfter - in: query - description: Only notifications created after this timestamp will be included - required: false - schema: - type: string - format: date-time - - name: messageScope - in: query - description: retrieve either logged-in user messages, system messages or both - required: false - schema: - type: string - enum: - - all - - user - - system - - name: read - in: query - description: Notifications read or not - required: false - schema: - type: boolean - responses: - '200': - description: Successful operation - content: - application/json: - schema: - $ref: '#/components/schemas/Notifications' - /notifications/count: - get: - tags: - - notifications - summary: Get notifications count - description: Gets notifications count - operationId: getNotificationsCount - parameters: - - name: containsText - in: query - description: Filter notifications whose either title or message contains the provided string - required: false - schema: - type: string - - name: createdAfter - in: query - description: Only notifications created after this timestamp will be included - required: false - schema: - type: string - format: date-time - - name: messageScope - in: query - description: retrieve either logged-in user messages, system messages or both - required: false - schema: - type: string - enum: - - all - - user - - system - - name: read - in: query - 
description: Notifications read or not - required: false - schema: - type: boolean - responses: - '200': - description: Successful operation - content: - application/json: - schema: - type: object - properties: - count: - type: number - required: ['count'] - /notifications/read: - put: - tags: - - notifications - summary: Set notification as read/unread - description: Set notification as read/unread - operationId: setRead - parameters: - - name: messageId - in: query - description: The message ID - required: true - schema: - type: string - - name: read - in: query - description: read/unread - required: true - schema: - type: boolean - responses: - '200': - description: Successful operation -components: - schemas: - Notifications: - type: array - items: - $ref: '#/components/schemas/Notification' - Notification: - properties: - id: - type: string - created: - type: string - format: date-time - readByUser: - type: boolean - isSystem: - type: boolean - origin: - type: string - title: - type: string - message: - type: string - topic: - type: string - actions: - type: array - items: - $ref: '#/components/schemas/Action' - required: [id, created, readByUser, isSystem, origin, title, actions] - Action: - properties: - id: - type: string - title: - type: string - url: - type: string - required: [id, title, url] - CreateBody: - properties: - origin: - type: string - title: - type: string - message: - type: string - topic: - type: string - required: [origin, title] \ No newline at end of file
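The `notifications.yaml` spec removed above described a small notifications API (create, list, count, mark read). As a reference for anyone replacing it, the sketch below shows a `createNotification` call as that spec defined it: POST to the `/notifications` path under the spec's `servers` URL, with the required `notifications-secret` header and a `CreateBody` JSON payload whose only required fields are `origin` and `title`. The secret and the payload values are placeholders, not values from this repository.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class NotificationClientSketch {
    public static void main(String[] args) throws Exception {
        // Base URL from the spec's `servers` entry plus the /notifications path; the secret is a placeholder.
        String url = "http://localhost:7007/api/notifications/notifications";
        String secret = "<notifications-secret>";

        // CreateBody requires `origin` and `title`; `message` and `topic` are optional.
        String body = "{\"origin\":\"m2k-workflow\",\"title\":\"Transformation finished\","
                + "\"message\":\"The output is ready to download.\"}";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(url))
                .header("Content-Type", "application/json")
                .header("notifications-secret", secret)
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        // A 200 response carries {"messageId": "..."} according to the removed spec.
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("HTTP " + response.statusCode() + " " + response.body());
    }
}
```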