diff --git a/Federation/README.md b/Federation/README.md deleted file mode 100644 index 53ce300f..00000000 --- a/Federation/README.md +++ /dev/null @@ -1,4 +0,0 @@ -[MIP Deployment](../README.md#FederatedDeployment) -> `Federated MIP Deployment` - -# Federated MIP Deployment -[Here](doc/Readme.md), you can find details about deploying and operating the *federated* MIP. diff --git a/Federation/config/.gitignore b/Federation/config/.gitignore deleted file mode 100644 index 289ccc68..00000000 --- a/Federation/config/.gitignore +++ /dev/null @@ -1 +0,0 @@ -pathologies.json diff --git a/Federation/config/disabledAlgorithms.json b/Federation/config/disabledAlgorithms.json deleted file mode 100644 index 28ce8657..00000000 --- a/Federation/config/disabledAlgorithms.json +++ /dev/null @@ -1 +0,0 @@ -["THREE_C"] diff --git a/Federation/doc/MIP_Federated_Configuration.png b/Federation/doc/MIP_Federated_Configuration.png deleted file mode 100644 index 7b16cc31..00000000 Binary files a/Federation/doc/MIP_Federated_Configuration.png and /dev/null differ diff --git a/Federation/doc/MIP_Federated_Configuration.vsdx b/Federation/doc/MIP_Federated_Configuration.vsdx deleted file mode 100644 index ca950e5a..00000000 Binary files a/Federation/doc/MIP_Federated_Configuration.vsdx and /dev/null differ diff --git a/Federation/doc/MIP_Federated_Deployment.png b/Federation/doc/MIP_Federated_Deployment.png deleted file mode 100644 index 527284cb..00000000 Binary files a/Federation/doc/MIP_Federated_Deployment.png and /dev/null differ diff --git a/Federation/doc/MIP_Federated_Deployment.vsdx b/Federation/doc/MIP_Federated_Deployment.vsdx deleted file mode 100644 index 7a3b7492..00000000 Binary files a/Federation/doc/MIP_Federated_Deployment.vsdx and /dev/null differ diff --git a/Federation/doc/MIP_Federated_Deployment_II.png b/Federation/doc/MIP_Federated_Deployment_II.png deleted file mode 100644 index 6f41c2a8..00000000 Binary files a/Federation/doc/MIP_Federated_Deployment_II.png and /dev/null differ diff --git a/Federation/doc/MIP_Federated_Deployment_II.vsdx b/Federation/doc/MIP_Federated_Deployment_II.vsdx deleted file mode 100644 index 7d6aed38..00000000 Binary files a/Federation/doc/MIP_Federated_Deployment_II.vsdx and /dev/null differ diff --git a/Federation/doc/MonitoringMIPFederation.md b/Federation/doc/MonitoringMIPFederation.md deleted file mode 100644 index d82bcce8..00000000 --- a/Federation/doc/MonitoringMIPFederation.md +++ /dev/null @@ -1,50 +0,0 @@ -[Federated MIP Deployment](Readme.md#MonitoringMIPFederation) -> `Monitoring the MIP Federation` - -# Monitoring the MIP Federation -## Real-time federation-wide logs -In the *tmux* session (opened as **mipadmin** user), in the **logs** window (#1), you have a multi-pane view of each node of the federation. -In each of them, you have the equivalent of the command *mip -f logs*, which shows the most appropriate logs (according to the node type), in "follow" mode. -Don't try to CTRL-C the command, because it will "kill" the pane, and then, as it's configured to stay, it will be in "dead pane" mode. If it's the case, you can "respawn" it. -For the different commands to use in the *tmux* session for this purpose, please see this [guide](OperatingMIPFederation.md#ShortTmuxUsageNotice). - -## Extended MIP Federation Backend status -There's a *mip* script's feature to get extended MIP federation status, at the backend side (without anything related to the Web interface). 
-In the *tmux* session (opened as **mipadmin** user), in the **ms** (master) window (#3) -``` -mip status -``` -Note that here again, we don't necessarily need to use the *--node-type ms* parameter, as we already did the **master** node configuration, and this node type has been stored for this machine. - -With the *mip status* command, you can use parameters like the *--verbose-level * one, to specify the amount of detail you want to see. -e.g. with a VERBOSE_LEVEL of *4*, you will see a lot of details, with all the different IPs (machine and Docker Swarm networking IPs), the Docker version, the containers' image names and versions, the deployment datetime, etc. - -## Specific MIP Federation Component logs -In the MIP [*tmux* session description](OperatingMIPFederation.md#CreatingTmuxSession), you saw that there's a dedicated window for each node. -According to the node type, there are different [components](../../README.md#Components) which run on the machine, and from which you can extract specific logs. -On each machine **but** the pusher, you can run -``` -mip --component logs -``` -Alternatively, if you want to see the logs in real time, you can use the *-f* flag. -You can also limit the number of lines you want to see with *--limit \*. - -As usual, don't hesitate to use -``` -mip --help -``` - -In order for you to have a better understanding of the available components for each type of node, here's a summary table -|Node Type|Components| -| -- | -- | -|**worker**|*exareme*| -|**master**|*exareme-master*| -||*exareme-keystore*| -|**ui**|*frontend*| -||*gateway*| -||*portalbackend*| -||*portalbackend_db*| -||*create_dbs*| - -You can see here that *exareme-master* and *exareme-keystore* contain a dash ( **-** ) character, and not an underscore ( **_** ) as indicated in the [components](../../README.md#Components) guide. -This is because that guide mainly covers the components available in the *local* MIP, and in this specific case, the names differ slightly. -You can also notice that in the Federation, there's no *keycloak* or *keycloak_db* component. diff --git a/Federation/doc/OperatingMIPFederation.md b/Federation/doc/OperatingMIPFederation.md deleted file mode 100644 index 3467b6ec..00000000 --- a/Federation/doc/OperatingMIPFederation.md +++ /dev/null @@ -1,186 +0,0 @@ -[Federated MIP Deployment](Readme.md#OperatingMIPFederation) -> `Operating the MIP Federation` - -# Operating the MIP Federation -## Using the *tmux* session -*tmux* is a virtual console which will continue to live on the **pusher**, even if you disconnect your current session. Whenever you need to operate the federation, you will have to open an SSH session on the **pusher** and **re**-attach to the *tmux* session. Then, when you're done and want to disconnect, first detach the *tmux* session, then close your SSH connection. - -### Short *tmux* usage notice -We won't detail the *tmux* usage here, but as a short notice: - -* When you're outside (not attached to) a *tmux* session - * List all the available *tmux* sessions (before you attach a session) - ``` - tmux ls - ``` - You will see the different session names or IDs.
- * Attach to a *tmux* session - ``` - tmux a -t - ``` -* When you're inside (attached to) a *tmux* session - * Any operation will have to begin with *CTRL + j* (usually *CTRL + b*, but to avoid issues when having *tmux* inside *tmux* inside *tmux*, all the generated MIP-related *tmux* sessions have been redefined with *CTRL + j*) - * Detach the *tmux* session - ``` - "CTRL + j" d - ``` - * Navigate to the *next* window inside the session - ``` - "CTRL + j" n - ``` - * Navigate to the *previous* window inside the session - ``` - "CTRL + j" p - ``` - * Directly jump to a certain window number (0-9 only) - ``` - "CTRL + j" - ``` - * Navigate to the *next* pane inside a multi-panes window (like the logs window (1)) - ``` - "CTRL + j" o - ``` - On the MIP-related generated *tmux* session, on any OS **but** Mac (currently), you can also use the *ALT + ARROW_KEYS* shortcut - * Quickly show the pane numbers - ``` - "CTRL + j" q - ``` - * Directly jump to a certain pane number (0-9 only) - ``` - "CTRL + j" q (the number must be entered really quickly after the q) - ``` - * Zoom (in or out) the currently selected pane - ``` - "CTRL + j" z - ``` - * Enter copy mode (to stop the live logs and go back in the pane's history) - ``` - "CTRL + j" [ - ``` - * Quit copy mode - ``` - q or ESC - ``` - * Respawn a "dead" pane or window - ``` - "CTRL + j" r - ``` -If you want to know more about *tmux*, go on the [tmux Cheat Sheet](https://tmuxcheatsheet.com). - -### Creating the *tmux* session -Now that you know a bit more about *tmux*, we will generate the MIP special *tmux* session (or connect to it, if it already exists). - -On the **pusher** node, with the **mipadmin** user, you have to run -``` -mip --pusher --federation tmux -``` -This will generate the session if it does not exist, and then, in any case, attach to it. -If you want to force **re**-generate the *tmux* session, you can use the *--force* flag. - -Now that you're inside the session, you will notice that several windows are present (with **:** tab names). You can also notice that, left to the tab names, there's the *tmux* session's name (in blue), and it should be the . - -|Window Number|Window Name|Description| -| -- | -- | -- | -|0|bash|Console on the **pusher** node| -|1|logs|Multi-panes window to display real-time logs of all the nodes (top-to-bottom, left-to-right: **master**, **ui**, then all the workers)| -|2|deploy|A quick help about the **pusher** commands to operate the main MIP Federation tasks| -|3|-ms|Console on the **master** node| -|4|-ui|Console on the **ui** node| -|5-n|-wk-|Console on the **worker** nodes| - -## Consolidating data -In the *tmux* session (opened as **mipadmin** user), in the **pusher** window (#0): -``` -mip --pusher --federation data consolidate -``` -For each pathology accross the federation **workers**, this will list the different available datasets in the pathology's Common Data Elements (CDE) file, and then, from all the CDEs, it will generate the *pathologies.json* file (and push it to the **ui** node), used by the MIP Web interface to display the different variables. -In order for you to better understand this process, here's a step-by-step action list - -1. For each **worker** node - - 1. Connect to the node - 1. Ask it to prepare a list of the available datasets (and for each of those, give a prototype (headers list) of the data) - 1. Download this archived list on the **pusher** - 1. 
For each pathology in this list, download (on the **pusher**) the corresponding CDE from the **master** node (or directly from the data catalogue, if asked via optional parameters) -1. On the **pusher** node, parse the prepared pathology list - 1. For each pathology, edit the CDE and there, list **only** the available datasets in the federation - 1. Redistribute this modified CDE on the **master** and on each participating **worker** node - 1. With all the available CDE files, "compile" the *pathologies.json* file, and push it on the **ui** node - -Alternatively, you can ask to **re**-label the pathologies and/or the datasets by using the flag *--review-dataset-labels*. -As said earlier, you can ask the script to download the CDEs from the data catalogue using the flag *--online-cdes*. -If you want to use an non-default data catalogue, you can use the following parameters -* *--datacatalogue-protocol* (*http* | *https*) -* *--datacatalogue-host * - -Don't hesitate to use: -``` -mip --help -``` - -## Compiling data -In the *tmux* session (opened as **mipadmin** user), in the **pusher** window (#0) -``` -mip --pusher --federation data compile -``` -At any time, you can **re**-compile by using the *--force* flag. -You can also specify the pathology(ies) to compile, with the *--pathology \* parameter (comma-separated pathologies list). -If you want to limitate the compilation to a certain node, you can use the *--node \* parameter, but in this case, you'll also have to pass the *--pathology* argument. - -As usual, to get more details, use *mip --help* - -## Deploying services -In the *tmux* session (opened as **mipadmin** user), in the **pusher** window (#0) -``` -mip --pusher --federation service deploy -``` -This will deploy the Docker Swarm network, the master-related containers on the **master** node, and the worker container on each **worker** node. -At any time, you can **re**-deploy by using the same command. - -### Service-related additionnal features -Alternatively, there are different things that you can run in the same way (still on the **pusher** console of the *tmux* session) - -* Showing the services status - ``` - mip --pusher --federation service status - ``` -* Starting the services - ``` - mip --pusher --federation service start - ``` -* Stopping the services - ``` - mip --pusher --federation service stop - ``` -* Restarting the services - ``` - mip --pusher --federation service restart - ``` - Note that a *restart* is actually different from a "*stop* *start*" cycle. See the Docker documentation. - -For all these commands (including the *deploy* one), you can use the *--node * parameter to limitate the scope to a certain node. - -As usual, to get more details, use *mip --help* - -## [Synchronizing the KeyCloak roles](SynchronizingKeycloakRoles.md). - -## Run the MIP Web Interface -In the *tmux* session (opened as **mipadmin** user), in the **ui** window (#4) -``` -mip start -``` -Note that here, we don't necessarily need to use the *--node-type ui* parameter, as we already did the **ui** node configuration, and this node type has been stored for this machine. - -After launching, you should be able to browse the MIP on the URL which will be displayed. -Note that once the command ends, it may still take up to one minute until the MIP's Web interface is really operational. 
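If you'd rather script that waiting period than refresh the browser by hand, here is a minimal sketch; it assumes the frontend is published on port 80 of the **ui** node itself (as in the Federation *docker-compose.yml*), so adjust the URL to the one displayed by *mip start* if needed:
```
# Poll the local frontend for up to ~2 minutes, 5 seconds at a time.
for attempt in $(seq 1 24); do
    if curl -sf -o /dev/null http://localhost/; then
        echo "MIP Web interface is up"
        break
    fi
    sleep 5
done
```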
-Of course, you can also do other actions here: -* Stopping the MIP Web interface - ``` - mip stop - ``` -* Restarting the MIP Web interface - ``` - mip restart - ``` - Note that a *restart* is actually different from a "*stop* *start*" cycle. See the Docker documentation. - -At anytime, you can learn more about the *mip* commands with *mip --help* diff --git a/Federation/doc/PreparingKeycloakRealmClient.md b/Federation/doc/PreparingKeycloakRealmClient.md deleted file mode 100644 index 0aa50263..00000000 --- a/Federation/doc/PreparingKeycloakRealmClient.md +++ /dev/null @@ -1,84 +0,0 @@ -[Operating MIP Federation](OperatingMIPFederation.md#PreparingKeycloak) -> `Preparing KeyCloak Realm's Client` - -# Preparing KeyCloak Realm's Client -1. Preparing KeyCloak realm's client - - In the KeyCloak's interface, you will have to create a client (**realm-management** in our case) which has - * *openid-connect* Client Protocol - * *confidential* Access Type - * *Direct Access Grants Enabled* - * *Service Accounts Enabled* - * *Valid Redirect URIs* set to "*" - - Then, in its roles, prepare the following *Roles* - * *create-client* - * *impersonation* - * *manage-authorization* - * *manage-clients* - * *manage-events* - * *manage-identity-providers* - * *manage-realm* - * *manage-users* - * *query-clients* - * *query-groups* - * *query-realms* - * *query-users* - * *view-authorization* - * *view-events* - * *view-identity-providers* - * *view-realm* - - Also, you'll have to prepare "composite" *Roles* - * *view-clients*, containing the following *realm-management* client's roles - * *query-clients* - * *view-users*, containing the following *realm-management* client's roles - * *query-groups* - * *query-users* - * *realm-admin*, containing the following *realm-management* client's roles - * *create-client* - * *impersonation* - * *manage-authorization* - * *manage-clients* - * *manage-events* - * *manage-identity-providers* - * *manage-realm* - * *manage-users* - * *query-clients* - * *query-groups* - * *query-realms* - * *query-users* - * *view-authorization* - * *view-clients* (previously created composite role) - * *view-events* - * *view-identity-providers* - * *view-realm* - * *view-users* (previously created composite role) - - Then, you'll have to prepare *Mappers* - * *Client ID* - |Parameter|Value| - | -- | -- | - |*Mapper Type*|*User Session Note*| - |*User Session Note*|*clientId*| - |*Token Claim Name*|*clientId*| - * *Client IP Address* - |Parameter|Value| - | -- | -- | - |*Mapper Type*|*User Session Note*| - |*User Session Note*|*clientAddress*| - |*Token Claim Name*|*clientAddress*| - * *Client Host* - |Parameter|Value| - | -- | -- | - |*Mapper Type*|*User Session Note*| - |*User Session Note*|*clientHost*| - |*Token Claim Name*|*clientHost*| - - Finally, you'll have to prepare the following *Service Account Roles*, *Client Roles* - * *account* - * *manage-account* (composite role which contains "*manage-account-links*" *account* client's role) - * *realm-management* - * All the roles -1. Preparing KeyCloak realm's admin user - - Here, you just have to create a simple user, without any role, **realmadmin**, in our case diff --git a/Federation/doc/PreparingMaster.md b/Federation/doc/PreparingMaster.md deleted file mode 100644 index a894de65..00000000 --- a/Federation/doc/PreparingMaster.md +++ /dev/null @@ -1,50 +0,0 @@ -[Federated MIP Deployment](Readme.md#PreparingMaster) -> `Preparing the master node` - -# Preparing the **master** node -1. 
Install the **master** - - As a "sudoer" user: - 1. Set the hostname, with a meaningful name, i.e. - ``` - sudo hostnamectl set-hostname -ms - ``` - 1. Configure the networking, including the DNS client - 1. Install the MIP - ``` - git clone https://github.com/HBPMedical/mip-deployment - ``` - ``` - sudo mip-deployment/mip --node-type ms --yes install - ``` - Here, the *--node-type* parameter is very important, because it tells the script that this node will be a **master** (ms). - Following the same process than for the workers, you can also put the specific parameters (*--version*, *--branch* or *--commit*, used with the flag *--force-install-unstable*) if you want to install a specific version. - -1. Configure the **master** - - Still as a "sudoer" user: - ``` - sudo mip --node-type ms --yes configure all - ``` - - Like for the workers, by default, this will create a user *mipadmin*, and you can also change its password: - ``` - sudo passwd mipadmin - ``` - - Again, later on, you will have to give the *pusher* informations about this *master* node: - * Its IP address - * The user (*mipadmin*) - * The user's password - -1. Prepare the CDE metadata files (**only if you don't want to automatically download their latest version on the data catalogue**) - - If you want to manage your CDEs by yourself, you'll have to place them on the **master** node, as follows. - * For every pathology over the whole federation, as **mipadmin** user: - ``` - sudo mkdir -p /data// - ``` - * Then, still as **mipadmin**, place the corresponding *CDEsMetadata.json* file in the right pathology folder. - * Once it's done, you can set the data owner to *mipadmin* - ``` - sudo chown -R mipadmin.mipadmin /data - ``` diff --git a/Federation/doc/PreparingPusher.md b/Federation/doc/PreparingPusher.md deleted file mode 100644 index 69f6dffb..00000000 --- a/Federation/doc/PreparingPusher.md +++ /dev/null @@ -1,61 +0,0 @@ -[Federated MIP Deployment](Readme.md#PreparingPusher) -> `Preparing the pusher node` - -# Preparing the **pusher** node -As the **pusher** can virtually be any type of node (as it doesn't conflict with any MIP component), the pusher is not a type of node, but a flag in the *mip* script. -That said, preparing a dedicated **pusher** node is strongly encouraged, in order to avoid any confusion. -Also, as the **pusher** will operate the federation, remotely controlling the **worker** and the **master** nodes, the federation name will be required for each pusher operation. It also means that a pusher can manage many different federations from the same machine (which can also be a participant node in a federation, at the same time, but again, this kind of setup can become a source of confusions). - -1. Install the **pusher** - - As a "sudoer" user: - 1. Set the hostname, with a meaningful name, i.e. - ``` - sudo hostnamectl set-hostname -pusher - ``` - 1. Configure the networking, including the DNS client - 1. Install the MIP - ``` - git clone https://github.com/HBPMedical/mip-deployment - ``` - ``` - sudo mip-deployment/mip --yes --pusher --federation install - ``` - As said earlier, for the **pusher**, the *--node-type* parameter is not used. Instead, the *--pusher* flag and the *--federation \* parameter are mandatory! 
In the case of the **pusher**, things are a bit special: as this installation will clone the *exareme* repository, it shouldn't install an arbitrary version of *exareme*; instead, it should install the version listed in the *mip-deployment/.versions_env* file, a version which has been tested and validated to work well with the MIP's Web interface. - The *exareme* folder will be cloned by default in */opt/\/exareme*, and *mip-deployment* won't be cloned by this installation. - That said, some binaries will be cloned from the *mip-deployment* repository, and installed in */usr/local/bin*. - - Following the same process as for the other federation nodes, you can also pass the specific parameters (*--version*, *--branch* or *--commit*, used with the flag *--force-install-unstable*) if you want to install a specific version. - That said, the **pusher**'s specifics make it a bit trickier: along the process, the *--version*, *--branch* or *--commit* parameters can be used to target versions of *exareme* instead of *mip-deployment*. That's why they should come in pairs: - - * *--version \* with *--mip-version \* - * *--branch \* with *--mip-branch \* - * *--commit \* with *--mip-commit \* - - Again, by default, without specifying anything, the *exareme* version installed will match the one written in the *mip-deployment/.versions_env* file of the latest stable *mip-deployment* release, so if you don't understand these complexities, no worries. - - As usual, to get more details, use: - ``` - mip --help - ``` - -1. Configure the **pusher** - - As this operation requires interaction at some point, we won't use the *--quiet* nor *--yes* parameters. - - Still as a "sudoer" user: - ``` - sudo mip --pusher --federation configure all - ``` - - This will first ask you to enter a vault password. This vault will securely store all the sensitive details (like credentials) about the remote nodes. - For each future task which implies access to the remote nodes, the *mip* script will ask you for this vault password again. - Now that this vault question is answered, the *mip configure* process will ask you to provide information about the **master**, **ui** and **workers** nodes, to prepare the SSH identity exchange with the federation participants. - For each node (**master**, **ui**, then the **workers**), you'll have to enter: - - * The internal IP address, or an address which is in the same federation LAN (physical or virtual, if you prepared a VPN). - * The node's administration user (usually *mipadmin*) - * This user's password - - The *mip* script will then establish an SSH connection to the node, install its SSH identity there for future passwordless connections, and get the node's real hostname. If you configured the nodes with a meaningful hostname, you should recognize it, which lets you verify that you configured the correct machine. diff --git a/Federation/doc/PreparingUI.md b/Federation/doc/PreparingUI.md deleted file mode 100644 index a2fa9627..00000000 --- a/Federation/doc/PreparingUI.md +++ /dev/null @@ -1,83 +0,0 @@ -[Federated MIP Deployment](Readme.md#PreparingUI) -> `Preparing the ui node` - -# Preparing the **ui** node -1. Install the **ui** - - As a "sudoer" user: - 1. Set the hostname, with a meaningful name, e.g. - ``` - sudo hostnamectl set-hostname -ui - ``` - 1. Configure the networking, including the DNS client - 1.
Install the MIP - ``` - git clone https://github.com/HBPMedical/mip-deployment - ``` - ``` - sudo mip-deployment/mip --node-type ui --yes --no-run install - ``` - Here, the *--node-type* parameter is very important, because it tells the script that this node will be a **ui**. - Following the same process than for the workers and the master, you can also put the specific parameters (*--version*, *--branch* or *--commit*, used with the flag *--force-install-unstable*) if you want to install a specific version. - As a reminder, the MIP will be installed by default in */opt/mip-deployment*. - Still by default, the *mip-deployment* folder you just created when cloning the repository will be deleted after the installation. If you want to keep it, just use the *--keep-installer* flag. - -1. Configure the **ui** - - Still as the "sudoer" user: - ``` - sudo mip --node-type ui configure all - ``` - With this command, the *mip* script will interactively ask you for the different parameters. Of course, you can also set these parameters within the *mip configure* command. - If you want to configure a particular part, you can run *sudo mip configure \* (like *sudo mip --node-type ui configure exareme-ip*). - At last, if you want to **re**-configure something, you will have to use the *--force* flag. - - The configurations which come as variables will be written in the *.mipenv* file, at the root level of the *mip-deployment* folder. This file is automatically set by the *mip configure* command, but can also be edited by hand (**IF YOU KNOW WHAT YOU'RE DOING**) later on (running *mip configure* is still a mandatory step prior to doing anything like that). - - For each of these variables, there is a matching *mip configure* command parameter: - - |Variable|Command parameter|Mandatory| - | -- | -- | -- | - |MIP_LINK|--link \|yes| - |EXTERNAL_MIP_PROTOCOL|--ext-protocol \|yes| - |PUBLIC_MIP_PROTOCOL|--protocol \|yes| - |PUBLIC_MIP_HOST|--host \|yes| - |EXAREME_IP|--exareme-ip \|yes| - |KEYCLOAK_AUTHENTICATION (0/1)|--without-keycloak-authentication \| --with-keycloak-authentication|yes| - |KEYCLOAK_PROTOCOL|--keycloak-protocol \|| - |KEYCLOAK_URL|--keycloak-url \|| - |KEYCLOAK_REALM|--keycloak-realm \|| - |KEYCLOAK_CLIENT_ID|--keycloak-client-id \|| - |KEYCLOAK_CLIENT_SECRET|--keycloak-client-secret \|| - |DATACATALOGUE_PROTOCOL|--datacatalogue-protocol \|| - |DATACATALOGUE_HOST|--datacatalogue-host \|| - - Note that if you don't provide anything for KeyCloak, it will use the default configuration to connect to the central *EBRAINS*' KeyCloak, with the *mipfedqa* client. - For the data catalogue, the default *EBRAINS* DC hostname will be used as well, if no replacement value is provided. - - In order to better understand the different configuration parameters, check the following picture: - ![MIP Federated Configuration Scheme](MIP_Federated_Configuration.png) - - You can see that there are two different setups: - * **direct** - - The **ui** node's Web interface is directly reachable from the browser. - - * **proxied** - - The **ui** node's Web interface is reachable through a reverse-proxy server. 
Currently, it's been tested and validated with the Apache server, using this kind of VirtualHost configuration (the proxy directives require *mod_proxy* and *mod_proxy_http* to be enabled): - - ``` - <VirtualHost *:80> - ServerName public.mip.address - - <Location /> - ProxyPass http://internal.mip.address/ - ProxyPassReverse http://internal.mip.address/ - Require all granted - </Location> - </VirtualHost> - ``` - - Again, don't hesitate to use: - ``` - mip --help - ``` diff --git a/Federation/doc/PreparingWorkers.md b/Federation/doc/PreparingWorkers.md deleted file mode 100644 index 1573223e..00000000 --- a/Federation/doc/PreparingWorkers.md +++ /dev/null @@ -1,62 +0,0 @@ -[Federated MIP Deployment](Readme.md#PreparingWorkers) -> `Preparing the worker nodes` - -# Preparing the **worker** nodes -1. Install the **workers** - - On each worker node, as a "sudoer" user: - 1. Set the hostname, with a meaningful name, e.g. - ``` - sudo hostnamectl set-hostname -wk - ``` - 1. Configure the networking, including the DNS client - 1. Install the MIP - ``` - git clone https://github.com/HBPMedical/mip-deployment - ``` - ``` - sudo mip-deployment/mip --node-type wk --yes install - ``` - Here, the *--node-type* parameter is very important, because it tells the script that this node will be a **worker** (wk). - If you want to install a specific version of the MIP, you can specify the tag (*--version \*), the branch (*--branch \*) or even the commit ID (*--commit \*), each of these parameters having precedence over the next one(s). If you specify a non-default version, you also have to force this installation with the flag *--force-install-unstable*. - - Don't hesitate to use: - ``` - mip --help - ``` - -1. Configure the **workers** - - On each worker node, still as a "sudoer" user: - ``` - sudo mip --node-type wk --yes configure all - ``` - - By default, this will create a user *mipadmin* (which will be in the *docker* and *sudo* groups). This user will be used by the *pusher* to operate this node. If you don't know its password or want to change it, do it right now with: - ``` - sudo passwd mipadmin - ``` - - Later on, you (or the central system administrator) will need to provide the *pusher* node information about this *worker* node: - - * The node IP address, with which the pusher will connect (via ssh), using the *mipadmin* user: - ``` - ip a - ``` - This command will give you the machine's networking configuration details. You'll have to find the IP with which this node is reachable from the other federation nodes. If you prepared a VPN network, you'll have to use the VPN IP of this node. - * The user (*mipadmin*) - * The user's password - -1. Prepare the datasets on the **workers** - - * On each worker, as the **mipadmin** user (*sudo su - mipadmin* can help you become this user if you don't know its password), prepare the federation data folder. Go with this pattern: - ``` - sudo mkdir -p /data/ - ``` - - If your federation is named *mipfed1*, the data folder will have to be */data/mipfed1*. - * Place your datasets in /data/// - * If you have CDE metadata files, they will have to be either placed on the **master** node, or downloaded (latest version for now) from the central data catalogue.
* Set the data folder to be owned by *mipadmin* - ``` - sudo chown -R mipadmin.mipadmin /data - ``` diff --git a/Federation/doc/Readme.md b/Federation/doc/Readme.md deleted file mode 100644 index 6dbade6d..00000000 --- a/Federation/doc/Readme.md +++ /dev/null @@ -1,90 +0,0 @@ -[MIP Deployment](../../README.md#FederatedDeployment) -> `Federated MIP Deployment` - -# Federated MIP Deployment -## Structure -![MIP Federated Deployment Scheme](MIP_Federated_Deployment_II.png) - -The federated MIP is meant to run on several VMs/physical servers (nodes): - -* A **Pusher** (*4*) -* A **Master** (*5*) -* A **UI** (*6*) -* Some **Workers** (*7*) - -As the *pusher* service can run on any node, the bare minimum number of required servers is 3. That said, it's still strongly encouraged to deploy the pusher on a dedicated server. -Unlike a *local* MIP setup, not all the required components are in this *mip-deployment* repository (*1*). -In addition, for all the backend requirements, we will need the *exareme* repository (*2*) content, and another component (provided as an external service) which is the global *HBP* KeyCloak instance (*5*). -Additionally, another external service can be used more occasionally: the *data catalogue*. -All of this means that the federated MIP is not designed (at least not at the moment) to run as an independent MIP setup. -That said, you can deploy your own KeyCloak server and your own data catalogue, but these processes won't be documented here. - -### Pusher (*4*) -The **Pusher** will contain the whole *exareme* repository (*2*) structure. Its role will be to "push" containers and configurations to the **Master** and the **Worker** nodes, and to initiate a Docker Swarm network from the **Master**. -This pusher will need *ssh* access to the master and the workers, and this also includes *root* access there. -As a Docker Swarm network is quite complicated in terms of layer 3 network protocols and layer 4 TCP and UDP port requirements, it is highly recommended to run all the different nodes in the same network subnet. That's why, if you need to have remote **Worker** nodes in hospitals, it's better to build VPN connectivity (preferably at layer 2) between all the nodes. Such a VPN setup won't be documented here either. -As this **Pusher** is a central actor in the federation, it's also the best candidate to initiate federated actions, like the synchronization (consolidation) of metadata and pathology details over the federation, the automated data compilation on the worker and master nodes, and the remote KeyCloak roles synchronization, amongst other useful features. - -### Master (*5*) -The **Master** will "schedule" and "rule" the **Workers**. This will be the main Docker Swarm network component, the Swarm Master. -The Docker containers running on this machine will be *exareme-master* and *exareme-keystore*. - -### UI (*6*) -The **UI** will contain most of the MIP Docker containers, but it won't have any *exareme*- or *keycloak*-related container. -This node will require a TCP connection with the **Master** node only (not counting the external KeyCloak instance, nor the reference to the external data catalogue). -This *mip-deployment* repository (*1*) includes a "Federation" subfolder with another *docker-compose.yml* file, which contains the references to the different Docker images (hosted on Docker Hub (*2*)) and their version tags, required to run the **UI** node only!
-As this node will actually run the user Web interface, it will be the only one which is required to be reachable via HTTP/HTTPS. - -### Workers (*7*) -The **Worker** (usually located in hospital premises) nodes (at least one) will have to host the actual node-related datasets, and will run the *exareme* container only. - -## Requirements -For the different nodes, the requirements are the same as for a local MIP. - -### Hardware -* 40 GB HDD -* 8 GB RAM -* 2 CPU Cores - -### Software -* Ubuntu Server 20.04 (minimal installation, without GUI). - -## Setup -### Preparing the machines -Prepare a VM/Physical machine with **Ubuntu Server 20.04** (basic OS, without GUI), for each node of the federation. As we want to be able to run federated data analysis, we'll typically go with two workers, with the following plan: - -* **pusher** -* **ms** (master) -* **ui** (frontend) -* **wk1** (worker 1) -* **wk2** (worker 2) - -On every node **but** the pusher, we'll use the *mip-deployment* repository as an installer. Then, once installed, the *mip* script will (by default) delete the installer folder. -In the **ui** node, the installer will **re**-clone the *mip-deployment* repository, but in /opt. Therefore, after the installation has been done, it can also remove the *mip-deployment* folder that was used as the installer. -If you want to keep the installer folder, you'll have to explicitely use the flag *--keep-installer*. - -1. [Preparing the **worker** nodes](PreparingWorkers.md">) -1. [Preparing the **master** node](PreparingMaster.md) -1. [Preparing the **ui** node](PreparingUI.md) -1. [Preparing the **pusher** node](PreparingPusher.md) - -## Operating the MIP Federation -The first time, after the setup, just remember to follow these steps in the right order: - -1. [Generate the *tmux* session](OperatingMIPFederation.md#GeneratingTmuxSession) and connect to it -1. [Consolidate the data](OperatingMIPFederation.md#ConsolidatingData) -1. [Compile the data](OperatingMIPFederation.md#CompilingData) -1. [Deploy the backend services](OperatingMIPFederation.md#DeployingServices) (Docker Swarm) -1. [Synchronize the KeyCloak roles](OperatingMIPFederation.md#SynchronizingKeycloakRoles) -1. [Run the MIP Web Interface](OperatingMIPFederation.md#RunningWebInterface) - -## Monitoring the MIP Federation -1. [Real-time federation-wide logs](MonitoringMIPFederation.md#RealTimeLogs) -1. [Extended MIP Federation Backend status](MonitoringMIPFederation.md#ExtendedMIPFederationBackendStatus) -1. [Specific MIP Federation Component logs](MonitoringMIPFederation.md#SpecificMIPFederationComponentLogs) - -## Upgrading the MIP Federation -1. [Upgrading the **pusher** node](UpgradingMIPFederation.md#UpgradingPusher) -1. [Upgrading the **master** node](UpgradingMIPFederation.md#UpgradingMaster) -1. [Upgrading the **ui** node](UpgradingMIPFederation.md#UpgradingUI) -1. [Upgrading the **worker** nodes](UpgradingMIPFederation.md#UpgradingWorkers) -1. 
[Redeploying](UpgradingMIPFederation.md#Redeploying) diff --git a/Federation/doc/SynchronizingKeycloakRoles.md b/Federation/doc/SynchronizingKeycloakRoles.md deleted file mode 100644 index 459c771c..00000000 --- a/Federation/doc/SynchronizingKeycloakRoles.md +++ /dev/null @@ -1,55 +0,0 @@ -[Operating the MIP Federation](OperatingMIPFederation.md#SynchronizingKeycloakRoles) -> `Synchronizing the KeyCloak Roles` - -# Synchronizing the KeyCloak Roles -The Authentication (AuthN) and Authorization (AuthZ) processes of the MIP are managed by *KeyCloak*, which, unlike in the *Local* MIP, is usually an **external** service, hosted by default in the EBRAINS infrastructure. -The roles are taken into account by the MIP Web interface, each role name being parsed and then checked to show (or not) certain items (features, pathologies, datasets...) -Within a federation, which may have many different pathologies and datasets spread over many nodes, the number of roles can increase rapidly, and each time you change anything related to these items, you will have to change the roles accordingly in the KeyCloak interface (to get an idea of the MIP-related role naming convention, read this [guide](../../documentation/UserAuthorizations.md)). - -KeyCloak's interface is slow and not ergonomic, and it has no notion of the MIP conventions. Managing the roles by hand takes a considerable amount of time and, given the complexity and sensitivity of the process, is error-prone. - -To drastically reduce the time required and eliminate the risk of human error, a roles synchronization script has been written; it is introduced into *mip-deployment* starting with the MIP 6.5 release. -This script should be automatically installed on the **pusher** node, as it **has** to be executed from there! - -### Preparing the *realm-management* client and the *realmadmin* realm user in KeyCloak -Prior to running anything here, you need to make sure that the KeyCloak server's *realm* is ready to be used by the script. In other words, the *keycloak_roles_sync.py* script needs a realm client (with certain configurations and roles) and a realm user (using this realm client). -For that purpose, you will need a KeyCloak Administrator who has all the required privileges to fully manage the realm. - -In order to do this, follow this [guide](PreparingKeycloakRealmClient.md). - -### Exporting the MIP Federation data structure -As the synchronization script uses a JSON file representing the federation structure (nodes, pathologies, datasets), we have to generate this file with the *mip* script, prior to using the *keycloak_roles_sync.py* script. - -In the *tmux* session (opened as **mipadmin** user), in the **pusher** window (#0) -``` -mip --pusher --federation --export-data-structure data consolidate > mip_data_structure.json -``` -The *mip_data_structure.json* file name can be anything you want.
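Before feeding this file to the synchronization script, it can be worth a quick manual review; a minimal sketch (the exact keys depend on the structure exported by the *mip* script, which is not detailed here):
```
# Pretty-print the exported federation structure (python3 ships with Ubuntu 20.04).
python3 -m json.tool mip_data_structure.json | less
```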
-Obviously, you'll have to **re**-run this anytime you will change **anything** about the -* nodes (adding or removing a node, or changing its hostname) -* pathologies (adding, removing or renaming or moving from/to any node) -* datasets (adding, removing, or renaming, or moving from/to any node) - -### Using the *keycloak_roles_sync.py* script -In the *tmux* session (opened as **mipadmin** user), in the **pusher** window (#0) -``` -keycloak_roles_sync.py --admin-client-secret --admin-password '' --sync-client-id '' --data-structure-json-file mip_data_structure.json -``` - -For this script, the different parameters are -|Parameter|Description|Mandatory| -| -- | -- | -- | -|*--server-url* \*|KeyCloak server URL (if different from the default EBRAINS one)|| -|*--realm-name \*|KeyCloak Realm name (if different from the default EBRAINS one)|| -|*--admin-client-id \*|KeyCloak Realm Administrator client-id (if different from the default EBRAINS one)|| -|*--admin-client-secret \*|KeyCloak Realm Administrator client-secret|**yes**| -|*--admin-username \*|KeyCloak Realm Administrator username (if different from the default EBRAINS one)|| -|*--admin-password '\'*||**yes**| -|*--sync-client-id \*|KeyCloak Realm client-id to create/synchronize. You are strongly encourage to use the MIP Federation name here!|**yes**| -|*--copy-users-from-client-id \*|If you want to copy the user/group relationships from another client/group_prefix|| -|*--data-structure-json-file \*|JSON data structure file to use as a pattern for roles/groups creation/synchronization|**yes**| - -Alternatively, you can generate the JSON structure with the *mip* script, and then, "pipe" it to the *keycloak_roles_sync.py* script. In this sense, the last parameter here is not "really" mandatory. -This should look like -``` -mip --pusher --federation --export-data-structure data consolidate | keycloak_roles_sync.py --admin-client-secret --admin-password '' --sync-client-id '' -``` diff --git a/Federation/doc/UpgradingMIPFederation.md b/Federation/doc/UpgradingMIPFederation.md deleted file mode 100644 index 43c556e8..00000000 --- a/Federation/doc/UpgradingMIPFederation.md +++ /dev/null @@ -1,159 +0,0 @@ -[Federated MIP Deployment](Readme.md#UpgradingMIPFederation) -> `Upgrading the MIP Federation` - -# Upgrading the MIP Federation -## Upgrading the **pusher** node -### Check-list -* Connect to the **pusher** node as *mipadmin* -* If you're in the *tmux* session, detach it, prior to upgrading the node -* Make sure you're not in the *exareme* deployment folder - ``` - cd - ``` - -### Installing the new version of the *mip* script -``` -git clone https://github.com/HBPMedical/mip-deployment -``` -``` -sudo mip-deployment/mip --pusher --federation --self --force install -``` -As it was already explained in the [**pusher** preparation](PreparingPusher.md), there are other parameters you can use here to install another specific version of the *MIP* (and they can be used in the case of the *mip script* install as well). - -#### Cleanup -``` -rm -rf mip-deployment -``` - -### Installing the new *MIP* version -``` -sudo mip --pusher --federation install -``` -Again, as explained in the [**pusher** preparation](PreparingPusher.md), if you have to install a specific version, use the documented parameters, and don't hesitate to call *mip --help*. - -When upgrading a **pusher** or **ui** node, a backup of the current installation is made. 
Then, the current installation folder is deleted, the new one is cloned from github, and finally, the backup is automatically restored. - -### Fixing the pre-6.5 folders after the automatic restore -When upgrading to *MIP 6.5* from an older version, there's a breaking change in the *exareme* deployment folder, which means that the restore will result in some folders being put in the wrong place. Let's fix it: - -``` -sudo cp -r /opt//exareme/Federated-Deployment/Compose-Files /opt//exareme/Federated-Deployment/docker-swarm/ -``` -``` -sudo rm -r /opt//exareme/Federated-Deployment/Compose-Files -``` -``` -sudo cp -r /opt//exareme/Federated-Deployment/Docker-Ansible /opt//exareme/Federated-Deployment/docker-swarm/ -``` -``` -sudo rm -r /opt//exareme/Federated-Deployment/Docker-Ansible -``` - -### Fixing the configuration/permissions -``` -sudo mip --pusher --federation configure all -``` - -You can now reconnect the tmux session to work on the other nodes -``` -mip --pusher --federation tmux -``` - -## Upgrading the **master** node -### Check-list -* In the *tmux* session (opened as **mipadmin** user), go in the **ms** (master) window (#3) - -### Installing the new version of the *mip* script -``` -git clone https://github.com/HBPMedical/mip-deployment -``` -``` -sudo mip-deployment/mip --self --force install -``` -As it was already explained in the [**master** preparation](PreparingMaster.md), there are other parameters you can use here to install another specific version of the *MIP* (and they can be used in the case of the *mip script* install as well). - -#### Cleanup - -``` -rm -rf mip-deployment -``` - -### Installing the new *MIP* version -``` -sudo mip --node-type ms install -``` -Again, as explained in the [**master** preparation](PreparingMaster.md), if you have to install a specific version, use the documented parameters, and don't hesitate to call *mip --help*. - -## Upgrading the **ui** node -### Check-list -* In the *tmux* session (opened as **mipadmin** user), in the **ui** window (#4) -* Make sure you're not in the *mip-deployment* folder - ``` - cd - ``` - -### Installing the new version of the *mip* script -``` -git clone https://github.com/HBPMedical/mip-deployment -``` -``` -sudo mip-deployment/mip --self --force install -``` -As it was already explained in the [**ui** preparation](PreparingUI.md), there are other parameters you can use here to install another specific version of the *MIP* (and they can be used in the case of the *mip script* install as well). - -#### Cleanup - -``` -rm -rf mip-deployment -``` - -### Installing the new *MIP* version -``` -sudo mip --node-type ui install -``` -Again, as explained in the [**ui** preparation](PreparingUI.md), if you have to install a specific version, use the documented parameters, and don't hesitate to call *mip --help*. - -## Upgrading the **wk** nodes -In the *tmux* session (opened as **mipadmin** user), do this in each worker windows (#5-#n) - -### Installing the new version of the *mip* script -``` -git clone https://github.com/HBPMedical/mip-deployment -``` -``` -sudo mip-deployment/mip --pusher --federation --self --force install -``` -As it was already explained in the [**workers** preparation](PreparingWorkers.md), there are other parameters you can use here to install another specific version of the *MIP* (and they can be used in the case of the *mip script* install as well). 
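For example, if a worker has to be pinned to a specific release rather than the latest stable one, the version-selection parameters documented in the workers preparation can, in principle, be combined with the upgrade command; the tag below is purely hypothetical and the combination is an assumption based on that documentation:
```
# Hypothetical example: upgrade the mip script while pinning a specific tag.
# --force-install-unstable is required whenever a non-default version is requested.
sudo mip-deployment/mip --version 7.0 --force-install-unstable --self --force install
```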
- -#### Cleanup -``` -rm -rf mip-deployment -``` - -### Installing the new *MIP* version -``` -sudo mip --node-type wk install -``` -Again, as explained in the [**workers** preparation](PreparingWorkers.md), if you have to install a specific version, use the documented parameters, and don't hesitate to call *mip --help*. - -## Redeploying -### Detach the *tmux* session -``` -CTRL+j d -``` - -### Regenerate the *tmux* session -``` -mip --pusher --federation --force tmux -``` - -### Stop the services -In the *tmux* session (opened as **mipadmin** user), in the **pusher** window (#0) -``` -mip --pusher --federation service stop -``` - -### Deploy the services -Still in the window #0 -``` -mip --pusher --federation service deploy -``` diff --git a/Federation/docker-compose.yml b/Federation/docker-compose.yml deleted file mode 100644 index a0376ed8..00000000 --- a/Federation/docker-compose.yml +++ /dev/null @@ -1,94 +0,0 @@ -version: '3.7' - -services: - portalbackend_db: - image: postgres:11.3-alpine - volumes: - - ./.stored_data/portalbackenddb:/var/lib/postgresql/data - hostname: portalbackend_db - environment: - POSTGRES_PASSWORD: test - command: -p 5433 - expose: - - 5433 - restart: unless-stopped - - create_dbs: - image: hbpmip/create-databases:1.1.0 - depends_on: - - portalbackend_db - environment: - DB_HOST: portalbackend_db - DB_PORT: 5433 - DB_ADMIN_USER: postgres - DB_ADMIN_PASSWORD: test - DB4: portal - USER4: portal - PASSWORD4: portalpwd - restart: on-failure - - portalbackend: - image: hbpmip/portal-backend:${PORTALBACKEND} - ports: - - '8080:8080' - - '8089:8089' - environment: - ### API ### - LOG_LEVEL: INFO - LOG_LEVEL_FRAMEWORK: INFO - AUTHENTICATION: ${KEYCLOAK_AUTHENTICATION} - ### Database ### - PORTAL_DB_URL: jdbc:postgresql://portalbackend_db:5433/portal - PORTAL_DB_SERVER: portalbackend_db:5433 - PORTAL_DB_USER: portal - PORTAL_DB_PASSWORD: portalpwd - ### Exareme2 ### - EXAREME2_URL: ${EXAREME2_URL} - ### Exareme ### - EXAREME_URL: http://${EXAREME_IP}:9090 - ### Keycloak ### - KEYCLOAK_AUTH_URL: ${KEYCLOAK_PROTOCOL}://${KEYCLOAK_URL}/auth/ - KEYCLOAK_REALM: ${KEYCLOAK_REALM} - KEYCLOAK_CLIENT_ID: ${KEYCLOAK_CLIENT_ID} - KEYCLOAK_CLIENT_SECRET: ${KEYCLOAK_CLIENT_SECRET} - KEYCLOAK_SSL_REQUIRED: ${KEYCLOAK_SSL_REQUIRED} - depends_on: - - create_dbs - volumes: - - ./config:/opt/portal/api - - ./logs:/opt/portal/logs - restart: unless-stopped - - gateway: - image: hbpmip/gateway:${GATEWAY} - environment: - ENGINE_TYPE: exareme - ENGINE_BASE_URL: http://portalbackend:8080/services/ - GATEWAY_PORT: 8081 - ports: - - '8081:8081' - depends_on: - - portalbackend - restart: unless-stopped - - frontend: - image: hbpmip/portal-frontend:${FRONTEND} - depends_on: - - gateway - ports: - - '80:80' - - '443:443' - volumes: - - ./.stored_data/caddy/caddy_data:/data - environment: - ERROR_LOG_LEVEL: info - PORTAL_BACKEND_SERVER: portalbackend:8080 - PORTAL_BACKEND_CONTEXT: services - GATEWAY_SERVER: gateway:8081 - INSTANCE_NAME: 'MIP ${MIP}' - VERSION: 'Frontend: ${FRONTEND}, Gateway: ${GATEWAY}, Backend: ${PORTALBACKEND}, Exareme: ${EXAREME}' - TRACKER_ID: UA-80660232-5 - DATACATALOGUE_SERVER: ${DATACATALOGUE_PROTOCOL}://${DATACATALOGUE_HOST} - PUBLIC_MIP_HOST: ${PUBLIC_MIP_HOST} - PUBLIC_MIP_PROTOCOL: ${PUBLIC_MIP_PROTOCOL} - restart: unless-stopped
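The image tags and most settings above come from environment variables: the MIP configuration variables are written to the *.mipenv* file by *mip configure*, and the version tags presumably come from the *.versions_env* file mentioned earlier. If you ever edit this compose file by hand, a quick check of the syntax and of the variable substitution can be done with the sketch below (assuming *docker-compose* is available on the **ui** node and that the variables are set in the current environment):
```
# Render the compose file with variables substituted; unset variables are reported as warnings.
docker-compose -f docker-compose.yml config
```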