diff --git a/README.md b/README.md
index c6359ec..53926b5 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,7 @@
![](doc/TRUE_Connector_Logo.png?raw=true)
+
+ Version 1.0.6
+
diff --git a/TRUE Connector v1.postman_collection.json b/TRUE Connector v1.postman_collection.json
index fcb75e9..70b7ad0 100644
--- a/TRUE Connector v1.postman_collection.json
+++ b/TRUE Connector v1.postman_collection.json
@@ -1072,7 +1072,7 @@
},
{
"key": "password",
- "value": "password",
+ "value": "passwordIdsUser",
"type": "string"
}
]
@@ -1268,7 +1268,7 @@
},
{
"key": "password",
- "value": "password",
+ "value": "passwordApiUser",
"type": "string"
}
]
@@ -1440,7 +1440,7 @@
},
{
"key": "password",
- "value": "password",
+ "value": "passwordIdsUser",
"type": "string"
}
]
@@ -1703,7 +1703,7 @@
},
{
"key": "password",
- "value": "password",
+ "value": "passwordIdsUser",
"type": "string"
}
]
@@ -1812,7 +1812,7 @@
},
{
"key": "password",
- "value": "password",
+ "value": "passwordIdsUser",
"type": "string"
}
]
@@ -1849,7 +1849,7 @@
},
{
"key": "password",
- "value": "password",
+ "value": "passwordApiUser",
"type": "string"
}
]
diff --git a/be-dataapp_resources/application-docker.properties b/be-dataapp_resources/application-docker.properties
index 98fa8f4..497637f 100644
--- a/be-dataapp_resources/application-docker.properties
+++ b/be-dataapp_resources/application-docker.properties
@@ -63,9 +63,4 @@ application.contract.negotiation.demo=true
spring.servlet.multipart.max-request-size=200MB
spring.servlet.multipart.max-file-size=200MB
# Setting max size of post requests to 6MB (default: 2MB)
-server.tomcat.max-http-post-size=200291456
-
-information.model.version=4.2.7
-
-# encoded 'password'
-application.security.password=$2a$10$MQ5grDaIqDpBjMlG78PFduv.AMRe9cs0CNm/V4cgUubrqdGTFCH3m
+server.tomcat.max-http-post-size=200291456
\ No newline at end of file
diff --git a/be-dataapp_resources/users.properties b/be-dataapp_resources/users.properties
new file mode 100644
index 0000000..700fb2a
--- /dev/null
+++ b/be-dataapp_resources/users.properties
@@ -0,0 +1,8 @@
+# List of users
+users.list=idsUser,bob
+
+# Credentials for each user
+# encoded - passwordIdsUser
+idsUser.password=$2a$12$54Rw0Bp/9yt5Zcj4gVkvnuVT9aeN36m4dzVMMLrPC0v78lAOQo9te
+# encoded - passwordBob
+bob.password=$2a$12$8ngZQYUF9pATTwNRmLiYeu6XGlLd79eb4FIgr5ezzuAA6tGLxuAyy
\ No newline at end of file
diff --git a/doc/TEST_API.md b/doc/TEST_API.md
index 9b5bf99..8a5f859 100644
--- a/doc/TEST_API.md
+++ b/doc/TEST_API.md
@@ -75,7 +75,7 @@ curl --location -k 'https://localhost:8090/about/version'
and expected response:
```
-1.14.6
+1.14.7
```
## Self Description API
diff --git a/doc/TRUEConnector/component-overview.md b/doc/TRUEConnector/component-overview.md
index f5d1217..ded812d 100644
--- a/doc/TRUEConnector/component-overview.md
+++ b/doc/TRUEConnector/component-overview.md
@@ -4,8 +4,8 @@ TRUE Connector is build using Java11, and use following libraries:
| Component | Version |
| --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- |
-| [Execution core container](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/releases/tag/1.14.6) | 1.14.6 |
-| [Basic data app](https://github.com/Engineering-Research-and-Development/true-connector-basic_data_app/releases/tag/0.3.7) | 0.3.7 |
+| [Execution core container](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/releases/tag/1.14.7) | 1.14.7 |
+| [Basic data app](https://github.com/Engineering-Research-and-Development/true-connector-basic_data_app/releases/tag/0.3.8) | 0.3.8 |
| [Usage control app](https://github.com/Engineering-Research-and-Development/true-connector-uc_data_app_platoon/releases/tag/1.7.8) | 1.7.8 |
| [Pip](https://github.com/Engineering-Research-and-Development/true-connector-uc_data_app_platoon/tree/1.7.8/Docker_Tecnalia_DataUsage/pip) | 1.0.0 |
| [Multipart Message Library](https://github.com/Engineering-Research-and-Development/true-connector-multipart_message_library/releases/tag/1.0.17) | 1.0.17 |
diff --git a/doc/TRUEConnector/prerequisite.md b/doc/TRUEConnector/prerequisite.md
index 5f06e80..4656b3f 100644
--- a/doc/TRUEConnector/prerequisite.md
+++ b/doc/TRUEConnector/prerequisite.md
@@ -11,25 +11,130 @@ To have secure and certification compliant environment, following prerequisites
* The host OS should be audited and secure; OS should be as minimal as possible and it should be preferably used to host our Docker exclusively. There should not coexist other services like web servers or web applications so that attacker could not exploit it or lead to potential exploit (minimal threat attack surface).
* Monitoring mechanism (Linux auditd service for example) should be installed and configured as prerequisite before deploying connector. This will capture if someone tries to make changes on property files used by the connector.
* make sure to create rules to monitor folders and property files of the TRUE Connector (for example auditctl -w /xxxx/TRUEConnector/* -k trueconnector, depending on the location where TRUE Connector is deployed)
-* make sure to create rules for monitoring docker service (dockerd, /run/containerc, /var/lib/docker, /etc/docker, docker.service...) This might differ based on OS distribution
-* rules for auditing should be persisted (/etc/audit/audit.d/rules/audit.rules file, depending on the OS distribution, location might differ)
-* make sure to create rules for mounted docker volumes (to be able to keep track of changes made over files present in those volumes)
-
-
+* Make sure to create rules for monitoring docker service (dockerd, /run/containerc, /var/lib/docker, /etc/docker, docker.service...) This might differ based on OS distribution
+* Rules for auditing should be persisted (/etc/audit/audit.d/rules/audit.rules file, depending on the OS distribution, location might differ)
+* Make sure to create rules for mounted docker volumes (to be able to keep track of changes made over files present in those volumes)
+* Make sure to create scripts to monitor storage capacity in order to send a notification when the OS is reaching its assigned storage capacity. Also use cron to repeat those scripts at the desired time interval. One example of how to write such a script and set up a cron job to automate it can be found [here](https://tecadmin.net/shell-script-to-check-disk-space-and-send-alert/)
* User responsible for setting up environment where connector will run should isolate or disable other services.
-* OS user for running docker should not be root user; be sure to create new user, assign new user to docker group, that user can run docker compose. How to manage OS users you can find [here.](../advancedConfiguration/manage-os-users.md)
-* disable password login to the server for newly created user and allow only key-based authentication for accessing the server where connector will run
-* disable access for the root user by using a password when connecting to the server via ssh (key-based auth only)
-* in case of adding some additional, more configurable and robust firewall, be sure to restrict access to the /api/* endpoints to only internal network, since those endpoints should not be exposed to the outside world, but intended to be used by "internal" user, to make modifications to the self description document.
+* If there is a need to create a new (SSH/OS) user other than admin (root) to run docker — which is not recommended — be sure that the new SSH/OS user is not the root one: create a new user and assign the new user to the docker group so that the user can run docker compose. How to manage OS users you can find [here.](../advancedConfiguration/manage-os-users.md)
+* If there is a need to create a new user who would just inspect the TC logs via SSH access, follow the rest of the advice in this document, and then set up a cron job for copying logs from docker volumes to a read-only folder on the OS filesystem, which can be found [here](../advancedConfiguration/tc-logs-copying.md)
+* Disable password login to the server for newly created user and allow only key-based authentication for accessing the server where connector will run
+* Disable access for the root user by using a password when connecting to the server via ssh (key-based auth only)
+* In case of adding some additional, more configurable and robust firewall, be sure to restrict access to the /api/* endpoints to only internal network, since those endpoints should not be exposed to the outside world, but intended to be used by "internal" user, to make modifications to the self description document.
* 2 types of certificate are required: DAPS and TLS.
DAPS certificate should be obtained from Certified Authority responsible for the Dataspace, while TLS certificate can be self signed or signed by some CA. More information about TLS certificate can be found [here](../security.md).
+## Secure SSH server access
+
+It is good practice to prevent any user that does not need access from logging in via SSH.
+The ssh daemon provides configuration to only allow logins from users that are a member of a specific group.
+
+```
+ groupadd ssh_login_group
+```
+
+With this new group created the users who need to access the server via ssh need to be added to this group:
+
+```
+ usermod username -a -G ssh_login_group
+```
+
+With the users added to the group, the ssh daemon needs to be configured.
+Open the config file /etc/ssh/sshd_config with your favorite editor and add these lines:
+
+```
+PermitRootLogin no
+AllowGroups ssh_login_group
+
+```
+
+### Disable password login
+
+You probably want to disable password login to avoid someone logging in from somewhere else than the hosts you have configured in the authorized_keys file. To disable password authentication globally for all users, the following 3 settings need to be changed to "no" in the /etc/ssh/sshd_config file:
+
+```
+ChallengeResponseAuthentication no
+PasswordAuthentication no
+UsePAM no
+AllowUsers sshUser1 sshUser2...
+```
+
+### Create SSH public and private keys for user accessing host machine
+
+As a first step, a key-pair needs to be created. This is usually done on the server.
+With the following command a new key-pair is created.
+
+```
+ssh-keygen -t rsa -b 4096 -f ~/.ssh/desktop_key-rsa
+```
+
+In order to create the key, you will be asked for a password. This is the password for your key. It is recommended and considered best practice (and also security relevant) to enter a passphrase. It will be used as a security step, preventing the usage of a stolen or lost private key. The result of this command should be two files: the file "\~/.ssh/desktop_key-rsa", which is the private-key file, and the file "~/.ssh/desktop_key-rsa.pub", which contains your public key.
+This public-key and private-key will be securely transferred to the client. This means that keys are transferred to the client machine without exposing the content of the file, following best practices for delivering files containing sensitive data, such as a password-protected zip archive, uploading to some secure storage and providing a link to the responsible user, an admin approaching the client and copying the key file from a USB stick, or whatever is applicable and most suitable for the company.
+
+The public-key needs to be added to the authorized keys. To make sure we do not overwrite any already configured authorized key,
+we append the public-key to the authorized_keys file. If the file does not yet exist, it will be created automatically:
+
+```
+cat desktop_key-rsa.pub >>~/.ssh/authorized_keys
+```
+
+### Authenticate with SSH key
+
+The format of the "authorized_keys" file is as follows:
+
+**options keytype base64-encoded-key comment**
+
+options can contain filters; a typical authorized_keys entry looks like:
+
+```
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA...etc...mnMo7n1DD username
+```
+
+This will enforce that the user can log in to the server using key authentication.
+
+Once public-key is uploaded to authorized_keys, user requesting access to the server should securely receive public and private key.
+User can verify that connection with following:
+
+```
+ssh username@server.example.com -v -i .ssh/desktop_key-rsa
+```
+
+Note: the username used in the command to connect to the server needs to be added to the sshd daemon configuration file, and the passphrase for that key needs to be entered when prompted.
+
+```
+AllowUsers sshUser1 sshUser2...
+```
+
+
+### Periodic SSH Key Update Procedure
+
+For maintaining security, administrators should perform a periodic refresh of SSH keys. This process should be conducted at least every three months (or even more frequently if the security policy requires it). During each update cycle, the administrator is responsible for generating new SSH keys for all end users and ensuring the invalidation of previous keys. This practice ensures that any potential security risks associated with compromised or outdated keys are mitigated.
+
+To facilitate this process, the following steps should be diligently followed:
+
+* Generate New SSH Keys: Admins should create new SSH key pairs for each user.
+
+* Distribute New Keys Securely: Once new keys are generated, they should be securely transferred to the end users.
+
+* Update the authorized_keys File: The new public keys must be added to the authorized_keys file on the server, replacing the old keys.
+
+* Invalidate Old Keys: Remove the old public keys from the authorized_keys file to ensure they can no longer be used for server access.
+
+* Communicate with Users: Inform all users about the key update and provide them with instructions on how to use the new keys for server access.
+
+* Document the Changes: Keep a log of key updates and user assignments for administrative and security auditing purposes.
+
+* Review and Test: After updating, conduct a thorough review and testing to ensure that only the new keys are operational and that server access is functioning as expected with the updated keys.
+
+
+By regularly updating SSH keys every three months, administrators will enhance the security of server access, making sure these keys effectively protect against unauthorized entry.
+
## Post configuration steps
-Once TRUE Connector is successfully configured and is up and running, responsible user for setting up environment and configuring connector should generate new passwords for 2 type of users required for operating with connector. More information how to do this can be found [here](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/SECURITY.md#change-default-password).
+Once TRUE Connector is successfully configured and is up and running, responsible user for setting up environment and configuring connector should generate new passwords for 2 type of users required for operating with connector. More information how to do this can be found [here](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/doc/SECURITY.md#change-default-password).
Make sure to update following properties to address your usecase:
diff --git a/doc/TRUEConnector/start-stop.md b/doc/TRUEConnector/start-stop.md
index 0716648..22c6415 100644
--- a/doc/TRUEConnector/start-stop.md
+++ b/doc/TRUEConnector/start-stop.md
@@ -89,10 +89,10 @@ You can also check using _docker ps_ command to verify that containers are up an
```
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
-bc693e1fdb90 rdlabengpa/ids_execution_core_container:1.14.6 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 0.0.0.0:8087->8086/tcp, :::8087->8086/tcp, 0.0.0.0:8091->8449/tcp, :::8091->8449/tcp, 0.0.0.0:8890->8889/tcp, :::8890->8889/tcp ecc-consumer
-28dc87213f68 rdlabengpa/ids_be_data_app:0.3.7 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 0.0.0.0:8184->8183/tcp, :::8184->8183/tcp, 0.0.0.0:9001->9000/tcp, :::9001->9000/tcp be-dataapp-consumer
-9eb157ceb37b rdlabengpa/ids_be_data_app:0.3.7 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 0.0.0.0:8183->8183/tcp, :::8183->8183/tcp, 0.0.0.0:9000->9000/tcp, :::9000->9000/tcp be-dataapp-provider
-44bc21187460 rdlabengpa/ids_execution_core_container:1.14.6 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 0.0.0.0:8086->8086/tcp, :::8086->8086/tcp, 0.0.0.0:8889->8889/tcp, :::8889->8889/tcp, 0.0.0.0:8090->8449/tcp, :::8090->8449/tcp ecc-provider
+bc693e1fdb90 rdlabengpa/ids_execution_core_container:1.14.7 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 0.0.0.0:8087->8086/tcp, :::8087->8086/tcp, 0.0.0.0:8091->8449/tcp, :::8091->8449/tcp, 0.0.0.0:8890->8889/tcp, :::8890->8889/tcp ecc-consumer
+28dc87213f68 rdlabengpa/ids_be_data_app:0.3.8 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 0.0.0.0:8184->8183/tcp, :::8184->8183/tcp, 0.0.0.0:9001->9000/tcp, :::9001->9000/tcp be-dataapp-consumer
+9eb157ceb37b rdlabengpa/ids_be_data_app:0.3.8 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 0.0.0.0:8183->8183/tcp, :::8183->8183/tcp, 0.0.0.0:9000->9000/tcp, :::9000->9000/tcp be-dataapp-provider
+44bc21187460 rdlabengpa/ids_execution_core_container:1.14.7 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 0.0.0.0:8086->8086/tcp, :::8086->8086/tcp, 0.0.0.0:8889->8889/tcp, :::8889->8889/tcp, 0.0.0.0:8090->8449/tcp, :::8090->8449/tcp ecc-provider
b3f4cdb77ed6 rdlabengpa/ids_uc_data_app_platoon:1.7.8 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 8080/tcp uc-dataapp-consumer
a36748901ce1 rdlabengpa/ids_uc_data_app_platoon_pip:v1.0.0 "java -jar pip.jar" 3 hours ago Up 3 hours 0/tcp uc-dataapp-pip-provider
d6f77ad9762d rdlabengpa/ids_uc_data_app_platoon:1.7.8 "/bin/sh -c 'java -j…" 3 hours ago Up 3 hours (healthy) 8080/tcp uc-dataapp-provider
diff --git a/doc/advancedConfiguration/auditlogs.md b/doc/advancedConfiguration/auditlogs.md
index 76ca442..77a4ac5 100644
--- a/doc/advancedConfiguration/auditlogs.md
+++ b/doc/advancedConfiguration/auditlogs.md
@@ -1,6 +1,6 @@
### Audit logs
-Audit logging is turned **off** by default. If you wish to configure it or even turn off please follow this [document](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/AUDIT.md) .
+Audit logging is turned **off** by default. If you wish to configure it or turn it on, please follow this [document](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/doc/AUDIT.md).
## Accessing audit logs
diff --git a/doc/advancedConfiguration/broker.md b/doc/advancedConfiguration/broker.md
index 9aa6d6a..32422ac 100644
--- a/doc/advancedConfiguration/broker.md
+++ b/doc/advancedConfiguration/broker.md
@@ -13,4 +13,4 @@ TRUE Connector can register itself on startup, and also unregister when shutting
application.selfdescription.registrateOnStartup=true
```
-Information on how TRUE Connector can interact with Broker, can be found on following [link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/BROKER.md)
+Information on how TRUE Connector can interact with Broker, can be found on following [link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/doc/BROKER.md)
diff --git a/doc/advancedConfiguration/extendedjwt.md b/doc/advancedConfiguration/extendedjwt.md
index 68b77ef..3f9d1ae 100644
--- a/doc/advancedConfiguration/extendedjwt.md
+++ b/doc/advancedConfiguration/extendedjwt.md
@@ -1,3 +1,3 @@
### Extended jwt validation
-TRUE Connector can check additional claims from jwToken. For more information please check the [following link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/TRANSPORTCERTSSHA256.md)
+TRUE Connector can check additional claims from jwToken. For more information please check the [following link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/doc/TRANSPORTCERTSSHA256.md)
diff --git a/doc/advancedConfiguration/manage-os-users.md b/doc/advancedConfiguration/manage-os-users.md
index a78d8d1..f2b890f 100644
--- a/doc/advancedConfiguration/manage-os-users.md
+++ b/doc/advancedConfiguration/manage-os-users.md
@@ -1,5 +1,7 @@
# Managing OS Roles and Permissions
+***NOTE:*** General advice is to have only one admin (root) user per OS, in case you have specific need for additional user, proceed with the next steps.
+
### Requirements
- A Linux system (e.g., Debian, Ubuntu, CentOS)
- Sudo or root access
diff --git a/doc/advancedConfiguration/tc-logs-copying.md b/doc/advancedConfiguration/tc-logs-copying.md
new file mode 100644
index 0000000..f6fe0af
--- /dev/null
+++ b/doc/advancedConfiguration/tc-logs-copying.md
@@ -0,0 +1,32 @@
+## TC logs copying from docker volumes to read-only folder
+
+***IMPORTANT:*** The operation described in this document can only be done by the **administrator (root)** user.
+
+If there is a need to create an additional user with SSH access to view TC logs, this can be achieved using the [tc-logs-copying.sh](./tc-logs-copying.sh) script provided. Once the script is downloaded, before running it, make sure the script is executable.
+
+Make sure to replace the value of `DEST_DIR` (`/path/to/actual/folder/tc_logs`) with the actual path where you want to copy TC logs.
+
+This script is designed to copy logs from Docker volumes to a designated folder on the filesystem and make it read-only.
+
+
+### Setting Up a Cron Job
+
+To automate the log copying process, a cron job that will run the tc-logs-copying.sh script at a specified time daily can be configured. Here's how to create a cron job to run the script at 00:10 every day, copying the logs from the previous day:
+
+1. Open your terminal and edit the crontab configuration for your user by running:
+
+```
+crontab -e
+```
+
+2. Add the following line to the crontab file to schedule the script to run at 00:10 daily:
+
+```
+10 0 * * * /bin/bash /path/to/tc-logs-copying.sh
+```
+
+Make sure to replace `/path/to/tc-logs-copying.sh` with the actual path to your `tc-logs-copying.sh` script.
+
+3. Save the crontab file and exit the text editor.
+
+Now, the `tc-logs-copying.sh` script will be executed automatically every day at 00:10, copying the TC logs to the specified destination folder on the filesystem.
diff --git a/doc/advancedConfiguration/tc-logs-copying.sh b/doc/advancedConfiguration/tc-logs-copying.sh
new file mode 100755
index 0000000..1ba97e6
--- /dev/null
+++ b/doc/advancedConfiguration/tc-logs-copying.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# Directory where logs will be copied on the filesystem
+DEST_DIR="/path/to/actual/folder/tc_logs"
+mkdir -p "$DEST_DIR"
+
+# Yesterday's date in yyyy-MM-dd format
+YESTERDAY=$(date -d "yesterday" +%Y-%m-%d)
+echo "Searching for logs from $YESTERDAY"
+
+# Array of container names and name of their folder where logs are stored
+declare -A containers
+containers=(
+ ["uc-dataapp-consumer"]="ucapp"
+ ["uc-dataapp-provider"]="ucapp"
+ ["ecc-consumer"]="ecc"
+ ["ecc-provider"]="ecc"
+ ["be-dataapp-consumer"]="dataapp"
+ ["be-dataapp-provider"]="dataapp"
+)
+
+# Loop through each container
+for container in "${!containers[@]}"; do
+
+ # Get name of subfolder from array of containers
+ subfolder=${containers[$container]}
+
+ # Directory where logs are stored in docker volumes
+ SRC_DIR="/home/nobody/data/log/$subfolder"
+
+    # Pattern for searching any file with yesterday's date
+ FILE_PATTERN="*${YESTERDAY}*.gz"
+
+ # Find .gz files with yesterday's date and any index in the name
+ FILE_LIST=$(docker exec "$container" find "$SRC_DIR" -name "$FILE_PATTERN")
+
+ # Check if files were found
+ if [ -z "$FILE_LIST" ]; then
+ echo "No log files found for $container on $YESTERDAY"
+ else
+ echo "Copying log files for $container from $YESTERDAY"
+ for file in $FILE_LIST; do
+ # Get the basename of the file
+ base_filename="$(basename "$file")"
+
+ if [[ "$container" == "be-dataapp-consumer" || "$container" == "be-dataapp-provider" ]]; then
+ base_filename="${base_filename/dataapp/$container}"
+ fi
+
+ # Construct the destination file path
+ dest_file="$DEST_DIR/$base_filename"
+
+ # Copy the file to the host
+ docker cp "$container":"$file" "$dest_file"
+
+ # Set the copied file to read-only
+ chmod a-w "$dest_file"
+ done
+ fi
+done
\ No newline at end of file
diff --git a/doc/backupAndUpdate/backup.md b/doc/backupAndUpdate/backup.md
index 343e081..6f1f105 100644
--- a/doc/backupAndUpdate/backup.md
+++ b/doc/backupAndUpdate/backup.md
@@ -1,39 +1,67 @@
# Backup & restore data
-To backup and restore data we will be using the official Docker documentation as a reference, found [here](https://docs.docker.com/storage/volumes/#back-up-restore-or-migrate-data-volumes).
+To backup and restore data TRUE Connector offers two scripts: [`backup_script.sh`](./backup_script.sh) for creating backups and [`restore_script.sh`](./restore_script.sh) for restoring from backups.
-## Backup
+Before using these scripts, please ensure that they are executable. If they are not executable, you can make them executable using the following terminal command:
-In order to backup your data you can use the following code snippet:
-
-```
-docker run --rm --volumes-from ecc-consumer -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /home/nobody/data/log
+```bash
+chmod +x backup_script.sh restore_script.sh
```
-Let me explain the options:
+## `backup_script.sh`
- - ecc-consumer - container with the data that you want to backup
- - $(pwd) - current directory in the command prompt (use %cd% on Windows); the directory where you want the backup to be located
- - /backup.tar - name of the backup archive
- - /home/nobody/data/log - directory or file from the container that you wish to backup
-
-After the process finishes you will find a .tar file with the data.
+### Description
-## Restore
+`backup_script.sh` is a script for creating backups of TRUE Connector docker containers. To use it, follow these steps:
-Restoring the data is done in a similar way:
+1. **Specify Destination Directory**: Open the script and change the `DEST_DIR` variable to specify the destination directory where backup files will be stored on the filesystem.
-```
-docker run --rm --volumes-from ecc-consumer -v $(pwd):/backup ubuntu bash -c "cd /home && tar xvf /backup/backup.tar --strip 1"
-```
-
- The options are:
-
- - ecc-consumer - container where you want to restore the data
- - $(pwd) - current directory in the command prompt (use %cd% on Windows); the directory where the backup is located
- - /home - directory where the data will be restored
- - /backup.tar - name of the backup archive
-
- **NOTE**
-
- If the backup hierarchy looks like this */home/nobody/data/log* and you wish to keep it when restoring then you have to point the root folder in the command */home*, as it was done in the commands from above.
\ No newline at end of file
+2. **Run the Script**: Execute the script, and it will perform the following steps:
+ - Iterate through an array with the names of all containers.
+ - Create backup files for each container.
+ - Copy the backup files to the specified destination directory.
+ - Delete the temporary backup files on the Docker volume.
+
+
+## `restore_script.sh`
+
+### Description
+
+`restore_script.sh` is a script for restoring TRUE Connector Docker containers from backups. To use it, follow these steps:
+
+1. **Specify Docker Compose Path**: Open the script and change the `DOCKER_COMPOSE_PATH` variable to specify the path to your Docker Compose file.
+
+2. **Specify Backup Folder**: Change the `BCKP_DIR` variable to specify the folder where backup files are located.
+
+3. **Specify Desired Date for backup**: Set the `DATE` variable to the date of the backup you want to restore in format `YYYY-MM-DD`
+
+4. **Run the Script**: Execute the script, and it will perform the following steps:
+ - Stop all containers defined in your Docker Compose file.
+ - Iterate through an array of containers, checking if backup files exist for the specified date.
+ - If backup files exist, it will delete the previous content in each container's volume.
+ - Copy the data from the backup files into the respective container volumes.
+ - Start all containers again.
+
+## Setting Up a Cron Job for Daily Backups
+
+To automate the backup process and run the `backup_script.sh` daily at 00:02, you can set up a cron job. Here's how to do it:
+
+1. Open your terminal.
+
+2. Edit your crontab file by running:
+
+ ```bash
+ crontab -e
+ ```
+
+3. Add the following line to schedule the backup script to run daily at 00:02:
+
+ ```bash
+ 2 0 * * * /path/to/backup_script.sh
+ ```
+
+ Replace `/path/to/backup_script.sh` with the actual path to your `backup_script.sh` file.
+
+4. Save and exit the crontab editor.
+
+This cron job will execute the backup script daily at 00:02, ensuring that your Docker container backups are automatically created at the specified time.
diff --git a/doc/backupAndUpdate/backup_script.sh b/doc/backupAndUpdate/backup_script.sh
new file mode 100755
index 0000000..2b28e05
--- /dev/null
+++ b/doc/backupAndUpdate/backup_script.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+echo "Starting backup script..."
+echo "________________________________________________________"
+# Directory where the backup will be copied on the filesystem
+DEST_DIR="/path/to/backup/folder"
+mkdir -p "$DEST_DIR"
+
+# Today's date in yyyy-MM-dd format
+TODAY=$(date +%F)
+
+# Array of container names
+containers=(
+ "uc-dataapp-consumer"
+ "uc-dataapp-provider"
+ "ecc-consumer"
+ "ecc-provider"
+ "be-dataapp-consumer"
+ "be-dataapp-provider"
+)
+
+# Loop through each container
+for container in "${containers[@]}"; do
+ echo "Backing up $container for date: $TODAY..."
+
+ # Create backup inside the container
+ docker exec "$container" tar cvf /tmp/"$container-$TODAY-backup.tar" /home/nobody/data
+
+ # Copy backup from the container to the host
+ docker cp "$container":/tmp/"$container-$TODAY-backup.tar" "$DEST_DIR"
+
+ if [ $? -eq 0 ]; then
+ # If copy is successful, delete the temporary backup file inside the container
+ docker exec "$container" rm /tmp/"$container-$TODAY-backup.tar"
+ echo "Backup done for $container"
+ else
+ echo "Backup failed for $container"
+ fi
+ echo "________________________________________________________"
+done
diff --git a/doc/backupAndUpdate/restore_script.sh b/doc/backupAndUpdate/restore_script.sh
new file mode 100755
index 0000000..b88543a
--- /dev/null
+++ b/doc/backupAndUpdate/restore_script.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+echo "Starting restore script..."
+echo "____________________________________________________________________"
+
+# Path to the directory containing the Docker Compose file
+DOCKER_COMPOSE_PATH="/path/to/folder/where/compose/is"
+
+# Directory where the backups are stored on the filesystem
+BCKP_DIR="/path/to/backup/folder"
+
+# Date of desired backup for restore
+DATE="YYYY-MM-DD"
+
+# Array of container names and their associated volumes
+declare -A containers
+containers=(
+ ["uc-dataapp-consumer"]="trueconnector_uc_consumer_data"
+ ["uc-dataapp-provider"]="trueconnector_uc_provider_data"
+ ["ecc-consumer"]="trueconnector_ecc_consumer_data"
+ ["ecc-provider"]="trueconnector_ecc_provider_data"
+ ["be-dataapp-consumer"]="trueconnector_be_dataapp_consumer_data"
+ ["be-dataapp-provider"]="trueconnector_be_dataapp_provider_data"
+)
+
+# Go to the Docker Compose directory
+cd "$DOCKER_COMPOSE_PATH" || { echo "Failed to navigate to DOCKER_COMPOSE_PATH"; exit 1; }
+
+# Stop all containers managed by Docker Compose
+echo "Stopping all Docker Compose containers..."
+docker-compose down || { echo "Failed to stop containers"; exit 1; }
+
+# Polling for containers to be completely stopped
+echo "Waiting for all containers to stop..."
+while docker-compose ps | grep -E "(Up|Starting|Exiting)" > /dev/null; do
+ sleep 5
+ echo "Waiting for all containers to stop..."
+done
+
+echo "All containers are stopped."
+echo "____________________________________________________________________"
+
+# Flag to track overall success
+all_success=true
+
+# Loop through each container
+for container in "${!containers[@]}"; do
+ volume=${containers[$container]}
+ backup_file="$BCKP_DIR/$container-$DATE-backup.tar"
+
+ # Check if the backup file exists
+ if [ ! -f "$backup_file" ]; then
+ echo "Backup file $backup_file not found for $container"
+ all_success=false
+ continue
+ fi
+
+ echo "Restoring $container for date: $DATE..."
+ echo "____________________________________________________________________"
+
+ # Clear the target directory
+ docker run --rm -v "$volume":/home ubuntu bash -c "rm -rf /home/*"
+
+ # Change ownership of the backup files on the host
+ sudo chown -R nobody:nogroup "$backup_file"
+
+ # Extracting the backup
+ docker run --rm -v "$volume":/home -v "$BCKP_DIR":/backup ubuntu bash -c "cd /home && tar xvf /backup/$(basename "$backup_file") --strip 3"
+ TAR_EXIT_STATUS=$?
+
+ # Change ownership of the files to nobody:nogroup
+ docker run --rm -v "$volume":/home ubuntu chown -R nobody:nogroup /home
+
+ # Check for tar command success
+ if [ $TAR_EXIT_STATUS -ne 0 ]; then
+ echo "Restore failed for $container"
+ echo "____________________________________________________________________"
+ all_success=false
+ continue
+ fi
+
+ echo "Restore done for $container"
+ echo "____________________________________________________________________"
+done
+
+# Start Docker Compose if all restores were successful
+if [ "$all_success" = true ]; then
+ echo "All restores were successful, starting Docker Compose..."
+ echo "____________________________________________________________________"
+ docker-compose up -d
+else
+ echo "Some restores failed, not starting Docker Compose."
+ echo "____________________________________________________________________"
+fi
diff --git a/doc/contributingTC.md b/doc/contributingTC.md
index 6bde7cf..626ee6e 100644
--- a/doc/contributingTC.md
+++ b/doc/contributingTC.md
@@ -7,6 +7,10 @@ welcome to contribute to this project.
We document changes in the CHANGELOG.md on root level in each project.
+* ECC - [CHANGELOG.md](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/CHANGELOG.md)
+* DataApp - [CHANGELOG.md](https://github.com/Engineering-Research-and-Development/true-connector-basic_data_app/blob/0.3.8/CHANGELOG.md)
+* UCDataApp - [CHANGELOG.md](https://github.com/Engineering-Research-and-Development/true-connector-uc_data_app_platoon/blob/1.7.8/CHANGELOG.md)
+
## Issues
You always have to create an issue if you want to integrate a bugfix, improvement, or feature. Briefly and clearly describe the purpose of your contribution in the corresponding issue. You can send email to the [TRUE Connector team](mailto:trueconnector-team@eng.it) before creating an issue, if unclear in which project to create an issue.
@@ -18,4 +22,4 @@ should at least include the following information:
* Steps to reproduce (system specs included)
* Relevant logs and/or media (optional): e.g. an image
-For more details about branches, naming conventions and some suggestions, take a look at following [Developer instructions](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/tree/1.14.6#developer-guide-section)
\ No newline at end of file
+For more details about branches, naming conventions and some suggestions, take a look at following [Developer instructions](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/tree/1.14.7#developer-guide-section)
\ No newline at end of file
diff --git a/doc/cosign.md b/doc/cosign.md
index 9241032..1f326be 100644
--- a/doc/cosign.md
+++ b/doc/cosign.md
@@ -4,9 +4,9 @@ Docker images that are part of the TRUE Connector are signed using [cosign](http
Signed images starts with following versions:
-**rdlabengpa/ids\_execution\_core\_container:v1.14.6**\
+**rdlabengpa/ids\_execution\_core\_container:v1.14.7**\
-**rdlabengpa/ids\_be\_data\_app:v0.3.7**\
+**rdlabengpa/ids\_be\_data\_app:v0.3.8**\
**rdlabengpa/ids\_uc\_data\_app\_platoon:v1.7.8**\
@@ -25,52 +25,52 @@ signature not found in transparency log
```
```
-cosign verify --insecure-ignore-tlog --key trueconn.pub rdlabengpa/ids_execution_core_container:v1.14.6
+cosign verify --insecure-ignore-tlog --key trueconn.pub rdlabengpa/ids_execution_core_container:v1.14.7
WARNING: Skipping tlog verification is an insecure practice that lacks of transparency and auditability verification for the signature.
-Verification for index.docker.io/rdlabengpa/ids_execution_core_container:v1.14.6 --
+Verification for index.docker.io/rdlabengpa/ids_execution_core_container:v1.14.7 --
The following checks were performed on each of these signatures:
- The cosign claims were validated
- The signatures were verified against the specified public key
[
{
- "critical": {
- "identity": {
- "docker-reference": "index.docker.io/rdlabengpa/ids_execution_core_container"
- },
- "image": {
- "docker-manifest-digest": "sha256:cb7f37253a0e9ef89726b5b7f220f05d5069b5007809d40fc477e1ac21e936e5"
- },
- "type": "cosign container image signature"
- },
- "optional": null
- }
+ "critical": {
+ "identity": {
+ "docker-reference": "index.docker.io/rdlabengpa/ids_execution_core_container"
+ },
+ "image": {
+ "docker-manifest-digest": "sha256:17cb1512a22448326e74df3ab4df3e82cdddcd7cbbb8a8e81cb177455586df7c"
+ },
+ "type": "cosign container image signature"
+ },
+ "optional": null
+ }
]
```
```
-cosign verify --insecure-ignore-tlog --key trueconn.pub rdlabengpa/ids_be_data_app:v0.3.7
+cosign verify --insecure-ignore-tlog --key trueconn.pub rdlabengpa/ids_be_data_app:v0.3.8
WARNING: Skipping tlog verification is an insecure practice that lacks of transparency and auditability verification for the signature.
-Verification for index.docker.io/rdlabengpa/ids_be_data_app:v0.3.7 --
+Verification for index.docker.io/rdlabengpa/ids_be_data_app:v0.3.8 --
The following checks were performed on each of these signatures:
- The cosign claims were validated
- The signatures were verified against the specified public key
[
{
- "critical": {
- "identity": {
- "docker-reference": "index.docker.io/rdlabengpa/ids_be_data_app"
- },
- "image": {
- "docker-manifest-digest": "sha256:04523e3a91568526c4a9db08fc643aa6d15e26c3c2ce5894836a44fb0f163886"
- },
- "type": "cosign container image signature"
- },
- "optional": null
- }
+ "critical": {
+ "identity": {
+ "docker-reference": "index.docker.io/rdlabengpa/ids_be_data_app"
+ },
+ "image": {
+ "docker-manifest-digest": "sha256:7eee63e0e63013b3dcb0080c7468a618fb9f0f09a337647b74d326de550dceb3"
+ },
+ "type": "cosign container image signature"
+ },
+ "optional": null
+ }
]
```
diff --git a/doc/exchange-data.md b/doc/exchange-data.md
index ec87790..2523bfe 100644
--- a/doc/exchange-data.md
+++ b/doc/exchange-data.md
@@ -28,7 +28,7 @@ _NOTE_: even that this curl command is exported from Postman, it is noticed seve
If this happens, please check body of the request in Postman, and if body is empty, simply copy everything enclosed between\
_--data-raw '_ and _'_
-For more details on request samples, please check following link [Backend DataApp Usage](https://github.com/Engineering-Research-and-Development/true-connector-basic_data_app/blob/0.3.7/README.md)
+For more details on request samples, please check following link [Backend DataApp Usage](https://github.com/Engineering-Research-and-Development/true-connector-basic_data_app/blob/0.3.8/README.md)
Be sure to use correct configuration/ports for sender and receiver Data App and Execution Core Container (check .env file).
diff --git a/doc/rest-api.md b/doc/rest-api.md
index de205e5..b5bf10a 100644
--- a/doc/rest-api.md
+++ b/doc/rest-api.md
@@ -2,4 +2,4 @@
Detailed description of API endpoints provided by TRUE Connector can be found in [link](rest\_api/REST\_API.md)
-Bare in mind that all endpoints of the TRUE Connector will require authorization. Please follow [this link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/SECURITY.md) to get more information about providing correct credentials for desired request/functionality.
\ No newline at end of file
+Bare in mind that all endpoints of the TRUE Connector will require authorization. Please follow [this link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/doc/SECURITY.md) to get more information about providing correct credentials for desired request/functionality.
\ No newline at end of file
diff --git a/doc/rest_api/REST_API.md b/doc/rest_api/REST_API.md
index b78e693..b300c32 100644
--- a/doc/rest_api/REST_API.md
+++ b/doc/rest_api/REST_API.md
@@ -3,7 +3,7 @@
The TRUE Connector will use two protocols (http and https) as described by the Docker Compose File.
Overview of all available endpoints:
-*NOTE* Endpoints are protected with credentials, for more details, please check [this link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/SECURITY.md)
+*NOTE* Endpoints are protected with credentials, for more details, please check [this link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/doc/SECURITY.md)
| **Method** | **Endpoint** | **Usage** | **Returns** |
| ---------- | ------------ | --------- | ----------- |
@@ -38,5 +38,5 @@ Representation CRUD operations:
![Resource representation](Resource_Representation_Swagger.jpg "Resource representation swagger API")
-On following [link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/SELF_DESCRIPTION.md), you can find more detailed explanation of endpoints, with example requests.
+On following [link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/doc/SELF_DESCRIPTION.md), you can find more detailed explanation of endpoints, with example requests.
diff --git a/doc/security.md b/doc/security.md
index 882d537..3aa810a 100644
--- a/doc/security.md
+++ b/doc/security.md
@@ -68,7 +68,7 @@ Once certificate is generated, following instruction from previous link, you can
TRUE Connector has several ways to check the integrity:
* [Docker cosing check](cosign.md)
- * [Healthcheck](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/HEALTHCHECK.md)
+ * [Healthcheck](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/doc/HEALTHCHECK.md)
* Verification of the components itself, that will check if current version of subcomponent is verified or not;
Each component (Execution Core Container, Basic DataApp and Platoon Usage Control) should on startup log somethign like following:
diff --git a/doc/selfDescriptionAPI/changepassword.md b/doc/selfDescriptionAPI/changepassword.md
index 5cc13ac..070f7eb 100644
--- a/doc/selfDescriptionAPI/changepassword.md
+++ b/doc/selfDescriptionAPI/changepassword.md
@@ -1,13 +1,10 @@
### Changing API password
-If you want to change password for API, this can be done via following endpoint
+If you want to change password for connector users, this can be done via following endpoint
```
/notification/password/{new_password}
```
+Using this endpoint, it is guaranteed that the password strength rules configured in the `application.properties` file will be enforced.
-Bare in mind that this endpoint is password protected, and you will have to provide existing credentials in order for TRUE Connector to generate new hash that matches with the value passed in URL. Once new hash is returned, you can modify property and set new password.
-
-```
-spring.security.user.password=
-```
+Bear in mind that this endpoint is password protected, and you will have to provide existing credentials in order for TRUE Connector to generate a new hash that matches the value, which you can later edit in the `users.properties` file.
diff --git a/doc/selfDescriptionAPI/self-description-API.md b/doc/selfDescriptionAPI/self-description-API.md
index db71415..bb4e339 100644
--- a/doc/selfDescriptionAPI/self-description-API.md
+++ b/doc/selfDescriptionAPI/self-description-API.md
@@ -1,6 +1,6 @@
## Self Description API
-To manage your Self Description Document please check following [link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/SELF_DESCRIPTION.md)
+To manage your Self Description Document please check following [link](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/doc/SELF_DESCRIPTION.md)
You can copy existing valid self-description.json document to following location **/ecc\_resources\_consumer** or **/ecc\_resources\_provider** folders, for consumer or provider.
diff --git a/doc/traceability_matrix.md b/doc/traceability_matrix.md
index 0118c9c..5ae494f 100644
--- a/doc/traceability_matrix.md
+++ b/doc/traceability_matrix.md
@@ -68,4 +68,24 @@ For managing security issues, a comprehensive approach is adopted:
4. **Test Coverage**: Emphasizing comprehensive test coverage, including unit, integration, and end-to-end tests, to detect vulnerabilities early in the development cycle.
-5. **Documentation and Tracking**: Thorough documentation of all security fixes, detailing the vulnerability, the fix, and the impact on the system.
\ No newline at end of file
+5. **Documentation and Tracking**: Thorough documentation of all security fixes, detailing the vulnerability, the fix, and the impact on the system.
+
+## Status of the issues
+
+As mentioned earlier, GitHub, used alongside Dependabot, serves as a system for monitoring reported issues, tracking the progress of ongoing issues, and recording closed issues. The most recent status updates for each component are available:
+
+1. Automated security issues reported by Dependabot
+
+| Severity | Report Date | Issue | Affected Component | Solution | Status |
+|:---------:|:-----------:|:--------------------------------------------------:|:------------------:|:---------------:|:------:|
+| High | 2022-04 | json stack overflow vulnerability | ECC | Bump to v20230227 | DONE |
+| Critical | 2022-02 | Arbitrary code execution in Apache Commons Text | DataApp | Bump to v1.10.0 | DONE |
+| Critical | 2022-02 | Arbitrary code execution in Apache Commons Text | ECC | Bump to v1.10.0 | DONE |
+| Moderate | 2022-04 | Chosen Ciphertext Attack in Jose4j | ECC | Bump to v0.9.3 | DONE |
+| Moderate | 2022-01 | Improper Locking in JetBrains Kotlin | ECC | Bump to v1.6.0 | DONE |
+| Moderate | 2021-01 | Timing based private key exposure in Bouncy Castle | ECC | Bump to v1.66 | DONE |
+
+
+2. Open issues - [ECC](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/issues), [DataApp](https://github.com/Engineering-Research-and-Development/true-connector-basic_data_app/issues)
+3. Changelogs (Closed implemented issues) - [ECC](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.7/CHANGELOG.md),[DataApp](https://github.com/Engineering-Research-and-Development/true-connector-basic_data_app/blob/0.3.8/CHANGELOG.md), [UCDataApp](https://github.com/Engineering-Research-and-Development/true-connector-uc_data_app_platoon/blob/1.7.8/CHANGELOG.md)
+
diff --git a/doc/user_management.md b/doc/user_management.md
index c6e4b7b..aa945f0 100644
--- a/doc/user_management.md
+++ b/doc/user_management.md
@@ -1,27 +1,128 @@
# User management
-TRUE Connector implements simple user management; 2 users, idsUser and apiUser are present in 2 property files, and are responsible for:
+TRUE Connector implements simple user management. In both the ECC and DataApp resources you can find the `users.properties` file, where all user credentials are stored.
-idsUser - interacts with Basic DataApp, initiates communication with connector
-apiUser - interacts with Execution Core Container, makes modification for Self Description document
+None of the user credentials are persisted anywhere besides the properties files, and their passwords are encoded using BCryptPasswordEncoder.
-With their responsibilities, idsUser can be found and managed by modifying DataApp property file,
+Those users are strictly related to the connector at the application level; if there is a need to create new users for SSH access, please refer to [Manage OS users](./advancedConfiguration/manage-os-users.md).
+
+## Default configuration
+
+By default, there are 4 users divided in two groups for quick start:
+
+idsUser and bob - initiates communication with connector (sends IDS messages, performs contract negotiation process, downloads artifacts)
+apiUser and alice - makes modification for Self Description document
+
+DataApp users.properties
```
-application.security.password=$2a$10$MQ5grDaIqDpBjMlG78PFduv.AMRe9cs0CNm/V4cgUubrqdGTFCH3m
+# List of users
+users.list=idsUser,bob
+
+# Credentials for each user
+# encoded - passwordIdsUser
+idsUser.password=$2a$12$54Rw0Bp/9yt5Zcj4gVkvnuVT9aeN36m4dzVMMLrPC0v78lAOQo9te
+# encoded - passwordBob
+bob.password=$2a$12$8ngZQYUF9pATTwNRmLiYeu6XGlLd79eb4FIgr5ezzuAA6tGLxuAyy
```
-while apiUser is present in ecc property file.
+ECC users.properties
+
+```
+# List of users
+users.list=apiUser,alice
+
+# Credentials for each user
+# encoded - passwordApiUser
+apiUser.password=$2a$12$cxXwcV989VwOUznb10oBcuvHSTVDAl6MWL2GG257RfI3Gg.J8Qvnu
+# encoded - passwordAlice
+alice.password=$2a$12$xeiemEk5ycerfxq7440ieeTUmZ3EK65hwXwM.NQu.1Y29xbpOMVyq
+```
+The `users.list` property is a list containing usernames of all users, with each username separated by a comma (,) but without any spaces. It is essential to individually assign each user a specific password, which must be encoded using BCrypt.
+
+
+## Manage users credentials
+
+
+***IMPORTANT:*** By default, only the admin (root) OS user can change credentials for TC users. If there is a specific need for a new OS user to access and modify the configurations, more information can be found [here](./advancedConfiguration/manage-os-users.md).
+
+ECC `users.properties` can be found in the following folders: [***ecc_resources_provider (ECC Provider)***](../ecc_resources_provider/users.properties) and [***ecc_resources_consumer (ECC Consumer)***](../ecc_resources_consumer/users.properties).
+
+
+
+DataApp `users.properties` can be found in the following folder: [***be-dataapp_resources***](../be-dataapp_resources/users.properties)
+
+
+### Usernames naming convention
+
+If your company doesn't have established naming convention, you can follow these basic guidelines:
+
+* Uniqueness: Ensure each username is unique.
+* Character Set: Stick to letters and numbers.
+* Length: Keep it short, around 3-20 characters.
+* No Personal Info: Avoid using personal data like full names or birthdays.
+* Case Insensitivity: Make usernames case-insensitive for user convenience.
+* Reserved Keywords: Don't allow reserved or restricted words.
+
+
+### Add new user
+
+As mentioned earlier, the `users.list` property is a list containing usernames, with each username separated by a comma (,) but without any spaces. It is essential to individually assign each user a specific password, as demonstrated in the example. These passwords must be encoded using BCrypt. Passwords can be generated via the following endpoint:
```
-application.user.api.password=$2a$10$MQ5grDaIqDpBjMlG78PFduv.AMRe9cs0CNm/V4cgUubrqdGTFCH3m
+/api/password/{new_password}
```
-Both user credentials are not persisted anywhere beside properties files, and their passwords are encoded using BcryptPasswordEncoder.
+Using this endpoint, it is guaranteed that the password strength rules configured in the `application.properties` file will be enforced.
+
+Bear in mind that this endpoint is password protected, and you will have to provide existing credentials in order for TRUE Connector to generate a new hash that matches the value passed in the URL, so the general advice is to keep `apiUser` as a kind of administrator account. Once the new hash is returned, you can modify the properties file and set a new password for the specific user.
+
+Example:
+
+```
+# List of users
+users.list=existingUser,newUser
+
+# Credentials for each user
+# encoded - passwordExistingUser123
+existingUser.password=$2a$12$pnOQFwnr4abSkXs3DaKt8O0MRE2r234WHFLPKUmUgwXZkA245BDa.
+# encoded - passwordNewUser123
+newUser.password=$2a$12$v7/AKsx5KTNJpwOg9yRRe.h6jP80gc03umqqN6aMAQtvVEWmpIqna
+```
+
+After the files are modified, it is necessary to restart container in order to apply changes.
+
+
+### Modify existing user
+
+The same rules apply to modifying user credentials: what needs to be updated is the username in the `users.list`, and if the password needs to be updated, the same endpoint mentioned above should be used.
+
+
+Example for changing username and password:
+
+```
+# List of users
+users.list=existingUser,newUserModify
+
+# Credentials for each user
+# encoded - passwordExistingUser123
+existingUser.password=$2a$12$pnOQFwnr4abSkXs3DaKt8O0MRE2r234WHFLPKUmUgwXZkA245BDa.
+# encoded - passwordNewUser123Modify
+newUserModify.password=$2a$12$O8BZtPck4AtFpMl.WvflFOy4MniRcc0S94X43I32Eym1ZuOr5M1/.
+
+```
+
+Also here, after the files are modified, it is necessary to restart container in order to apply changes.
+
+### Delete existing user
+
+If a user needs to be deleted, it can be done by deleting the desired username in the `users.list`, alongside the assigned password.
+
+Also here, after the files are modified, it is necessary to restart container in order to apply changes.
-## Modifying password for a user
+## User request for changing password
-Once new password is generated, (described [here](https://github.com/Engineering-Research-and-Development/true-connector-execution_core_container/blob/1.14.6/doc/SECURITY.md#change-default-password)) user should send encoded password to the operations user, which should be the only one who can modify connector property file. That user will update property file and restart TRUE Connector, so that new password will be loaded by the connector.
+If a user wants to change a password previously assigned by the admin, they should directly contact the admin, providing the new desired password, after which the admin will apply the changes.
diff --git a/docker-compose.yml b/docker-compose.yml
index 129fd4a..4a14313 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,7 +1,7 @@
version: '3.1'
services:
ecc-provider:
- image: rdlabengpa/ids_execution_core_container:v1.14.6
+ image: rdlabengpa/ids_execution_core_container:v1.14.7
deploy:
resources:
limits:
@@ -101,7 +101,7 @@ services:
- ./uc-dataapp-pip_resources_provider:/etc
be-dataapp-provider:
- image: rdlabengpa/ids_be_data_app:v0.3.7
+ image: rdlabengpa/ids_be_data_app:v0.3.8
deploy:
resources:
limits:
@@ -134,11 +134,11 @@ services:
- VALIDATE_SELF_DESCRIPTION=${VALIDATE_SELF_DESCRIPTION}
volumes:
- ./be-dataapp_resources:/config
- - be_dataapp_provider_data:/home/nobody/data/
+ - trueconnector_be_dataapp_provider_data:/home/nobody/data/
- ./ecc_cert:/cert
ecc-consumer:
- image: rdlabengpa/ids_execution_core_container:v1.14.6
+ image: rdlabengpa/ids_execution_core_container:v1.14.7
deploy:
resources:
limits:
@@ -238,7 +238,7 @@ services:
- ./uc-dataapp-pip_resources_consumer:/etc
be-dataapp-consumer:
- image: rdlabengpa/ids_be_data_app:v0.3.7
+ image: rdlabengpa/ids_be_data_app:v0.3.8
deploy:
resources:
limits:
@@ -284,5 +284,5 @@ volumes:
ecc_consumer_data: {}
uc_consumer_data: {}
be_dataapp_consumer_data: {}
- be_dataapp_provider_data:
+ trueconnector_be_dataapp_provider_data:
external: true
diff --git a/ecc_resources_consumer/users.properties b/ecc_resources_consumer/users.properties
new file mode 100644
index 0000000..de90054
--- /dev/null
+++ b/ecc_resources_consumer/users.properties
@@ -0,0 +1,8 @@
+# List of users
+users.list=apiUser,alice
+
+# Credentials for each user
+# encoded - passwordApiUser
+apiUser.password=$2a$12$cxXwcV989VwOUznb10oBcuvHSTVDAl6MWL2GG257RfI3Gg.J8Qvnu
+# encoded - passwordAlice
+alice.password=$2a$12$xeiemEk5ycerfxq7440ieeTUmZ3EK65hwXwM.NQu.1Y29xbpOMVyq
diff --git a/ecc_resources_provider/users.properties b/ecc_resources_provider/users.properties
new file mode 100644
index 0000000..de90054
--- /dev/null
+++ b/ecc_resources_provider/users.properties
@@ -0,0 +1,8 @@
+# List of users
+users.list=apiUser,alice
+
+# Credentials for each user
+# encoded - passwordApiUser
+apiUser.password=$2a$12$cxXwcV989VwOUznb10oBcuvHSTVDAl6MWL2GG257RfI3Gg.J8Qvnu
+# encoded - passwordAlice
+alice.password=$2a$12$xeiemEk5ycerfxq7440ieeTUmZ3EK65hwXwM.NQu.1Y29xbpOMVyq
diff --git a/prepopulate_be_dataapp_data_provider.sh b/prepopulate_be_dataapp_data_provider.sh
index 82b1ceb..e2ddf6d 100755
--- a/prepopulate_be_dataapp_data_provider.sh
+++ b/prepopulate_be_dataapp_data_provider.sh
@@ -1,22 +1,31 @@
#!/bin/bash
-# Remove the existing Docker volume (ignoring errors if it doesn't exist)
-docker volume rm be_dataapp_provider_data || true
+# Check if the Docker volume 'trueconnector_be_dataapp_provider_data' exists
+if docker volume inspect trueconnector_be_dataapp_provider_data &> /dev/null; then
+ # If it exists, remove it
+ echo "Removing existing Docker volume 'trueconnector_be_dataapp_provider_data'..."
+ docker volume rm trueconnector_be_dataapp_provider_data
+else
+ # If it doesn't exist, print a message
+ echo "Docker volume 'trueconnector_be_dataapp_provider_data' does not exist, skipping removal..."
+fi
-# Replace 'be-be_dataapp_provider_data' with the actual volume name you want to create
-docker volume create be_dataapp_provider_data
+echo "Creating trueconnector_be_dataapp_provider_data volume..."
+# Replace 'trueconnector_be_dataapp_provider_data' with the actual volume name you want to create
+docker volume create trueconnector_be_dataapp_provider_data
+echo "trueconnector_be_dataapp_provider_data volume created"
# Change the ownership of the local folder 'be-dataapp_data_provider' to nobody:nogroup
chown -R nobody:nogroup "$(pwd)/be-dataapp_data_provider"
-# Create the 'datalake' directory inside the 'be_dataapp_provider_data' volume with the desired ownership
-docker run --rm -v "be_dataapp_provider_data:/target_data" alpine sh -c "mkdir -p /target_data/datalake && chown -R nobody:nogroup /target_data/datalake"
+# Create the 'datalake' directory inside the 'trueconnector_be_dataapp_provider_data' volume with the desired ownership
+docker run --rm -v "trueconnector_be_dataapp_provider_data:/target_data" alpine sh -c "mkdir -p /target_data/datalake && chown -R nobody:nogroup /target_data/datalake"
-# Copy data from the 'be-dataapp_data_provider' folder to the 'target_data/datalake' directory inside the Docker volume
-docker run --rm -v "$(pwd)/be-dataapp_data_provider:/source_data" -v "be_dataapp_provider_data:/target_data" alpine sh -c "cp -r /source_data/* /target_data/datalake/"
+# Copy data from the 'trueconnector_be-dataapp_data_provider' folder to the 'target_data/datalake' directory inside the Docker volume
+docker run --rm -v "$(pwd)/be-dataapp_data_provider:/source_data" -v "trueconnector_be_dataapp_provider_data:/target_data" alpine sh -c "cp -r /source_data/* /target_data/datalake/"
-# Create the 'log' directory inside the 'be_dataapp_provider_data' volume with the desired ownership
-docker run --rm -v "be_dataapp_provider_data:/target_data" alpine sh -c "mkdir -p /target_data/log/dataapp && chown -R nobody:nogroup /target_data/log/dataapp"
+# Create the 'log' directory inside the 'trueconnector_be_dataapp_provider_data' volume with the desired ownership
+docker run --rm -v "trueconnector_be_dataapp_provider_data:/target_data" alpine sh -c "mkdir -p /target_data/log/dataapp && chown -R nobody:nogroup /target_data/log/dataapp"
# Change the ownership of the Docker volume contents to 'nobody:nogroup'
-docker run --rm -v "be_dataapp_provider_data:/target_data" alpine sh -c "chown -R nobody:nogroup /target_data"
+docker run --rm -v "trueconnector_be_dataapp_provider_data:/target_data" alpine sh -c "chown -R nobody:nogroup /target_data"
diff --git a/prepopulate_be_dataapp_data_provider_win.sh b/prepopulate_be_dataapp_data_provider_win.sh
index e360168..0618ddf 100644
--- a/prepopulate_be_dataapp_data_provider_win.sh
+++ b/prepopulate_be_dataapp_data_provider_win.sh
@@ -1,18 +1,27 @@
-# Remove the existing Docker volume (ignoring errors if it doesn't exist)
-docker volume rm be_dataapp_provider_data || true
+# Check if the Docker volume 'trueconnector_be_dataapp_provider_data' exists
+if docker volume inspect trueconnector_be_dataapp_provider_data &> /dev/null; then
+ # If it exists, remove it
+ echo "Removing existing Docker volume 'trueconnector_be_dataapp_provider_data'..."
+ docker volume rm trueconnector_be_dataapp_provider_data
+else
+ # If it doesn't exist, print a message
+ echo "Docker volume 'trueconnector_be_dataapp_provider_data' does not exist, skipping removal..."
+fi
-# Replace 'be-be_dataapp_provider_data' with the actual volume name you want to create
-docker volume create be_dataapp_provider_data
+echo "Creating trueconnector_be_dataapp_provider_data volume..."
+# Replace 'trueconnector_be_dataapp_provider_data' with the actual volume name you want to create
+docker volume create trueconnector_be_dataapp_provider_data
+echo "trueconnector_be_dataapp_provider_data volume created"
-# Create the 'datalake' directory inside the 'be_dataapp_provider_data' volume with the desired ownership
-docker run --rm -v "be_dataapp_provider_data:/target_data" alpine sh -c "mkdir -p /target_data/datalake && chown -R nobody:nogroup /target_data/datalake"
+# Create the 'datalake' directory inside the 'trueconnector_be_dataapp_provider_data' volume with the desired ownership
+docker run --rm -v "trueconnector_be_dataapp_provider_data:/target_data" alpine sh -c "mkdir -p /target_data/datalake && chown -R nobody:nogroup /target_data/datalake"
# Copy data from the 'be-dataapp_data_provider' folder to the 'target_data/datalake' directory inside the Docker volume
-docker run --rm -v "FULL_PATH/be-dataapp_data_provider:/source_data" -v "be_dataapp_provider_data:/target_data" alpine sh -c "cp -r /source_data/* /target_data/datalake/"
+docker run --rm -v "FULL_PATH/be-dataapp_data_provider:/source_data" -v "trueconnector_be_dataapp_provider_data:/target_data" alpine sh -c "cp -r /source_data/* /target_data/datalake/"
-# Create the 'log' directory inside the 'be_dataapp_provider_data' volume with the desired ownership
-docker run --rm -v "be_dataapp_provider_data:/target_data" alpine sh -c "mkdir -p /target_data/log/dataapp && chown -R nobody:nogroup /target_data/log/dataapp"
+# Create the 'log' directory inside the 'trueconnector_be_dataapp_provider_data' volume with the desired ownership
+docker run --rm -v "trueconnector_be_dataapp_provider_data:/target_data" alpine sh -c "mkdir -p /target_data/log/dataapp && chown -R nobody:nogroup /target_data/log/dataapp"
# Change the ownership of the Docker volume contents to 'nobody:nogroup'
-docker run --rm -v "be_dataapp_provider_data:/target_data" alpine sh -c "chown -R nobody:nogroup /target_data"
\ No newline at end of file
+docker run --rm -v "trueconnector_be_dataapp_provider_data:/target_data" alpine sh -c "chown -R nobody:nogroup /target_data"