From e9110f954607a56db7868fc6776a019f46c02f08 Mon Sep 17 00:00:00 2001
From: MichaelS
Date: Tue, 4 Jun 2024 00:49:55 +0400
Subject: [PATCH] feat: The Ansible playbook for the Lava RPC Provider Service

---
 ansible/provider/README.md                    | 102 +++++++++++++
 ansible/provider/ansible.cfg                  |  30 ++++
 ansible/provider/inventory/group_vars/all.yml |  10 ++
 .../inventory/host_vars/lava_provider_eu.yml  | 141 ++++++++++++++++++
 ansible/provider/inventory/hosts              |   3 +
 ansible/provider/main.yml                     |  31 ++++
 .../provider/roles/deploy/defaults/main.yml   |   1 +
 .../provider/roles/deploy/handlers/main.yml   |  16 ++
 ansible/provider/roles/deploy/meta/main.yml   |  19 +++
 ansible/provider/roles/deploy/tasks/main.yml  |  70 +++++++++
 .../deploy/templates/docker-compose.yml.j2    |  48 ++++++
 .../roles/deploy/templates/provider.env.j2    |  45 ++++++
 .../roles/deploy/templates/rpcprovider.yml.j2 |  17 +++
 ansible/provider/roles/deploy/vars/main.yml   |  11 ++
 14 files changed, 544 insertions(+)
 create mode 100644 ansible/provider/README.md
 create mode 100644 ansible/provider/ansible.cfg
 create mode 100644 ansible/provider/inventory/group_vars/all.yml
 create mode 100644 ansible/provider/inventory/host_vars/lava_provider_eu.yml
 create mode 100644 ansible/provider/inventory/hosts
 create mode 100644 ansible/provider/main.yml
 create mode 100644 ansible/provider/roles/deploy/defaults/main.yml
 create mode 100644 ansible/provider/roles/deploy/handlers/main.yml
 create mode 100644 ansible/provider/roles/deploy/meta/main.yml
 create mode 100644 ansible/provider/roles/deploy/tasks/main.yml
 create mode 100644 ansible/provider/roles/deploy/templates/docker-compose.yml.j2
 create mode 100644 ansible/provider/roles/deploy/templates/provider.env.j2
 create mode 100644 ansible/provider/roles/deploy/templates/rpcprovider.yml.j2
 create mode 100644 ansible/provider/roles/deploy/vars/main.yml

diff --git a/ansible/provider/README.md b/ansible/provider/README.md
new file mode 100644
index 0000000000..7fb9ca777e
--- /dev/null
+++ b/ansible/provider/README.md
@@ -0,0 +1,102 @@
+# RPC Provider Service Deployment
+
+This repository includes Ansible playbooks and supporting files for deploying and managing the RPC Provider Service. The service runs in Docker containers managed via Docker Compose to ensure easy and scalable deployments across multiple environments.
+
+## Prerequisites
+
+- **Ansible 2.9+**: Ensure Ansible is installed on your control machine.
+- **Docker**: Must be installed on the target hosts.
+- **Docker Compose**: Required for managing Dockerized applications.
+- **SSH Access**: Root or sudo access on the target hosts.
+
+## Repository Structure
+
+- **`group_vars`** and **`host_vars`**: Contain variables specific to groups and hosts. Customize these to fit the deployment context.
+- **`roles`**: Contains the tasks used for setting up the RPC provider.
+- **`templates`**: Jinja2 templates for generating Docker Compose and environment configuration files.
+- **`inventory`**: Hosts file defining the servers on which the RPC service will be deployed.
+
+## Installation and Setup
+
+### Clone the Repository
+
+Start by cloning this repository to your Ansible control machine:
+
+```bash
+git clone <repository-url>
+cd <repository-directory>
+```
+
+### Configure Inventory
+
+Edit the `inventory/hosts` file to add the IP addresses or hostnames of the machines where the service should be deployed.
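+
+> The inventory hostname (for example `lava_provider_eu`) must match the variables file name in `inventory/host_vars/` so that host-specific settings are applied.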
+
+Example:
+
+```yaml
+all:
+  children:
+    lava_provider_eu:
+      hosts:
+        192.168.1.100:
+          ansible_user: root
+          ansible_ssh_private_key_file: ~/.ssh/id_rsa
+```
+
+> You can declare certain parameters in the `ansible.cfg` configuration file or in `group_vars/all.yml`. These settings will be applied to all hosts on every run.
+
+ansible.cfg
+```ini
+[defaults]
+private_key_file = ~/.ssh/id_rsa
+```
+
+group_vars/all.yml
+
+```yml
+# group_vars/all.yml
+---
+ansible_user: root
+ansible_port: 22
+```
+
+### Set Role Variables
+Adjust role-specific variables in `group_vars/all.yml` and `host_vars/*.yml` to match your environment; see `inventory/host_vars/lava_provider_eu.yml` for a complete example.
+
+## Deployment
+The deployment process configures Docker networks directly, generates the necessary configuration files, and manages Docker containers through Docker Compose.
+
+### Deploy the Service
+To deploy the RPC Provider Service:
+
+```bash
+ansible-playbook main.yml --tags deploy
+```
+
+> Note that by default the `ansible-playbook main.yml` command deploys and runs the service.
+
+## Managing
+
+Start the Service: Ensure the service is up and running:
+
+```bash
+ansible-playbook main.yml --tags start
+```
+
+Stop the Service: Safely stop the service when needed:
+
+```bash
+ansible-playbook main.yml --tags stop
+```
+
+Restart the Service: Restart the service to apply updates or changes:
+
+```bash
+ansible-playbook main.yml --tags restart
+```
+
+## Configuration Files
+
+* Docker Compose Configuration: Located at `{{ project_path }}/docker-compose.yml`, it defines the service setup, including image, ports, and environment variables.
+* Environment Variables: Stored in `{{ project_path }}/provider.env`, this file includes environment-specific variables like log level and cache settings.
+* Chains Configuration: Stored in `{{ volume_path }}/config/rpcprovider.yml`, this file contains the chain-specific settings for the Lava RPC Provider.
\ No newline at end of file
diff --git a/ansible/provider/ansible.cfg b/ansible/provider/ansible.cfg
new file mode 100644
index 0000000000..2e7ac1e8e7
--- /dev/null
+++ b/ansible/provider/ansible.cfg
@@ -0,0 +1,30 @@
+[defaults]
+# Defines the location of the inventory file that Ansible will use to find the host information.
+inventory = ./inventory/hosts
+
+# Path where Ansible will look for the roles.
+roles_path = roles
+
+# The number of parallel processes to use when Ansible executes tasks on multiple hosts.
+forks = 20
+
+# Disables SSH host key checking, making Ansible automatically accept unknown host keys.
+# This is useful in automated environments to avoid manual intervention.
+host_key_checking = False
+
+# Changes the default merging behavior of variables. With 'merge', hashes will be deeply merged.
+hash_behaviour = merge
+
+# Enables pipelining, which reduces the number of SSH operations required to execute a module.
+# This can result in a significant performance improvement but may not be compatible with all setups.
+pipelining = True
+
+# Specifies the SSH private key file to use for SSH authentication.
+private_key_file = ~/.ssh/id_rsa
+
+[ssh_connection]
+# SSH arguments used when connecting to hosts.
+# - ForwardAgent=yes: Allows SSH agent forwarding.
+# - ControlMaster=auto: Enables the sharing of multiple sessions over a single network connection.
+# - ControlPersist=60s: Keeps the master connection open in the background for up to 60 seconds after the initial connection, improving subsequent connection times.
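+# Ansible manages ControlPath automatically, so it is not set explicitly here.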
+ssh_args = -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s
diff --git a/ansible/provider/inventory/group_vars/all.yml b/ansible/provider/inventory/group_vars/all.yml
new file mode 100644
index 0000000000..1592fc64a9
--- /dev/null
+++ b/ansible/provider/inventory/group_vars/all.yml
@@ -0,0 +1,10 @@
+# group_vars/all.yml
+---
+ansible_user: root
+ansible_port: 22
+project_name: lava
+project_type: provider
+project_unique_name: "{{ project_name }}-{{ provider_name }}-{{ project_type }}"
+service_path: /opt/services # The docker-compose.yml and a variables file are located at this path.
+project_path: "{{ service_path }}/{{ project_unique_name }}" # Configuration files and a wallet are located at this path.
+volume_path: "{{ container.volume_path }}"
diff --git a/ansible/provider/inventory/host_vars/lava_provider_eu.yml b/ansible/provider/inventory/host_vars/lava_provider_eu.yml
new file mode 100644
index 0000000000..39eaf667e4
--- /dev/null
+++ b/ansible/provider/inventory/host_vars/lava_provider_eu.yml
@@ -0,0 +1,141 @@
+# host_vars/lava_provider_eu.yml
+---
+ansible_host: xxx.xxx.xxx.xxx # IP address of the host
+provider_name: michael # This value is used to generate a unique name for the directory and the Docker container.
+network: testnet
+
+# Container configuration
+container:
+  image: svetekllc/lava # Docker image name
+  tag: v2.0.1-provider # Docker image tag to specify version
+  limits:
+    cpu: 2 # Maximum number of CPU cores the container can use
+    memory: 4gb # Maximum amount of memory the container can use
+  volume_path: /opt/{{ project_unique_name }} # Path on the host where the container volume is mounted
+
+# Provider-specific configuration
+provider_config:
+  cache:
+    enable: true # Whether caching is enabled (true | false)
+    address: lava-cache # Lava Cache service address (IP address | FQDN)
+    port: 23100 # Lava Cache service port
+  chain_id: lava-testnet-2 # Blockchain network identifier
+  config_path: /root/.lava # Path to provider configuration files
+  geolocation: 2 # Geolocation ID, used for regional distinctions
+  log_level: info # Logging level
+  moniker: Michael # Custom name for the provider instance
+  rewards_storage_dir: rewards-storage # Directory for storing rewards data
+  public_rpc_url: https://public-rpc-testnet2.lavanet.xyz:443/rpc/ # Public URL for RPC connections
+  total_connections: 25 # Max number of simultaneous network connections
+
+# Wallet configuration
+wallet:
+  name: lava_michael # Wallet name
+  password: "" # Wallet password. Required when keyring_backend is set to 'os' or 'file'.
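+  # The 'test' backend stores keys unencrypted on disk and does not use the password above.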
+  keyring_backend: test # Backend system for managing keys and secrets
+
+# Network ports used by the provider
+provider_ports:
+  grpc: 22001 # Port for gRPC service
+  metrics: 23001 # Port for exposing metrics
+
+networks:
+  - lava # Network label used for Docker networking
+
+# Blockchain chains configurations
+chains:
+  - name: "Arbitrum mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "ARB1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://arbitrum-mainnet:8545"] }
+
+  - name: "Avalanche mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "AVAX", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://avalanche-mainnet/ext/bc"] }
+
+  - name: "Axelar testnet"
+    endpoints:
+      - { api_interface: "tendermintrpc", chain_id: "AXELART", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://axelar-testnet:26657", "ws://axelar-testnet:26657/websocket"] }
+      - { api_interface: "grpc", chain_id: "AXELART", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["axelar-testnet:9090"] }
+      - { api_interface: "rest", chain_id: "AXELART", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://axelar-testnet:26317"] }
+
+  - name: "Axelar mainnet"
+    endpoints:
+      - { api_interface: "tendermintrpc", chain_id: "AXELAR", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://axelar-mainnet:26657", "ws://axelar-mainnet:26657/websocket"] }
+      - { api_interface: "grpc", chain_id: "AXELAR", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["axelar-mainnet:16090"] }
+      - { api_interface: "rest", chain_id: "AXELAR", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://axelar-mainnet:26317"] }
+
+  - name: "Binance mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "BSC", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://binance-mainnet"] }
+
+  - name: "Canto mainnet"
+    endpoints:
+      - { api_interface: "tendermintrpc", chain_id: "CANTO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://canto-mainnet:26657", "ws://canto-mainnet:26657/websocket"] }
+      - { api_interface: "grpc", chain_id: "CANTO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["canto-mainnet:9090"] }
+      - { api_interface: "jsonrpc", chain_id: "CANTO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://canto-mainnet:8545"] }
+      - { api_interface: "rest", chain_id: "CANTO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://canto-mainnet:1317"] }
+
+  - name: "Celestia mainnet"
+    endpoints:
+      - { api_interface: "tendermintrpc", chain_id: "CELESTIA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://celestia-mainnet:26657", "ws://celestia-mainnet:26657/websocket"] }
+      - { api_interface: "grpc", chain_id: "CELESTIA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["celestia-mainnet:9090"] }
+      - { api_interface: "jsonrpc", chain_id: "CELESTIA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://celestia-mainnet-light:26658"] }
+      - { api_interface: "rest", chain_id: "CELESTIA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://celestia-mainnet:1317"] }
+
+  - name: "Celo mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "CELO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://celo-mainnet:8545"] }
+
+  - name: "CosmosHub mainnet"
+    endpoints:
+      - { api_interface: "tendermintrpc", chain_id: "COSMOSHUB", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://cosmos-mainnet:26657", "ws://cosmos-mainnet:26657/websocket"] }
+      - { api_interface: "grpc", chain_id: "COSMOSHUB", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["cosmos-mainnet:9090"] }
+      - { api_interface: "rest", chain_id: "COSMOSHUB", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://cosmos-mainnet:1317"] }
+
+  - name: "Ethereum mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "ETH1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://ethereum-mainnet:8549"] }
+
+  - name: "Evmos testnet"
+    endpoints:
+      - { api_interface: "tendermintrpc", chain_id: "EVMOST", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-testnet:26657", "ws://evmos-testnet:26657/websocket"] }
+      - { api_interface: "grpc", chain_id: "EVMOST", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["evmos-testnet:9090"] }
+      - { api_interface: "jsonrpc", chain_id: "EVMOST", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-testnet:8545"] }
+      - { api_interface: "rest", chain_id: "EVMOST", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-testnet:1317"] }
+
+  - name: "Evmos mainnet"
+    endpoints:
+      - { api_interface: "tendermintrpc", chain_id: "EVMOS", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-mainnet:26657", "ws://evmos-mainnet:26657/websocket"] }
+      - { api_interface: "grpc", chain_id: "EVMOS", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["evmos-mainnet:9090"] }
+      - { api_interface: "jsonrpc", chain_id: "EVMOS", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-mainnet:8545"] }
+      - { api_interface: "rest", chain_id: "EVMOS", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-mainnet:1317"] }
+
+  - name: "Fantom mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "FTM250", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://fantom-mainnet:8545"] }
+
+  - name: "Lava testnet"
+    endpoints:
+      - { api_interface: "tendermintrpc", chain_id: "LAV1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://lava-testnet:26657", "ws://lava-testnet:26657/websocket"] }
+      - { api_interface: "grpc", chain_id: "LAV1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["lava-testnet:9090"] }
+      - { api_interface: "rest", chain_id: "LAV1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://lava-testnet:1317"] }
+
+  - name: "Near testnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "NEART", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://near-testnet:3030"] }
+
+  - name: "Near mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "NEAR", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://near-mainnet:3030"] }
+
+  - name: "Polygon mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "POLYGON1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://polygon-mainnet"] }
+
+  - name: "Solana mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "SOLANA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://solana-mainnet"] }
+
+  - name: "Starknet mainnet"
+    endpoints:
+      - { api_interface: "jsonrpc", chain_id: "STRK", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://starknet-mainnet:9545"] }
diff --git a/ansible/provider/inventory/hosts b/ansible/provider/inventory/hosts
new file mode 100644
index 0000000000..5cab541aba
--- /dev/null
+++ b/ansible/provider/inventory/hosts
@@ -0,0 +1,3 @@
+all:
+  hosts:
+    lava_provider_eu:
diff --git a/ansible/provider/main.yml b/ansible/provider/main.yml
new file mode 100644
index 0000000000..cf10eb0eda
--- /dev/null
+++ b/ansible/provider/main.yml
@@ -0,0 +1,31 @@
+---
+# The main playbook for RPC provider deployment
+- name: Deploying and managing the RPC Provider Service
+  hosts: all
+  become: true
+  gather_facts: false
+  roles:
+    - role: deploy
+      tags:
+        - deploy
+  tasks:
+    - name: Run the service
+      community.docker.docker_compose_v2:
+        project_src: "{{ project_path }}"
+        state: present
+      tags:
+        - start
+    - name: Stop the service
+      community.docker.docker_compose_v2:
+        project_src: "{{ project_path }}"
+        state: stopped
+      tags:
+        - never
+        - stop
+    - name: Restart the service
+      community.docker.docker_compose_v2:
+        project_src: "{{ project_path }}"
+        state: restarted
+      tags:
+        - never
+        - restart
diff --git a/ansible/provider/roles/deploy/defaults/main.yml b/ansible/provider/roles/deploy/defaults/main.yml
new file mode 100644
index 0000000000..74b75a13e5
--- /dev/null
+++ b/ansible/provider/roles/deploy/defaults/main.yml
@@ -0,0 +1 @@
+# roles/deploy/defaults/main.yml
diff --git a/ansible/provider/roles/deploy/handlers/main.yml b/ansible/provider/roles/deploy/handlers/main.yml
new file mode 100644
index 0000000000..73e7d35f7d
--- /dev/null
+++ b/ansible/provider/roles/deploy/handlers/main.yml
@@ -0,0 +1,16 @@
+# roles/deploy/handlers/main.yml
+---
+- name: Check and possibly restart docker service
+  block:
+    - name: Get container info
+      community.docker.docker_container_info:
+        name: "{{ project_unique_name }}"
+      register: container_info
+      listen: Check and possibly restart docker service
+
+    - name: Restart docker service if container exists
+      community.docker.docker_compose_v2:
+        project_src: "{{ project_path }}"
+        state: restarted
+      when: container_info.exists
+      listen: Check and possibly restart docker service
diff --git a/ansible/provider/roles/deploy/meta/main.yml b/ansible/provider/roles/deploy/meta/main.yml
new file mode 100644
index 0000000000..6a70a8ec15
--- /dev/null
+++ b/ansible/provider/roles/deploy/meta/main.yml
@@ -0,0 +1,19 @@
+# roles/deploy/meta/main.yml
+---
+dependencies: []
+galaxy_info:
+  author: Michael
+  description: The role for deploying the Lava RPC provider
+  company: Impulse Expert | https://impulse.expert
+  license: GPL
+  min_ansible_version: "2.9"
+  platforms:
+    - name: Ubuntu
+      versions:
+        - xenial
+        - bionic
+        - focal
+  galaxy_tags:
+    - lava
+    - rpc
+    - docker
diff --git a/ansible/provider/roles/deploy/tasks/main.yml b/ansible/provider/roles/deploy/tasks/main.yml
new file mode 100644
index 0000000000..31f41da25e
--- /dev/null
+++ b/ansible/provider/roles/deploy/tasks/main.yml
@@ -0,0 +1,70 @@
+# roles/deploy/tasks/main.yml
+---
+- name: Create a directory for the Docker Compose file
+  ansible.builtin.file:
+    path: "{{ project_path }}"
+    state: directory
+    mode: "0755"
+
+- name: Generate the Docker Compose file
+  ansible.builtin.template:
+    src: "docker-compose.yml.j2"
+    dest: "{{ project_path }}/docker-compose.yml"
+    mode: "0644"
+
+- name: Generate the provider.env file
+  ansible.builtin.template:
+    src: "provider.env.j2"
+    dest: "{{ project_path }}/provider.env"
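+
+# Note: every endpoint above listens on 0.0.0.0:22001, the port published via
+# provider_ports.grpc in docker-compose.yml. Additional chains can be added
+# by following the same endpoint schema.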
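+    # 0644 is world-readable; tighten to 0600 if you treat provider.env as sensitive.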
+    mode: "0644"
+
+- name: Create the data volume directory
+  ansible.builtin.file:
+    path: "{{ volume_path }}"
+    state: directory
+    mode: "0755"
+
+- name: Get volume information
+  community.docker.docker_volume_info:
+    name: "{{ project_unique_name }}"
+  register: volume_info
+
+- name: Recreate the data volume when its device path has changed
+  when:
+    - volume_info.exists
+    - volume_info.volume.Options.device != volume_path
+  block:
+    - name: Get container information
+      community.docker.docker_container_info:
+        name: "{{ project_unique_name }}"
+      register: container_info
+
+    - name: Remove the container
+      community.docker.docker_container:
+        name: "{{ project_unique_name }}"
+        state: absent
+      when: container_info.exists
+
+    - name: Remove the volume
+      community.docker.docker_volume:
+        name: "{{ project_unique_name }}"
+        state: absent
+      when: volume_info.exists
+
+- name: "Create the directory for the configuration file"
+  ansible.builtin.file:
+    path: "{{ volume_path }}/config"
+    state: directory
+    mode: "0755"
+
+- name: Generate the rpcprovider.yml file
+  ansible.builtin.template:
+    src: "rpcprovider.yml.j2"
+    dest: "{{ volume_path }}/config/rpcprovider.yml"
+    mode: "0644"
+  notify:
+    - Check and possibly restart docker service
+
+- name: "Create the network"
+  community.docker.docker_network:
+    name: "{{ project_name }}"
diff --git a/ansible/provider/roles/deploy/templates/docker-compose.yml.j2 b/ansible/provider/roles/deploy/templates/docker-compose.yml.j2
new file mode 100644
index 0000000000..3fc81ff293
--- /dev/null
+++ b/ansible/provider/roles/deploy/templates/docker-compose.yml.j2
@@ -0,0 +1,48 @@
+---
+name: {{ project_unique_name }}
+
+services:
+  provider:
+    image: {{ container.image }}:{{ container.tag }}
+    container_name: {{ project_unique_name }}
+    labels:
+      network: "{% if network %}{{ network }}{% else %}-no_network_set{% endif %}"
+    env_file:
+      - provider.env
+    volumes:
+      - data:{{ provider_config.config_path }}
+    ports:
+{% for value in provider_ports.values() %}
+      - "{{ value }}:{{ value }}"
+{% endfor %}
+    networks:
+{% for value in networks %}
+      - {{ value }}
+{% endfor %}
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "100m"
+        max-file: "1"
+    deploy:
+      resources:
+        limits:
+          cpus: "{{ container.limits.cpu }}"
+          memory: "{{ container.limits.memory }}"
+    restart: unless-stopped
+
+volumes:
+  data:
+    name: {{ project_unique_name }}
+    driver: local
+    driver_opts:
+      type: none
+      o: bind
+      device: {{ container.volume_path }}
+
+networks:
+{% for value in networks %}
+  {{ value }}:
+    name: {{ value }}
+    external: true
+{% endfor %}
diff --git a/ansible/provider/roles/deploy/templates/provider.env.j2 b/ansible/provider/roles/deploy/templates/provider.env.j2
new file mode 100644
index 0000000000..f99536c3d0
--- /dev/null
+++ b/ansible/provider/roles/deploy/templates/provider.env.j2
@@ -0,0 +1,45 @@
+### Provider variables ###
+
+# Enable or disable cache based on requirements.
+# Set to true to enable, false to disable.
+CACHE_ENABLE="{{ provider_config.cache.enable | lower }}"
+
+# Specify the IP address or domain name of the cache server.
+CACHE_ADDRESS="{{ provider_config.cache.address }}"
+
+# Define the port number on which the cache server is running.
+CACHE_PORT="{{ provider_config.cache.port }}"
+
+# Unique identifier for the blockchain network. Default is 'lava-testnet-2'.
+CHAIN_ID="{{ provider_config.chain_id }}"
+
+# Directory path for storing the provider's configuration files.
+# Default path is '/root/.lava'.
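+# Inside the container, this path is backed by the 'data' volume defined in docker-compose.yml.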
+CONFIG_PATH="{{ provider_config.config_path }}"
+
+# Geolocation of the node, used for network optimization and identification.
+GEOLOCATION="{{ provider_config.geolocation }}"
+
+# Name of the wallet used for transactions.
+WALLET="{{ wallet.name }}"
+
+# Storage mechanism for keys, such as 'os', 'file', or 'pass'.
+KEYRING_BACKEND="{{ wallet.keyring_backend }}"
+
+# Verbosity level of logs output.
+LOGLEVEL="{{ provider_config.log_level }}"
+
+# Port number for exposing Prometheus metrics.
+METRICS_PORT="{{ provider_ports.metrics }}"
+
+# Custom name to identify your provider node within the network.
+MONIKER="{{ provider_config.moniker }}"
+
+# Directory to store rewards data.
+REWARDS_STORAGE_DIR="{{ provider_config.rewards_storage_dir }}"
+
+# URL to access the public RPC server for the network.
+PUBLIC_RPC="{{ provider_config.public_rpc_url }}"
+
+# Total number of simultaneous network connections the node will attempt to maintain.
+TOTAL_CONNECTIONS="{{ provider_config.total_connections }}"
diff --git a/ansible/provider/roles/deploy/templates/rpcprovider.yml.j2 b/ansible/provider/roles/deploy/templates/rpcprovider.yml.j2
new file mode 100644
index 0000000000..0830511661
--- /dev/null
+++ b/ansible/provider/roles/deploy/templates/rpcprovider.yml.j2
@@ -0,0 +1,17 @@
+endpoints:
+
+{% for chain in chains %}
+  #### {{ chain.name }} ####
+{% for endpoint in chain.endpoints %}
+  - api-interface: {{ endpoint.api_interface }}
+    chain-id: {{ endpoint.chain_id }}
+    network-address:
+      address: {{ endpoint.network_address }}
+      disable-tls: {{ endpoint.disable_tls | lower }}
+    node-urls:
+{% for url in endpoint.node_urls %}
+      - url: {{ url }}
+{% endfor %}
+{% endfor %}
+
+{% endfor %}
diff --git a/ansible/provider/roles/deploy/vars/main.yml b/ansible/provider/roles/deploy/vars/main.yml
new file mode 100644
index 0000000000..c31b4b9468
--- /dev/null
+++ b/ansible/provider/roles/deploy/vars/main.yml
@@ -0,0 +1,11 @@
+# roles/deploy/vars/main.yml
+
+# In Ansible, the order in which a variable can be set determines which value
+# wins, known as variable precedence. Role vars sit higher in that order than
+# inventory variables, so values defined in roles/deploy/vars/main.yml
+# override the same variables set in group_vars and host_vars.
+
+# Examples:
+# network: testnet
+# provider_config:
+#   chain_id: lava-testnet-2
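+# Uncommenting either example above would therefore override the values set in
+# inventory/host_vars/lava_provider_eu.yml.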