diff --git a/.dockerignore b/.dockerignore
index 060cae42..99903e8d 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,4 +1,3 @@
-tests
helper-scripts
dist
build
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 15407c80..abdf44a9 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -13,7 +13,7 @@ jobs:
create_release:
if: github.event.pull_request.merged
name: Create release
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
outputs:
upload_url: ${{ steps.create_release.outputs.upload_url }}
version: ${{ steps.export_outputs.outputs.version }}
@@ -26,6 +26,7 @@ jobs:
- name: Checkout submodules
run: git submodule update --init
+
- name: Install ubuntu dependencies
run: |
sudo apt-get update
@@ -68,7 +69,7 @@ jobs:
strategy:
matrix:
include:
- - os: ubuntu-20.04
+ - os: ubuntu-22.04
asset_name: skale-${{ needs.create_release.outputs.version }}-Linux-x86_64
steps:
- uses: actions/checkout@v2
@@ -78,7 +79,7 @@ jobs:
python-version: 3.11
- name: Install ubuntu dependencies
- if: matrix.os == 'ubuntu-20.04'
+ if: matrix.os == 'ubuntu-22.04'
run: |
sudo apt-get update
@@ -127,7 +128,7 @@ jobs:
strategy:
matrix:
include:
- - os: ubuntu-20.04
+ - os: ubuntu-22.04
asset_name: skale-${{ needs.create_release.outputs.version }}-Linux-x86_64-sync
steps:
- uses: actions/checkout@v2
@@ -137,7 +138,7 @@ jobs:
python-version: 3.11
- name: Install ubuntu dependencies
- if: matrix.os == 'ubuntu-20.04'
+ if: matrix.os == 'ubuntu-22.04'
run: |
sudo apt-get update
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 0b1e24ef..31625095 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -3,7 +3,7 @@ on: [push, pull_request]
jobs:
test:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
strategy:
matrix:
python-version: [3.11]
@@ -23,20 +23,18 @@ jobs:
- name: Install ubuntu dependencies
run: |
sudo apt-get update
- sudo apt-get install python-setuptools iptables
+ sudo apt-get install iptables nftables python3-nftables
- name: Install python dependencies
run: |
python -m pip install --upgrade pip
- pip install -e .
pip install -e .[dev]
- pip install --upgrade 'setuptools<45.0.0'
- name: Lint with flake8
run: |
flake8 .
- - name: Build binary in Ubuntu 18.04 environment - normal
+ - name: Build binary - normal
run: |
mkdir -p ./dist
docker build . -t node-cli-builder
@@ -46,13 +44,7 @@ jobs:
- name: Check build - normal
run: sudo /home/ubuntu/dist/skale-test-Linux-x86_64
- - name: Build binary in Ubuntu 20.04 environment - normal
- run: |
- scripts/build.sh test test normal
-
- - name: Check build - sync
- run: sudo /home/ubuntu/dist/skale-test-Linux-x86_64
- - name: Build sync binary in Ubuntu 18.04 environment
+ - name: Build binary - sync
run: |
mkdir -p ./dist
docker build . -t node-cli-builder
@@ -62,12 +54,15 @@ jobs:
- name: Check build - sync
run: sudo /home/ubuntu/dist/skale-test-Linux-x86_64-sync
- - name: Build sync binary in Ubuntu 20.04 environment
+ - name: Prepare test build
run: |
- scripts/build.sh test test sync
-
- - name: Check build - sync
- run: sudo /home/ubuntu/dist/skale-test-Linux-x86_64-sync
+ scripts/build.sh test test normal
- name: Run tests
- run: bash ./scripts/run_tests.sh
+ run: |
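+ # python3-nftables is installed system-wide, so expose dist-packages to pytest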
+ export PYTHONPATH=${PYTHONPATH}:/usr/lib/python3/dist-packages/
+ bash ./scripts/run_tests.sh
+
+ - name: Run nftables tests
+ run: |
+ scripts/run_nftables_test.sh
diff --git a/Dockerfile b/Dockerfile
index fbd5248c..c77efb4b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,15 +1,20 @@
-FROM python:3.11-buster
+FROM python:3.11-bookworm
ENV DEBIAN_FRONTEND=noninteractive
-RUN apt-get update && apt-get install -y software-properties-common
-RUN apt-get install -y \
+RUN apt-get update && apt-get install -y \
git \
build-essential \
+ software-properties-common \
zlib1g-dev \
libssl-dev \
libffi-dev \
swig \
- iptables
+ iptables \
+ nftables \
+ python3-nftables \
+ libxslt-dev \
+ kmod
+
RUN mkdir /app
WORKDIR /app
@@ -17,6 +22,8 @@ WORKDIR /app
COPY . .
ENV PATH=/app/buildvenv/bin:$PATH
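+# python3-nftables ships its bindings in the system dist-packages, outside the
+# build venv, so they must be added to PYTHONPATH explicitly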
+ENV PYTHONPATH="${PYTHONPATH}:/usr/lib/python3/dist-packages"
+
RUN python3.11 -m venv /app/buildvenv && \
pip install --upgrade pip && \
pip install wheel setuptools==63.2.0 && \
diff --git a/lvmpy b/lvmpy
index 0d796233..694e2533 160000
--- a/lvmpy
+++ b/lvmpy
@@ -1 +1 @@
-Subproject commit 0d7962335f1e60797fdd89d3c9d1e750e0355275
+Subproject commit 694e25337b51fe97aaadd3ea45e88759e838184e
diff --git a/main.spec b/main.spec
index a4dbe395..e3844bc1 100644
--- a/main.spec
+++ b/main.spec
@@ -2,15 +2,12 @@
import importlib.util
-libxtwrapper_path = importlib.util.find_spec('libxtwrapper').origin
-
block_cipher = None
a = Analysis(
['node_cli/main.py'],
pathex=['.'],
- binaries=[(libxtwrapper_path, '.')],
datas=[
("./text.yml", "data"),
("./datafiles/skaled-ssl-test", "data/datafiles")
diff --git a/node_cli/cli/__init__.py b/node_cli/cli/__init__.py
index 4b85052a..ed6a4da3 100644
--- a/node_cli/cli/__init__.py
+++ b/node_cli/cli/__init__.py
@@ -1,4 +1,4 @@
-__version__ = '2.5.0'
+__version__ = '2.6.0'
if __name__ == "__main__":
print(__version__)
diff --git a/node_cli/cli/node.py b/node_cli/cli/node.py
index ff781249..8eee2d96 100644
--- a/node_cli/cli/node.py
+++ b/node_cli/cli/node.py
@@ -19,8 +19,8 @@
import click
+from node_cli.core.node import configure_firewall_rules
from node_cli.core.node import (
- configure_firewall_rules,
get_node_signature,
init,
restore,
@@ -239,12 +239,13 @@ def check(network):
run_checks(network)
-@node.command(help='Reconfigure iptables rules')
+@node.command(help='Reconfigure nftables rules')
+@click.option('--monitoring', is_flag=True)
@click.option('--yes', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Are you sure you want to reconfigure firewall rules?')
-def configure_firewall():
- configure_firewall_rules()
+def configure_firewall(monitoring):
+ configure_firewall_rules(enable_monitoring=monitoring)
@node.command(help='Show node version information')
diff --git a/node_cli/configs/__init__.py b/node_cli/configs/__init__.py
index 7e742536..7d256f14 100644
--- a/node_cli/configs/__init__.py
+++ b/node_cli/configs/__init__.py
@@ -163,3 +163,6 @@ def _get_env():
TELEGRAF_TEMPLATE_PATH = os.path.join(CONTAINER_CONFIG_PATH, 'telegraf.conf.j2')
TELEGRAF_CONFIG_PATH = os.path.join(CONTAINER_CONFIG_PATH, 'telegraf.conf')
NODE_DOCKER_CONFIG_PATH = os.path.join(NODE_DATA_PATH, 'docker.json')
+
+NFTABLES_RULES_PATH = '/etc/nftables.conf'
+NFTABLES_CHAIN_FOLDER_PATH = '/etc/nft.conf.d/chains'
diff --git a/node_cli/core/checks.py b/node_cli/core/checks.py
index e094fe99..57a64454 100644
--- a/node_cli/core/checks.py
+++ b/node_cli/core/checks.py
@@ -321,10 +321,6 @@ def _check_apt_package(self, package_name: str,
else:
return self._ok(name=package_name, info=info)
- @preinstall
- def iptables_persistent(self) -> CheckResult:
- return self._check_apt_package('iptables-persistent')
-
@preinstall
def lvm2(self) -> CheckResult:
return self._check_apt_package('lvm2')
@@ -415,26 +411,26 @@ def docker_api(self) -> CheckResult:
@preinstall
def docker_compose(self) -> CheckResult:
- name = 'docker-compose'
- cmd = shutil.which('docker-compose')
+ name = 'docker'
+ cmd = shutil.which('docker')
if cmd is None:
- info = 'No such command: "docker-compose"'
+ info = 'No such command: "docker"'
return self._failed(name=name, info=info)
v_cmd_result = run_cmd(
- ['docker-compose', '-v'],
+ ['docker', 'compose', 'version'],
check_code=False,
separate_stderr=True
)
output = v_cmd_result.stdout.decode('utf-8').rstrip()
if v_cmd_result.returncode != 0:
- info = f'Checking docker-compose version failed with: {output}'
+ info = f'Checking docker compose version failed with: {output}'
return self._failed(name=name, info=output)
actual_version = output.split(',')[0].split()[-1].strip()
expected_version = self.requirements['docker-compose']
- info = f'Expected docker-compose version {expected_version}, actual {actual_version}' # noqa
+ info = f'Expected docker compose version {expected_version}, actual {actual_version}' # noqa
if version_parse(actual_version) < version_parse(expected_version):
return self._failed(name=name, info=info)
else:
diff --git a/node_cli/core/iptables.py b/node_cli/core/iptables.py
deleted file mode 100644
index fe072931..00000000
--- a/node_cli/core/iptables.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2021 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import logging
-import socket
-import sys
-from pathlib import Path
-
-from node_cli.configs import (
- IPTABLES_DIR,
- IPTABLES_RULES_STATE_FILEPATH,
- ENV,
- DEFAULT_SSH_PORT
-)
-from node_cli.utils.helper import run_cmd
-
-
-logger = logging.getLogger(__name__)
-
-try:
- import iptc
-except (FileNotFoundError, AttributeError) as err:
- if "pytest" in sys.modules or ENV == 'dev':
- from collections import namedtuple # hotfix for tests
- iptc = namedtuple('iptc', ['Chain', 'Rule'])
- else:
- logger.error(f'Unable to import iptc due to an error {err}')
-
-
-ALLOWED_INCOMING_TCP_PORTS = [
- '80', # filestorage
- '311', # watchdog https
- '8080', # http
- '443', # https
- '53', # dns
- '3009', # watchdog http
- '9100' # node exporter
-]
-
-ALLOWED_INCOMING_UDP_PORTS = [
- '53' # dns
-]
-
-
-def configure_iptables():
- """
- This is the main function used for the initial setup of the firewall rules on the SKALE Node
- host machine
- """
- logger.info('Configuring iptables...')
- if not iptc:
- raise ImportError('Unable to import iptc package')
- Path(IPTABLES_DIR).mkdir(parents=True, exist_ok=True)
-
- tb = iptc.Table(iptc.Table.FILTER)
- input_chain = iptc.Chain(tb, 'INPUT')
-
- set_base_policies()
- allow_loopback(input_chain)
- accept_icmp(input_chain)
- allow_conntrack(input_chain)
- allow_base_ports(input_chain)
- drop_all_tcp(input_chain)
- drop_all_udp(input_chain)
- save_iptables_rules_state()
-
-
-def save_iptables_rules_state():
- res = run_cmd(['iptables-save'])
- plain_rules = res.stdout.decode('utf-8').rstrip()
- with open(IPTABLES_RULES_STATE_FILEPATH, 'w') as state_file:
- state_file.write(plain_rules)
-
-
-def set_base_policies() -> None:
- """Drop all incoming, allow all outcoming, drop all forwarding"""
- logger.debug('Setting base policies...')
- iptc.easy.set_policy(iptc.Table.FILTER, 'INPUT', 'ACCEPT')
- iptc.easy.set_policy(iptc.Table.FILTER, 'OUTPUT', 'ACCEPT')
- iptc.easy.set_policy(iptc.Table.FILTER, 'FORWARD', 'DROP')
-
-
-def allow_loopback(chain: iptc.Chain) -> None:
- """Allow local loopback services"""
- logger.debug('Allowing loopback packages...')
- rule = iptc.Rule()
- rule.target = iptc.Target(rule, 'ACCEPT')
- rule.in_interface = 'lo'
- ensure_rule(chain, rule)
-
-
-def allow_conntrack(chain: iptc.Chain) -> None:
- """Allow conntrack established connections"""
- logger.debug('Allowing conntrack...')
- rule = iptc.Rule()
- rule.target = iptc.Target(rule, 'ACCEPT')
- match = iptc.Match(rule, 'conntrack')
- chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'INPUT')
- match.ctstate = 'RELATED,ESTABLISHED'
- rule.add_match(match)
- ensure_rule(chain, rule)
-
-
-def drop_all_tcp(chain: iptc.Chain) -> None:
- """Drop the rest of tcp connections"""
- logger.debug('Adding drop tcp rule ...')
- r = iptc.Rule()
- t = iptc.Target(r, 'DROP')
- r.target = t
- r.protocol = 'tcp'
- ensure_rule(chain, r)
-
-
-def drop_all_udp(chain: iptc.Chain) -> None:
- """Drop the rest of udp connections """
- logger.debug('Adding drop udp rule ...')
- r = iptc.Rule()
- t = iptc.Target(r, 'DROP')
- r.target = t
- r.protocol = 'udp'
- ensure_rule(chain, r)
-
-
-def get_ssh_port(ssh_service_name='ssh'):
- try:
- return socket.getservbyname(ssh_service_name)
- except OSError:
- logger.exception('Cannot get ssh service port')
- return DEFAULT_SSH_PORT
-
-
-def allow_ssh(chain: iptc.Chain) -> None:
- ssh_port = get_ssh_port()
- accept_incoming(chain, str(ssh_port), 'tcp')
-
-
-def allow_base_ports(chain: iptc.Chain) -> None:
- logger.info('Configuring ssh port')
- allow_ssh(chain)
- logger.info('Configuring incoming tcp ports')
- for port in ALLOWED_INCOMING_TCP_PORTS:
- accept_incoming(chain, port, 'tcp')
- logger.info('Configuring incoming udp ports')
- for port in ALLOWED_INCOMING_UDP_PORTS:
- accept_incoming(chain, port, 'udp')
-
-
-def accept_incoming(chain: iptc.Chain, port: str, protocol: str) -> None:
- logger.debug('Going to allow %s traffic from %s port', protocol, port)
- rule = iptc.Rule()
- rule.protocol = protocol
- match = iptc.Match(rule, protocol)
- match.dport = port
- t = iptc.Target(rule, 'ACCEPT')
- rule.target = t
- rule.add_match(match)
- ensure_rule(chain, rule, insert=True)
-
-
-def accept_icmp(chain: iptc.Chain) -> None:
- add_icmp_rule(chain, 'destination-unreachable')
- add_icmp_rule(chain, 'source-quench')
- add_icmp_rule(chain, 'time-exceeded')
-
-
-def add_icmp_rule(chain: iptc.Chain, icmp_type: str) -> None:
- rule = iptc.Rule()
- rule.protocol = 'icmp'
- match = iptc.Match(rule, 'icmp')
- match.icmp_type = icmp_type
- t = iptc.Target(rule, 'ACCEPT')
- rule.target = t
- rule.add_match(match)
- ensure_rule(chain, rule)
-
-
-def ensure_rule(chain: iptc.Chain, rule: iptc.Rule, insert=False) -> None:
- if rule not in chain.rules:
- logger.debug(f'Adding rule: {rule_to_dict(rule)}, chain: {chain.name}')
- if insert:
- chain.insert_rule(rule)
- else:
- chain.append_rule(rule)
- else:
- logger.debug(f'Rule already present: {rule_to_dict(rule)}, chain: {chain.name}')
-
-
-def rule_to_dict(rule):
- return {
- 'proto': rule.protocol,
- 'src': rule.src,
- 'dst': rule.dst,
- 'in_interface': rule.in_interface,
- 'out': rule.out_interface,
- 'target': rule.target.name,
- }
diff --git a/node_cli/core/nftables.py b/node_cli/core/nftables.py
new file mode 100644
index 00000000..de411ddb
--- /dev/null
+++ b/node_cli/core/nftables.py
@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2019 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import json
+import logging
+import os
+import sys
+from typing import Optional
+from dataclasses import dataclass
+
+from node_cli.configs import ENV, NFTABLES_RULES_PATH, NFTABLES_CHAIN_FOLDER_PATH
+from node_cli.utils.helper import get_ssh_port, remove_between_brackets, run_cmd
+
+logger = logging.getLogger(__name__)
+
+
+try:
+ import nftables
+except (FileNotFoundError, AttributeError, ModuleNotFoundError) as err:
+ if "pytest" in sys.modules or ENV == 'dev':
+ from collections import namedtuple # hotfix for tests
+ nftables = namedtuple('nftables', ['Chain', 'Rule'])
+ else:
+ logger.error(f'Unable to import nftables due to an error {err}')
+
+
+@dataclass
+class Rule:
+ chain: str
+ protocol: str
+ port: Optional[int] = None
+ icmp_type: Optional[str] = None
+ action: str = 'accept'
+
+
+class NFTablesError(Exception):
+ pass
+
+
+class NFTablesManager:
+ def __init__(self, family: str = 'inet', table: str = 'firewall', chain: str = 'skale') -> None:
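+ # The libnftables Python binding is used in JSON mode: cmd() and json_cmd()
+ # both return (rc, output, error) tuples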
+ self.nft = nftables.Nftables()
+ self.nft.set_json_output(True)
+ self.family = family
+ self.table = table
+ self.chain = chain
+
+ def execute_cmd(self, json_cmd: dict) -> Optional[dict]:
+ try:
+ rc, output, error = self.nft.json_cmd(json_cmd)
+ if rc != 0:
+ raise NFTablesError(f'Command failed: {error}')
+ return output
+ except Exception as e:
+ logger.error('Failed to execute command: %s', e)
+ raise NFTablesError(e)
+
+ def get_chains(self) -> list[str]:
+ try:
+ rc, output, error = self.nft.cmd(f'list chains {self.family}')
+ if rc != 0:
+ if 'No such file or directory' in error:
+ return []
+ raise NFTablesError(f'Failed to list chains: {error}')
+
+ chains = json.loads(output)
+ return [item['chain']['name'] for item in chains.get('nftables', []) if 'chain' in item]
+ except Exception as e:
+ logger.error('Failed to get chains: %s', e)
+ return []
+
+ def flush(self) -> None:
+ self.nft.cmd('flush ruleset')
+
+ def chain_exists(self, chain_name: str) -> bool:
+ return chain_name in self.get_chains()
+
+ def create_chain_if_not_exists(
+ self, chain: str, hook: str, priority: int = 1, policy: str = 'accept'
+ ) -> None:
+ if not self.chain_exists(chain):
+ cmd = {
+ 'nftables': [
+ {
+ 'add': {
+ 'chain': {
+ 'family': self.family,
+ 'table': self.table,
+ 'name': chain,
+ 'type': 'filter',
+ 'hook': hook,
+ 'prio': priority,
+ 'policy': policy,
+ }
+ }
+ }
+ ]
+ }
+ self.execute_cmd(cmd)
+ logger.info('Created new chain: %s %s', chain, cmd)
+ else:
+ logger.info('Chain already exists: %s', chain)
+
+ def table_exists(self) -> bool:
+ try:
+ rc, output, error = self.nft.cmd(f'list table {self.family} {self.table}')
+ return rc == 0
+ except Exception:
+ return False
+
+ def create_table_if_not_exists(self) -> None:
+ """Create table only if it doesn't exist"""
+ if not self.table_exists():
+ cmd = {'nftables': [{'add': {'table': {'family': self.family, 'name': self.table}}}]}
+ self.execute_cmd(cmd)
+ logger.info('Created new table: %s', self.table)
+ else:
+ logger.info('Table already exists: %s', self.table)
+
+ def get_rules(self, chain: str) -> list[dict]:
+ """Get existing rules for a chain"""
+ try:
+ cmd = f'list chain {self.family} {self.table} {chain}'
+ rc, output, error = self.nft.cmd(cmd)
+ if rc != 0:
+ if 'No such file or directory' in error:
+ return []
+ raise NFTablesError(f'Failed to list rules: {error}')
+
+ rules = json.loads(output)
+ return [item['rule'] for item in rules.get('nftables', []) if 'rule' in item]
+ except Exception as e:
+ logger.error('Failed to get rules: %s', e)
+ return []
+
+ def rule_exists(self, chain: str, new_rule_expr: list[dict]) -> bool:
+ existing_rules = self.get_rules(chain)
+
+ for rule in existing_rules:
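+ # nft reports live packet/byte counts inside 'counter' expressions, so
+ # normalize them before comparing rules by shape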
+ expr = rule.get('expr') or []
+ for i, statement in enumerate(expr):
+ if 'counter' in statement:
+ expr[i] = {'counter': None}
+ if expr == new_rule_expr:
+ return True
+ return False
+
+ def add_drop_rule_if_not_exists(self, protocol: str) -> None:
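+ # Drop whatever traffic of this IPv4 protocol was not matched by the
+ # accept rules added earlier in the chain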
+ expr = [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "payload": {
+ "protocol": "ip",
+ "field": "protocol"
+ }
+ },
+ "right": protocol
+ }
+ },
+ {'counter': None},
+ {"drop": None}
+ ]
+ if not self.rule_exists(self.chain, expr):
+ cmd = {
+ 'nftables': [
+ {
+ 'add': {
+ 'rule': {
+ 'family': self.family,
+ 'table': self.table,
+ 'chain': self.chain,
+ 'expr': expr,
+ }
+ }
+ }
+ ]
+ }
+ self.execute_cmd(cmd)
+ logger.info('Added drop rule for %s', protocol)
+
+ def add_rule_if_not_exists(self, rule: Rule) -> None:
+ expr = []
+
+ if rule.protocol in ['tcp', 'udp']:
+ if rule.port:
+ expr.append(
+ {
+ 'match': {
+ 'left': {'payload': {'protocol': rule.protocol, 'field': 'dport'}},
+ 'op': '==',
+ 'right': rule.port,
+ }
+ }
+ )
+ elif rule.protocol == 'icmp' and rule.icmp_type:
+ expr.append(
+ {
+ 'match': {
+ 'left': {'payload': {'protocol': 'icmp', 'field': 'type'}},
+ 'op': '==',
+ 'right': rule.icmp_type,
+ }
+ }
+ )
+
+ expr.append({'counter': None})
+ expr.append({rule.action: None})
+
+ if not self.rule_exists(rule.chain, expr):
+ cmd = {
+ 'nftables': [
+ {
+ 'add': {
+ 'rule': {
+ 'family': self.family,
+ 'table': self.table,
+ 'chain': rule.chain,
+ 'expr': expr,
+ }
+ }
+ }
+ ]
+ }
+ self.execute_cmd(cmd)
+ logger.info(
+ 'Added new rule to chain %s: %s port %s', rule.chain, rule.protocol, rule.port
+ )
+ else:
+ logger.info(
+ 'Rule already exists in chain %s: %s port %s', rule.chain, rule.protocol, rule.port
+ )
+
+ def add_connection_tracking_rule(self, chain: str) -> None:
+ expr = [
+ {
+ 'match': {
+ 'left': {'ct': {'key': 'state'}},
+ 'op': 'in',
+ 'right': ['established', 'related'],
+ }
+ },
+ {'counter': None},
+ {'accept': None},
+ ]
+
+ if not self.rule_exists(chain, expr):
+ cmd = {
+ 'nftables': [
+ {
+ 'add': {
+ 'rule': {
+ 'family': self.family,
+ 'table': self.table,
+ 'chain': chain,
+ 'expr': expr,
+ }
+ }
+ }
+ ]
+ }
+ self.execute_cmd(cmd)
+ logger.info('Added connection tracking rule to chain %s', chain)
+ else:
+ logger.info('Connection tracking rule already exists in chain %s', chain)
+
+ def add_loopback_rule(self, chain: str) -> None:
+ expr = [
+ {'match': {'left': {'meta': {'key': 'iifname'}}, 'op': '==', 'right': 'lo'}},
+ {'counter': None},
+ {'accept': None},
+ ]
+ if not self.rule_exists(chain, expr):
+ json_cmd = {
+ 'nftables': [
+ {
+ 'add': {
+ 'rule': {
+ 'family': self.family,
+ 'table': self.table,
+ 'chain': chain,
+ 'expr': expr,
+ }
+ }
+ }
+ ]
+ }
+ self.execute_cmd(json_cmd)
+ else:
+ logger.info('Loopback rule already exists in chain %s', chain)
+
+ def get_base_ruleset(self) -> str:
+ self.nft.set_json_output(False)
+ output = ''
+ try:
+ rc, output, error = self.nft.cmd('list ruleset')
+ if rc != 0:
+ raise NFTablesError(f'Failed to get ruleset: {error}')
+ finally:
+ self.nft.set_json_output(True)
+
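+ # Strip per-sChain chains ('skale-*') from the persisted ruleset: they are
+ # restored separately through the NFTABLES_CHAIN_FOLDER_PATH include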
+ return remove_between_brackets(text=output, pattern='skale-')
+
+ def setup_firewall(self, enable_monitoring: bool = False) -> None:
+ """Setup firewall rules"""
+
+ logger.info('Configuring firewall rules')
+ try:
+ self.create_table_if_not_exists()
+
+ base_chains_config = {'skale': {'hook': 'input', 'policy': 'accept'}}
+
+ for chain, config in base_chains_config.items():
+ self.create_chain_if_not_exists(
+ chain=chain, hook=config['hook'], policy=config['policy']
+ )
+
+ self.add_connection_tracking_rule(self.chain)
+
+ tcp_ports = [get_ssh_port(), 53, 443, 3009]
+ if enable_monitoring:
+ tcp_ports.extend([8080, 9100])
+ for port in tcp_ports:
+ self.add_rule_if_not_exists(Rule(chain=self.chain, protocol='tcp', port=port))
+
+ self.add_rule_if_not_exists(Rule(chain=self.chain, protocol='udp', port=53))
+ self.add_loopback_rule(chain=self.chain)
+
+ icmp_types = ['destination-unreachable', 'source-quench', 'time-exceeded']
+ for icmp_type in icmp_types:
+ self.add_rule_if_not_exists(
+ Rule(
+ chain=self.chain,
+ protocol='icmp',
+ icmp_type=icmp_type
+ )
+ )
+
+ self.add_drop_rule_if_not_exists(protocol='udp')
+
+ except Exception as e:
+ logger.error('Failed to setup firewall: %s', e)
+ raise NFTablesError(e)
+ logger.info('Firewall rules are configured')
+
+ def flush_chain(self, chain: str) -> None:
+ """Remove all rules from a specific chain"""
+ json_cmd = {
+ 'nftables': [{
+ 'flush': {
+ 'chain': {
+ 'family': self.family,
+ 'table': self.table,
+ 'name': chain
+ }
+ }
+ }]
+ }
+
+ try:
+ rc, output, error = self.nft.json_cmd(json_cmd)
+ if rc != 0:
+ raise NFTablesError(f'Failed to flush chain: {error}')
+ except Exception as e:
+ logger.error(f'Failed to flush chain: {str(e)}')
+ raise NFTablesError('Flushing chain errored')
+
+
+def prepare_directories() -> None:
+ logger.info('Preparing directories for nftables')
+ os.makedirs(NFTABLES_CHAIN_FOLDER_PATH, exist_ok=True)
+
+
+def configure_nftables(enable_monitoring: bool = False) -> None:
+ prepare_directories()
+ enable_nftables_service()
+ nft_mgr = NFTablesManager()
+ nft_mgr.setup_firewall(enable_monitoring=enable_monitoring)
+ ruleset = nft_mgr.get_base_ruleset()
+ save_nftables_rules(ruleset)
+
+
+def enable_nftables_service() -> None:
+ logger.info('Enabling nftables service')
+ run_cmd(['systemctl', 'enable', 'nftables'])
+
+
+def save_nftables_rules(ruleset: str) -> None:
+ logger.info('Saving nftables rules')
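+ # Boot-time layout: flush everything, restore the static ruleset, then
+ # include the dynamically managed per-chain files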
+ content = f'#!/usr/sbin/nft -f\nflush ruleset\n{ruleset}\ninclude "{NFTABLES_CHAIN_FOLDER_PATH}/*"' # noqa
+ with open(NFTABLES_RULES_PATH, 'w') as f:
+ f.write(content)
+ logger.info('Rules saved successfully to %s', NFTABLES_RULES_PATH)
diff --git a/node_cli/core/node.py b/node_cli/core/node.py
index fb213a79..5fb53294 100644
--- a/node_cli/core/node.py
+++ b/node_cli/core/node.py
@@ -7,7 +7,6 @@
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -40,18 +39,17 @@
SCHAINS_MNT_DIR_SYNC,
SKALE_DIR,
SKALE_STATE_DIR,
- TM_INIT_TIMEOUT
+ TM_INIT_TIMEOUT,
)
-from node_cli.configs.env import get_env_config
+from node_cli.cli import __version__
+from node_cli.configs.env import get_env_config, SKALE_DIR_ENV_FILEPATH
from node_cli.configs.cli_logger import LOG_DATA_PATH as CLI_LOG_DATA_PATH
-from node_cli.core.iptables import configure_iptables
-from node_cli.core.host import (
- is_node_inited, save_env_params, get_flask_secret_key
-)
+from node_cli.core.host import is_node_inited, save_env_params, get_flask_secret_key
from node_cli.core.checks import run_checks as run_host_checks
from node_cli.core.resources import update_resource_allocation
from node_cli.operations import (
+ configure_nftables,
update_op,
init_op,
turn_off_op,
@@ -59,20 +57,25 @@
restore_op,
init_sync_op,
repair_sync_op,
- update_sync_op
+ update_sync_op,
)
from node_cli.utils.print_formatters import (
- print_failed_requirements_checks, print_node_cmd_error, print_node_info
+ print_failed_requirements_checks,
+ print_node_cmd_error,
+ print_node_info,
+)
+from node_cli.utils.helper import (
+ error_exit,
+ get_request,
+ post_request,
+ extract_env_params,
)
-from node_cli.utils.helper import error_exit, get_request, post_request
-from node_cli.utils.helper import extract_env_params
+from node_cli.utils.meta import get_meta_info
from node_cli.utils.texts import Texts
from node_cli.utils.exit_codes import CLIExitCodes
-from node_cli.utils.decorators import (
- check_not_inited,
- check_inited,
- check_user
-)
+from node_cli.utils.decorators import check_not_inited, check_inited, check_user
+from node_cli.utils.docker_utils import is_admin_running, is_api_running, is_sync_admin_running
+from node_cli.migrations.focal_to_jammy import migrate as migrate_2_6
logger = logging.getLogger(__name__)
@@ -85,6 +88,7 @@
class NodeStatuses(Enum):
"""This class contains possible node statuses"""
+
ACTIVE = 0
LEAVING = 1
FROZEN = 2
@@ -93,7 +97,11 @@ class NodeStatuses(Enum):
NOT_CREATED = 5
-def is_update_safe() -> bool:
+def is_update_safe(sync_node: bool = False) -> bool:
+ if not sync_node and not is_admin_running() and not is_api_running():
+ return True
+ if sync_node and not is_sync_admin_running():
+ return True
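+ # Containers are running, so defer to the admin's update-safe endpoint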
status, payload = get_request(BLUEPRINT_NAME, 'update-safe')
if status == 'error':
return False
@@ -105,9 +113,7 @@ def is_update_safe() -> bool:
@check_inited
@check_user
-def register_node(name, p2p_ip,
- public_ip, port, domain_name):
-
+def register_node(name, p2p_ip, public_ip, port, domain_name):
if not is_node_inited():
print(TEXTS['node']['not_inited'])
return
@@ -118,13 +124,9 @@ def register_node(name, p2p_ip,
'ip': p2p_ip,
'publicIP': public_ip,
'port': port,
- 'domain_name': domain_name
+ 'domain_name': domain_name,
}
- status, payload = post_request(
- blueprint=BLUEPRINT_NAME,
- method='register',
- json=json_data
- )
+ status, payload = post_request(blueprint=BLUEPRINT_NAME, method='register', json=json_data)
if status == 'ok':
msg = TEXTS['node']['registered']
logger.info(msg)
@@ -137,23 +139,17 @@ def register_node(name, p2p_ip,
@check_not_inited
def init(env_filepath):
- env = get_node_env(env_filepath)
+ env = compose_node_env(env_filepath)
if env is None:
return
- configure_firewall_rules()
+
inited_ok = init_op(env_filepath, env)
if not inited_ok:
- error_exit(
- 'Init operation failed',
- exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR
- )
+ error_exit('Init operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR)
logger.info('Waiting for containers initialization')
time.sleep(TM_INIT_TIMEOUT)
if not is_base_containers_alive():
- error_exit(
- 'Containers are not running',
- exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR
- )
+ error_exit('Containers are not running', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR)
logger.info('Generating resource allocation file ...')
update_resource_allocation(env['ENV_TYPE'])
logger.info('Init procedure finished')
@@ -161,7 +157,7 @@ def init(env_filepath):
@check_not_inited
def restore(backup_path, env_filepath, no_snapshot=False, config_only=False):
- env = get_node_env(env_filepath)
+ env = compose_node_env(env_filepath)
if env is None:
return
save_env_params(env_filepath)
@@ -173,45 +169,24 @@ def restore(backup_path, env_filepath, no_snapshot=False, config_only=False):
restored_ok = restore_op(env, backup_path, config_only=config_only)
if not restored_ok:
- error_exit(
- 'Restore operation failed',
- exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR
- )
+ error_exit('Restore operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR)
time.sleep(RESTORE_SLEEP_TIMEOUT)
logger.info('Generating resource allocation file ...')
update_resource_allocation(env['ENV_TYPE'])
print('Node is restored from backup')
-def init_sync(
- env_filepath: str,
- archive: bool,
- historic_state: bool,
- snapshot_from: str
-) -> None:
- configure_firewall_rules()
- env = get_node_env(env_filepath, sync_node=True)
+def init_sync(env_filepath: str, archive: bool, historic_state: bool, snapshot_from: str) -> None:
+ env = compose_node_env(env_filepath, sync_node=True)
if env is None:
return
- inited_ok = init_sync_op(
- env_filepath,
- env,
- archive,
- historic_state,
- snapshot_from
- )
+ inited_ok = init_sync_op(env_filepath, env, archive, historic_state, snapshot_from)
if not inited_ok:
- error_exit(
- 'Init operation failed',
- exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR
- )
+ error_exit('Init operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR)
logger.info('Waiting for containers initialization')
time.sleep(TM_INIT_TIMEOUT)
if not is_base_containers_alive(sync_node=True):
- error_exit(
- 'Containers are not running',
- exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR
- )
+ error_exit('Containers are not running', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR)
logger.info('Sync node initialized successfully')
@@ -219,8 +194,10 @@ def init_sync(
@check_user
def update_sync(env_filepath: str, unsafe_ok: bool = False) -> None:
logger.info('Node update started')
- configure_firewall_rules()
- env = get_node_env(env_filepath, sync_node=True)
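+ # One-time firewall migration when upgrading a 2.5.0 node to this release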
+ prev_version = get_meta_info().version
+ if (__version__ == 'test' or __version__.startswith('2.6')) and prev_version == '2.5.0':
+ migrate_2_6()
+ env = compose_node_env(env_filepath, sync_node=True)
update_ok = update_sync_op(env_filepath, env)
if update_ok:
logger.info('Waiting for containers initialization')
@@ -235,37 +212,30 @@ def update_sync(env_filepath: str, unsafe_ok: bool = False) -> None:
@check_inited
@check_user
-def repair_sync(
- archive: bool,
- historic_state: bool,
- snapshot_from: str
-) -> None:
-
+def repair_sync(archive: bool, historic_state: bool, snapshot_from: str) -> None:
env_params = extract_env_params(INIT_ENV_FILEPATH, sync_node=True)
schain_name = env_params['SCHAIN_NAME']
repair_sync_op(
schain_name=schain_name,
archive=archive,
historic_state=historic_state,
- snapshot_from=snapshot_from
+ snapshot_from=snapshot_from,
)
logger.info('Schain was started from scratch')
-def get_node_env(
+def compose_node_env(
env_filepath,
inited_node=False,
sync_schains=None,
pull_config_for_schain=None,
- sync_node=False
+ sync_node=False,
+ save: bool = True
):
if env_filepath is not None:
- env_params = extract_env_params(
- env_filepath,
- sync_node=sync_node,
- raise_for_status=True
- )
- save_env_params(env_filepath)
+ env_params = extract_env_params(env_filepath, sync_node=sync_node, raise_for_status=True)
+ if save:
+ save_env_params(env_filepath)
else:
env_params = extract_env_params(INIT_ENV_FILEPATH, sync_node=sync_node)
@@ -275,7 +245,7 @@ def get_node_env(
'SCHAINS_MNT_DIR': mnt_dir,
'FILESTORAGE_MAPPING': FILESTORAGE_MAPPING,
'SKALE_LIB_PATH': SKALE_STATE_DIR,
- **env_params
+ **env_params,
}
if inited_node and not sync_node:
flask_secret_key = get_flask_secret_key()
@@ -294,13 +264,15 @@ def update(env_filepath: str, pull_config_for_schain: str, unsafe_ok: bool = Fal
error_msg = 'Cannot update safely'
error_exit(error_msg, exit_code=CLIExitCodes.UNSAFE_UPDATE)
+ prev_version = get_meta_info().version
+ if (__version__ == 'test' or __version__.startswith('2.6')) and prev_version == '2.5.0':
+ migrate_2_6()
logger.info('Node update started')
- configure_firewall_rules()
- env = get_node_env(
+ env = compose_node_env(
env_filepath,
inited_node=True,
sync_schains=False,
- pull_config_for_schain=pull_config_for_schain
+ pull_config_for_schain=pull_config_for_schain,
)
update_ok = update_op(env_filepath, env)
if update_ok:
@@ -316,11 +288,7 @@ def update(env_filepath: str, pull_config_for_schain: str, unsafe_ok: bool = Fal
def get_node_signature(validator_id):
params = {'validator_id': validator_id}
- status, payload = get_request(
- blueprint=BLUEPRINT_NAME,
- method='signature',
- params=params
- )
+ status, payload = get_request(blueprint=BLUEPRINT_NAME, method='signature', params=params)
if status == 'ok':
return payload['signature']
else:
@@ -333,7 +301,7 @@ def backup(path):
def get_backup_filename():
- time = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
+ time = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
return f'{BACKUP_ARCHIVE_NAME}-{time}.tar.gz'
@@ -380,20 +348,13 @@ def create_backup_archive(backup_filepath):
print('Creating backup archive...')
cli_log_path = CLI_LOG_DATA_PATH
container_log_path = LOG_PATH
- pack_dir(
- SKALE_DIR,
- backup_filepath,
- exclude=(cli_log_path, container_log_path)
- )
+ pack_dir(SKALE_DIR, backup_filepath, exclude=(cli_log_path, container_log_path))
print(f'Backup archive succesfully created {backup_filepath}')
def set_maintenance_mode_on():
print('Setting maintenance mode on...')
- status, payload = post_request(
- blueprint=BLUEPRINT_NAME,
- method='maintenance-on'
- )
+ status, payload = post_request(blueprint=BLUEPRINT_NAME, method='maintenance-on')
if status == 'ok':
msg = TEXTS['node']['maintenance_on']
logger.info(msg)
@@ -406,10 +367,7 @@ def set_maintenance_mode_on():
def set_maintenance_mode_off():
print('Setting maintenance mode off...')
- status, payload = post_request(
- blueprint=BLUEPRINT_NAME,
- method='maintenance-off'
- )
+ status, payload = post_request(blueprint=BLUEPRINT_NAME, method='maintenance-off')
if status == 'ok':
msg = TEXTS['node']['maintenance_off']
logger.info(msg)
@@ -428,13 +386,14 @@ def turn_off(maintenance_on: bool = False, unsafe_ok: bool = False) -> None:
error_exit(error_msg, exit_code=CLIExitCodes.UNSAFE_UPDATE)
if maintenance_on:
set_maintenance_mode_on()
- turn_off_op()
+ env = compose_node_env(SKALE_DIR_ENV_FILEPATH, save=False)
+ turn_off_op(env=env)
@check_inited
@check_user
def turn_on(maintenance_off, sync_schains, env_file):
- env = get_node_env(env_file, inited_node=True, sync_schains=sync_schains)
+ env = compose_node_env(env_file, inited_node=True, sync_schains=sync_schains)
turn_on_op(env)
logger.info('Waiting for containers initialization')
time.sleep(TM_INIT_TIMEOUT)
@@ -449,18 +408,13 @@ def turn_on(maintenance_off, sync_schains, env_file):
def is_base_containers_alive(sync_node: bool = False):
dclient = docker.from_env()
containers = dclient.containers.list()
- skale_containers = list(filter(
- lambda c: c.name.startswith('skale_'), containers
- ))
+ skale_containers = list(filter(lambda c: c.name.startswith('skale_'), containers))
containers_amount = SYNC_BASE_CONTAINERS_AMOUNT if sync_node else BASE_CONTAINERS_AMOUNT
return len(skale_containers) >= containers_amount
def get_node_info_plain():
- status, payload = get_request(
- blueprint=BLUEPRINT_NAME,
- method='info'
- )
+ status, payload = get_request(blueprint=BLUEPRINT_NAME, method='info')
if status == 'ok':
return payload['node_info']
else:
@@ -474,10 +428,7 @@ def get_node_info(format):
elif node_info['status'] == NodeStatuses.NOT_CREATED.value:
print(TEXTS['service']['node_not_registered'])
else:
- print_node_info(
- node_info,
- get_node_status(int(node_info['status']))
- )
+ print_node_info(node_info, get_node_status(int(node_info['status'])))
def get_node_status(status):
@@ -489,11 +440,7 @@ def get_node_status(status):
def set_domain_name(domain_name):
print(f'Setting new domain name: {domain_name}')
status, payload = post_request(
- blueprint=BLUEPRINT_NAME,
- method='set-domain-name',
- json={
- 'domain_name': domain_name
- }
+ blueprint=BLUEPRINT_NAME, method='set-domain-name', json={'domain_name': domain_name}
)
if status == 'ok':
msg = TEXTS['node']['domain_name_changed']
@@ -506,7 +453,7 @@ def set_domain_name(domain_name):
def run_checks(
network: str = 'mainnet',
container_config_path: str = CONTAINER_CONFIG_PATH,
- disk: Optional[str] = None
+ disk: Optional[str] = None,
) -> None:
if not is_node_inited():
print(TEXTS['node']['not_inited'])
@@ -515,11 +462,7 @@ def run_checks(
if disk is None:
env = get_env_config()
disk = env['DISK_MOUNTPOINT']
- failed_checks = run_host_checks(
- disk,
- network,
- container_config_path
- )
+ failed_checks = run_host_checks(disk, network, container_config_path)
if not failed_checks:
print('Requirements checking succesfully finished!')
else:
@@ -527,7 +470,5 @@ def run_checks(
print_failed_requirements_checks(failed_checks)
-def configure_firewall_rules() -> None:
- print('Configuring firewall ...')
- configure_iptables()
- print('Done')
+def configure_firewall_rules(enable_monitoring: bool = False) -> None:
+ configure_nftables(enable_monitoring=enable_monitoring)
diff --git a/node_cli/migrations/__init__.py b/node_cli/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/node_cli/migrations/focal_to_jammy.py b/node_cli/migrations/focal_to_jammy.py
new file mode 100644
index 00000000..eb63b6e2
--- /dev/null
+++ b/node_cli/migrations/focal_to_jammy.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2019 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+
+import logging
+
+from node_cli.core.nftables import NFTablesManager
+from node_cli.utils.helper import get_ssh_port, run_cmd
+
+logger = logging.getLogger(__name__)
+
+
+ALLOWED_INCOMING_TCP_PORTS = [
+ '80', # filestorage
+ '311', # watchdog https
+ '8080', # http
+ '443', # https
+ '53', # dns
+ '3009', # watchdog http
+ '9100' # node exporter
+]
+
+ALLOWED_INCOMING_UDP_PORTS = [
+ '53' # dns
+]
+
+IPTABLES_CHAIN = 'INPUT'
+
+
+class NFTablesCmdFailedError(Exception):
+ pass
+
+
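+# Each remove_* helper probes for the legacy rule with 'iptables -C' and only
+# runs the matching 'iptables -D' when the probe succeeds, keeping the
+# migration idempotent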
+def remove_tcp_rules(ssh_port: int) -> None:
+ tcp_rule_template = 'iptables -{} {} -p tcp -m tcp --dport {} -j ACCEPT'
+ for tcp_port in [*ALLOWED_INCOMING_TCP_PORTS, ssh_port]:
+ check_cmd = tcp_rule_template.format('C', IPTABLES_CHAIN, tcp_port).split(' ')
+ remove_cmd = tcp_rule_template.format('D', IPTABLES_CHAIN, tcp_port).split(' ')
+ result = run_cmd(check_cmd, check_code=False)
+ if result.returncode == 0:
+ run_cmd(remove_cmd)
+
+
+def remove_udp_rules() -> None:
+ udp_rule_template = 'iptables -{} {} -p udp -m udp --dport {} -j ACCEPT'
+ for udp_port in [*ALLOWED_INCOMING_UDP_PORTS]:
+ check_cmd = udp_rule_template.format('C', IPTABLES_CHAIN, udp_port).split(' ')
+ remove_cmd = udp_rule_template.format('D', IPTABLES_CHAIN, udp_port).split(' ')
+ result = run_cmd(check_cmd, check_code=False)
+ if result.returncode == 0:
+ run_cmd(remove_cmd)
+
+
+def remove_loopback_rules() -> None:
+ loopback_rule_template = 'iptables -{} {} -i lo -j ACCEPT'
+ check_cmd = loopback_rule_template.format('C', IPTABLES_CHAIN).split(' ')
+ remove_cmd = loopback_rule_template.format('D', IPTABLES_CHAIN).split(' ')
+ result = run_cmd(check_cmd, check_code=False)
+ if result.returncode == 0:
+ run_cmd(remove_cmd)
+
+
+def remove_icmp_rules() -> None:
+ icmp_rule_template = 'iptables -{} {} -p icmp -m icmp --icmp-type {} -j ACCEPT'
+ for icmp_type in [3, 4, 11]:
+ check_cmd = icmp_rule_template.format('C', IPTABLES_CHAIN, icmp_type).split(' ')
+ remove_cmd = icmp_rule_template.format('D', IPTABLES_CHAIN, icmp_type).split(' ')
+ result = run_cmd(check_cmd, check_code=False)
+ if result.returncode == 0:
+ run_cmd(remove_cmd)
+
+
+def remove_conntrack_rules() -> None:
+ track_rule_template = 'iptables -{} {} -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT'
+ check_cmd = track_rule_template.format('C', IPTABLES_CHAIN).split(' ')
+ remove_cmd = track_rule_template.format('D', IPTABLES_CHAIN).split(' ')
+ result = run_cmd(check_cmd, check_code=False)
+ if result.returncode == 0:
+ run_cmd(remove_cmd)
+
+
+def remove_drop_rules() -> None:
+ drop_rule_template = 'iptables -{} {} -p {} -j DROP'
+ protocols = ['tcp', 'udp']
+ for proto in protocols:
+ check_cmd = drop_rule_template.format('C', IPTABLES_CHAIN, proto).split(' ')
+ remove_cmd = drop_rule_template.format('D', IPTABLES_CHAIN, proto).split(' ')
+ result = run_cmd(check_cmd, check_code=False)
+ if result.returncode == 0:
+ run_cmd(remove_cmd)
+
+
+def remove_old_iptables_rules(ssh_port: int) -> None:
+ remove_drop_rules()
+ remove_conntrack_rules()
+ remove_loopback_rules()
+ remove_udp_rules()
+ remove_tcp_rules(ssh_port)
+ remove_icmp_rules()
+
+
+def migrate() -> None:
+ ssh_port = get_ssh_port()
+ logger.info('Running migration from focal to jammy')
+ remove_old_iptables_rules(ssh_port)
+ logger.info('Flushing nftables rules generated by release upgrade')
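+ # the iptables-nft backend mirrors legacy rules into the nftables
+ # 'ip filter' table, so clear its INPUT chain as well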
+ nft = NFTablesManager(family='ip', table='filter')
+ nft.flush_chain(IPTABLES_CHAIN)
+ logger.info('Migration from focal to jammy completed')
diff --git a/node_cli/operations/__init__.py b/node_cli/operations/__init__.py
index ca1b076d..28e46ff9 100644
--- a/node_cli/operations/__init__.py
+++ b/node_cli/operations/__init__.py
@@ -25,5 +25,6 @@
turn_off as turn_off_op,
turn_on as turn_on_op,
restore as restore_op,
- repair_sync as repair_sync_op
+ repair_sync as repair_sync_op,
+ configure_nftables
)
diff --git a/node_cli/operations/base.py b/node_cli/operations/base.py
index ea5ff162..540f0188 100644
--- a/node_cli/operations/base.py
+++ b/node_cli/operations/base.py
@@ -28,6 +28,7 @@
from node_cli.core.docker_config import configure_docker
from node_cli.core.nginx import generate_nginx_config
+from node_cli.core.nftables import configure_nftables
from node_cli.core.node_options import NodeOptions
from node_cli.core.resources import update_resource_allocation, init_shared_space_volume
@@ -46,7 +47,6 @@
from node_cli.operations.docker_lvmpy import lvmpy_install # noqa
from node_cli.operations.skale_node import download_skale_node, sync_skale_node, update_images
from node_cli.core.checks import CheckType, run_checks as run_host_checks
-from node_cli.core.iptables import configure_iptables
from node_cli.core.schains import update_node_cli_schain_status, cleanup_sync_datadir
from node_cli.utils.docker_utils import (
compose_rm,
@@ -59,6 +59,7 @@
)
from node_cli.utils.meta import get_meta_info, update_meta
from node_cli.utils.print_formatters import print_failed_requirements_checks
+from node_cli.utils.helper import str_to_bool
logger = logging.getLogger(__name__)
@@ -105,12 +106,14 @@ def update(env_filepath: str, env: Dict) -> None:
remove_dynamic_containers()
sync_skale_node()
-
ensure_btrfs_kernel_module_autoloaded()
if env.get('SKIP_DOCKER_CONFIG') != 'True':
configure_docker()
+ enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False'))
+ configure_nftables(enable_monitoring=enable_monitoring)
+
backup_old_contracts()
download_contracts(env)
@@ -141,7 +144,7 @@ def update(env_filepath: str, env: Dict) -> None:
distro.id(),
distro.version()
)
- update_images(env.get('CONTAINER_CONFIGS_DIR') != '')
+ update_images(env=env)
compose_up(env)
return True
@@ -154,6 +157,9 @@ def init(env_filepath: str, env: dict) -> bool:
if env.get('SKIP_DOCKER_CONFIG') != 'True':
configure_docker()
+ enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False'))
+ configure_nftables(enable_monitoring=enable_monitoring)
+
prepare_host(
env_filepath,
env_type=env['ENV_TYPE']
@@ -163,7 +169,6 @@ def init(env_filepath: str, env: dict) -> bool:
configure_filebeat()
configure_flask()
- configure_iptables()
generate_nginx_config()
lvmpy_install(env)
@@ -177,7 +182,7 @@ def init(env_filepath: str, env: dict) -> bool:
distro.version()
)
update_resource_allocation(env_type=env['ENV_TYPE'])
- update_images(env.get('CONTAINER_CONFIGS_DIR') != '')
+ update_images(env=env)
compose_up(env)
return True
@@ -200,6 +205,9 @@ def init_sync(
if env.get('SKIP_DOCKER_CONFIG') != 'True':
configure_docker()
+ enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False'))
+ configure_nftables(enable_monitoring=enable_monitoring)
+
prepare_host(
env_filepath,
env_type=env['ENV_TYPE'],
@@ -233,7 +241,7 @@ def init_sync(
if snapshot_from:
update_node_cli_schain_status(schain_name, snapshot_from=snapshot_from)
- update_images(env.get('CONTAINER_CONFIGS_DIR') != '', sync_node=True)
+ update_images(env=env, sync_node=True)
compose_up(env, sync_node=True)
return True
@@ -252,6 +260,9 @@ def update_sync(env_filepath: str, env: Dict) -> bool:
if env.get('SKIP_DOCKER_CONFIG') != 'True':
configure_docker()
+ enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False'))
+ configure_nftables(enable_monitoring=enable_monitoring)
+
ensure_filestorage_mapping()
backup_old_contracts()
download_contracts(env)
@@ -275,20 +286,20 @@ def update_sync(env_filepath: str, env: Dict) -> bool:
distro.id(),
distro.version()
)
- update_images(env.get('CONTAINER_CONFIGS_DIR') != '', sync_node=True)
+ update_images(env=env, sync_node=True)
compose_up(env, sync_node=True)
return True
-def turn_off():
+def turn_off(env: dict) -> None:
logger.info('Turning off the node...')
- compose_rm()
+ compose_rm(env=env)
remove_dynamic_containers()
logger.info('Node was successfully turned off')
-def turn_on(env):
+def turn_on(env: dict) -> None:
logger.info('Turning on the node...')
update_meta(
VERSION,
@@ -299,6 +310,10 @@ def turn_on(env):
)
if env.get('SKIP_DOCKER_CONFIG') != 'True':
configure_docker()
+
+ enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False'))
+ configure_nftables(enable_monitoring=enable_monitoring)
+
logger.info('Launching containers on the node...')
compose_up(env)
@@ -316,11 +331,14 @@ def restore(env, backup_path, config_only=False):
return False
ensure_btrfs_kernel_module_autoloaded()
+
if env.get('SKIP_DOCKER_CONFIG') != 'True':
configure_docker()
+ enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False'))
+ configure_nftables(enable_monitoring=enable_monitoring)
+
link_env_file()
- configure_iptables()
lvmpy_install(env)
init_shared_space_volume(env['ENV_TYPE'])
diff --git a/node_cli/operations/skale_node.py b/node_cli/operations/skale_node.py
index b3745070..d91e4765 100644
--- a/node_cli/operations/skale_node.py
+++ b/node_cli/operations/skale_node.py
@@ -35,11 +35,12 @@
logger = logging.getLogger(__name__)
-def update_images(local: bool = False, sync_node: bool = False) -> None:
+def update_images(env: dict, sync_node: bool = False) -> None:
+ local = env.get('CONTAINER_CONFIGS_DIR') != ''
if local:
- compose_build(sync_node=sync_node)
+ compose_build(env=env, sync_node=sync_node)
else:
- compose_pull(sync_node=sync_node)
+ compose_pull(env=env, sync_node=sync_node)
def download_skale_node(stream: Optional[str], src: Optional[str]) -> None:
diff --git a/node_cli/utils/docker_utils.py b/node_cli/utils/docker_utils.py
index 2f5e56a8..fe271d79 100644
--- a/node_cli/utils/docker_utils.py
+++ b/node_cli/utils/docker_utils.py
@@ -33,7 +33,6 @@
SYNC_COMPOSE_PATH,
REMOVED_CONTAINERS_FOLDER_PATH,
SGX_CERTIFICATES_DIR_NAME,
- SKALE_DIR,
NGINX_CONTAINER_NAME
)
@@ -247,7 +246,7 @@ def compose_rm(env={}, sync_node: bool = False):
compose_path = get_compose_path(sync_node)
run_cmd(
cmd=(
- 'docker-compose',
+ 'docker', 'compose',
'-f', compose_path,
'down',
'-t', str(COMPOSE_SHUTDOWN_TIMEOUT),
@@ -257,34 +256,30 @@ def compose_rm(env={}, sync_node: bool = False):
logger.info('Compose containers removed')
-def compose_pull(sync_node: bool = False):
+def compose_pull(env: dict, sync_node: bool = False):
logger.info('Pulling compose containers')
compose_path = get_compose_path(sync_node)
run_cmd(
- cmd=('docker-compose', '-f', compose_path, 'pull'),
- env={
- 'SKALE_DIR': SKALE_DIR
- }
+ cmd=('docker', 'compose', '-f', compose_path, 'pull'),
+ env=env
)
-def compose_build(sync_node: bool = False):
+def compose_build(env: dict, sync_node: bool = False):
logger.info('Building compose containers')
compose_path = get_compose_path(sync_node)
run_cmd(
- cmd=('docker-compose', '-f', compose_path, 'build'),
- env={
- 'SKALE_DIR': SKALE_DIR
- }
+ cmd=('docker', 'compose', '-f', compose_path, 'build'),
+ env=env
)
def get_up_compose_cmd(services):
- return ('docker-compose', '-f', COMPOSE_PATH, 'up', '-d', *services)
+ return ('docker', 'compose', '-f', COMPOSE_PATH, 'up', '-d', *services)
def get_up_compose_sync_cmd():
- return ('docker-compose', '-f', SYNC_COMPOSE_PATH, 'up', '-d')
+ return ('docker', 'compose', '-f', SYNC_COMPOSE_PATH, 'up', '-d')
def get_compose_path(sync_node: bool) -> str:
@@ -302,6 +297,7 @@ def compose_up(env, sync_node=False):
if 'SGX_CERTIFICATES_DIR_NAME' not in env:
env['SGX_CERTIFICATES_DIR_NAME'] = SGX_CERTIFICATES_DIR_NAME
+ logger.debug('Launching containers with env %s', env)
run_cmd(cmd=get_up_compose_cmd(BASE_COMPOSE_SERVICES), env=env)
if str_to_bool(env.get('MONITORING_CONTAINERS', 'False')):
logger.info('Running monitoring containers')
@@ -339,6 +335,27 @@ def cleanup_unused_images(dclient=None, ignore=None):
)
+def is_container_running(name: str, dclient: Optional[DockerClient] = None) -> bool:
+ dc = dclient or docker_client()
+ try:
+ container = dc.containers.get(name)
+ return container.status == 'running'
+ except docker.errors.NotFound:
+ return False
+
+
+def is_admin_running(dclient: Optional[DockerClient] = None) -> bool:
+ return is_container_running(name='skale_admin', dclient=dclient)
+
+
+def is_api_running(dclient: Optional[DockerClient] = None) -> bool:
+ return is_container_running(name='skale_api', dclient=dclient)
+
+
+def is_sync_admin_running(dclient: Optional[DockerClient] = None) -> bool:
+ return is_container_running(name='skale_sync_admin', dclient=dclient)
+
+
def system_prune():
logger.info('Removing dangling docker artifacts')
cmd = ['docker', 'system', 'prune', '-f']
diff --git a/node_cli/utils/helper.py b/node_cli/utils/helper.py
index 71e559cf..f479093e 100644
--- a/node_cli/utils/helper.py
+++ b/node_cli/utils/helper.py
@@ -21,6 +21,7 @@
import json
import os
import re
+import socket
import sys
import uuid
from urllib.parse import urlparse
@@ -54,7 +55,7 @@
)
from node_cli.configs import (
TEXT_FILE, ADMIN_HOST, ADMIN_PORT, HIDE_STREAM_LOG, GLOBAL_SKALE_DIR,
- GLOBAL_SKALE_CONF_FILEPATH
+ GLOBAL_SKALE_CONF_FILEPATH, DEFAULT_SSH_PORT
)
from node_cli.configs.routes import get_route
from node_cli.utils.global_config import read_g_config, get_system_user
@@ -66,7 +67,6 @@
logger = logging.getLogger(__name__)
-
HOST = f'http://{ADMIN_HOST}:{ADMIN_PORT}'
DEFAULT_ERROR_DATA = {
@@ -414,3 +414,36 @@ def get_tmp_path(path: str) -> str:
base, ext = os.path.splitext(path)
salt = uuid.uuid4().hex[:5]
return base + salt + '.tmp' + ext
+
+
+def get_ssh_port(ssh_service_name='ssh'):
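+ # Resolve the ssh port from the services database, falling back to
+ # DEFAULT_SSH_PORT when the lookup is unavailable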
+ try:
+ return socket.getservbyname(ssh_service_name)
+ except OSError:
+ logger.exception('Cannot get ssh service port')
+ return DEFAULT_SSH_PORT
+
+
+def remove_between_brackets(text: str, pattern: str) -> str:
+ """ Remove all lines between brackets where the bracket line starts with the pattern """
+ result = []
+ skip = 0
+ found_pattern = False
+
+ lines = text.split('\n')
+ for line in lines:
+ if pattern in line:
+ found_pattern = True
+
+ if found_pattern:
+ if '{' in line:
+ skip += line.count('{')
+ if '}' in line:
+ skip -= line.count('}')
+ if skip == 0:
+ found_pattern = False
+ continue
+ else:
+ result.append(line)
+
+ return '\n'.join(result)
diff --git a/scripts/run_nftables_test.sh b/scripts/run_nftables_test.sh
new file mode 100755
index 00000000..e4bf8563
--- /dev/null
+++ b/scripts/run_nftables_test.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+set -ea
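+# PROJECT_DIR is expected to be set in the environment by the caller, as in run_tests.sh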
+
+docker rm -f ncli-tester || true
+docker build . -t ncli-tester
+docker run \
+ -e LVMPY_LOG_DIR="$PROJECT_DIR/tests/" \
+ -e HIDE_STREAM_LOG=true \
+ -e TEST_HOME_DIR="$PROJECT_DIR/tests/" \
+ -e GLOBAL_SKALE_DIR="$PROJECT_DIR/tests/etc/skale" \
+ -e DOTENV_FILEPATH='tests/test-env' \
+ --cap-add=NET_ADMIN --cap-add=NET_RAW \
+ --name ncli-tester ncli-tester py.test tests/core/migration_test.py tests/core/nftables_test.py "$@"
+
diff --git a/scripts/run_tests.sh b/scripts/run_tests.sh
index edc7fa73..97676b9a 100755
--- a/scripts/run_tests.sh
+++ b/scripts/run_tests.sh
@@ -8,4 +8,4 @@ LVMPY_LOG_DIR="$PROJECT_DIR/tests/" \
TEST_HOME_DIR="$PROJECT_DIR/tests/" \
GLOBAL_SKALE_DIR="$PROJECT_DIR/tests/etc/skale" \
DOTENV_FILEPATH='tests/test-env' \
- py.test --cov=$PROJECT_DIR/ tests/ $@
+ py.test --cov=$PROJECT_DIR/ --ignore=tests/core/nftables_test.py --ignore=tests/core/migration_test.py tests $@
diff --git a/setup.py b/setup.py
index 4cefd5da..f335705d 100644
--- a/setup.py
+++ b/setup.py
@@ -20,13 +20,13 @@ def find_version(*file_paths):
extras_require = {
'linter': [
- "flake8==6.0.0",
+ "flake8==7.1.1",
"isort>=4.2.15,<5.10.2",
],
'dev': [
"bumpversion==0.6.0",
- "pytest==7.2.2",
- "pytest-cov==4.0.0",
+ "pytest==8.3.2",
+ "pytest-cov==5.0.0",
"twine==4.0.2",
"mock==4.0.3",
"freezegun==1.2.2"
@@ -50,13 +50,13 @@ def find_version(*file_paths):
author_email='support@skalelabs.com',
url='https://github.com/skalenetwork/node-cli',
install_requires=[
- "click==8.1.3",
+ "click==8.1.7",
"PyInstaller==5.12.0",
- "distro==1.4.0",
+ "distro==1.9.0",
"docker==6.0.1",
"texttable==1.6.7",
"python-dateutil==2.8.2",
- "Jinja2==3.1.2",
+ "Jinja2==3.1.4",
"psutil==5.9.4",
"python-dotenv==0.21.0",
"terminaltables==3.1.10",
@@ -64,13 +64,12 @@ def find_version(*file_paths):
"GitPython==3.1.41",
"packaging==23.0",
"python-debian==0.1.49",
- "python-iptables==1.0.1",
"PyYAML==6.0",
- "pyOpenSSL==22.0.0",
- "MarkupSafe==2.1.1",
+ "pyOpenSSL==24.2.1",
+ "MarkupSafe==3.0.2",
'Flask==2.3.3',
'itsdangerous==2.1.2',
- "cryptography==37.0.2",
+ "cryptography==42.0.4",
"filelock==3.0.12",
'sh==1.14.2',
'python-crontab==2.6.0'
diff --git a/tests/cli/node_test.py b/tests/cli/node_test.py
index 9c86057c..7e206db1 100644
--- a/tests/cli/node_test.py
+++ b/tests/cli/node_test.py
@@ -40,12 +40,12 @@
)
from node_cli.utils.exit_codes import CLIExitCodes
from node_cli.utils.helper import init_default_logger
+from node_cli.utils.meta import CliMeta
from tests.helper import (
response_mock,
run_command,
run_command_mock,
- safe_update_api_response,
subprocess_run_mock,
)
from tests.resources_test import BIG_DISK_SIZE
@@ -83,8 +83,7 @@ def test_register_node_with_error(resource_alloc, mocked_g_config):
['--name', 'test-node2', '--ip', '0.0.0.0', '--port', '80', '-d', 'skale.test'],
)
assert result.exit_code == 3
- assert (
- result.output == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n') # noqa
+ assert (result.output == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n') # noqa
def test_register_node_with_prompted_ip(resource_alloc, mocked_g_config):
@@ -98,7 +97,7 @@ def test_register_node_with_prompted_ip(resource_alloc, mocked_g_config):
input='0.0.0.0\n',
)
assert result.exit_code == 0
- assert result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n' # noqa
+ assert (result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n') # noqa
def test_register_node_with_default_port(resource_alloc, mocked_g_config):
@@ -112,7 +111,7 @@ def test_register_node_with_default_port(resource_alloc, mocked_g_config):
input='0.0.0.0\n',
)
assert result.exit_code == 0
- assert result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n' # noqa
+ assert (result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n') # noqa
def test_register_with_no_alloc(mocked_g_config):
@@ -125,7 +124,7 @@ def test_register_with_no_alloc(mocked_g_config):
input='0.0.0.0\n',
)
assert result.exit_code == 8
- assert result.output == f"Enter node public IP: 0.0.0.0\nCommand failed with following errors:\n--------------------------------------------------\nNode hasn't been inited before.\nYou should run < skale node init >\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n" # noqa
+ assert (result.output == f"Enter node public IP: 0.0.0.0\nCommand failed with following errors:\n--------------------------------------------------\nNode hasn't been inited before.\nYou should run < skale node init >\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n") # noqa
def test_node_info_node_info():
@@ -150,7 +149,7 @@ def test_node_info_node_info():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Active\n--------------------------------------------------\n' # noqa
+ assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Active\n--------------------------------------------------\n') # noqa
def test_node_info_node_info_not_created():
@@ -200,7 +199,7 @@ def test_node_info_node_info_frozen():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Frozen\n--------------------------------------------------\n' # noqa
+ assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Frozen\n--------------------------------------------------\n') # noqa
def test_node_info_node_info_left():
@@ -225,7 +224,7 @@ def test_node_info_node_info_left():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Left\n--------------------------------------------------\n' # noqa
+ assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Left\n--------------------------------------------------\n') # noqa
def test_node_info_node_info_leaving():
@@ -250,7 +249,7 @@ def test_node_info_node_info_leaving():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Leaving\n--------------------------------------------------\n' # noqa
+ assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Leaving\n--------------------------------------------------\n') # noqa
def test_node_info_node_info_in_maintenance():
@@ -275,7 +274,7 @@ def test_node_info_node_info_in_maintenance():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: In Maintenance\n--------------------------------------------------\n' # noqa
+ assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: In Maintenance\n--------------------------------------------------\n') # noqa
def test_node_signature():
@@ -306,7 +305,10 @@ def test_restore(mocked_g_config):
'subprocess.run', new=subprocess_run_mock
), patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), patch(
'node_cli.utils.decorators.is_node_inited', return_value=False
- ):
+ ), patch(
+ 'node_cli.core.node.get_meta_info',
+ return_value=CliMeta(version='2.4.0', config_stream='3.0.2'),
+ ), patch('node_cli.operations.base.configure_nftables'):
result = run_command(restore_node, [backup_path, './tests/test-env'])
assert result.exit_code == 0
assert 'Node is restored from backup\n' in result.output # noqa
@@ -325,7 +327,10 @@ def test_restore_no_snapshot(mocked_g_config):
'subprocess.run', new=subprocess_run_mock
), patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), patch(
'node_cli.utils.decorators.is_node_inited', return_value=False
- ):
+ ), patch(
+ 'node_cli.core.node.get_meta_info',
+ return_value=CliMeta(version='2.4.0', config_stream='3.0.2'),
+ ), patch('node_cli.operations.base.configure_nftables'):
result = run_command(restore_node, [backup_path, './tests/test-env', '--no-snapshot'])
assert result.exit_code == 0
assert 'Node is restored from backup\n' in result.output # noqa
@@ -362,8 +367,19 @@ def test_turn_off_maintenance_on(mocked_g_config):
with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch(
'node_cli.core.node.turn_off_op'
), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ result = run_command_mock(
+ 'node_cli.utils.helper.requests.post',
+ resp_mock,
+ _turn_off,
+ ['--maintenance-on', '--yes'],
+ )
+ assert (
+ result.output
+ == 'Setting maintenance mode on...\nNode is successfully set in maintenance mode\n'
+ ) # noqa
+ assert result.exit_code == 0
with mock.patch(
- 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response()
+ 'node_cli.utils.docker_utils.is_container_running', return_value=True
):
result = run_command_mock(
'node_cli.utils.helper.requests.post',
@@ -371,19 +387,8 @@ def test_turn_off_maintenance_on(mocked_g_config):
_turn_off,
['--maintenance-on', '--yes'],
)
- assert (
- result.output
- == 'Setting maintenance mode on...\nNode is successfully set in maintenance mode\n'
- ) # noqa
- assert result.exit_code == 0
- result = run_command_mock(
- 'node_cli.utils.helper.requests.post',
- resp_mock,
- _turn_off,
- ['--maintenance-on', '--yes'],
- )
- assert 'Cannot turn off safely' in result.output
- assert result.exit_code == CLIExitCodes.UNSAFE_UPDATE
+ assert 'Cannot turn off safely' in result.output
+ assert result.exit_code == CLIExitCodes.UNSAFE_UPDATE
def test_turn_on_maintenance_off(mocked_g_config):
@@ -392,7 +397,7 @@ def test_turn_on_maintenance_off(mocked_g_config):
'node_cli.core.node.get_flask_secret_key'
), mock.patch('node_cli.core.node.turn_on_op'), mock.patch(
'node_cli.core.node.is_base_containers_alive'
- ), mock.patch('node_cli.core.node.is_node_inited', return_value=True):
+ ), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
result = run_command_mock(
'node_cli.utils.helper.requests.post',
resp_mock,
@@ -424,14 +429,14 @@ def test_set_domain_name():
def test_node_version(meta_file_v2):
- result = run_command(version)
- assert result.exit_code == 0
- assert result.output == '--------------------------------------------------\nVersion: 0.1.1\nConfig Stream: develop\nLvmpy stream: 1.1.2\n--------------------------------------------------\n' # noqa
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ result = run_command(version)
+ assert result.exit_code == 0
+ assert (result.output == '--------------------------------------------------\nVersion: 0.1.1\nConfig Stream: develop\nLvmpy stream: 1.1.2\n--------------------------------------------------\n') # noqa
- result = run_command(version, ['--json'])
- print(repr(result.output))
- assert result.exit_code == 0
- assert (
- result.output
- == "{'version': '0.1.1', 'config_stream': 'develop', 'docker_lvmpy_stream': '1.1.2'}\n"
- ) # noqa
+ result = run_command(version, ['--json'])
+ assert result.exit_code == 0
+ assert (
+ result.output
+ == "{'version': '0.1.1', 'config_stream': 'develop', 'docker_lvmpy_stream': '1.1.2'}\n"
+ ) # noqa
diff --git a/tests/cli/sync_node_test.py b/tests/cli/sync_node_test.py
index 4f0517a5..360b1b5e 100644
--- a/tests/cli/sync_node_test.py
+++ b/tests/cli/sync_node_test.py
@@ -22,6 +22,7 @@
import mock
import logging
+from node_cli.cli import __version__
from node_cli.configs import SKALE_DIR, NODE_DATA_PATH
from node_cli.core.node_options import NodeOptions
from node_cli.cli.sync_node import _init_sync, _update_sync
@@ -40,7 +41,7 @@ def test_init_sync(mocked_g_config):
'node_cli.core.node.init_sync_op'
), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
- ), mock.patch('node_cli.core.node.configure_firewall_rules'), mock.patch(
+ ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
'node_cli.utils.decorators.is_node_inited', return_value=False
):
result = run_command(_init_sync, ['./tests/test-env'])
@@ -72,7 +73,7 @@ def test_init_sync_archive(mocked_g_config, clean_node_options):
'node_cli.operations.base.update_images'
), mock.patch('node_cli.operations.base.compose_up'), mock.patch(
'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
- ), mock.patch('node_cli.core.node.configure_firewall_rules'), mock.patch(
+ ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
'node_cli.utils.decorators.is_node_inited', return_value=False
):
result = run_command(
@@ -93,7 +94,7 @@ def test_init_sync_historic_state_fail(mocked_g_config, clean_node_options):
'node_cli.core.node.init_sync_op'
), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
- ), mock.patch('node_cli.core.node.configure_firewall_rules'), mock.patch(
+ ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
'node_cli.utils.decorators.is_node_inited', return_value=False
):
result = run_command(_init_sync, ['./tests/test-env', '--historic-state'])
@@ -108,8 +109,10 @@ def test_update_sync(mocked_g_config):
'node_cli.core.node.update_sync_op'
), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
- ), mock.patch('node_cli.core.node.configure_firewall_rules'), mock.patch(
+ ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
'node_cli.utils.decorators.is_node_inited', return_value=True
+ ), mock.patch(
+ 'node_cli.core.node.get_meta_info', return_value={'version': __version__}
):
result = run_command(_update_sync, ['./tests/test-env', '--yes'])
assert result.exit_code == 0
diff --git a/tests/core/core_checks_test.py b/tests/core/core_checks_test.py
index 6e75bbdd..921a46ec 100644
--- a/tests/core/core_checks_test.py
+++ b/tests/core/core_checks_test.py
@@ -346,9 +346,9 @@ def test_get_checks(requirements_data):
disk = 'test-disk'
checkers = get_all_checkers(disk, requirements_data)
checks = get_checks(checkers)
- assert len(checks) == 16
+ assert len(checks) == 15
checks = get_checks(checkers, check_type=CheckType.PREINSTALL)
- assert len(checks) == 14
+ assert len(checks) == 13
checks = get_checks(checkers, check_type=CheckType.POSTINSTALL)
assert len(checks) == 2
diff --git a/tests/core/core_node_test.py b/tests/core/core_node_test.py
index b945f96f..f79c6fa3 100644
--- a/tests/core/core_node_test.py
+++ b/tests/core/core_node_test.py
@@ -13,6 +13,7 @@
from node_cli.configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH
from node_cli.core.node import BASE_CONTAINERS_AMOUNT, is_base_containers_alive
from node_cli.core.node import init, pack_dir, update, is_update_safe, repair_sync
+from node_cli.utils.meta import CliMeta
from tests.helper import response_mock, safe_update_api_response, subprocess_run_mock
from tests.resources_test import BIG_DISK_SIZE
@@ -145,7 +146,7 @@ def test_init_node(no_resource_file): # todo: write new init node test
'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
), mock.patch('node_cli.core.host.prepare_host'), mock.patch(
'node_cli.core.host.init_data_dir'
- ), mock.patch('node_cli.core.node.configure_firewall_rules'), mock.patch(
+ ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
'node_cli.core.node.init_op'
), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
'node_cli.utils.helper.post_request', resp_mock
@@ -162,29 +163,50 @@ def test_update_node(mocked_g_config, resource_file):
'node_cli.core.node.update_op'
), mock.patch('node_cli.core.node.get_flask_secret_key'), mock.patch(
'node_cli.core.node.save_env_params'
- ), mock.patch('node_cli.core.node.configure_firewall_rules'), mock.patch(
+ ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
'node_cli.core.host.prepare_host'
), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
'node_cli.utils.helper.post_request', resp_mock
), mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), mock.patch(
'node_cli.core.host.init_data_dir'
+ ), mock.patch(
+ 'node_cli.core.node.get_meta_info',
+ return_value=CliMeta(
+ version='2.6.0', config_stream='3.0.2'
+ )
):
- with mock.patch(
- 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response()
- ): # noqa
+ with mock.patch('node_cli.utils.helper.requests.get', return_value=safe_update_api_response()):  # noqa
result = update(env_filepath, pull_config_for_schain=None)
assert result is None
def test_is_update_safe():
- assert not is_update_safe()
+ assert is_update_safe()
+ assert is_update_safe(sync_node=True)
+
+ with mock.patch('node_cli.core.node.is_admin_running', return_value=True):
+ with mock.patch('node_cli.core.node.is_api_running', return_value=True):
+ assert not is_update_safe()
+ assert is_update_safe(sync_node=True)
+
+ with mock.patch('node_cli.core.node.is_sync_admin_running', return_value=True):
+ assert is_update_safe()
+ assert not is_update_safe(sync_node=True)
+
+ with mock.patch('node_cli.utils.docker_utils.is_container_running', return_value=True):
+ with mock.patch(
+ 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response()
+ ):
+ assert is_update_safe()
+
with mock.patch('node_cli.utils.helper.requests.get', return_value=safe_update_api_response()):
assert is_update_safe()
- with mock.patch(
- 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response(safe=False)
- ):
- assert not is_update_safe()
+ with mock.patch('node_cli.utils.docker_utils.is_container_running', return_value=True):
+ with mock.patch(
+ 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response(safe=False)
+ ):
+ assert not is_update_safe()
def test_repair_sync(tmp_sync_datadir, mocked_g_config, resource_file):
diff --git a/tests/core/iptables_test.py b/tests/core/iptables_test.py
index 03ae7c31..2926b917 100644
--- a/tests/core/iptables_test.py
+++ b/tests/core/iptables_test.py
@@ -2,7 +2,7 @@
import mock
-from node_cli.core.iptables import allow_ssh, get_ssh_port
+from node_cli.utils.helper import get_ssh_port
def test_get_ssh_port():
@@ -10,9 +10,3 @@ def test_get_ssh_port():
assert get_ssh_port('http') == 80
with mock.patch.object(socket, 'getservbyname', side_effect=OSError):
assert get_ssh_port() == 22
-
-
-def test_allow_ssh():
- chain = mock.Mock()
- chain.rules = []
- allow_ssh(chain)
diff --git a/tests/core/migration_test.py b/tests/core/migration_test.py
new file mode 100644
index 00000000..c3f20d57
--- /dev/null
+++ b/tests/core/migration_test.py
@@ -0,0 +1,48 @@
+import pytest
+
+from node_cli.migrations.focal_to_jammy import migrate
+
+from node_cli.utils.helper import run_cmd
+
+CUSTOM_CHAIN_NAME = 'TEST'
+
+
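+# Recreate a typical pre-migration ruleset: SKALE-managed INPUT rules plus one
+# unrelated custom chain that the migration must leave untouched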
+def add_base_rules():
+ run_cmd(f'iptables -N {CUSTOM_CHAIN_NAME}'.split(' '))
+ run_cmd('iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT'.split(' '))
+ run_cmd('iptables -A INPUT -i lo -j ACCEPT'.split(' '))
+ run_cmd('iptables -A INPUT -p tcp --dport 22 -j ACCEPT'.split(' '))
+ run_cmd('iptables -A INPUT -p tcp --dport 8080 -j ACCEPT'.split(' '))
+ run_cmd('iptables -A INPUT -p tcp --dport 443 -j ACCEPT'.split(' '))
+ run_cmd('iptables -A INPUT -p tcp --dport 53 -j ACCEPT'.split(' '))
+ run_cmd('iptables -A INPUT -p udp --dport 53 -j ACCEPT'.split(' '))
+ run_cmd('iptables -A INPUT -p tcp --dport 3009 -j ACCEPT'.split(' '))
+ # non skale related rule
+ run_cmd(f'iptables -A {CUSTOM_CHAIN_NAME} -p tcp --dport 2222 -j ACCEPT'.split(' '))
+ run_cmd('iptables -A INPUT -p tcp --dport 9100 -j ACCEPT'.split(' '))
+ run_cmd('iptables -A INPUT -p tcp -j DROP'.split(' '))
+ run_cmd('iptables -A INPUT -p udp -j DROP'.split(' '))
+ run_cmd('iptables -I INPUT -p icmp --icmp-type destination-unreachable -j ACCEPT'.split(' '))
+ run_cmd('iptables -I INPUT -p icmp --icmp-type source-quench -j ACCEPT'.split(' '))
+ run_cmd('iptables -I INPUT -p icmp --icmp-type time-exceeded -j ACCEPT'.split(' '))
+
+
+@pytest.fixture
+def base_rules():
+ try:
+ add_base_rules()
+ yield
+ finally:
+ # flush all rules, then delete the custom chain so repeated runs start clean
+ run_cmd(['iptables', '-F'])
+ run_cmd(['iptables', '-X', CUSTOM_CHAIN_NAME])
+
+
+def test_migration(base_rules):
+ migrate()
+ res = run_cmd(['iptables', '-S'])
+ output = res.stdout.decode('utf-8')
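+ # only the chain policies and the non-SKALE custom chain should survive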
+ assert output == f'-P INPUT ACCEPT\n-P FORWARD ACCEPT\n-P OUTPUT ACCEPT\n-N {CUSTOM_CHAIN_NAME}\n-A {CUSTOM_CHAIN_NAME} -p tcp -m tcp --dport 2222 -j ACCEPT\n' # noqa
diff --git a/tests/core/nftables_test.py b/tests/core/nftables_test.py
new file mode 100644
index 00000000..e63e8d81
--- /dev/null
+++ b/tests/core/nftables_test.py
@@ -0,0 +1,144 @@
+import pytest
+from unittest.mock import Mock, patch
+import json
+import nftables
+
+
+from node_cli.core.nftables import NFTablesManager, Rule
+
+
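+# These tests drive a real nftables ruleset, so they need NET_ADMIN privileges;
+# run_nftables_test.sh runs them in a container with the required capabilities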
+@pytest.fixture(scope='module')
+def nft_manager():
+ """Returns a NFTablesManager instance"""
+ manager = NFTablesManager(family='inet', table='filter')
+ try:
+ yield manager
+ finally:
+ manager.flush()
+
+
+@pytest.fixture
+def mock_nft_output():
+ """Fixture for mock nftables output"""
+ return {
+ 'nftables': [
+ {'chain': {'family': 'inet', 'table': 'filter', 'name': 'INPUT', 'handle': 1}},
+ {
+ 'rule': {
+ 'family': 'inet',
+ 'table': 'filter',
+ 'chain': 'INPUT',
+ 'handle': 2,
+ 'expr': [
+ {
+ 'match': {
+ 'left': {'payload': {'protocol': 'tcp', 'field': 'dport'}},
+ 'op': '==',
+ 'right': 80,
+ }
+ },
+ {'accept': None},
+ ],
+ }
+ },
+ ]
+ }
+
+
+def test_init(nft_manager):
+ """Test initialization"""
+ assert nft_manager.family == 'inet'
+ assert nft_manager.table == 'filter'
+ assert isinstance(nft_manager.nft, nftables.Nftables)
+
+
+@patch('nftables.Nftables.json_cmd')
+def test_execute_cmd_success(mock_json_cmd, nft_manager):
+ """Test successful command execution"""
+ mock_json_cmd.return_value = (0, '', '')
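+ # json_cmd returns a (rc, output, error) tuple; rc == 0 indicates success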
+ cmd = {'nftables': [{'add': {'table': {'family': 'inet', 'name': 'filter'}}}]}
+
+ nft_manager.execute_cmd(cmd)
+ mock_json_cmd.assert_called_once_with(cmd)
+
+
+@patch('nftables.Nftables.json_cmd')
+def test_execute_cmd_failure(mock_json_cmd, nft_manager):
+ """Test command execution failure"""
+ mock_json_cmd.return_value = (1, '', 'Error message')
+ cmd = {'nftables': [{'add': {'table': {'family': 'inet', 'name': 'filter'}}}]}
+
+ with pytest.raises(Exception) as exc_info:
+ nft_manager.execute_cmd(cmd)
+ assert 'Command failed: Error message' in str(exc_info.value)
+
+
+@patch('nftables.Nftables.cmd')
+def test_get_chains(mock_cmd, nft_manager, mock_nft_output):
+ """Test getting chains"""
+ mock_cmd.return_value = (0, json.dumps(mock_nft_output), '')
+
+ chains = nft_manager.get_chains()
+ assert 'INPUT' in chains
+ mock_cmd.assert_called_once_with('list chains inet')
+
+
+@patch('nftables.Nftables.cmd')
+def test_chain_exists(mock_cmd, nft_manager, mock_nft_output):
+ """Test chain existence check"""
+ mock_cmd.return_value = (0, json.dumps(mock_nft_output), '')
+
+ assert nft_manager.chain_exists('INPUT')
+ assert not nft_manager.chain_exists('nonexistent')
+
+
+@patch.object(NFTablesManager, 'execute_cmd')
+@patch.object(NFTablesManager, 'chain_exists')
+def test_create_chain_if_not_exists(mock_exists, mock_execute, nft_manager):
+ """Test chain creation"""
+ mock_exists.return_value = False
+
+ nft_manager.create_chain_if_not_exists('INPUT', 'input')
+ mock_execute.assert_called_once()
+
+
+@pytest.mark.parametrize(
+ 'rule_data',
+ [
+ {'chain': 'INPUT', 'protocol': 'tcp', 'port': 80, 'action': 'accept'},
+ {'chain': 'INPUT', 'protocol': 'udp', 'port': 53, 'action': 'accept'},
+ {'chain': 'INPUT', 'protocol': 'icmp', 'icmp_type': 'echo-request', 'action': 'accept'},
+ ],
+)
+@patch.object(NFTablesManager, 'execute_cmd')
+@patch.object(NFTablesManager, 'rule_exists')
+def test_add_rule_if_not_exists(mock_exists, mock_execute, nft_manager, rule_data):
+ """Test rule addition with different types"""
+ mock_exists.return_value = False
+
+ rule = Rule(**rule_data)
+ nft_manager.add_rule_if_not_exists(rule)
+ mock_execute.assert_called_once()
+
+
+@patch.object(NFTablesManager, 'execute_cmd')
+def test_setup_firewall(mock_execute, nft_manager):
+ """Test complete firewall setup"""
+ with patch.multiple(
+ NFTablesManager,
+ table_exists=Mock(return_value=False),
+ chain_exists=Mock(return_value=False),
+ rule_exists=Mock(return_value=False),
+ ):
+ nft_manager.setup_firewall()
+ assert mock_execute.called
+
+
+def test_invalid_protocol(nft_manager):
+ """Test adding rule with invalid protocol"""
+ rule = Rule(chain='INPUT', protocol='invalid', port=80)
+ with pytest.raises(Exception):
+ nft_manager.add_rule_if_not_exists(rule)