From c46291b5eecb29f2a62bf7210f6c9fc961b8c223 Mon Sep 17 00:00:00 2001 From: Smruti Ranjan Senapati Date: Thu, 26 Sep 2024 22:00:13 +0530 Subject: [PATCH] refactor: removes v1 clients --- README.md | 29 +- rapyuta_io/__init__.py | 5 +- rapyuta_io/clients/__init__.py | 4 +- rapyuta_io/clients/catalog_client.py | 141 +-- rapyuta_io/clients/common_models.py | 81 -- rapyuta_io/clients/core_api_client.py | 169 +-- rapyuta_io/clients/deployment.py | 296 ------ rapyuta_io/clients/device.py | 49 +- rapyuta_io/clients/native_network.py | 255 ----- rapyuta_io/clients/package.py | 967 ------------------ rapyuta_io/clients/persistent_volumes.py | 348 ------- rapyuta_io/clients/plan.py | 124 --- rapyuta_io/clients/project.py | 70 +- rapyuta_io/clients/provision_client.py | 93 -- rapyuta_io/clients/routed_network.py | 153 --- rapyuta_io/clients/secret.py | 85 +- rapyuta_io/clients/static_route.py | 49 - rapyuta_io/clients/v2_client.py | 68 ++ rapyuta_io/rio_client.py | 93 +- rapyuta_io/utils/settings.py | 3 +- sdk_test/config.json.example | 19 +- sdk_test/config.py | 17 +- sdk_test/coreapi/query_metrics_test.py | 94 -- sdk_test/coreapi/secret_test.py | 40 - sdk_test/coreapi/usergroup_test.py | 5 - .../{project_test.py => v2_project_test.py} | 25 +- sdk_test/coreapi/v2_secret_test.py | 30 + sdk_test/device/deployment_test.py | 66 -- sdk_test/device/topic_test.py | 163 --- sdk_test/jsons/builds/listener.json | 9 - sdk_test/jsons/builds/pingpong.json | 16 - sdk_test/jsons/builds/talker-noetic.json | 10 - sdk_test/jsons/builds/talker.json | 9 - .../jsons/builds/throttle-latch-build.json | 15 - sdk_test/jsons/packages/cloud-non-ros.json | 53 - sdk_test/jsons/packages/cloud-transform.json | 59 -- .../packages/delete-package-using-client.json | 53 - sdk_test/jsons/packages/delete-package.json | 53 - sdk_test/jsons/packages/device-volume.json | 44 - ...ast-talker-device-docker-with-rosbags.json | 82 -- .../inbound-incoming-scoped-targeted.json | 59 -- 
sdk_test/jsons/packages/latching-pkg.json | 66 -- sdk_test/jsons/packages/listener-docker.json | 55 - sdk_test/jsons/packages/listener.json | 43 - .../jsons/packages/nginx-multi-component.json | 92 -- .../packages/nginx-single-component.json | 57 -- .../jsons/packages/no-scoped-targeted.json | 82 -- sdk_test/jsons/packages/pv-reader.json | 50 - .../jsons/packages/rosbag-talker-cloud.json | 59 -- sdk_test/jsons/packages/scoped-cloud.json | 65 -- sdk_test/jsons/packages/scoped-targeted.json | 95 -- .../jsons/packages/talker-cloud-device.json | 77 -- sdk_test/jsons/packages/talker-cloud.json | 67 -- sdk_test/jsons/packages/talker-docker.json | 58 -- sdk_test/jsons/packages/talker-noetic.json | 56 - sdk_test/jsons/packages/talker.json | 56 - sdk_test/jsons/packages/throttling-pkg.json | 66 -- sdk_test/openshift/sdk-config.sample.yaml | 21 +- sdk_test/package/__init__.py | 0 sdk_test/package/cloud_non_ros_test.py | 81 -- .../package/cloud_scoped_targeted_test.py | 114 --- sdk_test/package/cloud_transform_test.py | 99 -- sdk_test/package/configuration_tests.py | 77 -- sdk_test/package/create_package_test.py | 39 - sdk_test/package/delete_package_test.py | 33 - sdk_test/package/deployment_test.py | 90 -- sdk_test/package/get_all_package_test.py | 71 -- .../inbound_incoming_scoped_targeted_test.py | 55 - sdk_test/package/native_network_tests.py | 198 ---- sdk_test/package/noetic_test.py | 45 - sdk_test/package/package_test.py | 151 --- sdk_test/package/rosbag_test.py | 524 ---------- sdk_test/package/routed_networks_tests.py | 158 --- sdk_test/package/static_route_test.py | 100 -- .../transformer_with_docker_device_test.py | 86 -- sdk_test/package/volume_test.py | 89 -- sdk_test/run_rio_sdk_test.py | 9 +- sdk_test/util.py | 14 +- tests/user_test.py | 1 - 79 files changed, 326 insertions(+), 6776 deletions(-) delete mode 100644 rapyuta_io/clients/common_models.py delete mode 100644 rapyuta_io/clients/deployment.py delete mode 100644 rapyuta_io/clients/native_network.py 
delete mode 100644 rapyuta_io/clients/package.py delete mode 100644 rapyuta_io/clients/persistent_volumes.py delete mode 100644 rapyuta_io/clients/plan.py delete mode 100644 rapyuta_io/clients/provision_client.py delete mode 100644 rapyuta_io/clients/routed_network.py delete mode 100644 rapyuta_io/clients/static_route.py create mode 100644 rapyuta_io/clients/v2_client.py delete mode 100644 sdk_test/coreapi/query_metrics_test.py delete mode 100644 sdk_test/coreapi/secret_test.py rename sdk_test/coreapi/{project_test.py => v2_project_test.py} (50%) create mode 100644 sdk_test/coreapi/v2_secret_test.py delete mode 100644 sdk_test/device/deployment_test.py delete mode 100644 sdk_test/device/topic_test.py delete mode 100644 sdk_test/jsons/builds/listener.json delete mode 100644 sdk_test/jsons/builds/pingpong.json delete mode 100644 sdk_test/jsons/builds/talker-noetic.json delete mode 100644 sdk_test/jsons/builds/talker.json delete mode 100644 sdk_test/jsons/builds/throttle-latch-build.json delete mode 100644 sdk_test/jsons/packages/cloud-non-ros.json delete mode 100644 sdk_test/jsons/packages/cloud-transform.json delete mode 100644 sdk_test/jsons/packages/delete-package-using-client.json delete mode 100644 sdk_test/jsons/packages/delete-package.json delete mode 100644 sdk_test/jsons/packages/device-volume.json delete mode 100644 sdk_test/jsons/packages/fast-talker-device-docker-with-rosbags.json delete mode 100644 sdk_test/jsons/packages/inbound-incoming-scoped-targeted.json delete mode 100644 sdk_test/jsons/packages/latching-pkg.json delete mode 100644 sdk_test/jsons/packages/listener-docker.json delete mode 100644 sdk_test/jsons/packages/listener.json delete mode 100644 sdk_test/jsons/packages/nginx-multi-component.json delete mode 100644 sdk_test/jsons/packages/nginx-single-component.json delete mode 100644 sdk_test/jsons/packages/no-scoped-targeted.json delete mode 100644 sdk_test/jsons/packages/pv-reader.json delete mode 100644 
sdk_test/jsons/packages/rosbag-talker-cloud.json delete mode 100644 sdk_test/jsons/packages/scoped-cloud.json delete mode 100644 sdk_test/jsons/packages/scoped-targeted.json delete mode 100644 sdk_test/jsons/packages/talker-cloud-device.json delete mode 100644 sdk_test/jsons/packages/talker-cloud.json delete mode 100644 sdk_test/jsons/packages/talker-docker.json delete mode 100644 sdk_test/jsons/packages/talker-noetic.json delete mode 100644 sdk_test/jsons/packages/talker.json delete mode 100644 sdk_test/jsons/packages/throttling-pkg.json delete mode 100644 sdk_test/package/__init__.py delete mode 100644 sdk_test/package/cloud_non_ros_test.py delete mode 100644 sdk_test/package/cloud_scoped_targeted_test.py delete mode 100644 sdk_test/package/cloud_transform_test.py delete mode 100644 sdk_test/package/configuration_tests.py delete mode 100644 sdk_test/package/create_package_test.py delete mode 100644 sdk_test/package/delete_package_test.py delete mode 100644 sdk_test/package/deployment_test.py delete mode 100644 sdk_test/package/get_all_package_test.py delete mode 100644 sdk_test/package/inbound_incoming_scoped_targeted_test.py delete mode 100644 sdk_test/package/native_network_tests.py delete mode 100644 sdk_test/package/noetic_test.py delete mode 100644 sdk_test/package/package_test.py delete mode 100644 sdk_test/package/rosbag_test.py delete mode 100644 sdk_test/package/routed_networks_tests.py delete mode 100644 sdk_test/package/static_route_test.py delete mode 100644 sdk_test/package/transformer_with_docker_device_test.py delete mode 100644 sdk_test/package/volume_test.py diff --git a/README.md b/README.md index 0fc403fe..fc61f44f 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,14 @@ command. python setup.py install ``` +## Development + +Create a python virtual environment, having version less than 3.11 + +```bash +pipenv install --dev +``` + ## Getting Started Before using the SDK, you need the Rapyuta Token. 
You can get it from @@ -40,17 +48,14 @@ from rapyuta_io import Project project = client.create_project(Project("python-sdk")) client.set_project(project.guid) - -# Create a Build -from rapyuta_io import Build, StrategyType, DeviceArch - -client.create_build( - Build( - "dummy", - StrategyType.DOCKER, - "https://github.com/ankitrgadiya/dummy-package", - DeviceArch.AMD64, - ) -) ``` +## SDK Test + +`RIO_CONFIG` environment variable pointing to the config.json must be sourced to +run the sdk integration test. The sample config is present in `sdk_test` directory. +Run `run_rio_sdk_test.py` to start the sdk tests. + +Currently only one docker compose device is needed to be created and added to the config, +SDK Test will add the device to the newly created project and onboard it and run tests. + diff --git a/rapyuta_io/__init__.py b/rapyuta_io/__init__.py index 96598f2b..3da7e3c2 100644 --- a/rapyuta_io/__init__.py +++ b/rapyuta_io/__init__.py @@ -1,11 +1,8 @@ from __future__ import absolute_import -from .clients.deployment import DeploymentPhaseConstants, DeploymentStatusConstants -from .clients.device import TopicKind, DeviceStatus, TopicQOS, QoS +from .clients.device import TopicKind, DeviceStatus, TopicQOS, QoS, DeploymentPhaseConstants, ROSDistro from .clients.model import Label, Command, DeviceConfig, TopicsStatus -from .clients.persistent_volumes import DiskType from rapyuta_io.utils import error from .rio_client import Client -from .clients.package import ROSDistro from .clients.device_manager import DeviceArch from .clients.project import Project from .clients.secret import Secret, SecretConfigDocker diff --git a/rapyuta_io/clients/__init__.py b/rapyuta_io/clients/__init__.py index e9d8c84a..1e711ded 100644 --- a/rapyuta_io/clients/__init__.py +++ b/rapyuta_io/clients/__init__.py @@ -1,7 +1,5 @@ from .device_manager import DeviceManagerClient, DeviceArch from .model import * -from .provision_client import ProvisionClient from .paramserver import 
_ParamserverClient -from .package import ROSDistro -from .device import Device +from .device import Device, ROSDistro from .user_group import UserGroup diff --git a/rapyuta_io/clients/catalog_client.py b/rapyuta_io/clients/catalog_client.py index 1a9300db..86daf17f 100644 --- a/rapyuta_io/clients/catalog_client.py +++ b/rapyuta_io/clients/catalog_client.py @@ -1,18 +1,14 @@ # encoding: utf-8 from __future__ import absolute_import + import os import re -import six from rapyuta_io.clients.api_client import CatalogConfig -from rapyuta_io.clients.package import Package -from rapyuta_io.clients.persistent_volumes import PersistentVolumes -from rapyuta_io.utils import RestClient, PackageNotFound, to_objdict, APIError +from rapyuta_io.utils import RestClient, PackageNotFound from rapyuta_io.utils.rest_client import HttpMethod -from rapyuta_io.utils.settings import CATALOG_API_PATH, VOLUME_PACKAGE_ID +from rapyuta_io.utils.settings import CATALOG_API_PATH from rapyuta_io.utils.utils import response_validator -from six.moves import map -from rapyuta_io.utils.partials import PartialMixin class CatalogClient(CatalogConfig): @@ -34,117 +30,6 @@ def _get_service(self, package_id, retry_limit=0): url = self._get_api_path() + "?package_uid=%s" % package_id return self._execute(url, HttpMethod.GET, retry_limit) - @response_validator(True) - def get_all_packages(self, retry_limit): - url = self._catalog_api_host + '/v2/catalog' - return self._execute(url, HttpMethod.GET, retry_limit) - - def get_package(self, package_id, retry_limit): - package = self._get_service(package_id, retry_limit) - try: - package = package['packageInfo'] - except Exception: - raise APIError("packageInfo not present in the package") - if package_id == VOLUME_PACKAGE_ID: - pkg = PersistentVolumes(to_objdict(package)) - pkg.planId = 'default' - else: - # PARTIAL_ATTR set before initializing Package: Package._post_init() depends on is_partial - package[PartialMixin.PARTIAL_ATTR] = False - pkg = 
Package(to_objdict(package)) - setattr(pkg, 'packageId', package_id) - return pkg - - def delete_package(self, package_id): - path = '/serviceclass/delete?package_uid={}'.format(package_id) - url = self._catalog_api_host + path - return self._execute(url, HttpMethod.DELETE) - - @response_validator(True) - def get_deployment(self, deployment_id, retry_limit): - path = '/serviceinstance/{}'.format(deployment_id) - url = self._catalog_api_host + path - return self._execute(url, HttpMethod.GET, retry_limit) - - @response_validator(True) - def update_deployment(self, payload, retry_limit): - path = '/v2/service_instances/{}'.format(payload['deployment_id']) - url = self._catalog_api_host + path - return self._execute(url, HttpMethod.PATCH, retry_limit, payload=payload) - - @response_validator(True) - def deployment_list(self, phases, device_id, retry_limit): - url = self._catalog_api_host + '/deployment/list' - query_params = {} - if phases: - query_params = {'phase': list(map(str, phases))} - if device_id: - query_params['device_uid'] = device_id - - return self._execute(url, HttpMethod.GET, retry_limit, query_params=query_params) - - @response_validator(True) - def create_package(self, package_payload, retry_limit): - url = self._catalog_api_host + '/serviceclass/add' - return self._execute(url, HttpMethod.POST, retry_limit, package_payload) - - @response_validator(True) - def create_routed_network(self, **network_payload): - url = self._catalog_api_host + '/routednetwork' - routed_network = self._execute(url, HttpMethod.POST, retry_count=0, payload=network_payload) - return routed_network - - @response_validator(True) - def get_routed_network(self, network_guid): - url = self._catalog_api_host + '/routednetwork/{}'.format(network_guid) - return self._execute(url, HttpMethod.GET) - - @response_validator(True) - def delete_routed_network(self, network_guid): - url = self._catalog_api_host + '/routednetwork/{}'.format(network_guid) - return self._execute(url, 
HttpMethod.DELETE) - - @response_validator(True) - def list_routed_network(self): - url = self._catalog_api_host + '/routednetwork' - return self._execute(url, HttpMethod.GET) - - @response_validator(True) - def create_build(self, build): - url = self._catalog_api_host + '/build' - return self._execute(url, HttpMethod.POST, payload=build._serialize()) - - @response_validator(True) - def get_build(self, guid, include_build_requests): - url = self._catalog_api_host + '/build/{}'.format(guid) - query_params = None - if include_build_requests: - query_params = {'include_build_requests': include_build_requests} - return self._execute(url, HttpMethod.GET, query_params=query_params) - - @response_validator(True) - def list_builds(self, statuses): - url = self._catalog_api_host + '/build' - query_params = None - if statuses: - query_params = {'status': statuses} - - return self._execute(url, HttpMethod.GET, query_params=query_params) - - def delete_build(self, guid): - url = self._catalog_api_host + '/build/{}'.format(guid) - return self._execute(url, HttpMethod.DELETE) - - @response_validator(True) - def trigger_build(self, buildOperation): - url = self._catalog_api_host + '/build/operation/trigger' - return self._execute(url, HttpMethod.PUT, payload=buildOperation) - - @response_validator(True) - def rollback_build(self, buildOperation): - url = self._catalog_api_host + '/build/operation/rollback' - return self._execute(url, HttpMethod.PUT, payload=buildOperation) - @response_validator(True) def get_rosbag_job(self, guid): url = self._catalog_api_host + '/rosbag-jobs/job/{}'.format(guid) @@ -222,23 +107,3 @@ def download_blob(signed_url, filename, download_dir): def delete_rosbag_blob(self, guid): url = self._catalog_api_host + '/rosbag-blobs/{}'.format(guid) return self._execute(url, HttpMethod.DELETE) - - @response_validator(True) - def create_native_network(self, network_payload): - url = self._catalog_api_host + '/nativenetwork' - return self._execute(url, 
HttpMethod.POST, retry_count=0, payload=network_payload.serialize()) - - @response_validator(True) - def get_native_network(self, network_guid): - url = self._catalog_api_host + '/nativenetwork/{}'.format(network_guid) - return self._execute(url, HttpMethod.GET) - - @response_validator(True) - def delete_native_network(self, network_guid): - url = self._catalog_api_host + '/nativenetwork/{}'.format(network_guid) - return self._execute(url, HttpMethod.DELETE) - - @response_validator(True) - def list_native_network(self): - url = self._catalog_api_host + '/nativenetwork' - return self._execute(url, HttpMethod.GET) diff --git a/rapyuta_io/clients/common_models.py b/rapyuta_io/clients/common_models.py deleted file mode 100644 index d71d7930..00000000 --- a/rapyuta_io/clients/common_models.py +++ /dev/null @@ -1,81 +0,0 @@ -import rapyuta_io -from rapyuta_io.utils.object_converter import ObjBase, enum_field -import six -from rapyuta_io.utils.error import InvalidParameterException -from rapyuta_io.utils import ObjDict - - -class InternalDeploymentStatus(ObjBase): - """ - InternalDeploymentStatus represents Internal Deployment Status. 
- - :ivar phase: phase of the internal deployment - :vartype phase: :py:class:`rapyuta_io.clients.deployment.DeploymentPhaseConstants` - :ivar status: (full-only) status of the internal deployment - :vartype status: :py:class:`rapyuta_io.clients.deployment.DeploymentStatusConstants` - :ivar error_code: error code of the internal deployment - :vartype error_code: list(str) - - :param phase: phase of the internal deployment - :type phase: :py:class:`rapyuta_io.clients.deployment.DeploymentPhaseConstants` - :param status: status of the internal deployment - :type status: :py:class:`rapyuta_io.clients.deployment.DeploymentStatusConstants` - :param error_code: error code of the internal deployment - :type error_code: list(str) - """ - - def __init__(self, phase, status=None, error_code=None): - self.phase = phase - self.status = status - self.error_code = error_code - - def get_deserialize_map(self): - return { - 'phase': enum_field('phase', rapyuta_io.DeploymentPhaseConstants), - 'status': enum_field('status', rapyuta_io.DeploymentStatusConstants), - 'error_code': 'error_code' - } - - def get_serialize_map(self): - return {} - - -class Limits(ObjDict, ObjBase): - """ - Limits represent the cpu and memory specs of a cloud network - - :ivar cpu: cpu - :vartype cpu: Union [float, integer] - :ivar memory: memory - :vartype memory: integer - - :param cpu: cpu - :type cpu: Union [float, integer] - :param memory: memory - :type memory: integer - """ - - def __init__(self, cpu, memory): - self.validate(cpu, memory) - super(ObjDict, self).__init__(cpu=cpu, memory=memory) - - @staticmethod - def validate(cpu, memory): - if not isinstance(cpu, float) and not isinstance(cpu, six.integer_types): - raise InvalidParameterException('cpu must be a float or integer') - if cpu <= 0: - raise InvalidParameterException('cpu must be a positive number') - if not isinstance(memory, six.integer_types) or memory <= 0: - raise InvalidParameterException('memory must be a positive integer') - - def 
get_deserialize_map(self): - return { - 'cpu': 'cpu', - 'memory': 'memory', - } - - def get_serialize_map(self): - return { - 'cpu': 'cpu', - 'memory': 'memory', - } diff --git a/rapyuta_io/clients/core_api_client.py b/rapyuta_io/clients/core_api_client.py index 302216f3..6dba92db 100644 --- a/rapyuta_io/clients/core_api_client.py +++ b/rapyuta_io/clients/core_api_client.py @@ -1,20 +1,13 @@ from rapyuta_io.clients.project import Project, User -from rapyuta_io.clients.secret import Secret from rapyuta_io.clients.user_group import UserGroup from rapyuta_io.utils.utils import prepend_bearer_to_auth_token, create_auth_header, get_api_response_data from rapyuta_io.utils.rest_client import HttpMethod -from rapyuta_io.utils import RestClient, to_objdict -from rapyuta_io.clients.static_route import StaticRoute -from rapyuta_io.utils.error import ResourceNotFoundError +from rapyuta_io.utils import RestClient from rapyuta_io.utils.settings import METRICS_API_QUERY_PATH, LIST_METRICS_API_QUERY_PATH, \ LIST_TAGS_KEY_API_QUERY_PATH, LIST_TAGS_VALUE_API_QUERY_PATH, GET_USER_PATH class CoreAPIClient: - STATIC_ROUTE_PATH = '/api/staticroute' - PROJECT_PATH = '/api/project' - SECRET_PATH = '/api/secret' - def __init__(self, auth_token, project, core_api_host): self._core_api_host = core_api_host self._auth_token = prepend_bearer_to_auth_token(auth_token) @@ -33,166 +26,6 @@ def _add_auth_token_to_routes(self, routes): for route in routes: self._add_header_fields(route) - def _get_all_static_routes(self): - url = self._core_api_host + self.STATIC_ROUTE_PATH + '/list' - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).headers(headers).execute() - return get_api_response_data(response, parse_full=True) - - def _get_static_route_by_url_prefix(self, url_prefix): - url = self._core_api_host + self.STATIC_ROUTE_PATH + '/filter' - query_params = {'urlPrefix': url_prefix} - headers = create_auth_header(self._auth_token, self._project) - response = 
RestClient(url).headers(headers).query_param(query_param=query_params).execute() - return get_api_response_data(response, parse_full=True) - - def _get_static_route(self, route_guid): - url = "{}{}/{}/get".format(self._core_api_host, self.STATIC_ROUTE_PATH, route_guid) - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).headers(headers).execute() - return get_api_response_data(response, parse_full=True) - - def _create_static_route(self, url_prefix): - url = self._core_api_host + self.STATIC_ROUTE_PATH + '/create' - headers = create_auth_header(self._auth_token, self._project) - payload = {"urlPrefix": url_prefix} - response = RestClient(url).method(HttpMethod.POST).headers(headers).execute(payload) - return get_api_response_data(response, parse_full=True) - - def get_all_static_routes(self): - static_routes = [] - data = self._get_all_static_routes() - for route in data: - static_route = StaticRoute(to_objdict(route)) - static_routes.append(static_route) - self._add_auth_token_to_routes(static_routes) - return static_routes - - def get_static_route(self, route_guid): - data = self._get_static_route(route_guid) - route = StaticRoute(to_objdict(data)) - self._add_header_fields(route) - return route - - def create_static_route(self, name): - data = self._create_static_route(name) - route = StaticRoute(to_objdict(data)) - self._add_header_fields(route) - return route - - def delete_static_route(self, guid): - url = self._core_api_host + self.STATIC_ROUTE_PATH + '/delete' - headers = create_auth_header(self._auth_token, self._project) - payload = {"guid": guid} - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute(payload) - get_api_response_data(response, parse_full=True) - - def get_static_route_by_name(self, name): - try: - routes = self._get_static_route_by_url_prefix(name) - except ResourceNotFoundError as e: - return None - route = StaticRoute(to_objdict(routes[0])) - 
self._add_header_fields(route) - return route - - def create_project(self, project): - url = self._core_api_host + self.PROJECT_PATH + '/create' - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.POST).headers(headers).execute(project.serialize()) - data = get_api_response_data(response, parse_full=True) - project = Project.deserialize(data) - self._add_header_fields(project) - return project - - def get_project(self, guid): - url = '{}{}/{}/get'.format(self._core_api_host, self.PROJECT_PATH, guid) - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.GET).headers(headers).execute() - data = get_api_response_data(response, parse_full=True) - project = Project.deserialize(data) - self._add_header_fields(project) - return project - - def list_projects(self): - url = self._core_api_host + self.PROJECT_PATH + '/list' - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.GET).headers(headers).execute() - data = get_api_response_data(response, parse_full=True) - projects = [] - for project_data in data: - project = Project.deserialize(project_data) - self._add_header_fields(project) - projects.append(project) - return projects - - def delete_project(self, guid): - url = self._core_api_host + self.PROJECT_PATH + '/delete' - headers = create_auth_header(self._auth_token, self._project) - payload = {'guid': guid} - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute(payload) - get_api_response_data(response, parse_full=True) - - def add_user_to_project(self, project_guid, user_guid): - url = '{}{}/{}/adduser'.format(self._core_api_host, self.PROJECT_PATH, project_guid) - headers = create_auth_header(self._auth_token, self._project) - payload = {'userGUID': user_guid} - response = RestClient(url).method(HttpMethod.PUT).headers(headers).execute(payload) - 
get_api_response_data(response, parse_full=True) - - def remove_user_from_project(self, project_guid, user_guid): - url = '{}{}/{}/removeuser'.format(self._core_api_host, self.PROJECT_PATH, project_guid) - headers = create_auth_header(self._auth_token, self._project) - payload = {'userGUID': user_guid} - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute(payload) - get_api_response_data(response, parse_full=True) - - def create_secret(self, secret): - url = self._core_api_host + self.SECRET_PATH + '/create' - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.POST).headers(headers).execute(secret.serialize()) - data = get_api_response_data(response, parse_full=True) - secret = Secret.deserialize(data) - self._add_header_fields(secret) - return secret - - def get_secret(self, guid): - url = '{}{}/{}/get'.format(self._core_api_host, self.SECRET_PATH, guid) - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.GET).headers(headers).execute() - data = get_api_response_data(response, parse_full=True) - secret = Secret.deserialize(data) - self._add_header_fields(secret) - return secret - - def list_secrets(self): - url = self._core_api_host + self.SECRET_PATH + '/list' - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.GET).headers(headers).execute() - data = get_api_response_data(response, parse_full=True) - secrets = [] - for secret_data in data: - secret = Secret.deserialize(secret_data) - self._add_header_fields(secret) - secrets.append(secret) - return secrets - - def update_secret(self, guid, secret): - url = self._core_api_host + self.SECRET_PATH + '/' + guid + '/update' - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.PUT).headers(headers).execute(secret.serialize()) - data = 
get_api_response_data(response, parse_full=True) - secret = Secret.deserialize(data) - self._add_header_fields(secret) - return secret - - def delete_secret(self, guid): - url = self._core_api_host + self.SECRET_PATH + '/delete' - headers = create_auth_header(self._auth_token, self._project) - payload = {'guid': guid} - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute(payload) - get_api_response_data(response, parse_full=True) - def query_metrics(self, metrics_query): url = self._core_api_host + METRICS_API_QUERY_PATH headers = create_auth_header(self._auth_token, self._project) diff --git a/rapyuta_io/clients/deployment.py b/rapyuta_io/clients/deployment.py deleted file mode 100644 index 8d7c2fcc..00000000 --- a/rapyuta_io/clients/deployment.py +++ /dev/null @@ -1,296 +0,0 @@ -# encoding: utf-8 -from __future__ import absolute_import -import enum -import time - -from rapyuta_io.clients.provision_client import ProvisionClient -from rapyuta_io.utils import ObjDict, to_objdict, DeploymentNotRunningException, RetriesExhausted -from rapyuta_io.utils.settings import BIND_ID, DEFAULT_SLEEP_INTERVAL, \ - DEPLOYMENT_STATUS_RETRY_COUNT -from rapyuta_io.utils.partials import PartialMixin -import six -from six.moves import range - - -def _poll_till_ready(instance, retry_count, sleep_interval, ready_phases=None): - # TODO: convert into DeploymentPollerMixin (pollers.py) - """ - - :param instance: instance can be a deployment, volume, or a routed network instance with get_status method - :param retry_count: Parameter to specify the retries. - :param sleep_interval: Parameter to specify the interval between retries. 
- - """ - if ready_phases is None: - ready_phases = [] - - dep_status = None - for _ in range(retry_count): - dep_status = instance.get_status() - - if dep_status.phase in ready_phases: - return dep_status - - if dep_status.phase == DeploymentPhaseConstants.SUCCEEDED.value: - if dep_status.status in [DeploymentStatusConstants.RUNNING.value, - DeploymentStatusConstants.AVAILABLE.value, - DeploymentStatusConstants.RELEASED.value]: - return dep_status - time.sleep(sleep_interval) - continue - if dep_status.phase == DeploymentPhaseConstants.INPROGRESS.value: - time.sleep(sleep_interval) - continue - if dep_status.phase == DeploymentPhaseConstants.PROVISIONING.value: - errors = dep_status.errors or [] - if 'DEP_E153' not in errors: # DEP_E153 (image-pull error) will persist across retries - time.sleep(sleep_interval) - continue - - msg = 'Deployment might not progress: phase={} status={} errors={}'.format( - dep_status.phase, dep_status.status, dep_status.errors) - raise DeploymentNotRunningException(msg, deployment_status=dep_status) - - msg = 'Retries exhausted: Tried {} times with {}s interval.'.format(retry_count, - sleep_interval) - if dep_status: - msg += ' Deployment: phase={} status={} errors={}'.format(dep_status.phase, dep_status.status, - dep_status.errors) - raise RetriesExhausted(msg) - - -class DeploymentPhaseConstants(str, enum.Enum): - """ - Enumeration variables for the deployment phase - - Deployment phase can be any of the following types \n - DeploymentPhaseConstants.INPROGRESS \n - DeploymentPhaseConstants.PROVISIONING \n - DeploymentPhaseConstants.SUCCEEDED \n - DeploymentPhaseConstants.FAILED_TO_START \n - DeploymentPhaseConstants.PARTIALLY_DEPROVISIONED \n - DeploymentPhaseConstants.DEPLOYMENT_STOPPED \n - """ - - def __str__(self): - return str(self.value) - - INPROGRESS = 'In progress' - PROVISIONING = 'Provisioning' - SUCCEEDED = 'Succeeded' - FAILED_TO_START = 'Failed to start' - PARTIALLY_DEPROVISIONED = 'Partially deprovisioned' - 
DEPLOYMENT_STOPPED = 'Deployment stopped' - - -class DeploymentStatusConstants(str, enum.Enum): - """ - Enumeration variables for the deployment status - - Deployment status can be any of the following types \n - DeploymentStatusConstants.RUNNING \n - DeploymentStatusConstants.PENDING \n - DeploymentStatusConstants.ERROR \n - DeploymentStatusConstants.UNKNOWN \n - DeploymentStatusConstants.STOPPED \n - """ - - def __str__(self): - return str(self.value) - - RUNNING = 'Running' - PENDING = 'Pending' - ERROR = 'Error' - UNKNOWN = 'Unknown' - STOPPED = 'Stopped' - - # Disk statuses, not meant to be documented - BOUND = 'Bound' - RELEASED = 'Released' - AVAILABLE= 'Available' - FAILED = 'Failed' - - -class DeploymentStatus(ObjDict): - """ - DeploymentStatus class - - :ivar deploymentId: Deployment Id. - :ivar name: Deployment name. - :ivar packageId: Package Id. - :ivar status: Deployment status - :ivar phase: Deployment phase - :ivar errors: Deployment errors - :ivar componentInfo: List containing the deployment components and their status. - :ivar dependentDeploymentStatus: Dependent deployment status. - :ivar packageDependencyStatus: Package dependency status. - """ - - def __init__(self, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - - -class Deployment(PartialMixin, ObjDict): - """ - Deployment class represents a running deployment. Member variables of the class represent the - properties of the deployment. \n - Variables marked as (full-only) are only available on a full object. Use `refresh()` to convert a - partial object into a full one. - - :ivar deploymentId: Deployment Id. - :ivar name: Deployment name. - :ivar packageId: Package Id. - :ivar packageName: Package Name. - :ivar packageAPIVersion: Package API Version. - :ivar planId: Plan Id. - :ivar bindable: Deployment is bindable or not. - :ivar labels: (full-only) Labels associated with the deployment. - :ivar parameters: (full-only) Deployment parameters. 
- :ivar componentInfo: (full-only) List of component details. - :ivar componentInstanceIds: (full-only) List of component instance ids. - :ivar dependentDeployments: (full-only) List of dependent deployments. - :ivar dependentDeploymentStatus: (full-only) Dependent deployments status details. - :ivar packageDependencyStatus: (full-only) Package dependency status details. - :ivar coreNetworks: (full-only) Routed and Native network details. - :ivar phase: Phase of the deployment. - :vartype phase: :py:class:`~rapyuta_io.clients.deployment.DeploymentPhaseConstants` - :ivar status: (full-only) Status of the deployment. - :vartype status: :py:class:`~rapyuta_io.clients.deployment.DeploymentStatusConstants` - :ivar provisionContext: (full-only) Context set during provisioning. - :ivar currentGeneration: (full-only) Build generation number. - :ivar errors: (full-only) List of errors. - :ivar inUse: Deployment is in use or not. - :ivar ownerProject: Owner project guid. - :ivar creator: Creator user guid. - :ivar CreatedAt: Date of creation. - :ivar UpdatedAt: Date of updation. - :ivar DeletedAt: Date of deletion. - """ - - def __init__(self, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - - def _get_status(self, retry_limit=0): - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - return provision_client.deployment_status(self.deploymentId, retry_limit) - - def refresh(self): - full_deployment = self._get_status() - for key, value in six.iteritems(full_deployment): - setattr(self, key, to_objdict(value)) - self.is_partial = False - - def get_status(self, retry_limit=0): - """ - Get the deployment status - - :param retry_limit: Optional parameter to specify the number of retry attempts to be - carried out if any failures occurs during the API call. 
- :type retry_limit: int - :returns: instance of class :py:class:`DeploymentStatus`: - :raises: :py:class:`APIError`: If the get deployment status api returns an error, the status - code is anything other than 200/201 - - Following example demonstrates how to get a deployment status - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> deployment = client.get_deployment('test_deployment_id') - >>> deployment.get_status() - - """ - return DeploymentStatus(to_objdict(self._get_status(retry_limit))) - - def deprovision(self, retry_limit=0): - """ - Deprovision the deployment instance represented by the corresponding :py:class:`~Deployment`: class. - - :param retry_limit: - :return: True if de-provision is successful, False otherwise - :raises: :py:class:`~rapyuta_io.utils.error.ParameterMissingException`: If the planId or - deploymentId is missing in the request. - :raises: :py:class:`~rapyuta_io.utils.error.APIError`: If the deprovision-api returns an error, the status code - is anything other than 200/201 - - Following example demonstrates how to deprovision a deployment - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> deployment = client.get_deployment('test_deployment_id') - >>> deployment.deprovision() - - """ - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - return provision_client.deprovision(self.deploymentId, self.planId, self.packageId, - retry_limit) - - def get_service_binding(self, binding_id=None, retry_limit=0): - """ - Get the service bindings of the deployment. Service Bindings contain the credentials that - can be used to communicate with the deployment. - - :param binding_id: Optional parameter Binding Id - :type binding_id: string - :param retry_limit: Optional parameter to specify the number of retry attempts to be - carried out if any failures occurs during the API call. 
- :type retry_limit: int - :return: Service binding dictionary containing credentials. - :raises: :py:class:`ServiceBindingError`: If the request failed to get the service binding. - :raises: :py:class:`APIError`: If service binding api return an error, the status code is - anything other than 200/201 - - Following example demonstrates how to get the service binding - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> deployment = client.get_deployment('test_deployment_id') - >>> deployment.get_service_binding() - - """ - - if binding_id is None: - binding_id = BIND_ID - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - credentials = provision_client.service_binding(self.deploymentId, self.planId, - self.packageId, binding_id, retry_limit) - return credentials - - def poll_deployment_till_ready(self, retry_count=DEPLOYMENT_STATUS_RETRY_COUNT, - sleep_interval=DEFAULT_SLEEP_INTERVAL, ready_phases=None): - """ - - Wait for the deployment to be ready - - :param retry_count: Optional parameter to specify the retries. Default value is 15 - :param sleep_interval: Optional parameter to specify the interval between retries. - Default value is 6 Sec. - :return: instance of class :py:class:`DeploymentStatus`: - :raises: :py:class:`APIError`: If service binding api return an error, the status code is - anything other than 200/201 - :raises: :py:class:`DeploymentNotRunningException`: If the deployment’s state might not - progress due to errors. - :raises: :py:class:`RetriesExhausted`: If number of polling retries exhausted before the - deployment could succeed or fail. - - Following example demonstrates use of poll_deployment_till_ready, and in case of deployment - failure uses error codes to check whether it was due to device being offline. 
- Read more on error codes: https://userdocs.rapyuta.io/6_troubleshoot/611_deployment-error-codes/ - - >>> from rapyuta_io import Client - >>> from rapyuta_io.utils.error import (DeploymentNotRunningException, - ... RetriesExhausted) - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> deployment = client.get_deployment('test_deployment_id') - >>> try: - ... dep_status = deployment.poll_deployment_till_ready() - ... print dep_status - ... except RetriesExhausted as e: - ... print e, 'Retry again?' - ... except DeploymentNotRunningException as e: - ... print e - ... if 'DEP_E151' in e.deployment_status.errors: - ... print 'Device is either offline or not reachable' - - - """ - return _poll_till_ready(self, retry_count, sleep_interval, ready_phases) diff --git a/rapyuta_io/clients/device.py b/rapyuta_io/clients/device.py index 348e78d0..46d74f48 100644 --- a/rapyuta_io/clients/device.py +++ b/rapyuta_io/clients/device.py @@ -10,7 +10,6 @@ import six import rapyuta_io -from rapyuta_io.clients.deployment import DeploymentPhaseConstants, Deployment from rapyuta_io.clients.model import TopicsStatus, DeviceConfig, Label, Metric, LogUploadStatus, \ LogUploads, SharedURL from rapyuta_io.utils import ObjDict, RestClient, ParameterMissingException, \ @@ -292,11 +291,11 @@ def validate(name, runtime, runtime_docker, runtime_preinstalled, ros_distro, ro raise InvalidParameterException( 'python_version must be one of rapyuta.io.client.device.DevicePythonVersion') if ros_distro is not None and ( - ros_distro not in list(rapyuta_io.clients.package.ROSDistro.__members__.values())): + ros_distro not in list(ROSDistro.__members__.values())): raise InvalidParameterException('ros_distro must be one of rapyuta_io.clients.package.ROSDistro') - if runtime_preinstalled and ros_distro == rapyuta_io.clients.package.ROSDistro.NOETIC: + if runtime_preinstalled and ros_distro == ROSDistro.NOETIC: raise InvalidParameterException('preinstalled runtime does not support 
noetic ros_distro yet') - if ros_distro == rapyuta_io.clients.package.ROSDistro.NOETIC and python_version == DevicePythonVersion.PYTHON2: + if ros_distro == ROSDistro.NOETIC and python_version == DevicePythonVersion.PYTHON2: raise InvalidParameterException('noetic ros_distro not supported on python_version 2') if rosbag_mount_path is not None and not isinstance(rosbag_mount_path, six.string_types): raise InvalidParameterException('rosbag_mount_path must be of type string') @@ -344,8 +343,6 @@ def _deserialize(cls, data): obj.config_variables = [DeviceConfig(to_objdict(config)) for config in obj.config_variables] if obj.labels: obj.labels = [Label(to_objdict(label)) for label in obj.labels] - if hasattr(obj, 'deployments') and obj.deployments: - obj.deployments = [Deployment(to_objdict(deployment)) for deployment in obj.deployments] return obj def is_online(self): @@ -1396,3 +1393,43 @@ def upgrade(self): if response.status_code == requests.codes.BAD_REQUEST: raise DeploymentRunningException() get_api_response_data(response) + + +class ROSDistro(str, enum.Enum): + """ + Enumeration variables for the Supported ROS Distros. 
ROS Distro may be one of: \n + ROSDistro.KINETIC ('kinetic') \n + ROSDistro.MELODIC ('melodic') \n + ROSDistro.NOETIC ('noetic') \n + """ + + def __str__(self): + return str(self.value) + + KINETIC = 'kinetic' + MELODIC = 'melodic' + NOETIC = 'noetic' + + +class DeploymentPhaseConstants(str, enum.Enum): + """ + Enumeration variables for the deployment phase + + Deployment phase can be any of the following types \n + DeploymentPhaseConstants.INPROGRESS \n + DeploymentPhaseConstants.PROVISIONING \n + DeploymentPhaseConstants.SUCCEEDED \n + DeploymentPhaseConstants.FAILED_TO_START \n + DeploymentPhaseConstants.PARTIALLY_DEPROVISIONED \n + DeploymentPhaseConstants.DEPLOYMENT_STOPPED \n + """ + + def __str__(self): + return str(self.value) + + INPROGRESS = 'In progress' + PROVISIONING = 'Provisioning' + SUCCEEDED = 'Succeeded' + FAILED_TO_START = 'Failed to start' + PARTIALLY_DEPROVISIONED = 'Partially deprovisioned' + DEPLOYMENT_STOPPED = 'Deployment stopped' diff --git a/rapyuta_io/clients/native_network.py b/rapyuta_io/clients/native_network.py deleted file mode 100644 index e4eaae62..00000000 --- a/rapyuta_io/clients/native_network.py +++ /dev/null @@ -1,255 +0,0 @@ -# coding=utf-8 -from __future__ import absolute_import - -import rapyuta_io.clients.package # to prevent cyclic import -from rapyuta_io.clients.deployment import _poll_till_ready -from rapyuta_io.clients.common_models import InternalDeploymentStatus -from rapyuta_io.utils import RestClient -from rapyuta_io.utils.error import InvalidParameterException, OperationNotAllowedError, ParameterMissingException -from rapyuta_io.utils.rest_client import HttpMethod -from rapyuta_io.utils.utils import create_auth_header, get_api_response_data -from rapyuta_io.utils.object_converter import ObjBase, enum_field, nested_field -from rapyuta_io.utils.partials import PartialMixin -import six -from rapyuta_io.clients.common_models import Limits - -class NativeNetwork(PartialMixin, ObjBase): - """ - NativeNetwork represents 
native network. \n - Variables marked as (full-only) are only available on a full object. Use `refresh()` to convert a - partial object into a full one. - - :ivar name: name of the native network - :vartype name: str - :ivar runtime: runtime of the native network - :vartype runtime: :py:class:`~rapyuta_io.clients.package.Runtime` - :ivar ros_distro: ROS distribution - :vartype ros_distro: :py:class:`~rapyuta_io.clients.package.ROSDistro` - :ivar parameters: parameters of the native network - :vartype parameters: :py:class:`~rapyuta_io.clients.native_network.Parameters` - :ivar created_at: creation time of the native network - :vartype created_at: str - :ivar updated_at: updating time of the native network - :vartype updated_at: str - :ivar guid: native network guid - :vartype guid: str - :ivar owner_project: project id - :vartype owner_project: str - :ivar creator: user id - :vartype creator: str - :ivar internal_deployment_guid: guid of the internal deployment - :vartype internal_deployment_guid: str - :ivar internal_deployment_status: internal deployment status of the native network - :vartype internal_deployment_status: :py:class:`~rapyuta_io.clients.common_models.InternalDeploymentStatus` - - :param name: name of the native network - :type name: str - :param runtime: runtime of the native network - :type runtime: :py:class:`~rapyuta_io.clients.package.Runtime` - :param ros_distro: ROS distribution - :type ros_distro: :py:class:`~rapyuta_io.clients.package.ROSDistro` - :param parameters: parameters of the native network - :type parameters: :py:class:`~rapyuta_io.clients.native_network.Parameters` - """ - NATIVE_NETWORK_PATH = 'nativenetwork' - - def __init__(self, name, runtime, ros_distro, parameters=None): - self.validate(name, runtime, ros_distro, parameters) - self.name = name - self.runtime = runtime - self.ros_distro = ros_distro - self.parameters = parameters - self.created_at = None - self.updated_at = None - self.guid = None - self.owner_project = None 
- self.creator = None - self.internal_deployment_guid = None - self.internal_deployment_status = None - - @staticmethod - def validate(name, runtime, ros_distro, parameters=None): - if not name or not isinstance(name, six.string_types): - raise InvalidParameterException('name must be a non-empty string') - if ros_distro not in list(rapyuta_io.clients.package.ROSDistro.__members__.values()): - raise InvalidParameterException('ros_distro must be one of rapyuta_io.clients.package.ROSDistro') - if runtime not in list(rapyuta_io.clients.package.Runtime.__members__.values()): - raise InvalidParameterException('runtime must be one of rapyuta_io.clients.package.Runtime') - if ros_distro == rapyuta_io.clients.package.ROSDistro.NOETIC and \ - runtime == rapyuta_io.clients.package.Runtime.DEVICE: - raise InvalidParameterException('device runtime does not support noetic ros_distro yet') - if parameters is not None and not isinstance(parameters, Parameters): - raise InvalidParameterException('parameters must be of type rapyuta_io.clients.native_network.Parameters') - if runtime == rapyuta_io.clients.package.Runtime.DEVICE.value: - if parameters is None: - raise InvalidParameterException('parameters must be present for device runtime') - if not parameters.device_id: - raise InvalidParameterException('device_id field must be present in rapyuta_io.clients.' - 'native_network.Parameters object for device runtime') - if not parameters.network_interface: - raise InvalidParameterException('network_interface must be present in rapyuta_io.clients.' 
- 'native_network.Parameters object for device runtime') - - def get_deserialize_map(self): - return { - 'name': 'name', - 'guid': 'guid', - 'owner_project': 'ownerProject', - 'creator': 'creator', - 'runtime': enum_field('runtime', rapyuta_io.clients.package.Runtime), - 'ros_distro': enum_field('rosDistro', rapyuta_io.clients.package.ROSDistro), - 'internal_deployment_guid': 'internalDeploymentGUID', - 'internal_deployment_status': nested_field('internalDeploymentStatus', InternalDeploymentStatus), - 'parameters': nested_field('parameters', Parameters), - 'created_at': 'CreatedAt', - 'updated_at': 'UpdatedAt' - } - - def get_serialize_map(self): - return { - 'name': 'name', - 'runtime': 'runtime', - 'rosDistro': 'ros_distro', - 'parameters': 'parameters' - } - - def poll_native_network_till_ready(self, retry_count=120, sleep_interval=5): - # TODO: implement and use DeploymentPollerMixin. see _poll_till_ready - """ - - Wait for the native network to be ready - - :param retry_count: Optional parameter to specify the retries. Default value is 120 - :param sleep_interval: Optional parameter to specify the interval between retries. - Default value is 5 Sec. - :return: instance of class :py:class:`~rapyuta_io.clients.common_models.InternalDeploymentStatus`: - :raises: :py:class:`APIError`: If service binding api return an error, the status code is - anything other than 200/201 - :raises: :py:class:`DeploymentNotRunningException`: If the deployment’s state might not - progress due to errors. - :raises: :py:class:`RetriesExhausted`: If number of polling retries exhausted before the - deployment could succeed or fail. - - Following example demonstrates use of poll_native_network_till_ready: - - >>> from rapyuta_io import Client - >>> from rapyuta_io.utils.error import (DeploymentNotRunningException, - ... RetriesExhausted) - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> native_network = client.get_native_network('network-guid') - >>> try: - ... 
network_status = native_network.poll_native_network_till_ready() - ... print network_status - ... except RetriesExhausted as e: - ... print e, 'Retry again?' - ... except DeploymentNotRunningException as e: - ... print e, e.deployment_status - - """ - _poll_till_ready(self, retry_count, sleep_interval) - return self - - def get_status(self): - if self.guid is None: - raise OperationNotAllowedError('resource has not been created') - native_network = NativeNetwork.deserialize(self._get_full_resource()) - internal_deployment_status = native_network.internal_deployment_status - internal_deployment_status.errors = native_network.get_error_code() - return internal_deployment_status - - def _get_full_resource(self): - url = '{}/{}/{}'.format(self._host, self.NATIVE_NETWORK_PATH, self.guid) - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.GET).headers(headers).execute() - return get_api_response_data(response, parse_full=True) - - def refresh(self): - NativeNetwork.deserialize(self._get_full_resource(), obj=self) - self.is_partial = False - - def delete(self): - - """ - Delete the native network using the native network object. 
- - Following example demonstrates how to delete a native network using native network object: - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project='project_guid') - >>> native_network = client.get_native_network(network_guid='network_guid') - >>> native_network.delete() - - """ - - url = '{}/{}/{}'.format(self._host, self.NATIVE_NETWORK_PATH, self.guid) - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute() - get_api_response_data(response, parse_full=True) - return True - - def get_error_code(self): - getattr(self.internal_deployment_status, "error_code", []) - - -class Parameters(ObjBase): - """ - Parameters represents Native Network Parameters - - :ivar limits: Values corresponding to limits of the parameters - :vartype limits: :py:class:`~rapyuta_io.clients.common_models.Limits` - :ivar device_id: device_id of device on which the native network is deployed. - :vartype device_id: str - :ivar network_interface: network interface to which native network is binded. - :vartype network_interface: str - :ivar restart_policy: restart policy of native network. - :vartype restart_policy: enum :py:class:`~rapyuta_io.clients.package.RestartPolicy` - - :param limits: Values corresponding to limits of the parameters - :type limits: :py:class:`~rapyuta_io.clients.common_models.Limits` - :param device: device on which the native network is deployed. - :type device: :py:class:`~rapyuta_io.clients.device.Device` - :param network_interface: network interface to which native network is binded. - :type network_interface: str - :param restart_policy: restart policy of native network. 
- :type restart_policy: enum :py:class:`~rapyuta_io.clients.package.RestartPolicy` - """ - - def __init__(self, limits=None, device=None, network_interface=None, restart_policy=None): - self.validate(device, network_interface, restart_policy) - self.limits = limits - self.device_id = device and device.uuid - self.network_interface = network_interface - self.restart_policy = restart_policy - - @staticmethod - def validate(device, network_interface, restart_policy): - if device: - if not isinstance(device, rapyuta_io.clients.device.Device): - raise InvalidParameterException('device must be of type rapyuta_io.clients.device.Device') - if not device.get('uuid'): - raise InvalidParameterException('uuid field must be present in rapyuta_io.clients.device.Device object') - if not device.get('ip_interfaces'): - raise InvalidParameterException( - 'ip_interfaces field must be present in rapyuta_io.clients.device.Device object') - ip_interfaces = device.ip_interfaces or {} - if network_interface not in list(ip_interfaces.keys()): - raise InvalidParameterException('NETWORK_INTERFACE should be in {}'.format(list(ip_interfaces.keys()))) - if restart_policy is not None and ( - restart_policy not in list(rapyuta_io.clients.package.RestartPolicy.__members__.values())): - raise InvalidParameterException('RestartPolicy must be one of rapyuta_io.clients.package.RestartPolicy') - - def get_deserialize_map(self): - return { - 'limits': nested_field('limits', Limits), - 'device_id': 'device_id', - 'network_interface': 'NETWORK_INTERFACE', - 'restart_policy': enum_field('restart_policy', rapyuta_io.clients.package.RestartPolicy), - } - - def get_serialize_map(self): - return { - 'limits': 'limits', - 'device_id': 'device_id', - 'NETWORK_INTERFACE': 'network_interface', - 'restart_policy': 'restart_policy' - } diff --git a/rapyuta_io/clients/package.py b/rapyuta_io/clients/package.py deleted file mode 100644 index 03496a97..00000000 --- a/rapyuta_io/clients/package.py +++ /dev/null @@ 
-1,967 +0,0 @@ -# encoding: utf-8 -from __future__ import absolute_import - -from collections import defaultdict - -import enum -import six - -import rapyuta_io -from rapyuta_io.clients import ProvisionClient -from rapyuta_io.clients.deployment import Deployment -from rapyuta_io.clients.deployment import DeploymentPhaseConstants -from rapyuta_io.clients.native_network import NativeNetwork -from rapyuta_io.clients.device import SPACE_GUID, VOLUME_PACKAGE_ID, INSTANCE_ID, Device -from rapyuta_io.clients.plan import Plan -from rapyuta_io.clients.rosbag import ROSBagJob, ROSBagOptions, UploadOptions, OverrideOptions -from rapyuta_io.clients.routed_network import RoutedNetwork -from rapyuta_io.clients.static_route import StaticRoute -from rapyuta_io.utils import ObjDict, to_objdict, OperationNotAllowedError, PlanNotFound, \ - InvalidParameterException, RestClient, APIError, ParameterMissingException, \ - AliasNotProvidedException, DuplicateAliasException -from rapyuta_io.utils.constants import DEVICE, DEVICE_ID, LABELS -from rapyuta_io.utils.rest_client import HttpMethod -from rapyuta_io.utils.settings import ORGANIZATION_GUID, CATALOG_API_PATH -from rapyuta_io.utils.utils import is_empty, \ - get_api_response_data, \ - create_auth_header -from rapyuta_io.utils.partials import PartialMixin - -CURRENT_PKG_VERSION = '2.0.0' - - -class Package(PartialMixin, ObjDict): - """ - Package class represents a service package. It contains method to provision - an instance of the package on cloud or on device. Additionally, it provides other utility - method. \n - Variables marked as (full-only) are only available on a full object. Use `refresh()` to convert a - partial object into a full one. - - :ivar packageId: Id of the package. - :ivar packageName: Package name. - :ivar packageVersion: Version of the package. - :ivar apiVersion: (full-only) Package API Version. - :ivar bindable: Package is bindable or not. - :ivar description: Description of the package. 
- :ivar category: (full-only) Package category. - :ivar plans: (full-only) List of plans associated with the package. - :vartype plans: list(:py:class:`~rapyuta_io.clients.plan.Plan`) - :ivar isPublic: (full-only) Boolean denoting whether the package is public or not. - :ivar status: (full-only) Status of the package. - :ivar tags: (full-only) Tags associated with the package. - :ivar buildGeneration: (full-only) Build generation. - :ivar ownerProject: (full-only) Owner project guid. - :ivar creator: (full-only) Creator user guid. - :ivar CreatedAt: (full-only) Date of creation. - :ivar UpdatedAt: (full-only) Date of updation. - :ivar DeletedAt: (full-only) Date of deletion. - - """ - - def __init__(self, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - - # Normalize object across responses from /serviceclass/status and /v2/catalog - if 'guid' in self: # /serviceclass/status - self.packageId = self.guid - else: # /v2/catalog - self.packageId = self.id - self.packageName = self.name - self.packageVersion = self.metadata['packageVersion'] - - self._post_init() - - def refresh(self): - self._refresh() - - def _refresh(self): - url = self._host + CATALOG_API_PATH + "?package_uid=%s" % self.packageId - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.GET).headers(headers).execute() - package = get_api_response_data(response, True) - try: - package = package['packageInfo'] - except Exception: - raise APIError("packageInfo not present in the package") - # PARTIAL_ATTR set before initializing Package: Package._post_init() depends on is_partial - package[PartialMixin.PARTIAL_ATTR] = False - pkg = Package(to_objdict(package)) - for attr in pkg.keys(): - self.__setattr__(attr, pkg.__getattr__(attr)) - - def delete(self): - - """ - Delete the package using the package object. 
- - Following example demonstrates how to delete a package using package object: - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project='project_guid') - >>> package = client.get_package(package_id='package_id') - >>> package.delete() - - """ - - url = self._host + '/serviceclass/delete?package_uid={}'.format(self.packageId) - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute() - get_api_response_data(response, parse_full=True) - - def _post_init(self): - if self.plans and not self.is_partial: - plans = list() - for plan in self.plans: - plans.append(Plan(to_objdict(plan))) - self.plans = plans - return - - def _update_auth_token(self, objects): - for obj in objects: - setattr(obj, '_host', self._host) - setattr(obj, '_auth_token', self._auth_token) - setattr(obj, '_project', self._project) - return - - def get_plan_by_id(self, plan_id): - for plan in self.plans: - if plan.planId == plan_id: - return plan - raise PlanNotFound(plan_id) - - def provision(self, deployment_name, provision_configuration, retry_limit=0): - """ - Provision the package (represented by the package class). Package can be deployed on device - or cloud. If the required runtime of the package is device, then specify the device in the - package config. \n - If the Package object is not complete, as indicated by `self.is_partial`, then `self.refresh()` is called to - update the object. - - :param deployment_name: Deployment Name - :type deployment_name: string - :param provision_configuration: Provision payload - :type provision_configuration: :py:class:`ProvisionConfiguration`: - :param retry_limit: Optional parameter to specify the number of retry attempts to be - carried out if any failures occurs during the API call. 
- :type retry_limit: int - :return: Instance of class :py:class:`Deployment`: - :raises: :py:class:`APIError`: If the API returns an error, a status code of - anything other than 200/201 is returned. - :raises: :py:class:`OperationNotAllowedError`: If the provision request is invalid - - """ - if not deployment_name or not isinstance(deployment_name, six.string_types): - raise InvalidParameterException("deployment_name must be a non-empty string") - if not isinstance(provision_configuration, ProvisionConfiguration): - raise InvalidParameterException('provision_configuration must be of type ProvisionConfiguration') - if provision_configuration._is_provisioned: - raise InvalidParameterException('cannot reuse this ProvisionConfiguration for provisioning') - if self.is_partial: - self.refresh() - provision_configuration.validate() - provision_configuration.context['name'] = deployment_name - delattr(provision_configuration, 'plan') - delattr(provision_configuration, '_dependency_seen_aliases') - provision_configuration._is_provisioned = True - - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - response = provision_client.provision(provision_configuration, retry_limit) - deployment_status = provision_client.deployment_status(response['operation'], retry_limit) - deployment = Deployment(to_objdict(deployment_status)) - deployment.is_partial = False - self._update_auth_token([deployment]) - return deployment - - def deployments(self, phases=None, retry_limit=0): - - """ - Get all the deployments of the package - - :param phases: optional parameter to filter out the deployments based on current deployment - :type phases: list(DeploymentPhaseConstants) - :param retry_limit: Optional parameter to specify the number of retry attempts to be - carried out if any failures occurs during the API call. 
- :type retry_limit: int - :return: list of instance of class :py:class:`Deployment`: - :raises: :py:class:`APIError`: If deployment info api return an error, the status code is - anything other than 200/201 - - Following example demonstrates how to the deployment list - - >>> from rapyuta_io import Client, DeploymentPhaseConstants - >>> client = Client(auth_token='auth_token', project='project') - >>> package = client.get_package('test_package_id') - >>> package.deployments() - >>> deployments_list_filtered_by_phase = package.deployments(phases= - >>> [DeploymentPhaseConstants.SUCCEEDED, DeploymentPhaseConstants.PROVISIONING]) - - """ - - deployment_list = ProvisionClient(self._host, self._auth_token, self._project) \ - .deployments(self.packageId, phases, retry_limit) - deployments = list() - if deployment_list: - for deployment in deployment_list: - deployments.append(Deployment(to_objdict(deployment))) - self._update_auth_token(deployments) - - return deployments - - def get_provision_configuration(self, plan_id=None): - """ - Get provision configuration payload for package provision request. \n - If the Package object is not complete, as indicated by `self.is_partial`, then `self.refresh()` is called to - update the object. - - :param plan_id: Plan Id - :type plan_id: string - :return: return instance of class :py:class:`ProvisionConfiguration`: - - """ - if self.is_partial: - self.refresh() - - if plan_id: - plan = self.get_plan_by_id(plan_id) - else: - try: - plan = self.plans[0] - except IndexError: - raise PlanNotFound() - - provision_request = ProvisionConfiguration(self.packageId, plan) - return provision_request - - -class ProvisionConfiguration(ObjDict): - """ - ProvisionConfiguration class that contains the component configuration for a package - deployment. 
- - """ - - def __init__(self, package_id, plan, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - self.service_id = package_id - self.plan = plan - self.plan_id = plan.planId - self.api_version = kwargs.get('api_version', '1.0.0') - self._init_parameters() - self._dependency_seen_aliases = set() - self._is_provisioned = False - self._devices = dict() - - def _init_parameters(self): - self.context = dict() - self.context['component_context'] = defaultdict(lambda: defaultdict(dict)) - self.instance_id = INSTANCE_ID - self.organization_guid = ORGANIZATION_GUID - self.space_guid = SPACE_GUID - self.accepts_incomplete = True - self.parameters = {'global': dict()} - self.context.setdefault('labels', list()) - self.context.setdefault('dependentDeployments', list()) - self.context.setdefault('diskMountInfo', list()) - self.context.setdefault('routedNetworks', list()) - self.context.setdefault('nativeNetworks', list()) - for component in self.plan.components.components: - component_id = self.plan.get_component_id(component.name) - com_dict = dict(component_id=component_id) - for params in component.parameters: - if not params.get('default'): - com_dict[params.name] = None - else: - com_dict[params.name] = params.default - self.parameters[component_id] = com_dict - self.set_component_alias(component.name) - job_defs = getattr(component, 'rosBagJobDefs', []) - for job_def in job_defs: - rosbag_opts = ROSBagOptions.deserialize(job_def.recordOptions) - upload_opts = UploadOptions.deserialize(job_def.uploadOptions) if hasattr(job_def, 'uploadOptions') \ - else None - override_opts = OverrideOptions.deserialize(job_def.overrideOptions) if \ - hasattr(job_def, 'overrideOptions') else None - job = ROSBagJob( - job_def.name, - rosbag_options=rosbag_opts, - upload_options=upload_opts, - override_options=override_opts - ) - self.add_rosbag_job(component.name, job) - - def _validate_device_id(self, component_id): - value = 
self.parameters[component_id].get(DEVICE_ID, None) - if is_empty(value): - msg = 'component is not mapped with the device' - raise OperationNotAllowedError(msg) - return True - - def _get_component_by_name(self, component_name): - for component in self.plan.components.components: - if component.name == component_name: - return component - raise AttributeError - - def validate_component_executables( - self, - component_name, - device_runtime, - device_docker_enabled, - device_preinstalled_enabled - ): - component = self.plan.get_component_by_name(component_name) - if not component: - raise OperationNotAllowedError('Component named %s is not found on the plan' % - component_name) - for executable in component.executables: - is_docker_executable = executable.get('buildGUID') or \ - executable.get('gitExecutable') or executable.get('docker') - - if is_docker_executable and (device_runtime != Device.DOCKER_COMPOSE and not device_docker_enabled): - raise OperationNotAllowedError('Device must be a {} device'.format(Device.DOCKER_COMPOSE)) - if not is_docker_executable and \ - (device_runtime != Device.PRE_INSTALLED and not device_preinstalled_enabled): - raise OperationNotAllowedError('Device must be a {} device'.format(Device.PRE_INSTALLED)) - - def add_restart_policy(self, component_name, restart_policy): - """ - Add RestartPolicy for the component - - :param component_name: Component name - :type component_name: string - :param restart_policy: one of RestartPolicy enums - :type restart_policy: enum :py:class:`~RestartPolicy` - :return: Updated instance of class :py:class:`ProvisionConfiguration` - :raises: :py:class:`InvalidParameterException`: If restart policy is not invalid - """ - if restart_policy not in list(RestartPolicy.__members__.values()): - raise InvalidParameterException('Restart policy must be one of rapyuta_io.clients.package.RestartPolicy') - component_id = self.plan.get_component_id(component_name) - 
self.context['component_context'][component_id]['component_override']['restart_policy'] = restart_policy.value - return self - - def add_rosbag_job(self, component_name, rosbag_job): - """ - Add rosbag for a component - - :param component_name: Component name - :type component_name: string - :param rosbag_job: instance of ROSBagJob - :type rosbag_job: :py:class:`~rapyuta_io.clients.rosbag.ROSBagJob` - :return: Updated instance of class :py:class:`ProvisionConfiguration` - :raises: :py:class:`InvalidParameterException`: If rosbag_job is not instance of ROSBagJob - :raises: :py:class:`OperationNotAllowedError`: If component is non ros or is a device component - """ - component_id = self.plan.get_component_id(component_name) - if not isinstance(rosbag_job, ROSBagJob): - raise InvalidParameterException('rosbag_job needs to a ROSBagJob object') - component = self.plan.get_component_by_name(component_name) - if not component.ros.isROS: - raise OperationNotAllowedError('rosbag job is only supported for ros components') - component_context = self.context['component_context'][component_id] - component_context['ros_bag_job_defs'] = component_context.get('ros_bag_job_defs', []) - for job in component_context['ros_bag_job_defs']: - if job['name'] == rosbag_job.name: - raise OperationNotAllowedError('rosbag job with same name already exists') - rosbag_jobs = {'name': rosbag_job.name, - 'recordOptions': rosbag_job.rosbag_options.serialize()} - if rosbag_job.upload_options: - rosbag_jobs['uploadOptions'] = rosbag_job.upload_options.serialize() - if rosbag_job.override_options: - rosbag_jobs['overrideOptions'] = rosbag_job.override_options.serialize() - component_context['ros_bag_job_defs'].append(rosbag_jobs) - - def remove_rosbag_job(self, component_name, job_name): - """ - Remove rosbag job by its name - - :param component_name: Component name - :type component_name: string - :param job_name: name of ROSBagJob - :type job_name: string - """ - component_id = 
self.plan.get_component_id(component_name) - component_context = self.context['component_context'][component_id] - component_context['ros_bag_job_defs'] = \ - [job for job in component_context['ros_bag_job_defs'] if job['name'] != job_name] - - def add_routed_network(self, routed_network, network_interface=None): - """ - Add Routed Network - - :param routed_network: RoutedNetwork - :type routed_network: instance of :py:class:`~rapyuta_io.clients.routed_network.RoutedNetwork` - :param network_interface: interface to which current deployment to bind - :type network_interface: string - :return: Updated instance of class :py:class:`ProvisionConfiguration` - :raises: :py:class:`InvalidParameterException`: If routed network is not valid - :raises: :py:class:`OperationNotAllowedError`: If network interface given for cloud runtime - """ - - if not isinstance(routed_network, RoutedNetwork): - raise InvalidParameterException('routed networks must be of type RoutedNetwork') - - if routed_network.runtime == Runtime.CLOUD and network_interface: - raise OperationNotAllowedError('cloud routed network does not bind to network interface') - - routed_network_config = dict() - routed_network_config_exists = False - for routed_net in self.context['routedNetworks']: - if routed_net['guid'] == routed_network['guid']: - routed_network_config_exists = True - routed_network_config = routed_net - break - - if network_interface: - routed_network_config['bindParameters'] = {'NETWORK_INTERFACE': network_interface} - - if not routed_network_config_exists: - routed_network_config['guid'] = routed_network.guid - self.context['routedNetworks'].append(routed_network_config) - - return self - - def add_routed_networks(self, routed_networks): - """ - - :param routed_networks: list of routed network :py:class:`~rapyuta_io.clients.routed_network.RoutedNetwork` - :type routed_networks: list - :return: Updated instance of class :py:class:`ProvisionConfiguration` - - >>> from rapyuta_io import Client - 
>>> from rapyuta_io.clients.package import ROSDistro - >>> client = Client(auth_token='auth_token', project='project') - >>> routed_network = client.create_cloud_routed_network('network_name', - ROSDistro.KINETIC, True) - >>> routed_network.poll_routed_network_till_ready() - >>> package = client.get_package('test_package_id') - >>> package_provision_config = package.get_provision_configuration('test_plan_id') - >>> package_provision_config.add_routed_networks([routed_network]) - >>> package.provision(deployment_name, package_provision_config) - """ - - for routed_network in routed_networks: - self.add_routed_network(routed_network) - return self - - def add_native_network(self, native_network, network_interface=None): - """ - Add Native Network - - :param native_network: NativeNetwork - :type native_network: instance of :py:class:`~rapyuta_io.clients.native_network.NativeNetwork` - :param network_interface: interface to which current deployment to bind, only required for device native network - :type network_interface: string - - :return: Updated instance of class :py:class:`ProvisionConfiguration` - :raises: :py:class:`InvalidParameterException`: If native network is not valid - :raises: :py:class:`OperationNotAllowedError`: If native network is not of cloud runtime - - >>> from rapyuta_io import Client - >>> from rapyuta_io.clients.package import ROSDistro, Runtime - >>> from rapyuta_io.clients.native_network import NativeNetwork - >>> client = Client(auth_token='auth_token', project='project') - >>> native_network = NativeNetwork("native_network_name", Runtime.CLOUD, - ... 
ROSDistro.KINETIC) - >>> native_network = client.create_native_network(native_network) - >>> native_network.poll_native_network_till_ready() - >>> package = client.get_package('test_package_id') - >>> package_provision_config = package.get_provision_configuration('test_plan_id') - >>> package_provision_config.add_native_network(native_network) - >>> package.provision('deployment_name', package_provision_config) - """ - if not isinstance(native_network, NativeNetwork): - raise InvalidParameterException('native network must be of type NativeNetwork') - - if native_network.runtime == Runtime.CLOUD and network_interface: - raise OperationNotAllowedError('cloud native network does not bind to network interface') - - native_network_config = dict() - native_network_config_exists = False - for native_net in self.context['nativeNetworks']: - if native_net['guid'] == native_network.guid: - native_network_config_exists = True - native_network_config = native_net - break - - if network_interface: - native_network_config['bindParameters'] = {'NETWORK_INTERFACE': network_interface} - - if not native_network_config_exists: - native_network_config['guid'] = native_network.guid - self.context['nativeNetworks'].append(native_network_config) - - return self - - def add_native_networks(self, native_networks): - """ - Add Native Networks - - :param native_networks: list of native network :py:class:`~rapyuta_io.clients.native_network.NativeNetwork` - :type native_networks: list - :return: Updated instance of class :py:class:`ProvisionConfiguration` - - >>> from rapyuta_io import Client - >>> from rapyuta_io.clients.package import ROSDistro, Runtime - >>> from rapyuta_io.clients.native_network import NativeNetwork - >>> client = Client(auth_token='auth_token', project='project') - >>> native_network = NativeNetwork("native_network_name", Runtime.CLOUD, - ROSDistro.KINETIC) - >>> native_network = client.create_native_network(native_network) - >>> 
native_network.poll_native_network_till_ready() - >>> package = client.get_package('test_package_id') - >>> package_provision_config = package.get_provision_configuration('test_plan_id') - >>> package_provision_config.add_native_networks([native_network]) - >>> package.provision('deployment_name', package_provision_config) - """ - for native_network in native_networks: - self.add_native_network(native_network) - return self - - def add_device(self, component_name, device, ignore_device_config=None, set_component_alias=True): - """ - Map component configuration with a device. ie, Setting the component is going to deploy on the given device. - By Default, the component alias name is set to the device name, if this has to be ignored please use - 'set_component_alias=False' as one of the method parameters. - - :param component_name: Component name - :type component_name: string - :param device: Device - :type device: instance of class :py:class:`Device`: - :param ignore_device_config: Optional parameter to ignore the device config variables - :type ignore_device_config: list - :param set_component_alias: Optional parameter to set the alias name of the component same as device name. 
- Defaults to True - :type set_component_alias: bool - :return: Updated instance of class :py:class:`ProvisionConfiguration`: - :raises: :py:class:`OperationNotAllowedError`: If the device is not online - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project='project') - >>> package = client.get_package('test_package_id') - >>> device = client.get_device('test_device_id') - >>> package_provision_config = package.get_provision_configuration('test_plan_id') - >>> # ros_workspace will be ignored while adding device to provision configuration - >>> package_provision_config.add_device('test_component_name', device, - >>> ignore_device_config=['ros_workspace'], set_component_alias=False) - >>> package.provision('deployment_name', package_provision_config) - - """ - ignore_device_config = ignore_device_config or [] - - device_runtime = device.get_runtime() - device_docker_enabled = device.is_docker_enabled() - device_preinstalled_enabled = device.is_preinstalled_enabled() - - self.validate_component_executables( - component_name, - device_runtime, - device_docker_enabled, - device_preinstalled_enabled - ) - - component_id = self.plan.get_component_id(component_name) - component_params = self.parameters.get(component_id) - component_params[DEVICE_ID] = device.deviceId - - if set_component_alias: - self.set_component_alias(component_name, device.name) - if (device_runtime != device.PRE_INSTALLED and not device_preinstalled_enabled) \ - and 'ros_workspace' not in ignore_device_config: - ignore_device_config.append('ros_workspace') - if (device_runtime != device.DOCKER_COMPOSE and not device_docker_enabled) and \ - 'rosbag_mount_path' not in ignore_device_config: - ignore_device_config.append('rosbag_mount_path') - - for config_var in device.get_config_variables(): - if config_var.key in ignore_device_config: - continue - component_params[config_var.key] = config_var.value - - if device_runtime == device.PRE_INSTALLED or 
device_preinstalled_enabled: - if 'ros_workspace' not in ignore_device_config and not self._validate_ros_workspace( - component_id): - raise InvalidParameterException('ros_workspace is not set') - - component = self._get_component_by_name(component_name) - if not component.ros.isROS: - self.parameters[component_id].pop('ros_distro', None) - if (device_runtime == device.PRE_INSTALLED or device_preinstalled_enabled) and \ - not self._validate_ros_distro(component.ros.isROS, component_id): - raise InvalidParameterException('ros_distro is not set') - global_config = self.parameters.get('global') - if 'device_ids' not in global_config: - global_config['device_ids'] = list() - global_config['device_ids'].append(device.deviceId) - self._devices[device.deviceId] = device - return self - - def add_parameter(self, component_name, key, value): - """ - Add component parameters - - :param component_name: Component name - :type component_name: string - :param key: Parameter key - :type key: string - :param value: Parameter value - :type value: string - :return: Updated instance of class :py:class:`ProvisionConfiguration`: - """ - if not component_name or not isinstance(component_name, six.string_types): - raise InvalidParameterException("component_name must be a non-empty string") - if not key or not isinstance(key, six.string_types): - raise InvalidParameterException("key must be a non-empty string") - - component_id = self.plan.get_component_id(component_name) - self.parameters[component_id][key] = value - - return self - - def set_component_alias(self, component_name, alias="", set_ros_namespace=False): - """ - Set an alias and ROS_NAMESPACE environment variable flag for the selected component. - This is used in scoping and targeting. alias defaults to the component name. 
- set_ros_namespace defaults to false - - *Note:* In typical scenarios in the case of a cloud deployment, alias is set to the component name - (or some derivation thereof) and on the device it is set to the device name. But it is left to the user. - All set aliases in a deployment and its dependent deployments are required to be unique. - - :param component_name: Component name - :type component_name: string - :param alias: alias for component - :type alias: string - :param set_ros_namespace: flag to set alias as ROS_NAMESPACE environment variable in the deployment. - It should be used only for deployments using native networks. - :type set_ros_namespace: bool - :return: Updated instance of class :py:class:`ProvisionConfiguration`: - """ - if not component_name or not isinstance(component_name, six.string_types): - raise InvalidParameterException("component_name must be a non-empty string") - if not isinstance(alias, six.string_types): - raise InvalidParameterException("alias must be a string") - if not isinstance(set_ros_namespace, bool): - raise InvalidParameterException("set_ros_namespace must be a boolean") - component_id = self.plan.get_component_id(component_name) - alias = component_name if alias == "" else alias - self.parameters[component_id]["bridge_params"] = {"alias": alias, "setROSNamespace": set_ros_namespace} - - def _add_dependency(self, deployment_id, component_id=None, mount_path=None, network_interface=None, - executable_mounts=None): - dep_info = dict() - dep_info['dependentDeploymentId'] = deployment_id - if component_id: - dep_info['applicableComponentId'] = component_id - else: - dep_info['applicableComponentId'] = '' - - dep_info['config'] = dict() - if mount_path: - dep_info['config']['mountPath'] = mount_path - if executable_mounts: - d = {} - for mount in executable_mounts: - d[mount.exec_name] = {} - d[mount.exec_name]['mountPath'] = mount.mount_path - if mount.sub_path: - d[mount.exec_name]['subPath'] = mount.sub_path - 
dep_info['config']['mountPaths'] = d - if network_interface: - dep_info['config']['NETWORK_INTERFACE'] = network_interface - self.context['dependentDeployments'].append(dep_info) - - def _add_disk_mount_info(self, resource_id, component_id, executable_mounts): - dep_info = dict() - dep_info['diskResourceId'] = resource_id - dep_info['applicableComponentId'] = component_id - dep_info['config'] = dict() - mountPaths = {} - for mount in executable_mounts: - mountPaths[mount.exec_name] = {} - mountPaths[mount.exec_name]['mountPath'] = mount.mount_path - if mount.sub_path: - mountPaths[mount.exec_name]['subPath'] = mount.sub_path - else: - mountPaths[mount.exec_name]['subPath'] = '/' - - dep_info['config']['mountPaths'] = mountPaths - self.context['diskMountInfo'].append(dep_info) - - def mount_volume(self, component_name, volume=None, device=None, mount_path=None, executable_mounts=None): - """ - To mount a volume instance. - - :param component_name: Component name - :type component_name: string - :param volume: VolumeInstance class - :type volume: instance of class :py:class:`VolumeInstance`: - :param device: Device class - :type device: instance of class :py:class:`Device`: - :param mount_path: Mount path - :type mount_path: string - :param executable_mounts: list of executable mounts. 
mandatory parameter for device volumes - :type executable_mounts: list(:py:class:`ExecutableMount`) - :return: Updated instance of class :py:class:`ProvisionConfiguration`: - """ - if volume == None and device == None: - raise InvalidParameterException('either a volume or device parameter must be present') - if volume != None and device != None: - raise InvalidParameterException('both volume and device parameter cannot be present') - component_id = self.plan.get_component_id(component_name) - if device != None: - if not isinstance(device, Device): - raise InvalidParameterException('device must be of type Device') - if not isinstance(executable_mounts, list) or not all( - isinstance(mount, ExecutableMount) for mount in executable_mounts): - raise InvalidParameterException( - 'executable_mounts must be a list of rapyuta_io.clients.package.ExecutableMount') - if device.get_runtime() != Device.DOCKER_COMPOSE and not device.is_docker_enabled(): - raise OperationNotAllowedError('Device must be a {} device'.format(Device.DOCKER_COMPOSE)) - component_params = self.parameters.get(component_id) - if component_params.get(DEVICE_ID) != device.deviceId: - raise OperationNotAllowedError('Device must be added to the component') - self._add_disk_mount_info(device.deviceId, component_id, executable_mounts) - else: - if not isinstance(volume, rapyuta_io.clients.persistent_volumes.VolumeInstance): - raise InvalidParameterException( - 'volume must be of type rapyuta_io.clients.persistent_volumes.VolumeInstance') - if not volume.packageId == VOLUME_PACKAGE_ID: - raise InvalidParameterException('Invalid volume instance') - if volume.get_status().phase != DeploymentPhaseConstants.SUCCEEDED.value: - raise OperationNotAllowedError('Dependent deployment is not running') - if (mount_path is None and executable_mounts is None) or ( - mount_path is not None and executable_mounts is not None): - raise InvalidParameterException('One of mount_path or executable_mounts should be present') - if 
executable_mounts is not None and ((not isinstance(executable_mounts, list)) or not all( - isinstance(mount, ExecutableMount) for mount in executable_mounts)): - raise InvalidParameterException( - 'executable_mounts must be a list of rapyuta_io.clients.package.ExecutableMount') - self._add_dependency(volume.deploymentId, component_id, mount_path, executable_mounts=executable_mounts) - return self - - def add_dependent_deployment(self, deployment, ready_phases=[DeploymentPhaseConstants.SUCCEEDED.value]): - """ - Add dependent deployments. \n - `deployment.refresh()` is called to get the latest deployment status. - - :param deployment: Deployment - :type deployment: class :py:class:`Deployment`: - :return: Updated instance of class :py:class:`ProvisionConfiguration`: - """ - deployment.refresh() - if deployment.phase not in ready_phases: - raise OperationNotAllowedError('dependent deployment is not ready') - self._update_dependency_seen_aliases_set(deployment) - self._add_dependency(deployment_id=deployment.deploymentId) - - return self - - def add_static_route(self, component_name, endpoint_name, static_route): - """ - Add static route to a component in a package - - :param component_name: Name of the component to add static route to - :param endpoint_name: Name of the endpoint (Should be exposed externally) - :param static_route: class :py:class:`StaticRoute`: - :return: Updated instance of class :py:class:`ProvisionConfiguration`: - """ - if not isinstance(static_route, StaticRoute): - raise TypeError("{} is not of type Static Route".format(static_route)) - component_id = self.plan.get_component_id(component_name) - self.context['component_context'][component_id]['static_route_config'][endpoint_name] = static_route.guid - return self - - def _update_dependency_seen_aliases_set(self, deployment): - for params in deployment.parameters.values(): - try: - self._dependency_seen_aliases.add(params.bridge_params.alias) - self.plan._needs_alias = True - except 
AttributeError: - pass - - def add_label(self, key, value): - """ - Add labels - - :param key: Key - :type key: string - :param value: Value - :type value: string - :return: Updated instance of class :py:class:`ProvisionConfiguration`: - """ - if not key or not value: - raise ParameterMissingException(str.format("key or value of parameter is missing")) - - label = {'key': key, 'value': value} - self.context[LABELS].append(label) - return self - - def _validate_ros_workspace(self, component_id): - if not self.parameters[component_id].get('ros_workspace'): - return False - return True - - def _validate_ros_distro(self, isRos, component_id): - if isRos and not self.parameters[component_id].get('ros_distro'): - return False - return True - - def validate(self): - for component in self.plan.components.components: - component_id = self.plan.get_component_id(component.name) - if component.requiredRuntime == DEVICE: - self._validate_device_id(component_id) - self._validate_rosbag_devices(component_id) - component_params = component.parameters - for param in component_params: - name = param.name - if is_empty(self.parameters[component_id][name]): - raise InvalidParameterException('Provide the value for the parameter {} in ' - 'component {}'.format(param.name, component.name)) - - self._validate_aliases() - return True - - def _validate_rosbag_devices(self, component_id): - if not self.context['component_context'][component_id].get('ros_bag_job_defs'): - return - device_id = self.parameters[component_id].get(DEVICE_ID, None) - if not device_id: - return - device = self._devices.get(device_id) - if device.get_runtime() == device.PRE_INSTALLED: - raise InvalidParameterException('ROSBag on Device does not support Preinstalled ' - 'devices') - required_config = [x for x in device.config_variables if x.key == 'rosbag_mount_path' - and x.value != ''] - if not required_config: - raise InvalidParameterException('This device does not have ROSBag components installed.' 
- ' Please re-onboard the device to use ROSBag features') - - def _validate_aliases(self): - aliases_needed = self.plan.needs_aliases() - if not aliases_needed: - for params in self.parameters.values(): - if "bridge_params" in params: - params.pop("bridge_params") - return - seen_aliases = set() - for component_id, params in self.parameters.items(): - if component_id == "global": - continue - try: - alias = params["bridge_params"]["alias"] - if alias in seen_aliases: - raise DuplicateAliasException( - "Aliases must be unique. Alias %s provided for %s component isn't" % (alias, component_id)) - if alias in self._dependency_seen_aliases: - raise DuplicateAliasException( - "Aliase %s for %s component conflicts with dependant deployment" % (alias, component_id)) - seen_aliases.add(alias) - except KeyError: - raise AliasNotProvidedException( - "Aliases are required but not provided for %s component" % component_id) - - -class ExecutableMount(object): - """ - ExecutableMount defines the mount details specific to an executable. - - :ivar exec_name: Name of the executable. - :vartype exec_name: str - :ivar mount_path: Mountpath of the executable - :vartype mount_path: str - :ivar sub_path: Subpath of the executable - :vartype sub_path: str - :ivar uid: Userid to which subpath belongs to - :vartype uid: int - :ivar gid: Groupid to which subpath belongs to - :vartype gid: int - :ivar perm: Permissions for subpath - :vartype perm: int - - :param exec_name: Name of the executable. 
- :type exec_name: str - :param mount_path: Mountpath of the executable - :type mount_path: str - :param sub_path: Subpath of the executable - :type sub_path: str - :param uid: userid of subpath - :type uid: int - :param gid: groupid of subpath - :type gid: int - :param perm: permissions of subpath - :type perm: int - """ - - def __init__(self, exec_name, mount_path, sub_path=None, uid=None, gid=None, perm=None): - self.validate(exec_name, mount_path, sub_path, uid, gid, perm) - self.exec_name = exec_name - self.mount_path = mount_path - self.sub_path = sub_path - self.uid = uid - self.gid = gid - self.perm = perm - - @staticmethod - def validate(exec_name, mount_path, sub_path=None, uid=None, gid=None, perm=None): - if not isinstance(exec_name, six.string_types): - raise InvalidParameterException('exec_name must be a non-empty string') - if not isinstance(mount_path, six.string_types): - raise InvalidParameterException('mount_path must be a non-empty string') - if sub_path is not None and not isinstance(sub_path, six.string_types): - raise InvalidParameterException('sub_path must be a non-empty string') - if uid is not None and not isinstance(uid, six.integer_types): - raise InvalidParameterException('uid must be a non-empty integer') - if gid is not None and not isinstance(gid, six.integer_types): - raise InvalidParameterException('gid must be a non-empty integer') - if perm is not None and not isinstance(perm, six.integer_types): - raise InvalidParameterException('perm must be a non-empty integer') - - -class RestartPolicy(str, enum.Enum): - """ - Enumeration variables for the Restart Policy. Restart Policy may be 'Always', 'Never' or 'OnFailure' \n - RestartPolicy.Always \n - RestartPolicy.Never \n - RestartPolicy.OnFailure \n - """ - - def __str__(self): - return str(self.value) - - Always = "always" - Never = "no" - OnFailure = "on-failure" - - -class ROSDistro(str, enum.Enum): - """ - Enumeration variables for the Supported ROS Distros. 
ROS Distro may be one of: \n - ROSDistro.KINETIC ('kinetic') \n - ROSDistro.MELODIC ('melodic') \n - ROSDistro.NOETIC ('noetic') \n - """ - - def __str__(self): - return str(self.value) - - KINETIC = 'kinetic' - MELODIC = 'melodic' - NOETIC = 'noetic' - - -class Runtime(str, enum.Enum): - """ - Enumeration variables for the Supported Runtimes. Runtime may be 'cloud', or 'device' \n - Runtime.CLOUD \n - Runtime.DEVICE \n - """ - - def __str__(self): - return str(self.value) - - CLOUD = 'cloud' - DEVICE = 'device' diff --git a/rapyuta_io/clients/persistent_volumes.py b/rapyuta_io/clients/persistent_volumes.py deleted file mode 100644 index 8c18603e..00000000 --- a/rapyuta_io/clients/persistent_volumes.py +++ /dev/null @@ -1,348 +0,0 @@ -# encoding: utf-8 -from __future__ import absolute_import -import enum - -from rapyuta_io.clients import ProvisionClient -from rapyuta_io.clients.deployment import DeploymentPhaseConstants, _poll_till_ready -from rapyuta_io.clients.package import ProvisionConfiguration, Runtime -from rapyuta_io.clients.plan import Plan -from rapyuta_io.utils import ObjDict, to_objdict, InvalidParameterException, DeploymentNotRunningException -from rapyuta_io.utils.settings import DEPLOYMENT_STATUS_RETRY_COUNT, DEFAULT_SLEEP_INTERVAL -from rapyuta_io.utils.partials import PartialMixin - -import six - -VOLUME_COMPONENT = 'volumeComponent' - - -class DiskType(str, enum.Enum): - """ - Enumeration variables for the Volume Type. The type may be 'Default' or 'SSD' \n - DiskType.DEFAULT \n - DiskType.SSD - """ - - def __str__(self): - return str(self.value) - - SSD = 'ssd' - DEFAULT = 'ssd' - - -class DiskCapacity(int, enum.Enum): - """ - Enumeration variables for disk capacity. 
The type may be one of the following \n - DiskCapacity.GiB_4 \n - DiskCapacity.GiB_8 \n - DiskCapacity.GiB_16 \n - DiskCapacity.GiB_32 \n - DiskCapacity.GiB_64 \n - DiskCapacity.GiB_128 \n - DiskCapacity.GiB_256 \n - DiskCapacity.GiB_512 \n - """ - - def __str__(self): - return str(self.value) - - GiB_4 = 4 - GiB_8 = 8 - GiB_16 = 16 - GiB_32 = 32 - GiB_64 = 64 - GiB_128 = 128 - GiB_256 = 256 - GiB_512 = 512 - - -class VolumeInstanceStatus(ObjDict): - """ - VolumeInstanceStatus class - - :ivar deploymentId: Deployment Id. - :ivar name: Volume instance name. - :ivar packageId: Package Id. - :ivar status: Deployment status - :ivar phase: Deployment phase - :ivar errors: Deployment errors - :ivar componentInfo: List containing the deployment components and their status. - :ivar dependentDeploymentStatus: Dependent deployment status. - :ivar packageDependencyStatus: Package dependency status. - - """ - - def __init__(self, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - - -class PersistentVolumes(ObjDict): - """ - PersistentVolumes class represents a a persistent volume package. It contains methods to create - persistent volume instance and listing all the instances. - - :ivar packageId: Id of the package. - :ivar packageName: Package name. - :ivar packageVersion: Version of the package. - :ivar apiVersion: Package API Version. - :ivar bindable: Package is bindable or not. - :ivar description: Description of the package. - :ivar category: Package category. - :ivar plans: List of plans associated with the package. - :vartype plans: list(:py:class:`~rapyuta_io.clients.plan.Plan`) - :ivar isPublic: Boolean denoting whether the package is public or not. - :ivar status: Status of the package. - :ivar tags: Tags associated with the package. - :ivar buildGeneration: Build generation. - :ivar ownerProject: Owner project guid. - :ivar creator: Creator user guid. - :ivar CreatedAt: Date of creation. - :ivar UpdatedAt: Date of updation. 
- :ivar DeletedAt: Date of deletion. - - """ - - def __init__(self, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - plan = Plan(to_objdict(self.plans[0])) - self.plans = [plan] - - def _update_auth_token(self, objects): - for obj in objects: - setattr(obj, '_host', self._host) - setattr(obj, '_auth_token', self._auth_token) - setattr(obj, '_project', self._project) - return - - def create_volume_instance(self, name, capacity, disk_type=DiskType.DEFAULT, retry_limit=0): - """ - Create a volume instance - - :param name: name of the volume instance - :type name: str - :param capacity: disk capacity of the volume instance - :type capacity: enum :py:class:`~DiskCapacity` - :param disk_type: Type of disk to be deployed. Allowed values are - default or ssd - :type disk_type: enum :py:class:`~DiskType` - :param retry_limit: Optional parameter to specify the number of retry attempts to be - carried out if any failures occurs during the API call. - :type retry_limit: int - :returns: volume instance - :raises: :py:class:`InvalidParameterException`: If the disk type and volume capacity - parameters are missing or invalid. 
- :raises: :py:class:`APIError`: If the api return an error, the status code is - anything other than 200/201 - - Following example demonstrates how to create a volume instance - - >>> from rapyuta_io import Client - >>> from rapyuta_io.clients.persistent_volumes import DiskType, DiskCapacity - >>> client = Client(auth_token='auth_token', project='project_guid') - >>> pv = client.get_persistent_volume() - >>> pv.create_volume_instance(name='myVolume', capacity=DiskCapacity.GiB_32, disk_type=DiskType.SSD) - - """ - if disk_type not in list(DiskType.__members__.values()): - raise InvalidParameterException('disk_type must be of rapyuta_io.clients.persistent_volumes.DiskType') - # supporting integer values for backward compatibility - if capacity not in list(DiskCapacity.__members__.values()) or not(isinstance(capacity, DiskCapacity) - or isinstance(capacity, six.integer_types)): - raise InvalidParameterException('capacity must be one of ' - 'rapyuta_io.clients.persistent_volumes.DiskCapacity') - disk_capacity = capacity if isinstance(capacity, six.integer_types) else capacity.value - - disk_payload = {'name': name, 'runtime': Runtime.CLOUD, 'capacity': disk_capacity, 'diskType': disk_type} - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - response = provision_client.create_disk(disk_payload, retry_limit) - disk = provision_client.get_disk(response['guid'], retry_limit) - volume_instance = provision_client.deployment_status(disk['internalDeploymentGUID'], retry_limit) - volume_instance = VolumeInstance(to_objdict(volume_instance)) - self._update_auth_token([volume_instance]) - volume_instance.is_partial = False - return volume_instance - - def get_volume_instance(self, volume_instance_id, retry_limit=0): - """ - Get a volume instance - - :param volume_instance_id: Volume instance Id - :type volume_instance_id: string - :param retry_limit: Optional parameter to specify the number of retry attempts to be - carried out if any failures 
occurs during the API call. - :type retry_limit: int - :return: return instance of class :py:class:`VolumeInstance`: - :raises: :py:class:`APIError`: If the api return an error, the status code is - anything other than 200/201 - - - Following example demonstrates how to a volume instance - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> persistent_volume = client.get_persistent_volume() - >>> volume_instance = persistent_volume.get_volume_instance('instance_id') - - """ - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - instance = provision_client.deployment_status(volume_instance_id, retry_limit) - volume_instance = VolumeInstance(to_objdict(instance)) - self._update_auth_token([volume_instance]) - volume_instance.is_partial = False - return volume_instance - - def get_all_volume_instances(self, phases=None, retry_limit=0, deploymentGUIDs=None): - """ - Get all persistent volume instances - - :param phases: optional parameter to filter out the deployments based on current deployment - :type phases: list(DeploymentPhaseConstants) - :param retry_limit: Optional parameter to specify the number of retry attempts to be - carried out if any failures occurs during the API call. 
- :type retry_limit: int - :returns: List of volume instances - :raises: :py:class:`APIError`: If the api return an error, the status code is - anything other than 200/201 - - Following example demonstrates how to create a volume instance - - >>> from rapyuta_io import Client, DeploymentPhaseConstants - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> pv = client.get_persistent_volume() - >>> pv.get_all_volume_instances() - >>> volume_deployments_list_filtered_by_phase = pv.get_all_volume_instances(phases= - >>> [DeploymentPhaseConstants.SUCCEEDED, DeploymentPhaseConstants.PROVISIONING]) - - """ - - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - disks = provision_client.list_disk(deploymentGUIDs, retry_limit) - volumes = list() - for disk in disks: - volume_instance = provision_client.deployment_status(disk['internalDeploymentGUID'], retry_limit) - volume_instance = VolumeInstance(to_objdict(volume_instance)) - if phases is None or volume_instance.phase in phases: - volumes.append(volume_instance) - self._update_auth_token(volumes) - return volumes - - -class VolumeInstance(PartialMixin, ObjDict): - """ - VolumeInstance class represents a running Persistent Volume. \n - Variables marked as (full-only) are only available on a full object. Use `refresh()` to convert a - partial object into a full one. - - :ivar deploymentId: Deployment Id. - :ivar name: Deployment name. - :ivar packageId: Package Id. - :ivar packageName: Package Name. - :ivar packageAPIVersion: Package API Version. - :ivar planId: Plan Id. - :ivar bindable: Deployment is bindable or not. - :ivar labels: (full-only) Labels associated with the deployment. - :ivar parameters: (full-only) Deployment parameters. - :ivar componentInfo: (full-only) List of component details. - :ivar componentInstanceIds: (full-only) List of component instance ids. - :ivar dependentDeployments: (full-only) List of dependent deployments. 
- :ivar dependentDeploymentStatus: (full-only) Dependent deployments status details. - :ivar packageDependencyStatus: (full-only) Package dependency status details. - :ivar coreNetworks: (full-only) Routed and Native network details. - :ivar phase: Phase of the deployment. - :vartype phase: :py:class:`~rapyuta_io.clients.deployment.DeploymentPhaseConstants` - :ivar status: (full-only) Status of the deployment. - :vartype status: :py:class:`~rapyuta_io.clients.deployment.DeploymentStatusConstants` - :ivar provisionContext: (full-only) Context set during provisioning. - :ivar currentGeneration: (full-only) Build generation number. - :ivar errors: (full-only) List of errors. - :ivar inUse: Deployment is in use or not. - :ivar ownerProject: Owner project guid. - :ivar creator: Creator user guid. - :ivar CreatedAt: Date of creation. - :ivar UpdatedAt: Date of updation. - :ivar DeletedAt: Date of deletion. - - """ - def __init__(self, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - - def get_status(self, retry_limit=0): - """ - Get the status of volume instance - - :param retry_limit: Optional parameter to specify the number of retry attempts to be - carried out if any failures occurs during the API call. 
- :type retry_limit: int - :returns: instance of class :py:class:`DeploymentStatus`: - :raises: :py:class:`APIError`: If the api return an error, the status code is - anything other than 200/201 - - Following example demonstrates how to get a deployment status - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> persistent_volume = client.get_persistent_volume() - >>> volume_instance = persistent_volume.get_volume_instance('instance_id') - >>> volume_instance.get_status() - - """ - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - instance_status = provision_client.deployment_status(self.deploymentId, retry_limit) - return VolumeInstanceStatus(to_objdict(instance_status)) - - def refresh(self): - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - full_volume_instance = provision_client.deployment_status(self.deploymentId, retry_limit=0) - for key, value in six.iteritems(full_volume_instance): - setattr(self, key, to_objdict(value)) - self.is_partial = False - - def poll_deployment_till_ready(self, retry_count=DEPLOYMENT_STATUS_RETRY_COUNT, - sleep_interval=DEFAULT_SLEEP_INTERVAL): - """ - - Wait for the deployment to be ready - - :param retry_count: Optional parameter to specify the retries. Default value is 15 - :param sleep_interval: Optional parameter to specify the interval between retries. - Default value is 6 Sec. - :return: instance of class :py:class:`VolumeInstanceStatus`: - :raises: :py:class:`APIError`: If service binding api return an error, the status code is - anything other than 200/201 - :raises: :py:class:`DeploymentNotRunningException`: If the deployment's state might not - progress due to errors - :raises: :py:class:`RetriesExhausted`: If number of polling retries exhausted before the - deployment could succeed or fail. - - Following example demonstrates use of poll_deployment_till_ready. 
- - >>> from rapyuta_io import Client - >>> from rapyuta_io.utils.error import (DeploymentNotRunningException, - ... RetriesExhausted) - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> persistent_volume = client.get_persistent_volume() - >>> volume_instance = persistent_volume.get_volume_instance('instance_id') - >>> try: - ... vol_status = volume_instance.poll_deployment_till_ready(sleep_interval=20) - ... print vol_status - ... except RetriesExhausted as e: - ... print e, 'Retry again?' - ... except DeploymentNotRunningException as e: - ... print e, e.deployment_status - - """ - return _poll_till_ready(self, retry_count, sleep_interval) - - def destroy_volume_instance(self, retry_limit=0): - """ - Destroy a volume instance - - :param retry_limit: Optional parameter to specify the number of retry attempts to be - carried out if any failures occurs during the API call. - :type retry_limit: int - :returns: True if volume is destroyed is successfully, False otherwise - :raises: :py:class:`APIError`: If the api return an error, the status code is - anything other than 200/201 - """ - provision_client = ProvisionClient(self._host, self._auth_token, self._project) - disks = provision_client.list_disk([self.deploymentId], retry_limit) - if len(disks): - return provision_client.delete_disk(disks[0]['guid'], retry_limit) diff --git a/rapyuta_io/clients/plan.py b/rapyuta_io/clients/plan.py deleted file mode 100644 index a79832c2..00000000 --- a/rapyuta_io/clients/plan.py +++ /dev/null @@ -1,124 +0,0 @@ -# encoding: utf-8 -from __future__ import absolute_import - -import six - -from rapyuta_io.utils import ObjDict, ComponentNotFoundException -from rapyuta_io.utils.utils import is_empty - - -class Plan(ObjDict): - """ - Plan class represents a plan in the package. Member variables of the class represent the - properties of the plan. - - :ivar planId: (full-only) Plan Id. - :ivar planName: (full-only) Plan Name. 
- :ivar packageId: (full-only) Package Id. - :ivar description: Plan Description. - :ivar singleton: (full-only) Boolean representing whether the plan is singelton or not. - :ivar inboundROSInterfaces: (full-only) Dictionary containing inbound ROS interfaces information. - :ivar dependentDeployments: (full-only) List of other dependent deployments. - :ivar components: (full-only) Dictionary containing components such as executables. - :ivar internalComponents: (full-only) Dictionary containing internal components information. - :ivar metadata: List containing plan metadata. - :ivar CreatedAt: (full-only) Date of creation. - :ivar UpdatedAt: (full-only) Date of updation. - :ivar DeletedAt: (full-only) Date of deletion. - - """ - - def __init__(self, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - if 'planId' in self: - self.planId = self.planId - else: - self.planId = self.id - self._component_id_map = dict() - self._map_component_id_with_name() - self._needs_alias = None - - def _map_component_id_with_name(self): - if not hasattr(self, 'internalComponents'): - raise ComponentNotFoundException('Internal components not found for the plan: %s(%s)' - % (self.planName, self.planId)) - for component in self.internalComponents: - self._component_id_map[component['componentName']] = component['componentId'] - - def get_component_id(self, component_name): - component_id = self._component_id_map.get(component_name) - if is_empty(component_id): - raise ComponentNotFoundException('Component %s is not found' % component_name) - return component_id - - def get_component_by_name(self, component_name): - for component in self.components.components: - if component.name == component_name: - return component - return None - - def _has_inbound_targeted(self): - try: - if hasattr(self.inboundROSInterfaces.inboundROSInterfaces, 'anyIncomingScopedOrTargetedRosConfig') and \ - self.inboundROSInterfaces.inboundROSInterfaces.anyIncomingScopedOrTargetedRosConfig: - 
return True - for topic in self.inboundROSInterfaces.inboundROSInterfaces.topics: - if isinstance(topic.targeted, bool) and topic.targeted: - return True - if isinstance(topic.scoped, six.string_types): - if topic.scoped.lower() == 'true': - return True - for service in self.inboundROSInterfaces.inboundROSInterfaces.services: - if isinstance(service.targeted, bool) and service.targeted: - return True - if isinstance(service.scoped, six.string_types): - if service.scoped.lower() == 'true': - return True - for action in self.inboundROSInterfaces.inboundROSInterfaces.actions: - if isinstance(action.targeted, bool) and action.targeted: - return True - if isinstance(action.scoped, six.string_types): - if action.scoped.lower() == 'true': - return True - except AttributeError: - pass - return False - - def _has_oubound_scoped_targeted(self): - if not hasattr(self, 'components'): - return False - if not hasattr(self.components, 'components'): - return False - for component in self.components.components: - if not hasattr(component, 'ros'): - continue - for entity in ['topics', 'services', 'actions']: - entity_list = getattr(component.ros, entity, []) - for element in entity_list: - if hasattr(element, 'targeted'): - if isinstance(element.targeted, bool) and element.targeted: - return True - if isinstance(element.targeted, six.string_types): - if element.scoped.lower() == 'true': - return True - if hasattr(element, 'scoped'): - if isinstance(element.scoped, bool) and element.scoped: - return True - if isinstance(element.scoped, six.string_types): - if element.scoped.lower() == 'true': - return True - return False - - def needs_aliases(self): - if self._needs_alias is not None: - return self._needs_alias - self._needs_alias = (self._has_inbound_targeted() or self._has_oubound_scoped_targeted()) - return self._needs_alias - - def validate(self): - for component in self.config.component_parameters.keys(): - for param in self.config.component_parameters[component]: - if 
self.config.component_parameters[component][param] is None: - raise ValueError( - "Value of parameter %s of component %s not provided" % ( - param, component)) diff --git a/rapyuta_io/clients/project.py b/rapyuta_io/clients/project.py index 56a2988f..477c6557 100644 --- a/rapyuta_io/clients/project.py +++ b/rapyuta_io/clients/project.py @@ -1,15 +1,13 @@ from __future__ import absolute_import -import re import enum +import re import six from rapyuta_io.clients.organization import Organization -from rapyuta_io.utils import RestClient, InvalidParameterException +from rapyuta_io.utils import InvalidParameterException from rapyuta_io.utils.object_converter import ObjBase, list_field, nested_field -from rapyuta_io.utils.rest_client import HttpMethod -from rapyuta_io.utils.utils import create_auth_header, get_api_response_data project_name_regex = re.compile('^[a-z0-9-]{3,15}$') @@ -18,8 +16,6 @@ class Project(ObjBase): """ Project is an organizational unit and all the resources must belong to a Project. 
- :ivar id: id of the Project - :vartype id: int :ivar guid: guid of the Project :vartype guid: str :ivar created_at: creation time of the Project @@ -33,11 +29,7 @@ class Project(ObjBase): :ivar creator: GUID of the User that created the Project :vartype creator: str :ivar users: Users that have access to the Project - :vartype users: list(:py:class:`~rapyuta_io.clients.project.User`) - :ivar organization: Organization that the project belongs to - :vartype organization: :py:class:`~rapyuta_io.clients.organization.Organization` """ - PROJECT_PATH = '/api/project' def __init__(self, name, organization_guid=None): self.validate(name, organization_guid) @@ -55,27 +47,6 @@ def __init__(self, name, organization_guid=None): setattr(org, 'guid', organization_guid) self.organization = org - def get_deserialize_map(self): - return { - 'id': 'ID', - 'guid': 'guid', - 'created_at': 'CreatedAt', - 'updated_at': 'UpdatedAt', - 'deleted_at': 'DeletedAt', - 'name': 'name', - 'creator': 'creator', - 'users': list_field('users', User), - 'organization': nested_field('organization', Organization) - } - - def get_serialize_map(self): - serialized_project = { - 'name': 'name', - } - if self.organization is not None: - serialized_project['organization'] = 'organization' - return serialized_project - @staticmethod def validate(name, organization_guid): if not isinstance(name, str): @@ -88,27 +59,23 @@ def validate(name, organization_guid): if not project_name_regex.match(name): raise InvalidParameterException('name can have alphabets, numbers or - only') - def delete(self): - - """ - Delete the project using the project object. 
- - Following example demonstrates how to delete a project using project object: - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project='project_guid') - >>> project = client.get_project(guid='project-id') - >>> project.delete() - - """ + def get_deserialize_map(self): + return { + 'guid': 'guid', + 'created_at': 'createdAt', + 'updated_at': 'updatedAt', + 'deleted_at': 'deletedAt', + 'name': 'name', + 'creator': 'creatorGUID', + } - if not (hasattr(self, '_core_api_host') and hasattr(self, '_auth_token')): - raise InvalidParameterException('Project must be created first') - url = self._core_api_host + self.PROJECT_PATH + '/delete' - headers = create_auth_header(self._auth_token, self.guid) - payload = {'guid': self.guid} - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute(payload) - get_api_response_data(response, parse_full=True) + def get_serialize_map(self): + serialized_project = { + 'name': 'name', + } + if self.organization is not None: + serialized_project['organization'] = 'organization' + return serialized_project class User(ObjBase): @@ -132,6 +99,7 @@ class User(ObjBase): :ivar organizations: List of organizations that the user is part of :vartype organizations: list(:py:class:`~rapyuta_io.clients.organization.Organization`) """ + def __init__(self): self.guid = None self.first_name = None diff --git a/rapyuta_io/clients/provision_client.py b/rapyuta_io/clients/provision_client.py deleted file mode 100644 index ce957765..00000000 --- a/rapyuta_io/clients/provision_client.py +++ /dev/null @@ -1,93 +0,0 @@ -# encoding: utf-8 -from __future__ import absolute_import -from rapyuta_io.clients.api_client import CatalogConfig -from rapyuta_io.utils import ResourceNotFoundError, ServiceBindingError -from rapyuta_io.utils.rest_client import RestClient, HttpMethod -from rapyuta_io.utils.settings import * -from rapyuta_io.utils.utils import response_validator -from six.moves import map - - -class 
ProvisionClient(CatalogConfig): - def __init__(self, catalog_api_host, auth_token, project): - CatalogConfig.__init__(self, catalog_api_host, auth_token, project) - self._api_path = PROVISION_API_PATH - - def _get_api_path(self): - return self._catalog_api_host + self._api_path - - def _execute_api(self, url, method, payload=None, query_params=None, retry_limit=0): - response = RestClient(url).method(method).headers(self._headers) \ - .retry(retry_limit).query_param(query_params).execute(payload=payload) - return response - - @response_validator(True) - def provision(self, payload, retry_limit): - url = self._get_api_path() + "/" + payload['instance_id'] - response = self._execute_api(url, HttpMethod.PUT, payload, retry_limit=retry_limit) - return response - - @response_validator(return_value=True) - def deprovision(self, deployment_id, plan_id, service_id, retry_limit): - path = '{}?plan_id={}&service_id={}&accepts_incomplete=false' \ - .format(deployment_id, plan_id, service_id) - url = self._get_api_path() + "/" + path - response = self._execute_api(url, HttpMethod.DELETE, retry_limit=retry_limit) - return response - - @response_validator(True, errors={404: ServiceBindingError}) - def service_binding(self, deployment_id, plan_id, service_id, binding_id, retry_limit): - payload = dict(service_id=service_id, plan_id=plan_id) - path = '/{}/service_bindings/{}'.format(deployment_id, binding_id) - url = self._get_api_path() + path - return self._execute_api(url, HttpMethod.PUT, payload, retry_limit=retry_limit) - - @response_validator(errors={404: ResourceNotFoundError}, return_value=True) - def service_unbinding(self, deployment_id, plan_id, service_id, binding_id, retry_limit): - path = '/{}/service_bindings/{}?service_id={}&plan_id={}'.format(deployment_id, binding_id, - service_id, plan_id) - url = self._get_api_path() + path - response = self._execute_api(url, HttpMethod.DELETE, retry_limit=retry_limit) - return response - - @response_validator(True) - def 
deployments(self, service_id, phases, retry_limit): - query_params = {'package_uid': service_id} - if phases: - query_params['phase'] = list(map(str, phases)) - path = '/deployment/list' - url = self._catalog_api_host + path - return self._execute_api(url, HttpMethod.GET, retry_limit=retry_limit, query_params=query_params) - - @response_validator(True) - def deployment_status(self, deployment_id, retry_limit): - path = '/serviceinstance/{}'.format(deployment_id) - url = self._catalog_api_host + path - return self._execute_api(url, HttpMethod.GET, retry_limit=retry_limit) - - @response_validator(True) - def create_disk(self, payload, retry_limit): - path = '/disk' - url = self._catalog_api_host + path - return self._execute_api(url, HttpMethod.POST, payload=payload, retry_limit=retry_limit) - - @response_validator(True) - def get_disk(self, disk_guid, retry_limit): - path = '/disk/{}'.format(disk_guid) - url = self._catalog_api_host + path - return self._execute_api(url, HttpMethod.GET, retry_limit=retry_limit) - - @response_validator(True) - def list_disk(self, deploymentGUIDs=None, retry_limit=0): - path = '/disk' - query_params = {} - if deploymentGUIDs: - query_params['deployment_guid'] = deploymentGUIDs - url = self._catalog_api_host + path - return self._execute_api(url, HttpMethod.GET, query_params=query_params, retry_limit=retry_limit) - - @response_validator(True) - def delete_disk(self, disk_guid, retry_limit): - path = '/disk/{}'.format(disk_guid) - url = self._catalog_api_host + path - return self._execute_api(url, HttpMethod.DELETE, retry_limit=retry_limit) \ No newline at end of file diff --git a/rapyuta_io/clients/routed_network.py b/rapyuta_io/clients/routed_network.py deleted file mode 100644 index 4a319796..00000000 --- a/rapyuta_io/clients/routed_network.py +++ /dev/null @@ -1,153 +0,0 @@ -# coding=utf-8 -from __future__ import absolute_import -import six - -from rapyuta_io.clients.deployment import _poll_till_ready -from rapyuta_io.utils import 
ObjDict -from rapyuta_io.utils import RestClient -from rapyuta_io.utils import to_objdict -from rapyuta_io.utils.rest_client import HttpMethod -from rapyuta_io.utils.utils import create_auth_header, get_api_response_data -from rapyuta_io.utils.error import InvalidParameterException -from rapyuta_io.utils.partials import PartialMixin -from rapyuta_io.clients.common_models import Limits - -class RoutedNetwork(PartialMixin, ObjDict): - """ - RoutedNetwork represents Routed Network. \n - Variables marked as (full-only) are only available on a full object. Use `refresh()` to convert a - partial object into a full one. - - :ivar name: Name of RoutedNetwork. - :vartype name: str - :ivar guid: GUID - :vartype guid: str - :ivar runtime: Runtime of RoutedNetwork - :vartype runtime: :py:class:`~rapyuta_io.clients.package.Runtime` - :ivar rosDistro: ROSDistro of RoutedNetwork - :vartype rosDistro: :py:class:`~rapyuta_io.clients.package.ROSDistro` - :ivar shared: Whether the network can be shared. - :vartype shared: bool - :ivar parameters: parameters of the routed network - :vartype parameters: :py:class:`~rapyuta_io.clients.routed_network.Parameters` - :ivar phase: Deployment phase - :vartype phase: :py:class:`~rapyuta_io.clients.deployment.DeploymentPhaseConstants` - :ivar status: (full-only) Deployment status - :vartype status: :py:class:`~rapyuta_io.clients.deployment.DeploymentStatusConstants` - :ivar error_code: Deployment errors - :ivar internalDeploymentGUID: guid of the internal deployment - :vartype internalDeploymentGUID: str - :ivar internalDeploymentStatus: Internal deployment status of the routed network. Has attributes: phase, - status (full-only), and errors. - :vartype internalDeploymentStatus: :py:class:`~rapyuta_io.clients.common_models.InternalDeploymentStatus` - :ivar ownerProject: Owner project guid. - :vartype ownerProject: str - :ivar creator: Creator user guid. - :vartype creator: str - :ivar CreatedAt: Date of creation. 
- :vartype CreatedAt: str - :ivar UpdatedAt: Date of updation. - :vartype UpdatedAt: str - :ivar DeletedAt: Date of deletion. - :vartype DeletedAt: str - """ - - ROUTED_NETWORK_PATH = 'routednetwork' - - def __init__(self, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - - def poll_routed_network_till_ready(self, retry_count=120, sleep_interval=5): - # TODO: implement and use DeploymentPollerMixin. see _poll_till_ready - """ - - Wait for the routed network to be ready - - :param retry_count: Optional parameter to specify the retries. Default value is 120 - :param sleep_interval: Optional parameter to specify the interval between retries. - Default value is 5 Sec. - :return: instance of class :py:class:`~rapyuta_io.clients.common_models.InternalDeploymentStatus`: - :raises: :py:class:`APIError`: If service binding api return an error, the status code is - anything other than 200/201 - :raises: :py:class:`DeploymentNotRunningException`: If the deployment’s state might not - progress due to errors. - :raises: :py:class:`RetriesExhausted`: If number of polling retries exhausted before the - deployment could succeed or fail. - - Following example demonstrates use of poll_routed_network_till_ready: - - >>> from rapyuta_io import Client - >>> from rapyuta_io.utils.error import (DeploymentNotRunningException, - ... RetriesExhausted) - >>> client = Client(auth_token='auth_token', project="project_guid") - >>> routed_network = client.get_routed_network('network-guid') - >>> try: - ... network_status = routed_network.poll_routed_network_till_ready() - ... print network_status - ... except RetriesExhausted as e: - ... print e, 'Retry again?' - ... except DeploymentNotRunningException as e: - ... 
print e, e.deployment_status - - """ - _poll_till_ready(self, retry_count, sleep_interval) - return self - - def get_status(self): - routed_network = RoutedNetwork(to_objdict(self._get_full_resource())) - internal_deployment_status = routed_network.internalDeploymentStatus - internal_deployment_status.errors = routed_network.get_error_code() - return internal_deployment_status - - def _get_full_resource(self): - url = '{}/{}/{}'.format(self._host, self.ROUTED_NETWORK_PATH, self.guid) - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.GET).headers(headers).execute() - return get_api_response_data(response, parse_full=True) - - def refresh(self): - full_network = self._get_full_resource() - for key, value in six.iteritems(full_network): - setattr(self, key, to_objdict(value)) - self.is_partial = False - - def delete(self): - - """ - Delete the routed network using the routed network object. - - Following example demonstrates how to delete a routed network using routed network object: - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project='project_guid') - >>> routed_network = client.get_routed_network(network_guid='network_guid') - >>> routed_network.delete() - - """ - - url = '{}/{}/{}'.format(self._host, self.ROUTED_NETWORK_PATH, self.guid) - headers = create_auth_header(self._auth_token, self._project) - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute() - get_api_response_data(response, parse_full=True) - self.clear() - return True - - def get_error_code(self): - return self.internalDeploymentStatus.error_code if hasattr(self.internalDeploymentStatus, 'error_code') else [] - - -class Parameters(ObjDict): - """ - Parameters represents Routed Network Parameters - - :ivar limits: Values corresponding to limits of the parameters - :vartype limits: :py:class:`~rapyuta_io.clients.routed_network.RoutedNetworkLimits` - - :param limits: Values 
corresponding to limits of the parameters - :type limits: :py:class:`~rapyuta_io.clients.routed_network.RoutedNetworkLimits` - """ - - def __init__(self, limits = None): - super(ObjDict, self).__init__(limits=limits) - - diff --git a/rapyuta_io/clients/secret.py b/rapyuta_io/clients/secret.py index 4c45ffd7..38110d75 100644 --- a/rapyuta_io/clients/secret.py +++ b/rapyuta_io/clients/secret.py @@ -1,15 +1,12 @@ from __future__ import absolute_import -import base64 -import json -import six -from abc import ABCMeta, abstractmethod -import re + import enum +import re + +import six -from rapyuta_io.utils import RestClient, InvalidParameterException -from rapyuta_io.utils.object_converter import ObjBase, enum_field -from rapyuta_io.utils.rest_client import HttpMethod -from rapyuta_io.utils.utils import create_auth_header, get_api_response_data +from rapyuta_io.utils import InvalidParameterException +from rapyuta_io.utils.object_converter import ObjBase DOCKER_HUB_REGISTRY = 'https://index.docker.io/v1/' @@ -23,13 +20,10 @@ class Secret(ObjBase): :ivar name: Name of the Secret :ivar guid: GUID of the Secret - :ivar secret_type: Type of the Secret :ivar created_at: Creation Time of the Secret :ivar creator: Create of the Secret :param name: Name of the Secret :type name: str - :param secret_config: Secret Configuration - :type secret_config: Union[:py:class:`~rapyuta_io.clients.secret.SecretConfigDocker`] """ SECRET_PATH = '/api/secret' @@ -52,18 +46,17 @@ def validate(name, secret_config): if not secret_name_regex.match(name): raise InvalidParameterException('name must consist of lower case alphanumeric characters or - and must ' + 'start and end with an alphanumeric character') - if not isinstance(secret_config, _SecretConfigBase): + if not isinstance(secret_config, SecretConfigDocker): raise InvalidParameterException( - 'secret_config must be of type SourceSecretBasicConfig, SourceSecretSSHConfig or DockerSecretConfig') + 'secret_config must be of type 
DockerSecretConfig') def get_deserialize_map(self): return { - 'created_at': 'CreatedAt', + 'created_at': 'createdAt', 'guid': 'guid', 'name': 'name', - 'creator': 'creator', + 'creator': 'creatorGUID', 'project_guid': 'projectGUID', - 'secret_type': enum_field('type', SecretType), } def get_serialize_map(self): @@ -73,28 +66,6 @@ def get_serialize_map(self): 'data': '_secret_config', } - def delete(self): - - """ - Delete the secret using the secret object. - - Following example demonstrates how to delete a secret using secret object: - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project='project_guid') - >>> secret = client.get_secret(guid='secret-id') - >>> secret.delete() - - """ - - if not (hasattr(self, '_core_api_host') and hasattr(self, '_auth_token')): - raise InvalidParameterException('Secret must be created first') - url = self._core_api_host + self.SECRET_PATH + '/delete' - headers = create_auth_header(self._auth_token, self._project) - payload = {'guid': self.guid} - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute(payload) - get_api_response_data(response, parse_full=True) - class SecretType(str, enum.Enum): """ @@ -102,9 +73,7 @@ class SecretType(str, enum.Enum): SecretType can be any of the following types \n - SecretType.DOCKER \n - SecretType.SOURCE_BASIC_AUTH \n - SecretType.SOURCE_SSH_AUTH \n + SecretType.DOCKER """ def __str__(self): @@ -113,24 +82,7 @@ def __str__(self): DOCKER = 'kubernetes.io/dockercfg' -class _SecretConfigBase(six.with_metaclass(ABCMeta, ObjBase)): - """ - SecretConfigBase is an abstract class that implements that defines abstract methods for all types of SecretConfig - classes. 
- """ - - @abstractmethod - def get_type(self): - pass - - def get_deserialize_map(self): - pass - - def get_serialize_map(self): - pass - - -class SecretConfigDocker(_SecretConfigBase): +class SecretConfigDocker: """ SecretConfigDocker represents Docker Secret for Docker registries. This type of secrets can be used to access private Docker repositories for either pulling base images or pushing the images from Builds. @@ -166,14 +118,9 @@ def get_type(cls): return SecretType.DOCKER def serialize(self): - config = json.dumps({ - self.registry: { - 'username': self.username, - 'password': self.password, - 'email': self.email, - 'auth': base64.b64encode('{}:{}'.format(self.username, self.password).encode()).decode() - } - }) return { - '.dockercfg': base64.b64encode(config.encode()).decode() + 'username': self.username, + 'password': self.password, + 'email': self.email, + 'registry': self.registry } diff --git a/rapyuta_io/clients/static_route.py b/rapyuta_io/clients/static_route.py deleted file mode 100644 index cf2dc594..00000000 --- a/rapyuta_io/clients/static_route.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import absolute_import -from rapyuta_io.utils import ObjDict -from rapyuta_io.utils.utils import create_auth_header, get_api_response_data -from rapyuta_io.utils.rest_client import HttpMethod -from rapyuta_io.utils import RestClient - - -class StaticRoute(ObjDict): - """ - StaticRoute class represents an instance of a static route. It contains methods to delete static route. 
- - :ivar CreatedAt: Date of creation - :ivar DeletedAt: Date of deletion - :ivar ID: ID of the static route - :ivar creator: User guid who created the static route - :ivar metadata: Metadata associated with the static route - :ivar projectGUID: GUID of the project the static route is to be created in - :ivar urlPrefix: Prefix/subdomain of the static route - :ivar urlString: Full static route URL - """ - - STATIC_ROUTE_PATH = '/api/staticroute' - - def __init__(self, *args, **kwargs): - super(ObjDict, self).__init__(*args, **kwargs) - - def delete(self): - """ - Delete static route - - :return: True or False - :rtype: bool - - Following example demonstrates how to delete a static route - - >>> from rapyuta_io import Client - >>> client = Client(auth_token='auth_token', project='project_guid') - >>> static_route = client.get_all_static_routes()[0] - >>> result = static_route.delete() - """ - url = self._core_api_host + self.STATIC_ROUTE_PATH + '/delete' - headers = create_auth_header(self._auth_token, self._project) - payload = {"guid": self.guid} - response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute(payload) - response_data = get_api_response_data(response, parse_full=True) - if response_data['success']: - self.clear() - return True - return False diff --git a/rapyuta_io/clients/v2_client.py b/rapyuta_io/clients/v2_client.py new file mode 100644 index 00000000..ca5af704 --- /dev/null +++ b/rapyuta_io/clients/v2_client.py @@ -0,0 +1,68 @@ +from rapyuta_io.clients.project import Project +from rapyuta_io.clients.secret import Secret +from rapyuta_io.utils import prepend_bearer_to_auth_token, RestClient +from rapyuta_io.utils.rest_client import HttpMethod +from rapyuta_io.utils.utils import get_api_response_data, create_auth_header + + +class V2Client: + def __init__(self, auth_token, project, v2_api_host): + self.v2_api_host = v2_api_host + self._auth_token = prepend_bearer_to_auth_token(auth_token) + self._project = project + + def 
set_project(self, project): + self._project = project + + def create_project(self, project): + url = self.v2_api_host + '/v2/projects/' + serialized_project = project.serialize() + organization_guid = serialized_project['organization']['guid'] + headers = dict(Authorization=self._auth_token, organizationguid=organization_guid) + request = { + 'metadata': { + 'name': serialized_project['name'], + 'organizationGUID': organization_guid + }, + } + response = RestClient(url).method(HttpMethod.POST).headers(headers).execute(request) + data = get_api_response_data(response, parse_full=True) + project = Project.deserialize(data.get('metadata')) + return project + + def delete_project(self, guid): + url = self.v2_api_host + '/v2/projects/' + guid + '/' + headers = dict(Authorization=self._auth_token) + response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute() + data = get_api_response_data(response, parse_full=True) + return data + + def create_secret(self, secret): + url = self.v2_api_host + '/v2/secrets/' + headers = create_auth_header(self._auth_token, self._project) + serialized_secret = secret.serialize() + data = serialized_secret['data'] + request = { + 'metadata': { + 'name': serialized_secret['name'] + }, + 'spec': { + 'docker': { + 'username': data.username, + 'password': data.password, + 'email': data.email, + 'registry': data.registry + } + } + } + response = RestClient(url).method(HttpMethod.POST).headers(headers).execute(request) + data = get_api_response_data(response, parse_full=True) + secret = Secret.deserialize(data.get('metadata')) + return secret + + def delete_secret(self, name): + url = self.v2_api_host + '/v2/secrets/' + name + '/' + headers = create_auth_header(self._auth_token, self._project) + response = RestClient(url).method(HttpMethod.DELETE).headers(headers).execute() + data = get_api_response_data(response, parse_full=True) + return data diff --git a/rapyuta_io/rio_client.py b/rapyuta_io/rio_client.py index 
943bc4d1..ae27b90a 100644 --- a/rapyuta_io/rio_client.py +++ b/rapyuta_io/rio_client.py @@ -6,6 +6,8 @@ import six +from rapyuta_io.clients.project import Project +from rapyuta_io.clients.secret import Secret from rapyuta_io.clients import DeviceManagerClient, _ParamserverClient from rapyuta_io.clients.catalog_client import CatalogClient from rapyuta_io.clients.core_api_client import CoreAPIClient @@ -15,6 +17,7 @@ from rapyuta_io.clients.rip_client import AuthTokenLevel, RIPClient from rapyuta_io.clients.rosbag import ROSBagBlob, ROSBagBlobStatus, ROSBagJob, ROSBagJobStatus from rapyuta_io.clients.user_group import UserGroup +from rapyuta_io.clients.v2_client import V2Client from rapyuta_io.utils import InvalidAuthTokenException, \ InvalidParameterException from rapyuta_io.utils.settings import default_host_config @@ -48,6 +51,8 @@ def __init__(self, auth_token, project=None): device_api_host=self._get_api_endpoints('core_api_host')) self._paramserver_client = _ParamserverClient(auth_token, project, self._get_api_endpoints('core_api_host')) + self._v2_client_host = V2Client(auth_token, project, self._get_api_endpoints('v2_api_host')) + @staticmethod def _validate_auth_token(auth_token): if not auth_token: @@ -110,6 +115,89 @@ def set_project(self, project_guid): self._core_api_client.set_project(project_guid) self._dmClient.set_project(project_guid) self._paramserver_client.set_project(project_guid) + self._v2_client_host.set_project(project_guid) + + def create_project(self, project): + """ + Create a new Project + + :param project: Project object + :type project: :py:class:`~rapyuta_io.clients.project.Project` + :rtype: :py:class:`~rapyuta_io.clients.project.Project` + + Following example demonstrates the use of this method for creating a new Project. 
+ + >>> from rapyuta_io.clients.project import Project + >>> client = Client(auth_token='auth_token') + >>> proj = Project('project-name') + >>> client.create_project(proj) + + Following example demonstrates the use of this method for creating a new Project in a different organization. + Please do note that the user should be a part of the organization where the project is to be created. + + >>> from rapyuta_io.clients.project import Project + >>> client = Client(auth_token='auth_token') + >>> proj = Project('project-name', 'org-guid') + >>> client.create_project(proj) + + """ + if not isinstance(project, Project): + raise InvalidParameterException("project must be non-empty and of type " + "rapyuta_io.clients.project.Project") + + return self._v2_client_host.create_project(project) + + def delete_project(self, guid): + """ + Deletes a project on the Platform + + :param guid: Project GUID + :type guid: str + + Following example demonstrates the use of this method + + >>> client = Client(auth_token='auth_token', project='project_guid') + >>> client.delete_project('project-guid') + + """ + return self._v2_client_host.delete_project(guid) + + def create_secret(self, secret): + """ + Create a new Secret on the Platform under the project. + + :param secret: Secret object + :type secret: :py:class:`~rapyuta_io.clients.secret.Secret` + :rtype: :py:class:`~rapyuta_io.clients.secret.Secret` + + Following example demonstrates the use of this method for creating a new Secret.
+ + >>> from rapyuta_io.clients.secret import Secret, SecretConfigDocker + >>> client = Client(auth_token='auth_token', project='project_guid') + >>> secret_config = SecretConfigDocker('user', 'password', 'email', 'registry') + >>> secret = Secret('secret-name', secret_config) + >>> client.create_secret(secret) + + """ + if not isinstance(secret, Secret): + raise InvalidParameterException("secret must be non-empty and of type " + "rapyuta_io.clients.secret.Secret") + return self._v2_client_host.create_secret(secret) + + def delete_secret(self, name): + """ + Deletes a Secret on the Platform under the project. + + :param name: Secret name + :type name: str + + Following example demonstrates the use of this method + + >>> client = Client(auth_token='auth_token', project='project_guid') + >>> client.delete_secret('secret-name') + + """ + return self._v2_client_host.delete_secret(name) def get_authenticated_user(self): """ @@ -231,7 +319,7 @@ def delete_device(self, device_id): Following example demonstrates how to delete a device. - >>> from rapyuta_io import Client, ROSDistro + >>> from rapyuta_io import Client >>> client = Client(auth_token='auth_token', project='project_guid') >>> client.delete_device('device-id') """ @@ -397,9 +485,8 @@ def create_rosbag_job(self, rosbag_job): >>> from rapyuta_io import Client >>> from rapyuta_io.clients.rosbag import ROSBagJob, ROSBagOptions >>> client = Client(auth_token='auth_token', project='project_guid') - >>> deployment = client.get_deployment('deployment_id') >>> rosbag_options = ROSBagOptions(all_topics=True) - >>> rosbag_job = ROSBagJob(deployment_id=deployment.deploymentId, + >>> rosbag_job = ROSBagJob(deployment_id="deployment-id", ... component_instance_id=component_instance_id, ... 
rosbag_options=rosbag_options, name='name') >>> rosbag_job = client.create_rosbag_job(rosbag_job) diff --git a/rapyuta_io/utils/settings.py b/rapyuta_io/utils/settings.py index 935ca718..d727e00c 100644 --- a/rapyuta_io/utils/settings.py +++ b/rapyuta_io/utils/settings.py @@ -3,7 +3,8 @@ default_host_config = { "core_api_host": "https://gaapiserver.apps.okd4v2.prod.rapyuta.io", "catalog_host": "https://gacatalog.apps.okd4v2.prod.rapyuta.io", - "rip_host": "https://garip.apps.okd4v2.prod.rapyuta.io" + "rip_host": "https://garip.apps.okd4v2.prod.rapyuta.io", + "v2_api_host": "https://api.rapyuta.io" } # Paramserver APIs diff --git a/sdk_test/config.json.example b/sdk_test/config.json.example index 968932c3..9c567aaa 100644 --- a/sdk_test/config.json.example +++ b/sdk_test/config.json.example @@ -1,6 +1,7 @@ { "catalog_host": "https://v14catalog.az39.rapyuta.io", "core_api_host": "https://qaapiserver.az39.rapyuta.io", + "v2_api_host": "https://qaapi.rapyuta.io", "hwil_host": "https://hwil.rapyuta.io", "hwil_user": "USER", "hwil_password": "PASSWORD", @@ -8,21 +9,13 @@ "organization_guid": "org_guid", "devices": [ { - "name": "NAME", - "runtime": "RUNTIME", - "ip": "IP_ADDRESS", - "arch": "ARCHITECTURE", - "distro": "ROS_DISTRIBUTION" + "name": "docker-compose-amd64", + "runtime": "Dockercompose", + "ip": "10.224.4.5", + "arch": "amd64", + "distro": "melodic" } ], - "git": { - "ssh-key": "SSH_KEY" - }, - "docker": { - "username": "DOCKER_USERNAME", - "password": "DOCKER_PASSWORD", - "email": "DOCKER_EMAIL" - }, "test_files": [], "worker_threads": 1 } diff --git a/sdk_test/config.py b/sdk_test/config.py index d8be9335..f790a402 100644 --- a/sdk_test/config.py +++ b/sdk_test/config.py @@ -11,6 +11,7 @@ from rapyuta_io.utils.error import InvalidParameterException from rapyuta_io.utils.utils import create_auth_header, \ prepend_bearer_to_auth_token, generate_random_value +from sdk_test.util import get_logger class _Singleton(type): @@ -62,15 +63,7 @@ class 
Configuration(six.with_metaclass(_Singleton, object)): "arch": "ARCHITECTURE", "distro": "ROS_DISTRO" } - ], - "git": { - "ssh-key": "SSH_KEY" - }, - "docker": { - "username": "DOCKER_USERNAME", - "password": "DOCKER_PASSWORD", - "email": "DOCKER_EMAIL" - } + ] } """ @@ -92,6 +85,7 @@ def __init__(self, file_path=None): raise InvalidParameterException('test_files must be a list of test file names') self.worker_threads = self._config['worker_threads'] self.organization_guid = self._config.get('organization_guid') + self.logger = get_logger() def validate(self): # if len(self.get_device_configs(arch=DeviceArch.AMD64, runtime='Preinstalled')) != 1: @@ -114,12 +108,13 @@ def create_project(self): name = 'test-{}'.format(generate_random_value(8)) self._project = self.client.create_project( Project(name, organization_guid=self.organization_guid)) + self.logger.info('Created project: {}'.format(name)) self.set_project(self._project.guid) def delete_project(self): if self._project is None: return - self._project.delete() + self.client.delete_project(self._project.guid) def set_project(self, project_guid): self._config['project'] = project_guid @@ -161,7 +156,7 @@ def create_secrets(self): def delete_secrets(self): for secret in self._secrets.values(): - secret.delete() + self.client.delete_secret(secret.name) def get_secret(self, secret_type): """ diff --git a/sdk_test/coreapi/query_metrics_test.py b/sdk_test/coreapi/query_metrics_test.py deleted file mode 100644 index 88cc55fe..00000000 --- a/sdk_test/coreapi/query_metrics_test.py +++ /dev/null @@ -1,94 +0,0 @@ -import pytz -from datetime import datetime, timedelta -from unittest import TestCase - - -from rapyuta_io import DeviceArch -from rapyuta_io.clients.device import SystemMetric, QoS -from rapyuta_io.clients.metrics import QueryMetricsRequest, MetricFunction, MetricOperation, \ - StepInterval, SortOrder, ListMetricsRequest, Entity, ListTagKeysRequest, ListTagValuesRequest -from rapyuta_io.utils.error import 
ConflictError -from sdk_test.config import Configuration -from sdk_test.util import get_logger -from time import sleep - - -class MetricsTests(TestCase): - DEVICE = None - WAIT_TIME = 120 - - @classmethod - def setUpClass(cls): - config = Configuration() - logger = get_logger() - device = config.get_devices(arch=DeviceArch.AMD64, runtime='Dockercompose')[0] - try: - logger.info('subscribing to metrics') - device.subscribe_metric(SystemMetric.CPU, QoS.LOW) - logger.info('waiting for {} seconds '.format(cls.WAIT_TIME)) - sleep(cls.WAIT_TIME) - except ConflictError: - get_logger().info('metrics is info already subscribed') - cls.DEVICE = device - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - self.to_datetime = datetime.now(pytz.UTC) - self.from_datetime = self.to_datetime - timedelta(days=1) - - @classmethod - def tearDownClass(cls): - cls.DEVICE.unsubscribe_metric(SystemMetric.CPU) - - def assert_column_object_fields(self, columns): - self.assertTrue(self.has_value_for_attribute(columns, 'name')) - self.assertTrue(all(map(lambda x: getattr(x, 'function') if x.name != 'timestamp' else True, - columns))) - self.assertTrue(all(map(lambda x: getattr(x, 'metric_group') if x.name != 'timestamp' else True, - columns))) - for col in columns: - if getattr(col, 'tag_names'): - self.assertTrue(len(col.tag_names) == len(col.tag_values)) - - @staticmethod - def has_value_for_attribute(response, field): - return all(map(lambda x: getattr(x, field), response)) - - def test_query_metrics(self): - metrics = [MetricOperation(MetricFunction.COUNT, 'cpu.usage_system'), - MetricOperation(MetricFunction.PERCENTILE_95, 'cpu.usage_idle')] - query_metrics_request = QueryMetricsRequest(self.from_datetime, self.to_datetime, StepInterval.ONE_MINUTE, - metrics, groupby=['device_id'], sort=SortOrder.DESC) - metrics_response = self.config.client.query_metrics(query_metrics_request) - - self.assert_column_object_fields(metrics_response.columns) - 
self.assertTrue(len(metrics_response.columns)) - self.assertTrue(len(metrics_response.rows) == len(metrics_response.columns)) - - def test_list_metrics(self): - list_metrics_query = ListMetricsRequest(Entity.PROJECT, self.config._project.guid, - self.from_datetime, self.to_datetime) - metrics = self.config.client.list_metrics(list_metrics_query) - - self.assertTrue(len(metrics)) - self.assertTrue(self.has_value_for_attribute(metrics, 'metric_group')) - self.assertTrue(self.has_value_for_attribute(metrics, 'metric_names')) - - def test_list_tag_keys(self): - list_tag_keys_query = ListTagKeysRequest(Entity.PROJECT, self.config._project.guid, - self.from_datetime, self.to_datetime) - tag_keys = self.config.client.list_tag_keys(list_tag_keys_query) - - self.assertTrue(len(tag_keys)) - self.assertTrue(self.has_value_for_attribute(tag_keys, 'tags')) - self.assertTrue(self.has_value_for_attribute(tag_keys, 'metric_group')) - - def test_list_tag_values(self): - tag = 'cpu' - list_tag_values_query = ListTagValuesRequest(Entity.PROJECT, self.config._project.guid, tag, - self.from_datetime, self.to_datetime) - tag_values = self.config.client.list_tag_values(list_tag_values_query) - - self.assertTrue(len(tag_values)) - self.assertTrue(isinstance(tag_values, list)) diff --git a/sdk_test/coreapi/secret_test.py b/sdk_test/coreapi/secret_test.py deleted file mode 100644 index 79ae3244..00000000 --- a/sdk_test/coreapi/secret_test.py +++ /dev/null @@ -1,40 +0,0 @@ -from __future__ import absolute_import -import unittest - -from rapyuta_io import Secret, SecretConfigDocker -from sdk_test.config import Configuration -from sdk_test.util import get_logger - - -class TestSecret(unittest.TestCase): - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - - def tearDown(self): - if hasattr(self, 'secret'): - self.config.client.delete_secret(self.secret.guid) - - def assertSecret(self, secret): - self.assertIsInstance(secret, Secret) - 
self.assertIsNotNone(secret.guid) - self.assertIsNotNone(secret.creator) - self.assertIsNotNone(secret.created_at) - self.assertIsNotNone(secret.secret_type) - - - def test_create_secret_docker(self): - self.secret = self.config.client.create_secret(Secret('docker-test', SecretConfigDocker('user','pass', 'email'))) - self.assertSecret(self.secret) - - def test_list_secret_docker(self): - self.secret = self.config.client.create_secret(Secret('docker-test', SecretConfigDocker('user','pass', 'email'))) - secret_list = self.config.client.list_secrets() - secret_list = [s for s in secret_list if s.guid == self.secret.guid] - self.assertEqual(len(secret_list), 1) - - def test_update_secret_source_docker(self): - self.secret = self.config.client.create_secret(Secret('docker-test', SecretConfigDocker('user','pass', 'email'))) - self.secret = self.config.client.update_secret(self.secret.guid, Secret('docker-test', SecretConfigDocker('user1','pass1', 'email1'))) - self.assertSecret(self.secret) - diff --git a/sdk_test/coreapi/usergroup_test.py b/sdk_test/coreapi/usergroup_test.py index 899cc5bc..1934e89d 100644 --- a/sdk_test/coreapi/usergroup_test.py +++ b/sdk_test/coreapi/usergroup_test.py @@ -96,8 +96,3 @@ def test_update_usergroup(self): self.assertEqual(len(self.usergroup.projects), 1) self.assertEqual(len(self.usergroup.admins), 1) self.assertEqual(len(self.usergroup.members), 1) - - - - - diff --git a/sdk_test/coreapi/project_test.py b/sdk_test/coreapi/v2_project_test.py similarity index 50% rename from sdk_test/coreapi/project_test.py rename to sdk_test/coreapi/v2_project_test.py index 8b28be4f..0fecf05e 100644 --- a/sdk_test/coreapi/project_test.py +++ b/sdk_test/coreapi/v2_project_test.py @@ -26,27 +26,6 @@ def test_create_project(self): self.project = self.config.client.create_project(p) self.assertIsInstance(self.project, Project) self.assertIsNotNone(self.project.guid) + self.assertIsNotNone(self.project.name) self.assertIsNotNone(self.project.creator) - 
self.assertIsNotNone(self.project.created_at) - self.assertIsNotNone(self.project.users) - - def test_list_project(self): - p = Project( - 'test-{}'.format(generate_random_value(5)), - organization_guid=self.config.organization_guid - ) - self.project = self.config.client.create_project(p) - project_list = self.config.client.list_projects() - project_list = [p for p in project_list if p.guid == self.project.guid] - self.assertEqual(len(project_list), 1) - - def test_client_without_project(self): - auth = self.config.get_auth_token() - client = Client(auth) - self.assertRaises( - BadRequestError, - lambda: client.create_secret( - Secret('test-secret', SecretConfigDocker(username='username', password='password', email='test@example.com', - registry='quay.io')) - ) - ) + self.assertIsNotNone(self.project.created_at) \ No newline at end of file diff --git a/sdk_test/coreapi/v2_secret_test.py b/sdk_test/coreapi/v2_secret_test.py new file mode 100644 index 00000000..49751484 --- /dev/null +++ b/sdk_test/coreapi/v2_secret_test.py @@ -0,0 +1,30 @@ +from __future__ import absolute_import +import unittest + +from rapyuta_io import Secret, SecretConfigDocker +from sdk_test.config import Configuration +from sdk_test.util import get_logger + + +class TestSecret(unittest.TestCase): + def setUp(self): + self.config = Configuration() + self.logger = get_logger() + + def tearDown(self): + if hasattr(self, 'secret'): + self.config.client.delete_secret(self.secret.name) + + def assertSecret(self, secret): + self.assertIsInstance(secret, Secret) + self.assertIsNotNone(secret.guid) + self.assertIsNotNone(secret.name) + self.assertIsNotNone(secret.creator) + self.assertIsNotNone(secret.created_at) + + def test_create_secret_docker(self): + self.secret = self.config.client.create_secret( + Secret('docker-test', SecretConfigDocker('user', 'pass', 'email'))) + self.assertSecret(self.secret) + + diff --git a/sdk_test/device/deployment_test.py b/sdk_test/device/deployment_test.py deleted 
file mode 100644 index 7b16e04f..00000000 --- a/sdk_test/device/deployment_test.py +++ /dev/null @@ -1,66 +0,0 @@ -from __future__ import absolute_import -from sdk_test.config import Configuration -from sdk_test.device.device_test import DeviceTest -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, add_package, delete_package, get_package -from rapyuta_io import DeviceArch -from rapyuta_io.utils import DeploymentRunningException - - -class TestDeployment(DeviceTest, PackageTest): - TALKER_DOCKER_MANIFEST = 'talker-docker.json' - TALKER_DOCKER_PACKAGE = 'test-deployment-talker-docker-pkg' - - @classmethod - def setUpClass(cls): - add_package(cls.TALKER_DOCKER_MANIFEST, cls.TALKER_DOCKER_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.TALKER_DOCKER_PACKAGE) - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - # Assumption: We only have one amd64 device with Docker runtime. - devices = self.config.get_devices(arch=DeviceArch.AMD64, runtime='Dockercompose') - self.device = devices[0] - - def tearDown(self): - pass - - def assert_get_deployments(self, deployment_id): - self.logger.info('Asserting if some deployment is present') - deployments = self.device.get_deployments() - self.assertGreater(len(deployments), 0) - deployment_exists = False - for deployment in deployments: - if deployment_id == deployment['io_deployment_id']: - deployment_exists = True - break - self.assertTrue(deployment_exists, 'Current deployment should be present') - - def test_get_deployment(self): - self.routed_network = self.create_cloud_routed_network('talker-routed-network') - self.package = get_package(self.TALKER_DOCKER_PACKAGE) - self.provision_config = self.package.get_provision_configuration() - self.provision_config.add_device('default', self.device) - self.provision_config.add_routed_network(self.routed_network) - - self.logger.info('Deploying talker package') - self.talker_deployment = 
self.deploy_package(self.package, self.provision_config) - self.talker_deployment.poll_deployment_till_ready(sleep_interval=20, retry_count=10) - self.assert_get_deployments(self.talker_deployment.deploymentId) - - self.deprovision_all_deployments([self.talker_deployment]) - self.routed_network.delete() - self.package = None - - def test_device_refresh(self): - partial_device = [d for d in self.config.client.get_all_devices() if d.uuid == self.device.uuid][0] - self.assertTrue(partial_device.is_partial) - with self.assertRaises(AttributeError): - partial_device.host - partial_device.refresh() - self.assertFalse(partial_device.is_partial) - self.assertTrue(partial_device.host) diff --git a/sdk_test/device/topic_test.py b/sdk_test/device/topic_test.py deleted file mode 100644 index 7d6d9c4a..00000000 --- a/sdk_test/device/topic_test.py +++ /dev/null @@ -1,163 +0,0 @@ -from __future__ import absolute_import - -from rapyuta_io import TopicKind, DeviceArch -from rapyuta_io.clients.device import QoS -from rapyuta_io.utils import BadRequestError -from sdk_test.config import Configuration -from sdk_test.device.device_test import DeviceTest -from sdk_test.util import get_logger, start_roscore, stop_roscore -import six - - -class TestTopic(DeviceTest): - - @classmethod - def setUpClass(cls): - config = Configuration() - devices = config.get_devices(arch=DeviceArch.AMD64, runtime="Preinstalled") - start_roscore(devices[0]) - - @classmethod - def tearDownClass(cls): - config = Configuration() - devices = config.get_devices(arch=DeviceArch.AMD64, runtime="Preinstalled") - stop_roscore(devices[0]) - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - # Assumption: We only have one amd64 device with Preinstalled runtime. 
- self.device = self.config.get_devices(arch=DeviceArch.AMD64, runtime="Preinstalled")[0] - - def assert_topic_subscription_status(self, topic, subscription_status): - if subscription_status.get('subscribed_success', None): - self.assertIn(topic, subscription_status.get('subscribed_success'), - 'Topic %s not found on the subscribed list' % topic) - self.logger.info('Topic %s subscribed successfully' % topic) - return - elif subscription_status.get('subscribed_error', None): - error = subscription_status.get('subscribed_error')[0][topic] - self.logger.info('Topic subscription failed due to %s' % error) - - raise AssertionError('Topic subscription failed for the topic: %s' % topic) - - def assert_topic_unsubscription_status(self, topic, unsubscription_status): - if unsubscription_status.get('unsubscribed_success', None): - self.assertIn(topic, unsubscription_status.get('unsubscribed_success'), - 'Topic %s not found on the unsubscribed list' % topic) - self.logger.info('Topic %s unsubscribed successfully' % topic) - return - elif unsubscription_status.get('unsubscribed_error', None): - error = unsubscription_status.get('unsubscribed_error')[0] - self.logger.error('Topic unsubscription failed due to %s' % error) - - raise AssertionError('Topic unsubscription failed for the topic: %s' % topic) - - def assert_topic_status(self, topic_status, topic, kind): - self.logger.info('Asserting subscribed topic is present on the subscription status') - if isinstance(kind, TopicKind): - kind = kind.value - for topic_dict in topic_status.Subscribed[kind.lower()]: - if topic == topic_dict['name']: - self.logger.info('Topic %s is in the subscribed list' % topic) - return - self.logger.error('Topic %s is not found in the subscribed list' % topic) - raise AssertionError('%s topic is not in subscribed list' % topic) - - def subscribe_any_topic(self): - topics = self.device.topics() - self.assertNotEqual(0, len(topics), 'Topics should not be empty') - topic = topics[0] - 
subscription_status = self.device.subscribe_topic(topic, QoS.MEDIUM.value, TopicKind.LOG) - topic_status = self.device.topic_status() - self.assert_topic_subscription_status(topic, subscription_status) - self.assert_topic_status(topic_status, topic, TopicKind.LOG) - return topic - - def test_topics(self): - self.logger.info('Started device topics test') - self.logger.info('Getting topic lists') - topics = self.device.topics() - self.assertTrue(isinstance(topics, list)) - for topic in topics: - self.assertTrue(isinstance(topic, six.string_types)) - self.logger.info(topics) - - def test_topic_status(self): - self.logger.info('Getting topic status') - topic_status = self.device.topic_status() - self.assertTrue(isinstance(topic_status.Subscribed.metric, list)) - self.assertTrue(isinstance(topic_status.Subscribed.log, list)) - self.assertTrue(isinstance(topic_status.Unsubscribed, list)) - self.logger.info(topic_status) - - def test_subscribe_topic(self): - self.logger.info('Subscribing for a valid topic') - self.subscribe_any_topic() - - def test_subscribe_unknown_topic(self): - self.logger.info('Subscribing for unknown topic') - unknown_topic = '/unknow_topic' - with self.assertRaises(AssertionError): - subscription_status = self.device.subscribe_topic(unknown_topic, QoS.HIGH.value, TopicKind.METRIC) - self.assert_topic_subscription_status(unknown_topic, subscription_status) - - def test_unsubscribe_topic(self): - self.logger.info('Unsubscribing valid topic') - topic_status = self.device.topic_status() - topic = None - if len(topic_status.Subscribed.metric) > 0: - topic = topic_status.Subscribed.metric[0].get('name') - kind = TopicKind.METRIC - elif len(topic_status.Subscribed.log) > 0: - topic = topic_status.Subscribed.log[0]['name'] - kind = TopicKind.LOG - - if not topic: - topic = self.subscribe_any_topic() - kind = TopicKind.LOG - - unsubscription_status = self.device.unsubscribe_topic(topic, kind) - self.assert_topic_unsubscription_status(topic, 
unsubscription_status) - - def test_unsubscribe_unknown_topic(self): - self.logger.info('Unsubscribing invalid topic') - unknown_topic = '/unknow_topic' - with self.assertRaises(AssertionError): - unsubscription_status = self.device.unsubscribe_topic(unknown_topic, TopicKind.METRIC) - self.assert_topic_unsubscription_status(unknown_topic, unsubscription_status) - - def test_subscribe_topic_with_fields_override(self): - self.logger.info('Subscribing for unknown topic') - unknown_topic = '/unknow_topic' - subscription_status = self.device.subscribe_topic(unknown_topic, QoS.HIGH.value, TopicKind.METRIC, - whitelist_field=['randomfieldoverride'], - fail_on_topic_inexistence=False) - self.assert_topic_subscription_status(unknown_topic, subscription_status) - - def test_subscribe_topic_with_tags_fields_override(self): - self.logger.info('Subscribing for unknown topic') - unknown_topic = '/unknow_topic' - subscription_status = self.device.subscribe_topic(unknown_topic, QoS.HIGH.value, TopicKind.METRIC, - whitelist_field=['randomfieldoverride'], - whitelist_tag=['randomtags'], - fail_on_topic_inexistence=False) - self.assert_topic_subscription_status(unknown_topic, subscription_status) - - def test_subscribe_topic_with_tags_override_error(self): - self.logger.info('Subscribing for unknown topic') - unknown_topic = '/unknow_topic' - self.with_error('', BadRequestError, self.device.subscribe_topic, unknown_topic, QoS.HIGH.value, - TopicKind.METRIC, - whitelist_tag=['randomtags', 1, 2], - whitelist_field=['randomfieldoverride'], - fail_on_topic_inexistence=False) - - def test_subscribe_topic_with_fields_override_error(self): - self.logger.info('Subscribing for unknown topic') - unknown_topic = '/unknow_topic' - self.with_error('', BadRequestError, self.device.subscribe_topic, unknown_topic, QoS.HIGH.value, - TopicKind.METRIC, - whitelist_field=['randomfieldoverride', 1, 2], - whitelist_tag=['randomtags'], - fail_on_topic_inexistence=False) diff --git 
a/sdk_test/jsons/builds/listener.json b/sdk_test/jsons/builds/listener.json deleted file mode 100644 index d8e3348d..00000000 --- a/sdk_test/jsons/builds/listener.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "buildName": "listener", - "repository": "https://github.com/rapyuta-robotics/io_tutorials.git", - "strategyType": "Source", - "architecture": "amd64", - "isRos": true, - "rosDistro": "melodic", - "contextDir": "talk/listener" -} diff --git a/sdk_test/jsons/builds/pingpong.json b/sdk_test/jsons/builds/pingpong.json deleted file mode 100644 index d2473abd..00000000 --- a/sdk_test/jsons/builds/pingpong.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "buildName": "pingpong", - "secret": "git", - "repository": "ssh://git@bitbucket.org/rapyutians/io_test_scenarios#rapyutaio", - "strategyType": "Source", - "architecture": "amd64", - "isRos": true, - "rosDistro": "melodic", - "buildOptions": { - "catkinOptions": [ - { - "rosPkgs": "pingpong" - } - ] - } -} \ No newline at end of file diff --git a/sdk_test/jsons/builds/talker-noetic.json b/sdk_test/jsons/builds/talker-noetic.json deleted file mode 100644 index d6f8798a..00000000 --- a/sdk_test/jsons/builds/talker-noetic.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "buildName": "talker-noetic", - "repository": "https://github.com/rapyuta-robotics/io_tutorials.git", - "branch": "master", - "strategyType": "Source", - "architecture": "amd64", - "isRos": true, - "rosDistro": "noetic", - "contextDir": "talk/talker3" -} diff --git a/sdk_test/jsons/builds/talker.json b/sdk_test/jsons/builds/talker.json deleted file mode 100644 index 43444f2a..00000000 --- a/sdk_test/jsons/builds/talker.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "buildName": "talker", - "repository": "https://github.com/rapyuta-robotics/io_tutorials.git", - "strategyType": "Source", - "architecture": "amd64", - "isRos": true, - "rosDistro": "melodic", - "contextDir": "talk/talker" -} diff --git a/sdk_test/jsons/builds/throttle-latch-build.json 
b/sdk_test/jsons/builds/throttle-latch-build.json deleted file mode 100644 index 275a5529..00000000 --- a/sdk_test/jsons/builds/throttle-latch-build.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "branch": "master", - "buildName": "throttle-latch-build", - "strategyType": "Source", - "contextDir": "talk/throttle_latch", - "repository": "https://github.com/rapyuta-robotics/io_tutorials", - "architecture": "amd64", - "isRos": true, - "triggerName": "", - "tagName": "", - "rosDistro": "melodic", - "simulationOptions": { - "simulation": false - } -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/cloud-non-ros.json b/sdk_test/jsons/packages/cloud-non-ros.json deleted file mode 100644 index 86198652..00000000 --- a/sdk_test/jsons/packages/cloud-non-ros.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "name": "cloud-non-ros", - "packageVersion": "v1.0.0", - "description": "cloud-non-ros sdk test package", - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [ - { - "name": "ep1", - "exposeExternally": true, - "port": 443, - "targetPort": 5000, - "proto": "HTTPS" - } - ] - }, - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": false - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "exec", - "cmd": [], - "docker": "hitesh99/simpleflask:v1" - } - ], - "parameters": [] - } - ], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/cloud-transform.json b/sdk_test/jsons/packages/cloud-transform.json deleted file mode 100644 index c18a13dc..00000000 --- a/sdk_test/jsons/packages/cloud-transform.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "name": "cloud-transform", - "packageVersion": "v1.0.0", - "description": 
"cloud-transform sdk test package", - "plans": [ - { - "name": "Plan1", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [] - }, - "ros": { - "topics": [ - { - "name": "telemetry_decorated", - "qos": "max", - "scoped": "False", - "targeted": "False" - } - ], - "services": [], - "actions": [], - "isROS": true - }, - "requiredRuntime": "cloud", - "executables": [ - { - "name": "CloudTransfExec", - "gitExecutable": { - "repository": "https://github.com/bhuvanchandra/ros_string_decorator_py.git", - "strategyType": "Source", - "dockerFilePath": "", - "contextDir": "" - }, - "cmd": [ - "roslaunch string_decorator string_decorator.launch" - ] - } - ], - "parameters": [], - "architecture": "amd64" - } - ], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/delete-package-using-client.json b/sdk_test/jsons/packages/delete-package-using-client.json deleted file mode 100644 index c139df03..00000000 --- a/sdk_test/jsons/packages/delete-package-using-client.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "name": "delete-package-using-client", - "packageVersion": "v1.0.0", - "description": "cloud-non-ros sdk test package", - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [ - { - "name": "ep1", - "exposeExternally": true, - "port": 443, - "targetPort": 5000, - "proto": "HTTPS" - } - ] - }, - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": false - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "exec", - "cmd": [], - "docker": "hitesh99/simpleflask:v1" - } - ], - "parameters": [] - } - ], - "dependentDeployments": 
[], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/delete-package.json b/sdk_test/jsons/packages/delete-package.json deleted file mode 100644 index ac5f55e0..00000000 --- a/sdk_test/jsons/packages/delete-package.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "name": "delete-package", - "packageVersion": "v1.0.0", - "description": "cloud-non-ros sdk test package", - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [ - { - "name": "ep1", - "exposeExternally": true, - "port": 443, - "targetPort": 5000, - "proto": "HTTPS" - } - ] - }, - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": false - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "exec", - "cmd": [], - "docker": "hitesh99/simpleflask:v1" - } - ], - "parameters": [] - } - ], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/device-volume.json b/sdk_test/jsons/packages/device-volume.json deleted file mode 100644 index e512f9d5..00000000 --- a/sdk_test/jsons/packages/device-volume.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "device-volume", - "packageVersion": "v1.0.0", - "description": "device-volume sdk test package", - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": false - }, - "requiredRuntime": "device", - "architecture": "amd64", - "executables": [ - { - "name": "nginx", - "simulationOptions": { - "simulation": false - }, - "cmd": [ ], - 
"docker": "nginx:alpine" - } - ], - "parameters": [] - } - ], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/fast-talker-device-docker-with-rosbags.json b/sdk_test/jsons/packages/fast-talker-device-docker-with-rosbags.json deleted file mode 100644 index b500e3fa..00000000 --- a/sdk_test/jsons/packages/fast-talker-device-docker-with-rosbags.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "name": "test-package", - "packageVersion": "v1.0.0", - "description": "", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": { - - }, - "singleton": false, - "components": [ - { - "name": "talker-fast-device", - "description": "", - "ros": { - "topics": [ - - ], - "services": [ - - ], - "actions": [ - - ], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "device", - "restart_policy": "always", - "architecture": "amd64", - "executables": [ - { - "name": "talker", - "simulationOptions": { - "simulation": false - }, - "cmd": [ - "roslaunch talker talker.launch" - ] - } - ], - "parameters": [ - { - "default": "100000", - "name": "RATE", - "description": "" - } - ], - "rosBagJobDefs": [ - { - "name": "continuous_upload_type", - "recordOptions": { - "allTopics": true, - "maxSplits": 5, - "maxSplitSize": 10 - }, - "uploadOptions": { - "uploadType": "Continuous", - "maxUploadRate": 5242880, - "purgeAfter": false - } - } - ] - } - ], - "includePackages": [ - - ], - "dependentDeployments": [ - - ], - "inboundROSInterfaces": { - "anyIncomingScopedOrTargetedRosConfig": false - }, - "exposedParameters": [ - - ] - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/inbound-incoming-scoped-targeted.json b/sdk_test/jsons/packages/inbound-incoming-scoped-targeted.json deleted file mode 100644 index 28898da7..00000000 --- 
a/sdk_test/jsons/packages/inbound-incoming-scoped-targeted.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "apiVersion": "2.0.0", - "name": "inbound-incoming-scoped-targeted", - "packageVersion": "v1.0.0", - "description": "Package contains single component", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "listener", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [] - }, - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "ListenerExec", - "simulationOptions": { - "simulation": false - }, - "gitExecutable": { - "repository": "https://github.com/rapyuta/io_tutorials", - "strategyType": "Source", - "dockerFilePath": "", - "contextDir": "" - }, - "buildOptions": { - "catkinOptions": [] - }, - "cmd": [ - "roslaunch listener listener.launch" - ] - } - ], - "parameters": [] - } - ], - "dependentDeployments": [], - "inboundROSInterfaces": { - "anyIncomingScopedOrTargetedRosConfig": true - }, - "exposedParameters": [] - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/latching-pkg.json b/sdk_test/jsons/packages/latching-pkg.json deleted file mode 100644 index ea1620a0..00000000 --- a/sdk_test/jsons/packages/latching-pkg.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "name": "latching-pkg", - "packageVersion": "v1.0.0", - "description": "", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": { - - }, - "singleton": false, - "components": [ - { - "name": "latching-component", - "description": "", - "ros": { - "topics": [ - - ], - "services": [ - - ], - "actions": [ - - ], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "device", - "restart_policy": "always", - "architecture": "amd64", - "executables": [ - { - "name": "latching-executable", - "simulationOptions": { - 
"simulation": false - }, - "cmd": [ - "roslaunch latching latch.launch" - ] - } - ], - "parameters": [ - - ], - "rosBagJobDefs": [ - - ] - } - ], - "includePackages": [ - - ], - "dependentDeployments": [ - - ], - "inboundROSInterfaces": { - "anyIncomingScopedOrTargetedRosConfig": false - }, - "exposedParameters": [ - - ] - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/listener-docker.json b/sdk_test/jsons/packages/listener-docker.json deleted file mode 100644 index 5c0f9801..00000000 --- a/sdk_test/jsons/packages/listener-docker.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "listener-docker", - "packageVersion": "v1.0.0", - "description": "listener-docker sdk test package", - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "device", - "architecture": "amd64", - "executables": [ - { - "name": "listenerExec", - "gitExecutable": { - "repository": "https://github.com/bhuvanchandra/listener_py.git", - "strategyType": "Source", - "dockerFilePath": "", - "contextDir": "" - }, - "cmd": [ - "roslaunch listener listener.launch" - ] - } - ], - "parameters": [ - { - "default": "/telemetry_decorated", - "name": "topic_name", - "description": "config param.." 
- } - ] - } - ], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/listener.json b/sdk_test/jsons/packages/listener.json deleted file mode 100644 index 0eae6e78..00000000 --- a/sdk_test/jsons/packages/listener.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "name": "listener", - "packageVersion": "v1.0.0", - "description": "listener amd64 sdk test package", - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "device", - "architecture": "amd64", - "executables": [ - { - "name": "listenerExec", - "cmd": [ - "roslaunch listener listener.launch" - ] - } - ], - "parameters": [] - } - ], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/nginx-multi-component.json b/sdk_test/jsons/packages/nginx-multi-component.json deleted file mode 100644 index e310f7c1..00000000 --- a/sdk_test/jsons/packages/nginx-multi-component.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "name": "nginx-multi-component", - "packageVersion": "v1.0.0", - "description": "", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": { }, - "singleton": false, - "components": [ - { - "name": "nginx2", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [ - { - "name": "test2", - "exposeExternally": true, - "port": 443, - "targetPort": 80, - "proto": "HTTPS" - } - ] - }, - "ros": { - "topics": [ ], - "services": [ ], - "actions": [ ], - "isROS": false - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": 
"nginx2", - "simulationOptions": { - "simulation": false - }, - "cmd": [ ], - "docker": "nginx:alpine" - } - ], - "parameters": [ ] - }, - { - "name": "nginx", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [ - { - "name": "test", - "exposeExternally": true, - "port": 443, - "targetPort": 80, - "proto": "HTTPS" - } - ] - }, - "ros": { - "topics": [ ], - "services": [ ], - "actions": [ ], - "isROS": false - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "nginx2", - "simulationOptions": { - "simulation": false - }, - "cmd": [ ], - "docker": "nginx:alpine" - } - ], - "parameters": [ ] - } - ], - "dependentDeployments": [ ], - "inboundROSInterfaces": { - "topics": [ ], - "services": [ ], - "actions": [ ] - }, - "exposedParameters": [ ] - } - ] -} diff --git a/sdk_test/jsons/packages/nginx-single-component.json b/sdk_test/jsons/packages/nginx-single-component.json deleted file mode 100644 index fccb634e..00000000 --- a/sdk_test/jsons/packages/nginx-single-component.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "plans": [ - { - "singleton": false, - "name": "default", - "inboundROSInterfaces": { - "topics": [ ], - "services": [ ], - "actions": [ ] - }, - "dependentDeployments": [ ], - "components": [ - { - "executables": [ - { - "name": "nginx", - "simulationOptions": { - "simulation": false - }, - "cmd": [ ], - "docker": "nginx:alpine" - } - ], - "cloudInfra": { - "endpoints": [ - { - "targetPort": 80, - "proto": "HTTPS", - "exposeExternally": true, - "name": "test", - "port": 443 - } - ], - "replicas": 1 - }, - "name": "nginx", - "parameters": [ ], - "architecture": "amd64", - "requiredRuntime": "cloud", - "ros": { - "services": [ ], - "topics": [ ], - "isROS": false, - "actions": [ ] - }, - "description": "" - } - ], - "exposedParameters": [ ], - "metadata": { } - } - ], - "description": "", - "bindable": true, - "packageVersion": "v1.0.0", - "name": "nginx-single-component" -} diff --git 
a/sdk_test/jsons/packages/no-scoped-targeted.json b/sdk_test/jsons/packages/no-scoped-targeted.json deleted file mode 100644 index 42be2922..00000000 --- a/sdk_test/jsons/packages/no-scoped-targeted.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "name": "no-scoped-targeted", - "packageVersion": "v1.0.0", - "description": "no targeted or scoped sdk test package", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [] - }, - "ros": { - "topics": [ - { - "name": "topic_B", - "qos": "low", - "compression": "snappy", - "scoped": false, - "targeted": false - }, - { - "name": "topic_A", - "qos": "low", - "compression": "", - "scoped": false, - "targeted": false - } - ], - "services": [ - { - "name": "srv_B", - "compression": "snappy", - "scoped": false - }, - { - "name": "srv_A", - "compression": "", - "scoped": false - } - ], - "actions": [ - { - "name": "actionA", - "compression": "", - "scoped": false - } - ], - "isROS": true - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "docker_exec", - "cmd": [ - "/bin/bash", - "-c", - "sleep 10000" - ], - "docker": "ubuntu" - } - ], - "parameters": [] - } - ], - "dependentDeployments": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - }, - "exposedParameters": [] - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/pv-reader.json b/sdk_test/jsons/packages/pv-reader.json deleted file mode 100644 index 932dc76b..00000000 --- a/sdk_test/jsons/packages/pv-reader.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "name": "pv-reader", - "packageVersion": "v1.0.0", - "description": "pv-reader sdk test package", - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "cloudInfra": { - "replicas": 1, - 
"endpoints": [] - }, - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": false - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "CompReaderExec", - "gitExecutable": { - "repository": "https://github.com/hiteshsethi/go-reader-writer", - "strategyType": "Docker", - "dockerFilePath": "Dockerfile.reader", - "contextDir": "" - }, - "cmd": [] - } - ], - "parameters": [] - } - ], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/rosbag-talker-cloud.json b/sdk_test/jsons/packages/rosbag-talker-cloud.json deleted file mode 100644 index f83ede78..00000000 --- a/sdk_test/jsons/packages/rosbag-talker-cloud.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "name": "test-rosbag-talker-cloud-pkg", - "packageVersion": "v1.0.0", - "description": "", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "talker-cloud", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [] - }, - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "talker", - "simulationOptions": { - "simulation": false - }, - "cmd": [ - "roslaunch talker talker.launch" - ] - } - ], - "parameters": [], - "rosBagJobDefs": [ - { - "name":"test-rosbag-defs", - "recordOptions":{ - "topics":[ - "/telemetry" - ] - } - } - ] - } - ], - "dependentDeployments": [], - "inboundROSInterfaces": { - "anyIncomingScopedOrTargetedRosConfig": false - }, - "exposedParameters": [] - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/scoped-cloud.json b/sdk_test/jsons/packages/scoped-cloud.json deleted file mode 100644 index 1816491c..00000000 --- 
a/sdk_test/jsons/packages/scoped-cloud.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "scoped-cloud", - "packageVersion": "v1.0.0", - "description": "scoped sdk test package", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "cloudping", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [] - }, - "ros": { - "topics": [ - { - "name": "ping", - "qos": "low", - "compression": "", - "scoped": true, - "targeted": false - } - ], - "services": [], - "actions": [], - "isROS": true - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "cloudy", - "cmd": [] - } - ], - "parameters": [ - { - "default": "pingpong", - "name": "ROS_PKG", - "description": "" - }, - { - "default": "pingst.launch", - "name": "ROS_LAUNCH_FILE", - "description": "" - } - ] - } - ], - "includePackages": [], - "dependentDeployments": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - }, - "exposedParameters": [] - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/scoped-targeted.json b/sdk_test/jsons/packages/scoped-targeted.json deleted file mode 100644 index 84412bb2..00000000 --- a/sdk_test/jsons/packages/scoped-targeted.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "name": "scoped-targeted", - "packageVersion": "v1.0.0", - "description": "scoped and targeted sdk test package", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "cloudping", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [] - }, - "ros": { - "topics": [ - { - "name": "ping", - "qos": "low", - "compression": "", - "scoped": true, - "targeted": false - } - ], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "cloudy", - 
"cmd": [] - } - ], - "parameters": [ - { - "default": "pingpong", - "name": "ROS_PKG", - "description": "" - }, - { - "default": "pingst.launch", - "name": "ROS_LAUNCH_FILE", - "description": "" - } - ] - }, - { - "name": "devicepong", - "description": "", - "ros": { - "topics": [ - { - "name": "pong", - "qos": "low", - "compression": "", - "scoped": false, - "targeted": true - } - ], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "device", - "architecture": "amd64", - "executables": [ - { - "name": "divvy", - "cmd": [ - "roslaunch pingpong pongst.launch" - ] - } - ], - "parameters": [] - } - ], - "dependentDeployments": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - }, - "exposedParameters": [] - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/talker-cloud-device.json b/sdk_test/jsons/packages/talker-cloud-device.json deleted file mode 100644 index 1170c4e9..00000000 --- a/sdk_test/jsons/packages/talker-cloud-device.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "name": "talker-cloud-device", - "packageVersion": "v1.0.0", - "description": "", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "talker-device", - "description": "", - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "device", - "restart_policy": "no", - "architecture": "amd64", - "executables": [ - { - "name": "talker", - "simulationOptions": { - "simulation": false - }, - "cmd": [ - "roslaunch talker talker.launch" - ] - } - ], - "parameters": [], - "rosBagJobDefs": [] - }, - { - "name": "talker-cloud", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [] - }, - "ros": { - "topics": [], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "cloud", - 
"architecture": "amd64", - "executables": [ - { - "name": "talker", - "simulationOptions": { - "simulation": false - }, - "cmd": [ - "roslaunch talker talker.launch" - ] - } - ], - "parameters": [], - "rosBagJobDefs": [] - } - ], - "dependentDeployments": [], - "inboundROSInterfaces": { - "anyIncomingScopedOrTargetedRosConfig": false - }, - "exposedParameters": [] - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/talker-cloud.json b/sdk_test/jsons/packages/talker-cloud.json deleted file mode 100644 index d4cfe9fc..00000000 --- a/sdk_test/jsons/packages/talker-cloud.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "apiVersion": "2.0.0", - "name": "talker-cloud", - "packageVersion": "v1.0.0", - "description": "Package contains single component", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "talker", - "description": "", - "cloudInfra": { - "replicas": 1, - "endpoints": [] - }, - "ros": { - "topics": [ - { - "name": "/telemetry", - "qos": "low", - "compression": "", - "scoped": false, - "targeted": true - } - ], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "TalkerExec", - "simulationOptions": { - "simulation": false - }, - "gitExecutable": { - "repository": "https://github.com/rapyuta/io_tutorials", - "strategyType": "Source", - "dockerFilePath": "", - "contextDir": "" - }, - "buildOptions": { - "catkinOptions": [] - }, - "cmd": [ - "rostopic pub -r 10 /listener/telemetry std_msgs/String rapyuta" - ] - } - ], - "parameters": [] - } - ], - "dependentDeployments": [], - "inboundROSInterfaces": { - "anyIncomingScopedOrTargetedRosConfig": false - }, - "exposedParameters": [] - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/talker-docker.json b/sdk_test/jsons/packages/talker-docker.json deleted file mode 100644 index 
b2da872b..00000000 --- a/sdk_test/jsons/packages/talker-docker.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "name": "talker-docker", - "packageVersion": "v1.0.0", - "description": "talker-docker sdk test package", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "ros": { - "topics": [ - { - "name": "/telemetry", - "qos": "med", - "scoped": "False", - "targeted": "False" - } - ], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "device", - "restart_policy": "no", - "architecture": "amd64", - "executables": [ - { - "name": "talkerExec", - "simulationOptions": { - "simulation": false - }, - "cmd": [ - "roslaunch talker talker.launch" - ], - "docker": "quay.io/rapyuta/io_tutorials:latest" - } - ], - "parameters": [], - "rosBagJobDefs": [] - } - ], - "includePackages": [], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/talker-noetic.json b/sdk_test/jsons/packages/talker-noetic.json deleted file mode 100644 index 3d7b58c7..00000000 --- a/sdk_test/jsons/packages/talker-noetic.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "name": "talker noetic", - "packageVersion": "v1.0.0", - "description": "noetic talker test package", - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "talker", - "description": "", - "ros": { - "topics": [ - { - "name": "telemetry", - "qos": "low", - "scoped": false, - "targeted": false - } - ], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "noetic" - }, - "requiredRuntime": "cloud", - "architecture": "amd64", - "executables": [ - { - "name": "talkerExec", - "cmd": [ - "roslaunch talker3 talker.launch" - ] - } - ], - "parameters": [ - { - "default": 
"telemetry", - "name": "topic_name", - "description": "" - } - ] - } - ], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/talker.json b/sdk_test/jsons/packages/talker.json deleted file mode 100644 index 62695edb..00000000 --- a/sdk_test/jsons/packages/talker.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "name": "talker", - "packageVersion": "v1.0.0", - "description": "talker amd64 sdk test package", - "plans": [ - { - "name": "default", - "metadata": {}, - "singleton": false, - "components": [ - { - "name": "default", - "description": "", - "ros": { - "topics": [ - { - "name": "telemetry", - "qos": "low", - "scoped": false, - "targeted": false - } - ], - "services": [], - "actions": [], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": "device", - "architecture": "amd64", - "executables": [ - { - "name": "talkerExec", - "cmd": [ - "roslaunch talker talker.launch" - ] - } - ], - "parameters": [ - { - "default": "telemetry", - "name": "topic_name", - "description": "" - } - ] - } - ], - "dependentDeployments": [], - "exposedParameters": [], - "inboundROSInterfaces": { - "topics": [], - "services": [], - "actions": [] - } - } - ] -} \ No newline at end of file diff --git a/sdk_test/jsons/packages/throttling-pkg.json b/sdk_test/jsons/packages/throttling-pkg.json deleted file mode 100644 index 6bde1c22..00000000 --- a/sdk_test/jsons/packages/throttling-pkg.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "name": "throttling-pkg", - "packageVersion": "v1.0.0", - "description": "", - "bindable": true, - "plans": [ - { - "name": "default", - "metadata": { - - }, - "singleton": false, - "components": [ - { - "name": "throttling-component", - "description": "", - "ros": { - "topics": [ - - ], - "services": [ - - ], - "actions": [ - - ], - "isROS": true, - "ros_distro": "melodic" - }, - "requiredRuntime": 
"device", - "restart_policy": "always", - "architecture": "amd64", - "executables": [ - { - "name": "throttling-executable", - "simulationOptions": { - "simulation": false - }, - "cmd": [ - "roslaunch throttling throttle.launch" - ] - } - ], - "parameters": [ - - ], - "rosBagJobDefs": [ - - ] - } - ], - "includePackages": [ - - ], - "dependentDeployments": [ - - ], - "inboundROSInterfaces": { - "anyIncomingScopedOrTargetedRosConfig": false - }, - "exposedParameters": [ - - ] - } - ] -} \ No newline at end of file diff --git a/sdk_test/openshift/sdk-config.sample.yaml b/sdk_test/openshift/sdk-config.sample.yaml index 8baad6f0..9a70d1c8 100644 --- a/sdk_test/openshift/sdk-config.sample.yaml +++ b/sdk_test/openshift/sdk-config.sample.yaml @@ -8,25 +8,13 @@ data: { "catalog_host": "https://qacatalog.az39.rapyuta.io", "core_api_host": "https://qaapiserver.az39.rapyuta.io", + "v2_api_host": "https://qaapi.rapyuta.io", "hwil_host": "https://hwil.rapyuta.io", "hwil_user": "ansible", "hwil_password": "HWIL_PASSWORD", - "auth_token": "AUTH_TOKEN", + "auth_token": "Bearer AUTH_TOKEN", + "organization_guid": "org_guid", "devices": [ - { - "name": "supervisord-arm32", - "runtime": "Preinstalled", - "ip": "10.91.1.14", - "arch": "arm32v7", - "distro": "kinetic" - }, - { - "name": "docker-compose-arm32", - "runtime": "Dockercompose", - "ip": "10.91.1.15", - "arch": "arm32v7", - "distro": "kinetic" - }, { "name": "docker-compose-amd64", "runtime": "Dockercompose", @@ -35,7 +23,4 @@ data: "distro": "melodic" } ], - "git": { - "ssh-key": "SSH_KEY" - } } diff --git a/sdk_test/package/__init__.py b/sdk_test/package/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/sdk_test/package/cloud_non_ros_test.py b/sdk_test/package/cloud_non_ros_test.py deleted file mode 100644 index 1386529b..00000000 --- a/sdk_test/package/cloud_non_ros_test.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import absolute_import -import json - -from rapyuta_io.utils import RestClient 
-from sdk_test.config import Configuration -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, add_package, delete_package, get_package - - -class TestCloudNonRosWithEndpoint(PackageTest): - ENDPOINT_NAME = 'ep1' - - CLOUD_NON_ROS_MANIFEST = 'cloud-non-ros.json' - CLOUD_NON_ROS_PACKAGE = 'test-cloud-non-ros-with-endpoint' - - @classmethod - def setUpClass(cls): - add_package(cls.CLOUD_NON_ROS_MANIFEST, cls.CLOUD_NON_ROS_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.CLOUD_NON_ROS_PACKAGE) - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - self.package = get_package(self.CLOUD_NON_ROS_PACKAGE) - self.provision_config = self.package.get_provision_configuration() - self.cnr_deployment = None - - def tearDown(self): - self.deprovision_all_deployments([self.cnr_deployment]) - - def validate_service_endpoint(self, endpoint_name, endpoint): - self.logger.info('Checking the status of endpoint(%s) - %s' % (endpoint_name, endpoint)) - self.assert_endpoint_url(endpoint) - self.logger.info('%s: Valid endpoint url' % endpoint) - endpoint = endpoint + '/status' - response = RestClient(endpoint).execute() - self.assertEqual(response.status_code, 200) - self.assertEqual(json.loads(response.text)['status'], 'running') - self.logger.info('Endpoint %s returns status running' % endpoint) - - def test_cloud_non_ros(self): - self.logger.info('Testing cloud non ros with endpoint and replicas') - self.cnr_deployment = self.deploy_package(self.package, self.provision_config) - self.cnr_deployment.poll_deployment_till_ready() - - binding_obj = self.get_service_binding(self.cnr_deployment) - - for internal_component in self.package.plans[0].internalComponents: - component_id = internal_component.componentId - component = binding_obj.credentials.components.get(component_id) - if component: - self.logger.info('Getting network endpoints from service binding') - network_endpoints = 
component.networkEndpoints - self.logger.info('Fetching the status of "%s" endpoint' % self.ENDPOINT_NAME) - endpoint = network_endpoints.get(self.ENDPOINT_NAME) - self.validate_service_endpoint(self.ENDPOINT_NAME, endpoint) - - self.validate_package_refresh() - self.validate_deployment_refresh() - - def validate_package_refresh(self): - partial_package = [p for p in self.config.client.get_all_packages() if p.packageId == self.package.packageId][0] - self.assertTrue(partial_package.is_partial) - with self.assertRaises(AttributeError): - partial_package.ownerProject - partial_package.refresh() - self.assertFalse(partial_package.is_partial) - self.assertTrue(partial_package.ownerProject) - - def validate_deployment_refresh(self): - partial_deployment = [d for d in self.config.client.get_all_deployments() - if d.deploymentId == self.cnr_deployment.deploymentId][0] - self.assertTrue(partial_deployment.is_partial) - with self.assertRaises(AttributeError): - partial_deployment.parameters - partial_deployment.refresh() - self.assertFalse(partial_deployment.is_partial) - self.assertTrue(partial_deployment.parameters) diff --git a/sdk_test/package/cloud_scoped_targeted_test.py b/sdk_test/package/cloud_scoped_targeted_test.py deleted file mode 100644 index 075b0a70..00000000 --- a/sdk_test/package/cloud_scoped_targeted_test.py +++ /dev/null @@ -1,114 +0,0 @@ -# encoding: utf-8 - -from __future__ import absolute_import - -from rapyuta_io import DeviceArch -from sdk_test.config import Configuration -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, start_roscore, stop_roscore, add_package, delete_package, \ - get_package, add_cloud_native_network, add_cloud_routed_network, delete_native_network, delete_routed_network, \ - get_routed_network, get_native_network - - -class TestScopedTargeted(PackageTest): - ST_DEV_COMP = "devicepong" - - SCOPED_TARGETED_PACKAGE = 'test-scoped-targeted' - SCOPED_TARGETED_MANIFEST = 'scoped-targeted.json' 
- - SCOPED_CLOUD_PACKAGE = 'test-scoped-cloud' - SCOPED_CLOUD_MANIFEST = 'scoped-cloud.json' - - NO_SCOPED_TARGETED_PACKAGE = 'test-no-scoped-targeted' - NO_SCOPED_TARGETED_MANIFEST = 'no-scoped-targeted.json' - - @classmethod - def setUpClass(cls): - add_package(cls.SCOPED_TARGETED_MANIFEST, cls.SCOPED_TARGETED_PACKAGE - , build_map={ - 'cloudping': {'cloudy': ('pingpong-build', 'pingpong.json')} - }) - add_package(cls.SCOPED_CLOUD_MANIFEST, cls.SCOPED_CLOUD_PACKAGE, - build_map={ - 'cloudping': {'cloudy': ('pingpong-build', 'pingpong.json')} - }) - add_package(cls.NO_SCOPED_TARGETED_MANIFEST, cls.NO_SCOPED_TARGETED_PACKAGE) - config = Configuration() - devices = config.get_devices(arch=DeviceArch.AMD64, runtime="Preinstalled") - start_roscore(devices[0]) - add_cloud_native_network('cloud_scoped_targeted_native_network') - add_cloud_routed_network('cloud_scoped_targeted_routed_network') - - @classmethod - def tearDownClass(cls): - config = Configuration() - devices = config.get_devices(arch=DeviceArch.AMD64, runtime="Preinstalled") - stop_roscore(devices[0]) - delete_package(cls.SCOPED_TARGETED_PACKAGE, False) - delete_package(cls.SCOPED_CLOUD_PACKAGE) - delete_package(cls.NO_SCOPED_TARGETED_PACKAGE) - delete_native_network('cloud_scoped_targeted_native_network') - delete_routed_network('cloud_scoped_targeted_routed_network') - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - devices = self.config.get_devices(arch=DeviceArch.AMD64, runtime="Preinstalled") - self.device = devices[0] - self.deployments = [] - self.non_st_dep_deployment = None - self.routed_network = get_routed_network('cloud_scoped_targeted_routed_network') - self.native_network = get_native_network('cloud_scoped_targeted_native_network') - - def tearDown(self): - self.deprovision_all_deployments(self.deployments) - - def test_scoped_targeted_package(self): - package = get_package(self.SCOPED_TARGETED_PACKAGE) - provision_config = 
package.get_provision_configuration() - provision_config.add_device(self.ST_DEV_COMP, self.device, ignore_device_config=['network_interface']) - provision_config.add_routed_networks([self.routed_network]) - deployment = self.deploy_package(package, provision_config) - deployment.poll_deployment_till_ready() - self.assert_deployment_status(deployment) - self.deployments.append(deployment) - - def test_scoped_cloud_package_ros_namespace(self): - package = get_package(self.SCOPED_CLOUD_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_native_network(self.native_network) - provision_config.set_component_alias("cloudping", "cloudping", True) - deployment = self.deploy_package(package, provision_config) - deployment.poll_deployment_till_ready() - comp_id = deployment.componentInfo[0].componentID - self.assert_deployment_status_st_check(deployment, comp_id, "cloudping", True) - self.assert_deployment_status(deployment) - self.deployments.append(deployment) - - def test_scoped_targeted_package_non_alias_depends(self): - package = get_package(self.SCOPED_TARGETED_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_device(self.ST_DEV_COMP, self.device, ignore_device_config=['network_interface']) - provision_config.add_routed_networks([self.routed_network]) - comp_id = provision_config.plan.get_component_id(self.ST_DEV_COMP) - st_deployment = self.deploy_package(package, provision_config) - st_deployment.poll_deployment_till_ready() - self.deployments.append(st_deployment) - self.assert_deployment_status(st_deployment) - self.assert_deployment_status_st_check(st_deployment, comp_id, self.device.name, False) - package = get_package(self.NO_SCOPED_TARGETED_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_dependent_deployment(st_deployment) - provision_config.add_routed_network(self.routed_network) - non_st_dep_deployment = self.deploy_package(package, 
provision_config) - self.assert_dependent_deployment(non_st_dep_deployment, [st_deployment]) - non_st_dep_deployment.poll_deployment_till_ready() - self.assert_deployment_status(non_st_dep_deployment) - self.deployments.append(non_st_dep_deployment) - - def assert_deployment_status_st_check(self, deployment, component, alias, set_ros_namespace): - componentobj = getattr(deployment.parameters, component) - self.assertEqual(componentobj.bridge_params.alias, alias) - self.assertEqual(componentobj.bridge_params.setROSNamespace, set_ros_namespace) - self.logger.info('Deployment %s(%s) started successfully' % (deployment.name, - deployment.packageId)) diff --git a/sdk_test/package/cloud_transform_test.py b/sdk_test/package/cloud_transform_test.py deleted file mode 100644 index c6659148..00000000 --- a/sdk_test/package/cloud_transform_test.py +++ /dev/null @@ -1,99 +0,0 @@ -# encoding: utf-8 - -from __future__ import absolute_import -from rapyuta_io import DeviceArch -from rapyuta_io.clients.package import RestartPolicy -from sdk_test.config import Configuration -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, get_package, add_package, delete_package -import six - - -class TestCloudTransform(PackageTest): - - TALKER_MANIFEST = 'talker.json' - LISTENER_MANIFEST = 'listener.json' - CLOUD_TRANSFORM_MANIFEST = 'cloud-transform.json' - - TALKER_PACKAGE = 'test-cloud-transform-talker' - LISTENER_PACKAGE = 'test-cloud-transform-listener' - CLOUD_TRANSFORM_PACKAGE = 'test-cloud-transform-pkg' - - @classmethod - def setUpClass(cls): - add_package(cls.TALKER_MANIFEST, cls.TALKER_PACKAGE) - add_package(cls.LISTENER_MANIFEST, cls.LISTENER_PACKAGE) - add_package(cls.CLOUD_TRANSFORM_MANIFEST, cls.CLOUD_TRANSFORM_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.TALKER_PACKAGE) - delete_package(cls.LISTENER_PACKAGE) - delete_package(cls.CLOUD_TRANSFORM_PACKAGE) - - def setUp(self): - self.config = Configuration() - 
self.logger = get_logger() - devices = self.config.get_devices(arch=DeviceArch.AMD64, runtime='Preinstalled') - self.device = devices[0] - self.routed_network = self.create_cloud_routed_network('cloud_transform_network') - - self.talker_deployment = None - self.cloud_transform_deployment = None - self.listener_deployment = None - - def tearDown(self): - self.deprovision_all_deployments([self.talker_deployment, self.cloud_transform_deployment, - self.listener_deployment]) - self.routed_network.delete() - - def assert_deployment_list_with_device_id(self): - dev_id = self.device.get('uuid') - filtered_deployments = self.config.client.get_all_deployments(device_id=dev_id) - filtered_deployment_ids = map(lambda dep: dep['deploymentId'], filtered_deployments) - device_deployments = self.device.get_deployments() - device_deployment_ids = map(lambda dep: dep['io_deployment_id'], device_deployments) - six.assertCountEqual(self, filtered_deployment_ids, device_deployment_ids, 'both deployments should match') - - def deploy_talker_package(self): - package = get_package(self.TALKER_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_device('default', self.device, ignore_device_config=['network_interface']) - provision_config.add_routed_network(self.routed_network) - self.logger.info('Deploying talker package') - self.talker_deployment = self.deploy_package(package, provision_config) - - def deploy_cloud_transform_package(self): - package = get_package(self.CLOUD_TRANSFORM_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_routed_network(self.routed_network) - provision_config.add_dependent_deployment(self.talker_deployment) - self.logger.info('Deploying cloud transform package') - self.cloud_transform_deployment = self.deploy_package(package, provision_config) - self.assert_dependent_deployment(self.cloud_transform_deployment, [self.talker_deployment]) - - def deploy_listener_package(self, 
restart_policy): - package = get_package(self.LISTENER_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_device('default', self.device, ignore_device_config=['network_interface']) - provision_config.add_routed_network(self.routed_network) - provision_config.add_dependent_deployment(self.cloud_transform_deployment) - provision_config.add_restart_policy('default', restart_policy) - self.listener_deployment = self.deploy_package(package, provision_config) - self.assert_dependent_deployment(self.listener_deployment, [self.cloud_transform_deployment]) - - def test_deploy_cloud_transform(self): - self.deploy_talker_package() - self.talker_deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.talker_deployment) - self.deploy_cloud_transform_package() - self.cloud_transform_deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.cloud_transform_deployment) - self.deploy_listener_package(RestartPolicy.Always) - self.listener_deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.listener_deployment) - listener_provision_config = get_package(self.LISTENER_PACKAGE).get_provision_configuration() - listener_component_id = listener_provision_config.plan.get_component_id("default") - component_context = self.listener_deployment.provisionContext.component_context[listener_component_id] - self.assertEqual(component_context.component_override.restart_policy, RestartPolicy.Always) - self.assert_deployment_list_with_device_id() diff --git a/sdk_test/package/configuration_tests.py b/sdk_test/package/configuration_tests.py deleted file mode 100644 index 3154941f..00000000 --- a/sdk_test/package/configuration_tests.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import absolute_import -from unittest import TestCase - -import os -import hashlib - -from tempfile import mkdtemp -from shutil import rmtree, copyfile - -from sdk_test.config import Configuration - - -class 
ConfigurationTestCase(TestCase): - def setUp(self): - self.config = Configuration() - self.tmp_dir = mkdtemp() - self.download_tmp_dir = mkdtemp() - self.config_dir = 'test_files' - self.config_dir_path = os.path.join(os.path.dirname(__file__), '..', self.config_dir) - self.tree_names = ['warehouse'] - self.files = ( - 'warehouse', - 'warehouse/device.yaml', - 'warehouse/lena.png', - 'warehouse/robot_type', - 'warehouse/robot_type/magni', - 'warehouse/robot_type/magni/lena.png', - 'warehouse/robot_type/magni/device.yaml', - ) - - def tearDown(self): - rmtree(self.tmp_dir) - rmtree(self.download_tmp_dir) - - def setup_local_configuration_structure(self): - for file in self.files: - if '.' not in file: - os.mkdir(os.path.join(self.tmp_dir, file)) - else: - filename = os.path.basename(file) - src_file = os.path.join(self.config_dir_path, filename) - dst_file = os.path.join(self.tmp_dir, os.path.dirname(file), filename) - copyfile(src_file, dst_file) - - @staticmethod - def list_flatten_dir(tmp_dir): - flatten_file_struct = set() - for root, dirs, files in os.walk(tmp_dir): - dir_name = root[len(tmp_dir)+1:] - if dir_name: - flatten_file_struct.add(dir_name) - for filename in files: - flatten_file_struct.add(os.path.join(dir_name, filename)) - return flatten_file_struct - - def assert_checksum(self, src_file, dst_file): - with open(src_file, mode='rb') as f: - src_hash = hashlib.md5(f.read()) - src_hex_digest = src_hash.hexdigest() - with open(dst_file, mode='rb') as f: - dst_hash = hashlib.md5(f.read()) - dst_hex_digest = dst_hash.hexdigest() - self.assertEqual(src_hex_digest, dst_hex_digest) - - def assert_files(self): - upload_file_structure = set(self.files) - download_file_structure = self.list_flatten_dir(self.download_tmp_dir) - self.assertEqual(upload_file_structure, download_file_structure) - files = [x for x in self.files if '.' 
in x] - for filename in files: - self.assert_checksum(os.path.join(self.tmp_dir, filename), os.path.join(self.download_tmp_dir, filename)) - - def test_upload_configuration(self): - self.setup_local_configuration_structure() - self.config.client.upload_configurations(self.tmp_dir, self.tree_names, delete_existing_trees=True) - self.config.client.download_configurations(self.download_tmp_dir, self.tree_names) - self.assert_files() diff --git a/sdk_test/package/create_package_test.py b/sdk_test/package/create_package_test.py deleted file mode 100644 index cf8d2f8b..00000000 --- a/sdk_test/package/create_package_test.py +++ /dev/null @@ -1,39 +0,0 @@ -from __future__ import absolute_import -import unittest - -from rapyuta_io.utils.error import BadRequestError -from sdk_test.config import Configuration - - -class CreatePackage(unittest.TestCase): - - def setUp(self): - self.config = Configuration() - - def test_create_package_fails_for_bad_request(self): - invalid_manifest = { - "apiVersion": "v1.0.0", - "packageVersion": "v1.0.0", - "plans": [ - { - "components": [ - { - "name": "default", - "description": "", - "executables": [ - { - "name": "listenerExec", - "cmd": [ - "roslaunch listener listener.launch" - ] - } - ], - } - ], - } - ] - } - with self.assertRaises(BadRequestError): - self.config.client.create_package(invalid_manifest) - - diff --git a/sdk_test/package/delete_package_test.py b/sdk_test/package/delete_package_test.py deleted file mode 100644 index dc3047ca..00000000 --- a/sdk_test/package/delete_package_test.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import absolute_import -from sdk_test.config import Configuration -from sdk_test.package.package_test import PackageTest -from rapyuta_io.utils.error import PackageNotFound -from sdk_test.util import add_package, get_package - - -class DeletePackage(PackageTest): - - DELETE_MANIFEST = 'delete-package.json' - DELETE_PACKAGE = 'test-delete-package' - - def setUp(self): - self.config = Configuration() 
- add_package(self.DELETE_MANIFEST, self.DELETE_PACKAGE) - - def test_delete_package_using_package_object(self): - package = get_package(self.DELETE_PACKAGE) - packageId = package.packageId - package.delete() - expected_err_msg = 'Package not found' - with self.assertRaises(PackageNotFound) as e: - self.config.client.get_package(packageId) - self.assertEqual(str(e.exception), expected_err_msg) - - def test_delete_package_using_client(self): - package = get_package(self.DELETE_PACKAGE) - packageId = package.packageId - self.config.client.delete_package(package_id=packageId) - expected_err_msg = 'Package not found' - with self.assertRaises(PackageNotFound) as e: - self.config.client.get_package(packageId) - self.assertEqual(str(e.exception), expected_err_msg) diff --git a/sdk_test/package/deployment_test.py b/sdk_test/package/deployment_test.py deleted file mode 100644 index fd3e8ba8..00000000 --- a/sdk_test/package/deployment_test.py +++ /dev/null @@ -1,90 +0,0 @@ -from __future__ import absolute_import - -from sdk_test.config import Configuration -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, add_package, delete_package, get_package -from rapyuta_io.clients.deployment import DeploymentPhaseConstants -from rapyuta_io.utils.error import BadRequestError - -class UpdateDeployment(PackageTest): - - CLOUD_NON_ROS_MANIFEST = 'cloud-non-ros.json' - CLOUD_NON_ROS_PACKAGE = 'test-cloud-non-ros-package' - CLOUD_NON_ROS_DEPLOYMENT = 'test-cloud-non-ros-deployment' - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - add_package(self.CLOUD_NON_ROS_MANIFEST,self.CLOUD_NON_ROS_PACKAGE) - self.cloud_non_ros_pkg = get_package(self.CLOUD_NON_ROS_PACKAGE) - provision_configuration = self.cloud_non_ros_pkg.get_provision_configuration() - self.deployment = self.deploy_package(self.cloud_non_ros_pkg, provision_configuration) - self.deployment.poll_deployment_till_ready() - - def tearDown(self): - 
self.deprovision_all_deployments([self.deployment]) - delete_package(self.CLOUD_NON_ROS_PACKAGE) - - def test_update_deployment_provisioning_deployment(self): - component_context = get_component_context(self.deployment.get("componentInfo", {})) - payload = { - "service_id": self.deployment["packageId"], - "plan_id": self.deployment["planId"], - "deployment_id": self.deployment["deploymentId"], - "context": { - "component_context": component_context - } - } - self.config.client.update_deployment(payload) - deployment = self.config.client.get_deployment(self.deployment["deploymentId"]) - self.assertEqual(deployment["phase"], DeploymentPhaseConstants.PROVISIONING) - - # tries to update deployment which is in provisioning state - with self.assertRaises(BadRequestError): - self.config.client.update_deployment(payload) - - def test_update_deployment_success(self): - self.logger.info("Started update deployment") - component_context = get_component_context(self.deployment.get("componentInfo", {})) - payload = { - "service_id": self.deployment["packageId"], - "plan_id": self.deployment["planId"], - "deployment_id": self.deployment["deploymentId"], - "context": { - "component_context": component_context - } - } - self.config.client.update_deployment(payload) - deployment = self.config.client.get_deployment(self.deployment["deploymentId"]) - self.assertEqual(deployment["phase"], DeploymentPhaseConstants.PROVISIONING) - - deployment.poll_deployment_till_ready() - deployment = self.config.client.get_deployment(self.deployment["deploymentId"]) - self.assertEqual(deployment["phase"], DeploymentPhaseConstants.SUCCEEDED) - -def get_component_context(component_info): - result = {} - for component in component_info: - comp = {} - executables = [] - executableMetaData = component.get("executableMetaData", []) or [] - for exec in executableMetaData: - # Component will be considered only if any of its executables is docker or build - if not (exec.get("docker") or exec.get("buildGUID")): 
- continue - executable = {} - if exec.get("buildGUID"): - executable["buildGUID"] = exec["buildGUID"] - if exec.get("docker"): - executable["docker"] = exec["docker"] - - executable["id"] = exec.get("id", "") - executable["name"] = exec.get("name", "") - executables.append(executable) - - if len(executables) > 0: - result[component["componentID"]] = comp - comp["component"] = {"executables": executables} - comp["update_deployment"] = True - - return result \ No newline at end of file diff --git a/sdk_test/package/get_all_package_test.py b/sdk_test/package/get_all_package_test.py deleted file mode 100644 index a4cdf3dd..00000000 --- a/sdk_test/package/get_all_package_test.py +++ /dev/null @@ -1,71 +0,0 @@ -from __future__ import absolute_import -from sdk_test.config import Configuration -from sdk_test.package.package_test import PackageTest -from sdk_test.util import add_package, delete_package, get_logger -from rapyuta_io.clients.package import Package - - -class GetAllPackage(PackageTest): - - CLOUD_NON_ROS_MANIFEST = 'cloud-non-ros.json' - CLOUD_NON_ROS_PACKAGE = 'test-get-all-packages-cloud-non-ros-pkg' - NGINX_SINGLE_COMPONENT_MANIFEST = 'nginx-single-component.json' - NGINX_SINGLE_COMPONENT_PACKAGE = 'test-get-all-packages-nginx-single-component-pkg' - PACKAGE_VERSION = 'v1.0.0' - - @classmethod - def setUpClass(cls): - add_package(cls.CLOUD_NON_ROS_MANIFEST, cls.CLOUD_NON_ROS_PACKAGE) - add_package(cls.NGINX_SINGLE_COMPONENT_MANIFEST, cls.NGINX_SINGLE_COMPONENT_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.CLOUD_NON_ROS_PACKAGE) - delete_package(cls.NGINX_SINGLE_COMPONENT_PACKAGE) - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - - def assert_package_exists(self, package_list, name, version=None): - for package in package_list: - if package.packageName == name and (not version or package.packageVersion): - return - - return self.fail("package not found in the list") - - def 
test_get_all_packages_no_filter_parameters(self): - packages = self.config.client.get_all_packages() - for pkg in packages: - self.assertIsInstance(pkg, Package, 'pkg should be instance of Package class') - self.assertTrue(pkg.is_partial) - self.assert_package_exists(package_list=packages, name=self.NGINX_SINGLE_COMPONENT_PACKAGE) - self.assert_package_exists(package_list=packages, name=self.CLOUD_NON_ROS_PACKAGE) - - def test_get_all_packages_filter_name_only(self): - name = self.CLOUD_NON_ROS_PACKAGE - packages = self.config.client.get_all_packages(name=name) - for pkg in packages: - self.assertIsInstance(pkg, Package, 'pkg should be instance of Package class') - self.assertTrue(pkg.is_partial) - self.assert_package_exists(package_list=packages, name=name) - - def test_get_all_packages_filter_version_only(self): - version = self.PACKAGE_VERSION - packages = self.config.client.get_all_packages(version=version) - for pkg in packages: - self.assertIsInstance(pkg, Package, 'pkg should be instance of Package class') - self.assertTrue(pkg.is_partial) - self.assertEqual(pkg.packageVersion, version) - self.assert_package_exists(package_list=packages, name=self.NGINX_SINGLE_COMPONENT_PACKAGE) - self.assert_package_exists(package_list=packages, name=self.CLOUD_NON_ROS_PACKAGE) - - def test_get_all_packages_filter_name_filter_version(self): - name = self.NGINX_SINGLE_COMPONENT_PACKAGE - version = self.PACKAGE_VERSION - packages = self.config.client.get_all_packages(name=name, version=version) - for pkg in packages: - self.assertIsInstance(pkg, Package, 'pkg should be instance of Package class') - self.assertTrue(pkg.is_partial) - self.assert_package_exists(package_list=packages, name=self.NGINX_SINGLE_COMPONENT_PACKAGE, version=version) - diff --git a/sdk_test/package/inbound_incoming_scoped_targeted_test.py b/sdk_test/package/inbound_incoming_scoped_targeted_test.py deleted file mode 100644 index 1882a7a0..00000000 --- 
a/sdk_test/package/inbound_incoming_scoped_targeted_test.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import absolute_import -from sdk_test.config import Configuration -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, add_package, delete_package, get_package - - -class InboundIncomingScopedTargetedTestCase(PackageTest): - - INBOUND_INCOMING_SCOPED_TARGETED_MANIFEST = 'inbound-incoming-scoped-targeted.json' - TALKER_CLOUD_MANIFEST = 'talker-cloud.json' - - INBOUND_INCOMING_SCOPED_TARGETED_PACKAGE = 'test-inbound-incoming-scoped-targeted-pkg' - TALKER_CLOUD_PACKAGE = 'test-inbound-incoming-scoped-targeted-talker-cloud-pkg' - - @classmethod - def setUpClass(cls): - add_package(cls.INBOUND_INCOMING_SCOPED_TARGETED_MANIFEST, - cls.INBOUND_INCOMING_SCOPED_TARGETED_PACKAGE) - add_package(cls.TALKER_CLOUD_MANIFEST, - cls.TALKER_CLOUD_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.INBOUND_INCOMING_SCOPED_TARGETED_PACKAGE) - delete_package(cls.TALKER_CLOUD_PACKAGE) - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - self.talker_deployment = None - self.listener_deployment = None - self.routed_network = self.create_cloud_routed_network('cloud_routed_network') - - def tearDown(self): - self.deprovision_all_deployments([self.talker_deployment, self.listener_deployment]) - self.routed_network.delete() - - def deploy_inbound_incoming_scoped_targeted_listener_package(self): - package = get_package(self.INBOUND_INCOMING_SCOPED_TARGETED_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_routed_network(self.routed_network) - self.logger.info('Deploying listener package') - self.listener_deployment = self.deploy_package(package, provision_config) - self.listener_deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.listener_deployment) - - def test_inbound_incoming_scoped_targeted(self): - 
self.deploy_inbound_incoming_scoped_targeted_listener_package() - package = get_package(self.TALKER_CLOUD_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_routed_network(self.routed_network) - self.logger.info('Deploying talker package') - self.talker_deployment = self.deploy_package(package, provision_config) - self.talker_deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.talker_deployment) diff --git a/sdk_test/package/native_network_tests.py b/sdk_test/package/native_network_tests.py deleted file mode 100644 index 9831f9e7..00000000 --- a/sdk_test/package/native_network_tests.py +++ /dev/null @@ -1,198 +0,0 @@ -from __future__ import absolute_import -from rapyuta_io import DeploymentStatusConstants, DeviceArch -from rapyuta_io.clients.deployment import DeploymentPhaseConstants -from rapyuta_io.clients.native_network import NativeNetwork, Parameters -from rapyuta_io.clients.common_models import Limits -from rapyuta_io.clients.package import Runtime, ROSDistro -from rapyuta_io.utils.utils import generate_random_value -from sdk_test.config import Configuration -from sdk_test.device.device_test import DeviceTest -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, get_package, add_package, delete_package - -NETWORK_INTERFACE = 'network_interface' - - -class NativeNetworkTest(PackageTest, DeviceTest): - native_network = None - - TALKER_CLOUD_MANIFEST = 'talker-cloud.json' - TALKER_CLOUD_PACKAGE = 'test-native-network-talker-cloud-pkg' - TALKER_DEVICE_MANIFEST = 'talker-docker.json' - TALKER_DEVICE_PACKAGE = 'test-native-network-talker-device-pkg' - - @classmethod - def setUpClass(cls): - add_package(cls.TALKER_CLOUD_MANIFEST, cls.TALKER_CLOUD_PACKAGE) - add_package(cls.TALKER_DEVICE_MANIFEST, cls.TALKER_DEVICE_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.TALKER_CLOUD_PACKAGE) - delete_package(cls.TALKER_DEVICE_PACKAGE) - - def 
setUp(self): - self.config = Configuration() - self.logger = get_logger() - self.name = 'net-' + generate_random_value() - self.ros_distro = ROSDistro.MELODIC - self.runtime = Runtime.CLOUD - self.parameters = Parameters(Limits(cpu=1, memory=1024)) - self.device_runtime = Runtime.DEVICE - self.docker_device = self.config.get_devices(arch=DeviceArch.AMD64, runtime='Dockercompose')[0] - self.docker_device.refresh() - self.device_parameters = Parameters(limits=None, device=self.docker_device, network_interface='docker0') - - def add_network_interface_config_variable(self, device): - self.logger.info('Adding network interface config variable') - config_vars = device.get_config_variables() - for config_var in config_vars: - if config_var.key == NETWORK_INTERFACE: - config_var.value = 'docker0' - device.update_config_variable(config_var) - return - device.add_config_variable(NETWORK_INTERFACE, 'docker0') - self.logger.info('Added network interface config variable') - - def delete_network_interface_config_variable(self, device): - self.logger.info('Removing network interface config variable') - config_vars = device.get_config_variables() - for config_var in config_vars: - if config_var.key == NETWORK_INTERFACE: - device.delete_config_variable(config_id=config_var.id) - break - self.logger.info('Removed network interface config variable') - - def assert_native_network_status(self): - self.logger.info('Checking the deployment status of the native network {}'.format(self.name)) - status = self.native_network.get_status() - self.assertEqual(status.status, DeploymentStatusConstants.RUNNING.value) - self.assertEqual(status.phase, DeploymentPhaseConstants.SUCCEEDED.value) - self.logger.info('native network %s(%s) started successfully' % (self.native_network.name, - self.native_network.guid)) - - def assert_native_network_fields(self, native_network): - self.logger.info('comparing the details the native network {} just fetched'.format(self.name)) - 
self.assertEqual(self.native_network.name, native_network.name) - self.assertEqual(self.native_network.runtime, native_network.runtime) - self.assertEqual(self.native_network.ros_distro, native_network.ros_distro) - self.assertEqual(self.native_network.parameters.limits.cpu, native_network.parameters.limits.cpu) - self.assertEqual(self.native_network.parameters.limits.memory, native_network.parameters.limits.memory) - self.assertEqual(self.native_network.created_at, native_network.created_at) - self.assertEqual(self.native_network.updated_at, native_network.updated_at) - self.assertEqual(self.native_network.guid, native_network.guid) - self.assertEqual(self.native_network.owner_project, native_network.owner_project) - self.assertEqual(self.native_network.creator, native_network.creator) - self.assertEqual(self.native_network.internal_deployment_guid, - native_network.internal_deployment_guid) - self.assertEqual(self.native_network.internal_deployment_status.phase, - native_network.internal_deployment_status.phase) - self.logger.info('successfully checked the contents of the native network'.format(self.name)) - - def assert_native_network_present_in_list(self, all_native_network): - self.logger.info('Checking the presence of native network {}'.format(self.name)) - guid = self.native_network.guid - native_network_list = list(filter(lambda network: network.guid == guid, all_native_network)) - self.logger.info('Checking if only one native native network with id {} is present' - .format(self.native_network.guid)) - native_network = native_network_list[0] - self.assertEqual(len(native_network_list), 1) - self.assertEqual(self.native_network.name, native_network.name) - self.logger.info('native network {} present'.format(self.name)) - - def assert_native_network_stopped(self): - self.logger.info('Checking if the native network {} stopped'.format(self.name)) - guid = self.native_network.guid - all_native_network = self.config.client.list_native_networks() - native_network 
= list(filter(lambda network: network.guid == guid, all_native_network))[0] - self.assertEqual(native_network.internal_deployment_status.phase, - DeploymentPhaseConstants.DEPLOYMENT_STOPPED.value) - self.logger.info('native network {} stopped'.format(self.name)) - - def validate_refresh(self, guid): - partial_net = [n for n in self.config.client.list_native_networks() if n.guid == guid][0] - self.assertTrue(partial_net.is_partial) - self.assertFalse(partial_net.internal_deployment_status.status) - partial_net.refresh() - self.assertFalse(partial_net.is_partial) - self.assertTrue(partial_net.internal_deployment_status.status) - - def test_01_create_native_network(self): - self.logger.info('creating native network {}'.format(self.name)) - native_network_payload = NativeNetwork(self.name, self.runtime, self.ros_distro, self.parameters) - self.native_network = self.config.client.create_native_network(native_network_payload) - self.logger.info('polling till the native network {} is ready'.format(self.name)) - self.native_network.poll_native_network_till_ready() - self.__class__.native_network = self.config.client.get_native_network(self.native_network.guid) - self.assert_native_network_status() - self.validate_refresh(self.native_network.guid) - - def test_02_get_native_network(self): - self.logger.info('fetching the native network {} just created'.format(self.name)) - guid = self.native_network.guid - native_network = self.config.client.get_native_network(guid) - self.assert_native_network_fields(native_network) - - def test_03_list_native_networks(self): - self.logger.info('fetching the list of all the native networks') - all_native_network = self.config.client.list_native_networks() - self.assert_native_network_present_in_list(all_native_network) - - def test_04_add_native_network_to_package(self): - self.logger.info('Started creating package talker component') - app_package = get_package(self.TALKER_CLOUD_PACKAGE) - prov_config = 
app_package.get_provision_configuration() - self.logger.info('adding the native network {} to the provision configuration of the package'.format(self.name)) - prov_config.add_native_network(self.native_network) - guid = self.native_network.guid - self.assertEqual(prov_config.context['nativeNetworks'], [{"guid": guid}]) - self.logger.info('creating deployment') - deployment = app_package.provision("test_deployment", prov_config) - self.logger.info('polling till deployment is ready') - deployment.poll_deployment_till_ready() - self.logger.info('deployment is ready') - deployment.deprovision() - self.logger.info('de-provisioning the deployment') - - def test_05_delete_native_network(self): - self.logger.info('deleting the native network {}'.format(self.name)) - guid = self.native_network.guid - self.config.client.delete_native_network(guid) - self.assert_native_network_stopped() - - def test_06_create_device_native_network(self): - self.add_network_interface_config_variable(self.docker_device) - self.logger.info('Started creating device native network') - native_network_payload = NativeNetwork(self.name, Runtime.DEVICE, self.ros_distro, self.device_parameters) - self.native_network = self.config.client.create_native_network(native_network_payload) - guid = self.native_network.guid - self.logger.info('polling till the native network {} is ready'.format(self.name)) - self.native_network.poll_native_network_till_ready() - self.__class__.native_network = self.config.client.get_native_network(guid) - self.assert_native_network_status() - self.validate_refresh(guid) - self.assertEqual(self.native_network.runtime, self.device_runtime) - - self.add_network_interface_config_variable(self.docker_device) - self.logger.info('Started creating package talker component') - app_package = get_package(self.TALKER_DEVICE_PACKAGE) - prov_config = app_package.get_provision_configuration() - prov_config.add_device('default', self.docker_device) - 
prov_config.add_native_network(self.native_network, 'docker0') - self.assertEqual(prov_config.context['nativeNetworks'], [{"guid": guid, "bindParameters": - {"NETWORK_INTERFACE": 'docker0'}}]) - self.logger.info('creating deployment') - ignored_device_configs = ['ros_workspace', 'ros_distro'] - deployment = self.deploy_package(app_package, prov_config, device=self.docker_device, - ignored_device_configs=ignored_device_configs) - self.logger.info('polling till deployment is ready') - deployment.poll_deployment_till_ready(retry_count=50) - self.logger.info('deployment is ready') - deployment.deprovision() - self.logger.info('deprovisioned the deployment') - self.delete_network_interface_config_variable(self.docker_device) - - self.logger.info('Delete routed network with guid : %s' % guid) - self.config.client.delete_native_network(guid) - self.assert_native_network_stopped() - self.delete_network_interface_config_variable(self.docker_device) diff --git a/sdk_test/package/noetic_test.py b/sdk_test/package/noetic_test.py deleted file mode 100644 index e4d83c52..00000000 --- a/sdk_test/package/noetic_test.py +++ /dev/null @@ -1,45 +0,0 @@ -from __future__ import absolute_import - -from rapyuta_io.clients.package import ROSDistro - -from sdk_test.package.package_test import PackageTest -from sdk_test.config import Configuration -from sdk_test.util import get_logger, get_package, add_package, delete_package, \ - add_cloud_native_network, get_native_network, delete_native_network, delete_build - - -class NoeticTest(PackageTest): - - TALKER_MANIFEST = 'talker-noetic.json' - TALKER_BUILD = 'test-noetic-talker-build' - TALKER_PACKAGE = 'test-noetic-talker-pkg' - - @classmethod - def setUpClass(cls): - add_package(cls.TALKER_MANIFEST, cls.TALKER_PACKAGE, build_map={ - 'talker': {'talkerExec': (cls.TALKER_BUILD, cls.TALKER_MANIFEST)}, - }) - add_cloud_native_network('noetic_cloud_network', ros_distro=ROSDistro.NOETIC) - - @classmethod - def tearDownClass(cls): - 
delete_package(cls.TALKER_PACKAGE) - delete_native_network('noetic_cloud_network') - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - self.native_network = get_native_network('noetic_cloud_network') - self.deployments = [] - - def tearDown(self): - self.deprovision_all_deployments(self.deployments) - - def test_scoped_targeted_package(self): - package = get_package(self.TALKER_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_native_network(self.native_network) - deployment = self.deploy_package(package, provision_config) - deployment.poll_deployment_till_ready() - self.assert_deployment_status(deployment) - self.deployments.append(deployment) diff --git a/sdk_test/package/package_test.py b/sdk_test/package/package_test.py deleted file mode 100644 index 0815f104..00000000 --- a/sdk_test/package/package_test.py +++ /dev/null @@ -1,151 +0,0 @@ -from __future__ import absolute_import - -import re -import unittest - -import requests - -from rapyuta_io import DeploymentStatusConstants, ROSDistro -from rapyuta_io.clients.deployment import Deployment, DeploymentPhaseConstants -from rapyuta_io.clients.persistent_volumes import VolumeInstance, PersistentVolumes, DiskCapacity -from rapyuta_io.utils import to_objdict, RestClient -from rapyuta_io.utils.utils import generate_random_value - -DEFAULT_DISK_CAPACITY = DiskCapacity.GiB_32 - - -class PackageTest(unittest.TestCase): - - def setUp(self): - self.logger = None - - def get_persistent_volume(self): - persistent_volume = self.config.client.get_persistent_volume() - self.assertIsNotNone(persistent_volume, 'Persistent volume package should not be empty') - self.assertTrue(isinstance(persistent_volume, PersistentVolumes), - 'Object should be instance of PersistanceVolumes class') - self.assertEqual('io-public-persistent-volume', persistent_volume.packageId, - 'Package should be same') - return persistent_volume - - def 
get_persistent_volume_instance(self, instance_name, disk_capacity=DEFAULT_DISK_CAPACITY): - self.logger.info('Creating volume instance') - persistent_volume = self.get_persistent_volume() - volume_instance = persistent_volume.create_volume_instance(instance_name, disk_capacity) - self.assertTrue(isinstance(volume_instance, VolumeInstance), - 'Object should be an instance of VolumeInstance class') - return volume_instance - - def deploy_package(self, package, provision_config, device=None, ignored_device_configs=None): - self.logger.info('Started deploying the package %s' % package.packageName) - deployment_name = generate_random_value() - deployment = package.provision(deployment_name, provision_config) - self.assert_deployment(deployment) - self.assert_deployment_info(deployment_name, deployment, package) - self.assert_component_parameters(deployment, package.plans[0], device, - ignored_device_configs) - return deployment - - def assert_deployment(self, deployment): - self.assertTrue(isinstance(deployment, Deployment), - 'Object should be an instance of Deployment class') - self.logger.info('Package (%s) deployed (%s) successfully' - % (deployment.packageName, deployment.name)) - - def assert_component_parameters(self, deployment, plan, device=None, - ignored_device_configs=None): - if ignored_device_configs is None: - ignored_device_configs = [] - if device and device.get_runtime() == device.PRE_INSTALLED: - ignored_device_configs.append('rosbag_mount_path') - self.logger.info('Validating component parameters for the deployment: %s' % deployment.name) - for component in plan.internalComponents: - component_id = component.componentId - component_params = deployment.parameters[component_id] - self.assertEqual(component_params["component_id"], component_id) - if component.runtime == "device" and device: - for config_var in device.config_variables: - if config_var.key in ignored_device_configs: - continue - self.assertEqual(component_params[config_var.key], 
config_var.value) - self.assertEqual(component_params["device_id"], device.uuid) - - def assert_deployment_info(self, deployment_name, deployment, package): - self.logger.info('Validating deployment info for the deployment: %s(%s)' - % (deployment.name, deployment.packageName)) - self.assertEqual(deployment.name, deployment_name) - self.assertEqual(deployment.packageId, package.packageId) - self.assertEqual(deployment.packageName, package.packageName) - self.assertEqual(deployment.planId, package.plans[0].planId) - - def assert_dependent_deployment(self, deployment, dependent_deployments): - self.logger.info('Validating dependent deployment info for the deployment: %s(%s)' - % (deployment.name, deployment.packageName)) - dependent_deployment_id_list = list() - for dependent_deployment in deployment.dependentDeployments: - dependent_deployment_id_list.append(dependent_deployment["dependentDeploymentId"]) - - for dependent_deployment in dependent_deployments: - self.assertIn(dependent_deployment.deploymentId, dependent_deployment_id_list) - - def assert_deployment_status(self, deployment): - self.logger.info('Checking deployment status') - deployment_status = deployment.get_status() - self.assertEqual(deployment_status.status, DeploymentStatusConstants.RUNNING.value) - self.assertEqual(deployment_status.phase, DeploymentPhaseConstants.SUCCEEDED.value) - self.logger.info('Deployment %s(%s) started successfully' % (deployment.name, - deployment.packageId)) - - def assert_endpoint_url(self, url): - regex = re.compile( - r'^(?:http|ftp)s?://' - r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' - r'localhost|' - r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' - r'(?::\d+)?' 
- r'(?:/?|[/?]\S+)$', re.IGNORECASE) - self.assertRegexpMatches(url, regex, 'Endpoint should ben an url') - - def assert_static_route(self, url): - # Asserts that the URL (nginx server) is reachable and is returning a success status code - response = RestClient(url).execute() - self.assertEqual(response.status_code, requests.codes.OK) - - def get_service_binding(self, deployment): - self.logger.info('Sending binding request for the deployment %s' % deployment.name) - binding = deployment.get_service_binding() - self.assertIsNotNone(binding) - self.assertIn('credentials', binding, 'Credentials not found on the binding response') - if not bool(binding): - raise AssertionError('binding result should not empty') - binding_obj = to_objdict(binding) - return binding_obj - - def deprovision_all_deployments(self, deployments): - self.logger.info("Tear down!. Stopping all the deployments") - retry_count = 5 - total_deployments = 0 - success_count = 0 - for deployment in deployments: - if not deployment: - continue - try: - total_deployments = total_deployments + 1 - if isinstance(deployment, Deployment): - deployment.deprovision(retry_count) - self.logger.info('Deployment stopped: %s' % deployment.name) - success_count = success_count + 1 - elif isinstance(deployment, VolumeInstance): - deployment.destroy_volume_instance(retry_count) - self.logger.info('Volume instance deleted: %s' % deployment.name) - success_count = success_count + 1 - except Exception as err: - self.logger.error('Failed to stop deployment %s' % deployment.name) - self.logger.error(err) - self.logger.info('%d Deployment(s) were stopped out of %d' % (success_count, - total_deployments)) - - def create_cloud_routed_network(self, name, ros_distro=ROSDistro.KINETIC): - routed_network = self.config.client.create_cloud_routed_network(name, ros_distro, True) - routed_network.poll_routed_network_till_ready() - return routed_network diff --git a/sdk_test/package/rosbag_test.py b/sdk_test/package/rosbag_test.py 
deleted file mode 100644 index e58266bd..00000000 --- a/sdk_test/package/rosbag_test.py +++ /dev/null @@ -1,524 +0,0 @@ -# TODO(senapati): This test file is commented because it uses builds -# which is deprecated -# from __future__ import absolute_import -# -# import math -# import os -# import time -# from time import sleep -# -# from rapyuta_io import DeviceArch -# from rapyuta_io.clients.rosbag import ROSBagJob, ROSBagOptions, ROSBagJobStatus, UploadOptions, ROSBagUploadTypes, \ -# ROSBagOnDemandUploadOptions, ROSBagTimeRange, OverrideOptions, TopicOverrideInfo -# from rapyuta_io.utils.utils import generate_random_value -# from sdk_test.config import Configuration -# from sdk_test.package.package_test import PackageTest -# from sdk_test.util import get_logger, get_package, add_package, delete_package, add_build -# -# class ROSBagJobTest(PackageTest): -# deployment = None -# deployment_with_fast_talker = None -# deployment_with_throttling = None -# deployment_with_latching = None -# device_rosbag_job = None -# cloud_rosbag_job = None -# throttling_rosbag_job = None -# latching_rosbag_job = None -# continuous_upload_type_rosbag = None -# -# TALKER_MANIFEST = 'talker.json' -# TALKER_BUILD = 'test-rosbag-job-talker-pkg' -# -# TALKER_CLOUD_DEVICE_MANIFEST = 'talker-cloud-device.json' -# TALKER_CLOUD_DEVICE_PACKAGE = 'test-rosbag-job-talker-cloud-device-pkg' -# -# ROSBAG_TALKER_MANIFEST = 'rosbag-talker-cloud.json' -# ROSBAG_TALKER_PACKAGE = 'test-rosbag-talker-cloud-pkg' -# -# FAST_TALKER_DEVICE_WITH_ROSBAGS_MANIFEST = 'fast-talker-device-docker-with-rosbags.json' -# FAST_TALKER_DEVICE_WITH_ROSBAGS_PACKAGE = 'fast-talker-device-docker-with-rosbags-pkg' -# -# THROTTLE_LATCH_BUILD_MANIFEST = 'throttle-latch-build.json' -# THROTTLE_LATCH_BUILD_NAME = 'throttle-latch-build' -# -# THROTTLING_PACKAGE_MANIFEST = 'throttling-pkg.json' -# THROTTLING_PACKAGE_NAME = 'throttling-pkg' -# -# LATCHING_PACKAGE_MANIFEST = 'latching-pkg.json' -# LATCHING_PACKAGE_NAME = 'latching-pkg' 
-# -# @classmethod -# def setUpClass(cls): -# add_build(cls.TALKER_MANIFEST, cls.TALKER_BUILD) -# -# add_package(cls.TALKER_CLOUD_DEVICE_MANIFEST, cls.TALKER_CLOUD_DEVICE_PACKAGE, -# build_map={ -# 'talker-device': {'talker': ('talker-build', 'talker.json')}, -# 'talker-cloud': {'talker': ('talker-build', 'talker.json')}, -# }) -# add_package(cls.ROSBAG_TALKER_MANIFEST, cls.ROSBAG_TALKER_PACKAGE, -# build_map={ -# 'talker-cloud': {'talker': ('talker-build', 'talker.json')}, -# }) -# add_package(cls.FAST_TALKER_DEVICE_WITH_ROSBAGS_MANIFEST, cls.FAST_TALKER_DEVICE_WITH_ROSBAGS_PACKAGE, -# build_map={ -# 'talker-fast-device': {'talker': ('talker-build', 'talker.json')} -# }) -# add_build(cls.THROTTLE_LATCH_BUILD_MANIFEST, cls.THROTTLE_LATCH_BUILD_NAME) -# add_package(cls.THROTTLING_PACKAGE_MANIFEST, cls.THROTTLING_PACKAGE_NAME, -# build_map={ -# 'throttling-component': { -# 'throttling-executable': ('throttle-latch-build', 'throttle-latch-build.json')} -# }) -# add_package(cls.LATCHING_PACKAGE_MANIFEST, cls.LATCHING_PACKAGE_NAME, -# build_map={ -# 'latching-component': { -# 'latching-executable': ('throttle-latch-build', 'throttle-latch-build.json')} -# }) -# -# @classmethod -# def tearDownClass(cls): -# delete_package(cls.TALKER_CLOUD_DEVICE_PACKAGE, delete_builds=False) -# delete_package(cls.ROSBAG_TALKER_PACKAGE, delete_builds=False) -# delete_package(cls.FAST_TALKER_DEVICE_WITH_ROSBAGS_PACKAGE) -# delete_package(cls.THROTTLING_PACKAGE_NAME, delete_builds=False) -# delete_package(cls.LATCHING_PACKAGE_NAME) -# -# def setUp(self): -# self.config = Configuration() -# self.logger = get_logger() -# self.package = [ -# get_package(self.TALKER_CLOUD_DEVICE_PACKAGE), -# get_package(self.ROSBAG_TALKER_PACKAGE), -# get_package(self.FAST_TALKER_DEVICE_WITH_ROSBAGS_PACKAGE), -# get_package(self.THROTTLING_PACKAGE_NAME), -# get_package(self.LATCHING_PACKAGE_NAME) -# ] -# self.device = self.config.get_devices(arch=DeviceArch.AMD64, runtime='Dockercompose')[0] -# 
self.bag_filename = 'test.bag' -# self.rosbag_job_name = 'test-rosbag-defs' -# -# def tearDown(self): -# if os.path.exists(self.bag_filename): -# os.remove(self.bag_filename) -# -# def test_01_create_deployment_with_rosbag_jobs(self): -# self.logger.info('creating deployment with rosbag jobs') -# device_rosbag_job = ROSBagJob('device-init-job', ROSBagOptions(all_topics=True), -# upload_options=UploadOptions(upload_type=ROSBagUploadTypes.ON_STOP)) -# cloud_rosbag_job = ROSBagJob('cloud-init-job', ROSBagOptions(all_topics=True)) -# provision_config = self.package[0].get_provision_configuration() -# ignored_device_configs = ['network_interface'] -# provision_config.add_device('talker-device', self.device, ignore_device_config=ignored_device_configs) -# provision_config.add_rosbag_job('talker-device', device_rosbag_job) -# provision_config.add_rosbag_job('talker-cloud', cloud_rosbag_job) -# deployment = self.deploy_package(self.package[0], provision_config, -# ignored_device_configs=ignored_device_configs) -# deployment.poll_deployment_till_ready(retry_count=100, sleep_interval=5) -# self.__class__.deployment = self.config.client.get_deployment(deployment.deploymentId) -# self.assert_rosbag_jobs_present(self.deployment.deploymentId, [device_rosbag_job.name, cloud_rosbag_job.name], -# [ROSBagJobStatus.STARTING, ROSBagJobStatus.RUNNING]) -# self.assert_rosbag_jobs_in_project(device_rosbag_job.name) -# -# def test_02_create_rosbag_jobs(self): -# self.logger.info('creating rosbag jobs on cloud and device') -# self.__class__.device_rosbag_job = self.create_rosbag_job('talker-device', is_device=True) -# self.__class__.cloud_rosbag_job = self.create_rosbag_job('talker-cloud') -# self.assert_rosbag_jobs_present(self.deployment.deploymentId, -# [self.device_rosbag_job.name, self.cloud_rosbag_job.name], -# [ROSBagJobStatus.RUNNING, ROSBagJobStatus.STARTING]) -# -# def test_03_stop_rosbag_jobs(self): -# self.wait_till_jobs_are_running(self.deployment.deploymentId, 
[self.cloud_rosbag_job.guid, -# self.device_rosbag_job.guid], -# sleep_interval_in_sec=5) -# self.logger.info('stopping the running rosbag jobs on cloud and device') -# self.config.client.stop_rosbag_jobs(self.deployment.deploymentId, guids=[ -# self.device_rosbag_job.guid, self.cloud_rosbag_job.guid]) -# self.assert_rosbag_jobs_present(self.deployment.deploymentId, -# [self.device_rosbag_job.name, self.cloud_rosbag_job.name], -# [ROSBagJobStatus.STOPPING, ROSBagJobStatus.STOPPED]) -# -# def test_04_rosbag_blobs(self): -# blobs = self.wait_till_blobs_are_uploaded(sleep_interval_in_sec=5) -# self.logger.info('validating the uploaded rosbag blobs for the stopped jobs') -# self.assert_rosbag_blobs_of_device(blobs) -# self.assert_rosbag_blobs(blobs) -# -# def test_05_auto_stop_rosbag_jobs_on_deprovision(self): -# jobs = self.config.client.list_rosbag_jobs(deployment_id=self.deployment.deploymentId, -# statuses=[ROSBagJobStatus.RUNNING]) -# job_ids = list(map(lambda job: job.guid, jobs)) -# self.assertEqual(2, len(jobs)) -# init_job_names = list(map(lambda job: job.name, jobs)) -# self.logger.info('deprovisioning deployment with running rosbag jobs') -# self.deployment.deprovision() -# self.assert_rosbag_jobs_present(self.deployment.deploymentId, init_job_names, -# [ROSBagJobStatus.STOPPING, ROSBagJobStatus.STOPPED]) -# self.wait_till_blobs_are_uploaded(job_ids=job_ids, sleep_interval_in_sec=5) -# -# def test_06_create_deployment_with_rosbag_jos_in_package_config(self): -# provision_config = self.package[1].get_provision_configuration() -# deployment = self.deploy_package(self.package[1], provision_config, -# ignored_device_configs=['network_interface']) -# deployment.poll_deployment_till_ready(retry_count=100, sleep_interval=5) -# self.assert_rosbag_jobs_present(deployment.deploymentId, [self.rosbag_job_name], -# [ROSBagJobStatus.STARTING, ROSBagJobStatus.RUNNING]) -# jobs = self.config.client.list_rosbag_jobs(deployment.deploymentId) -# job_ids = [job.guid for job in 
jobs] -# self.wait_till_jobs_are_running(deployment.deploymentId) -# self.config.client.stop_rosbag_jobs(deployment.deploymentId) -# self.wait_till_blobs_are_uploaded(job_ids=job_ids) -# deployment.deprovision() -# -# def test_07_rosbag_job_with_upload_type_continuous(self): -# job_name = 'continuous_upload_type' -# -# self.logger.info('creating device deployment with rosbag job with upload type as Continuous') -# provision_config = self.package[2].get_provision_configuration() -# ignored_device_configs = ['network_interface'] -# provision_config.add_device('talker-fast-device', self.device, ignore_device_config=ignored_device_configs) -# deployment = self.deploy_package(self.package[2], provision_config, -# ignored_device_configs=ignored_device_configs) -# deployment.poll_deployment_till_ready(retry_count=100, sleep_interval=5) -# self.__class__.deployment_with_fast_talker = self.config.client.get_deployment(deployment.deploymentId) -# -# self.assert_rosbag_jobs_present(self.deployment_with_fast_talker.deploymentId, [job_name], -# [ROSBagJobStatus.STARTING, ROSBagJobStatus.RUNNING]) -# self.assert_rosbag_jobs_in_project(job_name) -# self.__class__.continuous_upload_type_rosbag = self.get_job_by_job_name(deployment.deploymentId, job_name) -# uploaded_blobs = self.wait_till_blobs_are_uploaded(job_ids=[self.continuous_upload_type_rosbag.guid]) -# -# # to ensure first split is uploaded because it continuously -# # uploads -# first_bag_uploaded = False -# for blob in uploaded_blobs: -# if blob.filename.endswith('_0.bag'): -# first_bag_uploaded = True -# break -# -# self.assertTrue(first_bag_uploaded) -# -# self.config.client.stop_rosbag_jobs( -# deployment_id=deployment.deploymentId, -# guids=[self.continuous_upload_type_rosbag.guid] -# ) -# -# def test_08_rosbag_job_with_upload_type_on_demand(self): -# self.logger.info('creating rosbag job with upload type as OnDemand') -# -# job_name = 'on_demand_upload_type' -# component_instance_id = 
self.deployment_with_fast_talker.get_component_instance_id('talker-fast-device') -# -# job_req = ROSBagJob( -# name=job_name, -# deployment_id=self.deployment_with_fast_talker.deploymentId, -# component_instance_id=component_instance_id, -# rosbag_options=ROSBagOptions( -# all_topics=True, -# max_splits=10, -# max_split_size=10 -# ), -# upload_options=UploadOptions(upload_type=ROSBagUploadTypes.ON_DEMAND), -# ) -# -# rosbag_creation_time = int(time.time()) -# job = self.config.client.create_rosbag_job(job_req) -# -# start_recording_duration = 8 -# split_duration = 60 -# -# self.logger.info('sleeping for sometime for recording to continue') -# sleep(start_recording_duration + (split_duration * 2)) -# -# from_time = rosbag_creation_time + start_recording_duration + split_duration + 10 -# to_time = from_time + split_duration -# on_demand_opts = ROSBagOnDemandUploadOptions( -# time_range=ROSBagTimeRange( -# from_time=from_time, -# to_time=to_time -# ) -# ) -# -# job.patch(on_demand_options=on_demand_opts) -# -# uploaded_blobs = self.wait_till_blobs_are_uploaded(job_ids=[job.guid]) -# -# # to ensure first split is not uploaded because it is not -# # within the time range provided -# for blob in uploaded_blobs: -# self.assertFalse(blob.filename.endswith('_0.bag')) -# -# self.deployment_with_fast_talker.deprovision() -# -# def test_09_rosbag_job_throttling(self): -# """ -# Default publishing rate on channels -# /topic1: 15 -# /topic2: 30 -# /topic3: 5 -# /topic4: 20 -# """ -# difference_margin = 8 -# topic2_throttled_freq = 15 -# topic3_throttled_freq = 2 -# self.logger.info('deploying throttling package') -# device_rosbag_job = ROSBagJob('device-init-job', ROSBagOptions(all_topics=True), -# upload_options=UploadOptions(upload_type=ROSBagUploadTypes.ON_STOP)) -# provision_config = self.package[3].get_provision_configuration() -# ignored_device_configs = ['network_interface'] -# provision_config.add_device('throttling-component', self.device, 
ignore_device_config=ignored_device_configs) -# provision_config.add_rosbag_job('throttling-component', device_rosbag_job) -# deployment = self.deploy_package(self.package[3], provision_config, -# ignored_device_configs=ignored_device_configs) -# deployment.poll_deployment_till_ready(retry_count=100, sleep_interval=5) -# self.__class__.deployment_with_throttling = self.config.client.get_deployment(deployment.deploymentId) -# -# component_instance_id = self.deployment_with_throttling.get_component_instance_id('throttling-component') -# throttling_rosbag_job = ROSBagJob('rosbag-test-throttling', -# deployment_id=self.deployment_with_throttling.deploymentId, -# component_instance_id=component_instance_id, -# rosbag_options=ROSBagOptions(all_topics=True), -# upload_options=UploadOptions(upload_type=ROSBagUploadTypes.ON_STOP), -# override_options=OverrideOptions( -# topic_override_info=[ -# TopicOverrideInfo('/topic2', topic2_throttled_freq, False), -# TopicOverrideInfo('/topic3', topic3_throttled_freq, False), -# ], -# exclude_topics=['/topic4'] -# )) -# self.__class__.throttling_rosbag_job = self.config.client.create_rosbag_job(throttling_rosbag_job) -# self.assert_rosbag_jobs_present(self.deployment_with_throttling.deploymentId, -# [throttling_rosbag_job.name], -# [ROSBagJobStatus.STARTING, ROSBagJobStatus.RUNNING]) -# self.assert_rosbag_jobs_in_project(throttling_rosbag_job.name) -# self.wait_till_jobs_are_running(self.deployment_with_throttling.deploymentId, -# [self.throttling_rosbag_job.guid], sleep_interval_in_sec=5) -# self.logger.info('sleeping for 8 seconds') -# time.sleep(8) -# self.config.client.stop_rosbag_jobs(self.deployment_with_throttling.deploymentId, -# guids=[self.throttling_rosbag_job.guid]) -# self.assert_rosbag_jobs_present(self.deployment_with_throttling.deploymentId, -# [self.throttling_rosbag_job.name], -# [ROSBagJobStatus.STOPPING, ROSBagJobStatus.STOPPED]) -# uploaded_blobs = self.wait_till_blobs_are_uploaded(sleep_interval_in_sec=5, -# 
job_ids=[self.throttling_rosbag_job.guid]) -# # self.logger.info('validating the uploaded rosbag blobs for the stopped jobs') -# """ -# TODO: -# Observation: This following assertion succeeds on a newly onboarded device but fails on reusing the same device. -# Cause of failure: The bag files fetched on basis of device id > bag files created during this deployment. -# Inference: bag files are not getting deleted after each test. -# Hence, Commenting this assertion for now. -# """ -# # self.assert_rosbag_blobs_of_device(uploaded_blobs) -# self.assertEqual(len(uploaded_blobs), 1) -# uploaded_blob = uploaded_blobs[0] -# relevant_topics = ['/topic1', '/topic2', '/topic3', '/topic4'] -# record_duration = uploaded_blob.info.duration -# topics = list(filter(lambda topic: topic.name in relevant_topics, uploaded_blob.info.topics)) -# topic1_metadata = next(filter(lambda topic: topic.name == '/topic1', topics), None) -# topic2_metadata = next(filter(lambda topic: topic.name == '/topic2', topics), None) -# topic3_metadata = next(filter(lambda topic: topic.name == '/topic3', topics), None) -# -# # asserting that the message count numbers recorded on topic1 and topic2 are close -# expected_msg_count_t1_t2 = record_duration * topic2_throttled_freq -# self.assertGreater(topic1_metadata.message_count, expected_msg_count_t1_t2 - difference_margin) -# self.assertLess(topic1_metadata.message_count, expected_msg_count_t1_t2 + difference_margin) -# self.assertGreater(topic2_metadata.message_count, expected_msg_count_t1_t2 - difference_margin) -# self.assertLess(topic2_metadata.message_count, expected_msg_count_t1_t2 + difference_margin) -# self.logger.info("Expected msg count: %s, " -# "Actual msg count on '/topic1': %s, " -# "Actual msg count on '/topic2': %s, " -# "Allowed difference margin: %s", -# expected_msg_count_t1_t2, topic1_metadata.message_count, topic2_metadata.message_count, -# difference_margin) -# -# self.assertGreater(topic3_metadata.message_count, record_duration * 
topic3_throttled_freq - 5) -# self.assertLess(topic3_metadata.message_count, record_duration * topic3_throttled_freq + 5) -# self.deployment_with_throttling.deprovision() -# -# def test_10_rosbag_job_latching(self): -# self.logger.info('deploying latching package') -# device_rosbag_job = ROSBagJob('device-init-job', ROSBagOptions(all_topics=True), -# upload_options=UploadOptions(upload_type=ROSBagUploadTypes.ON_STOP)) -# provision_config = self.package[4].get_provision_configuration() -# ignored_device_configs = ['network_interface'] -# provision_config.add_device('latching-component', self.device, ignore_device_config=ignored_device_configs) -# provision_config.add_rosbag_job('latching-component', device_rosbag_job) -# deployment = self.deploy_package(self.package[4], provision_config, -# ignored_device_configs=ignored_device_configs) -# deployment.poll_deployment_till_ready(retry_count=100, sleep_interval=5) -# self.__class__.deployment_with_latching = self.config.client.get_deployment(deployment.deploymentId) -# -# component_instance_id = self.deployment_with_latching.get_component_instance_id('latching-component') -# latching_rosbag_job = ROSBagJob('rosbag-test-latching', -# deployment_id=self.deployment_with_latching.deploymentId, -# component_instance_id=component_instance_id, -# rosbag_options=ROSBagOptions(all_topics=True, max_splits=5, max_split_size=20), -# upload_options=UploadOptions(upload_type=ROSBagUploadTypes.ON_STOP), -# override_options=OverrideOptions( -# topic_override_info=[ -# TopicOverrideInfo('/map', latched=True), -# ], -# )) -# self.__class__.latching_rosbag_job = self.config.client.create_rosbag_job(latching_rosbag_job) -# self.assert_rosbag_jobs_present(self.deployment_with_latching.deploymentId, -# [latching_rosbag_job.name], -# [ROSBagJobStatus.STARTING, ROSBagJobStatus.RUNNING]) -# self.assert_rosbag_jobs_in_project(latching_rosbag_job.name) -# self.wait_till_jobs_are_running(self.deployment_with_latching.deploymentId, -# 
[self.latching_rosbag_job.guid], sleep_interval_in_sec=5) -# self.logger.info('sleeping for 60 seconds') -# time.sleep(60) -# self.config.client.stop_rosbag_jobs(self.deployment_with_latching.deploymentId, -# guids=[self.latching_rosbag_job.guid]) -# self.assert_rosbag_jobs_present(self.deployment_with_latching.deploymentId, -# [self.latching_rosbag_job.name], -# [ROSBagJobStatus.STOPPING, ROSBagJobStatus.STOPPED]) -# -# uploaded_blobs = self.wait_till_blobs_are_uploaded(sleep_interval_in_sec=5, -# job_ids=[self.latching_rosbag_job.guid]) -# # self.logger.info('validating the uploaded rosbag blobs for the stopped jobs') -# """ -# TODO: -# Observation: This following assertion succeeds on a newly onboarded device but fails on reusing the same device. -# Cause of failure: The bag files fetched on basis of device id > bag files created during this deployment. -# Inference: bag files are not getting deleted after each test. -# Hence, Commenting this assertion for now. -# """ -# # self.assert_rosbag_blobs_of_device(uploaded_blobs) -# """ -# TODO: -# There's an anomaly in the following assertion. -# len(uploaded_blobs) outputs 3 while debugging, while in the UI there were 6 splits. -# Hence, Commenting the assertion for now (even though it passes because 3>1) unless the reason behind this -# behaviour is understood. 
-# """ -# # self.assertGreater(len(uploaded_blobs), 1) -# -# topic_absent_in_split = False -# for blob in uploaded_blobs: -# topics = blob.info.topics -# x = next((topic for topic in topics if topic.name == '/map'), None) -# if x is None: -# topic_absent_in_split = True -# break -# -# self.assertFalse(topic_absent_in_split) -# self.deployment_with_latching.deprovision() -# -# def assert_rosbag_jobs_present(self, deployment_id, job_names, statuses=None): -# self.logger.info('checking jobs ') -# jobs_list = self.config.client.list_rosbag_jobs(deployment_id) -# jobs = [x for x in jobs_list if x.name in job_names] -# self.assertNotEqual(len(jobs), 0, 'no jobs were started') -# if statuses: -# for job in jobs: -# self.assertTrue(job.status in statuses) -# -# def assert_rosbag_jobs_in_project(self, job_name): -# self.logger.info('checking jobs in project ') -# jobs_list = self.config.client.list_rosbag_jobs_in_project([self.device.deviceId]) -# self.assertEqual((job_name in [job.name for job in jobs_list]), True) -# -# def assert_rosbag_blobs(self, blobs): -# for blob in blobs: -# self.config.client.download_rosbag_blob(blob.guid, filename=self.bag_filename) -# self.assert_bag_file_exists() -# self.config.client.delete_rosbag_blob(blob.guid) -# self.assert_rosbag_blob_deleted(blob.guid) -# -# def assert_rosbag_blobs_of_device(self, blobs): -# self.logger.info('checking if the blobs fetched based on device id are present in the uploaded blobs') -# blobs_based_on_device_id = self.config.client.list_rosbag_blobs(device_ids=[self.device.deviceId]) -# guids_based_on_device_id = [blob.guid for blob in blobs_based_on_device_id] -# all_guids = [blob.guid for blob in blobs] -# self.assertEqual(all(guid in all_guids for guid in guids_based_on_device_id), True) -# -# def assert_bag_file_exists(self): -# self.assertTrue(os.path.exists(self.bag_filename)) -# -# def assert_rosbag_blob_deleted(self, blob_guid): -# blobs = self.config.client.list_rosbag_blobs(guids=[blob_guid]) -# 
self.assertEqual(len(blobs), 0) -# -# def create_rosbag_job(self, component_name, is_device=False): -# self.logger.info('creating rosbag job for {} component'.format(component_name)) -# rosbag_job_name = generate_random_value() -# upload_options = None -# component_instance_id = self.deployment.get_component_instance_id(component_name) -# if is_device: -# upload_options = UploadOptions(upload_type=ROSBagUploadTypes.ON_STOP) -# rosbag_job = ROSBagJob(rosbag_job_name, ROSBagOptions(all_topics=True), -# deployment_id=self.deployment.deploymentId, -# component_instance_id=component_instance_id, -# upload_options=upload_options) -# return self.config.client.create_rosbag_job(rosbag_job) -# -# def wait_till_jobs_are_running(self, deployment_id, guids=None, retry_limit=50, sleep_interval_in_sec=1): -# self.logger.info('waiting for rosbag jobs to start running') -# retry_count = 0 -# while retry_count < retry_limit: -# jobs = self.config.client.list_rosbag_jobs(deployment_id, -# guids=guids) -# running_jobs = [job for job in jobs if job.status == ROSBagJobStatus.RUNNING] -# if len(jobs) == len(running_jobs): -# self.logger.info('rosbag jobs are running') -# return -# sleep(sleep_interval_in_sec) -# retry_count += 1 -# -# raise Exception('rosbag jobs are not running, waiting timed out') -# -# def wait_till_blobs_are_uploaded( -# self, -# job_ids=None, -# retry_limit=50, -# sleep_interval_in_sec=1, -# list_blobs_sleep_interval_in_sec=5 -# ): -# if not job_ids: -# job_ids = [self.cloud_rosbag_job.guid, self.device_rosbag_job.guid] -# self.logger.info('waiting for rosbag blobs to finish uploading') -# -# blobs = [] -# retry_count = 0 -# job_ids_copy = job_ids.copy() -# while retry_count < retry_limit: -# blobs = self.config.client.list_rosbag_blobs(job_ids=job_ids) -# if not blobs: -# sleep(list_blobs_sleep_interval_in_sec) -# continue -# -# for blob in blobs: -# if blob.job.guid in job_ids_copy: -# job_ids_copy.remove(blob.job.guid) -# -# if len(job_ids_copy) == 0: -# break 
-# -# if not job_ids_copy: -# break -# -# sleep(list_blobs_sleep_interval_in_sec) -# -# if job_ids_copy: -# raise Exception( -# 'not even a single rosbag blob has been uploaded for job ids {}, waiting timed out'.format(job_ids_copy) -# ) -# -# for blob in blobs: -# blob.poll_till_ready(retry_count=retry_limit, sleep_interval=sleep_interval_in_sec) -# -# self.logger.info('rosbag blobs are uploaded') -# -# return blobs -# -# def get_job_by_job_name(self, deployment_id, job_name): -# jobs = self.config.client.list_rosbag_jobs(deployment_id) -# for job in jobs: -# if job.name == job_name: -# return job -# -# return None diff --git a/sdk_test/package/routed_networks_tests.py b/sdk_test/package/routed_networks_tests.py deleted file mode 100644 index 8db1559f..00000000 --- a/sdk_test/package/routed_networks_tests.py +++ /dev/null @@ -1,158 +0,0 @@ -from __future__ import absolute_import -from rapyuta_io import DeploymentStatusConstants, DeviceArch -from rapyuta_io.clients.deployment import DeploymentPhaseConstants -from rapyuta_io.clients.package import Runtime, ROSDistro, RestartPolicy -from rapyuta_io.utils.utils import generate_random_value -from sdk_test.config import Configuration -from sdk_test.device.device_test import DeviceTest -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, add_package, delete_package, get_package -from rapyuta_io.clients.routed_network import Parameters -from rapyuta_io.clients.common_models import Limits - -class RoutedNetworkTest(PackageTest, DeviceTest): - - LISTENER_MANIFEST = 'listener.json' - LISTENER_PACKAGE = 'test-routed-network-pkg' - - @classmethod - def setUpClass(cls): - add_package(cls.LISTENER_MANIFEST, cls.LISTENER_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.LISTENER_PACKAGE) - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - self.name = 'net-' + generate_random_value() - self.routed_network = None - self.ros_distro = 
ROSDistro.MELODIC - self.shared = True - self.routed_network = None - self.docker_device = self.config.get_devices(arch=DeviceArch.AMD64, runtime='Dockercompose')[0] - self.supervisor_device = self.config.get_devices(arch=DeviceArch.AMD64, runtime='Preinstalled')[0] - self.parameters = Parameters(Limits(cpu=0.5, memory=1024)) - - def create_routed_network(self, runtime, parameters): - if runtime == Runtime.CLOUD: - self.routed_network = self.config.client.create_cloud_routed_network( - self.name, self.ros_distro, self.shared) - else: - device = self.config.client.get_device(parameters['device_id']) - self.routed_network = self.config.client.create_device_routed_network( - self.name, self.ros_distro, self.shared, device, - parameters['NETWORK_INTERFACE'], parameters['restart_policy']) - self.routed_network.poll_routed_network_till_ready() - - def assert_deployment_status(self, routed_network): - self.logger.info('Checking network status') - status = routed_network.get_status() - self.assertEqual(status.status, DeploymentStatusConstants.RUNNING.value) - self.assertEqual(status.phase, DeploymentPhaseConstants.SUCCEEDED.value) - self.logger.info('network %s(%s) started successfully' % (routed_network.name, - routed_network.guid)) - - def assert_routed_network_present_in_list(self, guid): - all_routed_network = self.config.client.get_all_routed_networks() - routed_network = list(filter(lambda network: network.guid == guid, all_routed_network))[0] - self.assertEqual(routed_network.name, self.routed_network.name) - - def assert_routed_network_stopped(self, guid): - all_routed_network = self.config.client.get_all_routed_networks() - routed_network = list(filter(lambda network: network.guid == guid, all_routed_network))[0] - self.assertEqual(routed_network.internalDeploymentStatus.phase, - DeploymentPhaseConstants.DEPLOYMENT_STOPPED.value) - - def validate_refresh(self, guid): - partial_net = [n for n in self.config.client.get_all_routed_networks() if n.guid == guid][0] - 
self.assertTrue(partial_net.is_partial) - with self.assertRaises(AttributeError): - partial_net.internalDeploymentStatus.status - partial_net.refresh() - self.assertFalse(partial_net.is_partial) - self.assertTrue(partial_net.internalDeploymentStatus.status) - - def assert_routed_network_parameters(self, parameters, network): - self.assertEqual(parameters['device_id'], self.docker_device.deviceId) - self.assertEqual(parameters['NETWORK_INTERFACE'], network) - self.assertEqual(parameters['restart_policy'], RestartPolicy.OnFailure) - - def test_add_device_routed_network(self): - self.logger.info('Started creating device routed network') - self.create_routed_network(Runtime.DEVICE, {'device_id': self.docker_device.deviceId, - 'NETWORK_INTERFACE': 'lo', - 'restart_policy': RestartPolicy.OnFailure}) - - self.logger.info('getting device routed network') - self.routed_network = self.config.client.get_routed_network(self.routed_network.guid) - self.assertEqual(self.routed_network.runtime, 'device') - self.assertEqual(self.routed_network.guid, self.routed_network.guid) - self.assert_routed_network_parameters(self.routed_network.parameters, 'lo') - self.assert_deployment_status(self.routed_network) - self.assert_routed_network_present_in_list(self.routed_network.guid) - - self.logger.info('Started creating package listener component') - app_package = get_package(self.LISTENER_PACKAGE) - prov_config = app_package.get_provision_configuration() - ignore_device_config = ['network_interface'] - prov_config.add_device('default', self.supervisor_device, ignore_device_config=ignore_device_config) - prov_config.add_routed_network(self.routed_network, 'docker0') - guid = self.routed_network.guid - self.assertEqual(prov_config.context['routedNetworks'], [{"guid": guid, "bindParameters": - {"NETWORK_INTERFACE": 'docker0'}}]) - self.logger.info('creating deployment') - deployment = self.deploy_package(app_package, prov_config, device=self.supervisor_device, - 
ignored_device_configs=ignore_device_config) - deployment.poll_deployment_till_ready() - deployment.deprovision() - - self.logger.info('Delete routed network with guid : %s' % guid) - self.routed_network.delete() - self.assert_routed_network_stopped(guid) - - def test_create_cloud_routed_network(self): - self.logger.info('Started creating cloud routed network') - self.create_routed_network(Runtime.CLOUD, {}) - routed_network = self.config.client.get_routed_network(self.routed_network.guid) - self.assertEqual(self.routed_network.runtime, 'cloud') - self.assertEqual(routed_network.guid, self.routed_network.guid) - self.assertEqual(routed_network.parameters, self.parameters.to_dict()) - self.assert_deployment_status(routed_network) - self.assert_routed_network_present_in_list(routed_network.guid) - guid = self.routed_network.guid - self.logger.info('Delete routed network with guid : %s' % guid) - routed_network.delete() - self.assert_routed_network_stopped(guid) - - def test_create_cloud_routed_network_with_parameters(self): - self.logger.info('Started creating cloud routed network with parameters') - self.create_routed_network(Runtime.CLOUD, Parameters(Limits(cpu=0.5, memory=1024))) - routed_network = self.config.client.get_routed_network(self.routed_network.guid) - self.assertEqual(self.routed_network.runtime, 'cloud') - self.assertEqual(routed_network.guid, self.routed_network.guid) - self.assertEqual(routed_network.parameters, self.parameters.to_dict()) - self.assert_deployment_status(routed_network) - self.assert_routed_network_present_in_list(routed_network.guid) - guid = self.routed_network.guid - self.validate_refresh(guid) - self.logger.info('Delete routed network with guid : %s' % guid) - self.config.client.delete_routed_network(guid) - self.assert_routed_network_stopped(guid) - - def test_create_device_routed_network(self): - self.logger.info('Started creating device routed network') - self.create_routed_network(Runtime.DEVICE, {'device_id': 
self.docker_device.deviceId, - 'NETWORK_INTERFACE': 'docker0', - 'restart_policy': RestartPolicy.OnFailure}) - self.routed_network = self.config.client.get_routed_network(self.routed_network.guid) - self.assertEqual(self.routed_network.runtime, 'device') - self.assertEqual(self.routed_network.guid, self.routed_network.guid) - self.assert_routed_network_parameters(self.routed_network.parameters, 'docker0') - self.assert_deployment_status(self.routed_network) - self.assert_routed_network_present_in_list(self.routed_network.guid) - guid = self.routed_network.guid - self.logger.info('Delete routed network with guid : %s' % guid) - self.routed_network.delete() - self.assert_routed_network_stopped(guid) diff --git a/sdk_test/package/static_route_test.py b/sdk_test/package/static_route_test.py deleted file mode 100644 index c6d5b794..00000000 --- a/sdk_test/package/static_route_test.py +++ /dev/null @@ -1,100 +0,0 @@ -from __future__ import absolute_import -from sdk_test.config import Configuration -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, add_package, delete_package, get_package - - -class StaticRouteTest(PackageTest): - STATIC_ROUTE_1 = 'nginx' - STATIC_ROUTE_2 = 'nginx2' - ENDPOINT_1 = 'test' - ENDPOINT_2 = 'test2' - - NGINX_MULTI_COMPONENT_MANIFEST = 'nginx-multi-component.json' - NGINX_MULTI_COMPONENT_PACKAGE = 'test-static-route-nginx-multi-component-pkg' - - NGINX_SINGLE_COMPONENT_MANIFEST = 'nginx-single-component.json' - NGINX_SINGLE_COMPONENT_PACKAGE = 'test-static-route-nginx-single-component-pkg' - - @classmethod - def setUpClass(cls): - add_package(cls.NGINX_MULTI_COMPONENT_MANIFEST, - cls.NGINX_MULTI_COMPONENT_PACKAGE) - add_package(cls.NGINX_SINGLE_COMPONENT_MANIFEST, - cls.NGINX_SINGLE_COMPONENT_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.NGINX_MULTI_COMPONENT_PACKAGE) - delete_package(cls.NGINX_SINGLE_COMPONENT_PACKAGE) - - def setUp(self): - self.config = Configuration() - 
self.logger = get_logger() - self.volume_instance = None - self.deployment = None - self.static_routes = [] - self.nginx_multi_pkg = get_package(self.NGINX_MULTI_COMPONENT_PACKAGE) - self.nginx_single_pkg = get_package(self.NGINX_SINGLE_COMPONENT_PACKAGE) - - def tearDown(self): - self.deprovision_all_deployments([self.deployment]) - self._remove_static_routes() - - def _create_static_routes(self): - sr1 = self.config.client.create_static_route(self.STATIC_ROUTE_1) - sr2 = self.config.client.create_static_route(self.STATIC_ROUTE_2) - # Doing a get again to get the urlString field - self.static_routes.append(self.config.client.get_static_route(sr1.guid)) - self.static_routes.append(self.config.client.get_static_route(sr2.guid)) - - def _remove_static_routes(self): - self.logger.info("Removing all static routes") - for route in self.static_routes: - result = route.delete() - if not result: - self.logger.warn("Error while cleaning up static routes {}".format(route)) - - def test_deployment_with_multiple_static_routes(self): - self.logger.info("Started multi component static route deployment") - provision_configuration = self.nginx_multi_pkg.get_provision_configuration() - self._create_static_routes() - provision_configuration.add_static_route(self.STATIC_ROUTE_1, self.ENDPOINT_1, self.static_routes[0]) - provision_configuration.add_static_route(self.STATIC_ROUTE_2, self.ENDPOINT_2, self.static_routes[1]) - self.deployment = self.deploy_package(self.nginx_multi_pkg, provision_configuration) - self.deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.deployment) - self.assert_static_route('https://' + str(self.static_routes[0].urlString)) - self.assert_static_route('https://' + str(self.static_routes[1].urlString)) - - def test_deployment_with_single_static_routes(self): - self.logger.info("Started single component static route deployment") - provision_configuration = self.nginx_single_pkg.get_provision_configuration() - 
self._create_static_routes() - provision_configuration.add_static_route(self.STATIC_ROUTE_1, self.ENDPOINT_1, self.static_routes[0]) - self.deployment = self.deploy_package(self.nginx_single_pkg, provision_configuration) - self.deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.deployment) - self.assert_static_route('https://' + str(self.static_routes[0].urlString)) - - def test_get_static_route_by_name_404_case(self): - self._create_static_routes() - route = self.config.client.get_static_route_by_name('temp') - self.assertIsNone(route) - - # TODO(senapati): This test is commented because filter static route - # API is deprecated in v1 api server. - # def test_get_static_route_by_name_success(self): - # self._create_static_routes() - # route = self.config.client.get_static_route_by_name(self.STATIC_ROUTE_1) - # route_name = route['urlPrefix'].split('-')[0] - # self.assertEqual(route_name, self.STATIC_ROUTE_1) - - def test_delete_static_route_success(self): - route1 = self.config.client.create_static_route(self.STATIC_ROUTE_1) - route_guid = route1.guid - self.config.client.delete_static_route(route_guid) - route = self.config.client.get_static_route_by_name(self.STATIC_ROUTE_1) - self.assertIsNone(route) - - diff --git a/sdk_test/package/transformer_with_docker_device_test.py b/sdk_test/package/transformer_with_docker_device_test.py deleted file mode 100644 index 9f71ea4b..00000000 --- a/sdk_test/package/transformer_with_docker_device_test.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import absolute_import -from rapyuta_io import DeviceArch -from sdk_test.config import Configuration -from sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, get_package, delete_package, add_package - - -class TestTransformerWithDockerDevice(PackageTest): - - TALKER_DOCKER_MANIFEST = 'talker-docker.json' - LISTENER_DOCKER_MANIFEST = 'listener-docker.json' - CLOUD_TRANSFORM_MANIFEST = 'cloud-transform.json' - - 
TALKER_DOCKER_PACKAGE = 'test-transformer-talker-docker-pkg' - LISTENER_DOCKER_PACKAGE = 'test-transformer-listener-docker-pkg' - CLOUD_TRANSFORM_PACKAGE = 'test-transformer-cloud-transform-pkg' - - @classmethod - def setUpClass(cls): - add_package(cls.TALKER_DOCKER_MANIFEST, cls.TALKER_DOCKER_PACKAGE) - add_package(cls.LISTENER_DOCKER_MANIFEST, cls.LISTENER_DOCKER_PACKAGE) - add_package(cls.CLOUD_TRANSFORM_MANIFEST, cls.CLOUD_TRANSFORM_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.TALKER_DOCKER_PACKAGE) - delete_package(cls.LISTENER_DOCKER_PACKAGE) - delete_package(cls.CLOUD_TRANSFORM_PACKAGE) - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - self.device = self.config.get_devices(arch=DeviceArch.AMD64, runtime='Dockercompose')[0] - self.talker_deployment = None - self.cloud_transform_deployment = None - self.listener_deployment = None - self.routed_network = self.create_cloud_routed_network('transformer_with_docker_device') - - def tearDown(self): - self.deprovision_all_deployments([self.talker_deployment, self.cloud_transform_deployment, - self.listener_deployment]) - self.routed_network.delete() - - def deploy_talker_package(self, device): - self.logger.info('Deploying talker package') - package = get_package(self.TALKER_DOCKER_PACKAGE) - provision_config = package.get_provision_configuration() - ignored_device_configs = ['ros_workspace', 'ros_distro'] - provision_config.add_device("default", device, ignored_device_configs) - provision_config.add_routed_network(self.routed_network) - self.talker_deployment = self.deploy_package(package, provision_config, device, - ignored_device_configs) - - def deploy_cloud_transform_package(self): - package = get_package(self.CLOUD_TRANSFORM_PACKAGE) - provision_config = package.get_provision_configuration() - provision_config.add_dependent_deployment(self.talker_deployment) - provision_config.add_routed_network(self.routed_network) - self.logger.info('Deploying 
cloud transform package') - self.cloud_transform_deployment = self.deploy_package(package, provision_config) - self.assert_dependent_deployment(self.cloud_transform_deployment, [self.talker_deployment]) - - def deploy_listener_package(self, device): - package = get_package(self.LISTENER_DOCKER_PACKAGE) - provision_config = package.get_provision_configuration() - ignored_device_configs = ['ros_workspace', 'ros_distro'] - provision_config.add_device("default", device, ignored_device_configs) - provision_config.add_routed_networks([self.routed_network]) - provision_config.add_dependent_deployment(self.cloud_transform_deployment) - self.listener_deployment = self.deploy_package(package, provision_config, device, - ignored_device_configs) - self.assert_dependent_deployment(self.listener_deployment, [self.cloud_transform_deployment]) - - # TODO(senapati): This test is commented as its using build based package - # which is not supported anymore - # def test_deploy_transformer_with_docker_device(self): - # self.logger.info('Started transformer with docker device test case') - # self.deploy_talker_package(self.device) - # self.talker_deployment.poll_deployment_till_ready() - # self.assert_deployment_status(self.talker_deployment) - # self.deploy_cloud_transform_package() - # self.cloud_transform_deployment.poll_deployment_till_ready() - # self.assert_deployment_status(self.cloud_transform_deployment) - # self.deploy_listener_package(self.device) - # self.listener_deployment.poll_deployment_till_ready() - # self.assert_deployment_status(self.listener_deployment) diff --git a/sdk_test/package/volume_test.py b/sdk_test/package/volume_test.py deleted file mode 100644 index 5cf3b614..00000000 --- a/sdk_test/package/volume_test.py +++ /dev/null @@ -1,89 +0,0 @@ -from __future__ import absolute_import - -from rapyuta_io import DeviceArch -from rapyuta_io.clients.package import ExecutableMount, Runtime, RestartPolicy -from sdk_test.config import Configuration -from 
sdk_test.package.package_test import PackageTest -from sdk_test.util import get_logger, delete_package, add_package, get_package - -MOUNT_PATH = "/data" - - -class TestVolume(PackageTest): - - PV_READER_MANIFEST = 'pv-reader.json' - PV_READER_PACKAGE = 'test-volume-pv-reader-pkg' - DEVICE_VOLUME_MANIFEST = 'device-volume.json' - DEVICE_VOLUME_PACKAGE = 'test-device-volume-pkg' - - @classmethod - def setUpClass(cls): - add_package(cls.PV_READER_MANIFEST, cls.PV_READER_PACKAGE) - add_package(cls.DEVICE_VOLUME_MANIFEST, cls.DEVICE_VOLUME_PACKAGE) - - @classmethod - def tearDownClass(cls): - delete_package(cls.PV_READER_PACKAGE) - delete_package(cls.DEVICE_VOLUME_PACKAGE) - - def setUp(self): - self.config = Configuration() - self.logger = get_logger() - self.volume_instance = None - self.deployment = None - self.package = get_package(self.PV_READER_PACKAGE) - self.device_package = get_package(self.DEVICE_VOLUME_PACKAGE) - self.docker_device = self.config.get_devices(arch=DeviceArch.AMD64, runtime='Dockercompose')[0] - self.docker_device.refresh() - - def tearDown(self): - self.deprovision_all_deployments([self.deployment, self.volume_instance]) - - def test_01_persistent_volume_as_dependent_deployment(self): - self.logger.info("Started persistent volume as dependent deployment") - self.volume_instance = self.get_persistent_volume_instance(instance_name='volume_instance') - self.volume_instance.poll_deployment_till_ready() - provision_configuration = self.package.get_provision_configuration() - self.logger.info('Adding persistent volume as dependent deployment') - provision_configuration.add_dependent_deployment(self.volume_instance) - provision_configuration.mount_volume("default", volume=self.volume_instance, mount_path=MOUNT_PATH) - self.deployment = self.deploy_package(self.package, provision_configuration) - self.deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.deployment) - self.validate_refresh() - - def 
test_02_persistent_volume_as_dependent_deployment_with_executable_mounts(self): - self.logger.info("Started persistent volume as dependent deployment") - self.volume_instance = self.get_persistent_volume_instance(instance_name='test_executable_mounts') - self.volume_instance.poll_deployment_till_ready() - provision_configuration = self.package.get_provision_configuration() - self.logger.info('Adding persistent volume as dependent deployment') - provision_configuration.add_dependent_deployment(self.volume_instance) - executable_mounts = [ - ExecutableMount(exec_name='CompReaderExec', mount_path='/test_path', sub_path='test_subpath')] - provision_configuration.mount_volume("default", volume=self.volume_instance, mount_path=None, - executable_mounts=executable_mounts) - self.deployment = self.deploy_package(self.package, provision_configuration) - self.deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.deployment) - - def test_03_device_volume_with_executable_mounts(self): - self.logger.info("Started device volume") - self.volume_instance = None - provision_configuration = self.device_package.get_provision_configuration() - self.logger.info('Adding device to component') - provision_configuration.add_device('default', self.docker_device) - exec_mounts = [ExecutableMount('nginx', '/tmp/', '/home/rapyuta/')] - self.logger.info('Adding mount paths to device volume') - provision_configuration.mount_volume('default', device=self.docker_device, mount_path=None, executable_mounts=exec_mounts) - self.deployment = self.deploy_package(self.device_package, provision_configuration) - self.deployment.poll_deployment_till_ready() - self.assert_deployment_status(self.deployment) - - def validate_refresh(self): - partial_volume = [v for v in self.get_persistent_volume().get_all_volume_instances() - if v.deploymentId == self.volume_instance.deploymentId][0] - self.assertTrue(partial_volume.is_partial) - partial_volume.refresh() - 
self.assertFalse(partial_volume.is_partial) - self.assertTrue(partial_volume.parameters) diff --git a/sdk_test/run_rio_sdk_test.py b/sdk_test/run_rio_sdk_test.py index 6e1e25e1..234f4ee5 100644 --- a/sdk_test/run_rio_sdk_test.py +++ b/sdk_test/run_rio_sdk_test.py @@ -36,16 +36,17 @@ def get_concurrent_test_suites(self): def setUpSuite(self): self.logger.info('Creating project') self.config.create_project() - self.logger.info('Creating secrets') - self.config.create_secrets() + # Secrets not needed for sdk integration tests + # self.logger.info('Creating secrets') + # self.config.create_secrets() self.onboard_devices() self.wait_for_devices() def tearDownSuite(self): self.remove_devices() self.config.set_devices(None) - self.logger.info('Deleting secrets') - self.config.delete_secrets() + # self.logger.info('Deleting secrets') + # self.config.delete_secrets() self.logger.info('Deleting project') self.config.delete_project() diff --git a/sdk_test/util.py b/sdk_test/util.py index 780ea8f8..cd935528 100644 --- a/sdk_test/util.py +++ b/sdk_test/util.py @@ -1,5 +1,6 @@ from __future__ import absolute_import +import logging from time import sleep from rapyuta_io.clients.model import Command @@ -11,6 +12,18 @@ _NATIVE_NETWORK_MAP = dict() +def get_logger(): + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + logger = logging.getLogger('RIO_SDK Logger') + logger.setLevel(logging.DEBUG) + if not logger.handlers: + console_handler = logging.StreamHandler() + console_handler.setFormatter(formatter) + logger.handler_set = True + logger.addHandler(console_handler) + return logger + def start_roscore(device, bg=True): command = Command(cmd='source /opt/ros/melodic/setup.bash && roscore', shell='/bin/bash', bg=bg) @@ -21,4 +34,3 @@ def start_roscore(device, bg=True): def stop_roscore(device, bg=True): command = Command(cmd='pkill roscore', shell='/bin/bash', bg=bg) device.execute_command(command, retry_limit=10) - diff --git 
a/tests/user_test.py b/tests/user_test.py index 11e132c8..72e81314 100644 --- a/tests/user_test.py +++ b/tests/user_test.py @@ -31,7 +31,6 @@ def test_get_authenticated_user_details_success(self, mock_request): for project in user.projects: self.assertIsInstance(project, Project) self.assertIsNotNone(project.guid) - self.assertTrue(hasattr(project, 'organization')) self.assertTrue(len(user.projects)) self.assertIsInstance(user.organization, Organization)