diff --git a/neptune/new/attributes/atoms/string.py b/neptune/new/attributes/atoms/string.py index eb4c690dd..f00689205 100644 --- a/neptune/new/attributes/atoms/string.py +++ b/neptune/new/attributes/atoms/string.py @@ -15,11 +15,10 @@ # import typing -import click - from neptune.new.attributes.atoms.copiable_atom import CopiableAtom from neptune.new.internal.container_type import ContainerType from neptune.new.internal.operation import AssignString +from neptune.new.internal.utils.logger import logger from neptune.new.internal.utils.paths import path_to_str from neptune.new.types.atoms.string import String as StringVal @@ -60,11 +59,12 @@ def assign(self, value: typing.Union[StringVal, str], wait: bool = False): if not self._value_truncation_occurred: # the first truncation self._value_truncation_occurred = True - click.echo( - f"Warning: string '{path_to_str(self._path)}' value was " - f"longer than {String.MAX_VALUE_LENGTH} characters and was truncated. " - f"This warning is printed only once.", - err=True, + logger.warning( + "Warning: string '%s' value was" + " longer than %s characters and was truncated." + " This warning is printed only once.", + path_to_str(self._path), + String.MAX_VALUE_LENGTH, ) with self._container.lock(): diff --git a/neptune/new/attributes/series/float_series.py b/neptune/new/attributes/series/float_series.py index 9acc23805..3b3881117 100644 --- a/neptune/new/attributes/series/float_series.py +++ b/neptune/new/attributes/series/float_series.py @@ -15,8 +15,6 @@ # from typing import Iterable, List, Optional, Union -import click - from neptune.new.attributes.series.fetchable_series import FetchableSeries from neptune.new.attributes.series.series import Series from neptune.new.internal.backends.api_model import FloatSeriesValues @@ -27,6 +25,7 @@ Operation, ) from neptune.new.internal.utils import verify_type +from neptune.new.internal.utils.logger import logger from neptune.new.types.series.float_series import FloatSeries as FloatSeriesVal from neptune.utils import split_to_chunks @@ -64,10 +63,7 @@ def _get_config_operation_from_value(self, value: Val) -> Optional[Operation]: def _data_to_value(self, values: Iterable, **kwargs) -> Val: if kwargs: - click.echo( - "Warning: unexpected arguments ({kwargs}) in FloatSeries".format(kwargs=kwargs), - err=True, - ) + logger.warning("Warning: unexpected arguments (%s) in FloatSeries", kwargs) return FloatSeriesVal(values) def _is_value_type(self, value) -> bool: diff --git a/neptune/new/attributes/series/string_series.py b/neptune/new/attributes/series/string_series.py index 84fcd8494..2a2090978 100644 --- a/neptune/new/attributes/series/string_series.py +++ b/neptune/new/attributes/series/string_series.py @@ -15,12 +15,11 @@ # from typing import TYPE_CHECKING, Iterable, List, Optional -import click - from neptune.new.attributes.series.fetchable_series import FetchableSeries from neptune.new.attributes.series.series import Series from neptune.new.internal.backends.api_model import StringSeriesValues from neptune.new.internal.operation import ClearStringLog, LogStrings, Operation +from neptune.new.internal.utils.logger import logger from neptune.new.internal.utils.paths import path_to_str from neptune.new.types.series.string_series import StringSeries as StringSeriesVal from neptune.utils import split_to_chunks @@ -48,11 +47,12 @@ def _get_log_operations_from_value( ): # the first truncation self._value_truncation_occurred = True - click.echo( - f"Warning: string series '{ path_to_str(self._path)}' value was " - 
f"longer than {MAX_STRING_SERIES_VALUE_LENGTH} characters and was truncated. " - f"This warning is printed only once per series.", - err=True, + logger.warning( + "Warning: string series '%s' value was" + " longer than %s characters and was truncated." + " This warning is printed only once per series.", + path_to_str(self._path), + MAX_STRING_SERIES_VALUE_LENGTH, ) values = [LogStrings.ValueType(val, step=step, ts=timestamp) for val in values] @@ -63,10 +63,7 @@ def _get_clear_operation(self) -> Operation: def _data_to_value(self, values: Iterable, **kwargs) -> Val: if kwargs: - click.echo( - "Warning: unexpected arguments ({kwargs}) in StringSeries".format(kwargs=kwargs), - err=True, - ) + logger.warning("Warning: unexpected arguments (%s) in StringSeries", kwargs) return StringSeriesVal(values) def _is_value_type(self, value) -> bool: diff --git a/neptune/new/internal/backends/hosted_file_operations.py b/neptune/new/internal/backends/hosted_file_operations.py index 397ded350..797d7965b 100644 --- a/neptune/new/internal/backends/hosted_file_operations.py +++ b/neptune/new/internal/backends/hosted_file_operations.py @@ -22,7 +22,6 @@ from typing import AnyStr, Dict, Iterable, List, Optional, Set, Union from urllib.parse import urlencode -import click from bravado.exception import HTTPPaymentRequired, HTTPUnprocessableEntity from bravado.requests_client import RequestsClient from requests import Request, Response @@ -60,6 +59,7 @@ with_api_exceptions_handler, ) from neptune.new.internal.utils import get_absolute_paths, get_common_root +from neptune.new.internal.utils.logger import logger DEFAULT_CHUNK_SIZE = 5 * BYTES_IN_ONE_MB DEFAULT_UPLOAD_CONFIG = AttributeUploadConfiguration(chunk_size=DEFAULT_CHUNK_SIZE) @@ -315,7 +315,7 @@ def _multichunk_upload_with_retry( upload_entry, swagger_client, query_params, multipart_config, urlset ) except UploadedFileChanged as e: - click.echo(e) + logger.error(str(e)) def _multichunk_upload( diff --git a/neptune/new/internal/backends/utils.py b/neptune/new/internal/backends/utils.py index aa7eed1f0..7efbbfa99 100644 --- a/neptune/new/internal/backends/utils.py +++ b/neptune/new/internal/backends/utils.py @@ -18,13 +18,11 @@ import logging import os import socket -import sys import time from functools import lru_cache, wraps from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Text from urllib.parse import urljoin, urlparse -import click import requests import urllib3 from bravado.client import SwaggerClient @@ -69,6 +67,7 @@ from neptune.new.internal.backends.swagger_client_wrapper import SwaggerClientWrapper from neptune.new.internal.operation import CopyAttribute, Operation from neptune.new.internal.utils import replace_patch_version +from neptune.new.internal.utils.logger import logger _logger = logging.getLogger(__name__) @@ -197,11 +196,10 @@ def verify_client_version(client_config: ClientConfig, version: Version): client_config.version_info.min_recommended and client_config.version_info.min_recommended > version ): - click.echo( - "WARNING: We recommend an upgrade to a new version of neptune-client - {} (installed - {}).".format( - client_config.version_info.min_recommended, version - ), - sys.stderr, + logger.warning( + "WARNING: We recommend an upgrade to a new version of neptune-client - %s (installed - %s).", + client_config.version_info.min_recommended, + version, ) @@ -210,7 +208,7 @@ def update_session_proxies(session: Session, proxies: Optional[Dict[str, str]]): try: session.proxies.update(proxies) except 
(TypeError, ValueError): - raise ValueError("Wrong proxies format: {}".format(proxies)) + raise ValueError(f"Wrong proxies format: {proxies}") def build_operation_url(base_api: str, operation_url: str) -> str: @@ -225,13 +223,13 @@ def handle_server_raw_response_messages(response: Response): try: info = response.headers.get("X-Server-Info") if info: - click.echo(info) + logger.info(info) warning = response.headers.get("X-Server-Warning") if warning: - click.echo(warning) + logger.warning(warning) error = response.headers.get("X-Server-Error") if error: - click.echo(message=error, err=True) + logger.error(error) return response except Exception: # any issues with printing server messages should not cause code to fail @@ -258,13 +256,13 @@ def _handle_response(self): try: info = self._delegate.headers.get("X-Server-Info") if info: - click.echo(info) + logger.info(info) warning = self._delegate.headers.get("X-Server-Warning") if warning: - click.echo(warning) + logger.warning(warning) error = self._delegate.headers.get("X-Server-Error") if error: - click.echo(message=error, err=True) + logger.error(error) except Exception: # any issues with printing server messages should not cause code to fail pass diff --git a/neptune/new/internal/operation_processors/async_operation_processor.py b/neptune/new/internal/operation_processors/async_operation_processor.py index be38338a5..7be2ba9fd 100644 --- a/neptune/new/internal/operation_processors/async_operation_processor.py +++ b/neptune/new/internal/operation_processors/async_operation_processor.py @@ -20,8 +20,6 @@ from time import monotonic, time from typing import List, Optional -import click - from neptune.new.internal.backends.neptune_backend import NeptuneBackend from neptune.new.internal.container_type import ContainerType from neptune.new.internal.disk_queue import DiskQueue @@ -31,6 +29,7 @@ OperationProcessor, ) from neptune.new.internal.threading.daemon import Daemon +from neptune.new.internal.utils.logger import logger # pylint: disable=protected-access @@ -103,19 +102,19 @@ def _wait_for_queue_empty(self, initial_queue_size: int, seconds: Optional[float ) if initial_queue_size > 0: if self._consumer.last_backoff_time > 0: - click.echo( - f"We have been experiencing connection interruptions during your run. " - f"Neptune client will now try to resume connection and sync data for the next " - f"{max_reconnect_wait_time} seconds. " - f"You can also kill this process and synchronize your data manually later " - f"using `neptune sync` command.", - sys.stderr, + logger.warning( + "We have been experiencing connection interruptions during your run." + " Neptune client will now try to resume connection and sync data for the next" + " %s seconds." + " You can also kill this process and synchronize your data manually later" + " using `neptune sync` command.", + max_reconnect_wait_time, ) else: - click.echo( - f"Waiting for the remaining {initial_queue_size} operations to synchronize with Neptune. " - f"Do not kill this process.", - sys.stderr, + logger.warning( + "Waiting for the remaining %s operations to synchronize with Neptune." 
+ " Do not kill this process.", + initial_queue_size, ) while True: @@ -136,32 +135,34 @@ def _wait_for_queue_empty(self, initial_queue_size: int, seconds: Optional[float (already_synced / initial_queue_size) * 100 if initial_queue_size else 100 ) if size_remaining == 0: - click.echo(f"All {initial_queue_size} operations synced, thanks for waiting!") + logger.info("All %s operations synced, thanks for waiting!", initial_queue_size) return time_elapsed = monotonic() - waiting_start if self._consumer.last_backoff_time > 0 and time_elapsed >= max_reconnect_wait_time: - click.echo( - f"Failed to reconnect with Neptune in {max_reconnect_wait_time} seconds." - f" You have {size_remaining} operations saved on disk that can be manually synced" - f" using `neptune sync` command.", - sys.stderr, + logger.warning( + "Failed to reconnect with Neptune in %s seconds." + " You have %s operations saved on disk that can be manually synced" + " using `neptune sync` command.", + max_reconnect_wait_time, + size_remaining, ) return if seconds is not None and wait_time == 0: - click.echo( - f"Failed to sync all operations in {seconds} seconds." - f" You have {size_remaining} operations saved on disk that can be manually synced" - f" using `neptune sync` command.", - sys.stderr, + logger.warning( + "Failed to sync all operations in %s seconds." + " You have %s operations saved on disk that can be manually synced" + " using `neptune sync` command.", + seconds, + size_remaining, ) return - click.echo( - f"Still waiting for the remaining {size_remaining} operations " - f"({already_synced_proc:.2f}% done). Please wait.", - sys.stderr, + logger.warning( + "Still waiting for the remaining %s operations" " (%.2f%% done). Please wait.", + size_remaining, + already_synced_proc, ) def stop(self, seconds: Optional[float] = None): diff --git a/neptune/new/internal/threading/daemon.py b/neptune/new/internal/threading/daemon.py index 27d150648..7acc52cb8 100644 --- a/neptune/new/internal/threading/daemon.py +++ b/neptune/new/internal/threading/daemon.py @@ -15,13 +15,11 @@ # import abc import functools -import sys import threading import time -import click - from neptune.new.exceptions import NeptuneConnectionLostException +from neptune.new.internal.utils.logger import logger class Daemon(threading.Thread): @@ -77,15 +75,15 @@ def wrapper(self_: Daemon, *args, **kwargs): result = func(self_, *args, **kwargs) if self_.last_backoff_time > 0: self_.last_backoff_time = 0 - click.echo("Communication with Neptune restored!", sys.stderr) + logger.info("Communication with Neptune restored!") return result except NeptuneConnectionLostException as e: if self_.last_backoff_time == 0: - click.echo( - "Experiencing connection interruptions. " - "Will try to reestablish communication with Neptune. " - f"Internal exception was: {e.cause.__class__.__name__}", - sys.stderr, + logger.warning( + "Experiencing connection interruptions." + " Will try to reestablish communication with Neptune." 
+ " Internal exception was: %s", + e.cause.__class__.__name__, ) self_.last_backoff_time = self.INITIAL_RETRY_BACKOFF else: @@ -94,9 +92,9 @@ def wrapper(self_: Daemon, *args, **kwargs): ) time.sleep(self_.last_backoff_time) except Exception: - click.echo( - f"Unexpected error occurred in Neptune background thread: {self.kill_message}", - sys.stderr, + logger.error( + "Unexpected error occurred in Neptune background thread: %s", + self.kill_message, ) raise diff --git a/neptune/new/internal/utils/images.py b/neptune/new/internal/utils/images.py index 8003f3a10..bb619edd8 100644 --- a/neptune/new/internal/utils/images.py +++ b/neptune/new/internal/utils/images.py @@ -21,12 +21,12 @@ from io import BytesIO, StringIO from typing import Optional -import click from packaging import version from pandas import DataFrame from neptune.new.exceptions import PlotlyIncompatibilityException from neptune.new.internal.utils import limits +from neptune.new.internal.utils.logger import logger _logger = logging.getLogger(__name__) @@ -107,10 +107,10 @@ def _to_html(chart) -> str: chart = _matplotlib_to_plotly(chart) return _export_plotly_figure(chart) except ImportError: - print("Plotly not installed. Logging plot as an image.") + logger.warning("Plotly not installed. Logging plot as an image.") return _image_content_to_html(_get_figure_image_data(chart)) except UserWarning: - print( + logger.warning( "Couldn't convert Matplotlib plot to interactive Plotly plot. Logging plot as an image instead." ) return _image_content_to_html(_get_figure_image_data(chart)) @@ -176,11 +176,10 @@ def _get_numpy_as_image(array): if array_max > 1: data_range_warnings.append(f"the largest value in the array is {array_max}") if data_range_warnings: - data_range_warning_message = (" and ".join(data_range_warnings) + ". ").capitalize() - click.echo( - f"{data_range_warning_message}" - f"To be interpreted as colors correctly values in the array need to be in the [0, 1] range.", - err=True, + data_range_warning_message = (" and ".join(data_range_warnings) + ".").capitalize() + logger.warning( + "%s To be interpreted as colors correctly values in the array need to be in the [0, 1] range.", + data_range_warning_message, ) array *= 255 shape = array.shape diff --git a/neptune/new/internal/utils/logger.py b/neptune/new/internal/utils/logger.py new file mode 100644 index 000000000..02740d2ca --- /dev/null +++ b/neptune/new/internal/utils/logger.py @@ -0,0 +1,48 @@ +# +# Copyright (c) 2022, Neptune Labs Sp. z o.o. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import logging +import sys + +LOGGER_NAME = "neptune-client" + + +class GrabbableStdoutHandler(logging.StreamHandler): + """ + This class is like a StreamHandler using sys.stdout, but always uses + whatever sys.stdout is currently set to rather than the value of + sys.stderr at handler construction time. + This enables neptune-client to capture stdout regardless + of logging configuration time. + Based on logging._StderrHandler from standard library. 
+ """ + + def __init__(self, level=logging.NOTSET): + # pylint: disable=non-parent-init-called,super-init-not-called + logging.Handler.__init__(self, level) + + @property + def stream(self): + return sys.stdout + + +logger = logging.getLogger(LOGGER_NAME) + +logger.propagate = False +logger.setLevel(level=logging.DEBUG) +stdout_handler = GrabbableStdoutHandler() +stdout_handler.setFormatter(logging.Formatter("%(message)s")) +logger.addHandler(stdout_handler) diff --git a/neptune/new/internal/websockets/websocket_signals_background_job.py b/neptune/new/internal/websockets/websocket_signals_background_job.py index 74d988f4a..a894a7904 100644 --- a/neptune/new/internal/websockets/websocket_signals_background_job.py +++ b/neptune/new/internal/websockets/websocket_signals_background_job.py @@ -19,7 +19,6 @@ from json.decoder import JSONDecodeError from typing import TYPE_CHECKING, Optional -import click from websocket import WebSocketConnectionClosedException from neptune.internal.websockets.reconnecting_websocket import ReconnectingWebsocket @@ -31,6 +30,7 @@ from neptune.new.internal.background_job import BackgroundJob from neptune.new.internal.threading.daemon import Daemon from neptune.new.internal.utils import process_killer +from neptune.new.internal.utils.logger import logger from neptune.new.internal.websockets.websockets_factory import WebsocketsFactory if TYPE_CHECKING: @@ -85,12 +85,11 @@ def _handler_message(self, msg: str): msg_type = json_msg.get("type") msg_body = json_msg.get("body") if not msg_type: - click.echo("Malformed websocket signal: missing type", err=True) + logger.error("Malformed websocket signal: missing type") return if not isinstance(msg_type, str): - click.echo( - f"Malformed websocket signal: type is {type(msg_type)}, should be str", - err=True, + logger.error( + "Malformed websocket signal: type is %s, should be str", type(msg_type) ) return if msg_type.lower() == SIGNAL_TYPE_STOP: @@ -98,18 +97,17 @@ def _handler_message(self, msg: str): elif msg_type.lower() == SIGNAL_TYPE_ABORT: self._handle_abort(msg_body) except JSONDecodeError as ex: - click.echo(f"Malformed websocket signal: {ex}, message: {msg}", err=True) + logger.error("Malformed websocket signal: %s, message: %s", ex, msg) def _handle_stop(self, msg_body): msg_body = msg_body or dict() if not isinstance(msg_body, dict): - click.echo( - f"Malformed websocket signal: body is {type(msg_body)}, should be dict", - err=True, + logger.error( + "Malformed websocket signal: body is %s, should be dict", type(msg_body) ) return run_id = self._container["sys/id"].fetch() - click.echo(f"Run {run_id} received stop signal. Exiting", err=True) + logger.error("Run %s received stop signal. Exiting", run_id) seconds = msg_body.get("seconds") self._container.stop(seconds=seconds) process_killer.kill_me() @@ -117,13 +115,12 @@ def _handle_stop(self, msg_body): def _handle_abort(self, msg_body): msg_body = msg_body or dict() if not isinstance(msg_body, dict): - click.echo( - f"Malformed websocket signal: body is {type(msg_body)}, should be dict", - err=True, + logger.error( + "Malformed websocket signal: body is %s, should be dict", type(msg_body) ) return run_id = self._container["sys/id"].fetch() - click.echo(f"Run {run_id} received abort signal. Exiting", err=True) + logger.error("Run %s received abort signal. 
Exiting", run_id) seconds = msg_body.get("seconds") self._container[SYSTEM_FAILED_ATTRIBUTE_PATH] = True self._container.stop(seconds=seconds) diff --git a/neptune/new/metadata_containers/metadata_container.py b/neptune/new/metadata_containers/metadata_container.py index 444d0309e..969ff9a7a 100644 --- a/neptune/new/metadata_containers/metadata_container.py +++ b/neptune/new/metadata_containers/metadata_container.py @@ -24,8 +24,6 @@ from functools import wraps from typing import Any, Dict, List, Optional, Union -import click - from neptune.exceptions import UNIX_STYLES from neptune.new.attributes import create_attribute_from_type from neptune.new.attributes.attribute import Attribute @@ -62,6 +60,7 @@ is_string_like, verify_type, ) +from neptune.new.internal.utils.logger import logger from neptune.new.internal.utils.paths import parse_path from neptune.new.internal.utils.runningmode import in_interactive, in_notebook from neptune.new.internal.utils.uncaught_exception_handler import ( @@ -206,16 +205,16 @@ def stop(self, seconds: Optional[Union[float, int]] = None) -> None: self._state = ContainerState.STOPPING ts = time.time() - click.echo("Shutting down background jobs, please wait a moment...") + logger.info("Shutting down background jobs, please wait a moment...") self._bg_job.stop() self._bg_job.join(seconds) - click.echo("Done!") + logger.info("Done!") with self._lock: sec_left = None if seconds is None else seconds - (time.time() - ts) self._op_processor.stop(sec_left) if self._mode != Mode.OFFLINE: - click.echo("Explore the metadata in the Neptune app:") - click.echo(self._metadata_url) + logger.info("Explore the metadata in the Neptune app:") + logger.info(self._metadata_url) self._backend.close() self._state = ContainerState.STOPPED @@ -229,16 +228,16 @@ def print_structure(self) -> None: def _print_structure_impl(self, struct: dict, indent: int) -> None: for key in sorted(struct.keys()): - click.echo(" " * indent, nl=False) + print(" " * indent, end="") if isinstance(struct[key], dict): - click.echo( + print( "{blue}'{key}'{end}:".format( blue=UNIX_STYLES["blue"], key=key, end=UNIX_STYLES["end"] ) ) self._print_structure_impl(struct[key], indent=indent + 1) else: - click.echo( + print( "{blue}'{key}'{end}: {type}".format( blue=UNIX_STYLES["blue"], key=key, @@ -341,17 +340,18 @@ def get_url(self) -> str: def _startup(self, debug_mode): if not debug_mode: - click.echo(self.get_url()) + logger.info(self.get_url()) self.start() if not debug_mode: if in_interactive() or in_notebook(): - click.echo( - f"Remember to stop your {self.container_type.value} once you’ve finished logging your metadata" - f" ({self._docs_url_stop})." + logger.info( + "Remember to stop your %s once you’ve finished logging your metadata (%s)." " It will be stopped automatically only when the notebook" - " kernel/interactive console is terminated." 
+ " kernel/interactive console is terminated.", + self.container_type.value, + self._docs_url_stop, ) uncaught_exception_handler.activate() diff --git a/neptune/new/sync/__init__.py b/neptune/new/sync/__init__.py index 9612504ac..11284e4c5 100644 --- a/neptune/new/sync/__init__.py +++ b/neptune/new/sync/__init__.py @@ -35,6 +35,7 @@ from neptune.new.internal.credentials import Credentials from neptune.new.internal.disk_queue import DiskQueue from neptune.new.internal.operation import Operation +from neptune.new.internal.utils.logger import logger from neptune.new.sync.status import StatusRunner from neptune.new.sync.sync import SyncRunner @@ -172,7 +173,7 @@ def sync( sync_runner = SyncRunner(backend=HostedNeptuneBackend(Credentials.from_token())) if runs_names: - click.echo( + logger.warning( "WARNING: --run parameter is deprecated and will be removed in the future, please start using --object" ) # prefer object_names, obviously diff --git a/neptune/new/sync/status.py b/neptune/new/sync/status.py index aca70d8d5..70af4c224 100644 --- a/neptune/new/sync/status.py +++ b/neptune/new/sync/status.py @@ -21,11 +21,10 @@ from pathlib import Path from typing import List, Sequence, Tuple -import click - from neptune.new.constants import ASYNC_DIRECTORY, OFFLINE_NAME_PREFIX from neptune.new.envs import PROJECT_ENV_NAME from neptune.new.internal.backends.api_model import ApiExperiment +from neptune.new.internal.utils.logger import logger from neptune.new.sync.abstract_backend_runner import AbstractBackendRunner from neptune.new.sync.utils import ( get_metadata_container, @@ -82,43 +81,39 @@ def list_containers( offline_dirs: Sequence[str], ) -> None: if not synced_containers and not unsynced_containers and not offline_dirs: - click.echo("There are no Neptune objects in {}".format(base_path)) + logger.info("There are no Neptune objects in %s", base_path) sys.exit(1) if unsynced_containers: - click.echo("Unsynchronized objects:") + logger.info("Unsynchronized objects:") for container in unsynced_containers: - click.echo("- {}".format(get_qualified_name(container))) + logger.info("- %s", get_qualified_name(container)) if synced_containers: - click.echo("Synchronized objects:") + logger.info("Synchronized objects:") for container in synced_containers: - click.echo("- {}".format(get_qualified_name(container))) + logger.info("- %s", get_qualified_name(container)) if offline_dirs: - click.echo("Unsynchronized offline objects:") + logger.info("Unsynchronized offline objects:") for run_id in offline_dirs: - click.echo("- {}{}".format(OFFLINE_NAME_PREFIX, run_id)) - click.echo() - click.echo(textwrap.fill(offline_run_explainer, width=90)) + logger.info("- %s", f"{OFFLINE_NAME_PREFIX}{run_id}") + logger.info("\n%s", textwrap.fill(offline_run_explainer, width=90)) if not unsynced_containers: - click.echo() - click.echo("There are no unsynchronized objects in {}".format(base_path)) + logger.info("\nThere are no unsynchronized objects in %s", base_path) if not synced_containers: - click.echo() - click.echo("There are no synchronized objects in {}".format(base_path)) + logger.info("\nThere are no synchronized objects in %s", base_path) - click.echo() - click.echo("Please run with the `neptune sync --help` to see example commands.") + logger.info("\nPlease run with the `neptune sync --help` to see example commands.") def synchronization_status(self, base_path: Path) -> None: synced_containers, unsynced_containers, not_found = self.partition_containers(base_path) if not_found > 0: - click.echo( - f"WARNING: 
{not_found} objects was skipped because they are in trash or do not exist anymore.", - sys.stderr, + logger.warning( + "WARNING: %s objects was skipped because they are in trash or do not exist anymore.", + not_found, ) offline_dirs = get_offline_dirs(base_path) self.list_containers(base_path, synced_containers, unsynced_containers, offline_dirs) diff --git a/neptune/new/sync/sync.py b/neptune/new/sync/sync.py index b58e78eaf..72e89cbba 100644 --- a/neptune/new/sync/sync.py +++ b/neptune/new/sync/sync.py @@ -18,14 +18,11 @@ import logging import os -import sys import threading import time from pathlib import Path from typing import Iterable, List, Optional, Sequence -import click - from neptune.new.constants import ( ASYNC_DIRECTORY, OFFLINE_DIRECTORY, @@ -41,6 +38,7 @@ from neptune.new.internal.disk_queue import DiskQueue from neptune.new.internal.id_formats import QualifiedName, UniqueId from neptune.new.internal.operation import Operation +from neptune.new.internal.utils.logger import logger from neptune.new.sync.abstract_backend_runner import AbstractBackendRunner from neptune.new.sync.utils import ( create_dir_name, @@ -59,14 +57,14 @@ class SyncRunner(AbstractBackendRunner): def sync_run(self, run_path: Path, run: ApiExperiment) -> None: qualified_run_name = get_qualified_name(run) - click.echo("Synchronising {}".format(qualified_run_name)) + logger.info("Synchronising %s", qualified_run_name) for execution_path in run_path.iterdir(): self.sync_execution( execution_path=execution_path, container_id=run.id, container_type=run.type, ) - click.echo(f"Synchronization of {run.type.value} {qualified_run_name} completed.") + logger.info("Synchronization of %s %s completed.", run.type.value, qualified_run_name) def sync_execution( self, @@ -103,11 +101,11 @@ def sync_execution( except NeptuneConnectionLostException as ex: if time.monotonic() - start_time > retries_timeout: raise ex - click.echo( - "Experiencing connection interruptions. " - "Will try to reestablish communication with Neptune. " - f"Internal exception was: {ex.cause.__class__.__name__}", - sys.stderr, + logger.warning( + "Experiencing connection interruptions." + " Will try to reestablish communication with Neptune." + " Internal exception was: %s", + ex.cause.__class__.__name__, ) def sync_all_registered_containers(self, base_path: Path) -> None: @@ -138,9 +136,8 @@ def sync_selected_registered_containers( elif run_path_deprecated.exists(): self.sync_run(run_path=run_path_deprecated, run=run) else: - click.echo( - "Warning: Run '{}' does not exist in location {}".format(name, base_path), - file=sys.stderr, + logger.warning( + "Warning: Run '%s' does not exist in location %s", name, base_path ) def _register_offline_run( @@ -152,10 +149,9 @@ def _register_offline_run( else: raise ValueError("Only runs are supported in offline mode") except Exception as e: - click.echo( - "Exception occurred while trying to create a run " - "on the Neptune server. Please try again later", - file=sys.stderr, + logger.warning( + "Exception occurred while trying to create a run" + " on the Neptune server. 
Please try again later", ) logging.exception(e) return None @@ -191,13 +187,12 @@ def register_offline_runs( server_id=run.id, server_type=run.type, ) - click.echo(f"Offline run {offline_dir} registered as {get_qualified_name(run)}") + logger.info( + "Offline run %s registered as %s", offline_dir, get_qualified_name(run) + ) result.append(run) else: - click.echo( - f"Offline run {offline_dir} not found on disk.", - err=True, - ) + logger.warning("Offline run %s not found on disk.", offline_dir) return result def sync_offline_runs( diff --git a/neptune/new/sync/utils.py b/neptune/new/sync/utils.py index 4605eb7e0..caf9bb8df 100644 --- a/neptune/new/sync/utils.py +++ b/neptune/new/sync/utils.py @@ -27,14 +27,11 @@ import logging import os -import sys import textwrap import threading from pathlib import Path from typing import Iterator, List, Optional, Tuple, Union -import click - from neptune.new.constants import OFFLINE_DIRECTORY from neptune.new.envs import PROJECT_ENV_NAME from neptune.new.exceptions import ( @@ -48,6 +45,7 @@ from neptune.new.internal.disk_queue import DiskQueue from neptune.new.internal.id_formats import QualifiedName, UniqueId from neptune.new.internal.operation import Operation +from neptune.new.internal.utils.logger import logger def get_metadata_container( @@ -59,11 +57,10 @@ def get_metadata_container( try: return backend.get_metadata_container(container_id, expected_container_type=container_type) except MetadataContainerNotFound: - click.echo(f"Can't fetch {public_container_type} {container_id}. Skipping.") + logger.warning("Can't fetch %s %s. Skipping.", public_container_type, container_id) except NeptuneException as e: - click.echo( - f"Exception while fetching {public_container_type} {container_id}. Skipping.", - err=True, + logger.warning( + "Exception while fetching %s %s. Skipping.", public_container_type, container_id ) logging.exception(e) @@ -71,19 +68,17 @@ def get_metadata_container( _project_name_missing_message = ( - "Project name not provided. Could not synchronize offline runs. " - "To synchronize offline run, specify the project name with the --project flag " - "or by setting the {} environment variable.".format(PROJECT_ENV_NAME) + "Project name not provided. Could not synchronize offline runs." + " To synchronize offline run, specify the project name with the --project flag" + f" or by setting the {PROJECT_ENV_NAME} environment variable." ) def _project_not_found_message(project_name: QualifiedName) -> str: return ( - "Project {} not found. Could not synchronize offline runs. ".format(project_name) - + "Please ensure you specified the correct project name with the --project flag " - + "or with the {} environment variable, or contact Neptune for support.".format( - PROJECT_ENV_NAME - ) + f"Project {project_name} not found. Could not synchronize offline runs." + " Please ensure you specified the correct project name with the --project flag" + f" or with the {PROJECT_ENV_NAME} environment variable, or contact Neptune for support." 
) @@ -92,17 +87,17 @@ def get_project( ) -> Optional[Project]: project_name = project_name_flag or QualifiedName(os.getenv(PROJECT_ENV_NAME)) if not project_name: - click.echo(textwrap.fill(_project_name_missing_message), file=sys.stderr) + logger.warning(textwrap.fill(_project_name_missing_message)) return None try: return backend.get_project(project_name) except ProjectNotFound: - click.echo(textwrap.fill(_project_not_found_message(project_name)), file=sys.stderr) + logger.warning(textwrap.fill(_project_not_found_message(project_name))) return None def get_qualified_name(run: ApiExperiment) -> QualifiedName: - return QualifiedName("{}/{}/{}".format(run.workspace, run.project_name, run.sys_id)) + return QualifiedName(f"{run.workspace}/{run.project_name}/{run.sys_id}") def is_container_synced(run_path: Path) -> bool: diff --git a/neptune/new/types/series/file_series.py b/neptune/new/types/series/file_series.py index f4dbf7174..3e090dfbd 100644 --- a/neptune/new/types/series/file_series.py +++ b/neptune/new/types/series/file_series.py @@ -16,9 +16,8 @@ from typing import TYPE_CHECKING, List, TypeVar -import click - from neptune.new.internal.utils import is_collection +from neptune.new.internal.utils.logger import logger from neptune.new.types import File from neptune.new.types.series.series import Series @@ -37,10 +36,7 @@ def __init__(self, values, **kwargs): self.name = kwargs.pop("name", None) self.description = kwargs.pop("description", None) if kwargs: - click.echo( - "Warning: unexpected arguments ({kwargs}) in FileSeries".format(kwargs=kwargs), - err=True, - ) + logger.error("Warning: unexpected arguments (%s) in FileSeries", kwargs) def accept(self, visitor: "ValueVisitor[Ret]") -> Ret: return visitor.visit_image_series(self) @@ -50,4 +46,4 @@ def values(self) -> List[File]: return self._values def __str__(self): - return "FileSeries({})".format(str(self.values)) + return f"FileSeries({self.values})" diff --git a/tests/neptune/new/internal/utils/test_images.py b/tests/neptune/new/internal/utils/test_images.py index 741994e9c..31a09556e 100644 --- a/tests/neptune/new/internal/utils/test_images.py +++ b/tests/neptune/new/internal/utils/test_images.py @@ -73,11 +73,11 @@ def test_get_image_content_from_3d_grayscale_array(self): expected_image = Image.fromarray(expected_array.astype(numpy.uint8)) # expect - stderr = io.StringIO() - with contextlib.redirect_stderr(stderr): + stdout = io.StringIO() + with contextlib.redirect_stdout(stdout): self.assertEqual(get_image_content(image_array), self._encode_pil_image(expected_image)) self.assertEqual( - stderr.getvalue(), + stdout.getvalue(), "The smallest value in the array is -3 and the largest value in the array is 6." " To be interpreted as colors correctly values in the array need to be in the [0, 1] range.\n", ) @@ -264,8 +264,8 @@ def test_get_oversize_html_from_pandas(self): caplog.output, [ "WARNING:neptune.new.internal.utils.limits:You are attempting to create an in-memory file that" - " is 38.1MB large. Neptune supports logging in-memory file objects smaller than 32MB. " - "Resize or increase compression of this object" + " is 38.1MB large. Neptune supports logging in-memory file objects smaller than 32MB." 
+ " Resize or increase compression of this object" ], ) diff --git a/tests/neptune/new/sync/test_status.py b/tests/neptune/new/sync/test_status.py index debb78b50..43cf515d7 100644 --- a/tests/neptune/new/sync/test_status.py +++ b/tests/neptune/new/sync/test_status.py @@ -59,14 +59,14 @@ def test_list_containers(tmp_path, mocker, capsys, backend, status_runner, conta # then captured = capsys.readouterr() - assert captured.err == "" - assert ( - "Synchronized objects:\n- {}".format(get_qualified_name(synced_container)) in captured.out - ) - assert ( - "Unsynchronized objects:\n- {}".format(get_qualified_name(unsynced_container)) - in captured.out - ) + assert captured.out.splitlines() == [ + "Unsynchronized objects:", + f"- {get_qualified_name(unsynced_container)}", + "Synchronized objects:", + f"- {get_qualified_name(synced_container)}", + "", + "Please run with the `neptune sync --help` to see example commands.", + ] def test_list_offline_runs(tmp_path, mocker, capsys, status_runner): diff --git a/tests/neptune/new/sync/test_sync.py b/tests/neptune/new/sync/test_sync.py index 19a9b3f9c..eb9047476 100644 --- a/tests/neptune/new/sync/test_sync.py +++ b/tests/neptune/new/sync/test_sync.py @@ -271,7 +271,7 @@ def test_sync_non_existent_container(tmp_path, capsys, sync_runner): # then captured = capsys.readouterr() - assert "Warning: Run 'bar' does not exist in location" in captured.err + assert "Warning: Run 'bar' does not exist in location" in captured.out def test_sync_non_existent_offline_containers(tmp_path, capsys, sync_runner): @@ -285,5 +285,5 @@ def test_sync_non_existent_offline_containers(tmp_path, capsys, sync_runner): # then captured = capsys.readouterr() - assert "Offline run foo__bar not found on disk." in captured.err - assert "Offline run model__bar not found on disk." in captured.err + assert "Offline run foo__bar not found on disk." in captured.out + assert "Offline run model__bar not found on disk." in captured.out
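
Usage sketch (not part of the patch, and assuming only what the new neptune/new/internal/utils/logger.py above shows): all client console messages now go through a single logger named "neptune-client", configured with propagate=False, level DEBUG, and one stdout handler with a bare "%(message)s" format. Application code that wants to quiet or redirect that output can therefore configure the logger directly with standard-library calls; the log file name below is purely hypothetical.

    import logging

    # The logger name registered by the new logger module.
    neptune_logger = logging.getLogger("neptune-client")

    # Keep only warnings and errors on the console.
    neptune_logger.setLevel(logging.WARNING)

    # Optionally mirror the remaining messages to a file as well (hypothetical file name).
    file_handler = logging.FileHandler("neptune_messages.log")
    file_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    neptune_logger.addHandler(file_handler)

Because the patch sets propagate=False, these records never reach the root logger's handlers, so attaching a handler to "neptune-client" itself, as above, is how they can be captured elsewhere.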
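
For reference, a minimal sketch of the behavior the GrabbableStdoutHandler docstring describes and the reworked tests rely on (the import path is taken from the new file above; everything else is standard library): the handler looks up sys.stdout on every emit rather than capturing it at construction time, so output redirected after import is still captured.

    import contextlib
    import io

    from neptune.new.internal.utils.logger import logger

    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        # The handler resolves sys.stdout at emit time, so this message lands in
        # `buffer` even though the handler was created long before the redirect.
        logger.info("hello from neptune-client")

    print(buffer.getvalue(), end="")  # -> hello from neptune-client

This is the same mechanism the updated tests exercise when they switch from contextlib.redirect_stderr to contextlib.redirect_stdout.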