diff --git a/teuthology/dispatcher/__init__.py b/teuthology/dispatcher/__init__.py
index dee4f55fca..fe309731d5 100644
--- a/teuthology/dispatcher/__init__.py
+++ b/teuthology/dispatcher/__init__.py
@@ -1,3 +1,4 @@
+import datetime
 import logging
 import os
 import psutil
@@ -5,7 +6,6 @@ import sys
 
 import yaml
 
-from datetime import datetime
 from typing import Dict, List
 
 from teuthology import (
@@ -26,7 +26,7 @@ from teuthology import safepath
 
 log = logging.getLogger(__name__)
 
-start_time = datetime.utcnow()
+start_time = datetime.datetime.now(datetime.timezone.utc)
 restart_file_path = '/tmp/teuthology-restart-dispatcher'
 stop_file_path = '/tmp/teuthology-stop-dispatcher'
 
@@ -34,7 +34,10 @@ def sentinel(path):
     if not os.path.exists(path):
         return False
 
-    file_mtime = datetime.utcfromtimestamp(os.path.getmtime(path))
+    file_mtime = datetime.datetime.fromtimestamp(
+        os.path.getmtime(path),
+        datetime.timezone.utc,
+    )
     return file_mtime > start_time
 
 
diff --git a/teuthology/dispatcher/supervisor.py b/teuthology/dispatcher/supervisor.py
index 5806bb5473..c003a4e620 100644
--- a/teuthology/dispatcher/supervisor.py
+++ b/teuthology/dispatcher/supervisor.py
@@ -1,3 +1,4 @@
+import datetime
 import logging
 import os
 import subprocess
@@ -6,7 +7,6 @@ import requests
 
 from urllib.parse import urljoin
 
-from datetime import datetime
 
 from teuthology import exporter, kill, report, safepath
 from teuthology.config import config as teuth_config
@@ -275,7 +275,7 @@ def unlock_targets(job_config):
 
 
 def run_with_watchdog(process, job_config):
-    job_start_time = datetime.utcnow()
+    job_start_time = datetime.datetime.now(datetime.timezone.utc)
 
     # Only push the information that's relevant to the watchdog, to save db
     # load
@@ -289,7 +289,7 @@ def run_with_watchdog(process, job_config):
     hit_max_timeout = False
     while process.poll() is None:
         # Kill jobs that have been running longer than the global max
-        run_time = datetime.utcnow() - job_start_time
+        run_time = datetime.datetime.now(datetime.timezone.utc) - job_start_time
         total_seconds = run_time.days * 60 * 60 * 24 + run_time.seconds
         if total_seconds > teuth_config.max_job_time:
             hit_max_timeout = True
diff --git a/teuthology/provision/cloud/util.py b/teuthology/provision/cloud/util.py
index a6f137e941..03ea7796f2 100644
--- a/teuthology/provision/cloud/util.py
+++ b/teuthology/provision/cloud/util.py
@@ -1,5 +1,4 @@
 import datetime
-import dateutil.tz
 import dateutil.parser
 import json
 import os
@@ -103,7 +102,7 @@ def write(self, value, expires, endpoint):
     def expired(self):
         if self.expires is None:
             return True
-        utcnow = datetime.datetime.now(dateutil.tz.tzutc())
+        utcnow = datetime.datetime.now(datetime.timezone.utc)
         offset = datetime.timedelta(minutes=30)
         return self.expires < (utcnow + offset)
 
diff --git a/teuthology/provision/fog.py b/teuthology/provision/fog.py
index 63d53cba3f..12914f2960 100644
--- a/teuthology/provision/fog.py
+++ b/teuthology/provision/fog.py
@@ -1,10 +1,10 @@
+import datetime
 import json
 import logging
 import requests
 import socket
 import re
 
-from datetime import datetime
 from paramiko import SSHException
 from paramiko.ssh_exception import NoValidConnectionsError
 
@@ -227,8 +227,8 @@ def schedule_deploy_task(self, host_id):
         for task in host_tasks:
             timestamp = task['createdTime']
             time_delta = (
-                datetime.utcnow() - datetime.strptime(
-                    timestamp, self.timestamp_format)
+                datetime.datetime.now(datetime.timezone.utc) - datetime.datetime.strptime(
+                    timestamp, self.timestamp_format).replace(tzinfo=datetime.timezone.utc)
             ).total_seconds()
             # There should only be one deploy task matching our host. Just in
             # case there are multiple, select a very recent one.
diff --git a/teuthology/provision/test/test_fog.py b/teuthology/provision/test/test_fog.py
index a61c172abe..3d0baa752c 100644
--- a/teuthology/provision/test/test_fog.py
+++ b/teuthology/provision/test/test_fog.py
@@ -1,5 +1,6 @@
+import datetime
+
 from copy import deepcopy
-from datetime import datetime
 from mock import patch, DEFAULT, PropertyMock
 from pytest import raises, mark
 
@@ -216,8 +217,10 @@ def test_schedule_deploy_task(self):
         tasktype_result = dict(tasktypes=[dict(name='deploy', id=tasktype_id)])
         schedule_result = dict()
         host_tasks = [dict(
-            createdTime=datetime.strftime(
-                datetime.utcnow(), self.klass.timestamp_format),
+            createdTime=datetime.datetime.strftime(
+                datetime.datetime.now(datetime.timezone.utc),
+                self.klass.timestamp_format
+            ),
             id=task_id,
         )]
         self.mocks['m_requests_Session_send']\
diff --git a/teuthology/task/ssh_keys.py b/teuthology/task/ssh_keys.py
index f7e0dba32c..9c899e8fa9 100644
--- a/teuthology/task/ssh_keys.py
+++ b/teuthology/task/ssh_keys.py
@@ -3,10 +3,10 @@ Ssh-key key handlers and associated routines
 """
 
 import contextlib
+import datetime
 import logging
 import paramiko
 import re
-from datetime import datetime
 from io import StringIO
 
 from teuthology import contextutil
@@ -21,7 +21,7 @@ def timestamp(format_='%Y-%m-%d_%H:%M:%S:%f'):
     """
     Return a UTC timestamp suitable for use in filenames
     """
-    return datetime.utcnow().strftime(format_)
+    return datetime.datetime.now(datetime.timezone.utc).strftime(format_)
 
 
 def backup_file(remote, path, sudo=False):
diff --git a/teuthology/test/test_worker.py b/teuthology/test/test_worker.py
index 1fe9d57b00..4e4e2f5512 100644
--- a/teuthology/test/test_worker.py
+++ b/teuthology/test/test_worker.py
@@ -1,7 +1,7 @@
+import datetime
 import os
 
 from unittest.mock import patch, Mock, MagicMock
-from datetime import datetime, timedelta
 
 from teuthology import worker
 
@@ -24,21 +24,19 @@ def test_restart_file_path_doesnt_exist(self, m_exists):
 
     @patch("os.path.getmtime")
     @patch("os.path.exists")
-    @patch("teuthology.worker.datetime")
-    def test_needs_restart(self, m_datetime, m_exists, m_getmtime):
+    def test_needs_restart(self, m_exists, m_getmtime):
         m_exists.return_value = True
-        m_datetime.utcfromtimestamp.return_value = datetime.utcnow() + timedelta(days=1)
-        result = worker.sentinel(worker.restart_file_path)
-        assert result
+        now = datetime.datetime.now(datetime.timezone.utc)
+        m_getmtime.return_value = (now + datetime.timedelta(days=1)).timestamp()
+        assert worker.sentinel(worker.restart_file_path)
 
     @patch("os.path.getmtime")
     @patch("os.path.exists")
-    @patch("teuthology.worker.datetime")
-    def test_does_not_need_restart(self, m_datetime, m_exists, getmtime):
+    def test_does_not_need_restart(self, m_exists, m_getmtime):
         m_exists.return_value = True
-        m_datetime.utcfromtimestamp.return_value = datetime.utcnow() - timedelta(days=1)
-        result = worker.sentinel(worker.restart_file_path)
-        assert not result
+        now = datetime.datetime.now(datetime.timezone.utc)
+        m_getmtime.return_value = (now - datetime.timedelta(days=1)).timestamp()
+        assert not worker.sentinel(worker.restart_file_path)
 
     @patch("os.symlink")
     def test_symlink_success(self, m_symlink):
diff --git a/teuthology/worker.py b/teuthology/worker.py
index 2e5a57c543..b11f887f46 100644
--- a/teuthology/worker.py
+++ b/teuthology/worker.py
@@ -1,3 +1,4 @@
+import datetime
 import logging
 import os
 import subprocess
@@ -6,8 +7,6 @@ import time
 
 import yaml
 
-from datetime import datetime
-
 from teuthology import (
     # non-modules
     setup_log_file,
@@ -24,7 +23,7 @@ from teuthology.exceptions import BranchNotFoundError, CommitNotFoundError, SkipJob, MaxWhileTries
 
 log = logging.getLogger(__name__)
 
-start_time = datetime.utcnow()
+start_time = datetime.datetime.now(datetime.timezone.utc)
 restart_file_path = '/tmp/teuthology-restart-workers'
 stop_file_path = '/tmp/teuthology-stop-workers'
 
@@ -32,7 +31,10 @@ def sentinel(path):
     if not os.path.exists(path):
         return False
-    file_mtime = datetime.utcfromtimestamp(os.path.getmtime(path))
+    file_mtime = datetime.datetime.fromtimestamp(
+        os.path.getmtime(path),
+        datetime.timezone.utc,
+    )
     if file_mtime > start_time:
         return True
     else:
         return False
@@ -325,7 +327,7 @@ def run_job(job_config, teuth_bin_path, archive_dir, verbose):
 
 
 def run_with_watchdog(process, job_config):
-    job_start_time = datetime.utcnow()
+    job_start_time = datetime.datetime.now(datetime.timezone.utc)
 
     # Only push the information that's relevant to the watchdog, to save db
     # load
@@ -339,7 +341,7 @@ def run_with_watchdog(process, job_config):
     symlink_worker_log(job_config['worker_log'], job_config['archive_path'])
     while process.poll() is None:
         # Kill jobs that have been running longer than the global max
-        run_time = datetime.utcnow() - job_start_time
+        run_time = datetime.datetime.now(datetime.timezone.utc) - job_start_time
         total_seconds = run_time.days * 60 * 60 * 24 + run_time.seconds
         if total_seconds > teuth_config.max_job_time:
             log.warning("Job ran longer than {max}s. Killing...".format(
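
Note on the pattern applied above: `datetime.utcnow()` and `datetime.utcfromtimestamp()` return naive datetimes and are deprecated as of Python 3.12, so every call site is switched to timezone-aware values. The sketch below is not part of the patch; the helper names and the sample format string are illustrative assumptions. It shows the three replacements the diff relies on, and why the `strptime()` result in the fog.py hunk needs an explicit `.replace(tzinfo=...)` before arithmetic: subtracting a naive datetime from an aware one raises `TypeError`.

```python
import datetime
import os

UTC = datetime.timezone.utc


def utc_now():
    # Aware replacement for the deprecated datetime.utcnow()
    return datetime.datetime.now(UTC)


def mtime_utc(path):
    # Aware replacement for datetime.utcfromtimestamp(os.path.getmtime(path))
    return datetime.datetime.fromtimestamp(os.path.getmtime(path), UTC)


def parse_utc(timestamp, fmt="%Y-%m-%d %H:%M:%S"):
    # strptime() yields a naive datetime when the format has no %z, so tag it
    # as UTC before subtracting it from utc_now(), as the fog.py hunk does.
    return datetime.datetime.strptime(timestamp, fmt).replace(tzinfo=UTC)


if __name__ == "__main__":
    age = (utc_now() - parse_utc("2024-01-01 00:00:00")).total_seconds()
    print(f"seconds since sample timestamp: {age:.0f}")
    print(f"this file was last modified at {mtime_utc(__file__)}")
```

Since both operands of the subtractions and comparisons (e.g. `file_mtime > start_time`, `datetime.datetime.now(datetime.timezone.utc) - job_start_time`) are aware after this change, the existing timedelta arithmetic in the watchdog loops is unaffected.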