From 7a972e86c4e5009830d5e6faacadfe6e1ed2efff Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Feb 2023 01:06:24 +0100 Subject: [PATCH] Update .pre-commit-config.yaml (#11009) * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update __init__.py Signed-off-by: Glenn Jocher * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * Pre-commit updates * Pre-commit updates --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 35 ++--- benchmarks.py | 2 +- classify/predict.py | 4 +- classify/train.py | 26 ++-- classify/tutorial.ipynb | 2 +- classify/val.py | 8 +- detect.py | 2 +- export.py | 26 ++-- models/common.py | 16 +- models/segment/yolov5m-seg.yaml | 2 +- models/segment/yolov5s-seg.yaml | 2 +- models/tf.py | 12 +- segment/predict.py | 2 +- segment/train.py | 14 +- segment/tutorial.ipynb | 2 +- segment/val.py | 16 +- train.py | 6 +- tutorial.ipynb | 2 +- utils/__init__.py | 2 +- utils/dataloaders.py | 34 ++--- utils/downloads.py | 2 +- utils/flask_rest_api/example_request.py | 8 +- utils/flask_rest_api/restapi.py | 22 +-- utils/general.py | 48 +++--- utils/loggers/__init__.py | 16 +- utils/loggers/clearml/clearml_utils.py | 6 +- utils/loggers/comet/__init__.py | 192 ++++++++++++------------ utils/loggers/comet/comet_utils.py | 42 +++--- utils/loggers/comet/hpo.py | 32 ++-- utils/loggers/wandb/wandb_utils.py | 10 +- utils/metrics.py | 10 +- utils/plots.py | 2 +- utils/segment/dataloaders.py | 32 ++-- utils/segment/loss.py | 12 +- utils/segment/metrics.py | 90 +++++------ utils/segment/plots.py | 20 +-- utils/torch_utils.py | 4 +- utils/triton.py | 14 +- val.py | 4 +- 39 files changed, 389 insertions(+), 392 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b188048e63a6..c5162378ab81 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,8 +1,5 @@ -# Define hooks for code formations -# Will be applied on any updated commit files if a user has installed and linked commit hook - -default_language_version: - python: python3.8 +# Ultralytics YOLO 🚀, GPL-3.0 license +# Pre-commit hooks. 
For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md exclude: 'docs/' # Define bot property if installed via https://github.com/marketplace/pre-commit-ci @@ -16,13 +13,13 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - # - id: end-of-file-fixer + - id: end-of-file-fixer - id: trailing-whitespace - id: check-case-conflict - id: check-yaml - - id: check-toml - - id: pretty-format-json - id: check-docstring-first + - id: double-quote-string-fixer + - id: detect-private-key - repo: https://github.com/asottile/pyupgrade rev: v3.3.1 @@ -31,11 +28,11 @@ repos: name: Upgrade code args: [--py37-plus] - # - repo: https://github.com/PyCQA/isort - # rev: 5.11.4 - # hooks: - # - id: isort - # name: Sort imports + - repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + name: Sort imports - repo: https://github.com/google/yapf rev: v0.32.0 @@ -59,12 +56,12 @@ repos: - id: flake8 name: PEP8 - #- repo: https://github.com/codespell-project/codespell - # rev: v2.2.2 - # hooks: - # - id: codespell - # args: - # - --ignore-words-list=crate,nd + - repo: https://github.com/codespell-project/codespell + rev: v2.2.2 + hooks: + - id: codespell + args: + - --ignore-words-list=crate,nd,strack,dota #- repo: https://github.com/asottile/yesqa # rev: v1.4.0 diff --git a/benchmarks.py b/benchmarks.py index 03d7d693a936..09108b8a7cc4 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -164,6 +164,6 @@ def main(opt): test(**vars(opt)) if opt.test else run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/predict.py b/classify/predict.py index 5a5edabda42c..5f0d40787b52 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -179,7 +179,7 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms') # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image @@ -221,6 +221,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/train.py b/classify/train.py index 8ae2fdd52828..b752a3c1fe32 100644 --- a/classify/train.py +++ b/classify/train.py @@ -78,7 +78,7 @@ def train(opt, device): LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') t = time.time() if str(data) == 'imagenet': - subprocess.run(["bash", str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) + subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) else: url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' download(url, dir=data_dir.parent) @@ -220,11 +220,11 @@ def train(opt, device): # Log metrics = { - "train/loss": tloss, - f"{val}/loss": vloss, - "metrics/accuracy_top1": top1, - "metrics/accuracy_top5": top5, - "lr/0": optimizer.param_groups[0]['lr']} # learning rate + 'train/loss': tloss, + f'{val}/loss': vloss, + 'metrics/accuracy_top1': top1, + 'metrics/accuracy_top5': top5, + 'lr/0': optimizer.param_groups[0]['lr']} # learning rate logger.log_metrics(metrics, epoch) # Save model @@ -251,11 +251,11 @@ def train(opt, device): if RANK in {-1, 0} and final_epoch: LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' f"\nResults saved to {colorstr('bold', save_dir)}" - f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" - f"\nValidate: 
python classify/val.py --weights {best} --data {data_dir}" - f"\nExport: python export.py --weights {best} --include onnx" + f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' + f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' + f'\nExport: python export.py --weights {best} --include onnx' f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f"\nVisualize: https://netron.app\n") + f'\nVisualize: https://netron.app\n') # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels @@ -263,7 +263,7 @@ def train(opt, device): file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') # Log results - meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} + meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) logger.log_model(best, epochs, metadata=meta) @@ -310,7 +310,7 @@ def main(opt): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Parameters opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run @@ -328,6 +328,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index cc18aa934039..58723608bdbe 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1477,4 +1477,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/classify/val.py b/classify/val.py index 03ba817d5ea2..4edd5a1f5e9e 100644 --- a/classify/val.py +++ b/classify/val.py @@ -100,7 +100,7 @@ def run( pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' - desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" + desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}' bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): for images, labels in bar: @@ -123,14 +123,14 @@ def run( top1, top5 = acc.mean(0).tolist() if pbar: - pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" + pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}' if verbose: # all classes LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") for i, c in model.names.items(): acc_i = acc[targets == i] top1i, top5i = acc_i.mean(0).tolist() - LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}') # Print results t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image @@ -165,6 +165,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/detect.py b/detect.py index 2d13401f78bd..3f32d7a50d6b 100644 --- 
a/detect.py +++ b/detect.py @@ -256,6 +256,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/export.py b/export.py index 2c9fb77d17be..e8287704866a 100644 --- a/export.py +++ b/export.py @@ -120,7 +120,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' f = file.with_suffix('.torchscript') ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) @@ -195,13 +195,13 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): f = str(file).replace('.pt', f'_openvino_model{os.sep}') args = [ - "mo", - "--input_model", + 'mo', + '--input_model', str(file.with_suffix('.onnx')), - "--output_dir", + '--output_dir', f, - "--data_type", - ("FP16" if half else "FP32"),] + '--data_type', + ('FP16' if half else 'FP32'),] subprocess.run(args, check=True, env=os.environ) # export yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ -237,7 +237,7 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): if bits < 32: if MACOS: # quantization only supported on macOS with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) else: print(f'{prefix} quantization only supported on macOS, skipping...') @@ -293,7 +293,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument') profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) @@ -403,7 +403,7 @@ def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=c converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) tflite_model = converter.convert() - open(f, "wb").write(tflite_model) + open(f, 'wb').write(tflite_model) return f, None @@ -618,14 +618,14 @@ def run( det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg - s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ - "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' + s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \ + '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else '' LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nDetect: python {dir / 
('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f"\nVisualize: https://netron.app") + f'\nVisualize: https://netron.app') return f # return list of exported files/dirs @@ -667,6 +667,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/models/common.py b/models/common.py index 71340688d2e0..f416ddf25eb8 100644 --- a/models/common.py +++ b/models/common.py @@ -380,11 +380,11 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: - network.get_parameters()[0].set_layout(Layout("NCHW")) + network.get_parameters()[0].set_layout(Layout('NCHW')) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() - executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 + executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2 stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') @@ -431,7 +431,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) @@ -445,7 +445,7 @@ def gd_outputs(gd): gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, 'rb') as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) + frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate @@ -467,9 +467,9 @@ def gd_outputs(gd): output_details = interpreter.get_output_details() # outputs # load metadata with contextlib.suppress(zipfile.BadZipFile): - with zipfile.ZipFile(w, "r") as model: + with zipfile.ZipFile(w, 'r') as model: meta_file = model.namelist()[0] - meta = ast.literal_eval(model.read(meta_file).decode("utf-8")) + meta = ast.literal_eval(model.read(meta_file).decode('utf-8')) stride, names = int(meta['stride']), meta['names'] elif tfjs: # TF.js raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') @@ -491,7 +491,7 @@ def gd_outputs(gd): check_requirements('tritonclient[all]') from utils.triton import TritonRemoteModel model = TritonRemoteModel(url=w) - nhwc = model.runtime.startswith("tensorflow") + nhwc = model.runtime.startswith('tensorflow') else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -608,7 +608,7 @@ def _model_type(p='path/to/model.pt'): url = urlparse(p) # if url may be Triton inference server types = [s in Path(p).name for s in 
sf] types[8] &= not types[9] # tflite &= not edgetpu - triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc]) + triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) return types + [triton] @staticmethod diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index f73d1992ac19..07ec25ba264d 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index 7cbdb36b425c..a827814e1399 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/tf.py b/models/tf.py index 3f3dc8dbe7e7..8290cf2e57f5 100644 --- a/models/tf.py +++ b/models/tf.py @@ -356,7 +356,7 @@ class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() - assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" + assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2' self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False @@ -371,7 +371,7 @@ class TFConcat(keras.layers.Layer): # TF version of torch.concat() def __init__(self, dimension=1, w=None): super().__init__() - assert dimension == 1, "convert only NCHW to NHWC concat" + assert dimension == 1, 'convert only NCHW to NHWC concat' self.d = 3 def call(self, inputs): @@ -523,17 +523,17 @@ def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS selected_boxes = tf.gather(boxes, selected_inds) padded_boxes = tf.pad(selected_boxes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", + mode='CONSTANT', constant_values=0.0) selected_scores = tf.gather(scores_inp, selected_inds) padded_scores = tf.pad(selected_scores, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) selected_classes = tf.gather(class_inds, selected_inds) padded_classes = tf.pad(selected_classes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) valid_detections = tf.shape(selected_inds)[0] return padded_boxes, padded_scores, padded_classes, valid_detections @@ -603,6 +603,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/predict.py b/segment/predict.py index e9093baa1cc7..d82df89a85b0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -279,6 +279,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/train.py b/segment/train.py index 4914f9613a3d..2e71de131a8d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -139,7 +139,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or 
hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - logger.update_params({"batch_size": batch_size}) + logger.update_params({'batch_size': batch_size}) # loggers.on_params_update({"batch_size": batch_size}) # Optimizer @@ -341,10 +341,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Mosaic plots if plots: if ni < 3: - plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg') if ni == 10: files = sorted(save_dir.glob('train*.jpg')) - logger.log_images(files, "Mosaics", epoch) + logger.log_images(files, 'Mosaics', epoch) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -454,8 +454,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - logger.log_images(files, "Results", epoch + 1) - logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + logger.log_images(files, 'Results', epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1) torch.cuda.empty_cache() return results @@ -548,7 +548,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not opt.evolve: @@ -659,6 +659,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index cb1af34d9f17..cb52045bcb25 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -591,4 +591,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/segment/val.py b/segment/val.py index 665b540a5490..a7f95fe9b6fc 100644 --- a/segment/val.py +++ b/segment/val.py @@ -70,8 +70,8 @@ def save_one_json(predn, jdict, path, class_map, pred_masks): from pycocotools.mask import encode def single_encode(x): - rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] - rle["counts"] = rle["counts"].decode("utf-8") + rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] + rle['counts'] = rle['counts'].decode('utf-8') return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem @@ -105,7 +105,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: - gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes @@ 
-231,8 +231,8 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", - "mAP50", "mAP50-95)") + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', + 'mAP50', 'mAP50-95)') dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) @@ -343,7 +343,7 @@ def run( # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format - LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) + LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') @@ -369,7 +369,7 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -468,6 +468,6 @@ def main(opt): raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/train.py b/train.py index ccda0a7fe2e3..c4e3aac3561a 100644 --- a/train.py +++ b/train.py @@ -148,7 +148,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - loggers.on_params_update({"batch_size": batch_size}) + loggers.on_params_update({'batch_size': batch_size}) # Optimizer nbs = 64 # nominal batch size @@ -522,7 +522,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not opt.evolve: @@ -635,6 +635,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/tutorial.ipynb b/tutorial.ipynb index c320d699a940..32af68b57945 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -973,4 +973,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/__init__.py b/utils/__init__.py index 7bf3efe6b8c7..d158c5515a12 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -69,7 +69,7 @@ def notebook_init(verbose=True): if verbose: gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage("/") + total, used, free = shutil.disk_usage('/') display.clear_output() s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 02c2a79f5747..7687a2ba2665 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -89,7 
+89,7 @@ def exif_transpose(image): if method is not None: image = image.transpose(method) del exif[0x0112] - image.info["exif"] = exif.tobytes() + image.info['exif'] = exif.tobytes() return image @@ -212,11 +212,11 @@ def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): # Parse monitor shape monitor = self.sct.monitors[self.screen] - self.top = monitor["top"] if top is None else (monitor["top"] + top) - self.left = monitor["left"] if left is None else (monitor["left"] + left) - self.width = width or monitor["width"] - self.height = height or monitor["height"] - self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} def __iter__(self): return self @@ -224,7 +224,7 @@ def __iter__(self): def __next__(self): # mss screen capture: get raw pixels from the screen as np array im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR - s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' if self.transforms: im = self.transforms(im0) # transforms @@ -239,7 +239,7 @@ def __next__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: @@ -358,7 +358,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy - s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam if s == 0: assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' @@ -373,7 +373,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') self.threads[i].start() LOGGER.info('') # newline @@ -495,7 +495,7 @@ def __init__(self, # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: - d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" + d = f'Scanning {cache_path}... 
{nf} images, {nm + ne} backgrounds, {nc} corrupt' tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -598,8 +598,8 @@ def check_cache_ram(self, safety_margin=0.1, prefix=''): mem = psutil.virtual_memory() cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question if not cache: - LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, " - f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " + LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' f"{'caching images ✅' if cache else 'not caching images ⚠️'}") return cache @@ -607,7 +607,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f"{prefix}Scanning {path.parent / path.stem}..." + desc = f'{prefix}Scanning {path.parent / path.stem}...' with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, @@ -622,7 +622,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' pbar.close() if msgs: @@ -1063,7 +1063,7 @@ def __init__(self, path='coco128.yaml', autodownload=False): if zipped: data['path'] = data_dir except Exception as e: - raise Exception("error/HUB/dataset_stats/yaml_load") from e + raise Exception('error/HUB/dataset_stats/yaml_load') from e check_dataset(data, autodownload) # download dataset if missing self.hub_dir = Path(data['path'] + '-hub') @@ -1188,7 +1188,7 @@ def __getitem__(self, i): else: # read image im = cv2.imread(f) # BGR if self.album_transforms: - sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] else: sample = self.torch_transforms(im) return sample, j diff --git a/utils/downloads.py b/utils/downloads.py index e739919540b4..643b529fba3b 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -77,7 +77,7 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): file.unlink() # remove partial downloads - LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") + LOGGER.info(f'ERROR: {assert_msg}\n{error_msg}') LOGGER.info('') diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 773ad8932967..952e5dcb90fa 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -7,13 +7,13 @@ import requests -DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" -IMAGE = "zidane.jpg" +DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s' +IMAGE = 'zidane.jpg' # Read image -with open(IMAGE, "rb") as f: +with open(IMAGE, 'rb') as f: image_data = f.read() -response = requests.post(DETECTION_URL, files={"image": image_data}).json() +response = requests.post(DETECTION_URL, files={'image': image_data}).json() pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py 
b/utils/flask_rest_api/restapi.py index 8482435c861e..9258b1a68860 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -13,36 +13,36 @@ app = Flask(__name__) models = {} -DETECTION_URL = "/v1/object-detection/" +DETECTION_URL = '/v1/object-detection/' -@app.route(DETECTION_URL, methods=["POST"]) +@app.route(DETECTION_URL, methods=['POST']) def predict(model): - if request.method != "POST": + if request.method != 'POST': return - if request.files.get("image"): + if request.files.get('image'): # Method 1 # with request.files["image"] as f: # im = Image.open(io.BytesIO(f.read())) # Method 2 - im_file = request.files["image"] + im_file = request.files['image'] im_bytes = im_file.read() im = Image.open(io.BytesIO(im_bytes)) if model in models: results = models[model](im, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient="records") + return results.pandas().xyxy[0].to_json(orient='records') -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") - parser.add_argument("--port", default=5000, type=int, help="port number") +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model') + parser.add_argument('--port', default=5000, type=int, help='port number') parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s') opt = parser.parse_args() for m in opt.model: - models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) + models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True) - app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat + app.run(host='0.0.0.0', port=opt.port) # debug=True causes Restarting with stat diff --git a/utils/general.py b/utils/general.py index a6af4f3216dd..b6efe6bb8732 100644 --- a/utils/general.py +++ b/utils/general.py @@ -38,7 +38,7 @@ import yaml from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize, curl_download +from utils.downloads import curl_download, gsutil_getsize from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() @@ -90,11 +90,11 @@ def is_kaggle(): def is_docker() -> bool: """Check if the process runs inside a docker container.""" - if Path("/.dockerenv").exists(): + if Path('/.dockerenv').exists(): return True try: # check if docker is in control groups - with open("/proc/self/cgroup") as file: - return any("docker" in line for line in file) + with open('/proc/self/cgroup') as file: + return any('docker' in line for line in file) except OSError: return False @@ -113,7 +113,7 @@ def is_writeable(dir, test=False): return False -LOGGING_NAME = "yolov5" +LOGGING_NAME = 'yolov5' def set_logging(name=LOGGING_NAME, verbose=True): @@ -121,21 +121,21 @@ def set_logging(name=LOGGING_NAME, verbose=True): rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR logging.config.dictConfig({ - "version": 1, - "disable_existing_loggers": False, - "formatters": { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { name: { - "format": "%(message)s"}}, - "handlers": { + 'format': '%(message)s'}}, + 'handlers': { name: { - "class": "logging.StreamHandler", - "formatter": name, - "level": level,}}, - "loggers": { + 'class': 'logging.StreamHandler', + 'formatter': name, + 'level': level,}}, + 
'loggers': { name: { - "level": level, - "handlers": [name], - "propagate": False,}}}) + 'level': level, + 'handlers': [name], + 'propagate': False,}}}) set_logging(LOGGING_NAME) # run before defining LOGGER @@ -218,7 +218,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def methods(instance): # Get class/instance methods - return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith('__')] def print_args(args: Optional[dict] = None, show_file=True, show_func=False): @@ -299,7 +299,7 @@ def check_online(): def run_once(): # Check once try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + socket.create_connection(('1.1.1.1', 443), 5) # check host accessibility return True except OSError: return False @@ -386,7 +386,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta check_python() # check python version if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() - assert file.exists(), f"{prefix} {file} not found, check failed." + assert file.exists(), f'{prefix} {file} not found, check failed.' with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] elif isinstance(requirements, str): @@ -450,7 +450,7 @@ def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): - assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}' def check_yaml(file, suffix=('.yaml', '.yml')): @@ -556,8 +556,8 @@ def check_dataset(data, autodownload=True): else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" - LOGGER.info(f"Dataset download {s}") + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌' + LOGGER.info(f'Dataset download {s}') check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts return data # dictionary @@ -673,7 +673,7 @@ def make_divisible(x, divisor): def clean_str(s): # Cleans a string by replacing special characters with underscore _ - return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) def one_cycle(y1=0.0, y2=1.0, steps=100): diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 1e7f38e0d677..9de1f226233c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -121,8 +121,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # Comet if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): - run_id = self.opt.resume.split("/")[-1] + if isinstance(self.opt.resume, str) and self.opt.resume.startswith('comet://'): + run_id = self.opt.resume.split('/')[-1] self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) else: @@ -158,7 +158,7 @@ def on_pretrain_routine_end(self, labels, names): plot_labels(labels, names, self.save_dir) paths = self.save_dir.glob('*labels*.jpg') # training labels if self.wandb: - self.wandb.log({"Labels": 
[wandb.Image(str(x), caption=x.name) for x in paths]}) + self.wandb.log({'Labels': [wandb.Image(str(x), caption=x.name) for x in paths]}) # if self.clearml: # pass # ClearML saves these images automatically using hooks if self.comet_logger: @@ -212,7 +212,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({'Validation': [wandb.Image(str(f), caption=f.name) for f in files]}) if self.clearml: self.clearml.log_debug_samples(files, title='Validation') @@ -279,7 +279,7 @@ def on_train_end(self, last, best, epoch, results): if self.wandb: self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({'Results': [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: wandb.log_artifact(str(best if best.exists() else last), @@ -329,7 +329,7 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')): if wandb and 'wandb' in self.include: self.wandb = wandb.init(project=web_project_name(str(opt.project)), - name=None if opt.name == "exp" else opt.name, + name=None if opt.name == 'exp' else opt.name, config=opt) else: self.wandb = None @@ -370,12 +370,12 @@ def log_graph(self, model, imgsz=(640, 640)): def log_model(self, model_path, epoch=0, metadata={}): # Log model to all loggers if self.wandb: - art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art = wandb.Artifact(name=f'run_{wandb.run.id}_model', type='model', metadata=metadata) art.add_file(str(model_path)) wandb.log_artifact(art) def update_params(self, params): - # Update the paramters logged + # Update the parameters logged if self.wandb: wandb.run.config.update(params, allow_val_change=True) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 3457727a96a4..2764abe90da8 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -25,7 +25,7 @@ def construct_dataset(clearml_info_string): dataset_root_path = Path(dataset.get_local_copy()) # We'll search for the yaml file definition in the dataset - yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) + yaml_filenames = list(glob.glob(str(dataset_root_path / '*.yaml')) + glob.glob(str(dataset_root_path / '*.yml'))) if len(yaml_filenames) > 1: raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' 'the dataset definition this way.') @@ -100,7 +100,7 @@ def __init__(self, opt, hyp): self.task.connect(opt, name='Args') # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent - self.task.set_base_docker("ultralytics/yolov5:latest", + self.task.set_base_docker('ultralytics/yolov5:latest', docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', docker_setup_bash_script='pip install clearml') @@ -150,7 +150,7 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres class_name = class_names[int(class_nr)] confidence_percentage = round(float(conf) * 100, 2) - label = f"{class_name}: {confidence_percentage}%" + label = f'{class_name}: 
{confidence_percentage}%' if conf > conf_threshold: annotator.rectangle(box.cpu().numpy(), outline=color) diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index b0318f88d6a6..d4599841c9fc 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -17,7 +17,7 @@ # Project Configuration config = comet_ml.config.get_config() - COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") + COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') except (ModuleNotFoundError, ImportError): comet_ml = None COMET_PROJECT_NAME = None @@ -31,32 +31,32 @@ from utils.general import check_dataset, scale_boxes, xywh2xyxy from utils.metrics import box_iou -COMET_PREFIX = "comet://" +COMET_PREFIX = 'comet://' -COMET_MODE = os.getenv("COMET_MODE", "online") +COMET_MODE = os.getenv('COMET_MODE', 'online') # Model Saving Settings -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') # Dataset Artifact Settings -COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" +COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true' # Evaluation Settings -COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" -COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" -COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) +COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true' +COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true' +COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100)) # Confusion Matrix Settings -CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) -IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) +CONF_THRES = float(os.getenv('CONF_THRES', 0.001)) +IOU_THRES = float(os.getenv('IOU_THRES', 0.6)) # Batch Logging Settings -COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" -COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) -COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) -COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" +COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true' +COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true' -RANK = int(os.getenv("RANK", -1)) +RANK = int(os.getenv('RANK', -1)) to_pil = T.ToPILImage() @@ -66,7 +66,7 @@ class CometLogger: with Comet """ - def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwargs) -> None: self.job_type = job_type self.opt = opt self.hyp = hyp @@ -87,52 +87,52 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar # Default parameters to pass to Experiment objects self.default_experiment_kwargs = { - "log_code": False, - "log_env_gpu": True, - "log_env_cpu": True, - "project_name": COMET_PROJECT_NAME,} + 'log_code': False, + 'log_env_gpu': True, + 'log_env_cpu': True, + 
'project_name': COMET_PROJECT_NAME,} self.default_experiment_kwargs.update(experiment_kwargs) self.experiment = self._get_experiment(self.comet_mode, run_id) self.data_dict = self.check_dataset(self.opt.data) - self.class_names = self.data_dict["names"] - self.num_classes = self.data_dict["nc"] + self.class_names = self.data_dict['names'] + self.num_classes = self.data_dict['nc'] self.logged_images_count = 0 self.max_images = COMET_MAX_IMAGE_UPLOADS if run_id is None: - self.experiment.log_other("Created from", "YOLOv5") + self.experiment.log_other('Created from', 'YOLOv5') if not isinstance(self.experiment, comet_ml.OfflineExperiment): - workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] + workspace, project_name, experiment_id = self.experiment.url.split('/')[-3:] self.experiment.log_other( - "Run Path", - f"{workspace}/{project_name}/{experiment_id}", + 'Run Path', + f'{workspace}/{project_name}/{experiment_id}', ) self.log_parameters(vars(opt)) self.log_parameters(self.opt.hyp) self.log_asset_data( self.opt.hyp, - name="hyperparameters.json", - metadata={"type": "hyp-config-file"}, + name='hyperparameters.json', + metadata={'type': 'hyp-config-file'}, ) self.log_asset( - f"{self.opt.save_dir}/opt.yaml", - metadata={"type": "opt-config-file"}, + f'{self.opt.save_dir}/opt.yaml', + metadata={'type': 'opt-config-file'}, ) self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX - if hasattr(self.opt, "conf_thres"): + if hasattr(self.opt, 'conf_thres'): self.conf_thres = self.opt.conf_thres else: self.conf_thres = CONF_THRES - if hasattr(self.opt, "iou_thres"): + if hasattr(self.opt, 'iou_thres'): self.iou_thres = self.opt.iou_thres else: self.iou_thres = IOU_THRES - self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + self.log_parameters({'val_iou_threshold': self.iou_thres, 'val_conf_threshold': self.conf_thres}) self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: @@ -147,22 +147,22 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS self.experiment.log_others({ - "comet_mode": COMET_MODE, - "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, - "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, - "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, - "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, - "comet_model_name": COMET_MODEL_NAME,}) + 'comet_mode': COMET_MODE, + 'comet_max_image_uploads': COMET_MAX_IMAGE_UPLOADS, + 'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS, + 'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS, + 'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX, + 'comet_model_name': COMET_MODEL_NAME,}) # Check if running the Experiment with the Comet Optimizer - if hasattr(self.opt, "comet_optimizer_id"): - self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) - self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) - self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) - self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) + if hasattr(self.opt, 'comet_optimizer_id'): + self.experiment.log_other('optimizer_id', self.opt.comet_optimizer_id) + self.experiment.log_other('optimizer_objective', self.opt.comet_optimizer_objective) + self.experiment.log_other('optimizer_metric', self.opt.comet_optimizer_metric) + 
self.experiment.log_other('optimizer_parameters', json.dumps(self.hyp)) def _get_experiment(self, mode, experiment_id=None): - if mode == "offline": + if mode == 'offline': if experiment_id is not None: return comet_ml.ExistingOfflineExperiment( previous_experiment=experiment_id, @@ -182,11 +182,11 @@ def _get_experiment(self, mode, experiment_id=None): return comet_ml.Experiment(**self.default_experiment_kwargs) except ValueError: - logger.warning("COMET WARNING: " - "Comet credentials have not been set. " - "Comet will default to offline logging. " - "Please set your credentials to enable online logging.") - return self._get_experiment("offline", experiment_id) + logger.warning('COMET WARNING: ' + 'Comet credentials have not been set. ' + 'Comet will default to offline logging. ' + 'Please set your credentials to enable online logging.') + return self._get_experiment('offline', experiment_id) return @@ -210,12 +210,12 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): return model_metadata = { - "fitness_score": fitness_score[-1], - "epochs_trained": epoch + 1, - "save_period": opt.save_period, - "total_epochs": opt.epochs,} + 'fitness_score': fitness_score[-1], + 'epochs_trained': epoch + 1, + 'save_period': opt.save_period, + 'total_epochs': opt.epochs,} - model_files = glob.glob(f"{path}/*.pt") + model_files = glob.glob(f'{path}/*.pt') for model_path in model_files: name = Path(model_path).name @@ -232,12 +232,12 @@ def check_dataset(self, data_file): data_config = yaml.safe_load(f) if data_config['path'].startswith(COMET_PREFIX): - path = data_config['path'].replace(COMET_PREFIX, "") + path = data_config['path'].replace(COMET_PREFIX, '') data_dict = self.download_dataset_artifact(path) return data_dict - self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) + self.log_asset(self.opt.data, metadata={'type': 'data-config-file'}) return check_dataset(data_file) @@ -253,8 +253,8 @@ def log_predictions(self, image, labelsn, path, shape, predn): filtered_detections = detections[mask] filtered_labels = labelsn[mask] - image_id = path.split("/")[-1].split(".")[0] - image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" + image_id = path.split('/')[-1].split('.')[0] + image_name = f'{image_id}_curr_epoch_{self.experiment.curr_epoch}' if image_name not in self.logged_image_names: native_scale_image = PIL.Image.open(path) self.log_image(native_scale_image, name=image_name) @@ -263,22 +263,22 @@ def log_predictions(self, image, labelsn, path, shape, predn): metadata = [] for cls, *xyxy in filtered_labels.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}-gt", - "score": 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}-gt', + 'score': 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) for *xyxy, conf, cls in filtered_detections.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}", - "score": conf * 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}', + 'score': conf * 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) self.metadata_dict[image_name] = metadata self.logged_images_count += 1 @@ -305,35 +305,35 @@ def preprocess_prediction(self, image, labels, shape, pred): return predn, labelsn def add_assets_to_artifact(self, artifact, path, asset_path, split): - img_paths = 
sorted(glob.glob(f"{asset_path}/*")) + img_paths = sorted(glob.glob(f'{asset_path}/*')) label_paths = img2label_paths(img_paths) for image_file, label_file in zip(img_paths, label_paths): image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) try: - artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) - artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) + artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split}) except ValueError as e: logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') - logger.error(f"COMET ERROR: {e}") + logger.error(f'COMET ERROR: {e}') continue return artifact def upload_dataset_artifact(self): - dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") - path = str((ROOT / Path(self.data_dict["path"])).resolve()) + dataset_name = self.data_dict.get('dataset_name', 'yolov5-dataset') + path = str((ROOT / Path(self.data_dict['path'])).resolve()) metadata = self.data_dict.copy() - for key in ["train", "val", "test"]: + for key in ['train', 'val', 'test']: split_path = metadata.get(key) if split_path is not None: - metadata[key] = split_path.replace(path, "") + metadata[key] = split_path.replace(path, '') - artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) + artifact = comet_ml.Artifact(name=dataset_name, artifact_type='dataset', metadata=metadata) for key in metadata.keys(): - if key in ["train", "val", "test"]: + if key in ['train', 'val', 'test']: if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): continue @@ -352,13 +352,13 @@ def download_dataset_artifact(self, artifact_path): metadata = logged_artifact.metadata data_dict = metadata.copy() - data_dict["path"] = artifact_save_dir + data_dict['path'] = artifact_save_dir - metadata_names = metadata.get("names") + metadata_names = metadata.get('names') if type(metadata_names) == dict: - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()} elif type(metadata_names) == list: - data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} else: raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" @@ -366,13 +366,13 @@ def download_dataset_artifact(self, artifact_path): return data_dict def update_data_paths(self, data_dict): - path = data_dict.get("path", "") + path = data_dict.get('path', '') - for split in ["train", "val", "test"]: + for split in ['train', 'val', 'test']: if data_dict.get(split): split_path = data_dict.get(split) - data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ - f"{path}/{x}" for x in split_path]) + data_dict[split] = (f'{path}/{split_path}' if isinstance(split, str) else [ + f'{path}/{x}' for x in split_path]) return data_dict @@ -413,11 +413,11 @@ def on_train_batch_end(self, log_dict, step): def on_train_end(self, files, save_dir, last, best, epoch, results): if self.comet_log_predictions: curr_epoch = self.experiment.curr_epoch - self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) + self.experiment.log_asset_data(self.metadata_dict, 'image-metadata.json', epoch=curr_epoch) for f in files: - self.log_asset(f, metadata={"epoch": epoch}) - self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) + self.log_asset(f, metadata={'epoch': epoch}) + self.log_asset(f'{save_dir}/results.csv', metadata={'epoch': epoch}) if not self.opt.evolve: model_path = str(best if best.exists() else last) @@ -481,7 +481,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) if self.comet_log_confusion_matrix: epoch = self.experiment.curr_epoch class_names = list(self.class_names.values()) - class_names.append("background") + class_names.append('background') num_classes = len(class_names) self.experiment.log_confusion_matrix( @@ -491,7 +491,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) epoch=epoch, column_label='Actual Category', row_label='Predicted Category', - file_name=f"confusion-matrix-epoch-{epoch}.json", + file_name=f'confusion-matrix-epoch-{epoch}.json', ) def on_fit_epoch_end(self, result, epoch): diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index 3cbd45156b57..27600761ad28 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -11,28 +11,28 @@ logger = logging.getLogger(__name__) -COMET_PREFIX = "comet://" -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") -COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") +COMET_PREFIX = 'comet://' +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt') def download_model_checkpoint(opt, experiment): - model_dir = f"{opt.project}/{experiment.name}" + model_dir = f'{opt.project}/{experiment.name}' os.makedirs(model_dir, exist_ok=True) model_name = COMET_MODEL_NAME model_asset_list = experiment.get_model_asset_list(model_name) if len(model_asset_list) == 0: - logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") + logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}') return model_asset_list = sorted( model_asset_list, - key=lambda x: x["step"], + key=lambda x: x['step'], reverse=True, ) - logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} + logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list} resource_url = urlparse(opt.weights) checkpoint_filename = resource_url.query @@ -44,22 +44,22 @@ def 
download_model_checkpoint(opt, experiment): checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME if asset_id is None: - logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") + logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment') return try: - logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") + logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}') asset_filename = checkpoint_filename - model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - model_download_path = f"{model_dir}/{asset_filename}" - with open(model_download_path, "wb") as f: + model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) + model_download_path = f'{model_dir}/{asset_filename}' + with open(model_download_path, 'wb') as f: f.write(model_binary) opt.weights = model_download_path except Exception as e: - logger.warning("COMET WARNING: Unable to download checkpoint from Comet") + logger.warning('COMET WARNING: Unable to download checkpoint from Comet') logger.exception(e) @@ -75,9 +75,9 @@ def set_opt_parameters(opt, experiment): resume_string = opt.resume for asset in asset_list: - if asset["fileName"] == "opt.yaml": - asset_id = asset["assetId"] - asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + if asset['fileName'] == 'opt.yaml': + asset_id = asset['assetId'] + asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) opt_dict = yaml.safe_load(asset_binary) for key, value in opt_dict.items(): setattr(opt, key, value) @@ -85,11 +85,11 @@ def set_opt_parameters(opt, experiment): # Save hyperparameters to YAML file # Necessary to pass checks in training script - save_dir = f"{opt.project}/{experiment.name}" + save_dir = f'{opt.project}/{experiment.name}' os.makedirs(save_dir, exist_ok=True) - hyp_yaml_path = f"{save_dir}/hyp.yaml" - with open(hyp_yaml_path, "w") as f: + hyp_yaml_path = f'{save_dir}/hyp.yaml' + with open(hyp_yaml_path, 'w') as f: yaml.dump(opt.hyp, f) opt.hyp = hyp_yaml_path @@ -113,7 +113,7 @@ def check_comet_weights(opt): if opt.weights.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.weights) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) download_model_checkpoint(opt, experiment) return True @@ -140,7 +140,7 @@ def check_comet_resume(opt): if opt.resume.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.resume) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) set_opt_parameters(opt, experiment) download_model_checkpoint(opt, experiment) diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index 7dd5c92e8de1..fc49115c1358 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -21,7 +21,7 @@ # Project Configuration config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") +COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') def get_args(known=False): @@ -68,30 +68,30 @@ def get_args(known=False): parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') # Comet Arguments - 
parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") - parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") - parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") - parser.add_argument("--comet_optimizer_workers", + parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') + parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') + parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.') + parser.add_argument('--comet_optimizer_workers', type=int, default=1, - help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") + help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') return parser.parse_known_args()[0] if known else parser.parse_args() def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} + hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']} opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get("batch_size") - opt.epochs = parameters.get("epochs") + opt.batch_size = parameters.get('batch_size') + opt.epochs = parameters.get('epochs') device = select_device(opt.device, batch_size=opt.batch_size) train(hyp_dict, opt, device, callbacks=Callbacks()) -if __name__ == "__main__": +if __name__ == '__main__': opt = get_args(known=True) opt.weights = str(opt.weights) @@ -99,7 +99,7 @@ def run(parameters, opt): opt.data = str(opt.data) opt.project = str(opt.project) - optimizer_id = os.getenv("COMET_OPTIMIZER_ID") + optimizer_id = os.getenv('COMET_OPTIMIZER_ID') if optimizer_id is None: with open(opt.comet_optimizer_config) as f: optimizer_config = json.load(f) @@ -110,9 +110,9 @@ def run(parameters, opt): opt.comet_optimizer_id = optimizer.id status = optimizer.status() - opt.comet_optimizer_objective = status["spec"]["objective"] - opt.comet_optimizer_metric = status["spec"]["metric"] + opt.comet_optimizer_objective = status['spec']['objective'] + opt.comet_optimizer_metric = status['spec']['metric'] - logger.info("COMET INFO: Starting Hyperparameter Sweep") + logger.info('COMET INFO: Starting Hyperparameter Sweep') for parameter in optimizer.get_parameters(): - run(parameter["parameters"], opt) + run(parameter['parameters'], opt) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 6bc2ec510d0a..c8ab38197381 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -17,7 +17,7 @@ sys.path.append(str(ROOT)) # add ROOT to PATH RANK = int(os.getenv('RANK', -1)) DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \ - f"See supported integrations at https://github.com/ultralytics/yolov5#integrations." + f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' 
try: import wandb @@ -65,7 +65,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.data_dict = None if self.wandb: self.wandb_run = wandb.init(config=opt, - resume="allow", + resume='allow', project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, entity=opt.entity, name=opt.name if opt.name != 'exp' else None, @@ -97,7 +97,7 @@ def setup_training(self, opt): if isinstance(opt.resume, str): model_dir, _ = self.download_model_artifact(opt) if model_dir: - self.weights = Path(model_dir) / "last.pt" + self.weights = Path(model_dir) / 'last.pt' config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ @@ -131,7 +131,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") + LOGGER.info(f'Saving model artifact on epoch {epoch + 1}') def val_one_image(self, pred, predn, path, names, im): pass @@ -160,7 +160,7 @@ def end_epoch(self): wandb.log(self.log_dict) except BaseException as e: LOGGER.info( - f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" + f'An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}' ) self.wandb_run.finish() self.wandb_run = None diff --git a/utils/metrics.py b/utils/metrics.py index 7fb077774384..95f364c23f34 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -28,7 +28,7 @@ def smooth(y, f=0.05): return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. 
# Arguments @@ -194,14 +194,14 @@ def plot(self, normalize=True, save_dir='', names=()): nc, nn = self.nc, len(names) # number of classes, names sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels - ticklabels = (names + ['background']) if labels else "auto" + ticklabels = (names + ['background']) if labels else 'auto' with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered sn.heatmap(array, ax=ax, annot=nc < 30, annot_kws={ - "size": 8}, + 'size': 8}, cmap='Blues', fmt='.2f', square=True, @@ -331,7 +331,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): ax.set_ylabel('Precision') ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title('Precision-Recall Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) @@ -354,7 +354,7 @@ def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confi ax.set_ylabel(ylabel) ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title(f'{ylabel}-Confidence Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) diff --git a/utils/plots.py b/utils/plots.py index f84aed9fb5c7..24c618c80b59 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -450,7 +450,7 @@ def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() if verbose: - LOGGER.info(f"Saving {f}") + LOGGER.info(f'Saving {f}') if labels is not None: LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) if pred is not None: diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index d66b36115e3f..097a5d5cb058 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -95,7 +95,7 @@ def __init__( stride=32, pad=0, min_items=0, - prefix="", + prefix='', downsample_ratio=1, overlap=False, ): @@ -116,7 +116,7 @@ def __getitem__(self, index): shapes = None # MixUp augmentation - if random.random() < hyp["mixup"]: + if random.random() < hyp['mixup']: img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) else: @@ -147,11 +147,11 @@ def __getitem__(self, index): img, labels, segments = random_perspective(img, labels, segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"]) + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) nl = len(labels) # number of labels if nl: @@ -177,17 +177,17 @@ def __getitem__(self, index): nl = len(labels) # update after albumentations # HSV color-space - augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Flip up-down - if random.random() < hyp["flipud"]: + if random.random() < hyp['flipud']: img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] masks = torch.flip(masks, dims=[1]) # Flip left-right - if random.random() < hyp["fliplr"]: + if random.random() < hyp['fliplr']: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] @@ -251,15 +251,15 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) 
# replicate # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) img4, labels4, segments4 = random_perspective(img4, labels4, segments4, - degrees=self.hyp["degrees"], - translate=self.hyp["translate"], - scale=self.hyp["scale"], - shear=self.hyp["shear"], - perspective=self.hyp["perspective"], + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], border=self.mosaic_border) # border to remove return img4, labels4, segments4 diff --git a/utils/segment/loss.py b/utils/segment/loss.py index b45b2c27e0a0..2a8a4c680f6f 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -83,7 +83,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model # Mask regression if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample - masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) for bi in b.unique(): @@ -101,10 +101,10 @@ def __call__(self, preds, targets, masks): # predictions, targets, model if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp["box"] - lobj *= self.hyp["obj"] - lcls *= self.hyp["cls"] - lseg *= self.hyp["box"] / bs + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + lseg *= self.hyp['box'] / bs loss = lbox + lobj + lcls + lseg return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() @@ -112,7 +112,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): # Mask loss for one image pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) - loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() def build_targets(self, p, targets): diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index b09ce23fb9e3..c9f137e38ead 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -21,7 +21,7 @@ def ap_per_class_box_and_mask( pred_cls, target_cls, plot=False, - save_dir=".", + save_dir='.', names=(), ): """ @@ -37,7 +37,7 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Box")[2:] + prefix='Box')[2:] results_masks = ap_per_class(tp_m, conf, pred_cls, @@ -45,21 +45,21 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Mask")[2:] + prefix='Mask')[2:] results = { - "boxes": { - "p": results_boxes[0], - "r": results_boxes[1], - "ap": results_boxes[3], - "f1": results_boxes[2], - "ap_class": results_boxes[4]}, - "masks": { - "p": results_masks[0], - "r": results_masks[1], - "ap": results_masks[3], - "f1": results_masks[2], - "ap_class": results_masks[4]}} + 'boxes': { + 'p': results_boxes[0], + 'r': results_boxes[1], + 'ap': results_boxes[3], + 'f1': results_boxes[2], + 'ap_class': results_boxes[4]}, + 'masks': { + 'p': results_masks[0], + 'r': results_masks[1], + 'ap': results_masks[3], + 'f1': 
results_masks[2], + 'ap_class': results_masks[4]}} return results @@ -159,8 +159,8 @@ def update(self, results): Args: results: Dict{'boxes': Dict{}, 'masks': Dict{}} """ - self.metric_box.update(list(results["boxes"].values())) - self.metric_mask.update(list(results["masks"].values())) + self.metric_box.update(list(results['boxes'].values())) + self.metric_mask.update(list(results['masks'].values())) def mean_results(self): return self.metric_box.mean_results() + self.metric_mask.mean_results() @@ -178,33 +178,33 @@ def ap_class_index(self): KEYS = [ - "train/box_loss", - "train/seg_loss", # train loss - "train/obj_loss", - "train/cls_loss", - "metrics/precision(B)", - "metrics/recall(B)", - "metrics/mAP_0.5(B)", - "metrics/mAP_0.5:0.95(B)", # metrics - "metrics/precision(M)", - "metrics/recall(M)", - "metrics/mAP_0.5(M)", - "metrics/mAP_0.5:0.95(M)", # metrics - "val/box_loss", - "val/seg_loss", # val loss - "val/obj_loss", - "val/cls_loss", - "x/lr0", - "x/lr1", - "x/lr2",] + 'train/box_loss', + 'train/seg_loss', # train loss + 'train/obj_loss', + 'train/cls_loss', + 'metrics/precision(B)', + 'metrics/recall(B)', + 'metrics/mAP_0.5(B)', + 'metrics/mAP_0.5:0.95(B)', # metrics + 'metrics/precision(M)', + 'metrics/recall(M)', + 'metrics/mAP_0.5(M)', + 'metrics/mAP_0.5:0.95(M)', # metrics + 'val/box_loss', + 'val/seg_loss', # val loss + 'val/obj_loss', + 'val/cls_loss', + 'x/lr0', + 'x/lr1', + 'x/lr2',] BEST_KEYS = [ - "best/epoch", - "best/precision(B)", - "best/recall(B)", - "best/mAP_0.5(B)", - "best/mAP_0.5:0.95(B)", - "best/precision(M)", - "best/recall(M)", - "best/mAP_0.5(M)", - "best/mAP_0.5:0.95(M)",] + 'best/epoch', + 'best/precision(B)', + 'best/recall(B)', + 'best/mAP_0.5(B)', + 'best/mAP_0.5:0.95(B)', + 'best/precision(M)', + 'best/recall(M)', + 'best/mAP_0.5(M)', + 'best/mAP_0.5:0.95(M)',] diff --git a/utils/segment/plots.py b/utils/segment/plots.py index 9b90900b3772..3ba097624fcd 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -108,13 +108,13 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' annotator.im.save(fname) # save -def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): +def plot_results_with_masks(file='path/to/results.csv', dir='', best=True): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) ax = ax.ravel() - files = list(save_dir.glob("results*.csv")) - assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
for f in files: try: data = pd.read_csv(f) @@ -125,19 +125,19 @@ def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): y = data.values[:, j] # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=2) if best: # best - ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + ax[i].scatter(index, y[index], color='r', label=f'best:{index}', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[index], 5)}') else: # last - ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + ax[i].scatter(x[-1], y[-1], color='r', label='last', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[-1], 5)}') # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print(f"Warning: Plotting error for {f}: {e}") + print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() - fig.savefig(save_dir / "results.png", dpi=200) + fig.savefig(save_dir / 'results.png', dpi=200) plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 77549b005ceb..5b67b3fa7a06 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -291,7 +291,7 @@ def model_info(model, verbose=False, imgsz=640): fs = '' name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' - LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + LOGGER.info(f'{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}') def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) @@ -342,7 +342,7 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " - f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias") + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias') return optimizer diff --git a/utils/triton.py b/utils/triton.py index a94ef0ad197d..25928021477e 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -21,7 +21,7 @@ def __init__(self, url: str): """ parsed_url = urlparse(url) - if parsed_url.scheme == "grpc": + if parsed_url.scheme == 'grpc': from tritonclient.grpc import InferenceServerClient, InferInput self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client @@ -31,7 +31,7 @@ def __init__(self, url: str): def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] else: from tritonclient.http import InferenceServerClient, InferInput @@ -43,14 +43,14 @@ def create_input_placeholders() -> typing.List[InferInput]: def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], 
[int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] self._create_input_placeholders_fn = create_input_placeholders @property def runtime(self): """Returns the model runtime""" - return self.metadata.get("backend", self.metadata.get("platform")) + return self.metadata.get('backend', self.metadata.get('platform')) def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: """ Invokes the model. Parameters can be provided via args or kwargs. @@ -68,14 +68,14 @@ def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[t def _create_inputs(self, *args, **kwargs): args_len, kwargs_len = len(args), len(kwargs) if not args_len and not kwargs_len: - raise RuntimeError("No inputs provided.") + raise RuntimeError('No inputs provided.') if args_len and kwargs_len: - raise RuntimeError("Cannot specify args and kwargs at the same time") + raise RuntimeError('Cannot specify args and kwargs at the same time') placeholders = self._create_input_placeholders_fn() if args_len: if args_len != len(placeholders): - raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.') for input, value in zip(placeholders, args): input.set_data_from_numpy(value.cpu().numpy()) else: diff --git a/val.py b/val.py index 7829afb68b79..d4073b42fe78 100644 --- a/val.py +++ b/val.py @@ -304,7 +304,7 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -404,6 +404,6 @@ def main(opt): raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt)
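Beyond the quote-style normalization that makes up most of the hunks above, a few of the touched code paths are worth illustrating. First, the offline fallback in _get_experiment (utils/loggers/comet/__init__.py): when Comet credentials are missing, comet_ml.Experiment raises ValueError and the logger retries in offline mode. A condensed sketch, simplified to ignore the existing-experiment resume branches the real method also handles:

import logging

import comet_ml

logger = logging.getLogger(__name__)


def get_experiment(mode, **experiment_kwargs):
    # Simplified stand-in for CometLogger._get_experiment; the kwargs are whatever
    # Experiment/OfflineExperiment accept (project_name, etc.).
    if mode == 'offline':
        return comet_ml.OfflineExperiment(**experiment_kwargs)
    try:
        return comet_ml.Experiment(**experiment_kwargs)
    except ValueError:
        logger.warning('COMET WARNING: Comet credentials have not been set. '
                       'Comet will default to offline logging. '
                       'Please set your credentials to enable online logging.')
        return get_experiment('offline', **experiment_kwargs)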
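check_comet_weights and check_comet_resume both split a comet:// URI with urlparse before fetching the checkpoint: netloc plus path identify the experiment, the query string names the checkpoint file. A small worked example of that split; the URI below is made up for illustration:

from urllib.parse import urlparse

COMET_PREFIX = 'comet://'

weights = 'comet://workspace/project/experiment_id?last.pt'  # hypothetical example URI
if weights.startswith(COMET_PREFIX):
    resource = urlparse(weights)
    experiment_path = f'{resource.netloc}{resource.path}'  # 'workspace/project/experiment_id'
    checkpoint_filename = resource.query or 'last.pt'  # empty query falls back to the default checkpoint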
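download_dataset_artifact accepts the dataset 'names' metadata either as a dict or as a list and normalizes it to a {class_index: name} mapping. A hedged sketch of that normalization with made-up class names; note this sketch uses isinstance checks and raises a TypeError, whereas the original compares type() and raises a bare string:

metadata_names = ['person', 'bicycle', 'car']  # made-up example; may also arrive as {0: 'person', ...}
if isinstance(metadata_names, dict):
    names = {int(k): v for k, v in metadata_names.items()}
elif isinstance(metadata_names, list):
    names = dict(enumerate(metadata_names))  # {0: 'person', 1: 'bicycle', 2: 'car'}
else:
    raise TypeError("Invalid 'names' field in dataset yaml file. Please use a list or dictionary")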
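Finally, utils/triton.py builds its input placeholders from the model metadata reported by the server, using the same comprehension for both the GRPC and HTTP clients. A sketch with a fabricated metadata payload (assumes the tritonclient package is installed):

from tritonclient.http import InferInput

# Fabricated example of the 'inputs' section of Triton model metadata.
metadata = {'inputs': [{'name': 'images', 'shape': ['1', '3', '640', '640'], 'datatype': 'FP32'}]}

placeholders = [
    InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in metadata['inputs']]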