diff --git a/docs/en/datasets/detect/coco.md b/docs/en/datasets/detect/coco.md index 5fd170053..65d5c949c 100644 --- a/docs/en/datasets/detect/coco.md +++ b/docs/en/datasets/detect/coco.md @@ -8,6 +8,17 @@ keywords: Ultralytics, COCO dataset, object detection, YOLO, YOLO model training The [COCO](https://cocodataset.org/#home) (Common Objects in Context) dataset is a large-scale object detection, segmentation, and captioning dataset. It is designed to encourage research on a wide variety of object categories and is commonly used for benchmarking computer vision models. It is an essential dataset for researchers and developers working on object detection, segmentation, and pose estimation tasks. +

+
+ +
+ Watch: Ultralytics COCO Dataset Overview +

+ ## Key Features - COCO contains 330K images, with 200K images having annotations for object detection, segmentation, and captioning tasks. diff --git a/docs/en/datasets/detect/coco8.md b/docs/en/datasets/detect/coco8.md index a16af1f5d..dd4070ee5 100644 --- a/docs/en/datasets/detect/coco8.md +++ b/docs/en/datasets/detect/coco8.md @@ -10,6 +10,17 @@ keywords: Ultralytics, COCO8 dataset, object detection, model testing, dataset c [Ultralytics](https://ultralytics.com) COCO8 is a small, but versatile object detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets. +

+
+ +
+ Watch: Ultralytics COCO Dataset Overview +

+ This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com) and [YOLOv8](https://github.com/ultralytics/ultralytics). ## Dataset YAML diff --git a/docs/en/guides/workouts-monitoring.md b/docs/en/guides/workouts-monitoring.md index fd4d7e0f6..4e78f218b 100644 --- a/docs/en/guides/workouts-monitoring.md +++ b/docs/en/guides/workouts-monitoring.md @@ -50,7 +50,8 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi print("Video frame is empty or video processing has been successfully completed.") break frame_count += 1 - results = model.predict(im0, verbose=False) + results = model.track(im0, verbose=False) # Tracking recommended + # results = model.predict(im0) # Prediction also supported im0 = gym_object.start_counting(im0, results, frame_count) cv2.destroyAllWindows() @@ -86,7 +87,8 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi print("Video frame is empty or video processing has been successfully completed.") break frame_count += 1 - results = model.predict(im0, verbose=False) + results = model.track(im0, verbose=False) # Tracking recommended + # results = model.predict(im0) # Prediction also supported im0 = gym_object.start_counting(im0, results, frame_count) video_writer.write(im0) @@ -132,3 +134,15 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi | `classes` | `list[int]` | `None` | filter results by class, i.e. 
classes=0, or classes=[0,2,3] | | `retina_masks` | `bool` | `False` | use high-resolution segmentation masks | | `embed` | `list[int]` | `None` | return feature vectors/embeddings from given layers | + +### Arguments `model.track` + +| Name | Type | Default | Description | +|-----------|---------|----------------|-------------------------------------------------------------| +| `source` | `im0` | `None` | source directory for images or videos | +| `persist` | `bool` | `False` | persist tracks between frames | +| `tracker` | `str` | `botsort.yaml` | tracking method 'bytetrack' or 'botsort' | +| `conf` | `float` | `0.3` | confidence threshold | +| `iou` | `float` | `0.5` | IoU threshold | +| `classes` | `list` | `None` | filter results by class, i.e. classes=0, or classes=[0,2,3] | +| `verbose` | `bool` | `True` | display the object tracking results | \ No newline at end of file diff --git a/ultralytics/solutions/ai_gym.py b/ultralytics/solutions/ai_gym.py index b089ed029..b78cf598d 100644 --- a/ultralytics/solutions/ai_gym.py +++ b/ultralytics/solutions/ai_gym.py @@ -80,14 +80,6 @@ def start_counting(self, im0, results, frame_count): self.keypoints = results[0].keypoints.data self.annotator = Annotator(im0, line_width=2) - num_keypoints = len(results[0]) - - # Resize self.angle, self.count, and self.stage if the number of keypoints has changed - if len(self.angle) != num_keypoints: - self.angle = [0] * num_keypoints - self.count = [0] * num_keypoints - self.stage = ["-" for _ in range(num_keypoints)] - for ind, k in enumerate(reversed(self.keypoints)): if self.pose_type in ["pushup", "pullup"]: self.angle[ind] = self.annotator.estimate_pose_angle( diff --git a/ultralytics/solutions/object_counter.py b/ultralytics/solutions/object_counter.py index 3cb64fd3b..18f42c624 100644 --- a/ultralytics/solutions/object_counter.py +++ b/ultralytics/solutions/object_counter.py @@ -171,7 +171,7 @@ def extract_and_process_tracks(self, tracks): # Extract tracks for box, 
track_id, cls in zip(boxes, track_ids, clss): # Draw bounding box - self.annotator.box_label(box, label=f"{track_id}:{self.names[cls]}", color=colors(int(cls), True)) + self.annotator.box_label(box, label=f"{track_id}:{self.names[cls]}", color=colors(int(track_id), True)) # Draw Tracks track_line = self.track_history[track_id]