diff --git a/src/classify/clipclassifier.py b/src/classify/clipclassifier.py
index 9bcaa135..9e7dd279 100644
--- a/src/classify/clipclassifier.py
+++ b/src/classify/clipclassifier.py
@@ -245,7 +245,17 @@ def save_metadata(
             prediction = predictions.prediction_for(track.get_id())
             if prediction is None:
                 continue
-
+            # DEBUGGING STUFF REMOVE ME
+            # logging.info("Track predictions %s", track)
+            # for p in prediction.predictions:
+            #     logging.info(
+            #         "Have %s sum %s smoothed %s mass %s",
+            #         p,
+            #         np.sum(p.prediction),
+            #         np.round(p.smoothed_prediction),
+            #         p.mass,
+            #     )
+            # logging.info("smoothed %s", np.round(100 * prediction.class_best_score))
             prediction_meta = prediction.get_metadata()
             prediction_meta["model_id"] = model_id
             prediction_info.append(prediction_meta)
diff --git a/src/classify/trackprediction.py b/src/classify/trackprediction.py
index 94afe43b..a9af5056 100644
--- a/src/classify/trackprediction.py
+++ b/src/classify/trackprediction.py
@@ -110,18 +110,23 @@ def __init__(self, track_id, labels, keep_all=True, start_frame=None):
         self.masses = []
 
     def classified_clip(
-        self, predictions, smoothed_predictions, prediction_frames, top_score=None
+        self,
+        predictions,
+        smoothed_predictions,
+        prediction_frames,
+        masses,
+        top_score=None,
     ):
         self.num_frames_classified = len(predictions)
-        for prediction, smoothed_prediction, frames in zip(
-            predictions, smoothed_predictions, prediction_frames
+        for prediction, smoothed_prediction, frames, mass in zip(
+            predictions, smoothed_predictions, prediction_frames, masses
         ):
             prediction = Prediction(
                 prediction,
                 smoothed_prediction,
                 frames,
                 np.amax(frames),
-                None,
+                mass,
             )
             self.predictions.append(prediction)
 
diff --git a/src/ml_tools/datasetstructures.py b/src/ml_tools/datasetstructures.py
index 731e65a2..6a7fbd61 100644
--- a/src/ml_tools/datasetstructures.py
+++ b/src/ml_tools/datasetstructures.py
@@ -1058,6 +1058,7 @@ def get_segments(
     segment_count = int(segment_count)
     if max_segments is not None:
         segment_count = min(max_segments, segment_count)
 
+    # take any segment_width frames, this could be done each epoch
     whole_indices = frame_indices
     random_frames = segment_type in [
@@ -1074,8 +1075,7 @@
         np.random.shuffle(frame_indices)
     for i in range(segment_count):
         # always get atleast one segment, not doing annymore
-        # if i > 0:
-        if (len(frame_indices) < segment_width and len(segments) > 1) or len(
+        if (len(frame_indices) < segment_width / 2.0 and len(segments) > 1) or len(
             frame_indices
         ) < segment_width / 4:
             break
@@ -1089,6 +1089,7 @@
                 replace=False,
             )
             frames = section[indices]
+            # might need to change that gp 11/05 - 2024
            frame_indices = frame_indices[segment_frame_spacing:]
        elif random_frames:
            # frame indices already randomized so just need to grab some
diff --git a/src/ml_tools/imageprocessing.py b/src/ml_tools/imageprocessing.py
index 4eeebcac..42312dee 100644
--- a/src/ml_tools/imageprocessing.py
+++ b/src/ml_tools/imageprocessing.py
@@ -34,8 +34,6 @@ def resize_and_pad(
         resize_dim = (width, height)
     if pad is None:
         pad = np.min(frame)
-    else:
-        pad = 0
 
     resized = np.full(new_dim, pad, dtype=frame.dtype)
     offset_x = 0
@@ -80,20 +78,14 @@ def resize_cv(image, dim, interpolation=cv2.INTER_LINEAR, extra_h=0, extra_v=0):
     )
 
 
-def square_clip(data, frames_per_row, tile_dim, normalize=True):
+def square_clip(data, frames_per_row, tile_dim, frame_samples, normalize=True):
     # lay each frame out side by side in rows
     new_frame = np.zeros((frames_per_row * tile_dim[0], frames_per_row * tile_dim[1]))
     i = 0
     success = False
     for x in range(frames_per_row):
         for y in range(frames_per_row):
-            if i >= len(data):
-                frame = data[-1]
-            else:
-                frame = data[i]
-
-            # cv2.imshow("frame", np.uint8(frame))
-            # cv2.waitKey(0)
+            frame = data[frame_samples[i]]
             if normalize:
                 frame, stats = normalize(frame, new_max=255)
                 if not stats[0]:
diff --git a/src/ml_tools/interpreter.py b/src/ml_tools/interpreter.py
index 12a5c57b..2b299181 100644
--- a/src/ml_tools/interpreter.py
+++ b/src/ml_tools/interpreter.py
@@ -146,6 +146,7 @@ def classify_track(self, clip, track, segment_frames=None):
         # self.model.predict(preprocessed)
         top_score = None
         smoothed_predictions = None
+
         if self.params.smooth_predictions:
             masses = np.array(masses)
             top_score = np.sum(masses)
@@ -155,6 +156,7 @@
             output,
             smoothed_predictions,
             prediction_frames,
+            masses,
             top_score=top_score,
         )
         track_prediction.classify_time = time.time() - start
@@ -213,6 +215,7 @@ def preprocess_frames(
             diff_frame = region.subimage(f.thermal) - region.subimage(
                 clip.background
             )
+
             new_max = np.amax(diff_frame)
             new_min = np.amin(diff_frame)
             if min_diff is None or new_min < min_diff:
@@ -299,7 +302,7 @@ def preprocess_segments(
             from_last=predict_from_last,
             max_segments=max_segments,
             dont_filter=dont_filter,
-            filter_by_fp = False,
+            filter_by_fp=False,
         )
         frame_indices = set()
         for segment in segments:
diff --git a/src/ml_tools/preprocess.py b/src/ml_tools/preprocess.py
index fe02199a..9ab61c8f 100644
--- a/src/ml_tools/preprocess.py
+++ b/src/ml_tools/preprocess.py
@@ -147,6 +147,13 @@ def preprocess_movement(
 ):
     frame_types = {}
     data = []
+    frame_samples = list(np.arange(len(preprocess_frames)))
+    if len(preprocess_frames) < frames_per_row * 5:
+        extra_samples = np.random.choice(
+            frame_samples, frames_per_row * 5 - len(preprocess_frames)
+        )
+        frame_samples.extend(extra_samples)
+    frame_samples.sort()
     for channel in channels:
         if isinstance(channel, str):
             channel = TrackChannels[channel]
@@ -158,6 +165,7 @@ def preprocess_movement(
             channel_segment,
             frames_per_row,
             (frame_size, frame_size),
+            frame_samples,
             normalize=False,
         )
         # already done normalization
diff --git a/src/ml_tools/previewer.py b/src/ml_tools/previewer.py
index ddd203ea..34255047 100644
--- a/src/ml_tools/previewer.py
+++ b/src/ml_tools/previewer.py
@@ -91,8 +91,8 @@ def export_clip_preview(self, filename, clip: Clip, predictions=None):
         if self.debug:
             footer = Previewer.stats_footer(clip.stats)
         if predictions and (
-            self.preview_type == self.PREVIEW_CLASSIFIED
-            or self.preview_type == self.PREVIEW_TRACKING
+            self.preview_type == PREVIEW_CLASSIFIED
+            or self.preview_type == PREVIEW_TRACKING
         ):
             self.create_track_descriptions(clip, predictions)
 
@@ -103,14 +103,14 @@ def export_clip_preview(self, filename, clip: Clip, predictions=None):
         res_x = clip.res_x
         res_y = clip.res_y
-        if self.preview_type == self.PREVIEW_TRACKING:
+        if self.preview_type == PREVIEW_TRACKING:
             res_x *= 2
             res_y *= 2
 
         mpeg = MPEGCreator(str(filename))
         frame_scale = 4
 
         for frame_number, frame in enumerate(clip.frame_buffer):
-            if self.preview_type == self.PREVIEW_RAW:
+            if self.preview_type == PREVIEW_RAW:
                 image = self.convert_and_resize(
                     frame.thermal, clip.stats.min_temp, clip.stats.max_temp, clip.type
                 )
diff --git a/src/track/clip.py b/src/track/clip.py
index e21ccbc8..72cc489d 100644
--- a/src/track/clip.py
+++ b/src/track/clip.py
@@ -185,7 +185,6 @@ def calculate_background(self, frame_reader):
             self.update_background(frame.pix)
             self._background_calculated()
             return
-        first_frame = frame
 
         initial_frames = None
         initial_diff = None
diff --git a/src/track/track.py b/src/track/track.py
index 5550391c..165ee39d 100644
--- a/src/track/track.py
+++ b/src/track/track.py
@@ -444,6 +444,7 @@ def get_segments(
         max_segments=None,
         ffc_frames=None,
         dont_filter=False,
+        filter_by_fp=False,
     ):
         if from_last is not None:
             if from_last == 0:
@@ -476,23 +477,28 @@
                 )
                 segments.append(segment)
         else:
-            segments, _ = get_segments(
-                self.clip_id,
-                self._id,
-                start_frame,
-                segment_frame_spacing=segment_frame_spacing,
-                segment_width=segment_width,
-                regions=regions,
-                ffc_frames=ffc_frames,
-                repeats=repeats,
-                # frame_temp_median=frame_temp_median,
-                min_frames=min_frames,
-                segment_frames=None,
-                segment_type=segment_type,
-                max_segments=max_segments,
-                dont_filter=dont_filter,
-            )
-        return segments
+            all_segments = []
+            for seg_type in [SegmentType.ALL_RANDOM, SegmentType.ALL_SECTIONS]:
+                segments, _ = get_segments(
+                    self.clip_id,
+                    self._id,
+                    start_frame,
+                    segment_frame_spacing=segment_frame_spacing,
+                    segment_width=segment_width,
+                    regions=regions,
+                    ffc_frames=ffc_frames,
+                    repeats=repeats,
+                    # frame_temp_median=frame_temp_median,
+                    min_frames=min_frames,
+                    segment_frames=None,
+                    segment_type=seg_type,
+                    max_segments=max_segments,
+                    dont_filter=dont_filter,
+                    # segment_type=seg_type,
+                )
+                all_segments.extend(segments)
+
+        return all_segments
 
     @classmethod
     def from_region(cls, clip, region, tracker_version=None, tracking_config=None):
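
Reviewer note: below is a minimal standalone sketch of the new frame-sampling path, not part of the patch. It mirrors the frame_samples oversampling added to preprocess_movement and the lookup now used in square_clip. The helper name sample_and_tile, the default frames_per_row=5, and the tile_dim value are illustrative assumptions; the sketch pads to the full frames_per_row * frames_per_row grid, which matches the diff's frames_per_row * 5 when frames_per_row is 5.

import numpy as np

def sample_and_tile(frames, frames_per_row=5, tile_dim=(32, 32)):
    # Build the index list the way preprocess_movement now does: one index per
    # real frame, then top up with randomly re-drawn indices (sorted so repeats
    # sit next to their originals) when there are fewer frames than grid cells.
    frame_samples = list(np.arange(len(frames)))
    cells = frames_per_row * frames_per_row
    if len(frames) < cells:
        extra_samples = np.random.choice(frame_samples, cells - len(frames))
        frame_samples.extend(extra_samples)
    frame_samples.sort()

    # Lay the sampled frames out row by row, as square_clip does with its new
    # frame_samples argument, instead of the old fallback of repeating data[-1].
    grid = np.zeros((frames_per_row * tile_dim[0], frames_per_row * tile_dim[1]))
    i = 0
    for x in range(frames_per_row):
        for y in range(frames_per_row):
            # frames are assumed to be pre-resized to tile_dim
            frame = frames[frame_samples[i]]
            grid[
                x * tile_dim[0] : (x + 1) * tile_dim[0],
                y * tile_dim[1] : (y + 1) * tile_dim[1],
            ] = frame
            i += 1
    return grid

For example, with 18 cropped frames and a 5 x 5 grid, seven indices are drawn again at random, so every tile still shows a real (possibly repeated) frame rather than seven copies of the final frame.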