diff --git a/src/piclassifier/cptvmotiondetector.py b/src/piclassifier/cptvmotiondetector.py
index ae8d5493..c6c9da73 100644
--- a/src/piclassifier/cptvmotiondetector.py
+++ b/src/piclassifier/cptvmotiondetector.py
@@ -167,7 +167,7 @@ def skip_frame(self):
     def is_affected_by_ffc(cptv_frame):
         if cptv_frame.time_on is None or cptv_frame.last_ffc_time is None:
             return False
-        if isinstance(cptv_frame.time_on,int):
+        if isinstance(cptv_frame.time_on, int):
             return (
                 cptv_frame.time_on - cptv_frame.last_ffc_time
             ) < CPTVMotionDetector.FFC_PERIOD.seconds
diff --git a/src/track/clip.py b/src/track/clip.py
index f791422b..0fb2668c 100644
--- a/src/track/clip.py
+++ b/src/track/clip.py
@@ -185,8 +185,7 @@ def calculate_background(self, frame_reader):
             self.update_background(frame.pix)
             self._background_calculated()
             return
-
-
+
         first_frame = frame
         initial_frames = None
         initial_diff = None
@@ -251,7 +250,7 @@ def remove_background_animals(self, initial_frame, initial_diff):
 
         max_region = Rectangle(0, 0, self.res_x, self.res_y)
         for component, centroid in zip(lower_objects[1:], centroids[1:]):
-            print("Found component",component)
+            print("Found component", component)
             region = Region(
                 component[0],
                 component[1],
@@ -267,11 +266,11 @@ def remove_background_animals(self, initial_frame, initial_diff):
                     component[4],
                 )
                 continue
-            print("Region is",region)
+            print("Region is", region)
             background_region = region.subimage(initial_frame)
             norm_back = background_region.copy()
             norm_back, _ = normalize(norm_back, new_max=255)
-            print(norm_back.dtype,norm_back.max())
+            print(norm_back.dtype, norm_back.max())
            sub_components, sub_connected, sub_stats, centroids = detect_objects(
                 np.uint8(norm_back), otsus=True
             )
diff --git a/src/track/cliptrackextractor.py b/src/track/cliptrackextractor.py
index a56e0167..63df479b 100644
--- a/src/track/cliptrackextractor.py
+++ b/src/track/cliptrackextractor.py
@@ -106,7 +106,7 @@ def parse_clip(self, clip, process_background=False):
         camera_model = None
         if header.model:
             camera_model = header.model
-        print("Camera",camera_model)
+        print("Camera", camera_model)
         clip.set_model(camera_model)
 
         # if we have the triggered motion threshold should use that
@@ -116,7 +116,7 @@ def parse_clip(self, clip, process_background=False):
             temp_thresh = motion.get("triggeredthresh")
             if temp_thresh:
                 clip.temp_thresh = temp_thresh
-        video_start_time = datetime.fromtimestamp(header.timestamp/1000000)
+        video_start_time = datetime.fromtimestamp(header.timestamp / 1000000)
         video_start_time = video_start_time.astimezone(Clip.local_tz)
         clip.set_video_stats(video_start_time)
         clip.calculate_background(reader)