From c0bb7fa60fb5972a36fd3b4c181a4d2bb12f2536 Mon Sep 17 00:00:00 2001 From: Ronan Date: Tue, 9 Jul 2024 11:00:45 +0200 Subject: [PATCH 01/14] feat: at least the server start but still lots of bugs --- .gitignore | 2 +- Makefile | 3 + app/callbacks/data_callbacks.py | 275 ++++++---------- app/callbacks/display_callbacks.py | 341 ++++++++++---------- app/components/{alerts.py => detections.py} | 17 +- app/index.py | 18 +- app/layouts/main_layout.py | 30 +- app/pages/homepage.py | 14 +- app/services/__init__.py | 4 +- app/services/api.py | 58 ++-- app/utils/data.py | 91 ------ app/utils/display.py | 47 ++- app/utils/sites.py | 38 --- pyproject.toml | 2 +- 14 files changed, 360 insertions(+), 580 deletions(-) rename app/components/{alerts.py => detections.py} (52%) delete mode 100644 app/utils/sites.py diff --git a/.gitignore b/.gitignore index 7ecb179..e07340a 100644 --- a/.gitignore +++ b/.gitignore @@ -109,7 +109,7 @@ venv/ ENV/ env.bak/ venv.bak/ - +venv* # Spyder project settings .spyderproject .spyproject diff --git a/Makefile b/Makefile index 7114ca4..e07d6e3 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,9 @@ run_dev: poetry export -f requirements.txt --without-hashes --output requirements.txt docker compose -f docker-compose-dev.yml up -d --build +run_local: + python app/index.py --host 0.0.0.0 --port 8050 + # Run the docker stop: docker compose down diff --git a/app/callbacks/data_callbacks.py b/app/callbacks/data_callbacks.py index 0bc6b7c..7a2d14c 100644 --- a/app/callbacks/data_callbacks.py +++ b/app/callbacks/data_callbacks.py @@ -4,6 +4,7 @@ # See LICENSE or go to for full license details. import json +from datetime import datetime, timedelta import dash import logging_config @@ -15,33 +16,26 @@ from pyroclient import Client import config as cfg -from services import api_client, call_api -from utils.data import ( - convert_time, - past_ndays_api_events, - process_bbox, - read_stored_DataFrame, - retrieve_site_from_device_id, -) +from services import instantiate_token +from utils.data import read_stored_DataFrame logger = logging_config.configure_logging(cfg.DEBUG, cfg.SENTRY_DSN) @app.callback( [ - Output("user_credentials", "data"), - Output("user_headers", "data"), + Output("client_token", "data"), Output("form_feedback_area", "children"), ], Input("send_form_button", "n_clicks"), [ State("username_input", "value"), State("password_input", "value"), - State("user_headers", "data"), + State("client_token", "data"), ], ) -def login_callback(n_clicks, username, password, user_headers): - if user_headers is not None: +def login_callback(n_clicks, username, password, client_token): + if client_token is not None: return dash.no_update, dash.no_update, dash.no_update if n_clicks: @@ -59,10 +53,10 @@ def login_callback(n_clicks, username, password, user_headers): else: # This is the route of the API that we are going to use for the credential check try: - client = Client(cfg.API_URL, username, password) + client = instantiate_token(username, password) return ( - {"username": username, "password": password}, + client.token, client.headers, dash.no_update, ) @@ -77,52 +71,45 @@ def login_callback(n_clicks, username, password, user_headers): @app.callback( [ - Output("store_api_events_data", "data"), - Output("store_api_alerts_data", "data"), - Output("trigger_no_events", "data"), + Output("store_wildfires_data", "data"), + Output("store_detections_data", "data"), + Output("trigger_no_wildfires", "data"), ], [Input("main_api_fetch_interval", "n_intervals")], [ - 
State("store_api_events_data", "data"), - State("store_api_alerts_data", "data"), - State("user_headers", "data"), - State("user_credentials", "data"), + State("client_token", "data"), ], prevent_initial_call=True, ) -def api_watcher(n_intervals, local_events, local_alerts, user_headers, user_credentials): +def api_watcher(n_intervals, client_token): """ - Fetches and processes live event and alert data from the API at regular intervals. + Fetches and processes live wildfire and detection data from the API at regular intervals. - This callback periodically checks for new event and alert data from the API. + This callback periodically checks for new wildfire and detection data from the API. It processes the new data, updates local storage with the latest information, and prepares it for displaying in the application. Parameters: - n_intervals (int): Number of intervals passed since the start of the app, used to trigger the periodic update. - - local_events (json): Currently stored events data in JSON format. - - local_alerts (json): Currently stored alerts data in JSON format. - - user_headers (dict): User authorization headers for API requests. - - user_credentials (tuple): User credentials (username, password). + - client_token (str): Client token for API calls + Returns: - - json: Updated events data in JSON format. - - json: Updated alerts data in JSON format. + - json: Updated wildfires data in JSON format. + - json: Updated detections data in JSON format. """ - if user_headers is None: + if client_token is None: raise PreventUpdate - user_token = user_headers["Authorization"].split(" ")[1] - api_client.token = user_token - # Read local data - local_events, event_data_loaded = read_stored_DataFrame(local_events) - local_alerts, alerts_data_loaded = read_stored_DataFrame(local_alerts) logger.info("Start Fetching the events") - # Fetch events - api_events = pd.DataFrame(call_api(api_client.get_unacknowledged_events, user_credentials)()) - api_events["created_at"] = convert_time(api_events) - if len(api_events) == 0: + # Fetch Detections + yesterday = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d_%H:%M:%S") + api_client = Client(client_token, cfg.API_URL) + response = api_client.fetch_unacknowledged_detections(from_date=yesterday) + api_detections = pd.DataFrame(response.json()) + + if api_detections.empty: return [ json.dumps( { @@ -138,178 +125,116 @@ def api_watcher(n_intervals, local_events, local_alerts, user_headers, user_cred ), dash.no_update, ] - else: - api_events = past_ndays_api_events(api_events, n_days=0) # keep only events from today - if api_events.empty: - return dash.no_update, dash.no_update, True - api_events = api_events[::-1] # Display the last alert first - - # Drop acknowledged - if not local_events.empty: - local_events = local_events[local_events["id"].isin(api_events["id"])] - - if event_data_loaded and api_events.empty and local_events.empty: - new_api_events = api_events[~api_events["id"].isin(local_events["id"])].copy() + # Find ongoing detections for the wildfires started within 30 minutes; + # after that, any new detection is part of a new wildfire + api_detections["created_at"] = pd.to_datetime(api_detections["created_at"]) + # Trier les détections par "created_at" + api_detections = api_detections.sort_values(by="created_at") + + # Initialiser la liste pour les wildfires + wildfires = [] + id = 1 + cameras = pd.DataFrame(api_client.fetch_cameras().json()) + + # Initialiser le premier wildfire + current_wildfire = { + "created_at": 
api_detections.iloc[0]["created_at"], + "detection_ids": [api_detections.iloc[0]["id"]], + "id": id, + "camera_name": cameras.loc[cameras["id"] == api_detections.iloc[0]["camera_id"], "name"].values[0], + } + + # Parcourir les détections pour les regrouper en wildfires + for i in range(1, len(api_detections)): + detection = api_detections.iloc[i] + time_diff = detection["created_at"] - current_wildfire["created_at"] + + if time_diff <= pd.Timedelta(minutes=30): + # Si la différence de temps est inférieure à 30 minutes, ajouter à l'actuel wildfire + current_wildfire["detection_ids"].append(detection["id"]) else: - new_api_events = api_events.copy() - - if alerts_data_loaded and not local_alerts.empty: - # drop old alerts - - local_alerts = local_alerts[local_alerts["event_id"].isin(api_events["id"])] + # Sinon, terminer le current_wildfire et commencer un nouveau + wildfires.append(current_wildfire) + id = id + 1 + current_wildfire = { + "id": id, + "camera_name": cameras.loc[cameras["id"] == detection["camera_id"], "name"].values[0], + "created_at": detection["created_at"], + "detection_ids": [detection["id"]], + } - # Find ongoing alerts for the events started within 30 minutes; - # after that, any new alert is part of a new event - local_alerts["created_at"] = pd.to_datetime(local_alerts["created_at"]) - - # Define the end_event timestamp as timezone-naive - end_event = pd.Timestamp.utcnow().replace(tzinfo=None) - pd.Timedelta(minutes=30) - - # Get ongoing alerts - ongoing_local_alerts = local_alerts[local_alerts["created_at"] > end_event].copy() - get_alerts = call_api(api_client.get_alerts_for_event, user_credentials) - ongoing_alerts = pd.DataFrame() - - # Iterate over each unique event_id - for event_id in ongoing_local_alerts["event_id"].drop_duplicates(): - # Get the alerts for the current event_id and convert to DataFrame - alerts_df = pd.DataFrame(get_alerts(event_id)) - - # Concatenate the current alerts DataFrame to the ongoing_alerts DataFrame - ongoing_alerts = pd.concat([ongoing_alerts, alerts_df], ignore_index=True) - - if not ongoing_alerts.empty: - ongoing_alerts = ( - ongoing_alerts.groupby(["event_id"]).head(cfg.MAX_ALERTS_PER_EVENT).reset_index(drop=True) - ) - ongoing_alerts = ongoing_alerts[~ongoing_alerts["id"].isin(local_alerts["id"])].copy() - ongoing_alerts["processed_loc"] = ongoing_alerts["localization"].apply(process_bbox) + # Ajouter le dernier wildfire + wildfires.append(current_wildfire) - # Get new alerts - new_alerts = pd.DataFrame() + # Convertir la liste des wildfires en DataFrame + wildfires_df = pd.DataFrame(wildfires) - # Iterate over each unique event_id - for event_id in new_api_events["id"].drop_duplicates(): - # Get the alerts for the current event_id and convert to DataFrame - alerts_df = pd.DataFrame(get_alerts(event_id)) - - # Concatenate the current alerts DataFrame to the new_alerts DataFrame - new_alerts = pd.concat([new_alerts, alerts_df], ignore_index=True) - - if not new_alerts.empty: - new_alerts = new_alerts.groupby(["event_id"]).head(cfg.MAX_ALERTS_PER_EVENT).reset_index(drop=True) - new_alerts["processed_loc"] = new_alerts["localization"].apply(process_bbox) - new_alerts = pd.concat([new_alerts, ongoing_alerts], join="outer") - local_alerts = pd.concat([local_alerts, new_alerts], join="outer") - local_alerts = local_alerts.drop_duplicates(subset=["id"]) - - else: - get_alerts = call_api(api_client.get_alerts_for_event, user_credentials) - _ = api_events["id"].apply(lambda x: pd.DataFrame(get_alerts(x))) # type: ignore[arg-type, 
return-value] - local_alerts = ( - pd.concat(_.values).groupby(["event_id"]).head(cfg.MAX_ALERTS_PER_EVENT).reset_index(drop=True) - ) - local_alerts["created_at"] = pd.to_datetime(local_alerts["created_at"]) - local_alerts["processed_loc"] = local_alerts["localization"].apply(process_bbox) - - if len(new_api_events): - alerts_data = new_api_events.merge(local_alerts, left_on="id", right_on="event_id").drop_duplicates( - subset=["id_x"] - )[["azimuth", "device_id"]] - - new_api_events["device_name"] = [ - f"{retrieve_site_from_device_id(api_client, user_credentials, device_id)} - {int(azimuth)}°".title() - for _, (azimuth, device_id) in alerts_data.iterrows() - ] - - if event_data_loaded: - local_events = pd.concat([local_events, new_api_events], join="outer") - local_events = local_events.drop_duplicates() - - else: - local_events = new_api_events - - return [ - json.dumps({"data": local_events.to_json(orient="split"), "data_loaded": True}), - json.dumps({"data": local_alerts.to_json(orient="split"), "data_loaded": True}), - dash.no_update, - ] + return [ + json.dumps({"data": wildfires_df.to_json(orient="split"), "data_loaded": True}), + json.dumps({"data": api_detections.to_json(orient="split"), "data_loaded": True}), + dash.no_update, + ] @app.callback( Output("media_url", "data"), - Input("store_api_alerts_data", "data"), + Input("store_detections_data", "data"), [ State("media_url", "data"), - State("user_headers", "data"), - State("user_credentials", "data"), + State("client_token", "data"), ], prevent_initial_call=True, ) def get_media_url( - local_alerts, + local_detections, media_url, - user_headers, - user_credentials, + client_token, ): """ - Retrieves media URLs for alerts and manages the fetching process from the API. + Retrieves media URLs for detections and manages the fetching process from the API. This callback is designed to efficiently load media URLs during app initialization - and subsequently update them. Initially, it focuses on loading URLs event by event - to quickly provide data for visualization. Once URLs for all events are loaded, the + and subsequently update them. Initially, it focuses on loading URLs wildfire by wildfire + to quickly provide data for visualization. Once URLs for all wildfires are loaded, the callback then methodically checks for and retrieves any missing URLs. The callback is triggered by two inputs: a change in the data to load and a regular - interval check. It includes a cleanup step to remove event IDs no longer present in - local alerts. + interval check. It includes a cleanup step to remove wildfire IDs no longer present in + local detections. Parameters: - interval (int): Current interval for fetching URLs. - - local_alerts (json): Currently stored alerts data in JSON format. - - media_url (dict): Dictionary holding media URLs for alerts. - - user_headers (dict): User authorization headers for API requests. - - user_credentials (tuple): User credentials (username, password). + - local_detections (json): Currently stored detections data in JSON format. + - media_url (dict): Dictionary holding media URLs for detections. + - client_token (str): Token used for API calls + Returns: - - dict: Updated dictionary with media URLs for each alert. + - dict: Updated dictionary with media URLs for each detection. 
""" - if user_headers is None: + if client_token is None: raise PreventUpdate - user_token = user_headers["Authorization"].split(" ")[1] - api_client.token = user_token - local_alerts, alerts_data_loaded = read_stored_DataFrame(local_alerts) + local_detections, detections_data_loaded = read_stored_DataFrame(local_detections) - if not alerts_data_loaded: + if not detections_data_loaded: raise PreventUpdate - if local_alerts.empty: + if local_detections.empty: return {} - current_event_ids = set(local_alerts["event_id"].astype(str)) + # Loop through each row in local_detections + for _, row in local_detections.iterrows(): + detection_id = str(row["id"]) - # Cleanup: Remove any event_ids from media_url not present in local_alerts - media_url_keys = set(media_url.keys()) - for event_id in media_url_keys - current_event_ids: - del media_url[event_id] - - # Loop through each row in local_alerts - for _, row in local_alerts.iterrows(): - event_id = str(row["event_id"]) - media_id = str(row["media_id"]) - if event_id not in media_url: - media_url[event_id] = {} - - # Check if the URL for this event_id and media_id already exists - if media_id not in media_url[event_id]: - # Fetch the URL for this media_id - try: - media_url[event_id][media_id] = call_api(api_client.get_media_url, user_credentials)(media_id)["url"] - except Exception: # General catch-all for other exceptions - media_url[event_id][media_id] = "" # Handle potential exceptions + # Fetch the URL for this media_id + try: + media_url[detection_id] = Client(client_token, cfg.API_URL).get_detection_url(detection_id)["url"] + # TODO REFACTOR : should be removed since we already loaded all the Detections in an other callback !!! + except Exception: # General catch-all for other exceptions + media_url[detection_id] = "" # Handle potential exceptions return media_url diff --git a/app/callbacks/display_callbacks.py b/app/callbacks/display_callbacks.py index d0c2323..952159e 100644 --- a/app/callbacks/display_callbacks.py +++ b/app/callbacks/display_callbacks.py @@ -14,130 +14,134 @@ from dash.dependencies import ALL, Input, Output, State from dash.exceptions import PreventUpdate from main import app +from pyroclient import Client import config as cfg -from services import api_client, call_api from utils.data import read_stored_DataFrame -from utils.display import build_vision_polygon, create_event_list_from_df +from utils.display import build_vision_polygon, create_wildfire_list_from_df logger = logging_config.configure_logging(cfg.DEBUG, cfg.SENTRY_DSN) @app.callback( Output("modal-loading", "is_open"), - [Input("media_url", "data"), Input("user_headers", "data"), Input("trigger_no_events", "data")], - State("store_api_alerts_data", "data"), + [Input("media_url", "data"), Input("client_token", "data"), Input("trigger_no_wildfires", "data")], + State("store_detections_data", "data"), prevent_initial_call=True, ) -def toggle_modal(media_url, user_headers, trigger_no_events, local_alerts): +def toggle_modal(media_url, client_token, trigger_no_wildfires, local_detections): """ - Toggles the visibility of the loading modal based on the presence of media URLs and the state of alerts data. + Toggles the visibility of the loading modal based on the presence of media URLs and the state of detections data. - This function is triggered by changes in the media URLs, user headers, or a trigger indicating no events. - It checks the current state of alerts data and decides whether to display the loading modal. 
+    This function is triggered by changes in the media URLs, user headers, or a trigger indicating no wildfires.
+    It checks the current state of detections data and decides whether to display the loading modal.

     Parameters:
-    - media_url (dict): Dictionary containing media URLs for alerts.
-    - user_headers (dict): Dictionary containing user header information.
-    - trigger_no_events (bool): Trigger indicating whether there are no events to process.
-    - local_alerts (json): JSON formatted data containing current alerts information.
+    - media_url (dict): Dictionary containing media URLs for detections.
+    - client_token (str): Token used for API requests.
+    - trigger_no_wildfires (bool): Trigger indicating whether there are no wildfires to process.
+    - local_detections (json): JSON formatted data containing current detections information.

     Returns:
-    - bool: True to show the modal, False to hide it. The modal is shown if alerts data is not loaded and there are no media URLs; hidden otherwise.
+    - bool: True to show the modal, False to hide it. The modal is shown if detections data is not loaded and there are no media URLs; hidden otherwise.
     """
-    if trigger_no_events:
+    if trigger_no_wildfires:
         return False
-    if user_headers is None:
+    if client_token is None:
         raise PreventUpdate
-    local_alerts, alerts_data_loaded = read_stored_DataFrame(local_alerts)
-    return True if not alerts_data_loaded and len(media_url.keys()) == 0 else False
+    local_detections, detections_data_loaded = read_stored_DataFrame(local_detections)
+    return True if not detections_data_loaded and len(media_url.keys()) == 0 else False


-# Create event list
+# Create wildfire list
 @app.callback(
-    Output("alert-list-container", "children"),
+    Output("detection-list-container", "children"),
     [
-        Input("store_api_events_data", "data"),
+        Input("store_wildfires_data", "data"),
         Input("to_acknowledge", "data"),
     ],
     State("media_url", "data"),
     prevent_initial_call=True,
 )
-def update_event_list(local_events, to_acknowledge, media_url):
+def update_wildfire_list(local_wildfires, to_acknowledge, media_url):
     """
-    Updates the event list based on changes in the events data or acknowledgement actions.
+    Updates the wildfire list based on changes in the wildfires data or acknowledgement actions.

     Parameters:
-    - local_events (json): JSON formatted data containing current event information.
-    - to_acknowledge (int): Event ID that is being acknowledged.
-    - media_url (dict): Dictionary containing media URLs for alerts.
+    - local_wildfires (json): JSON formatted data containing current wildfire information.
+    - to_acknowledge (int): Wildfire ID that is being acknowledged.
+    - media_url (dict): Dictionary containing media URLs for detections.

     Returns:
-    - html.Div: A Div containing the updated list of alerts.
+    - html.Div: A Div containing the updated list of detections.
""" trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] if trigger_id == "to_acknowledge" and str(to_acknowledge) not in media_url.keys(): raise PreventUpdate - local_events, event_data_loaded = read_stored_DataFrame(local_events) - if not event_data_loaded: + local_wildfires, wildfire_data_loaded = read_stored_DataFrame(local_wildfires) + if not wildfire_data_loaded: raise PreventUpdate - if len(local_events): - local_events = local_events[~local_events["id"].isin([to_acknowledge])] + if len(local_wildfires): + local_wildfires = local_wildfires[~local_wildfires["id"].isin([to_acknowledge])] + return create_wildfire_list_from_df(local_wildfires) - return create_event_list_from_df(local_events) - -# Select the event id +# Select the wildfire id @app.callback( [ - Output({"type": "event-button", "index": ALL}, "style"), - Output("event_id_on_display", "data"), + Output({"type": "wildfire-button", "index": ALL}, "style"), + Output("wildfire_id_on_display", "data"), Output("auto-move-button", "n_clicks"), ], [ - Input({"type": "event-button", "index": ALL}, "n_clicks"), + Input({"type": "wildfire-button", "index": ALL}, "n_clicks"), Input("to_acknowledge", "data"), ], [ State("media_url", "data"), - State({"type": "event-button", "index": ALL}, "id"), - State("store_api_alerts_data", "data"), - State("event_id_on_display", "data"), + State({"type": "wildfire-button", "index": ALL}, "id"), + State("store_detections_data", "data"), + State("wildfire_id_on_display", "data"), ], prevent_initial_call=True, ) -def select_event_with_button(n_clicks, to_acknowledge, media_url, button_ids, local_alerts, event_id_on_display): +def select_wildfire_with_button( + n_clicks, to_acknowledge, media_url, button_ids, local_detections, wildfire_id_on_display +): """ - Handles event selection through button clicks. + Handles wildfire selection through button clicks. Parameters: - - n_clicks (list): List of click counts for each event button. - - to_acknowledge (int): Event ID that is being acknowledged. - - media_url (dict): Dictionary containing media URLs for alerts. - - button_ids (list): List of button IDs corresponding to events. - - local_alerts (json): JSON formatted data containing current alert information. - - event_id_on_display (int): Currently displayed event ID. + - n_clicks (list): List of click counts for eachwildfire button. + - to_acknowledge (int): Wildfire ID that is being acknowledged. + - media_url (dict): Dictionary containing media URLs for detections. + - button_ids (list): List of button IDs corresponding to wildfires. + - local_detections (json): JSON formatted data containing current detection information. + - wildfire_id_on_display (int): Currently displayed wildfire ID. Returns: - - list: List of styles for event buttons. - - int: ID of the event to display. + - list: List of styles for wildfire buttons. + - int: ID of the wildfire to display. 
""" ctx = dash.callback_context - local_alerts, alerts_data_loaded = read_stored_DataFrame(local_alerts) - if len(local_alerts) == 0: + local_detections, detections_data_loaded = read_stored_DataFrame(local_detections) + if len(local_detections) == 0: return [[], 0, 1] - if not alerts_data_loaded: + if not detections_data_loaded: raise PreventUpdate trigger_id = ctx.triggered[0]["prop_id"].split(".")[0] + print("on rentre ACKNOWLEDGE") + print(str(trigger_id)) + if trigger_id == "to_acknowledge": - idx = local_alerts[~local_alerts["event_id"].isin([to_acknowledge])]["event_id"].values + idx = local_detections[~local_detections["wildfire_id"].isin([to_acknowledge])]["wildfire_id"].values if len(idx) == 0: button_index = 0 # No more images available else: @@ -153,8 +157,12 @@ def select_event_with_button(n_clicks, to_acknowledge, media_url, button_ids, lo nb_clicks = ctx.triggered[0]["value"] # check if the button was clicked or just initialized (=0) - if nb_clicks == 0 and event_id_on_display > 0 and event_id_on_display in local_alerts["event_id"].values: - button_index = event_id_on_display + if ( + nb_clicks == 0 + and wildfire_id_on_display > 0 + and wildfire_id_on_display in local_detections["wildfire_id"].values + ): + button_index = wildfire_id_on_display # Highlight the button styles = [] @@ -184,30 +192,33 @@ def select_event_with_button(n_clicks, to_acknowledge, media_url, button_ids, lo return [styles, button_index, 1] -# Get event_id data +# Getwildfire_id data @app.callback( - Output("alert_on_display", "data"), - Input("event_id_on_display", "data"), - State("store_api_alerts_data", "data"), + Output("detection_on_display", "data"), + Input("wildfire_id_on_display", "data"), + State("store_detections_data", "data"), prevent_initial_call=True, ) -def update_display_data(event_id_on_display, local_alerts): +def update_display_data(wildfire_id_on_display, local_detections): """ - Updates the display data based on the currently selected event ID. + Updates the display data based on the currently selected wildfire ID. Parameters: - - event_id_on_display (int): Currently displayed event ID. - - local_alerts (json): JSON formatted data containing current alert information. + - wildfire_id_on_display (int): Currently displayed wildfire ID. + - local_detections (json): JSON formatted data containing current detection information. Returns: - - json: JSON formatted data for the selected event. + - json: JSON formatted data for the selected wildfire. 
""" - local_alerts, data_loaded = read_stored_DataFrame(local_alerts) + local_detections, data_loaded = read_stored_DataFrame(local_detections) if not data_loaded: raise PreventUpdate - if event_id_on_display == 0: + print("wildfire_id_on_display") + print(wildfire_id_on_display) + + if wildfire_id_on_display == 0: return json.dumps( { "data": pd.DataFrame().to_json(orient="split"), @@ -215,12 +226,12 @@ def update_display_data(event_id_on_display, local_alerts): } ) else: - if event_id_on_display == 0: - event_id_on_display = local_alerts["event_id"].values[0] + if wildfire_id_on_display == 0: + wildfire_id_on_display = local_detections["detection_id"].values[0] - alert_on_display = local_alerts[local_alerts["event_id"] == event_id_on_display] + detection_on_display = local_detections[local_detections["detection_id"] == wildfire_id_on_display] - return json.dumps({"data": alert_on_display.to_json(orient="split"), "data_loaded": True}) + return json.dumps({"data": detection_on_display.to_json(orient="split"), "data_loaded": True}) @app.callback( @@ -229,34 +240,39 @@ def update_display_data(event_id_on_display, local_alerts): Output("bbox-container", "children"), # Output for the bounding box Output("image-slider", "max"), ], - [Input("image-slider", "value"), Input("alert_on_display", "data")], + [Input("image-slider", "value"), Input("detection_on_display", "data")], [ State("media_url", "data"), - State("alert-list-container", "children"), + State("detection-list-container", "children"), ], prevent_initial_call=True, ) -def update_image_and_bbox(slider_value, alert_data, media_url, alert_list): +def update_image_and_bbox(slider_value, detection_data, media_url, detection_list): """ Updates the image and bounding box display based on the slider value. Parameters: - slider_value (int): Current value of the image slider. - - alert_data (json): JSON formatted data for the selected event. - - media_url (dict): Dictionary containing media URLs for alerts. + - detection_data (json): JSON formatted data for the selectedwildfire. + - media_url (dict): Dictionary containing media URLs for detections. Returns: - - html.Img: An image element displaying the selected alert image. + - html.Img: An image element displaying the selected detection image. - list: A list of html.Div elements representing bounding boxes. """ img_src = "" bbox_style = {} bbox_divs: List[html.Div] = [] # This will contain the bounding box as an html.Div - alert_data, data_loaded = read_stored_DataFrame(alert_data) + detection_data, data_loaded = read_stored_DataFrame(detection_data) if not data_loaded: raise PreventUpdate - if len(alert_list) == 0: + print("Detection list :") + print(detection_list) + print("Detection data :") # TODO : why do we need both ? 
+ print(detection_data) + + if len(detection_list) == 0: img_html = html.Img( src="./assets/images/no-alert-default.png", className="common-style", @@ -266,44 +282,32 @@ def update_image_and_bbox(slider_value, alert_data, media_url, alert_list): # Filter images with non-empty URLs images = [] - boxes = [] - for _, alert in alert_data.iterrows(): - event_id = str(alert["event_id"]) - media_id = str(alert["media_id"]) - if event_id in media_url and media_url[event_id].get(media_id, "").strip(): - images.append(media_url[event_id][media_id]) - boxes.append(alert["processed_loc"]) - - if not images: - img_html = html.Img( - src="./assets/images/no-alert-default.png", - className="common-style", - style={"width": "100%", "height": "auto"}, - ) - return img_html, bbox_divs, 0 - # Ensure slider_value is within the range of available images - slider_value = slider_value % len(images) - img_src = images[slider_value] - images_bbox_list = boxes[slider_value] - - img_src = images[slider_value] - images_bbox_list = boxes[slider_value] - - if len(images_bbox_list): - # Calculate the position and size of the bounding box - x0, y0, width, height = images_bbox_list[0] # first box for now - - # Create the bounding box style - bbox_style = { - "position": "absolute", - "left": f"{x0}%", # Left position based on image width - "top": f"{y0}%", # Top position based on image height - "width": f"{width}%", # Width based on image width - "height": f"{height}%", # Height based on image height - "border": "2px solid red", - "zIndex": "10", - } + if str(detection_data["wildfire_id"].values[0]) not in media_url.keys(): + raise PreventUpdate + + for _, detection in detection_data.iterrows(): + images.append(media_url[str(detection["wildfire_id"])][str(detection["media_id"])]) + boxes = detection_data["processed_loc"].tolist() + + if slider_value < len(images): + img_src = images[slider_value] + images_bbox_list = boxes[slider_value] + + if len(images_bbox_list): + # Calculate the position and size of the bounding box + x0, y0, width, height = images_bbox_list[0] # first box for now + + # Create the bounding box style + bbox_style = { + "position": "absolute", + "left": f"{x0}%", # Left position based on image width + "top": f"{y0}%", # Top position based on image height + "width": f"{width}%", # Width based on image width + "height": f"{height}%", # Height based on image height + "border": "2px solid red", + "zIndex": "10", + } # Create a div that represents the bounding box bbox_div = html.Div(style=bbox_style) @@ -375,11 +379,11 @@ def toggle_auto_move(n_clicks, data): State("image-slider", "value"), State("image-slider", "max"), State("auto-move-button", "n_clicks"), - State("alert-list-container", "children"), + State("detection-list-container", "children"), ], prevent_initial_call=True, ) -def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, alert_list): +def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, detection_list): """ Automatically moves the image slider based on a regular interval and the current auto-move state. @@ -388,12 +392,16 @@ def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, al - current_value (int): Current value of the image slider. - max_value (int): Maximum value of the image slider. - auto_move_clicks (int): Number of clicks on the auto-move button. - - alert_list(list) : Ongoing alert list + - detection_list(list) : Ongoing detection list Returns: - int: Updated value for the image slider. 
""" - if auto_move_clicks % 2 != 0 and len(alert_list): # Auto-move is active and there is ongoing alerts + print("ON EST ICI") + print(current_value) + print(max_value) + + if auto_move_clicks % 2 != 0 and len(detection_list): # Auto-move is active and there is ongoing detections return (current_value + 1) % (max_value + 1) else: raise PreventUpdate @@ -402,31 +410,31 @@ def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, al @app.callback( Output("download-link", "href"), [Input("image-slider", "value")], - [State("alert_on_display", "data"), State("media_url", "data")], + [State("detection_on_display", "data"), State("media_url", "data")], prevent_initial_call=True, ) -def update_download_link(slider_value, alert_data, media_url): +def update_download_link(slider_value, detection_data, media_url): """ Updates the download link for the currently displayed image. Parameters: - slider_value (int): Current value of the image slider. - - alert_data (json): JSON formatted data for the selected event. - - media_url (dict): Dictionary containing media URLs for alerts. + - detection_data (json): JSON formatted data for the selectedwildfire. + - media_url (dict): Dictionary containing media URLs for detections. Returns: - str: URL for downloading the current image. """ - alert_data, data_loaded = read_stored_DataFrame(alert_data) - if data_loaded and len(alert_data): + print("ON RENTRE DANS update_download_link") + detection_data, data_loaded = read_stored_DataFrame(detection_data) + if data_loaded and len(detection_data): try: - event_id, media_id = alert_data.iloc[slider_value][["event_id", "media_id"]] - if str(event_id) in media_url.keys(): - return media_url[str(event_id)][str(media_id)] + wildfire_id, media_id = detection_data.iloc[slider_value][["wildfire_id", "media_id"]] + if str(wildfire_id) in media_url.keys(): + return media_url[str(wildfire_id)][str(media_id)] except Exception as e: logger.info(e) - logger.info(f"Size of the alert_data dataframe: {alert_data.size}") - + logger.info(f"Size of the detection_data dataframe: {detection_data.size}") return "" # Return empty string if no image URL is available @@ -444,48 +452,48 @@ def update_download_link(slider_value, alert_data, media_url): Output("alert-information", "style"), Output("slider-container", "style"), ], - Input("alert_on_display", "data"), - [State("store_api_events_data", "data"), State("event_id_on_display", "data")], + Input("detection_on_display", "data"), + [State("store_wildfires_data", "data"), State("wildfire_id_on_display", "data")], prevent_initial_call=True, ) -def update_map_and_alert_info(alert_data, local_events, event_id_on_display): +def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_display): """ - Updates the map's vision polygons, center, and alert information based on the current alert data. - + Updates the map's vision polygons, center, and alert information based on the current alert data. + ) Parameters: - - alert_data (json): JSON formatted data for the selected event. + - detection_data (json): JSON formatted data for the selecte dwildfire. Returns: - - list: List of vision polygon elements to be displayed on the map. - - list: New center coordinates for the map. - - list: List of vision polygon elements to be displayed on the modal map. - - list: New center coordinates for the modal map. - - str: Camera information for the alert. - - str: Location information for the alert. - - str: Detection angle for the alert. - - str: Date of the alert. 
+ - list: List of vision polygon elements to be displayed on the map. + - list: New center coordinates for the map. + - list: List of vision polygon elements to be displayed on the modal map. + - list: New center coordinates for the modal map. + - str: Camera information for the alert. + - str: Location information for the alert. + - str: Detection angle for the alert. + - str: Date of the alert. """ - alert_data, data_loaded = read_stored_DataFrame(alert_data) + detection_data, data_loaded = read_stored_DataFrame(detection_data) if not data_loaded: raise PreventUpdate - if not alert_data.empty: - local_events, event_data_loaded = read_stored_DataFrame(local_events) - if not event_data_loaded: + if not detection_data.empty: + local_wildfires, wildfire_data_loaded = read_stored_DataFrame(local_wildfires) + if not wildfire_data_loaded: raise PreventUpdate # Convert the 'localization' column to a list (empty lists if the original value was '[]'). - alert_data["localization"] = alert_data["localization"].apply( + detection_data["localization"] = detection_data["localization"].apply( lambda x: ast.literal_eval(x) if isinstance(x, str) and x.strip() != "[]" else [] ) # Filter out rows where 'localization' is not empty and get the last one. # If all are empty, then simply get the last row of the DataFrame. row_with_localization = ( - alert_data[alert_data["localization"].astype(bool)].iloc[-1] - if not alert_data[alert_data["localization"].astype(bool)].empty - else alert_data.iloc[-1] + detection_data[detection_data["localization"].astype(bool)].iloc[-1] + if not detection_data[detection_data["localization"].astype(bool)].empty + else detection_data.iloc[-1] ) polygon, detection_azimuth = build_vision_polygon( @@ -497,7 +505,7 @@ def update_map_and_alert_info(alert_data, local_events, event_id_on_display): localization=row_with_localization["processed_loc"], ) - date_val, cam_name = local_events[local_events["id"] == event_id_on_display][ + date_val, cam_name = local_wildfires[local_wildfires["id"] == wildfire_id_on_display][ ["created_at", "device_name"] ].values[0] @@ -537,33 +545,30 @@ def update_map_and_alert_info(alert_data, local_events, event_id_on_display): Output("to_acknowledge", "data"), [Input("acknowledge-button", "n_clicks")], [ - State("event_id_on_display", "data"), - State("user_headers", "data"), - State("user_credentials", "data"), + State("wildfire_id_on_display", "data"), + State("client_token", "data"), ], prevent_initial_call=True, ) -def acknowledge_event(n_clicks, event_id_on_display, user_headers, user_credentials): +def acknowledge_event(n_clicks, wildfire_id_on_display, client_token): """ - Acknowledges the selected event and updates the state to reflect this. + Acknowledges the selected wildfire and updates the state to reflect this. Parameters: - n_clicks (int): Number of clicks on the acknowledge button. - - event_id_on_display (int): Currently displayed event ID. - - user_headers (dict): User authorization headers for API requests. - - user_credentials (tuple): User credentials (username, password). + -wildfire_id_on_display (int): Currently displayedwildfire ID. + - client_token (str): Token used for API requests. Returns: - - int: The ID of the event that has been acknowledged. + - int: The ID of thewildfire that has been acknowledged. 
""" - if event_id_on_display == 0 or n_clicks == 0: + print("ON RENTRE DANS acknowledge-button") + if wildfire_id_on_display == 0 or n_clicks == 0: raise PreventUpdate - user_token = user_headers["Authorization"].split(" ")[1] - api_client.token = user_token - call_api(api_client.acknowledge_event, user_credentials)(event_id=int(event_id_on_display)) + Client(client_token, cfg.API_URL).acknowledge_event(wildfire_id=int(wildfire_id_on_display)) - return event_id_on_display + return wildfire_id_on_display # Modal issue let's add this later @@ -583,7 +588,7 @@ def toggle_fullscreen_map(n_clicks_open, is_open): @app.callback( Output("map", "zoom"), [ - Input({"type": "event-button", "index": ALL}, "n_clicks"), + Input({"type": "wildfire-button", "index": ALL}, "n_clicks"), ], ) def reset_zoom(n_clicks): diff --git a/app/components/alerts.py b/app/components/detections.py similarity index 52% rename from app/components/alerts.py rename to app/components/detections.py index 8b56a9a..c7f1861 100644 --- a/app/components/alerts.py +++ b/app/components/detections.py @@ -6,20 +6,20 @@ from dash import html -def create_event_list(): +def create_wildfire_list(): """ - Creates a container for the alert list with a fixed height and scrollable content. + Creates a container for the detection list with a fixed height and scrollable content. This function generates a Dash HTML Div element containing a header and an empty container. - The empty container ('alert-list-container') is meant to be populated with alert buttons + The empty container ('detection-list-container') is meant to be populated with detection buttons dynamically via a callback. The container has a fixed height and is scrollable, allowing - users to browse through a potentially long list of alerts. + users to browse through a potentially long list of detections. Returns: - - dash.html.Div: A Div element containing the header and the empty container for alert buttons. + - dash.html.Div: A Div element containing the header and the empty container for detection buttons. 
""" - # Set a fixed height for the alert list container and enable scrolling - event_list_style = { + # Set a fixed height for the detection list container and enable scrolling + wildfire_list_style = { "height": "calc(100vh - 120px)", # Adjust the height as required "overflowY": "scroll", # Enable vertical scrolling "padding": "10px", @@ -27,6 +27,7 @@ def create_event_list(): return html.Div( [ - html.Div(id="alert-list-container", style=event_list_style, children=[]), # Empty container + html.H1("Detections en cours", style={"textAlign": "center", "fontSize": "30px"}), + html.Div(id="detection-list-container", style=wildfire_list_style, children=[]), # Empty container ] ) diff --git a/app/index.py b/app/index.py index eb0039d..ee373df 100644 --- a/app/index.py +++ b/app/index.py @@ -8,7 +8,7 @@ import callbacks.display_callbacks # noqa: F401 import logging_config from dash import html -from dash.dependencies import Input, Output, State +from dash.dependencies import Input, Output from layouts.main_layout import get_main_layout from main import app @@ -26,22 +26,20 @@ # Manage Pages @app.callback( Output("page-content", "children"), - [Input("url", "pathname"), Input("user_headers", "data")], - State("user_credentials", "data"), + [Input("url", "pathname"), Input("client_token", "data")], ) -def display_page(pathname, user_headers, user_credentials): +def display_page(pathname, client_token): logger.debug( - "display_page called with pathname: %s, user_headers: %s, user_credentials: %s", + "display_page called with pathname: %s, user_credentials: %s", pathname, - user_headers, - user_credentials, + {cfg.API_LOGIN, cfg.API_PWD}, ) - if user_headers is None: - logger.info("No user headers found, showing login layout.") + if client_token is None: + logger.info("No token found, showing login layout.") return login_layout() if pathname == "/" or pathname is None: logger.info("Showing homepage layout.") - return homepage_layout(user_headers, user_credentials) + return homepage_layout(client_token) else: logger.warning("Unable to find page for pathname: %s", pathname) return html.Div([html.P("Unable to find this page.", className="alert alert-warning")]) diff --git a/app/layouts/main_layout.py b/app/layouts/main_layout.py index fdeac17..b036ee2 100644 --- a/app/layouts/main_layout.py +++ b/app/layouts/main_layout.py @@ -8,22 +8,11 @@ import dash_bootstrap_components as dbc import pandas as pd from dash import dcc, html -from pyroclient import Client -import config as cfg from components.navbar import Navbar -from services import api_client +from services import instantiate_token -if not cfg.LOGIN: - client = Client(cfg.API_URL, cfg.API_LOGIN, cfg.API_PWD) - user_headers = client.headers - user_token = user_headers["Authorization"].split(" ")[1] - api_client.token = user_token - user_credentials = {"username": cfg.API_LOGIN, "password": cfg.API_PWD} - -else: - user_credentials = {} - user_headers = None +api_client = instantiate_token() def get_main_layout(): @@ -38,7 +27,7 @@ def get_main_layout(): ), dcc.Interval(id="main_api_fetch_interval", interval=30 * 1000), dcc.Store( - id="store_api_events_data", + id="store_wildfires_data", storage_type="session", data=json.dumps( { @@ -48,7 +37,7 @@ def get_main_layout(): ), ), dcc.Store( - id="store_api_alerts_data", + id="store_detections_data", storage_type="session", data=json.dumps( { @@ -57,9 +46,9 @@ def get_main_layout(): } ), ), - dcc.Store(id="last_displayed_event_id", storage_type="session"), + dcc.Store(id="last_displayed_wildfire_id", 
storage_type="session"), dcc.Store( - id="alert_on_display", + id="detection_on_display", storage_type="session", data=json.dumps( { @@ -73,7 +62,7 @@ def get_main_layout(): storage_type="session", data={}, ), - dcc.Store(id="event_id_on_display", data=0), + dcc.Store(id="wildfire_id_on_display", data=0), dcc.Store(id="auto-move-state", data={"active": True}), # Add this to your app.layout dcc.Store(id="bbox_visibility", data={"visible": True}), @@ -103,10 +92,9 @@ def get_main_layout(): is_open=False, ), # Storage components saving the user's headers and credentials - dcc.Store(id="user_headers", storage_type="session", data=user_headers), + dcc.Store(id="client_token", storage_type="session", data=api_client.token), # [TEMPORARY FIX] Storing the user's credentials to refresh the token when needed - dcc.Store(id="user_credentials", storage_type="session", data=user_credentials), dcc.Store(id="to_acknowledge", data=0), - dcc.Store(id="trigger_no_events", data=False), + dcc.Store(id="trigger_no_wildfires", data=False), ] ) diff --git a/app/pages/homepage.py b/app/pages/homepage.py index 6a767bb..a607da2 100644 --- a/app/pages/homepage.py +++ b/app/pages/homepage.py @@ -7,19 +7,21 @@ import dash_bootstrap_components as dbc from dash import Dash, dcc, html -from components.alerts import create_event_list -from utils.display import build_alerts_map +from components.detections import create_wildfire_list +from utils.display import build_detections_map app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) app.css.append_css({"external_url": "/assets/style.css"}) -def homepage_layout(user_headers, user_credentials): +def homepage_layout(client_token): return dbc.Container( [ dbc.Row( [ - dbc.Col([create_event_list()], width=2, className="mb-4"), + # Column for the alert list + dbc.Col(create_wildfire_list(), width=2, className="mb-4"), + # Column for the image dbc.Col( [ html.Div( @@ -113,7 +115,7 @@ def homepage_layout(user_headers, user_credentials): ), dbc.Row( dbc.Col( - build_alerts_map(user_headers, user_credentials), + build_detections_map(client_token), className="common-style", style={ "position": "relative", @@ -149,7 +151,7 @@ def homepage_layout(user_headers, user_credentials): [ dbc.ModalHeader("Carte"), dbc.ModalBody( - build_alerts_map(user_headers, user_credentials, id_suffix="-md"), + build_detections_map(client_token, id_suffix="-md"), ), ], id="map-modal", diff --git a/app/services/__init__.py b/app/services/__init__.py index 061b12e..7815e66 100644 --- a/app/services/__init__.py +++ b/app/services/__init__.py @@ -1,3 +1,3 @@ -from .api import api_client, call_api +from .api import instantiate_token -__all__ = ["api_client", "call_api"] +__all__ = ["instantiate_token"] diff --git a/app/services/api.py b/app/services/api.py index 204fc50..a15686b 100644 --- a/app/services/api.py +++ b/app/services/api.py @@ -3,49 +3,37 @@ # This program is licensed under the Apache License 2.0. # See LICENSE or go to for full license details. 
-from functools import wraps -from typing import Callable, Dict +from typing import Optional +from urllib.parse import urljoin +import requests from pyroclient import Client import config as cfg -__all__ = ["api_client", "call_api"] +__all__ = ["instantiate_token"] -if any(not isinstance(val, str) for val in [cfg.API_URL, cfg.API_LOGIN, cfg.API_PWD]): - raise ValueError("The following environment variables need to be set: 'API_URL', 'API_LOGIN', 'API_PWD'") +def instantiate_token(login: Optional[str] = None, passwrd: Optional[str] = None): -api_client = Client(cfg.API_URL, cfg.API_LOGIN, cfg.API_PWD) + if not cfg.LOGIN: + if any(not isinstance(val, str) for val in [cfg.API_URL, cfg.API_LOGIN, cfg.API_PWD]): + raise ValueError("The following environment variables need to be set: 'API_URL', 'API_LOGIN', 'API_PWD'") + else: + access_token = requests.post( + urljoin(cfg.API_URL, "/api/v1/login/creds"), + data={"username": cfg.API_LOGIN, "password": cfg.API_PWD}, + timeout=5, + ).json()["access_token"] + api_client = Client(access_token, cfg.API_URL) + return api_client -def call_api(func: Callable, user_credentials: Dict[str, str]) -> Callable: - """Decorator to call API method and renew the token if needed. Usage: + access_token = requests.post( + urljoin(cfg.API_URL, "/api/v1/login/creds"), + data={"username": login, "password": passwrd}, + timeout=5, + ).json()["access_token"] - result = call_api(my_func, user_credentials)(1, 2, verify=False) - - Instead of: - - response = my_func(1, verify=False) - if response.status_code == 401: - api_client.refresh_token(user_credentials["username"], user_credentials["password"]) - response = my_func(1, verify=False) - result = response.json() - - Args: - func: function that calls API method - user_credentials: a dictionary with two keys, the username and password for authentication - - Returns: decorated function, to be called with positional and keyword arguments - """ - - @wraps(func) - def wrapper(*args, **kwargs): - response = func(*args, **kwargs) - if response.status_code == 401: - api_client.refresh_token(user_credentials["username"], user_credentials["password"]) - response = func(*args, **kwargs) - assert response.status_code // 100 == 2, response.text - return response.json() - - return wrapper + api_client = Client(access_token, cfg.API_URL) + return api_client diff --git a/app/utils/data.py b/app/utils/data.py index 26dbb3e..31144f3 100644 --- a/app/utils/data.py +++ b/app/utils/data.py @@ -7,15 +7,12 @@ import json from datetime import datetime from io import StringIO -from pathlib import Path from typing import List import pandas as pd import pytz from timezonefinder import TimezoneFinder -from utils.sites import get_sites - tf = TimezoneFinder() @@ -95,91 +92,3 @@ def process_bbox(input_str): new_boxes.append([x0 * 100, y0 * 100, width * 100, height * 100]) return new_boxes - - -def past_ndays_api_events(api_events, n_days=0): - """ - Filters the given live events to retain only those within the past n days. - - Args: - api_events (pd.Dataframe): DataFrame containing live events data. It must have a "created_at" column - indicating the datetime of the event. - n_days (int, optional): Specifies the number of days into the past to retain events. Defaults to 0. - - Returns: - pd.DataFrame: A filtered DataFrame containing only events from the past n_days. 
- """ - # Ensure the column is in datetime format - api_events["created_at"] = pd.to_datetime(api_events["created_at"]) - - # Define the end date (now) for the filter - end_date = pd.Timestamp.now() - - if n_days == 0: - # When n_days is 0, adjust start_date to the beginning of today to include today's events - start_date = end_date.normalize() - else: - # For n_days > 0 - start_date = end_date - pd.Timedelta(days=n_days) - - # Filter events from the past n days, including all events from today when n_days is 0 - api_events = api_events[(api_events["created_at"] > start_date) | (api_events["created_at"] == start_date)] - - return api_events - - -# Sites - - -def retrieve_site_from_device_id(api_client, user_credentials, device_id): - """ - Retrieves the site name associated with a given device ID by looking up in the site devices data. - - Args: - api_client: API client to interact with the remote server. - user_credentials (tuple): User credentials (username, password). - device_id: Device ID for which the site name is to be retrieved. - - Returns: - str: The name of the site associated with the given device ID. - """ - site_devices_data = load_site_data_file(api_client, user_credentials) - device_id = str(int(device_id)) - - if device_id in site_devices_data.keys(): - return site_devices_data[device_id] - else: - site_devices_data = load_site_data_file(api_client, user_credentials, force_dl=True) - - return site_devices_data[device_id] - - -def load_site_data_file(api_client, user_credentials, site_devices_file="site_devices.json", force_dl=False): - """ - Loads site device data from a file or fetches it from the API if the file does not exist or if forced to download. - - Args: - api_client: API client to interact with the remote server. - user_credentials (tuple): User credentials (username, password). - site_devices_file (str): Path to the file where site device data is stored. Defaults to 'site_devices.json'. - force_dl (bool): If True, forces downloading the data from the API. Defaults to False. - - Returns: - A dictionary mapping site names to device IDs. - """ - site_devices_path = Path(site_devices_file) - - if site_devices_path.is_file() and not force_dl: - with site_devices_path.open() as json_file: - return json.load(json_file) - - client_sites = get_sites(user_credentials) - site_devices_dict = {} - for _, site in client_sites.iterrows(): - site_ids = set(api_client.get_site_devices(site["id"]).json()) - for site_id in site_ids: - site_devices_dict[str(site_id)] = site["name"].replace("_", " ") - with site_devices_path.open("w") as fp: - json.dump(site_devices_dict, fp) - - return site_devices_dict diff --git a/app/utils/display.py b/app/utils/display.py index ed3ffe0..5460e26 100644 --- a/app/utils/display.py +++ b/app/utils/display.py @@ -5,14 +5,14 @@ import dash_leaflet as dl +import pandas as pd import requests from dash import html from geopy import Point from geopy.distance import geodesic +from pyroclient import Client import config as cfg -from services import api_client -from utils.sites import get_sites DEPARTMENTS = requests.get(cfg.GEOJSON_FILE, timeout=10).json() @@ -51,7 +51,7 @@ def calculate_new_polygon_parameters(azimuth, opening_angle, localization): return int(new_azimuth) % 360, int(new_opening_angle) -def build_sites_markers(user_headers, user_credentials): +def build_cameras_markers(token: str): """ This function reads the site markers by making the API, that contains all the information about the sites equipped with detection units. 
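
This module also defines `build_vision_polygon`, which the display callbacks call to draw each camera's field of view on the map; its body is untouched by this patch and therefore does not appear in the diff. For context, one plausible way to trace such a cone is sketched below. This is only an illustration, assuming geopy's `Point`/`geodesic` API, azimuth and opening angle given in degrees, and `dist_km` in kilometres; it is not the project's implementation.

```python
from geopy import Point
from geopy.distance import geodesic


def vision_cone_points(site_lat, site_lon, azimuth, opening_angle, dist_km, steps=20):
    """Trace a camera's vision cone as a list of (lat, lon) points."""
    origin = Point(site_lat, site_lon)
    half = opening_angle / 2
    points = [(site_lat, site_lon)]  # the cone starts at the camera itself
    for i in range(steps + 1):
        # Sweep bearings from (azimuth - half) to (azimuth + half)
        bearing = (azimuth - half + i * opening_angle / steps) % 360
        dest = geodesic(kilometers=dist_km).destination(origin, bearing=bearing)
        points.append((dest.latitude, dest.longitude))
    return points


# Example: a 10 km cone, 50 degrees wide, centred on azimuth 110
# vision_cone_points(44.5, 4.2, azimuth=110, opening_angle=50, dist_km=10)
```
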
@@ -70,17 +70,15 @@ def build_sites_markers(user_headers, user_credentials): "popupAnchor": [0, -20], # Point from which the popup should open relative to the iconAnchor } - user_token = user_headers["Authorization"].split(" ")[1] - api_client.token = user_token + cameras = pd.DataFrame(Client(token, cfg.API_URL).fetch_cameras().json()) - client_sites = get_sites(user_credentials) markers = [] - for _, site in client_sites.iterrows(): - site_id = site["id"] - lat = round(site["lat"], 4) - lon = round(site["lon"], 4) - site_name = site["name"].replace("_", " ").title() + for _, camera in cameras.iterrows(): + site_id = camera["id"] + lat = round(camera["lat"], 4) + lon = round(camera["lon"], 4) + site_name = camera["name"].replace("_", " ").title() markers.append( dl.Marker( id=f"site_{site_id}", # Necessary to set an id for each marker to receive callbacks @@ -99,7 +97,7 @@ def build_sites_markers(user_headers, user_credentials): ) # We group all dl.Marker objects in a dl.MarkerClusterGroup object and return it - return markers, client_sites + return markers, cameras def build_vision_polygon(site_lat, site_lon, azimuth, opening_angle, dist_km, localization=None): @@ -137,7 +135,7 @@ def build_vision_polygon(site_lat, site_lon, azimuth, opening_angle, dist_km, lo return polygon, azimuth -def build_alerts_map(user_headers, user_credentials, id_suffix=""): +def build_detections_map(client_token, id_suffix=""): """ The following function mobilises functions defined hereabove or in the utils module to instantiate and return a dl.Map object, corresponding to the "Alerts and Infrastructure" view. @@ -150,12 +148,12 @@ def build_alerts_map(user_headers, user_credentials, id_suffix=""): "height": "100%", } - markers, client_sites = build_sites_markers(user_headers, user_credentials) + markers, cameras = build_cameras_markers(client_token) map_object = dl.Map( center=[ - client_sites["lat"].median(), - client_sites["lon"].median(), + cameras["lat"].median(), + cameras["lon"].median(), ], # Determines the point around which the map is centered zoom=10, # Determines the initial level of zoom around the center point children=[ @@ -171,23 +169,24 @@ def build_alerts_map(user_headers, user_credentials, id_suffix=""): return map_object -def create_event_list_from_df(api_events): +def create_wildfire_list_from_df(wildfires): """ - This function build the list of events on the left based on event data + This function build the list of wildfires on the left based on wildfire data """ - if api_events.empty: + if wildfires.empty: return [] - filtered_events = api_events.sort_values("created_at").drop_duplicates("id", keep="last")[::-1] + + filtered_wildfires = wildfires.sort_values("created_at").drop_duplicates("id", keep="last")[::-1] return [ html.Button( - id={"type": "event-button", "index": event["id"]}, + id={"type": "wildfire-button", "index": wildfire["id"]}, children=[ html.Div( - f"{event['device_name']}", + f"{wildfire['camera_name']}", style={"fontWeight": "bold"}, ), - html.Div(event["created_at"].strftime("%Y-%m-%d %H:%M")), + html.Div(wildfire["created_at"].strftime("%Y-%m-%d %H:%M")), ], n_clicks=0, style={ @@ -198,5 +197,5 @@ def create_event_list_from_df(api_events): "width": "100%", }, ) - for _, event in filtered_events.iterrows() + for _, wildfire in filtered_wildfires.iterrows() ] diff --git a/app/utils/sites.py b/app/utils/sites.py deleted file mode 100644 index 7111530..0000000 --- a/app/utils/sites.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2020-2024, Pyronear. 
- -# This program is licensed under the Apache License 2.0. -# See LICENSE or go to for full license details. - -from typing import Any, Dict, Optional - -import pandas as pd -import requests - -import config as cfg - - -def get_token(api_url: str, login: str, pwd: str) -> str: - response = requests.post(f"{api_url}/login/access-token", data={"username": login, "password": pwd}, timeout=3) - if response.status_code != 200: - raise ValueError(response.json()["detail"]) - return response.json()["access_token"] - - -def api_request(method_type: str, route: str, headers=Dict[str, str], payload: Optional[Dict[str, Any]] = None): - kwargs = {"json": payload} if isinstance(payload, dict) else {} - - response = getattr(requests, method_type)(route, headers=headers, **kwargs) - return response.json() - - -def get_sites(user_credentials): - api_url = cfg.API_URL.rstrip("/") - superuser_login = user_credentials["username"] - superuser_pwd = user_credentials["password"] - - superuser_auth = { - "Authorization": f"Bearer {get_token(api_url, superuser_login, superuser_pwd)}", - "Content-Type": "application/json", - } - api_sites = api_request("get", f"{api_url}/sites/", superuser_auth) - return pd.DataFrame(api_sites) diff --git a/pyproject.toml b/pyproject.toml index 85b5bf0..e4f8099 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dash = ">=2.14.0" dash-bootstrap-components = ">=1.5.0" dash-leaflet = "^0.1.4" pandas = ">=2.1.4" -pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "767be30a781b52b29d68579d543e3f45ac8c4713", subdirectory = "client" } +pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "e5465bbf1aabde6041f0825b0502fe61aca2e1cc", subdirectory = "client" } python-dotenv = ">=1.0.0" geopy = ">=2.4.0" From 74d17dee83df77c1dc59b3258a3f2e766e272e64 Mon Sep 17 00:00:00 2001 From: Ronan Date: Thu, 11 Jul 2024 11:41:13 +0200 Subject: [PATCH 02/14] fix ongoing --- app/callbacks/data_callbacks.py | 81 +++++++++++++++++----------- app/callbacks/display_callbacks.py | 87 +++++++++++++++--------------- app/components/detections.py | 4 +- pyproject.toml | 2 +- 4 files changed, 95 insertions(+), 79 deletions(-) diff --git a/app/callbacks/data_callbacks.py b/app/callbacks/data_callbacks.py index 7a2d14c..0d4362c 100644 --- a/app/callbacks/data_callbacks.py +++ b/app/callbacks/data_callbacks.py @@ -17,7 +17,7 @@ import config as cfg from services import instantiate_token -from utils.data import read_stored_DataFrame +from utils.data import process_bbox, read_stored_DataFrame logger = logging_config.configure_logging(cfg.DEBUG, cfg.SENTRY_DSN) @@ -81,7 +81,7 @@ def login_callback(n_clicks, username, password, client_token): ], prevent_initial_call=True, ) -def api_watcher(n_intervals, client_token): +def data_transform(n_intervals, client_token): """ Fetches and processes live wildfire and detection data from the API at regular intervals. 
@@ -133,39 +133,62 @@ def api_watcher(n_intervals, client_token): api_detections = api_detections.sort_values(by="created_at") # Initialiser la liste pour les wildfires - wildfires = [] - id = 1 + id_counter = 1 cameras = pd.DataFrame(api_client.fetch_cameras().json()) + api_detections["lat"] = None + api_detections["lon"] = None + api_detections["wildfire_id"] = None + api_detections["processed_loc"] = None + api_detections["processed_loc"] = api_detections["localization"].apply(process_bbox) - # Initialiser le premier wildfire - current_wildfire = { - "created_at": api_detections.iloc[0]["created_at"], - "detection_ids": [api_detections.iloc[0]["id"]], - "id": id, - "camera_name": cameras.loc[cameras["id"] == api_detections.iloc[0]["camera_id"], "name"].values[0], - } + last_detection_time_per_camera: dict[int, str] = {} + wildfires_dict: dict[int, list] = {} # Parcourir les détections pour les regrouper en wildfires - for i in range(1, len(api_detections)): + for i in range(0, len(api_detections)): + camera_id = api_detections.at[i, "camera_id"] + camera = cameras.loc[cameras["id"] == camera_id] + camera = camera.iloc[0] # Ensure camera is a Series + api_detections.at[i, "lat"] = camera["lat"] + api_detections.at[i, "lon"] = camera["lon"] detection = api_detections.iloc[i] - time_diff = detection["created_at"] - current_wildfire["created_at"] - if time_diff <= pd.Timedelta(minutes=30): - # Si la différence de temps est inférieure à 30 minutes, ajouter à l'actuel wildfire - current_wildfire["detection_ids"].append(detection["id"]) - else: - # Sinon, terminer le current_wildfire et commencer un nouveau - wildfires.append(current_wildfire) - id = id + 1 - current_wildfire = { - "id": id, - "camera_name": cameras.loc[cameras["id"] == detection["camera_id"], "name"].values[0], + if camera_id not in wildfires_dict: + wildfires_dict.setdefault(camera_id, []) + last_detection_time_per_camera.setdefault(camera_id, "") + # Initialize the first wildfire for this camera + wildfire = { + "id": id_counter, + "camera_name": camera["name"], "created_at": detection["created_at"], "detection_ids": [detection["id"]], } + wildfires_dict[camera_id] = [wildfire] + id_counter += 1 + else: + time_diff = detection["created_at"] - last_detection_time_per_camera[camera_id] + + if time_diff <= pd.Timedelta(minutes=30): + # Si la différence de temps est inférieure à 30 minutes, ajouter à l'actuel wildfire + wildfires_dict[camera_id][-1]["detection_ids"].append(detection["id"]) + print("ON AJOUTE UNE DETECTION : " + str(wildfires_dict[camera_id][-1]["detection_ids"])) + else: + # Initialize a new wildfire for this camera + wildfire = { + "id": id_counter, + "camera_name": camera["name"], + "created_at": detection["created_at"], + "detection_ids": [detection["id"]], + } + wildfires_dict[camera_id].append(wildfire) + id_counter += 1 + api_detections.at[i, "wildfire_id"] = wildfires_dict[camera_id][-1]["id"] + last_detection_time_per_camera[camera_id] = detection["created_at"] - # Ajouter le dernier wildfire - wildfires.append(current_wildfire) + # Convert the dictionary to a list of wildfires + wildfires = [] + for wildfire_list in wildfires_dict.values(): + wildfires.extend(wildfire_list) # Convertir la liste des wildfires en DataFrame wildfires_df = pd.DataFrame(wildfires) @@ -231,10 +254,6 @@ def get_media_url( detection_id = str(row["id"]) # Fetch the URL for this media_id - try: - media_url[detection_id] = Client(client_token, cfg.API_URL).get_detection_url(detection_id)["url"] - # TODO REFACTOR : should be 
removed since we already loaded all the Detections in an other callback !!! - except Exception: # General catch-all for other exceptions - media_url[detection_id] = "" # Handle potential exceptions - + response = Client(client_token, cfg.API_URL).get_detection_url(detection_id) + media_url[detection_id] = response.json()["url"] return media_url diff --git a/app/callbacks/display_callbacks.py b/app/callbacks/display_callbacks.py index 952159e..9b34a39 100644 --- a/app/callbacks/display_callbacks.py +++ b/app/callbacks/display_callbacks.py @@ -55,7 +55,7 @@ def toggle_modal(media_url, client_token, trigger_no_wildfires, local_detections # Createwildfire list @app.callback( - Output("detection-list-container", "children"), + Output("wildfire-list-container", "children"), [ Input("store_wildfires_data", "data"), Input("to_acknowledge", "data"), @@ -86,6 +86,7 @@ def update_wildfire_list(local_wildfires, to_acknowledge, media_url): if len(local_wildfires): local_wildfires = local_wildfires[~local_wildfires["id"].isin([to_acknowledge])] + return create_wildfire_list_from_df(local_wildfires) @@ -137,9 +138,6 @@ def select_wildfire_with_button( trigger_id = ctx.triggered[0]["prop_id"].split(".")[0] - print("on rentre ACKNOWLEDGE") - print(str(trigger_id)) - if trigger_id == "to_acknowledge": idx = local_detections[~local_detections["wildfire_id"].isin([to_acknowledge])]["wildfire_id"].values if len(idx) == 0: @@ -215,9 +213,6 @@ def update_display_data(wildfire_id_on_display, local_detections): if not data_loaded: raise PreventUpdate - print("wildfire_id_on_display") - print(wildfire_id_on_display) - if wildfire_id_on_display == 0: return json.dumps( { @@ -226,10 +221,7 @@ def update_display_data(wildfire_id_on_display, local_detections): } ) else: - if wildfire_id_on_display == 0: - wildfire_id_on_display = local_detections["detection_id"].values[0] - - detection_on_display = local_detections[local_detections["detection_id"] == wildfire_id_on_display] + detection_on_display = local_detections[local_detections["id"] == wildfire_id_on_display] return json.dumps({"data": detection_on_display.to_json(orient="split"), "data_loaded": True}) @@ -243,17 +235,17 @@ def update_display_data(wildfire_id_on_display, local_detections): [Input("image-slider", "value"), Input("detection_on_display", "data")], [ State("media_url", "data"), - State("detection-list-container", "children"), + State("wildfire-list-container", "children"), ], prevent_initial_call=True, ) -def update_image_and_bbox(slider_value, detection_data, media_url, detection_list): +def update_image_and_bbox(slider_value, detection_data, media_url, wildfire_list): """ Updates the image and bounding box display based on the slider value. Parameters: - slider_value (int): Current value of the image slider. - - detection_data (json): JSON formatted data for the selectedwildfire. + - detection_data (json): JSON formatted data for the selected wildfire. - media_url (dict): Dictionary containing media URLs for detections. Returns: @@ -267,12 +259,7 @@ def update_image_and_bbox(slider_value, detection_data, media_url, detection_lis if not data_loaded: raise PreventUpdate - print("Detection list :") - print(detection_list) - print("Detection data :") # TODO : why do we need both ? 
- print(detection_data) - - if len(detection_list) == 0: + if len(wildfire_list) == 0: img_html = html.Img( src="./assets/images/no-alert-default.png", className="common-style", @@ -282,14 +269,15 @@ def update_image_and_bbox(slider_value, detection_data, media_url, detection_lis # Filter images with non-empty URLs images = [] - - if str(detection_data["wildfire_id"].values[0]) not in media_url.keys(): + if str(detection_data["id"].values[0]) not in media_url.keys(): raise PreventUpdate for _, detection in detection_data.iterrows(): - images.append(media_url[str(detection["wildfire_id"])][str(detection["media_id"])]) + images.append(media_url[str(detection["id"])]) boxes = detection_data["processed_loc"].tolist() + print("slider value dans update_image_and_bbox: " + str(slider_value)) + print("slider max value: " + str(len(images) - 1)) if slider_value < len(images): img_src = images[slider_value] images_bbox_list = boxes[slider_value] @@ -379,11 +367,12 @@ def toggle_auto_move(n_clicks, data): State("image-slider", "value"), State("image-slider", "max"), State("auto-move-button", "n_clicks"), - State("detection-list-container", "children"), + State("wildfire_id_on_display", "data"), + State("store_wildfires_data", "data"), ], prevent_initial_call=True, ) -def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, detection_list): +def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, wildfire_id, wildfires): """ Automatically moves the image slider based on a regular interval and the current auto-move state. @@ -397,11 +386,15 @@ def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, de Returns: - int: Updated value for the image slider. """ - print("ON EST ICI") - print(current_value) - print(max_value) + wildfires_df, data_loaded = read_stored_DataFrame(wildfires) + if data_loaded: + detection_ids_list = wildfires_df.loc[wildfires_df["id"] == wildfire_id, "detection_ids"].tolist()[0] + print("detection_ids_list size:" + str(len(detection_ids_list))) + print(str(wildfires_df.loc[wildfires_df["id"] == wildfire_id, "detection_ids"])) + else: + detection_ids_list = [] - if auto_move_clicks % 2 != 0 and len(detection_list): # Auto-move is active and there is ongoing detections + if auto_move_clicks % 2 != 0 and len(detection_ids_list): # Auto-move is active and there is ongoing detections return (current_value + 1) % (max_value + 1) else: raise PreventUpdate @@ -410,31 +403,32 @@ def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, de @app.callback( Output("download-link", "href"), [Input("image-slider", "value")], - [State("detection_on_display", "data"), State("media_url", "data")], + [State("wildfire_id_on_display", "data"), State("store_wildfires_data", "data"), State("media_url", "data")], prevent_initial_call=True, ) -def update_download_link(slider_value, detection_data, media_url): +def update_download_link(slider_value, wildfire_id, wildfires, media_url): """ Updates the download link for the currently displayed image. Parameters: - slider_value (int): Current value of the image slider. - - detection_data (json): JSON formatted data for the selectedwildfire. + - detection_data (json): JSON formatted data for the selected wildfire. - media_url (dict): Dictionary containing media URLs for detections. Returns: - str: URL for downloading the current image. 
""" - print("ON RENTRE DANS update_download_link") - detection_data, data_loaded = read_stored_DataFrame(detection_data) - if data_loaded and len(detection_data): + wildfires_df, data_loaded = read_stored_DataFrame(wildfires) + detection_ids_list = wildfires_df.loc[wildfires_df["id"] == wildfire_id, "detection_ids"].tolist()[0] + if data_loaded and len(detection_ids_list): try: - wildfire_id, media_id = detection_data.iloc[slider_value][["wildfire_id", "media_id"]] - if str(wildfire_id) in media_url.keys(): - return media_url[str(wildfire_id)][str(media_id)] + print("Value of the slider : " + str(slider_value)) + detection_id = detection_ids_list[slider_value] + if str(detection_id) in media_url.keys(): + return media_url[str(detection_id)] except Exception as e: logger.info(e) - logger.info(f"Size of the detection_data dataframe: {detection_data.size}") + logger.info(f"Size of the detections list: {len(detection_ids_list)}") return "" # Return empty string if no image URL is available @@ -486,7 +480,7 @@ def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_di # Convert the 'localization' column to a list (empty lists if the original value was '[]'). detection_data["localization"] = detection_data["localization"].apply( lambda x: ast.literal_eval(x) if isinstance(x, str) and x.strip() != "[]" else [] - ) + ) # WHY? # Filter out rows where 'localization' is not empty and get the last one. # If all are empty, then simply get the last row of the DataFrame. @@ -506,7 +500,7 @@ def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_di ) date_val, cam_name = local_wildfires[local_wildfires["id"] == wildfire_id_on_display][ - ["created_at", "device_name"] + ["created_at", "camera_name"] ].values[0] camera_info = f"Camera: {cam_name}" @@ -545,12 +539,13 @@ def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_di Output("to_acknowledge", "data"), [Input("acknowledge-button", "n_clicks")], [ + State("store_wildfires_data", "data"), State("wildfire_id_on_display", "data"), State("client_token", "data"), ], prevent_initial_call=True, ) -def acknowledge_event(n_clicks, wildfire_id_on_display, client_token): +def acknowledge_event(n_clicks, local_wildfires, wildfire_id_on_display, client_token): """ Acknowledges the selected wildfire and updates the state to reflect this. @@ -562,11 +557,13 @@ def acknowledge_event(n_clicks, wildfire_id_on_display, client_token): Returns: - int: The ID of thewildfire that has been acknowledged. """ - print("ON RENTRE DANS acknowledge-button") if wildfire_id_on_display == 0 or n_clicks == 0: raise PreventUpdate - - Client(client_token, cfg.API_URL).acknowledge_event(wildfire_id=int(wildfire_id_on_display)) + local_wildfires, _ = read_stored_DataFrame(local_wildfires) + wildfire_id = int(wildfire_id_on_display) + detection_ids = local_wildfires[local_wildfires["id"] == wildfire_id][["detection_ids"]].values + for detection_id in detection_ids: + Client(client_token, cfg.API_URL).label_detection(detection_id=detection_id, is_wildfire=False) return wildfire_id_on_display diff --git a/app/components/detections.py b/app/components/detections.py index c7f1861..4343cf7 100644 --- a/app/components/detections.py +++ b/app/components/detections.py @@ -11,7 +11,7 @@ def create_wildfire_list(): Creates a container for the detection list with a fixed height and scrollable content. This function generates a Dash HTML Div element containing a header and an empty container. 
- The empty container ('detection-list-container') is meant to be populated with detection buttons + The empty container ('wildfire-list-container') is meant to be populated with detection buttons dynamically via a callback. The container has a fixed height and is scrollable, allowing users to browse through a potentially long list of detections. @@ -28,6 +28,6 @@ def create_wildfire_list(): return html.Div( [ html.H1("Detections en cours", style={"textAlign": "center", "fontSize": "30px"}), - html.Div(id="detection-list-container", style=wildfire_list_style, children=[]), # Empty container + html.Div(id="wildfire-list-container", style=wildfire_list_style, children=[]), # Empty container ] ) diff --git a/pyproject.toml b/pyproject.toml index e4f8099..faf40ba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dash = ">=2.14.0" dash-bootstrap-components = ">=1.5.0" dash-leaflet = "^0.1.4" pandas = ">=2.1.4" -pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "e5465bbf1aabde6041f0825b0502fe61aca2e1cc", subdirectory = "client" } +pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "5c492a57d6454d953c81d9f75968babcaab6dfec", subdirectory = "client" } python-dotenv = ">=1.0.0" geopy = ">=2.4.0" From 4c9f94d4d2ae5c7f967448aeb198e481e245bc43 Mon Sep 17 00:00:00 2001 From: Ronan Date: Fri, 12 Jul 2024 16:26:48 +0200 Subject: [PATCH 03/14] fix slider issue --- app/callbacks/data_callbacks.py | 5 ++--- app/callbacks/display_callbacks.py | 30 ++++++++++++++---------------- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/app/callbacks/data_callbacks.py b/app/callbacks/data_callbacks.py index 0d4362c..8f47153 100644 --- a/app/callbacks/data_callbacks.py +++ b/app/callbacks/data_callbacks.py @@ -114,13 +114,13 @@ def data_transform(n_intervals, client_token): json.dumps( { "data": pd.DataFrame().to_json(orient="split"), - "data_loaded": True, + "data_loaded": False, } ), json.dumps( { "data": pd.DataFrame().to_json(orient="split"), - "data_loaded": True, + "data_loaded": False, } ), dash.no_update, @@ -171,7 +171,6 @@ def data_transform(n_intervals, client_token): if time_diff <= pd.Timedelta(minutes=30): # Si la différence de temps est inférieure à 30 minutes, ajouter à l'actuel wildfire wildfires_dict[camera_id][-1]["detection_ids"].append(detection["id"]) - print("ON AJOUTE UNE DETECTION : " + str(wildfires_dict[camera_id][-1]["detection_ids"])) else: # Initialize a new wildfire for this camera wildfire = { diff --git a/app/callbacks/display_callbacks.py b/app/callbacks/display_callbacks.py index 9b34a39..58ebb6d 100644 --- a/app/callbacks/display_callbacks.py +++ b/app/callbacks/display_callbacks.py @@ -190,14 +190,15 @@ def select_wildfire_with_button( return [styles, button_index, 1] -# Getwildfire_id data +# Get wildfire_id data @app.callback( Output("detection_on_display", "data"), Input("wildfire_id_on_display", "data"), State("store_detections_data", "data"), + State("store_wildfires_data", "data"), prevent_initial_call=True, ) -def update_display_data(wildfire_id_on_display, local_detections): +def update_display_data(wildfire_id_on_display, local_detections, local_wildfires): """ Updates the display data based on the currently selected wildfire ID. @@ -208,21 +209,23 @@ def update_display_data(wildfire_id_on_display, local_detections): Returns: - json: JSON formatted data for the selected wildfire. 
""" - local_detections, data_loaded = read_stored_DataFrame(local_detections) + local_detections, data_detections_loaded = read_stored_DataFrame(local_detections) + local_wildfires, data_wildfires_loaded = read_stored_DataFrame(local_wildfires) - if not data_loaded: + if not data_detections_loaded or not data_wildfires_loaded: raise PreventUpdate if wildfire_id_on_display == 0: return json.dumps( { "data": pd.DataFrame().to_json(orient="split"), - "data_loaded": True, + "data_loaded": False, } ) else: - detection_on_display = local_detections[local_detections["id"] == wildfire_id_on_display] + detection_ids = local_wildfires[local_wildfires["id"] == wildfire_id_on_display]["detection_ids"].values[0] + detection_on_display = local_detections[local_detections["id"].isin(detection_ids)] return json.dumps({"data": detection_on_display.to_json(orient="split"), "data_loaded": True}) @@ -276,8 +279,6 @@ def update_image_and_bbox(slider_value, detection_data, media_url, wildfire_list images.append(media_url[str(detection["id"])]) boxes = detection_data["processed_loc"].tolist() - print("slider value dans update_image_and_bbox: " + str(slider_value)) - print("slider max value: " + str(len(images) - 1)) if slider_value < len(images): img_src = images[slider_value] images_bbox_list = boxes[slider_value] @@ -372,7 +373,7 @@ def toggle_auto_move(n_clicks, data): ], prevent_initial_call=True, ) -def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, wildfire_id, wildfires): +def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, wildfire_id_on_display, local_wildfires): """ Automatically moves the image slider based on a regular interval and the current auto-move state. @@ -386,11 +387,9 @@ def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, wi Returns: - int: Updated value for the image slider. """ - wildfires_df, data_loaded = read_stored_DataFrame(wildfires) + local_wildfires, data_loaded = read_stored_DataFrame(local_wildfires) if data_loaded: - detection_ids_list = wildfires_df.loc[wildfires_df["id"] == wildfire_id, "detection_ids"].tolist()[0] - print("detection_ids_list size:" + str(len(detection_ids_list))) - print(str(wildfires_df.loc[wildfires_df["id"] == wildfire_id, "detection_ids"])) + detection_ids_list = local_wildfires[local_wildfires["id"] == wildfire_id_on_display]["detection_ids"].values[0] else: detection_ids_list = [] @@ -422,7 +421,6 @@ def update_download_link(slider_value, wildfire_id, wildfires, media_url): detection_ids_list = wildfires_df.loc[wildfires_df["id"] == wildfire_id, "detection_ids"].tolist()[0] if data_loaded and len(detection_ids_list): try: - print("Value of the slider : " + str(slider_value)) detection_id = detection_ids_list[slider_value] if str(detection_id) in media_url.keys(): return media_url[str(detection_id)] @@ -545,7 +543,7 @@ def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_di ], prevent_initial_call=True, ) -def acknowledge_event(n_clicks, local_wildfires, wildfire_id_on_display, client_token): +def acknowledge_wildfire(n_clicks, local_wildfires, wildfire_id_on_display, client_token): """ Acknowledges the selected wildfire and updates the state to reflect this. @@ -555,7 +553,7 @@ def acknowledge_event(n_clicks, local_wildfires, wildfire_id_on_display, client_ - client_token (str): Token used for API requests. Returns: - - int: The ID of thewildfire that has been acknowledged. + - int: The ID of the wildfire that has been acknowledged. 
""" if wildfire_id_on_display == 0 or n_clicks == 0: raise PreventUpdate From d76034e7de1a1690aa69e35264dca8474c8d4589 Mon Sep 17 00:00:00 2001 From: Ronan Date: Sat, 13 Jul 2024 21:34:41 +0200 Subject: [PATCH 04/14] feat: use new endpoint API --- app/callbacks/data_callbacks.py | 75 +++++------------------------- app/callbacks/display_callbacks.py | 2 +- pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 66 deletions(-) diff --git a/app/callbacks/data_callbacks.py b/app/callbacks/data_callbacks.py index 8f47153..bc0000e 100644 --- a/app/callbacks/data_callbacks.py +++ b/app/callbacks/data_callbacks.py @@ -73,15 +73,17 @@ def login_callback(n_clicks, username, password, client_token): [ Output("store_wildfires_data", "data"), Output("store_detections_data", "data"), + Output("media_url", "data"), Output("trigger_no_wildfires", "data"), ], [Input("main_api_fetch_interval", "n_intervals")], [ State("client_token", "data"), + State("media_url", "data"), ], prevent_initial_call=True, ) -def data_transform(n_intervals, client_token): +def data_transform(n_intervals, client_token, media_url): """ Fetches and processes live wildfire and detection data from the API at regular intervals. @@ -106,8 +108,8 @@ def data_transform(n_intervals, client_token): # Fetch Detections yesterday = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d_%H:%M:%S") api_client = Client(client_token, cfg.API_URL) - response = api_client.fetch_unacknowledged_detections(from_date=yesterday) - api_detections = pd.DataFrame(response.json()) + response = api_client.fetch_unlabeled_detections(from_date=yesterday) + api_detections = pd.DataFrame(response.json()[0]) if api_detections.empty: return [ @@ -123,7 +125,8 @@ def data_transform(n_intervals, client_token): "data_loaded": False, } ), - dash.no_update, + [], + True, ] # Find ongoing detections for the wildfires started within 30 minutes; @@ -144,6 +147,8 @@ def data_transform(n_intervals, client_token): last_detection_time_per_camera: dict[int, str] = {} wildfires_dict: dict[int, list] = {} + media_list = response.json()[1] + # Parcourir les détections pour les regrouper en wildfires for i in range(0, len(api_detections)): camera_id = api_detections.at[i, "camera_id"] @@ -152,6 +157,7 @@ def data_transform(n_intervals, client_token): api_detections.at[i, "lat"] = camera["lat"] api_detections.at[i, "lon"] = camera["lon"] detection = api_detections.iloc[i] + media_url[str(detection["id"])] = media_list[i]["url"] if camera_id not in wildfires_dict: wildfires_dict.setdefault(camera_id, []) @@ -191,68 +197,9 @@ def data_transform(n_intervals, client_token): # Convertir la liste des wildfires en DataFrame wildfires_df = pd.DataFrame(wildfires) - return [ json.dumps({"data": wildfires_df.to_json(orient="split"), "data_loaded": True}), json.dumps({"data": api_detections.to_json(orient="split"), "data_loaded": True}), + media_url, dash.no_update, ] - - -@app.callback( - Output("media_url", "data"), - Input("store_detections_data", "data"), - [ - State("media_url", "data"), - State("client_token", "data"), - ], - prevent_initial_call=True, -) -def get_media_url( - local_detections, - media_url, - client_token, -): - """ - Retrieves media URLs for detections and manages the fetching process from the API. - - This callback is designed to efficiently load media URLs during app initialization - and subsequently update them. Initially, it focuses on loading URLs wildfire by wildfire - to quickly provide data for visualization. 
Once URLs for all wildfires are loaded, the - callback then methodically checks for and retrieves any missing URLs. - - The callback is triggered by two inputs: a change in the data to load and a regular - interval check. It includes a cleanup step to remove wildfire IDs no longer present in - local detections. - - Parameters: - - - interval (int): Current interval for fetching URLs. - - local_detections (json): Currently stored detections data in JSON format. - - media_url (dict): Dictionary holding media URLs for detections. - - client_token (str): Token used for API calls - - - - Returns: - - dict: Updated dictionary with media URLs for each detection. - """ - if client_token is None: - raise PreventUpdate - - local_detections, detections_data_loaded = read_stored_DataFrame(local_detections) - - if not detections_data_loaded: - raise PreventUpdate - - if local_detections.empty: - return {} - - # Loop through each row in local_detections - for _, row in local_detections.iterrows(): - detection_id = str(row["id"]) - - # Fetch the URL for this media_id - response = Client(client_token, cfg.API_URL).get_detection_url(detection_id) - media_url[detection_id] = response.json()["url"] - return media_url diff --git a/app/callbacks/display_callbacks.py b/app/callbacks/display_callbacks.py index 58ebb6d..32622e4 100644 --- a/app/callbacks/display_callbacks.py +++ b/app/callbacks/display_callbacks.py @@ -388,7 +388,7 @@ def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, wi - int: Updated value for the image slider. """ local_wildfires, data_loaded = read_stored_DataFrame(local_wildfires) - if data_loaded: + if data_loaded and wildfire_id_on_display != 0: detection_ids_list = local_wildfires[local_wildfires["id"] == wildfire_id_on_display]["detection_ids"].values[0] else: detection_ids_list = [] diff --git a/pyproject.toml b/pyproject.toml index faf40ba..7f8bcaf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dash = ">=2.14.0" dash-bootstrap-components = ">=1.5.0" dash-leaflet = "^0.1.4" pandas = ">=2.1.4" -pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "5c492a57d6454d953c81d9f75968babcaab6dfec", subdirectory = "client" } +pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "39b700c83c438fb25fac9d7dc861af3db8fcc690", subdirectory = "client" } python-dotenv = ">=1.0.0" geopy = ">=2.4.0" From 507a50a0533f035095bfb57bf2f0232cb587a607 Mon Sep 17 00:00:00 2001 From: Ronan Date: Sat, 13 Jul 2024 21:58:23 +0200 Subject: [PATCH 05/14] fix LOGIN --- app/callbacks/data_callbacks.py | 3 +-- app/layouts/main_layout.py | 9 +++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/app/callbacks/data_callbacks.py b/app/callbacks/data_callbacks.py index bc0000e..632e80d 100644 --- a/app/callbacks/data_callbacks.py +++ b/app/callbacks/data_callbacks.py @@ -57,14 +57,13 @@ def login_callback(n_clicks, username, password, client_token): return ( client.token, - client.headers, dash.no_update, ) except Exception: # This if statement is verified if credentials are invalid form_feedback.append(html.P("Nom d'utilisateur et/ou mot de passe erroné.")) - return dash.no_update, dash.no_update, form_feedback + return dash.no_update, form_feedback raise PreventUpdate diff --git a/app/layouts/main_layout.py b/app/layouts/main_layout.py index b036ee2..4cff009 100644 --- a/app/layouts/main_layout.py +++ b/app/layouts/main_layout.py @@ -11,8 +11,13 @@ from components.navbar import Navbar from services import instantiate_token 
+import config as cfg -api_client = instantiate_token() +if not cfg.LOGIN: + api_client = instantiate_token() + token = api_client.token +else: + token = None def get_main_layout(): @@ -92,7 +97,7 @@ def get_main_layout(): is_open=False, ), # Storage components saving the user's headers and credentials - dcc.Store(id="client_token", storage_type="session", data=api_client.token), + dcc.Store(id="client_token", storage_type="session", data=token), # [TEMPORARY FIX] Storing the user's credentials to refresh the token when needed dcc.Store(id="to_acknowledge", data=0), dcc.Store(id="trigger_no_wildfires", data=False), From 986ee9a9c998e8a73fc98e29a52a7994492a3039 Mon Sep 17 00:00:00 2001 From: Ronan Date: Sat, 13 Jul 2024 22:09:23 +0200 Subject: [PATCH 06/14] fix lint --- Makefile | 2 +- app/callbacks/data_callbacks.py | 2 +- app/layouts/main_layout.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index e07d6e3..d71f6fb 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ quality: # this target runs checks on all files and potentially modifies some of them style: black . - ruff --fix . + ruff check --fix . # Build the docker build: diff --git a/app/callbacks/data_callbacks.py b/app/callbacks/data_callbacks.py index 632e80d..c1affc0 100644 --- a/app/callbacks/data_callbacks.py +++ b/app/callbacks/data_callbacks.py @@ -17,7 +17,7 @@ import config as cfg from services import instantiate_token -from utils.data import process_bbox, read_stored_DataFrame +from utils.data import process_bbox logger = logging_config.configure_logging(cfg.DEBUG, cfg.SENTRY_DSN) diff --git a/app/layouts/main_layout.py b/app/layouts/main_layout.py index 4cff009..ba6b26c 100644 --- a/app/layouts/main_layout.py +++ b/app/layouts/main_layout.py @@ -9,9 +9,9 @@ import pandas as pd from dash import dcc, html +import config as cfg from components.navbar import Navbar from services import instantiate_token -import config as cfg if not cfg.LOGIN: api_client = instantiate_token() From d81eb2e488600ab974872051119e8540963dcadb Mon Sep 17 00:00:00 2001 From: Ronan Date: Sun, 14 Jul 2024 16:46:44 +0200 Subject: [PATCH 07/14] feat: upgrade client commit --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 7f8bcaf..1d61a2b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dash = ">=2.14.0" dash-bootstrap-components = ">=1.5.0" dash-leaflet = "^0.1.4" pandas = ">=2.1.4" -pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "39b700c83c438fb25fac9d7dc861af3db8fcc690", subdirectory = "client" } +pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "8d50d923d3fef81343ed78b6b364c83ebea07c0b", subdirectory = "client" } python-dotenv = ">=1.0.0" geopy = ">=2.4.0" From 288ce4848c7a7907a5a6357df30463de8e71e01a Mon Sep 17 00:00:00 2001 From: Ronan Date: Sun, 14 Jul 2024 21:48:02 +0200 Subject: [PATCH 08/14] upgrade Client --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 1d61a2b..b27be8d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dash = ">=2.14.0" dash-bootstrap-components = ">=1.5.0" dash-leaflet = "^0.1.4" pandas = ">=2.1.4" -pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "8d50d923d3fef81343ed78b6b364c83ebea07c0b", subdirectory = "client" } +pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "963e666f6f27b281e58aa98bb97710c43b480b81", 
subdirectory = "client" } python-dotenv = ">=1.0.0" geopy = ">=2.4.0" From 8f7c521877b756279fac9c99fa7c726d8786f69f Mon Sep 17 00:00:00 2001 From: Ronan Date: Mon, 22 Jul 2024 15:26:32 +0200 Subject: [PATCH 09/14] DetectionWithUrl + Bboxes --- app/callbacks/data_callbacks.py | 15 +++++++++------ app/callbacks/display_callbacks.py | 26 +++++++++++++------------- app/utils/display.py | 14 +++++++------- pyproject.toml | 2 +- 4 files changed, 30 insertions(+), 27 deletions(-) diff --git a/app/callbacks/data_callbacks.py b/app/callbacks/data_callbacks.py index c1affc0..25fee49 100644 --- a/app/callbacks/data_callbacks.py +++ b/app/callbacks/data_callbacks.py @@ -108,7 +108,7 @@ def data_transform(n_intervals, client_token, media_url): yesterday = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d_%H:%M:%S") api_client = Client(client_token, cfg.API_URL) response = api_client.fetch_unlabeled_detections(from_date=yesterday) - api_detections = pd.DataFrame(response.json()[0]) + api_detections = pd.DataFrame(response.json()) if api_detections.empty: return [ @@ -141,22 +141,25 @@ def data_transform(n_intervals, client_token, media_url): api_detections["lon"] = None api_detections["wildfire_id"] = None api_detections["processed_loc"] = None - api_detections["processed_loc"] = api_detections["localization"].apply(process_bbox) + api_detections["processed_loc"] = api_detections["bboxes"].apply(process_bbox) last_detection_time_per_camera: dict[int, str] = {} wildfires_dict: dict[int, list] = {} - media_list = response.json()[1] + media_dict = api_detections.set_index("id")["url"].to_dict() # Parcourir les détections pour les regrouper en wildfires - for i in range(0, len(api_detections)): + for i, detection in api_detections.iterrows(): camera_id = api_detections.at[i, "camera_id"] camera = cameras.loc[cameras["id"] == camera_id] camera = camera.iloc[0] # Ensure camera is a Series api_detections.at[i, "lat"] = camera["lat"] api_detections.at[i, "lon"] = camera["lon"] - detection = api_detections.iloc[i] - media_url[str(detection["id"])] = media_list[i]["url"] + print("ICI") + print(detection["id"]) + print(media_dict[int(detection["id"])]) + + media_url[detection["id"]] = media_dict[detection["id"]] if camera_id not in wildfires_dict: wildfires_dict.setdefault(camera_id, []) diff --git a/app/callbacks/display_callbacks.py b/app/callbacks/display_callbacks.py index 32622e4..51e1c5f 100644 --- a/app/callbacks/display_callbacks.py +++ b/app/callbacks/display_callbacks.py @@ -475,26 +475,26 @@ def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_di if not wildfire_data_loaded: raise PreventUpdate - # Convert the 'localization' column to a list (empty lists if the original value was '[]'). - detection_data["localization"] = detection_data["localization"].apply( + # Convert the 'bboxes' column to a list (empty lists if the original value was '[]'). + detection_data["bboxes"] = detection_data["bboxes"].apply( lambda x: ast.literal_eval(x) if isinstance(x, str) and x.strip() != "[]" else [] ) # WHY? - # Filter out rows where 'localization' is not empty and get the last one. + # Filter out rows where 'bboxes' is not empty and get the last one. # If all are empty, then simply get the last row of the DataFrame. 
- row_with_localization = ( - detection_data[detection_data["localization"].astype(bool)].iloc[-1] - if not detection_data[detection_data["localization"].astype(bool)].empty + row_with_bboxes = ( + detection_data[detection_data["bboxes"].astype(bool)].iloc[-1] + if not detection_data[detection_data["bboxes"].astype(bool)].empty else detection_data.iloc[-1] ) polygon, detection_azimuth = build_vision_polygon( - site_lat=row_with_localization["lat"], - site_lon=row_with_localization["lon"], - azimuth=row_with_localization["azimuth"], + site_lat=row_with_bboxes["lat"], + site_lon=row_with_bboxes["lon"], + azimuth=row_with_bboxes["azimuth"], opening_angle=cfg.CAM_OPENING_ANGLE, dist_km=cfg.CAM_RANGE_KM, - localization=row_with_localization["processed_loc"], + bboxes=row_with_bboxes["processed_loc"], ) date_val, cam_name = local_wildfires[local_wildfires["id"] == wildfire_id_on_display][ @@ -502,15 +502,15 @@ def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_di ].values[0] camera_info = f"Camera: {cam_name}" - location_info = f"Localisation: {row_with_localization['lat']:.4f}, {row_with_localization['lon']:.4f}" + location_info = f"Localisation: {row_with_bboxes['lat']:.4f}, {row_with_bboxes['lon']:.4f}" angle_info = f"Azimuth de detection: {detection_azimuth}°" date_info = f"Date: {date_val}" return ( polygon, - [row_with_localization["lat"], row_with_localization["lon"]], + [row_with_bboxes["lat"], row_with_bboxes["lon"]], polygon, - [row_with_localization["lat"], row_with_localization["lon"]], + [row_with_bboxes["lat"], row_with_bboxes["lon"]], camera_info, location_info, angle_info, diff --git a/app/utils/display.py b/app/utils/display.py index 5460e26..88b9bdb 100644 --- a/app/utils/display.py +++ b/app/utils/display.py @@ -34,12 +34,12 @@ def build_departments_geojson(): return geojson -def calculate_new_polygon_parameters(azimuth, opening_angle, localization): +def calculate_new_polygon_parameters(azimuth, opening_angle, bboxes): """ - This function compute the vision polygon parameters based on localization + This function compute the vision polygon parameters based on bboxes """ - # Assuming localization is in the format [x0, y0, x1, y1, confidence] - x0, _, width, _ = localization + # Assuming bboxes is in the format [x0, y0, x1, y1, confidence] + x0, _, width, _ = bboxes xc = (x0 + width / 2) / 100 # New azimuth @@ -100,12 +100,12 @@ def build_cameras_markers(token: str): return markers, cameras -def build_vision_polygon(site_lat, site_lon, azimuth, opening_angle, dist_km, localization=None): +def build_vision_polygon(site_lat, site_lon, azimuth, opening_angle, dist_km, bboxes=None): """ Create a vision polygon using dl.Polygon. This polygon is placed on the map using alerts data. 
""" - if len(localization): - azimuth, opening_angle = calculate_new_polygon_parameters(azimuth, opening_angle, localization[0]) + if len(bboxes): + azimuth, opening_angle = calculate_new_polygon_parameters(azimuth, opening_angle, bboxes[0]) # The center corresponds the point from which the vision angle "starts" center = [site_lat, site_lon] diff --git a/pyproject.toml b/pyproject.toml index b27be8d..ee547de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dash = ">=2.14.0" dash-bootstrap-components = ">=1.5.0" dash-leaflet = "^0.1.4" pandas = ">=2.1.4" -pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "963e666f6f27b281e58aa98bb97710c43b480b81", subdirectory = "client" } +pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "5da3d23d38cb78a4a4e15cf2f9f83bf2da7cdaee", subdirectory = "client" } python-dotenv = ">=1.0.0" geopy = ">=2.4.0" From ea4f862dad14596322259b49a300c3d808ec6b1a Mon Sep 17 00:00:00 2001 From: Ronan Date: Mon, 22 Jul 2024 22:21:27 +0200 Subject: [PATCH 10/14] create an incremental algorithm for calculating wildfire --- app/callbacks/data_callbacks.py | 50 +++++++++++------- app/callbacks/display_callbacks.py | 85 +++++++++++++++++++++--------- app/layouts/main_layout.py | 3 +- app/utils/display.py | 2 +- 4 files changed, 92 insertions(+), 48 deletions(-) diff --git a/app/callbacks/data_callbacks.py b/app/callbacks/data_callbacks.py index 25fee49..87340aa 100644 --- a/app/callbacks/data_callbacks.py +++ b/app/callbacks/data_callbacks.py @@ -74,15 +74,18 @@ def login_callback(n_clicks, username, password, client_token): Output("store_detections_data", "data"), Output("media_url", "data"), Output("trigger_no_wildfires", "data"), + Output("previous_time_event", "data"), ], [Input("main_api_fetch_interval", "n_intervals")], [ State("client_token", "data"), State("media_url", "data"), + State("store_wildfires_data", "data"), + State("previous_time_event", "data"), ], prevent_initial_call=True, ) -def data_transform(n_intervals, client_token, media_url): +def data_transform(n_intervals, client_token, media_url, store_wildfires_data, previous_time_event): """ Fetches and processes live wildfire and detection data from the API at regular intervals. 
@@ -105,16 +108,21 @@ def data_transform(n_intervals, client_token, media_url): logger.info("Start Fetching the events") # Fetch Detections - yesterday = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d_%H:%M:%S") + # Use the last event time or default to yesterday + if previous_time_event is None: + previous_time_event = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d_%H:%M:%S") + else: + previous_time_event = pd.to_datetime(previous_time_event).strftime("%Y-%m-%d_%H:%M:%S") + api_client = Client(client_token, cfg.API_URL) - response = api_client.fetch_unlabeled_detections(from_date=yesterday) + response = api_client.fetch_unlabeled_detections(from_date=previous_time_event) api_detections = pd.DataFrame(response.json()) - + previous_time_event = api_detections["created_at"].max() if api_detections.empty: return [ json.dumps( { - "data": pd.DataFrame().to_json(orient="split"), + "data": store_wildfires_data, "data_loaded": False, } ), @@ -126,16 +134,17 @@ def data_transform(n_intervals, client_token, media_url): ), [], True, + previous_time_event, ] # Find ongoing detections for the wildfires started within 30 minutes; # after that, any new detection is part of a new wildfire api_detections["created_at"] = pd.to_datetime(api_detections["created_at"]) + # Trier les détections par "created_at" api_detections = api_detections.sort_values(by="created_at") # Initialiser la liste pour les wildfires - id_counter = 1 cameras = pd.DataFrame(api_client.fetch_cameras().json()) api_detections["lat"] = None api_detections["lon"] = None @@ -143,9 +152,17 @@ def data_transform(n_intervals, client_token, media_url): api_detections["processed_loc"] = None api_detections["processed_loc"] = api_detections["bboxes"].apply(process_bbox) - last_detection_time_per_camera: dict[int, str] = {} - wildfires_dict: dict[int, list] = {} + wildfires_dict = json.loads(store_wildfires_data)["data"] + # Load existing wildfires data + if wildfires_dict != {}: + id_counter = ( + max(wildfire["id"] for camera_wildfires in wildfires_dict.values() for wildfire in camera_wildfires) + 1 + ) + else: + wildfires_dict = {} + id_counter = 1 + last_detection_time_per_camera: dict[int, str] = {} media_dict = api_detections.set_index("id")["url"].to_dict() # Parcourir les détections pour les regrouper en wildfires @@ -155,9 +172,6 @@ def data_transform(n_intervals, client_token, media_url): camera = camera.iloc[0] # Ensure camera is a Series api_detections.at[i, "lat"] = camera["lat"] api_detections.at[i, "lon"] = camera["lon"] - print("ICI") - print(detection["id"]) - print(media_dict[int(detection["id"])]) media_url[detection["id"]] = media_dict[detection["id"]] @@ -168,7 +182,7 @@ def data_transform(n_intervals, client_token, media_url): wildfire = { "id": id_counter, "camera_name": camera["name"], - "created_at": detection["created_at"], + "created_at": detection["created_at"].strftime("%Y-%m-%d %H:%M:%S"), "detection_ids": [detection["id"]], } wildfires_dict[camera_id] = [wildfire] @@ -184,7 +198,7 @@ def data_transform(n_intervals, client_token, media_url): wildfire = { "id": id_counter, "camera_name": camera["name"], - "created_at": detection["created_at"], + "created_at": detection["created_at"].strftime("%Y-%m-%d %H:%M:%S"), "detection_ids": [detection["id"]], } wildfires_dict[camera_id].append(wildfire) @@ -192,16 +206,12 @@ def data_transform(n_intervals, client_token, media_url): api_detections.at[i, "wildfire_id"] = wildfires_dict[camera_id][-1]["id"] last_detection_time_per_camera[camera_id] = 
detection["created_at"] - # Convert the dictionary to a list of wildfires - wildfires = [] - for wildfire_list in wildfires_dict.values(): - wildfires.extend(wildfire_list) - + wildfires_dict = {int(k): v for k, v in wildfires_dict.items()} # Convertir la liste des wildfires en DataFrame - wildfires_df = pd.DataFrame(wildfires) return [ - json.dumps({"data": wildfires_df.to_json(orient="split"), "data_loaded": True}), + json.dumps({"data": wildfires_dict, "data_loaded": True}), json.dumps({"data": api_detections.to_json(orient="split"), "data_loaded": True}), media_url, dash.no_update, + previous_time_event, ] diff --git a/app/callbacks/display_callbacks.py b/app/callbacks/display_callbacks.py index 51e1c5f..ece9df6 100644 --- a/app/callbacks/display_callbacks.py +++ b/app/callbacks/display_callbacks.py @@ -63,7 +63,7 @@ def toggle_modal(media_url, client_token, trigger_no_wildfires, local_detections State("media_url", "data"), prevent_initial_call=True, ) -def update_wildfire_list(local_wildfires, to_acknowledge, media_url): +def update_wildfire_list(store_wildfires_data, to_acknowledge, media_url): """ Updates the wildfire list based on changes in the wildfires data or acknowledgement actions. @@ -80,14 +80,22 @@ def update_wildfire_list(local_wildfires, to_acknowledge, media_url): if trigger_id == "to_acknowledge" and str(to_acknowledge) not in media_url.keys(): raise PreventUpdate - local_wildfires, wildfire_data_loaded = read_stored_DataFrame(local_wildfires) - if not wildfire_data_loaded: + json_wildfire = json.loads(store_wildfires_data) + wildfires_dict = json_wildfire["data"] + data_loaded = json_wildfire["data_loaded"] + + if not data_loaded: raise PreventUpdate - if len(local_wildfires): - local_wildfires = local_wildfires[~local_wildfires["id"].isin([to_acknowledge])] + wildfires_list = [] - return create_wildfire_list_from_df(local_wildfires) + if len(wildfires_dict): + for camera_key, wildfires_item in wildfires_dict.items(): + wildfires_dict[camera_key] = [wf for wf in wildfires_item if wf["id"] != to_acknowledge] + for wildfires_item in wildfires_dict.values(): + wildfires_list.extend(wildfires_item) + + return create_wildfire_list_from_df(pd.DataFrame(wildfires_list)) # Select the wildfire id @@ -198,7 +206,7 @@ def select_wildfire_with_button( State("store_wildfires_data", "data"), prevent_initial_call=True, ) -def update_display_data(wildfire_id_on_display, local_detections, local_wildfires): +def update_display_data(wildfire_id_on_display, local_detections, store_wildfires_data): """ Updates the display data based on the currently selected wildfire ID. @@ -210,7 +218,8 @@ def update_display_data(wildfire_id_on_display, local_detections, local_wildfire - json: JSON formatted data for the selected wildfire. 
""" local_detections, data_detections_loaded = read_stored_DataFrame(local_detections) - local_wildfires, data_wildfires_loaded = read_stored_DataFrame(local_wildfires) + wildfires_dict = json.loads(store_wildfires_data)["data"] + data_wildfires_loaded = json.loads(store_wildfires_data)["data_loaded"] if not data_detections_loaded or not data_wildfires_loaded: raise PreventUpdate @@ -223,8 +232,10 @@ def update_display_data(wildfire_id_on_display, local_detections, local_wildfire } ) else: - - detection_ids = local_wildfires[local_wildfires["id"] == wildfire_id_on_display]["detection_ids"].values[0] + for wildfires_item in wildfires_dict.values(): + for wildfire in wildfires_item: + if wildfire["id"] == wildfire_id_on_display: + detection_ids = wildfire["detection_ids"] detection_on_display = local_detections[local_detections["id"].isin(detection_ids)] return json.dumps({"data": detection_on_display.to_json(orient="split"), "data_loaded": True}) @@ -373,7 +384,9 @@ def toggle_auto_move(n_clicks, data): ], prevent_initial_call=True, ) -def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, wildfire_id_on_display, local_wildfires): +def auto_move_slider( + n_intervals, current_value, max_value, auto_move_clicks, wildfire_id_on_display, store_wildfires_data +): """ Automatically moves the image slider based on a regular interval and the current auto-move state. @@ -387,9 +400,15 @@ def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, wi Returns: - int: Updated value for the image slider. """ - local_wildfires, data_loaded = read_stored_DataFrame(local_wildfires) + json_wildfire = json.loads(store_wildfires_data) + wildfires_dict = json_wildfire["data"] + data_loaded = json_wildfire["data_loaded"] + if data_loaded and wildfire_id_on_display != 0: - detection_ids_list = local_wildfires[local_wildfires["id"] == wildfire_id_on_display]["detection_ids"].values[0] + for wildfires_item in wildfires_dict.values(): + for wildfire in wildfires_item: + if wildfire["id"] == wildfire_id_on_display: + detection_ids_list = wildfire["detection_ids"] else: detection_ids_list = [] @@ -405,7 +424,7 @@ def auto_move_slider(n_intervals, current_value, max_value, auto_move_clicks, wi [State("wildfire_id_on_display", "data"), State("store_wildfires_data", "data"), State("media_url", "data")], prevent_initial_call=True, ) -def update_download_link(slider_value, wildfire_id, wildfires, media_url): +def update_download_link(slider_value, wildfire_id_on_display, store_wildfires_data, media_url): """ Updates the download link for the currently displayed image. @@ -417,8 +436,14 @@ def update_download_link(slider_value, wildfire_id, wildfires, media_url): Returns: - str: URL for downloading the current image. 
""" - wildfires_df, data_loaded = read_stored_DataFrame(wildfires) - detection_ids_list = wildfires_df.loc[wildfires_df["id"] == wildfire_id, "detection_ids"].tolist()[0] + json_wildfire = json.loads(store_wildfires_data) + wildfires_dict = json_wildfire["data"] + data_loaded = json_wildfire["data_loaded"] + for wildfires_item in wildfires_dict.values(): + for wildfire in wildfires_item: + if wildfire["id"] == wildfire_id_on_display: + detection_ids_list = wildfire["detection_ids"] + if data_loaded and len(detection_ids_list): try: detection_id = detection_ids_list[slider_value] @@ -448,7 +473,7 @@ def update_download_link(slider_value, wildfire_id, wildfires, media_url): [State("store_wildfires_data", "data"), State("wildfire_id_on_display", "data")], prevent_initial_call=True, ) -def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_display): +def update_map_and_alert_info(detection_data, store_wildfires_data, wildfire_id_on_display): """ Updates the map's vision polygons, center, and alert information based on the current alert data. ) @@ -471,8 +496,10 @@ def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_di raise PreventUpdate if not detection_data.empty: - local_wildfires, wildfire_data_loaded = read_stored_DataFrame(local_wildfires) - if not wildfire_data_loaded: + json_wildfire = json.loads(store_wildfires_data) + wildfires_dict = json_wildfire["data"] + data_loaded = json_wildfire["data_loaded"] + if not data_loaded: raise PreventUpdate # Convert the 'bboxes' column to a list (empty lists if the original value was '[]'). @@ -497,9 +524,11 @@ def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_di bboxes=row_with_bboxes["processed_loc"], ) - date_val, cam_name = local_wildfires[local_wildfires["id"] == wildfire_id_on_display][ - ["created_at", "camera_name"] - ].values[0] + for wildfires_item in wildfires_dict.values(): + for wildfire in wildfires_item: + if wildfire["id"] == wildfire_id_on_display: + date_val = wildfire["created_at"] + cam_name = wildfire["camera_name"] camera_info = f"Camera: {cam_name}" location_info = f"Localisation: {row_with_bboxes['lat']:.4f}, {row_with_bboxes['lon']:.4f}" @@ -543,7 +572,7 @@ def update_map_and_alert_info(detection_data, local_wildfires, wildfire_id_on_di ], prevent_initial_call=True, ) -def acknowledge_wildfire(n_clicks, local_wildfires, wildfire_id_on_display, client_token): +def acknowledge_wildfire(n_clicks, store_wildfires_data, wildfire_id_on_display, client_token): """ Acknowledges the selected wildfire and updates the state to reflect this. 
@@ -557,10 +586,14 @@ def acknowledge_wildfire(n_clicks, local_wildfires, wildfire_id_on_display, clie """ if wildfire_id_on_display == 0 or n_clicks == 0: raise PreventUpdate - local_wildfires, _ = read_stored_DataFrame(local_wildfires) + json_wildfire = json.loads(store_wildfires_data) + wildfires_dict = json_wildfire["data"] wildfire_id = int(wildfire_id_on_display) - detection_ids = local_wildfires[local_wildfires["id"] == wildfire_id][["detection_ids"]].values - for detection_id in detection_ids: + for wildfires_item in wildfires_dict.values(): + for wildfire in wildfires_item: + if wildfire["id"] == wildfire_id: + detection_ids_list = wildfire["detection_ids"] + for detection_id in detection_ids_list: Client(client_token, cfg.API_URL).label_detection(detection_id=detection_id, is_wildfire=False) return wildfire_id_on_display diff --git a/app/layouts/main_layout.py b/app/layouts/main_layout.py index ba6b26c..7560f5c 100644 --- a/app/layouts/main_layout.py +++ b/app/layouts/main_layout.py @@ -36,7 +36,7 @@ def get_main_layout(): storage_type="session", data=json.dumps( { - "data": pd.DataFrame().to_json(orient="split"), + "data": {}, "data_loaded": False, } ), @@ -101,5 +101,6 @@ def get_main_layout(): # [TEMPORARY FIX] Storing the user's credentials to refresh the token when needed dcc.Store(id="to_acknowledge", data=0), dcc.Store(id="trigger_no_wildfires", data=False), + dcc.Store(id="previous_time_event", data=None), ] ) diff --git a/app/utils/display.py b/app/utils/display.py index 88b9bdb..a3295f5 100644 --- a/app/utils/display.py +++ b/app/utils/display.py @@ -186,7 +186,7 @@ def create_wildfire_list_from_df(wildfires): f"{wildfire['camera_name']}", style={"fontWeight": "bold"}, ), - html.Div(wildfire["created_at"].strftime("%Y-%m-%d %H:%M")), + html.Div(wildfire["created_at"]), ], n_clicks=0, style={ From a72c38461748b93fd815dd44be7753e3c9431532 Mon Sep 17 00:00:00 2001 From: Ronan Date: Tue, 17 Sep 2024 17:46:58 +0200 Subject: [PATCH 11/14] update api-client --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ee547de..1e70613 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dash = ">=2.14.0" dash-bootstrap-components = ">=1.5.0" dash-leaflet = "^0.1.4" pandas = ">=2.1.4" -pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "5da3d23d38cb78a4a4e15cf2f9f83bf2da7cdaee", subdirectory = "client" } +pyroclient = { git = "https://github.com/pyronear/pyro-api.git", rev = "a46f5a00869049ffd1a8bb920ac685e44f18deb5", subdirectory = "client" } python-dotenv = ">=1.0.0" geopy = ">=2.4.0" From 16f011e415ebd9b91a9ae1b725b48539432d18be Mon Sep 17 00:00:00 2001 From: Ronan Date: Wed, 18 Sep 2024 10:40:33 +0200 Subject: [PATCH 12/14] fix precommit hook error --- .github/workflows/build.yml | 2 +- .github/workflows/style.yml | 2 +- Makefile | 2 +- README.md | 2 +- app/assets/css/style.css | 4 ++-- app/services/api.py | 1 - docker-compose.yml | 2 +- traefik.toml | 2 +- 8 files changed, 8 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ff6cead..4cd7fc2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -64,6 +64,6 @@ jobs: API_PWD: ${{ secrets.API_PWD }} run: | docker network create web - docker-compose up -d --build + docker compose up -d --build - name: Check docker sanity run: sleep 200 && docker-compose logs && curl http://platform.localhost:8050/ diff --git a/.github/workflows/style.yml 
b/.github/workflows/style.yml index 8d8ad6b..8ebab5f 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -78,7 +78,7 @@ jobs: black --version black --check --diff . - bandit: + bandit: runs-on: ${{ matrix.os }} strategy: matrix: diff --git a/Makefile b/Makefile index d71f6fb..d595b8a 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ style: build: poetry export -f requirements.txt --without-hashes --output requirements.txt docker build . -t pyronear/pyro-platform:latest - + # Run the docker for production run: poetry export -f requirements.txt --without-hashes --output requirements.txt diff --git a/README.md b/README.md index 9e15a78..d25d1e8 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ make stop ``` If you need to launch the pyro-api in your development environment you can use the pyro-devops project. -You can use it in two different ways : +You can use it in two different ways : => by building the pyro-platform image and launch the full development environment with the command : ```shell make run diff --git a/app/assets/css/style.css b/app/assets/css/style.css index 69efe08..dae0e37 100644 --- a/app/assets/css/style.css +++ b/app/assets/css/style.css @@ -35,13 +35,13 @@ a.no-underline { /* Common style for containers and panels */ .common-style { border: 2px solid #044448; - border-radius: 10px; + border-radius: 10px; background-color: rgba(4, 68, 72, 0.1); } .common-style-slider { border: 2px solid #044448; - border-radius: 10px; + border-radius: 10px; background-color: rgba(4, 68, 72, 0.1); margin-top: 10px; } diff --git a/app/services/api.py b/app/services/api.py index a15686b..490f8e7 100644 --- a/app/services/api.py +++ b/app/services/api.py @@ -15,7 +15,6 @@ def instantiate_token(login: Optional[str] = None, passwrd: Optional[str] = None): - if not cfg.LOGIN: if any(not isinstance(val, str) for val in [cfg.API_URL, cfg.API_LOGIN, cfg.API_PWD]): raise ValueError("The following environment variables need to be set: 'API_URL', 'API_LOGIN', 'API_PWD'") diff --git a/docker-compose.yml b/docker-compose.yml index fd6e4c4..8f8f1b9 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -46,4 +46,4 @@ services: networks: web: - external: true \ No newline at end of file + external: true diff --git a/traefik.toml b/traefik.toml index 30232c4..d17c4e7 100644 --- a/traefik.toml +++ b/traefik.toml @@ -25,4 +25,4 @@ email = "contact@pyronear.org" storage = "acme.json" caServer = "https://acme-v02.api.letsencrypt.org/directory" - [certificatesResolvers.default.acme.tlsChallenge] \ No newline at end of file + [certificatesResolvers.default.acme.tlsChallenge] From 2101446762986a037e999c1d194f8be2993631b2 Mon Sep 17 00:00:00 2001 From: Ronan Date: Wed, 18 Sep 2024 10:53:46 +0200 Subject: [PATCH 13/14] fix gitAction error --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4cd7fc2..9281e4e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -66,4 +66,4 @@ jobs: docker network create web docker compose up -d --build - name: Check docker sanity - run: sleep 200 && docker-compose logs && curl http://platform.localhost:8050/ + run: sleep 200 && docker compose logs && curl http://platform.localhost:8050/ From 21816e709134d9589ce9a3dfe027f6104331f323 Mon Sep 17 00:00:00 2001 From: Ronan Date: Mon, 4 Nov 2024 11:14:13 +0100 Subject: [PATCH 14/14] feat: rewrite README --- README.md | 53 
++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 36 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index d25d1e8..39978c6 100644 --- a/README.md +++ b/README.md @@ -5,9 +5,36 @@ The building blocks of our wildfire detection & monitoring API. ## Quick Tour +### Pyro API +1. Hosted by Pyronear +You can use apidev.pyronear.org/docs if you don't need to modify the API for your task. + +What to do: +=> ask an administrator to create a user for you. An admin user is preferable, so that you can create cameras & detections with it. +=> check that the version of the dev API fits your needs. +=> set the API_URL environment variable in your .env + +2. Locally +You can use the pyro-devops project in two different ways: +=> by building the pyro-platform image and launching the full development environment with the command: +```shell +make run +``` +=> by launching the development environment without the platform: +```shell +make run-engine +``` +adding this line to your /etc/hosts: +``` +127.0.0.1 www.localstack.com localstack +``` +After that, you can set up the .env of the pyro-platform project according to the values contained in the .env of the pyro-devops project, +then launch your project as described in the "Directly in Python" section below. + -### Running/stopping the service +### Running/stopping the service +1. Dockerized You can run the app container using this command for dev purposes: ```shell @@ -27,23 +54,15 @@ In order to stop the service, run: make stop ``` -If you need to launch the pyro-api in your development environment you can use the pyro-devops project. -You can use it in two different ways : -=> by building the pyro-platform image and launch the full development environment with the command : -```shell -make run -``` -=> by launching the development environment without the platform : +This dockerized setup won't work with an API launched via the pyro-devops project. + +2. Directly in Python +Set up your .env + ```shell -make run-engine -``` -adding this line in your /etc/hosts : -``` -127.0.0.1 www.localstack.com localstack -``` -and launching your project locally : -``` -python3 app/index.py +pip install -r requirements.txt +pip install --no-cache-dir git+https://github.com/pyronear/pyro-api.git@ce7bf66d1624fcb615daee567dfa77d7d5bca487#subdirectory=client +python app/index.py --host 0.0.0.0 --port 8050 ``` ## Installation
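For reference, here is a minimal sketch of the .env file that the "Set up your .env" step above relies on. The variable names (API_URL, API_LOGIN, API_PWD) are the ones checked in app/services/api.py; the values below are placeholders only, and your setup may need additional variables.

```shell
# Hypothetical example values for a local run; adjust to your own environment.
API_URL=https://apidev.pyronear.org   # dev API hosted by Pyronear, or the URL of your locally launched pyro-api
API_LOGIN=my_user                     # credentials created for you by an administrator
API_PWD=my_password
```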