From 2ecda7409615b7625d94db2ea73bbf74e8099c88 Mon Sep 17 00:00:00 2001
From: Fall Byrd
Date: Sun, 1 Dec 2024 23:03:01 -0800
Subject: [PATCH 01/13] Moved process creation to 'spawn' and added some extra
 cleanup steps for a clean teardown of the backend

---
 backend/main.py | 133 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 107 insertions(+), 26 deletions(-)

diff --git a/backend/main.py b/backend/main.py
index 2d56067..10de91c 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -4,17 +4,23 @@
 Starts the flask application on the configured port (default 8081)
 Look in api.routes for the actual api code
 """
+import sys
+import time
+import signal
+import logging
+import logging.handlers as handlers
 
-from multiprocessing import Process, Queue
-from microlab.core import startMicrolabProcess
-from api.core import run_flask
-import config
 import multiprocessing_logging
-import logging
-import logging.handlers as handlers
+
+import config
+
+from multiprocessing import Process, Queue, set_start_method
+
+from api.core import run_flask
+from microlab.core import startMicrolabProcess
 from util.logFormatter import MultiLineFormatter
-import sys
+
+LOGGER = logging.getLogger(__name__)
 
 
 def setupLogging():
@@ -37,25 +43,100 @@ def setupLogging():
     multiprocessing_logging.install_mp_handler()
 
 
-if __name__ == "__main__":
-    config.initialSetup()
-    setupLogging()
+class BackendManager:
 
-    logging.info("### STARTING MAIN MICROLAB SERVICE ###")
+    def __init__(self):
+        self._q1 = Queue()
+        self._q2 = Queue()
 
-    q1 = Queue()
-    q2 = Queue()
+        self._processes = []
+        self._queues = []
 
-    microlabProcess = Process(
-        target=startMicrolabProcess, args=(q1, q2), name="microlab"
-    )
-    microlabProcess.start()
-    flaskProcess = Process(target=run_flask, args=(q2, q1), name="flask")
-    flaskProcess.start()
-
-    microlabProcess.join()
-    flaskProcess.join()
-    q1.close()
-    q2.close()
-    q1.join_thread()
-    q2.join_thread()
+    def _are_processes_alive(self) -> bool:
+        return any([process.is_alive() for process in self._processes])
+
+    def _cleanup_queues(self):
+        LOGGER.debug('Cleaning up queues')
+
+        self._q1.close()
+        self._q2.close()
+
+        self._q1.join_thread()
+        self._q2.join_thread()
+
+        LOGGER.debug('Completed cleaning up queues')
+
+    def _cleanup_processes(self):
+        LOGGER.debug('Cleaning up processes')
+
+        while self._are_processes_alive():
+            for proc in self._processes:
+                try:
+                    self._q1.get_nowait()
+                except Exception:
+                    pass
+
+                try:
+                    self._q2.get_nowait()
+                except Exception:
+                    pass
+
+                if proc.is_alive():
+                    LOGGER.debug(f'Attempting to join proc: {proc.pid}')
+                    proc.join(timeout=1)
+
+        LOGGER.debug('Completed cleaning up processes')
+
+    def _cleanup_everything(self):
+        self._cleanup_processes()
+        self._cleanup_queues()
+
+    def _handle_exit_signals(self, signum, frame):
+        LOGGER.debug('Beginning to handle exit signals in BackendManager')
+        self._cleanup_everything()
+        LOGGER.debug('Completed handling exit signals in BackendManager')
+
+    def _start_microlab(self):
+        self._microlab_manager_process = Process(
+            target=startMicrolabProcess, args=(self._q1, self._q2), name="microlab"
+        )
+
+        self._processes.append(self._microlab_manager_process)
+
+        LOGGER.debug('Starting the microlab process')
+        self._microlab_manager_process.start()
+        LOGGER.debug(f'microlab process pid: {self._microlab_manager_process.pid}')
+
+    def _start_server(self):
+        self._flaskProcess = Process(target=run_flask, args=(self._q2, self._q1), name="flask", daemon=True)
+        self._processes.append(self._flaskProcess)
+        LOGGER.debug('Starting the server process')
+        self._flaskProcess.start()
+        print(f'server process pid: {self._flaskProcess.pid}')
+
+    def run(self):
+        config.initialSetup()
+
+        LOGGER.info("### STARTING MAIN MICROLAB SERVICE ###")
+
+        self._start_microlab()
+        self._start_server()
+
+        signal.signal(signal.SIGINT, self._handle_exit_signals)
+        signal.signal(signal.SIGTERM, self._handle_exit_signals)
+
+        while self._are_processes_alive():
+            time.sleep(0.1)
+
+        LOGGER.debug("### ENDING MICROLAB SERVICE EXECUTION ###")
+
+
+def main():
+    backend_manager = BackendManager()
+    backend_manager.run()
+
+
+if __name__ == "__main__":
+    set_start_method('spawn')
+    setupLogging()
+    main()

From 90f91c6559a615c0e20c278642c793c884edac53 Mon Sep 17 00:00:00 2001
From: Fall Byrd
Date: Sun, 15 Dec 2024 19:33:59 -0800
Subject: [PATCH 02/13] Added a new multiprocess logger

---
 backend/util/logger.py | 107 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 backend/util/logger.py

diff --git a/backend/util/logger.py b/backend/util/logger.py
new file mode 100644
index 0000000..8705b53
--- /dev/null
+++ b/backend/util/logger.py
@@ -0,0 +1,107 @@
+import logging
+import logging.handlers
+import queue
+import sys
+import traceback
+
+import config
+
+from typing import Union
+from multiprocessing import Queue
+from util.logFormatter import MultiLineFormatter
+
+
+class MultiprocessingLogger:
+
+    _logging_queue = None
+    _configured_loggers = {}
+
+    _processing_logger = None
+
+    @classmethod
+    def initialize_logger(cls, logging_queue: Union[Queue, None] = None) -> None:
+        """ Initialize the logger.
+
+        Must be called in the base process that starts the other processes, without any argument.
+        In other processes it must be called with a dedicated logging queue."""
+        if logging_queue is None:
+            logging_queue = Queue()
+
+        cls._logging_queue = logging_queue
+
+    @classmethod
+    def get_logging_queue(cls) -> Union[Queue, None]:
+        return cls._logging_queue
+
+    @classmethod
+    def get_logger(cls, logger_name: str) -> logging.Logger:
+        # We only need to configure once, multiple calls to logging.getLogger()
+        # will return the same configured instance
+        if logger_name in cls._configured_loggers:
+            return logging.getLogger(logger_name)
+
+        logger = 
logging.getLogger(logger_name) + logger.setLevel(config.microlabConfig.logLevel) + for handler in log_handlers: + logger.addHandler(handler) + + cls._configured_loggers[logger_name] = True + + return logger + + @classmethod + def process_logs(cls): + # print('processing logs') + try: + record = cls._logging_queue.get_nowait() + logger = cls._get_processing_logger(record.name) + logger.handle(record) + except queue.Empty: + return + except ValueError as e: + print(f'Value error: {e}') + return + except Exception as e: + sys.stderr.write(f'Encountered exception: {e} while attempting to process logs. Traceback:\n') + sys.stderr.write(traceback.format_exc()) + + From 62db71cee1e67b4350ba4bc4257f33d47fb0f029 Mon Sep 17 00:00:00 2001 From: Fall Byrd Date: Sun, 15 Dec 2024 19:51:12 -0800 Subject: [PATCH 03/13] Cut over to the new multiprocessing logger --- backend/api/core.py | 14 ++-- backend/api/server.py | 22 ++++-- backend/hardware/core.py | 23 ++++-- backend/hardware/devicelist.py | 9 ++- backend/hardware/gpiochip/gpiod.py | 6 +- backend/hardware/gpiochip/gpiod_chipset.py | 8 +- backend/hardware/gpiochip/gpiod_simulation.py | 6 +- .../reagentdispenser/peristalticpump.py | 9 ++- .../hardware/reagentdispenser/simulation.py | 18 ++--- .../hardware/reagentdispenser/syringepump.py | 8 +- .../basictempcontroller.py | 16 ++-- .../temperaturecontroller/simulation.py | 25 ++++--- backend/hardware/thermometer/serial.py | 22 +++--- backend/main.py | 75 ++++++++----------- backend/microlab/core.py | 17 +++-- backend/microlab/interface.py | 12 ++- backend/recipes/base.py | 21 +++--- backend/recipes/core.py | 8 +- backend/recipes/tasks.py | 42 +++++++---- backend/requirements.txt | 4 +- 20 files changed, 206 insertions(+), 159 deletions(-) diff --git a/backend/api/core.py b/backend/api/core.py index b5d0fa4..0aed2bc 100644 --- a/backend/api/core.py +++ b/backend/api/core.py @@ -8,14 +8,18 @@ from api.server import WaitressAPIServer from api.app import FlaskApp from microlab.interface import MicrolabInterface +from util.logger import MultiprocessingLogger -def run_flask(in_queue, out_queue): +def run_flask(in_queue, out_queue, logging_queue): - logging.info("### STARTING API ###") - werkzeugLogger = logging.getLogger("werkzeug") - # suppresses logging of individual requests to endpoints. Prevents log spam - werkzeugLogger.setLevel(logging.WARNING) + # The initialize_logger call only needs to happen once when a new process is started. + # Logs from this point on will just require a call to MultiprocessingLogger.get_logger() + # within the same process. 
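+    # (The queue itself is created once, by the parent process calling MultiprocessingLogger.initialize_logger() with no arguments; see BackendManager in main.py.)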
+ MultiprocessingLogger.initialize_logger(logging_queue) + + logger = MultiprocessingLogger.get_logger(__name__) + logger.info("### STARTING API ###") microlab_interface = MicrolabInterface(in_queue, out_queue) diff --git a/backend/api/server.py b/backend/api/server.py index cad798d..6e444f8 100644 --- a/backend/api/server.py +++ b/backend/api/server.py @@ -7,9 +7,7 @@ from config import microlabConfig as config from microlab.interface import MicrolabInterface - - -LOGGER = logging.getLogger(__name__) +from util.logger import MultiprocessingLogger class WaitressAPIServer: @@ -17,17 +15,26 @@ class WaitressAPIServer: _server = None _microlab_interface = None + _logger = None + def __init__(self, app: Flask): self._app = app signal.signal(signal.SIGINT, self._shutdown_signal_handler) signal.signal(signal.SIGTERM, self._shutdown_signal_handler) + if self._logger is None: + self._logger = self._get_logger() + + @classmethod + def _get_logger(cls) -> logging.Logger: + return MultiprocessingLogger.get_logger(__name__) + @classmethod def set_microlab_interface(cls, microlab_interface: MicrolabInterface): cls._microlab_interface = microlab_interface def run(self): - LOGGER.info('Starting backend waitress server') + self._logger.info('Starting backend waitress server') self._get_server(self._app).run() def _shutdown_signal_handler(self, signum, frame): @@ -42,10 +49,13 @@ def _get_server(cls, app: Flask): @classmethod def shutdown(cls): + if cls._logger is None: + cls._logger = cls._get_logger() + if cls._server: - LOGGER.debug('Shutting down waitress server') + cls._logger.debug('Shutting down waitress server') cls._server.close() - LOGGER.debug('Completed shut down of waitress server') + cls._logger.debug('Completed shut down of waitress server') cls._microlab_interface.close_to_microlab_queue() diff --git a/backend/hardware/core.py b/backend/hardware/core.py index 9f84fa0..425401f 100644 --- a/backend/hardware/core.py +++ b/backend/hardware/core.py @@ -7,11 +7,12 @@ file. """ +import logging import time import config from hardware import devicelist from enum import Enum -import logging +from util.logger import MultiprocessingLogger from hardware.devicelist import loadHardwareConfiguration @@ -25,11 +26,13 @@ class MicroLabHardwareState(Enum): class MicroLabHardware: _microlabHardware = None + _logger = None def __init__(self, deviceDefinition: list[dict]): """ Constructor. Initializes the hardware. 
""" + self.startTime = None self.devices = {} self.state = MicroLabHardwareState.STARTING @@ -38,12 +41,22 @@ def __init__(self, deviceDefinition: list[dict]): self.startTime = time.monotonic() self.loadHardware(deviceDefinition) + if self._logger is None: + self._logger = self._get_logger() + + @classmethod + def _get_logger(cls) -> logging.Logger: + return MultiprocessingLogger.get_logger(__name__) + @classmethod def get_microlab_hardware_controller(cls): + if cls._logger is None: + cls._logger = cls._get_logger() + if not cls._microlabHardware: - logging.info("") - logging.info("### STARTING MICROLAB HARDWARE CONTROLLER ###") - logging.info("Loading microlab hardware configuration.") + cls._logger.info("") + cls._logger.info("### STARTING MICROLAB HARDWARE CONTROLLER ###") + cls._logger.info("Loading microlab hardware configuration.") hardwareConfig = loadHardwareConfiguration() deviceDefinitions = hardwareConfig['devices'] @@ -67,7 +80,7 @@ def loadHardware(self, deviceDefinition: list[dict]): self.state = MicroLabHardwareState.INITIALIZED return True, '' except Exception as e: - logging.exception(str(e)) + self._logger.exception(str(e)) self.state = MicroLabHardwareState.FAILED_TO_START self.error = e return False, str(e) diff --git a/backend/hardware/devicelist.py b/backend/hardware/devicelist.py index 1c04e68..ad0cf72 100644 --- a/backend/hardware/devicelist.py +++ b/backend/hardware/devicelist.py @@ -6,9 +6,9 @@ from config import microlabConfig as config import yaml from os.path import exists -import logging from functools import cmp_to_key from copy import copy +from util.logger import MultiprocessingLogger def sort_device_configs(deviceConfigs: list[dict]): @@ -47,12 +47,13 @@ def loadHardwareConfiguration() -> dict: def setupDevices(deviceDefinitions: list[dict]): validateConfiguration(deviceDefinitions) + logger = MultiprocessingLogger.get_logger(__name__) devices = {} for device in deviceDefinitions: - logging.info('Loading device "{0}".'.format(device['id'])) - logging.debug('{0} configuration: {1}'.format(device['id'], device)) + logger.info('Loading device "{0}".'.format(device['id'])) + logger.debug('{0} configuration: {1}'.format(device['id'], device)) deviceType = device["type"] deviceID = device['id'] if deviceType == "tempController": @@ -67,7 +68,7 @@ def setupDevices(deviceDefinitions: list[dict]): devices[deviceID] = gpiochip.createGPIOChip(device, devices) else: raise Exception("Unsupported device type '{0}'".format(deviceType)) - logging.info('"{0}" loaded successfully.'.format(device['id'])) + logger.info('"{0}" loaded successfully.'.format(device['id'])) return devices diff --git a/backend/hardware/gpiochip/gpiod.py b/backend/hardware/gpiochip/gpiod.py index 95e27d1..e51311a 100644 --- a/backend/hardware/gpiochip/gpiod.py +++ b/backend/hardware/gpiochip/gpiod.py @@ -1,6 +1,6 @@ from hardware.gpiochip.base import GPIOChip, LINE_REQ_DIR_OUT import gpiod -import logging +from util.logger import MultiprocessingLogger class GPIODChip(GPIOChip): @@ -15,6 +15,8 @@ def __init__(self, gpio_config: dict): dictionary mapping strings to line numbers for adding human readable names to GPIO lines """ + self._logger = MultiprocessingLogger.get_logger(__name__) + self.output_offsets = [] self.output_values = [] self.output_lines = [] @@ -24,7 +26,7 @@ def __init__(self, gpio_config: dict): if 'lineAliases' in gpio_config: for alias, line in gpio_config['lineAliases'].items(): self.lineAliases[alias] = line - logging.debug(self.lineAliases) + 
self._logger.debug(self.lineAliases) def __output(self): """ diff --git a/backend/hardware/gpiochip/gpiod_chipset.py b/backend/hardware/gpiochip/gpiod_chipset.py index f28db46..843c06e 100644 --- a/backend/hardware/gpiochip/gpiod_chipset.py +++ b/backend/hardware/gpiochip/gpiod_chipset.py @@ -1,5 +1,5 @@ from hardware.gpiochip.base import GPIOChip, LINE_REQ_DIR_OUT -import logging +from util.logger import MultiprocessingLogger class GPIODChipset(GPIOChip): @@ -14,6 +14,8 @@ def __init__(self, gpio_config: dict, devices: dict): dictionary mapping strings to line numbers for adding human readable names to GPIO lines """ + self._logger = MultiprocessingLogger.get_logger(__name__) + self.chips = { "defaultChip": devices[gpio_config["defaultChipID"]], } @@ -25,10 +27,10 @@ def __init__(self, gpio_config: dict, devices: dict): for chipID, chip in self.chips.items(): for alias, line in chip.lineAliases.items(): if alias in self.lineAliases: - logging.warning("GPIO line alias '{0}' has a conflict between chips {1} and {2}. Using {2}.".format(alias, chipID, self.lineAliases[alias])) + self._logger.warning("GPIO line alias '{0}' has a conflict between chips {1} and {2}. Using {2}.".format(alias, chipID, self.lineAliases[alias])) continue self.lineAliases[alias] = chipID - logging.debug(self.lineAliases) + self._logger.debug(self.lineAliases) def setup(self, pin, pinType=LINE_REQ_DIR_OUT, outputValue=0): """ diff --git a/backend/hardware/gpiochip/gpiod_simulation.py b/backend/hardware/gpiochip/gpiod_simulation.py index 80dcf8b..84c64d7 100644 --- a/backend/hardware/gpiochip/gpiod_simulation.py +++ b/backend/hardware/gpiochip/gpiod_simulation.py @@ -1,5 +1,5 @@ from hardware.gpiochip.base import LINE_REQ_DIR_OUT -import logging +from util.logger import MultiprocessingLogger class GPIODChipSimulation(): @@ -14,6 +14,8 @@ def __init__(self, gpio_config: dict): dictionary mapping strings to line numbers for adding human readable names to GPIO lines """ + self._logger = MultiprocessingLogger.get_logger(__name__) + self.output_offsets = [] self.output_values = [] self.output_lines = [] @@ -22,7 +24,7 @@ def __init__(self, gpio_config: dict): if 'lineAliases' in gpio_config: for alias, line in gpio_config['lineAliases'].items(): self.lineAliases[alias] = line - logging.debug(self.lineAliases) + self._logger.debug(self.lineAliases) def __output(self): """ diff --git a/backend/hardware/reagentdispenser/peristalticpump.py b/backend/hardware/reagentdispenser/peristalticpump.py index 5c48171..32574fb 100644 --- a/backend/hardware/reagentdispenser/peristalticpump.py +++ b/backend/hardware/reagentdispenser/peristalticpump.py @@ -1,7 +1,6 @@ import serial from hardware.reagentdispenser.base import ReagentDispenser -import logging - +from util.logger import MultiprocessingLogger class PeristalticPump(ReagentDispenser): def __init__(self, reagent_dispenser_config: dict): @@ -21,6 +20,8 @@ def __init__(self, reagent_dispenser_config: dict): Z mmPerml Arbitrary scaling factor """ + self._logger = MultiprocessingLogger.get_logger(__name__) + self.peristalticPumpsConfig = reagent_dispenser_config["peristalticPumpsConfig"] self.grblSer = serial.Serial(reagent_dispenser_config["arduinoPort"], 115200, timeout=1) self.grblWrite(self.grblSer, "G91") @@ -44,11 +45,11 @@ def dispense(self, pumpId, volume, duration=None): if duration: dispenseSpeed = min((volume / duration) * 60 * mmPerml, dispenseSpeed) command = "G91 G1 {0}{1} F{2}\n".format(pumpId, totalmm, dispenseSpeed) - logging.debug("Dispensing with command 
'{}'".format(command)) + self._logger.debug("Dispensing with command '{}'".format(command)) self.grblWrite(self.grblSer, command) dispenseTime = abs(totalmm) / (dispenseSpeed / 60) - logging.info( + self._logger.info( "Dispensing {}ml with motor speed of {}mm/min over {} seconds".format( volume, dispenseSpeed, dispenseTime ) diff --git a/backend/hardware/reagentdispenser/simulation.py b/backend/hardware/reagentdispenser/simulation.py index 9a64252..7e5d168 100644 --- a/backend/hardware/reagentdispenser/simulation.py +++ b/backend/hardware/reagentdispenser/simulation.py @@ -1,14 +1,11 @@ -import logging - from hardware.reagentdispenser.base import ReagentDispenser - - -def log(message): - logging.info('reagentdispenser.simulation - {0}'.format(message)) +from util.logger import MultiprocessingLogger class SimulatedReagentDispenser(ReagentDispenser): def __init__(self, reagent_dispenser_config: dict): + self._logger = MultiprocessingLogger.get_logger(__name__) + self.minSpeed = 0.1 self.maxSpeed = 10 if 'minSpeed' in reagent_dispenser_config: @@ -16,6 +13,9 @@ def __init__(self, reagent_dispenser_config: dict): if 'maxSpeed' in reagent_dispenser_config: self.maxSpeed = reagent_dispenser_config['maxSpeed'] + def log(self, message): + self._logger.info('reagentdispenser.simulation - {0}'.format(message)) + def dispense(self, pumpId, volume, duration=None): """ Displays pump dispensing message. @@ -28,11 +28,11 @@ def dispense(self, pumpId, volume, duration=None): None """ if pumpId == 'X': - log('Dispensing {0}ml from pump X'.format(volume)) + self.log('Dispensing {0}ml from pump X'.format(volume)) elif pumpId == 'Y': - log('Dispensing {0}ml from pump Y'.format(volume)) + self.log('Dispensing {0}ml from pump Y'.format(volume)) elif pumpId == 'Z': - log('Dispensing {0}ml from pump Z'.format(volume)) + self.log('Dispensing {0}ml from pump Z'.format(volume)) else: raise ValueError("Pump '{0}' does not exist.".format(pumpId)) return abs(volume) diff --git a/backend/hardware/reagentdispenser/syringepump.py b/backend/hardware/reagentdispenser/syringepump.py index 8029b43..56405b5 100644 --- a/backend/hardware/reagentdispenser/syringepump.py +++ b/backend/hardware/reagentdispenser/syringepump.py @@ -1,7 +1,7 @@ import serial from hardware.reagentdispenser.base import ReagentDispenser -import logging import math +from util.logger import MultiprocessingLogger class SyringePump(ReagentDispenser): @@ -46,6 +46,8 @@ def __init__(self, reagent_dispenser_config: dict): mmPerml maxmmPerMin """ + self._logger = MultiprocessingLogger.get_logger(__name__) + self.syringePumpsConfig = reagent_dispenser_config["syringePumpsConfig"] self.grblSer = serial.Serial(reagent_dispenser_config["arduinoPort"], 115200, timeout=1) self.axisMinmmPerMin = {} @@ -86,11 +88,11 @@ def dispense(self, pumpId, volume, duration=None): dispenseSpeed = min((volume / duration) * 60 * mmPerml, dispenseSpeed) totalmm = volume * mmPerml command = "G91 G1 {0}{1} F{2}\n".format(pumpId, totalmm, dispenseSpeed) - logging.debug("Dispensing with command '{}'".format(command)) + self._logger.debug("Dispensing with command '{}'".format(command)) self.grblWrite(self.grblSer, command) dispenseTime = abs(totalmm) / (dispenseSpeed / 60) - logging.info( + self._logger.info( "Dispensing {}ml with motor speed of {}mm/min over {} seconds".format( volume, dispenseSpeed, dispenseTime ) diff --git a/backend/hardware/temperaturecontroller/basictempcontroller.py b/backend/hardware/temperaturecontroller/basictempcontroller.py index 5f0652c..6c1b6c6 100644 --- 
a/backend/hardware/temperaturecontroller/basictempcontroller.py +++ b/backend/hardware/temperaturecontroller/basictempcontroller.py @@ -1,5 +1,5 @@ from hardware.temperaturecontroller.base import TempController -import logging +from util.logger import MultiprocessingLogger RELAY_ON = 1 @@ -26,6 +26,8 @@ def __init__(self, temp_controller_config: dict, devices: dict): Minimum temperature the hardware will support """ super().__init__(temp_controller_config, devices) + self._logger = MultiprocessingLogger.get_logger(__name__) + self.gpio = devices[temp_controller_config["gpioID"]] self.heaterPin = temp_controller_config["heaterPin"] self.heaterPumpPin = temp_controller_config["heaterPumpPin"] @@ -50,7 +52,7 @@ def turnHeaterOn(self): :return: None """ - logging.debug("heater turned on") + self._logger.debug("heater turned on") self.gpio.output(self.heaterPin, RELAY_ON) def turnHeaterOff(self): @@ -60,15 +62,15 @@ def turnHeaterOff(self): :return: None """ - logging.debug("heater turned off") + self._logger.debug("heater turned off") self.gpio.output(self.heaterPin, RELAY_OFF) def turnHeaterPumpOn(self): - logging.debug("heater pump turned on") + self._logger.debug("heater pump turned on") self.gpio.output(self.heaterPumpPin, RELAY_ON) def turnHeaterPumpOff(self): - logging.debug("heater pump turned off") + self._logger.debug("heater pump turned off") self.gpio.output(self.heaterPumpPin, RELAY_OFF) def turnCoolerOn(self): @@ -78,7 +80,7 @@ def turnCoolerOn(self): :return: None """ - logging.debug("cooler turned on") + self._logger.debug("cooler turned on") self.gpio.output(self.coolerPin, RELAY_ON) def turnCoolerOff(self): @@ -88,7 +90,7 @@ def turnCoolerOff(self): :return: None """ - logging.debug("cooler turned off") + self._logger.debug("cooler turned off") self.gpio.output(self.coolerPin, RELAY_OFF) def getTemp(self): diff --git a/backend/hardware/temperaturecontroller/simulation.py b/backend/hardware/temperaturecontroller/simulation.py index fbed100..635c284 100644 --- a/backend/hardware/temperaturecontroller/simulation.py +++ b/backend/hardware/temperaturecontroller/simulation.py @@ -1,14 +1,12 @@ from hardware.temperaturecontroller.base import TempController -import logging - - -def log(message): - logging.info('tempcontroller.simulation - ' + str(message)) +from util.logger import MultiprocessingLogger class SimulatedTempController(TempController): def __init__(self, sim_temp_controller_config: dict): super().__init__(sim_temp_controller_config, devices=None) + self._logger = MultiprocessingLogger.get_logger(__name__) + self.maxTemp = sim_temp_controller_config["maxTemp"] self.minTemp = sim_temp_controller_config["minTemp"] self.heating = False @@ -17,6 +15,9 @@ def __init__(self, sim_temp_controller_config: dict): if 'temp' in sim_temp_controller_config: self.temperature = sim_temp_controller_config['temp'] + def log(self, message): + self._logger.info('tempcontroller.simulation - ' + str(message)) + def turnHeaterOn(self): """ Sets the heater flag for the simulation. 
@@ -24,7 +25,7 @@ def turnHeaterOn(self): :return: None """ - log('Turning on heat') + self.log('Turning on heat') self.heating = True def turnHeaterOff(self): @@ -34,14 +35,14 @@ def turnHeaterOff(self): :return: None """ - log('Turning off heat') + self.log('Turning off heat') self.heating = False def turnHeaterPumpOn(self): - log("heater pump turned on") + self.log("heater pump turned on") def turnHeaterPumpOff(self): - log("heater pump turned off") + self.log("heater pump turned off") def turnCoolerOn(self): """ @@ -50,7 +51,7 @@ def turnCoolerOn(self): :return: None """ - log('Turning on cooling') + self.log('Turning on cooling') self.cooling = True def turnCoolerOff(self): @@ -60,7 +61,7 @@ def turnCoolerOff(self): :return: None """ - log('Turning off cooling') + self.log('Turning off cooling') self.cooling = False def getTemp(self): @@ -85,7 +86,7 @@ def getTemp(self): self.temperature = self.temperature - 0.1 elif self.temperature < 24: self.temperature = self.temperature + 0.1 - logging.info('Temperature read as: {0}'.format(self.temperature)) + self.log('Temperature read as: {0}'.format(self.temperature)) return self.temperature def getMaxTemperature(self): diff --git a/backend/hardware/thermometer/serial.py b/backend/hardware/thermometer/serial.py index 3561e99..b95442b 100644 --- a/backend/hardware/thermometer/serial.py +++ b/backend/hardware/thermometer/serial.py @@ -1,9 +1,9 @@ from hardware.thermometer.base import TempSensor import serial import time -import logging from hardware.util.exceptions import HardwareLoadError from datetime import datetime, timedelta +from util.logger import MultiprocessingLogger class SerialTempSensor(TempSensor): @@ -15,6 +15,8 @@ def __init__(self, thermometer_config: dict): serialDevice A string with the device read from """ + self._logger = MultiprocessingLogger.get_logger(__name__) + self.lastTemp = 0 self.nextTempReadingTime = datetime.now() @@ -49,11 +51,11 @@ def getTemp(self): try: line = self.tempSer.readline().decode() except Exception as e: - logging.error("Error reading from thermometer") - logging.exception(str(e)) + self._logger.error("Error reading from thermometer") + self._logger.exception(str(e)) continue finally: - logging.debug("ser read " + str(len(line)) + " " + line) + self._logger.debug("ser read " + str(len(line)) + " " + line) time.sleep(0.5) lastLine = str(line) @@ -74,7 +76,7 @@ def getTemp(self): if end == -1: # Maybe just go to the end? 
                 end = len(lastLine) - 1
-            logging.debug(
+            self._logger.debug(
                 "found "
                 + str(start)
                 + " "
@@ -90,14 +92,14 @@ def getTemp(self):
                 self.lastTemp = float(lastLine[start:end])
                 self.nextTempReadingTime = datetime.now() + timedelta(seconds=1)
             except Exception as e:
-                logging.error("Error converting temperature reading")
-                logging.error(lastLine[start:end])
-                logging.exception(str(e))
+                self._logger.error("Error converting temperature reading")
+                self._logger.error(lastLine[start:end])
+                self._logger.exception(str(e))
                 self.lastTemp = "-999"
         else:
             self.lastTemp = "-999"
-            logging.error("erroneous reading: {0}".format(lastLine[start:end]))
-        logging.debug(
+            self._logger.error("erroneous reading: {0}".format(lastLine[start:end]))
+        self._logger.debug(
             "Read temperature " + str(self.lastTemp)
         )  # + ' ' + str(lastLine))

diff --git a/backend/main.py b/backend/main.py
index 10de91c..90691e3 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -4,13 +4,7 @@
 Starts the flask application on the configured port (default 8081)
 Look in api.routes for the actual api code
 """
-import sys
-import time
 import signal
-import logging
-import logging.handlers as handlers
-
-import multiprocessing_logging
 
 import config
 
@@ -18,29 +12,7 @@
 from api.core import run_flask
 from microlab.core import startMicrolabProcess
-from util.logFormatter import MultiLineFormatter
-
-LOGGER = logging.getLogger(__name__)
-
-
-def setupLogging():
-    logHandlers = []
-    formatter = MultiLineFormatter(fmt="%(asctime)s [%(levelname)s]: %(message)s")
-
-    fileLogger = handlers.RotatingFileHandler(
-        "{0}/microlab.log".format(config.microlabConfig.logDirectory),
-        maxBytes=config.microlabConfig.logFileMaxBytes,
-        backupCount=config.microlabConfig.logFileBackupCount,
-    )
-    fileLogger.setFormatter(formatter)
-    logHandlers.append(fileLogger)
-    if config.microlabConfig.logToStderr:
-        stderrLogger = logging.StreamHandler(sys.stderr)
-        stderrLogger.setFormatter(formatter)
-        logHandlers.append(stderrLogger)
-
-    logging.basicConfig(handlers=logHandlers, level=config.microlabConfig.logLevel)
-    multiprocessing_logging.install_mp_handler()
+from util.logger import MultiprocessingLogger
 
 
 class BackendManager:
@@ -52,11 +24,21 @@ def __init__(self):
         self._processes = []
         self._queues = []
 
+        # The MultiprocessingLogger.initialize_logger call needs to stay out of the global scope:
+        # it seems that anything that creates a process-safe object (in this case a Queue)
+        # 'locks in' the type of process creation, and later calls to
+        # set_start_method('spawn') will throw an exception
+
+        # Additionally, the initialize_logger call without any arguments should only happen
+        # in the process that is creating the other processes
+        MultiprocessingLogger.initialize_logger()
+        self._logger = MultiprocessingLogger.get_logger(__name__)
+
     def _are_processes_alive(self) -> bool:
         return any([process.is_alive() for process in self._processes])
 
     def _cleanup_queues(self):
-        LOGGER.debug('Cleaning up queues')
+        self._logger.debug('Cleaning up queues')
 
         self._q1.close()
         self._q2.close()
@@ -64,10 +46,10 @@ def _cleanup_queues(self):
         self._q1.join_thread()
         self._q2.join_thread()
 
-        LOGGER.debug('Completed cleaning up queues')
+        self._logger.debug('Completed cleaning up queues')
 
     def _cleanup_processes(self):
-        LOGGER.debug('Cleaning up processes')
+        self._logger.debug('Cleaning up processes')
 
         while self._are_processes_alive():
             for proc in self._processes:
@@ -82,42 +64,45 @@ def _cleanup_processes(self):
                     pass
 
                 if proc.is_alive():
-                    LOGGER.debug(f'Attempting to join proc: 
{proc.pid}') + self._logger.debug(f'Attempting to join proc: {proc.pid}') proc.join(timeout=1) - LOGGER.debug('Completed cleaning up processes') + self._logger.debug('Completed cleaning up processes') def _cleanup_everything(self): self._cleanup_processes() self._cleanup_queues() + MultiprocessingLogger.cleanup_logging() + # Add a cleanup_logging here + def _handle_exit_signals(self, signum, frame): - LOGGER.debug('Beginning to handle exit signals in BackendManager') + self._logger.debug('Beginning to handle exit signals in BackendManager') self._cleanup_everything() - LOGGER.debug('Completed handling exit signals in BackendManager') + self._logger.debug('Completed handling exit signals in BackendManager') def _start_microlab(self): self._microlab_manager_process = Process( - target=startMicrolabProcess, args=(self._q1, self._q2), name="microlab" + target=startMicrolabProcess, args=(self._q1, self._q2, MultiprocessingLogger.get_logging_queue()), name="microlab" ) self._processes.append(self._microlab_manager_process) - LOGGER.debug('Starting the microlab process') + self._logger.debug('Starting the microlab process') self._microlab_manager_process.start() - LOGGER.debug(f'microlab process pid: {self._microlab_manager_process.pid}') + self._logger.debug(f'microlab process pid: {self._microlab_manager_process.pid}') def _start_server(self): - self._flaskProcess = Process(target=run_flask, args=(self._q2, self._q1), name="flask", daemon=True) + self._flaskProcess = Process(target=run_flask, args=(self._q2, self._q1, MultiprocessingLogger.get_logging_queue()), name="flask", daemon=True) self._processes.append(self._flaskProcess) - LOGGER.debug('Starting the server process') + self._logger.debug('Starting the server process') self._flaskProcess.start() print(f'server process pid: {self._flaskProcess.pid}') def run(self): config.initialSetup() - LOGGER.info("### STARTING MAIN MICROLAB SERVICE ###") + self._logger.info("### STARTING MAIN MICROLAB SERVICE ###") self._start_microlab() self._start_server() @@ -126,9 +111,9 @@ def run(self): signal.signal(signal.SIGTERM, self._handle_exit_signals) while self._are_processes_alive(): - time.sleep(0.1) + MultiprocessingLogger.process_logs() - LOGGER.debug("### ENDING MICROLAB SERVICE EXECUTION ###") + self._logger.debug("### ENDING MICROLAB SERVICE EXECUTION ###") def main(): @@ -138,5 +123,5 @@ def main(): if __name__ == "__main__": set_start_method('spawn') - setupLogging() + # setupLogging() main() diff --git a/backend/microlab/core.py b/backend/microlab/core.py index 461c570..57bdcaf 100644 --- a/backend/microlab/core.py +++ b/backend/microlab/core.py @@ -13,12 +13,13 @@ from config import microlabConfig as config from hardware.core import MicroLabHardware +from util.logger import MultiprocessingLogger HALT = threading.Event() MUTEX = threading.Lock() -def startMicrolabProcess(in_queue, out_queue): +def startMicrolabProcess(in_queue, out_queue, logging_queue): """ Starts up the microlab process @@ -28,6 +29,9 @@ def startMicrolabProcess(in_queue, out_queue): :param out_queue: The queue responses will be sent to, when applicable. + :param logging_queue: + The queue logging messages will be sent through. + All commands sent over the queue have the following format: dict @@ -73,6 +77,9 @@ def startMicrolabProcess(in_queue, out_queue): (False, message) on failure. 
reference "selectOption" in /recipes/__init__.py for more info """ + MultiprocessingLogger.initialize_logger(logging_queue) + logger = MultiprocessingLogger.get_logger(__name__) + microlabHardware = MicroLabHardware.get_microlab_hardware_controller() def runMicrolab(): @@ -92,18 +99,18 @@ def runMicrolab(): microlab.start() def handleSignal(_a, _b): - logging.info("") - logging.info("Shutting down microlab.") + logger.info("") + logger.info("Shutting down microlab.") HALT.set() microlab.join() - logging.info("Shutdown completed.") + logger.info("Shutdown completed.") sys.exit() signal.signal(signal.SIGINT, handleSignal) signal.signal(signal.SIGTERM, handleSignal) def reloadHardware(): - logging.info("Reloading microlab device configuration") + logger.info("Reloading microlab device configuration") hardwareConfig = hardware.devicelist.loadHardwareConfiguration() deviceDefinitions = hardwareConfig['devices'] return microlabHardware.loadHardware(deviceDefinitions) diff --git a/backend/microlab/interface.py b/backend/microlab/interface.py index 763581f..4f707f2 100644 --- a/backend/microlab/interface.py +++ b/backend/microlab/interface.py @@ -1,9 +1,5 @@ -import logging - from queue import Empty - - -LOGGER = logging.getLogger(__name__) +from util.logger import MultiprocessingLogger class MicrolabInterface: @@ -21,8 +17,10 @@ def __init__(self, in_queue, out_queue): self.toMicrolab = out_queue self.fromMicrolab = in_queue + self._logger = MultiprocessingLogger.get_logger(__name__) + def close_to_microlab_queue(self): - LOGGER.debug('Begining purge of to microlab queue') + self._logger.debug('Beginning purge of to microlab queue') while True: try: @@ -31,7 +29,7 @@ def close_to_microlab_queue(self): self.toMicrolab.close() break - LOGGER.debug('Completed purge of to microlab queue') + self._logger.debug('Completed purge of to microlab queue') def start(self, name): """ diff --git a/backend/recipes/base.py b/backend/recipes/base.py index 1eb85f0..723e3cc 100644 --- a/backend/recipes/base.py +++ b/backend/recipes/base.py @@ -85,9 +85,8 @@ from recipes import tasks from hardware.core import MicroLabHardware from datetime import datetime, timedelta, timezone -import traceback from enum import Enum -import logging +from util.logger import MultiprocessingLogger class RecipeState(str, Enum): @@ -116,6 +115,8 @@ def __init__(self, plan, microlabHardware: MicroLabHardware): :param plan: The recipe plan. See module documentation for object description. """ + self._logger = MultiprocessingLogger.get_logger(__name__) + self.step = 0 self.message = '' self.status = RecipeState.IDLE @@ -138,11 +139,11 @@ def start(self): """ supported, msg = self.isRecipeSupported(self.plan) if supported: - logging.info('Starting recipe {0}'.format(self.currentRecipe)) + self._logger.info('Starting recipe {0}'.format(self.currentRecipe)) self.step = 0 self.runStep() else: - logging.info('Recipe {0} unsupported: {1}'.format(self.currentRecipe, msg)) + self._logger.info('Recipe {0} unsupported: {1}'.format(self.currentRecipe, msg)) self.status = RecipeState.RECIPE_UNSUPPORTED self.message = msg @@ -167,7 +168,7 @@ def stop(self): :return: None """ - logging.info('Stopping recipe {0}'.format(self.currentRecipe)) + self._logger.info('Stopping recipe {0}'.format(self.currentRecipe)) self.step = -1 if self.status != RecipeState.ERROR: self.status = RecipeState.IDLE @@ -274,7 +275,7 @@ def runStep(self): string The message to display to the user in case of failure. 
""" - logging.info('Running step {0}'.format(self.step)) + self._logger.info('Running step {0}'.format(self.step)) step = self.plan[RECIPE_STEPS][self.step] self.message = step['message'] self.stepCompletionTime = None @@ -347,18 +348,18 @@ def tickTasks(self): """ for task in self.currentTasks: if not task["done"] and datetime.now() > task["nextTime"]: - logging.debug("task is ready for next iteration") + self._logger.debug("task is ready for next iteration") try: res = next(task["fn"]) if res == None: - logging.debug("task is done") + self._logger.debug("task is done") task["done"] = True else: duration = timedelta(seconds=res) - logging.debug("task is scheduled for {0}".format(datetime.now() + duration)) + self._logger.debug("task is scheduled for {0}".format(datetime.now() + duration)) task["nextTime"] = datetime.now() + duration except Exception as e: - logging.exception(str(e)) + self._logger.exception(str(e)) task["exception"] = e self.status = RecipeState.ERROR self.message = 'Task execution failed.' diff --git a/backend/recipes/core.py b/backend/recipes/core.py index e17f739..5974aab 100644 --- a/backend/recipes/core.py +++ b/backend/recipes/core.py @@ -13,8 +13,8 @@ from recipes import state from recipes.base import Recipe from hardware.core import MicroLabHardware, MicroLabHardwareState -from config import microlabConfig as config -import logging +from config import microlabConfig as config +from util.logger import MultiprocessingLogger def getRecipeList(): """ @@ -22,6 +22,8 @@ def getRecipeList(): A list of modules in the config.recipesPackages. It is assumed that these are all recipes. """ + logger = MultiprocessingLogger.get_logger(__name__) + path = config.recipesDirectory files = [f for f in listdir(path) if isfile(join(path, f))] recipeList = [] @@ -32,7 +34,7 @@ def getRecipeList(): with open(join(path, f)) as inf: recipeList.append(json.load(inf)) except json.JSONDecodeError: - logging.error("Error loading recipe file: {0}. File is not in proper JSON format".format(f)) + logger.error("Error loading recipe file: {0}. 
File is not in proper JSON format".format(f)) # This doesn't actually work yet because .4tv are not importable as modules if f.endswith('.4tv'): recipeList.append(f[:-4]) diff --git a/backend/recipes/tasks.py b/backend/recipes/tasks.py index e74df22..27be322 100644 --- a/backend/recipes/tasks.py +++ b/backend/recipes/tasks.py @@ -9,10 +9,10 @@ """ from datetime import datetime -import logging from simple_pid import PID from hardware.core import MicroLabHardware +from util.logger import MultiprocessingLogger def heat(microlab: MicroLabHardware, parameters: dict): @@ -25,8 +25,10 @@ def heat(microlab: MicroLabHardware, parameters: dict): :return: None """ + logger = MultiprocessingLogger.get_logger(__name__) + targetTemp = parameters["temp"] - logging.info("heating water to {0}...".format(targetTemp)) + logger.info("heating water to {0}...".format(targetTemp)) microlab.turnHeaterOn() microlab.turnHeaterPumpOn() while True: @@ -47,8 +49,10 @@ def cool(microlab: MicroLabHardware, parameters: dict): :return: None """ + logger = MultiprocessingLogger.get_logger(__name__) + targetTemp = parameters["temp"] - logging.info("cooling water to {0}...".format(targetTemp)) + logger.info("cooling water to {0}...".format(targetTemp)) microlab.turnCoolerOn() while True: if microlab.getTemp() <= targetTemp: @@ -139,6 +143,8 @@ def maintainSimple(microlab: MicroLabHardware, parameters: dict): :return: None """ + logger = MultiprocessingLogger.get_logger(__name__) + duration = parameters["time"] targetTemp = parameters["temp"] tolerance = parameters["tolerance"] @@ -152,18 +158,18 @@ def maintainSimple(microlab: MicroLabHardware, parameters: dict): interval = 0.5 start = microlab.secondSinceStart() - logging.info( + logger.info( "Maintaining {0}C for {1} seconds with {2}C tolerance".format( targetTemp, duration, tolerance ) ) # default temp control - logging.debug("Maintaining with default temperature control") + logger.debug("Maintaining with default temperature control") while True: try: currentTemp = microlab.getTemp() - logging.debug("temperature @ {0}".format(currentTemp)) + logger.debug("temperature @ {0}".format(currentTemp)) if (microlab.secondSinceStart() - start) >= duration: microlab.turnHeaterOff() microlab.turnHeaterPumpOff() @@ -185,7 +191,7 @@ def maintainSimple(microlab: MicroLabHardware, parameters: dict): yield interval except Exception as e: - logging.error( + logger.error( "Error in maintainSimple. currentTemp: {0}, targetTemp: {1}. 
Exception: {2}".format( currentTemp, targetTemp, e ) @@ -214,6 +220,8 @@ def maintainPID(microlab: MicroLabHardware, parameters: dict): :return: None """ + logger = MultiprocessingLogger.get_logger(__name__) + duration = parameters["time"] targetTemp = parameters["temp"] tolerance = parameters["tolerance"] @@ -226,12 +234,12 @@ def maintainPID(microlab: MicroLabHardware, parameters: dict): start = microlab.secondSinceStart() - logging.info( + logger.info( "Maintaining {0}C for {1} seconds with {2}C tolerance".format( targetTemp, duration, tolerance ) ) - logging.debug("Maintaining with PID temperature control") + logger.debug("Maintaining with PID temperature control") pidConfig = microlab.getPIDConfig() pid = PID(pidConfig["P"], pidConfig["I"], pidConfig["D"], setpoint=targetTemp) @@ -252,7 +260,7 @@ def maintainPID(microlab: MicroLabHardware, parameters: dict): currentTemp = microlab.getTemp() control = pid(currentTemp) p, i, d = pid.components - logging.info( + logger.info( "Heater PID values: {} {} {} {} {}".format(currentTemp, control, p, i, d) ) @@ -278,7 +286,7 @@ def maintainPID(microlab: MicroLabHardware, parameters: dict): t = microlab.getTemp() a = pid(t) p, i, d = pid.components - logging.debug("Heater PID values: {} {} {} {} {}".format(t, a, p, i, d)) + logger.debug("Heater PID values: {} {} {} {} {}".format(t, a, p, i, d)) if (microlab.secondSinceStart() - start) >= duration: microlab.turnHeaterOff() @@ -300,10 +308,12 @@ def pump(microlab: MicroLabHardware, parameters: dict): :return: None """ + logger = MultiprocessingLogger.get_logger(__name__) + pump = parameters["pump"] volume = parameters["volume"] duration = parameters.get("time", None) - logging.info("Dispensing {0}ml from pump {1}".format(volume, pump)) + logger.info("Dispensing {0}ml from pump {1}".format(volume, pump)) pumpSpeedLimits = microlab.getPumpSpeedLimits(pump) minSpeed = pumpSpeedLimits["minSpeed"] maxSpeed = pumpSpeedLimits["maxSpeed"] @@ -312,7 +322,7 @@ def pump(microlab: MicroLabHardware, parameters: dict): mlPerSecond = volume / duration if mlPerSecond > maxSpeed: - logging.info( + logger.info( "Pump {0} cannot operate fast enough. 
Dispensing at maximum speed.".format(
                 pump
             )
         )
@@ -339,7 +349,7 @@ def pump(microlab: MicroLabHardware, parameters: dict):
         # helps keep the actual task completion time more accurate
         # to desired duration
         executionTime = microlab.secondSinceStart() - startTime
-        logging.debug("dispense exeuction time: {0}".format(executionTime))
+        logger.debug("dispense execution time: {0}".format(executionTime))
         yield 1 / onTime - executionTime
     # dispense remaining volume
     remaining = volume - volumeDispensed
@@ -360,8 +370,10 @@ def stir(microlab: MicroLabHardware, parameters: dict):
     :return:
         None
     """
+    logger = MultiprocessingLogger.get_logger(__name__)
+
     duration = parameters["time"]
-    logging.info("Stirring for {0} seconds".format(duration))
+    logger.info("Stirring for {0} seconds".format(duration))
     start = microlab.secondSinceStart()
     microlab.turnStirrerOn()
     while True:
diff --git a/backend/requirements.txt b/backend/requirements.txt
index 6cd1952..f0c32c1 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -6,5 +6,5 @@ pyserial==3.5
 w1thermsensor==2.0.0
 pyyaml==6.0.1
 configobj==5.0.8
-multiprocessing-logging==0.3.4
-simple-pid==2.0.0
\ No newline at end of file
+simple-pid==2.0.0
+waitress==3.0.0
\ No newline at end of file

From e1f968c9f9f52db8a4e93727168e765c8621b4b5 Mon Sep 17 00:00:00 2001
From: Fall Byrd
Date: Mon, 30 Dec 2024 00:14:50 -0800
Subject: [PATCH 04/13] Changed logger to clean up without possible log drops
 from processes as long as they are running

---
 backend/main.py        |  6 +-----
 backend/util/logger.py | 14 ++++----------
 2 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/backend/main.py b/backend/main.py
index 90691e3..e09f35b 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -73,9 +73,6 @@ def _cleanup_everything(self):
         self._cleanup_processes()
         self._cleanup_queues()
 
-        MultiprocessingLogger.cleanup_logging()
-        # Add a cleanup_logging here
-
     def _handle_exit_signals(self, signum, frame):
         self._logger.debug('Beginning to handle exit signals in BackendManager')
         self._cleanup_everything()
@@ -110,7 +107,7 @@ def run(self):
         signal.signal(signal.SIGINT, self._handle_exit_signals)
         signal.signal(signal.SIGTERM, self._handle_exit_signals)
 
-        while self._are_processes_alive():
+        while self._are_processes_alive() or MultiprocessingLogger.remaining_logs_to_process():
             MultiprocessingLogger.process_logs()
 
         self._logger.debug("### ENDING MICROLAB SERVICE EXECUTION ###")
@@ -123,5 +120,4 @@ def main():
 
 if __name__ == "__main__":
     set_start_method('spawn')
-    # setupLogging()
     main()
diff --git a/backend/util/logger.py b/backend/util/logger.py
index 8705b53..719aadc 100644
--- a/backend/util/logger.py
+++ b/backend/util/logger.py
@@ -48,15 +48,6 @@ def get_logger(cls, logger_name: str) -> logging.Logger:
 
         return logger
 
-    @classmethod
-    def cleanup_logging(cls):
-        # Without the retries check added here I would occasionally see a hang here
-        # on cleanup
-        retries = 0
-        while not cls._logging_queue.empty() and retries < 5:
-            cls.process_logs()
-            retries += 1
-
     @classmethod
     def _get_processing_logger(cls, logger_name: str) -> logging.Logger:
         if logger_name in cls._configured_loggers:
@@ -88,9 +79,12 @@ def _get_processing_logger(cls, logger_name: str) -> logging.Logger:
 
         return logger
 
+    @classmethod
+    def remaining_logs_to_process(cls) -> bool:
+        return cls._logging_queue.empty() == False
+
     @classmethod
     def process_logs(cls):
-        # print('processing logs')
         try:
             record = cls._logging_queue.get_nowait()
             logger = cls._get_processing_logger(record.name)

From 
96806aecd3627914b7047bd51815991f721a5ec2 Mon Sep 17 00:00:00 2001 From: Fall Byrd Date: Mon, 30 Dec 2024 00:22:47 -0800 Subject: [PATCH 05/13] Removed duplication in logging for tempcontroller.simulation and same with reagentdispenser --- backend/hardware/reagentdispenser/simulation.py | 9 +++------ .../temperaturecontroller/simulation.py | 17 +++++++---------- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/backend/hardware/reagentdispenser/simulation.py b/backend/hardware/reagentdispenser/simulation.py index 7e5d168..9f8f714 100644 --- a/backend/hardware/reagentdispenser/simulation.py +++ b/backend/hardware/reagentdispenser/simulation.py @@ -13,9 +13,6 @@ def __init__(self, reagent_dispenser_config: dict): if 'maxSpeed' in reagent_dispenser_config: self.maxSpeed = reagent_dispenser_config['maxSpeed'] - def log(self, message): - self._logger.info('reagentdispenser.simulation - {0}'.format(message)) - def dispense(self, pumpId, volume, duration=None): """ Displays pump dispensing message. @@ -28,11 +25,11 @@ def dispense(self, pumpId, volume, duration=None): None """ if pumpId == 'X': - self.log('Dispensing {0}ml from pump X'.format(volume)) + self._logger.info('Dispensing {0}ml from pump X'.format(volume)) elif pumpId == 'Y': - self.log('Dispensing {0}ml from pump Y'.format(volume)) + self._logger.info('Dispensing {0}ml from pump Y'.format(volume)) elif pumpId == 'Z': - self.log('Dispensing {0}ml from pump Z'.format(volume)) + self._logger.info('Dispensing {0}ml from pump Z'.format(volume)) else: raise ValueError("Pump '{0}' does not exist.".format(pumpId)) return abs(volume) diff --git a/backend/hardware/temperaturecontroller/simulation.py b/backend/hardware/temperaturecontroller/simulation.py index 635c284..7f8db16 100644 --- a/backend/hardware/temperaturecontroller/simulation.py +++ b/backend/hardware/temperaturecontroller/simulation.py @@ -15,9 +15,6 @@ def __init__(self, sim_temp_controller_config: dict): if 'temp' in sim_temp_controller_config: self.temperature = sim_temp_controller_config['temp'] - def log(self, message): - self._logger.info('tempcontroller.simulation - ' + str(message)) - def turnHeaterOn(self): """ Sets the heater flag for the simulation. 
@@ -25,7 +22,7 @@ def turnHeaterOn(self): :return: None """ - self.log('Turning on heat') + self._logger.info('Turning on heat') self.heating = True def turnHeaterOff(self): @@ -35,14 +32,14 @@ def turnHeaterOff(self): :return: None """ - self.log('Turning off heat') + self._logger.info('Turning off heat') self.heating = False def turnHeaterPumpOn(self): - self.log("heater pump turned on") + self._logger.info("heater pump turned on") def turnHeaterPumpOff(self): - self.log("heater pump turned off") + self._logger.info("heater pump turned off") def turnCoolerOn(self): """ @@ -51,7 +48,7 @@ def turnCoolerOn(self): :return: None """ - self.log('Turning on cooling') + self._logger.info('Turning on cooling') self.cooling = True def turnCoolerOff(self): @@ -61,7 +58,7 @@ def turnCoolerOff(self): :return: None """ - self.log('Turning off cooling') + self._logger.info('Turning off cooling') self.cooling = False def getTemp(self): @@ -86,7 +83,7 @@ def getTemp(self): self.temperature = self.temperature - 0.1 elif self.temperature < 24: self.temperature = self.temperature + 0.1 - self.log('Temperature read as: {0}'.format(self.temperature)) + self._logger.info('Temperature read as: {0}'.format(self.temperature)) return self.temperature def getMaxTemperature(self): From b21ee4c2b5f863ed3809e28d3a3a3f27a014722d Mon Sep 17 00:00:00 2001 From: Fall Byrd Date: Mon, 30 Dec 2024 00:43:13 -0800 Subject: [PATCH 06/13] Fixed multiprocess logger issues with logging from the application entry point (__main__) --- backend/util/logger.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/backend/util/logger.py b/backend/util/logger.py index 719aadc..a0429a6 100644 --- a/backend/util/logger.py +++ b/backend/util/logger.py @@ -87,7 +87,15 @@ def remaining_logs_to_process(cls) -> bool: def process_logs(cls): try: record = cls._logging_queue.get_nowait() - logger = cls._get_processing_logger(record.name) + + # We do the record.name against __main__ specifically so we can get the logger from the main thread + # and not have it return the QueueHandler which not only causes issues but also means dropped logging + # from the main thread + if record.name == '__main__': + logger = cls._get_processing_logger('main') + else: + logger = cls._get_processing_logger(record.name) + logger.handle(record) except queue.Empty: return From 8bcaf5be25217f2cf161ec2d424f99a46e7fbb98 Mon Sep 17 00:00:00 2001 From: Fall Byrd Date: Mon, 30 Dec 2024 00:43:47 -0800 Subject: [PATCH 07/13] Moved the debug message for ending microlab service execution to a place where it can actually be logged --- backend/main.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/backend/main.py b/backend/main.py index e09f35b..c8609c0 100644 --- a/backend/main.py +++ b/backend/main.py @@ -77,6 +77,7 @@ def _handle_exit_signals(self, signum, frame): self._logger.debug('Beginning to handle exit signals in BackendManager') self._cleanup_everything() self._logger.debug('Completed handling exit signals in BackendManager') + self._logger.debug("### ENDING MICROLAB SERVICE EXECUTION ###") def _start_microlab(self): self._microlab_manager_process = Process( @@ -107,11 +108,11 @@ def run(self): signal.signal(signal.SIGINT, self._handle_exit_signals) signal.signal(signal.SIGTERM, self._handle_exit_signals) + + while self._are_processes_alive() or MultiprocessingLogger.remaining_logs_to_process(): MultiprocessingLogger.process_logs() - self._logger.debug("### ENDING MICROLAB SERVICE EXECUTION ###") - def main(): backend_manager 
= BackendManager() From 703ba92de6a3ac552f6f283c624fb25b677f0edd Mon Sep 17 00:00:00 2001 From: Fall Byrd Date: Mon, 30 Dec 2024 01:09:20 -0800 Subject: [PATCH 08/13] Found a better general solution to the main thread logger problem --- backend/util/logger.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/backend/util/logger.py b/backend/util/logger.py index a0429a6..2944668 100644 --- a/backend/util/logger.py +++ b/backend/util/logger.py @@ -44,7 +44,7 @@ def get_logger(cls, logger_name: str) -> logging.Logger: logger.addHandler(logging.handlers.QueueHandler(cls._logging_queue)) logger.setLevel(config.microlabConfig.logLevel) - cls._configured_loggers[logger_name] = True + cls._configured_loggers[logger_name] = logger return logger @@ -75,7 +75,7 @@ def _get_processing_logger(cls, logger_name: str) -> logging.Logger: for handler in log_handlers: logger.addHandler(handler) - cls._configured_loggers[logger_name] = True + cls._configured_loggers[logger_name] = logger return logger @@ -83,18 +83,26 @@ def _get_processing_logger(cls, logger_name: str) -> logging.Logger: def remaining_logs_to_process(cls) -> bool: return cls._logging_queue.empty() == False + @classmethod + def _does_logger_have_queue_handler(cls, logger: logging.Logger) -> bool: + for handler in logger.handlers: + if isinstance(handler, logging.handlers.QueueHandler): + return True + @classmethod def process_logs(cls): try: record = cls._logging_queue.get_nowait() - # We do the record.name against __main__ specifically so we can get the logger from the main thread - # and not have it return the QueueHandler which not only causes issues but also means dropped logging - # from the main thread - if record.name == '__main__': - logger = cls._get_processing_logger('main') - else: - logger = cls._get_processing_logger(record.name) + record_name = record.name + + if record_name in cls._configured_loggers: + if cls._does_logger_have_queue_handler(cls._configured_loggers[record_name]): + # We're mangling the name here so if we're in the main thread and we attempt to process the log + # we don't return the queue handler again causing issues as well as the logs to be dropped. + record_name = f'_{record_name}' + + logger = cls._get_processing_logger(record_name) logger.handle(record) except queue.Empty: From dc5cdf60a22ba231ac4792103ca5cfd71cc6285d Mon Sep 17 00:00:00 2001 From: Fall Byrd Date: Mon, 30 Dec 2024 01:26:18 -0800 Subject: [PATCH 09/13] Set a flag based on the main process or not for logging --- backend/util/logger.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/backend/util/logger.py b/backend/util/logger.py index 2944668..941c9e0 100644 --- a/backend/util/logger.py +++ b/backend/util/logger.py @@ -18,6 +18,8 @@ class MultiprocessingLogger: _processing_logger = None + _is_main_process = False + @classmethod def initialize_logger(cls, logging_queue: Union[Queue, None] = None) -> None: """ Initialize the logger. @@ -25,6 +27,7 @@ def initialize_logger(cls, logging_queue: Union[Queue, None] = None) -> None: Must be called in the base process that start the other processes without any argument. 
        In other processes it must be called with a dedicated logging queue."""
         if logging_queue is None:
+            cls._is_main_process = True
             logging_queue = Queue()
 
         cls._logging_queue = logging_queue
 
@@ -34,7 +37,7 @@ def get_logging_queue(cls) -> Union[Queue, None]:
         return cls._logging_queue
 
     @classmethod
-    def get_logger(cls, logger_name: str) -> logging.Logger:
+    def _get_queue_logger(cls, logger_name: str) -> logging.Logger:
         # We only need to configure once, multiple calls to logging.getLogger()
         # will return the same configured instance
         if logger_name in cls._configured_loggers:
@@ -48,6 +51,13 @@ def get_logger(cls, logger_name: str) -> logging.Logger:
 
         return logger
 
+    @classmethod
+    def get_logger(cls, logger_name: str) -> logging.Logger:
+        if cls._is_main_process:
+            return cls._get_processing_logger(logger_name)
+        else:
+            return cls._get_queue_logger(logger_name)
+
     @classmethod
     def _get_processing_logger(cls, logger_name: str) -> logging.Logger:
         if logger_name in cls._configured_loggers:
@@ -93,17 +103,7 @@ def _does_logger_have_queue_handler(cls, logger: logging.Logger) -> bool:
     def process_logs(cls):
         try:
             record = cls._logging_queue.get_nowait()
-
-            record_name = record.name
-
-            if record_name in cls._configured_loggers:
-                if cls._does_logger_have_queue_handler(cls._configured_loggers[record_name]):
-                    # We mangle the name here so the main thread doesn't get the queue-handler
-                    # logger back when processing a record, which would re-enqueue and drop it.
-                    record_name = f'_{record_name}'
-
-            logger = cls._get_processing_logger(record_name)
-
+            logger = cls._get_processing_logger(record.name)
             logger.handle(record)
         except queue.Empty:
             return

From 0a5e013599914bfa99e56a66f891b0af32c00c7a Mon Sep 17 00:00:00 2001
From: Fall Byrd
Date: Mon, 30 Dec 2024 10:35:28 -0800
Subject: [PATCH 10/13] Mocked out the MultiprocessingLogger queue for testing

---
 backend/tests/tasks_test.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/backend/tests/tasks_test.py b/backend/tests/tasks_test.py
index 5ec22d3..944cfc8 100644
--- a/backend/tests/tasks_test.py
+++ b/backend/tests/tasks_test.py
@@ -4,6 +4,9 @@
 import hardware.core
 from unittest.mock import patch, MagicMock
 
+from util.logger import MultiprocessingLogger
+MultiprocessingLogger._logging_queue = MagicMock()
+
 
 @pytest.fixture
 def devices(request):

From 47d2f60abb8c4147ee3a409822a1381030394e98 Mon Sep 17 00:00:00 2001
From: Fall Byrd
Date: Mon, 30 Dec 2024 20:03:10 -0800
Subject: [PATCH 11/13] Changed hardware/gpiochip/grbl.py and hardware/grbl/serial.py to use the new multiprocessing logger

---
 backend/hardware/gpiochip/grbl.py | 6 ++++--
 backend/hardware/grbl/serial.py   | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/backend/hardware/gpiochip/grbl.py b/backend/hardware/gpiochip/grbl.py
index 76df840..092e5f6 100644
--- a/backend/hardware/gpiochip/grbl.py
+++ b/backend/hardware/gpiochip/grbl.py
@@ -1,5 +1,5 @@
 from hardware.gpiochip.base import GPIOChip, LINE_REQ_DIR_OUT
-import logging
+from util.logger import MultiprocessingLogger
 
 class GRBLChip(GPIOChip):
     def __init__(self, gpio_config: dict, devices: dict):
@@ -13,6 +13,8 @@ def __init__(self, gpio_config: dict, devices: dict):
         dictionary mapping strings to pin numbers
         for adding human readable names to GPIO pins
         """
+        self._logger = MultiprocessingLogger.get_logger(__name__)
+
         self.grbl = devices[gpio_config["grblID"]]
         self.output_offsets = []
         self.output_values = []
@@ -20,7 +22,7 @@ def __init__(self, gpio_config: dict, devices: dict):
        if 'lineAliases' in gpio_config:
             for alias, pin in gpio_config['lineAliases'].items():
                 self.pinAliases[alias] = pin
-        logging.debug(self.pinAliases)
+        self._logger.debug(self.pinAliases)
 
     def __output(self):
         """
diff --git a/backend/hardware/grbl/serial.py b/backend/hardware/grbl/serial.py
index 277965f..8c73abe 100644
--- a/backend/hardware/grbl/serial.py
+++ b/backend/hardware/grbl/serial.py
@@ -1,6 +1,6 @@
 from hardware.grbl.base import GRBL
 import serial
-import logging
+from util.logger import MultiprocessingLogger
 
 class GRBLSerial(GRBL):
     def __init__(self, grbl_config: dict):
@@ -11,6 +11,8 @@ def __init__(self, grbl_config: dict):
         grblPort
             string - Serial device for communication with grbl
         """
+        self._logger = MultiprocessingLogger.get_logger(__name__)
+
         self.grblSer = serial.Serial(grbl_config["grblPort"], 115200, timeout=1)
 
     def grblWrite(self, command: str, retries=3):
@@ -22,7 +24,7 @@ def grblWrite(self, command: str, retries=3):
         response = self.grblSer.read_until()
         if "error" in str(response):
             if retries > 0:
-                logging.warning("grbl error: {0} for command: {1}, retrying"
+                self._logger.warning("grbl error: {0} for command: {1}, retrying"
                                 .format(response, command))
                 self.grblWrite(command, retries - 1)
             else:

From 9d6329dec3ca7531cbdcdf0bfe76efa544953ce4 Mon Sep 17 00:00:00 2001
From: Fall Byrd
Date: Wed, 1 Jan 2025 12:03:39 -0800
Subject: [PATCH 12/13] Moved config validation and writing to main process only

---
 backend/api/core.py |  2 --
 backend/config.py   | 20 +++++++++++---------
 backend/main.py     |  3 ++-
 3 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/backend/api/core.py b/backend/api/core.py
index 0aed2bc..e995554 100644
--- a/backend/api/core.py
+++ b/backend/api/core.py
@@ -2,8 +2,6 @@
 Module init. Contains function for starting up the flask process
 """
 
-import logging
-
 from api.routes import RouteManager
 from api.server import WaitressAPIServer
 from api.app import FlaskApp
diff --git a/backend/config.py b/backend/config.py
index c6a3967..c324863 100644
--- a/backend/config.py
+++ b/backend/config.py
@@ -19,24 +19,26 @@ class to abstract away setters to write to disk, and
     ability to reload changes from disk.
""" def __init__(self): - vdt = Validator() + config_file_name = '/etc/microlab/microlab.ini' - configFileName = '/etc/microlab/microlab.ini' + makedirs(path.dirname(config_file_name), exist_ok=True) - makedirs(path.dirname(configFileName), exist_ok=True) + self.config = ConfigObj(config_file_name, configspec="defaultconfig.ini") - self.config = ConfigObj(configFileName, configspec="defaultconfig.ini") + def validate_config(self): - res = self.config.validate(vdt, copy=True, preserve_errors=True) + validator = Validator() + + validation_data = self.config.validate(validator, copy=True, preserve_errors=True) self.config.write() - for entry in flatten_errors(self.config, res): + for entry in flatten_errors(self.config, validation_data): section_list, key, error = entry - partialKey = self.config + partial_key = self.config for section in section_list: - partialKey = partialKey[section] - default = partialKey.restore_default(key) + partial_key = partial_key[section] + default = partial_key.restore_default(key) if key is not None: section_list.append(key) diff --git a/backend/main.py b/backend/main.py index c8609c0..07fea4e 100644 --- a/backend/main.py +++ b/backend/main.py @@ -13,6 +13,7 @@ from api.core import run_flask from microlab.core import startMicrolabProcess from util.logger import MultiprocessingLogger +from config import microlabConfig class BackendManager: @@ -99,7 +100,6 @@ def _start_server(self): def run(self): config.initialSetup() - self._logger.info("### STARTING MAIN MICROLAB SERVICE ###") self._start_microlab() @@ -115,6 +115,7 @@ def run(self): def main(): + microlabConfig.validate_config() backend_manager = BackendManager() backend_manager.run() From 8e7e1ed5c01abcf60a2341094eaca8e2819e90ae Mon Sep 17 00:00:00 2001 From: Fall Byrd Date: Wed, 1 Jan 2025 12:44:54 -0800 Subject: [PATCH 13/13] Add config init to start of tests, removed some unused imports in tests --- backend/tests/devicelist_test.py | 6 +++--- backend/tests/tasks_test.py | 5 ++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/backend/tests/devicelist_test.py b/backend/tests/devicelist_test.py index f19bdee..0840ec7 100644 --- a/backend/tests/devicelist_test.py +++ b/backend/tests/devicelist_test.py @@ -1,8 +1,8 @@ +from config import microlabConfig +microlabConfig.validate_config() + from hardware.devicelist import validateConfiguration, sort_device_configs import pytest -import yaml -import hardware.core -from unittest.mock import patch, MagicMock def test_detects_deplicate_ids(): diff --git a/backend/tests/tasks_test.py b/backend/tests/tasks_test.py index 944cfc8..488b5d8 100644 --- a/backend/tests/tasks_test.py +++ b/backend/tests/tasks_test.py @@ -1,8 +1,11 @@ +from config import microlabConfig +microlabConfig.validate_config() + from recipes import tasks import pytest import yaml import hardware.core -from unittest.mock import patch, MagicMock +from unittest.mock import MagicMock from util.logger import MultiprocessingLogger MultiprocessingLogger._logging_queue = MagicMock()