diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000..a7b191e2d
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,9 @@
+repos:
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    # Ruff version.
+    rev: v0.1.7
+    hooks:
+      # Run the linter.
+      - id: ruff
+      # Run the formatter.
+      - id: ruff-format
\ No newline at end of file
diff --git a/pynxtools/dataconverter/convert.py b/pynxtools/dataconverter/convert.py
index 59af45eac..a03a9e909 100644
--- a/pynxtools/dataconverter/convert.py
+++ b/pynxtools/dataconverter/convert.py
@@ -56,7 +56,9 @@ def entry_points(group):
 
 def get_reader(reader_name) -> BaseReader:
     """Helper function to get the reader object from its given name"""
-    path_prefix = f"{os.path.dirname(__file__)}{os.sep}" if os.path.dirname(__file__) else ""
+    path_prefix = (
+        f"{os.path.dirname(__file__)}{os.sep}" if os.path.dirname(__file__) else ""
+    )
     path = os.path.join(path_prefix, "readers", reader_name, "reader.py")
     spec = importlib.util.spec_from_file_location("reader.py", path)
     try:
@@ -64,10 +66,9 @@ def get_reader(reader_name) -> BaseReader:
         spec.loader.exec_module(module)  # type: ignore[attr-defined]
     except FileNotFoundError as exc:
         # pylint: disable=unexpected-keyword-arg
-        importlib_module = entry_points(group='pynxtools.reader')
-        if (
-            importlib_module
-            and reader_name in map(lambda ep: ep.name, importlib_module)
+        importlib_module = entry_points(group="pynxtools.reader")
+        if importlib_module and reader_name in map(
+            lambda ep: ep.name, importlib_module
         ):
             return importlib_module[reader_name].load()
         raise ValueError(f"The reader, {reader_name}, was not found.") from exc
@@ -76,15 +77,21 @@ def get_names_of_all_readers() -> List[str]:
 
 def get_names_of_all_readers() -> List[str]:
     """Helper function to populate a list of all available readers"""
-    path_prefix = f"{os.path.dirname(__file__)}{os.sep}" if os.path.dirname(__file__) else ""
+    path_prefix = (
+        f"{os.path.dirname(__file__)}{os.sep}" if os.path.dirname(__file__) else ""
+    )
     files = sorted(glob.glob(os.path.join(path_prefix, "readers", "*", "reader.py")))
     all_readers = []
     for file in files:
         if f"{os.sep}base{os.sep}" not in file:
-            index_of_readers_folder_name = file.rindex(f"readers{os.sep}") + len(f"readers{os.sep}")
+            index_of_readers_folder_name = file.rindex(f"readers{os.sep}") + len(
+                f"readers{os.sep}"
+            )
             index_of_last_path_sep = file.rindex(os.sep)
-            all_readers.append(file[index_of_readers_folder_name:index_of_last_path_sep])
-    plugins = list(map(lambda ep: ep.name, entry_points(group='pynxtools.reader')))
+            all_readers.append(
+                file[index_of_readers_folder_name:index_of_last_path_sep]
+            )
+    plugins = list(map(lambda ep: ep.name, entry_points(group="pynxtools.reader")))
     return all_readers + plugins
 
@@ -113,25 +120,34 @@ def get_nxdl_root_and_path(nxdl: str):
     if nxdl == "NXtest":
         nxdl_f_path = os.path.join(
             f"{os.path.abspath(os.path.dirname(__file__))}/../../",
-            "tests", "data", "dataconverter", "NXtest.nxdl.xml")
+            "tests",
+            "data",
+            "dataconverter",
+            "NXtest.nxdl.xml",
+        )
     elif nxdl == "NXroot":
         nxdl_f_path = os.path.join(definitions_path, "base_classes", "NXroot.nxdl.xml")
     else:
-        nxdl_f_path = os.path.join(definitions_path, "contributed_definitions", f"{nxdl}.nxdl.xml")
+        nxdl_f_path = os.path.join(
+            definitions_path, "contributed_definitions", f"{nxdl}.nxdl.xml"
+        )
         if not os.path.exists(nxdl_f_path):
-            nxdl_f_path = os.path.join(definitions_path, "applications", f"{nxdl}.nxdl.xml")
+            nxdl_f_path = os.path.join(
+                definitions_path, "applications", f"{nxdl}.nxdl.xml"
+            )
         if not os.path.exists(nxdl_f_path):
-            nxdl_f_path = os.path.join(definitions_path, "base_classes", f"{nxdl}.nxdl.xml")
+            nxdl_f_path = os.path.join(
+                definitions_path, "base_classes", f"{nxdl}.nxdl.xml"
+            )
         if not os.path.exists(nxdl_f_path):
             raise FileNotFoundError(f"The nxdl file, {nxdl}, was not found.")
 
     return ET.parse(nxdl_f_path).getroot(), nxdl_f_path
 
 
-def transfer_data_into_template(input_file,
-                                reader, nxdl_name,
-                                nxdl_root: Optional[ET.Element] = None,
-                                **kwargs):
+def transfer_data_into_template(
+    input_file, reader, nxdl_name, nxdl_root: Optional[ET.Element] = None, **kwargs
+):
     """Transfer, parse, and merge data from the input experimental file, config file, and ELN.
 
     Experimental and ELN files will be parsed and finally merged into the template.
@@ -164,32 +180,38 @@ def transfer_data_into_template(input_file,
         input_file = (input_file,)
 
     bulletpoint = "\n\u2022 "
-    logger.info("Using %s reader to convert the given files: %s ",
-                reader,
-                bulletpoint.join((" ", *input_file)))
+    logger.info(
+        "Using %s reader to convert the given files: %s ",
+        reader,
+        bulletpoint.join((" ", *input_file)),
+    )
 
     data_reader = get_reader(reader)
-    if not (nxdl_name in data_reader.supported_nxdls or "*" in data_reader.supported_nxdls):
-        raise NotImplementedError("The chosen NXDL isn't supported by the selected reader.")
+    if not (
+        nxdl_name in data_reader.supported_nxdls or "*" in data_reader.supported_nxdls
+    ):
+        raise NotImplementedError(
+            "The chosen NXDL isn't supported by the selected reader."
+        )
 
     data = data_reader().read(  # type: ignore[operator]
-        template=Template(template),
-        file_paths=input_file,
-        **kwargs
+        template=Template(template), file_paths=input_file, **kwargs
     )
     helpers.validate_data_dict(template, data, nxdl_root)
     return data
 
 
 # pylint: disable=too-many-arguments,too-many-locals
-def convert(input_file: Tuple[str, ...],
-            reader: str,
-            nxdl: str,
-            output: str,
-            generate_template: bool = False,
-            fair: bool = False,
-            undocumented: bool = False,
-            **kwargs):
+def convert(
+    input_file: Tuple[str, ...],
+    reader: str,
+    nxdl: str,
+    output: str,
+    generate_template: bool = False,
+    fair: bool = False,
+    undocumented: bool = False,
+    **kwargs,
+):
     """The conversion routine that takes the input parameters and calls the necessary functions.
 
     Parameters
@@ -223,13 +245,19 @@ def convert(input_file: Tuple[str, ...],
         logger.info(template)
         return
 
-    data = transfer_data_into_template(input_file=input_file, reader=reader,
-                                       nxdl_name=nxdl, nxdl_root=nxdl_root,
-                                       **kwargs)
+    data = transfer_data_into_template(
+        input_file=input_file,
+        reader=reader,
+        nxdl_name=nxdl,
+        nxdl_root=nxdl_root,
+        **kwargs,
+    )
     if undocumented:
         logger.setLevel(UNDOCUMENTED)
     if fair and data.undocumented.keys():
-        logger.warning("There are undocumented paths in the template. This is not acceptable!")
+        logger.warning(
+            "There are undocumented paths in the template. This is not acceptable!"
+        )
         return
 
     for path in data.undocumented.keys():
@@ -238,7 +266,7 @@ def convert(input_file: Tuple[str, ...],
         logger.log(
             UNDOCUMENTED,
             "The path, %s, is being written but has no documentation.",
-            path
+            path,
         )
     helpers.add_default_root_attributes(data=data, filename=os.path.basename(output))
     Writer(data=data, nxdl_f_path=nxdl_f_path, output_path=output).write()
@@ -248,7 +276,7 @@ def parse_params_file(params_file):
 
 def parse_params_file(params_file):
     """Parses the parameters from a given dictionary and returns them"""
-    params = yaml.load(params_file, Loader=yaml.Loader)['dataconverter']
+    params = yaml.load(params_file, Loader=yaml.Loader)["dataconverter"]
     for param in list(params.keys()):
         params[param.replace("-", "_")] = params.pop(param)
     return params
@@ -256,81 +284,89 @@ def parse_params_file(params_file):
 
 @click.command()
 @click.option(
-    '--input-file',
+    "--input-file",
     default=[],
     multiple=True,
-    help='The path to the input data file to read. (Repeat for more than one file.)'
+    help="The path to the input data file to read. (Repeat for more than one file.)",
 )
 @click.option(
-    '--reader',
-    default='json_map',
+    "--reader",
+    default="json_map",
     type=click.Choice(get_names_of_all_readers(), case_sensitive=False),
-    help='The reader to use. default="example"'
+    help='The reader to use. default="json_map"',
 )
 @click.option(
-    '--nxdl',
+    "--nxdl",
     default=None,
     required=False,
-    help='The name of the NXDL file to use without extension.'
+    help="The name of the NXDL file to use without extension.",
 )
 @click.option(
-    '--output',
-    default='output.nxs',
-    help='The path to the output NeXus file to be generated.'
+    "--output",
+    default="output.nxs",
+    help="The path to the output NeXus file to be generated.",
 )
 @click.option(
-    '--generate-template',
+    "--generate-template",
     is_flag=True,
     default=False,
-    help='Just print out the template generated from given NXDL file.'
+    help="Just print out the template generated from given NXDL file.",
 )
 @click.option(
-    '--fair',
+    "--fair",
     is_flag=True,
     default=False,
-    help='Let the converter know to be stricter in checking the documentation.'
+    help="Let the converter know to be stricter in checking the documentation.",
 )
 @click.option(
-    '--params-file',
-    type=click.File('r'),
+    "--params-file",
+    type=click.File("r"),
     default=None,
-    help='Allows to pass a .yaml file with all the parameters the converter supports.'
+    help="Allows passing a .yaml file with all the parameters the converter supports.",
 )
 @click.option(
-    '--undocumented',
+    "--undocumented",
     is_flag=True,
     default=False,
-    help='Shows a log output for all undocumented fields'
+    help="Shows a log output for all undocumented fields",
 )
 @click.option(
-    '--mapping',
-    help='Takes a .mapping.json file and converts data from given input files.'
+    "--mapping",
+    help="Takes a .mapping.json file and converts data from given input files.",
 )
 # pylint: disable=too-many-arguments
-def convert_cli(input_file: Tuple[str, ...],
-                reader: str,
-                nxdl: str,
-                output: str,
-                generate_template: bool,
-                fair: bool,
-                params_file: str,
-                undocumented: bool,
-                mapping: str):
+def convert_cli(
+    input_file: Tuple[str, ...],
+    reader: str,
+    nxdl: str,
+    output: str,
+    generate_template: bool,
+    fair: bool,
+    params_file: str,
+    undocumented: bool,
+    mapping: str,
+):
     """The CLI entrypoint for the convert function"""
     if params_file:
         try:
             convert(**parse_params_file(params_file))
         except TypeError as exc:
             sys.tracebacklimit = 0
-            raise TypeError(("Please make sure you have the following entries in your "
-                             "parameter file:\n\n# NeXusParser Parameter File - v0.0.1"
-                             "\n\ndataconverter:\n\treader: value\n\tnxdl: value\n\tin"
-                             "put-file: value")) from exc
+            raise TypeError(
+                (
+                    "Please make sure you have the following entries in your "
+                    "parameter file:\n\n# NeXusParser Parameter File - v0.0.1"
+                    "\n\ndataconverter:\n\treader: value\n\tnxdl: value\n\tin"
+                    "put-file: value"
+                )
+            ) from exc
     else:
         if nxdl is None:
             sys.tracebacklimit = 0
-            raise IOError("\nError: Please supply an NXDL file with the option:"
-                          " --nxdl ")
+            raise IOError(
+                "\nError: Please supply an NXDL file with the option:"
+                " --nxdl "
+            )
         if mapping:
             reader = "json_map"
         if mapping:
@@ -338,5 +374,5 @@ def convert_cli(input_file: Tuple[str, ...],
     convert(input_file, reader, nxdl, output, generate_template, fair, undocumented)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     convert_cli()  # pylint: disable=no-value-for-parameter
diff --git a/pynxtools/dataconverter/hdfdict.py b/pynxtools/dataconverter/hdfdict.py
index a4bbf87e6..3292e457f 100644
--- a/pynxtools/dataconverter/hdfdict.py
+++ b/pynxtools/dataconverter/hdfdict.py
@@ -164,7 +164,9 @@ def pack_dataset(hdfobject, key, value):
         # Obviously the data was not serializable. To give it
         # a last try, serialize it to yaml
         # and save it to the hdf file:
-        dataset = hdfobject.create_dataset(name=key, data=string_(yaml.safe_dump(value)))
+        dataset = hdfobject.create_dataset(
+            name=key, data=string_(yaml.safe_dump(value))
+        )
         dataset.attrs.create(name=TYPEID, data=string_("yaml"))
         # if this fails again, restructure your data!
diff --git a/pynxtools/dataconverter/helpers.py b/pynxtools/dataconverter/helpers.py
index 57d526f4b..9805b88eb 100644
--- a/pynxtools/dataconverter/helpers.py
+++ b/pynxtools/dataconverter/helpers.py
@@ -52,8 +52,10 @@ def get_nxdl_name_from_elem(xml_element) -> str:
     if "name" in xml_element.attrib:
         name_to_add = xml_element.attrib["name"]
     elif "type" in xml_element.attrib:
-        name_to_add = (f"{convert_nexus_to_caps(xml_element.attrib['type'])}"
-                       f"[{convert_nexus_to_suggested_name(xml_element.attrib['type'])}]")
+        name_to_add = (
+            f"{convert_nexus_to_caps(xml_element.attrib['type'])}"
+            f"[{convert_nexus_to_suggested_name(xml_element.attrib['type'])}]"
+        )
     return name_to_add
@@ -70,14 +72,17 @@ def get_all_defined_required_children_for_elem(xml_element):
             if tag in ("field", "attribute"):
                 name_to_add = f"@{name_to_add}" if tag == "attribute" else name_to_add
                 list_of_children_to_add.add(name_to_add)
-                if tag == "field" \
-                        and ("units" in child.attrib.keys()
-                             and child.attrib["units"] != "NX_UNITLESS"):
+                if tag == "field" and (
+                    "units" in child.attrib.keys()
+                    and child.attrib["units"] != "NX_UNITLESS"
+                ):
                     list_of_children_to_add.add(f"{name_to_add}/@units")
             elif tag == "group":
-                nxdlpath = f'{xml_element.get("nxdlpath")}/{get_nxdl_name_from_elem(child)}'
+                nxdlpath = (
+                    f'{xml_element.get("nxdlpath")}/{get_nxdl_name_from_elem(child)}'
+                )
                 nxdlbase = xml_element.get("nxdlbase")
-                nx_name = nxdlbase[nxdlbase.rfind("/") + 1:nxdlbase.rfind(".nxdl")]
+                nx_name = nxdlbase[nxdlbase.rfind("/") + 1 : nxdlbase.rfind(".nxdl")]
                 if nxdlpath not in visited_paths:
                     visited_paths.append(nxdlpath)
                     children = get_all_defined_required_children(nxdlpath, nx_name)
@@ -110,12 +115,16 @@ def add_inherited_children(list_of_children_to_add, path, nxdl_root, template):
             child_path = f"{path.rsplit('/', 1)[0]}/{child}"
             if child_path not in template.keys():
                 optional_parent = check_for_optional_parent(child_path, nxdl_root)
-                optionality = "required" if optional_parent == "<<NOT_FOUND>>" else "optional"
+                optionality = (
+                    "required" if optional_parent == "<<NOT_FOUND>>" else "optional"
+                )
                 template[optionality][f"{path.rsplit('/', 1)[0]}/{child}"] = None
     return template
 
 
-def generate_template_from_nxdl(root, template, path="", nxdl_root=None, nxdl_name=None):
+def generate_template_from_nxdl(
+    root, template, path="", nxdl_root=None, nxdl_name=None
+):
     """Helper function to generate a template dictionary for given NXDL"""
     if nxdl_root is None:
         nxdl_name = root.attrib["name"]
@@ -129,9 +138,9 @@ def generate_template_from_nxdl(root, template, path="", nxdl_root=None, nxdl_na
 
     suffix = ""
     if "name" in root.attrib:
-        suffix = root.attrib['name']
+        suffix = root.attrib["name"]
     elif "type" in root.attrib:
-        nexus_class = convert_nexus_to_caps(root.attrib['type'])
+        nexus_class = convert_nexus_to_caps(root.attrib["type"])
         hdf5name = f"[{convert_nexus_to_suggested_name(root.attrib['type'])}]"
         suffix = f"{nexus_class}{hdf5name}"
@@ -142,25 +151,32 @@ def generate_template_from_nxdl(root, template, path="", nxdl_root=None, nxdl_na
         optionality = get_required_string(root)
         if optionality == "required":
             optional_parent = check_for_optional_parent(path, nxdl_root)
-            optionality = "required" if optional_parent == "<<NOT_FOUND>>" else "optional"
+            optionality = (
+                "required" if optional_parent == "<<NOT_FOUND>>" else "optional"
+            )
             if optional_parent != "<<NOT_FOUND>>":
                 template.optional_parents.append(optional_parent)
         template[optionality][path] = None
 
         # Only add units if it is a field and the units are defined but not set to NX_UNITLESS
-        if tag == "field" \
-                and ("units" in root.attrib.keys() and root.attrib["units"] != "NX_UNITLESS"):
+        if tag == "field" and (
+            "units" in root.attrib.keys() and root.attrib["units"] != "NX_UNITLESS"
+        ):
             template[optionality][f"{path}/@units"] = None
 
         parent_path = convert_data_converter_dict_to_nxdl_path(path.rsplit("/", 1)[0])
-        list_of_children_to_add = get_all_defined_required_children(parent_path, nxdl_name)
+        list_of_children_to_add = get_all_defined_required_children(
+            parent_path, nxdl_name
+        )
         add_inherited_children(list_of_children_to_add, path, nxdl_root, template)
     elif tag == "group" and is_a_lone_group(root):
         template[get_required_string(root)][path] = None
         template["lone_groups"].append(path)
         path_nxdl = convert_data_converter_dict_to_nxdl_path(path)
-        list_of_children_to_add = get_all_defined_required_children(path_nxdl, nxdl_name)
+        list_of_children_to_add = get_all_defined_required_children(
+            path_nxdl, nxdl_name
+        )
         add_inherited_children(list_of_children_to_add, path, nxdl_root, template)
     # Handling link: link has a target attribute that stores the absolute path of the concept to be
     # linked. Writer reads link from template in the format {'link': }
@@ -175,7 +191,7 @@ def generate_template_from_nxdl(root, template, path="", nxdl_root=None, nxdl_na
         # if optionality == "optional":
         #     template.optional_parents.append(optional_parent)
         optionality = "optional"
-        template[optionality][path] = {'link': root.attrib['target']}
+        template[optionality][path] = {"link": root.attrib["target"]}
 
     for child in root:
         generate_template_from_nxdl(child, template, path, nxdl_root, nxdl_name)
@@ -201,7 +217,7 @@ def convert_data_converter_entry_to_nxdl_path_entry(entry) -> Union[str, None]:
     Helper function to convert data converter style entry to NXDL style entry:
     ENTRY[entry] -> ENTRY
     """
-    regex = re.compile(r'(.*?)(?=\[)')
+    regex = re.compile(r"(.*?)(?=\[)")
     results = regex.search(entry)
     return entry if results is None else results.group(1)
@@ -211,9 +227,9 @@ def convert_data_converter_dict_to_nxdl_path(path) -> str:
     Helper function to convert data converter style path to NXDL style path:
     /ENTRY[entry]/sample -> /ENTRY/sample
     """
-    nxdl_path = ''
-    for entry in path.split('/')[1:]:
-        nxdl_path += '/' + convert_data_converter_entry_to_nxdl_path_entry(entry)
+    nxdl_path = ""
+    for entry in path.split("/")[1:]:
+        nxdl_path += "/" + convert_data_converter_entry_to_nxdl_path_entry(entry)
     return nxdl_path
@@ -222,7 +238,7 @@ def get_name_from_data_dict_entry(entry: str) -> str:
     ENTRY[entry] -> entry
 
     """
-    regex = re.compile(r'(?<=\[)(.*?)(?=\])')
+    regex = re.compile(r"(?<=\[)(.*?)(?=\])")
     results = regex.search(entry)
     if results is None:
         return entry
@@ -236,9 +252,9 @@ def convert_data_dict_path_to_hdf5_path(path) -> str:
 
     /ENTRY[entry]/sample -> /entry/sample
     """
-    hdf5path = ''
-    for entry in path.split('/')[1:]:
-        hdf5path += '/' + get_name_from_data_dict_entry(entry)
+    hdf5path = ""
+    for entry in path.split("/")[1:]:
+        hdf5path += "/" + get_name_from_data_dict_entry(entry)
     return hdf5path
@@ -246,7 +262,9 @@ def is_value_valid_element_of_enum(value, elem) -> Tuple[bool, list]:
     """Checks whether a value has to be specific from the NXDL enumeration and returns options."""
     if elem is not None:
         has_enums, enums = nexus.get_enums(elem)
-        if has_enums and (isinstance(value, list) or value not in enums[0:-1] or value == ""):
+        if has_enums and (
+            isinstance(value, list) or value not in enums[0:-1] or value == ""
+        ):
             return False, enums
     return True, []
@@ -264,10 +282,22 @@ def is_value_valid_element_of_enum(value, elem) -> Tuple[bool, list]:
     "NX_FLOAT": (float, np.ndarray, np.floating),
     "NX_INT": (int, np.ndarray, np.signedinteger),
     "NX_UINT": (np.ndarray, np.unsignedinteger),
-    "NX_NUMBER": (int, float, np.ndarray, np.signedinteger, np.unsignedinteger, np.floating, dict),
-    "NX_POSINT": (int, np.ndarray, np.signedinteger),  # > 0 is checked in is_valid_data_field()
+    "NX_NUMBER": (
+        int,
+        float,
+        np.ndarray,
+        np.signedinteger,
+        np.unsignedinteger,
+        np.floating,
+        dict,
+    ),
+    "NX_POSINT": (
+        int,
+        np.ndarray,
+        np.signedinteger,
+    ),  # > 0 is checked in is_valid_data_field()
     "NX_COMPLEX": (complex, np.ndarray, np.cdouble, np.csingle),
-    "NXDL_TYPE_UNAVAILABLE": (str,)  # Defaults to a string if a type is not provided.
+    "NXDL_TYPE_UNAVAILABLE": (str,),  # Defaults to a string if a type is not provided.
 }
@@ -290,6 +320,7 @@ def is_valid_data_type(value, accepted_types):
 
 def is_positive_int(value):
     """Checks whether the given value or its children are positive."""
+
     def is_greater_than(num):
         return num.flat[0] > 0 if isinstance(num, np.ndarray) else num > 0
@@ -302,7 +333,7 @@ def is_greater_than(num):
 
 def convert_str_to_bool_safe(value):
     """Only returns True or False if someone mistakenly adds quotation marks but means a bool.
-    For everything else it returns None.
+    For everything else it returns None.
     """
     if value.lower() == "true":
         return True
@@ -314,12 +345,12 @@ def is_valid_data_field(value, nxdl_type, path):
     """Checks whether a given value is valid according to what is defined in the NXDL.
 
-    This function will also try to convert typical types, for example int to float,
-    and return the successful conversion.
+    This function will also try to convert typical types, for example int to float,
+    and return the successful conversion.
 
-    If it fails to convert, it raises an Exception.
+    If it fails to convert, it raises an Exception.
 
-    As a default it just returns the value again.
+    As a default it just returns the value again.
     """
     accepted_types = NEXUS_TO_PYTHON_DATA_TYPES[nxdl_type]
@@ -331,20 +362,26 @@ def is_valid_data_field(value, nxdl_type, path):
                 raise ValueError
             return accepted_types[0](value)
         except ValueError as exc:
-            raise ValueError(f"The value at {path} should be of Python type: {accepted_types}"
-                             f", as defined in the NXDL as {nxdl_type}.") from exc
+            raise ValueError(
+                f"The value at {path} should be of Python type: {accepted_types}"
+                f", as defined in the NXDL as {nxdl_type}."
+            ) from exc
 
     if nxdl_type == "NX_POSINT" and not is_positive_int(value):
         raise ValueError(f"The value at {path} should be a positive int.")
 
     if nxdl_type in ("ISO8601", "NX_DATE_TIME"):
-        iso8601 = re.compile(r"^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:"
-                             r"\.\d*)?)(((?!-00:00)(\+|-)(\d{2}):(\d{2})|Z){1})$")
+        iso8601 = re.compile(
+            r"^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:"
+            r"\.\d*)?)(((?!-00:00)(\+|-)(\d{2}):(\d{2})|Z){1})$"
+        )
         results = iso8601.search(value)
         if results is None:
-            raise ValueError(f"The date at {path} should be a timezone aware ISO8601 "
-                             f"formatted str. For example, 2022-01-22T12:14:12.05018Z"
-                             f" or 2022-01-22T12:14:12.05018+00:00.")
+            raise ValueError(
+                f"The date at {path} should be a timezone aware ISO8601 "
+                f"formatted str. For example, 2022-01-22T12:14:12.05018Z"
+                f" or 2022-01-22T12:14:12.05018+00:00."
+            )
 
     return value
 
@@ -375,24 +412,31 @@ def check_for_optional_parent(path: str, nxdl_root: ET.Element) -> str:
 
 def is_node_required(nxdl_key, nxdl_root):
     """Checks whether a node at given nxdl path is required"""
-    if nxdl_key[nxdl_key.rindex("/") + 1:] == "@units":
+    if nxdl_key[nxdl_key.rindex("/") + 1 :] == "@units":
         return False
     if nxdl_key[nxdl_key.rindex("/") + 1] == "@":
-        nxdl_key = nxdl_key[0:nxdl_key.rindex("/") + 1] + nxdl_key[nxdl_key.rindex("/") + 2:]
+        nxdl_key = (
+            nxdl_key[0 : nxdl_key.rindex("/") + 1]
+            + nxdl_key[nxdl_key.rindex("/") + 2 :]
+        )
     node = nexus.get_node_at_nxdl_path(nxdl_key, elem=nxdl_root, exc=False)
     return nexus.get_required_string(node) == "<<REQUIRED>>"
 
 
 def all_required_children_are_set(optional_parent_path, data, nxdl_root):
     """Walks over optional parent's children and makes sure all required ones are set"""
-    optional_parent_path = convert_data_converter_dict_to_nxdl_path(optional_parent_path)
+    optional_parent_path = convert_data_converter_dict_to_nxdl_path(
+        optional_parent_path
+    )
     for key in data:
         if key in data["lone_groups"]:
             continue
         nxdl_key = convert_data_converter_dict_to_nxdl_path(key)
-        if nxdl_key[0:nxdl_key.rfind("/")] == optional_parent_path \
-                and is_node_required(nxdl_key, nxdl_root) \
-                and data[key] is None:
+        if (
+            nxdl_key[0 : nxdl_key.rfind("/")] == optional_parent_path
+            and is_node_required(nxdl_key, nxdl_root)
+            and data[key] is None
+        ):
             return False
 
     return True
@@ -401,27 +445,25 @@ def all_required_children_are_set(optional_parent_path, data, nxdl_root):
 def is_nxdl_path_a_child(nxdl_path: str, parent: str):
     """Takes an NXDL path for an element and an NXDL parent and confirms it is a child."""
     while nxdl_path.rfind("/") != -1:
-        nxdl_path = nxdl_path[0:nxdl_path.rfind("/")]
+        nxdl_path = nxdl_path[0 : nxdl_path.rfind("/")]
         if parent == nxdl_path:
             return True
     return False
 
 
-def check_optionality_based_on_parent_group(
-        path,
-        nxdl_path,
-        nxdl_root,
-        data,
-        template):
+def check_optionality_based_on_parent_group(path, nxdl_path, nxdl_root, data, template):
     """Checks whether field is part of an optional parent and then confirms its optionality"""
     for optional_parent in template["optional_parents"]:
         optional_parent_nxdl = convert_data_converter_dict_to_nxdl_path(optional_parent)
-        if is_nxdl_path_a_child(nxdl_path, optional_parent_nxdl) \
-                and not all_required_children_are_set(optional_parent, data, nxdl_root):
-            raise LookupError(f"The data entry, {path}, has an optional parent, "
-                              f"{optional_parent}, with required children set. Either"
-                              f" provide no children for {optional_parent} or provide"
-                              f" all required ones.")
+        if is_nxdl_path_a_child(
+            nxdl_path, optional_parent_nxdl
+        ) and not all_required_children_are_set(optional_parent, data, nxdl_root):
+            raise LookupError(
+                f"The data entry, {path}, has an optional parent, "
+                f"{optional_parent}, with required children set. Either"
+                f" provide no children for {optional_parent} or provide"
+                f" all required ones."
+            )
 
 
 def is_group_part_of_path(path_to_group: str, path_of_entry: str) -> bool:
@@ -450,7 +492,7 @@ def does_group_exist(path_to_group, data):
 def ensure_all_required_fields_exist(template, data, nxdl_root):
     """Checks whether all the required fields are in the returned data object."""
     for path in template["required"]:
-        entry_name = get_name_from_data_dict_entry(path[path.rindex('/') + 1:])
+        entry_name = get_name_from_data_dict_entry(path[path.rindex("/") + 1 :])
         if entry_name == "@units":
             continue
         nxdl_path = convert_data_converter_dict_to_nxdl_path(path)
@@ -460,22 +502,28 @@ def ensure_all_required_fields_exist(template, data, nxdl_root):
         if path in template["lone_groups"]:
             opt_parent = check_for_optional_parent(path, nxdl_root)
             if opt_parent != "<<NOT_FOUND>>":
-                if does_group_exist(opt_parent, data) and not does_group_exist(renamed_path, data):
-                    raise ValueError(f"The required group, {path}, hasn't been supplied"
-                                     f" while its optional parent, {path}, is supplied.")
+                if does_group_exist(opt_parent, data) and not does_group_exist(
+                    renamed_path, data
+                ):
+                    raise ValueError(
+                        f"The required group, {path}, hasn't been supplied"
+                        f" while its optional parent, {opt_parent}, is supplied."
+                    )
                 continue
             if not does_group_exist(renamed_path, data):
                 raise ValueError(f"The required group, {path}, hasn't been supplied.")
             continue
         if not is_path_in_data_dict or data[renamed_path] is None:
-            raise ValueError(f"The data entry corresponding to {path} is required "
-                             f"and hasn't been supplied by the reader.")
+            raise ValueError(
+                f"The data entry corresponding to {path} is required "
+                f"and hasn't been supplied by the reader."
+            )
 
 
 def try_undocumented(data, nxdl_root: ET.Element):
     """Tries to move used entries that come from base classes but are not in the AppDef"""
     for path in list(data.undocumented):
-        entry_name = get_name_from_data_dict_entry(path[path.rindex('/') + 1:])
+        entry_name = get_name_from_data_dict_entry(path[path.rindex("/") + 1 :])
 
         nxdl_path = convert_data_converter_dict_to_nxdl_path(path)
@@ -484,7 +532,7 @@ def try_undocumented(data, nxdl_root: ET.Element):
 
         if entry_name[0] == "@" and "@" in nxdl_path:
             index_of_at = nxdl_path.rindex("@")
-            nxdl_path = nxdl_path[0:index_of_at] + nxdl_path[index_of_at + 1:]
+            nxdl_path = nxdl_path[0:index_of_at] + nxdl_path[index_of_at + 1 :]
 
         try:
             elem = nexus.get_node_at_nxdl_path(nxdl_path=nxdl_path, elem=nxdl_root)
@@ -512,7 +560,7 @@ def validate_data_dict(template, data, nxdl_root: ET.Element):
 
     for path in data.get_documented().keys():
         if data[path] is not None:
-            entry_name = get_name_from_data_dict_entry(path[path.rindex('/') + 1:])
+            entry_name = get_name_from_data_dict_entry(path[path.rindex("/") + 1 :])
             nxdl_path = convert_data_converter_dict_to_nxdl_path(path)
 
             if entry_name == "@units":
@@ -520,7 +568,7 @@ def validate_data_dict(template, data, nxdl_root: ET.Element):
 
             if entry_name[0] == "@" and "@" in nxdl_path:
                 index_of_at = nxdl_path.rindex("@")
-                nxdl_path = nxdl_path[0:index_of_at] + nxdl_path[index_of_at + 1:]
+                nxdl_path = nxdl_path[0:index_of_at] + nxdl_path[index_of_at + 1 :]
 
             if nxdl_path in nxdl_path_to_elm:
                 elem = nxdl_path_to_elm[nxdl_path]
@@ -530,18 +578,28 @@ def validate_data_dict(template, data, nxdl_root: ET.Element):
 
             # Only check for validation in the NXDL if we did find the entry
             # otherwise we just pass it along
-            if elem is not None \
-                    and elem.attrib["name"] == entry_name \
-                    and remove_namespace_from_tag(elem.tag) in ("field", "attribute"):
-                check_optionality_based_on_parent_group(path, nxdl_path, nxdl_root, data, template)
+            if (
+                elem is not None
+                and elem.attrib["name"] == entry_name
+                and remove_namespace_from_tag(elem.tag) in ("field", "attribute")
+            ):
+                check_optionality_based_on_parent_group(
+                    path, nxdl_path, nxdl_root, data, template
+                )
 
                 attrib = elem.attrib
-                nxdl_type = attrib["type"] if "type" in attrib.keys() else "NXDL_TYPE_UNAVAILABLE"
+                nxdl_type = (
+                    attrib["type"]
+                    if "type" in attrib.keys()
+                    else "NXDL_TYPE_UNAVAILABLE"
+                )
                 data[path] = is_valid_data_field(data[path], nxdl_type, path)
                 is_valid_enum, enums = is_value_valid_element_of_enum(data[path], elem)
                 if not is_valid_enum:
-                    raise ValueError(f"The value at {path} should be on of the "
-                                     f"following strings: {enums}")
+                    raise ValueError(
+                        f"The value at {path} should be one of the "
+                        f"following strings: {enums}"
+                    )
 
     return True
@@ -561,17 +619,21 @@ def get_first_group(root):
 
 def check_for_valid_atom_types(atoms: Union[str, list]):
-    """Check for whether atom exists in periodic table. """
+    """Check whether the atom exists in the periodic table."""
     if isinstance(atoms, list):
         for elm in atoms:
             if elm not in chemical_symbols:
-                raise ValueError(f"The element {elm} is not found in periodictable, "
-                                 f"check for correct element name")
+                raise ValueError(
+                    f"The element {elm} is not found in the periodic table, "
+                    f"check for the correct element name"
+                )
     elif isinstance(atoms, str):
         if atoms not in chemical_symbols:
-            raise ValueError(f"The element {atoms} is not found in periodictable, "
-                             f"check for correct element name")
+            raise ValueError(
+                f"The element {atoms} is not found in the periodic table, "
+                f"check for the correct element name"
+            )
@@ -580,10 +642,10 @@ def convert_to_hill(atoms_typ):
     atoms_typ = list(atoms_typ)
     atoms_typ = sorted(atoms_typ)
     atom_list = []
-    if 'C' in atoms_typ:
-        atom_list.append('C')
-    if 'H' in atoms_typ:
-        atom_list.append('H')
+    if "C" in atoms_typ:
+        atom_list.append("C")
+    if "H" in atoms_typ:
+        atom_list.append("H")
     if atom_list:
         for char in atom_list:
             atoms_typ.remove(char)
@@ -594,12 +656,15 @@ def add_default_root_attributes(data, filename):
     """
     Takes a dict/Template and adds NXroot fields/attributes that are inherently available
     """
+
     def update_and_warn(key: str, value: str):
         if key in data and data[key] != value:
             logger.warning(
                 "The NXroot entry '%s' (value: %s) should not be populated by the reader. "
                 "This is overwritten by the actually used value '%s'",
-                key, data[key], value
+                key,
+                data[key],
+                value,
             )
         data[key] = value
@@ -610,14 +675,14 @@ def update_and_warn(key: str, value: str):
     update_and_warn(
         "/@NeXus_repository",
         "https://github.com/FAIRmat-NFDI/nexus_definitions/"
-        f"blob/{get_nexus_version_hash()}"
+        f"blob/{get_nexus_version_hash()}",
    )
     update_and_warn("/@NeXus_version", get_nexus_version())
-    update_and_warn("/@HDF5_version", '.'.join(map(str, h5py.h5.get_libversion())))
+    update_and_warn("/@HDF5_version", ".".join(map(str, h5py.h5.get_libversion())))
     update_and_warn("/@h5py_version", h5py.__version__)
 
 
-def extract_atom_types(formula, mode='hill'):
+def extract_atom_types(formula, mode="hill"):
     """Extract atom types from a chemical formula."""
     atom_types: set = set()
     element: str = ""
@@ -647,7 +712,7 @@ def extract_atom_types(formula, mode='hill'):
     atom_types = list(atom_types)
     atom_types = sorted(atom_types)
 
-    if mode == 'hill':
+    if mode == "hill":
         return convert_to_hill(atom_types)
 
     return atom_types
@@ -675,7 +740,7 @@ def transform_to_intended_dt(str_value: Any) -> Optional[Any]:
         Converted data type
     """
-    symbol_list_for_data_seperation = [';', ' ']
+    symbol_list_for_data_seperation = [";", " "]
     transformed: Any = None
 
     if isinstance(str_value, list):
@@ -694,7 +759,7 @@ def transform_to_intended_dt(str_value: Any) -> Optional[Any]:
         try:
             transformed = float(str_value)
         except ValueError:
-            if '[' in str_value and ']' in str_value:
+            if "[" in str_value and "]" in str_value:
                 transformed = json.loads(str_value)
         if transformed is not None:
             return transformed
@@ -713,11 +778,11 @@ def transform_to_intended_dt(str_value: Any) -> Optional[Any]:
     return str_value
 
 
-def nested_dict_to_slash_separated_path(nested_dict: dict,
-                                        flattened_dict: dict,
-                                        parent_path=''):
+def nested_dict_to_slash_separated_path(
+    nested_dict: dict, flattened_dict: dict, parent_path=""
+):
     """Convert a nested dict into slash-separated paths up to a certain level."""
-    sep = '/'
+    sep = "/"
 
     for key, val in nested_dict.items():
         path = parent_path + sep + key
diff --git a/pynxtools/dataconverter/readers/apm/map_concepts/apm_deployment_specifics_to_nx_map.py b/pynxtools/dataconverter/readers/apm/map_concepts/apm_deployment_specifics_to_nx_map.py
index d4cdf84f6..f9fcf3fab 100644
--- a/pynxtools/dataconverter/readers/apm/map_concepts/apm_deployment_specifics_to_nx_map.py
+++ b/pynxtools/dataconverter/readers/apm/map_concepts/apm_deployment_specifics_to_nx_map.py
@@ -40,13 +40,18 @@
 # such that it executes after reading generic ELN data, so that entries already
 # available in the template get overwritten
 
-from pynxtools.dataconverter.readers.apm.utils.apm_versioning \
-    import NX_APM_ADEF_NAME, NX_APM_ADEF_VERSION, NX_APM_EXEC_NAME, NX_APM_EXEC_VERSION
+from pynxtools.dataconverter.readers.apm.utils.apm_versioning import (
+    NX_APM_ADEF_NAME,
+    NX_APM_ADEF_VERSION,
+    NX_APM_EXEC_NAME,
+    NX_APM_EXEC_VERSION,
+)
 
 
-NxApmDeploymentSpecificInput \
-    = {"/ENTRY[entry*]/@version": f"{NX_APM_ADEF_VERSION}",
-       "/ENTRY[entry*]/definition": f"{NX_APM_ADEF_NAME}",
-       "/ENTRY[entry*]/PROGRAM[program1]/program": f"{NX_APM_EXEC_NAME}",
-       "/ENTRY[entry*]/PROGRAM[program1]/program/@version": f"{NX_APM_EXEC_VERSION}",
-       "/ENTRY[entry*]/atom_probe/location": {"fun": "load_from", "terms": "location"}}
+NxApmDeploymentSpecificInput = {
+    "/ENTRY[entry*]/@version": f"{NX_APM_ADEF_VERSION}",
+    "/ENTRY[entry*]/definition": f"{NX_APM_ADEF_NAME}",
+    "/ENTRY[entry*]/PROGRAM[program1]/program": f"{NX_APM_EXEC_NAME}",
+    "/ENTRY[entry*]/PROGRAM[program1]/program/@version": f"{NX_APM_EXEC_VERSION}",
+    "/ENTRY[entry*]/atom_probe/location": {"fun": "load_from", "terms": "location"},
+}
diff --git a/pynxtools/dataconverter/readers/apm/map_concepts/apm_eln_to_nx_map.py b/pynxtools/dataconverter/readers/apm/map_concepts/apm_eln_to_nx_map.py
index 76c763f47..b6def4321 100644
--- a/pynxtools/dataconverter/readers/apm/map_concepts/apm_eln_to_nx_map.py
+++ b/pynxtools/dataconverter/readers/apm/map_concepts/apm_eln_to_nx_map.py
@@ -17,93 +17,309 @@
 #
 """Dict mapping custom schema instances from eln_data.yaml file onto concepts in NXapm."""
 
-NxApmElnInput = {"IGNORE": {"fun": "load_from_dict_list", "terms": "em_lab/detector"},
-                 "IGNORE": {"fun": "load_from", "terms": "em_lab/ebeam_column/aberration_correction/applied"},
-                 "IGNORE": {"fun": "load_from_dict_list", "terms": "em_lab/ebeam_column/aperture_em"},
-                 "/ENTRY[entry*]/PROGRAM[program2]/program": {"fun": "load_from", "terms": "atom_probe/control_software_program"},
-                 "/ENTRY[entry*]/PROGRAM[program2]/program/@version": {"fun": "load_from", "terms": "atom_probe/control_software_program__attr_version"},
-                 "/ENTRY[entry*]/experiment_identifier": {"fun": "load_from", "terms": "entry/experiment_identifier"},
-                 "/ENTRY[entry*]/start_time": {"fun": "load_from", "terms": "entry/start_time"},
-                 "/ENTRY[entry*]/end_time": {"fun": "load_from", "terms": "entry/end_time"},
-                 "/ENTRY[entry*]/run_number": {"fun": "load_from", "terms": "entry/run_number"},
-                 "/ENTRY[entry*]/operation_mode": {"fun": "load_from", "terms": "entry/operation_mode"},
-                 "/ENTRY[entry*]/experiment_description": {"fun": "load_from", "terms": "entry/experiment_description"},
-                 "IGNORE": {"fun": "load_from", "terms": "sample/alias"},
-                 "/ENTRY[entry*]/sample/grain_diameter": {"fun": "load_from", "terms": "sample/grain_diameter/value"},
-                 "/ENTRY[entry*]/sample/grain_diameter/@units": {"fun": "load_from", "terms": "sample/grain_diameter/unit"},
-                 "/ENTRY[entry*]/sample/grain_diameter_error": {"fun": "load_from", "terms": "sample/grain_diameter_error/value"},
-                 "/ENTRY[entry*]/sample/grain_diameter_error/@units": {"fun": "load_from", "terms": "sample/grain_diameter_error/unit"},
-                 "/ENTRY[entry*]/sample/heat_treatment_quenching_rate": {"fun": "load_from", "terms": "sample/heat_treatment_quenching_rate/value"},
-                 "/ENTRY[entry*]/sample/heat_treatment_quenching_rate/@units": {"fun": "load_from", "terms": "sample/heat_treatment_quenching_rate/unit"},
-                 "/ENTRY[entry*]/sample/heat_treatment_quenching_rate_error": {"fun": "load_from", "terms": "sample/heat_treatment_quenching_rate_error/value"},
-                 "/ENTRY[entry*]/sample/heat_treatment_quenching_rate_error/@units": {"fun": "load_from", "terms": "sample/heat_treatment_quenching_rate_error/unit"},
-                 "/ENTRY[entry*]/sample/heat_treatment_temperature": {"fun": "load_from", "terms": "sample/heat_treatment_temperature/value"},
-                 "/ENTRY[entry*]/sample/heat_treatment_temperature/@units": {"fun": "load_from", "terms": "sample/heat_treatment_temperature/unit"},
-                 "/ENTRY[entry*]/sample/heat_treatment_temperature_error": {"fun": "load_from", "terms": "sample/heat_treatment_temperature_error/value"},
-                 "/ENTRY[entry*]/sample/heat_treatment_temperature_error/@units": {"fun": "load_from", "terms": "sample/heat_treatment_temperature_error/unit"},
-                 "/ENTRY[entry*]/specimen/name": {"fun": "load_from", "terms": "specimen/name"},
-                 "/ENTRY[entry*]/specimen/preparation_date": {"fun": "load_from", "terms": "specimen/preparation_date"},
-                 "IGNORE": {"fun": "load_from", "terms": "specimen/sample_history"},
-                 "/ENTRY[entry*]/specimen/alias": {"fun": "load_from", "terms": "specimen/alias"},
-                 "/ENTRY[entry*]/specimen/is_polycrystalline": {"fun": "load_from", "terms": "specimen/is_polycrystalline"},
-                 "/ENTRY[entry*]/specimen/description": {"fun": "load_from", "terms": "specimen/description"},
-                 "/ENTRY[entry*]/atom_probe/FABRICATION[fabrication]/identifier": {"fun": "load_from", "terms": "atom_probe/fabrication_identifier"},
-                 "/ENTRY[entry*]/atom_probe/FABRICATION[fabrication]/model": {"fun": "load_from", "terms": "atom_probe/fabrication_model"},
-                 "/ENTRY[entry*]/atom_probe/FABRICATION[fabrication]/vendor": {"fun": "load_from", "terms": "atom_probe/fabrication_vendor"},
-                 "/ENTRY[entry*]/atom_probe/analysis_chamber/pressure": {"fun": "load_from", "terms": "atom_probe/analysis_chamber_pressure/value"},
-                 "/ENTRY[entry*]/atom_probe/analysis_chamber/pressure/@units": {"fun": "load_from", "terms": "atom_probe/analysis_chamber_pressure/unit"},
-                 "/ENTRY[entry*]/atom_probe/control_software/PROGRAM[program1]/program": {"fun": "load_from", "terms": "atom_probe/control_software_program"},
-                 "/ENTRY[entry*]/atom_probe/control_software/PROGRAM[program1]/program/@version": {"fun": "load_from", "terms": "atom_probe/control_software_program__attr_version"},
-                 "/ENTRY[entry*]/atom_probe/field_of_view": {"fun": "load_from", "terms": "atom_probe/field_of_view/value"},
-                 "/ENTRY[entry*]/atom_probe/field_of_view/@units": {"fun": "load_from", "terms": "atom_probe/field_of_view/unit"},
-                 "/ENTRY[entry*]/atom_probe/flight_path_length": {"fun": "load_from", "terms": "atom_probe/flight_path_length/value"},
-                 "/ENTRY[entry*]/atom_probe/flight_path_length/@units": {"fun": "load_from", "terms": "atom_probe/flight_path_length/unit"},
-                 "/ENTRY[entry*]/atom_probe/instrument_name": {"fun": "load_from", "terms": "atom_probe/instrument_name"},
-                 "/ENTRY[entry*]/atom_probe/ion_detector/model": {"fun": "load_from", "terms": "atom_probe/ion_detector_model"},
-                 "/ENTRY[entry*]/atom_probe/ion_detector/name": {"fun": "load_from", "terms": "atom_probe/ion_detector_name"},
-                 "/ENTRY[entry*]/atom_probe/ion_detector/serial_number": {"fun": "load_from", "terms": "atom_probe/ion_detector_serial_number"},
-                 "/ENTRY[entry*]/atom_probe/ion_detector/type": {"fun": "load_from", "terms": "atom_probe/ion_detector_type"},
-                 "/ENTRY[entry*]/atom_probe/local_electrode/name": {"fun": "load_from", "terms": "atom_probe/local_electrode_name"},
-                 "/ENTRY[entry*]/atom_probe/location": {"fun": "load_from", "terms": "atom_probe/location"},
-                 "/ENTRY[entry*]/atom_probe/REFLECTRON[reflectron]/applied": {"fun": "load_from", "terms": "atom_probe/reflectron_applied"},
-                 "/ENTRY[entry*]/atom_probe/stage_lab/base_temperature": {"fun": "load_from", "terms": "atom_probe/stage_lab_base_temperature/value"},
-                 "/ENTRY[entry*]/atom_probe/stage_lab/base_temperature/@units": {"fun": "load_from", "terms": "atom_probe/stage_lab_base_temperature/unit"},
-                 "/ENTRY[entry*]/atom_probe/specimen_monitoring/detection_rate": {"fun": "load_from", "terms": "atom_probe/specimen_monitoring_detection_rate/value"},
-                 "/ENTRY[entry*]/atom_probe/specimen_monitoring/detection_rate/@units": {"fun": "load_from", "terms": "atom_probe/specimen_monitoring_detection_rate/unit"},
-                 "/ENTRY[entry*]/atom_probe/specimen_monitoring/initial_radius": {"fun": "load_from", "terms": "atom_probe/specimen_monitoring_initial_radius/value"},
-                 "/ENTRY[entry*]/atom_probe/specimen_monitoring/initial_radius/@units": {"fun": "load_from", "terms": "atom_probe/specimen_monitoring_initial_radius/unit"},
-                 "/ENTRY[entry*]/atom_probe/specimen_monitoring/shank_angle": {"fun": "load_from", "terms": "atom_probe/specimen_monitoring_shank_angle/value"},
-                 "/ENTRY[entry*]/atom_probe/specimen_monitoring/shank_angle/@units": {"fun": "load_from", "terms": "atom_probe/specimen_monitoring_shank_angle/unit"},
-                 "/ENTRY[entry*]/atom_probe/status": {"fun": "load_from", "terms": "atom_probe/status"},
-                 "/ENTRY[entry*]/atom_probe/pulser/pulse_fraction": {"fun": "load_from", "terms": "atom_probe/pulser/pulse_fraction"},
-                 "/ENTRY[entry*]/atom_probe/pulser/pulse_frequency": {"fun": "load_from", "terms": "atom_probe/pulser/pulse_frequency/value"},
-                 "/ENTRY[entry*]/atom_probe/pulser/pulse_frequency/@units": {"fun": "load_from", "terms": "atom_probe/pulser/pulse_frequency/unit"},
-                 "/ENTRY[entry*]/atom_probe/pulser/pulse_mode": {"fun": "load_from", "terms": "atom_probe/pulser/pulse_mode"},
-                 "/ENTRY[entry*]/atom_probe/ranging/PROGRAM[program1]/program": {"fun": "load_from", "terms": "atom_probe/ranging/program"},
-                 "/ENTRY[entry*]/atom_probe/ranging/PROGRAM[program1]/program/@version": {"fun": "load_from", "terms": "atom_probe/ranging/program__attr_version"},
-                 "/ENTRY[entry*]/atom_probe/reconstruction/PROGRAM[program1]/program": {"fun": "load_from", "terms": "atom_probe/reconstruction/program"},
-                 "/ENTRY[entry*]/atom_probe/reconstruction/PROGRAM[program1]/program/@version": {"fun": "load_from", "terms": "atom_probe/reconstruction/program__attr_version"},
-                 "/ENTRY[entry*]/atom_probe/reconstruction/crystallographic_calibration": {"fun": "load_from", "terms": "atom_probe/reconstruction/crystallographic_calibration"},
-                 "/ENTRY[entry*]/atom_probe/reconstruction/parameter": {"fun": "load_from", "terms": "atom_probe/reconstruction/parameter"},
-                 "/ENTRY[entry*]/atom_probe/reconstruction/protocol_name": {"fun": "load_from", "terms": "atom_probe/reconstruction/protocol_name"}}
+NxApmElnInput = {
+    "IGNORE": {"fun": "load_from_dict_list", "terms": "em_lab/detector"},
+    "IGNORE": {
+        "fun": "load_from",
+        "terms": "em_lab/ebeam_column/aberration_correction/applied",
+    },
+    "IGNORE": {
+        "fun": "load_from_dict_list",
+        "terms": "em_lab/ebeam_column/aperture_em",
+    },
+    "/ENTRY[entry*]/PROGRAM[program2]/program": {
+        "fun": "load_from",
+        "terms": "atom_probe/control_software_program",
+    },
+    "/ENTRY[entry*]/PROGRAM[program2]/program/@version": {
+        "fun": "load_from",
+        "terms": "atom_probe/control_software_program__attr_version",
+    },
+    "/ENTRY[entry*]/experiment_identifier": {
+        "fun": "load_from",
+        "terms": "entry/experiment_identifier",
+    },
+    "/ENTRY[entry*]/start_time": {"fun": "load_from", "terms": "entry/start_time"},
+    "/ENTRY[entry*]/end_time": {"fun": "load_from", "terms": "entry/end_time"},
+    "/ENTRY[entry*]/run_number": {"fun": "load_from", "terms": "entry/run_number"},
+    "/ENTRY[entry*]/operation_mode": {
+        "fun": "load_from",
+        "terms": "entry/operation_mode",
+    },
+    "/ENTRY[entry*]/experiment_description": {
+        "fun": "load_from",
+        "terms": "entry/experiment_description",
+    },
+    "IGNORE": {"fun": "load_from", "terms": "sample/alias"},
+    "/ENTRY[entry*]/sample/grain_diameter": {
+        "fun": "load_from",
+        "terms": "sample/grain_diameter/value",
+    },
+    "/ENTRY[entry*]/sample/grain_diameter/@units": {
+        "fun": "load_from",
+        "terms": "sample/grain_diameter/unit",
+    },
+    "/ENTRY[entry*]/sample/grain_diameter_error": {
+        "fun": "load_from",
+        "terms": "sample/grain_diameter_error/value",
+    },
+    "/ENTRY[entry*]/sample/grain_diameter_error/@units": {
+        "fun": "load_from",
+        "terms": "sample/grain_diameter_error/unit",
+    },
+    "/ENTRY[entry*]/sample/heat_treatment_quenching_rate": {
+        "fun": "load_from",
+        "terms": "sample/heat_treatment_quenching_rate/value",
+    },
+    "/ENTRY[entry*]/sample/heat_treatment_quenching_rate/@units": {
+        "fun": "load_from",
+        "terms": "sample/heat_treatment_quenching_rate/unit",
+    },
+    "/ENTRY[entry*]/sample/heat_treatment_quenching_rate_error": {
+        "fun": "load_from",
+        "terms": "sample/heat_treatment_quenching_rate_error/value",
+    },
+    "/ENTRY[entry*]/sample/heat_treatment_quenching_rate_error/@units": {
+        "fun": "load_from",
+        "terms": "sample/heat_treatment_quenching_rate_error/unit",
+    },
+    "/ENTRY[entry*]/sample/heat_treatment_temperature": {
+        "fun": "load_from",
+        "terms": "sample/heat_treatment_temperature/value",
+    },
+    "/ENTRY[entry*]/sample/heat_treatment_temperature/@units": {
+        "fun": "load_from",
+        "terms": "sample/heat_treatment_temperature/unit",
+    },
+    "/ENTRY[entry*]/sample/heat_treatment_temperature_error": {
+        "fun": "load_from",
+        "terms": "sample/heat_treatment_temperature_error/value",
+    },
+    "/ENTRY[entry*]/sample/heat_treatment_temperature_error/@units": {
+        "fun": "load_from",
+        "terms": "sample/heat_treatment_temperature_error/unit",
+    },
+    "/ENTRY[entry*]/specimen/name": {"fun": "load_from", "terms": "specimen/name"},
+    "/ENTRY[entry*]/specimen/preparation_date": {
+        "fun": "load_from",
+        "terms": "specimen/preparation_date",
+    },
+    "IGNORE": {"fun": "load_from", "terms": "specimen/sample_history"},
+    "/ENTRY[entry*]/specimen/alias": {"fun": "load_from", "terms": "specimen/alias"},
+    "/ENTRY[entry*]/specimen/is_polycrystalline": {
+        "fun": "load_from",
+        "terms": "specimen/is_polycrystalline",
+    },
+    "/ENTRY[entry*]/specimen/description": {
+        "fun": "load_from",
+        "terms": "specimen/description",
+    },
+    "/ENTRY[entry*]/atom_probe/FABRICATION[fabrication]/identifier": {
+        "fun": "load_from",
+        "terms": "atom_probe/fabrication_identifier",
+    },
+    "/ENTRY[entry*]/atom_probe/FABRICATION[fabrication]/model": {
+        "fun": "load_from",
+        "terms": "atom_probe/fabrication_model",
+    },
+    "/ENTRY[entry*]/atom_probe/FABRICATION[fabrication]/vendor": {
+        "fun": "load_from",
+        "terms": "atom_probe/fabrication_vendor",
+    },
+    "/ENTRY[entry*]/atom_probe/analysis_chamber/pressure": {
+        "fun": "load_from",
+        "terms": "atom_probe/analysis_chamber_pressure/value",
+    },
+    "/ENTRY[entry*]/atom_probe/analysis_chamber/pressure/@units": {
+        "fun": "load_from",
+        "terms": "atom_probe/analysis_chamber_pressure/unit",
+    },
+    "/ENTRY[entry*]/atom_probe/control_software/PROGRAM[program1]/program": {
+        "fun": "load_from",
+        "terms": "atom_probe/control_software_program",
+    },
+    "/ENTRY[entry*]/atom_probe/control_software/PROGRAM[program1]/program/@version": {
+        "fun": "load_from",
+        "terms": "atom_probe/control_software_program__attr_version",
+    },
+    "/ENTRY[entry*]/atom_probe/field_of_view": {
+        "fun": "load_from",
+        "terms": "atom_probe/field_of_view/value",
+    },
+    "/ENTRY[entry*]/atom_probe/field_of_view/@units": {
+        "fun": "load_from",
+        "terms": "atom_probe/field_of_view/unit",
+    },
+    "/ENTRY[entry*]/atom_probe/flight_path_length": {
+        "fun": "load_from",
+        "terms": "atom_probe/flight_path_length/value",
+    },
+    "/ENTRY[entry*]/atom_probe/flight_path_length/@units": {
+        "fun": "load_from",
+        "terms": "atom_probe/flight_path_length/unit",
+    },
+    "/ENTRY[entry*]/atom_probe/instrument_name": {
+        "fun": "load_from",
+        "terms": "atom_probe/instrument_name",
+    },
+    "/ENTRY[entry*]/atom_probe/ion_detector/model": {
+        "fun": "load_from",
+        "terms": "atom_probe/ion_detector_model",
+    },
+    "/ENTRY[entry*]/atom_probe/ion_detector/name": {
+        "fun": "load_from",
+        "terms": "atom_probe/ion_detector_name",
+    },
+    "/ENTRY[entry*]/atom_probe/ion_detector/serial_number": {
+        "fun": "load_from",
+        "terms": "atom_probe/ion_detector_serial_number",
+    },
+    "/ENTRY[entry*]/atom_probe/ion_detector/type": {
+        "fun": "load_from",
+        "terms": "atom_probe/ion_detector_type",
+    },
+    "/ENTRY[entry*]/atom_probe/local_electrode/name": {
+        "fun": "load_from",
+        "terms": "atom_probe/local_electrode_name",
+    },
+    "/ENTRY[entry*]/atom_probe/location": {
+        "fun": "load_from",
+        "terms": "atom_probe/location",
+    },
+    "/ENTRY[entry*]/atom_probe/REFLECTRON[reflectron]/applied": {
+        "fun": "load_from",
+        "terms": "atom_probe/reflectron_applied",
+    },
+    "/ENTRY[entry*]/atom_probe/stage_lab/base_temperature": {
+        "fun": "load_from",
+        "terms": "atom_probe/stage_lab_base_temperature/value",
+    },
+    "/ENTRY[entry*]/atom_probe/stage_lab/base_temperature/@units": {
+        "fun": "load_from",
+        "terms": "atom_probe/stage_lab_base_temperature/unit",
+    },
+    "/ENTRY[entry*]/atom_probe/specimen_monitoring/detection_rate": {
+        "fun": "load_from",
+        "terms": "atom_probe/specimen_monitoring_detection_rate/value",
+    },
+    "/ENTRY[entry*]/atom_probe/specimen_monitoring/detection_rate/@units": {
+        "fun": "load_from",
+        "terms": "atom_probe/specimen_monitoring_detection_rate/unit",
+    },
+    "/ENTRY[entry*]/atom_probe/specimen_monitoring/initial_radius": {
+        "fun": "load_from",
+        "terms": "atom_probe/specimen_monitoring_initial_radius/value",
+    },
+    "/ENTRY[entry*]/atom_probe/specimen_monitoring/initial_radius/@units": {
+        "fun": "load_from",
+        "terms": "atom_probe/specimen_monitoring_initial_radius/unit",
+    },
+    "/ENTRY[entry*]/atom_probe/specimen_monitoring/shank_angle": {
+        "fun": "load_from",
+        "terms": "atom_probe/specimen_monitoring_shank_angle/value",
+    },
+    "/ENTRY[entry*]/atom_probe/specimen_monitoring/shank_angle/@units": {
+        "fun": "load_from",
+        "terms": "atom_probe/specimen_monitoring_shank_angle/unit",
+    },
+    "/ENTRY[entry*]/atom_probe/status": {
+        "fun": "load_from",
+        "terms": "atom_probe/status",
+    },
+    "/ENTRY[entry*]/atom_probe/pulser/pulse_fraction": {
+        "fun": "load_from",
+        "terms": "atom_probe/pulser/pulse_fraction",
+    },
+    "/ENTRY[entry*]/atom_probe/pulser/pulse_frequency": {
+        "fun": "load_from",
+        "terms": "atom_probe/pulser/pulse_frequency/value",
+    },
+    "/ENTRY[entry*]/atom_probe/pulser/pulse_frequency/@units": {
+        "fun": "load_from",
+        "terms": "atom_probe/pulser/pulse_frequency/unit",
+    },
+    "/ENTRY[entry*]/atom_probe/pulser/pulse_mode": {
+        "fun": "load_from",
+        "terms": "atom_probe/pulser/pulse_mode",
+    },
+    "/ENTRY[entry*]/atom_probe/ranging/PROGRAM[program1]/program": {
+        "fun": "load_from",
+        "terms": "atom_probe/ranging/program",
+    },
+    "/ENTRY[entry*]/atom_probe/ranging/PROGRAM[program1]/program/@version": {
+        "fun": "load_from",
+        "terms": "atom_probe/ranging/program__attr_version",
+    },
+    "/ENTRY[entry*]/atom_probe/reconstruction/PROGRAM[program1]/program": {
+        "fun": "load_from",
+        "terms": "atom_probe/reconstruction/program",
+    },
+    "/ENTRY[entry*]/atom_probe/reconstruction/PROGRAM[program1]/program/@version": {
+        "fun": "load_from",
+        "terms": "atom_probe/reconstruction/program__attr_version",
+    },
+    "/ENTRY[entry*]/atom_probe/reconstruction/crystallographic_calibration": {
+        "fun": "load_from",
+        "terms": "atom_probe/reconstruction/crystallographic_calibration",
+    },
+    "/ENTRY[entry*]/atom_probe/reconstruction/parameter": {
+        "fun": "load_from",
+        "terms": "atom_probe/reconstruction/parameter",
+    },
+    "/ENTRY[entry*]/atom_probe/reconstruction/protocol_name": {
+        "fun": "load_from",
+        "terms": "atom_probe/reconstruction/protocol_name",
+    },
+}
 
 
 # NeXus concept specific mapping tables which require special treatment as the current
 # NOMAD OASIS custom schema implementation delivers them as a list of dictionaries instead
 # of a directly flattenable list of keyword, value pairs
 
-NxUserFromListOfDict = {"/ENTRY[entry*]/USER[user*]/name": {"fun": "load_from", "terms": "name"},
-                        "/ENTRY[entry*]/USER[user*]/affiliation": {"fun": "load_from", "terms": "affiliation"},
-                        "/ENTRY[entry*]/USER[user*]/address": {"fun": "load_from", "terms": "address"},
-                        "/ENTRY[entry*]/USER[user*]/email": {"fun": "load_from", "terms": "email"},
-                        "/ENTRY[entry*]/USER[user*]/orcid": {"fun": "load_from", "terms": "orcid"},
-                        "/ENTRY[entry*]/USER[user*]/orcid_platform": {"fun": "load_from", "terms": "orcid_platform"},
-                        "/ENTRY[entry*]/USER[user*]/telephone_number": {"fun": "load_from", "terms": "telephone_number"},
-                        "/ENTRY[entry*]/USER[user*]/role": {"fun": "load_from", "terms": "role"},
-                        "/ENTRY[entry*]/USER[user*]/social_media_name": {"fun": "load_from", "terms": "social_media_name"},
-                        "/ENTRY[entry*]/USER[user*]/social_media_platform": {"fun": "load_from", "terms": "social_media_platform"}}
+NxUserFromListOfDict = {
+    "/ENTRY[entry*]/USER[user*]/name": {"fun": "load_from", "terms": "name"},
+    "/ENTRY[entry*]/USER[user*]/affiliation": {
+        "fun": "load_from",
+        "terms": "affiliation",
+    },
+    "/ENTRY[entry*]/USER[user*]/address": {"fun": "load_from", "terms": "address"},
+    "/ENTRY[entry*]/USER[user*]/email": {"fun": "load_from", "terms": "email"},
+    "/ENTRY[entry*]/USER[user*]/orcid": {"fun": "load_from", "terms": "orcid"},
+    "/ENTRY[entry*]/USER[user*]/orcid_platform": {
+        "fun": "load_from",
+        "terms": "orcid_platform",
+    },
+    "/ENTRY[entry*]/USER[user*]/telephone_number": {
+        "fun": "load_from",
+        "terms": "telephone_number",
+    },
+    "/ENTRY[entry*]/USER[user*]/role": {"fun": "load_from", "terms": "role"},
+    "/ENTRY[entry*]/USER[user*]/social_media_name": {
+        "fun": "load_from",
+        "terms": "social_media_name",
+    },
+    "/ENTRY[entry*]/USER[user*]/social_media_platform": {
+        "fun": "load_from",
+        "terms": "social_media_platform",
+    },
+}
 
 
 # LEAP6000 can use up to two lasers and voltage pulsing (both at the same time?)
-NxPulserFromListOfDict = {"/ENTRY[entry*]/atom_probe/pulser/SOURCE[source*]/name": {"fun": "load_from", "terms": "name"}, - "/ENTRY[entry*]/atom_probe/pulser/SOURCE[source*]/power": {"fun": "load_from", "terms": "power"}, - "/ENTRY[entry*]/atom_probe/pulser/SOURCE[source*]/pulse_energy": {"fun": "load_from", "terms": "pulse_energy"}, - "/ENTRY[entry*]/atom_probe/pulser/SOURCE[source*]/wavelength": {"fun": "load_from", "terms": "wavelength"}} +NxPulserFromListOfDict = { + "/ENTRY[entry*]/atom_probe/pulser/SOURCE[source*]/name": { + "fun": "load_from", + "terms": "name", + }, + "/ENTRY[entry*]/atom_probe/pulser/SOURCE[source*]/power": { + "fun": "load_from", + "terms": "power", + }, + "/ENTRY[entry*]/atom_probe/pulser/SOURCE[source*]/pulse_energy": { + "fun": "load_from", + "terms": "pulse_energy", + }, + "/ENTRY[entry*]/atom_probe/pulser/SOURCE[source*]/wavelength": { + "fun": "load_from", + "terms": "wavelength", + }, +} diff --git a/pynxtools/dataconverter/readers/apm/reader.py b/pynxtools/dataconverter/readers/apm/reader.py index 2e946257f..1451a6c39 100644 --- a/pynxtools/dataconverter/readers/apm/reader.py +++ b/pynxtools/dataconverter/readers/apm/reader.py @@ -23,26 +23,33 @@ from pynxtools.dataconverter.readers.base.reader import BaseReader -from pynxtools.dataconverter.readers.apm.utils.apm_define_io_cases \ - import ApmUseCaseSelector +from pynxtools.dataconverter.readers.apm.utils.apm_define_io_cases import ( + ApmUseCaseSelector, +) -from pynxtools.dataconverter.readers.apm.utils.apm_load_deployment_specifics \ - import NxApmNomadOasisConfigurationParser +from pynxtools.dataconverter.readers.apm.utils.apm_load_deployment_specifics import ( + NxApmNomadOasisConfigurationParser, +) -from pynxtools.dataconverter.readers.apm.utils.apm_load_generic_eln \ - import NxApmNomadOasisElnSchemaParser +from pynxtools.dataconverter.readers.apm.utils.apm_load_generic_eln import ( + NxApmNomadOasisElnSchemaParser, +) -from pynxtools.dataconverter.readers.apm.utils.apm_load_reconstruction \ - import ApmReconstructionParser +from pynxtools.dataconverter.readers.apm.utils.apm_load_reconstruction import ( + ApmReconstructionParser, +) -from pynxtools.dataconverter.readers.apm.utils.apm_load_ranging \ - import ApmRangingDefinitionsParser +from pynxtools.dataconverter.readers.apm.utils.apm_load_ranging import ( + ApmRangingDefinitionsParser, +) -from pynxtools.dataconverter.readers.apm.utils.apm_create_nx_default_plots \ - import apm_default_plot_generator +from pynxtools.dataconverter.readers.apm.utils.apm_create_nx_default_plots import ( + apm_default_plot_generator, +) -from pynxtools.dataconverter.readers.apm.utils.apm_generate_synthetic_data \ - import ApmCreateExampleData +from pynxtools.dataconverter.readers.apm.utils.apm_generate_synthetic_data import ( + ApmCreateExampleData, +) # this apm parser combines multiple sub-parsers # so we need the following input: @@ -74,10 +81,12 @@ class ApmReader(BaseReader): # Whitelist for the NXDLs that the reader supports and can process supported_nxdls = ["NXapm"] - def read(self, - template: dict = None, - file_paths: Tuple[str] = None, - objects: Tuple[Any] = None) -> dict: + def read( + self, + template: dict = None, + file_paths: Tuple[str] = None, + objects: Tuple[Any] = None, + ) -> dict: """Read data from given file, return filled template dictionary apm.""" template.clear() @@ -95,8 +104,9 @@ def read(self, else: # eln_data, and ideally recon and ranging definitions from technology partner file print("Parse ELN and technology partner file(s)...") 
             case = ApmUseCaseSelector(file_paths)
-            assert case.is_valid is True, \
-                "Such a combination of input-file(s, if any) is not supported !"
+            assert (
+                case.is_valid is True
+            ), "Such a combination of input file(s), if any, is not supported!"

             print("Parse (meta)data coming from an ELN...")
             if len(case.eln) == 1:
@@ -112,7 +122,9 @@ def read(self,
                 nx_apm_cfg.report(template)
             # having and or using a deployment-specific configuration is optional

-            print("Parse (numerical) data and metadata from ranging definitions file...")
+            print(
+                "Parse (numerical) data and metadata from ranging definitions file..."
+            )
             if len(case.reconstruction) == 1:
                 nx_apm_recon = ApmReconstructionParser(case.reconstruction[0], entry_id)
                 nx_apm_recon.report(template)
diff --git a/pynxtools/dataconverter/readers/apm/utils/apm_create_nx_default_plots.py b/pynxtools/dataconverter/readers/apm/utils/apm_create_nx_default_plots.py
index a0eb9846e..19745157d 100644
--- a/pynxtools/dataconverter/readers/apm/utils/apm_create_nx_default_plots.py
+++ b/pynxtools/dataconverter/readers/apm/utils/apm_create_nx_default_plots.py
@@ -21,8 +21,7 @@

 import numpy as np

-from pynxtools.dataconverter.readers.shared.shared_utils \
-    import get_repo_last_commit
+from pynxtools.dataconverter.readers.shared.shared_utils import get_repo_last_commit


 def create_default_plot_reconstruction(template: dict, entry_id: int) -> dict:
@@ -39,38 +38,46 @@ def create_default_plot_reconstruction(template: dict, entry_id: int) -> dict:
     # make the bounding box a quadric prism
     imi = np.floor(bounds[0, 0]) - resolution
     imx = np.ceil(bounds[0, 1]) + resolution
-    xedges = np.linspace(imi, imx, num=int(np.ceil((imx - imi) / resolution)) + 1,
-                         endpoint=True)
+    xedges = np.linspace(
+        imi, imx, num=int(np.ceil((imx - imi) / resolution)) + 1, endpoint=True
+    )
     # this partitioning is not general enough, imi and imx should be left and right
     # bounds respectively
     imi = np.floor(bounds[1, 0]) - resolution
     imx = np.ceil(bounds[1, 1]) + resolution
-    yedges = np.linspace(imi, imx, num=int(np.ceil((imx - imi) / resolution)) + 1,
-                         endpoint=True)
+    yedges = np.linspace(
+        imi, imx, num=int(np.ceil((imx - imi) / resolution)) + 1, endpoint=True
+    )
     imi = np.floor(bounds[2, 0]) - resolution
     imx = np.ceil(bounds[2, 1]) + resolution
-    zedges = np.linspace(imi, imx,
-                         num=int(np.ceil((imx - imi) / resolution)) + 1,
-                         endpoint=True)
+    zedges = np.linspace(
+        imi, imx, num=int(np.ceil((imx - imi) / resolution)) + 1, endpoint=True
+    )

-    hist3d = np.histogramdd((xyz[:, 0], xyz[:, 1], xyz[:, 2]),
-                            bins=(xedges, yedges, zedges))
+    hist3d = np.histogramdd(
+        (xyz[:, 0], xyz[:, 1], xyz[:, 2]), bins=(xedges, yedges, zedges)
+    )
     del xyz
-    assert isinstance(hist3d[0], np.ndarray), \
-        "Hist3d computation from the reconstruction failed!"
-    assert len(np.shape(hist3d[0])) == 3, \
-        "Hist3d computation from the reconstruction failed!"
+    assert isinstance(
+        hist3d[0], np.ndarray
+    ), "Hist3d computation from the reconstruction failed!"
+    assert (
+        len(np.shape(hist3d[0])) == 3
+    ), "Hist3d computation from the reconstruction failed!"
     for i in np.arange(0, 3):
-        assert np.shape(hist3d[0])[i] > 0, \
-            "Dimensions " + str(i) + " has no length!"
+        assert np.shape(hist3d[0])[i] > 0, "Dimension " + str(i) + " has no length!"
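# Sketch (assumed, self-contained numpy example) of the binning idea used in
# create_default_plot_reconstruction above: discretize a point cloud on a
# regular grid with numpy.histogramdd; the resolution is in the same length
# unit as the coordinates.
import numpy as np

xyz_demo = np.random.uniform(-5.0, 5.0, size=(1000, 3)).astype(np.float32)
resolution_demo = 1.0
edges_demo = []
for dim in range(3):
    imi_demo = np.floor(xyz_demo[:, dim].min()) - resolution_demo
    imx_demo = np.ceil(xyz_demo[:, dim].max()) + resolution_demo
    edges_demo.append(
        np.linspace(
            imi_demo,
            imx_demo,
            num=int(np.ceil((imx_demo - imi_demo) / resolution_demo)) + 1,
        )
    )
hist3d_demo = np.histogramdd(
    (xyz_demo[:, 0], xyz_demo[:, 1], xyz_demo[:, 2]), bins=tuple(edges_demo)
)
assert hist3d_demo[0].ndim == 3  # counts array, one axis per spatial dimension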
-    trg = f"/ENTRY[entry{entry_id}]/atom_probe/reconstruction/" \
-          f"naive_point_cloud_density_map/"
+    trg = (
+        f"/ENTRY[entry{entry_id}]/atom_probe/reconstruction/"
+        f"naive_point_cloud_density_map/"
+    )
     template[f"{trg}PROGRAM[program1]/program"] = "nomad-parser-nexus/apm/reader.py"
     template[f"{trg}PROGRAM[program1]/program/@version"] = get_repo_last_commit()

-    trg = f"/ENTRY[entry{entry_id}]/atom_probe/reconstruction/" \
-          f"naive_point_cloud_density_map/DATA[data]/"
+    trg = (
+        f"/ENTRY[entry{entry_id}]/atom_probe/reconstruction/"
+        f"naive_point_cloud_density_map/DATA[data]/"
+    )
     template[f"{trg}title"] = "Discretized reconstruction space"
     # template[f"{trg}@long_name"] = "Discretized reconstruction space"
     template[f"{trg}@signal"] = "data_counts"
@@ -81,18 +88,26 @@ def create_default_plot_reconstruction(template: dict, entry_id: int) -> dict:
     # mind that histogram does not follow Cartesian conventions so a transpose
     # might be necessary, for now we implement the transpose in the appdef

-    template[f"{trg}DATA[data_counts]"] \
-        = {"compress": np.array(hist3d[0], np.uint32), "strength": 1}
-    template[f"{trg}AXISNAME[axis_x]"] \
-        = {"compress": np.array(hist3d[1][0][1::], np.float32), "strength": 1}
+    template[f"{trg}DATA[data_counts]"] = {
+        "compress": np.array(hist3d[0], np.uint32),
+        "strength": 1,
+    }
+    template[f"{trg}AXISNAME[axis_x]"] = {
+        "compress": np.array(hist3d[1][0][1::], np.float32),
+        "strength": 1,
+    }
     template[f"{trg}AXISNAME[axis_x]/@units"] = "nm"
     template[f"{trg}AXISNAME[axis_x]/@long_name"] = "x (nm)"
-    template[f"{trg}AXISNAME[axis_y]"] \
-        = {"compress": np.array(hist3d[1][1][1::], np.float32), "strength": 1}
+    template[f"{trg}AXISNAME[axis_y]"] = {
+        "compress": np.array(hist3d[1][1][1::], np.float32),
+        "strength": 1,
+    }
     template[f"{trg}AXISNAME[axis_y]/@units"] = "nm"
     template[f"{trg}AXISNAME[axis_y]/@long_name"] = "y (nm)"
-    template[f"{trg}AXISNAME[axis_z]"] \
-        = {"compress": np.array(hist3d[1][2][1::], np.float32), "strength": 1}
+    template[f"{trg}AXISNAME[axis_z]"] = {
+        "compress": np.array(hist3d[1][2][1::], np.float32),
+        "strength": 1,
+    }
     template[f"{trg}AXISNAME[axis_z]/@units"] = "nm"
     template[f"{trg}AXISNAME[axis_z]/@long_name"] = "z (nm)"
     print("Default plot 3D discretized reconstruction at 1 nm binning.")
@@ -113,17 +128,19 @@ def create_default_plot_mass_spectrum(template: dict, entry_id: int) -> dict:

     hist1d = np.histogram(
         m_z[:],
-        np.linspace(mqmin, mqmax,
-                    num=int(np.ceil((mqmax - mqmin) / mqincr)) + 1,
-                    endpoint=True))
+        np.linspace(
+            mqmin, mqmax, num=int(np.ceil((mqmax - mqmin) / mqincr)) + 1, endpoint=True
+        ),
+    )
     del m_z
-    assert isinstance(hist1d[0], np.ndarray), \
-        "Hist1d computation from the mass spectrum failed!"
-    assert len(np.shape(hist1d[0])) == 1, \
-        "Hist1d computation from the mass spectrum failed!"
+    assert isinstance(
+        hist1d[0], np.ndarray
+    ), "Hist1d computation from the mass spectrum failed!"
+    assert (
+        len(np.shape(hist1d[0])) == 1
+    ), "Hist1d computation from the mass spectrum failed!"
     for i in np.arange(0, 1):
-        assert np.shape(hist1d[0])[i] > 0, \
-            "Dimensions " + str(i) + " has no length!"
+        assert np.shape(hist1d[0])[i] > 0, "Dimension " + str(i) + " has no length!"
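# Sketch of the template convention visible throughout these hunks: a value of
# the form {"compress": <ndarray>, "strength": <level>} signals (by assumption
# here, handled by the dataconverter's writer) that the dataset should be
# stored with gzip compression at the given level, while plain values are
# written as-is. The path used is hypothetical, for illustration only.
import numpy as np

template_demo = {}
counts_demo = np.zeros((10, 10, 10), np.uint32)
template_demo["/ENTRY[entry1]/demo/DATA[data_counts]"] = {
    "compress": counts_demo,
    "strength": 1,
}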
     trg = f"/ENTRY[entry{entry_id}]/atom_probe/ranging/mass_to_charge_distribution/"
     template[f"{trg}PROGRAM[program1]/program"] = "nomad-parser-nexus/apm/reader.py"
@@ -134,20 +151,27 @@ def create_default_plot_mass_spectrum(template: dict, entry_id: int) -> dict:
     template[f"{trg}range_minmax"] = np.array([mqmin, mqmax], np.float32)
     template[f"{trg}range_minmax/@units"] = "Da"

-    trg = f"/ENTRY[entry{entry_id}]/atom_probe/ranging/" \
-          f"mass_to_charge_distribution/mass_spectrum/"
+    trg = (
+        f"/ENTRY[entry{entry_id}]/atom_probe/ranging/"
+        f"mass_to_charge_distribution/mass_spectrum/"
+    )
     template[f"{trg}title"] = "Mass spectrum (0.01 Da binning)"
     template[f"{trg}@signal"] = "data_counts"
     template[f"{trg}@axes"] = "axis_mass_to_charge"
     template[f"{trg}@AXISNAME_indices[axis_mass_to_charge]"] = np.uint32(0)
-    template[f"{trg}DATA[data_counts]"] \
-        = {"compress": np.array(hist1d[0], np.uint32), "strength": 1}
+    template[f"{trg}DATA[data_counts]"] = {
+        "compress": np.array(hist1d[0], np.uint32),
+        "strength": 1,
+    }
     template[f"{trg}DATA[data_counts]/@long_name"] = "Counts (1)"
-    template[f"{trg}AXISNAME[axis_mass_to_charge]"] \
-        = {"compress": np.array(hist1d[1][1::], np.float32), "strength": 1}
+    template[f"{trg}AXISNAME[axis_mass_to_charge]"] = {
+        "compress": np.array(hist1d[1][1::], np.float32),
+        "strength": 1,
+    }
     template[f"{trg}AXISNAME[axis_mass_to_charge]/@units"] = "Da"
-    template[f"{trg}AXISNAME[axis_mass_to_charge]/@long_name"] \
-        = "Mass-to-charge-state ratio (Da)"
+    template[
+        f"{trg}AXISNAME[axis_mass_to_charge]/@long_name"
+    ] = "Mass-to-charge-state ratio (Da)"
     print("Plot mass spectrum at 0.01 Da binning was created.")
     del hist1d

@@ -183,8 +207,9 @@ def apm_default_plot_generator(template: dict, n_entries: int) -> dict:
                 has_valid_xyz = True

     has_default_data = has_valid_m_z or has_valid_xyz
-    assert has_default_data is True, \
-        "Having no recon or mass-to-charge data is inacceptable at the moment!"
+    assert (
+        has_default_data is True
+    ), "Having no recon or mass-to-charge data is unacceptable at the moment!"
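# Sketch of the NXdata wiring used for the default plots above: @signal names
# the dataset to be plotted, @axes names its axis dataset, and
# @AXISNAME_indices binds the axis to a data dimension. The path here is
# shortened and hypothetical.
import numpy as np

nxdata_demo = {}
trg_demo = "/ENTRY[entry1]/demo_spectrum/"
nxdata_demo[f"{trg_demo}@signal"] = "data_counts"
nxdata_demo[f"{trg_demo}@axes"] = "axis_mass_to_charge"
nxdata_demo[f"{trg_demo}@AXISNAME_indices[axis_mass_to_charge]"] = np.uint32(0)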
     # NEW ISSUE: fall-back solution to plot something else, however
     # currently POS, EPOS and APT provide always xyz, and m_z data
diff --git a/pynxtools/dataconverter/readers/apm/utils/apm_define_io_cases.py b/pynxtools/dataconverter/readers/apm/utils/apm_define_io_cases.py
index 26a73a1e9..838669abf 100644
--- a/pynxtools/dataconverter/readers/apm/utils/apm_define_io_cases.py
+++ b/pynxtools/dataconverter/readers/apm/utils/apm_define_io_cases.py
@@ -42,7 +42,15 @@ def __init__(self, file_paths: Tuple[str] = None):
         self.ranging: List[str] = []
         self.is_valid = False
         self.supported_mime_types = [
-            "pos", "epos", "apt", "rrng", "rng", "txt", "yaml", "yml"]
+            "pos",
+            "epos",
+            "apt",
+            "rrng",
+            "rng",
+            "txt",
+            "yaml",
+            "yml",
+        ]
         for mime_type in self.supported_mime_types:
             self.case[mime_type] = []

@@ -54,7 +62,7 @@ def sort_files_by_mime_type(self, file_paths: Tuple[str] = None):
         for file_name in file_paths:
             index = file_name.lower().rfind(".")
             if index >= 0:
-                suffix = file_name.lower()[index + 1::]
+                suffix = file_name.lower()[index + 1 : :]
                 if suffix in self.supported_mime_types:
                     if file_name not in self.case[suffix]:
                         self.case[suffix].append(file_name)
@@ -86,8 +94,9 @@ def check_validity_of_file_combinations(self):
         for mime_type in ["yaml", "yml"]:
             yml += self.case[mime_type]
         for entry in yml:
-            if entry.endswith(".oasis.specific.yaml") \
-                    or entry.endswith(".oasis.specific.yml"):
+            if entry.endswith(".oasis.specific.yaml") or entry.endswith(
+                ".oasis.specific.yml"
+            ):
                 self.cfg += [entry]
             else:
                 self.eln += [entry]
diff --git a/pynxtools/dataconverter/readers/apm/utils/apm_generate_synthetic_data.py b/pynxtools/dataconverter/readers/apm/utils/apm_generate_synthetic_data.py
index c34d30f7b..33e3daaf1 100644
--- a/pynxtools/dataconverter/readers/apm/utils/apm_generate_synthetic_data.py
+++ b/pynxtools/dataconverter/readers/apm/utils/apm_generate_synthetic_data.py
@@ -34,28 +34,37 @@
 from ase.lattice.cubic import FaceCenteredCubic
 from ase.data import atomic_numbers, atomic_masses, chemical_symbols

-from ifes_apt_tc_data_modeling.utils.utils \
-    import create_isotope_vector, isotope_vector_to_nuclid_list, \
-    isotope_vector_to_human_readable_name, MAX_NUMBER_OF_ATOMS_PER_ION, MQ_EPSILON
+from ifes_apt_tc_data_modeling.utils.utils import (
+    create_isotope_vector,
+    isotope_vector_to_nuclid_list,
+    isotope_vector_to_human_readable_name,
+    MAX_NUMBER_OF_ATOMS_PER_ION,
+    MQ_EPSILON,
+)

 # do not use ase directly any longer for NIST isotopes, instead this syntatic equivalent
 # from ifes_apt_tc_data_modeling.utils.nist_isotope_data \
 #     import isotopes

-from pynxtools.dataconverter.readers.apm.utils.apm_versioning \
-    import NX_APM_ADEF_NAME, NX_APM_ADEF_VERSION, NX_APM_EXEC_NAME, NX_APM_EXEC_VERSION
+from pynxtools.dataconverter.readers.apm.utils.apm_versioning import (
+    NX_APM_ADEF_NAME,
+    NX_APM_ADEF_VERSION,
+    NX_APM_EXEC_NAME,
+    NX_APM_EXEC_VERSION,
+)

-from pynxtools.dataconverter.readers.apm.utils.apm_load_ranging \
-    import add_unknown_iontype
+from pynxtools.dataconverter.readers.apm.utils.apm_load_ranging import (
+    add_unknown_iontype,
+)

 # parameter affecting reconstructed positions and size
 CRYSTAL_ORIENTATION = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
 # MK::add analysis how large aggregate has to be
 RECON_SIZE = (50, 50, 300)
-RECON_ATOM_SPACING = 5.
-RECON_HEIGHT = 300.  # angstroem
-RECON_RADIUS = 50.  # angstroem
+RECON_ATOM_SPACING = 5.0
+RECON_HEIGHT = 300.0  # angstroem
+RECON_RADIUS = 50.0  # angstroem
 MAX_COMPONENTS = 5  # how many different molecular ions in one dataset/entry
 MAX_ATOMS = 10  # determine power-law fraction of n_atoms per ion
 MULTIPLES_FACTOR = 0.6  # controls how likely multiple ions are synthesized
@@ -108,20 +117,26 @@ def create_reconstructed_positions(self):
         # assumptions:
         # identity orientation, no periodic boundary conditions
         print(f"Using the following version of ase {ase.__version__}")
-        xyz = np.asarray(FaceCenteredCubic(directions=CRYSTAL_ORIENTATION,
-                                           size=RECON_SIZE, symbol="Cu",
-                                           latticeconstant=RECON_ATOM_SPACING,
-                                           pbc=(0, 0, 0)).get_positions(), np.float32)
+        xyz = np.asarray(
+            FaceCenteredCubic(
+                directions=CRYSTAL_ORIENTATION,
+                size=RECON_SIZE,
+                symbol="Cu",
+                latticeconstant=RECON_ATOM_SPACING,
+                pbc=(0, 0, 0),
+            ).get_positions(),
+            np.float32,
+        )
         # Cu will be ignored, only the lattice with positions is relevant
-        centre_of_mass = np.asarray([np.mean(xyz[:, 0]),
-                                     np.mean(xyz[:, 1]),
-                                     np.mean(xyz[:, 2])], np.float32)
+        centre_of_mass = np.asarray(
+            [np.mean(xyz[:, 0]), np.mean(xyz[:, 1]), np.mean(xyz[:, 2])], np.float32
+        )
         # print("Centre of mass of ASE lattice is (with coordinates in angstroem)")
         # print(centre_of_mass)
         xyz = xyz - centre_of_mass
-        centre_of_mass = np.asarray([np.mean(xyz[:, 0]),
-                                     np.mean(xyz[:, 1]),
-                                     np.mean(xyz[:, 2])], np.float32)
+        centre_of_mass = np.asarray(
+            [np.mean(xyz[:, 0]), np.mean(xyz[:, 1]), np.mean(xyz[:, 2])], np.float32
+        )
         # print("Updated centre of mass")
         # print(centre_of_mass)
         # axis_aligned_bbox = np.asarray([np.min(xyz[:, 0]), np.max(xyz[:, 0]),
@@ -134,10 +149,11 @@ def create_reconstructed_positions(self):
         mask = None
         mask = xyz[:, 2] <= (origin[2] + 0.5 * RECON_HEIGHT)
         mask &= xyz[:, 2] >= (origin[2] - 0.5 * RECON_HEIGHT)
-        mask &= ((xyz[:, 0] - origin[0])**2
-                 + (xyz[:, 1] - origin[1])**2) <= RECON_RADIUS**2
+        mask &= (
+            (xyz[:, 0] - origin[0]) ** 2 + (xyz[:, 1] - origin[1]) ** 2
+        ) <= RECON_RADIUS**2
         self.xyz = xyz[mask]
-        shift = [0., 0., 0.5 * RECON_HEIGHT]
+        shift = [0.0, 0.0, 0.5 * RECON_HEIGHT]
         for idx in np.arange(0, 3):
             self.xyz[:, idx] += shift[idx]
         self.xyz *= 0.1  # from angstroem to nm
@@ -161,13 +177,12 @@ def place_atoms_from_periodic_table(self):

         # power law model for multiplicity of molecular ions
         # !! warning: for real world datasets depends on evaporation physics
-        self.n_ivec = np.asarray(np.linspace(1,
-                                             MAX_ATOMS,
-                                             num=MAX_ATOMS,
-                                             endpoint=True), np.float64)
+        self.n_ivec = np.asarray(
+            np.linspace(1, MAX_ATOMS, num=MAX_ATOMS, endpoint=True), np.float64
+        )
         accept_reject = MULTIPLES_FACTOR**self.n_ivec
         accept_reject = np.cumsum(accept_reject) / np.sum(accept_reject)
-        unifrnd = np.random.uniform(low=0., high=1., size=(self.n_components,))
+        unifrnd = np.random.uniform(low=0.0, high=1.0, size=(self.n_components,))
         self.multiplicity = np.ones((self.n_components,))
         for idx in np.arange(0, len(accept_reject) - 1):
             mask = unifrnd[:] >= accept_reject[idx]
@@ -178,10 +193,10 @@ def place_atoms_from_periodic_table(self):

         # uniform model for distribution of charge states
         # !! warning: for real world datasets actual ion charge depends
         # on (evaporation) physics, very complicated in fact a topic of current research
-        self.charge_state = np.asarray(np.random.uniform(low=1,
-                                                         high=MAX_CHARGE_STATE,
-                                                         size=(self.n_components,)),
-                                       np.uint32)
+        self.charge_state = np.asarray(
+            np.random.uniform(low=1, high=MAX_CHARGE_STATE, size=(self.n_components,)),
+            np.uint32,
+        )

         # compose for each component randomly sampled hypothetical molecular ions
         # uniform random model which elements to pick from periodic table of elements
@@ -197,23 +212,30 @@ def place_atoms_from_periodic_table(self):
         composition = []  # list of tuples, one for each composition
         for idx in np.arange(0, self.n_components):
             isotope_vector = []
-            mass_sum = 0.
+            mass_sum = 0.0
             # sample atoms for building the ion
             sampled_elements = np.asarray(
-                np.random.uniform(low=1, high=MAX_ATOMIC_NUMBER,
-                                  size=(self.multiplicity[idx],)), np.uint32)
+                np.random.uniform(
+                    low=1, high=MAX_ATOMIC_NUMBER, size=(self.multiplicity[idx],)
+                ),
+                np.uint32,
+            )
             for val in sampled_elements:
                 symbol = value_to_pse_symbol_lookup[val]
                 isotope_vector.append(symbol)
                 mass_sum += atomic_masses[atomic_numbers[symbol]]
-            composition.append((isotope_vector,
-                                self.charge_state[idx],
-                                mass_sum / self.charge_state[idx],
-                                np.float64(np.random.uniform(low=1, high=100))))
+            composition.append(
+                (
+                    isotope_vector,
+                    self.charge_state[idx],
+                    mass_sum / self.charge_state[idx],
+                    np.float64(np.random.uniform(low=1, high=100)),
+                )
+            )

-        weighting_factor_sum = 0.
+        weighting_factor_sum = 0.0
         for idx in np.arange(0, self.n_components):
             weighting_factor_sum += composition[idx][3]

@@ -222,23 +244,27 @@ def place_atoms_from_periodic_table(self):
         self.nrm_composition = []
         # print(composition)
         for idx in np.arange(0, self.n_components):
-            self.nrm_composition.append((
-                composition[idx][0],
-                composition[idx][1],
-                composition[idx][2],
-                composition[idx][3] / weighting_factor_sum))
+            self.nrm_composition.append(
+                (
+                    composition[idx][0],
+                    composition[idx][1],
+                    composition[idx][2],
+                    composition[idx][3] / weighting_factor_sum,
+                )
+            )
         self.nrm_composition.sort(key=lambda a: a[3])  # sort asc. for composition

-        accept_reject = [0.]
+        accept_reject = [0.0]
         for idx in self.nrm_composition:
             accept_reject.append(idx[3])
         accept_reject = np.cumsum(accept_reject)
-        assert self.xyz != [], \
-            "self.xyz must not be an empty dataset, create a geometry first!"
+        assert (
+            self.xyz != []
+        ), "self.xyz must not be an empty dataset, create a geometry first!"
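# Sketch (assumed numpy example) of the cumulative-weight sampling prepared
# above and applied just below: uniform random draws are binned against the
# cumulative composition fractions, so each ion is assigned a component with
# the prescribed probability.
import numpy as np

weights_demo = np.array([0.5, 0.3, 0.2])  # normalized component fractions
cum_demo = np.concatenate(([0.0], np.cumsum(weights_demo)))
draws_demo = np.random.uniform(0.0, 1.0, size=10000)
component_demo = np.searchsorted(cum_demo, draws_demo, side="right") - 1
assert 0 <= component_demo.min() and component_demo.max() <= 2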
         # print("Accept/reject sampling m/q values for "
         #       + str(np.shape(self.xyz)[0]) + " ions")
-        unifrnd = np.random.uniform(low=0., high=1., size=(np.shape(self.xyz)[0],))
+        unifrnd = np.random.uniform(low=0.0, high=1.0, size=(np.shape(self.xyz)[0],))
         self.m_z = np.empty((np.shape(self.xyz)[0],))
         self.m_z[:] = np.nan
         for idx in np.arange(0, len(accept_reject) - 1):
@@ -267,18 +293,21 @@ def composition_to_ranging_definitions(self, template: dict) -> dict:
         for tpl in self.nrm_composition:
             path = f"{trg}ION[ion{ion_id}]/"
             ivec = create_isotope_vector(tpl[0])
-            template[f"{path}isotope_vector"] \
-                = np.reshape(np.asarray(ivec, np.uint16),
-                             (1, MAX_NUMBER_OF_ATOMS_PER_ION))
+            template[f"{path}isotope_vector"] = np.reshape(
+                np.asarray(ivec, np.uint16), (1, MAX_NUMBER_OF_ATOMS_PER_ION)
+            )
             # template[path + "isotope_vector/@units"] = ""
             template[f"{path}charge_state"] = np.int8(tpl[1])
             template[f"{path}mass_to_charge_range"] = np.reshape(
-                np.asarray([tpl[2], tpl[2] + MQ_EPSILON], np.float32), (1, 2))
+                np.asarray([tpl[2], tpl[2] + MQ_EPSILON], np.float32), (1, 2)
+            )
             template[f"{path}mass_to_charge_range/@units"] = "Da"
             nuclid_list = np.zeros([2, 32], np.uint16)
             nuclid_list = isotope_vector_to_nuclid_list(ivec)
             template[f"{path}nuclid_list"] = np.asarray(nuclid_list, np.uint16)
-            template[path + "name"] = isotope_vector_to_human_readable_name(ivec, tpl[1])
+            template[path + "name"] = isotope_vector_to_human_readable_name(
+                ivec, tpl[1]
+            )
             ion_id += 1

         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/ranging/"
@@ -298,18 +327,20 @@ def emulate_entry(self, template: dict) -> dict:
         template[f"{trg}PROGRAM[program1]/program/@version"] = NX_APM_EXEC_VERSION
         template[f"{trg}start_time"] = datetime.datetime.now().astimezone().isoformat()
         template[f"{trg}end_time"] = datetime.datetime.now().astimezone().isoformat()
-        msg = '''
+        msg = """
          WARNING: These are mocked data !!
                   They are meant to be used exclusively
                   for verifying NOMAD search capabilities.
-        '''
+        """
         template[f"{trg}experiment_description"] = msg
-        experiment_identifier \
-            = str(f"R{np.random.choice(100, 1)[0]}-{np.random.choice(100000, 1)[0]}")
+        experiment_identifier = str(
+            f"R{np.random.choice(100, 1)[0]}-{np.random.choice(100000, 1)[0]}"
+        )
         template[f"{trg}experiment_identifier"] = experiment_identifier
         template[f"{trg}run_number"] = experiment_identifier.split("-")[1]
-        template[f"{trg}operation_mode"] \
-            = str(np.random.choice(["apt", "fim", "apt_fim"], 1)[0])
+        template[f"{trg}operation_mode"] = str(
+            np.random.choice(["apt", "fim", "apt_fim"], 1)[0]
+        )
         return template

     def emulate_user(self, template: dict) -> dict:
@@ -318,12 +349,32 @@ def emulate_user(self, template: dict) -> dict:
         # print("Parsing user...")
         prefix = f"/ENTRY[entry{self.entry_id}]/"
         user_names = np.unique(
-            np.random.choice(["Sherjeel", "MarkusK", "Dierk", "Baptiste",
-                              "Alexander", "Lorenz", "Sophie", "Stefan",
-                              "Katharina", "Florian", "Daniel", "Sandor",
-                              "Carola", "Andrea", "Hampus", "Pepe", "Lauri",
-                              "MarkusS", "Christoph", "Claudia"],
-                             1 + np.random.choice(MAX_USERS, 1)))
+            np.random.choice(
+                [
+                    "Sherjeel",
+                    "MarkusK",
+                    "Dierk",
+                    "Baptiste",
+                    "Alexander",
+                    "Lorenz",
+                    "Sophie",
+                    "Stefan",
+                    "Katharina",
+                    "Florian",
+                    "Daniel",
+                    "Sandor",
+                    "Carola",
+                    "Andrea",
+                    "Hampus",
+                    "Pepe",
+                    "Lauri",
+                    "MarkusS",
+                    "Christoph",
+                    "Claudia",
+                ],
+                1 + np.random.choice(MAX_USERS, 1),
+            )
+        )
         user_id = 1
         for name in user_names:
             trg = f"{prefix}USER[user{user_id}]/"
@@ -347,13 +398,17 @@ def emulate_specimen(self, template: dict) -> dict:
         print(f"Unique elements are: {list(unique_elements)}")
         template[f"{trg}atom_types"] = ", ".join(list(unique_elements))

-        specimen_name = str(f"Mocked atom probe specimen {np.random.choice(1000, 1)[0]}")
+        specimen_name = str(
+            f"Mocked atom probe specimen {np.random.choice(1000, 1)[0]}"
+        )
         template[f"{trg}name"] = specimen_name
         template[f"{trg}sample_history"] = "n/a"
-        template[f"{trg}preparation_date"] \
-            = datetime.datetime.now().astimezone().isoformat()
-        template[f"{trg}short_title"] \
-            = specimen_name.replace("Mocked atom probe specimen ", "")
+        template[f"{trg}preparation_date"] = (
+            datetime.datetime.now().astimezone().isoformat()
+        )
+        template[f"{trg}short_title"] = specimen_name.replace(
+            "Mocked atom probe specimen ", ""
+        )
         template[f"{trg}description"] = "n/a"
         return template

@@ -362,8 +417,9 @@ def emulate_control_software(self, template: dict) -> dict:
         # print("Parsing control software...")
         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/control_software/"
         template[f"{trg}PROGRAM[program1]/program"] = "IVAS"
-        template[f"{trg}PROGRAM[program1]/program/@version"] \
-            = str(f"3.{np.random.choice(9, 1)[0]}.{np.random.choice(9, 1)[0]}")
+        template[f"{trg}PROGRAM[program1]/program/@version"] = str(
+            f"3.{np.random.choice(9, 1)[0]}.{np.random.choice(9, 1)[0]}"
+        )
         return template

     def emulate_instrument_header(self, template: dict) -> dict:
@@ -371,10 +427,12 @@ def emulate_instrument_header(self, template: dict) -> dict:
         # check if required fields exists and are valid
         # print("Parsing instrument header...")
         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/"
-        template[f"{trg}instrument_name"] \
-            = str(f"test instrument {np.random.choice(100, 1)[0]}")
-        template[f"{trg}flight_path_length"] \
-            = np.float64(np.random.normal(loc=1.0, scale=0.05))
+        template[f"{trg}instrument_name"] = str(
+            f"test instrument {np.random.choice(100, 1)[0]}"
+        )
+        template[f"{trg}flight_path_length"] = np.float64(
+            np.random.normal(loc=1.0, scale=0.05)
+        )
         template[f"{trg}flight_path_length/@units"] = "m"
         return template

@@ -382,13 +440,26 @@ def emulate_fabrication(self, template: dict) -> dict:
         """Copy data in fabrication section."""
         # print("Parsing fabrication...")
         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/FABRICATION[fabrication]/"
-        template[f"{trg}vendor"] \
-            = str(np.random.choice(["AMETEK/Cameca", "customized"], 1)[0])
-        template[f"{trg}model"] \
-            = str(np.random.choice(["LEAP3000", "LEAP4000", "LEAP5000",
-                                    "LEAP6000", "OxCart", "MTAP", "FIM"], 1)[0])
-        template[f"{trg}identifier"] \
-            = str(hashlib.sha256("IVAS".encode("utf-8")).hexdigest())
+        template[f"{trg}vendor"] = str(
+            np.random.choice(["AMETEK/Cameca", "customized"], 1)[0]
+        )
+        template[f"{trg}model"] = str(
+            np.random.choice(
+                [
+                    "LEAP3000",
+                    "LEAP4000",
+                    "LEAP5000",
+                    "LEAP6000",
+                    "OxCart",
+                    "MTAP",
+                    "FIM",
+                ],
+                1,
+            )[0]
+        )
+        template[f"{trg}identifier"] = str(
+            hashlib.sha256("IVAS".encode("utf-8")).hexdigest()
+        )
         # template[f"{trg}capabilities"] = ""
         return template

@@ -397,7 +468,8 @@ def emulate_analysis_chamber(self, template: dict) -> dict:
         # print("Parsing analysis chamber...")
         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/analysis_chamber/"
         template[f"{trg}pressure"] = np.float64(
-            np.random.normal(loc=1.0e-10, scale=0.2e-11))
+            np.random.normal(loc=1.0e-10, scale=0.2e-11)
+        )
         template[f"{trg}pressure/@units"] = "torr"
         return template

@@ -412,8 +484,7 @@ def emulate_local_electrode(self, template: dict) -> dict:
         """Copy data in local_electrode section."""
         # print("Parsing local electrode...")
         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/local_electrode/"
-        template[f"{trg}name"] \
-            = str(f"electrode {np.random.choice(1000, 1)[0]}")
+        template[f"{trg}name"] = str(f"electrode {np.random.choice(1000, 1)[0]}")
         return template

     def emulate_detector(self, template: dict) -> dict:
@@ -425,7 +496,8 @@ def emulate_detector(self, template: dict) -> dict:
         template[f"{trg}name"] = detector_model_type
         template[f"{trg}model"] = detector_model_type
         template[f"{trg}serial_number"] = hashlib.sha256(
-            detector_model_type.encode("utf-8")).hexdigest()
+            detector_model_type.encode("utf-8")
+        ).hexdigest()
         return template

     def emulate_stage_lab(self, template: dict) -> dict:
@@ -440,11 +512,11 @@ def emulate_specimen_monitoring(self, template: dict) -> dict:
         """Copy data in specimen_monitoring section."""
         # print("Parsing specimen monitoring...")
         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/specimen_monitoring/"
-        eta = np.min((np.random.normal(loc=0.6, scale=0.1), 1.))
+        eta = np.min((np.random.normal(loc=0.6, scale=0.1), 1.0))
         template[f"{trg}detection_rate"] = np.float64(eta)
         template[f"{trg}initial_radius"] = np.float64(RECON_RADIUS * 0.1)
         template[f"{trg}initial_radius/@units"] = "nm"
-        template[f"{trg}shank_angle"] = np.float64(0.)  # = np.random.choice(10, 1)[0]
+        template[f"{trg}shank_angle"] = np.float64(0.0)  # = np.random.choice(10, 1)[0]
         template[f"{trg}shank_angle/@units"] = "degree"
         return template

@@ -452,25 +524,33 @@ def emulate_pulser(self, template: dict) -> dict:
         """Copy data in pulser section."""
         # print("Parsing pulser...")
         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/pulser/"
-        pulse_mode = str(np.random.choice(
-            ["laser", "voltage", "laser_and_voltage"], 1)[0])
+        pulse_mode = str(
+            np.random.choice(["laser", "voltage", "laser_and_voltage"], 1)[0]
+        )
         template[f"{trg}pulse_mode"] = pulse_mode
-        template[f"{trg}pulse_fraction"] \
-            = np.float64(np.random.normal(loc=0.1, scale=0.02))
-        template[f"{trg}pulse_frequency"] \
-            = np.float64(np.random.normal(loc=250, scale=10))
+        template[f"{trg}pulse_fraction"] = np.float64(
+            np.random.normal(loc=0.1, scale=0.02)
+        )
+        template[f"{trg}pulse_frequency"] = np.float64(
+            np.random.normal(loc=250, scale=10)
+        )
         template[f"{trg}pulse_frequency/@units"] = "kHz"

         if pulse_mode != "voltage":
-            trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/pulser/SOURCE[laser_source1]/"
+            trg = (
+                f"/ENTRY[entry{self.entry_id}]/atom_probe/pulser/SOURCE[laser_source1]/"
+            )
             template[f"{trg}name"] = "laser"
-            template[f"{trg}wavelength"] \
-                = np.float64((30 + np.random.choice(30, 1)) * 1.0e-8)
+            template[f"{trg}wavelength"] = np.float64(
+                (30 + np.random.choice(30, 1)) * 1.0e-8
+            )
             template[f"{trg}wavelength/@units"] = "m"
-            template[f"{trg}pulse_energy"] \
-                = np.float64(np.random.normal(loc=1.2e-11, scale=0.2e-12))
+            template[f"{trg}pulse_energy"] = np.float64(
+                np.random.normal(loc=1.2e-11, scale=0.2e-12)
+            )
             template[f"{trg}pulse_energy/@units"] = "J"
-            template[f"{trg}power"] \
-                = np.float64(np.random.normal(loc=2.0e-8, scale=0.2e-9))
+            template[f"{trg}power"] = np.float64(
+                np.random.normal(loc=2.0e-8, scale=0.2e-9)
+            )
             template[f"{trg}power/@units"] = "W"
         return template

@@ -479,12 +559,15 @@ def emulate_reconstruction(self, template: dict) -> dict:
         # print("Parsing reconstruction...")
         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/reconstruction/"
         src = f"/ENTRY[entry{self.entry_id}]/atom_probe/control_software/"
-        template[f"{trg}PROGRAM[program1]/program"] \
-            = template[f"{src}PROGRAM[program1]/program"]
-        template[f"{trg}PROGRAM[program1]/program/@version"] \
-            = template[f"{src}PROGRAM[program1]/program/@version"]
-        template[f"{trg}protocol_name"] \
-            = str(np.random.choice(["bas", "geiser", "gault", "cameca", "other"], 1)[0])
+        template[f"{trg}PROGRAM[program1]/program"] = template[
+            f"{src}PROGRAM[program1]/program"
+        ]
+        template[f"{trg}PROGRAM[program1]/program/@version"] = template[
+            f"{src}PROGRAM[program1]/program/@version"
+        ]
+        template[f"{trg}protocol_name"] = str(
+            np.random.choice(["bas", "geiser", "gault", "cameca", "other"], 1)[0]
+        )
         template[f"{trg}parameter"] = "n/a"
         template[f"{trg}crystallographic_calibration"] = "n/a"
         return template
@@ -494,10 +577,12 @@ def emulate_ranging(self, template: dict) -> dict:
         # print("Parsing ranging...")
         trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/ranging/"
         src = f"/ENTRY[entry{self.entry_id}]/atom_probe/control_software/"
-        template[f"{trg}PROGRAM[program1]/program"] \
-            = template[f"{src}PROGRAM[program1]/program"]
-        template[f"{trg}PROGRAM[program1]/program/@version"] \
-            = template[f"{src}PROGRAM[program1]/program/@version"]
+        template[f"{trg}PROGRAM[program1]/program"] = template[
+            f"{src}PROGRAM[program1]/program"
+        ]
+        template[f"{trg}PROGRAM[program1]/program/@version"] = template[
f"{src}PROGRAM[program1]/program/@version" + ] return template def emulate_random_input_from_eln(self, template: dict) -> dict: @@ -545,13 +630,17 @@ def synthesize(self, template: dict) -> dict: # heavy numerical data, here the synthesized "measurement" data prefix = f"/ENTRY[entry{self.entry_id}]/atom_probe/" trg = f"{prefix}reconstruction/" - template[f"{trg}reconstructed_positions"] \ - = {"compress": np.asarray(self.xyz, np.float32), "strength": 1} + template[f"{trg}reconstructed_positions"] = { + "compress": np.asarray(self.xyz, np.float32), + "strength": 1, + } template[f"{trg}reconstructed_positions/@units"] = "nm" trg = f"{prefix}mass_to_charge_conversion/" - template[f"{trg}mass_to_charge"] \ - = {"compress": np.asarray(self.m_z, np.float32), "strength": 1} + template[f"{trg}mass_to_charge"] = { + "compress": np.asarray(self.m_z, np.float32), + "strength": 1, + } template[f"{trg}mass_to_charge/@units"] = "Da" return template diff --git a/pynxtools/dataconverter/readers/apm/utils/apm_load_deployment_specifics.py b/pynxtools/dataconverter/readers/apm/utils/apm_load_deployment_specifics.py index 87dc05950..64807e2dd 100644 --- a/pynxtools/dataconverter/readers/apm/utils/apm_load_deployment_specifics.py +++ b/pynxtools/dataconverter/readers/apm/utils/apm_load_deployment_specifics.py @@ -23,20 +23,27 @@ import yaml -from pynxtools.dataconverter.readers.apm.map_concepts.apm_deployment_specifics_to_nx_map \ - import NxApmDeploymentSpecificInput +from pynxtools.dataconverter.readers.apm.map_concepts.apm_deployment_specifics_to_nx_map import ( + NxApmDeploymentSpecificInput, +) -from pynxtools.dataconverter.readers.shared.map_concepts.mapping_functors \ - import apply_modifier, variadic_path_to_specific_path +from pynxtools.dataconverter.readers.shared.map_concepts.mapping_functors import ( + apply_modifier, + variadic_path_to_specific_path, +) class NxApmNomadOasisConfigurationParser: # pylint: disable=too-few-public-methods """Parse deployment specific configuration.""" def __init__(self, file_name: str, entry_id: int): - print(f"Extracting data from deployment specific configuration file: {file_name}") - if (file_name.rsplit('/', 1)[-1].endswith(".oasis.specific.yaml") - or file_name.endswith(".oasis.specific.yml")) and entry_id > 0: + print( + f"Extracting data from deployment specific configuration file: {file_name}" + ) + if ( + file_name.rsplit("/", 1)[-1].endswith(".oasis.specific.yaml") + or file_name.endswith(".oasis.specific.yml") + ) and entry_id > 0: self.entry_id = entry_id self.file_name = file_name with open(self.file_name, "r", encoding="utf-8") as stream: diff --git a/pynxtools/dataconverter/readers/apm/utils/apm_load_generic_eln.py b/pynxtools/dataconverter/readers/apm/utils/apm_load_generic_eln.py index ed36eec23..ec5b0fc0e 100644 --- a/pynxtools/dataconverter/readers/apm/utils/apm_load_generic_eln.py +++ b/pynxtools/dataconverter/readers/apm/utils/apm_load_generic_eln.py @@ -25,14 +25,19 @@ from ase.data import chemical_symbols -from pynxtools.dataconverter.readers.apm.map_concepts.apm_eln_to_nx_map \ - import NxApmElnInput, NxUserFromListOfDict +from pynxtools.dataconverter.readers.apm.map_concepts.apm_eln_to_nx_map import ( + NxApmElnInput, + NxUserFromListOfDict, +) -from pynxtools.dataconverter.readers.shared.map_concepts.mapping_functors \ - import variadic_path_to_specific_path, apply_modifier +from pynxtools.dataconverter.readers.shared.map_concepts.mapping_functors import ( + variadic_path_to_specific_path, + apply_modifier, +) -from 
-from pynxtools.dataconverter.readers.apm.utils.apm_parse_composition_table \
-    import parse_composition_table
+from pynxtools.dataconverter.readers.apm.utils.apm_parse_composition_table import (
+    parse_composition_table,
+)


 class NxApmNomadOasisElnSchemaParser:  # pylint: disable=too-few-public-methods
@@ -55,8 +60,10 @@ class NxApmNomadOasisElnSchemaParser:  # pylint: disable=too-few-public-methods

     def __init__(self, file_name: str, entry_id: int):
         print(f"Extracting data from ELN file: {file_name}")
-        if (file_name.rsplit('/', 1)[-1].startswith("eln_data")
-                or file_name.startswith("eln_data")) and entry_id > 0:
+        if (
+            file_name.rsplit("/", 1)[-1].startswith("eln_data")
+            or file_name.startswith("eln_data")
+        ) and entry_id > 0:
             self.entry_id = entry_id
             self.file_name = file_name
             with open(self.file_name, "r", encoding="utf-8") as stream:
@@ -73,11 +80,20 @@ def parse_sample_composition(self, template: dict) -> dict:
         if isinstance(self.yml[src], list):
             dct = parse_composition_table(self.yml[src])

-            prfx = f"/ENTRY[entry{self.entry_id}]/sample/" \
-                   f"CHEMICAL_COMPOSITION[chemical_composition]"
+            prfx = (
+                f"/ENTRY[entry{self.entry_id}]/sample/"
+                f"CHEMICAL_COMPOSITION[chemical_composition]"
+            )
             unit = "at.-%"  # the assumed default unit
             if "normalization" in dct:
-                if dct["normalization"] in ["%", "at%", "at-%", "at.-%", "ppm", "ppb"]:
+                if dct["normalization"] in [
+                    "%",
+                    "at%",
+                    "at-%",
+                    "at.-%",
+                    "ppm",
+                    "ppb",
+                ]:
                     unit = "at.-%"
                     template[f"{prfx}/normalization"] = "atom_percent"
                 elif dct["normalization"] in ["wt%", "wt-%", "wt.-%"]:
@@ -117,7 +133,9 @@ def parse_user_section(self, template: dict) -> dict:
                 # table and check if we can find these
                 for nx_path, modifier in NxUserFromListOfDict.items():
                     if nx_path not in ("IGNORE", "UNCLEAR"):
-                        trg = variadic_path_to_specific_path(nx_path, identifier)
+                        trg = variadic_path_to_specific_path(
+                            nx_path, identifier
+                        )
                         res = apply_modifier(modifier, user_dict)
                         if res is not None:
                             template[trg] = res
@@ -139,20 +157,25 @@ def parse_laser_pulser_details(self, template: dict) -> dict:
         if all(isinstance(entry, dict) for entry in self.yml[src]) is True:
             laser_id = 1
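# Sketch of the ELN convention handled in the SOURCE block just below:
# quantities arrive from the parsed YAML as {"value": ..., "unit": ...}
# dictionaries and are split into a NeXus field plus its @units attribute.
# The paths and values here are hypothetical.
laser_demo = {"pulse_energy": {"value": 1.2e-11, "unit": "J"}}
template_demo = {}
quant_demo = laser_demo["pulse_energy"]
if ("value" in quant_demo) and ("unit" in quant_demo):
    template_demo["/ENTRY[entry1]/atom_probe/pulser/SOURCE[source1]/pulse_energy"] = (
        quant_demo["value"]
    )
    template_demo[
        "/ENTRY[entry1]/atom_probe/pulser/SOURCE[source1]/pulse_energy/@units"
    ] = quant_demo["unit"]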
             # custom schema delivers a list of dictionaries...
-            trg = f"/ENTRY[entry{self.entry_id}]/atom_probe/pulser" \
-                  f"/SOURCE[source{laser_id}]"
+            trg = (
+                f"/ENTRY[entry{self.entry_id}]/atom_probe/pulser"
+                f"/SOURCE[source{laser_id}]"
+            )
             for laser_dict in self.yml[src]:
                 if "name" in laser_dict.keys():
                     template[f"{trg}/name"] = laser_dict["name"]
                 quantities = ["power", "pulse_energy", "wavelength"]
                 for quant in quantities:
                     if isinstance(laser_dict[quant], dict):
-                        if ("value" in laser_dict[quant].keys()) \
-                                and ("unit" in laser_dict[quant].keys()):
-                            template[f"{trg}/{quant}"] \
-                                = laser_dict[quant]["value"]
-                            template[f"{trg}/{quant}/@units"] \
-                                = laser_dict[quant]["unit"]
+                        if ("value" in laser_dict[quant].keys()) and (
+                            "unit" in laser_dict[quant].keys()
+                        ):
+                            template[f"{trg}/{quant}"] = laser_dict[quant][
+                                "value"
+                            ]
+                            template[f"{trg}/{quant}/@units"] = laser_dict[
+                                quant
+                            ]["unit"]
                 laser_id += 1
         return template
diff --git a/pynxtools/dataconverter/readers/apm/utils/apm_load_ranging.py b/pynxtools/dataconverter/readers/apm/utils/apm_load_ranging.py
index c2e3ca91f..7cf345ca6 100644
--- a/pynxtools/dataconverter/readers/apm/utils/apm_load_ranging.py
+++ b/pynxtools/dataconverter/readers/apm/utils/apm_load_ranging.py
@@ -27,12 +27,16 @@

 # ase encodes the zeroth entry as the unknown element X to have
 # atom_numbers all starting with 1 up to len(chemical_symbols) - 1
-from ifes_apt_tc_data_modeling.utils.utils \
-    import create_isotope_vector, isotope_vector_to_nuclid_list, \
-    isotope_vector_to_human_readable_name
+from ifes_apt_tc_data_modeling.utils.utils import (
+    create_isotope_vector,
+    isotope_vector_to_nuclid_list,
+    isotope_vector_to_human_readable_name,
+)

-from ifes_apt_tc_data_modeling.utils.definitions \
-    import MAX_NUMBER_OF_ATOMS_PER_ION, MQ_EPSILON
+from ifes_apt_tc_data_modeling.utils.definitions import (
+    MAX_NUMBER_OF_ATOMS_PER_ION,
+    MQ_EPSILON,
+)

 from ifes_apt_tc_data_modeling.rng.rng_reader import ReadRngFileFormat

@@ -40,21 +44,26 @@

 from ifes_apt_tc_data_modeling.fig.fig_reader import ReadFigTxtFileFormat

-from pynxtools.dataconverter.readers.apm.utils.apm_versioning \
-    import NX_APM_EXEC_NAME, NX_APM_EXEC_VERSION
+from pynxtools.dataconverter.readers.apm.utils.apm_versioning import (
+    NX_APM_EXEC_NAME,
+    NX_APM_EXEC_VERSION,
+)


 def add_unknown_iontype(template: dict, entry_id: int) -> dict:
     """Add default unknown iontype."""
     # all unidentifiable ions are mapped on the unknown type
-    trg = f"/ENTRY[entry{entry_id}]/atom_probe/ranging/" \
-          f"peak_identification/ION[ion0]/"
+    trg = (
+        f"/ENTRY[entry{entry_id}]/atom_probe/ranging/" f"peak_identification/ION[ion0]/"
+    )
     ivec = create_isotope_vector([])
-    template[f"{trg}isotope_vector"] \
-        = np.reshape(np.asarray(ivec, np.uint16), (1, MAX_NUMBER_OF_ATOMS_PER_ION))
+    template[f"{trg}isotope_vector"] = np.reshape(
+        np.asarray(ivec, np.uint16), (1, MAX_NUMBER_OF_ATOMS_PER_ION)
+    )
     template[f"{trg}charge_state"] = np.int8(0)
-    template[f"{trg}mass_to_charge_range"] \
-        = np.reshape(np.asarray([0.0, MQ_EPSILON], np.float32), (1, 2))
+    template[f"{trg}mass_to_charge_range"] = np.reshape(
+        np.asarray([0.0, MQ_EPSILON], np.float32), (1, 2)
+    )
     template[f"{trg}mass_to_charge_range/@units"] = "Da"
     nuclid_list = isotope_vector_to_nuclid_list(ivec)
     template[f"{trg}nuclid_list"] = np.asarray(nuclid_list, np.uint16)
@@ -63,7 +72,9 @@ def add_unknown_iontype(template: dict, entry_id: int) -> dict:
     return template


-def add_standardize_molecular_ions(ion_lst: list, template: dict, entry_id: int) -> dict:
+def add_standardize_molecular_ions(
+    ion_lst: list, template: dict, entry_id: int
+) -> dict:
     """Added standard formatted molecular ion entries."""
     ion_id = 1
     trg = f"/ENTRY[entry{entry_id}]/atom_probe/ranging/peak_identification/"
@@ -72,40 +83,57 @@ def add_standardize_molecular_ions(ion_lst: list, template: dict, entry_id: int)

         template[f"{path}isotope_vector"] = np.reshape(
             np.asarray(ion.isotope_vector.typed_value, np.uint16),
-            (1, MAX_NUMBER_OF_ATOMS_PER_ION))
+            (1, MAX_NUMBER_OF_ATOMS_PER_ION),
+        )
         template[f"{path}charge_state"] = np.int8(ion.charge_state.typed_value)
-        template[f"{path}mass_to_charge_range"] \
-            = np.array(ion.ranges.typed_value, np.float32)
+        template[f"{path}mass_to_charge_range"] = np.array(
+            ion.ranges.typed_value, np.float32
+        )
         template[f"{path}mass_to_charge_range/@units"] = "Da"  # ion.ranges.unit
         template[f"{path}nuclid_list"] = ion.nuclid_list.typed_value
         template[f"{path}name"] = ion.name.typed_value

         path = f"{trg}ION[ion{ion_id}]/charge_state_model/"
-        template[f"{path}min_abundance"] \
-            = np.float64(ion.charge_state_model["min_abundance"])
-        template[f"{path}min_abundance_product"] \
-            = np.float64(ion.charge_state_model["min_abundance_product"])
-        template[f"{path}min_half_life"] \
-            = np.float64(ion.charge_state_model["min_half_life"])
+        template[f"{path}min_abundance"] = np.float64(
+            ion.charge_state_model["min_abundance"]
+        )
+        template[f"{path}min_abundance_product"] = np.float64(
+            ion.charge_state_model["min_abundance_product"]
+        )
+        template[f"{path}min_half_life"] = np.float64(
+            ion.charge_state_model["min_half_life"]
+        )
         template[f"{path}min_half_life/@units"] = "s"
-        template[f"{path}sacrifice_isotopic_uniqueness"] \
-            = np.uint8(ion.charge_state_model["sacrifice_isotopic_uniqueness"])
-        template[f"{path}isotope_matrix"] \
-            = {"compress": np.array(ion.charge_state_model["isotope_matrix"],
-                                    np.uint16), "strength": 1}
-        template[f"{path}charge_state_vector"] \
-            = {"compress": np.array(ion.charge_state_model["charge_state_vector"],
-                                    np.int8), "strength": 1}
-        template[f"{path}mass_vector"] \
-            = {"compress": np.array(ion.charge_state_model["mass_vector"],
-                                    np.float64), "strength": 1}
+        template[f"{path}sacrifice_isotopic_uniqueness"] = np.uint8(
+            ion.charge_state_model["sacrifice_isotopic_uniqueness"]
+        )
+        template[f"{path}isotope_matrix"] = {
+            "compress": np.array(ion.charge_state_model["isotope_matrix"], np.uint16),
+            "strength": 1,
+        }
+        template[f"{path}charge_state_vector"] = {
+            "compress": np.array(
+                ion.charge_state_model["charge_state_vector"], np.int8
+            ),
+            "strength": 1,
+        }
+        template[f"{path}mass_vector"] = {
+            "compress": np.array(ion.charge_state_model["mass_vector"], np.float64),
+            "strength": 1,
+        }
         template[f"{path}mass_vector/@units"] = "u"
-        template[f"{path}natural_abundance_product_vector"] \
-            = {"compress": np.array(ion.charge_state_model["nat_abun_prod_vector"],
-                                    np.float64), "strength": 1}
-        template[f"{path}min_half_life_vector"] \
-            = {"compress": np.array(ion.charge_state_model["min_half_life_vector"],
-                                    np.float64), "strength": 1}
+        template[f"{path}natural_abundance_product_vector"] = {
+            "compress": np.array(
+                ion.charge_state_model["nat_abun_prod_vector"], np.float64
+            ),
+            "strength": 1,
+        }
+        template[f"{path}min_half_life_vector"] = {
+            "compress": np.array(
+                ion.charge_state_model["min_half_life_vector"], np.float64
+            ),
+            "strength": 1,
+        }
         template[f"{path}min_half_life_vector/@units"] = "s"
         ion_id += 1

@@ -127,11 +155,11 @@ def extract_data_from_rng_file(file_name: str, template: dict, entry_id: int) ->
     rangefile = ReadRngFileFormat(file_name)

     # ion indices are on the interval [0, 256)
-    assert len(rangefile.rng["molecular_ions"]) <= np.iinfo(np.uint8).max + 1, \
-        "Current implementation does not support more than 256 ion types"
+    assert (
+        len(rangefile.rng["molecular_ions"]) <= np.iinfo(np.uint8).max + 1
+    ), "Current implementation does not support more than 256 ion types"

-    add_standardize_molecular_ions(
-        rangefile.rng["molecular_ions"], template, entry_id)
+    add_standardize_molecular_ions(rangefile.rng["molecular_ions"], template, entry_id)
     return template

@@ -147,11 +175,11 @@ def extract_data_from_rrng_file(file_name: str, template: dict, entry_id) -> dic
     rangefile = ReadRrngFileFormat(file_name)

     # ion indices are on the interval [0, 256)
-    assert len(rangefile.rrng["molecular_ions"]) <= np.iinfo(np.uint8).max + 1, \
-        "Current implementation does not support more than 256 ion types"
+    assert (
+        len(rangefile.rrng["molecular_ions"]) <= np.iinfo(np.uint8).max + 1
+    ), "Current implementation does not support more than 256 ion types"

-    add_standardize_molecular_ions(
-        rangefile.rrng["molecular_ions"], template, entry_id)
+    add_standardize_molecular_ions(rangefile.rrng["molecular_ions"], template, entry_id)
     return template

@@ -162,11 +190,11 @@ def extract_data_from_fig_txt_file(file_name: str, template: dict, entry_id) ->
     rangefile = ReadFigTxtFileFormat(file_name)

     # ion indices are on the interval [0, 256)
-    assert len(rangefile.fig["molecular_ions"]) <= np.iinfo(np.uint8).max + 1, \
-        "Current implementation does not support more than 256 ion types"
+    assert (
+        len(rangefile.fig["molecular_ions"]) <= np.iinfo(np.uint8).max + 1
+    ), "Current implementation does not support more than 256 ion types"

-    add_standardize_molecular_ions(
-        rangefile.fig["molecular_ions"], template, entry_id)
+    add_standardize_molecular_ions(rangefile.fig["molecular_ions"], template, entry_id)
     return template

@@ -181,7 +209,7 @@ def __init__(self, file_name: str, entry_id: int):
         self.meta["entry_id"] = entry_id
         index = file_name.lower().rfind(".")
         if index >= 0:
-            mime_type = file_name.lower()[index + 1::]
+            mime_type = file_name.lower()[index + 1 : :]
             self.meta["file_format"] = mime_type

     def update_atom_types_ranging_definitions_based(self, template: dict) -> dict:
@@ -190,12 +218,16 @@ def update_atom_types_ranging_definitions_based(self, template: dict) -> dict:
         prefix = f"/ENTRY[entry{self.meta['entry_id']}]/atom_probe/ranging/"
         if f"{prefix}number_of_ion_types" in template.keys():
             number_of_ion_types = template[f"{prefix}number_of_ion_types"]
-            print(f"Auto-detecting elements from ranging {number_of_ion_types} ion types...")
+            print(
+                f"Auto-detecting elements from ranging {number_of_ion_types} ion types..."
+            )

         unique_atom_numbers = set()
         max_atom_number = len(chemical_symbols) - 1
-        prefix = f"/ENTRY[entry{self.meta['entry_id']}]/atom_probe/" \
-                 f"ranging/peak_identification/"
+        prefix = (
+            f"/ENTRY[entry{self.meta['entry_id']}]/atom_probe/"
+            f"ranging/peak_identification/"
+        )
         for ion_id in np.arange(1, number_of_ion_types):
             trg = f"{prefix}ION[ion{ion_id}]/nuclid_list"
             if trg in template.keys():
@@ -231,8 +263,10 @@ def report(self, template: dict) -> dict:
         # background_quantification data are not available in RNG/RRNG files
         # peak_search_and_deconvolution data are not available in RNG/RRNG files

-        trg = f"/ENTRY[entry{self.meta['entry_id']}]/atom_probe/" \
-              f"ranging/peak_identification/"
+        trg = (
+            f"/ENTRY[entry{self.meta['entry_id']}]/atom_probe/"
+            f"ranging/peak_identification/"
+        )
         template[f"{trg}PROGRAM[program1]/program"] = NX_APM_EXEC_NAME
         template[f"{trg}PROGRAM[program1]/program/@version"] = NX_APM_EXEC_VERSION

@@ -241,19 +275,16 @@ def report(self, template: dict) -> dict:
         if self.meta["file_name"] != "" and self.meta["file_format"] != "none":
             if self.meta["file_format"] == "rng":
                 extract_data_from_rng_file(
-                    self.meta["file_name"],
-                    template,
-                    self.meta["entry_id"])
+                    self.meta["file_name"], template, self.meta["entry_id"]
+                )
             elif self.meta["file_format"] == "rrng":
                 extract_data_from_rrng_file(
-                    self.meta["file_name"],
-                    template,
-                    self.meta["entry_id"])
+                    self.meta["file_name"], template, self.meta["entry_id"]
+                )
             elif self.meta["file_format"] == "txt":
                 extract_data_from_fig_txt_file(
-                    self.meta["file_name"],
-                    template,
-                    self.meta["entry_id"])
+                    self.meta["file_name"], template, self.meta["entry_id"]
+                )
             else:
                 trg = f"/ENTRY[entry{self.meta['entry_id']}]/atom_probe/ranging/"
                 template[f"{trg}number_of_ion_types"] = 1
diff --git a/pynxtools/dataconverter/readers/apm/utils/apm_load_reconstruction.py b/pynxtools/dataconverter/readers/apm/utils/apm_load_reconstruction.py
index 36e6eb20f..e8027d1b2 100644
--- a/pynxtools/dataconverter/readers/apm/utils/apm_load_reconstruction.py
+++ b/pynxtools/dataconverter/readers/apm/utils/apm_load_reconstruction.py
@@ -35,15 +35,19 @@ def extract_data_from_pos_file(file_name: str, prefix: str, template: dict) -> d
     trg = f"{prefix}reconstruction/"
     xyz = posfile.get_reconstructed_positions()
-    template[f"{trg}reconstructed_positions"] \
-        = {"compress": np.array(xyz.typed_value, np.float32), "strength": 1}
+    template[f"{trg}reconstructed_positions"] = {
+        "compress": np.array(xyz.typed_value, np.float32),
+        "strength": 1,
+    }
     template[f"{trg}reconstructed_positions/@units"] = xyz.unit
     del xyz

     trg = f"{prefix}mass_to_charge_conversion/"
     m_z = posfile.get_mass_to_charge_state_ratio()
-    template[f"{trg}mass_to_charge"] \
-        = {"compress": np.array(m_z.typed_value, np.float32), "strength": 1}
+    template[f"{trg}mass_to_charge"] = {
+        "compress": np.array(m_z.typed_value, np.float32),
+        "strength": 1,
+    }
     template[f"{trg}mass_to_charge/@units"] = m_z.unit
     del m_z
     return template
@@ -56,15 +60,19 @@ def extract_data_from_epos_file(file_name: str, prefix: str, template: dict) ->
     trg = f"{prefix}reconstruction/"
     xyz = eposfile.get_reconstructed_positions()
-    template[f"{trg}reconstructed_positions"] \
-        = {"compress": np.array(xyz.typed_value, np.float32), "strength": 1}
+    template[f"{trg}reconstructed_positions"] = {
+        "compress": np.array(xyz.typed_value, np.float32),
+        "strength": 1,
+    }
     template[f"{trg}reconstructed_positions/@units"] = xyz.unit
     del xyz

     trg = f"{prefix}mass_to_charge_conversion/"
     m_z = eposfile.get_mass_to_charge_state_ratio()
-    template[f"{trg}mass_to_charge"] \
-        = {"compress": np.array(m_z.typed_value, np.float32), "strength": 1}
+    template[f"{trg}mass_to_charge"] = {
+        "compress": np.array(m_z.typed_value, np.float32),
+        "strength": 1,
+    }
     template[f"{trg}mass_to_charge/@units"] = m_z.unit
     del m_z

@@ -130,15 +138,19 @@ def extract_data_from_apt_file(file_name: str, prefix: str, template: dict) -> d
     trg = f"{prefix}reconstruction/"
     xyz = aptfile.get_named_quantity("Position")
-    template[f"{trg}reconstructed_positions"] \
-        = {"compress": np.array(xyz.typed_value, np.float32), "strength": 1}
+    template[f"{trg}reconstructed_positions"] = {
+        "compress": np.array(xyz.typed_value, np.float32),
+        "strength": 1,
+    }
     template[f"{trg}reconstructed_positions/@units"] = xyz.unit
     del xyz

     trg = f"{prefix}mass_to_charge_conversion/"
     m_z = aptfile.get_named_quantity("Mass")
-    template[f"{trg}mass_to_charge"] \
-        = {"compress": np.array(m_z.typed_value, np.float32), "strength": 1}
+    template[f"{trg}mass_to_charge"] = {
+        "compress": np.array(m_z.typed_value, np.float32),
+        "strength": 1,
+    }
     template[f"{trg}mass_to_charge/@units"] = m_z.unit
     del m_z

@@ -158,7 +170,7 @@ def __init__(self, file_name: str, entry_id: int):
         self.file_name = file_name
         index = file_name.lower().rfind(".")
         if index >= 0:
-            mime_type = file_name.lower()[index + 1::]
+            mime_type = file_name.lower()[index + 1 : :]
             if mime_type in ["pos", "epos", "apt"]:
                 self.file_format = mime_type
         self.entry_id = entry_id
@@ -172,12 +184,9 @@ def report(self, template: dict) -> dict:
         prfx = f"/ENTRY[entry{self.entry_id}]/atom_probe/"
         if self.file_name != "" and self.file_format != "none":
             if self.file_format == "pos":
-                extract_data_from_pos_file(
-                    self.file_name, prfx, template)
+                extract_data_from_pos_file(self.file_name, prfx, template)
             if self.file_format == "epos":
-                extract_data_from_epos_file(
-                    self.file_name, prfx, template)
+                extract_data_from_epos_file(self.file_name, prfx, template)
             if self.file_format == "apt":
-                extract_data_from_apt_file(
-                    self.file_name, prfx, template)
+                extract_data_from_apt_file(self.file_name, prfx, template)
         return template
diff --git a/pynxtools/dataconverter/readers/apm/utils/apm_parse_composition_table.py b/pynxtools/dataconverter/readers/apm/utils/apm_parse_composition_table.py
index cf8f2bc56..4e78da5ad 100644
--- a/pynxtools/dataconverter/readers/apm/utils/apm_parse_composition_table.py
+++ b/pynxtools/dataconverter/readers/apm/utils/apm_parse_composition_table.py
@@ -98,24 +98,25 @@ def parse_human_readable_composition_information(eln_input):
     if (element_symbol != "X") and (element_symbol in chemical_symbols):
         # case: "Mo"
         if len(args) == 1:
-            return parse_human_readable_composition_case_one(
-                element_symbol)
+            return parse_human_readable_composition_case_one(element_symbol)
         # case: "Mo matrix" or "Mo 98.0", always assuming at.-%!
         if len(args) == 2:
-            return parse_human_readable_composition_case_two(
-                args, element_symbol)
+            return parse_human_readable_composition_case_two(args, element_symbol)
         # case: "Mo 98 wt.-%", selectable at.-%, ppm, ppb, or wt.-%!
         if len(args) == 3:
             return parse_human_readable_composition_case_three(
-                eln_input, args, element_symbol)
+                eln_input, args, element_symbol
+            )
        # case: "Mo 98 +- 2", always assuming at.-%!
         if len(args) == 4:
             return parse_human_readable_composition_case_four(
-                eln_input, element_symbol)
+                eln_input, element_symbol
+            )
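# Sketch (a simplified stand-in for the case_one..case_five helpers used
# around here, not the real parser): the number of whitespace-separated
# tokens selects which human-readable composition pattern applies.
def classify_composition_entry_sketch(entry: str) -> str:
    token_count = len(entry.split())
    return {
        1: "element only",                # "Mo"
        2: "matrix flag or plain value",  # "Mo matrix" / "Mo 98.0"
        3: "value with unit",             # "Mo 98 wt.-%"
        4: "value with uncertainty",      # "Mo 98 +- 2"
        5: "value, unit, uncertainty",    # "Mo 98 wt.-% +- 2"
    }.get(token_count, "unknown")

assert classify_composition_entry_sketch("Mo 98 wt.-% +- 2") == "value, unit, uncertainty"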
         # case: "Mo 98 wt.-% +- 2", selectable at.-%, ppm, ppb, or wt.-%!
         if len(args) == 5:
             return parse_human_readable_composition_case_five(
-                eln_input, args, element_symbol)
+                eln_input, args, element_symbol
+            )
     return (None, None, None, None, None)

@@ -124,8 +125,13 @@ def parse_composition_table(composition_list):
     composition_table = {}
     # check that there are no contradictions or inconsistencies
     for entry in composition_list:
-        instruction, element, composition, stdev, normalization \
-            = parse_human_readable_composition_information(entry)
+        (
+            instruction,
+            element,
+            composition,
+            stdev,
+            normalization,
+        ) = parse_human_readable_composition_information(entry)
         # print(f"{instruction}, {element}, {composition}, {stdev}, {normalization}")

         if instruction == "add_element":
@@ -138,25 +144,31 @@ def parse_composition_table(composition_list):
             # percent normalization in a composition_table
             if normalization is not None:
                 if normalization != composition_table["normalization"]:
-                    raise ValueError("Composition list is contradicting as it \
-                        mixes atom- with weight-percent normalization!")
+                    raise ValueError(
+                        "Composition list is contradicting as it \
+                        mixes atom- with weight-percent normalization!"
+                    )
             if element not in composition_table:
                 composition_table[element] = (composition, stdev)
             else:
-                raise ValueError("Composition list is incorrectly formatted as if has \
-                    at least multiple lines for the same element!")
+                raise ValueError(
+                    "Composition list is incorrectly formatted as it has \
+                    at least multiple lines for the same element!"
+                )
             continue

         if instruction == "define_matrix":
             if element not in composition_table:
                 composition_table[element] = (None, None)
                 # because the fraction is unclear at this point
             else:
-                raise ValueError("Composition list is contradicting as it includes \
-                    at least two statements what the matrix should be!")
+                raise ValueError(
+                    "Composition list is contradicting as it includes \
+                    at least two statements what the matrix should be!"
+                )

     # determine remaining fraction
-    total_fractions = 0.
+    total_fractions = 0.0
     remainder_element = None
     for keyword, tpl in composition_table.items():
         if keyword != "normalization":
@@ -166,8 +178,10 @@ def parse_composition_table(composition_list):
                 remainder_element = keyword
     # print(f"Total fractions {total_fractions}, remainder element {remainder_element}")
     if remainder_element is None:
-        raise ValueError("Composition list inconsistent because either fractions for \
-            elements do not add up to 100. or no symbol for matrix defined!")
+        raise ValueError(
+            "Composition list inconsistent because either fractions for \
+            elements do not add up to 100. or no symbol for matrix defined!"
+        )
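# Sketch (assumed values) of the remainder bookkeeping completed just below:
# the element declared as matrix carries no explicit fraction and receives
# whatever is left once all stated fractions are summed.
composition_demo = {"Fe": (None, None), "C": (2.0, 0.5), "Mn": (1.0, None)}
total_demo = sum(v[0] for v in composition_demo.values() if v[0] is not None)
matrix_demo = next(k for k, v in composition_demo.items() if v[0] is None)
composition_demo[matrix_demo] = (1.0e2 - total_demo, None)
assert composition_demo["Fe"] == (97.0, None)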
     if composition_table:  # means != {}
         composition_table[remainder_element] = (1.0e2 - total_fractions, None)
diff --git a/pynxtools/dataconverter/readers/apm/utils/apm_versioning.py b/pynxtools/dataconverter/readers/apm/utils/apm_versioning.py
index a7e9a39a9..114542013 100644
--- a/pynxtools/dataconverter/readers/apm/utils/apm_versioning.py
+++ b/pynxtools/dataconverter/readers/apm/utils/apm_versioning.py
@@ -19,13 +19,13 @@

 # pylint: disable=no-member

-from pynxtools.dataconverter.readers.shared.shared_utils \
-    import get_repo_last_commit
+from pynxtools.dataconverter.readers.shared.shared_utils import get_repo_last_commit


 NX_APM_ADEF_NAME = "NXapm"
-NX_APM_ADEF_VERSION = "nexus-fairmat-proposal successor of " \
-    "9636feecb79bb32b828b1a9804269573256d7696"
+NX_APM_ADEF_VERSION = (
+    "nexus-fairmat-proposal successor of " "9636feecb79bb32b828b1a9804269573256d7696"
+)
 # based on https://fairmat-experimental.github.io/nexus-fairmat-proposal
 NX_APM_EXEC_NAME = "dataconverter/readers/apm/reader.py"
 NX_APM_EXEC_VERSION = get_repo_last_commit()
diff --git a/pynxtools/dataconverter/readers/base/reader.py b/pynxtools/dataconverter/readers/base/reader.py
index e292f99af..8f1f51fe6 100644
--- a/pynxtools/dataconverter/readers/base/reader.py
+++ b/pynxtools/dataconverter/readers/base/reader.py
@@ -40,10 +40,12 @@ class BaseReader(ABC):
     supported_nxdls = [""]

     @abstractmethod
-    def read(self,
-             template: dict = None,
-             file_paths: Tuple[str] = None,
-             objects: Tuple[Any] = None) -> dict:
+    def read(
+        self,
+        template: dict = None,
+        file_paths: Tuple[str] = None,
+        objects: Tuple[Any] = None,
+    ) -> dict:
         """Reads data from given file and returns a filled template dictionary"""
         return template
diff --git a/pynxtools/dataconverter/readers/ellips/mock.py b/pynxtools/dataconverter/readers/ellips/mock.py
index 696da48ae..05b790b67 100644
--- a/pynxtools/dataconverter/readers/ellips/mock.py
+++ b/pynxtools/dataconverter/readers/ellips/mock.py
@@ -22,32 +22,34 @@
 from pynxtools.dataconverter.helpers import extract_atom_types


-class MockEllips():
-    """ A generic class for generating duplicate outputs for ELLIPSOMETRY
+class MockEllips:
+    """A generic class for generating duplicate outputs for ELLIPSOMETRY

-        Contains:
-        - mock_sample:
-            Chooses random entry from sample_list, overwrites sample_name
-            and extracts atom_types
-        - mock_chemical_formula:
-            Creates a list of chemical formulas consisting of two atom types
-        - modify_spectral_range:
-            Change spectral range (i.e. wavelength array) and step size.
+    - mock_angles:
+        Change value and number of incident angles
+    - choose_data_type:
+        Chooses random entry from data_types
+    - mock_signals:
+        Mock data if data_type is Psi/Delta or tan(Psi)/cos(Delta)
+    - mock_mueller_matrix:
+        Mock data if data_type is Mueller matrix
+    - mock_template:
+        Creates mock ellipsometry data (by calling the above routines)
     """
 
     def __init__(self, data_template) -> None:
         self.data = data_template["measured_data"]
-        self.wavelength = data_template["data_collection/NAME_spectrum[wavelength_spectrum]"]
+        self.wavelength = data_template[
+            "data_collection/NAME_spectrum[wavelength_spectrum]"
+        ]
         self.atom_types = data_template["atom_types"]
         self.sample_list: list = []
         self.data_types = ["Psi/Delta", "tan(Psi)/cos(Delta)", "Mueller matrix"]
@@ -55,8 +57,8 @@ def __init__(self, data_template) -> None:
         self.number_of_signals = 0
 
     def mock_sample(self, data_template) -> None:
-        """ Chooses random entry from sample_list, overwrites sample_name
-            and extracts atom_types
+        """Chooses random entry from sample_list, overwrites sample_name
+        and extracts atom_types
         """
         self.mock_chemical_formula()
         name = random.choice(self.sample_list)
@@ -67,8 +69,7 @@ def mock_sample(self, data_template) -> None:
         data_template["experiment_description"] = f"RC2 scan on {name} bulk"
 
     def choose_data_type(self, data_template) -> None:
-        """ Chooses random entry from data_types
-        """
+        """Chooses random entry from data_types"""
         data_type = random.choice(self.data_types)
         data_template["data_type"] = data_type
         if data_type == "Mueller matrix":
@@ -77,7 +78,7 @@ def choose_data_type(self, data_template) -> None:
             self.number_of_signals = 2
 
     def mock_chemical_formula(self) -> None:
-        """ Creates a list of chemical formulas consisting of two atom types """
+        """Creates a list of chemical formulas consisting of two atom types"""
         part_1 = ase.atom.chemical_symbols[1:]
         part_2 = list(range(2, 20, 1))
 
@@ -90,8 +91,7 @@ def mock_chemical_formula(self) -> None:
         self.sample_list.append(chemical_formula)
 
     def mock_angles(self, data_template) -> None:
-        """ Change value and number of incident angles
-        """
+        """Change value and number of incident angles"""
         angle_list = [40, 45, 50, 55, 60, 65, 70, 75, 80]
         for _ in range(random.randrange(1, 4)):
             angle = random.choice(angle_list)
@@ -105,14 +105,12 @@ def mock_angles(self, data_template) -> None:
             self.mock_mueller_matrix(data_template)
 
     def mock_signals(self, data_template) -> None:
-        """ Mock data if data_type is Psi/Delta or tan(Psi)/cos(Delta)
-            considering the (new) number of incident angles
+        """Mock data if data_type is Psi/Delta or tan(Psi)/cos(Delta)
+        considering the (new) number of incident angles
         """
-        my_numpy_array = np.empty([
-            len(self.angles),
-            self.number_of_signals,
-            len(self.wavelength)
-        ])
+        my_numpy_array = np.empty(
+            [len(self.angles), self.number_of_signals, len(self.wavelength)]
+        )
         for index in range(0, len(self.angles)):
             noise = np.random.normal(0, 0.5, self.data[0, 0, :].size)
             my_numpy_array[index] = self.data[0] * random.uniform(0.5, 1.5) + noise
@@ -120,32 +118,31 @@ def mock_signals(self, data_template) -> None:
         data_template["measured_data"] = my_numpy_array
 
     def mock_mueller_matrix(self, data_template) -> None:
-        """ Mock data if data_type is Mueller matrix (i.e. 16 elements/signals)
-            considering the (new) number of incident angles
+        """Mock data if data_type is Mueller matrix (i.e. 16 elements/signals)
+        considering the (new) number of incident angles
         """
-        my_numpy_array = np.empty([
-            len(self.angles),
-            self.number_of_signals,
-            len(self.wavelength)
-        ])
+        my_numpy_array = np.empty(
+            [len(self.angles), self.number_of_signals, len(self.wavelength)]
+        )
        for idx in range(0, len(self.angles)):
-            noise = np.random.normal(0, 0.1, self .data[0, 0, :].size)
+            noise = np.random.normal(0, 0.1, self.data[0, 0, :].size)
             for m_idx in range(1, self.number_of_signals):
-                my_numpy_array[idx][m_idx] = self.data[0][0] * random.uniform(.5, 1.)
+                my_numpy_array[idx][m_idx] = self.data[0][0] * random.uniform(0.5, 1.0)
                 my_numpy_array[idx][m_idx] += noise
             my_numpy_array[idx][0] = my_numpy_array[0][0] / my_numpy_array[0][0]
         data_template["measured_data"] = my_numpy_array
 
     def modify_spectral_range(self, data_template) -> None:
-        """ Change spectral range (i.e. wavlength array) and step size,
-            while length of the wavelength array remains the same.
+        """Change spectral range (i.e. wavelength array) and step size,
+        while the length of the wavelength array remains the same.
         """
         temp = random.uniform(0.25, 23)
-        data_template["data_collection/NAME_spectrum[wavelength_spectrum]"] = \
+        data_template["data_collection/NAME_spectrum[wavelength_spectrum]"] = (
             temp * data_template["data_collection/NAME_spectrum[wavelength_spectrum]"]
+        )
 
     def mock_template(self, data_template) -> None:
-        """ Creates a mock ellipsometry template """
+        """Creates a mock ellipsometry template"""
         self.mock_sample(data_template)
         self.modify_spectral_range(data_template)
         self.choose_data_type(data_template)
diff --git a/pynxtools/dataconverter/readers/ellips/reader.py b/pynxtools/dataconverter/readers/ellips/reader.py
index bd7c8bf19..c2604b462 100644
--- a/pynxtools/dataconverter/readers/ellips/reader.py
+++ b/pynxtools/dataconverter/readers/ellips/reader.py
@@ -29,63 +29,63 @@
 from pynxtools.dataconverter.readers.utils import flatten_and_replace, FlattenSettings
 from pynxtools import get_nexus_version, get_nexus_version_hash
 
-DEFAULT_HEADER = {'sep': '\t', 'skip': 0}
+DEFAULT_HEADER = {"sep": "\t", "skip": 0}
 
 CONVERT_DICT = {
-    'unit': '@units',
-    'Beam_path': 'BEAM_PATH[beam_path]',
-    'Detector': 'DETECTOR[detector]',
-    'Data': 'data_collection',
-    'Derived_parameters': 'derived_parameters',
-    'Environment': 'environment_conditions',
-    'Instrument': 'INSTRUMENT[instrument]',
-    'Sample': 'SAMPLE[sample]',
-    'Sample_stage': 'sample_stage',
-    'User': 'USER[user]',
-    'Instrument/angle_of_incidence': 'INSTRUMENT[instrument]/angle_of_incidence',
-    'Instrument/angle_of_incidence/unit': 'INSTRUMENT[instrument]/angle_of_incidence/@units',
-    'column_names': 'data_collection/column_names',
-    'data_error': 'data_collection/data_error',
-    'depolarization': 'derived_parameters/depolarization',
-    'measured_data': 'data_collection/measured_data',
-    'software': 'software/program',
-    'data_software': 'data_software/program',
+    "unit": "@units",
+    "Beam_path": "BEAM_PATH[beam_path]",
+    "Detector": "DETECTOR[detector]",
+    "Data": "data_collection",
+    "Derived_parameters": "derived_parameters",
+    "Environment": "environment_conditions",
+    "Instrument": "INSTRUMENT[instrument]",
+    "Sample": "SAMPLE[sample]",
+    "Sample_stage": "sample_stage",
+    "User": "USER[user]",
+    "Instrument/angle_of_incidence": "INSTRUMENT[instrument]/angle_of_incidence",
+    "Instrument/angle_of_incidence/unit": "INSTRUMENT[instrument]/angle_of_incidence/@units",
+    "column_names": "data_collection/column_names",
+    "data_error": "data_collection/data_error",
+    "depolarization": "derived_parameters/depolarization",
+    "measured_data": "data_collection/measured_data",
+    "software": "software/program",
+    "data_software": "data_software/program",
 }
 
 CONFIG_KEYS = [
-    'colnames',
-    'derived_parameter_type',
-    'err-var',
-    'filename',
-    'parameters',
-    'plot_name',
-    'sep',
-    'skip',
-    'spectrum_type',
-    'spectrum_unit'
+    "colnames",
+    "derived_parameter_type",
+    "err-var",
+    "filename",
+    "parameters",
+    "plot_name",
+    "sep",
+    "skip",
+    "spectrum_type",
+    "spectrum_unit",
 ]
 
 REPLACE_NESTED = {
-    'Instrument/Beam_path': 'INSTRUMENT[instrument]/BEAM_PATH[beam_path]',
-    'Env_Conditions': 'INSTRUMENT[instrument]/sample_stage/environment_conditions',
-    'Instrument': 'INSTRUMENT[instrument]'
+    "Instrument/Beam_path": "INSTRUMENT[instrument]/BEAM_PATH[beam_path]",
+    "Env_Conditions": "INSTRUMENT[instrument]/sample_stage/environment_conditions",
+    "Instrument": "INSTRUMENT[instrument]",
 }
 
 
 def load_header(filename, default):
-    """ load the yaml description file, and apply defaults from
-        the defalut dict for all keys not found from the file.
+    """Load the yaml description file, and apply defaults from
+    the default dict for all keys not found in the file.
 
-    Parameters:
-        filename: a yaml file containing the definitions
-        default_header: predefined default values
+    Parameters:
+        filename: a yaml file containing the definitions
+        default: predefined default values
 
-    Returns:
-        a dict containing the loaded information
+    Returns:
+        a dict containing the loaded information
     """
-    with open(filename, 'rt', encoding='utf8') as file:
+    with open(filename, "rt", encoding="utf8") as file:
         header = yaml.safe_load(file)
 
     clean_header = header
@@ -94,53 +94,55 @@ def load_header(filename, default):
         if key not in clean_header:
             clean_header[key] = value
 
-    if 'sep' in header:
-        clean_header['sep'] = header['sep'].encode("utf-8").decode("unicode_escape")
+    if "sep" in header:
+        clean_header["sep"] = header["sep"].encode("utf-8").decode("unicode_escape")
 
     return clean_header
 
 
 def load_as_pandas_array(my_file, header):
-    """ Load a CSV output file using the header dict.
-        Use the fields: colnames, skip and sep from the header
-        to instruct the csv reader about:
-        colnames -- column names
-        skip -- how many lines to skip
-        sep -- separator character in the file
-
-        Parameters:
-        my_file  string, file name
-        header   dict header read from a yaml file
-
-        Returns:
-        A pandas array is returned.
+    """Load a CSV output file using the header dict.
+    Use the fields: colnames, skip and sep from the header
+    to instruct the csv reader about:
+    colnames -- column names
+    skip -- how many lines to skip
+    sep -- separator character in the file
+
+    Parameters:
+    my_file  string, file name
+    header   dict header read from a yaml file
+
+    Returns:
+    A pandas array is returned.
""" required_parameters = ("colnames", "skip", "sep") for required_parameter in required_parameters: if required_parameter not in header: - raise ValueError('colnames, skip and sep are required header parameters!') + raise ValueError("colnames, skip and sep are required header parameters!") if not os.path.isfile(my_file): - raise IOError(f'File not found error: {my_file}') - - whole_data = pd.read_csv(my_file, - # use header = None and names to define custom column names - header=None, - names=header['colnames'], - skiprows=header['skip'], - delimiter=header['sep']) + raise IOError(f"File not found error: {my_file}") + + whole_data = pd.read_csv( + my_file, + # use header = None and names to define custom column names + header=None, + names=header["colnames"], + skiprows=header["skip"], + delimiter=header["sep"], + ) return whole_data def populate_header_dict(file_paths): - """ This function creates and populates the header dictionary - reading one or more yaml file. + """This function creates and populates the header dictionary + reading one or more yaml file. - Parameters: - file_paths a list of file paths to be read + Parameters: + file_paths a list of file paths to be read - Returns: - a dict merging the content of all files + Returns: + a dict merging the content of all files """ header = DEFAULT_HEADER @@ -160,9 +162,7 @@ def populate_header_dict(file_paths): def populate_template_dict(header, template): - """The template dictionary is then populated according to the content of header dictionary. - - """ + """The template dictionary is then populated according to the content of header dictionary.""" if "calibration_filename" in header: calibration = load_as_pandas_array(header["calibration_filename"], header) @@ -174,7 +174,7 @@ def populate_template_dict(header, template): dic=header, convert_dict=CONVERT_DICT, replace_nested=REPLACE_NESTED, - ignore_keys=CONFIG_KEYS + ignore_keys=CONFIG_KEYS, ) ) template.update(eln_data_dict) @@ -183,9 +183,7 @@ def populate_template_dict(header, template): def header_labels(header, unique_angles): - """ Define data labels (column names) - - """ + """Define data labels (column names)""" if header["Data"]["data_type"] == "Psi/Delta": labels = {"Psi": [], "Delta": []} @@ -205,9 +203,7 @@ def header_labels(header, unique_angles): def mock_function(header): - """ Mock ellipsometry data - - """ + """Mock ellipsometry data""" mock_header = MockEllips(header) mock_header.mock_template(header) @@ -225,27 +221,19 @@ def mock_function(header): def data_set_dims(whole_data): - """ User defined variables to produce slices of the whole data set - - """ - energy = whole_data['type'].astype(str).values.tolist().count("E") - unique_angles, counts = np.unique(whole_data["angle_of_incidence" - ].to_numpy()[0:energy].astype("int64"), - return_counts=True - ) + """User defined variables to produce slices of the whole data set""" + energy = whole_data["type"].astype(str).values.tolist().count("E") + unique_angles, counts = np.unique( + whole_data["angle_of_incidence"].to_numpy()[0:energy].astype("int64"), + return_counts=True, + ) return unique_angles, counts def parameter_array(whole_data, header, unique_angles, counts): - """ User defined variables to produce slices of the whole data set - - """ - my_data_array = np.empty([ - len(unique_angles), - 1, - counts[0] - ]) + """User defined variables to produce slices of the whole data set""" + my_data_array = np.empty([len(unique_angles), 1, counts[0]]) block_idx = [np.int64(0)] index = 0 @@ -256,38 +244,34 @@ def 
parameter_array(whole_data, header, unique_angles, counts): # derived parameters: # takes last but one column from the right (skips empty columns): data_index = 1 - temp = whole_data[header["colnames"][-data_index]].to_numpy()[ - block_idx[-1] - 1].astype("float64") + temp = ( + whole_data[header["colnames"][-data_index]] + .to_numpy()[block_idx[-1] - 1] + .astype("float64") + ) while math.isnan(temp): - temp = whole_data[header["colnames"][-data_index]].to_numpy()[ - block_idx[-1] - 1].astype("float64") + temp = ( + whole_data[header["colnames"][-data_index]] + .to_numpy()[block_idx[-1] - 1] + .astype("float64") + ) data_index += 1 for index in range(len(unique_angles)): - my_data_array[ - index, - 0, - :] = whole_data[header["colnames"][-data_index]].to_numpy()[ - block_idx[index + 6]:block_idx[index + 7]].astype("float64") + my_data_array[index, 0, :] = ( + whole_data[header["colnames"][-data_index]] + .to_numpy()[block_idx[index + 6] : block_idx[index + 7]] + .astype("float64") + ) return my_data_array def data_array(whole_data, unique_angles, counts, labels): - """ User defined variables to produce slices of the whole data set - - """ - my_data_array = np.empty([ - len(unique_angles), - len(labels), - counts[0] - ]) - my_error_array = np.empty([ - len(unique_angles), - len(labels), - counts[0] - ]) + """User defined variables to produce slices of the whole data set""" + my_data_array = np.empty([len(unique_angles), len(labels), counts[0]]) + my_error_array = np.empty([len(unique_angles), len(labels), counts[0]]) block_idx = [np.int64(0)] index = 0 @@ -298,21 +282,21 @@ def data_array(whole_data, unique_angles, counts, labels): data_index = 0 for key, val in labels.items(): for index in range(len(val)): - my_data_array[ - index, - data_index, - :] = whole_data[key].to_numpy()[block_idx[index]:block_idx[index + 1] - ].astype("float64") + my_data_array[index, data_index, :] = ( + whole_data[key] + .to_numpy()[block_idx[index] : block_idx[index + 1]] + .astype("float64") + ) data_index += 1 data_index = 0 for key, val in labels.items(): for index in range(len(val)): - my_error_array[ - index, - data_index, - :] = whole_data[f"err.{key}"].to_numpy()[block_idx[index]:block_idx[index + 1] - ].astype("float64") + my_error_array[index, data_index, :] = ( + whole_data[f"err.{key}"] + .to_numpy()[block_idx[index] : block_idx[index + 1]] + .astype("float64") + ) data_index += 1 return my_data_array, my_error_array @@ -353,16 +337,19 @@ def populate_header_dict_with_datasets(file_paths, is_mock=False): labels = header_labels(header, unique_angles) - header["measured_data"], header["data_error"] = \ - data_array(whole_data, unique_angles, counts, labels) - header[header["derived_parameter_type"]] = \ - parameter_array(whole_data, header, unique_angles, counts) + header["measured_data"], header["data_error"] = data_array( + whole_data, unique_angles, counts, labels + ) + header[header["derived_parameter_type"]] = parameter_array( + whole_data, header, unique_angles, counts + ) spectrum_type = header["Data"]["spectrum_type"] if spectrum_type not in header["colnames"]: print("ERROR: spectrum type not found in 'colnames'") header[f"data_collection/NAME_spectrum[{spectrum_type}_spectrum]"] = ( - whole_data[spectrum_type].to_numpy()[0:counts[0]].astype("float64")) + whole_data[spectrum_type].to_numpy()[0 : counts[0]].astype("float64") + ) def write_scan_axis(name: str, values: list, units: str): base_path = f"Env_Conditions/PARAMETER[{name}]" @@ -381,32 +368,36 @@ def write_scan_axis(name: str, values: 
list, units: str):
         header, labels = mock_function(header)
 
         if "atom_types" not in header["Sample"]:
-            header["atom_types"] = extract_atom_types(header["Sample"]["chemical_formula"])
+            header["atom_types"] = extract_atom_types(
+                header["Sample"]["chemical_formula"]
+            )
 
         return header, labels
 
-    def read(self,
-             template: dict = None,
-             file_paths: Tuple[str] = None,
-             objects: Tuple[Any] = None,
-             is_mock: bool = False) -> dict:
-        """ Reads data from given file and returns a filled template dictionary.
+    def read(
+        self,
+        template: dict = None,
+        file_paths: Tuple[str] = None,
+        objects: Tuple[Any] = None,
+        is_mock: bool = False,
+    ) -> dict:
+        """Reads data from given file and returns a filled template dictionary.
 
-        A handlings of virtual datasets is implemented:
+        Handling of virtual datasets is implemented:
 
-        virtual dataset are created inside the final NeXus file.
+        virtual datasets are created inside the final NeXus file.
 
-        The template entry is filled with a dictionary containing the following keys:
-        - link: the path of the external data file and the path of desired dataset inside it
-        - shape: numpy array slice object (according to array slice notation)
+        The template entry is filled with a dictionary containing the following keys:
+        - link: the path of the external data file and the path of the desired dataset inside it
+        - shape: numpy array slice object (according to array slice notation)
         """
         if not file_paths:
             raise IOError("No input files were given to Ellipsometry Reader.")
 
         # The header dictionary is filled with entries.
-        header, labels = (
-            EllipsometryReader.populate_header_dict_with_datasets(file_paths, is_mock)
+        header, labels = EllipsometryReader.populate_header_dict_with_datasets(
+            file_paths, is_mock
         )
 
         data_list = []
@@ -426,31 +417,32 @@ def read(self,
         # because test-data.data has improper units like Angstroms or degrees
         # the fix above prevents these incorrect units from being blindly carried
         # over into the nxs file, which would cause NOMAD to fail
-        template[f"/ENTRY[entry]/plot/AXISNAME[{spectrum_type}]"] = \
-            {"link": f"/entry/data_collection/{spectrum_type}_spectrum"}
-        template[f"/ENTRY[entry]/data_collection/NAME_spectrum[{spectrum_type}_spectrum]/@units"] \
-            = spectrum_unit
+        template[f"/ENTRY[entry]/plot/AXISNAME[{spectrum_type}]"] = {
+            "link": f"/entry/data_collection/{spectrum_type}_spectrum"
+        }
+        template[
+            f"/ENTRY[entry]/data_collection/NAME_spectrum[{spectrum_type}_spectrum]/@units"
+        ] = spectrum_unit
         template[
             f"/ENTRY[entry]/data_collection/NAME_spectrum[{spectrum_type}_spectrum]/@long_name"
         ] = f"{spectrum_type} ({spectrum_unit})"
         plot_name = header["plot_name"]
         for dindx in range(0, len(labels.keys())):
             for index, key in enumerate(data_list[dindx]):
-                template[f"/ENTRY[entry]/plot/DATA[{key}]"] = \
-                    {
-                        "link": "/entry/data_collection/measured_data",
-                        "shape": np.index_exp[index, dindx, :]
+                template[f"/ENTRY[entry]/plot/DATA[{key}]"] = {
+                    "link": "/entry/data_collection/measured_data",
+                    "shape": np.index_exp[index, dindx, :],
                 }
                 # MK:: Carola, Ron, Flo, Tamas, Sandor refactor the following line
                 # using a proper unit parsing logic
                 template[f"/ENTRY[entry]/plot/DATA[{key}]/@units"] = "degree"
                 if dindx == 0 and index == 0:
-                    template[f"/ENTRY[entry]/plot/DATA[{key}]/@long_name"] = \
-                        f"{plot_name} (degree)"
-                template[f"/ENTRY[entry]/plot/DATA[{key}_errors]"] = \
-                    {
-                        "link": "/entry/data_collection/data_error",
-                        "shape": np.index_exp[index, dindx, :]
+                    template[
+                        f"/ENTRY[entry]/plot/DATA[{key}]/@long_name"
+                    ] = f"{plot_name} 
(degree)" + template[f"/ENTRY[entry]/plot/DATA[{key}_errors]"] = { + "link": "/entry/data_collection/data_error", + "shape": np.index_exp[index, dindx, :], } # MK:: Carola, Ron, Flo, Tamas, Sandor refactor the following line template[f"/ENTRY[entry]/plot/DATA[{key}_errors]/@units"] = "degree" @@ -475,7 +467,9 @@ def read(self, template["/ENTRY[entry]/definition/@version"] = get_nexus_version() template["/ENTRY[entry]/program_name"] = "pynxtools" template["/ENTRY[entry]/program_name/@version"] = version("pynxtools") - template["/ENTRY[entry]/program_name/@url"] = "https://github.com/FAIRmat-NFDI/pynxtools" + template[ + "/ENTRY[entry]/program_name/@url" + ] = "https://github.com/FAIRmat-NFDI/pynxtools" return template diff --git a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_display_items_to_nx.py b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_display_items_to_nx.py index 39c02051f..0fc2675de 100644 --- a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_display_items_to_nx.py +++ b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_display_items_to_nx.py @@ -22,37 +22,41 @@ import flatdict as fd -metadata_constraints = {"type": str, - "uuid": str, - "created": str, - "data_shape": list, - "data_dtype": str, - "is_sequence": bool, - "dimensional_calibrations": list, - "data_modified": str, - "timezone": str, - "timezone_offset": str, - "metadata/hardware_source/hardware_source_id": str, - "version": int, - "modified": str} +metadata_constraints = { + "type": str, + "uuid": str, + "created": str, + "data_shape": list, + "data_dtype": str, + "is_sequence": bool, + "dimensional_calibrations": list, + "data_modified": str, + "timezone": str, + "timezone_offset": str, + "metadata/hardware_source/hardware_source_id": str, + "version": int, + "modified": str, +} -nexus_concept_dict = {"ITULL": "NxImageSetRealSpace", - "IFLL": "NxImageSetRealSpace", - "IFL": None, - "ITUL": None, - "STUUE": "NxSpectrumSetEelsOmegaQ", - "STULLE": "NxSpectrumSetEels", - "STULLUE": "NxSpectrumSetOmegaQ", - "SFLLUE": "NxSpectrumSetOmegaQ", - "SFLLE": "NxSpectrumSetEels", - "SFUE": "NxSpectrumSetEelsOmegaQ", - "RFAA": "NxImageAngSpace", - "RTUAA": "NxImageAngSpace"} +nexus_concept_dict = { + "ITULL": "NxImageSetRealSpace", + "IFLL": "NxImageSetRealSpace", + "IFL": None, + "ITUL": None, + "STUUE": "NxSpectrumSetEelsOmegaQ", + "STULLE": "NxSpectrumSetEels", + "STULLUE": "NxSpectrumSetOmegaQ", + "SFLLUE": "NxSpectrumSetOmegaQ", + "SFLLE": "NxSpectrumSetEels", + "SFUE": "NxSpectrumSetEelsOmegaQ", + "RFAA": "NxImageAngSpace", + "RTUAA": "NxImageAngSpace", +} def check_existence_of_required_fields(dct: dict, constraint_dct: dict) -> bool: """Checks if given dictionary has fields with values which match constraints.""" - flat_dct = fd.FlatDict(dct, delimiter='/') + flat_dct = fd.FlatDict(dct, delimiter="/") for keyword, dtyp in constraint_dct.items(): if keyword not in flat_dct.keys(): print(f"-->{keyword} not keyword") @@ -87,14 +91,17 @@ def identify_nexus_concept_key(dct: dict) -> str: set_unit_catg = set(lst_unit_catg) if "A" in set_unit_catg: - nexus_concept_key \ - = f"R{str(dct['is_sequence']).upper()[0:1]}{''.join(lst_unit_catg)}" + nexus_concept_key = ( + f"R{str(dct['is_sequence']).upper()[0:1]}{''.join(lst_unit_catg)}" + ) elif "E" in set_unit_catg: - nexus_concept_key \ - = f"S{str(dct['is_sequence']).upper()[0:1]}{''.join(lst_unit_catg)}" + nexus_concept_key = ( + f"S{str(dct['is_sequence']).upper()[0:1]}{''.join(lst_unit_catg)}" + ) elif "E" not in set_unit_catg: - 
nexus_concept_key \ - = f"I{str(dct['is_sequence']).upper()[0:1]}{''.join(lst_unit_catg)}" + nexus_concept_key = ( + f"I{str(dct['is_sequence']).upper()[0:1]}{''.join(lst_unit_catg)}" + ) else: return nexus_concept_key return nexus_concept_key diff --git a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_eln_to_nx_map.py b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_eln_to_nx_map.py index b6af59467..f7d7e8566 100644 --- a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_eln_to_nx_map.py +++ b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_eln_to_nx_map.py @@ -33,67 +33,182 @@ # results file directly into the template which the em_nion reader has to fill and pass # then to the data converter -NxEmElnInput = {"IGNORE": {"fun": "load_from_dict_list", "terms": "em_lab/detector"}, - "IGNORE": {"fun": "load_from", "terms": "em_lab/ebeam_column/aberration_correction/applied"}, - "IGNORE": {"fun": "load_from_dict_list", "terms": "em_lab/ebeam_column/aperture_em"}, - "/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/emitter_type": {"fun": "load_from", "terms": "em_lab/ebeam_column/electron_source/emitter_type"}, - "/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage/@units": {"fun": "load_from", "terms": "em_lab/ebeam_column/electron_source/voltage/unit"}, - "/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage": {"fun": "load_from", "terms": "em_lab/ebeam_column/electron_source/voltage/value"}, - "/ENTRY[entry*]/em_lab/FABRICATION[fabrication]/capabilities": {"fun": "load_from", "terms": "em_lab/fabrication/capabilities"}, - "/ENTRY[entry*]/em_lab/FABRICATION[fabrication]/identifier": {"fun": "load_from", "terms": "em_lab/fabrication/identifier"}, - "/ENTRY[entry*]/em_lab/FABRICATION[fabrication]/model": {"fun": "load_from", "terms": "em_lab/fabrication/model"}, - "/ENTRY[entry*]/em_lab/FABRICATION[fabrication]/vendor": {"fun": "load_from", "terms": "em_lab/fabrication/vendor"}, - "/ENTRY[entry*]/em_lab/instrument_name": {"fun": "load_from", "terms": "em_lab/instrument_name"}, - "/ENTRY[entry*]/em_lab/location": {"fun": "load_from", "terms": "em_lab/location"}, - "IGNORE": {"fun": "load_from", "terms": "em_lab/optical_system_em/beam_current/unit"}, - "IGNORE": {"fun": "load_from", "terms": "em_lab/optical_system_em/beam_current/value"}, - "IGNORE": {"fun": "load_from", "terms": "em_lab/optical_system_em/beam_current_description"}, - "IGNORE": {"fun": "load_from", "terms": "em_lab/optical_system_em/magnification"}, - "IGNORE": {"fun": "load_from", "terms": "em_lab/optical_system_em/semi_convergence_angle/unit"}, - "IGNORE": {"fun": "load_from", "terms": "em_lab/optical_system_em/semi_convergence_angle/value"}, - "/ENTRY[entry*]/em_lab/stage_lab/description": {"fun": "load_from", "terms": "em_lab/stage_lab/description"}, - "/ENTRY[entry*]/em_lab/stage_lab/name": {"fun": "load_from", "terms": "em_lab/stage_lab/name"}, - "/ENTRY[entry*]/@version": {"fun": "load_from", "terms": "entry/attr_version"}, - "/ENTRY[entry*]/definition": {"fun": "load_from", "terms": "entry/definition"}, - "/ENTRY[entry*]/end_time": {"fun": "load_from", "terms": "entry/end_time"}, - "/ENTRY[entry*]/experiment_description": {"fun": "load_from", "terms": "entry/experiment_description"}, - "/ENTRY[entry*]/experiment_identifier": {"fun": "load_from", "terms": "entry/experiment_identifier"}, - "/ENTRY[entry*]/PROGRAM[program*]/program": {"fun": "load_from", "terms": "entry/program"}, - 
"/ENTRY[entry*]/PROGRAM[program*]/program/@version": {"fun": "load_from", "terms": "entry/program__attr_version"}, - "/ENTRY[entry*]/start_time": {"fun": "load_from", "terms": "entry/start_time"}, - "IGNORE": {"fun": "load_from_list_of_dict", "terms": "user"}} +NxEmElnInput = { + "IGNORE": {"fun": "load_from_dict_list", "terms": "em_lab/detector"}, + "IGNORE": { + "fun": "load_from", + "terms": "em_lab/ebeam_column/aberration_correction/applied", + }, + "IGNORE": { + "fun": "load_from_dict_list", + "terms": "em_lab/ebeam_column/aperture_em", + }, + "/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/emitter_type": { + "fun": "load_from", + "terms": "em_lab/ebeam_column/electron_source/emitter_type", + }, + "/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage/@units": { + "fun": "load_from", + "terms": "em_lab/ebeam_column/electron_source/voltage/unit", + }, + "/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage": { + "fun": "load_from", + "terms": "em_lab/ebeam_column/electron_source/voltage/value", + }, + "/ENTRY[entry*]/em_lab/FABRICATION[fabrication]/capabilities": { + "fun": "load_from", + "terms": "em_lab/fabrication/capabilities", + }, + "/ENTRY[entry*]/em_lab/FABRICATION[fabrication]/identifier": { + "fun": "load_from", + "terms": "em_lab/fabrication/identifier", + }, + "/ENTRY[entry*]/em_lab/FABRICATION[fabrication]/model": { + "fun": "load_from", + "terms": "em_lab/fabrication/model", + }, + "/ENTRY[entry*]/em_lab/FABRICATION[fabrication]/vendor": { + "fun": "load_from", + "terms": "em_lab/fabrication/vendor", + }, + "/ENTRY[entry*]/em_lab/instrument_name": { + "fun": "load_from", + "terms": "em_lab/instrument_name", + }, + "/ENTRY[entry*]/em_lab/location": {"fun": "load_from", "terms": "em_lab/location"}, + "IGNORE": { + "fun": "load_from", + "terms": "em_lab/optical_system_em/beam_current/unit", + }, + "IGNORE": { + "fun": "load_from", + "terms": "em_lab/optical_system_em/beam_current/value", + }, + "IGNORE": { + "fun": "load_from", + "terms": "em_lab/optical_system_em/beam_current_description", + }, + "IGNORE": {"fun": "load_from", "terms": "em_lab/optical_system_em/magnification"}, + "IGNORE": { + "fun": "load_from", + "terms": "em_lab/optical_system_em/semi_convergence_angle/unit", + }, + "IGNORE": { + "fun": "load_from", + "terms": "em_lab/optical_system_em/semi_convergence_angle/value", + }, + "/ENTRY[entry*]/em_lab/stage_lab/description": { + "fun": "load_from", + "terms": "em_lab/stage_lab/description", + }, + "/ENTRY[entry*]/em_lab/stage_lab/name": { + "fun": "load_from", + "terms": "em_lab/stage_lab/name", + }, + "/ENTRY[entry*]/@version": {"fun": "load_from", "terms": "entry/attr_version"}, + "/ENTRY[entry*]/definition": {"fun": "load_from", "terms": "entry/definition"}, + "/ENTRY[entry*]/end_time": {"fun": "load_from", "terms": "entry/end_time"}, + "/ENTRY[entry*]/experiment_description": { + "fun": "load_from", + "terms": "entry/experiment_description", + }, + "/ENTRY[entry*]/experiment_identifier": { + "fun": "load_from", + "terms": "entry/experiment_identifier", + }, + "/ENTRY[entry*]/PROGRAM[program*]/program": { + "fun": "load_from", + "terms": "entry/program", + }, + "/ENTRY[entry*]/PROGRAM[program*]/program/@version": { + "fun": "load_from", + "terms": "entry/program__attr_version", + }, + "/ENTRY[entry*]/start_time": {"fun": "load_from", "terms": "entry/start_time"}, + "IGNORE": {"fun": "load_from_list_of_dict", "terms": "user"}, +} # NeXus concept specific mapping tables which require special treatment 
as the current # NOMAD OASIS custom schema implementation delivers them as a list of dictionaries instead # of a directly flattenable list of keyword, value pairs -NxApertureEmFromListOfDict = {"/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/APERTURE_EM[aperture_em*]/name": {"fun": "load_from", "terms": "name"}, - "/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/APERTURE_EM[aperture_em*]/value": {"fun": "load_from", "terms": "value"}} +NxApertureEmFromListOfDict = { + "/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/APERTURE_EM[aperture_em*]/name": { + "fun": "load_from", + "terms": "name", + }, + "/ENTRY[entry*]/em_lab/EBEAM_COLUMN[ebeam_column]/APERTURE_EM[aperture_em*]/value": { + "fun": "load_from", + "terms": "value", + }, +} -NxUserFromListOfDict = {"/ENTRY[entry*]/USER[user*]/name": {"fun": "load_from", "terms": "name"}, - "/ENTRY[entry*]/USER[user*]/affiliation": {"fun": "load_from", "terms": "affiliation"}, - "/ENTRY[entry*]/USER[user*]/address": {"fun": "load_from", "terms": "address"}, - "/ENTRY[entry*]/USER[user*]/email": {"fun": "load_from", "terms": "email"}, - "/ENTRY[entry*]/USER[user*]/orcid": {"fun": "load_from", "terms": "orcid"}, - "/ENTRY[entry*]/USER[user*]/orcid_platform": {"fun": "load_from", "terms": "orcid_platform"}, - "/ENTRY[entry*]/USER[user*]/telephone_number": {"fun": "load_from", "terms": "telephone_number"}, - "/ENTRY[entry*]/USER[user*]/role": {"fun": "load_from", "terms": "role"}, - "/ENTRY[entry*]/USER[user*]/social_media_name": {"fun": "load_from", "terms": "social_media_name"}, - "/ENTRY[entry*]/USER[user*]/social_media_platform": {"fun": "load_from", "terms": "social_media_platform"}} +NxUserFromListOfDict = { + "/ENTRY[entry*]/USER[user*]/name": {"fun": "load_from", "terms": "name"}, + "/ENTRY[entry*]/USER[user*]/affiliation": { + "fun": "load_from", + "terms": "affiliation", + }, + "/ENTRY[entry*]/USER[user*]/address": {"fun": "load_from", "terms": "address"}, + "/ENTRY[entry*]/USER[user*]/email": {"fun": "load_from", "terms": "email"}, + "/ENTRY[entry*]/USER[user*]/orcid": {"fun": "load_from", "terms": "orcid"}, + "/ENTRY[entry*]/USER[user*]/orcid_platform": { + "fun": "load_from", + "terms": "orcid_platform", + }, + "/ENTRY[entry*]/USER[user*]/telephone_number": { + "fun": "load_from", + "terms": "telephone_number", + }, + "/ENTRY[entry*]/USER[user*]/role": {"fun": "load_from", "terms": "role"}, + "/ENTRY[entry*]/USER[user*]/social_media_name": { + "fun": "load_from", + "terms": "social_media_name", + }, + "/ENTRY[entry*]/USER[user*]/social_media_platform": { + "fun": "load_from", + "terms": "social_media_platform", + }, +} -NxDetectorListOfDict = {"/ENTRY[entry*]/em_lab/DETECTOR[detector*]/local_name": {"fun": "load_from", "terms": "local_name"}} +NxDetectorListOfDict = { + "/ENTRY[entry*]/em_lab/DETECTOR[detector*]/local_name": { + "fun": "load_from", + "terms": "local_name", + } +} # atom_types is a good example for specific cases where one cannot just blindly map # the list that comes from the custom schema ELN instance, because # people may enter invalid types of atoms (which would generate problems in NOMAD OASIS) # and for NeXus we would like to have a "string of a comma-separated list of element names" -NxSample = {"IGNORE": {"fun": "load_from", "terms": "sample/atom_types"}, - "/ENTRY[entry*]/sample/description": {"fun": "load_from", "terms": "sample/description"}, - "/ENTRY[entry*]/sample/method": {"fun": "load_from", "terms": "sample/method"}, - "/ENTRY[entry*]/sample/name": {"fun": "load_from", "terms": "sample/name"}, - 
"/ENTRY[entry*]/sample/preparation_date": {"fun": "load_from", "terms": "sample/preparation_date"}, - "/ENTRY[entry*]/sample/sample_history": {"fun": "load_from", "terms": "sample/sample_history"}, - "/ENTRY[entry*]/sample/short_title": {"fun": "load_from", "terms": "sample/short_title"}, - "/ENTRY[entry*]/sample/thickness": {"fun": "load_from", "terms": "sample/thickness/value"}, - "/ENTRY[entry*]/sample/thickness/@units": {"fun": "load_from", "terms": "sample/thickness/unit"}} +NxSample = { + "IGNORE": {"fun": "load_from", "terms": "sample/atom_types"}, + "/ENTRY[entry*]/sample/description": { + "fun": "load_from", + "terms": "sample/description", + }, + "/ENTRY[entry*]/sample/method": {"fun": "load_from", "terms": "sample/method"}, + "/ENTRY[entry*]/sample/name": {"fun": "load_from", "terms": "sample/name"}, + "/ENTRY[entry*]/sample/preparation_date": { + "fun": "load_from", + "terms": "sample/preparation_date", + }, + "/ENTRY[entry*]/sample/sample_history": { + "fun": "load_from", + "terms": "sample/sample_history", + }, + "/ENTRY[entry*]/sample/short_title": { + "fun": "load_from", + "terms": "sample/short_title", + }, + "/ENTRY[entry*]/sample/thickness": { + "fun": "load_from", + "terms": "sample/thickness/value", + }, + "/ENTRY[entry*]/sample/thickness/@units": { + "fun": "load_from", + "terms": "sample/thickness/unit", + }, +} diff --git a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_image_ang_space.py b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_image_ang_space.py index 18162269a..3b6aa9b22 100644 --- a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_image_ang_space.py +++ b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_image_ang_space.py @@ -29,57 +29,124 @@ # as single-line instructions is more convenient to read and parsable by human eye -NxImageAngSpaceDict = {"IGNORE": {"fun": "load_from", "terms": "type"}, - "IGNORE": {"fun": "load_from", "terms": "uuid"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/start_time": {"fun": "convert_iso8601", "terms": ["created", "timezone"]}, - "IGNORE": {"fun": "load_from", "terms": "is_sequence"}, - "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/offset"}, - "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/scale"}, - "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/units"}, - "IGNORE": {"fun": "load_from", "terms": "dimensional_calibrations"}, - "IGNORE": {"fun": "load_from", "terms": "timezone_offset"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/binning": {"fun": "load_from", "terms": "metadata/hardware_source/autostem/Acquisition:Binning"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/dark_mode": {"fun": "load_from", "terms": "metadata/hardware_source/autostem/Acquisition:DarkMode"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/exposure_time": {"fun": "load_from", "terms": "metadata/hardware_source/autostem/Acquisition:ExposureTime(s)"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/exposure_time": "s", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/gain_mode": {"fun": "load_from", "terms": "metadata/hardware_source/autostem/Acquisition:GainMode"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/flipped": {"fun": 
"load_from", "terms": "metadata/hardware_source/autostem/Acquisition:IsFlipped"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/readout_tlbr": {"fun": "load_from", "terms": ["metadata/hardware_source/autostem/Acquisition:ReadOutTop", "metadata/hardware_source/autostem/Acquisition:ReadOutLeft", "metadata/hardware_source/autostem/Acquisition:ReadOutBottom", "metadata/hardware_source/autostem/Acquisition:ReadOutRight"]}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/normalization": {"fun": "load_from", "terms": "metadata/hardware_source/autostem/Acquisition:ValueNormalization"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/source"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/timestamp"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/sensor_dimensions_hw"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/sensor_readout_area_tlbr"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/is_flipped_horizontally"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/is_gain_corrected"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/is_dark_subtracted"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/frame_number"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/time_point_ns"}, - "IGNORE": "ns", - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/integration_count"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/counts_per_electron"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/detector_identifier": {"fun": "load_from", "terms": "metadata/hardware_source/hardware_source_id"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/hardware_source_name"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/exposure"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/binning"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/signal_type"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/valid_rows"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/frame_index"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/channel_index"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/reference_key"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage": {"fun": "load_from", "terms": "metadata/instrument/high_tension"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage/@units": "V", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/defocus": {"fun": "load_from", "terms": "metadata/instrument/defocus"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/defocus": "m", - "IGNORE": {"fun": "load_from", "terms": "title"}, - "IGNORE": {"fun": "load_from", "terms": "session_id"}, - "IGNORE": {"fun": "load_from", "terms": "session"}, - "IGNORE": {"fun": "load_from", "terms": "category"}, - "IGNORE": {"fun": "load_from", "terms": "version"}, - "IGNORE": {"fun": "load_from", "terms": "modified"}, - "IGNORE": {"fun": "load_from", "terms": "data_shape"}, - "IGNORE": {"fun": "load_from", "terms": 
"data_dtype"}, - "IGNORE": {"fun": "load_from", "terms": "collection_dimension_count"}, - "IGNORE": {"fun": "load_from", "terms": "datum_dimension_count"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/end_time": {"fun": "convert_iso8601", "terms": ["data_modified", "timezone"]}, - "IGNORE": {"fun": "load_from", "terms": "__large_format"}} +NxImageAngSpaceDict = { + "IGNORE": {"fun": "load_from", "terms": "type"}, + "IGNORE": {"fun": "load_from", "terms": "uuid"}, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/start_time": { + "fun": "convert_iso8601", + "terms": ["created", "timezone"], + }, + "IGNORE": {"fun": "load_from", "terms": "is_sequence"}, + "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/offset"}, + "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/scale"}, + "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/units"}, + "IGNORE": {"fun": "load_from", "terms": "dimensional_calibrations"}, + "IGNORE": {"fun": "load_from", "terms": "timezone_offset"}, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/binning": { + "fun": "load_from", + "terms": "metadata/hardware_source/autostem/Acquisition:Binning", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/dark_mode": { + "fun": "load_from", + "terms": "metadata/hardware_source/autostem/Acquisition:DarkMode", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/exposure_time": { + "fun": "load_from", + "terms": "metadata/hardware_source/autostem/Acquisition:ExposureTime(s)", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/exposure_time": "s", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/gain_mode": { + "fun": "load_from", + "terms": "metadata/hardware_source/autostem/Acquisition:GainMode", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/flipped": { + "fun": "load_from", + "terms": "metadata/hardware_source/autostem/Acquisition:IsFlipped", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/readout_tlbr": { + "fun": "load_from", + "terms": [ + "metadata/hardware_source/autostem/Acquisition:ReadOutTop", + "metadata/hardware_source/autostem/Acquisition:ReadOutLeft", + "metadata/hardware_source/autostem/Acquisition:ReadOutBottom", + "metadata/hardware_source/autostem/Acquisition:ReadOutRight", + ], + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/PROCESS[process]/normalization": { + "fun": "load_from", + "terms": "metadata/hardware_source/autostem/Acquisition:ValueNormalization", + }, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/source"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/timestamp"}, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/sensor_dimensions_hw", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/sensor_readout_area_tlbr", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/is_flipped_horizontally", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/is_gain_corrected", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/is_dark_subtracted", + }, + "IGNORE": {"fun": "load_from", "terms": 
"metadata/hardware_source/frame_number"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/time_point_ns"}, + "IGNORE": "ns", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/integration_count", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/counts_per_electron", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE[ronchicam]/detector_identifier": { + "fun": "load_from", + "terms": "metadata/hardware_source/hardware_source_id", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/hardware_source_name", + }, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/exposure"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/binning"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/signal_type"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/valid_rows"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/frame_index"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/channel_index"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/reference_key"}, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage": { + "fun": "load_from", + "terms": "metadata/instrument/high_tension", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage/@units": "V", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/defocus": { + "fun": "load_from", + "terms": "metadata/instrument/defocus", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/defocus": "m", + "IGNORE": {"fun": "load_from", "terms": "title"}, + "IGNORE": {"fun": "load_from", "terms": "session_id"}, + "IGNORE": {"fun": "load_from", "terms": "session"}, + "IGNORE": {"fun": "load_from", "terms": "category"}, + "IGNORE": {"fun": "load_from", "terms": "version"}, + "IGNORE": {"fun": "load_from", "terms": "modified"}, + "IGNORE": {"fun": "load_from", "terms": "data_shape"}, + "IGNORE": {"fun": "load_from", "terms": "data_dtype"}, + "IGNORE": {"fun": "load_from", "terms": "collection_dimension_count"}, + "IGNORE": {"fun": "load_from", "terms": "datum_dimension_count"}, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/end_time": { + "fun": "convert_iso8601", + "terms": ["data_modified", "timezone"], + }, + "IGNORE": {"fun": "load_from", "terms": "__large_format"}, +} diff --git a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_image_real_space.py b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_image_real_space.py index c3ede2142..b8f670ced 100644 --- a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_image_real_space.py +++ b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_image_real_space.py @@ -33,169 +33,460 @@ # is composed of individual groups of deflection coils in which case one could also # think about using one NXebeam_deflector for every single coil... 
-NxImageRealSpaceDict = {"IGNORE": {"fun": "load_from", "terms": "type"}, - "IGNORE": {"fun": "load_from", "terms": "uuid"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/start_time": {"fun": "convert_iso8601", "terms": ["created", "timezone"]}, - "IGNORE": {"fun": "load_from", "terms": "is_sequence"}, - "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/offset"}, - "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/scale"}, - "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/units"}, - "IGNORE": {"fun": "load_from", "terms": "dimensional_calibrations"}, - "IGNORE": {"fun": "load_from", "terms": "timezone"}, - "IGNORE": {"fun": "load_from", "terms": "timezone_offset"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/high_tension"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/defocus"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/EHT"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage/@units": "V", - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/PMTBF_gain"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/PMTDF_gain"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/tilt1": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/StageOutA"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/tilt1/@units": "deg", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/tilt2": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/StageOutB"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/tilt2/@units": "deg", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/position": {"fun": "load_from", "terms": ["metadata/instrument/ImageScanned/StageOutX", "metadata/instrument/ImageScanned/StageOutY", "metadata/instrument/ImageScanned/StageOutZ"]}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/position/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_0/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C10"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_0/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_2_a/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C12.a"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_2_a/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_2_b/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C12.b"}, - 
"/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_2_b/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_1_a/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C21.a"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_1_a/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_1_b/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C21.b"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_1_b/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_3_a/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C23.a"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_3_a/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_3_b/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C23.b"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_3_b/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_0/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C30"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_0/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_2_a/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C32.a"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_2_a/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_2_b/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C32.b"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_2_b/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_4_a/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C34.a"}, - 
"/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_4_a/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_4_b/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C34.b"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_4_b/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_5_0/magnitude": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C50"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_5_0/magnitude/@units": "m", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em1]/value": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C1 ConstW"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em1]/name": "C1", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em2]/value": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C2 ConstW"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em2]/name": "C2", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em3]/value": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C3 ConstW"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em3]/name": "C3", - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/PMT2_gain"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/SuperFEG.^EmissionCurrent"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/G_2Db"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/LastTuneCurrent"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/semi_convergence_angle": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/probe_ha"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/semi_convergence_angle/@units": "mrad", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/inner_half_angle": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/HAADF_Inner_ha"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/inner_half_angle/@units": "mrad", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/outer_half_angle": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/HAADF_Outer_ha"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/outer_half_angle/@units": "mrad", - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/GeometricProbeSize"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/hardware_source_id"}, - "IGNORE": {"fun": "load_from", "terms": 
"metadata/scan/hardware_source_name"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_id"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/center": {"fun": "load_from", "terms": ["metadata/scan/center_x_nm", "metadata/scan/center_y_nm"]}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/center/@units": "nm", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/field_of_view": {"fun": "load_from", "terms": "metadata/scan/fov_nm"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/field_of_view/@units": "nm", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/rotation": {"fun": "load_from", "terms": "metadata/scan/rotation"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/rotation/@units": "deg", - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/rotation_deg"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_context_size"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/subscan_fractional_size"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_size"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/subscan_fractional_center"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/size"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/center_nm"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/pixel_time_us"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/fov_nm"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/rotation_rad"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_clock_wait_time": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/external_clock_wait_time_ms"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_clock_wait_time": "ms", - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_clock_mode": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/external_clock_mode"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_scan_mode": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/external_scan_mode"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_scan_ratio": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/external_scan_ratio"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/ac_line_sync"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/ac_frame_sync": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/ac_frame_sync"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/flyback_time_us"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/subscan_pixel_size"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/subscan_fractional_size"}, - "UNCLEAR": {"fun": "load_from", "terms": 
"metadata/scan/scan_device_parameters/subscan_fractional_center"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/top_left_override"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/data_shape_override"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/state_override"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/section_rect"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/scan_id"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/ac_line_sync": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/ac_line_sync"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/calibration_style": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/calibration_style"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/center_x_nm"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/center_y_nm"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/flyback_time": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/flyback_time_us"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/flyback_time/@units": "µs", - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/fov_nm"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/line_time": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/line_time_us"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/line_time/@units": "µs", - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/pixel_time_us"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/pixels_x"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/pixels_y"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/pixel_time_target": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/requested_pixel_time_us"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/pixel_time_target/@units": "µs", - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/rotation_deg"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/rotation_rad"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac1]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 0"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac2]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 1"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac3]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 2"}, - 
"/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac4]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 3"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac5]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 4"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac6]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 5"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac7]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 6"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac8]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 7"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac9]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 8"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac10]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 9"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac11]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 10"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac12]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 11"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/relay": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 Relay"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac1]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 0"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac2]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 1"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac3]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 2"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac4]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 3"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac5]/value": 
{"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 4"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac6]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 5"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac7]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 6"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac8]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 7"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac9]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 8"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac10]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 9"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac11]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 10"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac12]/value": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 11"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/relay": {"fun": "load_from", "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 Relay"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/valid_rows"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/detector_identifier": {"fun": "load_from", "terms": "metadata/hardware_source/hardware_source_id"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/hardware_source_name"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/exposure"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/frame_index"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/channel_id"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/channel_name"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/pixel_time_us"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/line_time_us"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/valid_rows"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/channel_index"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/reference_key"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/view_id"}, - "IGNORE": {"fun": "load_from", "terms": "title"}, - "IGNORE": {"fun": "load_from", "terms": "session_id"}, - "IGNORE": {"fun": "load_from", "terms": "session"}, - "IGNORE": {"fun": "load_from", "terms": "category"}, - "IGNORE": {"fun": "load_from", "terms": "version"}, - "IGNORE": {"fun": "load_from", "terms": "modified"}, - 
"IGNORE": {"fun": "load_from", "terms": "data_shape"}, - "IGNORE": {"fun": "load_from", "terms": "data_dtype"}, - "IGNORE": {"fun": "load_from", "terms": "collection_dimension_count"}, - "IGNORE": {"fun": "load_from", "terms": "datum_dimension_count"}, - "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/end_time": {"fun": "convert_iso8601", "terms": ["data_modified", "timezone"]}} +NxImageRealSpaceDict = { + "IGNORE": {"fun": "load_from", "terms": "type"}, + "IGNORE": {"fun": "load_from", "terms": "uuid"}, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/start_time": { + "fun": "convert_iso8601", + "terms": ["created", "timezone"], + }, + "IGNORE": {"fun": "load_from", "terms": "is_sequence"}, + "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/offset"}, + "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/scale"}, + "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/units"}, + "IGNORE": {"fun": "load_from", "terms": "dimensional_calibrations"}, + "IGNORE": {"fun": "load_from", "terms": "timezone"}, + "IGNORE": {"fun": "load_from", "terms": "timezone_offset"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/high_tension"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/defocus"}, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/EHT", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/electron_source/voltage/@units": "V", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/PMTBF_gain", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/PMTDF_gain", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/tilt1": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/StageOutA", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/tilt1/@units": "deg", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/tilt2": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/StageOutB", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/tilt2/@units": "deg", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/position": { + "fun": "load_from", + "terms": [ + "metadata/instrument/ImageScanned/StageOutX", + "metadata/instrument/ImageScanned/StageOutY", + "metadata/instrument/ImageScanned/StageOutZ", + ], + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/STAGE_LAB[stage_lab]/position/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_0/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C10", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_0/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_2_a/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C12.a", + }, + 
"/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_2_a/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_2_b/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C12.b", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_1_2_b/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_1_a/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C21.a", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_1_a/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_1_b/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C21.b", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_1_b/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_3_a/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C23.a", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_3_a/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_3_b/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C23.b", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_2_3_b/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_0/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C30", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_0/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_2_a/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C32.a", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_2_a/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_2_b/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C32.b", + }, + 
"/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_2_b/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_4_a/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C34.a", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_4_a/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_4_b/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C34.b", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_3_4_b/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_5_0/magnitude": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C50", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/aberration_correction/ZEMLIN_TABLEAU/PROCESS[process]/nion/c_5_0/magnitude/@units": "m", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em1]/value": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C1 ConstW", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em1]/name": "C1", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em2]/value": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C2 ConstW", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em2]/name": "C2", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em3]/value": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C3 ConstW", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_COLUMN[ebeam_column]/LENS_EM[lens_em3]/name": "C3", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/PMT2_gain", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/SuperFEG.^EmissionCurrent", + }, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/G_2Db"}, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/LastTuneCurrent", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/semi_convergence_angle": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/probe_ha", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/semi_convergence_angle/@units": "mrad", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/inner_half_angle": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/HAADF_Inner_ha", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/inner_half_angle/@units": 
"mrad", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/outer_half_angle": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/HAADF_Outer_ha", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/outer_half_angle/@units": "mrad", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/GeometricProbeSize", + }, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/hardware_source_id"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/hardware_source_name"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_id"}, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/center": { + "fun": "load_from", + "terms": ["metadata/scan/center_x_nm", "metadata/scan/center_y_nm"], + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/center/@units": "nm", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/field_of_view": { + "fun": "load_from", + "terms": "metadata/scan/fov_nm", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/field_of_view/@units": "nm", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/rotation": { + "fun": "load_from", + "terms": "metadata/scan/rotation", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/rotation/@units": "deg", + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/rotation_deg"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_context_size"}, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/subscan_fractional_size"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_size"}, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/subscan_fractional_center"}, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/size", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/center_nm", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/pixel_time_us", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/fov_nm", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/rotation_rad", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_clock_wait_time": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/external_clock_wait_time_ms", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_clock_wait_time": "ms", + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_clock_mode": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/external_clock_mode", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_scan_mode": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/external_scan_mode", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/external_scan_ratio": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/external_scan_ratio", + }, + 
"IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/ac_line_sync", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/ac_frame_sync": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/ac_frame_sync", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/flyback_time_us", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/subscan_pixel_size", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/subscan_fractional_size", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/subscan_fractional_center", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/top_left_override", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/data_shape_override", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/state_override", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/section_rect", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/scan_id", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/ac_line_sync": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/ac_line_sync", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/calibration_style": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/calibration_style", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/center_x_nm", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/center_y_nm", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/flyback_time": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/flyback_time_us", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/flyback_time/@units": "µs", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/fov_nm", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/line_time": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/line_time_us", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/line_time/@units": "µs", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/pixel_time_us", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/pixels_x", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/pixels_y", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/pixel_time_target": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/requested_pixel_time_us", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/pixel_time_target/@units": "µs", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/rotation_deg", + }, + "IGNORE": { + "fun": "load_from", + "terms": 
"metadata/scan/scan_device_properties/rotation_rad", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac1]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 0", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac2]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 1", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac3]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 2", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac4]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 3", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac5]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 4", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac6]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 5", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac7]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 6", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac8]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 7", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac9]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 8", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac10]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 9", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac11]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 10", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/DAC[dac12]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 DAC 11", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board1]/relay": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 0 Relay", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac1]/value": { + "fun": "load_from", + "terms": 
"metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 0", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac2]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 1", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac3]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 2", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac4]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 3", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac5]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 4", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac6]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 5", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac7]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 6", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac8]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 7", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac9]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 8", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac10]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 9", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac11]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 10", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/DAC[dac12]/value": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 DAC 11", + }, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/em_lab/EBEAM_DEFLECTOR[ebeam_deflector1]/CIRCUIT_BOARD[mag_board2]/relay": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_properties/mag_boards/MagBoard 1 Relay", + }, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/scan/valid_rows"}, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/IMAGE_SET[image_set*]/detector_identifier": { + "fun": "load_from", + "terms": "metadata/hardware_source/hardware_source_id", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/hardware_source_name", + }, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/exposure"}, + 
"UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/frame_index"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/channel_id"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/channel_name"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/pixel_time_us"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/line_time_us"}, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/valid_rows"}, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/channel_index"}, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/reference_key"}, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/hardware_source/view_id"}, + "IGNORE": {"fun": "load_from", "terms": "title"}, + "IGNORE": {"fun": "load_from", "terms": "session_id"}, + "IGNORE": {"fun": "load_from", "terms": "session"}, + "IGNORE": {"fun": "load_from", "terms": "category"}, + "IGNORE": {"fun": "load_from", "terms": "version"}, + "IGNORE": {"fun": "load_from", "terms": "modified"}, + "IGNORE": {"fun": "load_from", "terms": "data_shape"}, + "IGNORE": {"fun": "load_from", "terms": "data_dtype"}, + "IGNORE": {"fun": "load_from", "terms": "collection_dimension_count"}, + "IGNORE": {"fun": "load_from", "terms": "datum_dimension_count"}, + "/ENTRY[entry*]/measurement/EVENT_DATA_EM[event_data_em*]/end_time": { + "fun": "convert_iso8601", + "terms": ["data_modified", "timezone"], + }, +} diff --git a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_spectrum_eels.py b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_spectrum_eels.py index eed0efe7e..1ec51c571 100644 --- a/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_spectrum_eels.py +++ b/pynxtools/dataconverter/readers/em_nion/map_concepts/swift_to_nx_spectrum_eels.py @@ -24,209 +24,631 @@ # as single-line instructions is more convenient to read and parsable by human eye -NxSpectrumEels = {"IGNORE": {"fun": "load_from", "terms": "type"}, - "IGNORE": {"fun": "load_from", "terms": "uuid"}, - "IGNORE": {"fun": "convert_iso8601", "terms": ["created", "timezone"]}, - "IGNORE": {"fun": "load_from", "terms": "is_sequence"}, - "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/offset"}, - "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/scale"}, - "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/units"}, - "IGNORE": {"fun": "load_from", "terms": "dimensional_calibrations"}, - "IGNORE": {"fun": "load_from", "terms": "timezone_offset"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/header_info/header_detail"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/header_info/htype"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/header_info/series"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/auto_summation"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/beam_center_x"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/beam_center_y"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/bit_depth_image"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/bit_depth_readout"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/chi_increment"}, - "IGNORE": {"fun": "load_from", 
"terms": "metadata/hardware_source/detector_configuration/chi_start"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/compression"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/count_time"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/countrate_correction_applied"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/countrate_correction_count_cutoff"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/data_collection_date"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/description"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/detector_distance"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/detector_number"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/detector_readout_time"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/detector_translation"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/eiger_fw_version"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/element"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/flatfield_correction_applied"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/frame_count_time"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/frame_period"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/frame_time"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/kappa_increment"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/kappa_start"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/nimages"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/ntrigger"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/number_of_excluded_pixels"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/omega_increment"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/omega_start"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/phi_increment"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/phi_start"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/photon_energy"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/pixel_mask_applied"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/roi_mode"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/sensor_material"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/sensor_thickness"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/software_version"}, - "IGNORE": {"fun": "load_from", "terms": 
"metadata/hardware_source/detector_configuration/threshold_energy"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/trigger_mode"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/two_theta_increment"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/two_theta_start"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/virtual_pixel_correction_applied"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/wavelength"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/x_pixel_size"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/x_pixels_in_detector"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/y_pixel_size"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/detector_configuration/y_pixels_in_detector"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/bad_pixels"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/processing"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/flip_l_r"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/binning"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/chip_size"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/sensor_dimensions"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/readout_area"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/countrate_correction_cutoff"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/interpolate_racetracks"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/mark_saturated_pixels"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/apply_countrate_correction"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/countrate_correction_factor"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/apply_gain_correction"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/camera_processing_parameters/always_interpolate_racetracks"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/high_tension"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/defocus"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/EHT"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/MajorOL"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/StageOutA"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/StageOutB"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/StageOutX"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/StageOutY"}, - "IGNORE": {"fun": "load_from", "terms": 
"metadata/hardware_source/ImageRonchigram/StageOutZ"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/probe_ha"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/SuperFEG.^EmissionCurrent"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/LastTuneCurrent"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C10"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C12.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C12.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C21.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C21.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C23.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C23.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C30"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C32.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C32.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C34.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C34.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/ImageRonchigram/C50"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/hardware_source_id"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/hardware_source_name"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/exposure"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/binning"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/signal_type"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/hardware_source_id"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/hardware_source_name"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_id"}, - "IGNORE": {"fun": "load_from", "terms": ["metadata/scan/center_x_nm", "metadata/scan/center_y_nm"]}, - "IGNORE": "nm", - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/fov_nm"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/rotation"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/rotation_deg"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_context_size"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_size"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/size"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/center_nm"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/pixel_time_us"}, - "IGNORE": "µs", - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/fov_nm"}, - "IGNORE": "nm", - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/rotation_rad"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/external_clock_wait_time_ms"}, - "IGNORE": "ms", - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/external_clock_mode"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/external_scan_mode"}, - "IGNORE": {"fun": 
"load_from", "terms": "metadata/scan/scan_device_parameters/external_scan_ratio"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/ac_line_sync"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/ac_frame_sync"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/flyback_time_us"}, - "IGNORE": "µs", - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_device_parameters/scan_id"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/scan/valid_rows"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/high_tension"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/defocus"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/EHT"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/PMTBF_gain"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/PMTDF_gain"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/StageOutA"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/StageOutB"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/StageOutX"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/StageOutY"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/StageOutZ"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C10"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C12.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C12.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C21.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C21.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C23.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C23.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C30"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C32.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C32.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C34.a"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C34.b"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C50"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C1 ConstW"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C2 ConstW"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C3 ConstW"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/PMT2_gain"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/SuperFEG.^EmissionCurrent"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/G_2Db"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/LastTuneCurrent"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/probe_ha"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/HAADF_Inner_ha"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/HAADF_Outer_ha"}, - "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/GeometricProbeSize"}, - "UNCLEAR": {"fun": 
"load_from", "terms": "metadata/MultiAcquire.settings/x_shifter"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/blanker"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/x_shift_delay"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/focus"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/focus_delay"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/auto_dark_subtract"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/processing"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/blanker_delay"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/sum_frames"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/camera_hardware_source_id"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/use_multi_eels_calibration"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/shift_each_sequence_slice"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/y_shifter"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/x_units_per_ev"}, - "UNCLEAR": "eV", - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/y_units_per_px"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/y_shift_delay"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/saturation_value"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/y_align"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/stitch_spectra"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.parameters/index"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.parameters/offset_x"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.parameters/offset_y"}, - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.parameters/exposure_ms"}, - "UNCLEAR": "ms", - "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.parameters/frames"}, - "IGNORE": {"fun": "load_from", "terms": "title"}, - "IGNORE": {"fun": "load_from", "terms": "session_id"}, - "IGNORE": {"fun": "load_from", "terms": "session"}, - "IGNORE": {"fun": "load_from", "terms": "category"}, - "IGNORE": {"fun": "load_from", "terms": "version"}, - "IGNORE": {"fun": "load_from", "terms": "modified"}, - "IGNORE": {"fun": "load_from", "terms": "data_shape"}, - "IGNORE": {"fun": "load_from", "terms": "data_dtype"}, - "IGNORE": {"fun": "load_from", "terms": "collection_dimension_count"}, - "IGNORE": {"fun": "load_from", "terms": "datum_dimension_count"}, - "IGNORE": {"fun": "convert_iso8601", "terms": ["data_modified", "timezone"]}, - "UNCLEAR": {"fun": "load_from", "terms": "session/site"}} +NxSpectrumEels = { + "IGNORE": {"fun": "load_from", "terms": "type"}, + "IGNORE": {"fun": "load_from", "terms": "uuid"}, + "IGNORE": {"fun": "convert_iso8601", "terms": ["created", "timezone"]}, + "IGNORE": {"fun": "load_from", "terms": "is_sequence"}, + "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/offset"}, + "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/scale"}, + "IGNORE": {"fun": "load_from", "terms": "intensity_calibration/units"}, + "IGNORE": {"fun": "load_from", "terms": "dimensional_calibrations"}, + "IGNORE": {"fun": "load_from", "terms": "timezone_offset"}, + 
"IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/header_info/header_detail", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/header_info/htype", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/header_info/series", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/auto_summation", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/beam_center_x", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/beam_center_y", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/bit_depth_image", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/bit_depth_readout", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/chi_increment", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/chi_start", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/compression", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/count_time", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/countrate_correction_applied", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/countrate_correction_count_cutoff", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/data_collection_date", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/description", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/detector_distance", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/detector_number", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/detector_readout_time", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/detector_translation", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/eiger_fw_version", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/element", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/flatfield_correction_applied", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/frame_count_time", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/frame_period", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/frame_time", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/kappa_increment", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/kappa_start", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/nimages", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/ntrigger", + }, + "IGNORE": { + "fun": "load_from", 
+ "terms": "metadata/hardware_source/detector_configuration/number_of_excluded_pixels", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/omega_increment", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/omega_start", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/phi_increment", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/phi_start", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/photon_energy", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/pixel_mask_applied", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/roi_mode", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/sensor_material", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/sensor_thickness", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/software_version", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/threshold_energy", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/trigger_mode", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/two_theta_increment", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/two_theta_start", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/virtual_pixel_correction_applied", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/wavelength", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/x_pixel_size", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/x_pixels_in_detector", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/y_pixel_size", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/detector_configuration/y_pixels_in_detector", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/bad_pixels", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/processing", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/flip_l_r", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/binning", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/chip_size", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/sensor_dimensions", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/readout_area", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/countrate_correction_cutoff", + }, + "IGNORE": { + "fun": "load_from", + "terms": 
"metadata/hardware_source/camera_processing_parameters/interpolate_racetracks", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/mark_saturated_pixels", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/apply_countrate_correction", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/countrate_correction_factor", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/apply_gain_correction", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/camera_processing_parameters/always_interpolate_racetracks", + }, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/high_tension"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/defocus"}, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/EHT", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/MajorOL", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/StageOutA", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/StageOutB", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/StageOutX", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/StageOutY", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/StageOutZ", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/probe_ha", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/SuperFEG.^EmissionCurrent", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/LastTuneCurrent", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C10", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C12.a", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C12.b", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C21.a", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C21.b", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C23.a", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C23.b", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C30", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C32.a", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C32.b", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C34.a", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C34.b", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/ImageRonchigram/C50", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/hardware_source_id", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/hardware_source/hardware_source_name", + }, + "IGNORE": {"fun": "load_from", 
"terms": "metadata/hardware_source/exposure"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/binning"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/hardware_source/signal_type"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/hardware_source_id"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/hardware_source_name"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_id"}, + "IGNORE": { + "fun": "load_from", + "terms": ["metadata/scan/center_x_nm", "metadata/scan/center_y_nm"], + }, + "IGNORE": "nm", + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/fov_nm"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/rotation"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/rotation_deg"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_context_size"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/scan_size"}, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/size", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/center_nm", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/pixel_time_us", + }, + "IGNORE": "µs", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/fov_nm", + }, + "IGNORE": "nm", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/rotation_rad", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/external_clock_wait_time_ms", + }, + "IGNORE": "ms", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/external_clock_mode", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/external_scan_mode", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/external_scan_ratio", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/ac_line_sync", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/ac_frame_sync", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/flyback_time_us", + }, + "IGNORE": "µs", + "IGNORE": { + "fun": "load_from", + "terms": "metadata/scan/scan_device_parameters/scan_id", + }, + "IGNORE": {"fun": "load_from", "terms": "metadata/scan/valid_rows"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/high_tension"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/defocus"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/EHT"}, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/PMTBF_gain", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/PMTDF_gain", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/StageOutA", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/StageOutB", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/StageOutX", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/StageOutY", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/StageOutZ", + }, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C10"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C12.a"}, + "IGNORE": 
{"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C12.b"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C21.a"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C21.b"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C23.a"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C23.b"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C30"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C32.a"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C32.b"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C34.a"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C34.b"}, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/C50"}, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C1 ConstW", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C2 ConstW", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/C3 ConstW", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/PMT2_gain", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/SuperFEG.^EmissionCurrent", + }, + "IGNORE": {"fun": "load_from", "terms": "metadata/instrument/ImageScanned/G_2Db"}, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/LastTuneCurrent", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/probe_ha", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/HAADF_Inner_ha", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/HAADF_Outer_ha", + }, + "IGNORE": { + "fun": "load_from", + "terms": "metadata/instrument/ImageScanned/GeometricProbeSize", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/x_shifter", + }, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/blanker"}, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/x_shift_delay", + }, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/focus"}, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/focus_delay", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/auto_dark_subtract", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/processing", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/blanker_delay", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/sum_frames", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/camera_hardware_source_id", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/use_multi_eels_calibration", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/shift_each_sequence_slice", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/y_shifter", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/x_units_per_ev", + }, + "UNCLEAR": "eV", + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/y_units_per_px", + }, 
+ "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/y_shift_delay", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/saturation_value", + }, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.settings/y_align"}, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.settings/stitch_spectra", + }, + "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.parameters/index"}, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.parameters/offset_x", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.parameters/offset_y", + }, + "UNCLEAR": { + "fun": "load_from", + "terms": "metadata/MultiAcquire.parameters/exposure_ms", + }, + "UNCLEAR": "ms", + "UNCLEAR": {"fun": "load_from", "terms": "metadata/MultiAcquire.parameters/frames"}, + "IGNORE": {"fun": "load_from", "terms": "title"}, + "IGNORE": {"fun": "load_from", "terms": "session_id"}, + "IGNORE": {"fun": "load_from", "terms": "session"}, + "IGNORE": {"fun": "load_from", "terms": "category"}, + "IGNORE": {"fun": "load_from", "terms": "version"}, + "IGNORE": {"fun": "load_from", "terms": "modified"}, + "IGNORE": {"fun": "load_from", "terms": "data_shape"}, + "IGNORE": {"fun": "load_from", "terms": "data_dtype"}, + "IGNORE": {"fun": "load_from", "terms": "collection_dimension_count"}, + "IGNORE": {"fun": "load_from", "terms": "datum_dimension_count"}, + "IGNORE": {"fun": "convert_iso8601", "terms": ["data_modified", "timezone"]}, + "UNCLEAR": {"fun": "load_from", "terms": "session/site"}, +} # {"fun": "convert_iso8601", "terms": ["created", "timezone"]} diff --git a/pynxtools/dataconverter/readers/em_nion/reader.py b/pynxtools/dataconverter/readers/em_nion/reader.py index e226aca91..d387b54e8 100644 --- a/pynxtools/dataconverter/readers/em_nion/reader.py +++ b/pynxtools/dataconverter/readers/em_nion/reader.py @@ -23,17 +23,21 @@ from pynxtools.dataconverter.readers.base.reader import BaseReader -from pynxtools.dataconverter.readers.em_nion.utils.swift_define_io_cases \ - import EmNionUseCaseSelector +from pynxtools.dataconverter.readers.em_nion.utils.swift_define_io_cases import ( + EmNionUseCaseSelector, +) -from pynxtools.dataconverter.readers.em_nion.utils.swift_load_generic_eln \ - import NxEmNionElnSchemaParser +from pynxtools.dataconverter.readers.em_nion.utils.swift_load_generic_eln import ( + NxEmNionElnSchemaParser, +) -from pynxtools.dataconverter.readers.em_nion.utils.swift_zipped_project_parser \ - import NxEmNionSwiftProjectParser +from pynxtools.dataconverter.readers.em_nion.utils.swift_zipped_project_parser import ( + NxEmNionSwiftProjectParser, +) -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_plots \ - import em_spctrscpy_default_plot_generator +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_plots import ( + em_spctrscpy_default_plot_generator, +) class EmNionReader(BaseReader): @@ -49,10 +53,12 @@ class EmNionReader(BaseReader): supported_nxdls = ["NXem"] # pylint: disable=duplicate-code - def read(self, - template: dict = None, - file_paths: Tuple[str] = None, - objects: Tuple[Any] = None) -> dict: + def read( + self, + template: dict = None, + file_paths: Tuple[str] = None, + objects: Tuple[Any] = None, + ) -> dict: """Read data from given file, return filled template dictionary em.""" # pylint: disable=duplicate-code template.clear() diff --git a/pynxtools/dataconverter/readers/em_nion/utils/em_nion_versioning.py 
b/pynxtools/dataconverter/readers/em_nion/utils/em_nion_versioning.py index fa65ae0f3..28bf202c6 100644 --- a/pynxtools/dataconverter/readers/em_nion/utils/em_nion_versioning.py +++ b/pynxtools/dataconverter/readers/em_nion/utils/em_nion_versioning.py @@ -19,13 +19,13 @@ # pylint: disable=no-member -from pynxtools.dataconverter.readers.shared.shared_utils \ - import get_repo_last_commit +from pynxtools.dataconverter.readers.shared.shared_utils import get_repo_last_commit NX_EM_NION_ADEF_NAME = "NXem" -NX_EM_NION_ADEF_VERSION = "nexus-fairmat-proposal successor of " \ - "9636feecb79bb32b828b1a9804269573256d7696" +NX_EM_NION_ADEF_VERSION = ( + "nexus-fairmat-proposal successor of " "9636feecb79bb32b828b1a9804269573256d7696" +) # based on https://fairmat-experimental.github.io/nexus-fairmat-proposal NX_EM_NION_EXEC_NAME = "dataconverter/reader/em_nion/reader.py" NX_EM_NION_EXEC_VERSION = get_repo_last_commit() diff --git a/pynxtools/dataconverter/readers/em_nion/utils/swift_define_io_cases.py b/pynxtools/dataconverter/readers/em_nion/utils/swift_define_io_cases.py index 7a817e91a..f729d4df7 100644 --- a/pynxtools/dataconverter/readers/em_nion/utils/swift_define_io_cases.py +++ b/pynxtools/dataconverter/readers/em_nion/utils/swift_define_io_cases.py @@ -49,9 +49,10 @@ def analyze_mime_types(self, file_paths: Tuple[str] = None): for file_name in file_paths: index = file_name.lower().rfind(".") if index >= 0: - suffix = file_name.lower()[index + 1::] - add = (suffix in self.supported_mime_types) \ - and (file_name not in self.mime_types[suffix]) + suffix = file_name.lower()[index + 1 : :] + add = (suffix in self.supported_mime_types) and ( + file_name not in self.mime_types[suffix] + ) if add is True: self.mime_types[suffix].append(file_name) print(self.mime_types) diff --git a/pynxtools/dataconverter/readers/em_nion/utils/swift_generate_dimscale_axes.py b/pynxtools/dataconverter/readers/em_nion/utils/swift_generate_dimscale_axes.py index fbd9cfcf2..ae439b666 100644 --- a/pynxtools/dataconverter/readers/em_nion/utils/swift_generate_dimscale_axes.py +++ b/pynxtools/dataconverter/readers/em_nion/utils/swift_generate_dimscale_axes.py @@ -23,15 +23,17 @@ import numpy as np -from pynxtools.dataconverter.readers.em_nion.map_concepts.swift_display_items_to_nx \ - import metadata_constraints, check_existence_of_required_fields # nexus_concept_dict +from pynxtools.dataconverter.readers.em_nion.map_concepts.swift_display_items_to_nx import ( + metadata_constraints, + check_existence_of_required_fields, +) # nexus_concept_dict def get_list_of_dimension_scale_axes(dct: dict) -> list: # , concept_key: str """Create a list of dimension scale axes value, unit tuples.""" # use only when we know already onto which concept a display_item will be mapped axes: List[Any] = [] - if (check_existence_of_required_fields(dct, metadata_constraints) is False): + if check_existence_of_required_fields(dct, metadata_constraints) is False: return axes # or concept_key not in nexus_concept_dict.keys(): # if nexus_concept_dict[concept_key] is None: @@ -46,14 +48,18 @@ def get_list_of_dimension_scale_axes(dct: dict) -> list: # , concept_key: str nvalues = dct["data_shape"][idx] axis_dict = dct["dimensional_calibrations"][idx] if isinstance(nvalues, int) and isinstance(axis_dict, dict): - if (nvalues > 0) \ - and (set(axis_dict.keys()) == set(["offset", "scale", "units"])): + if (nvalues > 0) and ( + set(axis_dict.keys()) == set(["offset", "scale", "units"]) + ): start = axis_dict["offset"] + 0.5 * axis_dict["scale"] stop = 
axis_dict["offset"] + ((nvalues - 1) + 0.5) * axis_dict["scale"] axes.append( - {"value": np.asarray(np.linspace(start, - stop, - num=nvalues, - endpoint=True), np.float64), - "unit": axis_dict["units"]}) + { + "value": np.asarray( + np.linspace(start, stop, num=nvalues, endpoint=True), + np.float64, + ), + "unit": axis_dict["units"], + } + ) return axes diff --git a/pynxtools/dataconverter/readers/em_nion/utils/swift_load_generic_eln.py b/pynxtools/dataconverter/readers/em_nion/utils/swift_load_generic_eln.py index 4028e4986..dcf7394d2 100644 --- a/pynxtools/dataconverter/readers/em_nion/utils/swift_load_generic_eln.py +++ b/pynxtools/dataconverter/readers/em_nion/utils/swift_load_generic_eln.py @@ -27,17 +27,27 @@ from ase.data import chemical_symbols -from pynxtools.dataconverter.readers.em_nion.utils.em_nion_versioning \ - import NX_EM_NION_ADEF_NAME, NX_EM_NION_ADEF_VERSION +from pynxtools.dataconverter.readers.em_nion.utils.em_nion_versioning import ( + NX_EM_NION_ADEF_NAME, + NX_EM_NION_ADEF_VERSION, +) -from pynxtools.dataconverter.readers.em_nion.utils.em_nion_versioning \ - import NX_EM_NION_EXEC_NAME, NX_EM_NION_EXEC_VERSION +from pynxtools.dataconverter.readers.em_nion.utils.em_nion_versioning import ( + NX_EM_NION_EXEC_NAME, + NX_EM_NION_EXEC_VERSION, +) -from pynxtools.dataconverter.readers.shared.map_concepts.mapping_functors \ - import apply_modifier, variadic_path_to_specific_path +from pynxtools.dataconverter.readers.shared.map_concepts.mapping_functors import ( + apply_modifier, + variadic_path_to_specific_path, +) -from pynxtools.dataconverter.readers.em_nion.map_concepts.swift_eln_to_nx_map \ - import NxEmElnInput, NxUserFromListOfDict, NxDetectorListOfDict, NxSample +from pynxtools.dataconverter.readers.em_nion.map_concepts.swift_eln_to_nx_map import ( + NxEmElnInput, + NxUserFromListOfDict, + NxDetectorListOfDict, + NxSample, +) class NxEmNionElnSchemaParser: @@ -51,8 +61,10 @@ class NxEmNionElnSchemaParser: def __init__(self, file_name: str, entry_id: int): print(f"Extracting data from ELN file: {file_name}") - if (file_name.rsplit('/', 1)[-1].startswith("eln_data") - or file_name.startswith("eln_data")) and entry_id > 0: + if ( + file_name.rsplit("/", 1)[-1].startswith("eln_data") + or file_name.startswith("eln_data") + ) and entry_id > 0: self.entry_id = entry_id self.file_name = file_name with open(self.file_name, "r", encoding="utf-8") as stream: @@ -67,7 +79,7 @@ def parse_user_section(self, template: dict) -> dict: src = "user" if src in self.yml.keys(): if isinstance(self.yml[src], list): - if (all(isinstance(entry, dict) for entry in self.yml[src]) is True): + if all(isinstance(entry, dict) for entry in self.yml[src]) is True: user_id = 1 # custom schema delivers a list of dictionaries... 
for user_dict in self.yml[src]: @@ -78,7 +90,9 @@ def parse_user_section(self, template: dict) -> dict: # table and check if we can find these for nx_path, modifier in NxUserFromListOfDict.items(): if (nx_path != "IGNORE") and (nx_path != "UNCLEAR"): - trg = variadic_path_to_specific_path(nx_path, identifier) + trg = variadic_path_to_specific_path( + nx_path, identifier + ) res = apply_modifier(modifier, user_dict) if res is not None: template[trg] = res @@ -93,8 +107,11 @@ def parse_sample_section(self, template: dict) -> dict: if (isinstance(self.yml[src], list)) and (len(self.yml[src]) >= 1): atom_types_are_valid = True for symbol in self.yml[src]: - valid = isinstance(symbol, str) \ - and (symbol in chemical_symbols) and (symbol != "X") + valid = ( + isinstance(symbol, str) + and (symbol in chemical_symbols) + and (symbol != "X") + ) if valid is False: atom_types_are_valid = False break @@ -115,7 +132,7 @@ def parse_detector_section(self, template: dict) -> dict: src = "em_lab/detector" if src in self.yml.keys(): if isinstance(self.yml[src], list): - if (all(isinstance(entry, dict) for entry in self.yml[src]) is True): + if all(isinstance(entry, dict) for entry in self.yml[src]) is True: detector_id = 1 # custom schema delivers a list of dictionaries... for detector_dict in self.yml[src]: @@ -126,7 +143,9 @@ def parse_detector_section(self, template: dict) -> dict: # table and check if we can find these for nx_path, modifier in NxDetectorListOfDict.items(): if (nx_path != "IGNORE") and (nx_path != "UNCLEAR"): - trg = variadic_path_to_specific_path(nx_path, identifier) + trg = variadic_path_to_specific_path( + nx_path, identifier + ) res = apply_modifier(modifier, detector_dict) if res is not None: template[trg] = res diff --git a/pynxtools/dataconverter/readers/em_nion/utils/swift_uuid_to_file_name.py b/pynxtools/dataconverter/readers/em_nion/utils/swift_uuid_to_file_name.py index 0a435110c..1ee9d9fb3 100644 --- a/pynxtools/dataconverter/readers/em_nion/utils/swift_uuid_to_file_name.py +++ b/pynxtools/dataconverter/readers/em_nion/utils/swift_uuid_to_file_name.py @@ -35,6 +35,6 @@ def encode(uuid_: uuid.UUID, alphabet: str) -> str: def uuid_to_file_name(data_item_uuid_str: str) -> str: - data_item_uuid_uuid = uuid.UUID(f'{data_item_uuid_str}') + data_item_uuid_uuid = uuid.UUID(f"{data_item_uuid_str}") return f'data_{encode(data_item_uuid_uuid, "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890")}' # 25 character results diff --git a/pynxtools/dataconverter/readers/em_nion/utils/swift_zipped_project_parser.py b/pynxtools/dataconverter/readers/em_nion/utils/swift_zipped_project_parser.py index 17f74ba61..64d0aa494 100644 --- a/pynxtools/dataconverter/readers/em_nion/utils/swift_zipped_project_parser.py +++ b/pynxtools/dataconverter/readers/em_nion/utils/swift_zipped_project_parser.py @@ -35,31 +35,40 @@ from zipfile37 import ZipFile -from pynxtools.dataconverter.readers.em_nion.utils.swift_uuid_to_file_name \ - import uuid_to_file_name - -from pynxtools.dataconverter.readers.em_nion.utils.swift_generate_dimscale_axes \ - import get_list_of_dimension_scale_axes - -from pynxtools.dataconverter.readers.em_nion.map_concepts.swift_display_items_to_nx \ - import nexus_concept_dict, identify_nexus_concept_key - -from pynxtools.dataconverter.readers.shared.map_concepts.mapping_functors \ - import apply_modifier, variadic_path_to_specific_path - -from pynxtools.dataconverter.readers.em_nion.map_concepts.swift_to_nx_image_real_space \ - import NxImageRealSpaceDict - -from 
pynxtools.dataconverter.readers.em_nion.utils.em_nion_versioning \ - import NX_EM_NION_SWIFT_NAME, NX_EM_NION_SWIFT_VERSION -from pynxtools.dataconverter.readers.em_nion.utils.em_nion_versioning \ - import NX_EM_NION_EXEC_NAME, NX_EM_NION_EXEC_VERSION +from pynxtools.dataconverter.readers.em_nion.utils.swift_uuid_to_file_name import ( + uuid_to_file_name, +) + +from pynxtools.dataconverter.readers.em_nion.utils.swift_generate_dimscale_axes import ( + get_list_of_dimension_scale_axes, +) + +from pynxtools.dataconverter.readers.em_nion.map_concepts.swift_display_items_to_nx import ( + nexus_concept_dict, + identify_nexus_concept_key, +) + +from pynxtools.dataconverter.readers.shared.map_concepts.mapping_functors import ( + apply_modifier, + variadic_path_to_specific_path, +) + +from pynxtools.dataconverter.readers.em_nion.map_concepts.swift_to_nx_image_real_space import ( + NxImageRealSpaceDict, +) + +from pynxtools.dataconverter.readers.em_nion.utils.em_nion_versioning import ( + NX_EM_NION_SWIFT_NAME, + NX_EM_NION_SWIFT_VERSION, +) +from pynxtools.dataconverter.readers.em_nion.utils.em_nion_versioning import ( + NX_EM_NION_EXEC_NAME, + NX_EM_NION_EXEC_VERSION, +) class NxEmNionSwiftProjectParser: - """Parse NionSwift project file. - - """ + """Parse NionSwift project file.""" def __init__(self, file_name, entry_id): """Class wrapping swift parser.""" @@ -85,11 +94,11 @@ def check_project_file(self): with ZipFile(self.file_name) as zip_file_hdl: for file in zip_file_hdl.namelist(): if file.endswith(".h5"): - key = file[file.rfind("/") + 1:].replace(".h5", "") + key = file[file.rfind("/") + 1 :].replace(".h5", "") if key not in self.hdf_file_dict: self.hdf_file_dict[key] = file elif file.endswith(".ndata"): - key = file[file.rfind("/") + 1:].replace(".ndata", "") + key = file[file.rfind("/") + 1 :].replace(".ndata", "") if key not in self.ndata_file_dict: self.ndata_file_dict[key] = file elif file.endswith(".nsproj"): @@ -126,39 +135,51 @@ def add_nx_image_real_space(self, meta, arr, template): axes_lst = get_list_of_dimension_scale_axes(meta) # print(axes_lst) - axes_names = [("axis_image_identifier", "image_identifier", 2), - ("axis_y", "y", 1), - ("axis_x", "x", 0)] - print(f"Add NXdata len(axes_lst) {len(axes_lst)}, len(axes_names) {len(axes_names)}") + axes_names = [ + ("axis_image_identifier", "image_identifier", 2), + ("axis_y", "y", 1), + ("axis_x", "x", 0), + ] + print( + f"Add NXdata len(axes_lst) {len(axes_lst)}, len(axes_names) {len(axes_names)}" + ) if 2 <= len(axes_lst) <= len(axes_names): - trg = f"/ENTRY[entry{self.entry_id}]/measurement/EVENT_DATA_EM[event_data_em" \ - f"{self.event_data_em_id}]/IMAGE_SET[image_set{self.image_id}]/" \ - f"PROCESS[process]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/measurement/EVENT_DATA_EM[event_data_em" + f"{self.event_data_em_id}]/IMAGE_SET[image_set{self.image_id}]/" + f"PROCESS[process]" + ) template[f"{trg}/source"] = "n/a" template[f"{trg}/source/@version"] = "n/a" - template[f"{trg}/PROGRAM[program1]/program"] \ - = f"We do not know because the nsproj file does not store it explicitly "\ - f"which nionswift version and dependencies are used when writing "\ - f"the nsproj file!" + template[f"{trg}/PROGRAM[program1]/program"] = ( + f"We do not know which nionswift version and dependencies were used " + f"when writing the nsproj file, because the nsproj file does not " + f"store this explicitly!"
+ ) template[f"{trg}/PROGRAM[program1]/program/@version"] = "not recoverable" - template[f"{trg}/PROGRAM[program2]/program"] \ - = f"{NX_EM_NION_SWIFT_NAME}" - template[f"{trg}/PROGRAM[program2]/program/@version"] \ - = f"{NX_EM_NION_SWIFT_VERSION}" - template[f"{trg}/PROGRAM[program3]/program"] \ - = f"{NX_EM_NION_EXEC_NAME}" - template[f"{trg}/PROGRAM[program3]/program/@version"] \ - = f"{NX_EM_NION_EXEC_VERSION}" - - trg = f"/ENTRY[entry{self.entry_id}]/measurement/EVENT_DATA_EM[event_data_em" \ - f"{self.event_data_em_id}]/IMAGE_SET[image_set{self.image_id}]/DATA[stack]" - template[f"{trg}/@NX_class"] = "NXdata" # ##TODO one should not need to add this manually + template[f"{trg}/PROGRAM[program2]/program"] = f"{NX_EM_NION_SWIFT_NAME}" + template[ + f"{trg}/PROGRAM[program2]/program/@version" + ] = f"{NX_EM_NION_SWIFT_VERSION}" + template[f"{trg}/PROGRAM[program3]/program"] = f"{NX_EM_NION_EXEC_NAME}" + template[ + f"{trg}/PROGRAM[program3]/program/@version" + ] = f"{NX_EM_NION_EXEC_VERSION}" + + trg = ( + f"/ENTRY[entry{self.entry_id}]/measurement/EVENT_DATA_EM[event_data_em" + f"{self.event_data_em_id}]/IMAGE_SET[image_set{self.image_id}]/DATA[stack]" + ) + template[ + f"{trg}/@NX_class" + ] = "NXdata" # ##TODO one should not need to add this manually template[f"{trg}/title"] = str("Should come from NionSwift directly") template[f"{trg}/@signal"] = "data_counts" template[f"{trg}/@axes"] = ["axis_image_identifier", "axis_y", "axis_x"] for idx in np.arange(0, 3): - template[f"{trg}/@AXISNAME_indices[{axes_names[idx][0]}_indices]"] \ - = np.uint32(axes_names[idx][2]) + template[ + f"{trg}/@AXISNAME_indices[{axes_names[idx][0]}_indices]" + ] = np.uint32(axes_names[idx][2]) # the following three lines would be required by H5Web to plot RGB maps # template[f"{trg}/@CLASS"] = "IMAGE" # template[f"{trg}/@IMAGE_VERSION"] = "1.2" @@ -166,35 +187,47 @@ def add_nx_image_real_space(self, meta, arr, template): if len(axes_lst) == 2: ny, nx = np.shape(arr) - template[f"{trg}/data_counts"] \ - = {"compress": np.reshape(arr, (1, ny, nx), order="C"), "strength": 1} + template[f"{trg}/data_counts"] = { + "compress": np.reshape(arr, (1, ny, nx), order="C"), + "strength": 1, + } template[f"{trg}/data_counts/@long_name"] = "Signal" # no image_identifier axis available - template[f"{trg}/AXISNAME[{axes_names[0][0]}]"] \ - = {"compress": np.asarray([1], np.uint32), "strength": 1} - template[f"{trg}/AXISNAME[{axes_names[0][0]}]/@long_name"] \ - = f"Image identifier (a. u.)" + template[f"{trg}/AXISNAME[{axes_names[0][0]}]"] = { + "compress": np.asarray([1], np.uint32), + "strength": 1, + } + template[ + f"{trg}/AXISNAME[{axes_names[0][0]}]/@long_name" + ] = f"Image identifier (a. 
u.)" template[f"{trg}/AXISNAME[{axes_names[0][0]}]/@units"] = "" for idx in [1, 2]: - template[f"{trg}/AXISNAME[{axes_names[idx][0]}]"] \ - = {"compress": axes_lst[idx - 1]["value"], "strength": 1} - template[f"{trg}/AXISNAME[{axes_names[idx][0]}]/@long_name"] \ - = f"Calibrated position along {axes_names[idx][1]}-axis " \ - f"({axes_lst[idx - 1]['unit']})" - template[f"{trg}/AXISNAME[{axes_names[idx][0]}]/@units"] \ - = f"{axes_lst[idx - 1]['unit']}" + template[f"{trg}/AXISNAME[{axes_names[idx][0]}]"] = { + "compress": axes_lst[idx - 1]["value"], + "strength": 1, + } + template[f"{trg}/AXISNAME[{axes_names[idx][0]}]/@long_name"] = ( + f"Calibrated position along {axes_names[idx][1]}-axis " + f"({axes_lst[idx - 1]['unit']})" + ) + template[ + f"{trg}/AXISNAME[{axes_names[idx][0]}]/@units" + ] = f"{axes_lst[idx - 1]['unit']}" else: # len(axes_lst) == 3 template[f"{trg}/data_counts"] = {"compress": arr, "strength": 1} for idx in [0, 1, 2]: # TODO check that casting works properly - template[f"{trg}/AXISNAME[{axes_names[idx][0]}]"] \ - = {"compress": np.asarray(axes_lst[idx]["value"], np.uint32), - "strength": 1} - template[f"{trg}/AXISNAME[{axes_names[idx][0]}]/@long_name"] \ - = f"Calibrated position along {axes_names[idx][1]}-axis " \ - f"({axes_lst[idx]['unit']})" - template[f"{trg}/AXISNAME[{axes_names[idx][0]}]/@units"] \ - = f"{axes_lst[idx]['unit']}" + template[f"{trg}/AXISNAME[{axes_names[idx][0]}]"] = { + "compress": np.asarray(axes_lst[idx]["value"], np.uint32), + "strength": 1, + } + template[f"{trg}/AXISNAME[{axes_names[idx][0]}]/@long_name"] = ( + f"Calibrated position along {axes_names[idx][1]}-axis " + f"({axes_lst[idx]['unit']})" + ) + template[ + f"{trg}/AXISNAME[{axes_names[idx][0]}]/@units" + ] = f"{axes_lst[idx]['unit']}" self.image_id += 1 self.event_data_written = True @@ -235,20 +268,21 @@ def process_ndata(self, file_hdl, full_path, template): for offset, tpl in local_files.items(): # print(f"{tpl}") - if tpl[0] == b'metadata.json': + if tpl[0] == b"metadata.json": print(f"Extract metadata.json from {full_path} at offset {offset}") # ... 
explicit jump back to beginning of the file file_hdl.seek(0) - metadata_dict = nsnd.read_json(file_hdl, - local_files, - dir_files, - b'metadata.json') + metadata_dict = nsnd.read_json( + file_hdl, local_files, dir_files, b"metadata.json" + ) nx_concept_key = identify_nexus_concept_key(metadata_dict) nx_concept_name = nexus_concept_dict[nx_concept_key] - print(f"Display_item {full_path}, concept {nx_concept_key}, maps {nx_concept_name}") + print( + f"Display_item {full_path}, concept {nx_concept_key}, maps {nx_concept_name}" + ) - flat_metadata_dict = fd.FlatDict(metadata_dict, delimiter='/') + flat_metadata_dict = fd.FlatDict(metadata_dict, delimiter="/") break # because we expect (based on Benedikt's example) to find only one json file # in that *.ndata file pointed to by file_hdl @@ -258,13 +292,10 @@ def process_ndata(self, file_hdl, full_path, template): for offset, tpl in local_files.items(): # print(f"{tpl}") - if tpl[0] == b'data.npy': + if tpl[0] == b"data.npy": print(f"Extract data.npy from {full_path} at offset {offset}") file_hdl.seek(0) - data_arr = nsnd.read_data(file_hdl, - local_files, - dir_files, - b'data.npy') + data_arr = nsnd.read_data(file_hdl, local_files, dir_files, b"data.npy") break # because we expect (based on Benedikt's example) to find only one npy file # in that *.ndata file pointed to by file_hdl @@ -292,9 +323,11 @@ def process_hdf(self, file_hdl, full_path, template): nx_concept_key = identify_nexus_concept_key(metadata_dict) nx_concept_name = nexus_concept_dict[nx_concept_key] - print(f"Display_item {full_path}, concept {nx_concept_key}, maps {nx_concept_name}") + print( + f"Display_item {full_path}, concept {nx_concept_key}, maps {nx_concept_name}" + ) - flat_metadata_dict = fd.FlatDict(metadata_dict, delimiter='/') + flat_metadata_dict = fd.FlatDict(metadata_dict, delimiter="/") if flat_metadata_dict == {}: # only continue if some metadata were retrieved return template @@ -318,7 +351,7 @@ def parse_project_file(self, template: dict) -> dict: with ZipFile(self.file_name) as zip_file_hdl: with zip_file_hdl.open(self.proj_file_names[0]) as file_hdl: # with open(file_name, 'r') as stream: - swift_proj_dict = fd.FlatDict(yaml.safe_load(file_hdl), delimiter='/') + swift_proj_dict = fd.FlatDict(yaml.safe_load(file_hdl), delimiter="/") # for entry in swift_proj_dict["display_items"]: # if isinstance(entry, dict): # for key, val in entry.items(): @@ -327,37 +360,48 @@ def parse_project_file(self, template: dict) -> dict: return template for itm in swift_proj_dict["display_items"]: - if set(["type", "uuid", "created", "display_data_channels"]).issubset(itm.keys()): + if set(["type", "uuid", "created", "display_data_channels"]).issubset( + itm.keys() + ): if len(itm["display_data_channels"]) == 1: if "data_item_reference" in itm["display_data_channels"][0].keys(): key = uuid_to_file_name( - itm["display_data_channels"][0]["data_item_reference"]) + itm["display_data_channels"][0]["data_item_reference"] + ) # file_name without the mime type if key in self.ndata_file_dict: - print(f"Key {key} is *.ndata maps to {self.ndata_file_dict[key]}") + print( + f"Key {key} is *.ndata maps to {self.ndata_file_dict[key]}" + ) with ZipFile(self.file_name) as zip_file_hdl: print(f"Parsing {self.ndata_file_dict[key]}...") - with zip_file_hdl.open(self.ndata_file_dict[key]) as file_hdl: + with zip_file_hdl.open( + self.ndata_file_dict[key] + ) as file_hdl: self.process_ndata( - file_hdl, - self.ndata_file_dict[key], - template) + file_hdl, self.ndata_file_dict[key], template + ) 
elif key in self.hdf_file_dict: - print(f"Key {key} is *.h5 maps to {self.hdf_file_dict[key]}") + print( + f"Key {key} is *.h5 maps to {self.hdf_file_dict[key]}" + ) with ZipFile(self.file_name) as zip_file_hdl: print(f"Parsing {self.hdf_file_dict[key]}...") - with zip_file_hdl.open(self.hdf_file_dict[key]) as file_hdl: + with zip_file_hdl.open( + self.hdf_file_dict[key] + ) as file_hdl: self.process_hdf( - file_hdl, - self.hdf_file_dict[key], - template) + file_hdl, self.hdf_file_dict[key], template + ) else: print(f"Key {key} has no corresponding data file") return template def parse(self, template: dict) -> dict: """Parse NOMAD OASIS relevant data and metadata from swift project.""" - print("Parsing lazily from compressed NionSwift project (nsproj + directory)...") + print( + "Parsing lazily from compressed NionSwift project (nsproj + directory)..." + ) print(self.file_name) print(f"{self.entry_id}") if self.check_project_file() is False: diff --git a/pynxtools/dataconverter/readers/em_om/reader.py b/pynxtools/dataconverter/readers/em_om/reader.py index 771df721b..bbe4c7df0 100644 --- a/pynxtools/dataconverter/readers/em_om/reader.py +++ b/pynxtools/dataconverter/readers/em_om/reader.py @@ -23,26 +23,33 @@ from pynxtools.dataconverter.readers.base.reader import BaseReader -from pynxtools.dataconverter.readers.em_om.utils.use_case_selector \ - import EmOmUseCaseSelector +from pynxtools.dataconverter.readers.em_om.utils.use_case_selector import ( + EmOmUseCaseSelector, +) -from pynxtools.dataconverter.readers.em_om.utils.generic_eln_io \ - import NxEmOmGenericElnSchemaParser +from pynxtools.dataconverter.readers.em_om.utils.generic_eln_io import ( + NxEmOmGenericElnSchemaParser, +) -from pynxtools.dataconverter.readers.em_om.utils.orix_ebsd_parser \ - import NxEmOmOrixEbsdParser +from pynxtools.dataconverter.readers.em_om.utils.orix_ebsd_parser import ( + NxEmOmOrixEbsdParser, +) -from pynxtools.dataconverter.readers.em_om.utils.mtex_ebsd_parser \ - import NxEmOmMtexEbsdParser +from pynxtools.dataconverter.readers.em_om.utils.mtex_ebsd_parser import ( + NxEmOmMtexEbsdParser, +) -from pynxtools.dataconverter.readers.em_om.utils.zip_ebsd_parser \ - import NxEmOmZipEbsdParser +from pynxtools.dataconverter.readers.em_om.utils.zip_ebsd_parser import ( + NxEmOmZipEbsdParser, +) -from pynxtools.dataconverter.readers.em_om.utils.dream3d_ebsd_parser \ - import NxEmOmDreamThreedEbsdParser +from pynxtools.dataconverter.readers.em_om.utils.dream3d_ebsd_parser import ( + NxEmOmDreamThreedEbsdParser, +) -from pynxtools.dataconverter.readers.em_om.utils.em_nexus_plots \ - import em_om_default_plot_generator +from pynxtools.dataconverter.readers.em_om.utils.em_nexus_plots import ( + em_om_default_plot_generator, +) class EmOmReader(BaseReader): @@ -58,10 +65,12 @@ class EmOmReader(BaseReader): supported_nxdls = ["NXem_ebsd"] # how to combine with "NXem"? 
# pylint: disable=duplicate-code - def read(self, - template: dict = None, - file_paths: Tuple[str] = None, - objects: Tuple[Any] = None) -> dict: + def read( + self, + template: dict = None, + file_paths: Tuple[str] = None, + objects: Tuple[Any] = None, + ) -> dict: """Read data from given file, return filled template dictionary em.""" # pylint: disable=duplicate-code template.clear() @@ -89,7 +98,9 @@ def read(self, pattern_simulation = False if case.dat_parser_type == "zip": pattern_simulation = True - eln = NxEmOmGenericElnSchemaParser(case.eln[0], entry_id, pattern_simulation) + eln = NxEmOmGenericElnSchemaParser( + case.eln[0], entry_id, pattern_simulation + ) eln.parse(template) else: print("No interpretable ELN input found!") diff --git a/pynxtools/dataconverter/readers/em_om/utils/dream3d_ebsd_parser.py b/pynxtools/dataconverter/readers/em_om/utils/dream3d_ebsd_parser.py index 156346d89..020c69615 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/dream3d_ebsd_parser.py +++ b/pynxtools/dataconverter/readers/em_om/utils/dream3d_ebsd_parser.py @@ -37,13 +37,13 @@ from pynxtools.dataconverter.readers.em_om.utils.image_transform import thumbnail -from pynxtools.dataconverter.readers.em_om.utils.em_nexus_plots import HFIVE_WEB_MAX_SIZE +from pynxtools.dataconverter.readers.em_om.utils.em_nexus_plots import ( + HFIVE_WEB_MAX_SIZE, +) class NxEmOmDreamThreedEbsdParser: - """Parse DREAM3D EBSD data. - - """ + """Parse DREAM3D EBSD data.""" def __init__(self, file_name, entry_id): """Class wrapping dream3d parser.""" @@ -84,9 +84,11 @@ def parse_roi_geometry(self): # NEW ISSUE: interpret from filter! has_required_metadata = True has_correct_shape = True - req_field_names = [("dims", "DIMENSIONS"), - ("origin", "ORIGIN"), - ("spacing", "SPACING")] + req_field_names = [ + ("dims", "DIMENSIONS"), + ("origin", "ORIGIN"), + ("spacing", "SPACING"), + ] for field_tuple in req_field_names: if f"{grpnm}/{field_tuple[1]}" in h5r: self.stack_meta[field_tuple[0]] = h5r[f"{grpnm}/{field_tuple[1]}"][...] @@ -130,11 +132,15 @@ def parse_roi_dimension_scale_axes(self): # * self.stack_meta["spacing"][i], num=self.stack_meta["dims"][i], # endpoint=True), np.float32) # DREAM.3D uses single precision self.xyz[axisname[1]] = np.asarray( - np.linspace(self.stack_meta["origin"][i], - self.stack_meta["origin"][i] + self.stack_meta["dims"][i] - * self.stack_meta["spacing"][i], num=self.stack_meta["dims"][i], - endpoint=True), - np.float32) # DREAM.3D uses single precision + np.linspace( + self.stack_meta["origin"][i], + self.stack_meta["origin"][i] + + self.stack_meta["dims"][i] * self.stack_meta["spacing"][i], + num=self.stack_meta["dims"][i], + endpoint=True, + ), + np.float32, + ) # DREAM.3D uses single precision # endpoint true? voxel center or its min or max bound? @@ -159,11 +165,14 @@ def parse_roi_default_plot(self, template: dict) -> dict: axes_names = [("axis_x", "x"), ("axis_y", "y"), ("axis_z", "z")] for axisname in axes_names: - trg = f"/ENTRY[entry{self.entry_id}]/correlation/region_of_interest" \ - f"/roi/AXISNAME[{axisname[0]}]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/correlation/region_of_interest" + f"/roi/AXISNAME[{axisname[0]}]" + ) template[f"{trg}"] = {"compress": self.xyz[axisname[1]], "strength": 1} - template[f"{trg}/@long_name"] \ - = f"Calibrated position along {axisname[1]}-axis (µm)" + template[ + f"{trg}/@long_name" + ] = f"Calibrated position along {axisname[1]}-axis (µm)" template[f"{trg}/@units"] = "µm" # parse from filter! 
return template @@ -193,21 +202,31 @@ def parse_phases(self, template: dict) -> dict: self.phases[identifier] = {} self.phases[identifier]["name"] = phase_names[identifier] - trg = f"/ENTRY[entry{self.entry_id}]/correlation" \ - f"/EM_EBSD_CRYSTAL_STRUCTURE_MODEL" \ - f"[em_ebsd_crystal_structure_model{identifier}]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/correlation" + f"/EM_EBSD_CRYSTAL_STRUCTURE_MODEL" + f"[em_ebsd_crystal_structure_model{identifier}]" + ) template[f"{trg}/phase_identifier"] = np.uint32(identifier) template[f"{trg}/phase_name"] = str(phase_names[identifier]) template[f"{trg}/unit_cell_abc"] = np.asarray( - [unit_cells[identifier, 0] * 0.1, - unit_cells[identifier, 1] * 0.1, - unit_cells[identifier, 2] * 0.1], np.float32) + [ + unit_cells[identifier, 0] * 0.1, + unit_cells[identifier, 1] * 0.1, + unit_cells[identifier, 2] * 0.1, + ], + np.float32, + ) # ##? DREAM.3D reports in angstroem but no units attribute in dream3d file! template[f"{trg}/unit_cell_abc/@units"] = "nm" template[f"{trg}/unit_cell_alphabetagamma"] = np.asarray( - [unit_cells[identifier, 3], - unit_cells[identifier, 4], - unit_cells[identifier, 5]], np.float32) + [ + unit_cells[identifier, 3], + unit_cells[identifier, 4], + unit_cells[identifier, 5], + ], + np.float32, + ) # ##? DREAM.3D reports in degree template[f"{trg}/unit_cell_alphabetagamma/@units"] = "°" @@ -229,7 +248,9 @@ def parse_inverse_pole_figure_map(self, identifier, template: dict) -> dict: """Create default plot for the IPF-Z orientation mapping.""" phase_id = identifier phase_name = self.phases[identifier]["name"] - print(f"Generate inverse pole figure (IPF) map for {identifier}, {phase_name}...") + print( + f"Generate inverse pole figure (IPF) map for {identifier}, {phase_name}..." + ) trg = f"/ENTRY[entry{self.entry_id}]/correlation/PROCESS[ipf_map{phase_id}]" template[f"{trg}/bitdepth"] = np.uint32(8) @@ -237,10 +258,14 @@ def parse_inverse_pole_figure_map(self, identifier, template: dict) -> dict: template[f"{trg}/phase_name"] = str(phase_name) template[f"{trg}/PROGRAM[program1]/program"] = str("dream3d") template[f"{trg}/PROGRAM[program1]/program/@version"] = "v6.5.163" - template[f"{trg}/projection_direction"] = np.asarray([0., 0., 1.], np.float32) - - trg = f"/ENTRY[entry{self.entry_id}]/correlation" \ - f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_map" + template[f"{trg}/projection_direction"] = np.asarray( + [0.0, 0.0, 1.0], np.float32 + ) + + trg = ( + f"/ENTRY[entry{self.entry_id}]/correlation" + f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_map" + ) template[f"{trg}/title"] = str("DREAM.3D ROI inverse-pole-figure colored") template[f"{trg}/@signal"] = "data" # template[f"{trg}/@axes"] = ["axis_x", "axis_y", "axis_z"] @@ -250,8 +275,10 @@ def parse_inverse_pole_figure_map(self, identifier, template: dict) -> dict: template[f"{trg}/@AXISNAME_indices[axis_z_indices]"] = np.uint32(2) # check again order x, y, z ?? 
- trg = f"/ENTRY[entry{self.entry_id}]/correlation" \ - f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_map/data" + trg = ( + f"/ENTRY[entry{self.entry_id}]/correlation" + f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_map/data" + ) # NEW ISSUE: needs one more check if precise currently ipf_map = np.asarray(np.zeros(np.shape(self.stack), np.uint8)) # 4d array # msk = self.phase_id == phase_id @@ -274,11 +301,14 @@ def parse_inverse_pole_figure_map(self, identifier, template: dict) -> dict: axes_names = [("axis_x", "x"), ("axis_y", "y"), ("axis_z", "z")] for axisname in axes_names: - trg = f"/ENTRY[entry{self.entry_id}]/correlation" \ - f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_map/AXISNAME[{axisname[0]}]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/correlation" + f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_map/AXISNAME[{axisname[0]}]" + ) template[f"{trg}"] = {"compress": self.xyz[axisname[1]], "strength": 1} - template[f"{trg}/@long_name"] \ - = f"Calibrated position along {axisname[1]}-axis (µm)" + template[ + f"{trg}/@long_name" + ] = f"Calibrated position along {axisname[1]}-axis (µm)" template[f"{trg}/@units"] = "µm" # parse from filter! return template @@ -290,7 +320,9 @@ def parse_inverse_pole_figure_color_key(self, identifier, template: dict) -> dic # em_om reader uses phase_id = identifier phase_name = self.phases[identifier]["name"] - print(f"Parse inverse pole figure (IPF) color key {identifier}, {phase_name}...") + print( + f"Parse inverse pole figure (IPF) color key {identifier}, {phase_name}..." + ) # the key problem is that the DREAM.3D pipeline does not store the point group # so we need to have a heuristic approach which selects the correct IPF @@ -299,28 +331,37 @@ def parse_inverse_pole_figure_color_key(self, identifier, template: dict) -> dic # OrientationAnalysis/Data/OrientationAnalysis/IPF_Legend # DREAM.3D stores a library of prerendered color keys as image files color_key_path = (__file__).replace( - "dream3d_ebsd_parser.py", "dream3d_v65163_color_keys") + "dream3d_ebsd_parser.py", "dream3d_v65163_color_keys" + ) color_key_file_name = f"{color_key_path}/Cubic_High.png" # NEW ISSUE:must not be Cubic_High.png only, this holds only for this example! 
# constraint further to 8bit RGB and no flipping # im = np.asarray(imageio.v3.imread(symm_name)) - img = np.asarray(thumbnail(pil.open(color_key_file_name, "r", ["png"]), - size=HFIVE_WEB_MAX_SIZE), np.uint8) + img = np.asarray( + thumbnail( + pil.open(color_key_file_name, "r", ["png"]), size=HFIVE_WEB_MAX_SIZE + ), + np.uint8, + ) img = img[:, :, 0:3] # discard potential alpha channel # ##MK::need to constrain more the writing of the image that it is guaranteed # a specific type of image and bitdepth and color model, and avoid implicit # image transformations such as flips or rotations - trg = f"/ENTRY[entry{self.entry_id}]/correlation" \ - f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model" + trg = ( + f"/ENTRY[entry{self.entry_id}]/correlation" + f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model" + ) template[f"{trg}/title"] = str("Inverse pole figure color key with SST") template[f"{trg}/@signal"] = "data" template[f"{trg}/@axes"] = ["axis_y", "axis_x"] template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = np.uint32(0) template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = np.uint32(1) - trg = f"/ENTRY[entry{self.entry_id}]/correlation" \ - f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model/DATA[data]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/correlation" + f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model/DATA[data]" + ) template[f"{trg}"] = {"compress": img, "strength": 1} template[f"{trg}/@CLASS"] = "IMAGE" template[f"{trg}/@IMAGE_VERSION"] = "1.2" @@ -328,11 +369,22 @@ def parse_inverse_pole_figure_color_key(self, identifier, template: dict) -> dic axes_names = [("axis_y", 0, "y-axis"), ("axis_x", 1, "x-axis")] for axis in axes_names: - trg = f"/ENTRY[entry{self.entry_id}]/correlation/PROCESS[ipf_map{phase_id}]" \ - f"/ipf_rgb_color_model/AXISNAME[{axis[0]}]" - template[f"{trg}"] = {"compress": np.asarray( - np.linspace(1, np.shape(img)[axis[1]], num=np.shape(img)[axis[1]], - endpoint=True), np.uint32), "strength": 1} + trg = ( + f"/ENTRY[entry{self.entry_id}]/correlation/PROCESS[ipf_map{phase_id}]" + f"/ipf_rgb_color_model/AXISNAME[{axis[0]}]" + ) + template[f"{trg}"] = { + "compress": np.asarray( + np.linspace( + 1, + np.shape(img)[axis[1]], + num=np.shape(img)[axis[1]], + endpoint=True, + ), + np.uint32, + ), + "strength": 1, + } template[f"{trg}/@long_name"] = f"Pixel along {axis[2]}" template[f"{trg}/@units"] = "px" diff --git a/pynxtools/dataconverter/readers/em_om/utils/em_nexus_plots.py b/pynxtools/dataconverter/readers/em_om/utils/em_nexus_plots.py index e133e422f..229b68dab 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/em_nexus_plots.py +++ b/pynxtools/dataconverter/readers/em_om/utils/em_nexus_plots.py @@ -68,8 +68,10 @@ def roi_plot_available(template: dict, entry_id: int) -> bool: return True # fall-back display an IPF mapping - trg = f"/ENTRY[entry{entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map1]/ipf_rgb_map/data" + trg = ( + f"/ENTRY[entry{entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map1]/ipf_rgb_map/data" + ) # by definition if there is one it will be always the first one and we use this IPF map if trg in template.keys(): print(f"Found image default plot for entry{entry_id}") @@ -90,8 +92,10 @@ def roi_plot_available(template: dict, entry_id: int) -> bool: def diffraction_pattern_available(template: dict, entry_id: int) -> bool: """Choose a preferred NXdata/data instance diffraction pattern.""" - trg = f"/ENTRY[entry{entry_id}]/simulation/IMAGE_SET_EM_KIKUCHI" \ - f"[image_set_em_kikuchi]/stack/data_counts" + trg = ( + 
f"/ENTRY[entry{entry_id}]/simulation/IMAGE_SET_EM_KIKUCHI" + f"[image_set_em_kikuchi]/stack/data_counts" + ) if trg in template.keys(): print(f"Found image default plot for entry{entry_id}") trg = "/" diff --git a/pynxtools/dataconverter/readers/em_om/utils/euler_angle_convention.py b/pynxtools/dataconverter/readers/em_om/utils/euler_angle_convention.py index 64b2be8a2..6b61c7a62 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/euler_angle_convention.py +++ b/pynxtools/dataconverter/readers/em_om/utils/euler_angle_convention.py @@ -34,30 +34,32 @@ # "Britton" https://doi.org/10.1016/j.matchar.2016.04.008 -which_euler_convention = {"xxx": (False, "", ""), - "xxy": (True, "", ""), - "xxz": (True, "", ""), - "xyx": (True, "", ""), - "xyy": (True, "", ""), - "xyz": (True, "", ""), - "xzx": (True, "", ""), - "xzy": (True, "", ""), - "xzz": (True, "", ""), - "yxx": (True, "", ""), - "yxy": (True, "", ""), - "yxz": (True, "", ""), - "yyx": (True, "", ""), - "yyy": (False, "", ""), - "yyz": (True, "", ""), - "yzx": (True, "", ""), - "yzy": (True, "", ""), - "yzz": (True, "", ""), - "zxx": (True, "", ""), - "zxy": (True, "", ""), - "zxz": (True, "Bunge", "proper"), - "zyx": (True, "", ""), - "zyy": (True, "", ""), - "zyz": (True, "", ""), - "zzx": (True, "", ""), - "zzy": (True, "", ""), - "zzz": (False, "", "")} +which_euler_convention = { + "xxx": (False, "", ""), + "xxy": (True, "", ""), + "xxz": (True, "", ""), + "xyx": (True, "", ""), + "xyy": (True, "", ""), + "xyz": (True, "", ""), + "xzx": (True, "", ""), + "xzy": (True, "", ""), + "xzz": (True, "", ""), + "yxx": (True, "", ""), + "yxy": (True, "", ""), + "yxz": (True, "", ""), + "yyx": (True, "", ""), + "yyy": (False, "", ""), + "yyz": (True, "", ""), + "yzx": (True, "", ""), + "yzy": (True, "", ""), + "yzz": (True, "", ""), + "zxx": (True, "", ""), + "zxy": (True, "", ""), + "zxz": (True, "Bunge", "proper"), + "zyx": (True, "", ""), + "zyy": (True, "", ""), + "zyz": (True, "", ""), + "zzx": (True, "", ""), + "zzy": (True, "", ""), + "zzz": (False, "", ""), +} diff --git a/pynxtools/dataconverter/readers/em_om/utils/generic_eln_io.py b/pynxtools/dataconverter/readers/em_om/utils/generic_eln_io.py index 4a97ecb60..337f2086e 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/generic_eln_io.py +++ b/pynxtools/dataconverter/readers/em_om/utils/generic_eln_io.py @@ -27,13 +27,20 @@ # from ase.data import chemical_symbols -from pynxtools.dataconverter.readers.em_om.utils.versioning \ - import NX_EM_OM_ADEF_NAME, NX_EM_OM_ADEF_VERSION -from pynxtools.dataconverter.readers.em_om.utils.versioning \ - import NX_EM_OM_EXEC_NAME, NX_EM_OM_EXEC_VERSION +from pynxtools.dataconverter.readers.em_om.utils.versioning import ( + NX_EM_OM_ADEF_NAME, + NX_EM_OM_ADEF_VERSION, +) +from pynxtools.dataconverter.readers.em_om.utils.versioning import ( + NX_EM_OM_EXEC_NAME, + NX_EM_OM_EXEC_VERSION, +) -from pynxtools.dataconverter.readers.em_om.utils.handed_cartesian \ - import REFERENCE_FRAMES, AXIS_DIRECTIONS, is_cs_well_defined +from pynxtools.dataconverter.readers.em_om.utils.handed_cartesian import ( + REFERENCE_FRAMES, + AXIS_DIRECTIONS, + is_cs_well_defined, +) # example how to check against different types of Euler angle conventions # from pynxtools.dataconverter.readers.em_om.utils.euler_angle_convention \ @@ -45,16 +52,16 @@ class NxEmOmGenericElnSchemaParser: - """Parse eln_data.yaml dump file content generated from an (e.g. OASIS) ELN. - - """ + """Parse eln_data.yaml dump file content generated from an (e.g. 
OASIS) ELN.""" def __init__(self, file_name: str, entry_id: int, pattern_simulation: bool): """Fill template with ELN pieces of information.""" self.pattern_simulation = pattern_simulation print(f"Extracting data from ELN file: {file_name}") - if (file_name.rsplit('/', 1)[-1].startswith("eln_data") - or file_name.startswith("eln_data")) and entry_id > 0: + if ( + file_name.rsplit("/", 1)[-1].startswith("eln_data") + or file_name.startswith("eln_data") + ) and entry_id > 0: self.entry_id = entry_id self.file_name = file_name with open(self.file_name, "r", encoding="utf-8") as stream: @@ -88,25 +95,32 @@ def parse(self, template: dict) -> dict: return template def parse_entry_section(self, template: dict) -> dict: - """"Parse entry section.""" + """ "Parse entry section.""" print("Parse entry...") src = "entry" trg = f"/ENTRY[entry{self.entry_id}]/" - if (self.yml[f"{src}:attr_version"] == NX_EM_OM_ADEF_VERSION) \ - and (self.yml[f"{src}:definition"] == NX_EM_OM_ADEF_NAME): + if (self.yml[f"{src}:attr_version"] == NX_EM_OM_ADEF_VERSION) and ( + self.yml[f"{src}:definition"] == NX_EM_OM_ADEF_NAME + ): template[f"{trg}@version"] = NX_EM_OM_ADEF_VERSION template[f"{trg}definition"] = NX_EM_OM_ADEF_NAME template[f"{trg}PROGRAM[program1]/program"] = NX_EM_OM_EXEC_NAME template[f"{trg}PROGRAM[program1]/program/@version"] = NX_EM_OM_EXEC_VERSION - if ("program" in self.yml[src].keys()) \ - and ("program__attr_version" in self.yml[src].keys()): - template[f"{trg}PROGRAM[program2]/program"] \ - = self.yml[f"{src}:program"] - template[f"{trg}PROGRAM[program2]/program/@version"] \ - = self.yml[f"{src}:program__attr_version"] + if ("program" in self.yml[src].keys()) and ( + "program__attr_version" in self.yml[src].keys() + ): + template[f"{trg}PROGRAM[program2]/program"] = self.yml[f"{src}:program"] + template[f"{trg}PROGRAM[program2]/program/@version"] = self.yml[ + f"{src}:program__attr_version" + ] # check that versions NX_EM_OM_* match - req_field_names = ["definition", "start_time", "end_time", - "workflow_description", "workflow_identifier"] + req_field_names = [ + "definition", + "start_time", + "end_time", + "workflow_description", + "workflow_identifier", + ] for field in req_field_names: if field in self.yml[src].keys(): template[f"{trg}{field}"] = self.yml[f"{src}:{field}"] @@ -123,10 +137,17 @@ def parse_user_section(self, template: dict) -> dict: for user_list in self.yml[src]: trg = f"/ENTRY[entry{self.entry_id}]/USER[user{user_id}]/" field_names = [ - "name", "email", "affiliation", "address", - "orcid", "orcid_platform", - "telephone_number", "role", - "social_media_name", "social_media_platform"] + "name", + "email", + "affiliation", + "address", + "orcid", + "orcid_platform", + "telephone_number", + "role", + "social_media_name", + "social_media_platform", + ] for field_name in field_names: if field_name in user_list.keys(): template[f"{trg}{field_name}"] = user_list[field_name] @@ -138,17 +159,19 @@ def parse_commerical_on_the_fly_section(self, template: dict) -> dict: print("Parse commercial on-the-fly") src = "commercial_on_the_fly_indexing" trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/on_the_fly_indexing/" - if ("program" in self.yml[src].keys()) \ - and ("program__attr_version" in self.yml[src].keys()): - template[f"{trg}PROGRAM[program1]/program"] \ - = self.yml[f"{src}:program"] - template[f"{trg}PROGRAM[program1]/program/@version"] \ - = self.yml[f"{src}:program__attr_version"] + if ("program" in self.yml[src].keys()) and ( + "program__attr_version" in 
self.yml[src].keys() + ): + template[f"{trg}PROGRAM[program1]/program"] = self.yml[f"{src}:program"] + template[f"{trg}PROGRAM[program1]/program/@version"] = self.yml[ + f"{src}:program__attr_version" + ] if "results_file" in self.yml[src].keys(): template[f"{trg}origin"] = self.yml[f"{src}:results_file"] if "results_file__attr_version" in self.yml[src].keys(): - template[f"{trg}origin/@version"] \ - = self.yml[f"{src}:results_file__attr_version"] + template[f"{trg}origin/@version"] = self.yml[ + f"{src}:results_file__attr_version" + ] template[f"{trg}path"] = str("undefined") # NEW ISSUE: this is a bug not results_file version in eln but path !! return template @@ -187,14 +210,16 @@ def parse_rotation_convention_section(self, template: dict) -> dict: "three_dimensional_rotation_handedness", "rotation_convention", "euler_angle_convention", - "axis_angle_convention"] + "axis_angle_convention", + ] for term in terms: if term in self.yml[src].keys(): template[f"{trg}{term}"] = self.yml[f"{src}:{term}"].lower() # one term named differently in ELN than in NeXus appdef template keyword if "sign_convention" in self.yml[src].keys(): - template[f"{trg}orientation_parameterization_sign_convention"] \ - = self.yml[f"{src}:sign_convention"].lower() + template[f"{trg}orientation_parameterization_sign_convention"] = self.yml[ + f"{src}:sign_convention" + ].lower() # if desired one could check conventions are consistent with specific ones return template @@ -205,19 +230,24 @@ def parse_processing_frame_section(self, template: dict) -> dict: trg = f"/ENTRY[entry{self.entry_id}]/conventions/processing_reference_frame/" if "reference_frame_type" in self.yml[src].keys(): if self.yml[f"{src}:reference_frame_type"] in REFERENCE_FRAMES: - template[f"{trg}reference_frame_type"] \ - = self.yml[f"{src}:reference_frame_type"] + template[f"{trg}reference_frame_type"] = self.yml[ + f"{src}:reference_frame_type" + ] xyz_directions = ["undefined", "undefined", "undefined"] xyz_aliases = ["", "", ""] for idx in np.arange(0, 3): axis_name = axes_names[idx] if f"{axis_name}axis_direction" in self.yml[src].keys(): if self.yml[f"{src}:{axis_name}axis_direction"] in AXIS_DIRECTIONS: - xyz_directions[idx] = self.yml[f"{src}:{axis_name}axis_direction"] + xyz_directions[idx] = self.yml[ + f"{src}:{axis_name}axis_direction" + ] if f"{axis_name}axis_alias" in self.yml[src].keys(): xyz_aliases[idx] = self.yml[f"{src}:{axis_name}axis_alias"] - if is_cs_well_defined(self.yml[f"{src}:reference_frame_type"], xyz_directions): + if is_cs_well_defined( + self.yml[f"{src}:reference_frame_type"], xyz_directions + ): for idx in np.arange(0, 3): axis_name = axes_names[idx] template[f"{trg}{axis_name}axis_direction"] = xyz_directions[idx] @@ -233,15 +263,20 @@ def parse_sample_frame_section(self, template: dict) -> dict: trg = f"/ENTRY[entry{self.entry_id}]/conventions/sample_reference_frame/" if "reference_frame_type" in self.yml[src].keys(): if self.yml[f"{src}:reference_frame_type"] in REFERENCE_FRAMES: - template[f"{trg}reference_frame_type"] \ - = self.yml[f"{src}:reference_frame_type"] + template[f"{trg}reference_frame_type"] = self.yml[ + f"{src}:reference_frame_type" + ] xyz_directions = ["undefined", "undefined", "undefined"] for idx in np.arange(0, 3): axis_name = axes_names[idx] if f"{axis_name}axis_direction" in self.yml[src].keys(): if self.yml[f"{src}:{axis_name}axis_direction"] in AXIS_DIRECTIONS: - xyz_directions[idx] = self.yml[f"{src}:{axis_name}axis_direction"] - if 
is_cs_well_defined(self.yml[f"{src}:reference_frame_type"], xyz_directions): + xyz_directions[idx] = self.yml[ + f"{src}:{axis_name}axis_direction" + ] + if is_cs_well_defined( + self.yml[f"{src}:reference_frame_type"], xyz_directions + ): for idx in np.arange(0, 3): axis_name = axes_names[idx] template[f"{trg}{axis_name}axis_direction"] = xyz_directions[idx] @@ -256,15 +291,20 @@ def parse_detector_frame_section(self, template: dict) -> dict: trg = f"/ENTRY[entry{self.entry_id}]/conventions/detector_reference_frame/" if "reference_frame_type" in self.yml[src].keys(): if self.yml[f"{src}:reference_frame_type"] in REFERENCE_FRAMES: - template[f"{trg}reference_frame_type"] \ - = self.yml[f"{src}:reference_frame_type"] + template[f"{trg}reference_frame_type"] = self.yml[ + f"{src}:reference_frame_type" + ] xyz_directions = ["undefined", "undefined", "undefined"] for idx in np.arange(0, 3): axis_name = axes_names[idx] if f"{axis_name}axis_direction" in self.yml[src].keys(): if self.yml[f"{src}:{axis_name}axis_direction"] in AXIS_DIRECTIONS: - xyz_directions[idx] = self.yml[f"{src}:{axis_name}axis_direction"] - if is_cs_well_defined(self.yml[f"{src}:reference_frame_type"], xyz_directions): + xyz_directions[idx] = self.yml[ + f"{src}:{axis_name}axis_direction" + ] + if is_cs_well_defined( + self.yml[f"{src}:reference_frame_type"], xyz_directions + ): for idx in np.arange(0, 3): axis_name = axes_names[idx] template[f"{trg}{axis_name}axis_direction"] = xyz_directions[idx] @@ -277,19 +317,26 @@ def parse_gnomonic_projection_section(self, template: dict) -> dict: """Parse for the gnomonic projection.""" axes_names = ["x", "y", "z"] src = "gnomonic_projection:gnomonic_projection_reference_frame" - trg = f"/ENTRY[entry{self.entry_id}]/conventions" \ - f"/gnomonic_projection_reference_frame/" + trg = ( + f"/ENTRY[entry{self.entry_id}]/conventions" + f"/gnomonic_projection_reference_frame/" + ) if "reference_frame_type" in self.yml[src].keys(): if self.yml[f"{src}:reference_frame_type"] in REFERENCE_FRAMES: - template[f"{trg}reference_frame_type"] \ - = self.yml[f"{src}:reference_frame_type"] + template[f"{trg}reference_frame_type"] = self.yml[ + f"{src}:reference_frame_type" + ] xyz_directions = ["undefined", "undefined", "undefined"] for idx in np.arange(0, 3): axis_name = axes_names[idx] if f"{axis_name}axis_direction" in self.yml[src].keys(): if self.yml[f"{src}:{axis_name}axis_direction"] in AXIS_DIRECTIONS: - xyz_directions[idx] = self.yml[f"{src}:{axis_name}axis_direction"] - if is_cs_well_defined(self.yml[f"{src}:reference_frame_type"], xyz_directions): + xyz_directions[idx] = self.yml[ + f"{src}:{axis_name}axis_direction" + ] + if is_cs_well_defined( + self.yml[f"{src}:reference_frame_type"], xyz_directions + ): for idx in np.arange(0, 3): axis_name = axes_names[idx] template[f"{trg}{axis_name}axis_direction"] = xyz_directions[idx] @@ -304,8 +351,9 @@ def parse_gnomonic_projection_section(self, template: dict) -> dict: axis_name = axes_names[idx] for field_name in field_names: if f"{axis_name}{field_name}" in self.yml[src].keys(): - template[f"{trg}{axis_name}{field_name}"] \ - = self.yml[f"{src}:{axis_name}{field_name}"] + template[f"{trg}{axis_name}{field_name}"] = self.yml[ + f"{src}:{axis_name}{field_name}" + ] return template def parse_indexing(self, template: dict) -> dict: diff --git a/pynxtools/dataconverter/readers/em_om/utils/handed_cartesian.py b/pynxtools/dataconverter/readers/em_om/utils/handed_cartesian.py index 331ef1544..f0d9f63d0 100644 --- 
a/pynxtools/dataconverter/readers/em_om/utils/handed_cartesian.py +++ b/pynxtools/dataconverter/readers/em_om/utils/handed_cartesian.py @@ -42,693 +42,697 @@ # is a right-handed Cartesian coordinate system sufficiently constrained, when # at least two base vector directions are chosen -is_cs_rh_unambiguous = {"undefined_undefined_undefined": False, - "undefined_undefined_north": False, - "undefined_undefined_east": False, - "undefined_undefined_south": False, - "undefined_undefined_west": False, - "undefined_undefined_in": False, - "undefined_undefined_out": False, - "undefined_north_undefined": False, - "undefined_north_north": False, - "undefined_north_east": True, - "undefined_north_south": True, - "undefined_north_west": True, - "undefined_north_in": True, - "undefined_north_out": True, - "undefined_east_undefined": False, - "undefined_east_north": True, - "undefined_east_east": False, - "undefined_east_south": True, - "undefined_east_west": False, - "undefined_east_in": True, - "undefined_east_out": True, - "undefined_south_undefined": False, - "undefined_south_north": False, - "undefined_south_east": True, - "undefined_south_south": False, - "undefined_south_west": True, - "undefined_south_in": True, - "undefined_south_out": True, - "undefined_west_undefined": False, - "undefined_west_north": True, - "undefined_west_east": False, - "undefined_west_south": True, - "undefined_west_west": False, - "undefined_west_in": True, - "undefined_west_out": True, - "undefined_in_undefined": False, - "undefined_in_north": True, - "undefined_in_east": True, - "undefined_in_south": True, - "undefined_in_west": True, - "undefined_in_in": False, - "undefined_in_out": False, - "undefined_out_undefined": False, - "undefined_out_north": True, - "undefined_out_east": True, - "undefined_out_south": True, - "undefined_out_west": True, - "undefined_out_in": False, - "undefined_out_out": False, - "north_undefined_undefined": False, - "north_undefined_north": False, - "north_undefined_east": True, - "north_undefined_south": False, - "north_undefined_west": True, - "north_undefined_in": True, - "north_undefined_out": True, - "north_north_undefined": False, - "north_north_north": False, - "north_north_east": False, - "north_north_south": False, - "north_north_west": False, - "north_north_in": False, - "north_north_out": False, - "north_east_undefined": True, - "north_east_north": False, - "north_east_east": False, - "north_east_south": False, - "north_east_west": False, - "north_east_in": True, - "north_east_out": False, - "north_south_undefined": False, - "north_south_north": False, - "north_south_east": False, - "north_south_south": False, - "north_south_west": False, - "north_south_in": False, - "north_south_out": False, - "north_west_undefined": True, - "north_west_north": False, - "north_west_east": False, - "north_west_south": False, - "north_west_west": False, - "north_west_in": False, - "north_west_out": True, - "north_in_undefined": True, - "north_in_north": False, - "north_in_east": False, - "north_in_south": False, - "north_in_west": True, - "north_in_in": False, - "north_in_out": False, - "north_out_undefined": True, - "north_out_north": False, - "north_out_east": True, - "north_out_south": False, - "north_out_west": False, - "north_out_in": False, - "north_out_out": False, - "east_undefined_undefined": False, - "east_undefined_north": True, - "east_undefined_east": False, - "east_undefined_south": True, - "east_undefined_west": False, - "east_undefined_in": True, - "east_undefined_out": True, - 
"east_north_undefined": True, - "east_north_north": False, - "east_north_east": False, - "east_north_south": False, - "east_north_west": False, - "east_north_in": False, - "east_north_out": True, - "east_east_undefined": False, - "east_east_north": False, - "east_east_east": False, - "east_east_south": False, - "east_east_west": False, - "east_east_in": False, - "east_east_out": False, - "east_south_undefined": True, - "east_south_north": False, - "east_south_east": False, - "east_south_south": False, - "east_south_west": False, - "east_south_in": True, - "east_south_out": False, - "east_west_undefined": False, - "east_west_north": False, - "east_west_east": False, - "east_west_south": False, - "east_west_west": False, - "east_west_in": False, - "east_west_out": False, - "east_in_undefined": True, - "east_in_north": True, - "east_in_east": False, - "east_in_south": False, - "east_in_west": False, - "east_in_in": False, - "east_in_out": False, - "east_out_undefined": True, - "east_out_north": False, - "east_out_east": False, - "east_out_south": True, - "east_out_west": False, - "east_out_in": False, - "east_out_out": False, - "south_undefined_undefined": False, - "south_undefined_north": False, - "south_undefined_east": True, - "south_undefined_south": False, - "south_undefined_west": True, - "south_undefined_in": True, - "south_undefined_out": True, - "south_north_undefined": False, - "south_north_north": False, - "south_north_east": False, - "south_north_south": False, - "south_north_west": False, - "south_north_in": False, - "south_north_out": False, - "south_east_undefined": True, - "south_east_north": False, - "south_east_east": False, - "south_east_south": False, - "south_east_west": False, - "south_east_in": False, - "south_east_out": True, - "south_south_undefined": False, - "south_south_north": False, - "south_south_east": False, - "south_south_south": False, - "south_south_west": False, - "south_south_in": False, - "south_south_out": False, - "south_west_undefined": True, - "south_west_north": False, - "south_west_east": False, - "south_west_south": False, - "south_west_west": False, - "south_west_in": True, - "south_west_out": False, - "south_in_undefined": True, - "south_in_north": False, - "south_in_east": True, - "south_in_south": False, - "south_in_west": False, - "south_in_in": False, - "south_in_out": False, - "south_out_undefined": True, - "south_out_north": False, - "south_out_east": False, - "south_out_south": False, - "south_out_west": True, - "south_out_in": False, - "south_out_out": False, - "west_undefined_undefined": False, - "west_undefined_north": True, - "west_undefined_east": False, - "west_undefined_south": True, - "west_undefined_west": False, - "west_undefined_in": True, - "west_undefined_out": True, - "west_north_undefined": True, - "west_north_north": False, - "west_north_east": False, - "west_north_south": False, - "west_north_west": False, - "west_north_in": True, - "west_north_out": False, - "west_east_undefined": False, - "west_east_north": False, - "west_east_east": False, - "west_east_south": False, - "west_east_west": False, - "west_east_in": False, - "west_east_out": False, - "west_south_undefined": True, - "west_south_north": False, - "west_south_east": False, - "west_south_south": False, - "west_south_west": False, - "west_south_in": False, - "west_south_out": True, - "west_west_undefined": False, - "west_west_north": False, - "west_west_east": False, - "west_west_south": False, - "west_west_west": False, - "west_west_in": False, - 
"west_west_out": False, - "west_in_undefined": True, - "west_in_north": False, - "west_in_east": False, - "west_in_south": True, - "west_in_west": False, - "west_in_in": False, - "west_in_out": False, - "west_out_undefined": True, - "west_out_north": True, - "west_out_east": False, - "west_out_south": False, - "west_out_west": False, - "west_out_in": False, - "west_out_out": False, - "in_undefined_undefined": False, - "in_undefined_north": True, - "in_undefined_east": True, - "in_undefined_south": True, - "in_undefined_west": True, - "in_undefined_in": False, - "in_undefined_out": False, - "in_north_undefined": True, - "in_north_north": False, - "in_north_east": True, - "in_north_south": False, - "in_north_west": False, - "in_north_in": False, - "in_north_out": False, - "in_east_undefined": True, - "in_east_north": False, - "in_east_east": False, - "in_east_south": True, - "in_east_west": False, - "in_east_in": False, - "in_east_out": False, - "in_south_undefined": True, - "in_south_north": False, - "in_south_east": False, - "in_south_south": False, - "in_south_west": True, - "in_south_in": False, - "in_south_out": False, - "in_west_undefined": True, - "in_west_north": True, - "in_west_east": False, - "in_west_south": False, - "in_west_west": False, - "in_west_in": False, - "in_west_out": False, - "in_in_undefined": False, - "in_in_north": False, - "in_in_east": False, - "in_in_south": False, - "in_in_west": False, - "in_in_in": False, - "in_in_out": False, - "in_out_undefined": False, - "in_out_north": False, - "in_out_east": False, - "in_out_south": False, - "in_out_west": False, - "in_out_in": False, - "in_out_out": False, - "out_undefined_undefined": False, - "out_undefined_north": True, - "out_undefined_east": True, - "out_undefined_south": True, - "out_undefined_west": True, - "out_undefined_in": False, - "out_undefined_out": False, - "out_north_undefined": True, - "out_north_north": False, - "out_north_east": False, - "out_north_south": False, - "out_north_west": True, - "out_north_in": False, - "out_north_out": False, - "out_east_undefined": True, - "out_east_north": True, - "out_east_east": False, - "out_east_south": False, - "out_east_west": False, - "out_east_in": False, - "out_east_out": False, - "out_south_undefined": True, - "out_south_north": False, - "out_south_east": True, - "out_south_south": False, - "out_south_west": False, - "out_south_in": False, - "out_south_out": False, - "out_west_undefined": True, - "out_west_north": False, - "out_west_east": False, - "out_west_south": True, - "out_west_west": False, - "out_west_in": False, - "out_west_out": False, - "out_in_undefined": False, - "out_in_north": False, - "out_in_east": False, - "out_in_south": False, - "out_in_west": False, - "out_in_in": False, - "out_in_out": False, - "out_out_undefined": False, - "out_out_north": False, - "out_out_east": False, - "out_out_south": False, - "out_out_west": False, - "out_out_in": False, - "out_out_out": False} +is_cs_rh_unambiguous = { + "undefined_undefined_undefined": False, + "undefined_undefined_north": False, + "undefined_undefined_east": False, + "undefined_undefined_south": False, + "undefined_undefined_west": False, + "undefined_undefined_in": False, + "undefined_undefined_out": False, + "undefined_north_undefined": False, + "undefined_north_north": False, + "undefined_north_east": True, + "undefined_north_south": True, + "undefined_north_west": True, + "undefined_north_in": True, + "undefined_north_out": True, + "undefined_east_undefined": False, + "undefined_east_north": 
True, + "undefined_east_east": False, + "undefined_east_south": True, + "undefined_east_west": False, + "undefined_east_in": True, + "undefined_east_out": True, + "undefined_south_undefined": False, + "undefined_south_north": False, + "undefined_south_east": True, + "undefined_south_south": False, + "undefined_south_west": True, + "undefined_south_in": True, + "undefined_south_out": True, + "undefined_west_undefined": False, + "undefined_west_north": True, + "undefined_west_east": False, + "undefined_west_south": True, + "undefined_west_west": False, + "undefined_west_in": True, + "undefined_west_out": True, + "undefined_in_undefined": False, + "undefined_in_north": True, + "undefined_in_east": True, + "undefined_in_south": True, + "undefined_in_west": True, + "undefined_in_in": False, + "undefined_in_out": False, + "undefined_out_undefined": False, + "undefined_out_north": True, + "undefined_out_east": True, + "undefined_out_south": True, + "undefined_out_west": True, + "undefined_out_in": False, + "undefined_out_out": False, + "north_undefined_undefined": False, + "north_undefined_north": False, + "north_undefined_east": True, + "north_undefined_south": False, + "north_undefined_west": True, + "north_undefined_in": True, + "north_undefined_out": True, + "north_north_undefined": False, + "north_north_north": False, + "north_north_east": False, + "north_north_south": False, + "north_north_west": False, + "north_north_in": False, + "north_north_out": False, + "north_east_undefined": True, + "north_east_north": False, + "north_east_east": False, + "north_east_south": False, + "north_east_west": False, + "north_east_in": True, + "north_east_out": False, + "north_south_undefined": False, + "north_south_north": False, + "north_south_east": False, + "north_south_south": False, + "north_south_west": False, + "north_south_in": False, + "north_south_out": False, + "north_west_undefined": True, + "north_west_north": False, + "north_west_east": False, + "north_west_south": False, + "north_west_west": False, + "north_west_in": False, + "north_west_out": True, + "north_in_undefined": True, + "north_in_north": False, + "north_in_east": False, + "north_in_south": False, + "north_in_west": True, + "north_in_in": False, + "north_in_out": False, + "north_out_undefined": True, + "north_out_north": False, + "north_out_east": True, + "north_out_south": False, + "north_out_west": False, + "north_out_in": False, + "north_out_out": False, + "east_undefined_undefined": False, + "east_undefined_north": True, + "east_undefined_east": False, + "east_undefined_south": True, + "east_undefined_west": False, + "east_undefined_in": True, + "east_undefined_out": True, + "east_north_undefined": True, + "east_north_north": False, + "east_north_east": False, + "east_north_south": False, + "east_north_west": False, + "east_north_in": False, + "east_north_out": True, + "east_east_undefined": False, + "east_east_north": False, + "east_east_east": False, + "east_east_south": False, + "east_east_west": False, + "east_east_in": False, + "east_east_out": False, + "east_south_undefined": True, + "east_south_north": False, + "east_south_east": False, + "east_south_south": False, + "east_south_west": False, + "east_south_in": True, + "east_south_out": False, + "east_west_undefined": False, + "east_west_north": False, + "east_west_east": False, + "east_west_south": False, + "east_west_west": False, + "east_west_in": False, + "east_west_out": False, + "east_in_undefined": True, + "east_in_north": True, + "east_in_east": False, + 
"east_in_south": False, + "east_in_west": False, + "east_in_in": False, + "east_in_out": False, + "east_out_undefined": True, + "east_out_north": False, + "east_out_east": False, + "east_out_south": True, + "east_out_west": False, + "east_out_in": False, + "east_out_out": False, + "south_undefined_undefined": False, + "south_undefined_north": False, + "south_undefined_east": True, + "south_undefined_south": False, + "south_undefined_west": True, + "south_undefined_in": True, + "south_undefined_out": True, + "south_north_undefined": False, + "south_north_north": False, + "south_north_east": False, + "south_north_south": False, + "south_north_west": False, + "south_north_in": False, + "south_north_out": False, + "south_east_undefined": True, + "south_east_north": False, + "south_east_east": False, + "south_east_south": False, + "south_east_west": False, + "south_east_in": False, + "south_east_out": True, + "south_south_undefined": False, + "south_south_north": False, + "south_south_east": False, + "south_south_south": False, + "south_south_west": False, + "south_south_in": False, + "south_south_out": False, + "south_west_undefined": True, + "south_west_north": False, + "south_west_east": False, + "south_west_south": False, + "south_west_west": False, + "south_west_in": True, + "south_west_out": False, + "south_in_undefined": True, + "south_in_north": False, + "south_in_east": True, + "south_in_south": False, + "south_in_west": False, + "south_in_in": False, + "south_in_out": False, + "south_out_undefined": True, + "south_out_north": False, + "south_out_east": False, + "south_out_south": False, + "south_out_west": True, + "south_out_in": False, + "south_out_out": False, + "west_undefined_undefined": False, + "west_undefined_north": True, + "west_undefined_east": False, + "west_undefined_south": True, + "west_undefined_west": False, + "west_undefined_in": True, + "west_undefined_out": True, + "west_north_undefined": True, + "west_north_north": False, + "west_north_east": False, + "west_north_south": False, + "west_north_west": False, + "west_north_in": True, + "west_north_out": False, + "west_east_undefined": False, + "west_east_north": False, + "west_east_east": False, + "west_east_south": False, + "west_east_west": False, + "west_east_in": False, + "west_east_out": False, + "west_south_undefined": True, + "west_south_north": False, + "west_south_east": False, + "west_south_south": False, + "west_south_west": False, + "west_south_in": False, + "west_south_out": True, + "west_west_undefined": False, + "west_west_north": False, + "west_west_east": False, + "west_west_south": False, + "west_west_west": False, + "west_west_in": False, + "west_west_out": False, + "west_in_undefined": True, + "west_in_north": False, + "west_in_east": False, + "west_in_south": True, + "west_in_west": False, + "west_in_in": False, + "west_in_out": False, + "west_out_undefined": True, + "west_out_north": True, + "west_out_east": False, + "west_out_south": False, + "west_out_west": False, + "west_out_in": False, + "west_out_out": False, + "in_undefined_undefined": False, + "in_undefined_north": True, + "in_undefined_east": True, + "in_undefined_south": True, + "in_undefined_west": True, + "in_undefined_in": False, + "in_undefined_out": False, + "in_north_undefined": True, + "in_north_north": False, + "in_north_east": True, + "in_north_south": False, + "in_north_west": False, + "in_north_in": False, + "in_north_out": False, + "in_east_undefined": True, + "in_east_north": False, + "in_east_east": False, + 
"in_east_south": True, + "in_east_west": False, + "in_east_in": False, + "in_east_out": False, + "in_south_undefined": True, + "in_south_north": False, + "in_south_east": False, + "in_south_south": False, + "in_south_west": True, + "in_south_in": False, + "in_south_out": False, + "in_west_undefined": True, + "in_west_north": True, + "in_west_east": False, + "in_west_south": False, + "in_west_west": False, + "in_west_in": False, + "in_west_out": False, + "in_in_undefined": False, + "in_in_north": False, + "in_in_east": False, + "in_in_south": False, + "in_in_west": False, + "in_in_in": False, + "in_in_out": False, + "in_out_undefined": False, + "in_out_north": False, + "in_out_east": False, + "in_out_south": False, + "in_out_west": False, + "in_out_in": False, + "in_out_out": False, + "out_undefined_undefined": False, + "out_undefined_north": True, + "out_undefined_east": True, + "out_undefined_south": True, + "out_undefined_west": True, + "out_undefined_in": False, + "out_undefined_out": False, + "out_north_undefined": True, + "out_north_north": False, + "out_north_east": False, + "out_north_south": False, + "out_north_west": True, + "out_north_in": False, + "out_north_out": False, + "out_east_undefined": True, + "out_east_north": True, + "out_east_east": False, + "out_east_south": False, + "out_east_west": False, + "out_east_in": False, + "out_east_out": False, + "out_south_undefined": True, + "out_south_north": False, + "out_south_east": True, + "out_south_south": False, + "out_south_west": False, + "out_south_in": False, + "out_south_out": False, + "out_west_undefined": True, + "out_west_north": False, + "out_west_east": False, + "out_west_south": True, + "out_west_west": False, + "out_west_in": False, + "out_west_out": False, + "out_in_undefined": False, + "out_in_north": False, + "out_in_east": False, + "out_in_south": False, + "out_in_west": False, + "out_in_in": False, + "out_in_out": False, + "out_out_undefined": False, + "out_out_north": False, + "out_out_east": False, + "out_out_south": False, + "out_out_west": False, + "out_out_in": False, + "out_out_out": False, +} -is_cs_lh_unambiguous = {"undefined_undefined_undefined": False, - "undefined_undefined_north": False, - "undefined_undefined_east": False, - "undefined_undefined_south": False, - "undefined_undefined_west": False, - "undefined_undefined_in": False, - "undefined_undefined_out": False, - "undefined_north_undefined": False, - "undefined_north_north": False, - "undefined_north_east": True, - "undefined_north_south": False, - "undefined_north_west": True, - "undefined_north_in": True, - "undefined_north_out": True, - "undefined_east_undefined": False, - "undefined_east_north": True, - "undefined_east_east": False, - "undefined_east_south": True, - "undefined_east_west": False, - "undefined_east_in": True, - "undefined_east_out": True, - "undefined_south_undefined": False, - "undefined_south_north": False, - "undefined_south_east": True, - "undefined_south_south": False, - "undefined_south_west": True, - "undefined_south_in": True, - "undefined_south_out": True, - "undefined_west_undefined": False, - "undefined_west_north": True, - "undefined_west_east": False, - "undefined_west_south": True, - "undefined_west_west": False, - "undefined_west_in": True, - "undefined_west_out": True, - "undefined_in_undefined": False, - "undefined_in_north": True, - "undefined_in_east": True, - "undefined_in_south": True, - "undefined_in_west": True, - "undefined_in_in": False, - "undefined_in_out": False, - "undefined_out_undefined": 
False, - "undefined_out_north": True, - "undefined_out_east": True, - "undefined_out_south": True, - "undefined_out_west": True, - "undefined_out_in": False, - "undefined_out_out": False, - "north_undefined_undefined": False, - "north_undefined_north": False, - "north_undefined_east": True, - "north_undefined_south": False, - "north_undefined_west": True, - "north_undefined_in": True, - "north_undefined_out": True, - "north_north_undefined": False, - "north_north_north": False, - "north_north_east": False, - "north_north_south": False, - "north_north_west": False, - "north_north_in": False, - "north_north_out": False, - "north_east_undefined": True, - "north_east_north": False, - "north_east_east": False, - "north_east_south": False, - "north_east_west": False, - "north_east_in": False, - "north_east_out": True, - "north_south_undefined": False, - "north_south_north": False, - "north_south_east": False, - "north_south_south": False, - "north_south_west": False, - "north_south_in": False, - "north_south_out": False, - "north_west_undefined": True, - "north_west_north": False, - "north_west_east": False, - "north_west_south": False, - "north_west_west": False, - "north_west_in": True, - "north_west_out": False, - "north_in_undefined": True, - "north_in_north": False, - "north_in_east": True, - "north_in_south": False, - "north_in_west": False, - "north_in_in": False, - "north_in_out": False, - "north_out_undefined": True, - "north_out_north": False, - "north_out_east": False, - "north_out_south": False, - "north_out_west": True, - "north_out_in": False, - "north_out_out": False, - "east_undefined_undefined": False, - "east_undefined_north": True, - "east_undefined_east": False, - "east_undefined_south": True, - "east_undefined_west": False, - "east_undefined_in": True, - "east_undefined_out": True, - "east_north_undefined": True, - "east_north_north": False, - "east_north_east": False, - "east_north_south": False, - "east_north_west": False, - "east_north_in": True, - "east_north_out": False, - "east_east_undefined": False, - "east_east_north": False, - "east_east_east": False, - "east_east_south": False, - "east_east_west": False, - "east_east_in": False, - "east_east_out": False, - "east_south_undefined": True, - "east_south_north": False, - "east_south_east": False, - "east_south_south": False, - "east_south_west": False, - "east_south_in": False, - "east_south_out": True, - "east_west_undefined": False, - "east_west_north": False, - "east_west_east": False, - "east_west_south": False, - "east_west_west": False, - "east_west_in": False, - "east_west_out": False, - "east_in_undefined": True, - "east_in_north": False, - "east_in_east": False, - "east_in_south": True, - "east_in_west": False, - "east_in_in": False, - "east_in_out": False, - "east_out_undefined": True, - "east_out_north": True, - "east_out_east": False, - "east_out_south": False, - "east_out_west": False, - "east_out_in": False, - "east_out_out": False, - "south_undefined_undefined": False, - "south_undefined_north": False, - "south_undefined_east": True, - "south_undefined_south": False, - "south_undefined_west": True, - "south_undefined_in": True, - "south_undefined_out": True, - "south_north_undefined": False, - "south_north_north": False, - "south_north_east": False, - "south_north_south": False, - "south_north_west": False, - "south_north_in": False, - "south_north_out": False, - "south_east_undefined": True, - "south_east_north": False, - "south_east_east": False, - "south_east_south": False, - "south_east_west": False, 
- "south_east_in": True, - "south_east_out": False, - "south_south_undefined": False, - "south_south_north": False, - "south_south_east": False, - "south_south_south": False, - "south_south_west": False, - "south_south_in": False, - "south_south_out": False, - "south_west_undefined": True, - "south_west_north": False, - "south_west_east": False, - "south_west_south": False, - "south_west_west": False, - "south_west_in": False, - "south_west_out": True, - "south_in_undefined": True, - "south_in_north": False, - "south_in_east": False, - "south_in_south": False, - "south_in_west": True, - "south_in_in": False, - "south_in_out": False, - "south_out_undefined": True, - "south_out_north": False, - "south_out_east": True, - "south_out_south": False, - "south_out_west": False, - "south_out_in": False, - "south_out_out": False, - "west_undefined_undefined": False, - "west_undefined_north": True, - "west_undefined_east": False, - "west_undefined_south": True, - "west_undefined_west": False, - "west_undefined_in": True, - "west_undefined_out": True, - "west_north_undefined": True, - "west_north_north": False, - "west_north_east": False, - "west_north_south": False, - "west_north_west": False, - "west_north_in": False, - "west_north_out": True, - "west_east_undefined": False, - "west_east_north": False, - "west_east_east": False, - "west_east_south": False, - "west_east_west": False, - "west_east_in": False, - "west_east_out": False, - "west_south_undefined": True, - "west_south_north": False, - "west_south_east": False, - "west_south_south": False, - "west_south_west": False, - "west_south_in": True, - "west_south_out": False, - "west_west_undefined": False, - "west_west_north": False, - "west_west_east": False, - "west_west_south": False, - "west_west_west": False, - "west_west_in": False, - "west_west_out": False, - "west_in_undefined": True, - "west_in_north": True, - "west_in_east": False, - "west_in_south": False, - "west_in_west": False, - "west_in_in": False, - "west_in_out": False, - "west_out_undefined": True, - "west_out_north": False, - "west_out_east": False, - "west_out_south": True, - "west_out_west": False, - "west_out_in": False, - "west_out_out": False, - "in_undefined_undefined": False, - "in_undefined_north": True, - "in_undefined_east": True, - "in_undefined_south": True, - "in_undefined_west": True, - "in_undefined_in": False, - "in_undefined_out": False, - "in_north_undefined": True, - "in_north_north": False, - "in_north_east": False, - "in_north_south": False, - "in_north_west": True, - "in_north_in": False, - "in_north_out": False, - "in_east_undefined": True, - "in_east_north": True, - "in_east_east": False, - "in_east_south": False, - "in_east_west": False, - "in_east_in": False, - "in_east_out": False, - "in_south_undefined": True, - "in_south_north": False, - "in_south_east": True, - "in_south_south": False, - "in_south_west": False, - "in_south_in": False, - "in_south_out": False, - "in_west_undefined": True, - "in_west_north": False, - "in_west_east": False, - "in_west_south": True, - "in_west_west": False, - "in_west_in": False, - "in_west_out": False, - "in_in_undefined": False, - "in_in_north": False, - "in_in_east": False, - "in_in_south": False, - "in_in_west": False, - "in_in_in": False, - "in_in_out": False, - "in_out_undefined": False, - "in_out_north": False, - "in_out_east": False, - "in_out_south": False, - "in_out_west": False, - "in_out_in": False, - "in_out_out": False, - "out_undefined_undefined": False, - "out_undefined_north": True, - 
"out_undefined_east": True, - "out_undefined_south": True, - "out_undefined_west": True, - "out_undefined_in": False, - "out_undefined_out": False, - "out_north_undefined": True, - "out_north_north": False, - "out_north_east": True, - "out_north_south": False, - "out_north_west": False, - "out_north_in": False, - "out_north_out": False, - "out_east_undefined": True, - "out_east_north": False, - "out_east_east": False, - "out_east_south": True, - "out_east_west": False, - "out_east_in": False, - "out_east_out": False, - "out_south_undefined": True, - "out_south_north": False, - "out_south_east": False, - "out_south_south": False, - "out_south_west": True, - "out_south_in": False, - "out_south_out": False, - "out_west_undefined": True, - "out_west_north": True, - "out_west_east": False, - "out_west_south": False, - "out_west_west": False, - "out_west_in": False, - "out_west_out": False, - "out_in_undefined": False, - "out_in_north": False, - "out_in_east": False, - "out_in_south": False, - "out_in_west": False, - "out_in_in": False, - "out_in_out": False, - "out_out_undefined": False, - "out_out_north": False, - "out_out_east": False, - "out_out_south": False, - "out_out_west": False, - "out_out_in": False, - "out_out_out": False} +is_cs_lh_unambiguous = { + "undefined_undefined_undefined": False, + "undefined_undefined_north": False, + "undefined_undefined_east": False, + "undefined_undefined_south": False, + "undefined_undefined_west": False, + "undefined_undefined_in": False, + "undefined_undefined_out": False, + "undefined_north_undefined": False, + "undefined_north_north": False, + "undefined_north_east": True, + "undefined_north_south": False, + "undefined_north_west": True, + "undefined_north_in": True, + "undefined_north_out": True, + "undefined_east_undefined": False, + "undefined_east_north": True, + "undefined_east_east": False, + "undefined_east_south": True, + "undefined_east_west": False, + "undefined_east_in": True, + "undefined_east_out": True, + "undefined_south_undefined": False, + "undefined_south_north": False, + "undefined_south_east": True, + "undefined_south_south": False, + "undefined_south_west": True, + "undefined_south_in": True, + "undefined_south_out": True, + "undefined_west_undefined": False, + "undefined_west_north": True, + "undefined_west_east": False, + "undefined_west_south": True, + "undefined_west_west": False, + "undefined_west_in": True, + "undefined_west_out": True, + "undefined_in_undefined": False, + "undefined_in_north": True, + "undefined_in_east": True, + "undefined_in_south": True, + "undefined_in_west": True, + "undefined_in_in": False, + "undefined_in_out": False, + "undefined_out_undefined": False, + "undefined_out_north": True, + "undefined_out_east": True, + "undefined_out_south": True, + "undefined_out_west": True, + "undefined_out_in": False, + "undefined_out_out": False, + "north_undefined_undefined": False, + "north_undefined_north": False, + "north_undefined_east": True, + "north_undefined_south": False, + "north_undefined_west": True, + "north_undefined_in": True, + "north_undefined_out": True, + "north_north_undefined": False, + "north_north_north": False, + "north_north_east": False, + "north_north_south": False, + "north_north_west": False, + "north_north_in": False, + "north_north_out": False, + "north_east_undefined": True, + "north_east_north": False, + "north_east_east": False, + "north_east_south": False, + "north_east_west": False, + "north_east_in": False, + "north_east_out": True, + "north_south_undefined": False, + 
"north_south_north": False, + "north_south_east": False, + "north_south_south": False, + "north_south_west": False, + "north_south_in": False, + "north_south_out": False, + "north_west_undefined": True, + "north_west_north": False, + "north_west_east": False, + "north_west_south": False, + "north_west_west": False, + "north_west_in": True, + "north_west_out": False, + "north_in_undefined": True, + "north_in_north": False, + "north_in_east": True, + "north_in_south": False, + "north_in_west": False, + "north_in_in": False, + "north_in_out": False, + "north_out_undefined": True, + "north_out_north": False, + "north_out_east": False, + "north_out_south": False, + "north_out_west": True, + "north_out_in": False, + "north_out_out": False, + "east_undefined_undefined": False, + "east_undefined_north": True, + "east_undefined_east": False, + "east_undefined_south": True, + "east_undefined_west": False, + "east_undefined_in": True, + "east_undefined_out": True, + "east_north_undefined": True, + "east_north_north": False, + "east_north_east": False, + "east_north_south": False, + "east_north_west": False, + "east_north_in": True, + "east_north_out": False, + "east_east_undefined": False, + "east_east_north": False, + "east_east_east": False, + "east_east_south": False, + "east_east_west": False, + "east_east_in": False, + "east_east_out": False, + "east_south_undefined": True, + "east_south_north": False, + "east_south_east": False, + "east_south_south": False, + "east_south_west": False, + "east_south_in": False, + "east_south_out": True, + "east_west_undefined": False, + "east_west_north": False, + "east_west_east": False, + "east_west_south": False, + "east_west_west": False, + "east_west_in": False, + "east_west_out": False, + "east_in_undefined": True, + "east_in_north": False, + "east_in_east": False, + "east_in_south": True, + "east_in_west": False, + "east_in_in": False, + "east_in_out": False, + "east_out_undefined": True, + "east_out_north": True, + "east_out_east": False, + "east_out_south": False, + "east_out_west": False, + "east_out_in": False, + "east_out_out": False, + "south_undefined_undefined": False, + "south_undefined_north": False, + "south_undefined_east": True, + "south_undefined_south": False, + "south_undefined_west": True, + "south_undefined_in": True, + "south_undefined_out": True, + "south_north_undefined": False, + "south_north_north": False, + "south_north_east": False, + "south_north_south": False, + "south_north_west": False, + "south_north_in": False, + "south_north_out": False, + "south_east_undefined": True, + "south_east_north": False, + "south_east_east": False, + "south_east_south": False, + "south_east_west": False, + "south_east_in": True, + "south_east_out": False, + "south_south_undefined": False, + "south_south_north": False, + "south_south_east": False, + "south_south_south": False, + "south_south_west": False, + "south_south_in": False, + "south_south_out": False, + "south_west_undefined": True, + "south_west_north": False, + "south_west_east": False, + "south_west_south": False, + "south_west_west": False, + "south_west_in": False, + "south_west_out": True, + "south_in_undefined": True, + "south_in_north": False, + "south_in_east": False, + "south_in_south": False, + "south_in_west": True, + "south_in_in": False, + "south_in_out": False, + "south_out_undefined": True, + "south_out_north": False, + "south_out_east": True, + "south_out_south": False, + "south_out_west": False, + "south_out_in": False, + "south_out_out": False, + 
"west_undefined_undefined": False, + "west_undefined_north": True, + "west_undefined_east": False, + "west_undefined_south": True, + "west_undefined_west": False, + "west_undefined_in": True, + "west_undefined_out": True, + "west_north_undefined": True, + "west_north_north": False, + "west_north_east": False, + "west_north_south": False, + "west_north_west": False, + "west_north_in": False, + "west_north_out": True, + "west_east_undefined": False, + "west_east_north": False, + "west_east_east": False, + "west_east_south": False, + "west_east_west": False, + "west_east_in": False, + "west_east_out": False, + "west_south_undefined": True, + "west_south_north": False, + "west_south_east": False, + "west_south_south": False, + "west_south_west": False, + "west_south_in": True, + "west_south_out": False, + "west_west_undefined": False, + "west_west_north": False, + "west_west_east": False, + "west_west_south": False, + "west_west_west": False, + "west_west_in": False, + "west_west_out": False, + "west_in_undefined": True, + "west_in_north": True, + "west_in_east": False, + "west_in_south": False, + "west_in_west": False, + "west_in_in": False, + "west_in_out": False, + "west_out_undefined": True, + "west_out_north": False, + "west_out_east": False, + "west_out_south": True, + "west_out_west": False, + "west_out_in": False, + "west_out_out": False, + "in_undefined_undefined": False, + "in_undefined_north": True, + "in_undefined_east": True, + "in_undefined_south": True, + "in_undefined_west": True, + "in_undefined_in": False, + "in_undefined_out": False, + "in_north_undefined": True, + "in_north_north": False, + "in_north_east": False, + "in_north_south": False, + "in_north_west": True, + "in_north_in": False, + "in_north_out": False, + "in_east_undefined": True, + "in_east_north": True, + "in_east_east": False, + "in_east_south": False, + "in_east_west": False, + "in_east_in": False, + "in_east_out": False, + "in_south_undefined": True, + "in_south_north": False, + "in_south_east": True, + "in_south_south": False, + "in_south_west": False, + "in_south_in": False, + "in_south_out": False, + "in_west_undefined": True, + "in_west_north": False, + "in_west_east": False, + "in_west_south": True, + "in_west_west": False, + "in_west_in": False, + "in_west_out": False, + "in_in_undefined": False, + "in_in_north": False, + "in_in_east": False, + "in_in_south": False, + "in_in_west": False, + "in_in_in": False, + "in_in_out": False, + "in_out_undefined": False, + "in_out_north": False, + "in_out_east": False, + "in_out_south": False, + "in_out_west": False, + "in_out_in": False, + "in_out_out": False, + "out_undefined_undefined": False, + "out_undefined_north": True, + "out_undefined_east": True, + "out_undefined_south": True, + "out_undefined_west": True, + "out_undefined_in": False, + "out_undefined_out": False, + "out_north_undefined": True, + "out_north_north": False, + "out_north_east": True, + "out_north_south": False, + "out_north_west": False, + "out_north_in": False, + "out_north_out": False, + "out_east_undefined": True, + "out_east_north": False, + "out_east_east": False, + "out_east_south": True, + "out_east_west": False, + "out_east_in": False, + "out_east_out": False, + "out_south_undefined": True, + "out_south_north": False, + "out_south_east": False, + "out_south_south": False, + "out_south_west": True, + "out_south_in": False, + "out_south_out": False, + "out_west_undefined": True, + "out_west_north": True, + "out_west_east": False, + "out_west_south": False, + "out_west_west": False, + 
"out_west_in": False, + "out_west_out": False, + "out_in_undefined": False, + "out_in_north": False, + "out_in_east": False, + "out_in_south": False, + "out_in_west": False, + "out_in_in": False, + "out_in_out": False, + "out_out_undefined": False, + "out_out_north": False, + "out_out_east": False, + "out_out_south": False, + "out_out_west": False, + "out_out_in": False, + "out_out_out": False, +} def is_cs_well_defined(handedness, directions): diff --git a/pynxtools/dataconverter/readers/em_om/utils/image_transform.py b/pynxtools/dataconverter/readers/em_om/utils/image_transform.py index 7369ebef8..1cd077e95 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/image_transform.py +++ b/pynxtools/dataconverter/readers/em_om/utils/image_transform.py @@ -29,8 +29,8 @@ def thumbnail(img, size=300): """Create a thumbnail, i.e. resized version of an image.""" img = img.copy() - if img.mode not in ('L', 'RGB'): - img = img.convert('RGB') + if img.mode not in ("L", "RGB"): + img = img.convert("RGB") old_width, old_height = img.size diff --git a/pynxtools/dataconverter/readers/em_om/utils/msmse_convention.py b/pynxtools/dataconverter/readers/em_om/utils/msmse_convention.py index 0b2d48327..1748ad4cb 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/msmse_convention.py +++ b/pynxtools/dataconverter/readers/em_om/utils/msmse_convention.py @@ -23,7 +23,7 @@ "three_dimensional_rotation_handedness": "counter_clockwise", "rotation_convention": "passive", "euler_angle_convention": "zxz", - "axis_angle_convention": "rotation_angle_on_interval_zero_to_pi" + "axis_angle_convention": "rotation_angle_on_interval_zero_to_pi", } # the sign convention is mentioned in the paper but left as a parameter # "sign_convention": "p_minus_one" @@ -36,7 +36,8 @@ def is_consistent_with_msmse_convention(dct): "three_dimensional_rotation_handedness", "rotation_convention", "euler_angle_convention", - "axis_angle_convention"] + "axis_angle_convention", + ] for field_name in req_fields: if (field_name not in dct) or (field_name not in msmse_convention): return "unclear" diff --git a/pynxtools/dataconverter/readers/em_om/utils/mtex_ebsd_parser.py b/pynxtools/dataconverter/readers/em_om/utils/mtex_ebsd_parser.py index d1ca355c0..b45cb4554 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/mtex_ebsd_parser.py +++ b/pynxtools/dataconverter/readers/em_om/utils/mtex_ebsd_parser.py @@ -25,9 +25,7 @@ class NxEmOmMtexEbsdParser: - """Parse *.mtex EBSD data. 
- - """ + """Parse *.mtex EBSD data.""" def __init__(self, file_name: str, entry_id: int): """Class wrapping reading HDF5 files formatted according NXem_ebsd from MTex.""" @@ -42,7 +40,9 @@ def parse_roi_default_plot(self, template: dict) -> dict: src = "/entry1/indexing/region_of_interest" trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/region_of_interest" - template[f"{trg}/descriptor"] = str(h5r[f"{src}/descriptor"][()].decode("utf-8")) + template[f"{trg}/descriptor"] = str( + h5r[f"{src}/descriptor"][()].decode("utf-8") + ) src = "/entry1/indexing/region_of_interest/roi" trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/region_of_interest/roi" @@ -55,12 +55,18 @@ def parse_roi_default_plot(self, template: dict) -> dict: template[f"{trg}/title"] = str("Region-of-interest overview image") template[f"{trg}/@signal"] = grp.attrs["signal"] template[f"{trg}/@axes"] = grp.attrs["axes"] - template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = grp.attrs["axis_x_indices"] - template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = grp.attrs["axis_y_indices"] + template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = grp.attrs[ + "axis_x_indices" + ] + template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = grp.attrs[ + "axis_y_indices" + ] src = "/entry1/indexing/region_of_interest/roi/data" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/region_of_interest/roi/data" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/region_of_interest/roi/data" + ) if src not in h5r.keys(): # must not happen, dst is required print(f"{src} not found !") @@ -75,8 +81,10 @@ def parse_roi_default_plot(self, template: dict) -> dict: axes_names = ["axis_x", "axis_y"] for axis_name in axes_names: src = f"/entry1/indexing/region_of_interest/roi/{axis_name}" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" \ - f"region_of_interest/roi/{axis_name}" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" + f"region_of_interest/roi/{axis_name}" + ) if src not in h5r.keys(): # must not happen, dst is required # print(f"{src} not found !") @@ -96,7 +104,7 @@ def parse_phases(self, template: dict) -> dict: src = "/entry1/indexing" # mtex2nexus MTex/Matlab scripts writes controlled terms phaseID - group_names = [entry for entry in h5r[src].keys() if entry.startswith('phase')] + group_names = [entry for entry in h5r[src].keys() if entry.startswith("phase")] if len(group_names) == 0: return template # group_names end up sorted in ascending order @@ -106,14 +114,18 @@ def parse_phases(self, template: dict) -> dict: # must not happen, verifier will complain return template src = f"/entry1/indexing/{group_name}" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" \ - f"EM_EBSD_CRYSTAL_STRUCTURE_MODEL" \ - f"[em_ebsd_crystal_structure_model{identifier}]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" + f"EM_EBSD_CRYSTAL_STRUCTURE_MODEL" + f"[em_ebsd_crystal_structure_model{identifier}]" + ) template[f"{trg}/phase_identifier"] = h5r[f"{src}/phase_identifier"][0] - template[f"{trg}/phase_name"] \ - = str(h5r[f"{src}/phase_name"][()].decode("utf-8")) - template[f"{trg}/point_group"] \ - = str(h5r[f"{src}/point_group"][()].decode("utf-8")) + template[f"{trg}/phase_name"] = str( + h5r[f"{src}/phase_name"][()].decode("utf-8") + ) + template[f"{trg}/point_group"] = str( + h5r[f"{src}/point_group"][()].decode("utf-8") + ) dst = h5r[f"{src}/unit_cell_abc"] template[f"{trg}/unit_cell_abc"] = dst[:] template[f"{trg}/unit_cell_abc/@units"] = 
dst.attrs["units"] @@ -131,7 +143,9 @@ def parse_inverse_pole_figures(self, template: dict) -> dict: src = "/entry1/indexing" # mtex2nexus MTex/Matlab scripts writes controlled terms phaseID - group_names = [entry for entry in h5r[src].keys() if entry.startswith('ipf_map')] + group_names = [ + entry for entry in h5r[src].keys() if entry.startswith("ipf_map") + ] if len(group_names) == 0: return template # group_names end up sorted in ascending order @@ -155,23 +169,33 @@ def parse_inverse_pole_figure_map(self, h5r, identifier, template: dict) -> dict group_name = f"ipf_map{identifier}" print(f"Parse inverse pole figure (IPF) map for {group_name}...") src = f"/entry1/indexing/{group_name}" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map{identifier}]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{identifier}]" + ) if src not in h5r.keys(): # print(f"WARNING: {group_name} not found !") return template template[f"{trg}/bitdepth"] = np.uint32(8) # h5r[f"{src}/bitdepth"][0] - template[f"{trg}/phase_identifier"] = np.uint32(h5r[f"{src}/phase_identifier"][0]) - template[f"{trg}/phase_name"] = str(h5r[f"{src}/phase_name"][()].decode("utf-8")) + template[f"{trg}/phase_identifier"] = np.uint32( + h5r[f"{src}/phase_identifier"][0] + ) + template[f"{trg}/phase_name"] = str( + h5r[f"{src}/phase_name"][()].decode("utf-8") + ) dst = h5r[f"{src}/program"] template[f"{trg}/PROGRAM[program1]/program"] = str(dst[()].decode("utf-8")) template[f"{trg}/PROGRAM[program1]/program/@version"] = dst.attrs["version"] - template[f"{trg}/projection_direction"] = np.asarray([0., 0., 1.], np.float32) + template[f"{trg}/projection_direction"] = np.asarray( + [0.0, 0.0, 1.0], np.float32 + ) # there should be a depends on etc src = f"/entry1/indexing/{group_name}/ipf_rgb_map" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" \ - f"PROCESS[ipf_map{identifier}]/ipf_rgb_map" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" + f"PROCESS[ipf_map{identifier}]/ipf_rgb_map" + ) if src not in h5r.keys(): # must not happen, grp is required # print(f"WARNING: {group_name} not found, ipf_rgb_map !") @@ -181,12 +205,18 @@ def parse_inverse_pole_figure_map(self, h5r, identifier, template: dict) -> dict template[f"{trg}/title"] = str("Inverse pole figure color map") template[f"{trg}/@signal"] = grp.attrs["signal"] template[f"{trg}/@axes"] = grp.attrs["axes"] - template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = grp.attrs["axis_x_indices"] - template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = grp.attrs["axis_y_indices"] + template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = grp.attrs[ + "axis_x_indices" + ] + template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = grp.attrs[ + "axis_y_indices" + ] src = f"/entry1/indexing/{group_name}/ipf_rgb_map/data" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" \ - f"PROCESS[ipf_map{identifier}]/ipf_rgb_map/DATA[data]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" + f"PROCESS[ipf_map{identifier}]/ipf_rgb_map/DATA[data]" + ) if src not in h5r.keys(): # must not happen, dst is required # print(f"WARNING: {group_name} not found, ipf_rgb_map, data !") @@ -201,8 +231,10 @@ def parse_inverse_pole_figure_map(self, h5r, identifier, template: dict) -> dict axes_names = ["axis_x", "axis_y"] for axis_name in axes_names: src = f"/entry1/indexing/{group_name}/ipf_rgb_map/{axis_name}" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - 
f"/PROCESS[ipf_map{identifier}]/ipf_rgb_map/AXISNAME[{axis_name}]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{identifier}]/ipf_rgb_map/AXISNAME[{axis_name}]" + ) if src not in h5r.keys(): # must not happen, dst is required # print(f"WARNING: {group_name} not found, ipf_rgb_map, {axis_name} !") @@ -214,13 +246,17 @@ def parse_inverse_pole_figure_map(self, h5r, identifier, template: dict) -> dict return template - def parse_inverse_pole_figure_color_key(self, h5r, identifier, template: dict) -> dict: + def parse_inverse_pole_figure_color_key( + self, h5r, identifier, template: dict + ) -> dict: """Parse color key renderings of inverse-pole-figure (IPF) mappings.""" group_name = f"ipf_map{identifier}" print(f"Parse inverse pole figure (IPF) color key {group_name}...") src = f"/entry1/indexing/{group_name}/ipf_rgb_color_model" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map{identifier}]/ipf_rgb_color_model" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{identifier}]/ipf_rgb_color_model" + ) if src not in h5r.keys(): # must not happen, grp is required # print(f"WARNING: {group_name} not found, ipf_rgb_color_model") @@ -229,12 +265,18 @@ def parse_inverse_pole_figure_color_key(self, h5r, identifier, template: dict) - template[f"{trg}/title"] = str("Inverse pole figure color key with SST") template[f"{trg}/@signal"] = grp.attrs["signal"] template[f"{trg}/@axes"] = grp.attrs["axes"] - template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = grp.attrs["axis_x_indices"] - template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = grp.attrs["axis_y_indices"] + template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = grp.attrs[ + "axis_x_indices" + ] + template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = grp.attrs[ + "axis_y_indices" + ] src = f"/entry1/indexing/{group_name}/ipf_rgb_color_model/data" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map{identifier}]/ipf_rgb_color_model/DATA[data]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{identifier}]/ipf_rgb_color_model/DATA[data]" + ) if src not in h5r.keys(): # must not happen, dst is required # print(f"WARNING: {group_name} not found, ipf_rgb_color_model, data") @@ -249,9 +291,11 @@ def parse_inverse_pole_figure_color_key(self, h5r, identifier, template: dict) - axes_names = ["axis_x", "axis_y"] for axis_name in axes_names: src = f"/entry1/indexing/{group_name}/ipf_rgb_color_model/{axis_name}" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map{identifier}]/ipf_rgb_color_model" \ - f"/AXISNAME[{axis_name}]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{identifier}]/ipf_rgb_color_model" + f"/AXISNAME[{axis_name}]" + ) if src not in h5r.keys(): # must not happen, dst is required # print(f"WARNING: {group_name} not found, diff --git a/pynxtools/dataconverter/readers/em_om/utils/orix_ebsd_parser.py b/pynxtools/dataconverter/readers/em_om/utils/orix_ebsd_parser.py index 26a8736f3..85c66f7ba 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/orix_ebsd_parser.py +++ b/pynxtools/dataconverter/readers/em_om/utils/orix_ebsd_parser.py @@ -46,7 +46,9 @@ from pynxtools.dataconverter.readers.em_om.utils.image_transform import thumbnail -from pynxtools.dataconverter.readers.em_om.utils.em_nexus_plots import HFIVE_WEB_MAX_SIZE +from pynxtools.dataconverter.readers.em_om.utils.em_nexus_plots import ( + 
HFIVE_WEB_MAX_SIZE, +) orix_params = { "figure.facecolor": "w", @@ -62,9 +64,7 @@ class NxEmOmOrixEbsdParser: - """Parse *.h5oina EBSD data. - - """ + """Parse *.h5oina EBSD data.""" def __init__(self, file_name, entry_id): """Class wrapping pyxem/orix H5OINA parser.""" @@ -81,17 +81,19 @@ def __init__(self, file_name, entry_id): self.xaxis: List[float] = [] self.yaxis: List[float] = [] self.xmap = CrystalMap - self.oina: Dict[str, Any] = {"n_slices": 1, - "rotation": Rotation, - "scan_point_x": [], - "scan_point_y": [], - "phase_identifier": [], - "band_contrast": [], - "scan_size": [0, 0], - "scan_step": [0., 0.], - "scan_unit": ["n/a", "n/a"], - "phase": [], - "space_group": []} + self.oina: Dict[str, Any] = { + "n_slices": 1, + "rotation": Rotation, + "scan_point_x": [], + "scan_point_y": [], + "phase_identifier": [], + "band_contrast": [], + "scan_size": [0, 0], + "scan_step": [0.0, 0.0], + "scan_unit": ["n/a", "n/a"], + "phase": [], + "space_group": [], + } # y (aka height), x (aka width) ! def parse_h5oina(self, slice_id): @@ -105,7 +107,9 @@ def parse_h5oina(self, slice_id): h5r = h5py.File(self.file_names, "r") self.oina_version_identifier = h5r["/Format Version"][0].decode("utf-8") self.oina["n_slices"] = h5r["/Index"][0] - print(f"H5OINA v{self.oina_version_identifier} has {self.oina['n_slices']} slices") + print( + f"H5OINA v{self.oina_version_identifier} has {self.oina['n_slices']} slices" + ) if self.oina_version_identifier != "5.0" or self.oina["n_slices"] != 1: print("This examples supports H5OINA only in version 5.0 with one slice!") return @@ -129,9 +133,9 @@ def parse_h5oina_ebsd_data(self, h5r, slice_id): # required entries in v5.0 dset_name = f"{group_name}/Euler" if dset_name in h5r: - self.oina["rotation"] = Rotation.from_euler(euler=h5r[dset_name], - direction='lab2crystal', - degrees=False) # rad in v5.0 + self.oina["rotation"] = Rotation.from_euler( + euler=h5r[dset_name], direction="lab2crystal", degrees=False + ) # rad in v5.0 dset_name = f"{group_name}/Phase" if dset_name in h5r: self.oina["phase_identifier"] = np.asarray(h5r[dset_name], np.int32) @@ -144,7 +148,9 @@ def parse_h5oina_ebsd_data(self, h5r, slice_id): if dset_name in h5r: self.oina["scan_point_y"] = np.asarray(h5r[dset_name], np.float32) dset_name = f"{group_name}/Band Contrast" - if dset_name in h5r: # is not required but should how else to create a ROI image + if ( + dset_name in h5r + ): # is not required but should how else to create a ROI image self.oina["band_contrast"] = np.asarray(h5r[dset_name], np.uint8) def parse_h5oina_ebsd_header(self, h5r, slice_id): @@ -181,7 +187,7 @@ def parse_h5oina_phase(self, h5r, slice_id, name): # "Reference", but even examples from Oxford place no DOIs here dset_name = f"{sub_group_name}/Lattice Angles" if dset_name in h5r: - alpha_beta_gamma = np.asarray(h5r[dset_name][:].flatten()) / np.pi * 180. 
+ alpha_beta_gamma = np.asarray(h5r[dset_name][:].flatten()) / np.pi * 180.0 # rad2deg dset_name = f"{sub_group_name}/Lattice Dimensions" if dset_name in h5r: @@ -200,18 +206,26 @@ def parse_h5oina_phase(self, h5r, slice_id, name): self.oina["space_group"].append(space_group) self.oina["phase"].append( - Structure(title=phase_name, - atoms=None, - lattice=Lattice(a_b_c[0], a_b_c[1], a_b_c[2], - alpha_beta_gamma[0], - alpha_beta_gamma[1], - alpha_beta_gamma[2]))) + Structure( + title=phase_name, + atoms=None, + lattice=Lattice( + a_b_c[0], + a_b_c[1], + a_b_c[2], + alpha_beta_gamma[0], + alpha_beta_gamma[1], + alpha_beta_gamma[2], + ), + ) + ) def generate_xmap(self): """Generate the orientation map and orix/diffsims data structures.""" coordinates, _ = create_coordinate_arrays( (self.oina["scan_size"][0], self.oina["scan_size"][1]), - (self.oina["scan_step"][0], self.oina["scan_step"][1])) + (self.oina["scan_step"][0], self.oina["scan_step"][1]), + ) self.xaxis = coordinates["x"] self.yaxis = coordinates["y"] del coordinates @@ -223,13 +237,17 @@ def generate_xmap(self): self.oina["phase_identifier"] = self.oina["phase_identifier"] - 1 print(np.unique(self.oina["phase_identifier"])) - self.xmap = CrystalMap(rotations=self.oina["rotation"], - x=self.xaxis, y=self.yaxis, - phase_id=self.oina["phase_identifier"], - phase_list=PhaseList(space_groups=self.oina["space_group"], - structures=self.oina["phase"]), - prop={"bc": self.oina["band_contrast"]}, - scan_unit=self.oina["scan_unit"]) + self.xmap = CrystalMap( + rotations=self.oina["rotation"], + x=self.xaxis, + y=self.yaxis, + phase_id=self.oina["phase_identifier"], + phase_list=PhaseList( + space_groups=self.oina["space_group"], structures=self.oina["phase"] + ), + prop={"bc": self.oina["band_contrast"]}, + scan_unit=self.oina["scan_unit"], + ) print(self.xmap) def parse_roi_default_plot(self, template: dict) -> dict: @@ -249,34 +267,60 @@ def parse_roi_default_plot(self, template: dict) -> dict: template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = np.uint32(0) template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = np.uint32(1) - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/region_of_interest/roi/data" - template[f"{trg}"] = {"compress": np.reshape( - np.asarray(np.asarray((self.xmap.bc / np.max(self.xmap.bc) * 255.), - np.uint32), np.uint8), (self.xmap.shape[0], self.xmap.shape[1]), - order="C"), "strength": 1} + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/region_of_interest/roi/data" + ) + template[f"{trg}"] = { + "compress": np.reshape( + np.asarray( + np.asarray( + (self.xmap.bc / np.max(self.xmap.bc) * 255.0), np.uint32 + ), + np.uint8, + ), + (self.xmap.shape[0], self.xmap.shape[1]), + order="C", + ), + "strength": 1, + } # 0 is y while 1 is x ! 
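
# Aside: the hunk above packs the per-pixel band contrast into an 8-bit
# region-of-interest overview image. A minimal standalone sketch of just that
# normalization step, assuming a flat integer array `bc` and a (rows, cols)
# map shape; the function name is illustrative, not pynxtools API:
import numpy as np

def bc_to_uint8_image(bc, shape):
    """Scale band-contrast values to 0..255 and lay them out as (y, x)."""
    scaled = np.asarray(bc / np.max(bc) * 255.0, np.uint32)  # full 8-bit range
    img = np.asarray(scaled, np.uint8)  # narrow after the intermediate cast
    # order="C" fills row by row, so the slow axis (rows) is y, the fast axis is x
    return np.reshape(img, (shape[0], shape[1]), order="C")

# usage: a fake 2x3 scan; values land row by row in the image
print(bc_to_uint8_image(np.asarray([10, 20, 30, 40, 50, 60]), (2, 3)))
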
template[f"{trg}/@long_name"] = "Signal" template[f"{trg}/@CLASS"] = "IMAGE" # required by H5Web to plot RGB maps template[f"{trg}/@IMAGE_VERSION"] = "1.2" template[f"{trg}/@SUBCLASS_VERSION"] = np.int64(15) - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/region_of_interest/roi/axis_x" - template[f"{trg}"] = {"compress": np.asarray( - 0.5 * self.oina["scan_step"][1] + self.xaxis[0:self.xmap.shape[1]], - np.float32), "strength": 1} - template[f"{trg}/@long_name"] \ - = f"Calibrated coordinate along x-axis ({self.oina['scan_unit'][1]})" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/region_of_interest/roi/axis_x" + ) + template[f"{trg}"] = { + "compress": np.asarray( + 0.5 * self.oina["scan_step"][1] + self.xaxis[0 : self.xmap.shape[1]], + np.float32, + ), + "strength": 1, + } + template[ + f"{trg}/@long_name" + ] = f"Calibrated coordinate along x-axis ({self.oina['scan_unit'][1]})" template[f"{trg}/@units"] = self.oina["scan_unit"][1] - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/region_of_interest/roi/axis_y" - template[f"{trg}"] = {"compress": np.asarray( - 0.5 * self.oina["scan_step"][0] + self.yaxis[0:self.xmap.size:self.xmap.shape[1]], - np.float32), "strength": 1} - template[f"{trg}/@long_name"] \ - = f"Calibrated coordinate along y-axis ({self.oina['scan_unit'][0]})" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/region_of_interest/roi/axis_y" + ) + template[f"{trg}"] = { + "compress": np.asarray( + 0.5 * self.oina["scan_step"][0] + + self.yaxis[0 : self.xmap.size : self.xmap.shape[1]], + np.float32, + ), + "strength": 1, + } + template[ + f"{trg}/@long_name" + ] = f"Calibrated coordinate along y-axis ({self.oina['scan_unit'][0]})" template[f"{trg}/@units"] = self.oina["scan_unit"][0] return template @@ -286,18 +330,21 @@ def parse_phases(self, template: dict) -> dict: identifier = 1 # identifier match because phase is a list asc. sorted by numerical keys for phase in self.oina["phase"]: - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" \ - f"EM_EBSD_CRYSTAL_STRUCTURE_MODEL" \ - f"[em_ebsd_crystal_structure_model{identifier}]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" + f"EM_EBSD_CRYSTAL_STRUCTURE_MODEL" + f"[em_ebsd_crystal_structure_model{identifier}]" + ) template[f"{trg}/phase_identifier"] = np.uint32(identifier) template[f"{trg}/phase_name"] = str(phase.title) template[f"{trg}/unit_cell_abc"] = np.asarray( - [phase.lattice.a, phase.lattice.b, phase.lattice.c], - np.float32) + [phase.lattice.a, phase.lattice.b, phase.lattice.c], np.float32 + ) template[f"{trg}/unit_cell_abc/@units"] = "nm" template[f"{trg}/unit_cell_alphabetagamma"] = np.asarray( [phase.lattice.alpha, phase.lattice.beta, phase.lattice.gamma], - np.float32) + np.float32, + ) template[f"{trg}/unit_cell_alphabetagamma/@units"] = "°" identifier += 1 return template @@ -319,29 +366,39 @@ def parse_inverse_pole_figure_map(self, identifier, template: dict) -> dict: # +1 because for orix not_indexed -1 and "first" phase has ID 0 ! phase_id = identifier + 1 phase_name = self.xmap.phases[identifier].name - print(f"Generate inverse pole figure (IPF) map for {identifier}, {phase_name}...") + print( + f"Generate inverse pole figure (IPF) map for {identifier}, {phase_name}..." 
+ ) phase_id_ipf_key = plot.IPFColorKeyTSL( - self.xmap.phases[identifier].point_group.laue, - direction=Vector3d.zvector()) + self.xmap.phases[identifier].point_group.laue, direction=Vector3d.zvector() + ) rgb_px_with_phase_id = np.asarray( - np.asarray(phase_id_ipf_key.orientation2color( - self.xmap[phase_name].rotations) * 255., np.uint32), np.uint8) + np.asarray( + phase_id_ipf_key.orientation2color(self.xmap[phase_name].rotations) + * 255.0, + np.uint32, + ), + np.uint8, + ) ipf_rgb_map = np.asarray( - np.uint8(np.zeros((self.xmap.shape[0] * self.xmap.shape[1], 3)) * 255.0)) + np.uint8(np.zeros((self.xmap.shape[0] * self.xmap.shape[1], 3)) * 255.0) + ) # background is black instead of white (which would be more pleasing) # but IPF color maps have a whitepoint which encodes in fact an orientation # and because of that we may have a single crystal with an orientation # close to the whitepoint which become a fully white seemingly "empty" image ipf_rgb_map[self.xmap.phase_id == identifier, :] = rgb_px_with_phase_id - ipf_rgb_map = np.reshape(ipf_rgb_map, - (self.xmap.shape[0], self.xmap.shape[1], 3), - order="C") # 0 is y while 1 is x ! - - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map{phase_id}]" + ipf_rgb_map = np.reshape( + ipf_rgb_map, (self.xmap.shape[0], self.xmap.shape[1], 3), order="C" + ) # 0 is y while 1 is x ! + + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{phase_id}]" + ) template[f"{trg}/bitdepth"] = np.uint32(8) template[f"{trg}/phase_identifier"] = np.uint32(phase_id) template[f"{trg}/phase_name"] = str(phase_name) @@ -349,41 +406,61 @@ def parse_inverse_pole_figure_map(self, identifier, template: dict) -> dict: template[f"{trg}/PROGRAM[program1]/program/@version"] = orix.__version__ template[f"{trg}/PROGRAM[program2]/program"] = str("diffsims") template[f"{trg}/PROGRAM[program2]/program/@version"] = diffsims.__version__ - template[f"{trg}/projection_direction"] = np.asarray([0., 0., 1.], np.float32) + template[f"{trg}/projection_direction"] = np.asarray( + [0.0, 0.0, 1.0], np.float32 + ) # should have a reference so that it is directly interpretable - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" \ - f"PROCESS[ipf_map{phase_id}]/ipf_rgb_map" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" + f"PROCESS[ipf_map{phase_id}]/ipf_rgb_map" + ) template[f"{trg}/title"] = str("Inverse pole figure color map") template[f"{trg}/@signal"] = "data" template[f"{trg}/@axes"] = ["axis_y", "axis_x"] template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = np.uint32(0) template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = np.uint32(1) - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" \ - f"PROCESS[ipf_map{phase_id}]/ipf_rgb_map/DATA[data]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" + f"PROCESS[ipf_map{phase_id}]/ipf_rgb_map/DATA[data]" + ) template[f"{trg}"] = {"compress": ipf_rgb_map, "strength": 1} template[f"{trg}/@CLASS"] = "IMAGE" # required by H5Web to plot RGB maps template[f"{trg}/@IMAGE_VERSION"] = "1.2" template[f"{trg}/@SUBCLASS_VERSION"] = np.int64(15) # dimension scale axes value arrays same for each phase, entire IPF map - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" \ - f"PROCESS[ipf_map{phase_id}]/ipf_rgb_map/axis_x" - template[f"{trg}"] = {"compress": np.asarray( - 0.5 * self.oina["scan_step"][1] + self.xaxis[0:self.xmap.shape[1]], - np.float32), "strength": 1} - template[f"{trg}/@long_name"] \ - = f"Calibrated coordinate along 
x-axis ({self.oina['scan_unit'][1]})" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" + f"PROCESS[ipf_map{phase_id}]/ipf_rgb_map/axis_x" + ) + template[f"{trg}"] = { + "compress": np.asarray( + 0.5 * self.oina["scan_step"][1] + self.xaxis[0 : self.xmap.shape[1]], + np.float32, + ), + "strength": 1, + } + template[ + f"{trg}/@long_name" + ] = f"Calibrated coordinate along x-axis ({self.oina['scan_unit'][1]})" template[f"{trg}/@units"] = self.oina["scan_unit"][1] - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" \ - f"PROCESS[ipf_map{phase_id}]/ipf_rgb_map/axis_y" - template[f"{trg}"] = {"compress": np.asarray( - 0.5 * self.oina["scan_step"][0] - + self.yaxis[0:self.xmap.size:self.xmap.shape[1]], - np.float32), "strength": 1} - template[f"{trg}/@long_name"] \ - = f"Calibrated coordinate along y-axis ({self.oina['scan_unit'][0]})" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing/" + f"PROCESS[ipf_map{phase_id}]/ipf_rgb_map/axis_y" + ) + template[f"{trg}"] = { + "compress": np.asarray( + 0.5 * self.oina["scan_step"][0] + + self.yaxis[0 : self.xmap.size : self.xmap.shape[1]], + np.float32, + ), + "strength": 1, + } + template[ + f"{trg}/@long_name" + ] = f"Calibrated coordinate along y-axis ({self.oina['scan_unit'][0]})" template[f"{trg}/@units"] = self.oina["scan_unit"][0] return template @@ -393,21 +470,34 @@ def parse_inverse_pole_figure_color_key(self, identifier, template: dict) -> dic # +1 because for orix not_indexed -1 and "first" phase has ID 0 ! phase_id = identifier + 1 phase_name = self.xmap.phases[identifier].name - print(f"Parse inverse pole figure (IPF) color key {identifier}, {phase_name}...") + print( + f"Parse inverse pole figure (IPF) color key {identifier}, {phase_name}..." + ) phase_id_ipf_key = plot.IPFColorKeyTSL( - self.xmap.phases[identifier].point_group.laue, - direction=Vector3d.zvector()) + self.xmap.phases[identifier].point_group.laue, direction=Vector3d.zvector() + ) # render domain-specific IPF color keys using orix fig = phase_id_ipf_key.plot(return_figure=True) - fig.savefig("temporary.png", dpi=300, facecolor='w', edgecolor='w', - orientation='landscape', format='png', transparent=False, - bbox_inches='tight', pad_inches=0.1, metadata=None) + fig.savefig( + "temporary.png", + dpi=300, + facecolor="w", + edgecolor="w", + orientation="landscape", + format="png", + transparent=False, + bbox_inches="tight", + pad_inches=0.1, + metadata=None, + ) # constraint further to 8bit RGB and no flipping # im = np.asarray(imageio.v3.imread(symm_name)) - img = np.asarray(thumbnail(pil.open("temporary.png", "r", ["png"]), - size=HFIVE_WEB_MAX_SIZE), np.uint8) + img = np.asarray( + thumbnail(pil.open("temporary.png", "r", ["png"]), size=HFIVE_WEB_MAX_SIZE), + np.uint8, + ) img = img[:, :, 0:3] # discard alpha channel if os.path.exists("temporary.png"): os.remove("temporary.png") @@ -415,39 +505,49 @@ def parse_inverse_pole_figure_color_key(self, identifier, template: dict) -> dic # a specific type of image and bitdepth and color model, and avoid implicit # image transformations such as flips or rotations - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model" + ) template[f"{trg}/title"] = str("Inverse pole figure color key with SST") template[f"{trg}/@signal"] = "data" template[f"{trg}/@axes"] = ["axis_y", "axis_x"] 
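
# Aside: @signal, @axes, and @AXISNAME_indices are the NXdata attributes that
# tell a viewer such as H5Web which dataset to plot by default and which
# dimension scales belong to it. A minimal sketch of the pattern these hunks
# fill in, reusing the reader's flat template dict and its
# {"compress": array, "strength": level} convention for compressed datasets;
# the group path is hypothetical, and the x -> 0, y -> 1 index pairing simply
# mirrors the surrounding lines:
import numpy as np

template = {}
trg = "/ENTRY[entry1]/demo/ipf_rgb_color_model"  # illustrative NXdata group
template[f"{trg}/@signal"] = "data"  # child dataset holding the plottable image
template[f"{trg}/@axes"] = ["axis_y", "axis_x"]  # slowest dimension first
template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = np.uint32(0)
template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = np.uint32(1)
template[f"{trg}/DATA[data]"] = {"compress": np.zeros((4, 6, 3), np.uint8), "strength": 1}
template[f"{trg}/AXISNAME[axis_y]"] = {"compress": np.arange(1, 5, dtype=np.uint32), "strength": 1}
template[f"{trg}/AXISNAME[axis_x]"] = {"compress": np.arange(1, 7, dtype=np.uint32), "strength": 1}
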
template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = np.uint32(0) template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = np.uint32(1) - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model/DATA[data]" + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model/DATA[data]" + ) template[f"{trg}"] = {"compress": img, "strength": 1} template[f"{trg}/@CLASS"] = "IMAGE" template[f"{trg}/@IMAGE_VERSION"] = "1.2" template[f"{trg}/@SUBCLASS_VERSION"] = np.int64(15) - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model/AXISNAME[axis_y]" - template[f"{trg}"] = {"compress": - np.asarray(np.linspace(1, - np.shape(img)[0], - num=np.shape(img)[0], - endpoint=True), np.uint32), - "strength": 1} + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model/AXISNAME[axis_y]" + ) + template[f"{trg}"] = { + "compress": np.asarray( + np.linspace(1, np.shape(img)[0], num=np.shape(img)[0], endpoint=True), + np.uint32, + ), + "strength": 1, + } template[f"{trg}/@long_name"] = "Pixel along y-axis" template[f"{trg}/@units"] = "px" - trg = f"/ENTRY[entry{self.entry_id}]/experiment/indexing" \ - f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model/AXISNAME[axis_x]" - template[f"{trg}"] = {"compress": - np.asarray(np.linspace(1, - np.shape(img)[1], - num=np.shape(img)[1], - endpoint=True), np.uint32), - "strength": 1} + trg = ( + f"/ENTRY[entry{self.entry_id}]/experiment/indexing" + f"/PROCESS[ipf_map{phase_id}]/ipf_rgb_color_model/AXISNAME[axis_x]" + ) + template[f"{trg}"] = { + "compress": np.asarray( + np.linspace(1, np.shape(img)[1], num=np.shape(img)[1], endpoint=True), + np.uint32, + ), + "strength": 1, + } template[f"{trg}/@long_name"] = "Pixel along x-axis" template[f"{trg}/@units"] = "px" diff --git a/pynxtools/dataconverter/readers/em_om/utils/use_case_selector.py b/pynxtools/dataconverter/readers/em_om/utils/use_case_selector.py index e9eccf1f3..ea9ce4f17 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/use_case_selector.py +++ b/pynxtools/dataconverter/readers/em_om/utils/use_case_selector.py @@ -50,9 +50,10 @@ def analyze_mime_types(self, file_paths: Tuple[str] = None): for file_name in file_paths: index = file_name.lower().rfind(".") if index >= 0: - suffix = file_name.lower()[index + 1::] - add = (suffix in self.supported_mime_types) \ - and (file_name not in self.mime_types[suffix]) + suffix = file_name.lower()[index + 1 : :] + add = (suffix in self.supported_mime_types) and ( + file_name not in self.mime_types[suffix] + ) if add is True: self.mime_types[suffix].append(file_name) print(self.mime_types) diff --git a/pynxtools/dataconverter/readers/em_om/utils/versioning.py b/pynxtools/dataconverter/readers/em_om/utils/versioning.py index 9744446ed..ed0c6a6cd 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/versioning.py +++ b/pynxtools/dataconverter/readers/em_om/utils/versioning.py @@ -19,13 +19,13 @@ # pylint: disable=no-member -from pynxtools.dataconverter.readers.shared.shared_utils \ - import get_repo_last_commit +from pynxtools.dataconverter.readers.shared.shared_utils import get_repo_last_commit NX_EM_OM_ADEF_NAME = "NXem_ebsd" -NX_EM_OM_ADEF_VERSION = "nexus-fairmat-proposal successor of " \ - "9636feecb79bb32b828b1a9804269573256d7696" +NX_EM_OM_ADEF_VERSION = ( + "nexus-fairmat-proposal successor of " "9636feecb79bb32b828b1a9804269573256d7696" +) # based 
on https://fairmat-experimental.github.io/nexus-fairmat-proposal NX_EM_OM_EXEC_NAME = "dataconverter/reader/em_om/reader.py" NX_EM_OM_EXEC_VERSION = get_repo_last_commit() diff --git a/pynxtools/dataconverter/readers/em_om/utils/zip_ebsd_parser.py b/pynxtools/dataconverter/readers/em_om/utils/zip_ebsd_parser.py index e6acb6c85..b403b9c77 100644 --- a/pynxtools/dataconverter/readers/em_om/utils/zip_ebsd_parser.py +++ b/pynxtools/dataconverter/readers/em_om/utils/zip_ebsd_parser.py @@ -34,13 +34,13 @@ # import imageio.v3 as iio from PIL import Image as pil -from pynxtools.dataconverter.readers.em_om.utils.em_nexus_plots import HFIVE_WEB_MAX_SIZE +from pynxtools.dataconverter.readers.em_om.utils.em_nexus_plots import ( + HFIVE_WEB_MAX_SIZE, +) class NxEmOmZipEbsdParser: - """Parse *.zip EBSD data. - - """ + """Parse *.zip EBSD data.""" def __init__(self, file_name, entry_id): """Class wrapping zip parser.""" @@ -64,7 +64,7 @@ def parse_zip(self, template: dict) -> dict: # ASSUME that pattern have numeral components in their file name zip_content_table = {} for file in zip_file_hdl.namelist(): - keyword = str(np.uint64(re.sub('[^0-9]', '', file))) + keyword = str(np.uint64(re.sub("[^0-9]", "", file))) if len(keyword) > 0 and keyword not in zip_content_table: zip_content_table[keyword] = file else: @@ -82,15 +82,19 @@ def parse_zip(self, template: dict) -> dict: # ...here we immediately see how problematic custom directory structures # for storing research data are even if they were to contain only exactly # always data of expected format... - self.stack_meta = {"fname": "", - "size": (0, 0), - "dtype": np.uint8, - "ftype": ""} + self.stack_meta = { + "fname": "", + "size": (0, 0), + "dtype": np.uint8, + "ftype": "", + } # in pixel, use axisy and axisx for dimension scale axes # ASSUME slow axis is y, fast axis is x for keyword, value in zip_content_table.items(): tmp = value.split(".") - if (len(tmp) > 1) and (tmp[-1].lower() in ["bmp", "jpg", "png", "tiff"]): + if (len(tmp) > 1) and ( + tmp[-1].lower() in ["bmp", "jpg", "png", "tiff"] + ): # there are examples where people store Kikuchi diffraction pattern # as lossy and lossless raster... 
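
# Aside: a standalone sketch of the zip + Pillow probing this loop performs,
# i.e. find the first supported raster file in an archive and read its pixel
# dimensions; the function name and the example archive name are illustrative:
import io
import zipfile
from PIL import Image

def first_pattern_shape(zip_path):
    """Return (height, width) of the first supported raster in the archive."""
    with zipfile.ZipFile(zip_path, "r") as zip_file_hdl:
        for name in zip_file_hdl.namelist():
            suffix = name.lower().rsplit(".", 1)[-1]
            if suffix in ("bmp", "jpg", "png", "tiff"):
                with zip_file_hdl.open(name) as file_hdl:
                    img = Image.open(io.BytesIO(file_hdl.read()))
                    return (img.height, img.width)
    raise ValueError("no supported raster file found in the archive")

# usage: first_pattern_shape("kikuchi_patterns.zip")
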
# pil supports reading of files in more formats but the above are @@ -103,8 +107,12 @@ def parse_zip(self, template: dict) -> dict: # how-can-i-get-the-depth-of-a-jpg-file break shp = (img.height, img.width) # np.shape(img) - if (shp[0] > 0) and (shp[0] <= HFIVE_WEB_MAX_SIZE) \ - and (shp[1] > 0) and (shp[1] <= HFIVE_WEB_MAX_SIZE): + if ( + (shp[0] > 0) + and (shp[0] <= HFIVE_WEB_MAX_SIZE) + and (shp[1] > 0) + and (shp[1] <= HFIVE_WEB_MAX_SIZE) + ): # found the guiding image self.stack_meta["size"] = (shp[0], shp[1]) # , 3) self.stack_meta["fname"] = value @@ -119,10 +127,14 @@ def parse_zip(self, template: dict) -> dict: # with useful numeral names and that these have the same metadata # (size, filetype, dtype) identifier = 0 - self.stack = np.zeros((len(zip_content_table), - self.stack_meta["size"][0], - self.stack_meta["size"][1]), - self.stack_meta["dtype"]) + self.stack = np.zeros( + ( + len(zip_content_table), + self.stack_meta["size"][0], + self.stack_meta["size"][1], + ), + self.stack_meta["dtype"], + ) for keyword, value in zip_content_table.items(): tmp = value.split(".") if (len(tmp) > 1) and (tmp[-1].lower() == self.stack_meta["ftype"]): @@ -137,8 +149,9 @@ def parse_zip(self, template: dict) -> dict: else: break - if (np.shape(img) == self.stack_meta["size"]) \ - and (img.dtype == self.stack_meta["dtype"]): + if (np.shape(img) == self.stack_meta["size"]) and ( + img.dtype == self.stack_meta["dtype"] + ): self.stack[identifier, :, :] = img # Kikuchi pattern may come as 8-bit (grayscale) RGBs # or as simulated intensities (as floats) @@ -154,8 +167,10 @@ def parse_zip(self, template: dict) -> dict: def parse_pattern_stack_default_plot(self, template: dict) -> dict: """Parse data for the Kikuchi image stack default plot.""" print("Parse Kikuchi pattern stack default plot...") - trg = f"/ENTRY[entry{self.entry_id}]/simulation/IMAGE_SET_EM_KIKUCHI" \ - f"[image_set_em_kikuchi]/stack" + trg = ( + f"/ENTRY[entry{self.entry_id}]/simulation/IMAGE_SET_EM_KIKUCHI" + f"[image_set_em_kikuchi]/stack" + ) template[f"{trg}/title"] = str("Kikuchi diffraction pattern stack") template[f"{trg}/@signal"] = "data_counts" @@ -163,8 +178,10 @@ def parse_pattern_stack_default_plot(self, template: dict) -> dict: template[f"{trg}/@AXISNAME_indices[axis_x_indices]"] = np.uint32(0) template[f"{trg}/@AXISNAME_indices[axis_y_indices]"] = np.uint32(1) - trg = f"/ENTRY[entry{self.entry_id}]/simulation/IMAGE_SET_EM_KIKUCHI" \ - f"[image_set_em_kikuchi]/stack/data_counts" + trg = ( + f"/ENTRY[entry{self.entry_id}]/simulation/IMAGE_SET_EM_KIKUCHI" + f"[image_set_em_kikuchi]/stack/data_counts" + ) template[f"{trg}"] = {"compress": self.stack, "strength": 1} # 0 is y while 1 is x ! template[f"{trg}/@long_name"] = "Signal" @@ -174,12 +191,19 @@ def parse_pattern_stack_default_plot(self, template: dict) -> dict: axes_names = [("axis_x", 1, "x-axis"), ("axis_y", 0, "y-axis")] for axis in axes_names: - trg = f"/ENTRY[entry{self.entry_id}]/simulation/IMAGE_SET_EM_KIKUCHI" \ - f"[image_set_em_kikuchi]/stack/{axis[0]}" + trg = ( + f"/ENTRY[entry{self.entry_id}]/simulation/IMAGE_SET_EM_KIKUCHI" + f"[image_set_em_kikuchi]/stack/{axis[0]}" + ) axis_i = np.asarray( - np.linspace(0, self.stack_meta["size"][axis[1]], - num=self.stack_meta["size"][axis[1]], - endpoint=True), np.float64) + np.linspace( + 0, + self.stack_meta["size"][axis[1]], + num=self.stack_meta["size"][axis[1]], + endpoint=True, + ), + np.float64, + ) # overwrite with calibrated scale if available # i.e. 
when self.stack_meta["axis_x"] not None: template[f"{trg}"] = {"compress": axis_i, "strength": 1} diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/reader.py b/pynxtools/dataconverter/readers/em_spctrscpy/reader.py index a5adff6e9..0d3c14863 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/reader.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/reader.py @@ -23,20 +23,25 @@ from pynxtools.dataconverter.readers.base.reader import BaseReader -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_use_case_selector \ - import EmUseCaseSelector +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_use_case_selector import ( + EmUseCaseSelector, +) -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_generic_eln_io \ - import NxEmNomadOasisElnSchemaParser +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_generic_eln_io import ( + NxEmNomadOasisElnSchemaParser, +) -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_event_data \ - import NxEventDataEm +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_event_data import ( + NxEventDataEm, +) -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_plots \ - import em_spctrscpy_default_plot_generator +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_plots import ( + em_spctrscpy_default_plot_generator, +) -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_example_data \ - import EmSpctrscpyCreateExampleData +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_example_data import ( + EmSpctrscpyCreateExampleData, +) def hyperspy_parser(file_name: str, template: dict, entry_id: int) -> dict: @@ -66,10 +71,12 @@ class EmSpctrscpyReader(BaseReader): supported_nxdls = ["NXem"] # pylint: disable=duplicate-code - def read(self, - template: dict = None, - file_paths: Tuple[str] = None, - objects: Tuple[Any] = None) -> dict: + def read( + self, + template: dict = None, + file_paths: Tuple[str] = None, + objects: Tuple[Any] = None, + ) -> dict: """Read data from given file, return filled template dictionary em.""" # pylint: disable=duplicate-code template.clear() @@ -96,8 +103,9 @@ def read(self, else: print("Parse ELN and technology partner file(s)...") case = EmUseCaseSelector(file_paths) - assert case.is_valid is True, \ - "Such a combination of input-file(s, if any) is not supported !" + assert ( + case.is_valid is True + ), "Such a combination of input-file(s, if any) is not supported !" print("Parse (meta)data coming from an ELN...") if case.eln_parser == "nomad-oasis": @@ -106,7 +114,9 @@ def read(self, print("No input file defined for eln data !") return {} - print("Parse (numerical) data and metadata from technology partner files...") + print( + "Parse (numerical) data and metadata from technology partner files..." 
+ ) if case.vendor_parser == "oina": # oina_parser(case.vendor[0], template, entry_id) return {} diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_event_data.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_event_data.py index f10086f1f..e418a8365 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_event_data.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_event_data.py @@ -23,17 +23,21 @@ import hyperspy.api as hs -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_base_classes \ - import NxObject +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_base_classes import ( + NxObject, +) -from pynxtools.dataconverter.readers.em_spctrscpy.utils.hspy.em_hspy_xray \ - import NxSpectrumSetEmXray +from pynxtools.dataconverter.readers.em_spctrscpy.utils.hspy.em_hspy_xray import ( + NxSpectrumSetEmXray, +) -from pynxtools.dataconverter.readers.em_spctrscpy.utils.hspy.em_hspy_eels \ - import NxSpectrumSetEmEels +from pynxtools.dataconverter.readers.em_spctrscpy.utils.hspy.em_hspy_eels import ( + NxSpectrumSetEmEels, +) -from pynxtools.dataconverter.readers.em_spctrscpy.utils.hspy.em_hspy_adf \ - import NxImageSetEmAdf +from pynxtools.dataconverter.readers.em_spctrscpy.utils.hspy.em_hspy_adf import ( + NxImageSetEmAdf, +) from pynxtools.dataconverter.file_hashing import get_file_hashvalue @@ -94,8 +98,10 @@ def report(self, template: dict) -> dict: Paths in template are prefixed by prefix and have to be compliant with the application definition. """ - prefix = f"/ENTRY[entry{self.entry_id}]/measurement/" \ - f"EVENT_DATA_EM[event_data_em1]/" + prefix = ( + f"/ENTRY[entry{self.entry_id}]/measurement/" + f"EVENT_DATA_EM[event_data_em1]/" + ) # now = datetime.datetime.now().astimezone().isoformat() # hyperspy cannot implement per-event time stamping especially @@ -105,34 +111,36 @@ def report(self, template: dict) -> dict: template[f"{prefix}start_time"] = self.meta["start_time"].value template[f"{prefix}end_time"] = self.meta["end_time"].value - event_info = {"source_file_name": self.file_name, - "source_file_version": self.file_sha256} - - prefix = f"/ENTRY[entry{self.entry_id}]/measurement/" \ - f"EVENT_DATA_EM[event_data_em1]/" + event_info = { + "source_file_name": self.file_name, + "source_file_version": self.file_sha256, + } + + prefix = ( + f"/ENTRY[entry{self.entry_id}]/measurement/" + f"EVENT_DATA_EM[event_data_em1]/" + ) # connect and compare frame_id with that of hspy if self.spectrum_set_em_xray is not None: - if isinstance(self.spectrum_set_em_xray, - NxSpectrumSetEmXray) is True: - self.spectrum_set_em_xray.report( - prefix, 1, event_info, template) + if isinstance(self.spectrum_set_em_xray, NxSpectrumSetEmXray) is True: + self.spectrum_set_em_xray.report(prefix, 1, event_info, template) - prefix = f"/ENTRY[entry{self.entry_id}]/measurement/" \ - f"EVENT_DATA_EM[event_data_em1]/" + prefix = ( + f"/ENTRY[entry{self.entry_id}]/measurement/" + f"EVENT_DATA_EM[event_data_em1]/" + ) if self.spectrum_set_em_eels is not None: - if isinstance(self.spectrum_set_em_eels, - NxSpectrumSetEmEels) is True: - self.spectrum_set_em_eels.report( - prefix, 1, event_info, template) + if isinstance(self.spectrum_set_em_eels, NxSpectrumSetEmEels) is True: + self.spectrum_set_em_eels.report(prefix, 1, event_info, template) - prefix = f"/ENTRY[entry{self.entry_id}]/measurement/" \ - f"EVENT_DATA_EM[event_data_em1]/" + prefix = ( + f"/ENTRY[entry{self.entry_id}]/measurement/" + f"EVENT_DATA_EM[event_data_em1]/" + ) # connect and 
compare frame_id with that of hspy if self.image_set_em_adf is not None: - if isinstance(self.image_set_em_adf, - NxImageSetEmAdf) is True: - self.image_set_em_adf.report( - prefix, 1, event_info, template) + if isinstance(self.image_set_em_adf, NxImageSetEmAdf) is True: + self.image_set_em_adf.report(prefix, 1, event_info, template) # add generic images diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_example_data.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_example_data.py index 8a36579ed..4e6be394f 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_example_data.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_example_data.py @@ -33,8 +33,12 @@ from ase.data import chemical_symbols -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_versioning \ - import NX_EM_ADEF_NAME, NX_EM_ADEF_VERSION, NX_EM_EXEC_NAME, NX_EM_EXEC_VERSION +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_versioning import ( + NX_EM_ADEF_NAME, + NX_EM_ADEF_VERSION, + NX_EM_EXEC_NAME, + NX_EM_EXEC_VERSION, +) # parameter affecting reconstructed positions and size # default parameter @@ -42,7 +46,7 @@ # compose hypothetical spectrum (ignoring background) XRAY_ENERGY_DELTA = 0.01 # in keV PEAK_WIDTH_SIGMA = 0.1 # stddev in keV assumed the same for all peaks -SIGNAL_YIELD = 1000. # arbitrarily assumed signal intensity +SIGNAL_YIELD = 1000.0 # arbitrarily assumed signal intensity class EmSpctrscpyCreateExampleData: @@ -76,15 +80,16 @@ def emulate_entry(self, template: dict) -> dict: template[f"{trg}start_time"] = datetime.datetime.now().astimezone().isoformat() template[f"{trg}end_time"] = datetime.datetime.now().astimezone().isoformat() - msg = ''' + msg = """ !!! WARNING: These are mocked data !!! These are meant to be used exclusively for verifying NOMAD search capabilities. 
- ''' + """ template[f"{trg}experiment_description"] = msg template[f"{trg}experiment_documentation"] = "free text field" experiment_identifier = str( - f"EM{np.random.choice(100, 1)[0]}/{np.random.choice(100000, 1)[0]}") + f"EM{np.random.choice(100, 1)[0]}/{np.random.choice(100000, 1)[0]}" + ) template[f"{trg}experiment_identifier"] = experiment_identifier return template @@ -94,15 +99,46 @@ def emulate_user(self, template: dict) -> dict: # print("Parsing user...") prefix = f"/ENTRY[entry{self.entry_id}]/" user_names = np.unique( - np.random.choice(["Sherjeel", "MarkusK", "Benedikt", "Johannes", - "Gerardo", "Kristiane", "Sabine", "Sophie", "Tom", - "Volker", "MarkusW", "PeterK", "Oonagh", "Annika", - "ChristophP", "Thomas", "Mariano", "Tilmann", - "ChristophF", "Niels", "Dieter", "Alexander", - "Katharina", "Florian", "Sebastian", "Sandor", - "Carola", "Chris", "Hampus", "Pepe", "Lauri", - "MarkusS", "Christoph", "Claudia"], - 1 + np.random.choice(MAX_USERS, 1))) + np.random.choice( + [ + "Sherjeel", + "MarkusK", + "Benedikt", + "Johannes", + "Gerardo", + "Kristiane", + "Sabine", + "Sophie", + "Tom", + "Volker", + "MarkusW", + "PeterK", + "Oonagh", + "Annika", + "ChristophP", + "Thomas", + "Mariano", + "Tilmann", + "ChristophF", + "Niels", + "Dieter", + "Alexander", + "Katharina", + "Florian", + "Sebastian", + "Sandor", + "Carola", + "Chris", + "Hampus", + "Pepe", + "Lauri", + "MarkusS", + "Christoph", + "Claudia", + ], + 1 + np.random.choice(MAX_USERS, 1), + ) + ) user_id = 1 for name in user_names: trg = f"{prefix}USER[user{user_id}]/" @@ -118,10 +154,10 @@ def emulate_sample(self, template: dict) -> dict: trg = f"/ENTRY[entry{self.entry_id}]/sample/" template[f"{trg}method"] = "simulation" - self.elements_observed \ - = np.random.choice(np.asarray(np.linspace(1, 94, num=118, endpoint=True), - np.uint32), - 1 + int(np.random.uniform(low=0, high=5))) + self.elements_observed = np.random.choice( + np.asarray(np.linspace(1, 94, num=118, endpoint=True), np.uint32), + 1 + int(np.random.uniform(low=0, high=5)), + ) assert len(self.elements_observed) > 0, "List of assumed elements is empty!" unique_elements = set() for atomic_number in self.elements_observed: @@ -132,18 +168,22 @@ def emulate_sample(self, template: dict) -> dict: print(f"Unique elements are: {list(unique_elements)}") template[f"{trg}atom_types"] = ", ".join(list(unique_elements)) - specimen_name = f"Mocked electron microscopy specimen " \ - f"{np.random.choice(1000, 1)[0]}" + specimen_name = ( + f"Mocked electron microscopy specimen " f"{np.random.choice(1000, 1)[0]}" + ) template[f"{trg}name"] = specimen_name template[f"{trg}sample_history"] = "n/a" - template[f"{trg}preparation_date"] \ - = datetime.datetime.now().astimezone().isoformat() - template[f"{trg}short_title"] \ - = specimen_name.replace("Mocked atom probe specimen ", "") + template[f"{trg}preparation_date"] = ( + datetime.datetime.now().astimezone().isoformat() + ) + template[f"{trg}short_title"] = specimen_name.replace( + "Mocked atom probe specimen ", "" + ) template[f"{trg}description"] = "n/a" - template[f"{trg}thickness"] \ - = np.float64(np.max((np.random.normal(loc=40., scale=5.0), 10.))) + template[f"{trg}thickness"] = np.float64( + np.max((np.random.normal(loc=40.0, scale=5.0), 10.0)) + ) template[f"{trg}thickness/@units"] = "nm" # template[f"{trg}density"] = 0. 
# is optional # template[f"{trg}density/@units"] = "kg/m^3" @@ -153,27 +193,30 @@ def emulate_sample(self, template: dict) -> dict: def emulate_coordinate_system(self, template: dict) -> dict: """Define the coordinate systems to be used.""" # print("Parsing coordinate system...") - prefix = f"/ENTRY[entry{self.entry_id}]/" \ - f"COORDINATE_SYSTEM_SET[coordinate_system_set]/" + prefix = ( + f"/ENTRY[entry{self.entry_id}]/" + f"COORDINATE_SYSTEM_SET[coordinate_system_set]/" + ) grpnm = f"{prefix}TRANSFORMATIONS[laboratory]/" cs_xyz = np.asarray( - [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], np.float64) + [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], np.float64 + ) cs_names = ["x", "y", "z"] for i in np.arange(0, 3): trg = f"{grpnm}AXISNAME[{cs_names[i]}]" template[trg] = cs_xyz[:, i] - template[f"{trg}/@offset"] = np.asarray([0., 0., 0.], np.float64) + template[f"{trg}/@offset"] = np.asarray([0.0, 0.0, 0.0], np.float64) template[f"{trg}/@offset_units"] = "m" template[f"{trg}/@depends_on"] = "." - msg = ''' + msg = """ This way of defining coordinate systems is an example how these can be defined. More discussion among members of FAIRmat Area A/B/C and members of the EM community plus more examples should be used to test how this feature of NeXus can be used. - ''' + """ template[f"{prefix}@comment"] = msg return template @@ -183,17 +226,40 @@ def emulate_instrument_header(self, template: dict) -> dict: # print("Parsing instrument header...") trg = f"/ENTRY[entry{self.entry_id}]/em_lab/" instrument_name = np.random.choice( - ["Some ThermoFisher", "Some JEOL", "Some Zeiss", - "Some TEscan", "Some Hitachi"], 1 + np.random.choice(1, 1))[0] + [ + "Some ThermoFisher", + "Some JEOL", + "Some Zeiss", + "Some TEscan", + "Some Hitachi", + ], + 1 + np.random.choice(1, 1), + )[0] template[f"{trg}instrument_name"] = str(instrument_name) - template[f"{trg}location"] = str(np.random.choice( - ["Berlin", "Leipzig", "Dresden", "Düsseldorf", "Aachen", "Garching", - "Aachen", "Leoben", "Jülich"], 1 + np.random.choice(1, 1))[0]) + template[f"{trg}location"] = str( + np.random.choice( + [ + "Berlin", + "Leipzig", + "Dresden", + "Düsseldorf", + "Aachen", + "Garching", + "Aachen", + "Leoben", + "Jülich", + ], + 1 + np.random.choice(1, 1), + )[0] + ) trg = f"/ENTRY[entry{self.entry_id}]/em_lab/FABRICATION[fabrication]/" template[f"{trg}vendor"] = instrument_name.replace("Some ", "") template[f"{trg}model"] = "n/a" - template[f"{trg}identifier"] = str(hashlib.sha256( - instrument_name.replace("Some ", "").encode("utf-8")).hexdigest()) + template[f"{trg}identifier"] = str( + hashlib.sha256( + instrument_name.replace("Some ", "").encode("utf-8") + ).hexdigest() + ) template[f"{trg}capabilities"] = "n/a" return template @@ -205,15 +271,18 @@ def emulate_ebeam_column(self, template: dict) -> dict: trg = f"{prefix}electron_source/" template[f"{trg}voltage"] = np.float64( - np.random.choice(np.linspace(10., 300., num=30, endpoint=True), 1)[0]) + np.random.choice(np.linspace(10.0, 300.0, num=30, endpoint=True), 1)[0] + ) template[f"{trg}voltage/@units"] = "kV" - template[f"{trg}emitter_type"] \ - = str(np.random.choice(["thermionic", "schottky", "field_emission"], 1)[0]) + template[f"{trg}emitter_type"] = str( + np.random.choice(["thermionic", "schottky", "field_emission"], 1)[0] + ) # for aperture in apertures: trg = f"{prefix}APERTURE_EM[aperture_em1]/" template[f"{trg}value"] = np.uint32( - np.random.choice(np.linspace(1, 5, num=5, endpoint=True), 1)[0]) + np.random.choice(np.linspace(1, 5, num=5, endpoint=True), 
1)[0] + ) template[f"{trg}name"] = "aperture1" template[f"{trg}description"] = "n/a" @@ -243,23 +312,34 @@ def emulate_ibeam_deflector(self, template: dict) -> dict: def emulate_optics(self, template: dict) -> dict: """Copy data in optical_system_em section.""" # print("Parsing optics...") - trg = f"/ENTRY[entry{self.entry_id}]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/" + trg = ( + f"/ENTRY[entry{self.entry_id}]/em_lab/OPTICAL_SYSTEM_EM[optical_system_em]/" + ) template[f"{trg}beam_current_description"] = "undefined" - template[f"{trg}camera_length"] \ - = np.float64(np.random.normal(loc=1.0, scale=0.05)) + template[f"{trg}camera_length"] = np.float64( + np.random.normal(loc=1.0, scale=0.05) + ) template[f"{trg}camera_length/@units"] = "m" - template[f"{trg}magnification"] \ - = np.float64(np.random.choice([100., 1000., 10000., 100000.], 1)[0]) - template[f"{trg}defocus"] = np.float64(np.random.normal(loc=1.0e-7, scale=0.1e-7)) + template[f"{trg}magnification"] = np.float64( + np.random.choice([100.0, 1000.0, 10000.0, 100000.0], 1)[0] + ) + template[f"{trg}defocus"] = np.float64( + np.random.normal(loc=1.0e-7, scale=0.1e-7) + ) template[f"{trg}defocus/@units"] = "m" template[f"{trg}semi_convergence_angle"] = np.float64( - np.min((np.random.normal(loc=10., scale=1.), 1.0))) + np.min((np.random.normal(loc=10.0, scale=1.0), 1.0)) + ) template[f"{trg}semi_convergence_angle/@units"] = "degree" template[f"{trg}working_distance"] = np.float64( - np.trunc(np.random.choice(np.linspace(5., 20., num=15, endpoint=True), 1)[0])) + np.trunc( + np.random.choice(np.linspace(5.0, 20.0, num=15, endpoint=True), 1)[0] + ) + ) template[f"{trg}working_distance/@units"] = "cm" template[f"{trg}beam_current"] = np.float64( - np.min((np.random.normal(loc=10., scale=2.), 1.0))) + np.min((np.random.normal(loc=10.0, scale=2.0), 1.0)) + ) template[f"{trg}beam_current/@units"] = "pA" return template @@ -267,12 +347,16 @@ def emulate_optics(self, template: dict) -> dict: def emulate_detector(self, template: dict) -> dict: """Copy data in detector section.""" # print("Parsing detector...") - detectors = np.unique(np.random.choice( - ["SE", "BSE", "EBSD", "EDX", "INLINE"], - 1 + np.random.choice(5, 1))) + detectors = np.unique( + np.random.choice( + ["SE", "BSE", "EBSD", "EDX", "INLINE"], 1 + np.random.choice(5, 1) + ) + ) detector_id = 1 for detector in detectors: - trg = f"/ENTRY[entry{self.entry_id}]/em_lab/DETECTOR[detector{detector_id}]/" + trg = ( + f"/ENTRY[entry{self.entry_id}]/em_lab/DETECTOR[detector{detector_id}]/" + ) template[f"{trg}local_name"] = str(detector) detector_id += 1 @@ -283,10 +367,22 @@ def emulate_stage_lab(self, template: dict) -> dict: # print("Parsing stage lab...") trg = f"/ENTRY[entry{self.entry_id}]/em_lab/stage_lab/" stage_name = np.random.choice( - ["side_entry", "top_entry", "single_tilt", "quick_change", - "multiple_specimen", "bulk_specimen", "double_tilt", "tilt_rotate", - "heating_chip", "atmosphere_chip", "electrical_biasing_chip", - "liquid_cell_chip"], 1)[0] + [ + "side_entry", + "top_entry", + "single_tilt", + "quick_change", + "multiple_specimen", + "bulk_specimen", + "double_tilt", + "tilt_rotate", + "heating_chip", + "atmosphere_chip", + "electrical_biasing_chip", + "liquid_cell_chip", + ], + 1, + )[0] template[f"{trg}name"] = str(stage_name) return template @@ -313,7 +409,8 @@ def emulate_random_xray_spectrum(self, template: dict) -> dict: # !! data meant exclusively to be used for verification purposes !! 
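
# Aside: the hunks that follow build the mock spectrum by integrating every
# Gaussian emission peak analytically over each energy bin. For a peak centred
# at e0 with standard deviation sigma, the counts falling into the bin
# [a, b) are amplitude * 0.5 * (erf((e0 - a) / (sqrt(2) * sigma))
# - erf((e0 - b) / (sqrt(2) * sigma))). A minimal sketch of just that step;
# the constants mirror XRAY_ENERGY_DELTA and PEAK_WIDTH_SIGMA above, and the
# function name is illustrative:
import math
import numpy as np

DELTA = 0.01  # bin width in keV
SIGMA = 0.1   # peak standard deviation in keV

def gaussian_bin_counts(e0, amplitude, n_bins):
    """Counts of a Gaussian peak at e0 (keV), integrated bin by bin from 0 keV."""
    counts = np.zeros((n_bins,), np.float64)
    for idx in range(n_bins):
        lower = idx * DELTA    # lower bin edge
        upper = lower + DELTA  # upper bin edge
        counts[idx] = amplitude * 0.5 * (
            math.erf((e0 - lower) / (math.sqrt(2.0) * SIGMA))
            - math.erf((e0 - upper) / (math.sqrt(2.0) * SIGMA))
        )
    return counts

# usage: a single peak at 1.5 keV; the summed counts approach the amplitude
print(np.sum(gaussian_bin_counts(1.5, 1000.0, 500)))
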
assert len(self.elements_observed > 0), "No elements were observed !" composition = np.random.uniform( - low=0., high=1., size=(len(self.elements_observed),)) + low=0.0, high=1.0, size=(len(self.elements_observed),) + ) composition = composition / np.sum(composition) signal_contributions = [] idx = 0 @@ -321,65 +418,96 @@ def emulate_random_xray_spectrum(self, template: dict) -> dict: for atomic_number in self.elements_observed: symbol = chemical_symbols[atomic_number] if symbol in hs.material.elements: - if 'Atomic_properties' in hs.material.elements[symbol]: - if 'Xray_lines' in hs.material.elements[symbol].Atomic_properties: - lines = hs.material.elements[symbol].Atomic_properties.Xray_lines + if "Atomic_properties" in hs.material.elements[symbol]: + if "Xray_lines" in hs.material.elements[symbol].Atomic_properties: + lines = hs.material.elements[ + symbol + ].Atomic_properties.Xray_lines for xline_name, xline_props in lines.as_dictionary().items(): # print(key + ", " + str(value["weight"])) - signal_contributions.append((atomic_number, - symbol, - composition[idx], - xline_name, - xline_props["energy (keV)"], - xline_props["weight"])) + signal_contributions.append( + ( + atomic_number, + symbol, + composition[idx], + xline_name, + xline_props["energy (keV)"], + xline_props["weight"], + ) + ) idx += 1 # self.elements_observed = np.unique(symbols) - xray_energy_max = 0. + xray_energy_max = 0.0 for tpl in signal_contributions: xray_energy_max = np.max((xray_energy_max, tpl[4])) # print(xray_energy_max) - n_bins = int(np.ceil( - (xray_energy_max + 3. * XRAY_ENERGY_DELTA + 1.) - / XRAY_ENERGY_DELTA)) # covering [0., n_bins * XRAY_ENERGY_DELTA] + n_bins = int( + np.ceil( + (xray_energy_max + 3.0 * XRAY_ENERGY_DELTA + 1.0) / XRAY_ENERGY_DELTA + ) + ) # covering [0., n_bins * XRAY_ENERGY_DELTA] self.e_axis = np.linspace( 0.5 * XRAY_ENERGY_DELTA, 0.5 * XRAY_ENERGY_DELTA + n_bins * XRAY_ENERGY_DELTA, - num=n_bins, endpoint=True) + num=n_bins, + endpoint=True, + ) self.cnts_summary = np.zeros((n_bins,), np.float64) for tpl in signal_contributions: # idx = np.abs(self.e_axis - tpl[4]).argmin() # integrate analytically, assume Gaussian peak with stddev PEAK_WIDTH_SIGMA - cnts_tpl = np.zeros((n_bins, ), np.float64) + cnts_tpl = np.zeros((n_bins,), np.float64) for idx in np.arange(0, n_bins): - cnts_tpl[idx] = SIGNAL_YIELD * tpl[2] * tpl[5] * 0.5 \ - * (math.erf(1. / (np.sqrt(2.) * PEAK_WIDTH_SIGMA) - * (tpl[4] - (0. + idx * XRAY_ENERGY_DELTA))) - - math.erf(1. / (np.sqrt(2.) 
* PEAK_WIDTH_SIGMA) - * (tpl[4] - (XRAY_ENERGY_DELTA + idx * XRAY_ENERGY_DELTA)))) + cnts_tpl[idx] = ( + SIGNAL_YIELD + * tpl[2] + * tpl[5] + * 0.5 + * ( + math.erf( + 1.0 + / (np.sqrt(2.0) * PEAK_WIDTH_SIGMA) + * (tpl[4] - (0.0 + idx * XRAY_ENERGY_DELTA)) + ) + - math.erf( + 1.0 + / (np.sqrt(2.0) * PEAK_WIDTH_SIGMA) + * (tpl[4] - (XRAY_ENERGY_DELTA + idx * XRAY_ENERGY_DELTA)) + ) + ) + ) self.cnts_summary = np.add(self.cnts_summary, cnts_tpl) # plt.plot(self.e_axis, self.cnts_summary) # plt.xlabel("energy (keV)") # plt.ylabel("cnts") # plt.xscale("log") - trg = f"/ENTRY[entry{self.entry_id}]/measurement/" \ - f"EVENT_DATA_EM[event_data_em1]/xray/summary/" + trg = ( + f"/ENTRY[entry{self.entry_id}]/measurement/" + f"EVENT_DATA_EM[event_data_em1]/xray/summary/" + ) template[f"{trg}title"] = "Accumulated X-ray spectrum" # template[f"{trg}@long_name"] = "Xray" template[f"{trg}@signal"] = "data_counts" template[f"{trg}@axes"] = ["axis_photon_energy"] template[f"{trg}@AXISNAME_indices[axis_photon_energy_indices]"] = 0 - template[f"{trg}DATA[data_counts]"] \ - = {"compress": self.cnts_summary, "strength": 1} + template[f"{trg}DATA[data_counts]"] = { + "compress": self.cnts_summary, + "strength": 1, + } template[f"{trg}DATA[data_counts]/@units"] = "" template[f"{trg}DATA[data_counts]/@long_name"] = "Photon counts (1)" - template[f"{trg}AXISNAME[axis_photon_energy]"] \ - = {"compress": self.e_axis, "strength": 1} + template[f"{trg}AXISNAME[axis_photon_energy]"] = { + "compress": self.e_axis, + "strength": 1, + } template[f"{trg}AXISNAME[axis_photon_energy]/@units"] = "keV" - template[f"{trg}AXISNAME[axis_photon_energy]/@long_name"] = "Photon energy (keV)" + template[ + f"{trg}AXISNAME[axis_photon_energy]/@long_name" + ] = "Photon energy (keV)" return template def synthesize(self, template: dict) -> dict: diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_generic_eln_io.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_generic_eln_io.py index cdea0cfee..2a313a707 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_generic_eln_io.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_generic_eln_io.py @@ -27,8 +27,12 @@ from ase.data import chemical_symbols -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_versioning \ - import NX_EM_ADEF_NAME, NX_EM_ADEF_VERSION, NX_EM_EXEC_NAME, NX_EM_EXEC_VERSION +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_versioning import ( + NX_EM_ADEF_NAME, + NX_EM_ADEF_VERSION, + NX_EM_EXEC_NAME, + NX_EM_EXEC_VERSION, +) class NxEmNomadOasisElnSchemaParser: @@ -51,8 +55,10 @@ class NxEmNomadOasisElnSchemaParser: def __init__(self, file_name: str, entry_id: int): print(f"Extracting data from ELN file: {file_name}") - if (file_name.rsplit('/', 1)[-1].startswith("eln_data") - or file_name.startswith("eln_data")) and entry_id > 0: + if ( + file_name.rsplit("/", 1)[-1].startswith("eln_data") + or file_name.startswith("eln_data") + ) and entry_id > 0: self.entry_id = entry_id self.file_name = file_name with open(self.file_name, "r", encoding="utf-8") as stream: @@ -68,20 +74,30 @@ def parse_entry_section(self, template: dict) -> dict: trg = f"/ENTRY[entry{self.entry_id}]/" src = "entry" if isinstance(self.yml[src], fd.FlatDict): - if (self.yml[f"{src}:attr_version"] == NX_EM_ADEF_VERSION) \ - and (self.yml[f"{src}:definition"] == NX_EM_ADEF_NAME): + if (self.yml[f"{src}:attr_version"] == NX_EM_ADEF_VERSION) and ( + self.yml[f"{src}:definition"] == NX_EM_ADEF_NAME + ): template[f"{trg}@version"] = 
NX_EM_ADEF_VERSION template[f"{trg}definition"] = NX_EM_ADEF_NAME template[f"{trg}PROGRAM[program1]/program"] = NX_EM_EXEC_NAME - template[f"{trg}PROGRAM[program1]/program/@version"] = NX_EM_EXEC_VERSION - if ("program" in self.yml[src].keys()) \ - and ("program__attr_version" in self.yml[src].keys()): + template[ + f"{trg}PROGRAM[program1]/program/@version" + ] = NX_EM_EXEC_VERSION + if ("program" in self.yml[src].keys()) and ( + "program__attr_version" in self.yml[src].keys() + ): template[f"{trg}PROGRAM[program2]/program"] = self.yml[f"{src}:program"] - template[f"{trg}PROGRAM[program2]/program/@version"] \ - = self.yml[f"{src}:program__attr_version"] - - field_names = ["experiment_identifier", "start_time", "end_time", - "experiment_description", "experiment_documentation"] + template[f"{trg}PROGRAM[program2]/program/@version"] = self.yml[ + f"{src}:program__attr_version" + ] + + field_names = [ + "experiment_identifier", + "start_time", + "end_time", + "experiment_description", + "experiment_documentation", + ] for field_name in field_names: if field_name in self.yml[src].keys(): template[f"{trg}{field_name}"] = self.yml[f"{src}:{field_name}"] @@ -96,10 +112,18 @@ def parse_user_section(self, template: dict) -> dict: if len(self.yml[src]) >= 1: user_id = 1 for user_list in self.yml[src]: - char_field_names = ["name", "email", "affiliation", "address", - "orcid", "orcid_platform", - "telephone_number", "role", - "social_media_name", "social_media_platform"] + char_field_names = [ + "name", + "email", + "affiliation", + "address", + "orcid", + "orcid_platform", + "telephone_number", + "role", + "social_media_name", + "social_media_platform", + ] trg = f"/ENTRY[entry{self.entry_id}]/USER[user{user_id}]/" for field_name in char_field_names: @@ -116,18 +140,23 @@ def parse_sample_section(self, template: dict) -> dict: src = "sample" trg = f"/ENTRY[entry{self.entry_id}]/sample/" if isinstance(self.yml[src], fd.FlatDict): - if (isinstance(self.yml[f"{src}:atom_types"], list)) \ - and (len(self.yml[src + ":atom_types"]) >= 1): + if (isinstance(self.yml[f"{src}:atom_types"], list)) and ( + len(self.yml[src + ":atom_types"]) >= 1 + ): atom_types_are_valid = True for symbol in self.yml[f"{src}:atom_types"]: - valid = isinstance(symbol, str) \ - and (symbol in chemical_symbols) and (symbol != "X") + valid = ( + isinstance(symbol, str) + and (symbol in chemical_symbols) + and (symbol != "X") + ) if valid is False: atom_types_are_valid = False break if atom_types_are_valid is True: - template[f"{trg}atom_types"] \ - = ", ".join(list(self.yml[f"{src}:atom_types"])) + template[f"{trg}atom_types"] = ", ".join( + list(self.yml[f"{src}:atom_types"]) + ) char_req_field_names = ["method", "name", "sample_history", "preparation_date"] for field_name in char_req_field_names: @@ -140,11 +169,15 @@ def parse_sample_section(self, template: dict) -> dict: float_field_names = ["thickness", "density"] for field_name in float_field_names: - if (f"{field_name}:value" in self.yml[src].keys()) \ - and (f"{field_name}:unit" in self.yml[src].keys()): - template[f"{trg}{field_name}"] \ - = np.float64(self.yml[f"{src}:{field_name}:value"]) - template[f"{trg}{field_name}/@units"] = self.yml[f"{src}:{field_name}:unit"] + if (f"{field_name}:value" in self.yml[src].keys()) and ( + f"{field_name}:unit" in self.yml[src].keys() + ): + template[f"{trg}{field_name}"] = np.float64( + self.yml[f"{src}:{field_name}:value"] + ) + template[f"{trg}{field_name}/@units"] = self.yml[ + f"{src}:{field_name}:unit" + ] return template @@ 
-159,25 +192,28 @@ def parse_coordinate_system_section(self, template: dict) -> dict: # Oxford Instruments nowadays stores coordinate systems implicitly by # communicating the specification of their file format (like H5OINA) # print("Parsing coordinate system...") - prefix = f"/ENTRY[entry{self.entry_id}]/" \ - f"COORDINATE_SYSTEM_SET[coordinate_system_set]/" + prefix = ( + f"/ENTRY[entry{self.entry_id}]/" + f"COORDINATE_SYSTEM_SET[coordinate_system_set]/" + ) # this is likely not yet matching how it should be in NeXus grpnm = f"{prefix}TRANSFORMATIONS[laboratory]/" cs_xyz = np.asarray( - [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], np.float64) + [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], np.float64 + ) cs_names = ["x", "y", "z"] for i in np.arange(0, 3): trg = f"{grpnm}AXISNAME[{cs_names[i]}]" template[trg] = cs_xyz[:, i] - template[f"{trg}/@offset"] = np.asarray([0., 0., 0.], np.float64) + template[f"{trg}/@offset"] = np.asarray([0.0, 0.0, 0.0], np.float64) template[f"{trg}/@offset_units"] = "m" template[f"{trg}/@depends_on"] = "." - msg = ''' + msg = """ This way of defining coordinate systems is only a small example of what is possible and how it can be done. More discussion among members of FAIRmat Areas A, B, C, and D and the EM community is needed ! - ''' + """ template[f"{prefix}@comment"] = msg return template @@ -211,17 +247,22 @@ def parse_ebeam_column_section(self, template: dict) -> dict: src = "em_lab:ebeam_column:electron_source" if isinstance(self.yml[src], fd.FlatDict): - trg = f"/ENTRY[entry{self.entry_id}]/em_lab/" \ - f"EBEAM_COLUMN[ebeam_column]/electron_source/" + trg = ( + f"/ENTRY[entry{self.entry_id}]/em_lab/" + f"EBEAM_COLUMN[ebeam_column]/electron_source/" + ) float_field_names = ["voltage"] for field_name in float_field_names: - if (f"{field_name}:value" in self.yml[src].keys()) \ - and (f"{field_name}:unit" in self.yml[src].keys()): - template[f"{trg}{field_name}"] \ - = np.float64(self.yml[f"{src}:{field_name}:value"]) - template[f"{trg}{field_name}/@units"] \ - = self.yml[f"{src}:{field_name}:unit"] + if (f"{field_name}:value" in self.yml[src].keys()) and ( + f"{field_name}:unit" in self.yml[src].keys() + ): + template[f"{trg}{field_name}"] = np.float64( + self.yml[f"{src}:{field_name}:value"] + ) + template[f"{trg}{field_name}/@units"] = self.yml[ + f"{src}:{field_name}:unit" + ] char_field_names = ["emitter_type"] for field_name in char_field_names: @@ -233,9 +274,11 @@ def parse_ebeam_column_section(self, template: dict) -> dict: if len(self.yml[src]) >= 1: aperture_id = 1 for aperture in self.yml[src]: - trg = f"/ENTRY[entry{self.entry_id}]/em_lab/" \ - f"EBEAM_COLUMN[ebeam_column]/" \ - f"APERTURE_EM[aperture_em{aperture_id}]/" + trg = ( + f"/ENTRY[entry{self.entry_id}]/em_lab/" + f"EBEAM_COLUMN[ebeam_column]/" + f"APERTURE_EM[aperture_em{aperture_id}]/" + ) if "value" in aperture.keys(): template[f"{trg}value"] = np.float64(aperture["value"]) char_field_names = ["name", "description"] @@ -248,8 +291,10 @@ def parse_ebeam_column_section(self, template: dict) -> dict: # corrector_cs src = "em_lab:ebeam_column:aberration_correction" - trg = f"/ENTRY[entry{self.entry_id}]/em_lab/" \ - f"EBEAM_COLUMN[ebeam_column]/aberration_correction/" + trg = ( + f"/ENTRY[entry{self.entry_id}]/em_lab/" + f"EBEAM_COLUMN[ebeam_column]/aberration_correction/" + ) if "applied" in self.yml[src].keys(): template[f"{trg}applied"] = self.yml[f"{src}:applied"] @@ -282,24 +327,34 @@ def parse_optics_section(self, template: dict) -> dict: # print("Parsing optics...") src = 
"em_lab:optical_system_em" if isinstance(self.yml[src], fd.FlatDict): - trg = f"/ENTRY[entry{self.entry_id}]/em_lab/" \ - f"OPTICAL_SYSTEM_EM[optical_system_em]/" + trg = ( + f"/ENTRY[entry{self.entry_id}]/em_lab/" + f"OPTICAL_SYSTEM_EM[optical_system_em]/" + ) char_field_names = ["beam_current_description"] for field_name in char_field_names: if field_name in self.yml[src].keys(): template[f"{trg}{field_name}"] = self.yml[f"{src}:{field_name}"] - float_field_names = ["camera_length", "magnification", "defocus", - "semi_convergence_angle", "working_distance", - "beam_current"] + float_field_names = [ + "camera_length", + "magnification", + "defocus", + "semi_convergence_angle", + "working_distance", + "beam_current", + ] for field_name in float_field_names: - if (f"{field_name}:value" in self.yml[src].keys()) \ - and (f"{field_name}:unit" in self.yml[src].keys()): - template[f"{trg}{field_name}"] \ - = np.float64(self.yml[f"{src}:{field_name}:value"]) - template[f"{trg}{field_name}/@units"] \ - = self.yml[f"{src}:{field_name}:unit"] + if (f"{field_name}:value" in self.yml[src].keys()) and ( + f"{field_name}:unit" in self.yml[src].keys() + ): + template[f"{trg}{field_name}"] = np.float64( + self.yml[f"{src}:{field_name}:value"] + ) + template[f"{trg}{field_name}/@units"] = self.yml[ + f"{src}:{field_name}:unit" + ] return template @@ -314,8 +369,10 @@ def parse_detector_section(self, template: dict) -> dict: detector_id = 1 for detector in self.yml[src]: if isinstance(detector, dict): - trg = f"/ENTRY[entry{self.entry_id}]/em_lab/" \ - f"DETECTOR[detector{detector_id}]/" + trg = ( + f"/ENTRY[entry{self.entry_id}]/em_lab/" + f"DETECTOR[detector{detector_id}]/" + ) char_field_names = ["local_name"] for field_name in char_field_names: diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_nexus_base_classes.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_nexus_base_classes.py index 6baaba799..daccf1021 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_nexus_base_classes.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_nexus_base_classes.py @@ -21,28 +21,29 @@ from typing import Dict -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_versioning \ - import NX_EM_ADEF_NAME, NX_EM_ADEF_VERSION, \ - NX_EM_EXEC_NAME, NX_EM_EXEC_VERSION +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_versioning import ( + NX_EM_ADEF_NAME, + NX_EM_ADEF_VERSION, + NX_EM_EXEC_NAME, + NX_EM_EXEC_VERSION, +) class NxObject: # pylint: disable=too-few-public-methods """An object in a graph e.g. a field or group in NeXus.""" - def __init__(self, - name: str = None, - unit: str = None, - dtype=str, - value=None, - **kwargs): + def __init__( + self, name: str = None, unit: str = None, dtype=str, value=None, **kwargs + ): if name is not None: assert name != "", "Argument name needs to be a non-empty string !" if unit is not None: assert unit != "", "Argument unit needs to be a non-empty string !" assert dtype is not None, "Argument dtype must not be None !" if dtype is not None: - assert isinstance(dtype, type), \ - "Argument dtype needs a valid, ideally numpy, datatype !" + assert isinstance( + dtype, type + ), "Argument dtype needs a valid, ideally numpy, datatype !" 
# ##MK::if value is not None: self.is_a = "NXobject" self.is_attr = False # if True indicates object is attribute @@ -60,13 +61,14 @@ def __init__(self, self.value = None # value should be a numpy scalar, tensor, or string if possible if "is_attr" in kwargs: - assert isinstance(kwargs["is_attr"], bool), \ - "Kwarg is_attr needs to be a boolean !" + assert isinstance( + kwargs["is_attr"], bool + ), "Kwarg is_attr needs to be a boolean !" self.is_attr = kwargs["is_attr"] def __repr__(self): """Report values.""" - return f'''Name: {self.name}, unit: {self.unit}, dtype: {self.dtype}''' + return f"""Name: {self.name}, unit: {self.unit}, dtype: {self.dtype}""" class NxEmUser: # pylint: disable=too-few-public-methods @@ -96,12 +98,11 @@ def report(self, prefix: str, template: dict) -> dict: template[f"{prefix}/name"] = self.meta["name"].value template[f"{prefix}/orcid"] = self.meta["orcid"].value template[f"{prefix}/role"] = self.meta["role"].value - template[f"{prefix}/social_media_name"] \ - = self.meta["social_media_name"].value - template[f"{prefix}/social_media_platform"] \ - = self.meta["social_media_platform"].value - template[f"{prefix}/telephone_number"] \ - = self.meta["telephone_number"].value + template[f"{prefix}/social_media_name"] = self.meta["social_media_name"].value + template[f"{prefix}/social_media_platform"] = self.meta[ + "social_media_platform" + ].value + template[f"{prefix}/telephone_number"] = self.meta["telephone_number"].value return template @@ -142,17 +143,14 @@ class NxEmAppDefHeader: # pylint: disable=too-few-public-methods def __init__(self): self.meta: Dict[str, NxObject] = {} - self.meta["version"] \ - = NxObject(value=NX_EM_ADEF_VERSION, is_attr=True) - self.meta["definition"] \ - = NxObject(value=NX_EM_ADEF_NAME) + self.meta["version"] = NxObject(value=NX_EM_ADEF_VERSION, is_attr=True) + self.meta["definition"] = NxObject(value=NX_EM_ADEF_NAME) self.meta["experiment_identifier"] = NxObject() self.meta["experiment_description"] = NxObject() self.meta["start_time"] = NxObject() self.meta["end_time"] = NxObject() self.meta["program"] = NxObject(value=NX_EM_EXEC_NAME) - self.meta["program_version"] \ - = NxObject(value=NX_EM_EXEC_VERSION, is_attr=True) + self.meta["program_version"] = NxObject(value=NX_EM_EXEC_VERSION, is_attr=True) self.meta["experiment_documentation"] = NxObject() self.meta["thumbnail"] = NxObject() self.meta["thumbnail_type"] = NxObject() @@ -165,19 +163,19 @@ def report(self, prefix: str, template: dict) -> dict: """ template[f"{prefix}/@version"] = self.meta["version"].value template[f"{prefix}/definition"] = self.meta["definition"].value - template[f"{prefix}/experiment_identifier"] \ - = self.meta["experiment_identifier"].value - template[f"{prefix}/experiment_description"] \ - = self.meta["experiment_description"].value + template[f"{prefix}/experiment_identifier"] = self.meta[ + "experiment_identifier" + ].value + template[f"{prefix}/experiment_description"] = self.meta[ + "experiment_description" + ].value template[f"{prefix}/start_time"] = self.meta["start_time"].value template[f"{prefix}/end_time"] = self.meta["end_time"].value template[f"{prefix}/program"] = self.meta["program"].value - template[f"{prefix}/program/@version"] \ - = self.meta["program_version"].value - template[f"{prefix}/experiment_documentation"] \ - = self.meta["experiment_documentation"].value - template[f"{prefix}/thumbnail"] \ - = self.meta["thumbnail"].value - template[f"{prefix}/thumbnail/@type"] \ - = self.meta["thumbnail_type"].value + 
template[f"{prefix}/program/@version"] = self.meta["program_version"].value + template[f"{prefix}/experiment_documentation"] = self.meta[ + "experiment_documentation" + ].value + template[f"{prefix}/thumbnail"] = self.meta["thumbnail"].value + template[f"{prefix}/thumbnail/@type"] = self.meta["thumbnail_type"].value return template diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_nexus_plots.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_nexus_plots.py index 1e51a6e2a..c6f5b4a69 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_nexus_plots.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_nexus_plots.py @@ -38,19 +38,18 @@ def xray_plot_available(template: dict, entry_id: int) -> bool: """Choose a preferred NXdata/data instance for Xray.""" entry_name = f"entry{entry_id}" - trg = f"/ENTRY[{entry_name}]/measurement/" \ - f"EVENT_DATA_EM[event_data_em1]/xray/" + trg = f"/ENTRY[{entry_name}]/measurement/" f"EVENT_DATA_EM[event_data_em1]/xray/" path = "" if f"{trg}stack/DATA[data_counts]" in template.keys(): assert isinstance( - template[f"{trg}stack/DATA[data_counts]"]["compress"], np.ndarray), \ - "EDS data stack not existent!" + template[f"{trg}stack/DATA[data_counts]"]["compress"], np.ndarray + ), "EDS data stack not existent!" path = "stack" if f"{trg}summary/DATA[data_counts]" in template.keys(): assert isinstance( - template[f"{trg}summary/DATA[data_counts]"]["compress"], np.ndarray), \ - "EDS data summary not existent!" + template[f"{trg}summary/DATA[data_counts]"]["compress"], np.ndarray + ), "EDS data summary not existent!" path = "summary" if path != "": @@ -73,19 +72,18 @@ def xray_plot_available(template: dict, entry_id: int) -> bool: def eels_plot_available(template: dict, entry_id: int) -> bool: """Choose a preferred NXdata/data instance for EELS.""" entry_name = f"entry{entry_id}" - trg = f"/ENTRY[{entry_name}]/measurement/" \ - f"EVENT_DATA_EM[event_data_em1]/eels/" + trg = f"/ENTRY[{entry_name}]/measurement/" f"EVENT_DATA_EM[event_data_em1]/eels/" path = "" if f"{trg}stack/DATA[data_counts]" in template.keys(): assert isinstance( - template[f"{trg}stack/DATA[data_counts]"]["compress"], np.ndarray), \ - "EELS data stack not existent!" + template[f"{trg}stack/DATA[data_counts]"]["compress"], np.ndarray + ), "EELS data stack not existent!" path = "stack" if f"{trg}summary/DATA[data_counts]" in template.keys(): assert isinstance( - template[f"{trg}summary/DATA[data_counts]"]["compress"], np.ndarray), \ - "EELS data summary not existent!" + template[f"{trg}summary/DATA[data_counts]"]["compress"], np.ndarray + ), "EELS data summary not existent!" path = "summary" if path != "": @@ -108,14 +106,13 @@ def eels_plot_available(template: dict, entry_id: int) -> bool: def adf_plot_available(template: dict, entry_id: int) -> bool: """Choose a preferred NXdata/data instance for ADF.""" entry_name = f"entry{entry_id}" - trg = f"/ENTRY[{entry_name}]/measurement/" \ - f"EVENT_DATA_EM[event_data_em1]/adf/" + trg = f"/ENTRY[{entry_name}]/measurement/" f"EVENT_DATA_EM[event_data_em1]/adf/" path = "" if f"{trg}stack/DATA[data_counts]" in template.keys(): assert isinstance( - template[f"{trg}stack/DATA[data_counts]"]["compress"], np.ndarray), \ - "ADF data stack not existent!" + template[f"{trg}stack/DATA[data_counts]"]["compress"], np.ndarray + ), "ADF data stack not existent!" 
path = "stack" if path != "": @@ -138,14 +135,16 @@ def adf_plot_available(template: dict, entry_id: int) -> bool: def image_plot_available(template: dict, entry_id: int) -> bool: """Choose a preferred NXdata/data instance for generic image.""" entry_name = f"entry{entry_id}" - trg = f"/ENTRY[{entry_name}]/measurement/EVENT_DATA_EM[event_data_em1]/" \ - f"IMAGE_SET[image_set1]/" + trg = ( + f"/ENTRY[{entry_name}]/measurement/EVENT_DATA_EM[event_data_em1]/" + f"IMAGE_SET[image_set1]/" + ) path = "" if f"{trg}DATA[stack]/data_counts" in template.keys(): assert isinstance( - template[f"{trg}DATA[stack]/data_counts"]["compress"], np.ndarray), \ - "Generic image data stack not existent!" + template[f"{trg}DATA[stack]/data_counts"]["compress"], np.ndarray + ), "Generic image data stack not existent!" path = "stack" if path != "": @@ -165,7 +164,9 @@ def image_plot_available(template: dict, entry_id: int) -> bool: return False -def em_spctrscpy_default_plot_generator(template: dict, n_entries: int) -> dict: # ignore:R0915 +def em_spctrscpy_default_plot_generator( + template: dict, n_entries: int +) -> dict: # ignore:R0915 """For a valid NXS file at least one default plot is required.""" for entry_id in np.arange(1, n_entries + 1): diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_use_case_selector.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_use_case_selector.py index cf9c861d9..0f5309cb1 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_use_case_selector.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_use_case_selector.py @@ -47,9 +47,10 @@ def __init__(self, file_paths: Tuple[str] = None): for file_name in file_paths: index = file_name.lower().rfind(".") if index >= 0: - suffix = file_name.lower()[index + 1::] - add = (suffix in self.supported_mime_types) \ - and (file_name not in self.case[suffix]) + suffix = file_name.lower()[index + 1 : :] + add = (suffix in self.supported_mime_types) and ( + file_name not in self.case[suffix] + ) if add is True: self.case[suffix].append(file_name) # the em reader currently supports a combination of one vendor file and @@ -66,8 +67,9 @@ def __init__(self, file_paths: Tuple[str] = None): if mime_type in ["bcf", "dm3", "emd"]: hspy_input += len(value) - assert (oina_input == 1) or (hspy_input == 1), \ - "Currently the reader supports to have only one vendor input file!" + assert (oina_input == 1) or ( + hspy_input == 1 + ), "Currently the reader supports to have only one vendor input file!" if oina_input == 1: self.vendor += self.case["h5oina"] self.vendor_parser = "oina" @@ -77,8 +79,9 @@ def __init__(self, file_paths: Tuple[str] = None): self.vendor_parser = "hspy" eln_input = len(self.case["yaml"]) + len(self.case["yml"]) - assert eln_input == 1, \ - "Currently the reader supports to have only one YAML input-file!" + assert ( + eln_input == 1 + ), "Currently the reader supports to have only one YAML input-file!" 
for mime_type in ["yaml", "yml"]: self.eln += self.case[mime_type] @@ -86,4 +89,5 @@ def __init__(self, file_paths: Tuple[str] = None): self.is_valid = True + # test = EmUseCaseSelector(("a.bcf", "b.yaml", "c.apt", "d.h5oina")) diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_versioning.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_versioning.py index e980594a1..5ad44ec51 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_versioning.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/em_versioning.py @@ -19,13 +19,13 @@ # pylint: disable=no-member -from pynxtools.dataconverter.readers.shared.shared_utils \ - import get_repo_last_commit +from pynxtools.dataconverter.readers.shared.shared_utils import get_repo_last_commit NX_EM_ADEF_NAME = "NXem" -NX_EM_ADEF_VERSION = "nexus-fairmat-proposal successor of " \ - "9636feecb79bb32b828b1a9804269573256d7696" +NX_EM_ADEF_VERSION = ( + "nexus-fairmat-proposal successor of " "9636feecb79bb32b828b1a9804269573256d7696" +) # based on https://fairmat-experimental.github.io/nexus-fairmat-proposal NX_EM_EXEC_NAME = "dataconverter/reader/em_sptrscpy/reader.py" NX_EM_EXEC_VERSION = get_repo_last_commit() diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_adf.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_adf.py index 9b2bfca04..085e10754 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_adf.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_adf.py @@ -25,8 +25,9 @@ import hyperspy.api as hs -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_base_classes \ - import NxObject +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_base_classes import ( + NxObject, +) class HspyRectRoiAdfImage: @@ -48,29 +49,32 @@ def __init__(self, hspy_clss): def is_supported(self, hspy_s2d): """Check if the input has supported axes_manager and key metadata.""" - assert hspy_s2d.metadata["Signal"]["signal_type"] == "", \ - "hspy_s2d is not a valid hyperspy generic instance !" - assert hspy_s2d.data.ndim == 2, \ - "hspy_s2d is not a valid 2D dataset !" + assert ( + hspy_s2d.metadata["Signal"]["signal_type"] == "" + ), "hspy_s2d is not a valid hyperspy generic instance !" + assert hspy_s2d.data.ndim == 2, "hspy_s2d is not a valid 2D dataset !" axes_dict = hspy_s2d.axes_manager.as_dictionary() required_axis_names = ["axis-0", "axis-1"] for req_key in required_axis_names: - assert req_key in axes_dict.keys(), \ + assert req_key in axes_dict.keys(), ( req_key + " is unexpectedly not registered in the axes_manager !" + ) required_keywords = ["_type", "name", "units", "size", "scale", "offset"] avail_axis_names = [] for keyword in axes_dict.keys(): for req_key in required_keywords: # check if all required keys exist - assert req_key in axes_dict[keyword].keys(), \ + assert req_key in axes_dict[keyword].keys(), ( "hspy_s2d axis " + keyword + " lacks " + req_key + " !" - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + ) + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this axis is not of type UniformDataAxis !" 
+ ) avail_axis_names.append(axes_dict[keyword]["name"]) - axes_as_expected_emd \ - = np.all(np.sort(avail_axis_names) == np.sort(["y", "x"])) - axes_as_expected_bcf \ - = np.all(np.sort(avail_axis_names) == np.sort(["height", "width"])) + axes_as_expected_emd = np.all(np.sort(avail_axis_names) == np.sort(["y", "x"])) + axes_as_expected_bcf = np.all( + np.sort(avail_axis_names) == np.sort(["height", "width"]) + ) # ##MK::Adrien/Cecile"s BCF and EMD example contains at least one # such case where the hyperspy created view in metadata is not # consistent across representations generated with different parsers @@ -98,24 +102,32 @@ def parse(self, hspy_s2d): scale = np.float64(axes_dict[keyword]["scale"]) size = np.uint32(axes_dict[keyword]["size"]) unit = str(axes_dict[keyword]["units"]) - y_axis = (axes_dict[keyword]["name"] == "y") \ - or (axes_dict[keyword]["name"] == "height") - x_axis = (axes_dict[keyword]["name"] == "x") \ - or (axes_dict[keyword]["name"] == "width") + y_axis = (axes_dict[keyword]["name"] == "y") or ( + axes_dict[keyword]["name"] == "height" + ) + x_axis = (axes_dict[keyword]["name"] == "x") or ( + axes_dict[keyword]["name"] == "width" + ) if y_axis is True: - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this x axis is not of type UniformDataAxis !" + ) self.meta["ypos"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["ypos"].unit = unit self.meta["ypos_long_name"].value = "y" # ##MK::name y always! if x_axis is True: - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this y axis is not of type UniformDataAxis !" + ) self.meta["xpos"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["xpos"].unit = unit self.meta["xpos_long_name"].value = "x" # ##MK::name x always! # ##MK::improve case handling @@ -171,21 +183,20 @@ def parse_hspy_instances(self, hspy_list): # if self.data[-1].is_valid is False: # self.is_valid = False - def report(self, prefix: str, frame_id: int, - ifo: dict, template: dict) -> dict: + def report(self, prefix: str, frame_id: int, ifo: dict, template: dict) -> dict: """Enter data from the NX-specific representation into the template.""" if self.is_valid is False: print(f"\tIn function {__name__} reporting nothing!") return template print(f"\tIn function {__name__} reporting...") - assert (len(self.data) >= 0) and (len(self.data) <= 1), \ - "More than one spectrum stack is currently not supported!" + assert (len(self.data) >= 0) and ( + len(self.data) <= 1 + ), "More than one spectrum stack is currently not supported!" 
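# Editor's sketch, not part of the patch: the calibration recipe the parse()
# methods above apply to every hyperspy UniformDataAxis, a linspace spanning
# size * scale, shifted by half the axis offset. Axis values are hypothetical:
import numpy as np

axis = {"offset": 0.0, "scale": 0.5, "size": 4, "units": "nm"}
offset = np.float64(axis["offset"])
scale = np.float64(axis["scale"])
size = np.uint32(axis["size"])
xpos = np.asarray(
    np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + offset / 2.0,
    np.float64,
)  # array([0., 0.66666667, 1.33333333, 2.]) for these values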
if len(self.data) == 1: trg = f"{prefix}adf/PROCESS[process1]/" template[f"{trg}PROGRAM[program1]/program"] = "hyperspy" - template[f"{trg}PROGRAM[program1]/program/@version"] \ - = hs.__version__ + template[f"{trg}PROGRAM[program1]/program/@version"] = hs.__version__ template[f"{trg}mode"] = "n/a" template[f"{trg}detector_identifier"] = "n/a" template[f"{trg}source"] = ifo["source_file_name"] @@ -207,29 +218,41 @@ def report(self, prefix: str, frame_id: int, template[f"{trg}@AXISNAME[axis_x_indices]"] = np.uint32(2) template[f"{trg}@AXISNAME[axis_y_indices]"] = np.uint32(1) template[f"{trg}@AXISNAME[axis_image_identifier]"] = np.uint32(0) - template[f"{trg}DATA[data_counts]"] \ - = {"compress": np.reshape( + template[f"{trg}DATA[data_counts]"] = { + "compress": np.reshape( np.atleast_3d(self.data[0].meta["intensity"].value), - (1, - np.shape(self.data[0].meta["intensity"].value)[0], - np.shape(self.data[0].meta["intensity"].value)[1])), - "strength": 1} + ( + 1, + np.shape(self.data[0].meta["intensity"].value)[0], + np.shape(self.data[0].meta["intensity"].value)[1], + ), + ), + "strength": 1, + } # is the data layout correct? # I am pretty sure the last two have to be swopped also!! template[f"{trg}DATA[data_counts]/@long_name"] = "Counts (a.u.)" - template[f"{trg}AXISNAME[axis_x]"] \ - = {"compress": self.data[0].meta["xpos"].value, "strength": 1} + template[f"{trg}AXISNAME[axis_x]"] = { + "compress": self.data[0].meta["xpos"].value, + "strength": 1, + } template[f"{trg}AXISNAME[axis_x]/@units"] = self.data[0].meta["xpos"].unit - template[f"{trg}AXISNAME[axis_x]/@long_name"] \ - = f"x ({self.data[0].meta['xpos'].unit})" - template[f"{trg}AXISNAME[axis_y]"] \ - = {"compress": self.data[0].meta["ypos"].value, "strength": 1} + template[ + f"{trg}AXISNAME[axis_x]/@long_name" + ] = f"x ({self.data[0].meta['xpos'].unit})" + template[f"{trg}AXISNAME[axis_y]"] = { + "compress": self.data[0].meta["ypos"].value, + "strength": 1, + } template[f"{trg}AXISNAME[axis_y]/@units"] = self.data[0].meta["ypos"].unit - template[f"{trg}AXISNAME[axis_y]/@long_name"] \ - = f"y ({self.data[0].meta['ypos'].unit})" - template[f"{trg}AXISNAME[axis_image_identifier]"] \ - = np.atleast_1d(np.uint32(frame_id)) - template[f"{trg}AXISNAME[axis_image_identifier]/@long_name"] \ - = "image identifier" + template[ + f"{trg}AXISNAME[axis_y]/@long_name" + ] = f"y ({self.data[0].meta['ypos'].unit})" + template[f"{trg}AXISNAME[axis_image_identifier]"] = np.atleast_1d( + np.uint32(frame_id) + ) + template[ + f"{trg}AXISNAME[axis_image_identifier]/@long_name" + ] = "image identifier" return template diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_comments.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_comments.py index a1737ffbc..cc0f5a2f3 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_comments.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_comments.py @@ -1,4 +1,3 @@ - # # Copyright The NOMAD Authors. 
# diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_eels.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_eels.py index 70bed5b55..8c890e40d 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_eels.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_eels.py @@ -25,8 +25,9 @@ import hyperspy.api as hs -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_base_classes \ - import NxObject +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_base_classes import ( + NxObject, +) class HspyRectRoiEelsAllSpectra: @@ -53,30 +54,34 @@ def __init__(self, hspy_clss): def is_supported(self, hspy_s3d): """Check if the input has supported axes_manager and key metadata.""" - assert hspy_s3d.metadata["Signal"]["signal_type"] == "EELS", \ - "hspy_s3d is not a valid EELS hyperspy instance !" - assert hspy_s3d.data.ndim == 3, \ - "hspy_s3d is not a valid 3D dataset !" + assert ( + hspy_s3d.metadata["Signal"]["signal_type"] == "EELS" + ), "hspy_s3d is not a valid EELS hyperspy instance !" + assert hspy_s3d.data.ndim == 3, "hspy_s3d is not a valid 3D dataset !" axes_dict = hspy_s3d.axes_manager.as_dictionary() required_axis_names = ["axis-0", "axis-1", "axis-2"] for req_key in required_axis_names: - assert req_key in axes_dict.keys(), \ + assert req_key in axes_dict.keys(), ( req_key + " is unexpectedly not registered in the axes_manager !" + ) required_keywords = ["_type", "name", "units", "size", "scale", "offset"] avail_axis_names = [] for keyword in axes_dict.keys(): for req_key in required_keywords: # check if all required keys exist - assert req_key in axes_dict[keyword].keys(), \ + assert req_key in axes_dict[keyword].keys(), ( "hspy_s3d axis " + keyword + " lacks " + req_key + " !" + ) - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this axis is not of type UniformDataAxis !" 
+ ) avail_axis_names.append(axes_dict[keyword]["name"]) print(np.sort(avail_axis_names)) print(np.sort(["y", "x", "Energy loss"])) axes_as_expected = np.all( - np.sort(avail_axis_names) == np.sort(["y", "x", "Energy loss"])) + np.sort(avail_axis_names) == np.sort(["y", "x", "Energy loss"]) + ) if axes_as_expected is False: print(f"\tIn function {__name__} as expected") self.is_valid = False @@ -99,20 +104,26 @@ def parse(self, hspy_s3d): unit = str(axes_dict[keyword]["units"]) if axes_dict[keyword]["name"] == "y": self.meta["ypos"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["ypos"].unit = unit self.meta["ypos_long_name"].value = "y" elif axes_dict[keyword]["name"] == "x": self.meta["xpos"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["xpos"].unit = unit self.meta["xpos_long_name"].value = "x" else: # axes_dict[keyword]["name"] == "Energy loss": self.meta["energy_loss"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["energy_loss"].unit = unit self.meta["energy_loss_long_name"].value = "Energy loss" @@ -136,30 +147,34 @@ def __init__(self, hspy_clss): def is_supported(self, hspy_s3d): """Check if the input has supported axes_manager and key metadata.""" - assert hspy_s3d.metadata["Signal"]["signal_type"] == "EELS", \ - "hspy_s3d is not a valid EELS hyperspy instance !" - assert hspy_s3d.data.ndim == 3, \ - "hspy_s3d is not a valid 3D dataset !" + assert ( + hspy_s3d.metadata["Signal"]["signal_type"] == "EELS" + ), "hspy_s3d is not a valid EELS hyperspy instance !" + assert hspy_s3d.data.ndim == 3, "hspy_s3d is not a valid 3D dataset !" axes_dict = hspy_s3d.axes_manager.as_dictionary() required_axis_names = ["axis-0", "axis-1", "axis-2"] for req_key in required_axis_names: - assert req_key in axes_dict.keys(), \ + assert req_key in axes_dict.keys(), ( req_key + " is unexpectedly not registered in the axes_manager !" + ) required_keywords = ["_type", "name", "units", "size", "scale", "offset"] avail_axis_names = [] for keyword in axes_dict.keys(): for req_key in required_keywords: # check if all required keys exist - assert req_key in axes_dict[keyword].keys(), \ + assert req_key in axes_dict[keyword].keys(), ( "hspy_s3d axis " + keyword + " lacks " + req_key + " !" + ) - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this axis is not of type UniformDataAxis !" 
+ ) avail_axis_names.append(axes_dict[keyword]["name"]) print(np.sort(avail_axis_names)) print(np.sort(["y", "x", "Energy loss"])) axes_as_expected = np.all( - np.sort(avail_axis_names) == np.sort(["y", "x", "Energy loss"])) + np.sort(avail_axis_names) == np.sort(["y", "x", "Energy loss"]) + ) if axes_as_expected is False: print(f"\tIn function {__name__} as expected") self.is_valid = False @@ -184,8 +199,9 @@ def parse(self, hspy_s3d): self.meta["counts"].value = np.zeros([shape[2]], np.uint32) for y_pixel in np.arange(0, shape[0]): for x_pixel in np.arange(1, shape[1]): - self.meta["counts"].value \ - += np.asarray(hspy_s3d.data[y_pixel, x_pixel, :], np.uint32) + self.meta["counts"].value += np.asarray( + hspy_s3d.data[y_pixel, x_pixel, :], np.uint32 + ) # seems that hspy is adaptive, uses numpy under the hood # though, so a hspy signal's .data member is already a proper numpy dtype # therefore, an explicit call like this @@ -199,8 +215,10 @@ def parse(self, hspy_s3d): unit = str(axes_dict[keyword]["units"]) if axes_dict[keyword]["name"] == "Energy loss": self.meta["energy_loss"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["energy_loss"].unit = unit self.meta["energy_loss_long_name"].value = "Energy loss" @@ -224,8 +242,9 @@ def is_an_implemented_case(self, hspy_list): cardinality_stack = 0 for hspy_clss in hspy_list: if isinstance(hspy_clss, hs.signals.EELSSpectrum) is True: - assert hspy_clss.data.ndim in [3], \ - "Unexpectedly found unsupported-dimensional EELSSpectrum!" + assert hspy_clss.data.ndim in [ + 3 + ], "Unexpectedly found unsupported-dimensional EELSSpectrum!" if hspy_clss.data.ndim == 3: cardinality_stack += 1 if cardinality_stack != 1: @@ -238,25 +257,25 @@ def parse_hspy_instances(self, hspy_list): print(f"\tIn function {__name__}") for hspy_clss in hspy_list: if isinstance(hspy_clss, hs.signals.EELSSpectrum) is True: - assert hspy_clss.data.ndim in [3], \ - "Unexpectedly found unsupported-dimensional EELSSpectrum!" + assert hspy_clss.data.ndim in [ + 3 + ], "Unexpectedly found unsupported-dimensional EELSSpectrum!" if hspy_clss.data.ndim == 3: - self.stack_data.append( - HspyRectRoiEelsAllSpectra(hspy_clss)) - self.summary_data.append( - HspyRectRoiEelsSummarySpectrum(hspy_clss)) + self.stack_data.append(HspyRectRoiEelsAllSpectra(hspy_clss)) + self.summary_data.append(HspyRectRoiEelsSummarySpectrum(hspy_clss)) - def report(self, prefix: str, frame_id: int, - ifo: dict, template: dict) -> dict: + def report(self, prefix: str, frame_id: int, ifo: dict, template: dict) -> dict: """Enter data from the NX-specific representation into the template.""" if self.is_valid is False: print(f"\t{__name__} reporting nothing!") return template print(f"\t{__name__} reporting...") - assert (len(self.stack_data) >= 0) and (len(self.stack_data) <= 1), \ - "More than one spectrum stack is currently not supported!" - assert (len(self.summary_data) >= 0) and (len(self.summary_data) <= 1), \ - "More than one sum spectrum stack is currently not supported!" + assert (len(self.stack_data) >= 0) and ( + len(self.stack_data) <= 1 + ), "More than one spectrum stack is currently not supported!" + assert (len(self.summary_data) >= 0) and ( + len(self.summary_data) <= 1 + ), "More than one sum spectrum stack is currently not supported!" 
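# Editor's sketch, not part of the patch: for a (ny, nx, n_energy) EELS stack,
# the nested pixel loops in HspyRectRoiEelsSummarySpectrum.parse() above amount
# to a single reduction over both scan axes. Note that the inner loop as written
# starts at x_pixel = 1, so the first column of pixels is skipped. Data here
# are hypothetical:
import numpy as np

stack = np.random.poisson(2.0, size=(8, 8, 128)).astype(np.uint32)
summary = stack[:, 1:, :].sum(axis=(0, 1))  # equivalent to the loop above
summary_all = stack.sum(axis=(0, 1))        # variant including all pixels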
if len(self.stack_data) == 1: trg = f"{prefix}eels/PROCESS[process1]/" @@ -275,32 +294,42 @@ def report(self, prefix: str, frame_id: int, template[f"{trg}@AXISNAME[axis_energy_loss_indices]"] = np.uint32(2) template[f"{trg}@AXISNAME[axis_x_indices]"] = np.uint32(1) template[f"{trg}@AXISNAME[axis_y_indices]"] = np.uint32(0) - template[f"{trg}DATA[data_counts]"] \ - = {"compress": self.stack_data[0].meta["counts"].value, - "strength": 1} + template[f"{trg}DATA[data_counts]"] = { + "compress": self.stack_data[0].meta["counts"].value, + "strength": 1, + } template[f"{trg}DATA[data_counts]/@units"] = "" template[f"{trg}DATA[data_counts]/@long_name"] = "Signal (a.u.)" - template[f"{trg}AXISNAME[axis_energy_loss]"] \ - = {"compress": self.stack_data[0].meta["energy_loss"].value, - "strength": 1} - template[f"{trg}AXISNAME[axis_energy_loss]/@units"] \ - = self.stack_data[0].meta["energy_loss"].unit - template[f"{trg}AXISNAME[axis_energy_loss]/@long_name"] \ - = f"Electron energy loss ({self.stack_data[0].meta['energy_loss'].unit})" - template[f"{trg}AXISNAME[axis_x]"] \ - = {"compress": self.stack_data[0].meta["xpos"].value, - "strength": 1} - template[f"{trg}AXISNAME[axis_x]/@units"] \ - = self.stack_data[0].meta["xpos"].unit - template[f"{trg}AXISNAME[axis_x]/@long_name"] \ - = f"x ({self.stack_data[0].meta['xpos'].unit})" - template[f"{trg}AXISNAME[axis_y]"] \ - = {"compress": self.stack_data[0].meta["ypos"].value, - "strength": 1} - template[f"{trg}AXISNAME[axis_y]/@units"] \ - = self.stack_data[0].meta["ypos"].unit - template[f"{trg}AXISNAME[axis_y]/@long_name"] \ - = f"y ({self.stack_data[0].meta['ypos'].unit})" + template[f"{trg}AXISNAME[axis_energy_loss]"] = { + "compress": self.stack_data[0].meta["energy_loss"].value, + "strength": 1, + } + template[f"{trg}AXISNAME[axis_energy_loss]/@units"] = ( + self.stack_data[0].meta["energy_loss"].unit + ) + template[ + f"{trg}AXISNAME[axis_energy_loss]/@long_name" + ] = f"Electron energy loss ({self.stack_data[0].meta['energy_loss'].unit})" + template[f"{trg}AXISNAME[axis_x]"] = { + "compress": self.stack_data[0].meta["xpos"].value, + "strength": 1, + } + template[f"{trg}AXISNAME[axis_x]/@units"] = ( + self.stack_data[0].meta["xpos"].unit + ) + template[ + f"{trg}AXISNAME[axis_x]/@long_name" + ] = f"x ({self.stack_data[0].meta['xpos'].unit})" + template[f"{trg}AXISNAME[axis_y]"] = { + "compress": self.stack_data[0].meta["ypos"].value, + "strength": 1, + } + template[f"{trg}AXISNAME[axis_y]/@units"] = ( + self.stack_data[0].meta["ypos"].unit + ) + template[ + f"{trg}AXISNAME[axis_y]/@long_name" + ] = f"y ({self.stack_data[0].meta['ypos'].unit})" if len(self.summary_data) == 1: trg = f"{prefix}eels/summary/" @@ -309,16 +338,20 @@ def report(self, prefix: str, frame_id: int, template[f"{trg}@signal"] = "data_counts" template[f"{trg}@axes"] = ["axis_energy_loss"] template[f"{trg}@AXISNAME[axis_energy_loss_indices]"] = np.uint32(0) - template[f"{trg}DATA[data_counts]"] \ - = {"compress": self.summary_data[0].meta["counts"].value, - "strength": 1} + template[f"{trg}DATA[data_counts]"] = { + "compress": self.summary_data[0].meta["counts"].value, + "strength": 1, + } template[f"{trg}DATA[data_counts]/@long_name"] = "Signal (a.u.)" - template[f"{trg}AXISNAME[axis_energy_loss]"] \ - = {"compress": self.summary_data[0].meta["energy_loss"].value, - "strength": 1} - template[f"{trg}AXISNAME[axis_energy_loss]/@units"] \ - = self.summary_data[0].meta["energy_loss"].unit - template[f"{trg}AXISNAME[axis_energy_loss]/@long_name"] \ - = f"Energy loss 
({self.summary_data[0].meta['energy_loss'].unit})" + template[f"{trg}AXISNAME[axis_energy_loss]"] = { + "compress": self.summary_data[0].meta["energy_loss"].value, + "strength": 1, + } + template[f"{trg}AXISNAME[axis_energy_loss]/@units"] = ( + self.summary_data[0].meta["energy_loss"].unit + ) + template[ + f"{trg}AXISNAME[axis_energy_loss]/@long_name" + ] = f"Energy loss ({self.summary_data[0].meta['energy_loss'].unit})" return template diff --git a/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_xray.py b/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_xray.py index 7f4cd7086..93b8ea065 100644 --- a/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_xray.py +++ b/pynxtools/dataconverter/readers/em_spctrscpy/utils/hspy/em_hspy_xray.py @@ -25,8 +25,9 @@ import hyperspy.api as hs -from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_base_classes \ - import NxObject +from pynxtools.dataconverter.readers.em_spctrscpy.utils.em_nexus_base_classes import ( + NxObject, +) class HspyRectRoiXrayAllSpectra: @@ -52,30 +53,35 @@ def __init__(self, hspy_clss): def is_supported(self, hspy_s3d): """Check if the input has supported axes_manager and key metadata.""" - assert hspy_s3d.metadata["Signal"]["signal_type"] == "EDS_TEM", \ - "hspy_s3d is not a valid hyperspy generic instance !" - assert hspy_s3d.data.ndim == 3, \ - "hspy_s3d is not a valid 3D dataset !" + assert ( + hspy_s3d.metadata["Signal"]["signal_type"] == "EDS_TEM" + ), "hspy_s3d is not a valid hyperspy generic instance !" + assert hspy_s3d.data.ndim == 3, "hspy_s3d is not a valid 3D dataset !" axes_dict = hspy_s3d.axes_manager.as_dictionary() required_axis_names = ["axis-0", "axis-1", "axis-2"] for req_key in required_axis_names: - assert req_key in axes_dict.keys(), \ + assert req_key in axes_dict.keys(), ( req_key + " is unexpectedly not registered in the axes_manager !" + ) required_keywords = ["_type", "name", "units", "size", "scale", "offset"] avail_axis_names = [] for keyword in axes_dict.keys(): for req_key in required_keywords: # check if all required keys exist - assert req_key in axes_dict[keyword].keys(), \ + assert req_key in axes_dict[keyword].keys(), ( "hspy_s3d axis " + keyword + " lacks " + req_key + " !" + ) - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this axis is not of type UniformDataAxis !" 
+ ) avail_axis_names.append(axes_dict[keyword]["name"]) axes_as_expected_emd = np.all( - np.sort(avail_axis_names) == np.sort(["y", "x", "X-ray energy"])) + np.sort(avail_axis_names) == np.sort(["y", "x", "X-ray energy"]) + ) axes_as_expected_bcf = np.all( - np.sort(avail_axis_names) == np.sort(["height", "width", "Energy"])) + np.sort(avail_axis_names) == np.sort(["height", "width", "Energy"]) + ) if (axes_as_expected_emd is False) and (axes_as_expected_bcf is True): print(f"\tIn function {__name__} as expected") self.is_valid = False @@ -96,28 +102,37 @@ def parse(self, hspy_s3d): size = np.uint32(axes_dict[keyword]["size"]) unit = str(axes_dict[keyword]["units"]) - y_axis = (axes_dict[keyword]["name"] == "y") \ - or (axes_dict[keyword]["name"] == "height") - x_axis = (axes_dict[keyword]["name"] == "x") \ - or (axes_dict[keyword]["name"] == "width") - e_axis = (axes_dict[keyword]["name"] == "X-ray energy") \ - or (axes_dict[keyword]["name"] == "Energy") + y_axis = (axes_dict[keyword]["name"] == "y") or ( + axes_dict[keyword]["name"] == "height" + ) + x_axis = (axes_dict[keyword]["name"] == "x") or ( + axes_dict[keyword]["name"] == "width" + ) + e_axis = (axes_dict[keyword]["name"] == "X-ray energy") or ( + axes_dict[keyword]["name"] == "Energy" + ) if y_axis is True: self.meta["ypos"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["ypos"].unit = unit self.meta["ypos_long_name"].value = "y" # ##MK::name y always! if x_axis is True: self.meta["xpos"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["xpos"].unit = unit self.meta["xpos_long_name"].value = "x" # ##MK::name x always! if e_axis is True: self.meta["photon_energy"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["photon_energy"].unit = unit self.meta["photon_energy_long_name"].value = "Energy" # ##MK::name Energy always! @@ -142,28 +157,30 @@ def __init__(self, hspy_clss): def is_supported(self, hspy_s1d): """Check if the input has supported axes_manager and key metadata.""" - assert hspy_s1d.metadata["Signal"]["signal_type"] == "EDS_TEM", \ - "hspy_s3d is not a valid hyperspy generic instance !" - assert hspy_s1d.data.ndim == 1, \ - "hspy_s3d is not a valid 1D dataset !" + assert ( + hspy_s1d.metadata["Signal"]["signal_type"] == "EDS_TEM" + ), "hspy_s3d is not a valid hyperspy generic instance !" + assert hspy_s1d.data.ndim == 1, "hspy_s3d is not a valid 1D dataset !" axes_dict = hspy_s1d.axes_manager.as_dictionary() required_axis_names = ["axis-0"] for req_key in required_axis_names: - assert req_key in axes_dict.keys(), \ + assert req_key in axes_dict.keys(), ( req_key + " is unexpectedly not registered in the axes_manager !" + ) required_keywords = ["_type", "name", "units", "size", "scale", "offset"] avail_axis_names = [] for keyword in axes_dict.keys(): for req_key in required_keywords: # check if all required keys exist - assert req_key in axes_dict[keyword].keys(), \ + assert req_key in axes_dict[keyword].keys(), ( "hspy_s1d axis " + keyword + " lacks " + req_key + " !" 
+ ) - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this axis is not of type UniformDataAxis !" + ) avail_axis_names.append(axes_dict[keyword]["name"]) - axes_as_expected = np.all( - np.sort(avail_axis_names) == np.sort(["Energy"])) + axes_as_expected = np.all(np.sort(avail_axis_names) == np.sort(["Energy"])) if axes_as_expected is False: print(f"\tIn function {__name__} as expected") self.is_valid = False @@ -189,8 +206,10 @@ def parse(self, hspy_s1d): unit = str(axes_dict[keyword]["units"]) # if axes_dict[keyword]["name"] == "Energy": self.meta["photon_energy"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["photon_energy"].unit = unit self.meta["photon_energy_long_name"].value = "Energy" @@ -216,27 +235,29 @@ def __init__(self, hspy_clss): def is_supported(self, hspy_s2d): """Check if the input has supported axes_manager and key metadata.""" - assert hspy_s2d.metadata["Signal"]["signal_type"] == "", \ - "hspy_s2d is not a valid hyperspy generic instance !" - assert hspy_s2d.data.ndim == 2, \ - "hspy_s2d is not a valid 2D dataset !" + assert ( + hspy_s2d.metadata["Signal"]["signal_type"] == "" + ), "hspy_s2d is not a valid hyperspy generic instance !" + assert hspy_s2d.data.ndim == 2, "hspy_s2d is not a valid 2D dataset !" axes_dict = hspy_s2d.axes_manager.as_dictionary() required_axis_names = ["axis-0", "axis-1"] for req_key in required_axis_names: - assert req_key in axes_dict.keys(), \ + assert req_key in axes_dict.keys(), ( req_key + " is unexpectedly not registered in the axes_manager !" + ) required_keywords = ["_type", "name", "units", "size", "scale", "offset"] avail_axis_names = [] for keyword in axes_dict.keys(): for req_key in required_keywords: # check if all required keys exist - assert req_key in axes_dict[keyword].keys(), \ + assert req_key in axes_dict[keyword].keys(), ( "hspy_s2d axis " + keyword + " lacks " + req_key + " !" - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + ) + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this axis is not of type UniformDataAxis !" + ) avail_axis_names.append(axes_dict[keyword]["name"]) - axes_as_expected = np.all( - np.sort(avail_axis_names) == np.sort(["y", "x"])) + axes_as_expected = np.all(np.sort(avail_axis_names) == np.sort(["y", "x"])) if axes_as_expected is False: print(f"\tIn function {__name__} as expected") self.is_valid = False @@ -249,7 +270,7 @@ def parse(self, hspy_s2d): self.meta["title"].value = hspy_s2d.metadata["General"]["title"] # self.meta["long_name"].value = hspy_s2d.metadata["Signal"]["signal_type"] self.meta["long_name"].value = hspy_s2d.metadata["General"]["title"] - self.meta["counts"].value = hspy_s2d.data # hspy uses numpy and adapts ?? + self.meta["counts"].value = hspy_s2d.data # hspy uses numpy and adapts ?? axes_dict = hspy_s2d.axes_manager.as_dictionary() for keyword in axes_dict.keys(): offset = np.float64(axes_dict[keyword]["offset"]) @@ -257,19 +278,25 @@ def parse(self, hspy_s2d): size = np.uint32(axes_dict[keyword]["size"]) unit = str(axes_dict[keyword]["units"]) if axes_dict[keyword]["name"] == "y": - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this x axis is not of type UniformDataAxis !" 
+ ) self.meta["ypos"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["ypos"].unit = unit self.meta["ypos_long_name"].value = "y" else: # axes_dict[keyword]["name"] == "x": - assert axes_dict[keyword]["_type"] == "UniformDataAxis", \ + assert axes_dict[keyword]["_type"] == "UniformDataAxis", ( keyword + ", this y axis is not of type UniformDataAxis !" + ) self.meta["xpos"].value = np.asarray( - np.linspace(0., np.float64(size) * scale, num=size, - endpoint=True) + offset / 2., np.float64) + np.linspace(0.0, np.float64(size) * scale, num=size, endpoint=True) + + offset / 2.0, + np.float64, + ) self.meta["xpos"].unit = unit self.meta["xpos_long_name"].value = "x" @@ -300,8 +327,10 @@ def is_an_implemented_case(self, hspy_list): cardinality_summary = 0 for hspy_clss in hspy_list: if isinstance(hspy_clss, hs.signals.EDSTEMSpectrum) is True: - assert hspy_clss.data.ndim in [1, 3], \ - "Unexpectedly found unsupported-dimensional EDSTEMSpectrum!" + assert hspy_clss.data.ndim in [ + 1, + 3, + ], "Unexpectedly found unsupported-dimensional EDSTEMSpectrum!" if hspy_clss.data.ndim == 1: cardinality_summary += 1 elif hspy_clss.data.ndim == 3: @@ -320,11 +349,9 @@ def parse_hspy_instances(self, hspy_list): if isinstance(hspy_clss, hs.signals.EDSTEMSpectrum) is True: ndim = hspy_clss.data.ndim if ndim == 1: - self.summary_data.append( - HspyRectRoiXraySummarySpectrum(hspy_clss)) + self.summary_data.append(HspyRectRoiXraySummarySpectrum(hspy_clss)) elif ndim == 3: - self.stack_data.append( - HspyRectRoiXrayAllSpectra(hspy_clss)) + self.stack_data.append(HspyRectRoiXrayAllSpectra(hspy_clss)) else: continue elif isinstance(hspy_clss, hs.signals.Signal2D) is True: @@ -332,30 +359,29 @@ def parse_hspy_instances(self, hspy_list): if ndim == 2: title = hspy_clss.metadata["General"]["title"] if title != "HAADF": - self.composition_map[title] \ - = HspyRectRoiXrayMap(hspy_clss) + self.composition_map[title] = HspyRectRoiXrayMap(hspy_clss) else: continue - def report(self, prefix: str, frame_id: int, - ifo: dict, template: dict) -> dict: + def report(self, prefix: str, frame_id: int, ifo: dict, template: dict) -> dict: """Enter data from the NX-specific representation into the template.""" if self.is_valid is False: print(f"\t{__name__} reporting nothing!") return template print(f"\t{__name__} reporting...") - assert (len(self.stack_data) >= 0) and (len(self.stack_data) <= 1), \ - "More than one spectrum stack is currently not supported!" - assert (len(self.summary_data) >= 0) and (len(self.summary_data) <= 1), \ - "More than one sum spectrum stack is currently not supported!" + assert (len(self.stack_data) >= 0) and ( + len(self.stack_data) <= 1 + ), "More than one spectrum stack is currently not supported!" + assert (len(self.summary_data) >= 0) and ( + len(self.summary_data) <= 1 + ), "More than one sum spectrum stack is currently not supported!" 
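# Editor's sketch, not part of the patch: the minimal NXdata wiring that these
# report() methods write for every plottable signal, namely a @signal name, an
# ordered @axes list, one @AXISNAME_indices attribute per axis giving its
# dimension index, plus compressible payload dicts. Paths and shapes below are
# hypothetical:
import numpy as np

template: dict = {}
trg = "/ENTRY[entry1]/measurement/EVENT_DATA_EM[event_data_em1]/xray/summary/"
template[f"{trg}@signal"] = "data_counts"
template[f"{trg}@axes"] = ["axis_photon_energy"]
template[f"{trg}@AXISNAME_indices[axis_photon_energy_indices]"] = np.uint32(0)
template[f"{trg}DATA[data_counts]"] = {
    "compress": np.zeros((128,), np.float64),
    "strength": 1,
}
template[f"{trg}AXISNAME[axis_photon_energy]"] = {
    "compress": np.linspace(0.0, 12.8, num=128),
    "strength": 1,
}
template[f"{trg}AXISNAME[axis_photon_energy]/@units"] = "keV"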
# for keyword, obj in self.composition_map.items(): # print(f"{keyword}, np.shape(obj.counts.value), {np.shape(obj.counts.value)}") if len(self.stack_data) == 1: trg = f"{prefix}xray/PROCESS[process1]/" template[f"{trg}PROGRAM[program1]/program"] = "hyperspy" - template[f"{trg}PROGRAM[program1]/program/@version"] \ - = hs.__version__ + template[f"{trg}PROGRAM[program1]/program/@version"] = hs.__version__ template[f"{trg}mode"] = "n/a" template[f"{trg}detector_identifier"] = "n/a" template[f"{trg}source"] = ifo["source_file_name"] @@ -374,37 +400,47 @@ def report(self, prefix: str, frame_id: int, # template[f"{trg}@long_name"] \ # = self.stack_data[0].meta["long_name"].value template[f"{trg}@signal"] = "data_counts" - template[f"{trg}@axes"] \ - = ["axis_y", "axis_x", "axis_photon_energy"] - template[f"{trg}@AXISNAME_indices[axis_photon_energy_indices]"] \ - = np.uint32(2) + template[f"{trg}@axes"] = ["axis_y", "axis_x", "axis_photon_energy"] + template[f"{trg}@AXISNAME_indices[axis_photon_energy_indices]"] = np.uint32( + 2 + ) template[f"{trg}@AXISNAME_indices[axis_x_indices]"] = np.uint32(1) template[f"{trg}@AXISNAME_indices[axis_y_indices]"] = np.uint32(0) - template[f"{trg}DATA[data_counts]"] \ - = {"compress": self.stack_data[0].meta["counts"].value, - "strength": 1} + template[f"{trg}DATA[data_counts]"] = { + "compress": self.stack_data[0].meta["counts"].value, + "strength": 1, + } template[f"{trg}DATA[data_counts]/@long_name"] = "Photon counts (1)" - template[f"{trg}AXISNAME[axis_photon_energy]"] \ - = {"compress": self.stack_data[0].meta["photon_energy"].value, - "strength": 1} - template[f"{trg}AXISNAME[axis_photon_energy]/@units"] \ - = self.stack_data[0].meta["photon_energy"].unit - template[f"{trg}AXISNAME[axis_photon_energy]/@long_name"] \ - = f"Photon energy ({self.stack_data[0].meta['photon_energy'].unit})" - template[f"{trg}AXISNAME[axis_x]"] \ - = {"compress": self.stack_data[0].meta["xpos"].value, - "strength": 1} - template[f"{trg}AXISNAME[axis_x]/@units"] \ - = self.stack_data[0].meta["xpos"].unit - template[f"{trg}AXISNAME[axis_x]/@long_name"] \ - = f"x ({self.stack_data[0].meta['xpos'].unit})" - template[f"{trg}AXISNAME[axis_y]"] \ - = {"compress": self.stack_data[0].meta["ypos"].value, - "strength": 1} - template[f"{trg}AXISNAME[axis_y]/@units"] \ - = self.stack_data[0].meta["ypos"].unit - template[f"{trg}AXISNAME[axis_y]/@long_name"] \ - = f"y ({self.stack_data[0].meta['ypos'].unit})" + template[f"{trg}AXISNAME[axis_photon_energy]"] = { + "compress": self.stack_data[0].meta["photon_energy"].value, + "strength": 1, + } + template[f"{trg}AXISNAME[axis_photon_energy]/@units"] = ( + self.stack_data[0].meta["photon_energy"].unit + ) + template[ + f"{trg}AXISNAME[axis_photon_energy]/@long_name" + ] = f"Photon energy ({self.stack_data[0].meta['photon_energy'].unit})" + template[f"{trg}AXISNAME[axis_x]"] = { + "compress": self.stack_data[0].meta["xpos"].value, + "strength": 1, + } + template[f"{trg}AXISNAME[axis_x]/@units"] = ( + self.stack_data[0].meta["xpos"].unit + ) + template[ + f"{trg}AXISNAME[axis_x]/@long_name" + ] = f"x ({self.stack_data[0].meta['xpos'].unit})" + template[f"{trg}AXISNAME[axis_y]"] = { + "compress": self.stack_data[0].meta["ypos"].value, + "strength": 1, + } + template[f"{trg}AXISNAME[axis_y]/@units"] = ( + self.stack_data[0].meta["ypos"].unit + ) + template[ + f"{trg}AXISNAME[axis_y]/@long_name" + ] = f"y ({self.stack_data[0].meta['ypos'].unit})" if len(self.summary_data) == 1: trg = f"{prefix}xray/summary/" @@ -412,19 +448,24 @@ def report(self, 
prefix: str, frame_id: int, # template[f"{trg}@long_name"] = self.summary_data[0].meta["long_name"].value template[f"{trg}@signal"] = "data_counts" template[f"{trg}@axes"] = ["axis_photon_energy"] - template[f"{trg}@AXISNAME_indices[axis_photon_energy_indices]"] \ - = np.uint32(0) - template[f"{trg}DATA[data_counts]"] \ - = {"compress": self.summary_data[0].meta["counts"].value, - "strength": 1} + template[f"{trg}@AXISNAME_indices[axis_photon_energy_indices]"] = np.uint32( + 0 + ) + template[f"{trg}DATA[data_counts]"] = { + "compress": self.summary_data[0].meta["counts"].value, + "strength": 1, + } template[f"{trg}DATA[data_counts]/@long_name"] = "Photon counts (1)" - template[f"{trg}AXISNAME[axis_photon_energy]"] \ - = {"compress": self.summary_data[0].meta["photon_energy"].value, - "strength": 1} - template[f"{trg}AXISNAME[axis_photon_energy]/@units"] \ - = self.summary_data[0].meta["photon_energy"].unit - template[f"{trg}AXISNAME[axis_photon_energy]/@long_name"] \ - = f"Photon energy ({self.summary_data[0].meta['photon_energy'].unit})" + template[f"{trg}AXISNAME[axis_photon_energy]"] = { + "compress": self.summary_data[0].meta["photon_energy"].value, + "strength": 1, + } + template[f"{trg}AXISNAME[axis_photon_energy]/@units"] = ( + self.summary_data[0].meta["photon_energy"].unit + ) + template[ + f"{trg}AXISNAME[axis_photon_energy]/@long_name" + ] = f"Photon energy ({self.summary_data[0].meta['photon_energy'].unit})" return template diff --git a/pynxtools/dataconverter/readers/example/reader.py b/pynxtools/dataconverter/readers/example/reader.py index 83e7438b0..dead8f167 100644 --- a/pynxtools/dataconverter/readers/example/reader.py +++ b/pynxtools/dataconverter/readers/example/reader.py @@ -33,10 +33,12 @@ class ExampleReader(BaseReader): # Whitelist for the NXDLs that the reader supports and can process supported_nxdls = ["NXtest"] - def read(self, - template: dict = None, - file_paths: Tuple[str] = None, - objects: Tuple[Any] = None) -> dict: + def read( + self, + template: dict = None, + file_paths: Tuple[str] = None, + objects: Tuple[Any] = None, + ) -> dict: """Reads data from given file and returns a filled template dictionary""" data: dict = {} @@ -44,7 +46,7 @@ def read(self, raise IOError("No input files were given to Example Reader.") for file_path in file_paths: - file_extension = file_path[file_path.rindex("."):] + file_extension = file_path[file_path.rindex(".") :] with open(file_path, "r", encoding="utf-8") as input_file: if file_extension == ".json": data = json.loads(input_file.read()) @@ -52,19 +54,25 @@ def read(self, for k in template.keys(): # The entries in the template dict should correspond with what the dataconverter # outputs with --generate-template for a provided NXDL file - if k.startswith("/ENTRY[entry]/required_group") \ - or k == "/ENTRY[entry]/optional_parent/req_group_in_opt_group": + if ( + k.startswith("/ENTRY[entry]/required_group") + or k == "/ENTRY[entry]/optional_parent/req_group_in_opt_group" + ): continue - field_name = k[k.rfind("/") + 1:] + field_name = k[k.rfind("/") + 1 :] if field_name != "@units": template[k] = data[field_name] - if f"{field_name}_units" in data.keys() and f"{k}/@units" in template.keys(): + if ( + f"{field_name}_units" in data.keys() + and f"{k}/@units" in template.keys() + ): template[f"{k}/@units"] = data[f"{field_name}_units"] template["required"]["/ENTRY[entry]/optional_parent/required_child"] = 1 - template["optional"][("/ENTRY[entry]/optional_parent/" - "req_group_in_opt_group/DATA[data]")] = [0, 1] + 
template["optional"][ + ("/ENTRY[entry]/optional_parent/" "req_group_in_opt_group/DATA[data]") + ] = [0, 1] # Add non template key template["/ENTRY[entry]/does/not/exist"] = "None" @@ -73,62 +81,64 @@ def read(self, template["/ENTRY[entry]/program_name"] = "None" # internal links - template["/ENTRY[entry]/test_link/internal_link"] = {"link": - "/entry/NXODD_name/posint_value"} + template["/ENTRY[entry]/test_link/internal_link"] = { + "link": "/entry/NXODD_name/posint_value" + } # external links - template[("/ENTRY" - "[entry]/test_link" - "/external_link")] = {"link": - f"{os.path.dirname(__file__)}/../../../../tests/" - f"data/dataconverter/readers/mpes/" - f"xarray_saved_small_calibration.h5:/axes/ax3" - } + template[("/ENTRY" "[entry]/test_link" "/external_link")] = { + "link": f"{os.path.dirname(__file__)}/../../../../tests/" + f"data/dataconverter/readers/mpes/" + f"xarray_saved_small_calibration.h5:/axes/ax3" + } # virtual datasets concatenation - my_path = str(f"{os.path.dirname(__file__)}/../../../../tests/" - f"data/dataconverter/readers/mpes") - my_datasets = {"link": - [f"{my_path}/xarray_saved_small_calibration.h5:/axes/ax0", - f"{my_path}/xarray_saved_small_calibration.h5:/axes/ax1", - f"{my_path}/xarray_saved_small_calibration.h5:/axes/ax2" - ] - } - template["/ENTRY[entry]/test_virtual_dataset/concatenate_datasets"] = my_datasets + my_path = str( + f"{os.path.dirname(__file__)}/../../../../tests/" + f"data/dataconverter/readers/mpes" + ) + my_datasets = { + "link": [ + f"{my_path}/xarray_saved_small_calibration.h5:/axes/ax0", + f"{my_path}/xarray_saved_small_calibration.h5:/axes/ax1", + f"{my_path}/xarray_saved_small_calibration.h5:/axes/ax2", + ] + } + template[ + "/ENTRY[entry]/test_virtual_dataset/concatenate_datasets" + ] = my_datasets # virtual datasets slicing - my_path = str(f"{os.path.dirname(__file__)}/../../../../tests/" - f"data/dataconverter/readers/mpes") - template[("/ENTRY[entry]" - "/test_virtual" - "_dataset/sliced" - "_dataset")] = {"link": - (f"{my_path}/xarray_saved_small_" - "calibration.h5:/binned/BinnedData"), - "shape": np.index_exp[:, 1, :, :] - } - template[("/ENTRY[entry]" - "/test_virtual" - "_dataset/slic" - "ed_dataset2")] = {"link": - (f"{my_path}/xarray_saved_small" - "_calibration.h5:/binned/BinnedData"), - "shape": np.index_exp[:, :, :, 1] - } - template[("/ENTRY[entry]" - "/test_virtual" - "_dataset/slic" - "ed_dataset3")] = {"link": - (f"{my_path}/xarray_saved_small" - "_calibration.h5:/binned/BinnedData"), - "shape": np.index_exp[:, :, :, 2:4] - } + my_path = str( + f"{os.path.dirname(__file__)}/../../../../tests/" + f"data/dataconverter/readers/mpes" + ) + template[("/ENTRY[entry]" "/test_virtual" "_dataset/sliced" "_dataset")] = { + "link": ( + f"{my_path}/xarray_saved_small_" "calibration.h5:/binned/BinnedData" + ), + "shape": np.index_exp[:, 1, :, :], + } + template[("/ENTRY[entry]" "/test_virtual" "_dataset/slic" "ed_dataset2")] = { + "link": ( + f"{my_path}/xarray_saved_small" "_calibration.h5:/binned/BinnedData" + ), + "shape": np.index_exp[:, :, :, 1], + } + template[("/ENTRY[entry]" "/test_virtual" "_dataset/slic" "ed_dataset3")] = { + "link": ( + f"{my_path}/xarray_saved_small" "_calibration.h5:/binned/BinnedData" + ), + "shape": np.index_exp[:, :, :, 2:4], + } # compression my_compression_dict = {"compress": "string not to be compressed"} template["/ENTRY[entry]/test_compression/not_to_compress"] = my_compression_dict my_compression_dict2 = {"compress": np.array([1, 2, 3, 4])} - 
template["/ENTRY[entry]/test_compression/compressed_data"] = my_compression_dict2 + template[ + "/ENTRY[entry]/test_compression/compressed_data" + ] = my_compression_dict2 # sh = h5py.File(file_names_to_concatenate[0], 'r')[entry_key].shape # layout = h5py.VirtualLayout(shape=(len(file_names_to_concatenate),) + sh, diff --git a/pynxtools/dataconverter/readers/hall/helpers.py b/pynxtools/dataconverter/readers/hall/helpers.py index 67d6bc4ae..9ae646a11 100644 --- a/pynxtools/dataconverter/readers/hall/helpers.py +++ b/pynxtools/dataconverter/readers/hall/helpers.py @@ -140,14 +140,14 @@ def to_bool(expr: str) -> bool: bool: The boolean value. """ replacements = { - 'On': True, - 'Off': False, - 'Yes': True, - 'No': False, - 'True': True, - 'False': False, - 'true': True, - 'false': False, + "On": True, + "Off": False, + "Yes": True, + "No": False, + "True": True, + "False": False, + "true": True, + "false": False, } return replacements.get(expr) @@ -206,11 +206,11 @@ def clean(unit: str) -> str: str: The cleaned unit string. """ conversions = { - 'VS': "volt * second", - 'Sec': "s", - '²': "^2", - '³': "^3", - 'ohm cm': "ohm * cm", + "VS": "volt * second", + "Sec": "s", + "²": "^2", + "³": "^3", + "ohm cm": "ohm * cm", } for old, new in conversions.items(): @@ -247,31 +247,31 @@ def pandas_df_to_template(prefix: str, data: pd.DataFrame) -> Dict[str, Any]: Returns: Dict[str, Any]: The dict containing the data and metainfo. """ - if prefix.endswith('/'): + if prefix.endswith("/"): prefix = prefix[:-1] template: Dict[str, Any] = {} - template[f'{prefix}/@NX_class'] = 'NXdata' + template[f"{prefix}/@NX_class"] = "NXdata" def write_data(header: str, attr: str, data: np.ndarray) -> None: if header is None: - print('Warning: Trying to write dataframe without a header. Skipping.') + print("Warning: Trying to write dataframe without a header. 
Skipping.") return if is_value_with_unit(header): name, unit = split_str_with_unit(header) - template[f'{prefix}/{name}/@units'] = clean(unit) + template[f"{prefix}/{name}/@units"] = clean(unit) else: name = header.lower() - if attr == '@auxiliary_signals': - if f'{prefix}/{attr}' in template: - template[f'{prefix}/{attr}'].append(name) + if attr == "@auxiliary_signals": + if f"{prefix}/{attr}" in template: + template[f"{prefix}/{attr}"].append(name) else: - template[f'{prefix}/{attr}'] = [name] + template[f"{prefix}/{attr}"] = [name] else: - template[f'{prefix}/{attr}'] = name - template[f'{prefix}/{name}'] = data + template[f"{prefix}/{attr}"] = name + template[f"{prefix}/{name}"] = data if data.index.name is None: data = data.set_index(data.columns[0]) @@ -280,11 +280,11 @@ def write_data(header: str, attr: str, data: np.ndarray) -> None: if data.index.values[-1] == 0: data = data.iloc[:-1] - write_data(data.index.name, '@axes', data.index.values) - write_data(data.columns[0], '@signal', data.iloc[:, 0].values) + write_data(data.index.name, "@axes", data.index.values) + write_data(data.columns[0], "@signal", data.iloc[:, 0].values) for column in data.columns[1:]: - write_data(column, '@auxiliary_signals', data[column].values) + write_data(column, "@auxiliary_signals", data[column].values) return template @@ -302,8 +302,7 @@ def convert_date(datestr: str, timezone: str = "Europe/Berlin") -> str: try: return ( - datetime - .strptime(datestr, r'%m/%d/%y %H%M%S') + datetime.strptime(datestr, r"%m/%d/%y %H%M%S") .astimezone(pytz.timezone(timezone)) .isoformat() ) diff --git a/pynxtools/dataconverter/readers/hall/reader.py b/pynxtools/dataconverter/readers/hall/reader.py index e9f40356b..a18324ba7 100644 --- a/pynxtools/dataconverter/readers/hall/reader.py +++ b/pynxtools/dataconverter/readers/hall/reader.py @@ -1,4 +1,3 @@ - # Copyright The NOMAD Authors. # # This file is part of NOMAD. See https://nomad-lab.eu for further info. 
@@ -43,7 +42,7 @@ CONVERSION_FUNCTIONS = { "Start Time": helpers.convert_date, "Time Completed": helpers.convert_date, - "Skipped at": helpers.convert_date + "Skipped at": helpers.convert_date, } # Keys that indicate the start of measurement block @@ -73,7 +72,7 @@ def split_add_key(fobj: Optional[TextIO], dic: dict, prefix: str, expr: str) -> def parse_enum() -> bool: sprefix = prefix.strip("/") - if 'Keithley' not in sprefix: + if "Keithley" not in sprefix: w_trailing_num = re.search(r"(.*) \d+$", sprefix) if w_trailing_num: sprefix = w_trailing_num.group(1) @@ -149,6 +148,7 @@ def parse_txt(fname: str, encoding: str = "iso-8859-1") -> dict: Returns: dict: Dict containing the data and metadata of the measurement """ + def parse_measurement(line: str, current_section: str, current_measurement: str): data = [] for mline in fobj: @@ -160,12 +160,11 @@ def parse_measurement(line: str, current_section: str, current_measurement: str) dkey = helpers.get_unique_dkey( template, f"{current_section}{current_measurement}/data" ) - template.update(helpers.pandas_df_to_template( - dkey, - pd.DataFrame( - np.array(data, dtype=np.float64), columns=header + template.update( + helpers.pandas_df_to_template( + dkey, pd.DataFrame(np.array(data, dtype=np.float64), columns=header) ) - )) + ) return current_section, current_measurement diff --git a/pynxtools/dataconverter/readers/json_map/reader.py b/pynxtools/dataconverter/readers/json_map/reader.py index d17bb075b..80d97fad3 100644 --- a/pynxtools/dataconverter/readers/json_map/reader.py +++ b/pynxtools/dataconverter/readers/json_map/reader.py @@ -33,7 +33,7 @@ def parse_slice(slice_string): slices = slice_string.split(",") for index, item in enumerate(slices): values = item.split(":") - slices[index] = slice(*[None if x == '' else int(x) for x in values]) + slices[index] = slice(*[None if x == "" else int(x) for x in values]) return np.index_exp[tuple(slices)] @@ -47,13 +47,15 @@ def get_val_nested_keystring_from_dict(keystring, data): current_key = keystring.split("/")[0] if isinstance(data[current_key], (dict, hdfdict.LazyHdfDict)): - return get_val_nested_keystring_from_dict(keystring[keystring.find("/") + 1:], - data[current_key]) + return get_val_nested_keystring_from_dict( + keystring[keystring.find("/") + 1 :], data[current_key] + ) if isinstance(data[current_key], xarray.DataArray): return data[current_key].values if isinstance(data[current_key], xarray.core.dataset.Dataset): - raise NotImplementedError("Xarray datasets are not supported. " - "You can only use xarray dataarrays.") + raise NotImplementedError( + "Xarray datasets are not supported. " "You can only use xarray dataarrays." 
+ ) return data[current_key] @@ -84,8 +86,9 @@ def fill_undocumented(mapping, template, data): """Fill the extra paths provided in the map file that are not in the NXDL""" for path, value in mapping.items(): if is_path(value): - template["undocumented"][path] = get_val_nested_keystring_from_dict(value[1:], - data) + template["undocumented"][path] = get_val_nested_keystring_from_dict( + value[1:], data + ) fill_attributes(path, value[1:], data, template) else: template["undocumented"][path] = value @@ -98,8 +101,9 @@ def fill_documented(template, mapping, template_provided, data): try: map_str = mapping[path] if is_path(map_str): - template[path] = get_val_nested_keystring_from_dict(map_str[1:], - data) + template[path] = get_val_nested_keystring_from_dict( + map_str[1:], data + ) fill_attributes(path, map_str[1:], data, template) else: template[path] = map_str @@ -137,7 +141,11 @@ def get_map_from_partials(partials, template, data): if template_path + "/" + part in template.keys(): template_path = template_path + "/" + part else: - nx_name = f"{attribs['NX_class'][2:].upper()}[{part}]" if attribs and "NX_class" in attribs else part # pylint: disable=line-too-long + nx_name = ( + f"{attribs['NX_class'][2:].upper()}[{part}]" + if attribs and "NX_class" in attribs + else part + ) # pylint: disable=line-too-long template_path = template_path + "/" + nx_name mapping[template_path] = path @@ -152,10 +160,12 @@ class JsonMapReader(BaseReader): # Whitelist for the NXDLs that the reader supports and can process supported_nxdls = ["NXtest", "*"] - def read(self, - template: dict = None, - file_paths: Tuple[str] = None, - objects: Tuple[Any] = None) -> dict: + def read( + self, + template: dict = None, + file_paths: Tuple[str] = None, + objects: Tuple[Any] = None, + ) -> dict: """ Reads data from given file and returns a filled template dictionary. 
@@ -171,9 +181,9 @@ def read(self, data = objects[0] if objects else data for file_path in file_paths: - file_extension = file_path[file_path.rindex("."):] + file_extension = file_path[file_path.rindex(".") :] if file_extension == ".json": - with open(file_path, "r", encoding='utf-8') as input_file: + with open(file_path, "r", encoding="utf-8") as input_file: if ".mapping" in file_path: mapping = json.loads(input_file.read()) else: @@ -184,7 +194,7 @@ def read(self, else: is_hdf5 = False with open(file_path, "rb") as input_file: - if input_file.read(8) == b'\x89HDF\r\n\x1a\n': + if input_file.read(8) == b"\x89HDF\r\n\x1a\n": is_hdf5 = True if is_hdf5: hdf = hdfdict.load(file_path) @@ -197,10 +207,14 @@ def read(self, if len(partials) > 0: mapping = get_map_from_partials(partials, template, data) else: - template = Template({x: "/hierarchical/path/in/your/datafile" for x in template}) - raise IOError("Please supply a JSON mapping file: --input-file" - " my_nxdl_map.mapping.json\n\n You can use this " - "template for the required fields: \n" + str(template)) + template = Template( + {x: "/hierarchical/path/in/your/datafile" for x in template} + ) + raise IOError( + "Please supply a JSON mapping file: --input-file" + " my_nxdl_map.mapping.json\n\n You can use this " + "template for the required fields: \n" + str(template) + ) new_template = Template() convert_shapes_to_slice_objects(mapping) diff --git a/pynxtools/dataconverter/readers/json_yml/reader.py b/pynxtools/dataconverter/readers/json_yml/reader.py index 6ba54889f..64491d933 100644 --- a/pynxtools/dataconverter/readers/json_yml/reader.py +++ b/pynxtools/dataconverter/readers/json_yml/reader.py @@ -60,9 +60,7 @@ def read( print(f"WARNING: File {file_path} does not exist, ignoring entry.") continue - template.update( - self.extensions.get(extension, lambda _: {})(file_path) - ) + template.update(self.extensions.get(extension, lambda _: {})(file_path)) template.update(self.extensions.get("default", lambda _: {})("")) template.update(self.extensions.get("objects", lambda _: {})(objects)) diff --git a/pynxtools/dataconverter/readers/mpes/reader.py b/pynxtools/dataconverter/readers/mpes/reader.py index 7d860765c..9adfc4e75 100644 --- a/pynxtools/dataconverter/readers/mpes/reader.py +++ b/pynxtools/dataconverter/readers/mpes/reader.py @@ -133,11 +133,11 @@ def recursive_parse_metadata(node): metadata = recursive_parse_metadata(h5_file["metadata"]) # Segment to change Vset to V in lens voltages if "file" in metadata.keys(): - for k in list(metadata['file']): + for k in list(metadata["file"]): if "VSet" in k: key = k[:-3] - metadata['file'][key] = metadata['file'][k] - del metadata['file'][k] + metadata["file"][key] = metadata["file"][k] + del metadata["file"][k] xarray = res_to_xarray(data, bin_names, axes, metadata) return xarray @@ -157,22 +157,22 @@ def iterate_dictionary(dic, key_string): CONVERT_DICT = { - 'Instrument': 'INSTRUMENT[instrument]', - 'Analyzer': 'ELECTRONANALYSER[electronanalyser]', - 'Manipulator': 'MANIPULATOR[manipulator]', - 'Beam': 'BEAM[beam]', - 'unit': '@units', - 'Sample': 'SAMPLE[sample]', - 'Source': 'SOURCE[source]', - 'User': 'USER[user]' + "Instrument": "INSTRUMENT[instrument]", + "Analyzer": "ELECTRONANALYSER[electronanalyser]", + "Manipulator": "MANIPULATOR[manipulator]", + "Beam": "BEAM[beam]", + "unit": "@units", + "Sample": "SAMPLE[sample]", + "Source": "SOURCE[source]", + "User": "USER[user]", } REPLACE_NESTED = { - 'SOURCE[source]/Probe': 'SOURCE[source]', - 'SOURCE[source]/Pump': 'SOURCE[source_pump]', 
- 'BEAM[beam]/Probe': 'BEAM[beam]', - 'BEAM[beam]/Pump': 'BEAM[beam_pump]', - 'sample_history': 'sample_history/description' + "SOURCE[source]/Probe": "SOURCE[source]", + "SOURCE[source]/Pump": "SOURCE[source_pump]", + "BEAM[beam]/Probe": "BEAM[beam]", + "BEAM[beam]/Pump": "BEAM[beam_pump]", + "sample_history": "sample_history/description", } @@ -184,7 +184,7 @@ def handle_h5_and_json_file(file_paths, objects): for file_path in file_paths: try: - file_extension = file_path[file_path.rindex("."):] + file_extension = file_path[file_path.rindex(".") :] except ValueError as exc: raise ValueError( f"The file path {file_path} must have an extension.", @@ -216,7 +216,7 @@ def handle_h5_and_json_file(file_paths, objects): FlattenSettings( dic=yaml.safe_load(feln), convert_dict=CONVERT_DICT, - replace_nested=REPLACE_NESTED + replace_nested=REPLACE_NESTED, ) ) @@ -233,6 +233,7 @@ def handle_h5_and_json_file(file_paths, objects): def rgetattr(obj, attr): """Get attributes recursively""" + def _getattr(obj, attr): return getattr(obj, attr) @@ -255,8 +256,10 @@ def fill_data_indices_in_config(config_file_dict, x_array_loaded): new_key = key.replace("*", dim) new_value = value.replace("*", dim) - if new_key not in config_file_dict.keys() \ - and new_value not in config_file_dict.values(): + if ( + new_key not in config_file_dict.keys() + and new_value not in config_file_dict.values() + ): config_file_dict[new_key] = new_value config_file_dict.pop(key) @@ -271,10 +274,10 @@ class MPESReader(BaseReader): supported_nxdls = ["NXmpes"] def read( # pylint: disable=too-many-branches - self, - template: dict = None, - file_paths: Tuple[str] = None, - objects: Tuple[Any] = None, + self, + template: dict = None, + file_paths: Tuple[str] = None, + objects: Tuple[Any] = None, ) -> dict: """Reads data from given file or alternatively an xarray object and returns a filled template dictionary""" @@ -291,10 +294,9 @@ def read( # pylint: disable=too-many-branches fill_data_indices_in_config(config_file_dict, x_array_loaded) for key, value in config_file_dict.items(): - if isinstance(value, str) and ":" in value: precursor = value.split(":")[0] - value = value[value.index(":") + 1:] + value = value[value.index(":") + 1 :] # Filling in the data and axes along with units from xarray if precursor == "@data": @@ -308,8 +310,7 @@ def read( # pylint: disable=too-many-branches except ValueError: print( - f"Incorrect axis name corresponding to " - f"the path {key}", + f"Incorrect axis name corresponding to " f"the path {key}", ) except AttributeError: diff --git a/pynxtools/dataconverter/readers/rii_database/reader.py b/pynxtools/dataconverter/readers/rii_database/reader.py index 32fb7c5fa..6b3a3f3d1 100644 --- a/pynxtools/dataconverter/readers/rii_database/reader.py +++ b/pynxtools/dataconverter/readers/rii_database/reader.py @@ -44,7 +44,7 @@ def __init__(self, *args, **kwargs): def read_dispersion(self, filename: str): """Reads the dispersion from the give filename""" - download_bibtex = self.kwargs.get('download_bibtex', False) + download_bibtex = self.kwargs.get("download_bibtex", False) return DispersionReader(download_bibtex).read_dispersion(filename) def appdef_defaults(self) -> Dict[str, Any]: diff --git a/pynxtools/dataconverter/readers/shared/map_concepts/mapping_functors.py b/pynxtools/dataconverter/readers/shared/map_concepts/mapping_functors.py index 6ee855b84..ca09fb5f2 100644 --- a/pynxtools/dataconverter/readers/shared/map_concepts/mapping_functors.py +++ 
b/pynxtools/dataconverter/readers/shared/map_concepts/mapping_functors.py @@ -44,8 +44,11 @@ def convert_iso8601_modifier(terms, dct: dict): if isinstance(terms, str): if terms in dct.keys(): return None - elif (isinstance(terms, list)) and (len(terms) == 2) \ - and (all(isinstance(entry, str) for entry in terms) is True): + elif ( + (isinstance(terms, list)) + and (len(terms) == 2) + and (all(isinstance(entry, str) for entry in terms) is True) + ): # assume the first argument is a local time # assume the second argument is a timezone string if terms[0] in dct.keys() and terms[1] in dct.keys(): @@ -56,13 +59,15 @@ def convert_iso8601_modifier(terms, dct: dict): date_time_str = dct[terms[0]].replace("T", " ") time_zone_str = dct[terms[1]] if time_zone_str in pytz.all_timezones: - date_time_obj \ - = datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S.%f') - utc_time_zone_aware \ - = pytz.timezone(time_zone_str).localize(date_time_obj) + date_time_obj = datetime.strptime( + date_time_str, "%Y-%m-%d %H:%M:%S.%f" + ) + utc_time_zone_aware = pytz.timezone(time_zone_str).localize( + date_time_obj + ) return utc_time_zone_aware else: - raise ValueError('Invalid timezone string!') + raise ValueError("Invalid timezone string!") return None else: return None @@ -98,6 +103,7 @@ def apply_modifier(modifier, dct: dict): # modd = {"fun": "convert_iso8601", "terms": ["data_modified", "timezone"]} # print(apply_modifier(modd, yml)) + def variadic_path_to_specific_path(path: str, instance_identifier: list): """Transforms a variadic path to an actual path with instances.""" if (path is not None) and (path != ""): diff --git a/pynxtools/dataconverter/readers/shared/shared_utils.py b/pynxtools/dataconverter/readers/shared/shared_utils.py index 629e29a0f..71d7af03c 100644 --- a/pynxtools/dataconverter/readers/shared/shared_utils.py +++ b/pynxtools/dataconverter/readers/shared/shared_utils.py @@ -39,20 +39,18 @@ def get_repo_last_commit() -> str: class NxObject: # pylint: disable=R0903 """An object in a graph e.g. a field or group in NeXus.""" - def __init__(self, - name: str = None, - unit: str = None, - dtype=str, - value=None, - **kwargs): + def __init__( + self, name: str = None, unit: str = None, dtype=str, value=None, **kwargs + ): if name is not None: assert name != "", "Argument name needs to be a non-empty string !" if unit is not None: assert unit != "", "Argument unit needs to be a non-empty string !" assert dtype is not None, "Argument dtype must not be None !" if dtype is not None: - assert isinstance(dtype, type), \ - "Argument dtype needs a valid, ideally numpy, datatype !" + assert isinstance( + dtype, type + ), "Argument dtype needs a valid, ideally numpy, datatype !" # ##MK::if value is not None: self.is_a = "NXobject" self.is_attr = False # if True indicates object is attribute @@ -70,8 +68,9 @@ def __init__(self, self.value = None # value should be a numpy scalar, tensor, or string if possible if "is_attr" in kwargs: - assert isinstance(kwargs["is_attr"], bool), \ - "Kwarg is_attr needs to be a boolean !" + assert isinstance( + kwargs["is_attr"], bool + ), "Kwarg is_attr needs to be a boolean !" 
self.is_attr = kwargs["is_attr"] def print(self): @@ -83,5 +82,6 @@ def print(self): print("dtype: ") print(self.dtype) + # test = NxObject(name="test", unit="baud", dtype=np.uint32, value=32000) # test.print() diff --git a/pynxtools/dataconverter/readers/transmission/metadata_parsers.py b/pynxtools/dataconverter/readers/transmission/metadata_parsers.py index ca4c38e83..ca15b26a8 100644 --- a/pynxtools/dataconverter/readers/transmission/metadata_parsers.py +++ b/pynxtools/dataconverter/readers/transmission/metadata_parsers.py @@ -19,8 +19,8 @@ from datetime import datetime # The min & max wavelength the instrument can measure -MIN_WAVELENGTH = 190. -MAX_WAVELENGTH = 3350. +MIN_WAVELENGTH = 190.0 +MAX_WAVELENGTH = 3350.0 def read_start_date(metadata: list) -> str: @@ -42,7 +42,7 @@ def read_ref_attenuator(metadata: list) -> int: def is_depolarizer_on(metadata: list) -> bool: """Reads whether the depolarizer was active during the measurement""" - return metadata[46] == 'on' + return metadata[46] == "on" def read_uv_monochromator_range(metadata: list) -> list: diff --git a/pynxtools/dataconverter/readers/transmission/reader.py b/pynxtools/dataconverter/readers/transmission/reader.py index ccc94374e..5863a406c 100644 --- a/pynxtools/dataconverter/readers/transmission/reader.py +++ b/pynxtools/dataconverter/readers/transmission/reader.py @@ -33,18 +33,13 @@ METADATA_MAP: Dict[str, Any] = { "/ENTRY[entry]/SAMPLE[sample]/name": 8, "/ENTRY[entry]/start_time": mpars.read_start_date, - "/ENTRY[entry]/instrument/sample_attenuator/attenuator_transmission": - mpars.read_sample_attenuator, - "/ENTRY[entry]/instrument/ref_attenuator/attenuator_transmission": - mpars.read_ref_attenuator, + "/ENTRY[entry]/instrument/sample_attenuator/attenuator_transmission": mpars.read_sample_attenuator, + "/ENTRY[entry]/instrument/ref_attenuator/attenuator_transmission": mpars.read_ref_attenuator, "/ENTRY[entry]/instrument/common_beam_mask/y_gap": 45, "/ENTRY[entry]/instrument/polarizer": 48, - "/ENTRY[entry]/instrument/common_beam_depolarizer": - mpars.is_depolarizer_on, - "/ENTRY[entry]/instrument/spectrometer/GRATING[grating]/wavelength_range": - mpars.read_uv_monochromator_range, - "/ENTRY[entry]/instrument/spectrometer/GRATING[grating1]/wavelength_range": - mpars.read_visir_monochromator_range, + "/ENTRY[entry]/instrument/common_beam_depolarizer": mpars.is_depolarizer_on, + "/ENTRY[entry]/instrument/spectrometer/GRATING[grating]/wavelength_range": mpars.read_uv_monochromator_range, + "/ENTRY[entry]/instrument/spectrometer/GRATING[grating1]/wavelength_range": mpars.read_visir_monochromator_range, "/ENTRY[entry]/instrument/SOURCE[source]/type": "D2", "/ENTRY[entry]/instrument/SOURCE[source]/wavelength_range": mpars.get_d2_range, "/ENTRY[entry]/instrument/SOURCE[source1]/type": "halogen", diff --git a/pynxtools/dataconverter/readers/utils.py b/pynxtools/dataconverter/readers/utils.py index c1826d744..f40f98be0 100644 --- a/pynxtools/dataconverter/readers/utils.py +++ b/pynxtools/dataconverter/readers/utils.py @@ -27,7 +27,7 @@ @dataclass -class FlattenSettings(): +class FlattenSettings: """Settings for flattening operations. Args: @@ -38,6 +38,7 @@ class FlattenSettings(): Parent key of the dictionary. Defaults to "/ENTRY[entry]". sep (str, optional): Separator for the keys. Defaults to "/". 
""" + dic: Mapping convert_dict: dict replace_nested: dict @@ -141,11 +142,14 @@ def flatten_and_replace(settings: FlattenSettings) -> dict: for key, val in settings.dic.items(): if settings.ignore_keys and key in settings.ignore_keys: continue - new_key = settings.parent_key + settings.sep + settings.convert_dict.get(key, key) + new_key = ( + settings.parent_key + settings.sep + settings.convert_dict.get(key, key) + ) if isinstance(val, Mapping): items.extend( - flatten_and_replace(replace(settings, dic=val, parent_key=new_key)) - .items() + flatten_and_replace( + replace(settings, dic=val, parent_key=new_key) + ).items() if not (settings.is_in_section and is_value_unit_pair(val)) else [[new_key, val]] ) @@ -163,9 +167,9 @@ def flatten_and_replace(settings: FlattenSettings) -> dict: def parse_yml( - file_path: str, - convert_dict: Optional[dict] = None, - replace_nested: Optional[dict] = None + file_path: str, + convert_dict: Optional[dict] = None, + replace_nested: Optional[dict] = None, ) -> Dict[str, Any]: """Parses a metadata yaml file into a dictionary. @@ -183,12 +187,12 @@ def parse_yml( convert_dict["unit"] = "@units" - with open(file_path, encoding='utf-8') as file: + with open(file_path, encoding="utf-8") as file: return flatten_and_replace( FlattenSettings( dic=yaml.safe_load(file), convert_dict=convert_dict, - replace_nested=replace_nested + replace_nested=replace_nested, ) ) diff --git a/pynxtools/dataconverter/readers/xps/file_parser.py b/pynxtools/dataconverter/readers/xps/file_parser.py index ff5190b94..cc40323a2 100644 --- a/pynxtools/dataconverter/readers/xps/file_parser.py +++ b/pynxtools/dataconverter/readers/xps/file_parser.py @@ -24,6 +24,7 @@ from typing import List, Dict from pynxtools.dataconverter.readers.xps.sle.sle_specs import SleMapperSpecs + # from pynxtools.dataconverter.readers.xps.slh.slh_specs import SlhMapperSpecs from pynxtools.dataconverter.readers.xps.txt.txt_scienta import TxtMapperScienta @@ -145,11 +146,7 @@ def _check_for_vendors_txt(cls, file: str) -> str: contents = txt_file.read() for vendor in vendor_dict: - vendor_options = [ - vendor, - vendor.upper(), - vendor.capitalize() - ] + vendor_options = [vendor, vendor.upper(), vendor.capitalize()] if any(vendor_opt in contents for vendor_opt in vendor_options): return vendor diff --git a/pynxtools/dataconverter/readers/xps/reader_utils.py b/pynxtools/dataconverter/readers/xps/reader_utils.py index 64f7a091f..2b3426955 100644 --- a/pynxtools/dataconverter/readers/xps/reader_utils.py +++ b/pynxtools/dataconverter/readers/xps/reader_utils.py @@ -24,6 +24,7 @@ class XPSMapper(ABC): """Abstract base class from mapping from a parser to NXmpes template""" + def __init__(self): self.file = None self.raw_data: list = [] diff --git a/pynxtools/dataconverter/readers/xps/sle/sle_specs.py b/pynxtools/dataconverter/readers/xps/sle/sle_specs.py index dde9aa6ae..16f12d6a1 100644 --- a/pynxtools/dataconverter/readers/xps/sle/sle_specs.py +++ b/pynxtools/dataconverter/readers/xps/sle/sle_specs.py @@ -505,7 +505,7 @@ def _get_transmission(self, node_id): length = result[1] * buffer data = result[0] for i in range(0, length, buffer): - stream.append(struct.unpack(encoding, data[i: i + buffer])[0]) + stream.append(struct.unpack(encoding, data[i : i + buffer])[0]) return stream @@ -943,7 +943,7 @@ def _get_one_scan(self, raw_id): length = result[1] * buffer data = result[0] for i in range(0, length, buffer): - stream.append(struct.unpack(encoding, data[i: i + buffer])[0]) + stream.append(struct.unpack(encoding, data[i 
: i + buffer])[0]) return stream def _parse_external_channels(self, channel): diff --git a/pynxtools/dataconverter/readers/xps/txt/txt_vamas_export.py b/pynxtools/dataconverter/readers/xps/txt/txt_vamas_export.py index 975600f14..1e7ac030c 100644 --- a/pynxtools/dataconverter/readers/xps/txt/txt_vamas_export.py +++ b/pynxtools/dataconverter/readers/xps/txt/txt_vamas_export.py @@ -113,11 +113,10 @@ def construct_data(self): "sample": [], "data": [ "dwell_time", - "x_units" - "y_units", + "x_units" "y_units", "start_energy", "stop_energy", - "step_size" + "step_size", ], "region": ["region_name"], } @@ -311,8 +310,8 @@ def _separate_header_and_data(self, block): None. """ - header = block[:self.n_headerlines] - data = block[self.n_headerlines:] + header = block[: self.n_headerlines] + data = block[self.n_headerlines :] return header, data @@ -472,7 +471,7 @@ def _parse_blocks(self): blocks = [ list(g) for _, g in itertools.groupby(self.lines, lambda i: "Cycle " in i) ] - blocks = [operator.add(*blocks[i: i + 2]) for i in range(0, len(blocks), 2)] + blocks = [operator.add(*blocks[i : i + 2]) for i in range(0, len(blocks), 2)] return blocks diff --git a/pynxtools/dataconverter/readers/xps/vms/vamas.py b/pynxtools/dataconverter/readers/xps/vms/vamas.py index 5a14f205e..8a9ceb59b 100644 --- a/pynxtools/dataconverter/readers/xps/vms/vamas.py +++ b/pynxtools/dataconverter/readers/xps/vms/vamas.py @@ -248,10 +248,7 @@ def __init__(self): "experiment_id", "no_comment_lines", ], - "exp_var": [ - "exp_var_label", - "exp_var_unit" - ], + "exp_var": ["exp_var_label", "exp_var_unit"], "norm_header": [ "scan_mode", "nr_regions", @@ -507,16 +504,21 @@ def _get_scan_numbers_for_spectra(self, spectra): Same list of dicts, but each spectrum gets a scan number. """ - grouped_spectra = [list(y) for x, y in groupby( - sorted(spectra, - key=lambda x: (x['group_name'], x['spectrum_type'])), - lambda x: (x['group_name'], x['spectrum_type']))] + grouped_spectra = [ + list(y) + for x, y in groupby( + sorted(spectra, key=lambda x: (x["group_name"], x["spectrum_type"])), + lambda x: (x["group_name"], x["spectrum_type"]), + ) + ] for group in grouped_spectra: for i, spectrum in enumerate(group): spectrum["scan_no"] = i - flattened_spectra = [spectrum for group in grouped_spectra for spectrum in group] + flattened_spectra = [ + spectrum for group in grouped_spectra for spectrum in group + ] return flattened_spectra @@ -623,7 +625,8 @@ def build_list(self): class VamasParserRegular(VamasParser): - """ Parser for .vms files of type REGULAR""" + """Parser for .vms files of type REGULAR""" + def _parse_norm_block(self): """ Use this method when the NORM keyword is present. @@ -792,7 +795,7 @@ def _parse_map_block(self): return block def _add_data_values(self, block): - """ Add data values to a Vamas data block.""" + """Add data values to a Vamas data block.""" data_dict = {} start = float(block.abscissa_start) step = float(block.abscissa_step) @@ -813,7 +816,7 @@ def _add_data_values(self, block): data_array = list(np.array(self.data[: block.num_ord_values], dtype=float)) - self.data = self.data[block.num_ord_values:] + self.data = self.data[block.num_ord_values :] for var in range(block.no_variables): max_var = block.no_variables @@ -828,7 +831,8 @@ def _add_data_values(self, block): # THIS DOESN'T WORK SO FAR!! 
class VamasParserIrregular(VamasParser): - """ Parser for .vms files of type IRREGULAR""" + """Parser for .vms files of type IRREGULAR""" + def _parse_norm_block(self): """ Use this method when the NORM keyword is present. @@ -997,7 +1001,7 @@ def _parse_map_block(self): return block def _add_data_values(self, block): - """ Add data values to a Vamas data block.""" + """Add data values to a Vamas data block.""" data_dict = {} start = float(block.abscissa_start) step = float(block.abscissa_step) @@ -1016,9 +1020,9 @@ def _add_data_values(self, block): name = "y" + str(var) data_dict[name] = [] - data_array = list(np.array(self.data[:block.num_ord_values], dtype=float)) + data_array = list(np.array(self.data[: block.num_ord_values], dtype=float)) - self.data = self.data[block.num_ord_values:] + self.data = self.data[block.num_ord_values :] for var in range(block.no_variables): max_var = block.no_variables diff --git a/pynxtools/dataconverter/readers/xps/vms/vamas_data_model.py b/pynxtools/dataconverter/readers/xps/vms/vamas_data_model.py index faf4c851f..05e21bb64 100644 --- a/pynxtools/dataconverter/readers/xps/vms/vamas_data_model.py +++ b/pynxtools/dataconverter/readers/xps/vms/vamas_data_model.py @@ -25,6 +25,7 @@ @dataclass class VamasHeader: """An object to store the Vamas header information.""" + format_id: str = ( "VAMAS Surface Chemical Analysis Standard Data Transfer Format 1988 May 4" ) @@ -50,6 +51,7 @@ class VamasHeader: @dataclass class Block: """An object to store a block of spectrum data and meta-data.""" + block_id: str = "" sample_id: str = "" year: str = "" diff --git a/pynxtools/dataconverter/readers/xps/xml/xml_specs.py b/pynxtools/dataconverter/readers/xps/xml/xml_specs.py index 00ef19e9a..7e9d07ed7 100644 --- a/pynxtools/dataconverter/readers/xps/xml/xml_specs.py +++ b/pynxtools/dataconverter/readers/xps/xml/xml_specs.py @@ -214,7 +214,8 @@ def construct_data(self): class XmlParserSpecs: - """ Parser for SpecsLab2 XML data""" + """Parser for SpecsLab2 XML data""" + def __init__(self) -> None: self.metadata_dict: dict = {} self.entry_to_data: dict = {} @@ -344,7 +345,9 @@ def struct_fc_name_sc_value(self, element_, first_child, parent_path, skip_child for unit in units: if f"_[{unit}]" in section_nm_reslvr: section_nm_reslvr, _ = section_nm_reslvr.split("_") - self.metadata_dict[f"{parent_path}/" f"{section_nm_reslvr}/@unit"] = unit + self.metadata_dict[ + f"{parent_path}/" f"{section_nm_reslvr}/@unit" + ] = unit parent_path, self.tail_part_frm_struct = self.check_last_part_repetition( parent_path, self.tail_part_frm_struct, section_nm_reslvr @@ -631,9 +634,7 @@ def cumulate_counts_series( if not counts_length: counts_length = num_of_counts if counts_length != num_of_counts: - raise ValueError( - "Count number from all scans must be equal!!" - ) + raise ValueError("Count number from all scans must be equal!!") if scan_seq_elem.attrib["type_name"] == "Counts": counts_data = self.restructure_value( diff --git a/pynxtools/dataconverter/readers/xps/xy/xy_specs.py b/pynxtools/dataconverter/readers/xps/xy/xy_specs.py index 4dbfcf332..a312be789 100644 --- a/pynxtools/dataconverter/readers/xps/xy/xy_specs.py +++ b/pynxtools/dataconverter/readers/xps/xy/xy_specs.py @@ -348,7 +348,7 @@ def _separate_header(self): """ header = self.lines[: self.n_headerlines] - groups = self.lines[self.n_headerlines:] + groups = self.lines[self.n_headerlines :] return header, groups @@ -487,7 +487,7 @@ def _handle_cycles(self, region_data): Entries are as cycle_name: cycle_data. 
""" - cycle_pattern = re.compile(fr"{self.prefix} Cycle: \d\n", re.IGNORECASE) + cycle_pattern = re.compile(rf"{self.prefix} Cycle: \d\n", re.IGNORECASE) cycles = OrderedDict() cycle_line_nrs = {} @@ -531,7 +531,7 @@ def _handle_individual_cycles(self, cycle_data): Entries are as scan_name: scan_data. """ - spec_pattern = fr"{self.prefix} Cycle: \d, Curve: \d" + spec_pattern = rf"{self.prefix} Cycle: \d, Curve: \d" if self.export_settings["Separate Scan Data"]: spec_pattern += r", Scan: \d" if self.export_settings["Separate Channel Data"]: diff --git a/pynxtools/dataconverter/readers/xrd/config.py b/pynxtools/dataconverter/readers/xrd/config.py index 4d3757b10..10d5eddce 100644 --- a/pynxtools/dataconverter/readers/xrd/config.py +++ b/pynxtools/dataconverter/readers/xrd/config.py @@ -2,116 +2,167 @@ # pylint: disable=C0301 xrdml = { - "/ENTRY[entry]/2theta_plot/chi": {"xrdml_1.5": {"value": "", - "@units": "", - "@chi_indices": 0}, - }, - "/ENTRY[entry]/2theta_plot/intensity": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/intensities", - "@units": "counts/s"} - }, - "/ENTRY[entry]/2theta_plot/omega": {"xrdml_1.5": {"value": "", - "@units": "", - "@omega_indices": 1}, - }, + "/ENTRY[entry]/2theta_plot/chi": { + "xrdml_1.5": {"value": "", "@units": "", "@chi_indices": 0}, + }, + "/ENTRY[entry]/2theta_plot/intensity": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/intensities", + "@units": "counts/s", + } + }, + "/ENTRY[entry]/2theta_plot/omega": { + "xrdml_1.5": {"value": "", "@units": "", "@omega_indices": 1}, + }, "/ENTRY[entry]/2theta_plot/title": "Intensity Vs. Two Theta (deg.)", - "/ENTRY[entry]/2theta_plot/phi": {"xrdml_1.5": {"value": "", - "@units": "", - "@phi_indices": 0}, - }, - "/ENTRY[entry]/2theta_plot/two_theta": {"xrdml_1.5": {"value": "", - "@units": "deg", - "@two_theta_indices": 0}, - }, - "/ENTRY[entry]/COLLECTION[collection]/beam_attenuation_factors": {"xrdml_1.5": {"value": "/beamAttenuationFactors", - "@units": ""}, - }, - "/ENTRY[entry]/COLLECTION[collection]/omega/start": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/startPosition", - "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/unit"}, - }, - "/ENTRY[entry]/COLLECTION[collection]/omega/end": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/endPosition", - "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/unit"}, - }, - "/ENTRY[entry]/COLLECTION[collection]/omega/step": {"xrdml_1.5": {"value": "/xrdMeasurements/comment/entry_2/MinimumstepsizeOmega", - "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/unit"}, - }, - "/ENTRY[entry]/COLLECTION[collection]/2theta/start": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/startPosition", - "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/unit"}, - }, - "/ENTRY[entry]/COLLECTION[collection]/2theta/end": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/endPosition", - "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/unit"}, - }, - "/ENTRY[entry]/COLLECTION[collection]/2theta/step": {"xrdml_1.5": {"value": "/xrdMeasurements/comment/entry_2/Minimumstepsize2Theta", - "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/unit"}, - }, - "/ENTRY[entry]/COLLECTION[collection]/count_time": {"xrdml_1.5": {"value": 
"/xrdMeasurements/xrdMeasurement/scan/dataPoints/commonCountingTime", - "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/commonCountingTime/unit"}, - }, - "/ENTRY[entry]/COLLECTION[collection]/data_file": {"xrdml_1.5": {"value": ""} - }, - "/ENTRY[entry]/COLLECTION[collection]/goniometer_x": {"xrdml_1.5": {"value": "/X", - "@units": ""}, - }, - "/ENTRY[entry]/COLLECTION[collection]/goniometer_y": {"xrdml_1.5": {"value": "/Y", - "@units": ""}, - }, - "/ENTRY[entry]/COLLECTION[collection]/goniometer_z": {"xrdml_1.5": {"value": "/Z", - "@units": ""}, - }, - "/ENTRY[entry]/COLLECTION[collection]/measurement_type": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/measurementType", - "@units": ""}, - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/DETECTOR[detector]/integration_time": {"xrdml_1.5": {"value": "", - "@units": ""}, - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/DETECTOR[detector]/integration_time/@units": {"xrdml_1.5": {"value": "", - "@units": ""}, - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/DETECTOR[detector]/scan_axis": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/scan/scanAxis", - "@units": ""}, - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/DETECTOR[detector]/scan_mode": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/scan/mode", - "@units": ""}, - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/k_alpha_one": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/usedWavelength/kAlpha1", - "@units": "/xrdMeasurements/xrdMeasurement/usedWavelength/kAlpha1/unit"}, - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/k_alpha_two": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/usedWavelength/kAlpha2", - "@units": "/xrdMeasurements/xrdMeasurement/usedWavelength/kAlpha2/unit"}, - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/kbeta": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/usedWavelength/kBeta", - "@units": "/xrdMeasurements/xrdMeasurement/usedWavelength/kBeta/unit"}, - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/ratio_k_alphatwo_k_alphaone": {"xrdml_1.5": {"value": "", - "@units": ""} - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/xray_tube_current": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/current", - "@units": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/current/unit"} - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/source_peak_wavelength": {"xrdml_1.5": {"value": "", - "@units": ""} - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/xray_tube_material": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/anodeMaterial", - "@units": ""}, - }, - "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/xray_tube_voltage": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/tension", - "@units": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/tension/unit"} - }, - "/ENTRY[entry]/SAMPLE[sample]/prepared_by": {"xrdml_1.5": {"value": ""} - }, - "/ENTRY[entry]/SAMPLE[sample]/sample_id": {"xrdml_1.5": {"value": ""}, - }, - "/ENTRY[entry]/SAMPLE[sample]/sample_mode": {"xrdml_1.5": {"value": ""}, - }, - "/ENTRY[entry]/SAMPLE[sample]/sample_name": {"xrdml_1.5": {"value": ""}, - }, + "/ENTRY[entry]/2theta_plot/phi": { + "xrdml_1.5": {"value": "", "@units": "", "@phi_indices": 0}, + }, + "/ENTRY[entry]/2theta_plot/two_theta": { + "xrdml_1.5": {"value": "", "@units": "deg", "@two_theta_indices": 0}, + }, + 
"/ENTRY[entry]/COLLECTION[collection]/beam_attenuation_factors": { + "xrdml_1.5": {"value": "/beamAttenuationFactors", "@units": ""}, + }, + "/ENTRY[entry]/COLLECTION[collection]/omega/start": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/startPosition", + "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/unit", + }, + }, + "/ENTRY[entry]/COLLECTION[collection]/omega/end": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/endPosition", + "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/unit", + }, + }, + "/ENTRY[entry]/COLLECTION[collection]/omega/step": { + "xrdml_1.5": { + "value": "/xrdMeasurements/comment/entry_2/MinimumstepsizeOmega", + "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_2/unit", + }, + }, + "/ENTRY[entry]/COLLECTION[collection]/2theta/start": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/startPosition", + "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/unit", + }, + }, + "/ENTRY[entry]/COLLECTION[collection]/2theta/end": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/endPosition", + "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/unit", + }, + }, + "/ENTRY[entry]/COLLECTION[collection]/2theta/step": { + "xrdml_1.5": { + "value": "/xrdMeasurements/comment/entry_2/Minimumstepsize2Theta", + "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/positions_1/unit", + }, + }, + "/ENTRY[entry]/COLLECTION[collection]/count_time": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/commonCountingTime", + "@units": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/commonCountingTime/unit", + }, + }, + "/ENTRY[entry]/COLLECTION[collection]/data_file": {"xrdml_1.5": {"value": ""}}, + "/ENTRY[entry]/COLLECTION[collection]/goniometer_x": { + "xrdml_1.5": {"value": "/X", "@units": ""}, + }, + "/ENTRY[entry]/COLLECTION[collection]/goniometer_y": { + "xrdml_1.5": {"value": "/Y", "@units": ""}, + }, + "/ENTRY[entry]/COLLECTION[collection]/goniometer_z": { + "xrdml_1.5": {"value": "/Z", "@units": ""}, + }, + "/ENTRY[entry]/COLLECTION[collection]/measurement_type": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/measurementType", + "@units": "", + }, + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/DETECTOR[detector]/integration_time": { + "xrdml_1.5": {"value": "", "@units": ""}, + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/DETECTOR[detector]/integration_time/@units": { + "xrdml_1.5": {"value": "", "@units": ""}, + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/DETECTOR[detector]/scan_axis": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/scan/scanAxis", + "@units": "", + }, + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/DETECTOR[detector]/scan_mode": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/scan/mode", + "@units": "", + }, + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/k_alpha_one": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/usedWavelength/kAlpha1", + "@units": "/xrdMeasurements/xrdMeasurement/usedWavelength/kAlpha1/unit", + }, + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/k_alpha_two": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/usedWavelength/kAlpha2", + "@units": "/xrdMeasurements/xrdMeasurement/usedWavelength/kAlpha2/unit", + }, + }, + 
"/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/kbeta": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/usedWavelength/kBeta", + "@units": "/xrdMeasurements/xrdMeasurement/usedWavelength/kBeta/unit", + }, + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/ratio_k_alphatwo_k_alphaone": { + "xrdml_1.5": {"value": "", "@units": ""} + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/xray_tube_current": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/current", + "@units": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/current/unit", + } + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/source_peak_wavelength": { + "xrdml_1.5": {"value": "", "@units": ""} + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/xray_tube_material": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/anodeMaterial", + "@units": "", + }, + }, + "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/xray_tube_voltage": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/tension", + "@units": "/xrdMeasurements/xrdMeasurement/incidentBeamPath/xRayTube/tension/unit", + } + }, + "/ENTRY[entry]/SAMPLE[sample]/prepared_by": {"xrdml_1.5": {"value": ""}}, + "/ENTRY[entry]/SAMPLE[sample]/sample_id": { + "xrdml_1.5": {"value": ""}, + }, + "/ENTRY[entry]/SAMPLE[sample]/sample_mode": { + "xrdml_1.5": {"value": ""}, + }, + "/ENTRY[entry]/SAMPLE[sample]/sample_name": { + "xrdml_1.5": {"value": ""}, + }, "/ENTRY[entry]/definition": "NXxrd_pan", "/ENTRY[entry]/method": "X-Ray Diffraction (XRD)", - "/ENTRY[entry]/q_plot/intensity": {"xrdml_1.5": {"value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/intensities", - "@units": "counts/s"}, - }, - "/ENTRY[entry]/q_plot/q": {"xrdml_1.5": {"value": "", - "@units": ""}, - }, + "/ENTRY[entry]/q_plot/intensity": { + "xrdml_1.5": { + "value": "/xrdMeasurements/xrdMeasurement/scan/dataPoints/intensities", + "@units": "counts/s", + }, + }, + "/ENTRY[entry]/q_plot/q": { + "xrdml_1.5": {"value": "", "@units": ""}, + }, "/@default": "entry", "/ENTRY[entry]/@default": "2theta_plot", } diff --git a/pynxtools/dataconverter/readers/xrd/reader.py b/pynxtools/dataconverter/readers/xrd/reader.py index 242498790..246872a76 100644 --- a/pynxtools/dataconverter/readers/xrd/reader.py +++ b/pynxtools/dataconverter/readers/xrd/reader.py @@ -23,22 +23,24 @@ import yaml -from pynxtools.dataconverter.helpers import (generate_template_from_nxdl, - validate_data_dict) +from pynxtools.dataconverter.helpers import ( + generate_template_from_nxdl, + validate_data_dict, +) from pynxtools.dataconverter.template import Template from pynxtools.dataconverter.readers.xrd.xrd_parser import parse_and_fill_template from pynxtools.dataconverter.readers.utils import flatten_and_replace, FlattenSettings from pynxtools.dataconverter.readers.base.reader import BaseReader CONVERT_DICT: Dict[str, str] = { - 'unit': '@units', - 'Instrument': 'INSTRUMENT[instrument]', - 'Source': 'SOURCE[source]', - 'Detector': 'DETECTOR[detector]', - 'Collection': 'COLLECTION[collection]', - 'Sample': 'SAMPLE[sample]', - 'version': '@version', - 'User': 'USER[user]', + "unit": "@units", + "Instrument": "INSTRUMENT[instrument]", + "Source": "SOURCE[source]", + "Detector": "DETECTOR[detector]", + "Collection": "COLLECTION[collection]", + "Sample": "SAMPLE[sample]", + "version": "@version", + "User": "USER[user]", } @@ -70,20 +72,20 @@ def 
get_template_from_nxdl_name(nxdl_name): """ nxdl_file = nxdl_name + ".nxdl.xml" current_path = Path(__file__) - def_path = current_path.parent.parent.parent.parent / 'definitions' + def_path = current_path.parent.parent.parent.parent / "definitions" # Check contributed definitions - full_nxdl_path = Path(def_path, 'contributed_definitions', nxdl_file) + full_nxdl_path = Path(def_path, "contributed_definitions", nxdl_file) root = None if full_nxdl_path.exists(): root = ET.parse(full_nxdl_path).getroot() else: # Check application definition - full_nxdl_path = Path(def_path, 'applications', nxdl_file) + full_nxdl_path = Path(def_path, "applications", nxdl_file) if root is None and full_nxdl_path.exists(): root = ET.parse(full_nxdl_path).getroot() else: - full_nxdl_path = Path(def_path, 'base_classes', nxdl_file) + full_nxdl_path = Path(def_path, "base_classes", nxdl_file) if root is None and full_nxdl_path.exists(): root = ET.parse(full_nxdl_path).getroot() @@ -113,8 +115,7 @@ def get_template_from_xrd_reader(nxdl_name, file_paths): template = get_template_from_nxdl_name(nxdl_name) - data = XRDReader().read(template=template, - file_paths=file_paths) + data = XRDReader().read(template=template, file_paths=file_paths) validate_data_dict(template=template, data=data, nxdl_root=ROOT) return data @@ -125,10 +126,12 @@ class XRDReader(BaseReader): supported_nxdls = ["NXxrd_pan"] - def read(self, - template: dict = None, - file_paths: Tuple[str] = None, - objects: Tuple[Any] = None): + def read( + self, + template: dict = None, + file_paths: Tuple[str] = None, + objects: Tuple[Any] = None, + ): """General read method to prepare the template.""" if not isinstance(file_paths, tuple) and not isinstance(file_paths, list): @@ -140,15 +143,14 @@ def read(self, xrd_file_ext: str = "" for file in file_paths: ext = "".join(Path(file).suffixes) - if ext == '.json': + if ext == ".json": with open(file, mode="r", encoding="utf-8") as fl_obj: config_dict = json.load(fl_obj) - elif ext in ['.yaml', '.yml']: + elif ext in [".yaml", ".yml"]: with open(file, mode="r", encoding="utf-8") as fl_obj: eln_dict = flatten_and_replace( FlattenSettings( - yaml.safe_load(fl_obj), - CONVERT_DICT, REPLACE_NESTED + yaml.safe_load(fl_obj), CONVERT_DICT, REPLACE_NESTED ) ) elif ext in XRD_FILE_EXTENSIONS: @@ -157,19 +159,22 @@ def read(self, if xrd_file: parse_and_fill_template(template, xrd_file, config_dict, eln_dict) else: - raise ValueError(f"Allowed XRD experimental file extensions are" - f" {XRD_FILE_EXTENSIONS}, but found {xrd_file_ext}") + raise ValueError( + f"Allowed XRD experimental file extensions are" + f" {XRD_FILE_EXTENSIONS}, but found {xrd_file_ext}" + ) # Get rid of empty concepts and clean up the template for key, val in template.items(): - if val is None: del template[key] else: filled_template[key] = val if not filled_template.keys(): - raise ValueError("Reader could not read anything! Check for input files and the" - " corresponding extension.") + raise ValueError( + "Reader could not read anything! Check for input files and the" + " corresponding extension." 
+ ) return filled_template diff --git a/pynxtools/dataconverter/readers/xrd/xrd_helper.py b/pynxtools/dataconverter/readers/xrd/xrd_helper.py index 40874be50..22b5ec3f8 100644 --- a/pynxtools/dataconverter/readers/xrd/xrd_helper.py +++ b/pynxtools/dataconverter/readers/xrd/xrd_helper.py @@ -26,10 +26,12 @@ class KeyValueNotFoundWaring(Warning): """New Warning class""" -def get_a_value_or_warn(return_value="", - warning_catagory=KeyValueNotFoundWaring, - message="Key-value not found.", - stack_level=2): +def get_a_value_or_warn( + return_value="", + warning_catagory=KeyValueNotFoundWaring, + message="Key-value not found.", + stack_level=2, +): """Returns a value and raises a warning message.""" warnings.warn(f"\033[1;31m {message}:\033[0m]", warning_catagory, stack_level) @@ -42,8 +44,9 @@ def check_unit(unit: str): """ if unit is None: return unit - unit_map = {'Angstrom': '\u212B', - } + unit_map = { + "Angstrom": "\u212B", + } correct_unit = unit_map.get(unit, None) if correct_unit is None: return unit @@ -77,8 +80,9 @@ def feed_xrdml_to_template(template, xrd_dict, eln_dict, file_term, config_dict= } """ - def fill_template_from_config_data(config_dict: dict, template: Template, - xrd_dict: dict, file_term: str) -> None: + def fill_template_from_config_data( + config_dict: dict, template: Template, xrd_dict: dict, file_term: str + ) -> None: """ Parameters ---------- @@ -100,29 +104,32 @@ def fill_template_from_config_data(config_dict: dict, template: Template, if isinstance(val, dict): raw_data_des: dict = val.get(file_term, None) if raw_data_des is None: - raise ValueError(f"The config file does not have any data map" - f" for file {file_term}") + raise ValueError( + f"The config file does not have any data map" + f" for file {file_term}" + ) # the field does not have any value - if not raw_data_des.get('value', None): + if not raw_data_des.get("value", None): continue # Note: path is the data path in raw file for val_atr_key, path in raw_data_des.items(): # data or field val - if val_atr_key == 'value': + if val_atr_key == "value": template[nx_key] = xrd_dict.get(path, None) - elif path and val_atr_key == '@units': - template[nx_key + '/' + val_atr_key] = check_unit( - xrd_dict.get(path, None)) + elif path and val_atr_key == "@units": + template[nx_key + "/" + val_atr_key] = check_unit( + xrd_dict.get(path, None) + ) # attr e.g. 
     def two_theta_plot():
-
-        intesity = transform_to_intended_dt(template.get("/ENTRY[entry]/2theta_plot/intensity",
-                                                         None))
+        intesity = transform_to_intended_dt(
+            template.get("/ENTRY[entry]/2theta_plot/intensity", None)
+        )
         if intesity is not None:
             intsity_len = np.shape(intesity)[0]
         else:
@@ -130,30 +137,41 @@ def two_theta_plot():
         two_theta_gr = "/ENTRY[entry]/2theta_plot/"
         if template.get(f"{two_theta_gr}omega", None) is None:
-            omega_start = template.get("/ENTRY[entry]/COLLECTION[collection]/omega/start", None)
-            omega_end = template.get("/ENTRY[entry]/COLLECTION[collection]/omega/end", None)
+            omega_start = template.get(
+                "/ENTRY[entry]/COLLECTION[collection]/omega/start", None
+            )
+            omega_end = template.get(
+                "/ENTRY[entry]/COLLECTION[collection]/omega/end", None
+            )

-            template["/ENTRY[entry]/2theta_plot/omega"] = np.linspace(float(omega_start),
-                                                                      float(omega_end),
-                                                                      intsity_len)
+            template["/ENTRY[entry]/2theta_plot/omega"] = np.linspace(
+                float(omega_start), float(omega_end), intsity_len
+            )
         if template.get(f"{two_theta_gr}two_theta", None) is None:
-            tw_theta_start = template.get("/ENTRY[entry]/COLLECTION[collection]/2theta/start",
-                                          None)
-            tw_theta_end = template.get("/ENTRY[entry]/COLLECTION[collection]/2theta/end", None)
-            template[f"{two_theta_gr}two_theta"] = np.linspace(float(tw_theta_start),
-                                                               float(tw_theta_end),
-                                                               intsity_len)
+            tw_theta_start = template.get(
+                "/ENTRY[entry]/COLLECTION[collection]/2theta/start", None
+            )
+            tw_theta_end = template.get(
+                "/ENTRY[entry]/COLLECTION[collection]/2theta/end", None
+            )
+            template[f"{two_theta_gr}two_theta"] = np.linspace(
+                float(tw_theta_start), float(tw_theta_end), intsity_len
+            )
         template[two_theta_gr + "/" + "@axes"] = ["two_theta"]
         template[two_theta_gr + "/" + "@signal"] = "intensity"

     def q_plot():
         q_plot_gr = "/ENTRY[entry]/q_plot"
-        alpha_2 = template.get("/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/k_alpha_two",
-                               None)
-        alpha_1 = template.get("/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/k_alpha_one",
-                               None)
-        two_theta: np.ndarray = template.get("/ENTRY[entry]/2theta_plot/two_theta", None)
+        alpha_2 = template.get(
+            "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/k_alpha_two", None
+        )
+        alpha_1 = template.get(
+            "/ENTRY[entry]/INSTRUMENT[instrument]/SOURCE[source]/k_alpha_one", None
+        )
+        two_theta: np.ndarray = template.get(
+            "/ENTRY[entry]/2theta_plot/two_theta", None
+        )
         if two_theta is None:
             raise ValueError("Two-theta data is not found")
         if isinstance(two_theta, np.ndarray):
@@ -176,29 +194,40 @@ def handle_special_fields():
         key = "/ENTRY[entry]/COLLECTION[collection]/goniometer_x"
         gonio_x = template.get(key, None)

-        template[key] = gonio_x[0] if (isinstance(gonio_x, np.ndarray)
-                                       and gonio_x.shape == (1,)) else gonio_x
+        template[key] = (
+            gonio_x[0]
+            if (isinstance(gonio_x, np.ndarray) and gonio_x.shape == (1,))
+            else gonio_x
+        )

         key = "/ENTRY[entry]/COLLECTION[collection]/goniometer_y"
         gonio_y = template.get(key, None)

-        template[key] = gonio_y[0] if (isinstance(gonio_y, np.ndarray)
-                                       and gonio_y.shape == (1,)) else gonio_y
+        template[key] = (
+            gonio_y[0]
+            if (isinstance(gonio_y, np.ndarray) and gonio_y.shape == (1,))
+            else gonio_y
+        )

         key =
"/ENTRY[entry]/COLLECTION[collection]/goniometer_z" gonio_z = template.get(key, None) - template[key] = gonio_z[0] if (isinstance(gonio_z, np.ndarray) - and gonio_z.shape == (1,)) else gonio_z + template[key] = ( + gonio_z[0] + if (isinstance(gonio_z, np.ndarray) and gonio_z.shape == (1,)) + else gonio_z + ) key = "/ENTRY[entry]/COLLECTION[collection]/count_time" count_time = template.get(key, None) - template[key] = count_time[0] if (isinstance(count_time, np.ndarray) - and count_time.shape == (1,)) else count_time + template[key] = ( + count_time[0] + if (isinstance(count_time, np.ndarray) and count_time.shape == (1,)) + else count_time + ) - fill_template_from_config_data(config_dict, template, - xrd_dict, file_term) + fill_template_from_config_data(config_dict, template, xrd_dict, file_term) two_theta_plot() q_plot() handle_special_fields() @@ -273,11 +302,9 @@ def fill_template_from_eln_data(eln_data_dict, template): template[e_key] = transform_to_intended_dt(e_val) -def fill_nxdata_from_xrdml(template, - xrd_flattend_dict, - dt_nevigator_from_config_file, - data_group_concept - ): +def fill_nxdata_from_xrdml( + template, xrd_flattend_dict, dt_nevigator_from_config_file, data_group_concept +): """_summary_ Parameters diff --git a/pynxtools/dataconverter/readers/xrd/xrd_parser.py b/pynxtools/dataconverter/readers/xrd/xrd_parser.py index 9d944cad7..2dba3927e 100644 --- a/pynxtools/dataconverter/readers/xrd/xrd_parser.py +++ b/pynxtools/dataconverter/readers/xrd/xrd_parser.py @@ -23,12 +23,16 @@ from pathlib import Path import warnings import xml.etree.ElementTree as ET # for XML parsing -from pynxtools.dataconverter.helpers import transform_to_intended_dt, remove_namespace_from_tag +from pynxtools.dataconverter.helpers import ( + transform_to_intended_dt, + remove_namespace_from_tag, +) from pynxtools.dataconverter.readers.xrd.xrd_helper import feed_xrdml_to_template -def fill_slash_sep_dict_from_nested_dict(parent_path: str, nested_dict: dict, - slash_sep_dict: dict): +def fill_slash_sep_dict_from_nested_dict( + parent_path: str, nested_dict: dict, slash_sep_dict: dict +): """Convert a nested dict into slash separated dict. Extend slash_sep_dict by key (slash separated key) from nested dict. @@ -74,17 +78,17 @@ def __init__(self, file_path): self.find_version() # Important note for key-val pair separator list: preceding elements have precedence on the # on the following elements - self.key_val_pair_sprtr = (';', ',') + self.key_val_pair_sprtr = (";", ",") # Important note for key-val separator list: preceding elements have precedence on the # on the following elements - self.key_val_sprtr = ('=', ':') + self.key_val_sprtr = ("=", ":") def find_version(self): """To find xrdml file version.""" schema_loc = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation" # str: 'http://www.xrdml.com/XRDMeasurement/1.5 - version = self.xml_root.get(schema_loc).split(' ')[0] - self.xrdml_version = version.split('/')[-1] + version = self.xml_root.get(schema_loc).split(" ")[0] + self.xrdml_version = version.split("/")[-1] def get_slash_separated_xrd_dict(self): """Return a dict with slash separated key and value from xrd file. @@ -107,14 +111,14 @@ def handle_with_panalytical_module(self): Panalytical module extends and constructs some array data from experiment settings comes with xml file. 
""" - self.parse_each_elm(parent_path='/', xml_node=self.xml_root) + self.parse_each_elm(parent_path="/", xml_node=self.xml_root) nested_data_dict: Dict[str, any] = {} # Note: To use panalytical lib # Extract other numerical data e.g. 'hkl', 'Omega', '2Theta', CountTime etc # using panalytical_xml module # parsed_data = XRDMLFile(self.__file_path) # nested_data_dict = parsed_data.scan.ddict - fill_slash_sep_dict_from_nested_dict('/', nested_data_dict, self.__xrd_dict) + fill_slash_sep_dict_from_nested_dict("/", nested_data_dict, self.__xrd_dict) def process_node_text(self, parent_path, node_txt) -> None: """Processing text of node @@ -143,20 +147,26 @@ def process_node_text(self, parent_path, node_txt) -> None: for k_v_sep in self.key_val_sprtr: if k_v_sep in key_val: key, val = key_val.split(k_v_sep) - key = key.replace(' ', '') - self.__xrd_dict['/'.join([parent_path, key])] = val + key = key.replace(" ", "") + self.__xrd_dict["/".join([parent_path, key])] = val break # Handling array data comes as node text else: try: self.__xrd_dict[parent_path] = transform_to_intended_dt(node_txt) except ValueError: - warnings.warn(f'Element text {node_txt} is ignored from parseing!', - IgnoreNodeTextWarning) - - def parse_each_elm(self, parent_path, xml_node, - multi_childs_tag: str = '', - tag_extensions: Optional[List[int]] = None): + warnings.warn( + f"Element text {node_txt} is ignored from parseing!", + IgnoreNodeTextWarning, + ) + + def parse_each_elm( + self, + parent_path, + xml_node, + multi_childs_tag: str = "", + tag_extensions: Optional[List[int]] = None, + ): """Check each xml element and send the element to intended function. Parameters @@ -178,22 +188,27 @@ def parse_each_elm(self, parent_path, xml_node, tag = remove_namespace_from_tag(xml_node.tag) # Take care of special node of 'entry' tag - if tag == 'entry': - parent_path = self.parse_entry_elm(parent_path, xml_node, - multi_childs_tag, tag_extensions) + if tag == "entry": + parent_path = self.parse_entry_elm( + parent_path, xml_node, multi_childs_tag, tag_extensions + ) else: - parent_path = self.parse_general_elm(parent_path, xml_node, - multi_childs_tag, tag_extensions) + parent_path = self.parse_general_elm( + parent_path, xml_node, multi_childs_tag, tag_extensions + ) _, multi_childs_tag = self.has_multi_childs_with_same_tag(xml_node) # List of tag extensions for child nodes which have the same tag. tag_extensions = [0] for child in iter(xml_node): if child is not None: - self.parse_each_elm(parent_path, child, - multi_childs_tag, tag_extensions) + self.parse_each_elm( + parent_path, child, multi_childs_tag, tag_extensions + ) - def has_multi_childs_with_same_tag(self, parent_node: ET.Element) -> Tuple[bool, str]: + def has_multi_childs_with_same_tag( + self, parent_node: ET.Element + ) -> Tuple[bool, str]: """Check for multiple childs that have the same tag. Parameter: @@ -215,10 +230,11 @@ def has_multi_childs_with_same_tag(self, parent_node: ET.Element) -> Tuple[bool, if tag == temp_tag: return (True, tag) - return (False, '') + return (False, "") - def parse_general_elm(self, parent_path, xml_node, - multi_childs_tag, tag_extensions: List[int]): + def parse_general_elm( + self, parent_path, xml_node, multi_childs_tag, tag_extensions: List[int] + ): """Handle general element except entry element. 
Parameters ---------- @@ -240,22 +256,22 @@ def parse_general_elm(self, parent_path, xml_node, tag = remove_namespace_from_tag(xml_node.tag) if tag == multi_childs_tag: new_ext = tag_extensions[-1] + 1 - tag = tag + '_' + str(new_ext) + tag = tag + "_" + str(new_ext) tag_extensions.append(new_ext) - if parent_path == '/': + if parent_path == "/": parent_path = parent_path + tag else: # New parent path ends with element tag - parent_path = '/'.join([parent_path, tag]) + parent_path = "/".join([parent_path, tag]) node_attr = xml_node.attrib if node_attr: for key, val in node_attr.items(): # Some attr has namespace key = remove_namespace_from_tag(key) - key = key.replace(' ', '_') - path_extend = '/'.join([parent_path, key]) + key = key.replace(" ", "_") + path_extend = "/".join([parent_path, key]) self.__xrd_dict[path_extend] = val node_txt = xml_node.text @@ -264,8 +280,13 @@ def parse_general_elm(self, parent_path, xml_node, return parent_path - def parse_entry_elm(self, parent_path: str, xml_node: ET.Element, - multi_childs_tag: str, tag_extensions: List[int]): + def parse_entry_elm( + self, + parent_path: str, + xml_node: ET.Element, + multi_childs_tag: str, + tag_extensions: List[int], + ): """Handle entry element. Parameters @@ -291,20 +312,20 @@ def parse_entry_elm(self, parent_path: str, xml_node: ET.Element, if tag == multi_childs_tag: new_ext = tag_extensions[-1] + 1 tag_extensions.append(new_ext) - tag = tag + '_' + str(new_ext) + tag = tag + "_" + str(new_ext) - if parent_path == '/': - parent_path = '/' + tag + if parent_path == "/": + parent_path = "/" + tag else: # Parent path ends with element tag - parent_path = '/'.join([parent_path, tag]) + parent_path = "/".join([parent_path, tag]) node_attr = xml_node.attrib if node_attr: for key, val in node_attr.items(): # Some attributes have namespace key = remove_namespace_from_tag(key) - path_extend = '/'.join([parent_path, key]) + path_extend = "/".join([parent_path, key]) self.__xrd_dict[path_extend] = val # In entry element text must get special care on it @@ -333,7 +354,7 @@ def __init__(self, file_path): self.file_path = file_path self.file_parser = XRDMLParser(self.file_path) # termilnological name of file to read config file - self.file_term = 'xrdml_' + self.file_parser.xrdml_version + self.file_term = "xrdml_" + self.file_parser.xrdml_version def get_file_format(self): """Identifies the format of a given file. @@ -343,7 +364,7 @@ def get_file_format(self): str: The file extension of the file. """ - file_extension = ''.join(Path(self.file_path).suffixes) + file_extension = "".join(Path(self.file_path).suffixes) return file_extension def parse_xrdml(self): @@ -398,14 +419,20 @@ def parse_and_populate_template(self, template, config_dict, eln_dict): """ xrd_dict = self.parse() - if len(config_dict) == 0 and self.file_parser.xrdml_version == '1.5': + if len(config_dict) == 0 and self.file_parser.xrdml_version == "1.5": from pynxtools.dataconverter.readers.xrd.config import xrdml + config_dict = xrdml - feed_xrdml_to_template(template, xrd_dict, eln_dict, - file_term=self.file_term, config_dict=config_dict) + feed_xrdml_to_template( + template, + xrd_dict, + eln_dict, + file_term=self.file_term, + config_dict=config_dict, + ) def parse(self): - '''Parses the file based on its format. + """Parses the file based on its format. Returns: dict @@ -413,7 +440,7 @@ def parse(self): Raises: ValueError: If the file format is unsupported. 
-    '''
+    """
         file_format = self.get_file_format()
         slash_sep_dict = {}
         if file_format == ".xrdml":
diff --git a/pynxtools/dataconverter/template.py b/pynxtools/dataconverter/template.py
index fa6907d36..b01a6dd05 100644
--- a/pynxtools/dataconverter/template.py
+++ b/pynxtools/dataconverter/template.py
@@ -47,7 +47,12 @@ def __init__(self, template=None, **kwargs):

     def get_accumulated_dict(self):
         """Returns a dictionary of all the optionalities merged into one."""
-        return {**self.optional, **self.recommended, **self.required, **self.undocumented}
+        return {
+            **self.optional,
+            **self.recommended,
+            **self.required,
+            **self.undocumented,
+        }

     def __repr__(self):
         """Returns a unique string representation for the Template object."""
@@ -80,16 +85,20 @@ def __setitem__(self, k, v):
         elif k == "lone_groups":
             self.lone_groups.append(v)
         else:
-            raise KeyError("You cannot add non paths to the root template object. "
-                           "Place them appropriately e.g. template[\"optional\"]"
-                           "[\"/ENTRY[entry]/data/path\"]")
+            raise KeyError(
+                "You cannot add non-path keys to the root template object. "
+                'Place them appropriately, e.g. template["optional"]'
+                '["/ENTRY[entry]/data/path"]'
+            )

     def keys(self):
         """Returns the list of keys stored in the Template object."""
-        return list(self.optional.keys()) + \
-            list(self.recommended.keys()) + \
-            list(self.required.keys()) + \
-            list(self.undocumented.keys())
+        return (
+            list(self.optional.keys())
+            + list(self.recommended.keys())
+            + list(self.required.keys())
+            + list(self.undocumented.keys())
+        )

     def items(self):
         """Returns a list of tuples of key, value stored in the Template object."""
@@ -118,12 +127,14 @@ def __contains__(self, k):
         """
         Supports in operator for the nested Template keys
         """
-        return any([
-            k in self.optional,
-            k in self.recommended,
-            k in self.undocumented,
-            k in self.required
-        ])
+        return any(
+            [
+                k in self.optional,
+                k in self.recommended,
+                k in self.undocumented,
+                k in self.required,
+            ]
+        )

     def get(self, key: str, default=None):
         """Proxies the get function to our internal __getitem__"""
@@ -150,23 +161,37 @@ def __getitem__(self, k):
                 return self.undocumented[k]
             if k in ("required", "optional", "recommended", "undocumented"):
                 return self.get_optionality(k)
-        raise KeyError("Only paths starting with '/' or one of [optional_parents, "
-                       "lone_groups, required, optional, recommended, undocumented] can be used.")
+        raise KeyError(
+            "Only paths starting with '/' or one of [optional_parents, "
+            "lone_groups, required, optional, recommended, undocumented] can be used."
+        )
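As a quick illustration of the optionality buckets this class manages, a small sketch (the path key is hypothetical):

    # Sketch: path keys are routed into per-optionality dicts.
    template = Template()
    template["optional"]["/ENTRY[entry]/data/path"] = 1
    assert "/ENTRY[entry]/data/path" in template  # __contains__ checks every bucket
    assert "/ENTRY[entry]/data/path" in template.keys()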

     def clear(self):
         """Clears all data stored in the Template object."""
-        for del_dict in (self.optional, self.recommended, self.required, self.undocumented):
+        for del_dict in (
+            self.optional,
+            self.recommended,
+            self.required,
+            self.undocumented,
+        ):
             del_dict.clear()

     def rename_entry(self, old_name: str, new_name: str, deepcopy=True):
         """Rename all entries under old name to new name."""
-        for internal_dict in (self.optional, self.recommended, self.required, self.undocumented):
+        for internal_dict in (
+            self.optional,
+            self.recommended,
+            self.required,
+            self.undocumented,
+        ):
             keys = list(internal_dict.keys())
             for key in keys:
                 entry_name = helpers.get_name_from_data_dict_entry(key.split("/")[1])

                 entry_search_term = f"{entry_name}]"
-                rest_of_path = key[key.index(entry_search_term) + len(entry_search_term):]
+                rest_of_path = key[
+                    key.index(entry_search_term) + len(entry_search_term) :
+                ]
                 if entry_name == old_name:
                     value = internal_dict[key] if deepcopy else None
                     internal_dict[f"/ENTRY[{new_name}]{rest_of_path}"] = value
diff --git a/pynxtools/dataconverter/writer.py b/pynxtools/dataconverter/writer.py
index 81b3045da..82ca21198 100644
--- a/pynxtools/dataconverter/writer.py
+++ b/pynxtools/dataconverter/writer.py
@@ -54,30 +54,30 @@ def is_not_data_empty(value) -> bool:

 def get_namespace(element) -> str:
     """Extracts the namespace for elements in the NXDL"""
-    return element.tag[element.tag.index("{"):element.tag.rindex("}") + 1]
+    return element.tag[element.tag.index("{") : element.tag.rindex("}") + 1]

 def split_link(data, output_path):
     """Handle the special syntax used in the reader for the dataset links.
-Split the file:path variable in two variables file and path.
-If multiple datasets are provided, the function returns two lists"""
+    Split the file:path variable into the two variables file and path.
+ If multiple datasets are provided, the function returns two lists""" - if not isinstance(data['link'], list): - if ':' in data['link']: - file = data['link'].split(":", 1)[0] - path = data['link'].split(":", 1)[1] - elif ':' not in data['link']: + if not isinstance(data["link"], list): + if ":" in data["link"]: + file = data["link"].split(":", 1)[0] + path = data["link"].split(":", 1)[1] + elif ":" not in data["link"]: file = output_path - path = data['link'] + path = data["link"] else: file = [] path = [] - for dataset in data['link']: - if ':' in dataset: + for dataset in data["link"]: + if ":" in dataset: file.append(dataset.split(":", 1)[0]) path.append(dataset.split(":", 1)[1]) - elif ':' not in data['link']: + elif ":" not in data["link"]: file.append(output_path) path.append(dataset) @@ -87,20 +87,22 @@ def split_link(data, output_path): def handle_shape_entries(data, file, path): """slice generation via the key shape""" new_shape = [] - for dim, val in enumerate(data['shape']): + for dim, val in enumerate(data["shape"]): if isinstance(val, slice): start = val.start if val.start is not None else 0 - stop = val.stop if val.stop is not None else h5py.File(file, 'r')[path].shape[dim] + stop = ( + val.stop + if val.stop is not None + else h5py.File(file, "r")[path].shape[dim] + ) step = val.step if val.step is not None else 1 new_shape.append(int((stop - start) / step)) if not new_shape: new_shape = [1] - layout = h5py.VirtualLayout(shape=tuple(new_shape), - dtype=np.float64) - vsource = h5py.VirtualSource(file, - path, - shape=h5py.File(file, 'r')[path].shape - )[data['shape']] + layout = h5py.VirtualLayout(shape=tuple(new_shape), dtype=np.float64) + vsource = h5py.VirtualSource(file, path, shape=h5py.File(file, "r")[path].shape)[ + data["shape"] + ] layout[:] = vsource return layout @@ -109,61 +111,68 @@ def handle_shape_entries(data, file, path): def handle_dicts_entries(data, grp, entry_name, output_path, path): """Handle function for dictionaries found as value of the nexus file. 
-Several cases can be encoutered:
-- Data to slice and place in virtual datasets
-- Concatenate dataset in one virtual dataset
-- Internal links
-- External links
-- compression label
-"""
-    if 'link' in data:
+    Several cases can be encountered:
+    - Data to slice and place in virtual datasets
+    - Concatenate dataset in one virtual dataset
+    - Internal links
+    - External links
+    - compression label"""
+    if "link" in data:
         file, path = split_link(data, output_path)
     # generate virtual datasets from slices
-    if 'shape' in data.keys():
+    if "shape" in data.keys():
         layout = handle_shape_entries(data, file, path)
         grp.create_virtual_dataset(entry_name, layout)
     # multiple datasets to concatenate
-    elif 'link' in data.keys() and isinstance(data['link'], list):
+    elif "link" in data.keys() and isinstance(data["link"], list):
         total_length = 0
         sources = []
         for index, source_file in enumerate(file):
-            vsource = h5py.VirtualSource(source_file,
-                                         path[index],
-                                         shape=h5py.File(source_file, 'r')[path[index]].shape)
+            vsource = h5py.VirtualSource(
+                source_file,
+                path[index],
+                shape=h5py.File(source_file, "r")[path[index]].shape,
+            )
             total_length += vsource.shape[0]
             sources.append(vsource)
         layout = h5py.VirtualLayout(shape=total_length, dtype=np.float64)
         offset = 0
         for vsource in sources:
-            layout[offset:offset + vsource.shape[0]] = vsource
+            layout[offset : offset + vsource.shape[0]] = vsource
             offset += vsource.shape[0]
         grp.create_virtual_dataset(entry_name, layout, fillvalue=0)
     # internal and external links
-    elif 'link' in data.keys():
-        if ':/' not in data['link']:
+    elif "link" in data.keys():
+        if ":/" not in data["link"]:
             grp[entry_name] = h5py.SoftLink(path)  # internal link
         else:
             grp[entry_name] = h5py.ExternalLink(file, path)  # external link
-    elif 'compress' in data.keys():
+    elif "compress" in data.keys():
         if not (isinstance(data["compress"], str) or np.isscalar(data["compress"])):
             strength = 9  # strongest compression is space efficient but can take long
-            accept = ("strength" in data.keys()) \
-                and (isinstance(data['strength'], int)) \
-                and (data["strength"] >= 0) \
+            accept = (
+                ("strength" in data.keys())
+                and (isinstance(data["strength"], int))
+                and (data["strength"] >= 0)
                 and (data["strength"] <= 9)
+            )
             if accept is True:
                 strength = data["strength"]
-            grp.create_dataset(entry_name,
-                               data=data["compress"],
-                               compression="gzip",
-                               chunks=True,
-                               compression_opts=strength)
+            grp.create_dataset(
+                entry_name,
+                data=data["compress"],
+                compression="gzip",
+                chunks=True,
+                compression_opts=strength,
+            )
         else:
             grp.create_dataset(entry_name, data=data["compress"])
     else:
-        raise InvalidDictProvided("A dictionary was provided to the template but it didn't"
-                                  " fall into any of the know cases of handling"
-                                  " dictionaries. This occured for: " + entry_name)
+        raise InvalidDictProvided(
+            "A dictionary was provided to the template but it didn't"
+            " fall into any of the known cases of handling"
+            " dictionaries. This occurred for: " + entry_name
+        )
     # Check whether link has been stabilished or not
     try:
         return grp[entry_name]
     except KeyError:
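To summarize the dispatch above, a sketch of the value shapes handle_dicts_entries distinguishes (file and dataset names invented for illustration):

    {"link": "/entry/data/raw"}                         # internal link -> h5py.SoftLink
    {"link": "other.nxs:/entry/data/raw"}               # external link -> h5py.ExternalLink
    {"link": "raw.h5:/data", "shape": (slice(0, 8),)}   # sliced virtual dataset
    {"link": ["a.h5:/data", "b.h5:/data"]}              # concatenated virtual dataset
    {"compress": [1, 2, 3], "strength": 5}              # gzip-compressed dataset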
""" - def __init__(self, data: dict = None, - nxdl_f_path: str = None, - output_path: str = None): + def __init__( + self, data: dict = None, nxdl_f_path: str = None, output_path: str = None + ): """Constructs the necessary objects required by the Writer class.""" self.data = data self.nxdl_f_path = nxdl_f_path @@ -201,7 +210,7 @@ def __init__(self, data: dict = None, self.nxdl_data = ET.parse(self.nxdl_f_path).getroot() self.nxs_namespace = get_namespace(self.nxdl_data) - def __nxdl_to_attrs(self, path: str = '/') -> dict: + def __nxdl_to_attrs(self, path: str = "/") -> dict: """ Return a dictionary of all the attributes at the given path in the NXDL and the required attribute values that were requested in the NXDL from the data. @@ -212,7 +221,8 @@ def __nxdl_to_attrs(self, path: str = '/') -> dict: try: elem = nexus.get_node_at_nxdl_path( - nxdl_path, elem=copy.deepcopy(self.nxdl_data)) + nxdl_path, elem=copy.deepcopy(self.nxdl_data) + ) except nexus.NxdlAttributeError: return None @@ -224,13 +234,13 @@ def __nxdl_to_attrs(self, path: str = '/') -> dict: for attr_name in elem.findall(f"{self.nxs_namespace}attribute"): key = f"{path}/@{attr_name.get('name')}" if key in self.data: - elem.attrib[attr_name.get('name')] = self.data[key] + elem.attrib[attr_name.get("name")] = self.data[key] return elem.attrib def ensure_and_get_parent_node(self, path: str, undocumented_paths) -> h5py.Group: """Returns the parent if it exists for a given path else creates the parent group.""" - parent_path = path[0:path.rindex('/')] or '/' + parent_path = path[0 : path.rindex("/")] or "/" parent_path_hdf5 = helpers.convert_data_dict_path_to_hdf5_path(parent_path) if not does_path_exist(parent_path, self.output_nexus): parent = self.ensure_and_get_parent_node(parent_path, undocumented_paths) @@ -239,7 +249,7 @@ def ensure_and_get_parent_node(self, path: str, undocumented_paths) -> h5py.Grou attrs = self.__nxdl_to_attrs(parent_path) if attrs is not None: - grp.attrs['NX_class'] = attrs["type"] + grp.attrs["NX_class"] = attrs["type"] return grp return self.output_nexus[parent_path_hdf5] @@ -255,31 +265,39 @@ def add_units_key(dataset, path): for path, value in self.data.items(): try: - if path[path.rindex('/') + 1:] == '@units': + if path[path.rindex("/") + 1 :] == "@units": continue - entry_name = helpers.get_name_from_data_dict_entry(path[path.rindex('/') + 1:]) + entry_name = helpers.get_name_from_data_dict_entry( + path[path.rindex("/") + 1 :] + ) if is_not_data_empty(value): data = value else: continue if entry_name[0] != "@": - grp = self.ensure_and_get_parent_node(path, self.data.undocumented.keys()) + grp = self.ensure_and_get_parent_node( + path, self.data.undocumented.keys() + ) if isinstance(data, dict): if "compress" in data.keys(): - dataset = handle_dicts_entries(data, grp, entry_name, - self.output_path, path) + dataset = handle_dicts_entries( + data, grp, entry_name, self.output_path, path + ) else: - hdf5_links_for_later.append([data, grp, entry_name, - self.output_path, path]) + hdf5_links_for_later.append( + [data, grp, entry_name, self.output_path, path] + ) else: dataset = grp.create_dataset(entry_name, data=data) except InvalidDictProvided as exc: print(str(exc)) except Exception as exc: - raise IOError(f"Unknown error occured writing the path: {path} " - f"with the following message: {str(exc)}") from exc + raise IOError( + f"Unknown error occured writing the path: {path} " + f"with the following message: {str(exc)}" + ) from exc for links in hdf5_links_for_later: dataset = 
handle_dicts_entries(*links) @@ -289,10 +307,12 @@ def add_units_key(dataset, path): for path, value in self.data.items(): try: - if path[path.rindex('/') + 1:] == '@units': + if path[path.rindex("/") + 1 :] == "@units": continue - entry_name = helpers.get_name_from_data_dict_entry(path[path.rindex('/') + 1:]) + entry_name = helpers.get_name_from_data_dict_entry( + path[path.rindex("/") + 1 :] + ) if is_not_data_empty(value): data = value else: @@ -304,11 +324,15 @@ def add_units_key(dataset, path): add_units_key(self.output_nexus[path_hdf5], path) else: # consider changing the name here the lvalue can also be group! - dataset = self.ensure_and_get_parent_node(path, self.data.undocumented.keys()) + dataset = self.ensure_and_get_parent_node( + path, self.data.undocumented.keys() + ) dataset.attrs[entry_name[1:]] = data except Exception as exc: - raise IOError(f"Unknown error occured writing the path: {path} " - f"with the following message: {str(exc)}") from exc + raise IOError( + f"Unknown error occured writing the path: {path} " + f"with the following message: {str(exc)}" + ) from exc def write(self): """Writes the NeXus file with previously validated data from the reader with NXDL attrs.""" diff --git a/pynxtools/eln_mapper/eln.py b/pynxtools/eln_mapper/eln.py index 078dd4d18..2ac11cfb9 100755 --- a/pynxtools/eln_mapper/eln.py +++ b/pynxtools/eln_mapper/eln.py @@ -50,26 +50,26 @@ def retrieve_nxdl_file(nexus_def: str) -> str: """ definition_path = get_nexus_definitions_path() - def_path = os.path.join(definition_path, - 'contributed_definitions', - f"{nexus_def}.nxdl.xml") + def_path = os.path.join( + definition_path, "contributed_definitions", f"{nexus_def}.nxdl.xml" + ) if os.path.exists(def_path): return def_path - def_path = os.path.join(definition_path, - 'base_definitions', - f"{nexus_def}.nxdl.xml") + def_path = os.path.join( + definition_path, "base_definitions", f"{nexus_def}.nxdl.xml" + ) if os.path.exists(def_path): return def_path - def_path = os.path.join(definition_path, - 'applications', - f"{nexus_def}.nxdl.xml") + def_path = os.path.join(definition_path, "applications", f"{nexus_def}.nxdl.xml") if os.path.exists(def_path): return def_path - raise ValueError("Incorrect definition is rendered, try with correct definition name.") + raise ValueError( + "Incorrect definition is rendered, try with correct definition name." + ) def get_empty_template(nexus_def: str) -> Template: @@ -95,17 +95,17 @@ def get_empty_template(nexus_def: str) -> Template: def take_care_of_special_concepts(key: str): """For some special concepts such as @units.""" + def unit_concept(): - return {'value': None, - 'unit': None} + return {"value": None, "unit": None} - if key == '@units': + if key == "@units": return unit_concept() -def get_recursive_dict(concatenated_key: str, - recursive_dict: Dict[str, Any], - level_to_skip: int) -> None: +def get_recursive_dict( + concatenated_key: str, recursive_dict: Dict[str, Any], level_to_skip: int +) -> None: """Get recursive dict for concatenated string of keys. Parameters @@ -119,14 +119,17 @@ def get_recursive_dict(concatenated_key: str, """ # splitig keys like: '/entry[ENTRY]/position[POSITION]/xx'. # skiping the first empty '' and top parts as directed by users. 
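For orientation, the nesting performed by get_recursive_dict can be pictured with one invented key; with level_to_skip=1 the leading ENTRY part is dropped and bracketed names are capitalized:

    # Sketch: get_recursive_dict("/ENTRY[entry]/USER[user]/name", d, 1)
    # leaves d == {"User": {"name": None}}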
-    key_li = concatenated_key.split('/')[level_to_skip + 1:]
+    key_li = concatenated_key.split("/")[level_to_skip + 1 :]
     # list of key for special consideration
-    sp_key_li = ['@units']
+    sp_key_li = ["@units"]
     last_key = ""
     last_dict = {}
     for key in key_li:
-        if '[' in key and '/' not in key:
-            key = re.findall(r'\[(.*?)\]', key,)[0].capitalize()
+        if "[" in key and "/" not in key:
+            key = re.findall(
+                r"\[(.*?)\]",
+                key,
+            )[0].capitalize()
         if not key:
             continue
         last_key = key
@@ -152,7 +155,7 @@ def get_recursive_dict(concatenated_key: str,
         last_dict[last_key] = None

-def generate_eln(nexus_def: str, eln_file: str = '', level_to_skip: int = 1) -> None:
+def generate_eln(nexus_def: str, eln_file: str = "", level_to_skip: int = 1) -> None:
     """Genrate eln from application definition.

     Parameters
@@ -171,19 +174,21 @@ def generate_eln(nexus_def: str, eln_file: str = '', level_to_skip: int = 1) ->
     for key, _ in template.items():
         get_recursive_dict(key, recursive_dict, level_to_skip)

-    name_split = eln_file.rsplit('.')
+    name_split = eln_file.rsplit(".")
     if not eln_file:
-        if nexus_def[0:2] == 'NX':
+        if nexus_def[0:2] == "NX":
             raw_name = nexus_def[2:]
-            eln_file = raw_name + '.yaml'
+            eln_file = raw_name + ".yaml"

     elif len(name_split) == 1:
-        eln_file = eln_file + '.yaml'
+        eln_file = eln_file + ".yaml"

-    elif len(name_split) == 2 and name_split[1] == 'yaml':
+    elif len(name_split) == 2 and name_split[1] == "yaml":
         pass
     else:
-        raise ValueError("Eln file should come with 'yaml' extension or without extension.")
+        raise ValueError(
+            "The ELN file should come with a 'yaml' extension or without any extension."
+        )

-    with open(eln_file, encoding='utf-8', mode='w') as eln_f:
+    with open(eln_file, encoding="utf-8", mode="w") as eln_f:
         yaml.dump(recursive_dict, sort_keys=False, stream=eln_f)
diff --git a/pynxtools/eln_mapper/eln_mapper.py b/pynxtools/eln_mapper/eln_mapper.py
index d23918f73..b1b3cb5d0 100755
--- a/pynxtools/eln_mapper/eln_mapper.py
+++ b/pynxtools/eln_mapper/eln_mapper.py
@@ -23,35 +23,34 @@

 @click.command()
 @click.option(
-    '--nxdl',
+    "--nxdl",
     required=True,
-    help="Name of NeXus definition without extension (.nxdl.xml)."
+    help="Name of NeXus definition without extension (.nxdl.xml).",
 )
 @click.option(
-    '--skip-top-levels',
+    "--skip-top-levels",
     default=1,
     required=False,
     type=int,
     show_default=True,
-    help=("To skip the level of parent hierarchy level. E.g. for default 1 the part"
-          "Entry[ENTRY] from /Entry[ENTRY]/Instrument[INSTRUMENT]/... will be skiped.")
+    help=(
+        "Number of parent hierarchy levels to skip. E.g. for the default 1, the part"
+        " Entry[ENTRY] from /Entry[ENTRY]/Instrument[INSTRUMENT]/... will be skipped."
+    ),
 )
 @click.option(
-    '--output-file',
+    "--output-file",
     required=False,
-    default='eln_data',
-    help=('Name of file that is neede to generated output file.')
+    default="eln_data",
+    help=("Name of the output file to generate."),
 )
 @click.option(
-    '--eln-type',
+    "--eln-type",
     required=True,
-    type=click.Choice(['eln', 'scheme_eln'], case_sensitive=False),
-    default='eln'
+    type=click.Choice(["eln", "scheme_eln"], case_sensitive=False),
+    default="eln",
 )
-def get_eln(nxdl: str,
-            skip_top_levels: int,
-            output_file: str,
-            eln_type: str):
+def get_eln(nxdl: str, skip_top_levels: int, output_file: str, eln_type: str):
     """To generate ELN in yaml file format.

     Parameters
@@ -65,9 +64,9 @@ def get_eln(nxdl: str,
         Name of the output file.
""" eln_type = eln_type.lower() - if eln_type == 'eln': + if eln_type == "eln": generate_eln(nxdl, output_file, skip_top_levels) - elif eln_type == 'scheme_eln': + elif eln_type == "scheme_eln": generate_scheme_eln(nxdl, eln_file_name=output_file) diff --git a/pynxtools/eln_mapper/scheme_eln.py b/pynxtools/eln_mapper/scheme_eln.py index 1152bbd08..029e67550 100644 --- a/pynxtools/eln_mapper/scheme_eln.py +++ b/pynxtools/eln_mapper/scheme_eln.py @@ -24,28 +24,43 @@ from pynxtools.dataconverter.helpers import remove_namespace_from_tag -NEXUS_TYPE_TO_NUMPY_TYPE = {'NX_CHAR': {'convert_typ': 'str', - 'component_nm': 'StringEditQuantity', - 'default_unit_display': ''}, - 'NX_BOOLEAN': {'convert_typ': 'bool', - 'component_nm': 'BoolEditQuantity', - 'default_unit_display': ''}, - 'NX_DATE_TIME': {'convert_typ': 'Datetime', - 'component_nm': 'DateTimeEditQuantity', - 'default_unit_display': ''}, - 'NX_FLOAT': {'convert_typ': 'np.float64', - 'component_nm': 'NumberEditQuantity', - 'default_unit_display': ''}, - 'NX_INT': {'convert_typ': 'int', - 'component_nm': 'NumberEditQuantity', - 'default_unit_display': ''}, - 'NX_NUMBER': {'convert_typ': 'np.float64', - 'component_nm': 'NumberEditQuantity', - 'default_unit_display': ''}, - '': {'convert_typ': '', - 'component_nm': '', - 'default_unit_display': ''}, - } +NEXUS_TYPE_TO_NUMPY_TYPE = { + "NX_CHAR": { + "convert_typ": "str", + "component_nm": "StringEditQuantity", + "default_unit_display": "", + }, + "NX_BOOLEAN": { + "convert_typ": "bool", + "component_nm": "BoolEditQuantity", + "default_unit_display": "", + }, + "NX_DATE_TIME": { + "convert_typ": "Datetime", + "component_nm": "DateTimeEditQuantity", + "default_unit_display": "", + }, + "NX_FLOAT": { + "convert_typ": "np.float64", + "component_nm": "NumberEditQuantity", + "default_unit_display": "", + }, + "NX_INT": { + "convert_typ": "int", + "component_nm": "NumberEditQuantity", + "default_unit_display": "", + }, + "NX_NUMBER": { + "convert_typ": "np.float64", + "component_nm": "NumberEditQuantity", + "default_unit_display": "", + }, + "": { + "convert_typ": "", + "component_nm": "", + "default_unit_display": "", + }, +} def construct_field_structure(fld_elem, quntities_dict): @@ -58,31 +73,35 @@ def construct_field_structure(fld_elem, quntities_dict): _description_ """ elm_attr = fld_elem.attrib - fld_nm = elm_attr['name'].lower() + fld_nm = elm_attr["name"].lower() quntities_dict[fld_nm] = {} fld_dict = quntities_dict[fld_nm] # handle type - if 'type' in elm_attr: - nx_fld_typ = elm_attr['type'] + if "type" in elm_attr: + nx_fld_typ = elm_attr["type"] else: - nx_fld_typ = 'NX_CHAR' + nx_fld_typ = "NX_CHAR" if nx_fld_typ in NEXUS_TYPE_TO_NUMPY_TYPE: - cov_fld_typ = NEXUS_TYPE_TO_NUMPY_TYPE[nx_fld_typ]['convert_typ'] + cov_fld_typ = NEXUS_TYPE_TO_NUMPY_TYPE[nx_fld_typ]["convert_typ"] - fld_dict['type'] = cov_fld_typ - if 'units' in elm_attr: - fld_dict['unit'] = f"" - fld_dict['value'] = "" + fld_dict["type"] = cov_fld_typ + if "units" in elm_attr: + fld_dict["unit"] = f"" + fld_dict["value"] = "" # handle m_annotation - m_annotation = {'m_annotations': {'eln': - {'component': - NEXUS_TYPE_TO_NUMPY_TYPE[nx_fld_typ]['component_nm'], - 'defaultDisplayUnit': - (NEXUS_TYPE_TO_NUMPY_TYPE[nx_fld_typ] - ['default_unit_display'])}}} + m_annotation = { + "m_annotations": { + "eln": { + "component": NEXUS_TYPE_TO_NUMPY_TYPE[nx_fld_typ]["component_nm"], + "defaultDisplayUnit": ( + NEXUS_TYPE_TO_NUMPY_TYPE[nx_fld_typ]["default_unit_display"] + ), + } + } + } fld_dict.update(m_annotation) # handle 
description @@ -90,17 +109,16 @@ def construct_field_structure(fld_elem, quntities_dict): def construct_decription(elm: ET.Element, concept_dict: Dict) -> None: - """Collect doc from concept doc. - """ - desc_text = '' + """Collect doc from concept doc.""" + desc_text = "" for child_elm in elm: tag = remove_namespace_from_tag(child_elm.tag) - if tag == 'doc': + if tag == "doc": desc_text = child_elm.text - desc_text = ' '.join([x.strip() for x in desc_text.split('\n')]) + desc_text = " ".join([x.strip() for x in desc_text.split("\n")]) break - concept_dict['description'] = desc_text + concept_dict["description"] = desc_text def construct_group_structure(grp_elm: ET.Element, subsections: Dict) -> None: @@ -119,21 +137,21 @@ def construct_group_structure(grp_elm: ET.Element, subsections: Dict) -> None: Dict to include group recursively """ - default_m_annot = {'m_annotations': {'eln': {'overview': True}}} + default_m_annot = {"m_annotations": {"eln": {"overview": True}}} elm_attrib = grp_elm.attrib grp_desig = "" - if 'name' in elm_attrib: - grp_desig = elm_attrib['name'].capitalize() - elif 'type' in elm_attrib: - grp_desig = elm_attrib['type'][2:].capitalize() + if "name" in elm_attrib: + grp_desig = elm_attrib["name"].capitalize() + elif "type" in elm_attrib: + grp_desig = elm_attrib["type"][2:].capitalize() subsections[grp_desig] = {} grp_dict = subsections[grp_desig] # add setion in group - grp_dict['section'] = {} - section = grp_dict['section'] + grp_dict["section"] = {} + section = grp_dict["section"] section.update(default_m_annot) # pass the grp elment for recursive search @@ -149,19 +167,21 @@ def _should_skip_iteration(elm: ET.Element) -> bool: The element to investigate to skip """ attr = elm.attrib - elm_type = '' - if 'type' in attr: - elm_type = attr['type'] - if elm_type in ['NXentry']: + elm_type = "" + if "type" in attr: + elm_type = attr["type"] + if elm_type in ["NXentry"]: return True return False -def scan_xml_element_recursively(nxdl_element: ET.Element, - recursive_dict: Dict, - root_name: str = "", - reader_name: str = '', - is_root: bool = False) -> None: +def scan_xml_element_recursively( + nxdl_element: ET.Element, + recursive_dict: Dict, + root_name: str = "", + reader_name: str = "", + is_root: bool = False, +) -> None: """Scan xml elements, and pass the element to the type of element handaler. 
Parameters @@ -180,14 +200,20 @@ def scan_xml_element_recursively(nxdl_element: ET.Element, if is_root: # Note for later: crate a new function to handle root part - nxdl = 'NX.nxdl' - recursive_dict[root_name] = {'base_sections': - ['nomad.datamodel.metainfo.eln.NexusDataConverter', - 'nomad.datamodel.data.EntryData']} - - m_annotations: Dict = {'m_annotations': {'template': {'reader': reader_name, - 'nxdl': nxdl}, - 'eln': {'hide': []}}} + nxdl = "NX.nxdl" + recursive_dict[root_name] = { + "base_sections": [ + "nomad.datamodel.metainfo.eln.NexusDataConverter", + "nomad.datamodel.data.EntryData", + ] + } + + m_annotations: Dict = { + "m_annotations": { + "template": {"reader": reader_name, "nxdl": nxdl}, + "eln": {"hide": []}, + } + } recursive_dict[root_name].update(m_annotations) @@ -202,15 +228,15 @@ def scan_xml_element_recursively(nxdl_element: ET.Element, if _should_skip_iteration(elm): scan_xml_element_recursively(elm, recursive_dict) continue - if tag == 'field': + if tag == "field": if quantities is None: - recursive_dict['quantities'] = {} - quantities = recursive_dict['quantities'] + recursive_dict["quantities"] = {} + quantities = recursive_dict["quantities"] construct_field_structure(elm, quantities) - if tag == 'group': + if tag == "group": if subsections is None: - recursive_dict['sub_sections'] = {} - subsections = recursive_dict['sub_sections'] + recursive_dict["sub_sections"] = {} + subsections = recursive_dict["sub_sections"] construct_group_structure(elm, subsections) @@ -226,13 +252,13 @@ def get_eln_recursive_dict(recursive_dict: Dict, nexus_full_file: str) -> None: """ nxdl_root = ET.parse(nexus_full_file).getroot() - root_name = nxdl_root.attrib['name'][2:] if 'name' in nxdl_root.attrib else "" - recursive_dict['definitions'] = {'name': '', - 'sections': {}} - sections = recursive_dict['definitions']['sections'] + root_name = ( + nxdl_root.attrib["name"][2:] if "name" in nxdl_root.attrib else "" + ) + recursive_dict["definitions"] = {"name": "", "sections": {}} + sections = recursive_dict["definitions"]["sections"] - scan_xml_element_recursively(nxdl_root, sections, - root_name=root_name, is_root=True) + scan_xml_element_recursively(nxdl_root, sections, root_name=root_name, is_root=True) def generate_scheme_eln(nexus_def: str, eln_file_name: str = None) -> None: @@ -251,7 +277,7 @@ def generate_scheme_eln(nexus_def: str, eln_file_name: str = None) -> None: """ file_parts: list = [] - out_file_ext = 'scheme.archive.yaml' + out_file_ext = "scheme.archive.yaml" raw_name = "" out_file = "" @@ -259,23 +285,23 @@ def generate_scheme_eln(nexus_def: str, eln_file_name: str = None) -> None: if eln_file_name is None: # raw_name from e.g. 
//NXmpes.nxdl.xml - raw_name = nxdl_file.split('/')[-1].split('.')[0][2:] - out_file = '.'.join([raw_name, out_file_ext]) + raw_name = nxdl_file.split("/")[-1].split(".")[0][2:] + out_file = ".".join([raw_name, out_file_ext]) else: - file_parts = eln_file_name.split('.') + file_parts = eln_file_name.split(".") if len(file_parts) == 1: raw_name = file_parts[0] - out_file = '.'.join([raw_name, out_file_ext]) - elif len(file_parts) == 4 and '.'.join(file_parts[1:]) == out_file_ext: + out_file = ".".join([raw_name, out_file_ext]) + elif len(file_parts) == 4 and ".".join(file_parts[1:]) == out_file_ext: out_file = eln_file_name - elif nexus_def[0:2] == 'NX': + elif nexus_def[0:2] == "NX": raw_name = nexus_def[2:] - out_file = '.'.join([raw_name, out_file_ext]) + out_file = ".".join([raw_name, out_file_ext]) else: raise ValueError("Check for correct NeXus definition and output file name.") recursive_dict: Dict[str, Any] = {} get_eln_recursive_dict(recursive_dict, nxdl_file) - with open(out_file, mode='w', encoding='utf-8') as out_f: + with open(out_file, mode="w", encoding="utf-8") as out_f: yaml.dump(recursive_dict, sort_keys=False, stream=out_f) diff --git a/pynxtools/nexus/nexus.py b/pynxtools/nexus/nexus.py index ef5f64cd5..7da178b72 100644 --- a/pynxtools/nexus/nexus.py +++ b/pynxtools/nexus/nexus.py @@ -15,14 +15,16 @@ def get_nxdl_entry(hdf_info): """Get the nxdl application definition for an HDF5 node""" entry = hdf_info - while isinstance(entry['hdf_node'], h5py.Dataset) or \ - 'NX_class' not in entry['hdf_node'].attrs.keys() or \ - entry['hdf_node'].attrs['NX_class'] != 'NXentry': + while ( + isinstance(entry["hdf_node"], h5py.Dataset) + or "NX_class" not in entry["hdf_node"].attrs.keys() + or entry["hdf_node"].attrs["NX_class"] != "NXentry" + ): entry = get_hdf_info_parent(entry) - if entry['hdf_node'].name == '/': - return 'NO NXentry found' + if entry["hdf_node"].name == "/": + return "NO NXentry found" try: - nxdef = entry['hdf_node']['definition'][()] + nxdef = entry["hdf_node"]["definition"][()] return nxdef.decode() except KeyError: # 'NO Definition referenced' return "NXentry" @@ -30,56 +32,69 @@ def get_nxdl_entry(hdf_info): def get_nx_class_path(hdf_info): """Get the full path of an HDF5 node using nexus classes -in case of a field, end with the field name""" - hdf_node = hdf_info['hdf_node'] - if hdf_node.name == '/': - return '' + in case of a field, end with the field name""" + hdf_node = hdf_info["hdf_node"] + if hdf_node.name == "/": + return "" if isinstance(hdf_node, h5py.Group): - return get_nx_class_path(get_hdf_info_parent(hdf_info)) + '/' + \ - (hdf_node.attrs['NX_class'] if 'NX_class' in hdf_node.attrs.keys() else - hdf_node.name.split('/')[-1]) + return ( + get_nx_class_path(get_hdf_info_parent(hdf_info)) + + "/" + + ( + hdf_node.attrs["NX_class"] + if "NX_class" in hdf_node.attrs.keys() + else hdf_node.name.split("/")[-1] + ) + ) if isinstance(hdf_node, h5py.Dataset): - return get_nx_class_path( - get_hdf_info_parent(hdf_info)) + '/' + hdf_node.name.split('/')[-1] - return '' + return ( + get_nx_class_path(get_hdf_info_parent(hdf_info)) + + "/" + + hdf_node.name.split("/")[-1] + ) + return "" def chk_nxdataaxis_v2(hdf_node, name, logger): """Check if dataset is an axis""" - own_signal = hdf_node.attrs.get('signal') # check for being a Signal + own_signal = hdf_node.attrs.get("signal") # check for being a Signal if own_signal is str and own_signal == "1": logger.debug("Dataset referenced (v2) as NXdata SIGNAL") - own_axes = hdf_node.attrs.get('axes') # check for 
being an axis + own_axes = hdf_node.attrs.get("axes") # check for being an axis if own_axes is str: - axes = own_axes.split(':') + axes = own_axes.split(":") for i in len(axes): if axes[i] and name == axes[i]: logger.debug("Dataset referenced (v2) as NXdata AXIS #%d", i) return None - ownpaxis = hdf_node.attrs.get('primary') - own_axis = hdf_node.attrs.get('axis') + ownpaxis = hdf_node.attrs.get("primary") + own_axis = hdf_node.attrs.get("axis") if own_axis is int: # also convention v1 if ownpaxis is int and ownpaxis == 1: logger.debug("Dataset referenced (v2) as NXdata AXIS #%d", own_axis - 1) else: logger.debug( - "Dataset referenced (v2) as NXdata (primary/alternative) AXIS #%d", own_axis - 1) + "Dataset referenced (v2) as NXdata (primary/alternative) AXIS #%d", + own_axis - 1, + ) return None def chk_nxdataaxis(hdf_node, name, logger): """NEXUS Data Plotting Standard v3: new version from 2014""" - if not isinstance(hdf_node, h5py.Dataset): # check if it is a field in an NXdata node + if not isinstance( + hdf_node, h5py.Dataset + ): # check if it is a field in an NXdata node return None parent = hdf_node.parent - if not parent or (parent and not parent.attrs.get('NX_class') == "NXdata"): + if not parent or (parent and not parent.attrs.get("NX_class") == "NXdata"): return None - signal = parent.attrs.get('signal') # chk for Signal + signal = parent.attrs.get("signal") # chk for Signal if signal and name == signal: logger.debug("Dataset referenced as NXdata SIGNAL") return None - axes = parent.attrs.get('axes') # check for default Axes + axes = parent.attrs.get("axes") # check for default Axes if axes is str: if name == axes: logger.debug("Dataset referenced as NXdata AXIS") @@ -87,37 +102,39 @@ def chk_nxdataaxis(hdf_node, name, logger): elif axes is not None: for i, j in enumerate(axes): if name == j: - indices = parent.attrs.get(j + '_indices') + indices = parent.attrs.get(j + "_indices") if indices is int: logger.debug(f"Dataset referenced as NXdata AXIS #{indices}") else: logger.debug(f"Dataset referenced as NXdata AXIS #{i}") return None - indices = parent.attrs.get(name + '_indices') # check for alternative Axes + indices = parent.attrs.get(name + "_indices") # check for alternative Axes if indices is int: logger.debug(f"Dataset referenced as NXdata alternative AXIS #{indices}") return chk_nxdataaxis_v2(hdf_node, name, logger) # check for older conventions def check_deprecation_enum_axis(variables, doc, elist, attr, hdf_node): - """Check for several attributes. - deprecation - enums - nxdataaxis """ + """Check for several attributes. 
- deprecation - enums - nxdataaxis""" logger, elem, path = variables - dep_str = elem.attrib.get('deprecated') # check for deprecation + dep_str = elem.attrib.get("deprecated") # check for deprecation if dep_str: if doc: logger.debug("DEPRECATED - " + dep_str) for base_elem in elist if not attr else [elem]: # check for enums - sdoc = get_nxdl_child(base_elem, 'enumeration', go_base=False) + sdoc = get_nxdl_child(base_elem, "enumeration", go_base=False) if sdoc is not None: if doc: logger.debug("enumeration (" + get_node_concept_path(base_elem) + "):") for item in sdoc: - if get_local_name_from_xml(item) == 'item': + if get_local_name_from_xml(item) == "item": if doc: - logger.debug("-> " + item.attrib['value']) - chk_nxdataaxis(hdf_node, path.split('/')[-1], logger) # look for NXdata reference (axes/signal) + logger.debug("-> " + item.attrib["value"]) + chk_nxdataaxis( + hdf_node, path.split("/")[-1], logger + ) # look for NXdata reference (axes/signal) for base_elem in elist if not attr else [elem]: # check for doc - sdoc = get_nxdl_child(base_elem, 'doc', go_base=False) + sdoc = get_nxdl_child(base_elem, "doc", go_base=False) if doc: logger.debug("documentation (" + get_node_concept_path(base_elem) + "):") logger.debug(sdoc.text if sdoc is not None else "") @@ -125,7 +142,8 @@ def check_deprecation_enum_axis(variables, doc, elist, attr, hdf_node): def get_nxdl_attr_doc( # pylint: disable=too-many-arguments,too-many-locals - elem, elist, attr, hdf_node, logger, doc, nxdl_path, req_str, path, hdf_info): + elem, elist, attr, hdf_node, logger, doc, nxdl_path, req_str, path, hdf_info +): """Get nxdl documentation for an attribute""" new_elem = [] old_elem = elem @@ -133,43 +151,44 @@ def get_nxdl_attr_doc( # pylint: disable=too-many-arguments,too-many-locals act_elem = act_elem1 # NX_class is a compulsory attribute for groups in a nexus file # which should match the type of the corresponding NXDL element - if attr == 'NX_class' and not isinstance(hdf_node, h5py.Dataset) and elem_index == 0: + if ( + attr == "NX_class" + and not isinstance(hdf_node, h5py.Dataset) + and elem_index == 0 + ): elem = None logger, doc, attr = write_doc_string(logger, doc, attr) new_elem = elem break # units category is a compulsory attribute for any fields - if attr == 'units' and isinstance(hdf_node, h5py.Dataset): + if attr == "units" and isinstance(hdf_node, h5py.Dataset): req_str = "<>" - logger, act_elem, nxdl_path, doc, attr = try_find_units(logger, - act_elem, - nxdl_path, - doc, - attr) + logger, act_elem, nxdl_path, doc, attr = try_find_units( + logger, act_elem, nxdl_path, doc, attr + ) # units for attributes can be given as ATTRIBUTENAME_units - elif attr.endswith('_units'): - logger, act_elem, nxdl_path, doc, attr, req_str = check_attr_name_nxdl((logger, - act_elem, - nxdl_path, - doc, - attr, - req_str)) + elif attr.endswith("_units"): + logger, act_elem, nxdl_path, doc, attr, req_str = check_attr_name_nxdl( + (logger, act_elem, nxdl_path, doc, attr, req_str) + ) # default is allowed for groups - elif attr == 'default' and not isinstance(hdf_node, h5py.Dataset): + elif attr == "default" and not isinstance(hdf_node, h5py.Dataset): req_str = "<>" # try to find if default is defined as a child of the NXDL element - act_elem = get_nxdl_child(act_elem, attr, nexus_type='attribute', go_base=False) - logger, act_elem, nxdl_path, doc, attr = try_find_default(logger, - act_elem1, - act_elem, - nxdl_path, - doc, - attr) + act_elem = get_nxdl_child( + act_elem, attr, nexus_type="attribute", go_base=False + ) + 
logger, act_elem, nxdl_path, doc, attr = try_find_default( + logger, act_elem1, act_elem, nxdl_path, doc, attr + ) else: # other attributes - act_elem = get_nxdl_child(act_elem, attr, nexus_type='attribute', go_base=False) + act_elem = get_nxdl_child( + act_elem, attr, nexus_type="attribute", go_base=False + ) if act_elem is not None: - logger, act_elem, nxdl_path, doc, attr = \ - other_attrs(logger, act_elem1, act_elem, nxdl_path, doc, attr) + logger, act_elem, nxdl_path, doc, attr = other_attrs( + logger, act_elem1, act_elem, nxdl_path, doc, attr + ) if act_elem is not None: new_elem.append(act_elem) if req_str is None: @@ -177,14 +196,18 @@ def get_nxdl_attr_doc( # pylint: disable=too-many-arguments,too-many-locals if doc: logger.debug(req_str) variables = [logger, act_elem, path] - logger, elem, path, doc, elist, attr, hdf_node = check_deprecation_enum_axis(variables, - doc, - elist, - attr, - hdf_node) + ( + logger, + elem, + path, + doc, + elist, + attr, + hdf_node, + ) = check_deprecation_enum_axis(variables, doc, elist, attr, hdf_node) elem = old_elem if req_str is None and doc: - if attr != 'NX_class': + if attr != "NX_class": logger.debug("@" + attr + " - IS NOT IN SCHEMA") logger.debug("") return (req_str, get_nxdl_entry(hdf_info), nxdl_path) @@ -192,37 +215,40 @@ def get_nxdl_attr_doc( # pylint: disable=too-many-arguments,too-many-locals def get_nxdl_doc(hdf_info, logger, doc, attr=False): """Get nxdl documentation for an HDF5 node (or its attribute)""" - hdf_node = hdf_info['hdf_node'] + hdf_node = hdf_info["hdf_node"] # new way: retrieve multiple inherited base classes - (class_path, nxdl_path, elist) = \ - get_inherited_hdf_nodes(nx_name=get_nxdl_entry(hdf_info), hdf_node=hdf_node, - hdf_path=hdf_info['hdf_path'] if 'hdf_path' in hdf_info else None, - hdf_root=hdf_info['hdf_root'] if 'hdf_root' in hdf_info else None) + (class_path, nxdl_path, elist) = get_inherited_hdf_nodes( + nx_name=get_nxdl_entry(hdf_info), + hdf_node=hdf_node, + hdf_path=hdf_info["hdf_path"] if "hdf_path" in hdf_info else None, + hdf_root=hdf_info["hdf_root"] if "hdf_root" in hdf_info else None, + ) elem = elist[0] if class_path and elist else None if doc: logger.debug("classpath: " + str(class_path)) - logger.debug("NOT IN SCHEMA" if elem is None else - "classes:\n" + "\n".join - (get_node_concept_path(e) for e in elist)) + logger.debug( + "NOT IN SCHEMA" + if elem is None + else "classes:\n" + "\n".join(get_node_concept_path(e) for e in elist) + ) # old solution with a single elem instead of using elist path = get_nx_class_path(hdf_info) req_str = None if elem is None: if doc: logger.debug("") - return ('None', None, None) + return ("None", None, None) if attr: - return get_nxdl_attr_doc(elem, elist, attr, hdf_node, logger, doc, nxdl_path, - req_str, path, hdf_info) + return get_nxdl_attr_doc( + elem, elist, attr, hdf_node, logger, doc, nxdl_path, req_str, path, hdf_info + ) req_str = get_required_string(elem) # check for being required if doc: logger.debug(req_str) variables = [logger, elem, path] - logger, elem, path, doc, elist, attr, hdf_node = check_deprecation_enum_axis(variables, - doc, - elist, - attr, - hdf_node) + logger, elem, path, doc, elist, attr, hdf_node = check_deprecation_enum_axis( + variables, doc, elist, attr, hdf_node + ) return (req_str, get_nxdl_entry(hdf_info), nxdl_path) @@ -232,20 +258,18 @@ def helper_get_inherited_nodes(hdf_info2, elist, pind, attr): hdf_name = hdf_path[pind] hdf_class_name = hdf_class_path[pind] if pind < len(hdf_path) - (2 if attr else 1): - 
act_nexus_type = 'group' + act_nexus_type = "group" elif pind == len(hdf_path) - 1 and attr: - act_nexus_type = 'attribute' + act_nexus_type = "attribute" else: - act_nexus_type = 'field' if isinstance(hdf_node, h5py.Dataset) else 'group' + act_nexus_type = "field" if isinstance(hdf_node, h5py.Dataset) else "group" # find the best fitting name in all children bestfit = -1 html_name = None for ind in range(len(elist) - 1, -1, -1): - newelem, fit = get_best_child(elist[ind], - hdf_node, - hdf_name, - hdf_class_name, - act_nexus_type) + newelem, fit = get_best_child( + elist[ind], hdf_node, hdf_name, hdf_class_name, act_nexus_type + ) if fit >= bestfit and newelem is not None: html_name = get_node_name(newelem) return hdf_path, hdf_node, hdf_class_path, elist, pind, attr, html_name @@ -253,32 +277,38 @@ def helper_get_inherited_nodes(hdf_info2, elist, pind, attr): def get_hdf_path(hdf_info): """Get the hdf_path from an hdf_info""" - if 'hdf_path' in hdf_info: - return hdf_info['hdf_path'].split('/')[1:] - return hdf_info['hdf_node'].name.split('/')[1:] + if "hdf_path" in hdf_info: + return hdf_info["hdf_path"].split("/")[1:] + return hdf_info["hdf_node"].name.split("/")[1:] # pylint: disable=too-many-arguments,too-many-locals @lru_cache(maxsize=None) -def get_inherited_hdf_nodes(nx_name: str = None, elem: ET.Element = None, - hdf_node=None, hdf_path=None, hdf_root=None, attr=False): +def get_inherited_hdf_nodes( + nx_name: str = None, + elem: ET.Element = None, + hdf_node=None, + hdf_path=None, + hdf_root=None, + attr=False, +): """Returns a list of ET.Element for the given path.""" # let us start with the given definition file if hdf_node is None: - raise ValueError('hdf_node must not be None') + raise ValueError("hdf_node must not be None") elist = [] # type: ignore[var-annotated] add_base_classes(elist, nx_name, elem) nxdl_elem_path = [elist[0]] class_path = [] # type: ignore[var-annotated] - hdf_info = {'hdf_node': hdf_node} + hdf_info = {"hdf_node": hdf_node} if hdf_path: - hdf_info['hdf_path'] = hdf_path + hdf_info["hdf_path"] = hdf_path if hdf_root: - hdf_root['hdf_root'] = hdf_root - hdf_node = hdf_info['hdf_node'] + hdf_root["hdf_root"] = hdf_root + hdf_node = hdf_info["hdf_node"] hdf_path = get_hdf_path(hdf_info) - hdf_class_path = get_nx_class_path(hdf_info).split('/')[1:] + hdf_class_path = get_nx_class_path(hdf_info).split("/")[1:] if attr: hdf_path.append(attr) hdf_class_path.append(attr) @@ -286,9 +316,15 @@ def get_inherited_hdf_nodes(nx_name: str = None, elem: ET.Element = None, for pind in range(len(path)): hdf_info2 = [hdf_path, hdf_node, hdf_class_path] - [hdf_path, hdf_node, hdf_class_path, elist, - pind, attr, html_name] = helper_get_inherited_nodes(hdf_info2, elist, - pind, attr) + [ + hdf_path, + hdf_node, + hdf_class_path, + elist, + pind, + attr, + html_name, + ] = helper_get_inherited_nodes(hdf_info2, elist, pind, attr) if html_name is None: # return if NOT IN SCHEMA return (class_path, nxdl_elem_path, None) elist, html_name = walk_elist(elist, html_name) @@ -300,16 +336,19 @@ def get_inherited_hdf_nodes(nx_name: str = None, elem: ET.Element = None, def process_node(hdf_node, hdf_path, parser, logger, doc=True): """Processes an hdf5 node. 
-- it logs the node found and also checks for its attributes -- retrieves the corresponding nxdl documentation -TODO: -- follow variants -- NOMAD parser: store in NOMAD """ - hdf_info = {'hdf_path': hdf_path, 'hdf_node': hdf_node} + - it logs the node found and also checks for its attributes + - retrieves the corresponding nxdl documentation + TODO: + - follow variants + - NOMAD parser: store in NOMAD""" + hdf_info = {"hdf_path": hdf_path, "hdf_node": hdf_node} if isinstance(hdf_node, h5py.Dataset): - logger.debug(f'===== FIELD (/{hdf_path}): {hdf_node}') - val = str(hdf_node[()]).split('\n') if len(hdf_node.shape) <= 1 else str( - hdf_node[0]).split('\n') + logger.debug(f"===== FIELD (/{hdf_path}): {hdf_node}") + val = ( + str(hdf_node[()]).split("\n") + if len(hdf_node.shape) <= 1 + else str(hdf_node[0]).split("\n") + ) logger.debug(f'value: {val[0]} {"..." if len(val) > 1 else ""}') else: logger.debug( @@ -319,46 +358,54 @@ def process_node(hdf_node, hdf_path, parser, logger, doc=True): ) (req_str, nxdef, nxdl_path) = get_nxdl_doc(hdf_info, logger, doc) if parser is not None and isinstance(hdf_node, h5py.Dataset): - parser({"hdf_info": hdf_info, + parser( + { + "hdf_info": hdf_info, "nxdef": nxdef, "nxdl_path": nxdl_path, "val": val, - "logger": logger}) + "logger": logger, + } + ) for key, value in hdf_node.attrs.items(): - logger.debug(f'===== ATTRS (/{hdf_path}@{key})') - val = str(value).split('\n') + logger.debug(f"===== ATTRS (/{hdf_path}@{key})") + val = str(value).split("\n") logger.debug(f'value: {val[0]} {"..." if len(val) > 1 else ""}') - (req_str, nxdef, nxdl_path) = \ - get_nxdl_doc(hdf_info, logger, doc, attr=key) + (req_str, nxdef, nxdl_path) = get_nxdl_doc(hdf_info, logger, doc, attr=key) if ( parser is not None and req_str is not None - and 'NOT IN SCHEMA' not in req_str - and 'None' not in req_str + and "NOT IN SCHEMA" not in req_str + and "None" not in req_str ): - parser({"hdf_info": hdf_info, + parser( + { + "hdf_info": hdf_info, "nxdef": nxdef, "nxdl_path": nxdl_path, "val": val, - "logger": logger}, attr=key) + "logger": logger, + }, + attr=key, + ) def logger_auxiliary_signal(logger, nxdata): """Handle the presence of auxiliary signal""" - aux = nxdata.attrs.get('auxiliary_signals') + aux = nxdata.attrs.get("auxiliary_signals") if aux is not None: if isinstance(aux, str): aux = [aux] for asig in aux: - logger.debug(f'Further auxiliary signal has been identified: {asig}') + logger.debug(f"Further auxiliary signal has been identified: {asig}") return logger def print_default_plotable_header(logger): """Print a three-lines header""" - logger.debug('========================') - logger.debug('=== Default Plotable ===') - logger.debug('========================') + logger.debug("========================") + logger.debug("=== Default Plotable ===") + logger.debug("========================") def get_default_plotable(root, logger): @@ -376,10 +423,10 @@ def get_default_plotable(root, logger): if not nxentry: nxentry = entry_helper(root) if not nxentry: - logger.debug('No NXentry has been found') + logger.debug("No NXentry has been found") return - logger.debug('') - logger.debug('NXentry has been identified: ' + nxentry.name) + logger.debug("") + logger.debug("NXentry has been identified: " + nxentry.name) # nxdata nxdata = None nxgroup = nxentry @@ -395,10 +442,10 @@ def get_default_plotable(root, logger): else: nxdata = nxgroup if not nxdata: - logger.debug('No NXdata group has been found') + logger.debug("No NXdata group has been found") return - logger.debug('') - 
logger.debug('NXdata group has been identified: ' + nxdata.name)
+    logger.debug("")
+    logger.debug("NXdata group has been identified: " + nxdata.name)
     process_node(nxdata, nxdata.name, None, logger, False)
     # signal
     signal = None
@@ -410,10 +457,10 @@ def get_default_plotable(root, logger):
     if not signal:
         signal = signal_helper(nxdata)
     if not signal:
-        logger.debug('No Signal has been found')
+        logger.debug("No Signal has been found")
         return
-    logger.debug('')
-    logger.debug('Signal has been identified: ' + signal.name)
+    logger.debug("")
+    logger.debug("Signal has been identified: " + signal.name)
     process_node(signal, signal.name, None, logger, False)
     logger = logger_auxiliary_signal(logger, nxdata)  # check auxiliary_signals
     dim = len(signal.shape)
@@ -425,8 +472,11 @@ def entry_helper(root):
     """Check entry related data"""
     nxentries = []
     for key in root.keys():
-        if isinstance(root[key], h5py.Group) and root[key].attrs.get('NX_class') and \
-                root[key].attrs['NX_class'] == "NXentry":
+        if (
+            isinstance(root[key], h5py.Group)
+            and root[key].attrs.get("NX_class")
+            and root[key].attrs["NX_class"] == "NXentry"
+        ):
             nxentries.append(root[key])
     if len(nxentries) >= 1:
         return nxentries[0]
@@ -435,11 +485,14 @@ def entry_helper(root):

 def nxdata_helper(nxentry):
     """Check if nxentry hdf5 object has a NX_class and, if it contains NXdata,
-return its value"""
+    return its value"""
     lnxdata = []
     for key in nxentry.keys():
-        if isinstance(nxentry[key], h5py.Group) and nxentry[key].attrs.get('NX_class') and \
-                nxentry[key].attrs['NX_class'] == "NXdata":
+        if (
+            isinstance(nxentry[key], h5py.Group)
+            and nxentry[key].attrs.get("NX_class")
+            and nxentry[key].attrs["NX_class"] == "NXdata"
+        ):
             lnxdata.append(nxentry[key])
     if len(lnxdata) >= 1:
         return lnxdata[0]
@@ -452,12 +505,17 @@ def signal_helper(nxdata):
     for key in nxdata.keys():
         if isinstance(nxdata[key], h5py.Dataset):
             signals.append(nxdata[key])
-    if len(signals) == 1:  # v3: as there was no selection given, only 1 data field shall exists
+    if (
+        len(signals) == 1
+    ):  # v3: as there was no selection given, only 1 data field shall exist
         return signals[0]
     if len(signals) > 1:  # v2: select the one with an attribute signal="1" attribute
         for sig in signals:
-            if sig.attrs.get("signal") and sig.attrs.get("signal") is str and \
-                    sig.attrs.get("signal") == "1":
+            if (
+                sig.attrs.get("signal")
+                and isinstance(sig.attrs.get("signal"), str)
+                and sig.attrs.get("signal") == "1"
+            ):
                 return sig
     return None


@@ -469,7 +527,7 @@ def find_attrib_axis_actual_dim_num(nxdata, a_item, ax_list):
     for key in nxdata.keys():
         if isinstance(nxdata[key], h5py.Dataset):
             try:
-                if nxdata[key].attrs['axis'] == a_item + 1:
+                if nxdata[key].attrs["axis"] == a_item + 1:
                     lax.append(nxdata[key])
             except KeyError:
                 pass
@@ -478,7 +536,7 @@
     # if there are more alternatives, prioritise the one with an attribute primary="1"
     elif len(lax) > 1:
         for sax in lax:
-            if sax.attrs.get('primary') and sax.attrs.get('primary') == 1:
+            if sax.attrs.get("primary") and sax.attrs.get("primary") == 1:
                 ax_list.insert(0, sax)
             else:
                 ax_list.append(sax)
@@ -489,7 +547,7 @@
     try:
         if isinstance(ax_datasets, str):  # single axis is defined
             # explicite definition of dimension number
-            ind = nxdata.attrs.get(ax_datasets + '_indices')
+            ind = nxdata.attrs.get(ax_datasets + "_indices")
             if ind and ind is int:
                 if ind == a_item:
                     ax_list.append(nxdata[ax_datasets])
@@ -498,7 +556,7 @@ def 
get_single_or_multiple_axes(nxdata, ax_datasets, a_item, ax_list): else: # multiple axes are listed # explicite definition of dimension number for aax in ax_datasets: - ind = nxdata.attrs.get(aax + '_indices') + ind = nxdata.attrs.get(aax + "_indices") if ind and isinstance(ind, int): if ind == a_item: ax_list.append(nxdata[aax]) @@ -516,22 +574,25 @@ def axis_helper(dim, nxdata, signal, axes, logger): ax_datasets = nxdata.attrs.get("axes") # primary axes listed in attribute axes ax_list = get_single_or_multiple_axes(nxdata, ax_datasets, a_item, ax_list) for attr in nxdata.attrs.keys(): # check for corresponding AXISNAME_indices - if attr.endswith('_indices') and nxdata.attrs[attr] == a_item and \ - nxdata[attr.split('_indices')[0]] not in ax_list: - ax_list.append(nxdata[attr.split('_indices')[0]]) + if ( + attr.endswith("_indices") + and nxdata.attrs[attr] == a_item + and nxdata[attr.split("_indices")[0]] not in ax_list + ): + ax_list.append(nxdata[attr.split("_indices")[0]]) # v2 # check for ':' separated axes defined in Signal if not ax_list: try: - ax_datasets = signal.attrs.get("axes").split(':') + ax_datasets = signal.attrs.get("axes").split(":") ax_list.append(nxdata[ax_datasets[a_item]]) except (KeyError, AttributeError): pass if not ax_list: # check for axis/primary specifications find_attrib_axis_actual_dim_num(nxdata, a_item, ax_list) axes.append(ax_list) - logger.debug('') + logger.debug("") logger.debug( - f'For Axis #{a_item}, {len(ax_list)} axes have been identified: {str(ax_list)}' + f"For Axis #{a_item}, {len(ax_list)} axes have been identified: {str(ax_list)}" ) @@ -539,26 +600,27 @@ def get_all_is_a_rel_from_hdf_node(hdf_node, hdf_path): """Return list of nxdl concept paths for a nxdl element which corresponds to hdf node. """ - hdf_info = {'hdf_path': hdf_path, 'hdf_node': hdf_node} - (_, _, elist) = \ - get_inherited_hdf_nodes(nx_name=get_nxdl_entry(hdf_info), hdf_node=hdf_node, - hdf_path=hdf_info['hdf_path'] if 'hdf_path' in hdf_info else None, - hdf_root=hdf_info['hdf_root'] if 'hdf_root' in hdf_info else None) + hdf_info = {"hdf_path": hdf_path, "hdf_node": hdf_node} + (_, _, elist) = get_inherited_hdf_nodes( + nx_name=get_nxdl_entry(hdf_info), + hdf_node=hdf_node, + hdf_path=hdf_info["hdf_path"] if "hdf_path" in hdf_info else None, + hdf_root=hdf_info["hdf_root"] if "hdf_root" in hdf_info else None, + ) return elist def hdf_node_to_self_concept_path(hdf_info, logger): - """ Get concept or nxdl path from given hdf_node. 
- """ + """Get concept or nxdl path from given hdf_node.""" # The bellow logger is for deactivatine unnecessary debug message above if logger is None: logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) (_, _, nxdl_path) = get_nxdl_doc(hdf_info, logger, None) - con_path = '' + con_path = "" if nxdl_path: for nd_ in nxdl_path: - con_path = con_path + '/' + get_node_name(nd_) + con_path = con_path + "/" + get_node_name(nd_) return con_path @@ -566,14 +628,17 @@ class HandleNexus: """documentation""" # pylint: disable=too-many-instance-attributes - def __init__(self, logger, nexus_file, - d_inq_nd=None, c_inq_nd=None, - is_in_memory_file=False): + def __init__( + self, logger, nexus_file, d_inq_nd=None, c_inq_nd=None, is_in_memory_file=False + ): self.logger = logger local_dir = os.path.abspath(os.path.dirname(__file__)) - self.input_file_name = nexus_file if nexus_file is not None else \ - os.path.join(local_dir, '../../tests/data/nexus/201805_WSe2_arpes.nxs') + self.input_file_name = ( + nexus_file + if nexus_file is not None + else os.path.join(local_dir, "../../tests/data/nexus/201805_WSe2_arpes.nxs") + ) self.parser = None self.in_file = None self.is_hdf5_file_obj = is_in_memory_file @@ -586,46 +651,55 @@ def visit_node(self, hdf_name, hdf_node): """Function called by h5py that iterates on each node of hdf5file. It allows h5py visititems function to visit nodes.""" if self.d_inq_nd is None and self.c_inq_nd is None: - process_node(hdf_node, '/' + hdf_name, self.parser, self.logger) - elif (self.d_inq_nd is not None - and hdf_name in (self.d_inq_nd, self.d_inq_nd[1:])): - process_node(hdf_node, '/' + hdf_name, self.parser, self.logger) + process_node(hdf_node, "/" + hdf_name, self.parser, self.logger) + elif self.d_inq_nd is not None and hdf_name in ( + self.d_inq_nd, + self.d_inq_nd[1:], + ): + process_node(hdf_node, "/" + hdf_name, self.parser, self.logger) elif self.c_inq_nd is not None: - attributed_concept = self.c_inq_nd.split('@') + attributed_concept = self.c_inq_nd.split("@") attr = attributed_concept[1] if len(attributed_concept) > 1 else None - elist = get_all_is_a_rel_from_hdf_node(hdf_node, '/' + hdf_name) + elist = get_all_is_a_rel_from_hdf_node(hdf_node, "/" + hdf_name) if elist is None: return fnd_superclass = False fnd_superclass_attr = False for elem in reversed(elist): - tmp_path = elem.get('nxdlbase').split('.nxdl')[0] - con_path = '/NX' + tmp_path.split('NX')[-1] + elem.get('nxdlpath') + tmp_path = elem.get("nxdlbase").split(".nxdl")[0] + con_path = "/NX" + tmp_path.split("NX")[-1] + elem.get("nxdlpath") if fnd_superclass or con_path == attributed_concept[0]: fnd_superclass = True if attr is None: self.hdf_path_list_for_c_inq_nd.append(hdf_name) break for attribute in hdf_node.attrs.keys(): - attr_concept = get_nxdl_child(elem, attribute, nexus_type='attribute', - go_base=False) - if attr_concept is not None and \ - attr_concept.get('nxdlpath').endswith(attr): + attr_concept = get_nxdl_child( + elem, attribute, nexus_type="attribute", go_base=False + ) + if attr_concept is not None and attr_concept.get( + "nxdlpath" + ).endswith(attr): fnd_superclass_attr = True - con_path = '/NX' + tmp_path.split('NX')[-1] \ - + attr_concept.get('nxdlpath') - self.hdf_path_list_for_c_inq_nd.append(hdf_name + "@" + attribute) + con_path = ( + "/NX" + + tmp_path.split("NX")[-1] + + attr_concept.get("nxdlpath") + ) + self.hdf_path_list_for_c_inq_nd.append( + hdf_name + "@" + attribute + ) break if fnd_superclass_attr: break def not_yet_visited(self, root, name): 
"""checking if a new node has already been visited in its path""" - path = name.split('/') + path = name.split("/") for i in range(1, len(path)): - act_path = '/'.join(path[:i]) + act_path = "/".join(path[:i]) # print(act_path+' - '+name) - if root['/' + act_path] == root['/' + name]: + if root["/" + act_path] == root["/" + name]: return False return True @@ -636,7 +710,7 @@ def full_visit(self, root, hdf_node, name, func): func(name, hdf_node) if isinstance(hdf_node, h5py.Group): for ch_name, child in hdf_node.items(): - full_name = ch_name if len(name) == 0 else name + '/' + ch_name + full_name = ch_name if len(name) == 0 else name + "/" + ch_name if self.not_yet_visited(root, full_name): self.full_visit(root, child, full_name, func) @@ -648,12 +722,13 @@ def process_nexus_master_file(self, parser): self.in_file = h5py.File( self.input_file_name[0] if isinstance(self.input_file_name, list) - else self.input_file_name, 'r' + else self.input_file_name, + "r", ) else: self.in_file = self.input_file_name - self.full_visit(self.in_file, self.in_file, '', self.visit_node) + self.full_visit(self.in_file, self.in_file, "", self.visit_node) if self.d_inq_nd is None and self.c_inq_nd is None: get_default_plotable(self.in_file, self.logger) @@ -669,48 +744,58 @@ def process_nexus_master_file(self, parser): @click.command() @click.option( - '-f', - '--nexus-file', + "-f", + "--nexus-file", required=False, default=None, - help=('NeXus file with extension .nxs to learn NeXus different concept' - ' documentation and concept.') + help=( + "NeXus file with extension .nxs to learn NeXus different concept" + " documentation and concept." + ), ) @click.option( - '-d', - '--documentation', + "-d", + "--documentation", required=False, default=None, - help=("Definition path in nexus output (.nxs) file. Returns debug" - "log relavent with that definition path. Example: /entry/data/delays") + help=( + "Definition path in nexus output (.nxs) file. Returns debug" + "log relavent with that definition path. Example: /entry/data/delays" + ), ) @click.option( - '-c', - '--concept', + "-c", + "--concept", required=False, default=None, - help=("Concept path from application definition file (.nxdl,xml). Finds out" - "all the available concept definition (IS-A realation) for rendered" - "concept path. Example: /NXarpes/ENTRY/INSTRUMENT/analyser") + help=( + "Concept path from application definition file (.nxdl,xml). Finds out" + "all the available concept definition (IS-A realation) for rendered" + "concept path. Example: /NXarpes/ENTRY/INSTRUMENT/analyser" + ), ) def main(nexus_file, documentation, concept): """The main function to call when used as a script.""" logging_format = "%(levelname)s: %(message)s" stdout_handler = logging.StreamHandler(sys.stdout) stdout_handler.setLevel(logging.DEBUG) - logging.basicConfig(level=logging.INFO, format=logging_format, handlers=[stdout_handler]) + logging.basicConfig( + level=logging.INFO, format=logging_format, handlers=[stdout_handler] + ) logger = logging.getLogger(__name__) logger.addHandler(stdout_handler) logger.setLevel(logging.DEBUG) logger.propagate = False if documentation and concept: - raise ValueError("Only one option either documentation (-d) or is_a relation " - "with a concept (-c) can be requested.") - nexus_helper = HandleNexus(logger, nexus_file, - d_inq_nd=documentation, - c_inq_nd=concept) + raise ValueError( + "Only one option either documentation (-d) or is_a relation " + "with a concept (-c) can be requested." 
diff --git a/pynxtools/nexus/nxdl_utils.py b/pynxtools/nexus/nxdl_utils.py
index aa64d5caa..48c6e4562 100644
--- a/pynxtools/nexus/nxdl_utils.py
+++ b/pynxtools/nexus/nxdl_utils.py
@@ -15,9 +15,13 @@ class NxdlAttributeError(Exception):

 def get_app_defs_names():
     """Returns all the AppDef names without their extension: .nxdl.xml"""
-    app_def_path_glob = f"{get_nexus_definitions_path()}{os.sep}applications{os.sep}*.nxdl*"
-    contrib_def_path_glob = (f"{get_nexus_definitions_path()}{os.sep}"
-                             f"contributed_definitions{os.sep}*.nxdl*")
+    app_def_path_glob = (
+        f"{get_nexus_definitions_path()}{os.sep}applications{os.sep}*.nxdl*"
+    )
+    contrib_def_path_glob = (
+        f"{get_nexus_definitions_path()}{os.sep}"
+        f"contributed_definitions{os.sep}*.nxdl*"
+    )
     files = sorted(glob(app_def_path_glob)) + sorted(glob(contrib_def_path_glob))
     return [os.path.basename(file).split(".")[0] for file in files] + ["NXroot"]

@@ -31,9 +35,9 @@ def get_xml_root(file_path):

 def get_nexus_definitions_path():
     """Check NEXUS_DEF_PATH variable.
-If it is empty, this function is filling it"""
+    If it is empty, this function fills it"""
     try:  # either given by sys env
-        return os.environ['NEXUS_DEF_PATH']
+        return os.environ["NEXUS_DEF_PATH"]
     except KeyError:  # or it should be available locally under the dir 'definitions'
         local_dir = os.path.abspath(os.path.dirname(__file__))
         return os.path.join(local_dir, f"..{os.sep}definitions")
@@ -42,61 +46,70 @@ def get_nexus_definitions_path():

 def get_hdf_root(hdf_node):
     """Get the root HDF5 node"""
     node = hdf_node
-    while node.name != '/':
+    while node.name != "/":
         node = node.parent
     return node


 def get_hdf_parent(hdf_info):
     """Get the parent of an hdf_node in an hdf_info"""
-    if 'hdf_path' not in hdf_info:
-        return hdf_info['hdf_node'].parent
-    node = get_hdf_root(hdf_info['hdf_node']) if 'hdf_root' not in hdf_info \
-        else hdf_info['hdf_root']
-    for child_name in hdf_info['hdf_path'].split('/'):
+    if "hdf_path" not in hdf_info:
+        return hdf_info["hdf_node"].parent
+    node = (
+        get_hdf_root(hdf_info["hdf_node"])
+        if "hdf_root" not in hdf_info
+        else hdf_info["hdf_root"]
+    )
+    for child_name in hdf_info["hdf_path"].split("/"):
         node = node[child_name]
     return node


 def get_parent_path(hdf_name):
     """Get parent path"""
-    return '/'.join(hdf_name.split('/')[:-1])
+    return "/".join(hdf_name.split("/")[:-1])


 def get_hdf_info_parent(hdf_info):
     """Get the hdf_info for the parent of an hdf_node in an hdf_info"""
-    if 'hdf_path' not in hdf_info:
-        return {'hdf_node': hdf_info['hdf_node'].parent}
-    node = get_hdf_root(hdf_info['hdf_node']) if 'hdf_root' not in hdf_info \
-        else hdf_info['hdf_root']
-    for child_name in hdf_info['hdf_path'].split('/')[1:-1]:
+    if "hdf_path" not in hdf_info:
+        return {"hdf_node": hdf_info["hdf_node"].parent}
+    node = (
+        get_hdf_root(hdf_info["hdf_node"])
+        if "hdf_root" not in hdf_info
+        else hdf_info["hdf_root"]
+    )
+    for child_name in hdf_info["hdf_path"].split("/")[1:-1]:
         node = node[child_name]
-    return {'hdf_node': node, 'hdf_path': get_parent_path(hdf_info['hdf_path'])}
+    return {"hdf_node": node, "hdf_path": get_parent_path(hdf_info["hdf_path"])}
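
The helpers above share one small convention: an `hdf_info` is a plain dict holding the h5py node under "hdf_node", optionally with "hdf_path" and "hdf_root" keys. A minimal sketch, assuming a file that actually contains an /entry/data group:

```python
# Hypothetical sketch of the hdf_info dict convention; names are illustrative.
import h5py

from pynxtools.nexus.nxdl_utils import get_hdf_info_parent  # assumed import path

with h5py.File("example.nxs", "r") as h5f:
    hdf_info = {"hdf_node": h5f["/entry/data"], "hdf_path": "/entry/data"}
    parent_info = get_hdf_info_parent(hdf_info)
    print(parent_info["hdf_path"])  # -> "/entry"
```
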

 def get_nx_class(nxdl_elem):
     """Get the nexus class for a NXDL node"""
-    if 'category' in nxdl_elem.attrib.keys():
+    if "category" in nxdl_elem.attrib.keys():
         return None
     try:
-        return nxdl_elem.attrib['type']
+        return nxdl_elem.attrib["type"]
     except KeyError:
-        return 'NX_CHAR'
+        return "NX_CHAR"


 def get_nx_namefit(hdf_name, name, name_any=False):
     """Checks if an HDF5 node name corresponds to a child of the NXDL element
-uppercase letters in front can be replaced by arbitraty name, but
-uppercase to lowercase match is preferred,
-so such match is counted as a measure of the fit"""
+    uppercase letters in front can be replaced by an arbitrary name, but
+    an uppercase to lowercase match is preferred,
+    so such a match is counted as a measure of the fit"""
     if name == hdf_name:
         return len(name) * 2
     # count leading capitals
     counting = 0
     while counting < len(name) and name[counting].upper() == name[counting]:
         counting += 1
-    if name_any or counting == len(name) or \
-            (counting > 0 and hdf_name.endswith(name[counting:])):  # if potential fit
+    if (
+        name_any
+        or counting == len(name)
+        or (counting > 0 and hdf_name.endswith(name[counting:]))
+    ):  # if potential fit
         # count the matching chars
         fit = 0
         for i in range(min(counting, len(hdf_name))):
@@ -107,24 +120,31 @@ def get_nx_namefit(hdf_name, name, name_any=False):
         if fit == min(counting, len(hdf_name)):  # accept only full fits as better fits
             return fit
         return 0
-    return -1 # no fit
+    return -1  # no fit


 def get_nx_classes():
     """Read base classes from the NeXus definition folder.
-Check each file in base_classes, applications, contributed_definitions.
-If its category attribute is 'base', then it is added to the list. """
-    base_classes = sorted(glob(os.path.join(get_nexus_definitions_path(),
-                                            'base_classes', '*.nxdl.xml')))
-    applications = sorted(glob(os.path.join(get_nexus_definitions_path(),
-                                            'applications', '*.nxdl.xml')))
-    contributed = sorted(glob(os.path.join(get_nexus_definitions_path(),
-                                           'contributed_definitions', '*.nxdl.xml')))
+    Check each file in base_classes, applications, contributed_definitions.
+ If its category attribute is 'base', then it is added to the list.""" + base_classes = sorted( + glob(os.path.join(get_nexus_definitions_path(), "base_classes", "*.nxdl.xml")) + ) + applications = sorted( + glob(os.path.join(get_nexus_definitions_path(), "applications", "*.nxdl.xml")) + ) + contributed = sorted( + glob( + os.path.join( + get_nexus_definitions_path(), "contributed_definitions", "*.nxdl.xml" + ) + ) + ) nx_clss = [] for nexus_file in base_classes + applications + contributed: root = get_xml_root(nexus_file) - if root.attrib['category'] == 'base': - nx_clss.append(str(nexus_file[nexus_file.rindex(os.sep) + 1:])[:-9]) + if root.attrib["category"] == "base": + nx_clss.append(str(nexus_file[nexus_file.rindex(os.sep) + 1 :])[:-9]) nx_clss = sorted(nx_clss) return nx_clss @@ -139,12 +159,12 @@ def get_nx_units(): units_and_type_list.append(i) flag = False for line in units_and_type_list: - if line == 'anyUnitsAttr': + if line == "anyUnitsAttr": flag = True nx_units = [] - elif 'NX' in line and flag is True: + elif "NX" in line and flag is True: nx_units.append(line) - elif line == 'primitiveType': + elif line == "primitiveType": flag = False else: pass @@ -153,7 +173,7 @@ def get_nx_units(): def get_nx_attribute_type(): """Read attribute types from the NeXus definition/nxdlTypes.xsd file""" - filepath = get_nexus_definitions_path() + '/nxdlTypes.xsd' + filepath = get_nexus_definitions_path() + "/nxdlTypes.xsd" root = get_xml_root(filepath) units_and_type_list = [] for child in root: @@ -161,12 +181,12 @@ def get_nx_attribute_type(): units_and_type_list.append(i) flag = False for line in units_and_type_list: - if line == 'primitiveType': + if line == "primitiveType": flag = True nx_types = [] - elif 'NX' in line and flag is True: + elif "NX" in line and flag is True: nx_types.append(line) - elif line == 'anyUnitsAttr': + elif line == "anyUnitsAttr": flag = False else: pass @@ -174,23 +194,23 @@ def get_nx_attribute_type(): def get_node_name(node): - '''Node - xml node. Returns html documentation name. + """Node - xml node. Returns html documentation name. Either as specified by the 'name' or taken from the type (nx_class). 
Note that if only class name is available, the NX prefix is removed and
-    the string is converted to UPPER case.'''
-    if 'name' in node.attrib.keys():
-        name = node.attrib['name']
+    the string is converted to UPPER case."""
+    if "name" in node.attrib.keys():
+        name = node.attrib["name"]
     else:
-        name = node.attrib['type']
-        if name.startswith('NX'):
+        name = node.attrib["type"]
+        if name.startswith("NX"):
             name = name[2:].upper()
     return name


 def belongs_to(nxdl_elem, child, name, class_type=None, hdf_name=None):
     """Checks if an HDF5 node name corresponds to a child of the NXDL element
-uppercase letters in front can be replaced by arbitraty name, but
-uppercase to lowercase match is preferred"""
+    uppercase letters in front can be replaced by an arbitrary name, but
+    an uppercase to lowercase match is preferred"""
     if class_type and get_nx_class(child) != class_type:
         return False
     act_htmlname = get_node_name(child)
@@ -200,7 +220,7 @@ def belongs_to(nxdl_elem, child, name, class_type=None, hdf_name=None):
     if not hdf_name:  # search for name fits is only allowed for hdf_nodes
         return False
     try:  # check if nameType allows different name
-        name_any = bool(child.attrib['nameType'] == "any")
+        name_any = bool(child.attrib["nameType"] == "any")
     except KeyError:
         name_any = False
     params = [act_htmlname, chk_name, name_any, nxdl_elem, child, name]
@@ -211,17 +231,25 @@ def belongs_to_capital(params):
     """Checking continues for Upper case"""
     (act_htmlname, chk_name, name_any, nxdl_elem, child, name) = params
     # or starts with capital and no reserved words used
-    if (name_any or 'A' <= act_htmlname[0] <= 'Z') and \
-            name != 'doc' and name != 'enumeration':
+    if (
+        (name_any or "A" <= act_htmlname[0] <= "Z")
+        and name != "doc"
+        and name != "enumeration"
+    ):
         fit = get_nx_namefit(chk_name, act_htmlname, name_any)  # check if name fits
         if fit < 0:
             return False
         for child2 in nxdl_elem:
-            if get_local_name_from_xml(child) != \
-                    get_local_name_from_xml(child2) or get_node_name(child2) == act_htmlname:
+            if (
+                get_local_name_from_xml(child) != get_local_name_from_xml(child2)
+                or get_node_name(child2) == act_htmlname
+            ):
                 continue
             # check if the name of another sibling fits better
-            name_any2 = "nameType" in child2.attrib.keys() and child2.attrib["nameType"] == "any"
+            name_any2 = (
+                "nameType" in child2.attrib.keys()
+                and child2.attrib["nameType"] == "any"
+            )
             fit2 = get_nx_namefit(chk_name, get_node_name(child2), name_any2)
             if fit2 > fit:
                 return False
@@ -232,68 +260,81 @@ def belongs_to_capital(params):

 def get_local_name_from_xml(element):
     """Helper function to extract the element tag without the namespace."""
-    return element.tag[element.tag.rindex("}") + 1:]
+    return element.tag[element.tag.rindex("}") + 1 :]


 def get_own_nxdl_child_reserved_elements(child, name, nxdl_elem):
     """checking reserved elements, like doc, enumeration"""
-    if get_local_name_from_xml(child) == 'doc' and name == 'doc':
-        if nxdl_elem.get('nxdlbase'):
-            child.set('nxdlbase', nxdl_elem.get('nxdlbase'))
-            child.set('nxdlbase_class', nxdl_elem.get('nxdlbase_class'))
-            child.set('nxdlpath', nxdl_elem.get('nxdlpath') + '/doc')
+    if get_local_name_from_xml(child) == "doc" and name == "doc":
+        if nxdl_elem.get("nxdlbase"):
+            child.set("nxdlbase", nxdl_elem.get("nxdlbase"))
+            child.set("nxdlbase_class", nxdl_elem.get("nxdlbase_class"))
+            child.set("nxdlpath", nxdl_elem.get("nxdlpath") + "/doc")
         return child
-    if get_local_name_from_xml(child) == 'enumeration' and name == 'enumeration':
-        if nxdl_elem.get('nxdlbase'):
-            child.set('nxdlbase',
nxdl_elem.get('nxdlbase')) - child.set('nxdlbase_class', nxdl_elem.get('nxdlbase_class')) - child.set('nxdlpath', nxdl_elem.get('nxdlpath') + '/enumeration') + if get_local_name_from_xml(child) == "enumeration" and name == "enumeration": + if nxdl_elem.get("nxdlbase"): + child.set("nxdlbase", nxdl_elem.get("nxdlbase")) + child.set("nxdlbase_class", nxdl_elem.get("nxdlbase_class")) + child.set("nxdlpath", nxdl_elem.get("nxdlpath") + "/enumeration") return child return False def get_own_nxdl_child_base_types(child, class_type, nxdl_elem, name, hdf_name): """checking base types of group, field,m attribute""" - if get_local_name_from_xml(child) == 'group': - if (class_type is None or (class_type and get_nx_class(child) == class_type)) and \ - belongs_to(nxdl_elem, child, name, class_type, hdf_name): - if nxdl_elem.get('nxdlbase'): - child.set('nxdlbase', nxdl_elem.get('nxdlbase')) - child.set('nxdlbase_class', nxdl_elem.get('nxdlbase_class')) - child.set('nxdlpath', nxdl_elem.get('nxdlpath') + '/' + get_node_name(child)) + if get_local_name_from_xml(child) == "group": + if ( + class_type is None or (class_type and get_nx_class(child) == class_type) + ) and belongs_to(nxdl_elem, child, name, class_type, hdf_name): + if nxdl_elem.get("nxdlbase"): + child.set("nxdlbase", nxdl_elem.get("nxdlbase")) + child.set("nxdlbase_class", nxdl_elem.get("nxdlbase_class")) + child.set( + "nxdlpath", nxdl_elem.get("nxdlpath") + "/" + get_node_name(child) + ) return child - if get_local_name_from_xml(child) == 'field' and \ - belongs_to(nxdl_elem, child, name, None, hdf_name): - if nxdl_elem.get('nxdlbase'): - child.set('nxdlbase', nxdl_elem.get('nxdlbase')) - child.set('nxdlbase_class', nxdl_elem.get('nxdlbase_class')) - child.set('nxdlpath', nxdl_elem.get('nxdlpath') + '/' + get_node_name(child)) + if get_local_name_from_xml(child) == "field" and belongs_to( + nxdl_elem, child, name, None, hdf_name + ): + if nxdl_elem.get("nxdlbase"): + child.set("nxdlbase", nxdl_elem.get("nxdlbase")) + child.set("nxdlbase_class", nxdl_elem.get("nxdlbase_class")) + child.set( + "nxdlpath", nxdl_elem.get("nxdlpath") + "/" + get_node_name(child) + ) return child - if get_local_name_from_xml(child) == 'attribute' and \ - belongs_to(nxdl_elem, child, name, None, hdf_name): - if nxdl_elem.get('nxdlbase'): - child.set('nxdlbase', nxdl_elem.get('nxdlbase')) - child.set('nxdlbase_class', nxdl_elem.get('nxdlbase_class')) - child.set('nxdlpath', nxdl_elem.get('nxdlpath') + '/' + get_node_name(child)) + if get_local_name_from_xml(child) == "attribute" and belongs_to( + nxdl_elem, child, name, None, hdf_name + ): + if nxdl_elem.get("nxdlbase"): + child.set("nxdlbase", nxdl_elem.get("nxdlbase")) + child.set("nxdlbase_class", nxdl_elem.get("nxdlbase_class")) + child.set( + "nxdlpath", nxdl_elem.get("nxdlpath") + "/" + get_node_name(child) + ) return child return False -def get_own_nxdl_child(nxdl_elem, name, class_type=None, hdf_name=None, nexus_type=None): +def get_own_nxdl_child( + nxdl_elem, name, class_type=None, hdf_name=None, nexus_type=None +): """Checks if an NXDL child node fits to the specific name (either nxdl or hdf) - name - nxdl name - class_type - nxdl type or hdf classname (for groups, it is obligatory) - hdf_name - hdf name""" + name - nxdl name + class_type - nxdl type or hdf classname (for groups, it is obligatory) + hdf_name - hdf name""" for child in nxdl_elem: - if 'name' in child.attrib and child.attrib['name'] == name: - if nxdl_elem.get('nxdlbase'): - child.set('nxdlbase', nxdl_elem.get('nxdlbase')) - 
child.set('nxdlbase_class', nxdl_elem.get('nxdlbase_class')) - child.set('nxdlpath', nxdl_elem.get('nxdlpath') + '/' + get_node_name(child)) + if "name" in child.attrib and child.attrib["name"] == name: + if nxdl_elem.get("nxdlbase"): + child.set("nxdlbase", nxdl_elem.get("nxdlbase")) + child.set("nxdlbase_class", nxdl_elem.get("nxdlbase_class")) + child.set( + "nxdlpath", nxdl_elem.get("nxdlpath") + "/" + get_node_name(child) + ) return child for child in nxdl_elem: if "name" in child.attrib and child.attrib["name"] == name: - child.set('nxdlbase', nxdl_elem.get('nxdlbase')) + child.set("nxdlbase", nxdl_elem.get("nxdlbase")) return child for child in nxdl_elem: @@ -302,7 +343,9 @@ def get_own_nxdl_child(nxdl_elem, name, class_type=None, hdf_name=None, nexus_ty return result if nexus_type and get_local_name_from_xml(child) != nexus_type: continue - result = get_own_nxdl_child_base_types(child, class_type, nxdl_elem, name, hdf_name) + result = get_own_nxdl_child_base_types( + child, class_type, nxdl_elem, name, hdf_name + ) if result is not False: return result return None @@ -312,19 +355,25 @@ def find_definition_file(bc_name): """find the nxdl file corresponding to the name. Note that it first checks in contributed and goes beyond only if no contributed found""" bc_filename = None - for nxdl_folder in ['contributed_definitions', 'base_classes', 'applications']: - if os.path.exists(f"{get_nexus_definitions_path()}{os.sep}" - f"{nxdl_folder}{os.sep}{bc_name}.nxdl.xml"): - bc_filename = f"{get_nexus_definitions_path()}{os.sep}" \ - f"{nxdl_folder}{os.sep}{bc_name}.nxdl.xml" + for nxdl_folder in ["contributed_definitions", "base_classes", "applications"]: + if os.path.exists( + f"{get_nexus_definitions_path()}{os.sep}" + f"{nxdl_folder}{os.sep}{bc_name}.nxdl.xml" + ): + bc_filename = ( + f"{get_nexus_definitions_path()}{os.sep}" + f"{nxdl_folder}{os.sep}{bc_name}.nxdl.xml" + ) break return bc_filename -def get_nxdl_child(nxdl_elem, name, class_type=None, hdf_name=None, nexus_type=None, go_base=True): # pylint: disable=too-many-arguments +def get_nxdl_child( + nxdl_elem, name, class_type=None, hdf_name=None, nexus_type=None, go_base=True +): # pylint: disable=too-many-arguments """Get the NXDL child node corresponding to a specific name -(e.g. of an HDF5 node,or of a documentation) note that if child is not found in application -definition, it also checks for the base classes""" + (e.g. of an HDF5 node,or of a documentation) note that if child is not found in application + definition, it also checks for the base classes""" # search for possible fits for hdf_nodes : skipped # only exact hits are returned when searching an nxdl child own_child = get_own_nxdl_child(nxdl_elem, name, class_type, hdf_name, nexus_type) @@ -333,18 +382,20 @@ def get_nxdl_child(nxdl_elem, name, class_type=None, hdf_name=None, nexus_type=N if not go_base: return None bc_name = get_nx_class(nxdl_elem) # check in the base class, app def or contributed - if bc_name[2] == '_': # filter primitive types + if bc_name[2] == "_": # filter primitive types return None - if bc_name == "group": # Check if it is the root element. Then send to NXroot.nxdl.xml + if ( + bc_name == "group" + ): # Check if it is the root element. 
Then send to NXroot.nxdl.xml bc_name = "NXroot" bc_filename = find_definition_file(bc_name) if not bc_filename: - raise ValueError('nxdl file not found in definitions folder!') + raise ValueError("nxdl file not found in definitions folder!") bc_obj = ET.parse(bc_filename).getroot() - bc_obj.set('nxdlbase', bc_filename) - if 'category' in bc_obj.attrib: - bc_obj.set('nxdlbase_class', bc_obj.attrib['category']) - bc_obj.set('nxdlpath', '') + bc_obj.set("nxdlbase", bc_filename) + if "category" in bc_obj.attrib: + bc_obj.set("nxdlbase_class", bc_obj.attrib["category"]) + bc_obj.set("nxdlpath", "") return get_own_nxdl_child(bc_obj, name, class_type, hdf_name, nexus_type) @@ -352,12 +403,16 @@ def get_required_string(nxdl_elem): """Check for being REQUIRED, RECOMMENDED, OPTIONAL, NOT IN SCHEMA""" if nxdl_elem is None: return "<>" - is_optional = 'optional' in nxdl_elem.attrib.keys() \ - and nxdl_elem.attrib['optional'] == "true" - is_minoccurs = 'minOccurs' in nxdl_elem.attrib.keys() \ - and nxdl_elem.attrib['minOccurs'] == "0" - is_recommended = 'recommended' in nxdl_elem.attrib.keys() \ - and nxdl_elem.attrib['recommended'] == "true" + is_optional = ( + "optional" in nxdl_elem.attrib.keys() and nxdl_elem.attrib["optional"] == "true" + ) + is_minoccurs = ( + "minOccurs" in nxdl_elem.attrib.keys() and nxdl_elem.attrib["minOccurs"] == "0" + ) + is_recommended = ( + "recommended" in nxdl_elem.attrib.keys() + and nxdl_elem.attrib["recommended"] == "true" + ) if is_recommended: return "<>" @@ -365,7 +420,7 @@ def get_required_string(nxdl_elem): return "<>" # default optionality: in BASE CLASSES is true; in APPLICATIONS is false try: - if nxdl_elem.get('nxdlbase_class') == 'base': + if nxdl_elem.get("nxdlbase_class") == "base": return "<>" except TypeError: return "<>" @@ -376,7 +431,7 @@ def get_required_string(nxdl_elem): def write_doc_string(logger, doc, attr): """Simple function that prints a line in the logger if doc exists""" if doc: - logger.debug("@" + attr + ' [NX_CHAR]') + logger.debug("@" + attr + " [NX_CHAR]") return logger, doc, attr @@ -386,61 +441,94 @@ def try_find_units(logger, elem, nxdl_path, doc, attr): try: # try to find if units is defined inside the field in the NXDL element unit = elem.attrib[attr] if doc: - logger.debug(get_node_concept_path(elem) + "@" + attr + ' [' + unit + ']') + logger.debug(get_node_concept_path(elem) + "@" + attr + " [" + unit + "]") elem = None nxdl_path.append(attr) - except KeyError: # otherwise try to find if units is defined as a child of the NXDL element + except ( + KeyError + ): # otherwise try to find if units is defined as a child of the NXDL element orig_elem = elem - elem = get_nxdl_child(elem, attr, nexus_type='attribute') + elem = get_nxdl_child(elem, attr, nexus_type="attribute") if elem is not None: if doc: - logger.debug(get_node_concept_path(orig_elem) - + "@" + attr + ' - [' + get_nx_class(elem) + ']') + logger.debug( + get_node_concept_path(orig_elem) + + "@" + + attr + + " - [" + + get_nx_class(elem) + + "]" + ) nxdl_path.append(elem) else: # if no units category were defined in NXDL: if doc: - logger.debug(get_node_concept_path(orig_elem) - + "@" + attr + " - REQUIRED, but undefined unit category") + logger.debug( + get_node_concept_path(orig_elem) + + "@" + + attr + + " - REQUIRED, but undefined unit category" + ) nxdl_path.append(attr) return logger, elem, nxdl_path, doc, attr def check_attr_name_nxdl(param): """Check for ATTRIBUTENAME_units in NXDL (normal). 
-If not defined, check for ATTRIBUTENAME to see if the ATTRIBUTE -is in the SCHEMA, but no units category were defined. """ + If not defined, check for ATTRIBUTENAME to see if the ATTRIBUTE + is in the SCHEMA, but no units category were defined.""" (logger, elem, nxdl_path, doc, attr, req_str) = param orig_elem = elem - elem2 = get_nxdl_child(elem, attr, nexus_type='attribute') + elem2 = get_nxdl_child(elem, attr, nexus_type="attribute") if elem2 is not None: # check for ATTRIBUTENAME_units in NXDL (normal) elem = elem2 if doc: - logger.debug(get_node_concept_path(orig_elem) - + "@" + attr + ' - [' + get_nx_class(elem) + ']') + logger.debug( + get_node_concept_path(orig_elem) + + "@" + + attr + + " - [" + + get_nx_class(elem) + + "]" + ) nxdl_path.append(elem) else: # if not defined, check for ATTRIBUTENAME to see if the ATTRIBUTE # is in the SCHEMA, but no units category were defined - elem2 = get_nxdl_child(elem, attr[:-6], nexus_type='attribute') + elem2 = get_nxdl_child(elem, attr[:-6], nexus_type="attribute") if elem2 is not None: - req_str = '<>' + req_str = "<>" if doc: - logger.debug(get_node_concept_path(orig_elem) - + "@" + attr + " - RECOMMENDED, but undefined unit category") + logger.debug( + get_node_concept_path(orig_elem) + + "@" + + attr + + " - RECOMMENDED, but undefined unit category" + ) nxdl_path.append(attr) else: # otherwise: NOT IN SCHEMA elem = elem2 if doc: - logger.debug(get_node_concept_path(orig_elem) + "@" + attr + " - IS NOT IN SCHEMA") + logger.debug( + get_node_concept_path(orig_elem) + + "@" + + attr + + " - IS NOT IN SCHEMA" + ) return logger, elem, nxdl_path, doc, attr, req_str def try_find_default(logger, orig_elem, elem, nxdl_path, doc, attr): # pylint: disable=too-many-arguments - """Try to find if default is defined as a child of the NXDL element """ + """Try to find if default is defined as a child of the NXDL element""" if elem is not None: if doc: - logger.debug(get_node_concept_path(orig_elem) - + "@" + attr + ' - [' + get_nx_class(elem) + ']') + logger.debug( + get_node_concept_path(orig_elem) + + "@" + + attr + + " - [" + + get_nx_class(elem) + + "]" + ) nxdl_path.append(elem) else: # if no default category were defined in NXDL: if doc: @@ -450,31 +538,41 @@ def try_find_default(logger, orig_elem, elem, nxdl_path, doc, attr): # pylint: def other_attrs(logger, orig_elem, elem, nxdl_path, doc, attr): # pylint: disable=too-many-arguments - """Handle remaining attributes """ + """Handle remaining attributes""" if elem is not None: if doc: - logger.debug(get_node_concept_path(orig_elem) - + "@" + attr + ' - [' + get_nx_class(elem) + ']') + logger.debug( + get_node_concept_path(orig_elem) + + "@" + + attr + + " - [" + + get_nx_class(elem) + + "]" + ) nxdl_path.append(elem) else: if doc: - logger.debug(get_node_concept_path(orig_elem) + "@" + attr + " - IS NOT IN SCHEMA") + logger.debug( + get_node_concept_path(orig_elem) + "@" + attr + " - IS NOT IN SCHEMA" + ) return logger, elem, nxdl_path, doc, attr def get_node_concept_path(elem): """get the short version of nxdlbase:nxdlpath""" - return str(elem.get('nxdlbase').split('/')[-1] + ":" + elem.get('nxdlpath')) + return str(elem.get("nxdlbase").split("/")[-1] + ":" + elem.get("nxdlpath")) def get_doc(node, ntype, nxhtml, nxpath): """Get documentation""" # URL for html documentation - anchor = '' + anchor = "" for n_item in nxpath: anchor += n_item.lower() + "-" - anchor = ('https://manual.nexusformat.org/classes/', - nxhtml + "#" + anchor.replace('_', '-') + ntype) + anchor = ( + 
"https://manual.nexusformat.org/classes/", + nxhtml + "#" + anchor.replace("_", "-") + ntype, + ) if not ntype: anchor = anchor[:-1] doc = "" # RST documentation from the field 'doc' @@ -483,9 +581,13 @@ def get_doc(node, ntype, nxhtml, nxpath): doc = doc_field.text (index, enums) = get_enums(node) # enums if index: - enum_str = "\n " + ("Possible values:" - if len(enums.split(',')) > 1 - else "Obligatory value:") + "\n " + enums + "\n" + enum_str = ( + "\n " + + ("Possible values:" if len(enums.split(",")) > 1 else "Obligatory value:") + + "\n " + + enums + + "\n" + ) else: enum_str = "" return anchor, doc + enum_str @@ -496,17 +598,21 @@ def print_doc(node, ntype, level, nxhtml, nxpath): anchor, doc = get_doc(node, ntype, nxhtml, nxpath) print(" " * (level + 1) + anchor) preferred_width = 80 + level * 2 - wrapper = textwrap.TextWrapper(initial_indent=' ' * (level + 1), width=preferred_width, - subsequent_indent=' ' * (level + 1), expand_tabs=False, - tabsize=0) + wrapper = textwrap.TextWrapper( + initial_indent=" " * (level + 1), + width=preferred_width, + subsequent_indent=" " * (level + 1), + expand_tabs=False, + tabsize=0, + ) if doc is not None: - for par in doc.split('\n'): + for par in doc.split("\n"): print(wrapper.fill(par)) def get_namespace(element): """Extracts the namespace for elements in the NXDL""" - return element.tag[element.tag.index("{"):element.tag.rindex("}") + 1] + return element.tag[element.tag.index("{") : element.tag.rindex("}") + 1] def get_enums(node): @@ -519,15 +625,15 @@ def get_enums(node): for enumeration in node.findall(f"{namespace}enumeration"): for item in enumeration.findall(f"{namespace}item"): enums.append(item.attrib["value"]) - enums = ','.join(enums) + enums = ",".join(enums) if enums != "": - return (True, '[' + enums + ']') + return (True, "[" + enums + "]") return (False, "") # if there is no enumeration tag, returns empty string def add_base_classes(elist, nx_name=None, elem: ET.Element = None): """Add the base classes corresponding to the last eleme in elist to the list. Note that if -elist is empty, a nxdl file with the name of nx_name or a rather room elem is used if provided""" + elist is empty, a nxdl file with the name of nx_name or a rather room elem is used if provided""" if elist and nx_name is None: nx_name = get_nx_class(elist[-1]) # to support recursive defintions, like NXsample in NXsample, the following test is removed @@ -540,48 +646,51 @@ def add_base_classes(elist, nx_name=None, elem: ET.Element = None): if nxdl_file_path is None: nxdl_file_path = f"{nx_name}.nxdl.xml" elem = ET.parse(nxdl_file_path).getroot() - elem.set('nxdlbase', nxdl_file_path) + elem.set("nxdlbase", nxdl_file_path) else: - elem.set('nxdlbase', '') - if 'category' in elem.attrib: - elem.set('nxdlbase_class', elem.attrib['category']) - elem.set('nxdlpath', '') + elem.set("nxdlbase", "") + if "category" in elem.attrib: + elem.set("nxdlbase_class", elem.attrib["category"]) + elem.set("nxdlpath", "") elist.append(elem) # add inherited base class - if 'extends' in elem.attrib and elem.attrib['extends'] != 'NXobject': - add_base_classes(elist, elem.attrib['extends']) + if "extends" in elem.attrib and elem.attrib["extends"] != "NXobject": + add_base_classes(elist, elem.attrib["extends"]) else: add_base_classes(elist) def set_nxdlpath(child, nxdl_elem): """ - Setting up child nxdlbase, nxdlpath and nxdlbase_class from nxdl_element. + Setting up child nxdlbase, nxdlpath and nxdlbase_class from nxdl_element. 
""" - if nxdl_elem.get('nxdlbase'): - child.set('nxdlbase', nxdl_elem.get('nxdlbase')) - child.set('nxdlbase_class', nxdl_elem.get('nxdlbase_class')) - child.set('nxdlpath', nxdl_elem.get('nxdlpath') + '/' + get_node_name(child)) + if nxdl_elem.get("nxdlbase"): + child.set("nxdlbase", nxdl_elem.get("nxdlbase")) + child.set("nxdlbase_class", nxdl_elem.get("nxdlbase_class")) + child.set("nxdlpath", nxdl_elem.get("nxdlpath") + "/" + get_node_name(child)) return child def get_direct_child(nxdl_elem, html_name): - """ returns the child of nxdl_elem which has a name - corresponding to the the html documentation name html_name""" + """returns the child of nxdl_elem which has a name + corresponding to the the html documentation name html_name""" for child in nxdl_elem: - if get_local_name_from_xml(child) in ('group', 'field', 'attribute') and \ - html_name == get_node_name(child): + if get_local_name_from_xml(child) in ( + "group", + "field", + "attribute", + ) and html_name == get_node_name(child): decorated_child = set_nxdlpath(child, nxdl_elem) return decorated_child return None def get_field_child(nxdl_elem, html_name): - """ returns the child of nxdl_elem which has a name - corresponding to the html documentation name html_name""" + """returns the child of nxdl_elem which has a name + corresponding to the html documentation name html_name""" data_child = None for child in nxdl_elem: - if get_local_name_from_xml(child) != 'field': + if get_local_name_from_xml(child) != "field": continue if get_node_name(child) == html_name: data_child = set_nxdlpath(child, nxdl_elem) @@ -590,27 +699,27 @@ def get_field_child(nxdl_elem, html_name): def get_best_nxdata_child(nxdl_elem, hdf_node, hdf_name): - """ returns the child of an NXdata nxdl_elem which has a name - corresponding to the hdf_name""" + """returns the child of an NXdata nxdl_elem which has a name + corresponding to the hdf_name""" nxdata = hdf_node.parent signals = [] - if 'signal' in nxdata.attrs.keys(): + if "signal" in nxdata.attrs.keys(): signals.append(nxdata.attrs.get("signal")) if "auxiliary_signals" in nxdata.attrs.keys(): for aux_signal in nxdata.attrs.get("auxiliary_signals"): signals.append(aux_signal) - data_child = get_field_child(nxdl_elem, 'DATA') - data_error_child = get_field_child(nxdl_elem, 'FIELDNAME_errors') + data_child = get_field_child(nxdl_elem, "DATA") + data_error_child = get_field_child(nxdl_elem, "FIELDNAME_errors") for signal in signals: if signal == hdf_name: return (data_child, 100) - if hdf_name.endswith('_errors') and signal == hdf_name[:-7]: + if hdf_name.endswith("_errors") and signal == hdf_name[:-7]: return (data_error_child, 100) axes = [] if "axes" in nxdata.attrs.keys(): for axis in nxdata.attrs.get("axes"): axes.append(axis) - axis_child = get_field_child(nxdl_elem, 'AXISNAME') + axis_child = get_field_child(nxdl_elem, "AXISNAME") for axis in axes: if axis == hdf_name: return (axis_child, 100) @@ -618,22 +727,29 @@ def get_best_nxdata_child(nxdl_elem, hdf_node, hdf_name): def get_best_child(nxdl_elem, hdf_node, hdf_name, hdf_class_name, nexus_type): - """ returns the child of nxdl_elem which has a name - corresponding to the the html documentation name html_name""" + """returns the child of nxdl_elem which has a name + corresponding to the the html documentation name html_name""" bestfit = -1 bestchild = None - if 'name' in nxdl_elem.attrib.keys() and nxdl_elem.attrib['name'] == 'NXdata' and \ - hdf_node is not None and hdf_node.parent is not None and \ - hdf_node.parent.attrs.get('NX_class') == 
@@ -618,22 +727,29 @@ def get_best_child(nxdl_elem, hdf_node, hdf_name, hdf_class_name, nexus_type):
-    """ returns the child of nxdl_elem which has a name
-    corresponding to the the html documentation name html_name"""
+    """returns the child of nxdl_elem which has a name
+    corresponding to the html documentation name html_name"""
     bestfit = -1
     bestchild = None
-    if 'name' in nxdl_elem.attrib.keys() and nxdl_elem.attrib['name'] == 'NXdata' and \
-            hdf_node is not None and hdf_node.parent is not None and \
-            hdf_node.parent.attrs.get('NX_class') == 'NXdata':
+    if (
+        "name" in nxdl_elem.attrib.keys()
+        and nxdl_elem.attrib["name"] == "NXdata"
+        and hdf_node is not None
+        and hdf_node.parent is not None
+        and hdf_node.parent.attrs.get("NX_class") == "NXdata"
+    ):
         (fnd_child, fit) = get_best_nxdata_child(nxdl_elem, hdf_node, hdf_name)
         if fnd_child is not None:
             return (fnd_child, fit)
     for child in nxdl_elem:
         fit = -2
-        if get_local_name_from_xml(child) == nexus_type and \
-                (nexus_type != 'group' or get_nx_class(child) == hdf_class_name):
-            name_any = "nameType" in nxdl_elem.attrib.keys() and \
-                nxdl_elem.attrib["nameType"] == "any"
+        if get_local_name_from_xml(child) == nexus_type and (
+            nexus_type != "group" or get_nx_class(child) == hdf_class_name
+        ):
+            name_any = (
+                "nameType" in nxdl_elem.attrib.keys()
+                and nxdl_elem.attrib["nameType"] == "any"
+            )
             fit = get_nx_namefit(hdf_name, get_node_name(child), name_any)
         if fit > bestfit:
             bestfit = fit
@@ -651,9 +767,13 @@
     for potential_direct_parent in elist:
         main_child = get_direct_child(potential_direct_parent, html_name)
         if main_child is not None:
-            (fitting_child, _) = get_best_child(elist[ind], None, html_name,
-                                                get_nx_class(main_child),
-                                                get_local_name_from_xml(main_child))
+            (fitting_child, _) = get_best_child(
+                elist[ind],
+                None,
+                html_name,
+                get_nx_class(main_child),
+                get_local_name_from_xml(main_child),
+            )
             if fitting_child is not None:
                 child = fitting_child
                 break
@@ -662,18 +782,23 @@
             del elist[ind]
             continue
         # override: remove low priority inheritance classes if class_type is overriden
-        if len(elist) > ind + 1 and get_nx_class(elist[ind]) != get_nx_class(elist[ind + 1]):
-            del elist[ind + 1:]
+        if len(elist) > ind + 1 and get_nx_class(elist[ind]) != get_nx_class(
+            elist[ind + 1]
+        ):
+            del elist[ind + 1 :]
         # add new base class(es) if new element brings such (and not a primitive type)
-        if len(elist) == ind + 1 and get_nx_class(elist[ind])[0:3] != 'NX_':
+        if len(elist) == ind + 1 and get_nx_class(elist[ind])[0:3] != "NX_":
             add_base_classes(elist)
     return elist, html_name


 @lru_cache(maxsize=None)
-def get_inherited_nodes(nxdl_path: str = None,  # pylint: disable=too-many-arguments,too-many-locals
-                        nx_name: str = None, elem: ET.Element = None,
-                        attr=False):  # pylint: disable=unused-argument
+def get_inherited_nodes(
+    nxdl_path: str = None,  # pylint: disable=too-many-arguments,too-many-locals
+    nx_name: str = None,
+    elem: ET.Element = None,
+    attr=False,
+):  # pylint: disable=unused-argument
     """Returns a list of ET.Element for the given path."""
     # let us start with the given definition file
     elist = []  # type: ignore[var-annotated]
@@ -681,7 +806,7 @@
     nxdl_elem_path = [elist[0]]

     class_path = []  # type: ignore[var-annotated]
-    html_path = nxdl_path.split('/')[1:]
+    html_path = nxdl_path.split("/")[1:]
     path = html_path
     for pind in range(len(path)):
         html_name = html_path[pind]
@@ -693,9 +818,12 @@
     return (class_path, nxdl_elem_path, elist)


-def get_node_at_nxdl_path(nxdl_path: str = None,
-                          nx_name: str = None, elem: ET.Element = None,
-                          exc: bool = True):
+def get_node_at_nxdl_path(
+    nxdl_path: str = None,
+    nx_name: str = None,
+    elem: ET.Element = None,
+    exc: bool = True,
+):
     """Returns an ET.Element for the given path.
This function either takes the name for the NeXus Application Definition we are looking for or the root elem from a previously loaded NXDL file @@ -707,15 +835,18 @@ def get_node_at_nxdl_path(nxdl_path: str = None, (class_path, nxdlpath, elist) = get_inherited_nodes(nxdl_path, nx_name, elem) except ValueError as value_error: if exc: - raise NxdlAttributeError(f"Attributes were not found for {nxdl_path}. " - "Please check this entry in the template dictionary.") \ - from value_error + raise NxdlAttributeError( + f"Attributes were not found for {nxdl_path}. " + "Please check this entry in the template dictionary." + ) from value_error return None if class_path and nxdlpath and elist: elem = elist[0] else: elem = None if exc: - raise NxdlAttributeError(f"Attributes were not found for {nxdl_path}. " - "Please check this entry in the template dictionary.") + raise NxdlAttributeError( + f"Attributes were not found for {nxdl_path}. " + "Please check this entry in the template dictionary." + ) return elem diff --git a/pyproject.toml b/pyproject.toml index 1b68efa7a..7e526d918 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,6 +63,7 @@ dev = [ "types-pytz", "types-requests", "pip-tools", + "pre-commit", ] [project.scripts] diff --git a/tests/dataconverter/test_convert.py b/tests/dataconverter/test_convert.py index a317c1470..0612f47fb 100644 --- a/tests/dataconverter/test_convert.py +++ b/tests/dataconverter/test_convert.py @@ -30,35 +30,53 @@ def move_xarray_file_to_tmp(tmp_path): """Moves the xarray file, which is used to test linking into the tmp_path directory.""" - test_file_path = os.path.join(os.path.dirname(__file__), - "../data/dataconverter/readers/mpes") - distutils.file_util.copy_file(os.path.join(test_file_path, "xarray_saved_small_calibration.h5"), - os.path.join(tmp_path, "xarray_saved_small_calibration.h5")) + test_file_path = os.path.join( + os.path.dirname(__file__), "../data/dataconverter/readers/mpes" + ) + distutils.file_util.copy_file( + os.path.join(test_file_path, "xarray_saved_small_calibration.h5"), + os.path.join(tmp_path, "xarray_saved_small_calibration.h5"), + ) def restore_xarray_file_from_tmp(tmp_path): """Restores the xarray file from the tmp_path directory.""" - test_file_path = os.path.join(os.path.dirname(__file__), - "../data/dataconverter/readers/mpes") + test_file_path = os.path.join( + os.path.dirname(__file__), "../data/dataconverter/readers/mpes" + ) os.remove(os.path.join(test_file_path, "xarray_saved_small_calibration.h5")) - distutils.file_util.move_file(os.path.join(tmp_path, "xarray_saved_small_calibration.h5"), - os.path.join(test_file_path, "xarray_saved_small_calibration.h5")) - - -@pytest.mark.parametrize("cli_inputs", [ - pytest.param([ - "--nxdl", - "NXcontainer", - ], id="exists-in-contributed"), - pytest.param([ - "--nxdl", - "NXarchive", - ], id="exists-in-applications"), - pytest.param([ - "--nxdl", - "NXdoesnotexist", - ], id="does-not-exist") -]) + distutils.file_util.move_file( + os.path.join(tmp_path, "xarray_saved_small_calibration.h5"), + os.path.join(test_file_path, "xarray_saved_small_calibration.h5"), + ) + + +@pytest.mark.parametrize( + "cli_inputs", + [ + pytest.param( + [ + "--nxdl", + "NXcontainer", + ], + id="exists-in-contributed", + ), + pytest.param( + [ + "--nxdl", + "NXarchive", + ], + id="exists-in-applications", + ), + pytest.param( + [ + "--nxdl", + "NXdoesnotexist", + ], + id="does-not-exist", + ), + ], +) def test_find_nxdl(cli_inputs): """Unit test to check if dataconverter can find NXDLs in 
contributed/applications folder.""" cli_inputs.extend(["--reader", "example"]) @@ -69,7 +87,9 @@ def test_find_nxdl(cli_inputs): assert isinstance(result.exception, FileNotFoundError) else: assert isinstance(result.exception, Exception) - assert "The chosen NXDL isn't supported by the selected reader." in str(result.exception) + assert "The chosen NXDL isn't supported by the selected reader." in str( + result.exception + ) def test_get_reader(): @@ -82,27 +102,25 @@ def test_get_names_of_all_readers(): assert "example" in dataconverter.get_names_of_all_readers() -@pytest.mark.parametrize("cli_inputs", [ - pytest.param([ - "--nxdl", - "NXtest", - "--generate-template" - ], id="generate-template"), - pytest.param([], id="nxdl-not-provided"), - pytest.param([ - "--nxdl", - "NXtest", - "--input-file", - "test_input" - ], id="input-file") -]) +@pytest.mark.parametrize( + "cli_inputs", + [ + pytest.param( + ["--nxdl", "NXtest", "--generate-template"], id="generate-template" + ), + pytest.param([], id="nxdl-not-provided"), + pytest.param( + ["--nxdl", "NXtest", "--input-file", "test_input"], id="input-file" + ), + ], +) def test_cli(caplog, cli_inputs): """A test for the convert CLI.""" runner = CliRunner() result = runner.invoke(dataconverter.convert_cli, cli_inputs) if "--generate-template" in cli_inputs: assert result.exit_code == 0 - assert "\"/ENTRY[entry]/NXODD_name/int_value\": \"None\"," in caplog.text + assert '"/ENTRY[entry]/NXODD_name/int_value": "None",' in caplog.text elif "--input-file" in cli_inputs: assert "test_input" in caplog.text elif result.exit_code == 2: @@ -115,38 +133,55 @@ def test_links_and_virtual_datasets(tmp_path): when the template contains links.""" move_xarray_file_to_tmp(tmp_path) - dirpath = os.path.join(os.path.dirname(__file__), - "../data/dataconverter/readers/example") + dirpath = os.path.join( + os.path.dirname(__file__), "../data/dataconverter/readers/example" + ) runner = CliRunner() - result = runner.invoke(dataconverter.convert_cli, [ - "--nxdl", - "NXtest", - "--reader", - "example", - "--input-file", - os.path.join(dirpath, "testdata.json"), - "--output", - os.path.join(tmp_path, "test_output.h5") - ]) + result = runner.invoke( + dataconverter.convert_cli, + [ + "--nxdl", + "NXtest", + "--reader", + "example", + "--input-file", + os.path.join(dirpath, "testdata.json"), + "--output", + os.path.join(tmp_path, "test_output.h5"), + ], + ) assert result.exit_code == 0 test_nxs = h5py.File(os.path.join(tmp_path, "test_output.h5"), "r") - assert 'entry/test_link/internal_link' in test_nxs + assert "entry/test_link/internal_link" in test_nxs assert isinstance(test_nxs["entry/test_link/internal_link"], h5py.Dataset) - assert 'entry/test_link/external_link' in test_nxs + assert "entry/test_link/external_link" in test_nxs assert isinstance(test_nxs["entry/test_link/external_link"], h5py.Dataset) - assert 'entry/test_virtual_dataset/concatenate_datasets' in test_nxs - assert isinstance(test_nxs["entry/test_virtual_dataset/concatenate_datasets"], h5py.Dataset) - assert 'entry/test_virtual_dataset/sliced_dataset' in test_nxs - assert isinstance(test_nxs["entry/test_virtual_dataset/sliced_dataset"], h5py.Dataset) + assert "entry/test_virtual_dataset/concatenate_datasets" in test_nxs + assert isinstance( + test_nxs["entry/test_virtual_dataset/concatenate_datasets"], h5py.Dataset + ) + assert "entry/test_virtual_dataset/sliced_dataset" in test_nxs + assert isinstance( + test_nxs["entry/test_virtual_dataset/sliced_dataset"], h5py.Dataset + ) # pylint: disable=no-member 
     assert test_nxs["entry/test_virtual_dataset/sliced_dataset"].shape == (10, 10, 5)
-    assert 'entry/test_virtual_dataset/sliced_dataset2' in test_nxs
-    assert isinstance(test_nxs["entry/test_virtual_dataset/sliced_dataset2"], h5py.Dataset)
+    assert "entry/test_virtual_dataset/sliced_dataset2" in test_nxs
+    assert isinstance(
+        test_nxs["entry/test_virtual_dataset/sliced_dataset2"], h5py.Dataset
+    )
     assert test_nxs["entry/test_virtual_dataset/sliced_dataset2"].shape == (10, 10, 10)
-    assert 'entry/test_virtual_dataset/sliced_dataset3' in test_nxs
-    assert isinstance(test_nxs["entry/test_virtual_dataset/sliced_dataset3"], h5py.Dataset)
-    assert test_nxs["entry/test_virtual_dataset/sliced_dataset3"].shape == (10, 10, 10, 2)
+    assert "entry/test_virtual_dataset/sliced_dataset3" in test_nxs
+    assert isinstance(
+        test_nxs["entry/test_virtual_dataset/sliced_dataset3"], h5py.Dataset
+    )
+    assert test_nxs["entry/test_virtual_dataset/sliced_dataset3"].shape == (
+        10,
+        10,
+        10,
+        2,
+    )

     restore_xarray_file_from_tmp(tmp_path)

@@ -154,8 +189,9 @@ def test_links_and_virtual_datasets(tmp_path):
 def test_compression(tmp_path):
     """A test for the convert CLI to check whether a Dataset object is compressed."""

-    dirpath = os.path.join(os.path.dirname(__file__),
-                           "../data/dataconverter/readers/example")
+    dirpath = os.path.join(
+        os.path.dirname(__file__), "../data/dataconverter/readers/example"
+    )

     move_xarray_file_to_tmp(tmp_path)

@@ -163,15 +199,15 @@ def test_compression(tmp_path):
         [os.path.join(dirpath, "testdata.json")],
         "example",
         "NXtest",
-        os.path.join(tmp_path, "test_output.h5")
+        os.path.join(tmp_path, "test_output.h5"),
     )

     test_nxs = h5py.File(os.path.join(tmp_path, "test_output.h5"), "r")
-    assert 'entry/test_compression/compressed_data' in test_nxs
-    assert isinstance(test_nxs['/entry/test_compression/compressed_data'], h5py.Dataset)
+    assert "entry/test_compression/compressed_data" in test_nxs
+    assert isinstance(test_nxs["/entry/test_compression/compressed_data"], h5py.Dataset)
     # pylint: disable=no-member
-    assert test_nxs['/entry/test_compression/compressed_data'].compression == 'gzip'
-    assert test_nxs['/entry/test_compression/not_to_compress'].compression is None
+    assert test_nxs["/entry/test_compression/compressed_data"].compression == "gzip"
+    assert test_nxs["/entry/test_compression/not_to_compress"].compression is None

     restore_xarray_file_from_tmp(tmp_path)

@@ -179,30 +215,37 @@ def test_compression(tmp_path):
 def test_mpes_writing(tmp_path):
     """Check if mpes example can be reproduced"""
     # dataconverter
-    dirpath = os.path.join(os.path.dirname(__file__), "../data/dataconverter/readers/mpes")
-    dataconverter.convert((os.path.join(dirpath, "xarray_saved_small_calibration.h5"),
-                           os.path.join(dirpath, "config_file.json")),
-                          "mpes", "NXmpes",
-                          os.path.join(tmp_path, "mpes.small_test.nxs"),
-                          False, False)
+    dirpath = os.path.join(
+        os.path.dirname(__file__), "../data/dataconverter/readers/mpes"
+    )
+    dataconverter.convert(
+        (
+            os.path.join(dirpath, "xarray_saved_small_calibration.h5"),
+            os.path.join(dirpath, "config_file.json"),
+        ),
+        "mpes",
+        "NXmpes",
+        os.path.join(tmp_path, "mpes.small_test.nxs"),
+        False,
+        False,
+    )

     # check generated nexus file
-    test_data = os.path.join(tmp_path, 'mpes.small_test.nxs')
+    test_data = os.path.join(tmp_path, "mpes.small_test.nxs")
     logger = logging.getLogger(__name__)
     logger.setLevel(logging.DEBUG)
-    handler = logging.\
-        FileHandler(os.path.join(tmp_path, 'mpes_test.log'), 'w')
-    formatter = logging.Formatter('%(levelname)s - %(message)s')
+    handler = logging.FileHandler(os.path.join(tmp_path, "mpes_test.log"), "w")
+    formatter = logging.Formatter("%(levelname)s - %(message)s")
     handler.setLevel(logging.DEBUG)
     handler.setFormatter(formatter)
     logger.addHandler(handler)
     nexus_helper = nexus.HandleNexus(logger, test_data, None, None)
     nexus_helper.process_nexus_master_file(None)
-    with open(os.path.join(tmp_path, 'mpes_test.log'), 'r', encoding='utf-8') as logfile:
+    with open(
+        os.path.join(tmp_path, "mpes_test.log"), "r", encoding="utf-8"
+    ) as logfile:
         log = logfile.readlines()
     with open(
-        os.path.join(dirpath, 'Ref_nexus_mpes.log'),
-        'r',
-        encoding='utf-8'
+        os.path.join(dirpath, "Ref_nexus_mpes.log"), "r", encoding="utf-8"
     ) as logfile:
         ref_log = logfile.readlines()
     assert log == ref_log
@@ -210,19 +253,38 @@ def test_eln_data(tmp_path):
     """Check if the subsections in the eln_data.yml file work."""
-    dirpath = os.path.join(os.path.dirname(__file__), "../data/dataconverter/readers/mpes")
-    dataconverter.convert((os.path.join(dirpath, "xarray_saved_small_calibration.h5"),
-                           os.path.join(dirpath, "config_file.json"),
-                           os.path.join(dirpath, "eln_data.yaml")),
-                          "mpes", "NXmpes",
-                          os.path.join(tmp_path, "mpes.small_test.nxs"),
-                          False, False)
+    dirpath = os.path.join(
+        os.path.dirname(__file__), "../data/dataconverter/readers/mpes"
+    )
+    dataconverter.convert(
+        (
+            os.path.join(dirpath, "xarray_saved_small_calibration.h5"),
+            os.path.join(dirpath, "config_file.json"),
+            os.path.join(dirpath, "eln_data.yaml"),
+        ),
+        "mpes",
+        "NXmpes",
+        os.path.join(tmp_path, "mpes.small_test.nxs"),
+        False,
+        False,
+    )


 def test_eln_data_subsections(tmp_path):
     """Check if the subsections in the eln_data.yml file work."""
-    dirpath = os.path.join(os.path.dirname(__file__), "../data/dataconverter/readers/json_yml")
-    dataconverter.convert((os.path.join(dirpath, "eln_data_w_subsections.yaml",),),
-                          "hall", "NXroot",
-                          os.path.join(tmp_path, "hall.nxs"),
-                          False, False)
+    dirpath = os.path.join(
+        os.path.dirname(__file__), "../data/dataconverter/readers/json_yml"
+    )
+    dataconverter.convert(
+        (
+            os.path.join(
+                dirpath,
+                "eln_data_w_subsections.yaml",
+            ),
+        ),
+        "hall",
+        "NXroot",
+        os.path.join(tmp_path, "hall.nxs"),
+        False,
+        False,
+    )
diff --git a/tests/dataconverter/test_helpers.py b/tests/dataconverter/test_helpers.py
index 421f8ce9b..21f41586d 100644
--- a/tests/dataconverter/test_helpers.py
+++ b/tests/dataconverter/test_helpers.py
@@ -33,7 +33,9 @@ def remove_optional_parent(data_dict: Template):
     internal_dict = Template(data_dict)
     del internal_dict["/ENTRY[my_entry]/optional_parent/required_child"]
     del internal_dict["/ENTRY[my_entry]/optional_parent/optional_child"]
-    del internal_dict["/ENTRY[my_entry]/optional_parent/req_group_in_opt_group/DATA[data]"]
+    del internal_dict[
+        "/ENTRY[my_entry]/optional_parent/req_group_in_opt_group/DATA[data]"
+    ]
     return internal_dict
@@ -58,7 +60,7 @@ def set_to_none_in_dict(data_dict: Template, key: str, optionality: str):
     return None


-def remove_from_dict(data_dict: Template, key: str, optionality: str = 'optional'):
+def remove_from_dict(data_dict: Template, key: str, optionality: str = "optional"):
     """Helper function to remove a key from dict"""
     if data_dict is not None and key in data_dict[optionality]:
         internal_dict = Template(data_dict)
@@ -73,23 +75,31 @@ def listify_template(data_dict: Template):
     listified_template = Template()
     for optionality in ("optional", "recommended", "required", "undocumented"):
         for path in data_dict[optionality]:
-            if path[path.rindex("/") + 1:] in ("@units", "type", "definition", "date_value"):
+            if path[path.rindex("/") + 1 :] in (
+                "@units",
+                "type",
+                "definition",
+                "date_value",
+            ):
                 listified_template[optionality][path] = data_dict[optionality][path]
             else:
                 listified_template[optionality][path] = [data_dict[optionality][path]]
     return listified_template


-@pytest.mark.parametrize("input_data, expected_output", [
-    ('2.4E-23', 2.4e-23),
-    ('28', 28),
-    ('45.98', 45.98),
-    ('test', 'test'),
-    (['59', '3.00005', '498E-36'], np.array([59.0, 3.00005, 4.98e-34])),
-    ('23 34 444 5000', np.array([23., 34., 444., 5000.])),
-    ('xrd experiment', 'xrd experiment'),
-    (None, None),
-])
+@pytest.mark.parametrize(
+    "input_data, expected_output",
+    [
+        ("2.4E-23", 2.4e-23),
+        ("28", 28),
+        ("45.98", 45.98),
+        ("test", "test"),
+        (["59", "3.00005", "498E-36"], np.array([59.0, 3.00005, 4.98e-34])),
+        ("23 34 444 5000", np.array([23.0, 34.0, 444.0, 5000.0])),
+        ("xrd experiment", "xrd experiment"),
+        (None, None),
+    ],
+)
 def test_transform_to_intended_dt(input_data, expected_output):
     """Transform to possible numerical method."""
     result = helpers.transform_to_intended_dt(input_data)
@@ -127,12 +137,14 @@ def fixture_filled_test_data(template, tmp_path):
     # Copy original measurement file to tmp dir,
     # because h5py.ExternalLink is modifying it while
     # linking the nxs file.
-    distutils.file_util.copy_file(f"{os.path.dirname(__file__)}"
-                                  f"/../"
-                                  f"data/dataconverter/"
-                                  f"readers/mpes/"
-                                  f"xarray_saved_small_calibration.h5",
-                                  tmp_path)
+    distutils.file_util.copy_file(
+        f"{os.path.dirname(__file__)}"
+        f"/../"
+        f"data/dataconverter/"
+        f"readers/mpes/"
+        f"xarray_saved_small_calibration.h5",
+        tmp_path,
+    )

     template.clear()
     template["/ENTRY[my_entry]/NXODD_name/float_value"] = 2.0
@@ -142,24 +154,24 @@ def fixture_filled_test_data(template, tmp_path):
     template["/ENTRY[my_entry]/NXODD_name/bool_value"] = True
     template["/ENTRY[my_entry]/NXODD_name/int_value"] = 2
     template["/ENTRY[my_entry]/NXODD_name/int_value/@units"] = "eV"
-    template["/ENTRY[my_entry]/NXODD_name/posint_value"] = np.array([1, 2, 3],
-                                                                    dtype=np.int8)
+    template["/ENTRY[my_entry]/NXODD_name/posint_value"] = np.array(
+        [1, 2, 3], dtype=np.int8
+    )
     template["/ENTRY[my_entry]/NXODD_name/posint_value/@units"] = "kg"
     template["/ENTRY[my_entry]/NXODD_name/char_value"] = "just chars"
     template["/ENTRY[my_entry]/definition"] = "NXtest"
     template["/ENTRY[my_entry]/definition/@version"] = "2.4.6"
     template["/ENTRY[my_entry]/program_name"] = "Testing program"
     template["/ENTRY[my_entry]/NXODD_name/type"] = "2nd type"
-    template["/ENTRY[my_entry]/NXODD_name/date_value"] = ("2022-01-22T12"
-                                                          ":14:12.05018+00:00")
+    template["/ENTRY[my_entry]/NXODD_name/date_value"] = (
+        "2022-01-22T12" ":14:12.05018+00:00"
+    )
     template["/ENTRY[my_entry]/required_group/description"] = "An example description"
     template["/ENTRY[my_entry]/required_group2/description"] = "An example description"
     template["/ENTRY[my_entry]/does/not/exist"] = "random"
-    template["/ENTRY[my_entry]/links/ext_link"] = {"link":
-                                                   f"{tmp_path}/"
-                                                   f"xarray_saved_small_cali"
-                                                   f"bration.h5:/axes/ax3"
-                                                   }
+    template["/ENTRY[my_entry]/links/ext_link"] = {
+        "link": f"{tmp_path}/" f"xarray_saved_small_cali" f"bration.h5:/axes/ax3"
+    }

     yield template
@@ -171,157 +183,214 @@
 TEMPLATE["required"]["/ENTRY[my_entry]/NXODD_name/bool_value"] = True  # pylint: disable=E1126
 TEMPLATE["required"]["/ENTRY[my_entry]/NXODD_name/int_value"] = 2  # pylint: disable=E1126
 TEMPLATE["required"]["/ENTRY[my_entry]/NXODD_name/int_value/@units"] = "eV"  # pylint: disable=E1126
-TEMPLATE["required"]["/ENTRY[my_entry]/NXODD_name/posint_value"] = np.array([1, 2, 3],  # pylint: disable=E1126
-                                                                            dtype=np.int8)  # pylint: disable=E1126
+TEMPLATE["required"]["/ENTRY[my_entry]/NXODD_name/posint_value"] = np.array(
+    [1, 2, 3],  # pylint: disable=E1126
+    dtype=np.int8,
+)  # pylint: disable=E1126
 TEMPLATE["required"]["/ENTRY[my_entry]/NXODD_name/posint_value/@units"] = "kg"  # pylint: disable=E1126
 TEMPLATE["required"]["/ENTRY[my_entry]/NXODD_name/char_value"] = "just chars"  # pylint: disable=E1126
 TEMPLATE["required"]["/ENTRY[my_entry]/definition"] = "NXtest"  # pylint: disable=E1126
 TEMPLATE["required"]["/ENTRY[my_entry]/definition/@version"] = "2.4.6"  # pylint: disable=E1126
 TEMPLATE["required"]["/ENTRY[my_entry]/program_name"] = "Testing program"  # pylint: disable=E1126
 TEMPLATE["required"]["/ENTRY[my_entry]/NXODD_name/type"] = "2nd type"  # pylint: disable=E1126
-TEMPLATE["required"]["/ENTRY[my_entry]/NXODD_name/date_value"] = "2022-01-22T12:14:12.05018+00:00"  # pylint: disable=E1126
-TEMPLATE["optional"]["/ENTRY[my_entry]/required_group/description"] = "An example description"
-TEMPLATE["optional"]["/ENTRY[my_entry]/required_group2/description"] = "An example description"
-TEMPLATE["required"]["/ENTRY[my_entry]/optional_parent/req_group_in_opt_group/DATA[data]"] = 1
-TEMPLATE["lone_groups"] = ['/ENTRY[entry]/required_group',
-                           '/ENTRY[entry]/required_group2',
-                           '/ENTRY[entry]/optional_parent/req_group_in_opt_group']
+TEMPLATE["required"][
+    "/ENTRY[my_entry]/NXODD_name/date_value"
+] = "2022-01-22T12:14:12.05018+00:00"  # pylint: disable=E1126
+TEMPLATE["optional"][
+    "/ENTRY[my_entry]/required_group/description"
+] = "An example description"
+TEMPLATE["optional"][
+    "/ENTRY[my_entry]/required_group2/description"
+] = "An example description"
+TEMPLATE["required"][
+    "/ENTRY[my_entry]/optional_parent/req_group_in_opt_group/DATA[data]"
+] = 1
+TEMPLATE["lone_groups"] = [
+    "/ENTRY[entry]/required_group",
+    "/ENTRY[entry]/required_group2",
+    "/ENTRY[entry]/optional_parent/req_group_in_opt_group",
+]
 TEMPLATE["optional"]["/@default"] = "Some NXroot attribute"


-@pytest.mark.parametrize("data_dict,error_message", [
-    pytest.param(
-        alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/int_value", "not_a_num"),
-        ("The value at /ENTRY[my_entry]/NXODD_name/in"
-         "t_value should be of Python type: (<class 'int'>, <class 'numpy.ndarray'>, <class 'numpy.signedinteger'>),"
-         " as defined in the NXDL as NX_INT."),
-        id="string-instead-of-int"),
-    pytest.param(
-        alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/bool_value", "NOT_TRUE_OR_FALSE"),
-        ("The value at /ENTRY[my_entry]/NXODD_name/bool_value sh"
-         "ould be of Python type: (<class 'bool'>, <class 'numpy.ndarray'>, <class 'numpy.bool_'>), as defined in the NXDL as NX_BOOLEAN."),
-        id="string-instead-of-int"),
-    pytest.param(
-        alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/int_value", {"link": "/a-link"}),
-        (""),
-        id="link-dict-instead-of-bool"),
-    pytest.param(
-        alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/posint_value", -1),
-        ("The value at /ENTRY[my_entry]/NXODD_name/posint_value "
-         "should be a positive int."),
-        id="negative-posint"),
-    pytest.param(
-        alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/char_value", 3),
-        ("The value at /ENTRY[my_entry]/NXODD_name/char_value should be of Python type:"
-         " (<class 'str'>, <class 'numpy.ndarray'>, <class 'numpy.character'>),"
-         " as defined in the NXDL as NX_CHAR."),
-        id="int-instead-of-chars"),
-    pytest.param(
-        alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/float_value", None),
-        "",
-        id="empty-optional-field"),
-    pytest.param(
-        set_to_none_in_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/bool_value", "required"),
-        ("The data entry corresponding to /ENTRY[entry]/NXODD_name/bool_value is"
-         " required and hasn't been supplied by the reader."),
-        id="empty-required-field"),
-    pytest.param(
-        alter_dict(TEMPLATE,
-                   "/ENTRY[my_entry]/NXODD_name/date_value",
-                   "2022-01-22T12:14:12.05018+00:00"),
-        "",
-        id="UTC-with-+00:00"),
-    pytest.param(
-        alter_dict(TEMPLATE,
-                   "/ENTRY[my_entry]/NXODD_name/date_value",
-                   "2022-01-22T12:14:12.05018Z"),
-        "",
-        id="UTC-with-Z"),
-    pytest.param(
-        alter_dict(TEMPLATE,
-                   "/ENTRY[my_entry]/NXODD_name/date_value",
-                   "2022-01-22T12:14:12.05018-00:00"),
-        "The date at /ENTRY[my_entry]/NXODD_name/date_value should be a timezone aware"
-        " ISO8601 formatted str. For example, 2022-01-22T12:14:12.05018Z or 2022-01-22"
-        "T12:14:12.05018+00:00.",
-        id="UTC-with--00:00"),
-    pytest.param(
-        listify_template(TEMPLATE),
-        "",
-        id="lists"),
-    pytest.param(
-        alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/type", "Wrong option"),
-        ("The value at /ENTRY[my_entry]/NXODD_name/type should be on of the following"
-         " strings: [1st type,2nd type,3rd type,4th type]"),
-        id="wrong-enum-choice"),
-    pytest.param(
-        set_to_none_in_dict(TEMPLATE,
-                            "/ENTRY[my_entry]/optional_parent/required_child",
-                            "optional"),
-        ("The data entry, /ENTRY[my_entry]/optional_parent/optional_child, has an "
-         "optional parent, /ENTRY[entry]/optional_parent, with required children set"
-         ". Either provide no children for /ENTRY[entry]/optional_parent or provide "
-         "all required ones."),
-        id="atleast-one-required-child-not-provided-optional-parent"),
-    pytest.param(
-        alter_dict(alter_dict(TEMPLATE,
-                              "/ENTRY[my_entry]/optional_parent/required_child",
-                              None),
-                   "/ENTRY[my_entry]/optional_parent/optional_child",
-                   None),
-        (""),
-        id="no-child-provided-optional-parent"),
-    pytest.param(
-        TEMPLATE,
-        "",
-        id="valid-data-dict"),
-    pytest.param(
-        remove_from_dict(TEMPLATE, "/ENTRY[my_entry]/required_group/description"),
-        "The required group, /ENTRY[entry]/required_group, hasn't been supplied.",
-        id="missing-empty-yet-required-group"),
-    pytest.param(
-        remove_from_dict(TEMPLATE, "/ENTRY[my_entry]/required_group2/description"),
-        "The required group, /ENTRY[entry]/required_group2, hasn't been supplied.",
-        id="missing-empty-yet-required-group2"),
-    pytest.param(
-        alter_dict(
+@pytest.mark.parametrize(
+    "data_dict,error_message",
+    [
+        pytest.param(
+            alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/int_value", "not_a_num"),
+            (
+                "The value at /ENTRY[my_entry]/NXODD_name/in"
+                "t_value should be of Python type: (<class 'int'>, <class 'numpy.ndarray'>, <class 'numpy.signedinteger'>),"
+                " as defined in the NXDL as NX_INT."
+            ),
+            id="string-instead-of-int",
+        ),
+        pytest.param(
+            alter_dict(
+                TEMPLATE, "/ENTRY[my_entry]/NXODD_name/bool_value", "NOT_TRUE_OR_FALSE"
+            ),
+            (
+                "The value at /ENTRY[my_entry]/NXODD_name/bool_value sh"
+                "ould be of Python type: (<class 'bool'>, <class 'numpy.ndarray'>, <class 'numpy.bool_'>), as defined in the NXDL as NX_BOOLEAN."
+            ),
+            id="string-instead-of-int",
+        ),
+        pytest.param(
+            alter_dict(
+                TEMPLATE, "/ENTRY[my_entry]/NXODD_name/int_value", {"link": "/a-link"}
+            ),
+            (""),
+            id="link-dict-instead-of-bool",
+        ),
+        pytest.param(
+            alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/posint_value", -1),
+            (
+                "The value at /ENTRY[my_entry]/NXODD_name/posint_value "
+                "should be a positive int."
+            ),
+            id="negative-posint",
+        ),
+        pytest.param(
+            alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/char_value", 3),
+            (
+                "The value at /ENTRY[my_entry]/NXODD_name/char_value should be of Python type:"
+                " (<class 'str'>, <class 'numpy.ndarray'>, <class 'numpy.character'>),"
+                " as defined in the NXDL as NX_CHAR."
+            ),
+            id="int-instead-of-chars",
+        ),
+        pytest.param(
+            alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/float_value", None),
+            "",
+            id="empty-optional-field",
+        ),
+        pytest.param(
+            set_to_none_in_dict(
+                TEMPLATE, "/ENTRY[my_entry]/NXODD_name/bool_value", "required"
+            ),
+            (
+                "The data entry corresponding to /ENTRY[entry]/NXODD_name/bool_value is"
+                " required and hasn't been supplied by the reader."
+            ),
+            id="empty-required-field",
+        ),
+        pytest.param(
+            alter_dict(
+                TEMPLATE,
+                "/ENTRY[my_entry]/NXODD_name/date_value",
+                "2022-01-22T12:14:12.05018+00:00",
+            ),
+            "",
+            id="UTC-with-+00:00",
+        ),
+        pytest.param(
+            alter_dict(
+                TEMPLATE,
+                "/ENTRY[my_entry]/NXODD_name/date_value",
+                "2022-01-22T12:14:12.05018Z",
+            ),
+            "",
+            id="UTC-with-Z",
+        ),
+        pytest.param(
+            alter_dict(
+                TEMPLATE,
+                "/ENTRY[my_entry]/NXODD_name/date_value",
+                "2022-01-22T12:14:12.05018-00:00",
+            ),
+            "The date at /ENTRY[my_entry]/NXODD_name/date_value should be a timezone aware"
+            " ISO8601 formatted str. For example, 2022-01-22T12:14:12.05018Z or 2022-01-22"
+            "T12:14:12.05018+00:00.",
+            id="UTC-with--00:00",
+        ),
+        pytest.param(listify_template(TEMPLATE), "", id="lists"),
+        pytest.param(
+            alter_dict(TEMPLATE, "/ENTRY[my_entry]/NXODD_name/type", "Wrong option"),
+            (
+                "The value at /ENTRY[my_entry]/NXODD_name/type should be on of the following"
+                " strings: [1st type,2nd type,3rd type,4th type]"
+            ),
+            id="wrong-enum-choice",
+        ),
+        pytest.param(
+            set_to_none_in_dict(
+                TEMPLATE, "/ENTRY[my_entry]/optional_parent/required_child", "optional"
+            ),
+            (
+                "The data entry, /ENTRY[my_entry]/optional_parent/optional_child, has an "
+                "optional parent, /ENTRY[entry]/optional_parent, with required children set"
+                ". Either provide no children for /ENTRY[entry]/optional_parent or provide "
+                "all required ones."
+            ),
+            id="atleast-one-required-child-not-provided-optional-parent",
+        ),
+        pytest.param(
+            alter_dict(
+                alter_dict(
+                    TEMPLATE, "/ENTRY[my_entry]/optional_parent/required_child", None
+                ),
+                "/ENTRY[my_entry]/optional_parent/optional_child",
+                None,
+            ),
+            (""),
+            id="no-child-provided-optional-parent",
+        ),
+        pytest.param(TEMPLATE, "", id="valid-data-dict"),
+        pytest.param(
             remove_from_dict(TEMPLATE, "/ENTRY[my_entry]/required_group/description"),
-            "/ENTRY[my_entry]/required_group",
-            {}
+            "The required group, /ENTRY[entry]/required_group, hasn't been supplied.",
+            id="missing-empty-yet-required-group",
+        ),
+        pytest.param(
+            remove_from_dict(TEMPLATE, "/ENTRY[my_entry]/required_group2/description"),
+            "The required group, /ENTRY[entry]/required_group2, hasn't been supplied.",
+            id="missing-empty-yet-required-group2",
         ),
-        (""),
-        id="allow-required-and-empty-group"
-    ),
-    pytest.param(
-        remove_from_dict(TEMPLATE,
-                         "/ENTRY[my_entry]/optional_parent/req_group_in_opt_group/DATA[data]",
-                         "required"
-                         ),
-        ("The required group, /ENTRY[entry]/optional_parent/req_group_in_opt_group, hasn't been "
-         "supplied while its optional parent, /ENTRY[entry]/optional_parent/"
-         "req_group_in_opt_group, is supplied."),
-        id="req-group-in-opt-parent-removed"
-    ),
-    pytest.param(
-        remove_optional_parent(TEMPLATE),
-        (""),
-        id="opt-group-completely-removed"
-    ),
-])
+        pytest.param(
+            alter_dict(
+                remove_from_dict(
+                    TEMPLATE, "/ENTRY[my_entry]/required_group/description"
+                ),
+                "/ENTRY[my_entry]/required_group",
+                {},
+            ),
+            (""),
+            id="allow-required-and-empty-group",
+        ),
+        pytest.param(
+            remove_from_dict(
+                TEMPLATE,
+                "/ENTRY[my_entry]/optional_parent/req_group_in_opt_group/DATA[data]",
+                "required",
+            ),
+            (
+                "The required group, /ENTRY[entry]/optional_parent/req_group_in_opt_group, hasn't been "
+                "supplied while its optional parent, /ENTRY[entry]/optional_parent/"
+                "req_group_in_opt_group, is supplied."
+            ),
+            id="req-group-in-opt-parent-removed",
+        ),
+        pytest.param(
+            remove_optional_parent(TEMPLATE), (""), id="opt-group-completely-removed"
+        ),
+    ],
+)
 def test_validate_data_dict(data_dict, error_message, template, nxdl_root, request):
     """Unit test for the data validation routine"""
-    if request.node.callspec.id in ("valid-data-dict",
-                                    "lists",
-                                    "empty-optional-field",
-                                    "UTC-with-+00:00",
-                                    "UTC-with-Z",
-                                    "no-child-provided-optional-parent",
-                                    "int-instead-of-chars",
-                                    "link-dict-instead-of-bool",
-                                    "allow-required-and-empty-group",
-                                    "opt-group-completely-removed"):
+    if request.node.callspec.id in (
+        "valid-data-dict",
+        "lists",
+        "empty-optional-field",
+        "UTC-with-+00:00",
+        "UTC-with-Z",
+        "no-child-provided-optional-parent",
+        "int-instead-of-chars",
+        "link-dict-instead-of-bool",
+        "allow-required-and-empty-group",
+        "opt-group-completely-removed",
+    ):
         helpers.validate_data_dict(template, data_dict, nxdl_root)
     else:
         with pytest.raises(Exception) as execinfo:
@@ -329,16 +398,19 @@ def test_validate_data_dict(data_dict, error_message, template, nxdl_root, reque
     assert (error_message) == str(execinfo.value)


-@pytest.mark.parametrize("nxdl_path,expected", [
-    pytest.param(
-        "/ENTRY/definition/@version",
-        (True, "/ENTRY[entry]/definition/@version"),
-        id="path-exists-in-dict"),
-    pytest.param(
-        "/RANDOM/does/not/@exist",
-        (False, None),
-        id="path-does-not-exist-in-dict")
-])
+@pytest.mark.parametrize(
+    "nxdl_path,expected",
+    [
+        pytest.param(
+            "/ENTRY/definition/@version",
+            (True, "/ENTRY[entry]/definition/@version"),
+            id="path-exists-in-dict",
+        ),
+        pytest.param(
+            "/RANDOM/does/not/@exist", (False, None), id="path-does-not-exist-in-dict"
+        ),
+    ],
+)
 def test_path_in_data_dict(nxdl_path, expected, template):
     """Unit test for helper function to check if an NXDL path exists in the reader dictionary."""
     assert helpers.path_in_data_dict(nxdl_path, template) == expected
@@ -346,11 +418,11 @@ def test_path_in_data_dict(nxdl_path, expected, template):
 def test_atom_type_extractor_and_hill_conversion():
     """
-        Test atom type extractor and conversion to hill
+    Test atom type extractor and conversion to hill
     """

     test_chemical_formula = "(C38H54S4)n(NaO2)5(CH4)NH3B"
-    expected_atom_types = ['C', 'H', 'B', 'N', 'Na', 'O', 'S']
+    expected_atom_types = ["C", "H", "B", "N", "Na", "O", "S"]

     atom_list = helpers.extract_atom_types(test_chemical_formula)
diff --git a/tests/dataconverter/test_readers.py b/tests/dataconverter/test_readers.py
index d75344541..00cdcf603 100644
--- a/tests/dataconverter/test_readers.py
+++ b/tests/dataconverter/test_readers.py
@@ -26,10 +26,11 @@ from _pytest.mark.structures import ParameterSet

 from pynxtools.dataconverter.readers.base.reader import BaseReader
-from pynxtools.dataconverter.convert import \
-    get_names_of_all_readers, get_reader
-from pynxtools.dataconverter.helpers import \
-    validate_data_dict, generate_template_from_nxdl
+from pynxtools.dataconverter.convert import get_names_of_all_readers, get_reader
+from pynxtools.dataconverter.helpers import (
+    validate_data_dict,
+    generate_template_from_nxdl,
+)
 from pynxtools.dataconverter.template import Template

@@ -53,10 +54,17 @@ def get_all_readers() -> List[ParameterSet]:

     # Explicitly removing ApmReader and EmNionReader because we need to add test data
     for reader in [get_reader(x) for x in get_names_of_all_readers()]:
-        if reader.__name__ in ("ApmReader", "EmOmReader", "EmSpctrscpyReader", "EmNionReader"):
-            readers.append(pytest.param(reader,
-                                        marks=pytest.mark.skip(reason="Missing test data.")
-                                        ))
+        if reader.__name__ in (
+            "ApmReader",
+            "EmOmReader",
+            "EmSpctrscpyReader",
+            "EmNionReader",
+        ):
+            readers.append(
+                pytest.param(
+                    reader, marks=pytest.mark.skip(reason="Missing test data.")
+                )
+            )
         else:
             readers.append(pytest.param(reader))

@@ -98,15 +106,15 @@ def test_has_correct_read_func(reader):
     template = Template()
     generate_template_from_nxdl(root, template)

-    read_data = reader().read(template=Template(template), file_paths=tuple(input_files))
+    read_data = reader().read(
+        template=Template(template), file_paths=tuple(input_files)
+    )

     assert isinstance(read_data, Template)
     assert validate_data_dict(template, read_data, root)


-@pytest.mark.parametrize("reader_name,nxdl,undocumented_keys", [
-    ('mpes', 'NXmpes', [])
-])
+@pytest.mark.parametrize("reader_name,nxdl,undocumented_keys", [("mpes", "NXmpes", [])])
 def test_shows_correct_warnings(reader_name, nxdl, undocumented_keys):
     """
     Checks whether the read function generates the correct warnings.
@@ -117,9 +125,7 @@ def test_shows_correct_warnings(reader_name, nxdl, undocumented_keys):
     input_files = sorted(
         glob.glob(os.path.join(dataconverter_data_dir, "readers", reader_name, "*"))
     )
-    nxdl_file = os.path.join(
-        def_dir, "contributed_definitions", f"{nxdl}.nxdl.xml"
-    )
+    nxdl_file = os.path.join(def_dir, "contributed_definitions", f"{nxdl}.nxdl.xml")
     root = ET.parse(nxdl_file).getroot()

     template = Template()
diff --git a/tests/dataconverter/test_writer.py b/tests/dataconverter/test_writer.py
index 7ca160775..55b3cb43b 100644
--- a/tests/dataconverter/test_writer.py
+++ b/tests/dataconverter/test_writer.py
@@ -35,7 +35,7 @@ def fixture_writer(filled_test_data, tmp_path):
     writer = Writer(
         filled_test_data,
         os.path.join("tests", "data", "dataconverter", "NXtest.nxdl.xml"),
-        os.path.join(tmp_path, "test.nxs")
+        os.path.join(tmp_path, "test.nxs"),
     )
     yield writer
     del writer
@@ -58,8 +58,8 @@ def test_write(writer):
 def test_write_link(writer):
     """Test for the Writer's write function.
-Checks whether entries given above get written out when a dictionary containing a link is
-given in the template dictionary."""
+    Checks whether entries given above get written out when a dictionary containing a link is
+    given in the template dictionary."""
     writer.write()
     test_nxs = h5py.File(writer.output_path, "r")
     assert isinstance(test_nxs["/my_entry/links/ext_link"], h5py.Dataset)
@@ -69,15 +69,19 @@ def test_wrong_dict_provided_in_template(filled_test_data, tmp_path):
     """Tests if the writer correctly fails when a wrong dictionary is provided"""
     writer = Writer(
-        alter_dict(filled_test_data,
-                   "/ENTRY[my_entry]/links/ext_link",
-                   {"not a link or anything": 2.0}),
+        alter_dict(
+            filled_test_data,
+            "/ENTRY[my_entry]/links/ext_link",
+            {"not a link or anything": 2.0},
+        ),
         os.path.join("tests", "data", "dataconverter", "NXtest.nxdl.xml"),
-        os.path.join(tmp_path, "test.nxs")
+        os.path.join(tmp_path, "test.nxs"),
     )
     with pytest.raises(InvalidDictProvided) as execinfo:
         writer.write()
-    assert str(execinfo.value) == ("pynxtools.dataconverter.exceptions.InvalidDictProvided: "
-                                   "A dictionary was provided to the template but it didn't "
-                                   "fall into any of the know cases of handling dictionaries"
-                                   ". This occured for: ext_link")
+    assert str(execinfo.value) == (
+        "pynxtools.dataconverter.exceptions.InvalidDictProvided: "
+        "A dictionary was provided to the template but it didn't "
+        "fall into any of the know cases of handling dictionaries"
+        ". This occured for: ext_link"
+    )
diff --git a/tests/eln_mapper/test_eln_mapper.py b/tests/eln_mapper/test_eln_mapper.py
index 17f9130dd..4d569a920 100644
--- a/tests/eln_mapper/test_eln_mapper.py
+++ b/tests/eln_mapper/test_eln_mapper.py
@@ -54,24 +54,28 @@ def test_reader_eln(tmp_path):
     """
     local_dir = os.path.abspath(os.path.dirname(__file__))
-    ref_file = os.path.join(local_dir, '../data/eln_mapper/eln.yaml')
+    ref_file = os.path.join(local_dir, "../data/eln_mapper/eln.yaml")

-    test_file = os.path.join(tmp_path, 'eln.yaml')
+    test_file = os.path.join(tmp_path, "eln.yaml")
     cli_run = testing.CliRunner()
-    cli_run.invoke(eln_mapper.get_eln, [
-        "--nxdl",
-        "NXmpes",
-        "--skip-top-levels",
-        1,
-        "--output-file",
-        test_file,
-        "--eln-type",
-        'eln'])
-
-    with open(ref_file, encoding='utf-8', mode='r') as ref_f:
+    cli_run.invoke(
+        eln_mapper.get_eln,
+        [
+            "--nxdl",
+            "NXmpes",
+            "--skip-top-levels",
+            1,
+            "--output-file",
+            test_file,
+            "--eln-type",
+            "eln",
+        ],
+    )
+
+    with open(ref_file, encoding="utf-8", mode="r") as ref_f:
         ref_dict = yaml.safe_load(ref_f)
-    with open(test_file, encoding='utf-8', mode='r') as test_f:
+    with open(test_file, encoding="utf-8", mode="r") as test_f:
         test_dict = yaml.safe_load(test_f)

     check_keys_from_two_dict(ref_dict, test_dict)
@@ -87,21 +91,18 @@ def test_scheme_eln(tmp_path):
     """
     local_dir = os.path.abspath(os.path.dirname(__file__))
-    ref_file = os.path.join(local_dir, '../data/eln_mapper/mpes.scheme.archive.yaml')
+    ref_file = os.path.join(local_dir, "../data/eln_mapper/mpes.scheme.archive.yaml")

-    test_file = os.path.join(tmp_path, '.scheme.archive.yaml')
+    test_file = os.path.join(tmp_path, ".scheme.archive.yaml")
     cli_run = testing.CliRunner()
-    cli_run.invoke(eln_mapper.get_eln, [
-        "--nxdl",
-        "NXmpes",
-        "--output-file",
-        test_file,
-        "--eln-type",
-        'scheme_eln'])
-    with open(ref_file, encoding='utf-8', mode='r') as ref_f:
+    cli_run.invoke(
+        eln_mapper.get_eln,
+        ["--nxdl", "NXmpes", "--output-file", test_file, "--eln-type", "scheme_eln"],
+    )
+    with open(ref_file, encoding="utf-8", mode="r") as ref_f:
         ref_dict = yaml.safe_load(ref_f)
-    with open(test_file, encoding='utf-8', mode='r') as test_f:
+    with open(test_file, encoding="utf-8", mode="r") as test_f:
         test_dict = yaml.safe_load(test_f)

     check_keys_from_two_dict(ref_dict, test_dict)
diff --git a/tests/nexus/test_nexus.py b/tests/nexus/test_nexus.py
index d69b0fae2..efdb3dfa6 100644
--- a/tests/nexus/test_nexus.py
+++ b/tests/nexus/test_nexus.py
@@ -28,24 +28,23 @@ def test_get_nexus_classes_units_attributes():
     """Check the correct parsing of a separate list for:
-Nexus classes (base_classes)
-Nexus units (memberTypes)
-Nexus attribute type (primitiveTypes)
-the tested functions can be found in nexus.py file
-"""
+    Nexus classes (base_classes)
+    Nexus units (memberTypes)
+    Nexus attribute type (primitiveTypes)
+    the tested functions can be found in nexus.py file"""

     # Test 1
     nexus_classes_list = nexus.get_nx_classes()
-    assert 'NXbeam' in nexus_classes_list
+    assert "NXbeam" in nexus_classes_list

     # Test 2
     nexus_units_list = nexus.get_nx_units()
-    assert 'NX_TEMPERATURE' in nexus_units_list
+    assert "NX_TEMPERATURE" in nexus_units_list

     # Test 3
     nexus_attribute_list = nexus.get_nx_attribute_type()
-    assert 'NX_FLOAT' in nexus_attribute_list
+    assert "NX_FLOAT" in nexus_attribute_list


 def test_nexus(tmp_path):
@@ -53,24 +52,25 @@ def test_nexus(tmp_path):
     The nexus test function
     """
     local_dir = os.path.abspath(os.path.dirname(__file__))
-    example_data = os.path.join(local_dir, '../data/nexus/201805_WSe2_arpes.nxs')
+    example_data = os.path.join(local_dir, "../data/nexus/201805_WSe2_arpes.nxs")
     logger = logging.getLogger(__name__)
     logger.setLevel(logging.DEBUG)
-    handler = logging.\
-        FileHandler(os.path.join(tmp_path, 'nexus_test.log'), 'w')
+    handler = logging.FileHandler(os.path.join(tmp_path, "nexus_test.log"), "w")
     handler.setLevel(logging.DEBUG)
-    formatter = logging.Formatter('%(levelname)s - %(message)s')
+    formatter = logging.Formatter("%(levelname)s - %(message)s")
     handler.setFormatter(formatter)
     logger.addHandler(handler)
     nexus_helper = nexus.HandleNexus(logger, example_data, None, None)
     nexus_helper.process_nexus_master_file(None)
-    with open(os.path.join(tmp_path, 'nexus_test.log'), 'r', encoding='utf-8') as logfile:
+    with open(
+        os.path.join(tmp_path, "nexus_test.log"), "r", encoding="utf-8"
+    ) as logfile:
         log = logfile.readlines()
     with open(
-        os.path.join(local_dir, '../data/nexus/Ref_nexus_test.log'),
-        'r',
-        encoding='utf-8'
+        os.path.join(local_dir, "../data/nexus/Ref_nexus_test.log"),
+        "r",
+        encoding="utf-8",
     ) as reffile:
         ref = reffile.readlines()
     assert log == ref
@@ -94,64 +94,69 @@ def test_get_node_at_nxdl_path():
     assert node.attrib["type"] == "NX_FLOAT"
     assert node.attrib["name"] == "float_value"

-    node = nexus.get_node_at_nxdl_path("/ENTRY/NXODD_name/AXISNAME/long_name", elem=elem)
+    node = nexus.get_node_at_nxdl_path(
+        "/ENTRY/NXODD_name/AXISNAME/long_name", elem=elem
+    )
     assert node.attrib["name"] == "long_name"

-    nxdl_file_path = os.path.join(
-        local_dir,
-        "../data/nexus/NXtest2.nxdl.xml"
-    )
+    nxdl_file_path = os.path.join(local_dir, "../data/nexus/NXtest2.nxdl.xml")
     elem = ET.parse(nxdl_file_path).getroot()
     node = nexus.get_node_at_nxdl_path(
-        "/ENTRY/measurement/EVENT_DATA_EM/USER/affiliation",
-        elem=elem)
+        "/ENTRY/measurement/EVENT_DATA_EM/USER/affiliation", elem=elem
+    )
     assert node.attrib["name"] == "affiliation"

     node = nexus.get_node_at_nxdl_path("/ENTRY/measurement", elem=elem)
     assert node.attrib["type"] == "NXevent_data_em_set"

     node = nexus.get_node_at_nxdl_path(
-        "/ENTRY/measurement/EVENT_DATA_EM/SPECTRUM_SET/summary", elem=elem)
+        "/ENTRY/measurement/EVENT_DATA_EM/SPECTRUM_SET/summary", elem=elem
+    )
     assert node.attrib["type"] == "NXdata"

     node = nexus.get_node_at_nxdl_path(
-        "/ENTRY/measurement/EVENT_DATA_EM/SPECTRUM_SET/summary/DATA", elem=elem)
+        "/ENTRY/measurement/EVENT_DATA_EM/SPECTRUM_SET/summary/DATA", elem=elem
+    )
     assert node.attrib["type"] == "NX_NUMBER"

     node = nexus.get_node_at_nxdl_path(
         "/ENTRY/measurement/EVENT_DATA_EM/SPECTRUM_SET/summary/AXISNAME_indices",
-        elem=elem)
+        elem=elem,
+    )
     assert node.attrib["name"] == "AXISNAME_indices"

     node = nexus.get_node_at_nxdl_path("/ENTRY/COORDINATE_SYSTEM_SET", elem=elem)
     assert node.attrib["type"] == "NXcoordinate_system_set"

     node = nexus.get_node_at_nxdl_path(
-        "/ENTRY/COORDINATE_SYSTEM_SET/TRANSFORMATIONS", elem=elem)
+        "/ENTRY/COORDINATE_SYSTEM_SET/TRANSFORMATIONS", elem=elem
+    )
     assert node.attrib["type"] == "NXtransformations"

     node = nexus.get_node_at_nxdl_path(
-        "/ENTRY/COORDINATE_SYSTEM_SET/TRANSFORMATIONS/AXISNAME", elem=elem)
+        "/ENTRY/COORDINATE_SYSTEM_SET/TRANSFORMATIONS/AXISNAME", elem=elem
+    )
     assert node.attrib["type"] == "NX_NUMBER"

     node = nexus.get_node_at_nxdl_path(
         "/ENTRY/COORDINATE_SYSTEM_SET/TRANSFORMATIONS/AXISNAME/transformation_type",
-        elem=elem)
+        elem=elem,
+    )
     assert node.attrib["name"] == "transformation_type"

     nxdl_file_path = os.path.join(
         local_dir,
-        "../../pynxtools/definitions/contributed_definitions/NXiv_temp.nxdl.xml"
+        "../../pynxtools/definitions/contributed_definitions/NXiv_temp.nxdl.xml",
     )
     elem = ET.parse(nxdl_file_path).getroot()
     node = nexus.get_node_at_nxdl_path(
-        "/ENTRY/INSTRUMENT/ENVIRONMENT/voltage_controller",
-        elem=elem)
+        "/ENTRY/INSTRUMENT/ENVIRONMENT/voltage_controller", elem=elem
+    )
     assert node.attrib["name"] == "voltage_controller"

     node = nexus.get_node_at_nxdl_path(
-        "/ENTRY/INSTRUMENT/ENVIRONMENT/voltage_controller/calibration_time",
-        elem=elem)
+        "/ENTRY/INSTRUMENT/ENVIRONMENT/voltage_controller/calibration_time", elem=elem
+    )
     assert node.attrib["name"] == "calibration_time"
@@ -160,22 +165,23 @@ def test_get_inherited_nodes():
     local_dir = os.path.abspath(os.path.dirname(__file__))
     nxdl_file_path = os.path.join(
         local_dir,
-        "../../pynxtools/definitions/contributed_definitions/NXiv_temp.nxdl.xml"
+        "../../pynxtools/definitions/contributed_definitions/NXiv_temp.nxdl.xml",
     )
     elem = ET.parse(nxdl_file_path).getroot()
     (_, _, elist) = nexus.get_inherited_nodes(
-        nxdl_path="/ENTRY/INSTRUMENT/ENVIRONMENT",
-        elem=elem)
+        nxdl_path="/ENTRY/INSTRUMENT/ENVIRONMENT", elem=elem
+    )
     assert len(elist) == 3
     (_, _, elist) = nexus.get_inherited_nodes(
-        nxdl_path="/ENTRY/INSTRUMENT/ENVIRONMENT/voltage_controller",
-        elem=elem)
+        nxdl_path="/ENTRY/INSTRUMENT/ENVIRONMENT/voltage_controller", elem=elem
+    )
     assert len(elist) == 4
     (_, _, elist) = nexus.get_inherited_nodes(
         nxdl_path="/ENTRY/INSTRUMENT/ENVIRONMENT/voltage_controller",
-        nx_name="NXiv_temp")
+        nx_name="NXiv_temp",
+    )
     assert len(elist) == 4
@@ -185,54 +191,54 @@ def test_c_option(tmp_path):
     """
     local_path = os.path.dirname(__file__)
-    path_to_ref_files = os.path.join(local_path, '../data/nexus/')
-    ref_file = path_to_ref_files + 'Ref1_c_option_test.log'
-    tmp_file = os.path.join(tmp_path, 'c_option_1_test.log')
+    path_to_ref_files = os.path.join(local_path, "../data/nexus/")
+    ref_file = path_to_ref_files + "Ref1_c_option_test.log"
+    tmp_file = os.path.join(tmp_path, "c_option_1_test.log")

     logger = logging.getLogger(__name__)
     logger.setLevel(logging.INFO)
-    handler = logging.FileHandler(tmp_file, 'w')
+    handler = logging.FileHandler(tmp_file, "w")

-    with open(ref_file, encoding='utf-8', mode='r') as ref_f:
+    with open(ref_file, encoding="utf-8", mode="r") as ref_f:
         ref = ref_f.readlines()

-    handler = logging.FileHandler(tmp_file, 'w')
+    handler = logging.FileHandler(tmp_file, "w")
     handler.setLevel(logging.INFO)
-    formatter = logging.Formatter('%(levelname)s: %(message)s')
+    formatter = logging.Formatter("%(levelname)s: %(message)s")
     handler.setFormatter(formatter)
     logger.addHandler(handler)
-    nexus_helper = nexus.HandleNexus(logger, None, None, '/NXbeam')
+    nexus_helper = nexus.HandleNexus(logger, None, None, "/NXbeam")
     nexus_helper.process_nexus_master_file(None)
-    with open(tmp_file, encoding='utf-8', mode='r') as tmp_f:
+    with open(tmp_file, encoding="utf-8", mode="r") as tmp_f:
         tmp = tmp_f.readlines()
     assert tmp == ref
     logger.removeHandler(handler)

-    handler = logging.FileHandler(tmp_file, 'w')
+    handler = logging.FileHandler(tmp_file, "w")
     handler.setLevel(logging.INFO)
-    formatter = logging.Formatter('%(levelname)s: %(message)s')
+    formatter = logging.Formatter("%(levelname)s: %(message)s")
     handler.setFormatter(formatter)
     logger.addHandler(handler)
-    nexus_helper = nexus.HandleNexus(logger, None, None, '/NXdetector/data')
+    nexus_helper = nexus.HandleNexus(logger, None, None, "/NXdetector/data")
     nexus_helper.process_nexus_master_file(None)
-    with open(tmp_file, encoding='utf-8', mode='r') as tmp_f:
+    with open(tmp_file, encoding="utf-8", mode="r") as tmp_f:
         tmp = tmp_f.readlines()
-    assert tmp[0] == 'INFO: entry/instrument/analyser/data\n'
+    assert tmp[0] == "INFO: entry/instrument/analyser/data\n"
     logger.removeHandler(handler)

-    handler = logging.FileHandler(tmp_file, 'w')
+    handler = logging.FileHandler(tmp_file, "w")
     handler.setLevel(logging.INFO)
-    formatter = logging.Formatter('%(levelname)s: %(message)s')
+    formatter = logging.Formatter("%(levelname)s: %(message)s")
     handler.setFormatter(formatter)
     logger.addHandler(handler)
-    nexus_helper = nexus.HandleNexus(logger, None, None, '/NXdata@signal')
+    nexus_helper = nexus.HandleNexus(logger, None, None, "/NXdata@signal")
     nexus_helper.process_nexus_master_file(None)
-    with open(tmp_file, encoding='utf-8', mode='r') as tmp_f:
+    with open(tmp_file, encoding="utf-8", mode="r") as tmp_f:
         tmp = tmp_f.readlines()
-    assert tmp[0] == 'INFO: entry/data@signal\n'
+    assert tmp[0] == "INFO: entry/data@signal\n"


 def test_d_option(tmp_path):
@@ -240,21 +246,26 @@ def test_d_option(tmp_path):
     To check -d option for default NXarpes test data file.
     """
-    tmp_file = os.path.join(tmp_path, 'd_option_1_test.log')
+    tmp_file = os.path.join(tmp_path, "d_option_1_test.log")

     logger = logging.getLogger(__name__)
     logger.setLevel(logging.DEBUG)
-    handler = logging.FileHandler(tmp_file, 'w')
+    handler = logging.FileHandler(tmp_file, "w")

-    handler = logging.FileHandler(tmp_file, 'w')
+    handler = logging.FileHandler(tmp_file, "w")
     handler.setLevel(logging.DEBUG)
-    formatter = logging.Formatter('%(levelname)s: %(message)s')
+    formatter = logging.Formatter("%(levelname)s: %(message)s")
     handler.setFormatter(formatter)
     logger.addHandler(handler)
-    nexus_helper = nexus.HandleNexus(logger, None, '/entry/instrument/analyser/data', None)
+    nexus_helper = nexus.HandleNexus(
+        logger, None, "/entry/instrument/analyser/data", None
+    )
     nexus_helper.process_nexus_master_file(None)
-    with open(tmp_file, encoding='utf-8', mode='r') as tmp_f:
+    with open(tmp_file, encoding="utf-8", mode="r") as tmp_f:
         tmp = tmp_f.readlines()
-    assert tmp[0] == 'DEBUG: ===== FIELD (//entry/instrument/analyser/data): ' + '\n'
+    assert (
+        tmp[0]
+        == "DEBUG: ===== FIELD (//entry/instrument/analyser/data): "
+        + '\n'
+    )