diff --git a/imgs.batch.bash b/imgs.batch.sh
old mode 100644
new mode 100755
similarity index 100%
rename from imgs.batch.bash
rename to imgs.batch.sh
diff --git a/imgs.dev.ipynb b/imgs.dev.ipynb
new file mode 100755
index 000000000..deee2d480
--- /dev/null
+++ b/imgs.dev.ipynb
@@ -0,0 +1,236 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3889c47f-11c4-4bf3-97de-04fc52f0798d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# pick one example file; the last assignment wins\n",
+    "fnm = \"/home/kaiobach/Research/paper_paper_paper/scidat_nomad_ebsd/bb_analysis/data/production_imgs/ALN_baoh_021.tif\"\n",
+    "fnm = \"/home/kaiobach/Research/paper_paper_paper/scidat_nomad_ebsd/bb_analysis/data/production_imgs/FeMoOx_AntiA_04_1k5x_CN.tif\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6da1aea0-545b-446b-a3d1-1574af72f6c6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "from PIL import Image\n",
+    "from PIL.TiffTags import TAGS\n",
+    "# print(TAGS)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1963afb6-6e48-4628-a0e8-d2da0874701e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with Image.open(fnm, mode=\"r\") as fp:\n",
+    "    for key in fp.tag_v2:\n",
+    "        if key in [34118, 34119]:\n",
+    "            print(type(fp.tag[key]))\n",
+    "            print(len(fp.tag[key]))\n",
+    "            # print(f\"{key}, {fp.tag[key]}\")\n",
+    "        if key not in TAGS.keys():\n",
+    "            print(f\"--->tag {key}, is not in PIL.TiffTAGS !\")\n",
+    "    # self.tags = {TAGS[key] : fp.tag[key] for key in fp.tag_v2}\n",
+    "    # for key, val in self.tags.items():\n",
+    "    #     print(f\"{key}, {val}\")\n",
+    "    nparr = np.array(fp)\n",
+    "    print(f\"{type(nparr)}\")\n",
+    "    print(f\"{nparr.dtype}\")\n",
+    "    print(f\"{np.shape(nparr)}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a9ef2a35-a260-4a54-9b83-eae1d588966f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with Image.open(fnm, mode=\"r\") as fp:\n",
+    "    czi_keys = [34118, 34119]\n",
+    "    for czi_key in czi_keys:\n",
+    "        if czi_key in fp.tag_v2:\n",
+    "            utf = fp.tag[czi_key]\n",
+    "            print(type(utf))\n",
+    "            if len(utf) == 1:\n",
+    "                print(utf[0])\n",
+    "                exit(1)  # stop early once a Zeiss-style file was identified\n",
+    "    tfs_keys = [34682]\n",
+    "    for tfs_key in tfs_keys:\n",
+    "        if tfs_key in fp.tag_v2:\n",
+    "            utf = fp.tag[tfs_key]\n",
+    "            print(type(utf))\n",
+    "            if len(utf) == 1:\n",
+    "                print(utf[0])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a8ada062-e308-4288-8f00-b3e620f3c890",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def sort_tuple(tup):\n",
+    "    # sort a list of (name, offset) tuples ascendingly by their second entry\n",
+    "    return sorted(tup, key=lambda pair: pair[1])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d27df293-626c-4d37-80df-96c182d4f401",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def if_str_represents_float(s):\n",
+    "    try:\n",
+    "        float(s)\n",
+    "        return str(float(s)) == s\n",
+    "    except ValueError:\n",
+    "        return False"
+   ]
+  },
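+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9c2d7e55-0000-4e3a-9d10-aaaaaaaaaaa1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# added usage example (cell not in the original notebook):\n",
+    "# sort_tuple orders (section, offset) pairs by offset, and\n",
+    "# if_str_represents_float tells float-like from int-like value strings\n",
+    "print(sort_tuple([(\"[System]\", 30), (\"[User]\", 10), (\"[ColdStage]\", -1)]))\n",
+    "print(if_str_represents_float(\"1.5\"), if_str_represents_float(\"2\"), if_str_represents_float(\"abc\"))"
+   ]
+  },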
"1a2f0864-f8b3-4d53-bf9d-08a5787c32fb", + "metadata": {}, + "outputs": [], + "source": [ + "# TFS sections based on IKZ ALN_baoh_021.tif example\n", + "import mmap\n", + "\n", + "tfs_section_names = [\"[User]\",\n", + " \"[System]\",\n", + " \"[Beam]\",\n", + " \"[EBeam]\", \n", + " \"[GIS]\",\n", + " \"[Scan]\",\n", + " \"[EScan]\",\n", + " \"[Stage]\",\n", + " \"[Image]\",\n", + " \"[Vacuum]\",\n", + " \"[Specimen]\",\n", + " \"[Detectors]\",\n", + " \"[T2]\",\n", + " \"[Accessories]\",\n", + " \"[EBeamDeceleration]\",\n", + " \"[CompoundLensFilter]\",\n", + " \"[PrivateFei]\",\n", + " \"[HiResIllumination]\",\n", + " \"[EasyLift]\",\n", + " \"[HotStageMEMS]\",\n", + " \"[HotStage]\",\n", + " \"[HotStageHVHS]\",\n", + " \"[ColdStage]\"]\n", + "\n", + "tfs_section_details = {\"[System]\": [\"Type\", \"Dnumber\", \"Software\", \"BuildNr\", \"Source\", \"Column\", \"FinalLens\", \"Chamber\", \"Stage\", \"Pump\",\n", + " \"ESEM\", \"Aperture\", \"Scan\", \"Acq\", \"EucWD\", \"SystemType\", \"DisplayWidth\", \"DisplayHeight\"]}\n", + "tfs_section_offsets = {}\n", + "\n", + "with open(fnm, 'rb', 0) as file:\n", + " s = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)\n", + " for section_name in tfs_section_names:\n", + " pos = s.find(bytes(section_name, \"utf8\")) # != -1\n", + " tfs_section_offsets[section_name] = pos\n", + " print(tfs_section_offsets)\n", + "\n", + " # define search offsets\n", + " tpl = []\n", + " for key, value in tfs_section_offsets.items():\n", + " tpl.append((key, value))\n", + " # print(tpl)\n", + " tpl = sort_tuple(tpl)\n", + " print(tpl)\n", + " # if section_name == \"[System]\":\n", + " pos_s = None\n", + " pos_e = None\n", + " for idx in np.arange(0, len(tpl)):\n", + " if tpl[idx][0] != \"[System]\":\n", + " continue\n", + " else:\n", + " pos_s = tpl[idx][1]\n", + " if idx <= len(tpl) - 1:\n", + " pos_e = tpl[idx + 1][1]\n", + " break\n", + " print(f\"Search in between byte offsets {pos_s} and {pos_e}\")\n", + " # fish metadata of e.g. 
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2f3eb287-8f55-424c-a016-a07fc59f068a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "'2'.isdigit()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c1341e30-fcce-4a3d-a099-d342b8bbe318",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/pynxtools/dataconverter/readers/em/reader.py b/pynxtools/dataconverter/readers/em/reader.py
index a8aff8d4b..ae21ad233 100644
--- a/pynxtools/dataconverter/readers/em/reader.py
+++ b/pynxtools/dataconverter/readers/em/reader.py
@@ -22,15 +22,11 @@
 from typing import Tuple, Any
 
 from pynxtools.dataconverter.readers.base.reader import BaseReader
-
 from pynxtools.dataconverter.readers.em.concepts.nexus_concepts import NxEmAppDef
-
 # from pynxtools.dataconverter.readers.em.subparsers.nxs_mtex import NxEmNxsMTexSubParser
-
 from pynxtools.dataconverter.readers.em.subparsers.nxs_pyxem import NxEmNxsPyxemSubParser
-
+from pynxtools.dataconverter.readers.em.subparsers.nxs_imgs import NxEmImagesSubParser
 from pynxtools.dataconverter.readers.em.utils.default_plots import NxEmDefaultPlotResolver
-
 # from pynxtools.dataconverter.readers.em.geometry.convention_mapper import NxEmConventionMapper
 
 # remaining subparsers to be implemented and merged into this one
@@ -118,13 +114,19 @@ def read(self,
         # sub_parser = "nxs_mtex"
         # subparser = NxEmNxsMTexSubParser(entry_id, file_paths[0])
         # subparser.parse(template)
+        # TODO::check correct loop through!
 
         # add further with resolving cases
         # if file_path is an HDF5 will use hfive parser
         # sub_parser = "nxs_pyxem"
-        subparser = NxEmNxsPyxemSubParser(entry_id, file_paths[0])
+        # subparser = NxEmNxsPyxemSubParser(entry_id, file_paths[0])
+        # subparser.parse(template)
+        # TODO::check correct loop through!
+
+        # sub_parser = "image_tiff"
+        subparser = NxEmImagesSubParser(entry_id, file_paths[0])
         subparser.parse(template)
-        # exit(1)
+        exit(1)
 
         # for dat_instance in case.dat_parser_type:
         #     print(f"Process pieces of information in {dat_instance} tech partner file...")
diff --git a/pynxtools/dataconverter/readers/em/subparsers/hfive_apex.py b/pynxtools/dataconverter/readers/em/subparsers/hfive_apex.py
index e487c5287..fbc7b91da 100644
--- a/pynxtools/dataconverter/readers/em/subparsers/hfive_apex.py
+++ b/pynxtools/dataconverter/readers/em/subparsers/hfive_apex.py
@@ -40,9 +40,10 @@ def __init__(self, file_path: str = ""):
         self.tmp = {}
         self.supported_version: Dict = {}
         self.version: Dict = {}
-        self.init_support()
         self.supported = False
-        self.check_if_supported()
+        if self.is_hdf is True:
+            self.init_support()
+            self.check_if_supported()
 
     def init_support(self):
         """Init supported versions."""
diff --git a/pynxtools/dataconverter/readers/em/subparsers/hfive_base.py b/pynxtools/dataconverter/readers/em/subparsers/hfive_base.py
index 6c3534bd2..d365d1d34 100644
--- a/pynxtools/dataconverter/readers/em/subparsers/hfive_base.py
+++ b/pynxtools/dataconverter/readers/em/subparsers/hfive_base.py
@@ -68,8 +68,10 @@ def __init__(self, file_path: str = ""):
         self.template_attributes: List = []
         self.templates: Dict = {}
         self.h5r = None
+        self.is_hdf = True
         if file_path is not None and file_path != "":
             self.file_path = file_path
+            # TODO::check if HDF5 file using magic cookie
         else:
             raise ValueError(f"{__name__} needs proper instantiation !")
diff --git a/pynxtools/dataconverter/readers/em/subparsers/hfive_bruker.py b/pynxtools/dataconverter/readers/em/subparsers/hfive_bruker.py
index ad448ebe3..3ebd0aace 100644
--- a/pynxtools/dataconverter/readers/em/subparsers/hfive_bruker.py
+++ b/pynxtools/dataconverter/readers/em/subparsers/hfive_bruker.py
@@ -39,9 +39,10 @@ def __init__(self, file_path: str = ""):
         self.tmp: Dict = {}
         self.supported_version: Dict = {}
         self.version: Dict = {}
-        self.init_support()
         self.supported = False
-        self.check_if_supported()
+        if self.is_hdf is True:
+            self.init_support()
+            self.check_if_supported()
 
     def init_support(self):
         """Init supported versions."""
diff --git a/pynxtools/dataconverter/readers/em/subparsers/hfive_dreamthreed.py b/pynxtools/dataconverter/readers/em/subparsers/hfive_dreamthreed.py
index 0502c6519..0816e0466 100644
--- a/pynxtools/dataconverter/readers/em/subparsers/hfive_dreamthreed.py
+++ b/pynxtools/dataconverter/readers/em/subparsers/hfive_dreamthreed.py
@@ -87,9 +87,10 @@ def __init__(self, file_path: str = ""):
         self.path_registry: Dict = {}
         self.supported_version: Dict = {}
         self.version: Dict = {}
-        self.init_support()
         self.supported = False
-        self.check_if_supported()
+        if self.is_hdf is True:
+            self.init_support()
+            self.check_if_supported()
 
     def init_support(self):
         """Init supported versions."""
diff --git a/pynxtools/dataconverter/readers/em/subparsers/hfive_ebsd.py b/pynxtools/dataconverter/readers/em/subparsers/hfive_ebsd.py
index 173ce7ad4..e246bdef8 100644
--- a/pynxtools/dataconverter/readers/em/subparsers/hfive_ebsd.py
+++ b/pynxtools/dataconverter/readers/em/subparsers/hfive_ebsd.py
@@ -39,9 +39,10 @@ def __init__(self, file_path: str = ""):
         self.tmp = {}
         self.supported_version: Dict = {}
         self.version: Dict = {}
-        self.init_support()
         self.supported = False
-        self.check_if_supported()
+        if self.is_hdf is True:
+            self.init_support()
+            self.check_if_supported()
 
     def init_support(self):
         """Init supported versions."""
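Note on the TODO in hfive_base.py above (added sketch, not part of the diff):
every HDF5 file carries the 8-byte magic cookie \x89HDF\r\n\x1a\n at offset 0
(or, when a user block is present, at a power-of-two offset >= 512). A minimal
check for the common case could look like the following; h5py's existing
h5py.is_hdf5() does the same and also handles user-block offsets. The helper
name is hypothetical:

    def has_hdf5_magic_cookie(file_path: str) -> bool:
        """Sketch: True if the file starts with the 8-byte HDF5 signature."""
        with open(file_path, "rb") as fp:
            return fp.read(8) == b"\x89HDF\r\n\x1a\n"

With such a helper, the hard-coded self.is_hdf = True that the hfive_*
subparsers now guard on could become self.is_hdf = has_hdf5_magic_cookie(self.file_path).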
diff --git a/pynxtools/dataconverter/readers/em/subparsers/hfive_edax.py b/pynxtools/dataconverter/readers/em/subparsers/hfive_edax.py
index 157b8ce75..8d7db4a74 100644
--- a/pynxtools/dataconverter/readers/em/subparsers/hfive_edax.py
+++ b/pynxtools/dataconverter/readers/em/subparsers/hfive_edax.py
@@ -37,9 +37,10 @@ def __init__(self, file_path: str = ""):
         self.tmp = {}
         self.supported_version: Dict = {}
         self.version: Dict = {}
-        self.init_support()
         self.supported = False
-        self.check_if_supported()
+        if self.is_hdf is True:
+            self.init_support()
+            self.check_if_supported()
 
     def init_support(self):
         """Init supported versions."""
diff --git a/pynxtools/dataconverter/readers/em/subparsers/hfive_emsoft.py b/pynxtools/dataconverter/readers/em/subparsers/hfive_emsoft.py
index 49405197b..acb75b67f 100644
--- a/pynxtools/dataconverter/readers/em/subparsers/hfive_emsoft.py
+++ b/pynxtools/dataconverter/readers/em/subparsers/hfive_emsoft.py
@@ -33,9 +33,10 @@ def __init__(self, file_path: str = ""):
         self.tmp = {}
         self.supported_version: Dict = {}
         self.version: Dict = {}
-        self.init_support()
         self.supported = False
-        self.check_if_supported()
+        if self.is_hdf is True:
+            self.init_support()
+            self.check_if_supported()
 
     def init_support(self):
         """Init supported versions."""
diff --git a/pynxtools/dataconverter/readers/em/subparsers/hfive_oxford.py b/pynxtools/dataconverter/readers/em/subparsers/hfive_oxford.py
index 04db7b896..8818c93f3 100644
--- a/pynxtools/dataconverter/readers/em/subparsers/hfive_oxford.py
+++ b/pynxtools/dataconverter/readers/em/subparsers/hfive_oxford.py
@@ -42,9 +42,10 @@ def __init__(self, file_path: str = ""):
         # duplicate the code of the base hfive parser for generating NeXus default plots
         self.supported_version: Dict = {}
         self.version: Dict = {}
-        self.init_support()
         self.supported = False
-        self.check_if_supported()
+        if self.is_hdf is True:
+            self.init_support()
+            self.check_if_supported()
 
     def init_support(self):
         """Init supported versions."""
diff --git a/pynxtools/dataconverter/readers/em/subparsers/image_tiff.py b/pynxtools/dataconverter/readers/em/subparsers/image_tiff.py
index ecbac4569..c7969aaf1 100644
--- a/pynxtools/dataconverter/readers/em/subparsers/image_tiff.py
+++ b/pynxtools/dataconverter/readers/em/subparsers/image_tiff.py
@@ -26,29 +26,56 @@
 from pynxtools.dataconverter.readers.em.subparsers.image_base import ImgsBaseParser
 
 
-class TiffReader(ImgsBaseParser):
-    """Read Bruker Esprit H5"""
+class TiffSubParser(ImgsBaseParser):
+    """Read Tagged Image File Format TIF/TIFF."""
     def __init__(self, file_path: str = ""):
         super().__init__(file_path)
         self.prfx = None
         self.tmp: Dict = {}
         self.supported_version: Dict = {}
         self.version: Dict = {}
-        self.supported = False
         self.tags: Dict = {}
+        self.supported = False
         self.check_if_tiff()
-        if self.supported is True:
-            self.get_tags()
 
     def check_if_tiff(self):
-        """Check if instance can at all be likely a TaggedImageFormat file via magic number."""
+        """Check if the resource behind self.file_path is a Tagged Image File Format file."""
         self.supported = 0  # voting-based
+        # Different tech partners all generate tiff files but fill them with
+        # completely different content, much like HDF5; hence tech-partner-specific
+        # parsers are required, and checking the file ending alone is in most
+        # cases insufficient. The magic number at least confirms that one has a
+        # tiff container, but a container can hold almost anything, which is how
+        # tiff is currently used in electron microscopy.
+        # Exporting single images (png, jpg, tiff), often with a scale bar
+        # hard-coded into the image, is common practice but not best practice:
+        # such files are cut off from their contextualization unless one spends
+        # substantial effort on writing rich metadata by hand. Most tech partner
+        # formats nowadays carry very rich metadata; these should be used and
+        # explored more frequently, and tech partners should be encouraged to
+        # document their file formats (ideally using semantic web technology).
+        # The hfive_* subparsers exemplify the same situation for EBSD data in HDF5.
         with open(self.file_path, 'rb', 0) as file:
             s = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
             magic = s.read(4)
-            print(magic)
-            # TODO::add magic number https://en.wikipedia.org/wiki/TIFF
-            self.supported += 1
+            if magic in (b'II*\x00', b'MM\x00*'):  # little-/big-endian, https://en.wikipedia.org/wiki/TIFF
+                self.supported += 1
         if self.supported == 1:
             self.supported = True
         else:
@@ -56,7 +83,19 @@ def check_if_tiff(self):
 
     def get_tags(self):
         """Extract tags if present."""
-        with Image.open(self.file_path, mode="r") as fp:
-            self.tags = {TAGS[key] : fp.tag[key] for key in fp.tag_v2}
-            for key, val in self.tags.items():
-                print(f"{key}, {val}")
+        print("Reporting the tags found in this TIFF file...")
+        # for an overview of tags
+        # https://www.loc.gov/preservation/digital/formats/content/tiff_tags.shtml
+        # with Image.open(self.file_path, mode="r") as fp:
+        #     self.tags = {TAGS[key] : fp.tag[key] for key in fp.tag_v2}
+        #     for key, val in self.tags.items():
+        #         print(f"{key}, {val}")
+
+    def parse_and_normalize(self):
+        """Perform actual parsing filling cache self.tmp."""
+        if self.supported is True:
+            print("Parsing via TiffSubParser...")
+            self.get_tags()
+        else:
+            print(f"{self.file_path} is not a TIFF file this parser can process !")
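A possible next step for TiffSubParser (added sketch, not part of the diff):
the imgs.dev.ipynb cells above probe the vendor-specific private TIFF tags
34118/34119 (Zeiss) and 34682 (TFS/FEI), so tech-partner dispatch could be
sketched as follows; the tag IDs are taken from the notebook, the helper name
and mapping are hypothetical:

    from PIL import Image

    VENDOR_TAGS = {34118: "zeiss", 34119: "zeiss", 34682: "tfs"}

    def guess_tiff_flavor(file_path: str) -> str:
        """Sketch: report which tech partner's private tag a TIFF file carries."""
        with Image.open(file_path, mode="r") as fp:
            for tag_id, vendor in VENDOR_TAGS.items():
                if tag_id in fp.tag_v2:
                    return vendor
        return "unknown"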
diff --git a/pynxtools/dataconverter/readers/em/subparsers/nxs_imgs.py b/pynxtools/dataconverter/readers/em/subparsers/nxs_imgs.py
new file mode 100644
index 000000000..6b2c2f479
--- /dev/null
+++ b/pynxtools/dataconverter/readers/em/subparsers/nxs_imgs.py
@@ -0,0 +1,74 @@
+#
+# Copyright The NOMAD Authors.
+#
+# This file is part of NOMAD. See https://nomad-lab.eu for further info.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Parser mapping content of specific image files on NeXus."""
+
+import numpy as np
+# from typing import Dict, Any, List
+
+from pynxtools.dataconverter.readers.em.subparsers.image_tiff import TiffSubParser
+
+
+class NxEmImagesSubParser:
+    """Map content from different types of image files onto an instance of NXem."""
+
+    def __init__(self, entry_id: int = 1, input_file_name: str = ""):
+        """Overwrite constructor of the generic reader."""
+        if entry_id > 0:
+            self.entry_id = entry_id
+        else:
+            self.entry_id = 1
+        self.file_path = input_file_name
+        self.cache = {"is_filled": False}
+
+    def identify_image_type(self):
+        """Identify if the image matches a known MIME type for which a subparser exists."""
+        # tech partner formats used for measurement
+        img = TiffSubParser(f"{self.file_path}")
+        if img.supported is True:
+            return "tiff"
+        return None
+
+    def parse(self, template: dict) -> dict:
+        image_parser_type = self.identify_image_type()
+        if image_parser_type is None:
+            print(f"{self.file_path} does not match any of the supported image formats")
+            return template
+        print(f"Parsing via {image_parser_type}...")
+        # see also comments for respective nxs_pyxem parser
+        # and its interaction with tech-partner-specific hfive_* subparsers
+
+        if image_parser_type == "tiff":
+            tiff = TiffSubParser(self.file_path)
+            tiff.parse_and_normalize()
+            self.process_into_template(tiff.tmp, template)
+        # anything unsupported falls through and returns the template as-is
+        return template
+
+    def process_into_template(self, inp: dict, template: dict) -> dict:
+        debugging = False
+        if debugging is True:
+            for key, val in inp.items():
+                if isinstance(val, dict):
+                    for ckey, cval in val.items():
+                        print(f"{ckey}, {cval}")
+                else:
+                    print(f"{key}, {val}")
+        # TODO:: implement actual mapping on template
+        # self.process_roi_overview(inp, template)
+        return template
diff --git a/pyxem.dev.ipynb b/pyxem.dev.ipynb
new file mode 100644
index 000000000..8813ff68f
--- /dev/null
+++ b/pyxem.dev.ipynb
@@ -0,0 +1,85 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "685b6ead-42e8-43f1-81c5-d354fee63935",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from jupyterlab_h5web import H5Web\n",
+    "datasource = \"../../../../paper_paper_paper/scidat_nomad_ebsd/bb_analysis/data/production_ebsd_pyxem\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "29a18629-3ab7-4353-955d-4a2c943f6dee",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# note: only the last expression in a cell renders, keep the file of interest last\n",
+    "H5Web(f\"{datasource}/244_0014.dream3d\")  # dream3d, synthetic\n",
+    "H5Web(f\"{datasource}/SmallIN100_Final.dream3d\")  # dream3d, exp\n",
+    "H5Web(f\"{datasource}/173_0057.h5oina\")  # oxford\n",
+    "H5Web(f\"{datasource}/130_0003.h5\")  # bruker\n",
+    "H5Web(f\"{datasource}/088_0009.h5\")  # britton\n",
+    "H5Web(f\"{datasource}/116_0014.h5\")  # edax new, where X Position and Y Position are calibrated by step size\n",
+    "# H5Web(f\"{datasource}/229_2097.oh5\")  # edax old, where X Position and Y Position are not yet calibrated by step size\n",
+    "# H5Web(f\"{datasource}/207_2081.edaxh5\")  # apex"
+   ]
+  },
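+  {
+   "cell_type": "markdown",
+   "id": "9c2d7e55-0000-4e3a-9d10-aaaaaaaaaaa3",
+   "metadata": {},
+   "source": [
+    "Added example cells (not in the original notebook): the cells below use `np.tile` for the fast (x) axis and `np.repeat` for the slow (y) axis to reconstruct calibrated scan positions from grid shape `nxy` and step sizes `sxy`, i.e. the quantity that is pre-calibrated in new EDAX files but not in old ones (see the comments in the `H5Web` cell above). A compact sketch for a full row-major position array:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9c2d7e55-0000-4e3a-9d10-aaaaaaaaaaa4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "def scan_positions(nxy, sxy):\n",
+    "    # row-major scan grid: x cycles fastest, y advances once per row\n",
+    "    xpos = np.tile(np.arange(nxy[0]) * sxy[0], nxy[1])\n",
+    "    ypos = np.repeat(np.arange(nxy[1]) * sxy[1], nxy[0])\n",
+    "    return np.column_stack((xpos, ypos))\n",
+    "\n",
+    "print(scan_positions([2, 3], [0.3, 0.25]))"
+   ]
+  },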
"execution_count": null, + "id": "807b8d48-ee35-4742-be3e-d43063eeefc6", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ad9cef4-a9c1-4c62-b56c-4391e18309a5", + "metadata": {}, + "outputs": [], + "source": [ + "nxy = [2, 3]\n", + "sxy = [0.3, 0.25]\n", + "\n", + "print(np.tile(np.linspace(0, nxy[0] - 1, num=nxy[0], endpoint=True) * sxy[0], nxy[1]))\n", + "print(np.repeat(np.linspace(0, nxy[1] - 1, num=nxy[1], endpoint=True) * sxy[1], nxy[0]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62a0e099-10c8-425d-bc85-a46b55b12cfa", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}