reformat __init__ files
lukaspie committed Nov 29, 2023
1 parent a945ca5 commit 9fbbb37
Showing 7 changed files with 115 additions and 63 deletions.
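All of the changes below are whitespace, line-wrapping, and quoting changes only; the diff is consistent with the output of the black formatter. The commit does not name the tool, so the exact invocation is an assumption; a sketch:

    # hypothetical formatter invocation over the touched reader package
    python -m black pynxtools/dataconverter/readers/xps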
2 changes: 1 addition & 1 deletion pynxtools/dataconverter/readers/xps/__init__.py
@@ -14,4 +14,4 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
+#
17 changes: 17 additions & 0 deletions pynxtools/dataconverter/readers/xps/sle/__init__.py
@@ -0,0 +1,17 @@
+#
+# Copyright The NOMAD Authors.
+#
+# This file is part of NOMAD. See https://nomad-lab.eu for further info.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
17 changes: 17 additions & 0 deletions pynxtools/dataconverter/readers/xps/txt/__init__.py
@@ -0,0 +1,17 @@
+#
+# Copyright The NOMAD Authors.
+#
+# This file is part of NOMAD. See https://nomad-lab.eu for further info.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
2 changes: 1 addition & 1 deletion pynxtools/dataconverter/readers/xps/vms/__init__.py
@@ -14,4 +14,4 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
+#
2 changes: 1 addition & 1 deletion pynxtools/dataconverter/readers/xps/xml/__init__.py
@@ -14,4 +14,4 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
+#
2 changes: 1 addition & 1 deletion pynxtools/dataconverter/readers/xps/xy/__init__.py
@@ -14,4 +14,4 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
+#
136 changes: 77 additions & 59 deletions pynxtools/dataconverter/readers/xps/xy/xy_specs.py
@@ -108,19 +108,15 @@ def construct_data(self):
"source_analyzer_angle",
],
"beam": ["excitation_energy"],
"analyser": [
"analyzer_name"
],
"collectioncolumn": [
"lens_mode"
],
"analyser": ["analyzer_name"],
"collectioncolumn": ["lens_mode"],
"energydispersion": [
"scan_mode",
"pass_energy",
],
"detector": [
"detector_voltage",
-                ],
+            ],
"manipulator": [],
"sample": ["target_bias"],
"calibration": [],
@@ -140,8 +136,8 @@ def construct_data(self):
"spectrum_id",
"time_stamp",
"scans",
"spectrum_id"
'cycle_no',
"spectrum_id",
"cycle_no",
],
}

@@ -210,19 +206,21 @@ def _update_xps_dict_with_spectrum(self, spectrum, key_map):
averaged_channels = spectrum["data"]["y"]
else:
all_channel_data = [
-                value for key, value in self._xps_dict.items() if
-                detector_data_key.split('Channel_')[0] in key
-            ]
-            averaged_channels = np.mean(all_channel_data, axis = 0)
+                value
+                for key, value in self._xps_dict.items()
+                if detector_data_key.split("Channel_")[0] in key
+            ]
+            averaged_channels = np.mean(all_channel_data, axis=0)

if not self.parser.export_settings["Separate Scan Data"]:
averaged_scans = spectrum["data"]["y"]
else:
all_scan_data = [
-                value for key, value in self._xps_dict.items() if
-                detector_data_key.split('Scan_')[0] in key
-            ]
-            averaged_scans = np.mean(all_scan_data, axis = 0)
+                value
+                for key, value in self._xps_dict.items()
+                if detector_data_key.split("Scan_")[0] in key
+            ]
+            averaged_scans = np.mean(all_scan_data, axis=0)

# Writing order: scan, cycle, channel data
self._xps_dict["data"][entry][scan_key.split("_")[0]] = xr.DataArray(
@@ -235,15 +233,14 @@ def _update_xps_dict_with_spectrum(self, spectrum, key_map):
coords={x_units: energy},
)

-        if (self.parser.export_settings["Separate Channel Data"] and
-            self.write_channels_to_data):
+        if (
+            self.parser.export_settings["Separate Channel Data"]
+            and self.write_channels_to_data
+        ):
channel_no = spectrum["channel_no"]
self._xps_dict["data"][entry][
f"{scan_key}_chan{channel_no}"
-            ] = xr.DataArray(
-                data=spectrum["data"]["y"],
-                coords={x_units: energy}
-            )
+            ] = xr.DataArray(data=spectrum["data"]["y"], coords={x_units: energy})


class XyProdigyParser:
@@ -315,7 +312,6 @@ def parse_file(self, file, **kwargs):
if "n_headerlines" in kwargs.keys():
self.n_headerlines = kwargs["n_headerlines"]

-
self.lines = self._read_lines(file)
header, data = self._separate_header()
self.export_settings = self._parse_export_settings(header)
@@ -357,8 +353,8 @@ def _separate_header(self):
List of list containing data strings.
"""
-        header = self.lines[:self.n_headerlines]
-        groups = self.lines[self.n_headerlines:]
+        header = self.lines[: self.n_headerlines]
+        groups = self.lines[self.n_headerlines :]

return header, groups

@@ -377,12 +373,12 @@ def _parse_export_settings(self, header):
Dictionary of export settings.
"""
-        export_lines = self.lines[:self.n_headerlines]
+        export_lines = self.lines[: self.n_headerlines]

bool_map = {
"yes": True,
"no": False,
-            }
+        }

export_settings = {}
for line in export_lines:
@@ -415,7 +411,10 @@ def _handle_groups(self, data):
"""
grouped_list = [
-            list(g) for _, g in itertools.groupby(data, lambda line: "Group:" in line.strip(self.prefix).strip())
+            list(g)
+            for _, g in itertools.groupby(
+                data, lambda line: "Group:" in line.strip(self.prefix).strip()
+            )
][1:]

groups = OrderedDict()
@@ -425,7 +424,7 @@ def _handle_groups(self, data):
group_settings = {"group_name": name}
groups[name] = {
"group_settings": self._replace_keys(group_settings, self.settings_map),
-                }
+            }
groups[name].update(self._handle_regions(group_data))

return groups
@@ -449,7 +448,10 @@ def _handle_regions(self, group_data):
"""
grouped_list = [
-            list(g) for _, g in itertools.groupby(group_data, lambda line: "Region:" in line.strip(self.prefix).strip())
+            list(g)
+            for _, g in itertools.groupby(
+                group_data, lambda line: "Region:" in line.strip(self.prefix).strip()
+            )
][1:]

regions = OrderedDict()
@@ -469,8 +471,10 @@ def _handle_regions(self, group_data):
region_settings[setting_name] = val

regions[name] = {
"region_settings": self._replace_keys(region_settings, self.settings_map),
}
"region_settings": self._replace_keys(
region_settings, self.settings_map
),
}
regions[name].update(self._handle_cycles(region_data))

return regions
@@ -500,18 +504,22 @@ def _handle_cycles(self, region_data):

for i, line in enumerate(region_data):
if cycle_pattern.match(line):
cycle_line_nrs["cycle_" + str(int(self._strip_param(line, 'Cycle:')))] = i
if i == len(region_data)-1:
cycle_line_nrs[
"cycle_" + str(int(self._strip_param(line, "Cycle:")))
] = i
if i == len(region_data) - 1:
cycle_line_nrs["end"] = i + 1

-        for i, (line_no_a, line_no_b) in enumerate(zip(list(cycle_line_nrs.values()), list(cycle_line_nrs.values())[1:])):
+        for i, (line_no_a, line_no_b) in enumerate(
+            zip(list(cycle_line_nrs.values()), list(cycle_line_nrs.values())[1:])
+        ):
name = f"cycle_{i}"
cycle_settings = {"loop_no": i}
cycle_data = region_data[line_no_a:line_no_b]

cycles[name] = {
"cycle_settings": self._replace_keys(cycle_settings, self.settings_map),
-                }
+            }
cycles[name].update(self._handle_individual_cycles(cycle_data))

return cycles
@@ -545,15 +553,27 @@ def _handle_individual_cycles(self, cycle_data):

for i, line in enumerate(cycle_data):
if spec_pattern.match(line):
-                name_dict = dict((a.strip(), int(b.strip()))
-                                 for a, b in (element.split(': ')
-                                 for element in line.strip(self.prefix).strip().split(', ')))
-                name = ''.join([f"{key.lower()}_{val}_" for key, val in name_dict.items() if key != "Curve"]).rstrip("_")
+                name_dict = dict(
+                    (a.strip(), int(b.strip()))
+                    for a, b in (
+                        element.split(": ")
+                        for element in line.strip(self.prefix).strip().split(", ")
+                    )
+                )
+                name = "".join(
+                    [
+                        f"{key.lower()}_{val}_"
+                        for key, val in name_dict.items()
+                        if key != "Curve"
+                    ]
+                ).rstrip("_")
scan_line_nrs[name] = i
-            if i == len(cycle_data)-1:
+            if i == len(cycle_data) - 1:
scan_line_nrs["end"] = i + 1

-        for i, ((name, line_no_a), line_no_b) in enumerate(zip(list(scan_line_nrs.items()), list(scan_line_nrs.values())[1:])):
+        for i, ((name, line_no_a), line_no_b) in enumerate(
+            zip(list(scan_line_nrs.items()), list(scan_line_nrs.values())[1:])
+        ):
scan_data = cycle_data[line_no_a:line_no_b]
scan = self._handle_individual_scan(scan_data)
scan["scan_settings"].update(self._extend_scan_settings(name))
@@ -588,12 +608,15 @@ def _handle_individual_scan(self, scan_data):
scan_settings = {}

for i, line in enumerate(scan_data):
-            if (line.startswith(self.prefix) and line.strip(self.prefix).strip("\n")):
-                key, val = [item.strip() for item in line.strip(self.prefix).strip("\n").split(":",1)]
+            if line.startswith(self.prefix) and line.strip(self.prefix).strip("\n"):
+                key, val = [
+                    item.strip()
+                    for item in line.strip(self.prefix).strip("\n").split(":", 1)
+                ]
if key == "Acquisition Date":
scan_settings[key] = self._parse_datetime(val)

if key == "ColumnLabels":
if key == "ColumnLabels":
if not self.export_settings["Transmission Function"]:
x_units, y_units = val.split(" ")
scan_settings["x_units"] = x_units
@@ -605,7 +628,7 @@ def _handle_individual_scan(self, scan_data):
scan_settings["y_units"] = self._reformat_y_units(y_units)
scan_settings["tf_units"] = tf_units

-            if (not line.startswith(self.prefix) and line.strip("\n")):
+            if not line.startswith(self.prefix) and line.strip("\n"):
data = line.strip("\n").split(" ")
data = [d for d in data if d]
x.append(float(data[0]))
@@ -620,9 +643,9 @@ def _handle_individual_scan(self, scan_data):
"data": {
"x": np.array(x),
"y": np.array(y),
-                },
-            "scan_settings": self._replace_keys(scan_settings, self.settings_map)
-            }
+            },
+            "scan_settings": self._replace_keys(scan_settings, self.settings_map),
+        }

if self.export_settings["Transmission Function"]:
scan["data"]["transmission_function"] = np.array(transmission_function)
@@ -655,13 +678,12 @@ def _extend_scan_settings(self, scan_name):

split_name = scan_name.split("_")

-        for param, val in zip(split_name[::2],split_name[1::2]):
+        for param, val in zip(split_name[::2], split_name[1::2]):
if param != "cycle":
settings[f"{param}_no"] = int(val)

return settings

-
def _strip_param(self, line, key):
"""
Split the scan name and extract the scan metadata.
@@ -687,7 +709,6 @@ def _strip_param(self, line, key):
if key in line:
return line.strip().split(self.prefix + " " + key)[-1].strip()

-
def _flatten_dict(self, data_dict):
"""
Flatten a raw data dict into a list, with each element
@@ -721,8 +742,8 @@ def _flatten_dict(self, data_dict):
group_settings,
region_settings,
cycle_settings,
-                scan_settings
-                ]:
+                scan_settings,
+            ]:
spectrum.update(settings)
spectrum["data"] = scan["data"]
spectra.append(spectrum)
@@ -793,9 +814,6 @@ def _reformat_y_units(self, y_units):
Shortened intensity units.
"""
-        unit_map = {
-            "counts/s": "CPS",
-            "counts": "Counts"
-            }
+        unit_map = {"counts/s": "CPS", "counts": "Counts"}

return unit_map[y_units]

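For orientation, a minimal usage sketch of the parser reformatted above; the file name and the n_headerlines value are hypothetical, and the return value of parse_file is an assumption inferred from the _flatten_dict helper shown in this diff:

    from pynxtools.dataconverter.readers.xps.xy.xy_specs import XyProdigyParser

    parser = XyProdigyParser()
    # "n_headerlines" is the kwarg handled in parse_file above; 14 is a guess.
    spectra = parser.parse_file("measurement.xy", n_headerlines=14)
    # Assumed: parse_file returns the flattened spectra list built by
    # _flatten_dict, one dict per scan with "data" arrays and merged settings.
    for spectrum in spectra:
        print(spectrum["data"]["x"], spectrum["data"]["y"])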