diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..0ec99cd
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,2 @@
+## Code changes automatically request review from the following owners
+* @medley56
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000..e488e03
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,11 @@
+# Title of PR
+
+Brief description of changes. If you write good commit messages, put the concatenated list of messages here.
+
+
+## Checklist
+- [ ] Changes are fully implemented without dangling issues or TODO items
+- [ ] Deprecated/superseded code is removed or marked with a deprecation warning
+- [ ] Current dependencies have been properly specified and old dependencies removed
+- [ ] New code/functionality has accompanying tests and any old tests have been updated to match any new assumptions
+- [ ] The changelog.md has been updated
diff --git a/.github/workflows/run-pycodestyle.yml b/.github/workflows/run-pycodestyle.yml
new file mode 100644
index 0000000..8840659
--- /dev/null
+++ b/.github/workflows/run-pycodestyle.yml
@@ -0,0 +1,12 @@
+name: Run PyCodeStyle
+on: pull_request
+jobs:
+  pycodestyle:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out repo
+        uses: actions/checkout@v4
+      - name: Build Docker image
+        run: docker build . --file Dockerfile --target style --tag space-packet-parser-style:latest
+      - name: Run PyCodeStyle in Docker
+        run: docker run -i space-packet-parser-style:latest
diff --git a/.github/workflows/run-pylint.yml b/.github/workflows/run-pylint.yml
new file mode 100644
index 0000000..570ddc9
--- /dev/null
+++ b/.github/workflows/run-pylint.yml
@@ -0,0 +1,12 @@
+name: Run Pylint
+on: pull_request
+jobs:
+  pylint:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out repo
+        uses: actions/checkout@v4
+      - name: Build Docker image
+        run: docker build . --file Dockerfile --target lint --tag space-packet-parser-lint:latest
+      - name: Run Pylint in Docker
+        run: docker run -i space-packet-parser-lint:latest
diff --git a/.github/workflows/test-python-version.yml b/.github/workflows/test-python-version.yml
index 173a324..90b53a7 100644
--- a/.github/workflows/test-python-version.yml
+++ b/.github/workflows/test-python-version.yml
@@ -5,18 +5,15 @@ on:
       python-version:
         required: true
         type: string
-      bitstring-version:
-        required: false
-        type: string
-        default: ""
 jobs:
   test:
     name: Run Tests in Docker
     runs-on: ubuntu-latest
     timeout-minutes: 5
     steps:
-      - uses: actions/checkout@v3
-      - name: Build Test Docker Image
-        run: docker build . --file Dockerfile --build-arg BASE_IMAGE_PYTHON_VERSION=${{ inputs.python-version }} --build-arg BITSTRING_VERSION=${{ inputs.bitstring-version }} --tag space-packet-parser-${{ inputs.python-version }}-test:latest
+      - name: Check out repo
+        uses: actions/checkout@v4
+      - name: Build test Docker image
+        run: docker build .
--file Dockerfile --target test --build-arg BASE_IMAGE_PYTHON_VERSION=${{ inputs.python-version }} --tag space-packet-parser-${{ inputs.python-version }}-test:latest - name: Run Tests in Docker run: docker run -i space-packet-parser-${{ inputs.python-version }}-test:latest \ No newline at end of file diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1804cf5..c8a5bcc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,16 +1,10 @@ name: Test with Matrix of Python Versions -on: - push: - branches: [ "main", "dev" ] - pull_request: - branches: [ "main", "dev" ] +on: pull_request jobs: python-version-matrix: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] - bitstring-version: ["", "3.0.0", "4.1.2"] uses: ./.github/workflows/test-python-version.yml with: - python-version: ${{ matrix.python-version }} - bitstring-version: ${{ matrix.bitstring-version }} \ No newline at end of file + python-version: ${{ matrix.python-version }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index 5a8e46f..3aa1c3a 100644 --- a/.gitignore +++ b/.gitignore @@ -58,10 +58,12 @@ Thumbs.db ####################### .idea .project +.run # Virtual environment # ####################### venv +.venv # Configurations # ################## diff --git a/.readthedocs.yml b/.readthedocs.yml index 788bcb6..a0e95f9 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -8,11 +8,11 @@ build: post_create_environment: # Install poetry - pip install poetry - # Tell poetry to not use a virtual environment + # Tell poetry to not create a new virtual environment but use the current one - poetry config virtualenvs.create false post_install: # Install dependencies. Our doc building dependencies are part of the dev group - - poetry install + - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install sphinx: configuration: docs/source/conf.py diff --git a/CITATION.cff b/CITATION.cff index a7cd0d3..be5811a 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,7 +1,7 @@ cff-version: 1.2.0 title: 'space_packet_parser' type: software -version: '4.1.0' +version: '4.2.0' description: A CCSDS telemetry packet decoding library based on the XTCE packet format description standard. 
license: BSD-3-Clause abstract: The space_packet_parser Python library is a generalized, configurable packet decoding library for CCSDS telemetry @@ -21,9 +21,12 @@ authors: - email: michael.chambliss@lasp.colorado.edu name: Michael Chambliss orcid: "0009-0003-7493-0542" +- email: greg.lucas@lasp.colorado.edu + name: Greg Lucas + orcid: "0000-0003-1331-1863" maintainers: - email: gavin.medley@lasp.colorado.edu name: Gavin Medley orcid: "0000-0002-3520-9715" repository-code: "https://github.com/medley56/space_packet_parser" -url: "TBD" \ No newline at end of file +url: "https://space-packet-parser.readthedocs.io" \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 2b0d579..70d415f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,7 @@ # Python version with which to test (must be supported and available on dockerhub) ARG BASE_IMAGE_PYTHON_VERSION -FROM python:${BASE_IMAGE_PYTHON_VERSION}-slim - -# Optional bitstring version -ARG BITSTRING_VERSION +FROM python:${BASE_IMAGE_PYTHON_VERSION:-3.12}-slim AS test USER root @@ -29,7 +26,6 @@ ENV PATH="$PATH:/root/.local/bin" COPY space_packet_parser $INSTALL_LOCATION/space_packet_parser COPY tests $INSTALL_LOCATION/tests -COPY pylintrc $INSTALL_LOCATION COPY pyproject.toml $INSTALL_LOCATION # LICENSE.txt is referenced by pyproject.toml COPY LICENSE.txt $INSTALL_LOCATION @@ -44,10 +40,21 @@ RUN pip install --upgrade pip # Install all dependencies (including dev deps) specified in pyproject.toml RUN poetry install -RUN if [ -n "${BITSTRING_VERSION}" ]; then pip install bitstring==$BITSTRING_VERSION; fi - ENTRYPOINT pytest --cov-report=xml:coverage.xml \ --cov-report=term \ --cov=space_packet_parser \ --junitxml=junit.xml \ tests + + +FROM test AS lint + +COPY pylintrc $INSTALL_LOCATION + +ENTRYPOINT pylint space_packet_parser + +FROM test AS style + +COPY pycodestyle.ini $INSTALL_LOCATION + +ENTRYPOINT pycodestyle --config=pycodestyle.ini space_packet_parser diff --git a/README.md b/README.md index 055a540..70c30d5 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,11 @@ # Space Packet Parser ---------- -![Test Status](https://github.com/medley56/space_packet_parser/actions/workflows/tests.yml/badge.svg) -![Doc Status](https://readthedocs.org/projects/space-packet-parser/badge/?version=latest) +[![Test Status](https://github.com/medley56/space_packet_parser/actions/workflows/tests.yml/badge.svg)](https://github.com/medley56/space_packet_parser/actions/workflows/tests.yml) +[![Doc Status](https://readthedocs.org/projects/space-packet-parser/badge/?version=latest)](https://readthedocs.org/projects/space-packet-parser/) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7735001.svg)](https://doi.org/10.5281/zenodo.7735001) -Documentation: https://space-packet-parser.readthedocs.io/en/latest/ +Documentation: [https://space-packet-parser.readthedocs.io/en/latest/](https://space-packet-parser.readthedocs.io/en/latest/) Space Packet Parser is a package for decoding CCSDS telemetry packets according to an XTCE or CSV packet structure definition. 
It is based on the UML model of the XTCE spec and aims to support all but the most esoteric elements of the @@ -20,3 +21,10 @@ Resources: pip install space_packet_parser ``` +## Missions using Space Packet Parser + +[IMAP](https://imap.princeton.edu/) +[CLARREO](https://clarreo-pathfinder.larc.nasa.gov/) +[Libera](https://lasp.colorado.edu/libera/) +[CTIM-FD](https://lasp.colorado.edu/ctim/) +[MMS-FEEPS](https://lasp.colorado.edu/mms/sdc/public/) diff --git a/docker-compose.yml b/docker-compose.yml index 8a2f738..15381c6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,39 +1,52 @@ -version: '3' - services: + lint: + image: space-packets-linting:latest + build: + target: lint + + style: + image: space-packets-style:latest + build: + target: style + 3.8-tests: image: space-packets-3.8-test:latest build: + target: test args: - BASE_IMAGE_PYTHON_VERSION=3.8 3.9-tests: image: space-packets-3.9-test:latest build: + target: test args: - BASE_IMAGE_PYTHON_VERSION=3.9 3.10-tests: image: space-packets-3.10-test:latest build: + target: test args: - BASE_IMAGE_PYTHON_VERSION=3.10 3.11-tests: image: space-packets-3.11-test:latest build: + target: test args: - BASE_IMAGE_PYTHON_VERSION=3.11 3.12-tests: image: space-packets-3.12-test:latest build: + target: test args: - BASE_IMAGE_PYTHON_VERSION=3.12 3.11-tests-min-deps: image: space-packets-3.11-test:latest build: + target: test args: - BASE_IMAGE_PYTHON_VERSION=3.11 - - BITSTRING_VERSION=3.0.0 diff --git a/docs/source/changelog.md b/docs/source/changelog.md index 8e19bea..65f5fbe 100644 --- a/docs/source/changelog.md +++ b/docs/source/changelog.md @@ -1,15 +1,30 @@ # Change Log -This is a log of changes made to the library over time - -## Long Term To-Do List -- Add frame transfer parsing layer on top of CCSDS parsing layer -- Support BooleanExpression in a ContextCalibrator -- Add ByteOrderList support to encodings in xtcedef (search for TODOs) -- Support multiple `xtce:Unit` elements for compound units +This is a log of changes made to the library over time. For planned upcoming changes, please check the GitHub issue +list and release milestones. 
## Version Release Notes Release notes for the `space_packet_parser` library +### v5.0.0 (unreleased) +- BREAKING: Replace `bitstring` objects with native Python bytes objects + - Remove dependency on the `bitstring` library + - Much faster parsing speed + - Users that are passing `bitstring.ConstBitStream` objects to `generator` will need to pass a + binary filelike object instead +- Fix EnumeratedParameterType to handle duplicate labels +- Add error reporting for unsupported and invalid parameter types + +### v4.2.0 (released) +- Parse short and long descriptions of parameters +- Implement equality checking for SequenceContainer objects and Parameter objects +- Include parameter short description and long description in ParsedDataItems +- Add support for AbsoluteTimeParameterType and RelativeTimeParameterType +- Add support for BooleanParameterType +- Drop support for bitstring <4.0.1 +- Support BooleanExpression in a ContextCalibrator +- Default read size is changed to a full file read on file-like objects +- Improve error handling for invalid/unsupported parameter types + ### v4.1.1 (released) - Allow Python 3.12 diff --git a/docs/source/conf.py b/docs/source/conf.py index 92cbf90..44155ef 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -19,6 +19,10 @@ "autoapi.extension" ] +myst_enable_extensions = [ + "html_image" +] + autoapi_type = 'python' autoapi_dirs = ['../../space_packet_parser'] diff --git a/docs/source/contents.rst b/docs/source/contents.rst new file mode 100644 index 0000000..22b02cd --- /dev/null +++ b/docs/source/contents.rst @@ -0,0 +1,8 @@ +.. toctree:: + :maxdepth: 1 + :caption: Contents: + + users.md + developers.md + changelog.md + autoapi/index.rst \ No newline at end of file diff --git a/docs/source/developers.md b/docs/source/developers.md index 274302d..381886b 100644 --- a/docs/source/developers.md +++ b/docs/source/developers.md @@ -17,11 +17,19 @@ To run all tests in docker containers (tests against many versions of python), r docker-compose up --build && docker-compose down ``` +## Making a Pull Request +Feel free to fork this repo and submit a PR! +- If you are working on an issue, link your PR to that issue. +- All PRs should be destined for the `main` branch (trunk-based development). +- Reviews are required before merging and our automated tests must pass. +- Please fill out the PR template that is populated when creating a PR in the GitHub interface. + ## Release Process Reference: [https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow-workflow](https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow-workflow) 1. Create a release candidate branch named according to the version to be released. This branch is used to polish - the release while work continues on dev (towards the next release). The naming convention is `release/X.Y.Z` + the release but is fundamentally not different from any other feature branch in trunk-based development. + The naming convention is `release/X.Y.Z`. 2. Bump the version of the package to the version you are about to release, either manually by editing `pyproject.toml` or by running `poetry version X.Y.Z` or bumping according to a valid bump rule like `poetry version minor` @@ -31,12 +39,13 @@ Reference: [https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow- 4. Update `changelog.md` to reflect that the version is now "released" and revisit `README.md` to keep it up to date. -5. Open a PR to merge the release branch into master. 
This informs the rest of the team how the release - process is progressing as you polish the release branch. +5. Open a PR to merge the release branch into main. This informs the rest of the team how the release + process is progressing as you polish the release branch. You may need to rebase the release branch onto + any recent changes to `main` and resolve any conflicts on a regular basis. -6. When you are satisfied that the release branch is ready, merge the PR into `master`. +6. When you are satisfied that the release branch is ready, merge the PR into `main`. -7. Check out the `master` branch, pull the merged changes, and tag the newly created merge commit with the +7. Check out the `main` branch, pull the merged changes, and tag the newly created merge commit with the desired version `X.Y.Z` and push the tag upstream. ```bash @@ -44,14 +53,11 @@ Reference: [https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow- git push origin X.Y.Z ``` -8. Ensure that you have `master` checked out and build the package (see below). +8. Ensure that you have `main` checked out and build the package (see below). Check that the version of the built artifacts is as you expect (should match the version git tag and the output from `poetry version --short`). 9. Optionally distribute the artifacts to PyPI/Nexus if desired (see below). - -10. Open a PR to merge `master` back into `dev` so that any changes made during the release process are also captured - in `dev`. ## Building and Distribution @@ -59,4 +65,4 @@ Reference: [https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow- 2. To build the distribution archives, run `poetry build`. -3. To upload the wheel to Nexus, run `poetry publish`. You will need credentials to sign into PyPI. +3. To upload the wheel to Nexus, run `poetry publish`. You will need credentials to sign in to PyPI. diff --git a/docs/source/index.rst b/docs/source/index.rst index c42cd02..c5dbc9b 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,31 +1,7 @@ Space Packet Parser Documentation ================================= -.. toctree:: - :maxdepth: 2 - :caption: Contents: +.. include:: contents.rst - users.md - developers.md - changelog.md - autoapi/index.rst - - -Indices and Tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - - -Instruments and Missions -======================== - -The following programs are using space packet parser in some capacity (most for decoding science data packets). - -* `CLARREO Pathfinder `_ -* `Libera `_ -* `IDEX `_ -* `CTIM-FD `_ -* FEEPS +.. include:: ../../README.md + :parser: myst_parser.sphinx_ diff --git a/docs/source/users.md b/docs/source/users.md index eed8137..bdbbcbb 100644 --- a/docs/source/users.md +++ b/docs/source/users.md @@ -1,4 +1,4 @@ -# User Documentation +# User Documentation (Getting Started) ## Installation This package is distributed via PyPI. 
To install it with pip, run: @@ -16,7 +16,14 @@ from space_packet_parser import xtcedef, parser packet_file = Path('my_packets.pkts') xtce_document = Path('my_xtce_document.xml') packet_definition = xtcedef.XtcePacketDefinition(xtce_document) -my_parser = parser.PacketParser(packet_definition) + +# You can introspect the packet definition to learn about what was parsed +pt = packet_definition.named_parameter_types["MY_PARAM_Type"] # Look up a type (includes unit and encoding info) +p = packet_definition.named_parameters['MY_PARAM'] # Look up a parameter (includes short and long descriptions) +sc = packet_definition.named_containers['SecondaryHeaderContainer'] # Look up a sequence container (includes inheritance) +# See the API docs for more information about the ParameterType, Parameter, and SequenceContainer classes + +my_parser = parser.PacketParser(packet_definition) # Set up a packet parser from your definition with packet_file.open("rb") as binary_data: packet_generator = my_parser.generator(binary_data) diff --git a/docs/source/xtce_variable_length_packets.md b/docs/source/xtce_variable_length_packets.md new file mode 100644 index 0000000..4d1df7d --- /dev/null +++ b/docs/source/xtce_variable_length_packets.md @@ -0,0 +1,52 @@ +# XTCE Documentation on Variable Length Packets + +The intent of this documentation is to describe the process of defining variable length packets in XTCE. + +## Explanation +The PKT_LEN field is the length of the packet in bytes. +PKT_LEN a) counts from zero and b) does not include the header bits. +Hence, the length of the parameter in bits (PARAM_LEN) may be calculated by the following equation: + +PARAM_LEN = 8*(PKT_LEN + 1) - FIXED_LENGTH_DATA + +where FIXED_LENGTH_DATA is the sum of the length of each data field in bits. + +An example of this is shown below. 
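The same relationship can also be written as a couple of lines of Python. The sketch below is purely illustrative and uses the fixed field sizes from the example that follows; the function name and the sample PKT_LEN value are hypothetical:

```python
# Illustrative sketch only: compute the variable-length field size implied by PKT_LEN.
FIXED_LENGTH_DATA = 32 + 8 + 8 + 1 + 15 + 8  # bits of fixed-length data fields (72), per the example below


def variable_field_bits(pkt_len: int) -> int:
    """PARAM_LEN in bits, i.e. 8 * (PKT_LEN + 1) - FIXED_LENGTH_DATA."""
    return 8 * (pkt_len + 1) - FIXED_LENGTH_DATA


# A packet reporting PKT_LEN = 1033 would leave 8 * 1034 - 72 = 8200 bits for the variable-length field
assert variable_field_bits(1033) == 8200
```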
+
+## Example
+**Packet Definition:**
+"VERSION" : 3 bits
+"TYPE" : 1 bits
+"SEC_HDR_FLG" : 1 bits
+"PKT_APID" : 11 bits
+"SEQ_FLGS" : 2 bits
+"SRC_SEQ_CTR" : 14 bits
+"PKT_LEN" : 16 bits
+"SHCOARSE" : 32 bits
+"SID" : 8 bits
+"SPIN" : 8 bits
+"ABORTFLAG" : 1 bits
+"STARTDELAY" : 15 bits
+"COUNT" : 8 bits
+"EVENTDATA": variable length
+
+FIXED_LENGTH_DATA = 32+8+8+1+15+8 = 72
+
+Therefore:
+EVENTDATA = 8 * (PKT_LEN + 1) - 72
+
+This equation can be implemented in XTCE as follows (the LinearAdjustment evaluates to 8 * PKT_LEN - 64 bits, which is the same equation rearranged):
+
+```xml
+<xtce:BinaryParameterType name="EVENTDATA_Type">
+    <xtce:UnitSet/>
+    <xtce:BinaryDataEncoding>
+        <xtce:SizeInBits>
+            <xtce:DynamicValue>
+                <xtce:ParameterInstanceRef parameterRef="PKT_LEN"/>
+                <xtce:LinearAdjustment intercept="-64" slope="8"/>
+            </xtce:DynamicValue>
+        </xtce:SizeInBits>
+    </xtce:BinaryDataEncoding>
+</xtce:BinaryParameterType>
+```
\ No newline at end of file
diff --git a/examples/parsing_and_plotting_idex_waveforms_from_socket.py b/examples/parsing_and_plotting_idex_waveforms_from_socket.py
index 2ca35e6..22fcbd7 100644
--- a/examples/parsing_and_plotting_idex_waveforms_from_socket.py
+++ b/examples/parsing_and_plotting_idex_waveforms_from_socket.py
@@ -17,7 +17,6 @@
 import socket
 import time
 # Installed
-import bitstring
 import matplotlib.pyplot as plt
 # Local
 from space_packet_parser import xtcedef
@@ -36,38 +35,44 @@ def send_data(sender: socket.socket, file: Path):
     """
     # Read binary file
     with file.open('rb') as fh:
-        stream = bitstring.ConstBitStream(fh)
-        while stream.pos < len(stream):
+        stream = fh.read()
+        pos = 0
+        while pos < len(stream):
             time.sleep(random.random() * .1)  # Random sleep up to 1s
             # Send binary data to socket in random chunk sizes
             random_n_bytes = random.randint(1024, 2048)
-            n_bits_to_send = 8 * random_n_bytes
-            if stream.pos + n_bits_to_send > len(stream):
-                n_bits_to_send = len(stream) - stream.pos
-            chunk_to_send = stream[stream.pos:stream.pos + n_bits_to_send]
+            n_bytes_to_send = 8 * random_n_bytes
+            if pos + n_bytes_to_send > len(stream):
+                n_bytes_to_send = len(stream) - pos
+            chunk_to_send = stream[pos:pos + n_bytes_to_send]
             print(f"Sending {len(chunk_to_send)} bytes")
-            sender.send(chunk_to_send.bytes)
-            stream.pos += n_bits_to_send
+            sender.send(chunk_to_send)
+            pos += n_bytes_to_send
         print("\nFinished sending data.")

 def parse_hg_waveform(waveform_raw: str):
     """Parse a binary string representing a high gain waveform"""
-    w = bitstring.ConstBitStream(bin=waveform_raw)
     ints = []
-    while w.pos < len(w):
-        w.read('pad:2')  # skip 2. Note: for old versions of bitstring, use bits:2, not pad:2.
- ints += w.readlist(['uint:10']*3) + for i in range(0, len(waveform_raw), 32): + # 32 bit chunks, divided up into 2, 10, 10, 10 + # skip first two bits + ints += [ + int(waveform_raw[i + 2 : i + 12], 2), + int(waveform_raw[i + 12 : i + 22], 2), + int(waveform_raw[i + 22 : i + 32], 2), + ] return ints def parse_lg_waveform(waveform_raw: str): """Parse a binary string representing a low gain waveform""" - w = bitstring.ConstBitStream(bin=waveform_raw) ints = [] - while w.pos < len(w): - w.read('pad:8') # skip 8 - ints += w.readlist(['uint:12']*2) + for i in range(0, len(waveform_raw), 32): + ints += [ + int(waveform_raw[i + 8 : i + 20], 2), + int(waveform_raw[i + 20 : i + 32], 2), + ] return ints @@ -100,7 +105,6 @@ def plot_full_event(data: dict): sender, receiver = socket.socketpair() receiver.settimeout(3) - file = '/Users/game1390/Workspace/space/space_packet_parser/tests/test_data/jpss/J01_G011_LZ_2021-04-09T00-00-00Z_V01.DAT1' p = Process(target=send_data, args=(sender, idex_packet_file,)) p.start() diff --git a/pycodestyle.ini b/pycodestyle.ini new file mode 100644 index 0000000..b3a9cdb --- /dev/null +++ b/pycodestyle.ini @@ -0,0 +1,4 @@ +[pycodestyle] +ignore = E121, E123, E126, E133, E226, E241, E242, E704, W503, W504, W505 +max-line-length = 120 +statistics = True diff --git a/pylintrc b/pylintrc index 988ea6f..5746484 100644 --- a/pylintrc +++ b/pylintrc @@ -9,7 +9,7 @@ # Add files or directories to the blacklist. They should be base names, not # paths. -ignore = CVS +ignore = CVS,csvdef.py # Pickle collected data for later comparisons. persistent = yes @@ -51,7 +51,7 @@ confidence = # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable = invalid-name, +disable = invalid-name, fixme, too-many-instance-attributes, too-few-public-methods, too-many-public-methods, too-many-arguments, too-many-locals, cyclic-import, similarities, bare-except, broad-except, unused-argument, unnecessary-pass, diff --git a/pyproject.toml b/pyproject.toml index 7ae33b4..acefa33 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "space_packet_parser" -version = "4.1.1" +version = "4.2.0" description = "A CCSDS telemetry packet decoding library based on the XTCE packet format description standard." 
license = "BSD-3-Clause" readme = "README.md" @@ -36,20 +36,20 @@ keywords = [ [tool.poetry.dependencies] python = ">=3.8" -bitstring = ">=3.0.0" lxml = ">=4.8.0" [tool.poetry.group.dev.dependencies] -pylint = "^2" -pytest = "^6" -pytest-randomly = "^3" -pytest-cov = "^3" -pyyaml = "^6.0" -sphinx = "^6.1.3" -myst-parser = "^1.0.0" -sphinx-autoapi = "^2.0.1" -sphinx-rtd-theme = "^1.2.0" -coverage = "^7.2.5" +pycodestyle = "*" +pylint = "*" +pytest = "*" +pytest-randomly = "*" +pytest-cov = "*" +pyyaml = "*" +sphinx = "*" +myst-parser = "*" +sphinx-autoapi = "*" +sphinx-rtd-theme = "*" +coverage = "*" [tool.poetry.group.examples] optional = true diff --git a/space_packet_parser/csvdef.py b/space_packet_parser/csvdef.py index 2eb92f7..be1878d 100644 --- a/space_packet_parser/csvdef.py +++ b/space_packet_parser/csvdef.py @@ -5,8 +5,16 @@ from collections import namedtuple from pathlib import Path # Local -from space_packet_parser.xtcedef import Comparison, Parameter, IntegerDataEncoding, FloatDataEncoding, StringDataEncoding, \ - IntegerParameterType, FloatParameterType, StringParameterType +from space_packet_parser.xtcedef import ( + Comparison, + Parameter, + IntegerDataEncoding, + FloatDataEncoding, + StringDataEncoding, + IntegerParameterType, + FloatParameterType, + StringParameterType +) FlattenedContainer = namedtuple('FlattenedContainer', ['entry_list', 'restrictions']) diff --git a/space_packet_parser/parser.py b/space_packet_parser/parser.py index 88b1627..8573546 100644 --- a/space_packet_parser/parser.py +++ b/space_packet_parser/parser.py @@ -6,28 +6,26 @@ import logging import socket import time -from typing import BinaryIO, Tuple +from typing import BinaryIO, Optional, Tuple, Union import warnings -# Installed -import bitstring # Local from space_packet_parser import xtcedef, csvdef logger = logging.getLogger(__name__) -CcsdsPacketHeaderElement = namedtuple('CcsdsPacketHeaderElement', ['name', 'format_string']) +CcsdsPacketHeaderElement = namedtuple('CcsdsPacketHeaderElement', ['name', 'nbits']) CCSDS_HEADER_DEFINITION = [ - CcsdsPacketHeaderElement('VERSION', 'uint:3'), - CcsdsPacketHeaderElement('TYPE', 'uint:1'), - CcsdsPacketHeaderElement('SEC_HDR_FLG', 'uint:1'), - CcsdsPacketHeaderElement('PKT_APID', 'uint:11'), - CcsdsPacketHeaderElement('SEQ_FLGS', 'uint:2'), - CcsdsPacketHeaderElement('SRC_SEQ_CTR', 'uint:14'), - CcsdsPacketHeaderElement('PKT_LEN', 'uint:16') + CcsdsPacketHeaderElement('VERSION', 3), + CcsdsPacketHeaderElement('TYPE', 1), + CcsdsPacketHeaderElement('SEC_HDR_FLG', 1), + CcsdsPacketHeaderElement('PKT_APID', 11), + CcsdsPacketHeaderElement('SEQ_FLGS', 2), + CcsdsPacketHeaderElement('SRC_SEQ_CTR', 14), + CcsdsPacketHeaderElement('PKT_LEN', 16) ] -CCSDS_HEADER_LENGTH_BITS = 48 +CCSDS_HEADER_LENGTH_BYTES = 6 Packet = namedtuple('Packet', ['header', 'data']) @@ -35,7 +33,8 @@ class ParsedDataItem(xtcedef.AttrComparable): """Representation of a parsed parameter""" - def __init__(self, name: str, raw_value: any, unit: str = None, derived_value: float or str = None): + def __init__(self, name: str, raw_value: any, unit: str = None, derived_value: Optional[Union[float, str]] = None, + short_description: str = None, long_description: str = None): """Constructor Parameters @@ -46,8 +45,12 @@ def __init__(self, name: str, raw_value: any, unit: str = None, derived_value: f Parameter units raw_value : any Raw representation of the parsed value. 
May be lots of different types but most often an integer - derived_value : float or str + derived_value : Union[float, str] May be a calibrated value or an enum lookup + short_description : str + Parameter short description + long_description : str + Parameter long description """ if name is None or raw_value is None: raise ValueError("Invalid ParsedDataItem. Must define name and raw_value.") @@ -55,6 +58,8 @@ def __init__(self, name: str, raw_value: any, unit: str = None, derived_value: f self.raw_value = raw_value self.unit = unit self.derived_value = derived_value + self.short_description = short_description + self.long_description = long_description def __repr__(self): return (f"{self.__class__.__name__}(" @@ -97,62 +102,31 @@ def __init__(self, self.word_size = word_size @staticmethod - def _parse_header(packet_data: bitstring.ConstBitStream, - start_position: int = None, - reset_cursor: bool = False) -> dict: + def _parse_header(packet_data: bytes) -> dict: """Parses the CCSDS standard header. Parameters ---------- - packet_data : bitstring.ConstBitStream - Binary data stream of packet data. - start_position : int - Position from which to start parsing. If not provided, will start whenever the cursor currently is. - reset_cursor : bool - If True, upon parsing the header data, reset the cursor to the original position in the stream. - This still applies even if start_position is specified. start_position will be used only for parsing the - header and then the cursor will be returned to the location it was at before this function was called. + packet_data : bytes + 6 bytes of binary data. Returns ------- header : dict Dictionary of header items. """ - original_cursor_position = packet_data.pos - - if start_position: - packet_data.pos = start_position - - header = { - item.name: ParsedDataItem(name=item.name, unit=None, raw_value=packet_data.read(item.format_string)) - for item in CCSDS_HEADER_DEFINITION - } - - if reset_cursor: - packet_data.pos = original_cursor_position - + header = {} + current_bit = 0 + for item in CCSDS_HEADER_DEFINITION: + header[item.name] = ParsedDataItem(name=item.name, + unit=None, + # pylint: disable=protected-access + raw_value=xtcedef._extract_bits(packet_data, current_bit, item.nbits)) + current_bit += item.nbits return header - @staticmethod - def _total_packet_bits_from_pkt_len(pkt_len: int): - """Calculate the total length of a CCSDS packet in bits based on the PKT_LEN field in its header. - - Parameters - ---------- - pkt_len : int - PKT_LEN value from CCSDS header - - Returns - ------- - : int - Length, in bits of the packet - - """ - # 4.1.3.5.3 The length count C shall be expressed as: - # C = (Total Number of Octets in the Packet Data Field) – 1 - # We also just reparsed the CCSDS header though as well, so that's an additional 6 octets - return 8 * (pkt_len + 1 + 6) - + # DEPRECATED! Remove in next major release along with CSV parser + # pylint: disable=inconsistent-return-statements def _determine_packet_by_restrictions(self, parsed_header: dict) -> Tuple[str, list]: """Examines a dictionary representation of a CCSDS header and determines which packet type applies. This packet type must be unique. If the header data satisfies the restrictions for more than one packet @@ -206,9 +180,10 @@ def _determine_packet_by_restrictions(self, parsed_header: dict) -> Tuple[str, l "Unable to choose a packet type to parse. 
" "Note: Restricting container inheritance based on non-header data items is not possible in a " "general way and is not supported by this package.", partial_data=parsed_header) + # pylint: enable=inconsistent-return-statements @staticmethod - def parse_packet(packet_data: bitstring.ConstBitStream, + def parse_packet(packet_data: xtcedef.PacketData, containers: dict, root_container_name: str = "CCSDSPacket", **parse_value_kwargs) -> Packet: @@ -216,7 +191,7 @@ def parse_packet(packet_data: bitstring.ConstBitStream, Parameters ---------- - packet_data : bitstring.BitString + packet_data : xtcedef.PacketData Binary packet data to parse into Packets containers : dict Dictionary of named containers, including their inheritance information. @@ -237,7 +212,9 @@ def _parse_parameter(p: xtcedef.Parameter): name=p.name, unit=p.parameter_type.unit, raw_value=parsed_value, - derived_value=derived_value + derived_value=derived_value, + short_description=p.short_description, + long_description=p.long_description ) def _parse_sequence_container(sc: xtcedef.SequenceContainer): @@ -250,11 +227,7 @@ def _parse_sequence_container(sc: xtcedef.SequenceContainer): parsed_items = {} current_container: xtcedef.SequenceContainer = containers[root_container_name] while True: - for entry in current_container.entry_list: - if isinstance(entry, xtcedef.Parameter): - _parse_parameter(entry) - elif isinstance(entry, xtcedef.SequenceContainer): - _parse_sequence_container(entry) + _parse_sequence_container(current_container) valid_inheritors = [] for inheritor_name in current_container.inheritors: @@ -283,12 +256,12 @@ def _parse_sequence_container(sc: xtcedef.SequenceContainer): return Packet(header, user_data) @staticmethod - def legacy_parse_packet(packet_data: bitstring.ConstBitStream, entry_list: list, **parse_value_kwargs) -> Packet: + def legacy_parse_packet(packet_data: xtcedef.PacketData, entry_list: list, **parse_value_kwargs) -> Packet: """Parse binary packet data according to the self.flattened_containers property Parameters ---------- - packet_data : bitstring.BitString + packet_data : xtcedef.PacketData Binary packet data to parse into Packets entry_list : list List of Parameter objects @@ -320,23 +293,25 @@ def legacy_parse_packet(packet_data: bitstring.ConstBitStream, entry_list: list, name=parameter.name, unit=parameter.parameter_type.unit, raw_value=parsed_value, - derived_value=derived_value + derived_value=derived_value, + short_description=parameter.short_description, + long_description=parameter.long_description ) return Packet(header=header, data=user_data) @staticmethod - def print_progress(current_bits: int, total_bits: int or None, + def print_progress(current_bytes: int, total_bytes: Optional[int], start_time_ns: int, current_packets: int, end: str = '\r', log: bool = False): """Prints a progress bar, including statistics on parsing rate. Parameters ---------- - current_bits : int - Number of bits parsed so far. - total_bits : int - Number of total bits to parse (if known) + current_bytes : int + Number of bytes parsed so far. + total_bytes : Optional[int] + Number of total bytes to parse, if known. None otherwise. current_packets : int Number of packets parsed so far. start_time_ns : int @@ -349,34 +324,34 @@ def print_progress(current_bits: int, total_bits: int or None, progress_char = "=" bar_length = 20 - if total_bits is not None: # If we actually have an endpoint (i.e. 
not using a socket) - percentage = int((current_bits / total_bits) * 100) # Percent Completed Calculation - progress = int((bar_length * current_bits) / total_bits) # Progress Done Calculation + if total_bytes is not None: # If we actually have an endpoint (i.e. not using a socket) + percentage = int((current_bytes / total_bytes) * 100) # Percent Completed Calculation + progress = int((bar_length * current_bytes) / total_bytes) # Progress Done Calculation else: percentage = "???" progress = 0 elapsed_ns = time.time_ns() - start_time_ns delta = dt.timedelta(microseconds=elapsed_ns / 1E3) - kbps = int(current_bits * 1E6 / elapsed_ns) + kbps = int(current_bytes // 8 * 1E6 / elapsed_ns) pps = int(current_packets * 1E9 / elapsed_ns) info_str = f"[Elapsed: {delta}, " \ - f"Parsed {current_bits} bits ({current_packets} packets) " \ + f"Parsed {current_bytes} bytes ({current_packets} packets) " \ f"at {kbps}kb/s ({pps}pkts/s)]" loadbar = f"Progress: [{progress*progress_char:{bar_length}}]{percentage}% {info_str}" print(loadbar, end=end) if log is True: logger.info(loadbar) - def generator(self, - binary_data: bitstring.ConstBitStream or BinaryIO or socket.socket, + def generator(self, # pylint: disable=too-many-branches,too-many-statements + binary_data: Union[BinaryIO, socket.socket], parse_bad_pkts: bool = True, skip_header_bits: int = 0, root_container_name="CCSDSPacket", ccsds_headers_only: bool = False, yield_unrecognized_packet_errors: bool = False, show_progress: bool = False, - buffer_read_size_bytes: int = 4096): + buffer_read_size_bytes: Optional[int] = None): """Create and return a Packet generator that reads from a ConstBitStream or a filelike object or a socket. Creating a generator object to return allows the user to create @@ -384,7 +359,7 @@ def generator(self, Parameters ---------- - binary_data : bitstring.ConstBitStream or BinaryIO or socket.socket + binary_data : BinaryIO or socket.socket Binary data source to parse into Packets. parse_bad_pkts : bool, Optional Default True. @@ -412,28 +387,24 @@ def generator(self, ends. buffer_read_size_bytes : int, Optional Number of bytes to read from e.g. a BufferedReader or socket binary data source on each read attempt. - Default is 4096 bytes. + Default is 4096 bytes from a socket, -1 (full read) from a file. Yields ------- - : Packet or UnrecognizedPacketTypeError + Packet or UnrecognizedPacketTypeError Generator yields Packet objects containing the parsed packet data for each subsequent packet. If yield_unrecognized_packet_errors is True, it will yield an unraised exception object, which can be raised or used for debugging purposes. """ - def fill_read_buffer(source: bitstring.ConstBitStream or BinaryIO or socket.socket, - buffer: bitstring.BitStream, - read_size_bytes: int) -> int: - """Read data from a source and add it to an existing buffer (BitStream). + def read_bytes_from_source(source: Union[BinaryIO, socket.socket], + read_size_bytes: int) -> bytes: + """Read data from a source and return the bytes read. Parameters ---------- - source : bitstring.ConstBitStream or BinaryIO or socket.socket + source : BinaryIO or socket.socket Source of data. - buffer : bitstring.BitStream - A reference to a rotating buffer to which the new data is appended. Mutating this changes the data - available to the caller by reference so we don't return it. read_size_bytes : int Max number of bytes to read from the source per read attempt. For sockets, this should be a small power of 2 (e.g. 4096) due to networking and hardware conventions. 
For a file or ConstBitStream object @@ -442,104 +413,107 @@ def fill_read_buffer(source: bitstring.ConstBitStream or BinaryIO or socket.sock Returns ------- - result : int - Number of bits added to the buffer. Note that the buffer may still have nonzero length from previous - data even when this returns zero. + : bytes + The bytes that were read from the source. """ - curser_pos = buffer.pos # Keep track of the original buffer cursor location if isinstance(source, io.BufferedIOBase): - new_bytes = source.read(read_size_bytes) - buffer += new_bytes - n_new_bits = len(new_bytes)*8 - elif isinstance(source, socket.socket): - new_bytes = source.recv(read_size_bytes) - buffer += new_bytes # Append BitStream with newly read bytes - n_new_bits = len(new_bytes)*8 - elif isinstance(source, bitstring.ConstBitStream): - # This either reads read_size_bytes bytes or it just reads to the end of the data - new_bits = source[source.pos:source.pos + read_size_bytes * 8] - source.pos += len(new_bits) # Set the source.pos to exactly where we read to - buffer += new_bits - n_new_bits = len(new_bits) - elif isinstance(source, io.TextIOWrapper): - raise IOError(f"Packet data file opened in TextIO mode. You must open packet data in binary mode.") - else: - raise IOError(f"Unrecognized data source: {source}") - - # Reset buffer.pos to the original position before we extended it - buffer.pos = curser_pos - return n_new_bits + return source.read(read_size_bytes) + if isinstance(source, socket.socket): + return source.recv(read_size_bytes) + if isinstance(source, io.TextIOWrapper): + raise IOError("Packet data file opened in TextIO mode. You must open packet data in binary mode.") + raise IOError(f"Unrecognized data source: {source}") # ======== # Start of generator # ======== - if isinstance(binary_data, bitstring.ConstBitStream): - total_length_bits = len(binary_data) - logger.info( - f"Creating packet generator from pre-loaded ConstBitStream. Total length is {total_length_bits}") - elif isinstance(binary_data, io.BufferedIOBase): - total_length_bits = 8 * binary_data.seek(0, io.SEEK_END) # This is probably preferable to len + if isinstance(binary_data, io.BufferedIOBase): + if buffer_read_size_bytes is None: + # Default to a full read of the file + buffer_read_size_bytes = -1 + total_length_bytes = binary_data.seek(0, io.SEEK_END) # This is probably preferable to len binary_data.seek(0, 0) logger.info(f"Creating packet generator from a filelike object, {binary_data}. " - f"Total length is {total_length_bits}bits") + f"Total length is {total_length_bytes} bytes") else: # It's a socket and we don't know how much data we will get logger.info("Creating packet generator to read from a socket. Total length to parse is unknown.") - total_length_bits = None # We don't know how long it is + total_length_bytes = None # We don't know how long it is + if buffer_read_size_bytes is None: + # Default to 4096 bytes from a socket + buffer_read_size_bytes = 4096 # ======== # Packet loop. 
Each iteration of this loop yields a ParsedPacket object # ======== start_time = time.time_ns() - n_bits_parsed = 0 # Keep track of how many bits we have parsed + n_bytes_parsed = 0 # Keep track of how many bytes we have parsed n_packets_parsed = 0 # Keep track of how many packets we have parsed - read_buffer = bitstring.BitStream() # Not const because it's a rotating buffer + read_buffer = b"" # Empty bytes object to start + skip_header_bytes = skip_header_bits // 8 # Internally keep track of bytes + current_pos = 0 # Keep track of where we are in the buffer while True: - if total_length_bits and n_bits_parsed == total_length_bits: + if total_length_bytes and n_bytes_parsed == total_length_bytes: break # Exit if we know the length and we've reached it if show_progress is True: - self.print_progress(current_bits=n_bits_parsed, total_bits=total_length_bits, + self.print_progress(current_bytes=n_bytes_parsed, total_bytes=total_length_bytes, start_time_ns=start_time, current_packets=n_packets_parsed) + if current_pos > 20_000_000: + # Only trim the buffer after 20 MB read to prevent modifying + # the bitstream and trimming after every packet + read_buffer = read_buffer[current_pos:] + current_pos = 0 + # Fill buffer enough to parse a header - while len(read_buffer) < skip_header_bits + CCSDS_HEADER_LENGTH_BITS: - result = fill_read_buffer(binary_data, read_buffer, - read_size_bytes=buffer_read_size_bytes) + while len(read_buffer) - current_pos < skip_header_bytes + CCSDS_HEADER_LENGTH_BYTES: + result = read_bytes_from_source(binary_data, read_size_bytes=buffer_read_size_bytes) if not result: # If there is verifiably no more data to add, break break - - read_buffer.pos += skip_header_bits - header = self._parse_header(read_buffer, reset_cursor=True) - specified_total_packet_length_bits = self._total_packet_bits_from_pkt_len(header['PKT_LEN'].raw_value) - n_packets_parsed += 1 # Consider it a counted packet once we've parsed the header + read_buffer += result + # Skip the header bytes + current_pos += skip_header_bytes + header_bytes = read_buffer[current_pos:current_pos + CCSDS_HEADER_LENGTH_BYTES] + header = self._parse_header(header_bytes) + + # per the CCSDS spec + # 4.1.3.5.3 The length count C shall be expressed as: + # C = (Total Number of Octets in the Packet Data Field) – 1 + n_bytes_data = header['PKT_LEN'].raw_value + 1 + n_bytes_packet = CCSDS_HEADER_LENGTH_BYTES + n_bytes_data + + # Consider it a counted packet once we've parsed the header + # and update the number of bits parsed + n_packets_parsed += 1 + n_bytes_parsed += skip_header_bytes + n_bytes_packet if ccsds_headers_only is True: - # Trim read buffer (this also reduces memory usage over time for reading a ConstBitStream) - n_bits_parsed += skip_header_bits + specified_total_packet_length_bits - read_buffer = read_buffer[specified_total_packet_length_bits + skip_header_bits:] + # update the current position to the end of the packet data + current_pos += n_bytes_packet yield Packet(header=header, data=None) continue # Based on PKT_LEN fill buffer enough to read a full packet - while len(read_buffer) < skip_header_bits + specified_total_packet_length_bits: - result = fill_read_buffer(binary_data, read_buffer, - read_size_bytes=buffer_read_size_bytes) + while len(read_buffer) - current_pos < n_bytes_packet: + result = read_bytes_from_source(binary_data, read_size_bytes=buffer_read_size_bytes) if not result: # If there is verifiably no more data to add, break break + read_buffer += result + # current_pos is still before the 
header, so we are reading the entire packet here + packet_bytes = read_buffer[current_pos:current_pos + n_bytes_packet] + current_pos += n_bytes_packet + # Wrap the bytes in a class that can keep track of position as we read from it + packet_data = xtcedef.PacketData(packet_bytes) try: if isinstance(self.packet_definition, xtcedef.XtcePacketDefinition): - packet = self.parse_packet(read_buffer, + packet = self.parse_packet(packet_data, self.packet_definition.named_containers, root_container_name=root_container_name, word_size=self.word_size) else: _, parameter_list = self._determine_packet_by_restrictions(header) - packet = self.legacy_parse_packet(read_buffer, parameter_list, word_size=self.word_size) + packet = self.legacy_parse_packet(packet_data, parameter_list, word_size=self.word_size) except UnrecognizedPacketTypeError as e: - # Regardless of whether we handle the error, we still want to chop the read_buffer in preparation - # for parsing the next packet - n_bits_parsed += skip_header_bits + specified_total_packet_length_bits - read_buffer = read_buffer[skip_header_bits + specified_total_packet_length_bits:] logger.debug(f"Unrecognized error on packet with APID {header['PKT_APID'].raw_value}'") if yield_unrecognized_packet_errors is True: # Yield the caught exception without raising it (raising ends generator) @@ -553,22 +527,20 @@ def fill_read_buffer(source: bitstring.ConstBitStream or BinaryIO or socket.sock f"{packet.header['PKT_LEN'].raw_value}. This might be because the CCSDS header is " f"incorrectly represented in your packet definition document.") - actual_length_parsed = read_buffer.pos - skip_header_bits - - if actual_length_parsed != specified_total_packet_length_bits: + actual_length_parsed = packet_data.pos // 8 + if actual_length_parsed != n_bytes_packet: logger.warning(f"Parsed packet length " - f"({actual_length_parsed}b) did not match " - f"length specified in header ({specified_total_packet_length_bits}b). " - f"Updating bit string position to correct position " + f"({actual_length_parsed}B) did not match " + f"length specified in header ({n_bytes_packet}B). 
" + f"Updating the position to the correct position " "indicated by CCSDS header.") if not parse_bad_pkts: logger.warning("Skipping (not yielding) bad packet because parse_bad_pkts is falsy.") continue - n_bits_parsed += skip_header_bits + specified_total_packet_length_bits - read_buffer = read_buffer[specified_total_packet_length_bits + skip_header_bits:] + yield packet if show_progress is True: - self.print_progress(current_bits=n_bits_parsed, total_bits=total_length_bits, + self.print_progress(current_bytes=n_bytes_parsed, total_bytes=total_length_bytes, start_time_ns=start_time, current_packets=n_packets_parsed, end="\n", log=True) diff --git a/space_packet_parser/xtcedef.py b/space_packet_parser/xtcedef.py index 0cc3b61..0470dfd 100644 --- a/space_packet_parser/xtcedef.py +++ b/space_packet_parser/xtcedef.py @@ -4,17 +4,14 @@ from collections import namedtuple import inspect import logging -from pathlib import Path -from typing import Tuple +import struct +from typing import Tuple, Union, Optional, Any, List, TextIO, Dict import warnings -import lxml.etree as ElementTree # Installed -import bitstring +import lxml.etree as ElementTree logger = logging.getLogger(__name__) -# TODO: Improve exceptions for specific failure modes - # Exceptions class ElementNotFoundError(Exception): @@ -42,6 +39,11 @@ class CalibrationError(Exception): pass +class InvalidParameterTypeError(Exception): + """Error raised when someone is using an invalid ParameterType element""" + pass + + # Common comparable mixin class AttrComparable(metaclass=ABCMeta): """Generic class that provides a notion of equality based on all non-callable, non-dunder attributes""" @@ -72,12 +74,12 @@ class MatchCriteria(AttrComparable, metaclass=ABCMeta): # Python's XML parser doesn't appear to support &eq; ≠ ≤ or ≥ # We have implemented support for bash-style comparisons just in case. _valid_operators = { - "==": "==", "eq": "==", # equal to - "!=": "!=", "neq": "!=", # not equal to - "<": "<", "lt": "<", # less than - ">": ">", "gt": ">", # greater than - "<=": "<=", "leq": "<=", # less than or equal to - ">=": ">=", "geq": ">=", # greater than or equal to + "==": "__eq__", "eq": "__eq__", # equal to + "!=": "__ne__", "neq": "__ne__", # not equal to + "<": "__lt__", "lt": "__lt__", "<": "__lt__", # less than + ">": "__gt__", "gt": "__gt__", ">": "__gt__", # greater than + "<=": "__le__", "leq": "__le__", "<=": "__le__", # less than or equal to + ">=": "__ge__", "geq": "__ge__", ">=": "__ge__", # greater than or equal to } @classmethod @@ -97,7 +99,7 @@ def from_match_criteria_xml_element(cls, element: ElementTree.Element, ns: dict) """ raise NotImplementedError() - def evaluate(self, parsed_data: dict, current_parsed_value: int or float = None) -> bool: + def evaluate(self, parsed_data: dict, current_parsed_value: Optional[Union[int, float]] = None) -> bool: """Evaluate match criteria down to a boolean. Parameters @@ -151,13 +153,13 @@ def _validate(self): ------- None """ - if not (self.operator in self._valid_operators or self.operator in self._valid_operators.values()): + if self.operator not in self._valid_operators: raise ValueError(f"Unrecognized operator syntax {self.operator}. 
" f"Must be one of " - f"{set(list(self._valid_operators.values()) + list(self._valid_operators.keys()))}") + f"{set(self._valid_operators.keys())}") @classmethod - def from_match_criteria_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_match_criteria_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'Comparison': """Create Parameters @@ -169,7 +171,7 @@ def from_match_criteria_xml_element(cls, element: ElementTree.Element, ns: dict) Returns ------- - : cls + : Comparison """ use_calibrated_value = True # Default if 'useCalibratedValue' in element.attrib: @@ -184,7 +186,7 @@ def from_match_criteria_xml_element(cls, element: ElementTree.Element, ns: dict) return cls(value, parameter_name, operator=operator, use_calibrated_value=use_calibrated_value) - def evaluate(self, parsed_data: dict, current_parsed_value: int or float = None) -> bool: + def evaluate(self, parsed_data: dict, current_parsed_value: Optional[Union[int, float]] = None) -> bool: """Evaluate comparison down to a boolean. If the parameter to compare is not present in the parsed_data dict, we assume that we are comparing against the current raw value in current_parsed_value. @@ -192,7 +194,7 @@ def evaluate(self, parsed_data: dict, current_parsed_value: int or float = None) ---------- parsed_data : dict Dictionary of parsed parameter data so far. Used to evaluate truthyness of the match criteria. - current_parsed_value : int or float + current_parsed_value : Union[int, float] Optional. Uncalibrated value that is currently a candidate for calibration and so has not yet been added to the parsed_data dict. Used to resolve calibrator conditions that reference their own raw value as a comparate. @@ -222,9 +224,7 @@ def evaluate(self, parsed_data: dict, current_parsed_value: int or float = None) "appear in the parsed data so far and no current raw value was passed " "to compare with.") - operator = (self.operator - if self.operator in self._valid_operators.values() - else self._valid_operators[self.operator]) + operator = self._valid_operators[self.operator] t_comparate = type(parsed_value) try: required_value = t_comparate(self.required_value) @@ -234,10 +234,9 @@ def evaluate(self, parsed_data: dict, current_parsed_value: int or float = None) if required_value is None or parsed_value is None: raise ValueError(f"Error in Comparison. Cannot compare {required_value} with {parsed_value}. " "Neither should be None.") - if isinstance(required_value, str): - parsed_value = f"'{parsed_value}'" - required_value = f"'{required_value}'" - return eval(f"{parsed_value} {operator} {required_value}") + + # x.__le__(y) style call + return getattr(parsed_value, operator)(required_value) class Condition(MatchCriteria): @@ -246,8 +245,13 @@ class Condition(MatchCriteria): but it's functionally close enough that we inherit the class here. """ - def __init__(self, left_param: str, operator: str, right_param: str = None, right_value=None, - left_use_calibrated_value: bool = True, right_use_calibrated_value: bool = True): + def __init__(self, + left_param: str, + operator: str, + right_param: Optional[str] = None, + right_value: Optional[Any] = None, + left_use_calibrated_value: bool = True, + right_use_calibrated_value: bool = True): """Constructor Parameters @@ -256,9 +260,9 @@ def __init__(self, left_param: str, operator: str, right_param: str = None, righ Parameter name on the LH side of the comparison operator : str Member of MatchCriteria._valid_operators. 
- right_param : str + right_param : Optional[str] Parameter name on the RH side of the comparison. - right_value: any, Optional + right_value: Optional[Any] Used in case of comparison with a fixed xtce:Value on the RH side. left_use_calibrated_value : bool, Optional Default is True. If False, comparison is made against the uncalibrated value. @@ -280,10 +284,10 @@ def _validate(self): ------- None """ - if not (self.operator in self._valid_operators or self.operator in self._valid_operators.values()): + if self.operator not in self._valid_operators: raise ValueError(f"Unrecognized operator syntax {self.operator}. " f"Must be one of " - f"{set(list(self._valid_operators.values()) + list(self._valid_operators.keys()))}") + f"{set(self._valid_operators.keys())}") if self.right_param and self.right_value: raise ComparisonError(f"Received both a right_value and a right_param reference to Condition {self}.") if self.right_value and self.right_use_calibrated_value: @@ -341,16 +345,16 @@ def from_match_criteria_xml_element(cls, element: ElementTree.Element, ns: dict) left_use_calibrated_value=left_use_calibrated_value, right_use_calibrated_value=right_use_calibrated_value) raise ValueError(f'Failed to parse a Condition element {element}. ' - 'See 3.4.3.4.2 of XTCE Green Book CCSDS 660.1-G-2') + 'See 3.4.3.4.2 of XTCE Green Book CCSDS 660.1-G-2') - def evaluate(self, parsed_data: dict, current_parsed_value: int or float = None) -> bool: + def evaluate(self, parsed_data: dict, current_parsed_value: Optional[Union[int, float]] = None) -> bool: """Evaluate match criteria down to a boolean. Parameters ---------- parsed_data : dict Dictionary of parsed parameter data so far. Used to evaluate truthyness of the match criteria. - current_parsed_value : int or float, Optional + current_parsed_value : Optional[Union[int, float]] Current value being parsed. NOTE: This is currently ignored. See the TODO item below. Returns @@ -358,6 +362,7 @@ def evaluate(self, parsed_data: dict, current_parsed_value: int or float = None) : bool Truthyness of this match criteria based on parsed_data values. """ + def _get_parsed_value(parameter_name: str, use_calibrated: bool): """Retrieves the previously parsed value from the passed in parsed_data""" try: @@ -369,16 +374,15 @@ def _get_parsed_value(parameter_name: str, use_calibrated: bool): "the evaluate method. If you intended a comparison against the raw value of the " "parameter currently being parsed, unfortunately that is not currently supported." ) from e + # TODO: Consider allowing one of the parameters to be the parameter currently being evaluated. # This isn't explicitly provided for in the XTCE spec but it seems reasonable to be able to # perform conditionals against the current raw value of a parameter, e.g. while determining if it # should be calibrated. Note that only one of the parameters can be used this way and it must reference # an uncalibrated value so the logic and error handling must be done carefully. left_value = _get_parsed_value(self.left_param, self.left_use_calibrated_value) - # Convert XML operator representation to a python-compatible operator (e.g. '>' to '>') - operator = (self.operator - if self.operator in self._valid_operators.values() - else self._valid_operators[self.operator]) + # Convert XML operator representation to a python-compatible operator (e.g. 
'>' to '__gt__') + operator = self._valid_operators[self.operator] if self.right_param is not None: right_value = _get_parsed_value(self.right_param, self.right_use_calibrated_value) @@ -389,10 +393,9 @@ def _get_parsed_value(parameter_name: str, use_calibrated: bool): raise ValueError(f"Error when evaluating condition {self}. Neither right_param nor right_value is set.") if left_value is None or right_value is None: raise ComparisonError(f"Error comparing {left_value} and {right_value}. Neither should be None.") - if isinstance(left_value, str): - left_value = f"'{left_value}'" - right_value = f"'{right_value}'" - return eval(f"{left_value} {operator} {right_value}") + + # x.__le__(y) style call + return getattr(left_value, operator)(right_value) Anded = namedtuple('Anded', ['conditions', 'ors']) @@ -402,11 +405,11 @@ def _get_parsed_value(parameter_name: str, use_calibrated: bool): class BooleanExpression(MatchCriteria): """""" - def __init__(self, expression: Condition or Anded or Ored): + def __init__(self, expression: Union[Condition, Anded, Ored]): self.expression = expression @classmethod - def from_match_criteria_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_match_criteria_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'BooleanExpression': """Abstract classmethod to create a match criteria object from an XML element. Parameters @@ -418,9 +421,10 @@ def from_match_criteria_xml_element(cls, element: ElementTree.Element, ns: dict) Returns ------- - : cls + : BooleanExpression """ - def _parse_anded(anded_el: ElementTree.Element): + + def _parse_anded(anded_el: ElementTree.Element) -> Anded: """Create an Anded object from an xtce:ANDedConditions element Parameters @@ -437,7 +441,7 @@ def _parse_anded(anded_el: ElementTree.Element): anded_ors = [_parse_ored(anded_or) for anded_or in anded_el.findall('xtce:ORedConditions', ns)] return Anded(conditions, anded_ors) - def _parse_ored(ored_el: ElementTree.Element): + def _parse_ored(ored_el: ElementTree.Element) -> Ored: """Create an Ored object from an xtce:ARedConditions element Parameters @@ -463,13 +467,15 @@ def _parse_ored(ored_el: ElementTree.Element): return cls(expression=_parse_ored(element.find('xtce:ORedConditions', ns))) raise ValueError(f"Failed to parse {element}") - def evaluate(self, parsed_data: dict, **kwargs) -> bool: + def evaluate(self, parsed_data: dict, current_parsed_value: Optional[Union[int, float]] = None) -> bool: """Evaluate the criteria in the BooleanExpression down to a single boolean. Parameters ---------- parsed_data : dict Dictionary of parsed parameter data so far. Used to evaluate truthyness of the match criteria. + current_parsed_value : Optional[Union[int, float]] + Current value being parsed. Returns ------- @@ -508,21 +514,21 @@ def _and(anded: Anded): class DiscreteLookup(AttrComparable): """""" - def __init__(self, match_criteria: list, lookup_value: int or float): + def __init__(self, match_criteria: list, lookup_value: Union[int, float]): """Constructor Parameters ---------- match_criteria : list List of criteria to determine if the lookup value should be returned during evaluation. 
- lookup_value : int or float + lookup_value : Union[int, float] Value to return from the lookup if the criteria evaluate true """ self.match_criteria = match_criteria self.lookup_value = lookup_value @classmethod - def from_discrete_lookup_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_discrete_lookup_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'DiscreteLookup': """Create a DiscreteLookup object from an XML element Parameters @@ -534,7 +540,7 @@ def from_discrete_lookup_xml_element(cls, element: ElementTree.Element, ns: dict Returns ------- - : cls + : DiscreteLookup """ lookup_value = float(element.attrib['value']) if element.find('xtce:ComparisonList', ns) is not None: @@ -548,14 +554,14 @@ def from_discrete_lookup_xml_element(cls, element: ElementTree.Element, ns: dict return cls(match_criteria, lookup_value) - def evaluate(self, parsed_data: dict, current_parsed_value: int or float = None): + def evaluate(self, parsed_data: dict, current_parsed_value: Optional[Union[int, float]] = None) -> Any: """Evaluate the lookup to determine if it is valid. Parameters ---------- parsed_data : dict Data parsed so far (for referencing during criteria evaluation). - current_parsed_value: int or float, Optional + current_parsed_value: Optional[Union[int, float]] If referenced parameter in criterion isn't in parsed_data dict, we assume we are comparing against this currently parsed value. @@ -575,7 +581,7 @@ class Calibrator(AttrComparable, metaclass=ABCMeta): """Abstract base class for XTCE calibrators""" @classmethod - def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'Calibrator': """Abstract classmethod to create a default_calibrator object from an XML element. Parameters @@ -591,12 +597,17 @@ def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict): """ return NotImplemented - def calibrate(self, uncalibrated_value: int): - """Takes an integer-encoded value and returns a calibrated version. + def calibrate(self, uncalibrated_value: Union[int, float]) -> Union[int, float]: + """Takes an integer-encoded or float-encoded value and returns a calibrated version. + + Parameters + ---------- + uncalibrated_value : Union[int, float] + The uncalibrated, raw encoded value Returns ------- - : int or float + : Union[int, float] Calibrated value """ raise NotImplementedError @@ -631,7 +642,7 @@ def __init__(self, points: list, order: int = 0, extrapolate: bool = False): self.extrapolate = extrapolate @classmethod - def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'SplineCalibrator': """Create a spline default_calibrator object from an XML element.""" point_elements = element.findall('xtce:SplinePoint', ns) spline_points = [ @@ -642,7 +653,7 @@ def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict): extrapolate = element.attrib['extrapolate'].lower() == 'true' if 'extrapolate' in element.attrib else False return cls(order=order, points=spline_points, extrapolate=extrapolate) - def calibrate(self, uncalibrated_value: float): + def calibrate(self, uncalibrated_value: float) -> float: """Take an integer-encoded value and returns a calibrated version according to the spline points. 
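# Editorial note, not part of the diff: per the docstrings above, a DiscreteLookup
# yields its lookup_value when its match criteria evaluate true, and nothing
# otherwise. A rough standalone sketch of that idea; the criteria callables and
# parameter names are stand-ins, not the library's Comparison objects.
def discrete_lookup(parsed_data: dict, match_criteria, lookup_value):
    if all(criterion(parsed_data) for criterion in match_criteria):
        return lookup_value
    return None

criteria = [lambda data: data["MODE"] == 2]      # stand-in criterion
assert discrete_lookup({"MODE": 2}, criteria, 64) == 64
assert discrete_lookup({"MODE": 1}, criteria, 64) is None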
Parameters @@ -661,7 +672,7 @@ def calibrate(self, uncalibrated_value: float): return self._first_order_spline_interp(uncalibrated_value) raise NotImplementedError(f"SplineCalibrator is not implemented for spline order {self.order}.") - def _zero_order_spline_interp(self, query_point: float): + def _zero_order_spline_interp(self, query_point: float) -> float: """Abstraction for zero order spline interpolation. If extrapolation is set to a truthy value, we use the nearest point to extrapolate outside the range of the given spline points. Within the range of spline points, we use nearest lower point interpolation. @@ -688,7 +699,7 @@ def _zero_order_spline_interp(self, query_point: float): raise CalibrationError(f"Extrapolation is set to a falsy value ({self.extrapolate}) but query value " f"{query_point} falls outside the range of spline points {self.points}") - def _first_order_spline_interp(self, query_point: float): + def _first_order_spline_interp(self, query_point: float) -> float: """Abstraction for first order spline interpolation. If extrapolation is set to a truthy value, we use the end points to make a linear function and use it to extrapolate. @@ -703,7 +714,7 @@ def _first_order_spline_interp(self, query_point: float): Calibrated value. """ - def linear_func(xq: float, x0: float, x1: float, y0: float, y1: float): + def linear_func(xq: float, x0: float, x1: float, y0: float, y1: float) -> float: """Evaluate a linear function through points (x0, y0), (x1, y1) at point xq Parameters @@ -754,7 +765,7 @@ def __init__(self, coefficients: list): self.coefficients = coefficients # Coefficients should be a list of PolynomialCoefficients @classmethod - def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'PolynomialCalibrator': """Create a polynomial default_calibrator object from an XML element. Parameters @@ -775,7 +786,7 @@ def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict): ] return cls(coefficients=coefficients) - def calibrate(self, uncalibrated_value: float): + def calibrate(self, uncalibrated_value: float) -> float: """Evaluate the polynomial defined by object coefficients at the specified uncalibrated point. Parameters @@ -803,7 +814,7 @@ def __init__(self): raise NotImplementedError(self.err_msg) @classmethod - def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'MathOperationCalibrator': """Create a math operation default_calibrator from an XML element.""" raise NotImplementedError(cls.err_msg) @@ -829,7 +840,7 @@ def __init__(self, match_criteria: list, calibrator: Calibrator): Parameters ---------- - match_criteria : MatchCriteria or list + match_criteria : Union[MatchCriteria, list] Object representing the logical operations to be performed to determine whether to use this default_calibrator. 
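# Editorial note, not part of the diff: a standalone sketch of how a polynomial
# calibration such as PolynomialCalibrator.calibrate above is typically evaluated
# from (coefficient, exponent) pairs. The local namedtuple and the values are for
# illustration only, not imported from the library.
from collections import namedtuple

PolyCoeff = namedtuple("PolyCoeff", ["coefficient", "exponent"])

def polynomial_calibrate(uncalibrated_value: float, coefficients) -> float:
    """Evaluate sum(c * x**e) at the raw value x."""
    return sum(c.coefficient * uncalibrated_value ** c.exponent for c in coefficients)

# e.g. calibrated = 1.5 + 0.25 * raw
coefficients = [PolyCoeff(coefficient=1.5, exponent=0), PolyCoeff(coefficient=0.25, exponent=1)]
assert polynomial_calibrate(10.0, coefficients) == 4.0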
This can be a Comparison, a ComparsonList (a list of Comparison objects), a BooleanExpression (not supported), or a CustomAlgorithm (not supported) @@ -840,7 +851,7 @@ def __init__(self, match_criteria: list, calibrator: Calibrator): self.calibrator = calibrator @staticmethod - def get_context_match_criteria(element: ElementTree.Element, ns: dict): + def get_context_match_criteria(element: ElementTree.Element, ns: dict) -> List[MatchCriteria]: """Parse contextual requirements from a Comparison, ComparisonList, or BooleanExpression Parameters @@ -852,7 +863,7 @@ def get_context_match_criteria(element: ElementTree.Element, ns: dict): Returns ------- - : list + : List[MatchCriteria] List of Comparisons that can be evaluated to determine whether this calibrator should be used. """ context_match_element = element.find('xtce:ContextMatch', ns) @@ -862,13 +873,15 @@ def get_context_match_criteria(element: ElementTree.Element, ns: dict): if context_match_element.find('xtce:Comparison', ns) is not None: return [Comparison.from_match_criteria_xml_element( context_match_element.find('xtce:Comparison', ns), ns)] - # TODO: Implement handling of BooleanExpression - raise NotImplementedError("ContextCalibrator doesn't contain Comparison on ComparisonList. This probably" - "means the match criteria is an unsupported type (BooleanExpression or " - "CustomAlgorithm).") + if context_match_element.find('xtce:BooleanExpression', ns) is not None: + return [BooleanExpression.from_match_criteria_xml_element( + context_match_element.find('xtce:BooleanExpression', ns), ns)] + raise NotImplementedError("ContextCalibrator doesn't contain Comparison, ComparisonList, or BooleanExpression. " + "This probably means the match criteria is an unsupported type " + "(CustomAlgorithm).") @classmethod - def from_context_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_context_calibrator_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'ContextCalibrator': """Create a ContextCalibrator object from an XML element Parameters @@ -897,27 +910,129 @@ def from_context_calibrator_xml_element(cls, element: ElementTree.Element, ns: d return cls(match_criteria=match_criteria, calibrator=calibrator) - def calibrate(self, parsed_value): - """Wrapper method for the internal Calibrator.calibrate + def calibrate(self, parsed_value: Union[int, float]) -> Union[int, float]: + """Wrapper method for the internal `Calibrator.calibrate` Parameters ---------- - parsed_value : int or float + parsed_value : Union[int, float] Uncalibrated value. Returns ------- - : int or float + : Union[int, float] Calibrated value """ return self.calibrator.calibrate(parsed_value) +class PacketData: + """Raw packet data stored as bytes""" + + def __init__(self, data: bytes, pos: int = 0): + """The raw packet data stored as bytes + + Intended to be used to ``read`` and ``peek`` at data within the packet. + Iterating through and keeping track of the current position read from. + Similar to the bitstring module's objects, but with less capability + and thus faster for these specific use-cases. + + Parameters + ---------- + data : bytes + The binary data for a single packet. + pos : int + The bit cursor position in the packet. Default 0. 
+ """ + self.data = data + self.pos = pos + self._nbits = len(data) * 8 + + def __len__(self): + """The length of the full packet data object, in bits""" + return self._nbits + + def read(self, format_string: str, update_position: bool = True) -> Union[int, float, str, bytes]: + """Read bits from the packet data according to the format specifier. + + Starts reading at the current cursor position `pos` where pos is in bits. + + Parameters + ---------- + format_string : str + A bitstring-style format string, e.g. `uint:14` + update_position : bool + Whether to update the cursor position in the packet. Default True. + + Returns + ------- + : Union[int, float, str, bytes] + Value read from the packet data according to the format specifier. + """ + # pylint: disable=too-many-branches + name, n_things = format_string.split(":") + if name == "bytes": + nbits = int(n_things) * 8 + else: + nbits = int(n_things) + + if self.pos + nbits > self._nbits: + raise ValueError("End of packet reached") + + # Get the bytes we're interested in as an integer + bytes_as_int = _extract_bits(self.data, self.pos, nbits) + if update_position: + self.pos += nbits + + if name == "uint": + return bytes_as_int + if name == "int": + # Compute two's complement for signed integer of any size (nbits) + if (bytes_as_int & (1 << (nbits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255 + return bytes_as_int - (1 << nbits) # compute negative value + return bytes_as_int # return positive value as is + if name == "floatbe": + if nbits == 16: + name = "!e" + elif nbits == 32: + name = "!f" + elif nbits == 64: + name = "!d" + else: + raise ValueError(f"Unsupported float size {nbits}, only 32 and 64 are supported") + return struct.unpack(name, int.to_bytes(bytes_as_int, nbits // 8, byteorder="big"))[0] + if name == "bin": + # Binary string + return f"{bytes_as_int:0{nbits}b}" + if name == "bytes": + # Binary data directly returned + return int.to_bytes(bytes_as_int, nbits // 8, "big") + raise ValueError(f"Unsupported format type {name}") + + def peek(self, format_string: str): + """Peek from the packet data according to the format specifier. + + Does not update the current cursor position in the data. + + Parameters + ---------- + format_string : str + Bitstring-style format string, e.g. `uint:14` + + Returns + ------- + : Union[int, float, str, bytes] + Read value from the packet data according to the format specifier. + """ + return self.read(format_string, update_position=False) + + # DataEncoding definitions class DataEncoding(AttrComparable, metaclass=ABCMeta): """Abstract base class for XTCE data encodings""" + @classmethod - def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'DataEncoding': """Abstract classmethod to create a data encoding object from an XML element. 
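# Editorial note, not part of the diff: the "int" branch of PacketData.read above
# applies two's complement by hand. Standalone sketch of that rule:
def twos_complement(raw: int, nbits: int) -> int:
    """Interpret an nbits-wide unsigned value as a signed (two's complement) integer."""
    if raw & (1 << (nbits - 1)):       # sign bit set, e.g. 8-bit values 128-255
        return raw - (1 << nbits)      # compute the negative value
    return raw                         # positive values pass through unchanged

assert twos_complement(0xFF, 8) == -1
assert twos_complement(0x7F, 8) == 127
assert twos_complement(0b110, 3) == -2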
Parameters @@ -929,12 +1044,12 @@ def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): Returns ------- - cls + : DataEncoding """ return NotImplemented @staticmethod - def get_default_calibrator(data_encoding_element: ElementTree.Element, ns: dict): + def get_default_calibrator(data_encoding_element: ElementTree.Element, ns: dict) -> Union[Calibrator, None]: """Gets the default_calibrator for the data encoding element Parameters @@ -946,7 +1061,7 @@ def get_default_calibrator(data_encoding_element: ElementTree.Element, ns: dict) Returns ------- - Calibrator + : Union[Calibrator, None] """ for calibrator in [SplineCalibrator, PolynomialCalibrator, MathOperationCalibrator]: # Try to find each type of data encoding element. If we find one, we assume it's the only one. @@ -956,7 +1071,8 @@ def get_default_calibrator(data_encoding_element: ElementTree.Element, ns: dict) return None @staticmethod - def get_context_calibrators(data_encoding_element: ElementTree.Element, ns: dict) -> list or None: + def get_context_calibrators( + data_encoding_element: ElementTree.Element, ns: dict) -> Union[List[ContextCalibrator], None]: """Get the context default_calibrator(s) for the data encoding element Parameters @@ -968,8 +1084,8 @@ def get_context_calibrators(data_encoding_element: ElementTree.Element, ns: dict Returns ------- - : list - List of ContextCalibrator objects. + : Union[List[ContextCalibrator], None] + List of ContextCalibrator objects or None if there are no context calibrators """ if data_encoding_element.find('xtce:ContextCalibratorList', ns): context_calibrators_elements = data_encoding_element.findall( @@ -979,7 +1095,7 @@ def get_context_calibrators(data_encoding_element: ElementTree.Element, ns: dict return None @staticmethod - def _get_linear_adjuster(parent_element: ElementTree.Element, ns: dict) -> callable or None: + def _get_linear_adjuster(parent_element: ElementTree.Element, ns: dict) -> Union[callable, None]: """Examine a parent (e.g. a ) element and find a LinearAdjustment if present, creating and returning a function that evaluates the adjustment. @@ -992,8 +1108,8 @@ def _get_linear_adjuster(parent_element: ElementTree.Element, ns: dict) -> calla Returns ------- - adjuster : callable - Function object that adjusts a SizeInBits value by a linear function + adjuster : Union[callable, None] + Function object that adjusts a SizeInBits value by a linear function or None if no adjuster present """ linear_adjustment_element = parent_element.find('xtce:LinearAdjustment', ns) if linear_adjustment_element is not None: @@ -1025,7 +1141,7 @@ def adjuster(x: int) -> int: return adjuster return None - def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: dict): + def _get_format_string(self, packet_data: PacketData, parsed_data: dict) -> str: """Infer a bitstring format string, possibly using previously parsed data. This is called by parse_value only so it's private. @@ -1041,12 +1157,12 @@ def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: """ raise NotImplementedError() - def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, **kwargs): + def parse_value(self, packet_data: PacketData, parsed_data: dict, **kwargs) -> Tuple[Any, Any]: """Parse a value from packet data, possibly using previously parsed data items to inform parsing. Parameters ---------- - packet_data: bitstring.ConstBitStream + packet_data: PacketData Binary data coming up next in the packet. 
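# Editorial note, not part of the diff: _get_linear_adjuster above returns a small
# closure built from an XTCE LinearAdjustment's slope/intercept attributes. A
# standalone sketch of such a closure (the slope/intercept values are illustrative):
def make_linear_adjuster(slope: float = 1, intercept: float = 0):
    def adjuster(x: int) -> int:
        """Adjust a referenced size, e.g. slope=8 converts a byte count into bits."""
        return int(intercept + slope * x)
    return adjuster

bytes_to_bits = make_linear_adjuster(slope=8)
assert bytes_to_bits(3) == 24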
parsed_data: dict Previously parsed data items from which to infer parsing details (e.g. length of a field). @@ -1065,13 +1181,13 @@ class StringDataEncoding(DataEncoding): """""" def __init__(self, encoding: str = 'utf-8', - termination_character: str = None, - fixed_length: int = None, - leading_length_size: int = None, - dynamic_length_reference: str = None, + termination_character: Optional[str] = None, + fixed_length: Optional[int] = None, + leading_length_size: Optional[int] = None, + dynamic_length_reference: Optional[str] = None, use_calibrated_value: bool = True, - discrete_lookup_length: list = None, - length_linear_adjuster: callable = None): + discrete_lookup_length: Optional[List[DiscreteLookup]] = None, + length_linear_adjuster: Optional[callable] = None): """Constructor Only one of termination_character, fixed_length, or leading_length_size should be set. Setting more than one is nonsensical. @@ -1084,23 +1200,23 @@ def __init__(self, encoding: str = 'utf-8', ---------- encoding : str One of 'utf-8', 'utf-16-le', or 'utf-16-be'. Describes how to read the characters in the string. - termination_character : str + termination_character : Optional[str] A single hexadecimal character, represented as a string. Must be encoded in the same encoding as the string itself. For example, for a utf-8 encoded string, the hex string must be two hex characters (one byte). For a utf-16-* encoded string, the hex representation of the termination character must be four characters (two bytes). - fixed_length : int + fixed_length : Optional[int] Fixed length of the string, in bits. - leading_length_size : int + leading_length_size : Optional[int] Fixed size in bits of a leading field that contains the length of the subsequent string. - dynamic_length_reference : str - Name of referenced parameter for dynamic length. May be combined with a linear_adjuster + dynamic_length_reference : Optional[str] + Name of referenced parameter for dynamic length, in bits. May be combined with a linear_adjuster use_calibrated_value: bool Whether to use the calibrated value on the referenced parameter in dynamic_length_reference. Default is True. - discrete_lookup_length : DiscreteLookup - DiscreteLookup object with which to determine string length from another parameter. - length_linear_adjuster : callable + discrete_lookup_length : Optional[List[DiscreteLookup]] + List of DiscreteLookup objects with which to determine string length from another parameter. + length_linear_adjuster : Optional[callable] Function that linearly adjusts a size. e.g. if the size reference parameter gives a length in bytes, the linear adjuster should multiply by 8 to give the size in bits. """ @@ -1109,7 +1225,11 @@ def __init__(self, encoding: str = 'utf-8', f"Got encoding={encoding}. Encoding must be one of utf-8, utf-16-le, or utf-16-be (note that" f"endianness must be specified for utf-16 encoding.") self.encoding = encoding - if termination_character and len(bytes.fromhex(termination_character).decode(encoding).encode('utf-8')) != 1: + # Check that the termination character is a single character in the specified encoding + # e.g. b'\x58' in utf-8 is "X" + # b'\x21\00' in utf-16-le is "!" + # b'\x00\x21' in utf-16-be is "!" + if termination_character and len(bytes.fromhex(termination_character).decode(encoding)) != 1: raise ValueError(f"Termination character {termination_character} appears to be malformed. Expected a " f"hex string representation of a single character, e.g. 
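# Editorial note, not part of the diff: the constructor check above requires that
# the hex termination character decode to exactly one character in the declared
# encoding. Standalone sketch using the same example characters as the comments above:
def is_single_char(termination_character: str, encoding: str) -> bool:
    return len(bytes.fromhex(termination_character).decode(encoding)) == 1

assert is_single_char("58", "utf-8")        # b'\x58' -> "X"
assert is_single_char("2100", "utf-16-le")  # b'\x21\x00' -> "!"
assert is_single_char("0021", "utf-16-be")  # b'\x00\x21' -> "!"
assert not is_single_char("5858", "utf-8")  # decodes to two characters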
'58' for character 'X' in utf-8 " f"or '5800' for character 'X' in utf-16-le. Note that variable-width encoding is not " @@ -1122,30 +1242,35 @@ def __init__(self, encoding: str = 'utf-8', self.discrete_lookup_length = discrete_lookup_length self.length_linear_adjuster = length_linear_adjuster - def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: dict): + def _get_format_string(self, packet_data: PacketData, parsed_data: dict) -> Tuple[str, int]: """Infer a bitstring format string Parameters ---------- parsed_data: dict Dictionary of previously parsed data items for use in determining the format string if necessary. - packet_data: bitstring.ConstBitStream + packet_data: PacketData Packet data, which can be used to determine the string length from a leading value or from a termination character. Returns ------- - : str or None - Format string in the bitstring format. e.g. uint:16 - : int or None + : str + Format string in the bitstring format. e.g. `uint:16` + : int Number of bits to skip after parsing the string """ + # pylint: disable=too-many-branches skip_bits_after = 0 # Gets modified if we have a termination character if self.fixed_length: strlen_bits = self.fixed_length elif self.leading_length_size is not None: # strlen_bits is determined from a preceding integer leading_strlen_bitstring_format = f"uint:{self.leading_length_size}" strlen_bits = packet_data.read(leading_strlen_bitstring_format) + if strlen_bits % 8 != 0: + warnings.warn(f"String length (in bits) is {strlen_bits}, which is not a multiple of 8. " + f"This likely means something is wrong since strings are expected to be integer numbers " + f"of bytes.") elif self.discrete_lookup_length is not None: for discrete_lookup in self.discrete_lookup_length: strlen_bits = discrete_lookup.evaluate(parsed_data) @@ -1161,10 +1286,8 @@ def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: strlen_bits = parsed_data[self.dynamic_length_reference].raw_value strlen_bits = int(strlen_bits) elif self.termination_character is not None: - print(f"hex termination character: {self.termination_character}") - termination_char_utf8_bytes = bytes.fromhex(self.termination_character) - print(f"bytes termination character (utf-8): {termination_char_utf8_bytes}, " - f"len={len(termination_char_utf8_bytes)}") + # Literal bytes object (no encoding assumed yet) + termination_char_bytes = bytes.fromhex(self.termination_character) if self.encoding in ['utf-16-le', 'utf-16-be']: bytes_per_char = 2 @@ -1176,45 +1299,62 @@ def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: f"endianness must be specified for utf-16 encoding.") bits_per_byte = 8 - look_ahead_n_bytes = 0 - while look_ahead_n_bytes <= len(packet_data) - packet_data.pos: - print(f"looking ahead {look_ahead_n_bytes} bytes") - look_ahead = packet_data.peek(f'bytes:{look_ahead_n_bytes}') # Outputs UTF-8 encoded byte string - look_ahead = look_ahead.decode('utf-8').encode(self.encoding) # Force specified encoding - print(f"string so far: {look_ahead}") - if termination_char_utf8_bytes in look_ahead: - print('Found termination character.') + + # If we are starting mid-byte, there may not be an integer number of bytes between the string and the + # end of the packet + n_full_bytes_left_in_packet = (len(packet_data) - packet_data.pos) // bits_per_byte + + # Start by looking ahead at most 16 chars. + look_ahead_n_chars = min(16, n_full_bytes_left_in_packet // bytes_per_char) + + # Peek at 64 characters at a time. 
64, 128, 192, ... + # Remember in utf-16-be/le, each character will be 2 bytes + look_ahead_n_bytes = bytes_per_char * look_ahead_n_chars + + while look_ahead_n_bytes <= n_full_bytes_left_in_packet: + # Outputs byte string (encoding is irrelevant, they are just bytes) + look_ahead = packet_data.peek(f'bytes:{look_ahead_n_bytes}') + # Check if the termination character byte string is in the look ahead bytes we peeked at + if termination_char_bytes in look_ahead: # Implicit assumption of one termination character in specified encoding tclen_bits = bytes_per_char * bits_per_byte - strlen_bits = (look_ahead_n_bytes * bits_per_byte) - tclen_bits + # Split the look ahead bytes string at the termination character and get its length (in bits) + strlen_bits = len(look_ahead.split(termination_char_bytes)[0]) * bits_per_byte + # Tell the parser to skip the termination character skip_bits_after = tclen_bits break - look_ahead_n_bytes += bytes_per_char + # Increment look ahead length by one char (1 or 2 bytes, based on encoding) + look_ahead_n_bytes += bytes_per_char * look_ahead_n_chars + # Ensure we never go over the total length of the packet, in bits + look_ahead_n_bytes = min(look_ahead_n_bytes, n_full_bytes_left_in_packet) else: raise ValueError(f"Reached end of binary string without finding " f"termination character {self.termination_character}.") else: raise ValueError("Unable to parse StringParameterType. " "Didn't contain any way to constrain the length of the string.") - if self.length_linear_adjuster is not None: + if not self.termination_character and self.length_linear_adjuster is not None: + # Only adjust if we are not doing this by termination character. Adjusting a length that is objectively + # determined via termination character is nonsensical. strlen_bits = self.length_linear_adjuster(strlen_bits) return f"bytes:{strlen_bits // 8}", skip_bits_after + # pylint: enable=too-many-branches - def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, **kwargs): + def parse_value(self, packet_data: PacketData, parsed_data: dict, **kwargs) -> Tuple[str, None]: """Parse a value from packet data, possibly using previously parsed data items to inform parsing. Parameters ---------- - packet_data: bitstring.ConstBitStream + packet_data: PacketData Binary data coming up next in the packet. parsed_data: dict, Optional Previously parsed data items from which to infer parsing details (e.g. length of a field). Returns ------- - : any + : str Parsed value - : any + : None Calibrated value """ bitstring_format, skip_bits_after = self._get_format_string(packet_data, parsed_data) @@ -1223,12 +1363,13 @@ def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, return parsed_value.decode(self.encoding), None @classmethod - def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'StringDataEncoding': """Create a data encoding object from an XML element. Strings in XTCE can be described in three ways: 1. Using a termination character that marks the end of the string. - 2. Using a fixed length, which may be derived from referenced parameter either directly or via a discrete lookup table. + 2. Using a fixed length, which may be derived from referenced parameter either directly or via a discrete + lookup table. 3. Using a leading size field that describes the size of the following string. 
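# Editorial note, not part of the diff: once the termination character is found in
# the peeked bytes (loop above), the string length in bits is the byte offset of the
# terminator times 8, and the terminator itself is skipped afterwards. Standalone
# sketch with illustrative data:
def strlen_and_skip_bits(look_ahead: bytes, termination_char_bytes: bytes, bytes_per_char: int):
    strlen_bits = len(look_ahead.split(termination_char_bytes)[0]) * 8
    skip_bits_after = bytes_per_char * 8   # skip over the termination character
    return strlen_bits, skip_bits_after

assert strlen_and_skip_bits(b"HELLO\x00WORLD", b"\x00", 1) == (40, 8)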
Parameters @@ -1296,7 +1437,8 @@ class NumericDataEncoding(DataEncoding, metaclass=ABCMeta): """Abstract class that is inherited by IntegerDataEncoding and FloatDataEncoding""" def __init__(self, size_in_bits: int, encoding: str, - default_calibrator: Calibrator = None, context_calibrators: list = None): + default_calibrator: Optional[Calibrator] = None, + context_calibrators: Optional[List[ContextCalibrator]] = None): """Constructor # TODO: Implement ByteOrderList to inform endianness @@ -1310,10 +1452,10 @@ def __init__(self, size_in_bits: int, encoding: str, though 'signed' is not actually a valid specifier according to XTCE. 'twosCompliment' [sic] should be used instead, though we support the unofficial 'signed' specifier here. For supported specifiers, see XTCE spec 4.3.2.2.5.6.2 - default_calibrator : Calibrator + default_calibrator : Optional[Calibrator] Optional Calibrator object, containing information on how to transform the integer-encoded data, e.g. via a polynomial conversion or spline interpolation. - context_calibrators : list + context_calibrators : Optional[List[ContextCalibrator]] List of ContextCalibrator objects, containing match criteria and corresponding calibrators to use in various scenarios, based on other parameters. """ @@ -1322,12 +1464,14 @@ def __init__(self, size_in_bits: int, encoding: str, self.default_calibrator = default_calibrator self.context_calibrators = context_calibrators - def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, **kwargs): + def parse_value(self, + packet_data: PacketData, + parsed_data: dict, **kwargs) -> Tuple[Union[int, float], Union[int, float]]: """Parse a value from packet data, possibly using previously parsed data items to inform parsing. Parameters ---------- - packet_data: bitstring.ConstBitStream + packet_data: PacketData Binary data coming up next in the packet. parsed_data: dict, Optional Previously parsed data items from which to infer parsing details (e.g. length of a field). @@ -1359,7 +1503,7 @@ def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, class IntegerDataEncoding(NumericDataEncoding): """""" - def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: dict): + def _get_format_string(self, packet_data: PacketData, parsed_data: dict) -> str: """Infer a bitstring format string Returns @@ -1379,7 +1523,7 @@ def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: return f"{base}:{self.size_in_bits}" @classmethod - def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'IntegerDataEncoding': """Create a data encoding object from an XML element. 
Parameters @@ -1394,7 +1538,10 @@ def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): : cls """ size_in_bits = int(element.attrib['sizeInBits']) - encoding = element.attrib['encoding'] + if 'encoding' in element.attrib: + encoding = element.attrib['encoding'] + else: + encoding = "unsigned" calibrator = cls.get_default_calibrator(element, ns) context_calibrators = cls.get_context_calibrators(element, ns) return cls(size_in_bits=size_in_bits, encoding=encoding, @@ -1406,7 +1553,8 @@ class FloatDataEncoding(NumericDataEncoding): _supported_encodings = ['IEEE-754', 'MIL-1750A'] def __init__(self, size_in_bits: int, encoding: str = 'IEEE-754', - default_calibrator: Calibrator = None, context_calibrators: list = None): + default_calibrator: Optional[Calibrator] = None, + context_calibrators: Optional[List[ContextCalibrator]] = None): """Constructor # TODO: Implement MIL-1650A encoding option @@ -1419,10 +1567,10 @@ def __init__(self, size_in_bits: int, encoding: str = 'IEEE-754', Size of the encoded value, in bits. encoding : str Encoding method of the float data. Must be either 'IEEE-754' or 'MIL-1750A'. Defaults to IEEE-754. - default_calibrator : Calibrator + default_calibrator : Optional[Calibrator] Optional Calibrator object, containing information on how to transform the data, e.g. via a polynomial conversion or spline interpolation. - context_calibrators : list + context_calibrators : Optional[List[ContextCalibrator]] List of ContextCalibrator objects, containing match criteria and corresponding calibrators to use in various scenarios, based on other parameters. """ @@ -1431,10 +1579,13 @@ def __init__(self, size_in_bits: int, encoding: str = 'IEEE-754', f"Must be one of {self._supported_encodings}.") if encoding == 'MIL-1750A': raise NotImplementedError("MIL-1750A encoded floats are not supported by this library yet.") + if encoding == 'IEEE-754' and size_in_bits not in (16, 32, 64): + raise ValueError(f"Invalid size_in_bits value for IEEE-754 FloatDataEncoding, {size_in_bits}. " + "Must be 16, 32, or 64.") super().__init__(size_in_bits, encoding=encoding, default_calibrator=default_calibrator, context_calibrators=context_calibrators) - def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: dict): + def _get_format_string(self, packet_data: PacketData, parsed_data: dict) -> str: """Infer a bitstring format string Returns @@ -1445,7 +1596,7 @@ def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: return f"floatbe:{self.size_in_bits}" @classmethod - def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'FloatDataEncoding': """Create a data encoding object from an XML element. 
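# Editorial note, not part of the diff: the new IEEE-754 size check above restricts
# sizes to 16, 32, or 64 bits, which correspond to the struct formats "!e"/"!f"/"!d"
# used by PacketData.read earlier in this diff. Standalone sketch of that decoding:
import struct

def decode_float_be(raw_int: int, nbits: int) -> float:
    fmt = {16: "!e", 32: "!f", 64: "!d"}[nbits]
    return struct.unpack(fmt, raw_int.to_bytes(nbits // 8, byteorder="big"))[0]

assert decode_float_be(0x3FF0000000000000, 64) == 1.0
assert abs(decode_float_be(0x40490FDB, 32) - 3.14159265) < 1e-6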
Parameters @@ -1473,26 +1624,26 @@ def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): class BinaryDataEncoding(DataEncoding): """""" - def __init__(self, fixed_size_in_bits: int = None, - size_reference_parameter: str = None, use_calibrated_value: bool = True, - size_discrete_lookup_list: list = None, - linear_adjuster: callable = None): + def __init__(self, fixed_size_in_bits: Optional[int] = None, + size_reference_parameter: Optional[str] = None, use_calibrated_value: bool = True, + size_discrete_lookup_list: Optional[List[DiscreteLookup]] = None, + linear_adjuster: Optional[callable] = None): """Constructor Parameters ---------- - fixed_size_in_bits : int + fixed_size_in_bits : Optional[int] Fixed size for the binary field, in bits. - size_reference_parameter : str + size_reference_parameter : Optional[str] Name of a parameter to reference for the binary field length, in bits. Note that space often specifies these fields in byte length, not bit length. This should be taken care of by a LinearAdjuster element that simply instructs the value to be multiplied by 8 but that hasn't historically been implemented unfortunately. use_calibrated_value: bool, Optional Default True. If False, the size_reference_parameter is examined for its raw value. - size_discrete_lookup_list: list + size_discrete_lookup_list: Optional[List[DiscreteLookup]] List of DiscreteLookup objects by which to determine the length of the binary data field. This suffers from the same bit/byte conversion problem as size_reference_parameter. - linear_adjuster : callable + linear_adjuster : Optional[callable] Function that linearly adjusts a size. e.g. if the size reference parameter gives a length in bytes, the linear adjuster should multiply by 8 to give the size in bits. """ @@ -1502,12 +1653,12 @@ def __init__(self, fixed_size_in_bits: int = None, self.size_discrete_lookup_list = size_discrete_lookup_list self.linear_adjuster = linear_adjuster - def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: dict): + def _get_format_string(self, packet_data: PacketData, parsed_data: dict) -> str: """Infer a bitstring format string Returns ------- - : str or None + : Union[str, None] Format string in the bitstring format. e.g. bin:1024 """ if self.fixed_size_in_bits is not None: @@ -1534,16 +1685,16 @@ def _get_format_string(self, packet_data: bitstring.ConstBitStream, parsed_data: len_bits = self.linear_adjuster(len_bits) return f"bin:{len_bits}" - def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, word_size: int = None, **kwargs): + def parse_value(self, packet_data: PacketData, parsed_data: dict, word_size: Optional[int] = None, **kwargs): """Parse a value from packet data, possibly using previously parsed data items to inform parsing. Parameters ---------- - packet_data: bitstring.ConstBitStream + packet_data: PacketData Binary data coming up next in the packet. - parsed_data: dict, Optional + parsed_data: dict Previously parsed data items from which to infer parsing details (e.g. length of a field). - word_size : int, Optional + word_size : Optional[int] Word size for encoded data. This is used to ensure that the cursor ends up at the end of the last word and ready to parse the next data field. 
@@ -1564,7 +1715,7 @@ def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, return parsed_value, None @classmethod - def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): + def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'BinaryDataEncoding': """Create a data encoding object from an XML element. Parameters @@ -1576,7 +1727,7 @@ def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): Returns ------- - : cls + : BinaryDataEncoding """ fixed_value_element = element.find('xtce:SizeInBits/xtce:FixedValue', ns) if fixed_value_element is not None: @@ -1608,7 +1759,7 @@ def from_data_encoding_xml_element(cls, element: ElementTree.Element, ns: dict): class ParameterType(AttrComparable, metaclass=ABCMeta): """Abstract base class for XTCE parameter types""" - def __init__(self, name: str, encoding: DataEncoding, unit: str = None): + def __init__(self, name: str, encoding: DataEncoding, unit: Optional[str] = None): """Constructor Parameters @@ -1617,7 +1768,7 @@ def __init__(self, name: str, encoding: DataEncoding, unit: str = None): Parameter type name. Usually something like 'MSN__PARAM_Type' encoding : DataEncoding How the data is encoded. e.g. IntegerDataEncoding, StringDataEncoding, etc. - unit : str + unit : Optional[str] String describing the unit for the stored value. """ self.name = name @@ -1630,8 +1781,8 @@ def __repr__(self): return f"<{module}.{qualname} {self.name}>" @classmethod - def from_parameter_type_xml_element(cls, element: ElementTree.Element, ns: dict): - """Create an IntegerParameterType from an XML element. + def from_parameter_type_xml_element(cls, element: ElementTree.Element, ns: dict) -> 'ParameterType': + """Create a *ParameterType from an XML element. Parameters ---------- @@ -1642,7 +1793,7 @@ def from_parameter_type_xml_element(cls, element: ElementTree.Element, ns: dict) Returns ------- - : IntegerParameterType + : ParameterType """ name = element.attrib['name'] unit = cls.get_units(element, ns) @@ -1650,7 +1801,7 @@ def from_parameter_type_xml_element(cls, element: ElementTree.Element, ns: dict) return cls(name, encoding, unit) @staticmethod - def get_units(parameter_type_element: ElementTree.Element, ns: dict) -> str or None: + def get_units(parameter_type_element: ElementTree.Element, ns: dict) -> Union[str, None]: """Finds the units associated with a parameter type element and parsed them to return a unit string. We assume only one but this could be extended to support multiple units. See section 4.3.2.2.4 of CCSDS 660.1-G-1 @@ -1664,8 +1815,10 @@ def get_units(parameter_type_element: ElementTree.Element, ns: dict) -> str or N Returns ------- - : str or None + : Union[str, None] + Unit string or None if no units are defined """ + # Assume we are not parsing a Time Parameter Type, which stores units differently units = parameter_type_element.findall('xtce:UnitSet/xtce:Unit', ns) # TODO: Implement multiple unit elements for compound unit definitions assert len(units) <= 1, f"Found {len(units)} elements in a single ." \ @@ -1673,10 +1826,11 @@ def get_units(parameter_type_element: ElementTree.Element, ns: dict) -> str or N f"and is not yet supported by this library." 
if units: return " ".join([u.text for u in units]) + # Units are optional so return None if they aren't specified return None @staticmethod - def get_data_encoding(parameter_type_element: ElementTree.Element, ns: dict) -> DataEncoding or None: + def get_data_encoding(parameter_type_element: ElementTree.Element, ns: dict) -> Union[DataEncoding, None]: """Finds the data encoding XML element associated with a parameter type XML element and parses it, returning an object representation of the data encoding. @@ -1689,21 +1843,23 @@ def get_data_encoding(parameter_type_element: ElementTree.Element, ns: dict) -> Returns ------- - : DataEncoding or None + : Union[DataEncoding, None] + DataEncoding object or None if no data encoding is defined (which is probably an issue) """ for data_encoding in [StringDataEncoding, IntegerDataEncoding, FloatDataEncoding, BinaryDataEncoding]: # Try to find each type of data encoding element. If we find one, we assume it's the only one. - element = parameter_type_element.find(f"xtce:{data_encoding.__name__}", ns) + element = parameter_type_element.find(f".//xtce:{data_encoding.__name__}", ns) if element is not None: return data_encoding.from_data_encoding_xml_element(element, ns) + return None - def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, **kwargs): + def parse_value(self, packet_data: PacketData, parsed_data: dict, **kwargs): """Using the parameter type definition and associated data encoding, parse a value from a bit stream starting at the current cursor position. Parameters ---------- - packet_data : bitstring.ConstBitStream + packet_data : PacketData Binary packet data with cursor at the beginning of this parameter's data field. parsed_data: dict Previously parsed data to inform parsing. @@ -1719,7 +1875,7 @@ def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, class StringParameterType(ParameterType): """""" - def __init__(self, name: str, encoding: StringDataEncoding, unit: str = None): + def __init__(self, name: str, encoding: StringDataEncoding, unit: Optional[str] = None): """Constructor Parameters @@ -1728,7 +1884,7 @@ def __init__(self, name: str, encoding: StringDataEncoding, unit: str = None): Parameter type name. Usually something like 'MSN__PARAM_Type' encoding : StringDataEncoding Must be a StringDataEncoding object since strings can't be encoded other ways. - unit : str + unit : Optional[str] String describing the unit for the stored value. """ if not isinstance(encoding, StringDataEncoding): @@ -1750,7 +1906,7 @@ class FloatParameterType(ParameterType): class EnumeratedParameterType(ParameterType): """""" - def __init__(self, name: str, encoding: DataEncoding, enumeration: dict, unit: str or None = None): + def __init__(self, name: str, encoding: DataEncoding, enumeration: dict, unit: Union[str, None] = None): """Constructor Parameters @@ -1815,17 +1971,17 @@ def get_enumeration_list_contents(element: ElementTree.Element, ns: dict) -> dic raise ValueError("An EnumeratedParameterType must contain an EnumerationList.") return { - el.attrib['label']: int(el.attrib['value']) + int(el.attrib['value']): el.attrib['label'] for el in enumeration_list.iterfind('xtce:Enumeration', ns) } - def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, **kwargs): + def parse_value(self, packet_data: PacketData, parsed_data: dict, **kwargs): """Using the parameter type definition and associated data encoding, parse a value from a bit stream starting at the current cursor position. 
Parameters ---------- - packet_data : bitstring.ConstBitStream + packet_data : PacketData Binary packet data with cursor at the beginning of this parameter's data field. parsed_data : dict Previously parsed data @@ -1841,8 +1997,8 @@ def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, # Note: The enum lookup only operates on raw values. This is specified in 4.3.2.4.3.6 of the XTCE spec " # CCSDS 660.1-G-2 try: - label = next(key for key, value in self.enumeration.items() if value == raw) - except StopIteration as exc: + label = self.enumeration[raw] + except KeyError as exc: raise ValueError(f"Failed to find raw value {raw} in enum lookup list {self.enumeration}.") from exc return raw, label @@ -1850,7 +2006,7 @@ def parse_value(self, packet_data: bitstring.ConstBitStream, parsed_data: dict, class BinaryParameterType(ParameterType): """""" - def __init__(self, name: str, encoding: BinaryDataEncoding, unit: str = None): + def __init__(self, name: str, encoding: BinaryDataEncoding, unit: Optional[str] = None): """Constructor Parameters @@ -1859,7 +2015,7 @@ def __init__(self, name: str, encoding: BinaryDataEncoding, unit: str = None): Parameter type name. Usually something like 'MSN__PARAM_Type' encoding : BinaryDataEncoding Must be a BinaryDataEncoding object since binary data can't be encoded other ways. - unit : str + unit : Optional[str] String describing the unit for the stored value. """ if not isinstance(encoding, BinaryDataEncoding): @@ -1868,10 +2024,225 @@ def __init__(self, name: str, encoding: BinaryDataEncoding, unit: str = None): self.encoding = encoding -class Parameter: +class BooleanParameterType(ParameterType): + """""" + + def __init__(self, name: str, encoding: DataEncoding, unit: Optional[str] = None): + """Constructor that just issues a warning if the encoding is String or Binary""" + if isinstance(encoding, (BinaryDataEncoding, StringDataEncoding)): + warnings.warn(f"You are encoding a BooleanParameterType with a {type(encoding)} encoding." + f"This is almost certainly a very bad idea because the behavior of string and binary " + f"encoded booleans is not specified in XTCE. e.g. is the string \"0\" truthy?") + super().__init__(name, encoding, unit) + + def parse_value(self, packet_data: PacketData, parsed_data: dict, **kwargs): + """Using the parameter type definition and associated data encoding, parse a value from a bit stream starting + at the current cursor position. + + Parameters + ---------- + packet_data : PacketData + Binary packet data with cursor at the beginning of this parameter's data field. + parsed_data : dict + Previously parsed data + + Returns + ------- + parsed_value : int + Raw encoded value + derived_value : str + Resulting boolean representation of the encoded raw value + """ + raw, _ = super().parse_value(packet_data, parsed_data, **kwargs) + # Note: This behaves very strangely for String and Binary data encodings. + # Don't use those for Boolean parameters. The behavior isn't specified well in XTCE. + return raw, bool(raw) + + +class TimeParameterType(ParameterType, metaclass=ABCMeta): + """Abstract class for time parameter types""" + + def __init__(self, name: str, encoding: DataEncoding, unit: Optional[str] = None, + epoch: Optional[str] = None, offset_from: Optional[str] = None): + """Constructor + + Parameters + ---------- + name : str + Parameter type name. Usually something like 'MSN__PARAM_Type'. + encoding : DataEncoding + How the data is encoded. e.g. IntegerDataEncoding, StringDataEncoding, etc. 
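# Editorial note, not part of the diff: with the EnumerationList parsed as a
# {raw value: label} mapping (the change above), the label lookup becomes a direct
# dict access guarded by KeyError. The labels below are made-up example values.
enumeration = {0: "OFF", 1: "ON", 2: "SAFE"}

raw = 1
try:
    label = enumeration[raw]
except KeyError as exc:
    raise ValueError(f"Failed to find raw value {raw} in enum lookup list {enumeration}.") from exc
assert label == "ON"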
+ unit : Optional[str] + String describing the unit for the stored value. Note that if a scale and offset are provided on + the Encoding element, the unit applies to the scaled value, not the raw value. + epoch : Optional[str] + String describing the starting epoch for the date or datetime encoded in the parameter. + Must be xs:date, xs:dateTime, or one of the following: "TAI", "J2000", "UNIX", "POSIX", "GPS". + offset_from : Optional[str] + Used to reference another time parameter by name. It allows + for the stringing together of several dissimilar but related time parameters. + + Notes + ----- + The XTCE spec is not very clear about OffsetFrom or what it is for. We parse it but don't use it for + anything. + """ + super().__init__(name, encoding, unit=unit) + self.epoch = epoch + self.offset_from = offset_from + + @classmethod + def from_parameter_type_xml_element(cls, element: ElementTree.Element, ns: dict): + """Create a *TimeParameterType from an XML element. + + Parameters + ---------- + element : ElementTree.Element + The XML element from which to create the object. + ns: dict + XML namespace dict + + Returns + ------- + : TimeParameterType + """ + name = element.attrib['name'] + unit = cls.get_units(element, ns) + encoding = cls.get_data_encoding(element, ns) + encoding_unit_scaler = cls.get_time_unit_linear_scaler(element, ns) + if encoding_unit_scaler: + encoding.default_calibrator = encoding_unit_scaler + epoch = cls.get_epoch(element, ns) + offset_from = cls.get_offset_from(element, ns) + return cls(name, encoding, unit, epoch, offset_from) + + @staticmethod + def get_units(parameter_type_element: ElementTree.Element, ns: dict) -> Union[str, None]: + """Finds the units associated with a parameter type element and parsed them to return a unit string. + We assume only one but this could be extended to support multiple units. + See section 4.3.2.2.4 of CCSDS 660.1-G-1 + + Parameters + ---------- + parameter_type_element : ElementTree.Element + The parameter type element + ns : dict + XML namespace dictionary + + Returns + ------- + : Union[str, None] + Unit string or None if no units are defined + """ + encoding_element = parameter_type_element.find('xtce:Encoding', ns) + if encoding_element and "units" in encoding_element.attrib: + units = encoding_element.attrib["units"] + return units + # Units are optional so return None if they aren't specified + return None + + @staticmethod + def get_time_unit_linear_scaler( + parameter_type_element: ElementTree.Element, ns: dict) -> Union[PolynomialCalibrator, None]: + """Finds the linear calibrator associated with the Encoding element for the parameter type element. 
+ See section 4.3.2.4.8.3 of CCSDS 660.1-G-2 + + Parameters + ---------- + parameter_type_element : ElementTree.Element + The parameter type element + ns : dict + XML namespace dictionary + + Returns + ------- + : Union[PolynomialCalibrator, None] + The PolynomialCalibrator, or None if we couldn't create a valid calibrator from the XML element + """ + encoding_element = parameter_type_element.find('xtce:Encoding', ns) + coefficients = [] + + if "offset" in encoding_element.attrib: + offset = encoding_element.attrib["offset"] + c0 = PolynomialCoefficient(coefficient=float(offset), exponent=0) + coefficients.append(c0) + + if "scale" in encoding_element.attrib: + scale = encoding_element.attrib["scale"] + c1 = PolynomialCoefficient(coefficient=float(scale), exponent=1) + coefficients.append(c1) + # If we have an offset but not a scale, we need to add a first order term with coefficient 1 + elif "offset" in encoding_element.attrib: + c1 = PolynomialCoefficient(coefficient=1, exponent=1) + coefficients.append(c1) + + if coefficients: + return PolynomialCalibrator(coefficients=coefficients) + # If we didn't find offset nor scale, return None (no calibrator) + return None + + @staticmethod + def get_epoch(parameter_type_element: ElementTree.Element, ns: dict) -> Union[str, None]: + """Finds the epoch associated with a parameter type element and parses them to return an epoch string. + See section 4.3.2.4.9 of CCSDS 660.1-G-2 + + Parameters + ---------- + parameter_type_element : ElementTree.Element + The parameter type element + ns : dict + XML namespace dictionary + + Returns + ------- + : Union[str, None] + The epoch string, which may be a datetime string or a named epoch such as TAI. None if the element was + not found. + """ + epoch_element = parameter_type_element.find('xtce:ReferenceTime/xtce:Epoch', ns) + if epoch_element is not None: + return epoch_element.text + return None + + @staticmethod + def get_offset_from(parameter_type_element: ElementTree.Element, ns: dict) -> Union[str, None]: + """Finds the parameter referenced in OffsetFrom in a parameter type element and returns the name of the + referenced parameter (which must be of type TimeParameterType). + See section 4.3.2.4.9 of CCSDS 660.1-G-1 + + Parameters + ---------- + parameter_type_element : ElementTree.Element + The parameter type element + ns : dict + XML namespace dictionary + + Returns + ------- + : Union[str, None] + The named of the referenced parameter. None if no OffsetFrom element was found. + """ + offset_from_element = parameter_type_element.find('xtce:ReferenceTime/xtce:OffsetFrom', ns) + if offset_from_element is not None: + return offset_from_element.attrib['parameterRef'] + return None + + +class AbsoluteTimeParameterType(TimeParameterType): + """""" + pass + + +class RelativeTimeParameterType(TimeParameterType): + """""" + pass + + +class Parameter(AttrComparable): """""" - def __init__(self, name: str, parameter_type: ParameterType): + def __init__(self, name: str, parameter_type: ParameterType, + short_description: Optional[str] = None, long_description: Optional[str] = None): """Constructor Parameters @@ -1880,9 +2251,15 @@ def __init__(self, name: str, parameter_type: ParameterType): Parameter name. Typically something like MSN__PARAMNAME parameter_type : ParameterType Parameter type object that describes how the parameter is stored. 
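# Editorial note, not part of the diff: get_time_unit_linear_scaler above turns the
# Encoding element's offset/scale attributes into a first-order polynomial, i.e.
# calibrated = offset + scale * raw. Standalone sketch with illustrative values:
def time_unit_scaler(offset: float = 0.0, scale: float = 1.0):
    return lambda raw: offset + scale * raw

to_seconds = time_unit_scaler(scale=1e-6)   # e.g. microsecond ticks -> seconds
assert abs(to_seconds(2_500_000) - 2.5) < 1e-9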
+ short_description : Optional[str] + Short description of parameter as parsed from XTCE + long_description : Optional[str] + Long description of parameter as parsed from XTCE """ self.name = name self.parameter_type = parameter_type + self.short_description = short_description + self.long_description = long_description def __repr__(self): module = self.__class__.__module__ @@ -1890,18 +2267,18 @@ def __repr__(self): return f"<{module}.{qualname} {self.name}>" -class SequenceContainer: +class SequenceContainer(AttrComparable): """""" def __init__(self, name: str, entry_list: list, - short_description: str = None, - long_description: str = None, - base_container_name: str = None, - restriction_criteria: list = None, + short_description: Optional[str] = None, + long_description: Optional[str] = None, + base_container_name: Optional[str] = None, + restriction_criteria: Optional[list] = None, abstract: bool = False, - inheritors: list = None): + inheritors: Optional[List['SequenceContainer']] = None): """Object representation of Parameters @@ -1910,16 +2287,18 @@ def __init__(self, Container name entry_list : list List of Parameter objects - long_description : str + short_description : Optional[str] + long_description : Optional[str] Long description of the container - base_container_name : str + base_container_name : Optional[str] Name of the base container from which this may inherit if restriction criteria are met. - restriction_criteria : list + restriction_criteria : Optional[list] A list of MatchCriteria elements that evaluate to determine whether the SequenceContainer should be included. abstract : bool True if container has abstract=true attribute. False otherwise. - inheritors : list, Optional + Default False. + inheritors : Optional[List[SequenceContainer]] List of SequenceContainer objects that may inherit this one's entry list if their restriction criteria are met. Any SequenceContainers with this container as base_container_name should be listed here. """ @@ -1950,9 +2329,12 @@ class XtcePacketDefinition: '{{{xtce}}}FloatParameterType': FloatParameterType, '{{{xtce}}}EnumeratedParameterType': EnumeratedParameterType, '{{{xtce}}}BinaryParameterType': BinaryParameterType, + '{{{xtce}}}BooleanParameterType': BooleanParameterType, + '{{{xtce}}}AbsoluteTimeParameterType': AbsoluteTimeParameterType, + '{{{xtce}}}RelativeTimeParameterType': RelativeTimeParameterType, } - def __init__(self, xtce_document: str or Path, ns: dict = None): + def __init__(self, xtce_document: TextIO, ns: Optional[dict] = None): """Instantiate an object representation of a CCSDS packet definition, according to a format specified in an XTCE XML document. The parser iteratively builds sequences of parameters according to the SequenceContainers specified in the XML document's ContainerSet element. The notions of container inheritance @@ -1962,9 +2344,9 @@ def __init__(self, xtce_document: str or Path, ns: dict = None): Parameters ---------- - xtce_document : str or Path + xtce_document : TextIO Path to XTCE XML document containing packet definition. - ns : dict + ns : Optional[dict] Optional different namespace than the default xtce namespace. 
""" self._sequence_container_cache = {} # Lookup for parsed sequence container objects @@ -1974,18 +2356,23 @@ def __init__(self, xtce_document: str or Path, ns: dict = None): self.type_tag_to_object = {k.format(**self.ns): v for k, v in self._tag_to_type_template.items()} + self._populate_sequence_container_cache() + + def __getitem__(self, item): + return self._sequence_container_cache[item] + + def _populate_sequence_container_cache(self): + """Force populating sequence_container_cache by parsing all SequenceContainers""" for sequence_container in self.container_set.iterfind('xtce:SequenceContainer', self.ns): self._sequence_container_cache[ sequence_container.attrib['name'] ] = self.parse_sequence_container_contents(sequence_container) + # Back-populate the list of inheritors for each container for name, sc in self._sequence_container_cache.items(): if sc.base_container_name: self._sequence_container_cache[sc.base_container_name].inheritors.append(name) - def __getitem__(self, item): - return self._sequence_container_cache[item] - def parse_sequence_container_contents(self, sequence_container: ElementTree.Element) -> SequenceContainer: """Parses the list of parameters in a SequenceContainer element, recursively parsing nested SequenceContainers to build an entry list of parameters that flattens the nested structure to derive a sequential ordering of @@ -2007,10 +2394,7 @@ def parse_sequence_container_contents(self, sequence_container: ElementTree.Elem try: base_container, restriction_criteria = self._get_container_base_container(sequence_container) base_sequence_container = self.parse_sequence_container_contents(base_container) - #base_sequence_container.restriction_criteria = restriction_criteria base_container_name = base_sequence_container.name - # Prepend the base container. This is necessary for handling multiple inheritance. - #entry_list.insert(0, base_sequence_container) except ElementNotFoundError: base_container_name = None restriction_criteria = None @@ -2027,23 +2411,56 @@ def parse_sequence_container_contents(self, sequence_container: ElementTree.Elem else: parameter_element = self._find_parameter(parameter_name) parameter_type_name = parameter_element.attrib['parameterTypeRef'] - parameter_type_element = self._find_parameter_type(parameter_type_name) - parameter_type_class = self.type_tag_to_object[parameter_type_element.tag] + + # If we've already parsed this parameter type for a different parameter + if parameter_type_name in self._parameter_type_cache: + parameter_type_object = self._parameter_type_cache[parameter_type_name] + else: + parameter_type_element = self._find_parameter_type(parameter_type_name) + try: + parameter_type_class = self.type_tag_to_object[parameter_type_element.tag] + except KeyError as e: + if ( + "ArrayParameterType" in parameter_type_element.tag or + "AggregateParameterType" in parameter_type_element.tag + ): + raise NotImplementedError(f"Unsupported parameter type {parameter_type_element.tag}. " + "Supporting this parameter type is in the roadmap but has " + "not yet been implemented.") from e + raise InvalidParameterTypeError(f"Invalid parameter type {parameter_type_element.tag}. 
" + "If you believe this is a valid XTCE parameter type, " + "please open a feature request as a Github issue with a " + "reference to the XTCE element description for the " + "parameter type element.") from e + parameter_type_object = parameter_type_class.from_parameter_type_xml_element( + parameter_type_element, self.ns) + self._parameter_type_cache[parameter_type_name] = parameter_type_object # Add to cache + + parameter_short_description = parameter_element.attrib['shortDescription'] if ( + 'shortDescription' in parameter_element.attrib + ) else None + parameter_long_description = parameter_element.find('xtce:LongDescription', self.ns).text if ( + parameter_element.find('xtce:LongDescription', self.ns) is not None + ) else None + parameter_object = Parameter( name=parameter_name, - parameter_type=parameter_type_class.from_parameter_type_xml_element( - parameter_type_element, self.ns)) + parameter_type=parameter_type_object, + short_description=parameter_short_description, + long_description=parameter_long_description + ) entry_list.append(parameter_object) - self._parameter_cache[parameter_name] = parameter_object - elif entry.tag == '{{{xtce}}}ContainerRefEntry'.format(**self.ns): # pylint: disable=consider-using-f-string + self._parameter_cache[parameter_name] = parameter_object # Add to cache + elif entry.tag == '{{{xtce}}}ContainerRefEntry'.format( # pylint: disable=consider-using-f-string + **self.ns): nested_container = self._find_container(name=entry.attrib['containerRef']) entry_list.append(self.parse_sequence_container_contents(nested_container)) short_description = sequence_container.attrib['shortDescription'] if ( - 'shortDescription' in sequence_container.attrib + 'shortDescription' in sequence_container.attrib ) else None long_description = sequence_container.find('xtce:LongDescription', self.ns).text if ( - sequence_container.find('xtce:LongDescription', self.ns) is not None + sequence_container.find('xtce:LongDescription', self.ns) is not None ) else None return SequenceContainer(name=sequence_container.attrib['name'], @@ -2055,10 +2472,21 @@ def parse_sequence_container_contents(self, sequence_container: ElementTree.Elem long_description=long_description) @property - def named_containers(self): - """Property accessor that returns the dict cache of SequenceContainer objecs""" + def named_containers(self) -> Dict[str, SequenceContainer]: + """Property accessor that returns the dict cache of SequenceContainer objects""" return self._sequence_container_cache + @property + def named_parameters(self) -> Dict[str, Parameter]: + """Property accessor that returns the dict cache of Parameter objects""" + return self._parameter_cache + + @property + def named_parameter_types(self) -> Dict[str, ParameterType]: + """Property accessor that returns the dict cache of ParameterType objects""" + return self._parameter_type_cache + + # DEPRECATED! This is only used by CSV-parser code. 
Remove for 5.0.0 release
     @property
     def flattened_containers(self):
         """Accesses a flattened, generic representation of non-abstract packet definitions along with their
@@ -2116,6 +2544,8 @@ def flatten_container(sequence_container: SequenceContainer):
                     aggregated_entry_list.append(entry)
             return aggregated_entry_list, aggregated_restrictions
 
+        warnings.warn("The 'flattened_containers' property is deprecated to allow for dynamic container "
+                      "inheritance matching during parsing.", DeprecationWarning)
         return {
             name: FlattenedContainer(*flatten_container(sc))
             for name, sc in self._sequence_container_cache.items()
@@ -2123,17 +2553,17 @@ def flatten_container(sequence_container: SequenceContainer):
         }
 
     @property
-    def container_set(self):
+    def container_set(self) -> ElementTree.Element:
         """Property that returns the <xtce:ContainerSet> element, containing all the sequence container elements."""
         return self.tree.getroot().find('xtce:TelemetryMetaData/xtce:ContainerSet', self.ns)
 
     @property
-    def parameter_type_set(self):
+    def parameter_type_set(self) -> ElementTree.Element:
         """Property that returns the <xtce:ParameterTypeSet> element, containing all parameter type elements."""
         return self.tree.getroot().find('xtce:TelemetryMetaData/xtce:ParameterTypeSet', self.ns)
 
     @property
-    def parameter_set(self):
+    def parameter_set(self) -> ElementTree.Element:
         """Property that returns the <xtce:ParameterSet> element, containing all parameter elements."""
         return self.tree.getroot().find('xtce:TelemetryMetaData/xtce:ParameterSet', self.ns)
@@ -2206,7 +2636,9 @@ def _find_parameter_type(self, name: str) -> ElementTree.Element:
                                   f"Parameter type names are expected to exist and be unique."
         return matches[0]
 
-    def _get_container_base_container(self, container_element: ElementTree.Element) -> Tuple[ElementTree.Element, list]:
+    def _get_container_base_container(
+            self,
+            container_element: ElementTree.Element) -> Tuple[ElementTree.Element, List[MatchCriteria]]:
         """Examines the container_element and returns information about its inheritance.
 
         Parameters
@@ -2251,3 +2683,41 @@ def _get_container_base_container(self, container_element: ElementTree.Element)
         else:
             restrictions = []
         return self._find_container(base_container_element.attrib['containerRef']), restrictions
+
+
+def _extract_bits(data: bytes, start_bit: int, nbits: int):
+    """Extract nbits from the data, starting at bit offset start_bit counted from the most significant bit.
+
+    If data = 00110101 11001010, start_bit = 2, nbits = 9, then the bits extracted are "110101110".
+    Those bits are turned into a Python integer and returned.
+
+    Parameters
+    ----------
+    data : bytes
+        Data to extract bits from
+    start_bit : int
+        Starting bit location within the data
+    nbits : int
+        Number of bits to extract
+
+    Returns
+    -------
+    int
+        Extracted bits as an integer
+    """
+    # Get the bits from the packet data
+    # Select the bytes that contain the bits we want.
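+    # Worked example, mirroring the docstring above: for data = b'\x35\xca' (00110101 11001010),
+    # start_bit = 2, nbits = 9, the steps below give start_byte = 0, start_bit_within_byte = 2,
+    # end_byte = (2 + 9 + 7) // 8 = 2, value = 0x35CA, and finally
+    # (value >> (16 - 2 - 9)) & 0x1FF == 0b110101110, matching the docstring result.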
+ start_byte = start_bit // 8 # Byte index containing the start_bit + start_bit_within_byte = start_bit % 8 # Bit index within the start_byte + end_byte = start_byte + (start_bit_within_byte + nbits + 7) // 8 + data = data[start_byte:end_byte] # Chunk of bytes containing the data item we want to parse + # Convert the bytes to an integer for bitwise operations + value = int.from_bytes(data, byteorder="big") + if start_bit_within_byte == 0 and nbits % 8 == 0: + # If we're extracting whole bytes starting at a byte boundary, we don't need any bitshifting + # This is faster, especially for large binary chunks + return value + + # Shift the value to the right to move the LSB of the data item we want to parse + # to the least significant position, then mask out the number of bits we want to keep + return (value >> (len(data) * 8 - start_bit_within_byte - nbits)) & (2 ** nbits - 1) diff --git a/tests/conftest.py b/tests/conftest.py index 6c517ca..47b9b65 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,10 +1,13 @@ """Test fixtures""" -# Standard library +# Standard from pathlib import Path import sys -# External modules +# Installed import pytest +XTCE_URI = "http://www.omg.org/space/xtce" +TEST_NAMESPACE = {'xtce': XTCE_URI} + @pytest.fixture def test_data_dir(): diff --git a/tests/integration/test_bufferedreader_parsing.py b/tests/integration/test_bufferedreader_parsing.py index 9d0416a..e1d79ed 100644 --- a/tests/integration/test_bufferedreader_parsing.py +++ b/tests/integration/test_bufferedreader_parsing.py @@ -1,6 +1,4 @@ """Integration test for parsing JPSS packets""" -# Installed -import bitstring # Local from space_packet_parser import xtcedef from space_packet_parser import parser diff --git a/tests/integration/test_csv_based_parsing/test_ctim_parsing.py b/tests/integration/test_csv_based_parsing/test_ctim_parsing.py index 464bcd4..195af19 100644 --- a/tests/integration/test_csv_based_parsing/test_ctim_parsing.py +++ b/tests/integration/test_csv_based_parsing/test_ctim_parsing.py @@ -1,6 +1,4 @@ """Test parsing of CTIM packet data""" -# Installed -import bitstring # Local from space_packet_parser import csvdef, parser @@ -11,12 +9,10 @@ def test_csv_packet_definition_parsing(ctim_test_data_dir): csv_pkt_def = csvdef.CsvPacketDefinition(test_csv_file) test_packet_file = ctim_test_data_dir / 'ccsds_2021_155_14_39_51' - pkt_binary_data = bitstring.ConstBitStream(filename=test_packet_file) - parser_inst = parser.PacketParser(csv_pkt_def) - pkt_gen = parser_inst.generator(pkt_binary_data, show_progress=True) - - packets = list(pkt_gen) + with open(test_packet_file, 'rb') as pkt_file: + pkt_gen = parser_inst.generator(pkt_file, show_progress=True) + packets = list(pkt_gen) assert(len(packets) == 1499) assert(packets[159].header['PKT_APID'].raw_value == 34) diff --git a/tests/integration/test_socket_parsing.py b/tests/integration/test_socket_parsing.py index 1e3f7f4..83ff589 100644 --- a/tests/integration/test_socket_parsing.py +++ b/tests/integration/test_socket_parsing.py @@ -5,7 +5,6 @@ import socket import time # Installed -import bitstring import pytest # Local @@ -25,19 +24,20 @@ def send_data(sender: socket.socket, file: str): """ # Read binary file with open(file, 'rb') as fh: - stream = bitstring.ConstBitStream(fh) - while stream.pos < len(stream): + stream = fh.read() + pos = 0 + while pos < len(stream): time.sleep(random.random() * .1) # Random sleep up to 1s # Send binary data to socket in random chunk sizes min_n_bytes = 4096 max_n_bytes = 4096*2 random_n_bytes = 
int(random.random()) * (max_n_bytes - min_n_bytes) - n_bits_to_send = 8 * (min_n_bytes + random_n_bytes) - if stream.pos + n_bits_to_send > len(stream): - n_bits_to_send = len(stream) - stream.pos - chunk_to_send = stream[stream.pos:stream.pos + n_bits_to_send] - sender.send(chunk_to_send.bytes) - stream.pos += n_bits_to_send + n_bytes_to_send = 8 * (min_n_bytes + random_n_bytes) + if pos + n_bytes_to_send > len(stream): + n_bytes_to_send = len(stream) - pos + chunk_to_send = stream[pos:pos + n_bytes_to_send] + sender.send(chunk_to_send) + pos += n_bytes_to_send print("\nFinished sending data.") diff --git a/tests/integration/test_xtce_based_parsing/test_inheritance_restrictions.py b/tests/integration/test_xtce_based_parsing/test_inheritance_restrictions.py index a26dd8b..0fcfb77 100644 --- a/tests/integration/test_xtce_based_parsing/test_inheritance_restrictions.py +++ b/tests/integration/test_xtce_based_parsing/test_inheritance_restrictions.py @@ -1,6 +1,4 @@ """Test RestrictionCriteria being used creatively with JPSS data""" -# Installed -import bitstring # Local from space_packet_parser import xtcedef from space_packet_parser import parser diff --git a/tests/integration/test_xtce_based_parsing/test_jpss_parsing.py b/tests/integration/test_xtce_based_parsing/test_jpss_parsing.py index 7027dd9..7f1d44d 100644 --- a/tests/integration/test_xtce_based_parsing/test_jpss_parsing.py +++ b/tests/integration/test_xtce_based_parsing/test_jpss_parsing.py @@ -21,5 +21,7 @@ def test_jpss_xtce_packet_parsing(jpss_test_data_dir): assert isinstance(jpss_packet, parser.Packet) assert jpss_packet.header['PKT_APID'].raw_value == 11 assert jpss_packet.header['VERSION'].raw_value == 0 + assert jpss_packet.data['USEC'].short_description == "Secondary Header Fine Time (microsecond)" + assert jpss_packet.data['USEC'].long_description == "CCSDS Packet 2nd Header Fine Time in microseconds." n_packets += 1 assert n_packets == 7200 diff --git a/tests/integration/test_xtce_based_parsing/test_suda_parsing.py b/tests/integration/test_xtce_based_parsing/test_suda_parsing.py index 43009a3..7b75876 100644 --- a/tests/integration/test_xtce_based_parsing/test_suda_parsing.py +++ b/tests/integration/test_xtce_based_parsing/test_suda_parsing.py @@ -3,8 +3,6 @@ The packet definition used here is intended for IDEX, which is basically a rebuild of the SUDA instrument. The data used here is SUDA data but the fields are parsed using IDEX naming conventions. """ -# Installed -import bitstring # Local from space_packet_parser import xtcedef from space_packet_parser import parser @@ -12,21 +10,26 @@ def parse_hg_waveform(waveform_raw: str): """Parse a binary string representing a high gain waveform""" - w = bitstring.ConstBitStream(bin=waveform_raw) ints = [] - while w.pos < len(w): - w.read('bits:2') # skip 2. 
We use bits instead of pad for bitstring 3.0.0 compatibility - ints += w.readlist(['uint:10']*3) + for i in range(0, len(waveform_raw), 32): + # 32 bit chunks, divided up into 2, 10, 10, 10 + # skip first two bits + ints += [ + int(waveform_raw[i + 2 : i + 12], 2), + int(waveform_raw[i + 12 : i + 22], 2), + int(waveform_raw[i + 22 : i + 32], 2), + ] return ints def parse_lg_waveform(waveform_raw: str): """Parse a binary string representing a low gain waveform""" - w = bitstring.ConstBitStream(bin=waveform_raw) ints = [] - while w.pos < len(w): - w.read('bits:8') # skip 2 - ints += w.readlist(['uint:12']*2) + for i in range(0, len(waveform_raw), 32): + ints += [ + int(waveform_raw[i + 8 : i + 20], 2), + int(waveform_raw[i + 20 : i + 32], 2), + ] return ints diff --git a/tests/test_data/test_xtce.xml b/tests/test_data/test_xtce.xml new file mode 100644 index 0000000..fe2a4d4 --- /dev/null +++ b/tests/test_data/test_xtce.xml @@ -0,0 +1,207 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + day + + + + + + ms + + + + + + us + + + + + + + + + + day + + + + + + ms + + + + + + us + + + + + + m + + + + + + m/s + + + + + + + + + + + Not really used. We aren't changing the version of CCSDS that we use. + + + Indicates whether this packet is CMD or TLM. TLM is 0. + + + Always 1 - indicates that there is a secondary header. + + + Unique to each packet type. + + + Always set to 1. + + + Increments from 0 at reset for each packet issued of that APID. Rolls over at 14b. + + + Number of bytes of the data field following the primary header -1. (To get the length of the whole packet, add 7) + + + CCSDS Packet 2nd Header Day of Year in days. + + + CCSDS Packet 2nd Header Coarse Time in milliseconds. + + + CCSDS Packet 2nd Header Fine Time in microseconds. 
+ + + + + + + + + + + + + + + + + + + + + + Super-container for telemetry and command packets + + + + + + + + + + + + Super-container for all telemetry packets + + + + + + + + + + + + Container for telemetry secondary header items + + + + + + + + Spacecraft Attitude and Ephemeris packet used to geolocate mission data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/unit/test_csvdef.py b/tests/unit/test_csvdef.py index daf2570..58419dd 100644 --- a/tests/unit/test_csvdef.py +++ b/tests/unit/test_csvdef.py @@ -1,6 +1,4 @@ """Tests for the CSV based packet definition""" -# Installed -import bitstring import pytest # Local from space_packet_parser import csvdef, xtcedef, parser @@ -56,10 +54,8 @@ def test_csv_packet_definition(ctim_test_data_dir): assert isinstance(csv_pkt_def, CsvPacketDefinition) test_packet_file = ctim_test_data_dir / 'ccsds_2021_155_14_39_51' - pkt_binary_data = bitstring.ConstBitStream(filename=test_packet_file) - - parser_inst = parser.PacketParser(csv_pkt_def) - pkt_gen = parser_inst.generator(pkt_binary_data) - - packet = next(pkt_gen) + with open(test_packet_file, 'rb') as pkt_file: + parser_inst = parser.PacketParser(csv_pkt_def) + pkt_gen = parser_inst.generator(pkt_file, show_progress=True) + packet = next(pkt_gen) assert isinstance(packet, parser.Packet) diff --git a/tests/unit/test_parser.py b/tests/unit/test_parser.py index 683bda8..c1a97ae 100644 --- a/tests/unit/test_parser.py +++ b/tests/unit/test_parser.py @@ -1,24 +1,23 @@ """Tests for space_packet_parser.parser""" # Installed -import bitstring import pytest # Local from space_packet_parser import parser @pytest.mark.parametrize( - ('name', 'raw_value', 'unit', 'derived_value', 'valid'), + ('name', 'raw_value', 'unit', 'derived_value', 'short_description', 'long_description', 'valid'), [ - ('TEST', 0, 'smoots', 10, True), - ('TEST', 10, None, None, True), - (None, 10, 'foo', 10, False), - ('TEST', None, None, None, False) + ('TEST', 0, 'smoots', 10, "short", "long", True), + ('TEST', 10, None, None, None, None, True), + (None, 10, 'foo', 10, None, None, False), + ('TEST', None, None, None, None, None, False) ] ) -def test_parsed_data_item(name, raw_value, unit, derived_value, valid): +def test_parsed_data_item(name, raw_value, unit, derived_value, short_description, long_description, valid): """Test ParsedDataItem""" if valid: - pdi = parser.ParsedDataItem(name, raw_value, unit, derived_value) + pdi = parser.ParsedDataItem(name, raw_value, unit, derived_value, short_description, long_description) else: with pytest.raises(ValueError): - pdi = parser.ParsedDataItem(name, raw_value, unit, derived_value) + pdi = parser.ParsedDataItem(name, raw_value, unit, derived_value, short_description, long_description) diff --git a/tests/unit/test_xtcedef.py b/tests/unit/test_xtcedef.py index ca3215b..f275493 100644 --- a/tests/unit/test_xtcedef.py +++ b/tests/unit/test_xtcedef.py @@ -1,11 +1,88 @@ """Tests for space_packet_parser.xtcedef""" -import bitstring +# Standard +from io import StringIO +# Installed import pytest import lxml.etree as ElementTree - +# Local from space_packet_parser import xtcedef, parser -TEST_NAMESPACE = {'xtce': 'http://www.omg.org/space/xtce'} +XTCE_URI = "http://www.omg.org/space/xtce" +TEST_NAMESPACE = {'xtce': XTCE_URI} + + +def test_invalid_parameter_type_error(test_data_dir): + """Test proper reporting of an invalid parameter type element""" + # Test document contains an invalid "InvalidParameterType" element + test_xtce_document = """ + + + + + + + + + + + + 
+ + + + + + + + + +""" + x = StringIO(test_xtce_document) + with pytest.raises(xtcedef.InvalidParameterTypeError): + xtcedef.XtcePacketDefinition(x, ns=TEST_NAMESPACE) + + +def test_unsupported_parameter_type_error(test_data_dir): + """Test proper reporting of an unsupported parameter type element""" + # Test document contains an unsupported array parameter type that is not yet implemented + test_xtce_document = """ + + + + + + + + + 0 + + + 4 + + + + + + + + + + + + + + + + + +""" + x = StringIO(test_xtce_document) + with pytest.raises(NotImplementedError): + xtcedef.XtcePacketDefinition(x, ns=TEST_NAMESPACE) def test_attr_comparable(): @@ -387,7 +464,51 @@ def test_discrete_lookup(xml_string, test_parsed_data, expected_lookup_result): xtcedef.PolynomialCoefficient(coefficient=-0.045, exponent=2), xtcedef.PolynomialCoefficient(coefficient=1.25, exponent=3), xtcedef.PolynomialCoefficient(coefficient=0.0025, exponent=4) - ]))) + ]))), + (""" + + + + + + + == + 100 + + + + != + 99 + + + + + + + + + + + +""", + xtcedef.ContextCalibrator( + match_criteria=[ + xtcedef.BooleanExpression( + expression=xtcedef.Anded( + conditions=[ + xtcedef.Condition(left_param='P1', operator='==', right_value='100', + right_use_calibrated_value=False), + xtcedef.Condition(left_param='P4', operator='!=', right_value='99', + right_use_calibrated_value=False) + ], + ors=[] + ) + ), + ], + calibrator=xtcedef.PolynomialCalibrator(coefficients=[ + xtcedef.PolynomialCoefficient(coefficient=0.5, exponent=0), + xtcedef.PolynomialCoefficient(coefficient=1.5, exponent=1), + ]))), ] ) def test_context_calibrator(xml_string, expectation): @@ -398,6 +519,90 @@ def test_context_calibrator(xml_string, expectation): assert result == expectation +@pytest.mark.parametrize( + ('context_calibrator', 'parsed_data', 'parsed_value', 'match_expectation', 'expectation'), + [ + (xtcedef.ContextCalibrator( + match_criteria=[ + xtcedef.Comparison(required_value='678', referenced_parameter='EXI__FPGAT', operator='>=', + use_calibrated_value=True), + xtcedef.Comparison(required_value='4096', referenced_parameter='EXI__FPGAT', operator='<', + use_calibrated_value=True), + ], + calibrator=xtcedef.PolynomialCalibrator(coefficients=[ + xtcedef.PolynomialCoefficient(coefficient=0.5, exponent=0), + xtcedef.PolynomialCoefficient(coefficient=1.5, exponent=1) + ])), + {"EXI__FPGAT": parser.ParsedDataItem("EXI__FPGAT", 600, derived_value=700)}, + 42, True, 63.5), + (xtcedef.ContextCalibrator( + match_criteria=[ + xtcedef.Comparison(required_value='3.14', referenced_parameter='EXI__FPGAT', operator='!=', + use_calibrated_value=True), + ], + calibrator=xtcedef.PolynomialCalibrator(coefficients=[ + xtcedef.PolynomialCoefficient(coefficient=0.5, exponent=0), + xtcedef.PolynomialCoefficient(coefficient=1.5, exponent=1), + ])), + {"EXI__FPGAT": parser.ParsedDataItem("EXI__FPGAT", 3.14, derived_value=700.0)}, + 42, True, 63.5), + (xtcedef.ContextCalibrator( + match_criteria=[ + xtcedef.BooleanExpression( + expression=xtcedef.Anded( + conditions=[ + xtcedef.Condition(left_param='P1', operator='==', right_value='700', + right_use_calibrated_value=False), + xtcedef.Condition(left_param='P2', operator='!=', right_value='99', + right_use_calibrated_value=False) + ], + ors=[] + ) + ), + ], + calibrator=xtcedef.PolynomialCalibrator(coefficients=[ + xtcedef.PolynomialCoefficient(coefficient=0.5, exponent=0), + xtcedef.PolynomialCoefficient(coefficient=1.5, exponent=1), + ])), + {"P1": parser.ParsedDataItem("P1", 100.0, derived_value=700.0), + "P2": 
parser.ParsedDataItem("P2", 99, derived_value=700.0)}, + 42, True, 63.5), + (xtcedef.ContextCalibrator( + match_criteria=[ + xtcedef.BooleanExpression( + expression=xtcedef.Ored( + conditions=[ # Neither of these are true given the parsed data so far + xtcedef.Condition(left_param='P1', operator='==', right_value='700', + left_use_calibrated_value=False, + right_use_calibrated_value=False), + xtcedef.Condition(left_param='P2', operator='!=', right_value='700', + right_use_calibrated_value=False) + ], + ands=[] + ) + ), + ], + calibrator=xtcedef.PolynomialCalibrator(coefficients=[ + xtcedef.PolynomialCoefficient(coefficient=0.5, exponent=0), + xtcedef.PolynomialCoefficient(coefficient=1.5, exponent=1), + ])), + {"P1": parser.ParsedDataItem("P1", 100.0, derived_value=700.0), + "P2": parser.ParsedDataItem("P2", 99, derived_value=700.0)}, + 42, False, 63.5), + ] +) +def test_context_calibrator_calibrate(context_calibrator, parsed_data, parsed_value, match_expectation, expectation): + """Test context calibrator calibration""" + # Check if the context match is True or False given the parsed data so far + match = all(criterion.evaluate(parsed_data, parsed_value) for criterion in context_calibrator.match_criteria) + if match_expectation: + assert match + else: + assert not match + # Regardless of the context match, we still test the hypothetical result if the calibrator is evaluated + assert context_calibrator.calibrate(parsed_value) == expectation + + @pytest.mark.parametrize( ('xml_string', 'expectation'), [ @@ -623,6 +828,10 @@ def test_string_data_encoding(xml_string: str, expectation): [ (""" +""", + xtcedef.IntegerDataEncoding(size_in_bits=4, encoding='unsigned')), + (""" + """, xtcedef.IntegerDataEncoding(size_in_bits=4, encoding='unsigned')), (""" @@ -714,7 +923,7 @@ def test_integer_data_encoding(xml_string: str, expectation): (""" """, - xtcedef.FloatDataEncoding(size_in_bits=4, encoding='IEEE-754')), + ValueError()), (""" @@ -926,11 +1135,11 @@ def test_string_parameter_type(xml_string: str, expectation): # Fixed length test (xtcedef.StringParameterType( 'TEST_STRING', - xtcedef.StringDataEncoding(fixed_length=3, + xtcedef.StringDataEncoding(fixed_length=3, # Giving length in bytes length_linear_adjuster=lambda x: 8*x)), {}, # Don't need parsed_data for leading length parsing - # This is still 123X456 but with 011 prepended (a 3-bit representation of the number 3) - '0b00110001001100100011001101011000001101000011010100110110', + # This still 123X456 + xtcedef.PacketData(b'123X456'), '123'), # Dynamic reference length (xtcedef.StringParameterType( @@ -939,7 +1148,7 @@ def test_string_parameter_type(xml_string: str, expectation): use_calibrated_value=False, length_linear_adjuster=lambda x: 8*x)), {'STR_LEN': parser.ParsedDataItem('STR_LEN', 8, None)}, - '0b01000010010000010100010000100000010101110100111101001100010001100100011101000001010100100100001001000001', + xtcedef.PacketData(b'BAD WOLF'), 'BAD WOLF'), # Discrete lookup test (xtcedef.StringParameterType( @@ -951,7 +1160,7 @@ def test_string_parameter_type(xml_string: str, expectation): ], lookup_value=8) ], length_linear_adjuster=lambda x: 8*x)), {'P1': parser.ParsedDataItem('P1', 7, None, 7.55), 'P2': parser.ParsedDataItem('P2', 99, None, 100)}, - '0b01000010010000010100010000100000010101110100111101001100010001100100011101000001010100100100001001000001', + xtcedef.PacketData(b'BAD WOLF'), 'BAD WOLF'), # Termination character tests (xtcedef.StringParameterType( @@ -959,37 +1168,69 @@ def test_string_parameter_type(xml_string: str, 
expectation): xtcedef.StringDataEncoding(encoding='utf-8', termination_character='58')), {}, # Don't need parsed_data for termination character - # 123X456, termination character is X - '0b00110001001100100011001101011000001101000011010100110110', + # 123X456 + extra characters, termination character is X + xtcedef.PacketData(b'123X456000000000000000000000000000000000000000000000'), '123'), + (xtcedef.StringParameterType( + 'TEST_STRING', + xtcedef.StringDataEncoding(encoding='utf-8', + termination_character='58')), + {}, # Don't need parsed_data for termination character + # 56bits + 123X456 + extra characters, termination character is X + xtcedef.PacketData(b'9090909123X456000000000000000000000000000000000000000000000', pos=56), + '123'), + (xtcedef.StringParameterType( + 'TEST_STRING', + xtcedef.StringDataEncoding(encoding='utf-8', + termination_character='58')), + {}, # Don't need parsed_data for termination character + # 53bits + 123X456 + extra characters, termination character is X + # This is the same string as above but bit-shifted left by 3 bits + xtcedef.PacketData(b'\x03K;s{\x93)\x89\x91\x9a\xc1\xa1\xa9\xb3K;s{\x93(', pos=53), + '123'), + (xtcedef.StringParameterType( + "TEST_STRING", + xtcedef.StringDataEncoding(encoding="utf-8", + termination_character='00')), + {}, + xtcedef.PacketData("false_is_truthy".encode("utf-8") + b'\x00ABCD'), + 'false_is_truthy'), + (xtcedef.StringParameterType( + "TEST_STRING", + xtcedef.StringDataEncoding(encoding="utf-16-be", + termination_character='0021')), + {}, + xtcedef.PacketData("false_is_truthy".encode("utf-16-be") + b'\x00\x21ignoreme'), + 'false_is_truthy'), (xtcedef.StringParameterType( 'TEST_STRING', xtcedef.StringDataEncoding(encoding='utf-16-le', termination_character='5800')), {}, # Don't need parsed_data for termination character # 123X456, termination character is X - '0b0011000100000000001100100000000000110011000000000101100000000000001101000000000000110101000000000011011000000000', + xtcedef.PacketData('123X456'.encode('utf-16-le')), '123'), (xtcedef.StringParameterType( 'TEST_STRING', xtcedef.StringDataEncoding(encoding='utf-16-be', termination_character='0058')), {}, # Don't need parsed_data for termination character - '0b0000000000110001000000000011001000000000001100110000000001011000000000000011010000000000001101010000000000110110', + xtcedef.PacketData('123X456'.encode('utf-16-be')), '123'), # Leading length test (xtcedef.StringParameterType( 'TEST_STRING', xtcedef.StringDataEncoding(leading_length_size=5)), {}, # Don't need parsed_data for leading length parsing - # This is still 123X456 but with 011 prepended (a 3-bit representation of the number 3) - '0b1100000110001001100100011001101011000001101000011010100110110', + # This is still 123X456 but with 11000 prepended (a 5-bit representation of the number 24) + # This represents a string length (in bits) of 24 bits. 
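+            # i.e. the 5-bit length prefix 11000, then the 56 bits of '123X456', then three
+            # trailing zero bits of padding to fill the final byte of the 8-byte value below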
+ xtcedef.PacketData(0b1100000110001001100100011001101011000001101000011010100110110000.to_bytes(8, byteorder="big")), '123'), ] ) def test_string_parameter_parsing(parameter_type, parsed_data, packet_data, expected): """Test parsing a string parameter""" - raw, _ = parameter_type.parse_value(bitstring.ConstBitStream(packet_data), parsed_data) + raw, _ = parameter_type.parse_value(packet_data, parsed_data) assert raw == expected @@ -1075,10 +1316,18 @@ def test_integer_parameter_type(xml_string: str, expectation): @pytest.mark.parametrize( ('parameter_type', 'parsed_data', 'packet_data', 'expected'), [ + # 16-bit unsigned starting at byte boundary (xtcedef.IntegerParameterType('TEST_INT', xtcedef.IntegerDataEncoding(16, 'unsigned')), - {}, '0b1000000000000000', 32768), + {}, + xtcedef.PacketData(0b1000000000000000.to_bytes(length=2, byteorder='big')), + 32768), + # 16-bit signed starting at byte boundary (xtcedef.IntegerParameterType('TEST_INT', xtcedef.IntegerDataEncoding(16, 'signed')), - {}, '0b1111111111010110', -42), + {}, + xtcedef.PacketData(0b1111111111010110.to_bytes(length=2, byteorder='big')), + -42), + # 16-bit signed integer starting at a byte boundary, + # calibrated by a polynomial y = (x*2 + 5); x = -42; y = -84 + 5 = -79 (xtcedef.IntegerParameterType( 'TEST_INT', xtcedef.IntegerDataEncoding( @@ -1091,12 +1340,57 @@ def test_integer_parameter_type(xml_string: str, expectation): xtcedef.PolynomialCalibrator([xtcedef.PolynomialCoefficient(5, 0), xtcedef.PolynomialCoefficient(2, 1)])) ])), - {'PKT_APID': parser.ParsedDataItem('PKT_APID', 1101)}, '0b1111111111010110', -79), + {'PKT_APID': parser.ParsedDataItem('PKT_APID', 1101)}, + xtcedef.PacketData(0b1111111111010110.to_bytes(length=2, byteorder='big')), + -79), + # 12-bit unsigned integer starting at bit 4 of the first byte + (xtcedef.IntegerParameterType('TEST_INT', xtcedef.IntegerDataEncoding(12, 'unsigned')), + {}, + # 11111000 00000000 + # |--uint:12--| + xtcedef.PacketData(0b1111100000000000.to_bytes(length=2, byteorder='big'), pos=4), + 2048), + # 13-bit unsigned integer starting on bit 2 of the second byte + (xtcedef.IntegerParameterType('TEST_INT', xtcedef.IntegerDataEncoding(13, 'unsigned')), + {}, + # 10101010 11100000 00000001 + # |--uint:13---| + xtcedef.PacketData(0b101010101110000000000001.to_bytes(length=3, byteorder='big'), pos=10), + 4096), + # 16-bit unsigned integer starting on bit 2 of the first byte + (xtcedef.IntegerParameterType('TEST_INT', xtcedef.IntegerDataEncoding(16, 'unsigned')), + {}, + # 10101010 11100000 00000001 + # |----uint:16-----| + xtcedef.PacketData(0b101010101110000000000001.to_bytes(length=3, byteorder='big'), pos=2), + 43904), + # 12-bit signed integer starting on bit 4 of the first byte + (xtcedef.IntegerParameterType('TEST_INT', xtcedef.IntegerDataEncoding(12, 'signed')), + {}, + # 11111000 00000000 + # |---int:12--| + xtcedef.PacketData(0b1111100000000000.to_bytes(length=2, byteorder='big'), pos=4), + -2048), + # 12-bit signed integer starting on bit 6 of the first byte + (xtcedef.IntegerParameterType('TEST_INT', xtcedef.IntegerDataEncoding(12, 'signed')), + {}, + # 12-bit signed integer starting on bit 4 of the first byte + # 11111110 00000000 00111111 10101010 + # |---int:12---| + xtcedef.PacketData(0b11111110000000000011111110101010.to_bytes(length=4, byteorder='big'), pos=6), + -2048), + (xtcedef.IntegerParameterType('TEST_INT', xtcedef.IntegerDataEncoding(3, 'twosComplement')), + {}, + # 3-bit signed integer starting at bit 7 of the first byte + # 00000001 11000000 00000000 + 
# |-int:3-| + xtcedef.PacketData(0b000000011100000000000000.to_bytes(length=3, byteorder='big'), pos=7), + -1), ] ) def test_integer_parameter_parsing(parameter_type, parsed_data, packet_data, expected): """Testing parsing an integer parameters""" - raw, derived = parameter_type.parse_value(bitstring.ConstBitStream(packet_data), parsed_data) + raw, derived = parameter_type.parse_value(packet_data, parsed_data) if derived: assert derived == expected else: @@ -1107,6 +1401,16 @@ def test_integer_parameter_parsing(parameter_type, parsed_data, packet_data, exp ('xml_string', 'expectation'), [ (""" + + + smoot + + + +""", + xtcedef.FloatParameterType(name='TEST_INT_Type', unit='smoot', + encoding=xtcedef.FloatDataEncoding(size_in_bits=16, encoding='IEEE-754'))), + (""" smoot @@ -1186,7 +1490,13 @@ def test_float_parameter_type(xml_string: str, expectation): ('parameter_type', 'parsed_data', 'packet_data', 'expected'), [ (xtcedef.FloatParameterType('TEST_FLOAT', xtcedef.FloatDataEncoding(32)), - {}, '0b01000000010010010000111111010000', 3.14159), + {}, + xtcedef.PacketData(0b01000000010010010000111111010000.to_bytes(length=4, byteorder='big')), + 3.14159), + (xtcedef.FloatParameterType('TEST_FLOAT', xtcedef.FloatDataEncoding(64)), + {}, + xtcedef.PacketData(b'\x3F\xF9\xE3\x77\x9B\x97\xF4\xA8'), # 64-bit IEEE 754 value of Phi + 1.61803), (xtcedef.FloatParameterType( 'TEST_FLOAT', xtcedef.IntegerDataEncoding( @@ -1199,12 +1509,14 @@ def test_float_parameter_type(xml_string: str, expectation): xtcedef.PolynomialCalibrator([xtcedef.PolynomialCoefficient(5.6, 0), xtcedef.PolynomialCoefficient(2.1, 1)])) ])), - {'PKT_APID': parser.ParsedDataItem('PKT_APID', 1101)}, '0b1111111111010110', -82.600000), + {'PKT_APID': parser.ParsedDataItem('PKT_APID', 1101)}, + xtcedef.PacketData(0b1111111111010110.to_bytes(length=2, byteorder='big')), + -82.600000), ] ) def test_float_parameter_parsing(parameter_type, parsed_data, packet_data, expected): """Test parsing float parameters""" - raw, derived = parameter_type.parse_value(bitstring.ConstBitStream(packet_data), parsed_data) + raw, derived = parameter_type.parse_value(packet_data, parsed_data) if derived: # NOTE: These results are rounded due to the imprecise storage of floats assert round(derived, 5) == expected @@ -1224,12 +1536,14 @@ def test_float_parameter_parsing(parameter_type, parsed_data, packet_data, expec + """, xtcedef.EnumeratedParameterType(name='TEST_ENUM_Type', encoding=xtcedef.IntegerDataEncoding(size_in_bits=2, encoding='unsigned'), - enumeration={'BOOT_POR': 0, 'BOOT_RETURN': 1, 'OP_LOW': 2, 'OP_HIGH': 3})), + # NOTE: Duplicate final value is on purpose to make sure we handle that case + enumeration={0: 'BOOT_POR', 1: 'BOOT_RETURN', 2: 'OP_LOW', 3: 'OP_HIGH', 4: 'OP_HIGH'})), ] ) def test_enumerated_parameter_type(xml_string: str, expectation): @@ -1247,17 +1561,23 @@ def test_enumerated_parameter_type(xml_string: str, expectation): @pytest.mark.parametrize( ('parameter_type', 'parsed_data', 'packet_data', 'expected'), [ - (xtcedef.EnumeratedParameterType('TEST_ENUM', xtcedef.IntegerDataEncoding(16, 'unsigned'), {'NOMINAL': 32768}), - {}, '0b1000000000000000', 'NOMINAL'), + (xtcedef.EnumeratedParameterType( + 'TEST_ENUM', + xtcedef.IntegerDataEncoding(16, 'unsigned'), {32768: 'NOMINAL'}), + {}, + xtcedef.PacketData(0b1000000000000000.to_bytes(length=2, byteorder='big')), + 'NOMINAL'), (xtcedef.EnumeratedParameterType( 'TEST_FLOAT', - xtcedef.IntegerDataEncoding(16, 'signed'), {'VAL_LOW': -42}), - {}, '0b1111111111010110', 'VAL_LOW'), + 
xtcedef.IntegerDataEncoding(16, 'signed'), {-42: 'VAL_LOW'}), + {}, + xtcedef.PacketData(0b1111111111010110.to_bytes(length=2, byteorder='big')), + 'VAL_LOW'), ] ) def test_enumerated_parameter_parsing(parameter_type, parsed_data, packet_data, expected): """"Test parsing enumerated parameters""" - raw, derived = parameter_type.parse_value(bitstring.ConstBitStream(packet_data), parsed_data) + raw, derived = parameter_type.parse_value(packet_data, parsed_data) if derived: # NOTE: These results are rounded due to the imprecise storage of floats assert derived == expected @@ -1349,7 +1669,7 @@ def test_binary_parameter_type(xml_string: str, expectation): 'TEST_BIN', xtcedef.BinaryDataEncoding(fixed_size_in_bits=16)), {}, - '0b0011010000110010010100110000000001001011000000000100100100000000', + xtcedef.PacketData(0b0011010000110010010100110000000001001011000000000100100100000000.to_bytes(length=8, byteorder='big')), '0011010000110010'), # discrete lookup list size (xtcedef.BinaryParameterType( @@ -1361,7 +1681,7 @@ def test_binary_parameter_type(xml_string: str, expectation): ], lookup_value=2) ], linear_adjuster=lambda x: 8*x)), {'P1': parser.ParsedDataItem('P1', 1, None, 7.4)}, - '0b0011010000110010010100110000000001001011000000000100100100000000', + xtcedef.PacketData(0b0011010000110010010100110000000001001011000000000100100100000000.to_bytes(length=8, byteorder='big')), '0011010000110010'), # dynamic size reference to other parameter (xtcedef.BinaryParameterType( @@ -1369,16 +1689,268 @@ def test_binary_parameter_type(xml_string: str, expectation): xtcedef.BinaryDataEncoding(size_reference_parameter='BIN_LEN', use_calibrated_value=False, linear_adjuster=lambda x: 8*x)), {'BIN_LEN': parser.ParsedDataItem('BIN_LEN', 2, None)}, - '0b0011010000110010010100110000000001001011000000000100100100000000', + xtcedef.PacketData(0b0011010000110010010100110000000001001011000000000100100100000000.to_bytes(length=8, byteorder='big')), '0011010000110010'), ] ) def test_binary_parameter_parsing(parameter_type, parsed_data, packet_data, expected): """Test parsing binary parameters""" - raw, _ = parameter_type.parse_value(bitstring.ConstBitStream(packet_data), parsed_data) + raw, _ = parameter_type.parse_value(packet_data, parsed_data) assert raw == expected +@pytest.mark.parametrize( + ('xml_string', 'expectation'), + [ + (""" + + + smoot + + + + 1 + + + +""", + xtcedef.BooleanParameterType(name='TEST_PARAM_Type', unit='smoot', + encoding=xtcedef.BinaryDataEncoding(fixed_size_in_bits=1))), + (""" + + + smoot + + + +""", + xtcedef.BooleanParameterType(name='TEST_PARAM_Type', unit='smoot', + encoding=xtcedef.IntegerDataEncoding(size_in_bits=1, encoding="unsigned"))), + (""" + + + smoot + + + + 00 + + + +""", + xtcedef.BooleanParameterType(name='TEST_PARAM_Type', unit='smoot', + encoding=xtcedef.StringDataEncoding(termination_character='00'))), + ] +) +def test_boolean_parameter_type(xml_string, expectation): + """Test parsing a BooleanParameterType from an XML string""" + element = ElementTree.fromstring(xml_string) + + if isinstance(expectation, Exception): + with pytest.raises(type(expectation)): + xtcedef.BooleanParameterType.from_parameter_type_xml_element(element, TEST_NAMESPACE) + else: + result = xtcedef.BooleanParameterType.from_parameter_type_xml_element(element, TEST_NAMESPACE) + assert result == expectation + + +@pytest.mark.parametrize( + ('parameter_type', 'parsed_data', 'packet_data', 'expected_raw', 'expected_derived'), + [ + (xtcedef.BooleanParameterType( + 'TEST_BOOL', + 
xtcedef.BinaryDataEncoding(fixed_size_in_bits=1)), + {}, + xtcedef.PacketData(0b0011010000110010010100110000000001001011000000000100100100000000.to_bytes(length=64, byteorder='big')), + '0', True), + (xtcedef.BooleanParameterType( + 'TEST_BOOL', + xtcedef.StringDataEncoding(encoding="utf-8", termination_character='00')), + {}, + xtcedef.PacketData(0b011001100110000101101100011100110110010101011111011010010111001101011111011101000111001001110101011101000110100001111001000000000010101101010111.to_bytes(length=18, byteorder='big')), + 'false_is_truthy', True), + (xtcedef.BooleanParameterType( + 'TEST_BOOL', + xtcedef.IntegerDataEncoding(size_in_bits=2, encoding="unsigned")), + {}, + xtcedef.PacketData(0b0011.to_bytes(length=1, byteorder='big')), + 0, False), + (xtcedef.BooleanParameterType( + 'TEST_BOOL', + xtcedef.IntegerDataEncoding(size_in_bits=2, encoding="unsigned")), + {}, + xtcedef.PacketData(0b00001111.to_bytes(length=1, byteorder='big'), pos=4), + 3, True), + (xtcedef.BooleanParameterType( + 'TEST_BOOL', + xtcedef.FloatDataEncoding(size_in_bits=16)), + {}, + xtcedef.PacketData(0b01010001010000001111111110000000.to_bytes(length=4, byteorder='big')), + 42.0, True), + (xtcedef.BooleanParameterType( + 'TEST_BOOL', + xtcedef.FloatDataEncoding(size_in_bits=16)), + {}, + xtcedef.PacketData(0b00000000101000101000000111111111.to_bytes(length=4, byteorder='big'), pos=7), + 42.0, True), + ] +) +def test_boolean_parameter_parsing(parameter_type, parsed_data, packet_data, expected_raw, expected_derived): + """Test parsing boolean parameters""" + raw, derived = parameter_type.parse_value(packet_data, parsed_data) + assert raw == expected_raw + assert derived == expected_derived + + +@pytest.mark.parametrize( + ('xml_string', 'expectation'), + [ + (""" + + + + + + + TAI + + +""", + xtcedef.AbsoluteTimeParameterType(name='TEST_PARAM_Type', unit='seconds', + encoding=xtcedef.IntegerDataEncoding(size_in_bits=32, encoding="unsigned"), + epoch="TAI", offset_from="MilliSeconds")), + (""" + + + + + + + 2009-10-10T12:00:00-05:00 + + +""", + xtcedef.AbsoluteTimeParameterType( + name='TEST_PARAM_Type', unit='s', + encoding=xtcedef.IntegerDataEncoding( + size_in_bits=32, encoding="unsigned", + default_calibrator=xtcedef.PolynomialCalibrator( + coefficients=[ + xtcedef.PolynomialCoefficient(0, 0), + xtcedef.PolynomialCoefficient(1E-6, 1) + ])), + epoch="2009-10-10T12:00:00-05:00", offset_from="MilliSeconds")), + (""" + + + + + +""", + xtcedef.AbsoluteTimeParameterType( + name='TEST_PARAM_Type', unit='s', + encoding=xtcedef.IntegerDataEncoding( + size_in_bits=32, encoding="unsigned", + default_calibrator=xtcedef.PolynomialCalibrator( + coefficients=[ + xtcedef.PolynomialCoefficient(1.31E-6, 1) + ])) + )), + (""" + + + + + +""", + xtcedef.AbsoluteTimeParameterType( + name='TEST_PARAM_Type', unit='s', + encoding=xtcedef.IntegerDataEncoding( + size_in_bits=32, encoding="unsigned", + default_calibrator=xtcedef.PolynomialCalibrator( + coefficients=[ + xtcedef.PolynomialCoefficient(147.884, 0), + xtcedef.PolynomialCoefficient(1, 1) + ])) + )), + (""" + + + + + +""", + xtcedef.AbsoluteTimeParameterType( + name='TEST_PARAM_Type', unit='s', + encoding=xtcedef.FloatDataEncoding( + size_in_bits=32, encoding="IEEE-754", + default_calibrator=xtcedef.PolynomialCalibrator( + coefficients=[ + xtcedef.PolynomialCoefficient(147.884, 0), + xtcedef.PolynomialCoefficient(1, 1) + ])) + )), + ] +) +def test_absolute_time_parameter_type(xml_string, expectation): + """Test parsing an AbsoluteTimeParameterType from an XML string.""" + 
element = ElementTree.fromstring(xml_string) + + if isinstance(expectation, Exception): + with pytest.raises(type(expectation)): + xtcedef.AbsoluteTimeParameterType.from_parameter_type_xml_element(element, TEST_NAMESPACE) + else: + result = xtcedef.AbsoluteTimeParameterType.from_parameter_type_xml_element(element, TEST_NAMESPACE) + assert result == expectation + + +@pytest.mark.parametrize( + ('parameter_type', 'parsed_data', 'packet_data', 'expected_raw', 'expected_derived'), + [ + (xtcedef.AbsoluteTimeParameterType(name='TEST_PARAM_Type', unit='seconds', + encoding=xtcedef.IntegerDataEncoding(size_in_bits=32, encoding="unsigned"), + epoch="TAI", offset_from="MilliSeconds"), + {}, + # Exactly 64 bits so neatly goes into a bytes object without padding + xtcedef.PacketData(0b0011010000110010010100110000000001001011000000000100100100000000.to_bytes(length=8, byteorder='big')), + 875713280, 875713280), + (xtcedef.AbsoluteTimeParameterType( + name='TEST_PARAM_Type', unit='s', + encoding=xtcedef.IntegerDataEncoding( + size_in_bits=32, encoding="unsigned", + default_calibrator=xtcedef.PolynomialCalibrator( + coefficients=[ + xtcedef.PolynomialCoefficient(0, 0), + xtcedef.PolynomialCoefficient(1E-6, 1) + ])), + epoch="2009-10-10T12:00:00-05:00", offset_from="MilliSeconds"), + {}, + # Exactly 64 bits so neatly goes into a bytes object without padding + xtcedef.PacketData(0b0011010000110010010100110000000001001011000000000100100100000000.to_bytes(length=8, byteorder='big')), + 875713280, 875.7132799999999), + (xtcedef.AbsoluteTimeParameterType( + name='TEST_PARAM_Type', unit='s', + encoding=xtcedef.FloatDataEncoding( + size_in_bits=32, encoding="IEEE-754", + default_calibrator=xtcedef.PolynomialCalibrator( + coefficients=[ + xtcedef.PolynomialCoefficient(147.884, 0), + xtcedef.PolynomialCoefficient(1, 1) + ]))), + {}, + # 65 bits, so we need a 9th byte with 7 bits of padding to hold it, + # which means we need to be starting at pos=7 + xtcedef.PacketData(0b01000000010010010000111111011011001001011000000000100100100000000.to_bytes(length=9, byteorder='big'), pos=7), + 3.1415927, 151.02559269999998), + ] +) +def test_absolute_time_parameter_parsing(parameter_type, parsed_data, packet_data, expected_raw, expected_derived): + raw, derived = parameter_type.parse_value(packet_data, parsed_data) + assert round(raw, 5) == round(expected_raw, 5) + # NOTE: derived values are rounded for comparison due to imprecise storage of floats + assert round(derived, 5) == round(expected_derived, 5) + + # --------------- # Parameter Tests # --------------- @@ -1388,4 +1960,100 @@ def test_parameter(): parameter_type=xtcedef.IntegerParameterType( name='TEST_INT_Type', unit='floops', - encoding=xtcedef.IntegerDataEncoding(size_in_bits=16, encoding='unsigned'))) + encoding=xtcedef.IntegerDataEncoding(size_in_bits=16, encoding='unsigned')), + short_description="Param short desc", + long_description="This is a long description of the parameter") + + +# ----------------------- +# Full XTCE Document Test +# ----------------------- +def test_parsing_xtce_document(test_data_dir): + """Tests parsing an entire XTCE document and makes assertions about the contents""" + with open(test_data_dir / "test_xtce.xml") as x: + xdef = xtcedef.XtcePacketDefinition(x, ns=TEST_NAMESPACE) + + # Test Parameter Types + ptname = "USEC_Type" + pt = xdef.named_parameter_types[ptname] + assert pt.name == ptname + assert pt.unit == "us" + assert isinstance(pt.encoding, xtcedef.IntegerDataEncoding) + + # Test Parameters + pname = "ADAET1DAY" # Named 
parameter + p = xdef.named_parameters[pname] + assert p.name == pname + assert p.short_description == "Ephemeris Valid Time, Days Since 1/1/1958" + assert p.long_description is None + + pname = "USEC" + p = xdef.named_parameters[pname] + assert p.name == pname + assert p.short_description == "Secondary Header Fine Time (microsecond)" + assert p.long_description == "CCSDS Packet 2nd Header Fine Time in microseconds." + + # Test Sequence Containers + scname = "SecondaryHeaderContainer" + sc = xdef.named_containers[scname] + assert sc.name == scname + assert sc == xtcedef.SequenceContainer( + name=scname, + entry_list=[ + xtcedef.Parameter( + name="DOY", + parameter_type=xtcedef.FloatParameterType( + name="DOY_Type", + encoding=xtcedef.IntegerDataEncoding( + size_in_bits=16, encoding="unsigned" + ), + unit="day" + ), + short_description="Secondary Header Day of Year", + long_description="CCSDS Packet 2nd Header Day of Year in days." + ), + xtcedef.Parameter( + name="MSEC", + parameter_type=xtcedef.FloatParameterType( + name="MSEC_Type", + encoding=xtcedef.IntegerDataEncoding( + size_in_bits=32, encoding="unsigned" + ), + unit="ms" + ), + short_description="Secondary Header Coarse Time (millisecond)", + long_description="CCSDS Packet 2nd Header Coarse Time in milliseconds." + ), + xtcedef.Parameter( + name="USEC", + parameter_type=xtcedef.FloatParameterType( + name="USEC_Type", + encoding=xtcedef.IntegerDataEncoding( + size_in_bits=16, encoding="unsigned" + ), + unit="us" + ), + short_description="Secondary Header Fine Time (microsecond)", + long_description="CCSDS Packet 2nd Header Fine Time in microseconds." + ) + ], + short_description=None, + long_description="Container for telemetry secondary header items", + base_container_name=None, + restriction_criteria=None, + abstract=True, + inheritors=None + ) + + +@pytest.mark.parametrize("start, nbits", [(0, 1), (0, 16), (0, 8), (0, 9), + (3, 5), (3, 8), (3, 13), + (7, 1), (7, 2), (7, 8), + (8, 1), (8, 8), (15, 1)]) +def test__extract_bits(start, nbits): + """Test the _extract_bits function with various start and nbits values""" + # Test extracting bits from a bitstream + s = '0000111100001111' + data = int(s, 2).to_bytes(2, byteorder="big") + + assert xtcedef._extract_bits(data, start, nbits) == int(s[start:start+nbits], 2) \ No newline at end of file
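
For orientation, a minimal usage sketch of the bitstring-free interface exercised by the tests in this changeset. This is an illustrative example rather than part of the diff: the file paths are placeholders, and the 'PKT_APID'/'USEC' field names assume a JPSS-style XTCE definition like the one used in test_jpss_parsing.py.

from space_packet_parser import parser, xtcedef

# The XTCE definition is now passed as an open (text) file object rather than a path string
with open("jpss_packet_definition.xml") as xtce_file:
    packet_definition = xtcedef.XtcePacketDefinition(xtce_file)

packet_parser = parser.PacketParser(packet_definition)

# Packet data is read from a plain binary file object; no bitstring.ConstBitStream is required
with open("jpss_packets.bin", "rb") as packet_file:
    for packet in packet_parser.generator(packet_file, show_progress=True):
        usec = packet.data["USEC"]
        # ParsedDataItem objects now carry the shortDescription/LongDescription text from the XTCE document
        print(packet.header["PKT_APID"].raw_value, usec.raw_value, usec.short_description)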