Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

factor out in src/reporter/tests/test_timescale_types.py #710 #713

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions RELEASE_NOTES.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@
- Replaced entity with getter (#652)
- Resolved TODO in Dockerfile (#680)
- Resolved TODO at src/reporter/tests/test_timescale_types.py (#667)
- Resolved TODO at src/wq/ql/flask_utils.py (#707)
- factor out in src/reporter/tests/test_timescale_types.py (#710)

### Bug fixes

Expand Down
40 changes: 6 additions & 34 deletions src/reporter/tests/test_timescale_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,40 +109,12 @@ def gen_entity(entity_id: int,
# Similar to gen_entity in test_timescale_insert module in translators.tests.


def entity_name_value_pairs(entity: dict) -> dict:
"""
Transform an NGSI entity ``e`` into the format::

{
entityId: e[id]
attr1: [ e[attr1][value] ]
...
attrN: [ e[attrN][value] ]
}
"""
eid = {'entityId': entity['id']}

attr_names = {k for k in entity.keys()} - {'id', 'type'}
attrs = {k: [maybe_value(entity, k, 'value')] for k in attr_names}

return merge_dicts(eid, attrs)

# TODO: factor out?
# This function and the one below could come in handy when testing a number
# of scenarios where we first insert entities and then query them by ID.

def entity_query_result_name_value_pairs(data: dict) -> dict:
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@NEC-Vishal not sure if or why you want to merge the two functions into one, but as it stands now this code won't work.

Here's a script you can run to see why.

from utils.jsondict import maybe_value
from utils.kvt import merge_dicts


def entity_name_value_pairs(entity: dict) -> dict:
    """
    Flatten an NGSI entity ``e`` into a name/value-pairs dict::

        {
            entityId: e[id]
            attr1: [ e[attr1][value] ]
            ...
            attrN: [ e[attrN][value] ]
        }

    Every key except ``id`` and ``type`` is taken to be an attribute
    whose ``value`` field gets wrapped in a one-element list.
    """
    skip_keys = {'id', 'type'}

    # Collect each attribute's value (None-safe via maybe_value) as a
    # singleton list, mirroring the query-result format.
    attrs = {name: [maybe_value(entity, name, 'value')]
             for name in entity if name not in skip_keys}

    return merge_dicts({'entityId': entity['id']}, attrs)


def query_result_name_value_pairs(result: dict) -> dict:
    """
    Extract the result set returned by the ``/v2/entities/{entityId}``
    endpoint using the same format as that of ``entity_name_value_pairs``.
    """
    # maybe_value returns None when the key is absent; fall back to an
    # empty list so the loop below is a no-op for attribute-less results.
    attributes = maybe_value(result, 'attributes') or []

    attrs = {}
    for attribute in attributes:
        attrs[maybe_value(attribute, 'attrName')] = \
            maybe_value(attribute, 'values')

    return merge_dicts({'entityId': maybe_value(result, 'entityId')}, attrs)


def entity_query_result_name_value_pairs(data: dict) -> dict:
    """
    Flatten either an NGSI entity or a ``/v2/entities/{entityId}`` query
    result into the common name/value-pairs format::

        {
            entityId: ...
            attr1: [ value1 ]
            ...
            attrN: [ valueN ]
        }

    Dispatches on input shape: query results carry an ``attributes``
    array, whereas entities carry their attributes as top-level keys.
    With this dispatch, flattening an entity and flattening the query
    result for that same entity yield equal dicts, which is exactly the
    comparison ``test_entity_with_all_supported_types`` relies on.
    (The previous implementation applied the entity extraction to both
    shapes, producing ``{'entityId': None, ...}`` for entities and
    ``{'attributes': [None], ...}`` for query results, so the two never
    compared equal.)
    """
    if 'attributes' in data:
        # Query-result shape from the /v2/entities/{entityId} endpoint.
        return query_result_name_value_pairs(data)
    # Plain NGSI entity shape.
    return entity_name_value_pairs(data)


# Sample NGSI entity with a single Number attribute.
e = {
    "id": "1",
    "type": "T",
    "speed": {
        "type": "Number",
        "value": 89.4
    }
}

# Query result for the same entity as returned by the
# /v2/entities/{entityId} endpoint: attributes live in an array of
# {attrName, values} records rather than as top-level keys.
q = {
    "entityId": "1",
    "entityType": "T",
    "index": ["2018-01-05T15:44:34"],
    "attributes": [
        {
            "attrName": "speed",
            "values": [89.4]
        }
    ]
}

# Flatten the entity with the old and the merged implementation.
e1 = entity_name_value_pairs(e)
e2 = entity_query_result_name_value_pairs(e)

# Flatten the query result with the old and the merged implementation.
q1 = query_result_name_value_pairs(q)
q2 = entity_query_result_name_value_pairs(q)

Now if you load the script in the Python REPL, you'll see that the data extracted by the old functions is right, whereas the new implementation isn't extracting the data we need for the comparison in test_entity_with_all_supported_types:

>>> e1
{'entityId': '1', 'speed': [89.4]}
>>> q1
{'entityId': '1', 'speed': [89.4]}
>>> e1 == q1
True
>>> e2
{'entityId': None, 'speed': [89.4]}
>>> q2
{'entityId': [None], 'entityType': [None], 'index': [None], 'attributes': [None]}
>>> e2 == q2
False


def query_result_name_value_pairs(result: dict) -> dict:
"""
Extract the result set returned by the ``/v2/entities/{entityId}`` endpoint
using the same format as that of ``entity_name_value_pairs``.
"""
eid = {'entityId': maybe_value(result, 'entityId')}
eid = {'entityId': maybe_value(data, 'entityId')}

attrs_array = maybe_value(result, 'attributes')
attrs_array = attrs_array if attrs_array else []
attrs = {maybe_value(a, 'attrName'): maybe_value(a, 'values')
for a in attrs_array}
attr_names = {k for k in data.keys()} - {'id', 'type'}
attrs = {k: [maybe_value(data, k, 'value')] for k in attr_names}

return merge_dicts(eid, attrs)

Expand Down Expand Up @@ -189,8 +161,8 @@ def test_entity_with_all_supported_types():

result_set = query_entity_by_id(e['id'], service)

actual = query_result_name_value_pairs(result_set)
expected = entity_name_value_pairs(e)
actual = entity_query_result_name_value_pairs(result_set)
expected = entity_query_result_name_value_pairs(e)
Comment on lines +164 to +165
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm struggling to see how this test could actually pass when using entity_query_result_name_value_pairs, see my previous comment about this function's implementation. Could it be this test isn't actually executed? Can you please check if that's the case?

assert actual == expected

# TODO: probabilistic testing.
Expand Down