Fix bug for sample times 1 day or longer
Various typos
einarsi committed Oct 5, 2020
1 parent 7e17d36 commit c599efc
Showing 14 changed files with 95 additions and 22 deletions.
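The heart of the fix: `pandas.Timedelta.seconds` returns only the seconds component of a duration (days are counted separately), so sample times of one day or longer were silently truncated wherever the handlers converted them with `.seconds`. The changes below switch to `int(ts.total_seconds())` throughout. A minimal sketch of the difference:

``` python
import pandas as pd

ts = pd.Timedelta(86401, unit="s")  # one day and one second
print(ts.seconds)                   # 1     -> only the seconds component; the day is dropped
print(int(ts.total_seconds()))      # 86401 -> the full duration in seconds
```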
2 changes: 1 addition & 1 deletion docs/manual.md
@@ -67,7 +67,7 @@ If you do not work in Equinor: ODBC queries may already work for you, although i

### For Equinor users

The Web APIs are queried with the requests package. Requests does not utilize the system certificate store, but instead relies on the certifi bundle. In order to avoid SSL verification errors, we need to either turn off SSL verification (optional input argument `verifySSL=False` for relevant function calls) or, strongly preferred, add the certificate to the certifi bundle. To to this, simply activate the virtual environment where you installed tagreader, and run the following snippet:
The Web APIs are queried with the requests package. Requests does not utilize the system certificate store, but instead relies on the certifi bundle. In order to avoid SSL verification errors, we need to either turn off SSL verification (optional input argument `verifySSL=False` for relevant function calls) or, strongly preferred, add the certificate to the certifi bundle. To do this, simply activate the virtual environment where you installed tagreader, and run the following snippet:

``` python
from tagreader.utils import add_statoil_root_certificate
2 changes: 1 addition & 1 deletion docs/quickstart.ipynb
@@ -5,7 +5,7 @@
"metadata": {},
"source": [
"## Quickstart\n",
"This document provides a quick demonstration of the basic usage of tagreader. It will show you the steps from importing the package to fetching data and making a plot. Some cells contain links to more details that can be found in the [manual](./manual.ipynb)."
"This document provides a quick demonstration of the basic usage of tagreader. It will show you the steps from importing the package to fetching data and making a plot. Some cells contain links to more details that can be found in the [manual](./manual.md)."
]
},
{
1 change: 0 additions & 1 deletion pytest.ini
@@ -1,4 +1,3 @@
[pytest]
mock_use_standalone_module = true
junit_family = xunit1
python_files = test_*.py
4 changes: 2 additions & 2 deletions tagreader/cache.py
@@ -32,7 +32,7 @@ def key_path(self, df, readtype, ts=None):
"""
name = list(df)[0] if isinstance(df, pd.DataFrame) else df
name = safe_tagname(name)
ts = ts.seconds if isinstance(ts, pd.Timedelta) else ts
ts = int(ts.total_seconds()) if isinstance(ts, pd.Timedelta) else ts
if readtype != ReaderType.RAW:
if ts is None:
# Determine sample time by reading interval between first two
@@ -122,7 +122,7 @@ def readtype_to_str(rt):

def timedelta_to_str(t):
if isinstance(t, pd.Timedelta):
return str(t.seconds)
return str(int(t.total_seconds()))
return t

key = "/" + key.lstrip("/") # Ensure absolute path
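With this change, cache keys for sample times of a day or longer encode the full number of seconds. A standalone sketch mirroring the patched `timedelta_to_str` helper shown above:

``` python
import pandas as pd

def timedelta_to_str(t):
    # Full duration in whole seconds, not just the seconds component
    if isinstance(t, pd.Timedelta):
        return str(int(t.total_seconds()))
    return t

print(timedelta_to_str(pd.Timedelta(86401, unit="s")))  # "86401" (previously "1")
```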
5 changes: 3 additions & 2 deletions tagreader/clients.py
@@ -65,7 +65,8 @@ def get_missing_intervals(df, start_time, stop_time, ts, read_type):
read_type == ReaderType.RAW
): # Fixme: How to check for completeness for RAW data?
return [[start_time, stop_time]]
tvec = pd.date_range(start=start_time, end=stop_time, freq=f"{ts}s")
seconds = int(ts.total_seconds())
tvec = pd.date_range(start=start_time, end=stop_time, freq=f"{seconds}s")
if len(df) == len(tvec): # Short-circuit if dataset is complete
return []
values_in_df = tvec.isin(df.index)
@@ -292,7 +293,7 @@ def _read_single_tag(self, tag, start_time, stop_time, ts, read_type, cache=None
stop_time=time_slice[1],
)
missing_intervals = get_missing_intervals(
df, start_time, stop_time, ts.seconds, read_type
df, start_time, stop_time, ts, read_type
)
if not missing_intervals:
return df.tz_convert(self.tz).sort_index()
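`get_missing_intervals` now receives the `pd.Timedelta` itself instead of `ts.seconds` and converts it once before building the expected time grid. A small illustration with a hypothetical two-day window:

``` python
import pandas as pd

ts = pd.Timedelta(86401, unit="s")
start, stop = "2018-01-17 15:00:00", "2018-01-19 15:00:02"  # hypothetical window

# Old behaviour: ts.seconds == 1, so the expected grid had one point per second.
# New behaviour: int(ts.total_seconds()) == 86401 gives the intended coarse grid.
tvec = pd.date_range(start=start, end=stop, freq=f"{int(ts.total_seconds())}s")
print(len(tvec))  # 3 expected samples instead of 172,803
```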
18 changes: 9 additions & 9 deletions tagreader/odbc_handlers.py
@@ -95,11 +95,11 @@ def generate_read_query(tag, mapdef, start_time, stop_time, sample_time, read_ty
if read_type != ReaderType.SNAPSHOT:
start_time = start_time.tz_convert("UTC")
stop_time = stop_time.tz_convert("UTC")
sample_time = sample_time.seconds
seconds = int(sample_time.total_seconds())
if ReaderType.SAMPLED == read_type:
sample_time = 0
seconds = 0
else:
if sample_time <= 0:
if seconds <= 0:
raise NotImplementedError
# sample_time = (stop_time-start_time).totalseconds

@@ -159,7 +159,7 @@ def generate_read_query(tag, mapdef, start_time, stop_time, sample_time, read_ty
if mapdef:
query.extend([f'AND FIELD_ID = FT({mapdef["MAP_HistoryValue"]!r})'])
if ReaderType.RAW != read_type:
query.extend([f"AND (period = {sample_time*10})"])
query.extend([f"AND (period = {seconds*10})"])
query.extend(
[
f"AND (request = {request_num})",
@@ -405,11 +405,11 @@ def generate_read_query(
if read_type != ReaderType.SNAPSHOT:
start_time = start_time.tz_convert("UTC")
stop_time = stop_time.tz_convert("UTC")
sample_time = sample_time.seconds
seconds = int(sample_time.total_seconds())
if ReaderType.SAMPLED == read_type:
sample_time = 0
seconds = 0
else:
if sample_time <= 0:
if seconds <= 0:
pass # Fixme: Not implemented
# sample_time = (stop_time-start_time).totalseconds

@@ -461,13 +461,13 @@ def generate_read_query(
elif ReaderType.SHAPEPRESERVING == read_type:
query.extend(
[
f"AND (intervalcount = {int((stop_time-start_time).seconds/sample_time)})" # noqa E501
f"AND (intervalcount = {int((stop_time-start_time).seconds/seconds)})" # noqa E501
]
)
elif ReaderType.RAW == read_type:
pass
elif read_type not in [ReaderType.SNAPSHOT, ReaderType.RAW]:
query.extend([f"AND (timestep = '{sample_time}s')"])
query.extend([f"AND (timestep = '{seconds}s')"])

if ReaderType.SNAPSHOT != read_type:
query.extend(["ORDER BY time"])
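In the ODBC handlers, Aspen queries express the sample period in tenths of a second (`period = seconds*10`), while PI queries use a `timestep = '<seconds>s'` clause. With the fix, a sample time of one day and one second produces the values the new tests below assert:

``` python
import pandas as pd

sample_time = pd.Timedelta(86401, unit="s")
seconds = int(sample_time.total_seconds())

print(f"AND (period = {seconds * 10})")   # AND (period = 864010)     -- Aspen
print(f"AND (timestep = '{seconds}s')")   # AND (timestep = '86401s') -- PI
```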
2 changes: 1 addition & 1 deletion tagreader/utils.py
@@ -113,7 +113,7 @@ def add_statoil_root_certificate():
if pem in certifi.contents():
print("Certificate already exists in certifi store. Nothing to do.")
break
print("Writing certificate to cacert store.")
print("Writing certificate to certifi store.")
cafile = certifi.where()
with open(cafile, "ab") as f:
f.write(bytes(pem, "ascii"))
6 changes: 3 additions & 3 deletions tagreader/web_handlers.py
@@ -508,7 +508,7 @@ def generate_read_query(
webid = tag

if read_type != ReaderType.SNAPSHOT:
sample_time = sample_time.seconds
seconds = int(sample_time.total_seconds())

get_action = {
ReaderType.INT: "interpolated",
@@ -542,10 +542,10 @@
}.get(read_type, None)

if ReaderType.INT == read_type:
params["interval"] = f"{sample_time}s"
params["interval"] = f"{seconds}s"
elif summary_type:
params["summaryType"] = summary_type
params["summaryDuration"] = f"{sample_time}s"
params["summaryDuration"] = f"{seconds}s"

if self._is_summary(read_type):
params[
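The PI Web API handler applies the same conversion when building query parameters: interpolated reads get an `interval` and summary reads a `summaryDuration`, both in whole seconds. A sketch of the resulting parameter for a long sample time:

``` python
import pandas as pd

sample_time = pd.Timedelta(86410, unit="s")
seconds = int(sample_time.total_seconds())

params = {"interval": f"{seconds}s"}
print(params)  # {'interval': '86410s'}, as the new PI REST test below asserts
```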
19 changes: 19 additions & 0 deletions tests/test_AspenHandlerODBC.py
@@ -119,3 +119,22 @@ def test_generate_tag_read_query(read_type):
}

assert expected[read_type] == res


def test_genreadquery_long_sampletime():
starttime = utils.ensure_datetime_with_tz(START_TIME)
stoptime = utils.ensure_datetime_with_tz(STOP_TIME)
ts = pd.Timedelta(86401, unit="s")

res = AspenHandlerODBC.generate_read_query(
"thetag", None, starttime, stoptime, ts, ReaderType.INT
)

expected = (
'SELECT ISO8601(ts) AS "time", value AS "value" FROM history WHERE '
"name = 'thetag' AND (period = 864010) AND (request = 7) "
"AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') "
"ORDER BY ts"
)

assert expected == res
19 changes: 18 additions & 1 deletion tests/test_AspenHandlerREST.py
@@ -93,7 +93,7 @@ def test_generate_map_query(AspenHandler):
def test_generate_tag_read_query(AspenHandler, read_type):
start_time = utils.ensure_datetime_with_tz("2020-06-24 17:00:00")
stop_time = utils.ensure_datetime_with_tz("2020-06-24 18:00:00")
ts = pd.Timedelta(1, unit="m")
ts = pd.Timedelta(SAMPLE_TIME, unit="s")
res = AspenHandler.generate_read_query(
"ATCAI", None, start_time, stop_time, ts, getattr(ReaderType, read_type)
)
@@ -171,3 +171,20 @@ def test_generate_tag_read_query(AspenHandler, read_type):
),
}
assert expected[read_type] == res

def test_genreadquery_long_sampletime(AspenHandler):
start_time = utils.ensure_datetime_with_tz("2020-06-24 17:00:00")
stop_time = utils.ensure_datetime_with_tz("2020-06-24 18:00:00")
ts = pd.Timedelta(86401, unit="s")

res = AspenHandler.generate_read_query(
"ATCAI", None, start_time, stop_time, ts, ReaderType.INT
)
expected = (
'<Q f="d" allQuotes="1"><Tag><N><![CDATA[ATCAI]]></N>'
"<D><![CDATA[sourcename]]></D><F><![CDATA[VAL]]></F>"
"<HF>0</HF><St>1593010800000</St><Et>1593014400000</Et>"
"<RT>1</RT><S>0</S><P>86401</P><PU>3</PU></Tag></Q>"
)

assert expected == res
19 changes: 19 additions & 0 deletions tests/test_PIHandlerODBC.py
@@ -119,3 +119,22 @@ def test_generate_tag_read_query(PIHandler, read_type):
),
}
assert expected[read_type] == res


def test_genreadquery_long_sampletime(PIHandler):
starttime = utils.ensure_datetime_with_tz(START_TIME)
stoptime = utils.ensure_datetime_with_tz(STOP_TIME)
ts = pd.Timedelta(86401, unit="s")

res = PIHandler.generate_read_query(
"thetag", starttime, stoptime, ts, ReaderType.INT
)

expected = (
"SELECT CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[piinterp2] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '86401s') ORDER BY time"
)

assert expected == res
14 changes: 14 additions & 0 deletions tests/test_PIHandlerREST.py
@@ -125,3 +125,17 @@ def test_generate_read_query(PIHandler, read_type): # TODO: Move away from test
params["selectedFields"] == "Links;Items.Timestamp;Items.Value;Items.Good"
)
assert params["maxCount"] == 10000

def test_genreadquery_long_sampletime(PIHandler):
starttime = ensure_datetime_with_tz(START_TIME)
stoptime = ensure_datetime_with_tz(STOP_TIME)
ts = pd.Timedelta(86410, unit="s")

(url, params) = PIHandler.generate_read_query(
PIHandler.tag_to_webid("alreadyknowntag"),
starttime,
stoptime,
ts,
ReaderType.INT,
)
assert params["interval"] == f"{86410}s"
4 changes: 4 additions & 0 deletions tests/test_cache.py
@@ -78,6 +78,10 @@ def test_match_tag(cache):
cache._match_tag("INT/s60/tag1", readtype=ReaderType.INT, ts=60, tagname="tag1")
is True
)
assert (
cache._match_tag("INT/s86401/tag1", readtype=ReaderType.INT, ts=86401, tagname="tag1")
is True
)
assert (
cache._match_tag("INT/s60/tag1", readtype=ReaderType.RAW, ts=60, tagname="tag1")
is False
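The new cache test checks that a key with a sample time above one day still matches. Judging from the asserted strings, the key appears to combine the read type, an `s`-prefixed sample time in whole seconds, and the tag name; a hypothetical sketch of that composition:

``` python
import pandas as pd

def make_key(readtype_name, ts, tagname):
    # Hypothetical reconstruction based on the "INT/s86401/tag1" strings in the tests
    seconds = int(ts.total_seconds()) if isinstance(ts, pd.Timedelta) else int(ts)
    return f"{readtype_name}/s{seconds}/{tagname}"

print(make_key("INT", pd.Timedelta(86401, unit="s"), "tag1"))  # INT/s86401/tag1
```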
2 changes: 1 addition & 1 deletion tests/test_clients.py
@@ -41,7 +41,7 @@ def test_get_missing_intervals():
df,
start_time="2018-01-18 05:00:00",
stop_time="2018-01-18 06:00:00",
ts=ts,
ts=pd.Timedelta(ts, unit='s'),
read_type=ReaderType.INT,
)
assert missing[0] == (idx[2], idx[2])
