Commit 573c7ab
Merge pull request #27 from apdn7/feature/v4.7.6
Feature/v4.7.6
apdn7 authored Nov 18, 2024
2 parents b5ccacb + 7f4d315 commit 573c7ab
Showing 64 changed files with 64 additions and 65 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -5,7 +5,7 @@
# Analysis Platform

```
2024-11-05: version 4.7.5
2024-11-18: version 4.7.6
```

Analysis Platform is an open-source web application for importing, connecting, and visualizing factory IoT data. It helps collect, link, and integrate data from multiple data sources.
11 changes: 11 additions & 0 deletions RELEASE.md
@@ -1,5 +1,16 @@
# Releases

## v4.7.6

Bug fixes

- Fixed a bug where some data periods were not imported from a database
- Fixed a bug where the data preview did not appear on the Process Config page for databases that have a date column
- Fixed a bug where data import failed for CSV/TSV files with an empty column name
- Fixed a bug with significant digits on the Y axis of graphs in Full-points Plot (FPP)
- Fixed a bug where some data periods were not imported via the Software Workshop interface (PostgreSQL)


## v4.7.5

New features and improvements
2 changes: 1 addition & 1 deletion VERSION
@@ -1,4 +1,4 @@
v4.7.5.243.6134499f
v4.7.6.244.d174d58d
1
OSS

5 changes: 3 additions & 2 deletions ap/api/common/services/show_graph_services.py
@@ -1032,8 +1032,9 @@ def gen_graph_df(
df = judge_data_conversion(df, judge_columns)
df = boolean_data_conversion(df, boolean_columns)
# filter function column
for condition_proc in cond_procs:
df = filter_function_column(df, condition_proc, end_proc)
for end_proc in end_procs:
for condition_proc in cond_procs:
df = filter_function_column(df, condition_proc, end_proc)

return df, actual_record_number, unique_serial

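The bug here: the old loop passed an `end_proc` that was never defined in this scope, so it apparently reused whatever value an earlier loop had left behind (Python loop variables outlive their loop), and only one target process ever got filtered. A minimal standalone sketch of that pitfall, with hypothetical names:

```python
# Loop variables leak out of `for` loops in Python, so later code can
# silently reuse the last value instead of raising a NameError.
end_procs = ['proc_a', 'proc_b']
for end_proc in end_procs:
    pass  # some earlier loop over target processes

# the old filter loop then effectively saw only the leftover value:
print(end_proc)  # 'proc_b'
```

The fix nests the condition loop inside an explicit loop over `end_procs`, so every target process is filtered.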
14 changes: 11 additions & 3 deletions ap/api/setting_module/services/csv_import.py
@@ -280,7 +280,10 @@ def import_csv(proc_id, record_per_commit=RECORD_PER_COMMIT, register_by_file_re
is_first_chunk = True
error_type = None
chunk_size = record_per_commit * 100
origin_default_csv_param = default_csv_param.copy()
for idx, (csv_file_name, transformed_file) in enumerate(import_targets):
# Because each file may have a different structure, read each one with its own copy of the parameters
default_csv_param = origin_default_csv_param.copy()
job_info.target = csv_file_name

if not dic_imported_row:
@@ -298,6 +301,7 @@
with_encoding=True,
)
# check missing columns
partial_dummy_header = False
if is_abnormal is False:
dic_csv_cols = None
dic_org_csv_cols = None
@@ -326,7 +330,7 @@
csv_cols, _ = gen_colsname_for_duplicated(csv_cols)
else:
# for the column names with only spaces, we need to generate dummy headers for them
_, csv_cols, *_ = gen_dummy_header(org_csv_cols)
_, csv_cols, _, partial_dummy_header, *_ = gen_dummy_header(org_csv_cols)
csv_cols = normalize_list(csv_cols)
# try to convert irregular numbers from csv columns
csv_cols = [normalize_str(col) for col in csv_cols]
@@ -399,7 +403,7 @@ def import_csv(proc_id, record_per_commit=RECORD_PER_COMMIT, register_by_file_re
continue

# default_csv_param['usecols'] = [i for i, col in enumerate(valid_columns) if col]
if not data_src.dummy_header:
if not data_src.dummy_header and not partial_dummy_header:
default_csv_param['usecols'] = transform_duplicated_col_suffix_to_pandas_col(
dic_valid_csv_cols,
dic_org_csv_cols,
@@ -484,6 +488,7 @@ def import_csv(proc_id, record_per_commit=RECORD_PER_COMMIT, register_by_file_re
dic_use_cols=dic_use_cols,
col_names=use_col_names,
encoding=encoding,
is_partial_dummy_header=partial_dummy_header,
)
# validate column name
validate_columns(dic_use_cols, df_one_file.columns, use_dummy_datetime, dummy_datetime_col)
@@ -723,12 +728,16 @@ def csv_to_df(
dic_use_cols=None,
col_names=None,
encoding=None,
is_partial_dummy_header=False,
):
# read csv file
read_csv_param = {}
if default_csv_param:
read_csv_param.update(default_csv_param)

if is_partial_dummy_header: # skip header
head_skips = head_skips + [max(head_skips) + 1] if len(head_skips) else [0]
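# note: the conditional binds the whole right-hand side, i.e.
# (head_skips + [max(head_skips) + 1]) if head_skips else [0] -- with known
# header rows we additionally skip the row just below them (the partially
# dummy header row); with none, we skip row 0 instead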

read_csv_param.update(
{
'skiprows': head_skips + list(range(data_first_row, skip_row + data_first_row)),
@@ -741,7 +750,6 @@
'header': 0,
},
)

# assign n_rows with is_transpose
n_rows = get_limit_records(is_transpose=data_src.is_transpose, n_rows=data_src.n_rows)
read_csv_param.update({'nrows': n_rows})
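Context for the empty-column-name fix: `gen_dummy_header` now also reports whether only some columns needed generated names (`partial_dummy_header`); in that case `usecols` is left unset and `csv_to_df` skips one extra header row. A rough sketch of the idea, with a hypothetical helper (not the app's real `gen_dummy_header`):

```python
# Hypothetical sketch: fill empty or space-only column names with generated
# placeholders and flag when only part of the header was dummy.
def make_partial_dummy_header(cols):
    filled, partial = [], False
    for i, col in enumerate(cols):
        if col is None or not str(col).strip():
            filled.append(f'col_{i + 1}')  # generated placeholder
            partial = True
        else:
            filled.append(col)
    return filled, partial

print(make_partial_dummy_header(['time', '', 'value']))
# (['time', 'col_2', 'value'], True)
```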
45 changes: 20 additions & 25 deletions ap/api/setting_module/services/factory_import.py
@@ -29,7 +29,6 @@
)
from ap.api.trace_data.services.proc_link import add_gen_proc_link_job
from ap.common.common_utils import (
DATE_FORMAT_STR_FACTORY_DB,
DATE_FORMAT_STR_ONLY_DIGIT,
add_days,
add_double_quotes,
@@ -67,6 +66,7 @@

MAX_RECORD = 1_000_000
SQL_FACTORY_LIMIT = 5_000_000
SOFTWARE_WORKSHOP_FACTORY_LIMIT = 20_000
SQL_DAY = 1
SQL_DAYS_AGO = 30
FETCH_MANY_SIZE = 20_000
@@ -163,7 +163,6 @@ def import_factory(proc_id):
job_info.job_id = job_id
data_source_name = proc_cfg.data_source.name
table_name = proc_cfg.table_name
has_data = True

while inserted_row_count < MAX_RECORD and is_import:
# get sql range
@@ -187,17 +186,18 @@

# no data in range, stop
if start_time > fac_max_date:
has_data = False
break

# validate import date range
if end_time >= fac_max_date:
# this is not necessary for now
# the next import_from is determined by the imported_end_time
# end_time = fac_max_date
is_import = False

# get data from factory
data = get_factory_data(proc_cfg, column_names, auto_increment_col, start_time, end_time)
if not data:
has_data = False
break

cols = next(data)
@@ -321,7 +321,7 @@

# save the import range into job_info
job_info.import_from = start_time
job_info.import_to = imported_end_time
job_info.import_to = format_factory_date_to_meta_data(imported_end_time, is_tz_col)

job_info.status = JobStatus.DONE.name
if error_type:
@@ -350,17 +350,17 @@
f'FACTORY DATA IMPORT SQL(days = {sql_day}, records = {total_row}, range = {start_time} - {end_time})',
)

if not has_data:
# save record into factory import to start job FACTORY PAST
gen_import_job_info(job_info, 0, start_time, start_time)
job_info.auto_increment_col_timezone = is_tz_col
job_info.percent = 100
# insert import history
job_info.import_type = JobType.FACTORY_IMPORT.name
job_info.import_from = start_time
job_info.import_to = start_time
save_import_history(proc_id, job_info=job_info)
yield job_info
# if not has_data:
# # save record into factory import to start job FACTORY PAST
# gen_import_job_info(job_info, 0, start_time, start_time)
# job_info.auto_increment_col_timezone = is_tz_col
# job_info.percent = 100
# # insert import history
# job_info.import_type = JobType.FACTORY_IMPORT.name
# job_info.import_from = start_time
# job_info.import_to = start_time
# save_import_history(proc_id, job_info=job_info)
# yield job_info


@log_execution_time()
@@ -455,14 +455,9 @@ def get_sql_range_time(
end_time = add_days(start_time, days=range_day)

# convert to string
start_time = convert_time(start_time, format_str=DATE_FORMAT_STR_FACTORY_DB, only_millisecond=True)
end_time = convert_time(end_time, format_str=DATE_FORMAT_STR_FACTORY_DB, only_millisecond=True)
filter_time = convert_time(filter_time, format_str=DATE_FORMAT_STR_FACTORY_DB, only_millisecond=True)

if is_tz_col:
start_time += 'Z'
end_time += 'Z'
filter_time += 'Z'
start_time = format_factory_date_to_meta_data(start_time, is_tz_col=is_tz_col)
end_time = format_factory_date_to_meta_data(end_time, is_tz_col=is_tz_col)
filter_time = format_factory_date_to_meta_data(filter_time, is_tz_col=is_tz_col)

return start_time, end_time, filter_time

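The three `convert_time(...)` calls and the manual `'Z'` suffix are folded into `format_factory_date_to_meta_data`, whose implementation is not shown in this diff. Judging from the deleted code, it amounts to something like this sketch (the exact format string is an assumption):

```python
from datetime import datetime

# Hedged sketch of what the helper consolidates: format to millisecond
# precision, then mark timezone-aware values as UTC, as the deleted
# inline code did.
def format_factory_date_to_meta_data(time_value: datetime, is_tz_col: bool) -> str:
    time_str = time_value.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]  # trim to ms
    return time_str + 'Z' if is_tz_col else time_str

print(format_factory_date_to_meta_data(datetime(2024, 11, 18, 9, 30), True))
# 2024-11-18 09:30:00.000Z
```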
@@ -517,7 +512,7 @@ def get_data_by_range_time(
proc_cfg.process_factid,
start_time,
end_time,
limit=sql_limit,
limit=SOFTWARE_WORKSHOP_FACTORY_LIMIT,
master_type=MasterDBType[proc_cfg.master_type],
)
sql, params = db_instance.gen_sql_and_params(stmt)
@@ -225,9 +225,9 @@ def get_master_data_stmt(
if process_factid is not None:
conditions.append(target_table.c.child_equip_id == process_factid)
if start_date is not None:
conditions.append(target_table.c.event_time >= start_date)
conditions.append(target_table.c.event_time > start_date)
if end_date is not None:
conditions.append(target_table.c.event_time < end_date)
conditions.append(target_table.c.event_time <= end_date)

stmt = sa.select(
[
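The flipped comparisons change the fetch window from `[start, end)` to `(start, end]`. Presumably because the next window resumes from the last imported timestamp rather than the previous window's end, this avoids refetching the boundary record while still catching rows stamped exactly at the window end. A toy illustration:

```python
events = [10, 20, 30, 40]           # event_time values
last_imported = 20                  # the next window resumes here
end = 40

old = [t for t in events if last_imported <= t < end]   # [20, 30]: 20 fetched again
new = [t for t in events if last_imported < t <= end]   # [30, 40]: resumes cleanly
print(old, new)
```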
2 changes: 1 addition & 1 deletion ap/api/trace_data/services/filter_function_condition.py
@@ -13,7 +13,7 @@ def filter_function_column(df: DataFrame, condition_proc: ConditionProc, end_pro
# AND
for column_id, filter_details in condition_proc.dic_col_id_filters.items():
filter_column = end_proc.cfg_proc.get_col(column_id)
if not filter_details:
if not filter_column or not filter_details:
continue

if not len(filter_column.function_details):
4 changes: 3 additions & 1 deletion ap/common/common_utils.py
@@ -1320,7 +1320,9 @@ def gen_bridge_column_name(id, name):
from ap.common.services.jp_to_romaji_utils import to_romaji

name = to_romaji(name)
return f"_{id}_{name.replace('-', '_').lower()}"[:50]
# clean the column name: replace characters other than [A-Za-z0-9_]
name = re.sub(r'[^A-Za-z0-9_]', '_', name)
return f'_{id}_{name.lower()}'[:50]


def gen_end_proc_start_end_time(start_tm, end_tm, return_string: bool = True, buffer_days=14):
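The old `replace('-', '_')` handled only hyphens; any other non-identifier character surviving romaji conversion leaked into the generated column name. A trimmed-down version of the new behavior (the `to_romaji` step is omitted here):

```python
import re

def gen_bridge_column_name(id, name):
    # replace every character outside [A-Za-z0-9_] with '_', then lowercase
    name = re.sub(r'[^A-Za-z0-9_]', '_', name)
    return f'_{id}_{name.lower()}'[:50]

print(gen_bridge_column_name(12, 'Temp. Sensor-1'))  # _12_temp__sensor_1
```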
3 changes: 3 additions & 0 deletions ap/common/pydn/dblib/oracle.py
@@ -305,11 +305,14 @@ def convert_sql(sql):
"""
timestamp_frm = "'YYYY-MM-DD HH24:MI:SS.FF3'"
timestamp_z_frm = "'YYYY-MM-DD HH24:MI:SS.FF3TZR'"
# timestamp_z_frm_6 = "'YYYY-MM-DD HH24:MI:SS.FF6TZR'"
timestamp_tz_frm = "'YYYY-MM-DD HH24:MI:SS.FF3 TZH:TZM'"
regex_str = r"('\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}.\d{3}')"
regex_z_str = r"('\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}.\d{3}Z')"
# regex_z_str_6 = r"('\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}.\d{6}Z')"
regex_tz_str = r"('\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}.\d{3}\s[+-]?\d{1,2}:\d{2}')"
new_sql = re.sub(regex_str, f'TO_TIMESTAMP(\\1,{timestamp_frm})', sql)
# new_sql = re.sub(regex_z_str_6, f'TO_TIMESTAMP_TZ(\\1,{timestamp_z_frm_6})', new_sql)
new_sql = re.sub(regex_z_str, f'TO_TIMESTAMP_TZ(\\1,{timestamp_z_frm})', new_sql)
new_sql = re.sub(regex_tz_str, f'TO_TIMESTAMP_TZ(\\1,{timestamp_tz_frm})', new_sql)
# new_sql = re.sub(regex_str, f'TO_CHAR(TO_TIMESTAMP(\\1,{timestamp_frm}),{timestamp_frm})', sql)
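For reference, `convert_sql` rewrites ISO-like timestamp literals into Oracle `TO_TIMESTAMP`/`TO_TIMESTAMP_TZ` calls; the newly added commented-out lines stage a microsecond (`FF6`) variant of the same pattern. The millisecond-with-`Z` rule, run stand-alone:

```python
import re

timestamp_z_frm = "'YYYY-MM-DD HH24:MI:SS.FF3TZR'"
regex_z_str = r"('\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}.\d{3}Z')"

sql = "SELECT * FROM t WHERE ts >= '2024-11-18 00:00:00.000Z'"
print(re.sub(regex_z_str, f'TO_TIMESTAMP_TZ(\\1,{timestamp_z_frm})', sql))
# SELECT * FROM t WHERE ts >= TO_TIMESTAMP_TZ('2024-11-18 00:00:00.000Z','YYYY-MM-DD HH24:MI:SS.FF3TZR')
```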
3 changes: 2 additions & 1 deletion ap/common/services/data_type.py
@@ -271,7 +271,8 @@ def convert_df_str_to_others(orig_series):

if series is None:
# Only cast datetime with string len >= 10
if orig_series.str.len().min() < MIN_DATETIME_LEN:
# cast to string before using .str accessor
if orig_series.astype(str).str.len().min() < MIN_DATETIME_LEN:
return orig_series

with contextlib.suppress(Exception):
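pandas raises `AttributeError` when `.str` is used on a non-string series, which is exactly what a numeric datetime-like column triggers; casting first makes the length check safe:

```python
import pandas as pd

s = pd.Series([20241118, 20241119])   # numeric dtype, not string
# s.str.len() raises AttributeError: can only use .str accessor with string values
print(s.astype(str).str.len().min())  # 8 -> below MIN_DATETIME_LEN, no datetime cast
```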
15 changes: 5 additions & 10 deletions ap/static/common/js/utils.js
@@ -5452,25 +5452,20 @@ const getFmtValueOfArrayTrim5Percent = (array) => {
};

const getFmtValueOfArray = (array) => {
const sortedArray = [...array].sort();
const { sigDigit, usageNum } = getSigDigitOfArray(sortedArray);
const fmt =
sortedArray.length > 0 ? significantDigitFmt(usageNum, sigDigit) : '';
return fmt === ',.1f' ? ',.2f' : fmt;
};

const getSigDigitOfArray = (array) => {
const decimal = '.';
let sortedArray = [...array].sort();
let usageNum = 0;
let sigDigit = 0;
array.forEach((num) => {
sortedArray.forEach((num) => {
const vals = String(num).split(decimal);
if (vals.length > 1 && vals[1].length > sigDigit) {
sigDigit = vals[1].length;
usageNum = num;
}
});
return { sigDigit, usageNum };
const fmt =
sortedArray.length > 0 ? significantDigitFmt(usageNum, sigDigit) : '';
return fmt === ',.1f' ? ',.2f' : fmt;
};

const alignLengthTickLabels = (ticks) => {
19 changes: 1 addition & 18 deletions ap/static/trace_data/js/trace_data_time_series.js
@@ -701,24 +701,7 @@ function YasuTsChart(
// end limit 8 ticks
} else {
const ticks = axis.ticks.map((tick) => tick.value);
const formatFloat = '^[,][.](\\d+)[f]$';
const { sigDigit } = getSigDigitOfArray([
minY,
maxY,
]);
const yTickFmt = getFmtValueOfArray(ticks);
const yTickMax = Math.max(...ticks);
const yTickMin = Math.min(...ticks);
y_fmt = yTickFmt;
if (yTickFmt.includes('f')) {
const [, sigDigitFmt] =
yTickFmt.match(formatFloat);
y_fmt =
sigDigit > Number(sigDigitFmt) &&
yTickMax - yTickMin < 1
? `,.${sigDigit + 1}f`
: yTickFmt;
}
y_fmt = getFmtValueOfArray(ticks);
}
return;
},
Binary file modified ap/translations/ar/LC_MESSAGES/messages.mo
Binary file modified ap/translations/bg/LC_MESSAGES/messages.mo
Binary file modified ap/translations/ca/LC_MESSAGES/messages.mo
Binary file modified ap/translations/cs/LC_MESSAGES/messages.mo
Binary file modified ap/translations/cy/LC_MESSAGES/messages.mo
Binary file modified ap/translations/da/LC_MESSAGES/messages.mo
Binary file modified ap/translations/de/LC_MESSAGES/messages.mo
Binary file modified ap/translations/el/LC_MESSAGES/messages.mo
Binary file modified ap/translations/en/LC_MESSAGES/messages.mo
Binary file modified ap/translations/es/LC_MESSAGES/messages.mo
Binary file modified ap/translations/fa/LC_MESSAGES/messages.mo
Binary file modified ap/translations/fi/LC_MESSAGES/messages.mo
Binary file modified ap/translations/fr/LC_MESSAGES/messages.mo
Binary file modified ap/translations/gd/LC_MESSAGES/messages.mo
Binary file modified ap/translations/he/LC_MESSAGES/messages.mo
Binary file modified ap/translations/hi/LC_MESSAGES/messages.mo
Binary file modified ap/translations/hr/LC_MESSAGES/messages.mo
Binary file modified ap/translations/hu/LC_MESSAGES/messages.mo
Binary file modified ap/translations/id/LC_MESSAGES/messages.mo
Binary file modified ap/translations/is/LC_MESSAGES/messages.mo
Binary file modified ap/translations/it/LC_MESSAGES/messages.mo
Binary file modified ap/translations/ja/LC_MESSAGES/messages.mo
Binary file modified ap/translations/jv/LC_MESSAGES/messages.mo
Binary file modified ap/translations/km/LC_MESSAGES/messages.mo
Binary file modified ap/translations/ko/LC_MESSAGES/messages.mo
Binary file modified ap/translations/lb/LC_MESSAGES/messages.mo
Binary file modified ap/translations/mi/LC_MESSAGES/messages.mo
Binary file modified ap/translations/mk/LC_MESSAGES/messages.mo
Binary file modified ap/translations/mn/LC_MESSAGES/messages.mo
Binary file modified ap/translations/ms/LC_MESSAGES/messages.mo
Binary file modified ap/translations/my/LC_MESSAGES/messages.mo
Binary file modified ap/translations/ne/LC_MESSAGES/messages.mo
Binary file modified ap/translations/nl/LC_MESSAGES/messages.mo
Binary file modified ap/translations/no/LC_MESSAGES/messages.mo
Binary file modified ap/translations/pa/LC_MESSAGES/messages.mo
Binary file modified ap/translations/pl/LC_MESSAGES/messages.mo
Binary file modified ap/translations/pt/LC_MESSAGES/messages.mo
Binary file modified ap/translations/ro/LC_MESSAGES/messages.mo
Binary file modified ap/translations/ru/LC_MESSAGES/messages.mo
Binary file modified ap/translations/sd/LC_MESSAGES/messages.mo
Binary file modified ap/translations/si/LC_MESSAGES/messages.mo
Binary file modified ap/translations/sk/LC_MESSAGES/messages.mo
Binary file modified ap/translations/sq/LC_MESSAGES/messages.mo
Binary file modified ap/translations/sv/LC_MESSAGES/messages.mo
Binary file modified ap/translations/te/LC_MESSAGES/messages.mo
Binary file modified ap/translations/th/LC_MESSAGES/messages.mo
Binary file modified ap/translations/tl/LC_MESSAGES/messages.mo
Binary file modified ap/translations/tr/LC_MESSAGES/messages.mo
Binary file modified ap/translations/vi/LC_MESSAGES/messages.mo
Binary file modified ap/translations/zh_Hans_CN/LC_MESSAGES/messages.mo
Binary file modified ap/translations/zh_Hant_TW/LC_MESSAGES/messages.mo