Commit
Update OpenAgua, HydroShare, and WEAP connections
Adel M. Abdallah committed Jan 17, 2021
1 parent 1acc066 commit f8f5a83
Showing 59 changed files with 4,851 additions and 1,331 deletions.
Binary file added src/LowerBearRiver---backup..sqlite
315 changes: 315 additions & 0 deletions src/Old_pc_libraies.txt


140 changes: 140 additions & 0 deletions src/Pivot.py
@@ -0,0 +1,140 @@

import json
import sqlite3

import pandas as pd

# Choose the WaMDaM SQLite database to query before connecting
WaMDaM_SQLite_Name = 'WEAP.sqlite'
# WaMDaM_SQLite_Name = 'replicateWaMDaM.sqlite'
# WaMDaM_SQLite_Name = 'BearRiverDatasets_August_2018.sqlite'

conn = sqlite3.connect(WaMDaM_SQLite_Name)


sql_Multi_colums = """
SELECT DISTINCT "ObjectTypes"."ObjectType",
"Instances"."InstanceName",
"ScenarioName", "Attributes"."AttributeName" AS Multi_AttributeName,
"Methods"."MethodName", "Sources"."SourceName",
"AttributesColumns"."AttributeName" AS "Sub_AttributeName",
"DataValue", "ValueOrder"
FROM "ResourceTypes"
-- Join the ResourceType to get its Object Types
LEFT JOIN "ObjectTypes"
ON "ObjectTypes"."ResourceTypeID"="ResourceTypes"."ResourceTypeID"
-- Join the Object types to get their attributes
LEFT JOIN "Attributes"
ON "Attributes"."ObjectTypeID"="ObjectTypes"."ObjectTypeID"
-- Join the Attributes to get their Mappings
LEFT JOIN "Mappings"
ON Mappings.AttributeID= Attributes.AttributeID
-- Join the Mappings to get their Instances
LEFT JOIN "Instances"
ON "Instances"."InstanceID"="Mappings"."InstanceID"
-- Join the Mappings to get their ScenarioMappings
LEFT JOIN "ScenarioMappings"
ON "ScenarioMappings"."MappingID"="Mappings"."MappingID"
-- Join the ScenarioMappings to get their Scenarios
LEFT JOIN "Scenarios"
ON "Scenarios"."ScenarioID"="ScenarioMappings"."ScenarioID"
-- Join the Scenarios to get their MasterNetworks
LEFT JOIN "MasterNetworks"
ON "MasterNetworks"."MasterNetworkID"="Scenarios"."MasterNetworkID"
-- Join the Mappings to get their Methods
LEFT JOIN "Methods"
ON "Methods"."MethodID"="Mappings"."MethodID"
-- Join the Mappings to get their Sources
LEFT JOIN "Sources"
ON "Sources"."SourceID"="Mappings"."SourceID"
-- Join the Mappings to get their ValuesMappers
LEFT JOIN "ValuesMapper"
ON "ValuesMapper"."ValuesMapperID"="Mappings"."ValuesMapperID"
-- Join the ValuesMapper to get their MultiAttributeSeries
LEFT JOIN "MultiAttributeSeries"
ON "MultiAttributeSeries" ."ValuesMapperID"="ValuesMapper"."ValuesMapperID"
/*This is an extra join to get to each column name within the MultiColumn Array */
-- Join the MultiAttributeSeries to get to their specific ValuesMapper, now called ValuesMapperColumn
LEFT JOIN "ValuesMapper" As "ValuesMapperColumn"
ON "ValuesMapperColumn"."ValuesMapperID"="MultiAttributeSeries"."MappingID_Attribute"
-- Join the ValuesMapperColumn to get back to their specific Mapping, now called MappingColumns
LEFT JOIN "Mappings" As "MappingColumns"
ON "MappingColumns"."ValuesMapperID"="ValuesMapperColumn"."ValuesMapperID"
-- Join the MappingColumns to get back to their specific Attribute, now called AttributeColumns
LEFT JOIN "Attributes" AS "AttributesColumns"
ON "AttributesColumns"."AttributeID"="MappingColumns"."AttributeID"
/* Finishes here */
-- Join the MultiAttributeSeries to get access to their MultiAttributeSeriesValues
LEFT JOIN "MultiAttributeSeriesValues"
ON "MultiAttributeSeriesValues"."MultiAttributeSeriesID"="MultiAttributeSeries"."MultiAttributeSeriesID"
-- Restrict the query to attributes whose AttributeDataTypeCV is 'MultiAttributeSeries' and to non-empty data values
WHERE Attributes.AttributeDataTypeCV='MultiAttributeSeries' AND DataValue IS NOT NULL AND DataValue <> ''
--AND Multi_AttributeName='wsi_par'
AND "ResourceTypeAcronym"='WASH'
AND "MasterNetworkName"='Lower Bear River Network'
AND "ScenarioName"='base case scenario 2003'
ORDER BY InstanceName, ScenarioName, Multi_AttributeName, Sub_AttributeName, ValueOrder ASC
"""


Multi_colums_result_df = pd.read_sql(sql_Multi_colums, conn)



# Group the long-format query results so that each group holds all the sub-columns
# (and their ordered values) of one multi-column array attribute
subsets = Multi_colums_result_df.groupby(['ObjectType', 'InstanceName', 'Multi_AttributeName'])

for subset in subsets.groups.keys():
    dt = subsets.get_group(name=subset)

    ObjectType = dt['ObjectType'].values[0]
    InstanceName = dt['InstanceName'].values[0]
    attr_name = dt['Multi_AttributeName'].values[0]    # the multi-column (parent) attribute
    sub_attr_names = dt['Sub_AttributeName'].unique()  # its sub-attributes (columns)

    # dict_res_attr, Dataset_attr_Name_Dim_list, attr_unit, metadata, array_value, and
    # list_rs_multi are expected to be provided by the calling OpenAgua exporter context
    if (InstanceName, attr_name) in dict_res_attr.keys():
        dimension = Dataset_attr_Name_Dim_list[ObjectType, attr_name]

        rs_multi = {'resource_attr_id': dict_res_attr[(InstanceName, attr_name)]['id']}

        dataset = {'type': 'array', 'name': attr_name, 'unit': attr_unit, 'dimension': dimension,
                   'metadata': json.dumps(metadata, ensure_ascii=True),
                   'hidden': 'N', 'value': json.dumps(array_value)}
        rs_multi['value'] = dataset
        list_rs_multi.append(rs_multi)
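
For context, these grouped long-format rows can then be pivoted into one wide table per multi-column attribute (one column per Sub_AttributeName, rows ordered by ValueOrder). A minimal, self-contained sketch with hypothetical values, reusing the column aliases from the query above:

import pandas as pd

# Hypothetical long-format rows shaped like the query output above
dt = pd.DataFrame({
    'Sub_AttributeName': ['flow', 'flow', 'stage', 'stage'],
    'ValueOrder': [1, 2, 1, 2],
    'DataValue': ['10.5', '12.0', '3.2', '3.9'],
})

# Pivot into the wide layout: one column per sub-attribute
wide = dt.pivot(index='ValueOrder', columns='Sub_AttributeName', values='DataValue')
print(wide)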
13 changes: 11 additions & 2 deletions src/WaMDaM.py
@@ -5,18 +5,26 @@
""""""

"""
WaMDaM: The Water Management Data Model Version 1.06
WaMDaM: The Water Management Data Model Version 1.08
See the schema at http://schema.wamdam.org/diagrams/01_WaMDaM.html
Instructions at http://docs.wamdam.org/Getting_started/Steps
Adel M. Abdallah
email: [email protected]
website: http://adelmabdallah.com/
Feb 2019
Dec 2020
"""

'''
to create a new .exe, type this in the terminal:
pyinstaller --clean --icon=WaMDaM_Wizard.ico --onedir --noconfirm --noupx --onefile --windowed wamdam.py
Please record here the exact pyinstaller command and version used.
'''

"""
Wamdam.py is the calling/initializing function for the Wizard.
@@ -41,6 +49,7 @@
dlg_ImportSpreadsheetBasic.py, which exists in the viewer folder. Then, when the user
clicks "load Data" in this dialog box, a function calls one or all of the sub-data importers
for the checked boxes from the controller folder:
stp0_loadCVs.py, stp1_loadMetadata.py,
stp2_loadDataStructure.py,
stp3_loadNetworks.py,
47 changes: 25 additions & 22 deletions src/controller/HydroShare/PublishWaMDaM.py
@@ -15,8 +15,6 @@
def publishOnHydraShare(userName, password, filePathOfSqlite, title, abstract, author):
auth = HydroShareAuthBasic(username=userName, password=password)
hs = HydroShare(auth=auth)
# hs = HydroShare(auth=auth, hostname='beta.hydroshare.org')
hs = HydroShare(auth=auth, hostname='hydroshare.org')

# We import the classes from ConnectDB_ParseExcel.py. These classes are inherited by LoadMetaData
# from ..ConnectDB_ParseExcel import *
@@ -125,27 +123,28 @@ def publishOnHydraShare(userName, password, filePathOfSqlite, title, abstract, a


print 'filePathOfSqlite= '+filePathOfSqlite

filePathOfSqlite = str(filePathOfSqlite)

# here we create the resource and upload the SQLite file
resource_id = hs.createResource(resource_type, title=title, resource_file=filePathOfSqlite, resource_filename=SqliteName,
                                abstract=abstract, keywords=keywords,
                                edit_users=None, view_users=None, edit_groups=None, view_groups=None,
                                metadata=metadata, extra_metadata=extra_metadata, progress_callback=None)

# add metadata to the SQLite file
options = {"file_path": SqliteName, "hs_file_type": "SingleFile"}
# print options

result = hs.resource(resource_id).functions.set_file_type(options)

resource_id = hs.createResource(resource_type, title=title, resource_file=filePathOfSqlite, resource_filename=SqliteName,
                                abstract=abstract, keywords=keywords, metadata=metadata, extra_metadata=extra_metadata)
#--------------

file = ""
for f in hs.resource(resource_id).files.all():
    file += f.decode('utf8')
# abstract = 'My abstract'
# title = 'My resource'
#
# keywords = ('my keyword 1', 'my keyword 2')
# rtype = 'CompositeResource'
#
# fpath = '/path/to/a/file'
# metadata = '[{"coverage":{"type":"period", "value":{"start":"01/01/2000", "end":"12/12/2010"}}}, {"creator":{"name":"John Smith"}}, {"creator":{"name":"Lisa Miller"}}]'
# extra_metadata = '{"key-1": "value-1", "key-2": "value-2"}'
#
# resource_id = hs.createResource(resource_type, title, resource_file='', keywords=keywords, abstract=abstract,
# metadata=metadata, extra_metadata=extra_metadata)

file_json = json.loads(file)
file_id = file_json["results"][0]["id"]
# print file_id
#--------------



@@ -169,10 +168,14 @@ def publishOnHydraShare(userName, password, filePathOfSqlite, title, abstract, a

}

# add metadata to the SQLite file
options = {"file_path": SqliteName, "hs_file_type": "SingleFile"}
# print options

result = hs.resource(resource_id).functions.set_file_type(options)

spatial=hs.resource(resource_id).files.metadata(file_id, params)
spatial = hs.resource(resource_id).files.metadata(SqliteName, params)

print 'Done'
print 'resource_id= '+resource_id
return resource_id
print spatial

return resource_id
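
For reference, a hedged usage sketch of this function; every argument value below is a placeholder, not a value from the repository:

# Hypothetical call to publishOnHydraShare; all argument values are placeholders
resource_id = publishOnHydraShare(
    userName='my_hydroshare_username',
    password='my_password',
    filePathOfSqlite='C:/data/BearRiverDatasets.sqlite',
    title='Bear River WaMDaM dataset',
    abstract='A WaMDaM SQLite database for the Bear River watershed',
    author='Adel M. Abdallah')
print 'Published resource_id= ' + resource_id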
79 changes: 56 additions & 23 deletions src/controller/HydroShare/trash.py
@@ -1,32 +1,65 @@
-import plotly
-from plotly.graph_objs import *
-
-import os
-import csv
-from collections import OrderedDict
-import sqlite3
-import pandas as pd
-import numpy as np
-from IPython.display import display, Image, SVG, Math, YouTubeVideo
-import urllib
-
-print 'The needed Python libraries have been imported'
-
-username = 'amabdallah'
-password = 'MyHydroShareWorld'
-
-auth = HydroShareAuthBasic(username=username, password=password)
-
-hs = HydroShare(auth=auth)
-print hs
-for resource in hs.resources():
-    print 'Connected to HydroShare'
+import sqlite3
+import numpy as np
+import pandas as pd
+import getpass
+from hs_restclient import HydroShare, HydroShareAuthBasic
+import os
+import json
+
+username = 'amabdallah'
+# username = 'adelabdallah'
+
+password = ''
+
+auth = HydroShareAuthBasic(username=username, password=password)
+
+hs = HydroShare(auth=auth)
+
+resource_type = 'CompositeResource'
+
+abstract = 'My abstract'
+
+title = 'new tes5255t'
+
+keywords = ('my keyword 1', 'my keyword 2')
+rtype = 'CompositeResource'
+
+fpath = 'C:\Users\Rosenberg\Desktop\Ecosystem/WASH3.sqlite'
+
+metadata = '[{"coverage":{"type":"period", "value":{"start":"01/01/2000", "end":"12/12/2010"}}}, {"creator":{"name":"John Smith"}}, {"creator":{"name":"Lisa Miller"}}]'
+
+resource_id = hs.createResource(resource_type, title, resource_file=fpath, keywords=keywords, abstract=abstract,
+                                metadata=metadata)
+
+SqliteName = 'WASH3.sqlite'
+
+params = {}
+params['temporal_coverage'] = {"start": '2000', "end": '2003'}
+params['title'] = SqliteName
+params['spatial_coverage'] = {
+    "type": "box",
+    "units": "Decimal degrees",
+    "eastlimit": float(-110.8200),
+    "northlimit": float(42.8480),
+    "southlimit": float(40.7120),
+    "westlimit": float(-113.0000),
+    "name": "12232",
+    "projection": "WGS 84 EPSG:4326"
+}
+
+# add metadata to the SQLite file
+options = {"file_path": SqliteName, "hs_file_type": "SingleFile"}
+
+result = hs.resource(resource_id).functions.set_file_type(options)
+
+spatial = hs.resource(resource_id).files.metadata(SqliteName, params)
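
The new version of trash.py imports getpass but leaves the password blank, so a natural follow-up is to prompt for the password at run time instead of hard-coding it. A minimal sketch, assuming the same hs_restclient API used above:

# Prompt for the HydroShare password at run time rather than hard-coding it
import getpass
from hs_restclient import HydroShare, HydroShareAuthBasic

username = 'amabdallah'
password = getpass.getpass('HydroShare password: ')

auth = HydroShareAuthBasic(username=username, password=password)
hs = HydroShare(auth=auth)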
