diff --git a/.github/workflows/eluc.yml b/.github/workflows/eluc.yml
index 1e6241a..ea987ba 100644
--- a/.github/workflows/eluc.yml
+++ b/.github/workflows/eluc.yml
@@ -31,6 +31,8 @@ jobs:
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with PyLint
run: pylint ./*
+ - name: Lint with Flake8
+ run: flake8
- name: Run unit tests
run: python -m unittest
diff --git a/use_cases/eluc/.flake8 b/use_cases/eluc/.flake8
new file mode 100644
index 0000000..79a16af
--- /dev/null
+++ b/use_cases/eluc/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 120
\ No newline at end of file
diff --git a/use_cases/eluc/app/app.py b/use_cases/eluc/app/app.py
index 02f53b2..a926e00 100644
--- a/use_cases/eluc/app/app.py
+++ b/use_cases/eluc/app/app.py
@@ -74,8 +74,8 @@
For a given context cell representing a portion of the earth,
identified by its latitude and longitude coordinates, and a given year:
* What changes can we make to the land usage
-* In order to minimize the resulting estimated CO2 emissions? (Emissions from Land Use Change, ELUC,
-in tons of carbon per hectare)
+* In order to minimize the resulting estimated CO2 emissions? (Emissions from Land Use Change, ELUC,
+ in tons of carbon per hectare)
'''),
dcc.Markdown('''## Context'''),
@@ -99,8 +99,8 @@
dcc.Graph(id='presc-fig',
figure=chart_component.create_treemap(type_context=False),
style={'grid-clumn': '4'})
- # This can't be set to auto because the lines will overflow!
], style={'display': 'grid', 'grid-template-columns': '4.5% 40% 1fr 1fr', "width": "100%"}),
+ # The above line can't be set to auto because the lines will overflow!
html.Div([
sliders_component.get_frozen_div(),
html.Button("Sum to 100%", id='sum-button', n_clicks=0),
diff --git a/use_cases/eluc/app/components/chart.py b/use_cases/eluc/app/components/chart.py
index d61d1b4..70b53d1 100644
--- a/use_cases/eluc/app/components/chart.py
+++ b/use_cases/eluc/app/components/chart.py
@@ -11,6 +11,7 @@
from app import utils
from data import constants
+
class ChartComponent:
"""
Component in charge of handling the context and prescription charts.
@@ -161,24 +162,24 @@ def create_treemap(self, data=pd.Series, type_context=True, year=2021) -> go.Fig
fields = data[app_constants.FIELDS].sum()
labels = [title, "Nonland",
- "Crops",
- "Primary Vegetation", "primf", "primn",
- "Secondary Vegetation", "secdf", "secdn",
- "Urban",
- "Fields", "pastr", "range"]
+ "Crops",
+ "Primary Vegetation", "primf", "primn",
+ "Secondary Vegetation", "secdf", "secdn",
+ "Urban",
+ "Fields", "pastr", "range"]
parents = ["", title,
- title,
- title, "Primary Vegetation", "Primary Vegetation",
- title, "Secondary Vegetation", "Secondary Vegetation",
- title,
- title, "Fields", "Fields"]
-
- values = [total + data["nonland"], data["nonland"],
- data["crop"],
- primary, data["primf"], data["primn"],
- secondary, data["secdf"], data["secdn"],
- data["urban"],
- fields, data["pastr"], data["range"]]
+ title,
+ title, "Primary Vegetation", "Primary Vegetation",
+ title, "Secondary Vegetation", "Secondary Vegetation",
+ title,
+ title, "Fields", "Fields"]
+
+ values = [total + data["nonland"], data["nonland"],
+ data["crop"],
+ primary, data["primf"], data["primn"],
+ secondary, data["secdf"], data["secdn"],
+ data["urban"],
+ fields, data["pastr"], data["range"]]
tree_params["customdata"] = self._create_hovertext(labels, parents, values, title)
tree_params["hovertemplate"] = "%{customdata}"
@@ -188,15 +189,15 @@ def create_treemap(self, data=pd.Series, type_context=True, year=2021) -> go.Fig
fig = go.Figure(
go.Treemap(
- labels = labels,
- parents = parents,
- values = values,
+ labels=labels,
+ parents=parents,
+ values=values,
**tree_params
)
)
colors = px.colors.qualitative.Plotly
fig.update_layout(
- treemapcolorway = [colors[1], colors[4], colors[2], colors[7], colors[3], colors[0]],
+ treemapcolorway=[colors[1], colors[4], colors[2], colors[7], colors[3], colors[0]],
margin={"t": 0, "b": 0, "l": 10, "r": 10}
)
return fig
@@ -219,24 +220,24 @@ def create_pie(self, data=pd.Series, type_context=True, year=2021) -> go.Figure:
else:
values = data[app_constants.CHART_COLS].tolist()
- assert(len(values) == len(app_constants.CHART_COLS))
+ assert len(values) == len(app_constants.CHART_COLS)
title = f"Context in {year}" if type_context else f"Prescribed for {year+1}"
# Attempt to match the colors from the treemap
plo = px.colors.qualitative.Plotly
dar = px.colors.qualitative.Dark24
- #['crop', 'pastr', 'primf', 'primn', 'range', 'secdf', 'secdn', 'urban', 'nonland]
+        # ['crop', 'pastr', 'primf', 'primn', 'range', 'secdf', 'secdn', 'urban', 'nonland']
colors = [plo[4], plo[0], plo[2], dar[14], plo[5], plo[7], dar[2], plo[3], plo[1]]
fig = go.Figure(
go.Pie(
- values = values,
- labels = app_constants.CHART_COLS,
- textposition = "inside",
- sort = False,
- marker_colors = colors,
-                hovertemplate = "%{label}<br>%{value}<br>%{percent}",
- title = title
+ values=values,
+ labels=app_constants.CHART_COLS,
+ textposition="inside",
+ sort=False,
+ marker_colors=colors,
+                hovertemplate="%{label}<br>%{value}<br>%{percent}",
+ title=title
)
)
diff --git a/use_cases/eluc/app/components/legend.py b/use_cases/eluc/app/components/legend.py
index 48b1416..4fb15aa 100644
--- a/use_cases/eluc/app/components/legend.py
+++ b/use_cases/eluc/app/components/legend.py
@@ -4,6 +4,7 @@
from dash import dcc
from dash import html
+
# pylint: disable=too-few-public-methods
class LegendComponent:
"""
@@ -15,8 +16,8 @@ def get_legend_div(self):
"""
# Legend examples come from https://hess.copernicus.org/preprints/hess-2021-247/hess-2021-247-ATC3.pdf
legend_div = html.Div(
- style={"margin-bottom": "100px"}, # Because we removed some crops, we extend this so the map doesn't shrink.
- children = [
+ style={"margin-bottom": "100px"}, # Because we removed some crops, we extend this so the map doesn't shrink
+ children=[
dcc.Markdown('''
### Land Use Types
@@ -25,7 +26,7 @@ def get_legend_div(self):
- primf: Primary forest
- primn: Primary nonforest vegetation
-
+
Secondary: Vegetation that has been touched by humans
- secdf: Secondary forest
diff --git a/use_cases/eluc/app/components/lock.py b/use_cases/eluc/app/components/lock.py
index a4a1b9d..0ebd9fa 100644
--- a/use_cases/eluc/app/components/lock.py
+++ b/use_cases/eluc/app/components/lock.py
@@ -6,6 +6,7 @@
from data import constants
+
class LockComponent:
"""
Component that creates lock div based on reco columns.
@@ -20,7 +21,7 @@ def create_check_options(self, values: list) -> list:
for val in values:
options.append(
{"label": [html.I(className="bi bi-lock"), html.Span(val)],
- "value": val})
+ "value": val})
return options
def get_checklist_div(self):
diff --git a/use_cases/eluc/app/components/map.py b/use_cases/eluc/app/components/map.py
index 598839b..1342b83 100644
--- a/use_cases/eluc/app/components/map.py
+++ b/use_cases/eluc/app/components/map.py
@@ -12,6 +12,7 @@
from app import constants as app_constants
+
class MapComponent:
"""
Component handling the map. Keeps track of the latitudes and longitudes a user can select as well as the countries.
@@ -49,8 +50,8 @@ def get_context_div(self):
select_style = {'width': '75%', 'justify-self': 'left', 'margin-top': '-3px'}
context_div = html.Div(
style={'display': 'grid',
- 'grid-template-columns': 'auto 1fr', 'grid-template-rows': 'auto auto auto auto',
- 'position': 'absolute', 'bottom': '0'},
+ 'grid-template-columns': 'auto 1fr', 'grid-template-rows': 'auto auto auto auto',
+ 'position': 'absolute', 'bottom': '0'},
children=[
html.P("Region", style={'grid-column': '1', 'grid-row': '1', 'padding-right': '10px'}),
dcc.Dropdown(
@@ -164,7 +165,7 @@ def update_map(year, lat, lon, location):
return self.create_map(data, 10, idx)
- def create_map(self, df: pd.DataFrame, zoom=10, color_idx = None) -> go.Figure:
+ def create_map(self, df: pd.DataFrame, zoom=10, color_idx=None) -> go.Figure:
"""
Creates map figure with data centered and zoomed in with appropriate point marked.
:param df: DataFrame of data to plot. This dataframe has its index reset.
diff --git a/use_cases/eluc/app/components/prediction.py b/use_cases/eluc/app/components/prediction.py
index 3704c90..944bbc5 100644
--- a/use_cases/eluc/app/components/prediction.py
+++ b/use_cases/eluc/app/components/prediction.py
@@ -16,6 +16,7 @@
from predictors.predictor import Predictor
from predictors.percent_change.percent_change_predictor import PercentChangePredictor
+
class PredictionComponent:
"""
Component in charge of handling predictor selection and predict button callback.
@@ -41,11 +42,11 @@ def load_predictors(self) -> dict[str, Predictor]:
rf_path = "danyoung/eluc-global-rf"
rf_local_dir = app_constants.PREDICTOR_PATH / rf_path.replace("/", "--")
global_nn = nn_persistor.from_pretrained(nn_path,
- local_dir=nn_local_dir)
+ local_dir=nn_local_dir)
global_linreg = sklearn_persistor.from_pretrained(linreg_path,
- local_dir=linreg_local_dir)
+ local_dir=linreg_local_dir)
global_rf = sklearn_persistor.from_pretrained(rf_path,
- local_dir=rf_local_dir)
+ local_dir=rf_local_dir)
predictors["Global Neural Network"] = global_nn
predictors["Global Linear Regression"] = global_linreg
@@ -177,5 +178,5 @@ def context_presc_to_df(self, context: pd.Series, presc: pd.Series) -> pd.DataFr
diff = diff.rename({col: f"{col}_diff" for col in diff.index})
context_actions = diff.combine_first(context[constants.CAO_MAPPING["context"]])
context_actions_df = pd.DataFrame([context_actions])
- context_actions_df[constants.NO_CHANGE_COLS] = 0 # TODO: I'm not entirely sure why this line is necessary
+ context_actions_df[constants.NO_CHANGE_COLS] = 0 # TODO: I'm not entirely sure why this line is necessary
return context_actions_df
diff --git a/use_cases/eluc/app/components/prescription.py b/use_cases/eluc/app/components/prescription.py
index 2e1ba25..c66a529 100644
--- a/use_cases/eluc/app/components/prescription.py
+++ b/use_cases/eluc/app/components/prescription.py
@@ -15,6 +15,7 @@
from persistence.serializers.prescriptor_serializer import PrescriptorSerializer
from prescriptors.prescriptor_manager import PrescriptorManager
+
class PrescriptionComponent():
"""
Component in charge of handling prescriptor selection and prescribe button.
@@ -59,10 +60,10 @@ def get_presc_select_div(self):
html.P("Minimize change", style={"grid-column": "1"}),
html.Div([
dcc.Slider(id='presc-select',
- min=0, max=len(self.prescriptor_list)-1, step=1,
- value=app_constants.DEFAULT_PRESCRIPTOR_IDX,
- included=False,
- marks={i : "" for i in range(len(self.prescriptor_list))})
+ min=0, max=len(self.prescriptor_list)-1, step=1,
+ value=app_constants.DEFAULT_PRESCRIPTOR_IDX,
+ included=False,
+ marks={i: "" for i in range(len(self.prescriptor_list))})
], style={"grid-column": "2", "width": "100%", "margin-top": "8px"}),
html.P("Minimize ELUC", style={"grid-column": "3", "padding-right": "10px"}),
html.Button("Prescribe", id='presc-button', n_clicks=0, style={"grid-column": "4", **margin_style}),
@@ -168,10 +169,9 @@ def create_pareto(self, pareto_df: pd.DataFrame, presc_id: int) -> go.Figure:
})
# Name axes and hide legend
fig.update_layout(xaxis_title={"text": "Change (%)"},
- yaxis_title={"text": 'ELUC (tC/ha)'},
- showlegend=False,
- title="Prescriptors",
- )
+ yaxis_title={"text": 'ELUC (tC/ha)'},
+ showlegend=False,
+ title="Prescriptors")
        fig.update_traces(hovertemplate="Average Change: %{x} %"
                                        "<br>"
                                        " Average ELUC: %{y} tC/ha")
diff --git a/use_cases/eluc/app/components/references.py b/use_cases/eluc/app/components/references.py
index 4817ef9..b6cbe64 100644
--- a/use_cases/eluc/app/components/references.py
+++ b/use_cases/eluc/app/components/references.py
@@ -3,6 +3,7 @@
"""
from dash import html
+
# pylint: disable=too-few-public-methods
class ReferencesComponent:
"""
@@ -16,36 +17,36 @@ def get_references_div(self):
references_div = html.Div([
html.Div(className="parent", children=[
html.P("Code for this project can be found here: ",
- className="child", style=inline_block),
+ className="child", style=inline_block),
html.A("(Project Resilience MVP repo)",
- href="https://github.com/Project-Resilience/mvp/tree/main/use_cases/eluc\n"),
+ href="https://github.com/Project-Resilience/mvp/tree/main/use_cases/eluc\n"),
]),
html.Div(className="parent", children=[
html.P("The paper for this project can be found here: ",
- className="child", style=inline_block),
+ className="child", style=inline_block),
html.A("(arXiv link)", href="https://arxiv.org/abs/2311.12304\n"),
]),
html.Div(className="parent", children=[
html.P("ELUC data provided by the BLUE model ",
- className="child", style=inline_block),
+ className="child", style=inline_block),
html.A("(BLUE: Bookkeeping of land use emissions)",
- href="https://agupubs.onlinelibrary.wiley.com/doi/10.1002/2014GB004997\n"),
+ href="https://agupubs.onlinelibrary.wiley.com/doi/10.1002/2014GB004997\n"),
]),
html.Div(className="parent", children=[
html.P("Land use change data provided by the LUH2 project",
- className="child", style=inline_block),
+ className="child", style=inline_block),
html.A("(LUH2: Land Use Harmonization 2)", href="https://luh.umd.edu/\n"),
]),
html.Div(className="parent", children=[
html.P("Setup is described in Appendix C2.1 of the GCB 2022 report",
- className="child", style=inline_block),
+ className="child", style=inline_block),
html.A("(Global Carbon Budget 2022 report)",
- href="https://essd.copernicus.org/articles/14/4811/2022/#section10/\n"),
+ href="https://essd.copernicus.org/articles/14/4811/2022/#section10/\n"),
]),
html.Div(className="parent", children=[
html.P("The Global Carbon Budget report assesses the global CO2 budget \
for the Intergovernmental Panel on Climate Change",
- className="child", style=inline_block),
+ className="child", style=inline_block),
html.A("(IPCC)", href="https://www.ipcc.ch/\n"),
]),
])
diff --git a/use_cases/eluc/app/components/sliders.py b/use_cases/eluc/app/components/sliders.py
index 936fd78..d1507a1 100644
--- a/use_cases/eluc/app/components/sliders.py
+++ b/use_cases/eluc/app/components/sliders.py
@@ -10,6 +10,7 @@
from app import utils
from data import constants
+
class SlidersComponent:
"""
Component that displays the sliders, shows their values in frozen inputs, resets the sliders when context changes,
@@ -56,7 +57,7 @@ def get_frozen_div(self):
type="text",
disabled=True,
id={"type": "frozen-input", "index": f"{col}-frozen"})
- for col in app_constants.NO_CHANGE_COLS + ["nonland"]
+ for col in app_constants.NO_CHANGE_COLS + ["nonland"]
])
return frozen_div
diff --git a/use_cases/eluc/app/components/trivia.py b/use_cases/eluc/app/components/trivia.py
index 9accbbe..44b8637 100644
--- a/use_cases/eluc/app/components/trivia.py
+++ b/use_cases/eluc/app/components/trivia.py
@@ -7,6 +7,7 @@
from app import constants as app_constants
+
class TriviaComponent():
"""
Component in charge of generating the trivia div as well as updating it after prediction is made.
@@ -28,7 +29,7 @@ def get_trivia_div(self):
className="child",
style=inline_block
),
- html.P(id="total-em", style={"font-weight": "bold"}|inline_block)
+ html.P(id="total-em", style={"font-weight": "bold"} | inline_block)
]),
html.Div(className="parent", children=[
html.I(className="bi bi-airplane", style=inline_block),
@@ -37,7 +38,7 @@ def get_trivia_div(self):
className="child",
style=inline_block
),
- html.P(f"{app_constants.CO2_JFK_GVA} tonnes CO2", style={"font-weight": "bold"}|inline_block)
+ html.P(f"{app_constants.CO2_JFK_GVA} tonnes CO2", style={"font-weight": "bold"} | inline_block)
]),
html.Div(className="parent", children=[
html.I(className="bi bi-airplane", style=inline_block),
@@ -46,7 +47,7 @@ def get_trivia_div(self):
className="child",
style=inline_block
),
- html.P(id="tickets", style={"font-weight": "bold"}|inline_block)
+ html.P(id="tickets", style={"font-weight": "bold"} | inline_block)
]),
html.Div(className="parent", children=[
html.I(className="bi bi-person", style=inline_block),
@@ -55,7 +56,7 @@ def get_trivia_div(self):
className="child",
style=inline_block
),
- html.P(f"{app_constants.CO2_PERSON} tonnes CO2", style={"font-weight": "bold"}|inline_block)
+ html.P(f"{app_constants.CO2_PERSON} tonnes CO2", style={"font-weight": "bold"} | inline_block)
]),
html.Div(className="parent", children=[
html.I(className="bi bi-person", style=inline_block),
@@ -64,7 +65,7 @@ def get_trivia_div(self):
className="child",
style=inline_block
),
- html.P(id="people", style={"font-weight": "bold"}|inline_block)
+ html.P(id="people", style={"font-weight": "bold"} | inline_block)
]),
html.P(
"(Sources: https://flightfree.org/flight-emissions-calculator \
@@ -104,5 +105,5 @@ def update_trivia(eluc_str, year, lat, lon):
eluc = float(eluc_str)
total_reduction = eluc * area * app_constants.TC_TO_TCO2
return f"{-1 * total_reduction:,.2f} tonnes CO2", \
- f"{-1 * total_reduction // app_constants.CO2_JFK_GVA:,.0f} tickets", \
- f"{-1 * total_reduction // app_constants.CO2_PERSON:,.0f} people"
+ f"{-1 * total_reduction // app_constants.CO2_JFK_GVA:,.0f} tickets", \
+ f"{-1 * total_reduction // app_constants.CO2_PERSON:,.0f} people"
diff --git a/use_cases/eluc/app/process_data.py b/use_cases/eluc/app/process_data.py
index d5bf273..20c34ed 100644
--- a/use_cases/eluc/app/process_data.py
+++ b/use_cases/eluc/app/process_data.py
@@ -7,6 +7,7 @@
from app.constants import APP_START_YEAR
from data.eluc_data import ELUCData
+
def main():
"""
Main function that loads the data and saves it.
@@ -19,5 +20,6 @@ def main():
save_dir.mkdir(exist_ok=True)
test_df.to_csv(save_dir / "app_data.csv")
+
if __name__ == "__main__":
main()
diff --git a/use_cases/eluc/app/utils.py b/use_cases/eluc/app/utils.py
index 7d5cfae..f68334d 100644
--- a/use_cases/eluc/app/utils.py
+++ b/use_cases/eluc/app/utils.py
@@ -6,6 +6,7 @@
import app.constants as app_constants
from data import constants
+
def add_nonland(data: pd.Series) -> pd.Series:
"""
Adds a nonland column that is the difference between 1 and
diff --git a/use_cases/eluc/data/constants.py b/use_cases/eluc/data/constants.py
index 0a1aca5..040f981 100644
--- a/use_cases/eluc/data/constants.py
+++ b/use_cases/eluc/data/constants.py
@@ -9,10 +9,10 @@
CODES_PATH = "data/codes.csv"
# Different variations of land-use change columns
-LAND_USE_COLS = ['c3ann', 'c3nfx', 'c3per','c4ann', 'c4per',
+LAND_USE_COLS = ['c3ann', 'c3nfx', 'c3per', 'c4ann', 'c4per',
'pastr', 'primf', 'primn',
'range', 'secdf', 'secdn', 'urban']
-CROP_COLS = ['c3ann', 'c3nfx', 'c3per','c4ann', 'c4per']
+CROP_COLS = ['c3ann', 'c3nfx', 'c3per', 'c4ann', 'c4per']
LAND_USE_COLS = ["crop"] + [col for col in LAND_USE_COLS if col not in CROP_COLS]
DIFF_LAND_USE_COLS = [f"{col}_diff" for col in LAND_USE_COLS]
COLS_MAP = dict(zip(LAND_USE_COLS, DIFF_LAND_USE_COLS))
@@ -38,5 +38,5 @@
# Context-action-outcome mapping for prescription.
CAO_MAPPING = {'context': LAND_USE_COLS + NONLAND_FEATURES,
- 'actions': DIFF_LAND_USE_COLS,
+ 'actions': DIFF_LAND_USE_COLS,
'outcomes': ["ELUC", "change"]}
diff --git a/use_cases/eluc/data/conversion.py b/use_cases/eluc/data/conversion.py
index 1bac636..4aca155 100644
--- a/use_cases/eluc/data/conversion.py
+++ b/use_cases/eluc/data/conversion.py
@@ -45,6 +45,7 @@
"SS": 728
}
+
def construct_countries_df():
"""
Constructs a dataframe mapping of countries, their abbreviations, and their proper codes.
diff --git a/use_cases/eluc/data/eluc_data.py b/use_cases/eluc/data/eluc_data.py
index 4f55b0a..1eedd27 100644
--- a/use_cases/eluc/data/eluc_data.py
+++ b/use_cases/eluc/data/eluc_data.py
@@ -14,6 +14,7 @@
from data.conversion import construct_countries_df
from data.eluc_encoder import ELUCEncoder
+
class ELUCData():
"""
Wrapper for pandas dataframe that separates the data into train and test sets based on the time column.
@@ -86,7 +87,7 @@ def import_data(path, update_path):
raw = raw.merge(eluc)
# Shift actions back a year
- raw_diffs = ['c3ann', 'c3nfx', 'c3per','c4ann', 'c4per',
+ raw_diffs = ['c3ann', 'c3nfx', 'c3per', 'c4ann', 'c4per',
'pastr', 'primf', 'primn', 'range',
'secdf', 'secdn', 'urban']
raw_diffs = [f"{col}_diff" for col in raw_diffs]
diff --git a/use_cases/eluc/data/eluc_encoder.py b/use_cases/eluc/data/eluc_encoder.py
index 7b4419b..f6365af 100644
--- a/use_cases/eluc/data/eluc_encoder.py
+++ b/use_cases/eluc/data/eluc_encoder.py
@@ -8,6 +8,7 @@
from data import constants
+
class ELUCEncoder():
"""
Creates an encoder for a pandas dataset by collecting fields used for minmax scaling.
diff --git a/use_cases/eluc/data/torch_data.py b/use_cases/eluc/data/torch_data.py
index c0904ce..980b616 100644
--- a/use_cases/eluc/data/torch_data.py
+++ b/use_cases/eluc/data/torch_data.py
@@ -8,6 +8,7 @@
import torch
from torch.utils.data.dataset import Dataset
+
class TorchDataset(Dataset):
"""
Simple custom torch dataset.
diff --git a/use_cases/eluc/experiments/predictor_significance.py b/use_cases/eluc/experiments/predictor_significance.py
index b8d702e..c74f71a 100644
--- a/use_cases/eluc/experiments/predictor_significance.py
+++ b/use_cases/eluc/experiments/predictor_significance.py
@@ -16,6 +16,7 @@
from predictors.neural_network.neural_net_predictor import NeuralNetPredictor
from predictors.sklearn_predictor.sklearn_predictor import RandomForestPredictor, LinearRegressionPredictor
+
def train_and_test(n: int,
model_constructor,
config: dict,
@@ -78,6 +79,7 @@ def train_and_test(n: int,
results_df = pd.DataFrame(results)
results_df.to_csv(save_path)
+
def main():
"""
Main function call that performs significance tests.
@@ -117,13 +119,14 @@ def main():
override_start_year = None if model_name != "random_forest" else 1982
print(model_name)
train_and_test(30,
- model_constructor,
- config,
- dataset.train_df,
- dataset.test_df,
- train_regions,
- significance_path / f"{model_name}_eval.csv",
- override_start_year=override_start_year)
+ model_constructor,
+ config,
+ dataset.train_df,
+ dataset.test_df,
+ train_regions,
+ significance_path / f"{model_name}_eval.csv",
+ override_start_year=override_start_year)
+
if __name__ == "__main__":
main()
diff --git a/use_cases/eluc/persistence/persistors/hf_persistor.py b/use_cases/eluc/persistence/persistors/hf_persistor.py
index 92a4680..dc57fb7 100644
--- a/use_cases/eluc/persistence/persistors/hf_persistor.py
+++ b/use_cases/eluc/persistence/persistors/hf_persistor.py
@@ -7,6 +7,7 @@
from persistence.persistors.persistor import Persistor
+
class HuggingFacePersistor(Persistor):
"""
Persists models to and from HuggingFace repo.
diff --git a/use_cases/eluc/persistence/persistors/persistor.py b/use_cases/eluc/persistence/persistors/persistor.py
index 5ae9ef6..a1b3887 100644
--- a/use_cases/eluc/persistence/persistors/persistor.py
+++ b/use_cases/eluc/persistence/persistors/persistor.py
@@ -8,6 +8,7 @@
from persistence.serializers.serializer import Serializer
+
class Persistor(ABC):
"""
Abstract class for persistors to inherit from.
diff --git a/use_cases/eluc/persistence/serializers/neural_network_serializer.py b/use_cases/eluc/persistence/serializers/neural_network_serializer.py
index 01a75e1..3210db1 100644
--- a/use_cases/eluc/persistence/serializers/neural_network_serializer.py
+++ b/use_cases/eluc/persistence/serializers/neural_network_serializer.py
@@ -11,6 +11,7 @@
from predictors.neural_network.eluc_neural_net import ELUCNeuralNet
from predictors.neural_network.neural_net_predictor import NeuralNetPredictor
+
class NeuralNetSerializer(Serializer):
"""
Serializer for the NeuralNetPredictor.
@@ -53,7 +54,7 @@ def load(self, path: Path) -> "NeuralNetPredictor":
raise FileNotFoundError(f"Path {path} does not exist.")
if not (path / "config.json").exists() or \
not (path / "model.pt").exists() or \
- not (path / "scaler.joblib").exists():
+ not (path / "scaler.joblib").exists():
raise FileNotFoundError("Model files not found in path.")
# Initialize model with config
diff --git a/use_cases/eluc/persistence/serializers/prescriptor_serializer.py b/use_cases/eluc/persistence/serializers/prescriptor_serializer.py
index 790c2e9..8368bab 100644
--- a/use_cases/eluc/persistence/serializers/prescriptor_serializer.py
+++ b/use_cases/eluc/persistence/serializers/prescriptor_serializer.py
@@ -11,6 +11,7 @@
from prescriptors.nsga2.candidate import Candidate
from prescriptors.nsga2.land_use_prescriptor import LandUsePrescriptor
+
class PrescriptorSerializer(Serializer):
"""
Serializer in charge of saving single prescriptor model from LandUsePrescriptor.
diff --git a/use_cases/eluc/persistence/serializers/serializer.py b/use_cases/eluc/persistence/serializers/serializer.py
index e44de21..d2412cc 100644
--- a/use_cases/eluc/persistence/serializers/serializer.py
+++ b/use_cases/eluc/persistence/serializers/serializer.py
@@ -4,6 +4,7 @@
from abc import ABC, abstractmethod
from pathlib import Path
+
class Serializer(ABC):
"""
Abstract class responsible for saving and loading predictor/prescriptor models locally.
diff --git a/use_cases/eluc/persistence/serializers/sklearn_serializer.py b/use_cases/eluc/persistence/serializers/sklearn_serializer.py
index 65484ed..430422d 100644
--- a/use_cases/eluc/persistence/serializers/sklearn_serializer.py
+++ b/use_cases/eluc/persistence/serializers/sklearn_serializer.py
@@ -9,6 +9,7 @@
from persistence.serializers.serializer import Serializer
from predictors.sklearn_predictor.sklearn_predictor import SKLearnPredictor
+
class SKLearnSerializer(Serializer):
"""
Serializer for the SKLearnPredictor.
diff --git a/use_cases/eluc/predictors/custom/template/template_predictor.py b/use_cases/eluc/predictors/custom/template/template_predictor.py
index 2b46067..1869c82 100644
--- a/use_cases/eluc/predictors/custom/template/template_predictor.py
+++ b/use_cases/eluc/predictors/custom/template/template_predictor.py
@@ -6,6 +6,7 @@
from data import constants
from predictors.predictor import Predictor
+
class TemplatePredictor(Predictor):
"""
A template predictor returning dummy values for ELUC.
@@ -23,11 +24,11 @@ def fit(self, X_train, y_train):
def predict(self, context_actions_df: pd.DataFrame) -> pd.DataFrame:
dummy_eluc = list(range(len(context_actions_df)))
return pd.DataFrame({"ELUC": dummy_eluc}, index=context_actions_df.index)
-
+
@classmethod
def load(cls, path: str) -> "TemplatePredictor":
"""
Dummy load function that just returns a new instance of the class.
"""
print("Loading model from", path)
- return cls()
\ No newline at end of file
+ return cls()
diff --git a/use_cases/eluc/predictors/neural_network/eluc_neural_net.py b/use_cases/eluc/predictors/neural_network/eluc_neural_net.py
index 41f243c..ad9433a 100644
--- a/use_cases/eluc/predictors/neural_network/eluc_neural_net.py
+++ b/use_cases/eluc/predictors/neural_network/eluc_neural_net.py
@@ -3,6 +3,7 @@
"""
import torch
+
class ELUCNeuralNet(torch.nn.Module):
"""
Custom torch neural network module.
@@ -23,6 +24,7 @@ def __init__(self, in_size: int, out_size: int, dropout: float):
torch.nn.ReLU(),
torch.nn.Dropout(p=dropout)
)
+
def forward(self, X: torch.FloatTensor) -> torch.FloatTensor:
"""
Passes input through the block.
diff --git a/use_cases/eluc/predictors/neural_network/neural_net_predictor.py b/use_cases/eluc/predictors/neural_network/neural_net_predictor.py
index 1b52d07..ad8631b 100644
--- a/use_cases/eluc/predictors/neural_network/neural_net_predictor.py
+++ b/use_cases/eluc/predictors/neural_network/neural_net_predictor.py
@@ -19,6 +19,7 @@
from predictors.predictor import Predictor
from predictors.neural_network.eluc_neural_net import ELUCNeuralNet
+
class NeuralNetPredictor(Predictor):
"""
Simple feed-forward neural network predictor implemented in PyTorch.
@@ -75,7 +76,7 @@ def fit(self, X_train: pd.DataFrame, y_train: pd.Series,
:param y_test: test labels.
:param log_path: path to log training data to tensorboard.
:param verbose: whether to print progress bars.
- :return: dictionary of results from training containing time taken, best epoch, best loss,
+ :return: dictionary of results from training containing time taken, best epoch, best loss,
and test loss if applicable.
"""
if not self.features:
@@ -176,7 +177,6 @@ def fit(self, X_train: pd.DataFrame, y_train: pd.Series,
return result_dict
-
def predict(self, context_actions_df: pd.DataFrame) -> pd.DataFrame:
"""
Generates prediction from model for given test data.
diff --git a/use_cases/eluc/predictors/percent_change/percent_change_predictor.py b/use_cases/eluc/predictors/percent_change/percent_change_predictor.py
index 7d7b003..fb5ef05 100644
--- a/use_cases/eluc/predictors/percent_change/percent_change_predictor.py
+++ b/use_cases/eluc/predictors/percent_change/percent_change_predictor.py
@@ -6,6 +6,7 @@
from data import constants
from predictors.predictor import Predictor
+
class PercentChangePredictor(Predictor):
"""
Heuristic that calculates the percent change of land use from actions and context.
@@ -28,7 +29,7 @@ def predict(self, context_actions_df: pd.DataFrame) -> pd.DataFrame:
percent_changed = pos_diffs[constants.DIFF_LAND_USE_COLS].sum(axis=1)
# Divide by sum of used land
total_land = context_actions_df[constants.LAND_USE_COLS].sum(axis=1)
- total_land = total_land.replace(0, 1) # Avoid division by 0
+ total_land = total_land.replace(0, 1) # Avoid division by 0
percent_changed = percent_changed / total_land
change_df = pd.DataFrame(percent_changed, columns=["change"])
return change_df
diff --git a/use_cases/eluc/predictors/predictor.py b/use_cases/eluc/predictors/predictor.py
index 1a36313..4ddfe2d 100644
--- a/use_cases/eluc/predictors/predictor.py
+++ b/use_cases/eluc/predictors/predictor.py
@@ -5,6 +5,7 @@
import pandas as pd
+
class Predictor(ABC):
"""
Abstract class for predictors to inherit from.
diff --git a/use_cases/eluc/predictors/scoring/scorer.py b/use_cases/eluc/predictors/scoring/scorer.py
index 9bc8340..b6c6bea 100644
--- a/use_cases/eluc/predictors/scoring/scorer.py
+++ b/use_cases/eluc/predictors/scoring/scorer.py
@@ -14,6 +14,7 @@
from predictors.predictor import Predictor
from predictors.scoring.validator import Validator
+
class PredictorScorer:
"""
Scoring class to evaluate predictors on a dataset.
@@ -59,7 +60,7 @@ def dynamically_load_models(self, config: dict) -> list[Predictor]:
raise ValueError("Model type must be either 'hf' or 'local'")
predictors[model["filepath"]] = predictor
return predictors
-
+
def score_models(self, test_df: pd.DataFrame) -> dict[str, float]:
"""
Scores our list of predictors on a given test dataframe.
@@ -78,6 +79,7 @@ def score_models(self, test_df: pd.DataFrame) -> dict[str, float]:
results = dict(sorted(results.items(), key=lambda item: item[1]))
return results
+
def run_scoring():
"""
A demo script to show how the PredictorScorer class works.
@@ -90,5 +92,6 @@ def run_scoring():
print("Results:")
print(results)
+
if __name__ == "__main__":
run_scoring()
diff --git a/use_cases/eluc/predictors/scoring/validator.py b/use_cases/eluc/predictors/scoring/validator.py
index 73dd804..8569f5f 100644
--- a/use_cases/eluc/predictors/scoring/validator.py
+++ b/use_cases/eluc/predictors/scoring/validator.py
@@ -3,6 +3,7 @@
"""
import pandas as pd
+
class Validator():
"""
Validates input and output dataframes for predictor scoring.
diff --git a/use_cases/eluc/predictors/sklearn_predictor/sklearn_predictor.py b/use_cases/eluc/predictors/sklearn_predictor/sklearn_predictor.py
index 86ce3d5..fcb3339 100644
--- a/use_cases/eluc/predictors/sklearn_predictor/sklearn_predictor.py
+++ b/use_cases/eluc/predictors/sklearn_predictor/sklearn_predictor.py
@@ -11,6 +11,7 @@
from data import constants
from predictors.predictor import Predictor
+
class SKLearnPredictor(Predictor, ABC):
"""
Simple abstract class for sklearn predictors.
@@ -52,6 +53,7 @@ def predict(self, context_actions_df: pd.DataFrame) -> pd.DataFrame:
y_pred = self.model.predict(context_actions_df)
return pd.DataFrame(y_pred, index=context_actions_df.index, columns=[self.config["label"]])
+
class LinearRegressionPredictor(SKLearnPredictor):
"""
Simple linear regression predictor.
@@ -63,7 +65,8 @@ def __init__(self, model_config: dict):
lr_config = {key: value for key, value in model_config.items() if key not in ["features", "label"]}
model = LinearRegression(**lr_config)
super().__init__(model, model_config)
-
+
+
class RandomForestPredictor(SKLearnPredictor):
"""
Simple random forest predictor.
diff --git a/use_cases/eluc/prescriptors/heuristics/heuristics.py b/use_cases/eluc/prescriptors/heuristics/heuristics.py
index 42c3824..80cdab1 100644
--- a/use_cases/eluc/prescriptors/heuristics/heuristics.py
+++ b/use_cases/eluc/prescriptors/heuristics/heuristics.py
@@ -8,6 +8,7 @@
from data import constants
from prescriptors.prescriptor import Prescriptor
+
class HeuristicPrescriptor(Prescriptor, ABC):
"""
Abstract heuristic prescriptor class that inherits from prescriptor class.
@@ -22,7 +23,7 @@ def __init__(self, pct: float):
@abstractmethod
def _reco_heuristic(self, pct: float, context_df: pd.DataFrame) -> pd.DataFrame:
"""
- Abstract method that takes a percentage threshold of land change and a
+ Abstract method that takes a percentage threshold of land change and a
context dataframe and returns a dataframe of recommendations based on the heuristic.
"""
raise NotImplementedError
@@ -42,6 +43,7 @@ def prescribe(self, context_df: pd.DataFrame) -> pd.DataFrame:
context_actions_df = pd.concat([context_df, prescribed_actions_df[constants.DIFF_LAND_USE_COLS]], axis=1)
return context_actions_df
+
class EvenHeuristic(HeuristicPrescriptor):
"""
Implementation of HeuristicPrescriptor that evenly distributes land use to a "best" column.
@@ -73,12 +75,13 @@ def _reco_heuristic(self, pct: float, context_df: pd.DataFrame):
adjusted = adjusted.drop(["scaled_change", "row_sum", "max_change"], axis=1)
return adjusted
+
class PerfectHeuristic(HeuristicPrescriptor):
"""
- Implementation of HeuristicPrescriptor that does an informed land use prescription
+ Implementation of HeuristicPrescriptor that does an informed land use prescription
based on linear regression coefficients.
"""
- def __init__(self, pct:float, coefs: list[float]):
+ def __init__(self, pct: float, coefs: list[float]):
"""
We save and sort the columns by highest coefficient i.e. most emissions.
Separate the best column according to the coefficients to add to.
diff --git a/use_cases/eluc/prescriptors/nsga2/candidate.py b/use_cases/eluc/prescriptors/nsga2/candidate.py
index e7fd06a..9d8674a 100644
--- a/use_cases/eluc/prescriptors/nsga2/candidate.py
+++ b/use_cases/eluc/prescriptors/nsga2/candidate.py
@@ -4,6 +4,7 @@
"""
import torch
+
class Candidate(torch.nn.Module):
"""
Simple fixed topology 1 hidden layer feed-forward nn candidate.
@@ -87,9 +88,8 @@ def record_state(self) -> dict:
raise ValueError("Candidate has not been evaluated yet")
cand_state = {"id": self.cand_id,
"parents": self.parents,
- "NSGA-II_rank": self.rank, # Named this to match ESP
- "distance": self.distance,
- }
+ "NSGA-II_rank": self.rank, # Named this to match ESP
+ "distance": self.distance}
metrics = self.metrics if self.metrics else [float("inf"), float("inf")]
cand_state["ELUC"] = metrics[0]
cand_state["change"] = metrics[1]
diff --git a/use_cases/eluc/prescriptors/nsga2/create_seeds.py b/use_cases/eluc/prescriptors/nsga2/create_seeds.py
index 099107a..f707679 100644
--- a/use_cases/eluc/prescriptors/nsga2/create_seeds.py
+++ b/use_cases/eluc/prescriptors/nsga2/create_seeds.py
@@ -13,6 +13,7 @@
from data.torch_data import TorchDataset
from prescriptors.nsga2.candidate import Candidate
+
def supervised_backprop(save_path: Path, ds: TorchDataset):
"""
Performs supervised backpropagation on the given dataset to create a Candidate.
@@ -51,6 +52,7 @@ def supervised_backprop(save_path: Path, ds: TorchDataset):
torch.save(seed.state_dict(), save_path)
+
def seed_no_change(seed_dir: Path, df: pd.DataFrame, encoded_df: pd.DataFrame):
"""
Creates a seed that attempts to prescribe the same reco cols as the input.
@@ -59,6 +61,7 @@ def seed_no_change(seed_dir: Path, df: pd.DataFrame, encoded_df: pd.DataFrame):
seed_dir.mkdir(parents=True, exist_ok=True)
supervised_backprop(seed_dir / "no_change.pt", ds)
+
def seed_max_change(seed_dir: Path, df: pd.DataFrame, encoded_df: pd.DataFrame):
"""
Creates a seed that attempts to prescribe the max change to secdf.
@@ -77,6 +80,7 @@ def seed_max_change(seed_dir: Path, df: pd.DataFrame, encoded_df: pd.DataFrame):
seed_dir.mkdir(parents=True, exist_ok=True)
supervised_backprop(seed_dir / "max_change.pt", ds)
+
if __name__ == "__main__":
dataset = ELUCData.from_hf()
train_df = dataset.train_df.sample(10000)
diff --git a/use_cases/eluc/prescriptors/nsga2/land_use_prescriptor.py b/use_cases/eluc/prescriptors/nsga2/land_use_prescriptor.py
index 0d00656..8a58219 100644
--- a/use_cases/eluc/prescriptors/nsga2/land_use_prescriptor.py
+++ b/use_cases/eluc/prescriptors/nsga2/land_use_prescriptor.py
@@ -12,12 +12,13 @@
from prescriptors.nsga2.candidate import Candidate
from prescriptors.prescriptor import Prescriptor
+
class LandUsePrescriptor(Prescriptor):
"""
Prescriptor object that wraps around a single candidate that was trained via.
evolution using NSGA-II.
"""
- def __init__(self, candidate: Candidate, encoder: ELUCEncoder, batch_size: int=4096):
+ def __init__(self, candidate: Candidate, encoder: ELUCEncoder, batch_size: int = 4096):
super().__init__(constants.CAO_MAPPING["context"], constants.CAO_MAPPING["actions"])
self.candidate = candidate
self.encoder = encoder
@@ -30,10 +31,10 @@ def _reco_tensor_to_df(self, reco_tensor: torch.Tensor, context_df: pd.DataFrame
the land diffs.
"""
reco_df = pd.DataFrame(reco_tensor.cpu().numpy(), index=context_df.index, columns=constants.RECO_COLS)
- reco_df = reco_df.clip(0, None) # ReLU
- reco_df[reco_df.sum(axis=1) == 0] = 1 # Rows of all 0s are set to 1s
- reco_df = reco_df.div(reco_df.sum(axis=1), axis=0) # Normalize to sum to 1
- reco_df = reco_df.mul(context_df[constants.RECO_COLS].sum(axis=1), axis=0) # Rescale to match original sum
+ reco_df = reco_df.clip(0, None) # ReLU
+ reco_df[reco_df.sum(axis=1) == 0] = 1 # Rows of all 0s are set to 1s
+ reco_df = reco_df.div(reco_df.sum(axis=1), axis=0) # Normalize to sum to 1
+ reco_df = reco_df.mul(context_df[constants.RECO_COLS].sum(axis=1), axis=0) # Rescale to match original sum
return reco_df
def _reco_to_context_actions(self, reco_df: pd.DataFrame, context_df: pd.DataFrame) -> pd.DataFrame:
@@ -45,9 +46,7 @@ def _reco_to_context_actions(self, reco_df: pd.DataFrame, context_df: pd.DataFra
presc_actions_df = reco_df - context_df[constants.RECO_COLS]
presc_actions_df = presc_actions_df.rename(constants.RECO_MAP, axis=1)
presc_actions_df[constants.NO_CHANGE_COLS] = 0
- context_actions_df = pd.concat([context_df[self.context],
- presc_actions_df[self.actions]],
- axis=1)
+ context_actions_df = pd.concat([context_df[self.context], presc_actions_df[self.actions]], axis=1)
return context_actions_df
def prescribe(self, context_df) -> pd.DataFrame:
diff --git a/use_cases/eluc/prescriptors/nsga2/nsga2_utils.py b/use_cases/eluc/prescriptors/nsga2/nsga2_utils.py
index 781b0fe..a6a8422 100644
--- a/use_cases/eluc/prescriptors/nsga2/nsga2_utils.py
+++ b/use_cases/eluc/prescriptors/nsga2/nsga2_utils.py
@@ -3,6 +3,7 @@
"""
from prescriptors.nsga2.candidate import Candidate
+
# pylint: disable=invalid-name
def fast_non_dominated_sort(candidates: list):
"""
@@ -57,6 +58,7 @@ def fast_non_dominated_sort(candidates: list):
return candidate_fronts, rank
+
def calculate_crowding_distance(front):
"""
Set crowding distance of each candidate in front.
@@ -78,6 +80,7 @@ def calculate_crowding_distance(front):
else:
sorted_front[i].distance += 0
+
def dominates(candidate1: Candidate, candidate2: Candidate) -> bool:
"""
Determine if one individual dominates another.
diff --git a/use_cases/eluc/prescriptors/nsga2/train_prescriptors.py b/use_cases/eluc/prescriptors/nsga2/train_prescriptors.py
index 36ce077..01e38d8 100644
--- a/use_cases/eluc/prescriptors/nsga2/train_prescriptors.py
+++ b/use_cases/eluc/prescriptors/nsga2/train_prescriptors.py
@@ -13,8 +13,8 @@
from prescriptors.nsga2.trainer import TorchTrainer
from predictors.percent_change.percent_change_predictor import PercentChangePredictor
-if __name__ == "__main__":
+if __name__ == "__main__":
# Load config
parser = argparse.ArgumentParser()
parser.add_argument("--config_path", type=str, required=True)
diff --git a/use_cases/eluc/prescriptors/nsga2/trainer.py b/use_cases/eluc/prescriptors/nsga2/trainer.py
index 5778a16..9743652 100644
--- a/use_cases/eluc/prescriptors/nsga2/trainer.py
+++ b/use_cases/eluc/prescriptors/nsga2/trainer.py
@@ -19,6 +19,7 @@
from prescriptors.nsga2.land_use_prescriptor import LandUsePrescriptor
from prescriptors.prescriptor_manager import PrescriptorManager
+
class TorchTrainer():
"""
Handles prescriptor candidate evolution
@@ -39,7 +40,7 @@ def __init__(self,
self.pop_size = pop_size
self.n_generations = n_generations
self.p_mutation = p_mutation
- self.seed_dir=seed_dir
+ self.seed_dir = seed_dir
# Evaluation params
self.encoder = encoder
@@ -95,7 +96,7 @@ def _tournament_selection(self, sorted_parents: list[Candidate]) -> tuple[Candid
idx2 = min(random.choices(range(len(sorted_parents)), k=2))
return sorted_parents[idx1], sorted_parents[idx2]
- def _make_new_pop(self, parents: list[Candidate], pop_size: int, gen:int) -> list[Candidate]:
+ def _make_new_pop(self, parents: list[Candidate], pop_size: int, gen: int) -> list[Candidate]:
"""
Makes new population by creating children from parents.
We use tournament selection to select parents for crossover.
@@ -176,4 +177,3 @@ def _record_candidate_avgs(self, gen: int, candidates: list[Candidate]) -> dict:
avg_eluc = np.mean([c.metrics[0] for c in candidates])
avg_change = np.mean([c.metrics[1] for c in candidates])
return {"gen": gen, "eluc": avg_eluc, "change": avg_change}
-
\ No newline at end of file
diff --git a/use_cases/eluc/prescriptors/prescriptor.py b/use_cases/eluc/prescriptors/prescriptor.py
index cca3ef0..6e3f7e9 100644
--- a/use_cases/eluc/prescriptors/prescriptor.py
+++ b/use_cases/eluc/prescriptors/prescriptor.py
@@ -5,6 +5,7 @@
import pandas as pd
+
class Prescriptor(ABC):
"""
Abstract class for prescriptors to allow us to experiment with different implementations.
diff --git a/use_cases/eluc/prescriptors/prescriptor_manager.py b/use_cases/eluc/prescriptors/prescriptor_manager.py
index 9e291b2..cef5879 100644
--- a/use_cases/eluc/prescriptors/prescriptor_manager.py
+++ b/use_cases/eluc/prescriptors/prescriptor_manager.py
@@ -8,6 +8,7 @@
from predictors.predictor import Predictor
from prescriptors.prescriptor import Prescriptor
+
class PrescriptorManager():
"""
Stores many Prescriptor objects and some predictors.
diff --git a/use_cases/eluc/requirements.txt b/use_cases/eluc/requirements.txt
index ade7bf8..6e920fb 100644
--- a/use_cases/eluc/requirements.txt
+++ b/use_cases/eluc/requirements.txt
@@ -41,6 +41,7 @@ einops==0.6.1
exceptiongroup==1.1.1
fasteners==0.18
Fiona==1.9.4.post1
+flake8==7.1.0
Flask==2.2.5
flatbuffers==23.5.26
fonttools==4.39.4
diff --git a/use_cases/eluc/tests/test_app.py b/use_cases/eluc/tests/test_app.py
index bc7430d..cc34773 100644
--- a/use_cases/eluc/tests/test_app.py
+++ b/use_cases/eluc/tests/test_app.py
@@ -9,6 +9,7 @@
from app import utils
from data import constants
+
class TestUtilFunctions(unittest.TestCase):
"""
Tests app utilities.
diff --git a/use_cases/eluc/tests/test_compute_change.py b/use_cases/eluc/tests/test_compute_change.py
index a713ef3..0ef269d 100644
--- a/use_cases/eluc/tests/test_compute_change.py
+++ b/use_cases/eluc/tests/test_compute_change.py
@@ -8,6 +8,7 @@
from data import constants
from predictors.percent_change.percent_change_predictor import PercentChangePredictor
+
class TestComputeChange(unittest.TestCase):
"""
Tests the prescriptor compute change method.
@@ -56,7 +57,7 @@ def test_compute_percent_change_no_change(self):
Tests compute percent change when nothing changes.
"""
context_data = [0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.12]
- presc_data = context_data[0:6] + context_data [8:11]
+ presc_data = context_data[0:6] + context_data[8:11]
context_actions_df = self._list_data_to_df(context_data, presc_data)
@@ -68,7 +69,7 @@ def test_compute_percent_change_all_nonreco(self):
Tests compute change when there is only urban/primf/primn.
"""
context_data = [0, 0, 0, 0, 0, 0, 0.33, 0.33, 0, 0, 0, 0.34]
- presc_data = context_data[0:6] + context_data [8:11]
+ presc_data = context_data[0:6] + context_data[8:11]
context_actions_df = self._list_data_to_df(context_data, presc_data)
diff --git a/use_cases/eluc/tests/test_land_use_prescriptor.py b/use_cases/eluc/tests/test_land_use_prescriptor.py
index ada0348..78f3207 100644
--- a/use_cases/eluc/tests/test_land_use_prescriptor.py
+++ b/use_cases/eluc/tests/test_land_use_prescriptor.py
@@ -14,6 +14,7 @@
from prescriptors.nsga2.candidate import Candidate
from prescriptors.nsga2.land_use_prescriptor import LandUsePrescriptor
+
class TestLandUsePrescriptor(unittest.TestCase):
"""
Tests PyTorch prescriptor class
diff --git a/use_cases/eluc/tests/test_nsga2_utils.py b/use_cases/eluc/tests/test_nsga2_utils.py
index 96c256a..4d7fa35 100644
--- a/use_cases/eluc/tests/test_nsga2_utils.py
+++ b/use_cases/eluc/tests/test_nsga2_utils.py
@@ -9,6 +9,7 @@
from prescriptors.nsga2.candidate import Candidate
from prescriptors.nsga2 import nsga2_utils
+
class TestNSGA2Utils(unittest.TestCase):
"""
Tests the NGSA-II utility functions.
@@ -66,5 +67,5 @@ def test_domination_two_obj(self):
candidate2 = Candidate(16, 16, 16)
candidate1.metrics = [comb[0], comb[1]]
candidate2.metrics = [comb[2], comb[3]]
- self.assertEqual(nsga2_utils.dominates(candidate1, candidate2), self.manual_two_obj_dominate(candidate1, candidate2))
-
+ self.assertEqual(nsga2_utils.dominates(candidate1, candidate2),
+ self.manual_two_obj_dominate(candidate1, candidate2))
diff --git a/use_cases/eluc/tests/test_predictors.py b/use_cases/eluc/tests/test_predictors.py
index c4942c5..be26f45 100644
--- a/use_cases/eluc/tests/test_predictors.py
+++ b/use_cases/eluc/tests/test_predictors.py
@@ -12,6 +12,7 @@
from predictors.neural_network.neural_net_predictor import NeuralNetPredictor
from predictors.sklearn_predictor.sklearn_predictor import LinearRegressionPredictor, RandomForestPredictor
+
class TestPredictors(unittest.TestCase):
"""
Tests the 3 base predictor implementations' saving and loading behavior.
@@ -76,7 +77,7 @@ def test_loaded_same(self):
loaded = serializer.load(self.temp_path)
loaded_output = loaded.predict(self.dummy_data.iloc[2:])
- self.assertTrue((output == loaded_output).all().all()) # Pandas is so annoying why is this necessary?
+ self.assertTrue((output == loaded_output).all().all())
shutil.rmtree(self.temp_path)
self.assertFalse(self.temp_path.exists())