From 24b599e8a6aecba36ce9294362f4e8f92d90d190 Mon Sep 17 00:00:00 2001
From: Paul Natsuo Kishimoto
Date: Wed, 20 Nov 2024 19:17:08 +0100
Subject: [PATCH] Update .model.transport type hints for Python >= 3.9

- Use standard collections, e.g. list[str] instead of typing.List[str].
- Import certain types from collections.abc, instead of deprecated aliases
  in typing.
---
 message_ix_models/model/transport/base.py     |  4 +-
 message_ix_models/model/transport/build.py    |  6 +-
 message_ix_models/model/transport/config.py   | 30 +++----
 message_ix_models/model/transport/data.py     | 23 +++---
 message_ix_models/model/transport/demand.py   |  8 +-
 message_ix_models/model/transport/factor.py   | 26 ++-----
 message_ix_models/model/transport/files.py    | 10 +--
 message_ix_models/model/transport/groups.py   |  6 +-
 message_ix_models/model/transport/ikarus.py   |  5 +-
 message_ix_models/model/transport/ldv.py      | 17 ++--
 message_ix_models/model/transport/non_ldv.py  | 33 ++++----
 message_ix_models/model/transport/operator.py | 78 ++++++++-----------
 message_ix_models/model/transport/plot.py     |  6 +-
 message_ix_models/model/transport/report.py   |  6 +-
 .../model/transport/structure.py              |  9 ++-
 message_ix_models/model/transport/testing.py  |  5 +-
 .../model/transport/ustimes_ma3t.py           |  5 +-
 message_ix_models/model/transport/util.py     |  3 +-
 message_ix_models/project/ssp/transport.py    |  4 +-
 message_ix_models/tools/iamc.py               |  7 +-
 message_ix_models/types.py                    |  3 +-
 21 files changed, 140 insertions(+), 154 deletions(-)

diff --git a/message_ix_models/model/transport/base.py b/message_ix_models/model/transport/base.py
index 3b8848da1b..b4ca524920 100644
--- a/message_ix_models/model/transport/base.py
+++ b/message_ix_models/model/transport/base.py
@@ -3,7 +3,7 @@
 from functools import partial
 from itertools import pairwise, product
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, List, Optional
+from typing import TYPE_CHECKING, Any, Optional

 import genno
 import numpy as np
@@ -423,7 +423,7 @@ def to_csv(


 def format_share_constraints(
-    qty: "AnyQuantity", config: dict, *, kind: str, groupby: List[str] = []
+    qty: "AnyQuantity", config: dict, *, kind: str, groupby: list[str] = []
 ) -> pd.DataFrame:
     """Produce values for :file:`ue_share_constraints.xlsx`.

diff --git a/message_ix_models/model/transport/build.py b/message_ix_models/model/transport/build.py
index ab55a8b135..36177c6481 100644
--- a/message_ix_models/model/transport/build.py
+++ b/message_ix_models/model/transport/build.py
@@ -3,7 +3,7 @@
 import logging
 from importlib import import_module
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Optional

 import pandas as pd
 from genno import Computer, KeyExistsError, Quantity, quote
@@ -179,7 +179,7 @@ def add_exogenous_data(c: Computer, info: ScenarioInfo) -> None:
     # Identify appropriate source keyword arguments for loading GDP and population data
     source = str(config.ssp)
     if config.ssp in SSP_2017:
-        source_kw: Tuple[Dict[str, Any], ...] = (
+        source_kw: tuple[dict[str, Any], ...] = (
             dict(measure="GDP", model="IIASA GDP"),
             dict(measure="POP", model="IIASA GDP"),
         )
@@ -471,7 +471,7 @@ def get_computer(
 def main(
     context: Context,
     scenario: Scenario,
-    options: Optional[Dict] = None,
+    options: Optional[dict] = None,
     **option_kwargs,
 ):
     """Build MESSAGEix-Transport on `scenario`.
diff --git a/message_ix_models/model/transport/config.py b/message_ix_models/model/transport/config.py
index 0e19e7ceb1..8404e5362c 100644
--- a/message_ix_models/model/transport/config.py
+++ b/message_ix_models/model/transport/config.py
@@ -1,7 +1,7 @@
 import logging
 from dataclasses import InitVar, dataclass, field, replace
 from enum import Enum
-from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Literal, Optional, Union

 import message_ix
 from genno import Quantity
@@ -63,7 +63,7 @@ class Config(ConfigHelper):
     #: for transport modes *other than* LDV. See :func:`non_ldv.growth_new_capacity`.
     #: "* initial_*_up"
     #: Base value for growth constraints. These values are arbitrary.
-    constraint: Dict = field(
+    constraint: dict = field(
         default_factory=lambda: {
             "LDV growth_activity_lo": -0.0192,
             "LDV growth_activity_up": 0.0192 * 3.0,
@@ -93,7 +93,7 @@ class Config(ConfigHelper):
     #: 'updateTRPdata', with the comment "Original data from Sei (PAO)."
     #: - This probably refers to some source that gave relative costs of different
     #: buses, in PAO, for this year; it is applied across all years.
-    cost: Dict = field(
+    cost: dict = field(
         default_factory=lambda: {
             # "ldv nga": 0.85,
@@ -114,7 +114,7 @@ class Config(ConfigHelper):
     #: specified in the corresponding technology.yaml file.
     #:
     #: .. todo:: Read directly from technology.yaml
-    demand_modes: List[str] = field(
+    demand_modes: list[str] = field(
         default_factory=lambda: ["LDV", "2W", "AIR", "BUS", "RAIL"]
     )

@@ -129,7 +129,7 @@ class Config(ConfigHelper):
     dummy_supply: bool = False

     #: Various efficiency factors.
-    efficiency: Dict = field(
+    efficiency: dict = field(
         default_factory=lambda: {
             "*": 0.2,
             "hev": 0.2,
@@ -150,7 +150,7 @@ class Config(ConfigHelper):
     emission_relations: bool = True

     #: Various other factors.
-    factor: Dict = field(default_factory=dict)
+    factor: dict = field(default_factory=dict)

     #: If :obj:`True` (the default), do not record/preserve parameter data when removing
     #: set elements from the base model.
@@ -171,7 +171,7 @@ class Config(ConfigHelper):
     #:
     #: ``F ROAD``: similar to IEA “Future of Trucks” (2017) values; see
     #: .transport.freight. Alternately use 5.0, similar to Roadmap 2017 values.
-    load_factor: Dict = field(
+    load_factor: dict = field(
         default_factory=lambda: {
             "F ROAD": 10.0,
             "F RAIL": 10.0,
@@ -183,7 +183,7 @@ class Config(ConfigHelper):

     #: Period in which LDV costs match those of a reference region.
     #: Dimensions: (node,).
-    ldv_cost_catch_up_year: Dict = field(default_factory=dict)
+    ldv_cost_catch_up_year: dict = field(default_factory=dict)

     #: Method for calibrating LDV stock and sales:
     #:
@@ -193,7 +193,7 @@ class Config(ConfigHelper):

     #: Tuples of (node, technology (transport mode), commodity) for which minimum
     #: activity should be enforced. See :func:`.non_ldv.bound_activity_lo`.
-    minimum_activity: Dict[Tuple[str, Tuple[str, ...], str], float] = field(
+    minimum_activity: dict[tuple[str, tuple[str, ...], str], float] = field(
         default_factory=dict
     )

@@ -202,7 +202,7 @@ class Config(ConfigHelper):
     mode_share: str = "default"

     #: List of modules containing model-building calculations.
-    modules: List[str] = field(
+    modules: list[str] = field(
         default_factory=lambda: (
             "groups demand freight ikarus ldv disutility non_ldv plot data"
         ).split()
@@ -210,7 +210,7 @@ class Config(ConfigHelper):

     #: Used by :func:`.get_USTIMES_MA3T` to map MESSAGE regions to U.S. census divisions
     #: appearing in MA³T.
-    node_to_census_division: Dict = field(default_factory=dict)
+    node_to_census_division: dict = field(default_factory=dict)

     #: **Temporary** setting for the SSP 2024 project: indicates whether the base
     #: scenario used is a policy (carbon pricing) scenario, or not. This currently does
@@ -226,7 +226,7 @@ class Config(ConfigHelper):
     #:
     #: :mod:`.transport.build` and :mod:`.transport.report` code will respond to these
     #: settings in documented ways.
-    project: Dict[str, Enum] = field(
+    project: dict[str, Enum] = field(
         default_factory=lambda: dict(
             futures=FUTURES_SCENARIO.BASE, navigate=NAVIGATE_SCENARIO.REF
         )
@@ -236,7 +236,7 @@ class Config(ConfigHelper):
     scaling: float = 1.0

     #: Mapping from nodes to other nodes towards which share weights should converge.
-    share_weight_convergence: Dict = field(default_factory=dict)
+    share_weight_convergence: dict = field(default_factory=dict)

     #: Specification for the structure of MESSAGEix-Transport, processed from contents
     #: of :file:`set.yaml` and :file:`technology.yaml`.
@@ -282,7 +282,7 @@ class Config(ConfigHelper):
     #: space-delimited string (:py:`"module_a -module_b"`) or sequence of strings.
     #: Values prefixed with a hyphen (:py:`"-module_b"`) are *removed* from
     #: :attr:`.modules`.
-    extra_modules: InitVar[Union[str, List[str]]] = None
+    extra_modules: InitVar[Union[str, list[str]]] = None

     #: Identifier of a Transport Futures scenario, used to update :attr:`project` via
     #: :meth:`.ScenarioFlags.parse_futures`.
@@ -315,7 +315,7 @@ def from_context(
         cls,
         context: Context,
         scenario: Optional[message_ix.Scenario] = None,
-        options: Optional[Dict] = None,
+        options: Optional[dict] = None,
     ) -> "Config":
         """Configure `context` for building MESSAGEix-Transport.

diff --git a/message_ix_models/model/transport/data.py b/message_ix_models/model/transport/data.py
index 24bf637978..55272b76da 100644
--- a/message_ix_models/model/transport/data.py
+++ b/message_ix_models/model/transport/data.py
@@ -2,10 +2,11 @@

 import logging
 from collections import defaultdict
+from collections.abc import Callable, Mapping
 from copy import deepcopy
 from functools import partial
 from operator import le
-from typing import TYPE_CHECKING, Callable, Dict, List, Mapping, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Optional

 import pandas as pd
 from genno import Computer, Key, Quantity
@@ -51,8 +52,8 @@ def prepare_computer(c: Computer):


 def conversion(
-    nodes: List[str], years: List[int], config: dict
-) -> Dict[str, pd.DataFrame]:
+    nodes: list[str], years: list[int], config: dict
+) -> dict[str, pd.DataFrame]:
     """Input and output data for conversion technologies:

     The technologies are named 'transport {service} load factor'.
@@ -72,7 +73,7 @@ def conversion(
         ("pax", 1.0, "Gp km / a"),
     ]

-    data0: Mapping[str, List] = defaultdict(list)
+    data0: Mapping[str, list] = defaultdict(list)
     for service, factor, output_unit in service_info:
         i_o = make_io(
             (f"transport {service} vehicle", "useful", "Gv km"),
@@ -98,7 +99,7 @@ def conversion(
     return data1


-def dummy_supply(technologies: List["Code"], info, config) -> Dict[str, pd.DataFrame]:
+def dummy_supply(technologies: list["Code"], info, config) -> dict[str, pd.DataFrame]:
     """Dummy fuel supply for the bare RES."""
     if not config["transport"].dummy_supply:
         return dict()
@@ -116,7 +117,7 @@ def dummy_supply(technologies: List["Code"], info, config) -> Dict[str, pd.DataF
     else:
         raise TypeError(type(commodity))

-    result: Dict[str, pd.DataFrame] = dict()
+    result: dict[str, pd.DataFrame] = dict()
     common = dict(mode="all", time="year", time_dest="year", unit="GWa")
     values = dict(output=1.0, var_cost=1.0)

@@ -133,7 +134,7 @@ def dummy_supply(technologies: List["Code"], info, config) -> Dict[str, pd.DataF
     return result


-def misc(info: ScenarioInfo, nodes: List[str], y: List[int]):
+def misc(info: ScenarioInfo, nodes: list[str], y: list[int]):
     """Miscellaneous bounds for calibration/vetting."""

     # Limit activity of methanol LDVs in the model base year
@@ -157,8 +158,8 @@ def misc(info: ScenarioInfo, nodes: List[str], y: List[int]):


 def navigate_ele(
-    nodes: List[str], techs: List["Code"], t_groups, years: List[int], config
-) -> Dict[str, pd.DataFrame]:
+    nodes: list[str], techs: list["Code"], t_groups, years: list[int], config
+) -> dict[str, pd.DataFrame]:
     """Return constraint data for :attr:`ScenarioFlags.ELE`.

     The text reads as follows as of 2023-02-15:
@@ -307,7 +308,7 @@ class MaybeAdaptR11Source(ExoDataSource):
     """

     #: Set of measures recognized by a subclass.
-    measures: Set[str] = set()
+    measures: set[str] = set()

     #: Mapping from :attr:`.measures` entries to file names.
     filename: Mapping[str, str] = dict()
@@ -364,7 +365,7 @@ def __call__(self):
     def __repr__(self) -> str:
         return self._repr

-    def get_keys(self) -> Tuple[Key, Key]:
+    def get_keys(self) -> tuple[Key, Key]:
         """Return the target keys for the (1) raw and (2) transformed data."""
         k = self.key or Key(
             self.name or self.measure.lower(), ("n", "y") + self.extra_dims
diff --git a/message_ix_models/model/transport/demand.py b/message_ix_models/model/transport/demand.py
index d506e296aa..f36bdfcdd0 100644
--- a/message_ix_models/model/transport/demand.py
+++ b/message_ix_models/model/transport/demand.py
@@ -1,7 +1,7 @@
 """Demand calculation for MESSAGEix-Transport."""

 import logging
-from typing import TYPE_CHECKING, Dict, List
+from typing import TYPE_CHECKING

 import genno
 import numpy as np
@@ -48,8 +48,8 @@


 def dummy(
-    commodities: List, nodes: List[str], y: List[int], config: dict
-) -> Dict[str, pd.DataFrame]:
+    commodities: list, nodes: list[str], y: list[int], config: dict
+) -> dict[str, pd.DataFrame]:
     """Dummy demands.

@@ -241,7 +241,7 @@ def pdt_per_capita(c: Computer) -> None:
     # Add `y` dimension. Here for the future fixed point we use y=2 * max(y), e.g.
     # 4220 for y=2110. The value doesn't matter, we just need to avoid overlap with y
     # in the model.
-    def _future(qty: "AnyQuantity", years: List[int]) -> "AnyQuantity":
+    def _future(qty: "AnyQuantity", years: list[int]) -> "AnyQuantity":
         return qty.expand_dims(y=[years[-1] * 2])

     # Same, but adding y0
diff --git a/message_ix_models/model/transport/factor.py b/message_ix_models/model/transport/factor.py
index a132085597..bb249a9aaa 100644
--- a/message_ix_models/model/transport/factor.py
+++ b/message_ix_models/model/transport/factor.py
@@ -11,20 +11,10 @@
 import operator
 import re
 from abc import ABC, abstractmethod
+from collections.abc import Callable, Mapping, Sequence
 from dataclasses import dataclass, field
 from functools import partial
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    Tuple,
-    Union,
-)
+from typing import TYPE_CHECKING, Any, Optional, Union

 import pandas as pd
 from genno import Computer, Key, Quantity
@@ -95,7 +85,7 @@ class Constant(Layer):
     value: Quantity

     #: Dimensions of the result.
-    dims: Tuple[str, ...]
+    dims: tuple[str, ...]

     operation = operator.mul

@@ -183,12 +173,12 @@ class Map(Layer):
     """

     dim: str
-    values: Dict[str, Layer]
+    values: dict[str, Layer]

     operation = operator.mul

     def __init__(
-        self, dim: str, values: Optional[Dict[str, Layer]] = None, **value_kwargs: Layer
+        self, dim: str, values: Optional[dict[str, Layer]] = None, **value_kwargs: Layer
     ):
         self.dim = dim
         self.values = values or value_kwargs
@@ -215,7 +205,7 @@ class ScenarioSetting(Layer):
     """

     #: Mapping from scenario identifier to setting label.
-    setting: Dict[Any, str]
+    setting: dict[Any, str]

     #: Default setting.
     default: str
@@ -310,7 +300,7 @@ class Factor:
     """

     #: Ordered list of :class:`.Layer`.
-    layers: List[Layer] = field(default_factory=list)
+    layers: list[Layer] = field(default_factory=list)

     def __hash__(self):
         return hash(tuple(self.layers))
@@ -360,7 +350,7 @@ def add_tasks(
         )

     def __call__(
-        self, config, *coords, dims: Tuple[str, ...], scenario_expr: str
+        self, config, *coords, dims: tuple[str, ...], scenario_expr: str
     ) -> Quantity:
         """Invoke :meth:`quantify`, for use with :mod:`genno`."""
         kw = dict(zip(dims, coords))
diff --git a/message_ix_models/model/transport/files.py b/message_ix_models/model/transport/files.py
index 346e5d4388..856da6b50c 100644
--- a/message_ix_models/model/transport/files.py
+++ b/message_ix_models/model/transport/files.py
@@ -1,7 +1,7 @@
 import logging
 from functools import lru_cache
 from pathlib import Path
-from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Optional, Union

 from genno import Key

@@ -24,7 +24,7 @@

 #: List of all :class:`.ExogenousDataFile`.
-FILES: List["ExogenousDataFile"] = []
+FILES: list["ExogenousDataFile"] = []


 class ExogenousDataFile:
@@ -94,8 +94,8 @@ def __init__(
         name: str,
         units: str,
         key: Optional["KeyLike"] = None,
-        dims: Optional[Tuple[str, ...]] = None,
-        path: Union[str, Tuple[str, ...], None] = None,
+        dims: Optional[tuple[str, ...]] = None,
+        path: Union[str, tuple[str, ...], None] = None,
         description: Optional[str] = None,
         required: bool = True,
     ):
@@ -184,7 +184,7 @@ def __repr__(self) -> str:

     def add_tasks(
         self, c: "genno.Computer", *args, context: "Context"
-    ) -> Tuple["KeyLike", ...]:
+    ) -> tuple["KeyLike", ...]:
         """Prepare `c` to read data from a file like :attr:`.path`."""
         from message_ix_models.util.ixmp import rename_dims

diff --git a/message_ix_models/model/transport/groups.py b/message_ix_models/model/transport/groups.py
index e078c9ff2a..59251aea42 100644
--- a/message_ix_models/model/transport/groups.py
+++ b/message_ix_models/model/transport/groups.py
@@ -2,7 +2,7 @@

 import logging
 from copy import deepcopy
-from typing import TYPE_CHECKING, Dict
+from typing import TYPE_CHECKING

 import pandas as pd
 import xarray as xr
@@ -38,8 +38,8 @@ def cg_shares(
     ma3t_attitude: Quantity,
     ma3t_driver: Quantity,
     ma3t_pop: Quantity,
-    n_cd_indexers: Dict[str, xr.DataArray],
-    cg_indexers: Dict[str, xr.DataArray],
+    n_cd_indexers: dict[str, xr.DataArray],
+    cg_indexers: dict[str, xr.DataArray],
 ) -> Quantity:
     """Return shares of transport consumer groups.

diff --git a/message_ix_models/model/transport/ikarus.py b/message_ix_models/model/transport/ikarus.py
index 54ca3469a2..b5fcd21061 100644
--- a/message_ix_models/model/transport/ikarus.py
+++ b/message_ix_models/model/transport/ikarus.py
@@ -3,7 +3,6 @@
 import logging
 from functools import lru_cache, partial
 from operator import le
-from typing import Dict

 import pandas as pd
 import xarray as xr
@@ -102,7 +101,7 @@
 }


-def make_indexers(*args) -> Dict[str, xr.DataArray]:
+def make_indexers(*args) -> dict[str, xr.DataArray]:
     """Return indexers corresponding to `SOURCE`.

     These can be used for :mod:`xarray`-style advanced indexing to select from the data
@@ -116,7 +115,7 @@ def make_indexers(*args) -> Dict[str, xr.DataArray]:
     )


-def make_output(input_data: Dict[str, pd.DataFrame], techs) -> Dict[str, pd.DataFrame]:
+def make_output(input_data: dict[str, pd.DataFrame], techs) -> dict[str, pd.DataFrame]:
     """Make ``output`` data corresponding to IKARUS ``input`` data."""
     result = make_matched_dfs(
         input_data["input"], output=registry.Quantity(1.0, UNITS["output"])
diff --git a/message_ix_models/model/transport/ldv.py b/message_ix_models/model/transport/ldv.py
index d632785244..61a8fa4682 100644
--- a/message_ix_models/model/transport/ldv.py
+++ b/message_ix_models/model/transport/ldv.py
@@ -1,9 +1,10 @@
 """Data for light-duty vehicles (LDVs) for passenger transport."""

 import logging
+from collections.abc import Mapping
 from operator import itemgetter
 from types import SimpleNamespace
-from typing import TYPE_CHECKING, Any, Dict, List, Mapping, cast
+from typing import TYPE_CHECKING, Any, cast

 import genno
 import pandas as pd
@@ -232,7 +233,7 @@ def prepare_computer(c: Computer):

     if k.stock:
         # historical_new_capacity: select only data prior to y₀
-        kw: Dict[str, Any] = dict(
+        kw: dict[str, Any] = dict(
             common={},
             dims=dict(node_loc="nl", technology="t", year_vtg="yv"),
             name="historical_new_capacity",
@@ -466,11 +467,11 @@ def constraint_data(context) -> "ParameterData":

     # List of technologies to constrain, including the LDV technologies, plus the
     # corresponding "X usage by CG" pseudo-technologies
-    constrained: List[Code] = []
+    constrained: list[Code] = []
     for t in map(str, ldv_techs):
         constrained.extend(filter(lambda _t: t in _t, all_techs))  # type: ignore

-    data: Dict[str, pd.DataFrame] = dict()
+    data: dict[str, pd.DataFrame] = dict()
     for bound in "lo", "up":
         name = f"growth_activity_{bound}"

@@ -563,10 +564,10 @@ def stock(c: Computer) -> Key:

 def usage_data(
     load_factor: "AnyQuantity",
-    cg: List["Code"],
-    nodes: List[str],
-    t_ldv: Mapping[str, List],
-    years: List,
+    cg: list["Code"],
+    nodes: list[str],
+    t_ldv: Mapping[str, list],
+    years: list,
 ) -> "ParameterData":
     """Generate data for LDV “usage pseudo-technologies”.

diff --git a/message_ix_models/model/transport/non_ldv.py b/message_ix_models/model/transport/non_ldv.py
index 96170a587a..c9af2beaf4 100644
--- a/message_ix_models/model/transport/non_ldv.py
+++ b/message_ix_models/model/transport/non_ldv.py
@@ -2,9 +2,10 @@

 import logging
 from collections import defaultdict
+from collections.abc import Mapping
 from functools import lru_cache, partial
 from operator import itemgetter
-from typing import TYPE_CHECKING, Dict, List, Mapping, Set
+from typing import TYPE_CHECKING

 import numpy as np
 import pandas as pd
@@ -64,7 +65,7 @@ def prepare_computer(c: Computer):
     source = context.transport.data_source.non_LDV
     log.info(f"non-LDV data from {source}")

-    keys: List[KeyLike] = []
+    keys: list[KeyLike] = []

     if source == "IKARUS":
         keys.append("transport nonldv::ixmp+ikarus")
@@ -134,7 +135,7 @@ def prepare_computer(c: Computer):
     c.add("transport_data", __name__, key=k_all)


-def get_2w_dummies(context) -> Dict[str, pd.DataFrame]:
+def get_2w_dummies(context) -> dict[str, pd.DataFrame]:
     """Generate dummy, equal-cost output for 2-wheeler technologies.

     **NB** this is analogous to :func:`.ldv.get_dummy`.
@@ -178,7 +179,7 @@ def get_2w_dummies(context) -> Dict[str, pd.DataFrame]:
     return data


-def bound_activity(c: "Computer") -> List[Key]:
+def bound_activity(c: "Computer") -> list[Key]:
     """Constrain activity of non-LDV technologies based on :file:`act-non_ldv.csv`."""
     base = exo.act_non_ldv

@@ -193,11 +194,11 @@ def bound_activity(c: "Computer") -> List[Key]:
     return [k_bau]


-def bound_activity_lo(c: Computer) -> List[Key]:
+def bound_activity_lo(c: Computer) -> list[Key]:
     """Set minimum activity for certain technologies to ensure |y0| energy use."""

     @lru_cache
-    def techs_for(mode: Code, commodity: str) -> List[Code]:
+    def techs_for(mode: Code, commodity: str) -> list[Code]:
         """Return techs that are (a) associated with `mode` and (b) use `commodity`."""
         result = []
         for t in mode.child:
@@ -212,7 +213,7 @@ def _(nodes, technologies, y0, config: dict) -> Quantity:
         cfg: "Config" = config["transport"]

         # Construct a set of all (node, technology, commodity) to constrain
-        rows: List[List] = []
+        rows: list[list] = []
         cols = ["n", "t", "c", "value"]
         for (n, modes, c), value in cfg.minimum_activity.items():
             for m in ["2W", "BUS", "F ROAD"] if modes == "ROAD" else ["RAIL"]:
@@ -252,8 +253,8 @@ def _inputs(technology: Code, commodity: str) -> bool:


 def constraint_data(
-    t_all, t_modes: List[str], nodes, years: List[int], genno_config: dict
-) -> Dict[str, pd.DataFrame]:
+    t_all, t_modes: list[str], nodes, years: list[int], genno_config: dict
+) -> dict[str, pd.DataFrame]:
     """Return constraints on growth of CAP_NEW for non-LDV technologies.

     Responds to the :attr:`.Config.constraint` keys :py:`"non-LDV *"`; see description
@@ -268,13 +269,13 @@ def constraint_data(

     # Lists of technologies to constrain
     # All technologies under the non-LDV modes
-    t_0: Set[Code] = set(filter(lambda t: t.parent and t.parent.id in modes, t_all))
+    t_0: set[Code] = set(filter(lambda t: t.parent and t.parent.id in modes, t_all))
     # Only the technologies that input c=electr
-    t_1: Set[Code] = set(filter(partial(_inputs, commodity="electr"), t_0))
+    t_1: set[Code] = set(filter(partial(_inputs, commodity="electr"), t_0))
     # Aviation technologies only
-    t_2: Set[Code] = set(filter(lambda t: t.parent and t.parent.id == "AIR", t_all))
+    t_2: set[Code] = set(filter(lambda t: t.parent and t.parent.id == "AIR", t_all))
     # Only the technologies that input c=gas
-    t_3: Set[Code] = set(filter(partial(_inputs, commodity="electr"), t_0))
+    t_3: set[Code] = set(filter(partial(_inputs, commodity="electr"), t_0))

     common = dict(year_act=years, year_vtg=years, time="year", unit="-")
     dfs = defaultdict(list)
@@ -317,7 +318,7 @@ def constraint_data(
     return {k: pd.concat(v) for k, v in dfs.items()}


-def other(c: Computer, base: Key) -> List[Key]:
+def other(c: Computer, base: Key) -> list[Key]:
     """Generate MESSAGE parameter data for ``transport other *`` technologies."""
     from .key import gdp_index

@@ -372,7 +373,7 @@ def broadcast_other_transport(technologies) -> Quantity:


 def usage_data(
-    load_factor: Quantity, modes: List[Code], nodes: List[str], years: List[int]
+    load_factor: Quantity, modes: list[Code], nodes: list[str], years: list[int]
 ) -> Mapping[str, pd.DataFrame]:
     """Generate data for non-LDV usage "virtual" technologies.
@@ -399,7 +400,7 @@ def usage_data(
         )
     )

-    result: Dict[str, pd.DataFrame] = dict()
+    result: dict[str, pd.DataFrame] = dict()
     merge_data(result, *data)

     for k, v in result.items():
diff --git a/message_ix_models/model/transport/operator.py b/message_ix_models/model/transport/operator.py
index 73ce3ab6e1..27d47cd346 100644
--- a/message_ix_models/model/transport/operator.py
+++ b/message_ix_models/model/transport/operator.py
@@ -2,23 +2,11 @@

 import logging
 import re
+from collections.abc import Mapping, Sequence
 from functools import partial, reduce
 from itertools import pairwise, product
 from operator import gt, le, lt
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Dict,
-    Hashable,
-    List,
-    Literal,
-    Mapping,
-    Optional,
-    Sequence,
-    Set,
-    Tuple,
-    cast,
-)
+from typing import TYPE_CHECKING, Any, Hashable, Literal, Optional, cast

 import genno
 import numpy as np
@@ -93,7 +81,7 @@
 ]


-def base_model_data_header(scenario: "Scenario", *, name: str) -> Dict[str, str]:
+def base_model_data_header(scenario: "Scenario", *, name: str) -> dict[str, str]:
     """Return a header comment for writing out base model data."""
     versions = "\n\n".join(show_versions().split("\n\n")[:2])

@@ -109,7 +97,7 @@ def base_model_data_header(scenario: "Scenario", *, name: str) -> Dict[str, str]


 def base_shares(
-    base: "AnyQuantity", nodes: List[str], techs: List[str], y: List[int]
+    base: "AnyQuantity", nodes: list[str], techs: list[str], y: list[int]
 ) -> "AnyQuantity":
     """Return base mode shares.

@@ -149,7 +137,7 @@ def base_shares(
     elif len(extra_modes):
         raise NotImplementedError(f"Extra mode(s) t={extra_modes}")

-    missing = cast(Set[Hashable], set("nty")) - set(result.dims)
+    missing = cast(set[Hashable], set("nty")) - set(result.dims)
     if len(missing):
         log.info(f"Broadcast base mode shares with dims {base.dims} over {missing}")

@@ -221,7 +209,7 @@ def broadcast(q1: "AnyQuantity", q2: "AnyQuantity") -> "AnyQuantity":


 def broadcast_wildcard(
-    qty: "AnyQuantity", coords: List[str], *, dim: str = "n"
+    qty: "AnyQuantity", coords: list[str], *, dim: str = "n"
 ) -> "AnyQuantity":
     """Broadcast over coordinates `coords` along dimension `dim`.

@@ -248,8 +236,8 @@ def broadcast_wildcard(


 def broadcast_t_c_l(
-    technologies: List[Code],
-    commodities: List[Code],
+    technologies: list[Code],
+    commodities: list[Code],
     kind: Literal["input", "output"],
     default_level: Optional[str] = None,
 ) -> "AnyQuantity":
@@ -295,7 +283,7 @@ def broadcast_t_c_l(


 def broadcast_y_yv_ya(
-    y: List[int], y_include: List[int], *, method: Literal["product", "zip"] = "product"
+    y: list[int], y_include: list[int], *, method: Literal["product", "zip"] = "product"
 ) -> "AnyQuantity":
     """Return a quantity for broadcasting y to (yv, ya).

@@ -322,7 +310,7 @@ def cost(
     whours: "AnyQuantity",
     speeds: "AnyQuantity",
     votm: "AnyQuantity",
-    y: List[int],
+    y: list[int],
 ) -> "AnyQuantity":
     """Calculate cost of transport [money / distance].

@@ -443,7 +431,7 @@ def expand_dims(qty: "AnyQuantity", dim, *args, **kwargs) -> "AnyQuantity":
     return qty.expand_dims(dim=dim, *args, **kwargs)


-def extend_y(qty: "AnyQuantity", y: List[int], *, dim: str = "y") -> "AnyQuantity":
+def extend_y(qty: "AnyQuantity", y: list[int], *, dim: str = "y") -> "AnyQuantity":
     """Extend `qty` along the dimension `dim` to cover all of `y`.

     - Values are first filled forward, then backwards, within existing `dim` labels in
@@ -478,7 +466,7 @@ def extend_y(qty: "AnyQuantity", y: List[int], *, dim: str = "y") -> "AnyQuantit
     return MappingAdapter({dim: y_map})(qty.ffill(dim).bfill(dim))  # type: ignore [attr-defined]


-def factor_fv(n: List[str], y: List[int], config: dict) -> "AnyQuantity":
+def factor_fv(n: list[str], y: list[int], config: dict) -> "AnyQuantity":
     """Scaling factor for freight activity.

     If :attr:`.Config.project` is :data:`ScenarioFlags.ACT`, the value declines from
@@ -516,7 +504,7 @@ def factor_fv(n: List[str], y: List[int], config: dict) -> "AnyQuantity":


 def factor_input(
-    y: List[int], t: List[Code], t_agg: Dict, config: dict
+    y: list[int], t: list[Code], t_agg: dict, config: dict
 ) -> "AnyQuantity":
     """Scaling factor for ``input`` (energy intensity of activity).

@@ -570,7 +558,7 @@ def _not_disutility(tech):
     return compound_growth(qty, "y")


-def factor_pdt(n: List[str], y: List[int], t: List[str], config: dict) -> "AnyQuantity":
+def factor_pdt(n: list[str], y: list[int], t: list[str], config: dict) -> "AnyQuantity":
     """Scaling factor for passenger activity.

     When :attr:`.Config.scenarios` includes :attr:`ScenarioFlags.ACT` (i.e. NAVIGATE
@@ -612,9 +600,9 @@ def factor_pdt(n: List[str], y: List[int], t: List[str], config: dict) -> "AnyQu

 def factor_ssp(
     config: dict,
-    nodes: List[str],
-    years: List[int],
-    *others: List,
+    nodes: list[str],
+    years: list[int],
+    *others: list,
     info: "message_ix_models.model.transport.factor.Factor",
     extra_dims: Optional[Sequence[str]] = None,
 ) -> "AnyQuantity":
@@ -641,10 +629,10 @@ def freight_usage_output(context: "Context") -> "AnyQuantity":
     )


-Groups = Dict[str, Dict[str, List[str]]]
+Groups = dict[str, dict[str, list[str]]]


-def groups_iea_eweb(technologies: List[Code]) -> Tuple[Groups, Groups, Dict]:
+def groups_iea_eweb(technologies: list[Code]) -> tuple[Groups, Groups, dict]:
     """Structure for calibration to IEA Extended World Energy Balances (EWEB).

     Returns 3 sets of groups:
@@ -665,7 +653,7 @@ def groups_iea_eweb(technologies: List[Code]) -> Tuple[Groups, Groups, Dict]:
     """
     g0: Groups = dict(flow={}, product={})
     g1: Groups = dict(t={})
-    g2: Dict = dict(t=[], t_new=[])
+    g2: dict = dict(t=[], t_new=[])

     # Add groups from base model commodity code list:
     # - IEA product list → MESSAGE commodity (e.g. "lightoil")
@@ -708,7 +696,7 @@ def groups_y_annual(duration_period: "AnyQuantity") -> "AnyQuantity":


 def logit(
-    x: "AnyQuantity", k: "AnyQuantity", lamda: "AnyQuantity", y: List[int], dim: str
+    x: "AnyQuantity", k: "AnyQuantity", lamda: "AnyQuantity", y: list[int], dim: str
 ) -> "AnyQuantity":
     r"""Compute probabilities for a logit random utility model.

@@ -824,19 +812,19 @@ def min(

 def merge_data(
     *others: Mapping[Hashable, pd.DataFrame],
-) -> Dict[Hashable, pd.DataFrame]:
+) -> dict[Hashable, pd.DataFrame]:
     """Slightly modified from message_ix_models.util.

     .. todo: move upstream or merge functionality with
        :func:`message_ix_models.util.merge_data`.
""" - keys: Set[Hashable] = reduce(lambda x, y: x | y.keys(), others, set()) + keys: set[Hashable] = reduce(lambda x, y: x | y.keys(), others, set()) return { k: pd.concat([o.get(k, None) for o in others], ignore_index=True) for k in keys } -def iea_eei_fv(name: str, config: Dict) -> "AnyQuantity": +def iea_eei_fv(name: str, config: dict) -> "AnyQuantity": """Returns base-year demand for freight from IEA EEI, with dimensions n-c-y.""" from message_ix_models.tools.iea import eei @@ -849,7 +837,7 @@ def iea_eei_fv(name: str, config: Dict) -> "AnyQuantity": return result.sel(y=ym1, t="Total freight transport", drop=True) -def indexers_n_cd(config: Dict) -> Dict[str, xr.DataArray]: +def indexers_n_cd(config: dict) -> dict[str, xr.DataArray]: """Indexers for selecting (`n`, `census_division`) → `n`. Based on :attr:`.Config.node_to_census_division`. @@ -862,9 +850,9 @@ def indexers_n_cd(config: Dict) -> Dict[str, xr.DataArray]: ) -def indexers_usage(technologies: List[Code]) -> Dict: +def indexers_usage(technologies: list[Code]) -> dict: """Indexers for replacing LDV `t` and `cg` with `t_new` for usage technologies.""" - labels: Dict[str, List[str]] = dict(cg=[], t=[], t_new=[]) + labels: dict[str, list[str]] = dict(cg=[], t=[], t_new=[]) for t in technologies: if not t.eval_annotation("is-disutility"): continue @@ -879,7 +867,7 @@ def indexers_usage(technologies: List[Code]) -> Dict: } -def nodes_world_agg(config, dim: Hashable = "nl") -> Dict[Hashable, Mapping]: +def nodes_world_agg(config, dim: Hashable = "nl") -> dict[Hashable, Mapping]: """Mapping to aggregate e.g. nl="World" from values for child nodes of "World". This mapping should be used with :func:`.genno.operator.aggregate`, giving the @@ -920,7 +908,7 @@ def price_units(qty: "AnyQuantity") -> "AnyQuantity": def quantity_from_config( - config: dict, name: str, dimensionality: Optional[Dict] = None + config: dict, name: str, dimensionality: Optional[dict] = None ) -> "AnyQuantity": if dimensionality: raise NotImplementedError @@ -1044,8 +1032,8 @@ def share_weight( gdp: "AnyQuantity", cost: "AnyQuantity", lamda: "AnyQuantity", - t_modes: List[str], - y: List[int], + t_modes: list[str], + y: list[int], config: dict, ) -> "AnyQuantity": """Calculate mode share weights. 
@@ -1088,9 +1076,9 @@ def share_weight(

     # Selectors
     # A scalar induces xarray but not genno <= 1.21 to drop
-    y0: Dict[Any, Any] = dict(y=y[0])
+    y0: dict[Any, Any] = dict(y=y[0])
     y0_ = dict(y=[y[0]])  # Do not drop
-    yC: Dict[Any, Any] = dict(y=cfg.year_convergence)
+    yC: dict[Any, Any] = dict(y=cfg.year_convergence)

     # Weights in y0 for all modes and nodes
     idx = dict(t=t_modes, n=nodes) | y0
diff --git a/message_ix_models/model/transport/plot.py b/message_ix_models/model/transport/plot.py
index a79ed1bcb5..f594a68bea 100644
--- a/message_ix_models/model/transport/plot.py
+++ b/message_ix_models/model/transport/plot.py
@@ -3,7 +3,7 @@
 import logging
 from datetime import datetime
 from pathlib import Path
-from typing import TYPE_CHECKING, List, Optional, Tuple
+from typing import TYPE_CHECKING, Optional

 import genno.compat.plotnine
 import pandas as pd
@@ -52,7 +52,7 @@ class Plot(genno.compat.plotnine.Plot):
     """

     #: 'Static' geoms: list of plotnine objects that are not dynamic
-    static: List["plotnine.typing.PlotAddable"] = [
+    static: list["plotnine.typing.PlotAddable"] = [
         p9.theme(figure_size=(11.7, 8.3)),
     ]

@@ -528,7 +528,7 @@ def generate(self, data, commodities, cg):
         yield ggplot


-def _reduce_units(df: pd.DataFrame, target_units) -> Tuple[pd.DataFrame, str]:
+def _reduce_units(df: pd.DataFrame, target_units) -> tuple[pd.DataFrame, str]:
     df_units = df["unit"].unique()
     assert 1 == len(df_units)
     tmp = registry.Quantity(1.0, df_units[0]).to(target_units)
diff --git a/message_ix_models/model/transport/report.py b/message_ix_models/model/transport/report.py
index 12c2a9edb5..763f1d4a36 100644
--- a/message_ix_models/model/transport/report.py
+++ b/message_ix_models/model/transport/report.py
@@ -3,7 +3,7 @@
 import logging
 from copy import deepcopy
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Tuple
+from typing import TYPE_CHECKING, Any

 import genno
 import pandas as pd
@@ -428,7 +428,7 @@ def configure_legacy_reporting(config: dict) -> None:

 def latest_reporting_from_file(
     info: ScenarioInfo, base_dir: Path
-) -> Tuple[Any, int, pd.DataFrame]:
+) -> tuple[Any, int, pd.DataFrame]:
     """Locate and retrieve the latest reported output for the scenario `info`.

     The file :file:`transport.csv` is sought in a subdirectory of `base_dir` identified
@@ -465,7 +465,7 @@ def latest_reporting_from_file(

 def latest_reporting_from_platform(
     info: ScenarioInfo, platform: "ixmp.Platform", minimum_version: int = -1
-) -> Tuple[Any, int, pd.DataFrame]:
+) -> tuple[Any, int, pd.DataFrame]:
     """Retrieve the latest reported output for the scenario described by `info`.

     The time series data attached to a scenario on `platform` is retrieved.
diff --git a/message_ix_models/model/transport/structure.py b/message_ix_models/model/transport/structure.py
index cdf2b59f6a..7df40fdbce 100644
--- a/message_ix_models/model/transport/structure.py
+++ b/message_ix_models/model/transport/structure.py
@@ -1,4 +1,5 @@
-from typing import Any, Dict, List, Sequence, Union
+from collections.abc import Sequence
+from typing import Any, Union

 from sdmx.model.common import Annotation, Code

@@ -37,7 +38,7 @@

 def get_technology_groups(
     technologies: Union[Spec, ScenarioInfo, Sequence["Code"]],
-) -> Dict[str, List[str]]:
+) -> dict[str, list[str]]:
     """Subsets of transport technologies for aggregation and filtering."""
     if isinstance(technologies, Spec):
         t_list: Sequence["Code"] = technologies.add.set["technology"]
@@ -46,7 +47,7 @@ def get_technology_groups(
     else:
         t_list = technologies

-    result: Dict[str, List[str]] = {"non-ldv": []}
+    result: dict[str, list[str]] = {"non-ldv": []}

     # Only include those technologies with children
     for tech in filter(lambda t: len(t.child), t_list):
@@ -59,7 +60,7 @@ def get_technology_groups(


 def make_spec(regions: str) -> Spec:
-    sets: Dict[str, Any] = dict()
+    sets: dict[str, Any] = dict()

     # Overrides specific to regional versions
     tmp = dict()
diff --git a/message_ix_models/model/transport/testing.py b/message_ix_models/model/transport/testing.py
index 84dfacfb5b..e0703c7f5f 100644
--- a/message_ix_models/model/transport/testing.py
+++ b/message_ix_models/model/transport/testing.py
@@ -2,9 +2,10 @@

 import logging
 import platform
+from collections.abc import Mapping
 from contextlib import nullcontext
 from pathlib import Path
-from typing import TYPE_CHECKING, Mapping, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Optional, Union

 import pytest
 from genno import Computer
@@ -76,7 +77,7 @@ def configure_build(
     years: str,
     tmp_path: Optional[Path] = None,
     options=None,
-) -> Tuple[Computer, ScenarioInfo]:
+) -> tuple[Computer, ScenarioInfo]:
     test_context.update(regions=regions, years=years, output_path=tmp_path)

     # By default, omit plots while testing
diff --git a/message_ix_models/model/transport/ustimes_ma3t.py b/message_ix_models/model/transport/ustimes_ma3t.py
index bd8790a28a..e7d48317db 100644
--- a/message_ix_models/model/transport/ustimes_ma3t.py
+++ b/message_ix_models/model/transport/ustimes_ma3t.py
@@ -6,7 +6,8 @@
 """

 from collections import defaultdict
-from typing import TYPE_CHECKING, List, Mapping
+from collections.abc import Mapping
+from typing import TYPE_CHECKING

 import genno
 import pandas as pd
@@ -32,7 +33,7 @@


 @cached
-def read_USTIMES_MA3T(nodes: List[str], subdir=None) -> Mapping[str, "AnyQuantity"]:
+def read_USTIMES_MA3T(nodes: list[str], subdir=None) -> Mapping[str, "AnyQuantity"]:
     """Read the US-TIMES MA3T data from :data:`FILE`.

     No transformation is performed.
diff --git a/message_ix_models/model/transport/util.py b/message_ix_models/model/transport/util.py
index b01953088c..f27d54cbee 100644
--- a/message_ix_models/model/transport/util.py
+++ b/message_ix_models/model/transport/util.py
@@ -1,8 +1,9 @@
 """Utility code for MESSAGEix-Transport."""

 import logging
+from collections.abc import Iterable
 from pathlib import Path
-from typing import TYPE_CHECKING, Iterable, Union
+from typing import TYPE_CHECKING, Union

 from message_ix_models import Context
 from message_ix_models.util import package_data_path
diff --git a/message_ix_models/project/ssp/transport.py b/message_ix_models/project/ssp/transport.py
index 626192e9dc..121ca0c852 100644
--- a/message_ix_models/project/ssp/transport.py
+++ b/message_ix_models/project/ssp/transport.py
@@ -1,7 +1,7 @@
 """Postprocess aviation emissions for SSP 2024."""

 import re
-from typing import TYPE_CHECKING, Dict, Hashable, List
+from typing import TYPE_CHECKING, Hashable

 import genno
 import xarray as xr
@@ -121,7 +121,7 @@ def extract_dims1(qty: "AnyQuantity", dim: dict) -> "AnyQuantity":  # pragma: no
     d0_new = f"{d0}_new"
     pattern = re.compile(expr)

-    indexers: Dict[Hashable, List[Hashable]] = {g: [] for g in pattern.groupindex}
+    indexers: dict[Hashable, list[Hashable]] = {g: [] for g in pattern.groupindex}
     indexers[d0_new] = []

     coords = qty.coords[d0].data.astype(str)
diff --git a/message_ix_models/tools/iamc.py b/message_ix_models/tools/iamc.py
index 56c5b8d577..88368c6dc2 100644
--- a/message_ix_models/tools/iamc.py
+++ b/message_ix_models/tools/iamc.py
@@ -1,6 +1,7 @@
 """Tools for working with IAMC-structured data."""

-from typing import TYPE_CHECKING, Any, Dict, List, Literal, MutableMapping, Optional
+from collections.abc import MutableMapping
+from typing import TYPE_CHECKING, Any, Literal, Optional

 import genno
 import pandas as pd
@@ -118,7 +119,7 @@ def iamc_like_data_for_query(
     query: str,
     *,
     archive_member: Optional[str] = None,
-    drop: Optional[List[str]] = None,
+    drop: Optional[list[str]] = None,
     non_iso_3166: Literal["keep", "discard"] = "discard",
     replace: Optional[dict] = None,
     unique: str = "MODEL SCENARIO VARIABLE UNIT",
@@ -169,7 +170,7 @@ def iamc_like_data_for_query(
         set(["MODEL", "SCENARIO", "VARIABLE", "UNIT"]) - set(unique.split())
     )

-    unique_values: Dict[str, Any] = dict()
+    unique_values: dict[str, Any] = dict()
     tmp = (
         pd.read_csv(source, **kwargs)
         .drop(columns=drop or [])
diff --git a/message_ix_models/types.py b/message_ix_models/types.py
index da9648ef33..208d99b044 100644
--- a/message_ix_models/types.py
+++ b/message_ix_models/types.py
@@ -1,6 +1,7 @@
 """Types for hinting."""

-from typing import Hashable, Mapping, MutableMapping
+from collections.abc import Mapping, MutableMapping
+from typing import Hashable

 import pandas as pd