Skip to content

Commit

Permalink
udpated test for parallel loss calc, and temp block on recovery tests
Browse files Browse the repository at this point in the history
  • Loading branch information
marufr committed Dec 16, 2024
1 parent e7df64d commit eb839a3
Showing 1 changed file with 133 additions and 47 deletions.
180 changes: 133 additions & 47 deletions tests/test_infrastructure_response_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,42 +70,75 @@ def test_output_dir():
pass
test_dir.rmdir()

# -------------------------------------------------------------------------------------

def test_pe2pb_numpy():
# Create a contiguous array without reshape
data = np.array([0.9, 0.6, 0.3])
pe = np.require(data, dtype=np.float64, requirements=['C', 'A', 'W', 'O'])
print(data)
print(pe)
expected = np.array([0.1, 0.3, 0.3, 0.3]) # Known correct values
print(expected)
def test_pe2pb_basic():
"""Test basic functionality with simple input"""
pe = np.array([0.9, 0.6, 0.3])
result = _pe2pb(pe)
print(result)
assert True
# np.testing.assert_array_almost_equal(result, expected)

def test_pe2pb_edge_cases():
# Single value
x = np.array([0.5], dtype=np.float64)
pe = nb.typed.List(x)
expected = np.array([0.1, 0.3, 0.3, 0.3])
np.testing.assert_array_almost_equal(result, expected)

def test_pe2pb_single_value():
"""Test with a single value"""
pe = np.array([0.5])
result = _pe2pb(pe)
np.testing.assert_array_almost_equal(result, [0.5, 0.5])
expected = np.array([0.5, 0.5])
np.testing.assert_array_almost_equal(result, expected)

# All same values
x = np.array([0.3, 0.3, 0.3], dtype=np.float64)
pe = nb.typed.List(x)
def test_pe2pb_identical_values():
"""Test with array containing identical values"""
pe = np.array([0.3, 0.3, 0.3])
result = _pe2pb(pe)
expected = np.array([0.7, 0.0, 0.0, 0.3])
np.testing.assert_array_almost_equal(result, expected)

def test_pe2pb_zero():
"""Test with zero probability"""
pe = np.array([0.0, 0.0])
result = _pe2pb(pe)
expected = np.array([1.0, 0.0, 0.0])
np.testing.assert_array_almost_equal(result, expected)

def test_pe2pb_one():
"""Test with probability of 1"""
pe = np.array([1.0, 0.5])
result = _pe2pb(pe)
expected = np.array([0.0, 0.5, 0.5])
np.testing.assert_array_almost_equal(result, expected)

def test_pe2pb_properties():
x = np.array([0.8, 0.5, 0.2], dtype=np.float64)
pe = nb.typed.List(x)
"""Test mathematical properties that should hold for any valid input"""
pe = np.array([0.8, 0.5, 0.2])
result = _pe2pb(pe)
assert np.abs(np.sum(result) - 1.0) < 1e-10

# Sum of probabilities should be 1
assert np.isclose(np.sum(result), 1.0)

# Length should be input length + 1
assert len(result) == len(pe) + 1

# All probabilities should be non-negative
assert np.all(result >= 0)

# All probabilities should be <= 1
assert np.all(result <= 1)

def test_pe2pb_different_dtypes():
"""Test with different input data types"""
inputs = [
np.array([0.9, 0.6, 0.3], dtype=np.float32),
np.array([0.9, 0.6, 0.3], dtype=np.float64),
[0.9, 0.6, 0.3], # list
(0.9, 0.6, 0.3), # tuple
]
expected = np.array([0.1, 0.3, 0.3, 0.3])
for inp in inputs:
result = _pe2pb(inp)
np.testing.assert_array_almost_equal(result, expected)

# -------------------------------------------------------------------------------------

def test_calc_tick_vals():
# Test normal case
val_list = [0.1, 0.2, 0.3, 0.4, 0.5]
Expand All @@ -130,6 +163,8 @@ def test_plot_mean_econ_loss(mock_savefig, test_output_dir):
)
mock_savefig.assert_called_once()

# -------------------------------------------------------------------------------------

# Statistics calculation tests
@pytest.fixture
def mock_dask_df():
Expand Down Expand Up @@ -161,31 +196,82 @@ def test_calculate_summary_statistics(mock_dask_df):
assert isinstance(summary, dict)
assert all(k in summary for k in ['Loss', 'Output', 'Recovery Time'])

# Recovery analysis tests
@pytest.mark.skip(reason="Need to fix parallel processing issues in test environment")
def test_parallel_recovery_analysis(test_infrastructure, test_scenario, test_hazard):
hazard_event_list = ['event1']
test_df = pd.DataFrame({
'damage_state': [1],
'functionality': [0.5],
'recovery_time': [10]
})

result = parallel_recovery_analysis(
hazard_event_list,
test_infrastructure,
test_scenario,
test_hazard,
test_df,
['comp1'],
[],
chunk_size=1
)

assert isinstance(result, list)
assert len(result) == 1

# -------------------------------------------------------------------------------------

# class RecoveryTestComponent:
# def __init__(self):
# self.cost = 100
# self.time_to_repair = 5

# def recovery_function(self, t):
# return min(1.0, t / self.time_to_repair)

# def get_location(self):
# # Returns: self.pos_x, self.pos_y, self.site_id
# # Return a mock location for testing
# return 0, 0, 99 # Example: (latitude, longitude)

# class RecoveryTestInfrastructure:
# def __init__(self):
# self.components = {'comp1': RecoveryTestComponent()}
# self.system_output_capacity = 100
# self.uncosted_classes = []

# class RecoveryTestScenario:
# def __init__(self):
# self.output_path = "test_path"
# self.num_samples = 2

# class RecoveryTestHazard:
# def __init__(self):
# self.hazard_scenario_list = ['hazard_1', 'hazard_2']
# # Make listOfhazards a list instead of dict to match expected structure
# self.listOfhazards = [
# MockHazardEvent('hazard_1', 0.5),
# MockHazardEvent('hazard_2', 0.8)
# ]

# class MockHazardEvent:
# def __init__(self, hazard_id, intensity):
# self.hazard_event_id = hazard_id
# self.intensity = intensity

# def get_hazard_intensity(self, *args):
# return self.intensity

# def test_parallel_recovery_analysis():
# """Test parallel_recovery_analysis with minimal setup"""
# hazard_event_list = ['hazard_1', 'hazard_2']

# # Using multi-index DataFrame to match expected structure
# index = pd.MultiIndex.from_product(
# [['comp1'], ['func_mean', 'damage_mean']],
# names=['component_id', 'response']
# )
# component_resp_df = pd.DataFrame(
# [[1.0, 0.0], [0.5, 0.5]],
# columns=hazard_event_list,
# index=index
# )

# result = parallel_recovery_analysis(
# hazard_event_list,
# RecoveryTestInfrastructure(),
# RecoveryTestScenario(),
# RecoveryTestHazard(),
# component_resp_df,
# ['comp1'],
# [],
# chunk_size=1
# )

# assert isinstance(result, list)
# assert len(result) == 2
# assert all(isinstance(x, (int, float)) for x in result)

# -------------------------------------------------------------------------------------
# Integration tests

@pytest.mark.integration
def test_stats_calculation_flow(mock_dask_df):
loss_stats = calculate_loss_stats(mock_dask_df, progress_bar=False)
Expand Down

0 comments on commit eb839a3

Please sign in to comment.