diff --git a/examples/example.ipynb b/examples/example.ipynb index e2c8989..8f52224 100644 --- a/examples/example.ipynb +++ b/examples/example.ipynb @@ -8,7 +8,7 @@ "\n", "Jake Nunemaker\n", "\n", - "Last Updated: 01/03/2023 (gbarter)" + "Last Updated: 08/27/2023 (gbarter)" ] }, { @@ -41,7 +41,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -53,7 +53,7 @@ } ], "source": [ - "output_dir = \"/Users/gbarter/devel/pCrunch/tests/io/data\"\n", + "output_dir = \"/Users/gbarter/devel/pCrunch/pCrunch/test/io/data\"\n", "results_dir = os.path.join(output_dir, \"results\")\n", "save_results = True\n", "\n", @@ -74,18 +74,18 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[,\n", - " ,\n", - " ]" + "[,\n", + " ,\n", + " ]" ] }, - "execution_count": 3, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -100,7 +100,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -153,7 +153,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -191,7 +191,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -204,8 +204,10 @@ " trim_data=(0,), # If 'trim_data' is passed, all input files will\n", ") # be trimmed to (tmin, tmax(optional))\n", "\n", - "la.process_outputs(cores=4) # Once LoadsAnalysis is configured, process outputs with\n", - " # `process_outputs`. `cores` is optional but will trigger parallel processing if configured" + "la.process_outputs(cores=4, # Once LoadsAnalysis is configured, process outputs with\n", + " return_damage=True, # optional return of Palmgren-Miner damange and\n", + " goodman=True) # optional use of goodman correction for mean load values\n", + " # Note `cores` is optional but will trigger parallel processing if configured" ] }, { @@ -217,7 +219,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -450,7 +452,7 @@ "[5 rows x 1799 columns]" ] }, - "execution_count": 7, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -462,7 +464,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -566,7 +568,7 @@ "AOC_WSt.outb NaN NaN NaN " ] }, - "execution_count": 8, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -578,7 +580,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -592,7 +594,7 @@ "Name: (RootMc1, min), dtype: float64" ] }, - "execution_count": 9, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -603,7 +605,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -623,7 +625,7 @@ "Name: DLC2.3_3.out, Length: 1799, dtype: float64" ] }, - "execution_count": 10, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -635,7 +637,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -708,7 +710,7 @@ "AOC_WSt.outb 0.003558 0.003558 0.003558" ] }, - "execution_count": 11, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -720,7 +722,92 @@ }, { "cell_type": "code", - "execution_count": 12, 
+ "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
RootMc1RootMc2RootMc3
Test1.outb2.610118e-423.230998e-422.143750e-42
step_0.outb5.184544e-351.633931e-372.555698e-34
Test2.outb1.438786e-411.714979e-411.890789e-41
DLC2.3_3.out4.790238e-424.007654e-422.539284e-46
AOC_WSt.outb4.244367e-1044.244367e-1044.244367e-104
\n", + "
" + ], + "text/plain": [ + " RootMc1 RootMc2 RootMc3\n", + "Test1.outb 2.610118e-42 3.230998e-42 2.143750e-42\n", + "step_0.outb 5.184544e-35 1.633931e-37 2.555698e-34\n", + "Test2.outb 1.438786e-41 1.714979e-41 1.890789e-41\n", + "DLC2.3_3.out 4.790238e-42 4.007654e-42 2.539284e-46\n", + "AOC_WSt.outb 4.244367e-104 4.244367e-104 4.244367e-104" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Palmgren-Miner damage can be viewed with:\n", + "la.damage" + ] + }, + { + "cell_type": "code", + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -916,7 +1003,7 @@ " 'RootMc3': nan}]}" ] }, - "execution_count": 12, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -925,18 +1012,11 @@ "# Extreme events:\n", "la.extreme_events" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -950,7 +1030,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.11" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/examples/post_BatchRun.py b/examples/post_BatchRun.py index 708ba48..0897e16 100644 --- a/examples/post_BatchRun.py +++ b/examples/post_BatchRun.py @@ -7,112 +7,120 @@ import os from fnmatch import fnmatch -import numpy as np +#import numpy as np import pandas as pd -import ruamel.yaml as ry - -from pCrunch import LoadsAnalysis, PowerProduction -from pCrunch.io import load_FAST_out +try: + import ruamel_yaml as ry +except: + try: + import ruamel.yaml as ry + except: + raise ImportError("No module named ruamel.yaml or ruamel_yaml") + +from pCrunch import LoadsAnalysis, PowerProduction, FatigueParams +#from pCrunch.io import load_FAST_out from pCrunch.utility import save_yaml, get_windspeeds, convert_summary_stats def valid_extension(fp): return any([fnmatch(fp, ext) for ext in ["*.outb", "*.out"]]) - -# Define input files paths -output_dir = "/Users/jnunemak/Projects/pCrunch/BAR10/rank_0/" -results_dir = os.path.join(output_dir, "results") -save_results = True - - -# Find outfiles -outfiles = [ - os.path.join(output_dir, f) - for f in os.listdir(output_dir) - if valid_extension(f) -] - -# Configure pCrunch -magnitude_channels = { - "RootMc1": ["RootMxc1", "RootMyc1", "RootMzc1"], - "RootMc2": ["RootMxc2", "RootMyc2", "RootMzc2"], - "RootMc3": ["RootMxc3", "RootMyc3", "RootMzc3"], -} - -fatigue_channels = {"RootMc1": 10, "RootMc2": 10, "RootMc3": 10} - -channel_extremes = [ - "RotSpeed", - "RotThrust", - "RotTorq", - "RootMc1", - "RootMc2", - "RootMc3", -] - -# Run pCrunch -la = LoadsAnalysis( - outfiles, - magnitude_channels=magnitude_channels, - fatigue_channels=fatigue_channels, - extreme_channels=channel_extremes, - trim_data=(0,), -) -la.process_outputs(cores=4) - -if save_results: - save_yaml( - results_dir, - "summary_stats.yaml", - convert_summary_stats(la.summary_stats), +if __name__ == '__main__': + # Define input files paths + output_dir = "/Users/gbarter/devel/WEIS/examples/05_IEA-3.4-130-RWT/temp/iea34" + results_dir = os.path.join(output_dir, "results") + save_results = True + + # Find outfiles + outfiles = [ + os.path.join(output_dir, f) + for f in os.listdir(output_dir) + if valid_extension(f) + ] + + # Configure pCrunch + magnitude_channels = { + "RootMc1": ["RootMxc1", "RootMyc1", "RootMzc1"], + "RootMc2": ["RootMxc2", "RootMyc2", 
"RootMzc2"], + "RootMc3": ["RootMxc3", "RootMyc3", "RootMzc3"], + } + + fatigue_channels = {"RootMc1": FatigueParams(lifetime=25, slope=10), + "RootMc2": FatigueParams(lifetime=25, slope=10), + "RootMc3": FatigueParams(lifetime=25, slope=10), + } + + channel_extremes = [ + "RotSpeed", + "RotThrust", + "RotTorq", + "RootMc1", + "RootMc2", + "RootMc3", + ] + + # Run pCrunch + la = LoadsAnalysis( + outfiles, + magnitude_channels=magnitude_channels, + fatigue_channels=fatigue_channels, + extreme_channels=channel_extremes, + trim_data=(0,), ) - -# Load case matrix into dataframe -fname_case_matrix = os.path.join(output_dir, "case_matrix.yaml") -with open(fname_case_matrix, "r") as f: - case_matrix = ry.load(f, Loader=ry.Loader) -cm = pd.DataFrame(case_matrix) - -# Get wind speeds for processed runs -windspeeds, seed, IECtype, cm_wind = get_windspeeds(cm, return_df=True) - -# Get AEP -turbine_class = 1 -pp = PowerProduction(turbine_class) -AEP, perf_data = pp.AEP( - la.summary_stats, - windspeeds, - ["GenPwr", "RtAeroCp", "RotSpeed", "BldPitch1"], -) -print(f"AEP: {AEP}") - -# # ========== Plotting ========== -# an_plts = Analysis.wsPlotting() -# # --- Time domain analysis --- -# filenames = [outfiles[0][2], outfiles[1][2]] # select the 2nd run from each dataset -# cases = {'Baseline': ['Wind1VelX', 'GenPwr', 'BldPitch1', 'GenTq', 'RotSpeed']} -# fast_dict = fast_io.load_FAST_out(filenames, tmin=30) -# fast_pl.plot_fast_out(cases, fast_dict) - -# # Plot some spectral cases -# spec_cases = [('RootMyb1', 0), ('TwrBsFyt', 0)] -# twrfreq = .0716 -# fig,ax = fast_pl.plot_spectral(fast_dict, spec_cases, show_RtSpeed=True, -# add_freqs=[twrfreq], add_freq_labels=['Tower'], -# averaging='Welch') -# ax.set_title('DLC1.1') - -# # Plot a data distribution -# channels = ['RotSpeed'] -# caseid = [0, 1] -# an_plts.distribution(fast_dict, channels, caseid, names=['DLC 1.1', 'DLC 1.3']) - -# # --- Batch Statistical analysis --- -# # Bar plot -# fig,ax = an_plts.stat_curve(windspeeds, stats, 'RotSpeed', 'bar', names=['DLC1.1', 'DLC1.3']) - -# # Turbulent power curve -# fig,ax = an_plts.stat_curve(windspeeds, stats, 'GenPwr', 'line', stat_idx=0, names=['DLC1.1']) - -# plt.show() + la.process_outputs(cores=3, return_damage=True, goodman=True) + + if save_results: + save_yaml( + results_dir, + "summary_stats.yaml", + convert_summary_stats(la.summary_stats), + ) + + # Load case matrix into dataframe + fname_case_matrix = os.path.join(output_dir, "case_matrix.yaml") + with open(fname_case_matrix, "r") as f: + case_matrix = ry.load(f, Loader=ry.Loader) + cm = pd.DataFrame(case_matrix) + + # Get wind speeds for processed runs + windspeeds, seed, IECtype, cm_wind = get_windspeeds(cm, return_df=True) + + # Get AEP + turbine_class = 1 + pp = PowerProduction(turbine_class) + AEP, perf_data = pp.AEP( + la.summary_stats, + windspeeds, + ["GenPwr", "RtAeroCp", "RotSpeed", "BldPitch1"], + ) + print(f"AEP: {AEP}") + + # # ========== Plotting ========== + # an_plts = Analysis.wsPlotting() + # # --- Time domain analysis --- + # filenames = [outfiles[0][2], outfiles[1][2]] # select the 2nd run from each dataset + # cases = {'Baseline': ['Wind1VelX', 'GenPwr', 'BldPitch1', 'GenTq', 'RotSpeed']} + # fast_dict = fast_io.load_FAST_out(filenames, tmin=30) + # fast_pl.plot_fast_out(cases, fast_dict) + + # # Plot some spectral cases + # spec_cases = [('RootMyb1', 0), ('TwrBsFyt', 0)] + # twrfreq = .0716 + # fig,ax = fast_pl.plot_spectral(fast_dict, spec_cases, show_RtSpeed=True, + # add_freqs=[twrfreq], add_freq_labels=['Tower'], + # 
averaging='Welch') + # ax.set_title('DLC1.1') + + # # Plot a data distribution + # channels = ['RotSpeed'] + # caseid = [0, 1] + # an_plts.distribution(fast_dict, channels, caseid, names=['DLC 1.1', 'DLC 1.3']) + + # # --- Batch Statistical analysis --- + # # Bar plot + # fig,ax = an_plts.stat_curve(windspeeds, stats, 'RotSpeed', 'bar', names=['DLC1.1', 'DLC1.3']) + + # # Turbulent power curve + # fig,ax = an_plts.stat_curve(windspeeds, stats, 'GenPwr', 'line', stat_idx=0, names=['DLC1.1']) + + # plt.show() diff --git a/pCrunch/analysis.py b/pCrunch/analysis.py index 4862575..cf49bb0 100644 --- a/pCrunch/analysis.py +++ b/pCrunch/analysis.py @@ -7,7 +7,6 @@ import os import multiprocessing as mp from functools import partial - import numpy as np import pandas as pd import fatpack @@ -138,13 +137,13 @@ def _process_parallel(self, cores, **kwargs): DELs = {} Damage = {} - pool = mp.Pool(cores) + pool = mp.Pool(processes=cores) returned = pool.map( partial(self._process_output, **kwargs), self.outputs ) pool.close() pool.join() - + for filename, stats, extrs, dels, damage in returned: summary_stats[filename] = stats extremes[filename] = extrs @@ -423,7 +422,7 @@ def _compute_del(ts, elapsed, lifetime, load2stress, slope, Sult, Sc=0.0, **kwar Whether to apply Goodman mean correction to loads and stress Default: False return_damage: boolean - Whether to compute both DEL and true damage + Whether to compute both DEL and damage Default: False """ @@ -572,9 +571,15 @@ def AEP(self, stats, windspeeds, pwr_curve_vars): perf_data = {"U": unique} for var in pwr_curve_vars: - perf_array = stats.loc[:, (var, "mean")].to_frame() + try: + perf_array = stats.loc[:, (var, "mean")].to_frame() + except KeyError: + print(var,"not found. . . continuing") + continue perf_array["windspeed"] = windspeeds perf_array = perf_array.groupby("windspeed").mean() perf_data[var] = perf_array[var] return AEP, perf_data + + diff --git a/pCrunch/io/__init__.py b/pCrunch/io/__init__.py index 95d7d59..5af78e6 100644 --- a/pCrunch/io/__init__.py +++ b/pCrunch/io/__init__.py @@ -3,13 +3,11 @@ __maintainer__ = "Jake Nunemaker" __email__ = ["jake.nunemaker@nrel.gov"] +# Need all these for import here in __init__ even if not used in code below +from pCrunch.io.openfast import OpenFASTAscii, OpenFASTBinary, OpenFASTOutput -import numpy as np -from .openfast import OpenFASTAscii, OpenFASTBinary, OpenFASTOutput - - -def load_FAST_out(filenames, tmin=0, tmax=np.inf, **kwargs): +def load_FAST_out(filenames, tmin=0, tmax=float('inf'), **kwargs): """ Load a list of OpenFAST files. diff --git a/pCrunch/utility.py b/pCrunch/utility.py index 2429719..3f25d68 100644 --- a/pCrunch/utility.py +++ b/pCrunch/utility.py @@ -211,18 +211,27 @@ def get_windspeeds(case_matrix, return_df=False): IECtype.append("NTM") elif "ETM" in fname: IECtype.append("NTM") + elif "EWM" in fname: + IECtype.append("EWM") + else: + IECtype.append("") elif "ECD" in fname: - obj = fname.split("U")[-1].split(".wnd") - windspeed.append(float(obj[0])) + obj = fname.split("U")[-1].split("_D")[0].split(".wnd")[0] + windspeed.append(float(obj)) seed.append([]) IECtype.append("ECD") elif "EWS" in fname: - obj = fname.split("U")[-1].split(".wnd") - windspeed.append(float(obj[0])) + obj = fname.split("U")[-1].split("_D")[0].split(".wnd")[0] + windspeed.append(float(obj)) seed.append([]) IECtype.append("EWS") + + else: + print("Shouldn't get here") + print(fname) + breakpoint() if return_df: case_matrix = pd.DataFrame(case_matrix)
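
Note: the example files above exercise the updated pCrunch API. A minimal end-to-end sketch of that workflow follows, assuming only the calls shown in this diff (LoadsAnalysis, FatigueParams, process_outputs with return_damage/goodman, and the summary_stats/damage/extreme_events accessors); the output directory and channel list are placeholders, not paths from this repository.

    import os
    from pCrunch import LoadsAnalysis, FatigueParams

    output_dir = "path/to/openfast/outputs"   # placeholder path
    outfiles = [
        os.path.join(output_dir, f)
        for f in os.listdir(output_dir)
        if f.endswith((".out", ".outb"))      # OpenFAST ASCII or binary outputs
    ]

    # Vector-magnitude channel assembled from the blade-root moment components
    magnitude_channels = {"RootMc1": ["RootMxc1", "RootMyc1", "RootMzc1"]}

    # Fatigue channels now take FatigueParams objects (design lifetime in years, S-N slope)
    fatigue_channels = {"RootMc1": FatigueParams(lifetime=25, slope=10)}

    la = LoadsAnalysis(
        outfiles,
        magnitude_channels=magnitude_channels,
        fatigue_channels=fatigue_channels,
        extreme_channels=["RotSpeed", "RootMc1"],
        trim_data=(0,),                       # trim each series to (tmin, tmax (optional))
    )

    # return_damage=True also computes Palmgren-Miner damage; goodman=True applies
    # the Goodman mean-load correction; cores enables parallel processing.
    la.process_outputs(cores=4, return_damage=True, goodman=True)

    print(la.summary_stats)     # per-file channel statistics
    print(la.damage)            # Palmgren-Miner damage table
    print(la.extreme_events)    # extreme-event summary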
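
The get_windspeeds update in pCrunch/utility.py also strips an optional _D (direction) token before parsing the wind speed out of ECD/EWS file names. A small illustration of that split chain, using hypothetical file names:

    # Hypothetical ECD/EWS wind-file names; the split chain mirrors the updated parsing.
    for fname in ["ECD_U8.0.wnd", "EWS_U12.0_D180.wnd"]:
        speed = float(fname.split("U")[-1].split("_D")[0].split(".wnd")[0])
        print(fname, "->", speed)   # prints 8.0 and 12.0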