diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 117257e3c..a94805e68 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,7 @@ jobs: build: name: "Build Gusto" # The type of runner that the job will run on - runs-on: self-hosted + runs-on: [self-hosted, Linux] # The docker container to use. container: image: firedrakeproject/firedrake-vanilla:latest @@ -38,6 +38,7 @@ jobs: . /home/firedrake/firedrake/bin/activate firedrake-clean export GUSTO_PARALLEL_LOG=FILE + export PYOP2_CFLAGS=-O0 python -m pytest \ -n 12 --dist worksteal \ --durations=100 \ @@ -54,7 +55,7 @@ jobs: find . -name "*.log" -exec cp --parents {} /__w/gusto/gusto/logs/ \; - name: Upload artifact if: always() - uses: actions/upload-pages-artifact@v1 + uses: actions/upload-pages-artifact@v3 with: name: log-files path: /__w/gusto/gusto/logs diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index e6bec0821..bdc217654 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -29,7 +29,7 @@ jobs: # Steps represent a sequence of tasks that will be executed as # part of the jobs steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install checkedout Gusto run: | . /home/firedrake/firedrake/bin/activate @@ -50,7 +50,7 @@ jobs: cd docs make html - name: Upload artifact - uses: actions/upload-pages-artifact@v1 + uses: actions/upload-pages-artifact@v3 with: name: github-pages path: /__w/gusto/gusto/docs/build/html diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 8c29106e4..e75f285cf 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -12,9 +12,9 @@ jobs: name: "Run linter" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.11 - name: Setup flake8 annotations @@ -33,7 +33,7 @@ jobs: # #example-error-annotation-on-github-actions runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Check workflow files uses: docker://rhysd/actionlint:latest with: diff --git a/.github/workflows/website.yml b/.github/workflows/website.yml index e8d9f5bf9..0e5ff6aff 100644 --- a/.github/workflows/website.yml +++ b/.github/workflows/website.yml @@ -27,16 +27,16 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Pages - uses: actions/configure-pages@v3 + uses: actions/configure-pages@v5 - name: Build with Jekyll uses: actions/jekyll-build-pages@v1 with: source: ./docs destination: ./docs/_site - name: Upload artifact - uses: actions/upload-pages-artifact@v1 + uses: actions/upload-pages-artifact@v3 with: path: ./docs/_site/ @@ -50,4 +50,4 @@ jobs: steps: - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v2 + uses: actions/deploy-pages@v4 diff --git a/Makefile b/Makefile index 4ffd2902f..2b50a2dcb 100644 --- a/Makefile +++ b/Makefile @@ -32,4 +32,8 @@ integration_test: example: @echo " Running all examples" - @python3 -m pytest examples $(PYTEST_ARGS) + @python3 -m pytest examples -v -m "not parallel" $(PYTEST_ARGS) + +parallel_example: + @echo " Running all parallel examples" + @python3 -m pytest examples -v -m "parallel" $(PYTEST_ARGS) diff --git a/README.md b/README.md index 5e1f041e0..935af9cbe 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ These compatible methods underpin the Met Office's 
next-generation model, [LFRic The best way to install Gusto is as an additional package when installing [Firedrake](http://firedrakeproject.org). Usually, for a Mac with Homebrew or an Ubuntu installation this is done by downloading the Firedrake install script and executing it: ``` -curl -0 https://raw.githubusercontent/com/firedrakeproject/firedrake/master/scripts/firedrake-install +curl -0 https://raw.githubusercontent.com/firedrakeproject/firedrake/master/scripts/firedrake-install python3 firedrake-install --install gusto ``` For an up-to-date installation guide, see the [firedrake installation instructions](http://firedrakeproject.org/download.html). Once installed, Gusto must be run from within the Firedrake virtual environment, which is activated via diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index 1b9da8a00..9c7a1c79d 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -222,8 +222,7 @@ GEM rb-fsevent (0.11.2) rb-inotify (0.10.1) ffi (~> 1.0) - rexml (3.3.3) - strscan + rexml (3.3.9) rouge (3.26.0) ruby2_keywords (0.0.5) rubyzip (2.3.2) @@ -238,7 +237,6 @@ GEM faraday (>= 0.17.3, < 3) simpleidn (0.2.1) unf (~> 0.1.4) - strscan (3.1.0) terminal-table (1.8.0) unicode-display_width (~> 1.1, >= 1.1.1) typhoeus (1.4.0) @@ -249,7 +247,7 @@ GEM unf_ext unf_ext (0.0.8.2) unicode-display_width (1.8.0) - webrick (1.8.1) + webrick (1.8.2) PLATFORMS x86_64-linux diff --git a/examples/boussinesq/skamarock_klemp_compressible.py b/examples/boussinesq/skamarock_klemp_compressible.py index b6e6d3bf7..c15daff84 100644 --- a/examples/boussinesq/skamarock_klemp_compressible.py +++ b/examples/boussinesq/skamarock_klemp_compressible.py @@ -1,114 +1,194 @@ """ -The gravity wave test case of Skamarock and Klemp (1994), solved using the -incompressible Boussinesq equations. +This example uses the compressible Boussinesq equations to solve the vertical +slice gravity wave test case of Skamarock and Klemp, 1994: +``Efficiency and Accuracy of the Klemp-Wilhelmson Time-Splitting Technique'', +MWR. -Buoyancy is transported using SUPG. +Buoyancy is transported using SUPG, and the degree 1 elements are used. """ -from gusto import * -from firedrake import (as_vector, PeriodicIntervalMesh, ExtrudedMesh, - sin, SpatialCoordinate, Function, pi) -import sys - -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -dt = 6. -L = 3.0e5 # Domain length -H = 1.0e4 # Height position of the model top - -if '--running-tests' in sys.argv: - tmax = dt - dumpfreq = 1 - columns = 30 # number of columns - nlayers = 5 # horizontal layers - -else: - tmax = 3600. 
- dumpfreq = int(tmax / (2*dt)) - columns = 300 # number of columns - nlayers = 10 # horizontal layers - -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # - -# Domain -m = PeriodicIntervalMesh(columns, L) -mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers) -domain = Domain(mesh, dt, 'CG', 1) - -# Equation -parameters = BoussinesqParameters(cs=300) -eqns = BoussinesqEquations(domain, parameters) - -# I/O -output = OutputParameters( - dirname='skamarock_klemp_compressible', - dumpfreq=dumpfreq, - dumplist=['u'], +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + as_vector, PeriodicIntervalMesh, ExtrudedMesh, sin, SpatialCoordinate, + Function, pi ) -# list of diagnostic fields, each defined in a class in diagnostics.py -diagnostic_fields = [CourantNumber(), Divergence(), Perturbation('b')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) - -# Transport schemes -b_opts = SUPGOptions() -transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "p"), - SSPRK3(domain, "b", options=b_opts)] -transport_methods = [DGUpwind(eqns, "u"), - DGUpwind(eqns, "p"), - DGUpwind(eqns, "b", ibp=b_opts.ibp)] - -# Linear solver -linear_solver = BoussinesqSolver(eqns) - -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver) +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + TrapeziumRule, SUPGOptions, Divergence, Perturbation, CourantNumber, + BoussinesqParameters, BoussinesqEquations, BoussinesqSolver, + boussinesq_hydrostatic_balance +) + +skamarock_klemp_compressible_bouss_defaults = { + 'ncolumns': 300, + 'nlayers': 10, + 'dt': 6.0, + 'tmax': 3600., + 'dumpfreq': 300, + 'dirname': 'skamarock_klemp_compressible_bouss' +} + + +def skamarock_klemp_compressible_bouss( + ncolumns=skamarock_klemp_compressible_bouss_defaults['ncolumns'], + nlayers=skamarock_klemp_compressible_bouss_defaults['nlayers'], + dt=skamarock_klemp_compressible_bouss_defaults['dt'], + tmax=skamarock_klemp_compressible_bouss_defaults['tmax'], + dumpfreq=skamarock_klemp_compressible_bouss_defaults['dumpfreq'], + dirname=skamarock_klemp_compressible_bouss_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Test case parameters + # ------------------------------------------------------------------------ # + + domain_width = 3.0e5 # Width of domain (m) + domain_height = 1.0e4 # Height of domain (m) + wind_initial = 20. # Initial wind in x direction (m/s) + pert_width = 5.0e3 # Width parameter of perturbation (m) + deltab = 1.0e-2 # Magnitude of buoyancy perturbation (m/s^2) + N = 0.01 # Brunt-Vaisala frequency (1/s) + cs = 300. 
# Speed of sound (m/s) + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + base_mesh = PeriodicIntervalMesh(ncolumns, domain_width) + mesh = ExtrudedMesh(base_mesh, nlayers, layer_height=domain_height/nlayers) + domain = Domain(mesh, dt, 'CG', element_order) + + # Equation + parameters = BoussinesqParameters(cs=cs) + eqns = BoussinesqEquations(domain, parameters) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_vtus=True, dump_nc=False, + ) + # list of diagnostic fields, each defined in a class in diagnostics.py + diagnostic_fields = [CourantNumber(), Divergence(), Perturbation('b')] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes + b_opts = SUPGOptions() + transported_fields = [ + TrapeziumRule(domain, "u"), + SSPRK3(domain, "p"), + SSPRK3(domain, "b", options=b_opts) + ] + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "p"), + DGUpwind(eqns, "b", ibp=b_opts.ibp) + ] + + # Linear solver + linear_solver = BoussinesqSolver(eqns) + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + b0 = stepper.fields("b") + p0 = stepper.fields("p") + + # spaces + Vb = b0.function_space() + Vp = p0.function_space() + + x, z = SpatialCoordinate(mesh) + + # first setup the background buoyancy profile + # z.grad(bref) = N**2 + bref = z*(N**2) + # interpolate the expression to the function + b_b = Function(Vb).interpolate(bref) + + # setup constants + b_pert = ( + deltab * sin(pi*z/domain_height) + / (1 + (x - domain_width/2)**2 / pert_width**2) + ) + # interpolate the expression to the function + b0.interpolate(b_b + b_pert) + + p_b = Function(Vp) + boussinesq_hydrostatic_balance(eqns, b_b, p_b) + p0.assign(p_b) + + uinit = (as_vector([wind_initial, 0.0])) + u0.project(uinit) + + # set the background buoyancy + stepper.set_reference_profiles([('p', p_b), ('b', b_b)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + stepper.run(t=0, tmax=tmax) # ---------------------------------------------------------------------------- # -# Initial conditions +# MAIN # ---------------------------------------------------------------------------- # -u0 = stepper.fields("u") -b0 = stepper.fields("b") -p0 = stepper.fields("p") - -# spaces -Vb = b0.function_space() -Vp = p0.function_space() - -x, z = SpatialCoordinate(mesh) - -# first setup the background buoyancy profile -# z.grad(bref) = N**2 -N = parameters.N -bref = z*(N**2) -# interpolate the expression to the function -b_b = Function(Vb).interpolate(bref) - -# setup constants -a = 5.0e3 -deltab = 1.0e-2 -b_pert = deltab*sin(pi*z/H)/(1 + (x - L/2)**2/a**2) -# interpolate the expression to the function -b0.interpolate(b_b + b_pert) -p_b = Function(Vp) -boussinesq_hydrostatic_balance(eqns, b_b, p_b) -p0.assign(p_b) - 
-uinit = (as_vector([20.0, 0.0])) -u0.project(uinit) - -# set the background buoyancy -stepper.set_reference_profiles([('p', p_b), ('b', b_b)]) - -# ---------------------------------------------------------------------------- # -# Run -# ---------------------------------------------------------------------------- # -stepper.run(t=0, tmax=tmax) +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncolumns', + help="The number of columns in the vertical slice mesh.", + type=int, + default=skamarock_klemp_compressible_bouss_defaults['ncolumns'] + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=skamarock_klemp_compressible_bouss_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=skamarock_klemp_compressible_bouss_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=skamarock_klemp_compressible_bouss_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=skamarock_klemp_compressible_bouss_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=skamarock_klemp_compressible_bouss_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + skamarock_klemp_compressible_bouss(**vars(args)) diff --git a/examples/boussinesq/skamarock_klemp_incompressible.py b/examples/boussinesq/skamarock_klemp_incompressible.py index b71e89830..b7bb5fd5e 100644 --- a/examples/boussinesq/skamarock_klemp_incompressible.py +++ b/examples/boussinesq/skamarock_klemp_incompressible.py @@ -1,110 +1,190 @@ """ -The gravity wave test case of Skamarock and Klemp (1994), solved using the -incompressible Boussinesq equations. +This example uses the incompressible Boussinesq equations to solve the vertical +slice gravity wave test case of Skamarock and Klemp, 1994: +``Efficiency and Accuracy of the Klemp-Wilhelmson Time-Splitting Technique'', +MWR. -Buoyancy is transported using SUPG. +Buoyancy is transported using SUPG, and the degree 1 elements are used. """ -from gusto import * -from firedrake import (as_vector, PeriodicIntervalMesh, ExtrudedMesh, - sin, SpatialCoordinate, Function, pi) -import sys - -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -dt = 6. -L = 3.0e5 # Domain length -H = 1.0e4 # Height position of the model top - -if '--running-tests' in sys.argv: - tmax = dt - dumpfreq = 1 - columns = 30 # number of columns - nlayers = 5 # horizontal layers - -else: - tmax = 3600. 
- dumpfreq = int(tmax / (2*dt)) - columns = 300 # number of columns - nlayers = 10 # horizontal layers - -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # - -# Domain -m = PeriodicIntervalMesh(columns, L) -mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers) -domain = Domain(mesh, dt, 'CG', 1) - -# Equation -parameters = BoussinesqParameters() -eqns = BoussinesqEquations(domain, parameters, compressible=False) - -# I/O -output = OutputParameters( - dirname='skamarock_klemp_incompressible', - dumpfreq=dumpfreq, - dumplist=['u'], +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + as_vector, PeriodicIntervalMesh, ExtrudedMesh, sin, SpatialCoordinate, + Function, pi +) +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + TrapeziumRule, SUPGOptions, Divergence, Perturbation, CourantNumber, + BoussinesqParameters, BoussinesqEquations, BoussinesqSolver, + boussinesq_hydrostatic_balance ) -# list of diagnostic fields, each defined in a class in diagnostics.py -diagnostic_fields = [CourantNumber(), Divergence(), Perturbation('b')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) - -# Transport schemes -b_opts = SUPGOptions() -transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "b", options=b_opts)] -transport_methods = [DGUpwind(eqns, "u"), DGUpwind(eqns, "b", ibp=b_opts.ibp)] - -# Linear solver -linear_solver = BoussinesqSolver(eqns) -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver) +skamarock_klemp_incompressible_bouss_defaults = { + 'ncolumns': 300, + 'nlayers': 10, + 'dt': 6.0, + 'tmax': 3600., + 'dumpfreq': 300, + 'dirname': 'skamarock_klemp_incompressible_bouss' +} + + +def skamarock_klemp_incompressible_bouss( + ncolumns=skamarock_klemp_incompressible_bouss_defaults['ncolumns'], + nlayers=skamarock_klemp_incompressible_bouss_defaults['nlayers'], + dt=skamarock_klemp_incompressible_bouss_defaults['dt'], + tmax=skamarock_klemp_incompressible_bouss_defaults['tmax'], + dumpfreq=skamarock_klemp_incompressible_bouss_defaults['dumpfreq'], + dirname=skamarock_klemp_incompressible_bouss_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Test case parameters + # ------------------------------------------------------------------------ # + + domain_width = 3.0e5 # Width of domain (m) + domain_height = 1.0e4 # Height of domain (m) + wind_initial = 20. 
# Initial wind in x direction (m/s) + pert_width = 5.0e3 # Width parameter of perturbation (m) + deltab = 1.0e-2 # Magnitude of buoyancy perturbation (m/s^2) + N = 0.01 # Brunt-Vaisala frequency (1/s) + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + base_mesh = PeriodicIntervalMesh(ncolumns, domain_width) + mesh = ExtrudedMesh(base_mesh, nlayers, layer_height=domain_height/nlayers) + domain = Domain(mesh, dt, 'CG', element_order) + + # Equation + parameters = BoussinesqParameters() + eqns = BoussinesqEquations(domain, parameters, compressible=False) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_vtus=True, dump_nc=True, + ) + # list of diagnostic fields, each defined in a class in diagnostics.py + diagnostic_fields = [CourantNumber(), Divergence(), Perturbation('b')] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes + b_opts = SUPGOptions() + transported_fields = [ + TrapeziumRule(domain, "u"), + SSPRK3(domain, "b", options=b_opts) + ] + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "b", ibp=b_opts.ibp) + ] + + # Linear solver + linear_solver = BoussinesqSolver(eqns) + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + b0 = stepper.fields("b") + p0 = stepper.fields("p") + + # spaces + Vb = b0.function_space() + + x, z = SpatialCoordinate(mesh) + + # first setup the background buoyancy profile + # z.grad(bref) = N**2 + bref = z*(N**2) + # interpolate the expression to the function + b_b = Function(Vb).interpolate(bref) + + # setup constants + b_pert = ( + deltab * sin(pi*z/domain_height) + / (1 + (x - domain_width/2)**2 / pert_width**2) + ) + # interpolate the expression to the function + b0.interpolate(b_b + b_pert) + + boussinesq_hydrostatic_balance(eqns, b_b, p0) + + uinit = (as_vector([wind_initial, 0.0])) + u0.project(uinit) + + # set the background buoyancy + stepper.set_reference_profiles([('b', b_b)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + # Run! 
+ stepper.run(t=0, tmax=tmax) # ---------------------------------------------------------------------------- # -# Initial conditions +# MAIN # ---------------------------------------------------------------------------- # -u0 = stepper.fields("u") -b0 = stepper.fields("b") -p0 = stepper.fields("p") - -# spaces -Vb = b0.function_space() - -x, z = SpatialCoordinate(mesh) - -# first setup the background buoyancy profile -# z.grad(bref) = N**2 -N = parameters.N -bref = z*(N**2) -# interpolate the expression to the function -b_b = Function(Vb).interpolate(bref) - -# setup constants -a = 5.0e3 -deltab = 1.0e-2 -b_pert = deltab*sin(pi*z/H)/(1 + (x - L/2)**2/a**2) -# interpolate the expression to the function -b0.interpolate(b_b + b_pert) - -boussinesq_hydrostatic_balance(eqns, b_b, p0) - -uinit = (as_vector([20.0, 0.0])) -u0.project(uinit) - -# set the background buoyancy -stepper.set_reference_profiles([('b', b_b)]) - -# ---------------------------------------------------------------------------- # -# Run -# ---------------------------------------------------------------------------- # -# Run! -stepper.run(t=0, tmax=tmax) +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncolumns', + help="The number of columns in the vertical slice mesh.", + type=int, + default=skamarock_klemp_incompressible_bouss_defaults['ncolumns'] + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=skamarock_klemp_incompressible_bouss_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=skamarock_klemp_incompressible_bouss_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=skamarock_klemp_incompressible_bouss_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=skamarock_klemp_incompressible_bouss_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=skamarock_klemp_incompressible_bouss_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + skamarock_klemp_incompressible_bouss(**vars(args)) diff --git a/examples/boussinesq/skamarock_klemp_linear.py b/examples/boussinesq/skamarock_klemp_linear.py index bb64c9b3a..e8145548d 100644 --- a/examples/boussinesq/skamarock_klemp_linear.py +++ b/examples/boussinesq/skamarock_klemp_linear.py @@ -1,98 +1,177 @@ """ -The gravity wave test case of Skamarock and Klemp (1994), solved using the -incompressible Boussinesq equations. +This example uses the linear Boussinesq equations to solve the vertical +slice gravity wave test case of Skamarock and Klemp, 1994: +``Efficiency and Accuracy of the Klemp-Wilhelmson Time-Splitting Technique'', +MWR. -Buoyancy is transported using SUPG. +The degree 1 elements are used, with an explicit RK4 time stepper. 
""" -from gusto import * -from firedrake import (PeriodicIntervalMesh, ExtrudedMesh, - sin, SpatialCoordinate, Function, pi) -import sys +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + PeriodicIntervalMesh, ExtrudedMesh, sin, SpatialCoordinate, Function, pi +) +from gusto import ( + Domain, IO, OutputParameters, RK4, DGUpwind, SUPGOptions, Divergence, + Timestepper, Perturbation, CourantNumber, BoussinesqParameters, + LinearBoussinesqEquations, boussinesq_hydrostatic_balance +) + +skamarock_klemp_linear_bouss_defaults = { + 'ncolumns': 300, + 'nlayers': 10, + 'dt': 0.5, + 'tmax': 3600., + 'dumpfreq': 3600, + 'dirname': 'skamarock_klemp_linear_bouss' +} + + +def skamarock_klemp_linear_bouss( + ncolumns=skamarock_klemp_linear_bouss_defaults['ncolumns'], + nlayers=skamarock_klemp_linear_bouss_defaults['nlayers'], + dt=skamarock_klemp_linear_bouss_defaults['dt'], + tmax=skamarock_klemp_linear_bouss_defaults['tmax'], + dumpfreq=skamarock_klemp_linear_bouss_defaults['dumpfreq'], + dirname=skamarock_klemp_linear_bouss_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Test case parameters + # ------------------------------------------------------------------------ # + + domain_width = 3.0e5 # Width of domain (m) + domain_height = 1.0e4 # Height of domain (m) + pert_width = 5.0e3 # Width parameter of perturbation (m) + deltab = 1.0e-2 # Magnitude of buoyancy perturbation (m/s^2) + N = 0.01 # Brunt-Vaisala frequency (1/s) + cs = 300. # Speed of sound (m/s) + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + base_mesh = PeriodicIntervalMesh(ncolumns, domain_width) + mesh = ExtrudedMesh(base_mesh, nlayers, layer_height=domain_height/nlayers) + domain = Domain(mesh, dt, 'CG', element_order) + + # Equation + parameters = BoussinesqParameters(cs=cs) + eqns = LinearBoussinesqEquations(domain, parameters) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_vtus=True, dump_nc=True, + ) + # list of diagnostic fields, each defined in a class in diagnostics.py + diagnostic_fields = [CourantNumber(), Divergence(), Perturbation('b')] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes + b_opts = SUPGOptions() + transport_methods = [ + DGUpwind(eqns, "p"), + DGUpwind(eqns, "b", ibp=b_opts.ibp) + ] + + # Time stepper + stepper = Timestepper( + eqns, RK4(domain), io, spatial_methods=transport_methods + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + b0 = stepper.fields("b") + p0 = stepper.fields("p") + + # spaces + Vb = b0.function_space() + Vp = p0.function_space() + + x, z = SpatialCoordinate(mesh) + + # first setup the background buoyancy profile + # z.grad(bref) = N**2 + bref = z*(N**2) + # interpolate the expression to the function + b_b = Function(Vb).interpolate(bref) + + # setup constants + b_pert = ( + deltab * sin(pi*z/domain_height) + / (1 + (x - domain_width/2)**2 / pert_width**2) + ) + # interpolate the expression to the function + 
b0.interpolate(b_b + b_pert) + + p_b = Function(Vp) + boussinesq_hydrostatic_balance(eqns, b_b, p_b) + p0.assign(p_b) + + # set the background buoyancy + stepper.set_reference_profiles([('p', p_b), ('b', b_b)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + stepper.run(t=0, tmax=tmax) # ---------------------------------------------------------------------------- # -# Test case parameters +# MAIN # ---------------------------------------------------------------------------- # -dt = 0.5 -L = 3.0e5 # Domain length -H = 1.0e4 # Height position of the model top -if '--running-tests' in sys.argv: - tmax = dt - dumpfreq = 1 - columns = 30 # number of columns - nlayers = 5 # horizontal layers - -else: - tmax = 3600. - dumpfreq = int(tmax / (2*dt)) - columns = 300 # number of columns - nlayers = 10 # horizontal layers - -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # - -# Domain -m = PeriodicIntervalMesh(columns, L) -mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers) -domain = Domain(mesh, dt, 'CG', 1) - -# Equation -parameters = BoussinesqParameters(cs=300) -eqns = LinearBoussinesqEquations(domain, parameters) - -# I/O -output = OutputParameters(dirname='skamarock_klemp_linear') -# list of diagnostic fields, each defined in a class in diagnostics.py -diagnostic_fields = [CourantNumber(), Divergence(), Perturbation('b')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) - -# Transport schemes -b_opts = SUPGOptions() -transport_methods = [DGUpwind(eqns, "p"), - DGUpwind(eqns, "b", ibp=b_opts.ibp)] - - -# Time stepper -stepper = Timestepper(eqns, RK4(domain), io, spatial_methods=transport_methods) - -# ---------------------------------------------------------------------------- # -# Initial conditions -# ---------------------------------------------------------------------------- # - -b0 = stepper.fields("b") -p0 = stepper.fields("p") - -# spaces -Vb = b0.function_space() -Vp = p0.function_space() - -x, z = SpatialCoordinate(mesh) - -# first setup the background buoyancy profile -# z.grad(bref) = N**2 -N = parameters.N -bref = z*(N**2) -# interpolate the expression to the function -b_b = Function(Vb).interpolate(bref) - -# setup constants -a = 5.0e3 -deltab = 1.0e-2 -b_pert = deltab*sin(pi*z/H)/(1 + (x - L/2)**2/a**2) -# interpolate the expression to the function -b0.interpolate(b_b + b_pert) - -p_b = Function(Vp) -boussinesq_hydrostatic_balance(eqns, b_b, p_b) -p0.assign(p_b) - -# set the background buoyancy -stepper.set_reference_profiles([('p', p_b), ('b', b_b)]) - -# ---------------------------------------------------------------------------- # -# Run -# ---------------------------------------------------------------------------- # -stepper.run(t=0, tmax=tmax) +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncolumns', + help="The number of columns in the vertical slice mesh.", + type=int, + default=skamarock_klemp_linear_bouss_defaults['ncolumns'] + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=skamarock_klemp_linear_bouss_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + 
default=skamarock_klemp_linear_bouss_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=skamarock_klemp_linear_bouss_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=skamarock_klemp_linear_bouss_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=skamarock_klemp_linear_bouss_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + skamarock_klemp_linear_bouss(**vars(args)) diff --git a/examples/boussinesq/test_boussinesq_examples.py b/examples/boussinesq/test_boussinesq_examples.py new file mode 100644 index 000000000..53d3d479d --- /dev/null +++ b/examples/boussinesq/test_boussinesq_examples.py @@ -0,0 +1,64 @@ +import pytest + + +def make_dirname(test_name): + from mpi4py import MPI + comm = MPI.COMM_WORLD + if comm.size > 1: + return f'pytest_{test_name}_parallel' + else: + return f'pytest_{test_name}' + + +def test_skamarock_klemp_compressible_bouss(): + from skamarock_klemp_compressible import skamarock_klemp_compressible_bouss + test_name = 'skamarock_klemp_compressible_bouss' + skamarock_klemp_compressible_bouss( + ncolumns=30, + nlayers=5, + dt=6.0, + tmax=60.0, + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=2) +def test_skamarock_klemp_compressible_bouss_parallel(): + test_skamarock_klemp_compressible_bouss() + + +def test_skamarock_klemp_incompressible_bouss(): + from skamarock_klemp_incompressible import skamarock_klemp_incompressible_bouss + test_name = 'skamarock_klemp_incompressible_bouss' + skamarock_klemp_incompressible_bouss( + ncolumns=30, + nlayers=5, + dt=60.0, + tmax=12.0, + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=2) +def test_skamarock_klemp_incompressible_bouss_parallel(): + test_skamarock_klemp_incompressible_bouss() + + +def test_skamarock_klemp_linear_bouss(): + from skamarock_klemp_linear import skamarock_klemp_linear_bouss + test_name = 'skamarock_klemp_linear_bouss' + skamarock_klemp_linear_bouss( + ncolumns=30, + nlayers=5, + dt=60.0, + tmax=12.0, + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=2) +def test_skamarock_klemp_linear_bouss_parallel(): + test_skamarock_klemp_linear_bouss() diff --git a/examples/compressible/dcmip_3_1_meanflow_quads.py b/examples/compressible/dcmip_3_1_meanflow_quads.py deleted file mode 100644 index 06701a0ba..000000000 --- a/examples/compressible/dcmip_3_1_meanflow_quads.py +++ /dev/null @@ -1,166 +0,0 @@ -""" -The non-orographic gravity wave test case (3-1) from the DCMIP test case -document of Ullrich et al (2012). - -This uses a cubed-sphere mesh. -""" - -from gusto import * -from firedrake import (CubedSphereMesh, ExtrudedMesh, FunctionSpace, - Function, SpatialCoordinate, as_vector) -from firedrake import exp, acos, cos, sin, pi, sqrt, asin, atan2 -import sys - -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -dt = 100.0 # Time-step size (s) - -if '--running-tests' in sys.argv: - nlayers = 4 # Number of vertical layers - refinements = 2 # Number of horiz. refinements - tmax = dt - dumpfreq = 1 -else: - nlayers = 10 # Number of vertical layers - refinements = 3 # Number of horiz. 
refinements - tmax = 3600.0 - dumpfreq = int(tmax / (4*dt)) - -parameters = CompressibleParameters() -a_ref = 6.37122e6 # Radius of the Earth (m) -X = 125.0 # Reduced-size Earth reduction factor -a = a_ref/X # Scaled radius of planet (m) -g = parameters.g # Acceleration due to gravity (m/s^2) -N = parameters.N # Brunt-Vaisala frequency (1/s) -p_0 = parameters.p_0 # Reference pressure (Pa, not hPa) -c_p = parameters.cp # SHC of dry air at constant pressure (J/kg/K) -R_d = parameters.R_d # Gas constant for dry air (J/kg/K) -kappa = parameters.kappa # R_d/c_p -T_eq = 300.0 # Isothermal atmospheric temperature (K) -p_eq = 1000.0 * 100.0 # Reference surface pressure at the equator -u_max = 20.0 # Maximum amplitude of the zonal wind (m/s) -d = 5000.0 # Width parameter for Theta' -lamda_c = 2.0*pi/3.0 # Longitudinal centerpoint of Theta' -phi_c = 0.0 # Latitudinal centerpoint of Theta' (equator) -deltaTheta = 1.0 # Maximum amplitude of Theta' (K) -L_z = 20000.0 # Vertical wave length of the Theta' perturb. -z_top = 1.0e4 # Height position of the model top - -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # - -# Domain -# Cubed-sphere horizontal mesh -m = CubedSphereMesh(radius=a, - refinement_level=refinements, - degree=2) -# Build volume mesh -mesh = ExtrudedMesh(m, layers=nlayers, - layer_height=z_top/nlayers, - extrusion_type="radial") -domain = Domain(mesh, dt, "RTCF", 1) -x = SpatialCoordinate(mesh) - -# Create polar coordinates: -# Since we use a CG1 field, this is constant on layers -W_Q1 = FunctionSpace(mesh, "CG", 1) -z_expr = sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2]) - a -z = Function(W_Q1).interpolate(z_expr) -lat_expr = asin(x[2]/sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2])) -lat = Function(W_Q1).interpolate(lat_expr) -lon = Function(W_Q1).interpolate(atan2(x[1], x[0])) - -# Equation -eqns = CompressibleEulerEquations(domain, parameters) - -# I/O -dirname = 'dcmip_3_1_meanflow' -output = OutputParameters( - dirname=dirname, - dumpfreq=dumpfreq, -) -diagnostic_fields = [Perturbation('theta'), Perturbation('rho'), - CompressibleKineticEnergy(), PotentialEnergy(eqns)] - -io = IO(domain, output, diagnostic_fields=diagnostic_fields) - -# Transport schemes -transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "rho", fixed_subcycles=2), - SSPRK3(domain, "theta", options=SUPGOptions(), fixed_subcycles=2)] -transport_methods = [DGUpwind(eqns, field) for field in ["u", "rho", "theta"]] - -# Linear solver -linear_solver = CompressibleSolver(eqns) - -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver) - -# ---------------------------------------------------------------------------- # -# Initial conditions -# ---------------------------------------------------------------------------- # - -u0 = stepper.fields('u') -theta0 = stepper.fields('theta') -rho0 = stepper.fields('rho') - -# spaces -Vu = domain.spaces("HDiv") -Vt = domain.spaces("theta") -Vr = domain.spaces("DG") - -# Initial conditions with u0 -uexpr = as_vector([-u_max*x[1]/a, u_max*x[0]/a, 0.0]) -u0.project(uexpr) - -# Surface temperature -G = g**2/(N**2*c_p) -Ts_expr = G + (T_eq-G)*exp(-(u_max*N**2/(4*g*g))*u_max*(cos(2.0*lat)-1.0)) -Ts = Function(W_Q1).interpolate(Ts_expr) - -# Surface pressure -ps_expr = p_eq*exp((u_max/(4.0*G*R_d))*u_max*(cos(2.0*lat)-1.0))*(Ts/T_eq)**(1.0/kappa) -ps = 
Function(W_Q1).interpolate(ps_expr) - -# Background pressure -p_expr = ps*(1 + G/Ts*(exp(-N**2*z/g)-1))**(1.0/kappa) -p = Function(W_Q1).interpolate(p_expr) - -# Background temperature -Tb_expr = G*(1 - exp(N**2*z/g)) + Ts*exp(N**2*z/g) -Tb = Function(W_Q1).interpolate(Tb_expr) - -# Background potential temperature -thetab_expr = Tb*(p_0/p)**kappa -thetab = Function(W_Q1).interpolate(thetab_expr) -theta_b = Function(theta0.function_space()).interpolate(thetab) -rho_b = Function(rho0.function_space()) -sin_tmp = sin(lat) * sin(phi_c) -cos_tmp = cos(lat) * cos(phi_c) -r = a*acos(sin_tmp + cos_tmp*cos(lon-lamda_c)) -s = (d**2)/(d**2 + r**2) -theta_pert = deltaTheta*s*sin(2*pi*z/L_z) -theta0.interpolate(theta_b) - -# Compute the balanced density -compressible_hydrostatic_balance(eqns, - theta_b, - rho_b, - top=False, - exner_boundary=(p/p_0)**kappa) -theta0.interpolate(theta_pert) -theta0 += theta_b -rho0.assign(rho_b) - -stepper.set_reference_profiles([('rho', rho_b), ('theta', theta_b)]) - -# ---------------------------------------------------------------------------- # -# Run -# ---------------------------------------------------------------------------- # - -# Run! -stepper.run(t=0, tmax=tmax) diff --git a/examples/compressible/dry_bryan_fritsch.py b/examples/compressible/dry_bryan_fritsch.py deleted file mode 100644 index 653fc70bb..000000000 --- a/examples/compressible/dry_bryan_fritsch.py +++ /dev/null @@ -1,138 +0,0 @@ -""" -The dry rising bubble test from Bryan & Fritsch (2002). - -This uses the lowest-order function spaces, with the recovered methods for -transporting the fields. The test also uses a non-periodic base mesh. -""" - -from gusto import * -from firedrake import (IntervalMesh, ExtrudedMesh, - SpatialCoordinate, conditional, cos, pi, sqrt, - TestFunction, dx, TrialFunction, Constant, Function, - LinearVariationalProblem, LinearVariationalSolver) -import sys -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -dt = 1.0 -L = 10000. -H = 10000. - -if '--running-tests' in sys.argv: - deltax = 1000. - tmax = 5. -else: - deltax = 100. - tmax = 1000. 
- -degree = 0 -dirname = 'dry_bryan_fritsch' - -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # - -# Domain -nlayers = int(H/deltax) -ncolumns = int(L/deltax) -m = IntervalMesh(ncolumns, L) -mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers) -domain = Domain(mesh, dt, "CG", degree) - -# Equation -params = CompressibleParameters() -u_transport_option = "vector_advection_form" -eqns = CompressibleEulerEquations(domain, params, - u_transport_option=u_transport_option, - no_normal_flow_bc_ids=[1, 2]) - -# I/O -output = OutputParameters( - dirname=dirname, - dumpfreq=int(tmax / (5*dt)), - dumplist=['rho'], - dump_vtus=False, - dump_nc=True, -) -diagnostic_fields = [Perturbation('theta')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) - -# Transport schemes -- set up options for using recovery wrapper -boundary_methods = {'DG': BoundaryMethod.taylor, - 'HDiv': BoundaryMethod.taylor} - -recovery_spaces = RecoverySpaces(domain, boundary_methods, use_vector_spaces=True) - -u_opts = recovery_spaces.HDiv_options -rho_opts = recovery_spaces.DG_options -theta_opts = recovery_spaces.theta_options - -transported_fields = [SSPRK3(domain, "rho", options=rho_opts), - SSPRK3(domain, "theta", options=theta_opts), - SSPRK3(domain, "u", options=u_opts)] - -transport_methods = [DGUpwind(eqns, field) for field in ["u", "rho", "theta"]] - -# Linear solver -linear_solver = CompressibleSolver(eqns) - -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver) - -# ---------------------------------------------------------------------------- # -# Initial conditions -# ---------------------------------------------------------------------------- # - -u0 = stepper.fields("u") -rho0 = stepper.fields("rho") -theta0 = stepper.fields("theta") - -# spaces -Vu = domain.spaces("HDiv") -Vt = domain.spaces("theta") -Vr = domain.spaces("DG") -x, z = SpatialCoordinate(mesh) - -# Define constant theta_e and water_t -Tsurf = 300.0 -theta_b = Function(Vt).interpolate(Constant(Tsurf)) - -# Calculate hydrostatic fields -compressible_hydrostatic_balance(eqns, theta_b, rho0, solve_for_rho=True) - -# make mean fields -rho_b = Function(Vr).assign(rho0) - -# define perturbation -xc = L / 2 -zc = 2000. -rc = 2000. -Tdash = 2.0 -r = sqrt((x - xc) ** 2 + (z - zc) ** 2) -theta_pert = Function(Vt).interpolate(conditional(r > rc, - 0.0, - Tdash * (cos(pi * r / (2.0 * rc))) ** 2)) - -# define initial theta -theta0.interpolate(theta_b * (theta_pert / 300.0 + 1.0)) - -# find perturbed rho -gamma = TestFunction(Vr) -rho_trial = TrialFunction(Vr) -lhs = gamma * rho_trial * dx -rhs = gamma * (rho_b * theta_b / theta0) * dx -rho_problem = LinearVariationalProblem(lhs, rhs, rho0) -rho_solver = LinearVariationalSolver(rho_problem) -rho_solver.solve() - -stepper.set_reference_profiles([('rho', rho_b), - ('theta', theta_b)]) - -# ---------------------------------------------------------------------------- # -# Run -# ---------------------------------------------------------------------------- # - -stepper.run(t=0, tmax=tmax) diff --git a/examples/compressible/mountain_hydrostatic.py b/examples/compressible/mountain_hydrostatic.py deleted file mode 100644 index f1b7ac015..000000000 --- a/examples/compressible/mountain_hydrostatic.py +++ /dev/null @@ -1,222 +0,0 @@ -""" -The 1 metre high mountain test case. 
This is solved with the hydrostatic -compressible Euler equations. -""" - -from gusto import * -from firedrake import (as_vector, VectorFunctionSpace, - PeriodicIntervalMesh, ExtrudedMesh, SpatialCoordinate, - exp, pi, cos, Function, conditional, Mesh, sqrt) -import sys - -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -dt = 5.0 -L = 240000. # Domain length -H = 50000. # Height position of the model top - -if '--running-tests' in sys.argv: - tmax = dt - res = 1 - dumpfreq = 1 -else: - tmax = 15000. - res = 10 - dumpfreq = int(tmax / (5*dt)) - - -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # - -# Domain -# Make an normal extruded mesh which will be distorted to describe the mountain -nlayers = res*20 # horizontal layers -columns = res*12 # number of columns -m = PeriodicIntervalMesh(columns, L) -ext_mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers) -Vc = VectorFunctionSpace(ext_mesh, "DG", 2) - -# Describe the mountain -a = 10000. -xc = L/2. -x, z = SpatialCoordinate(ext_mesh) -hm = 1. -zs = hm*a**2/((x-xc)**2 + a**2) -zh = 5000. -xexpr = as_vector([x, conditional(z < zh, z + cos(0.5*pi*z/zh)**6*zs, z)]) - -# Make new mesh -new_coords = Function(Vc).interpolate(xexpr) -mesh = Mesh(new_coords) -mesh._base_mesh = m # Force new mesh to inherit original base mesh -domain = Domain(mesh, dt, "CG", 1) - -# Equation -parameters = CompressibleParameters(g=9.80665, cp=1004.) -sponge = SpongeLayerParameters(H=H, z_level=H-20000, mubar=0.3) -eqns = CompressibleEulerEquations(domain, parameters, sponge_options=sponge) - -# I/O -dirname = 'hydrostatic_mountain' -output = OutputParameters( - dirname=dirname, - dumpfreq=dumpfreq, - dumplist=['u'], -) -diagnostic_fields = [CourantNumber(), ZComponent('u'), HydrostaticImbalance(eqns), - Perturbation('theta'), Perturbation('rho')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) - -# Transport schemes -theta_opts = SUPGOptions() -transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "rho"), - SSPRK3(domain, "theta", options=theta_opts)] -transport_methods = [DGUpwind(eqns, "u"), - DGUpwind(eqns, "rho"), - DGUpwind(eqns, "theta", ibp=theta_opts.ibp)] - -# Linear solver -params = {'mat_type': 'matfree', - 'ksp_type': 'preonly', - 'pc_type': 'python', - 'pc_python_type': 'firedrake.SCPC', - # Velocity mass operator is singular in the hydrostatic case. 
- # So for reconstruction, we eliminate rho into u - 'pc_sc_eliminate_fields': '1, 0', - 'condensed_field': {'ksp_type': 'fgmres', - 'ksp_rtol': 1.0e-8, - 'ksp_atol': 1.0e-8, - 'ksp_max_it': 100, - 'pc_type': 'gamg', - 'pc_gamg_sym_graph': True, - 'mg_levels': {'ksp_type': 'gmres', - 'ksp_max_it': 5, - 'pc_type': 'bjacobi', - 'sub_pc_type': 'ilu'}}} - -alpha = 0.51 # off-centering parameter -linear_solver = CompressibleSolver(eqns, alpha, solver_parameters=params, - overwrite_solver_parameters=True) - -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver, - alpha=alpha) - -# ---------------------------------------------------------------------------- # -# Initial conditions -# ---------------------------------------------------------------------------- # - -u0 = stepper.fields("u") -rho0 = stepper.fields("rho") -theta0 = stepper.fields("theta") - -# spaces -Vu = domain.spaces("HDiv") -Vt = domain.spaces("theta") -Vr = domain.spaces("DG") - -# Thermodynamic constants required for setting initial conditions -# and reference profiles -g = parameters.g -p_0 = parameters.p_0 -c_p = parameters.cp -R_d = parameters.R_d -kappa = parameters.kappa - -# Hydrostatic case: Isothermal with T = 250 -x, z = SpatialCoordinate(mesh) -Tsurf = 250. -N = g/sqrt(c_p*Tsurf) - -# N^2 = (g/theta)dtheta/dz => dtheta/dz = theta N^2g => theta=theta_0exp(N^2gz) -thetab = Tsurf*exp(N**2*z/g) -theta_b = Function(Vt).interpolate(thetab) - -# Calculate hydrostatic exner -exner = Function(Vr) -rho_b = Function(Vr) - -exner_surf = 1.0 # maximum value of Exner pressure at surface -max_iterations = 10 # maximum number of hydrostatic balance iterations -tolerance = 1e-7 # tolerance for hydrostatic balance iteration - -# Set up kernels to evaluate global minima and maxima of fields -min_kernel = MinKernel() -max_kernel = MaxKernel() - -# First solve hydrostatic balance that gives Exner = 1 at bottom boundary -# This gives us a guess for the top boundary condition -bottom_boundary = Constant(exner_surf, domain=mesh) -logger.info(f'Solving hydrostatic with bottom Exner of {exner_surf}') -compressible_hydrostatic_balance( - eqns, theta_b, rho_b, exner, top=False, exner_boundary=bottom_boundary -) - -# Solve hydrostatic balance again, but now use minimum value from first -# solve as the *top* boundary condition for Exner -top_value = min_kernel.apply(exner) -top_boundary = Constant(top_value, domain=mesh) -logger.info(f'Solving hydrostatic with top Exner of {top_value}') -compressible_hydrostatic_balance( - eqns, theta_b, rho_b, exner, top=True, exner_boundary=top_boundary -) - -max_bottom_value = max_kernel.apply(exner) - -# Now we iterate, adjusting the top boundary condition, until this gives -# a maximum value of 1.0 at the surface -lower_top_guess = 0.9*top_value -upper_top_guess = 1.2*top_value -for i in range(max_iterations): - # If max bottom Exner value is equal to desired value, stop iteration - if abs(max_bottom_value - exner_surf) < tolerance: - break - - # Make new guess by average of previous guesses - top_guess = 0.5*(lower_top_guess + upper_top_guess) - top_boundary.assign(top_guess) - - logger.info( - f'Solving hydrostatic balance iteration {i}, with top Exner value ' - + f'of {top_guess}' - ) - - compressible_hydrostatic_balance( - eqns, theta_b, rho_b, exner, top=True, exner_boundary=top_boundary - ) - - max_bottom_value = max_kernel.apply(exner) - - # Adjust guesses based on new value - if max_bottom_value < exner_surf: - 
lower_top_guess = top_guess - else: - upper_top_guess = top_guess - -logger.info(f'Final max bottom Exner value of {max_bottom_value}') - -# Perform a final solve to obtain hydrostatically balanced rho -compressible_hydrostatic_balance( - eqns, theta_b, rho_b, exner, top=True, exner_boundary=top_boundary, - solve_for_rho=True -) - -theta0.assign(theta_b) -rho0.assign(rho_b) -u0.project(as_vector([20.0, 0.0])) -remove_initial_w(u0) - -stepper.set_reference_profiles([('rho', rho_b), - ('theta', theta_b)]) - -# ---------------------------------------------------------------------------- # -# Run -# ---------------------------------------------------------------------------- # - -stepper.run(t=0, tmax=tmax) diff --git a/examples/compressible/skamarock_klemp_hydrostatic.py b/examples/compressible/skamarock_klemp_hydrostatic.py deleted file mode 100644 index 9988a6d72..000000000 --- a/examples/compressible/skamarock_klemp_hydrostatic.py +++ /dev/null @@ -1,124 +0,0 @@ -""" -The non-linear gravity wave test case of Skamarock and Klemp (1994), but solved -with the hydrostatic Compressible Euler equations. - -Potential temperature is transported using SUPG. -""" - -from gusto import * -from firedrake import (as_vector, SpatialCoordinate, PeriodicRectangleMesh, - ExtrudedMesh, exp, sin, Function, pi) -import sys - -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -dt = 25. -if '--running-tests' in sys.argv: - nlayers = 5 # horizontal layers - columns = 10 # number of columns - tmax = dt - dumpfreq = 1 -else: - nlayers = 10 # horizontal layers - columns = 150 # number of columns - tmax = 60000.0 - dumpfreq = int(tmax / (2*dt)) - -L = 6.0e6 # Length of domain -H = 1.0e4 # Height position of the model top - -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # - -# Domain -- 3D volume mesh -m = PeriodicRectangleMesh(columns, 1, L, 1.e4, quadrilateral=True) -mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers) -domain = Domain(mesh, dt, "RTCF", 1) - -# Equation -parameters = CompressibleParameters(Omega=0.5e-4) -balanced_pg = as_vector((0., -1.0e-4*20, 0.)) -eqns = CompressibleEulerEquations(domain, parameters, - extra_terms=[("u", balanced_pg)]) - -# I/O -dirname = 'skamarock_klemp_hydrostatic' -output = OutputParameters( - dirname=dirname, - dumpfreq=dumpfreq, - dumplist=['u'], -) -diagnostic_fields = [CourantNumber(), Perturbation('theta'), Perturbation('rho')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) - -# Transport schemes -theta_opts = SUPGOptions() -transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "rho"), - SSPRK3(domain, "theta", options=theta_opts)] - -transport_methods = [DGUpwind(eqns, "u"), - DGUpwind(eqns, "rho"), - DGUpwind(eqns, "theta", ibp=theta_opts.ibp)] - -# Linear solver -linear_solver = CompressibleSolver(eqns) - -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver) - -# ---------------------------------------------------------------------------- # -# Initial conditions -# ---------------------------------------------------------------------------- # - -u0 = stepper.fields("u") -rho0 = stepper.fields("rho") -theta0 = stepper.fields("theta") - -# spaces -Vu = domain.spaces("HDiv") -Vt = 
domain.spaces("theta") -Vr = domain.spaces("DG") - -# Thermodynamic constants required for setting initial conditions -# and reference profiles -g = parameters.g -N = parameters.N -p_0 = parameters.p_0 -c_p = parameters.cp -R_d = parameters.R_d -kappa = parameters.kappa - -x, y, z = SpatialCoordinate(mesh) - -# N^2 = (g/theta)dtheta/dz => dtheta/dz = theta N^2g => theta=theta_0exp(N^2gz) -Tsurf = 300. -thetab = Tsurf*exp(N**2*z/g) - -theta_b = Function(Vt).interpolate(thetab) -rho_b = Function(Vr) - -a = 1.0e5 -deltaTheta = 1.0e-2 -theta_pert = deltaTheta*sin(pi*z/H)/(1 + (x - L/2)**2/a**2) -theta0.interpolate(theta_b + theta_pert) - -compressible_hydrostatic_balance(eqns, theta_b, rho_b, - solve_for_rho=True) - -rho0.assign(rho_b) -u0.project(as_vector([20.0, 0.0, 0.0])) - -stepper.set_reference_profiles([('rho', rho_b), - ('theta', theta_b)]) - -# ---------------------------------------------------------------------------- # -# Run -# ---------------------------------------------------------------------------- # - -stepper.run(t=0, tmax=tmax) diff --git a/examples/compressible/skamarock_klemp_nonhydrostatic.py b/examples/compressible/skamarock_klemp_nonhydrostatic.py deleted file mode 100644 index e347472aa..000000000 --- a/examples/compressible/skamarock_klemp_nonhydrostatic.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -The non-linear gravity wave test case of Skamarock and Klemp (1994). - -Potential temperature is transported using SUPG. -""" - -from petsc4py import PETSc -PETSc.Sys.popErrorHandler() -from gusto import * -import itertools -from firedrake import (as_vector, SpatialCoordinate, PeriodicIntervalMesh, - ExtrudedMesh, exp, sin, Function, pi, COMM_WORLD) -import numpy as np -import sys - -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -dt = 6. -L = 3.0e5 # Domain length -H = 1.0e4 # Height position of the model top - -if '--running-tests' in sys.argv: - nlayers = 5 - columns = 30 - tmax = dt - dumpfreq = 1 -else: - nlayers = 10 - columns = 150 - tmax = 3600 - dumpfreq = int(tmax / (2*dt)) - -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # - -# Domain -- 3D volume mesh -m = PeriodicIntervalMesh(columns, L) -mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers) -domain = Domain(mesh, dt, "CG", 1) - -# Equation -Tsurf = 300. -parameters = CompressibleParameters() -eqns = CompressibleEulerEquations(domain, parameters) - -# I/O -points_x = np.linspace(0., L, 100) -points_z = [H/2.] 
-points = np.array([p for p in itertools.product(points_x, points_z)]) -dirname = 'skamarock_klemp_nonlinear' - -# Dumping point data using legacy PointDataOutput is not supported in parallel -if COMM_WORLD.size == 1: - output = OutputParameters( - dirname=dirname, - dumpfreq=dumpfreq, - pddumpfreq=dumpfreq, - dumplist=['u'], - point_data=[('theta_perturbation', points)], - ) -else: - logger.warning( - 'Dumping point data using legacy PointDataOutput is not' - ' supported in parallel\nDisabling PointDataOutput' - ) - output = OutputParameters( - dirname=dirname, - dumpfreq=dumpfreq, - pddumpfreq=dumpfreq, - dumplist=['u'], - ) - -diagnostic_fields = [CourantNumber(), Gradient('u'), Perturbation('theta'), - Gradient('theta_perturbation'), Perturbation('rho'), - RichardsonNumber('theta', parameters.g/Tsurf), Gradient('theta')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) - -# Transport schemes -theta_opts = SUPGOptions() -transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "rho"), - SSPRK3(domain, "theta", options=theta_opts)] -transport_methods = [DGUpwind(eqns, "u"), - DGUpwind(eqns, "rho"), - DGUpwind(eqns, "theta", ibp=theta_opts.ibp)] - -# Linear solver -linear_solver = CompressibleSolver(eqns) - -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver) - -# ---------------------------------------------------------------------------- # -# Initial conditions -# ---------------------------------------------------------------------------- # - -u0 = stepper.fields("u") -rho0 = stepper.fields("rho") -theta0 = stepper.fields("theta") - -# spaces -Vu = domain.spaces("HDiv") -Vt = domain.spaces("theta") -Vr = domain.spaces("DG") - -# Thermodynamic constants required for setting initial conditions -# and reference profiles -g = parameters.g -N = parameters.N - -x, z = SpatialCoordinate(mesh) - -# N^2 = (g/theta)dtheta/dz => dtheta/dz = theta N^2g => theta=theta_0exp(N^2gz) -thetab = Tsurf*exp(N**2*z/g) - -theta_b = Function(Vt).interpolate(thetab) -rho_b = Function(Vr) - -# Calculate hydrostatic exner -compressible_hydrostatic_balance(eqns, theta_b, rho_b) - -a = 5.0e3 -deltaTheta = 1.0e-2 -theta_pert = deltaTheta*sin(pi*z/H)/(1 + (x - L/2)**2/a**2) -theta0.interpolate(theta_b + theta_pert) -rho0.assign(rho_b) -u0.project(as_vector([20.0, 0.0])) - -stepper.set_reference_profiles([('rho', rho_b), - ('theta', theta_b)]) - -# ---------------------------------------------------------------------------- # -# Run -# ---------------------------------------------------------------------------- # - -stepper.run(t=0, tmax=tmax) diff --git a/examples/compressible/straka_bubble.py b/examples/compressible/straka_bubble.py deleted file mode 100644 index 2d3388ccc..000000000 --- a/examples/compressible/straka_bubble.py +++ /dev/null @@ -1,130 +0,0 @@ -""" -The falling cold density current test of Straka et al (1993). - -This example runs at a series of resolutions with different time steps. -""" - -from gusto import * -from firedrake import (PeriodicIntervalMesh, ExtrudedMesh, SpatialCoordinate, - Constant, pi, cos, Function, sqrt, - conditional) -import sys - -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -if '--running-tests' in sys.argv: - res_dt = {800.: 4.} - tmax = 4. 
- ndumps = 1 -else: - res_dt = {800.: 4., 400.: 2., 200.: 1., 100.: 0.5, 50.: 0.25} - tmax = 15.*60. - ndumps = 4 - - -L = 51200. - -# build volume mesh -H = 6400. # Height position of the model top - -for delta, dt in res_dt.items(): - - # ------------------------------------------------------------------------ # - # Set up model objects - # ------------------------------------------------------------------------ # - - # Domain - nlayers = int(H/delta) # horizontal layers - columns = int(L/delta) # number of columns - m = PeriodicIntervalMesh(columns, L) - mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers) - domain = Domain(mesh, dt, "CG", 1) - - # Equation - parameters = CompressibleParameters() - u_diffusion_opts = DiffusionParameters(kappa=75., mu=10./delta) - theta_diffusion_opts = DiffusionParameters(kappa=75., mu=10./delta) - diffusion_options = [("u", u_diffusion_opts), ("theta", theta_diffusion_opts)] - eqns = CompressibleEulerEquations(domain, parameters, - diffusion_options=diffusion_options) - - # I/O - dirname = "straka_dx%s_dt%s" % (delta, dt) - dumpfreq = int(tmax / (ndumps*dt)) - output = OutputParameters( - dirname=dirname, - dumpfreq=dumpfreq, - dumplist=['u'], - ) - diagnostic_fields = [CourantNumber(), Perturbation('theta'), Perturbation('rho')] - io = IO(domain, output, diagnostic_fields=diagnostic_fields) - - # Transport schemes - theta_opts = SUPGOptions() - transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "rho"), - SSPRK3(domain, "theta", options=theta_opts)] - transport_methods = [DGUpwind(eqns, "u"), - DGUpwind(eqns, "rho"), - DGUpwind(eqns, "theta", ibp=theta_opts.ibp)] - - # Linear solver - linear_solver = CompressibleSolver(eqns) - - # Diffusion schemes - diffusion_schemes = [BackwardEuler(domain, "u"), - BackwardEuler(domain, "theta")] - diffusion_methods = [InteriorPenaltyDiffusion(eqns, "u", u_diffusion_opts), - InteriorPenaltyDiffusion(eqns, "theta", theta_diffusion_opts)] - - # Time stepper - stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - spatial_methods=transport_methods+diffusion_methods, - linear_solver=linear_solver, - diffusion_schemes=diffusion_schemes) - - # ------------------------------------------------------------------------ # - # Initial conditions - # ------------------------------------------------------------------------ # - - u0 = stepper.fields("u") - rho0 = stepper.fields("rho") - theta0 = stepper.fields("theta") - - # spaces - Vu = domain.spaces("HDiv") - Vt = domain.spaces("theta") - Vr = domain.spaces("DG") - - # Isentropic background state - Tsurf = Constant(300.) - - theta_b = Function(Vt).interpolate(Tsurf) - rho_b = Function(Vr) - exner = Function(Vr) - - # Calculate hydrostatic exner - compressible_hydrostatic_balance(eqns, theta_b, rho_b, exner0=exner, - solve_for_rho=True) - - x = SpatialCoordinate(mesh) - a = 5.0e3 - xc = 0.5*L - xr = 4000. - zc = 3000. - zr = 2000. 
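# For the isentropic background used above (constant theta_b = 300 K), the
# hydrostatic Exner pressure has the exact solution
# exner(z) = exner(0) - g*z/(cp*theta_b), which gives a quick sanity check on
# the output of compressible_hydrostatic_balance. A standalone sketch,
# assuming g = 9.80665 m/s^2, cp = 1004 J/kg/K and a surface Exner value of 1:
g = 9.80665
cp = 1004.0
theta_b_const = 300.0
H_top = 6400.0                                  # model top (m)
exner_top = 1.0 - g*H_top/(cp*theta_b_const)    # ~0.79 at the model top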
- r = sqrt(((x[0]-xc)/xr)**2 + ((x[1]-zc)/zr)**2) - T_pert = conditional(r > 1., 0., -7.5*(1.+cos(pi*r))) - theta0.interpolate(theta_b + T_pert*exner) - rho0.assign(rho_b) - - stepper.set_reference_profiles([('rho', rho_b), - ('theta', theta_b)]) - - # ------------------------------------------------------------------------ # - # Run - # ------------------------------------------------------------------------ # - - stepper.run(t=0, tmax=tmax) diff --git a/examples/compressible/unsaturated_bubble.py b/examples/compressible/unsaturated_bubble.py deleted file mode 100644 index 23e38624f..000000000 --- a/examples/compressible/unsaturated_bubble.py +++ /dev/null @@ -1,236 +0,0 @@ -""" -A moist thermal in an unsaturated atmosphere. This test is based on that of -Grabowski and Clark (1991), and is described in Bendall et al (2020). - -As the thermal rises, water vapour condenses into cloud and forms rain. -Limiters are applied to the transport of the water species. -""" -from gusto import * -from gusto.equations import thermodynamics -from firedrake import (PeriodicIntervalMesh, ExtrudedMesh, - SpatialCoordinate, conditional, cos, pi, sqrt, exp, - TestFunction, dx, TrialFunction, Constant, Function, - LinearVariationalProblem, LinearVariationalSolver, - errornorm) -from firedrake.slope_limiter.vertex_based_limiter import VertexBasedLimiter -import sys - -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -dt = 1.0 -if '--running-tests' in sys.argv: - deltax = 240. - tmax = 10. - tdump = tmax -else: - deltax = 20. - tmax = 600. - tdump = 100. - -L = 3600. -h = 2400. -nlayers = int(h/deltax) -ncolumns = int(L/deltax) -degree = 0 - -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # - -# Domain -m = PeriodicIntervalMesh(ncolumns, L) -mesh = ExtrudedMesh(m, layers=nlayers, layer_height=h/nlayers) -domain = Domain(mesh, dt, "CG", degree) - -# Equation -params = CompressibleParameters() -tracers = [WaterVapour(), CloudWater(), Rain()] -eqns = CompressibleEulerEquations(domain, params, - active_tracers=tracers) - -# I/O -dirname = 'unsaturated_bubble' -output = OutputParameters( - dirname=dirname, - dumpfreq=tdump, - dump_nc=True, - dumplist=['cloud_water', 'rain'], - checkpoint=False -) -diagnostic_fields = [RelativeHumidity(eqns), Perturbation('theta'), - Perturbation('water_vapour'), Perturbation('rho'), Perturbation('RelativeHumidity')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) -# Transport schemes -- specify options for using recovery wrapper -boundary_methods = {'DG': BoundaryMethod.taylor, - 'HDiv': BoundaryMethod.taylor} - -recovery_spaces = RecoverySpaces(domain, boundary_method=boundary_methods, use_vector_spaces=True) - -u_opts = recovery_spaces.HDiv_options -rho_opts = recovery_spaces.DG_options -theta_opts = recovery_spaces.theta_options - -VDG1 = domain.spaces("DG1_equispaced") -limiter = VertexBasedLimiter(VDG1) - -transported_fields = [SSPRK3(domain, "u", options=u_opts), - SSPRK3(domain, "rho", options=rho_opts), - SSPRK3(domain, "theta", options=theta_opts), - SSPRK3(domain, "water_vapour", options=theta_opts, limiter=limiter), - SSPRK3(domain, "cloud_water", options=theta_opts, limiter=limiter), - SSPRK3(domain, "rain", options=theta_opts, limiter=limiter)] - -transport_methods = [DGUpwind(eqns, 
field) for field in ["u", "rho", "theta", "water_vapour", "cloud_water", "rain"]] - -# Linear solver -linear_solver = CompressibleSolver(eqns) - -# Physics schemes -# NB: can't yet use wrapper or limiter options with physics -Vt = domain.spaces('theta') -rainfall_method = DGUpwind(eqns, 'rain', outflow=True) -zero_limiter = MixedFSLimiter(eqns, {'water_vapour': ZeroLimiter(Vt), - 'cloud_water': ZeroLimiter(Vt)}) -physics_schemes = [(Fallout(eqns, 'rain', domain, rainfall_method), SSPRK3(domain)), - (Coalescence(eqns), ForwardEuler(domain)), - (EvaporationOfRain(eqns), ForwardEuler(domain)), - (SaturationAdjustment(eqns), ForwardEuler(domain, limiter=zero_limiter))] - -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver, - physics_schemes=physics_schemes) - -# ---------------------------------------------------------------------------- # -# Initial conditions -# ---------------------------------------------------------------------------- # - -u0 = stepper.fields("u") -rho0 = stepper.fields("rho") -theta0 = stepper.fields("theta") -water_v0 = stepper.fields("water_vapour") -water_c0 = stepper.fields("cloud_water") -rain0 = stepper.fields("rain") - -# spaces -Vu = domain.spaces("HDiv") -Vr = domain.spaces("DG") -x, z = SpatialCoordinate(mesh) -quadrature_degree = (4, 4) -dxp = dx(degree=(quadrature_degree)) - -physics_boundary_method = BoundaryMethod.extruded - -# Define constant theta_e and water_t -Tsurf = 283.0 -psurf = 85000. -exner_surf = (psurf / eqns.parameters.p_0) ** eqns.parameters.kappa -humidity = 0.2 -S = 1.3e-5 -theta_surf = thermodynamics.theta(eqns.parameters, Tsurf, psurf) -theta_d = Function(Vt).interpolate(theta_surf * exp(S*z)) -H = Function(Vt).assign(humidity) - -# Calculate hydrostatic fields -unsaturated_hydrostatic_balance(eqns, stepper.fields, theta_d, H, - exner_boundary=Constant(exner_surf)) - -# make mean fields -theta_b = Function(Vt).assign(theta0) -rho_b = Function(Vr).assign(rho0) -water_vb = Function(Vt).assign(water_v0) - -# define perturbation to RH -xc = L / 2 -zc = 800. -r1 = 300. -r2 = 200. 
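# The background dry potential temperature used above, theta_d = theta_surf *
# exp(S*z) with S = 1.3e-5 1/m, is a weakly stable profile. Since
# N^2 = g * d(ln theta)/dz, the implied dry buoyancy frequency is constant.
# A standalone sketch, assuming g = 9.80665 m/s^2:
import numpy as np

g = 9.80665
S = 1.3e-5
N_implied = np.sqrt(g * S)    # ~0.011 1/s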
-r = sqrt((x - xc) ** 2 + (z - zc) ** 2) - -H_expr = conditional( - r > r1, 0.0, - conditional(r > r2, - (1 - humidity) * cos(pi * (r - r2) - / (2 * (r1 - r2))) ** 2, - 1 - humidity)) -H_pert = Function(Vt).interpolate(H_expr) -H.assign(H + H_pert) - -# now need to find perturbed rho, theta_vd and r_v -# follow approach used in unsaturated hydrostatic setup -rho_averaged = Function(Vt) -rho_recoverer = Recoverer(rho0, rho_averaged, boundary_method=physics_boundary_method) -rho_h = Function(Vr) -w_h = Function(Vt) -delta = 1.0 - -R_d = eqns.parameters.R_d -R_v = eqns.parameters.R_v -epsilon = R_d / R_v - -# make expressions for determining water_v0 -exner = thermodynamics.exner_pressure(eqns.parameters, rho_averaged, theta0) -p = thermodynamics.p(eqns.parameters, exner) -T = thermodynamics.T(eqns.parameters, theta0, exner, water_v0) -r_v_expr = thermodynamics.r_v(eqns.parameters, H, T, p) - -# make expressions to evaluate residual -exner_ev = thermodynamics.exner_pressure(eqns.parameters, rho_averaged, theta0) -p_ev = thermodynamics.p(eqns.parameters, exner_ev) -T_ev = thermodynamics.T(eqns.parameters, theta0, exner_ev, water_v0) -RH_ev = thermodynamics.RH(eqns.parameters, water_v0, T_ev, p_ev) -RH = Function(Vt) - -# set-up rho problem to keep exner constant -gamma = TestFunction(Vr) -rho_trial = TrialFunction(Vr) -a = gamma * rho_trial * dxp -L = gamma * (rho_b * theta_b / theta0) * dxp -rho_problem = LinearVariationalProblem(a, L, rho_h) -rho_solver = LinearVariationalSolver(rho_problem) - -max_outer_solve_count = 20 -max_inner_solve_count = 10 - -for i in range(max_outer_solve_count): - # calculate averaged rho - rho_recoverer.project() - - RH.interpolate(RH_ev) - if errornorm(RH, H) < 1e-10: - break - - # first solve for r_v - for j in range(max_inner_solve_count): - w_h.interpolate(r_v_expr) - water_v0.assign(water_v0 * (1 - delta) + delta * w_h) - - # compute theta_vd - theta0.interpolate(theta_d * (1 + water_v0 / epsilon)) - - # test quality of solution by re-evaluating expression - RH.interpolate(RH_ev) - if errornorm(RH, H) < 1e-10: - break - - # now solve for rho with theta_vd and w_v guesses - rho_solver.solve() - - # damp solution - rho0.assign(rho0 * (1 - delta) + delta * rho_h) - - if i == max_outer_solve_count: - raise RuntimeError('Hydrostatic balance solve has not converged within %i' % i, 'iterations') - -# initialise fields -stepper.set_reference_profiles([('rho', rho_b), - ('theta', theta_b), - ('water_vapour', water_vb)]) -# ---------------------------------------------------------------------------- # -# Run -# ---------------------------------------------------------------------------- # - -stepper.run(t=0, tmax=tmax) diff --git a/examples/compressible_euler/dcmip_3_1_gravity_wave.py b/examples/compressible_euler/dcmip_3_1_gravity_wave.py new file mode 100644 index 000000000..2fccfbe3f --- /dev/null +++ b/examples/compressible_euler/dcmip_3_1_gravity_wave.py @@ -0,0 +1,231 @@ +""" +The non-orographic gravity wave test case (3-1) from the DCMIP test case +document of Ullrich et al, 2012: +``Dynamical core model intercomparison project (DCMIP) test case document''. + +This uses a cubed-sphere mesh, the degree 1 finite element spaces and tests +substepping the transport schemes. 
+""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + ExtrudedMesh, Function, SpatialCoordinate, as_vector, + exp, acos, cos, sin, pi +) +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + TrapeziumRule, SUPGOptions, lonlatr_from_xyz, CompressibleParameters, + CompressibleEulerEquations, CompressibleSolver, ZonalComponent, + compressible_hydrostatic_balance, Perturbation, GeneralCubedSphereMesh, +) + +dcmip_3_1_gravity_wave_defaults = { + 'ncells_per_edge': 8, + 'nlayers': 10, + 'dt': 50.0, + 'tmax': 3600., + 'dumpfreq': 9, + 'dirname': 'dcmip_3_1_gravity_wave' +} + + +def dcmip_3_1_gravity_wave( + ncells_per_edge=dcmip_3_1_gravity_wave_defaults['ncells_per_edge'], + nlayers=dcmip_3_1_gravity_wave_defaults['nlayers'], + dt=dcmip_3_1_gravity_wave_defaults['dt'], + tmax=dcmip_3_1_gravity_wave_defaults['tmax'], + dumpfreq=dcmip_3_1_gravity_wave_defaults['dumpfreq'], + dirname=dcmip_3_1_gravity_wave_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Test case parameters + # ------------------------------------------------------------------------ # + + parameters = CompressibleParameters() + a_ref = 6.37122e6 # Radius of the Earth (m) + X = 125.0 # Reduced-size Earth reduction factor + a = a_ref/X # Scaled radius of planet (m) + g = parameters.g # Acceleration due to gravity (m/s^2) + N = 0.01 # Brunt-Vaisala frequency (1/s) + p_0 = parameters.p_0 # Reference pressure (Pa, not hPa) + c_p = parameters.cp # SHC of dry air at constant pressure (J/kg/K) + R_d = parameters.R_d # Gas constant for dry air (J/kg/K) + kappa = parameters.kappa # R_d/c_p + T_eq = 300.0 # Isothermal atmospheric temperature (K) + p_eq = 1000.0 * 100.0 # Reference surface pressure at the equator + u_max = 20.0 # Maximum amplitude of the zonal wind (m/s) + d = 5000.0 # Width parameter for Theta' + lamda_c = 2.0*pi/3.0 # Longitudinal centerpoint of Theta' + phi_c = 0.0 # Latitudinal centerpoint of Theta' (equator) + deltaTheta = 1.0 # Maximum amplitude of Theta' (K) + L_z = 20000.0 # Vertical wave length of the Theta' perturb. 
+ z_top = 1.0e4 # Height position of the model top + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + u_eqn_type = 'vector_invariant_form' + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + base_mesh = GeneralCubedSphereMesh(a, ncells_per_edge, degree=2) + mesh = ExtrudedMesh( + base_mesh, nlayers, layer_height=z_top/nlayers, + extrusion_type="radial" + ) + domain = Domain(mesh, dt, "RTCF", element_order) + + # Equation + eqns = CompressibleEulerEquations( + domain, parameters, u_transport_option=u_eqn_type + ) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_vtus=False, dump_nc=True + ) + diagnostic_fields = [ + Perturbation('theta'), Perturbation('rho'), ZonalComponent('u'), + ] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes + transported_fields = [ + TrapeziumRule(domain, "u"), + SSPRK3(domain, "rho", fixed_subcycles=2), + SSPRK3(domain, "theta", options=SUPGOptions(), fixed_subcycles=2) + ] + transport_methods = [ + DGUpwind(eqns, field) for field in ["u", "rho", "theta"] + ] + + # Linear solver + linear_solver = CompressibleSolver(eqns) + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields('u') + theta0 = stepper.fields('theta') + rho0 = stepper.fields('rho') + + # spaces + Vr = domain.spaces("DG") + + x, y, z = SpatialCoordinate(mesh) + lon, lat, r = lonlatr_from_xyz(x, y, z) + h = r - a + + # Initial conditions with u0 + uexpr = as_vector([-u_max*y/a, u_max*x/a, 0.0]) + + # Surface temperature + G = g**2/(N**2*c_p) + Ts_expr = ( + G + (T_eq - G) * exp(-(u_max*N**2/(4*g*g)) * u_max*(cos(2.0*lat)-1.0)) + ) + + # Surface pressure + ps_expr = ( + p_eq * exp((u_max/(4.0*G*R_d)) * u_max*(cos(2.0*lat)-1.0)) + * (Ts_expr / T_eq)**(1.0/kappa) + ) + + # Background pressure + p_expr = ps_expr*(1 + G/Ts_expr*(exp(-N**2*h/g)-1))**(1.0/kappa) + p = Function(Vr).interpolate(p_expr) + + # Background temperature + Tb_expr = G*(1 - exp(N**2*h/g)) + Ts_expr*exp(N**2*h/g) + + # Background potential temperature + thetab_expr = Tb_expr*(p_0/p)**kappa + theta_b = Function(theta0.function_space()).interpolate(thetab_expr) + rho_b = Function(rho0.function_space()) + sin_tmp = sin(lat) * sin(phi_c) + cos_tmp = cos(lat) * cos(phi_c) + l = a*acos(sin_tmp + cos_tmp*cos(lon-lamda_c)) + s = (d**2)/(d**2 + l**2) + theta_pert = deltaTheta*s*sin(2*pi*h/L_z) + + # Compute the balanced density + compressible_hydrostatic_balance( + eqns, theta_b, rho_b, top=False, exner_boundary=(p/p_0)**kappa + ) + + u0.project(uexpr) + theta0.interpolate(theta_b + theta_pert) + rho0.assign(rho_b) + + stepper.set_reference_profiles([('rho', rho_b), ('theta', theta_b)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + # Run! 
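    # With the default settings this is tmax/dt = 3600/50 = 72 steps, with
    # netCDF output written every 9th step.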
+ stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncells_per_edge', + help="The number of cells per panel edge of the cubed-sphere.", + type=int, + default=dcmip_3_1_gravity_wave_defaults['ncells_per_edge'] + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=dcmip_3_1_gravity_wave_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=dcmip_3_1_gravity_wave_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=dcmip_3_1_gravity_wave_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=dcmip_3_1_gravity_wave_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=dcmip_3_1_gravity_wave_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + dcmip_3_1_gravity_wave(**vars(args)) diff --git a/examples/compressible_euler/dry_bryan_fritsch.py b/examples/compressible_euler/dry_bryan_fritsch.py new file mode 100644 index 000000000..dc4c92e9e --- /dev/null +++ b/examples/compressible_euler/dry_bryan_fritsch.py @@ -0,0 +1,220 @@ +""" +The dry rising bubble test from Bryan & Fritsch, 2002: +``A Benchmark Simulation for Moist Nonhydrostatic Numerical Models'', GMD. + +This uses the lowest-order function spaces, with the recovered methods for +transporting the fields. The test also uses a non-periodic base mesh. +""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + IntervalMesh, ExtrudedMesh, SpatialCoordinate, conditional, cos, pi, sqrt, + TestFunction, dx, TrialFunction, Constant, Function, as_vector, + LinearVariationalProblem, LinearVariationalSolver +) +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + RecoverySpaces, BoundaryMethod, Perturbation, CompressibleParameters, + CompressibleEulerEquations, CompressibleSolver, + compressible_hydrostatic_balance +) + +dry_bryan_fritsch_defaults = { + 'ncolumns': 100, + 'nlayers': 100, + 'dt': 2.0, + 'tmax': 1000., + 'dumpfreq': 500, + 'dirname': 'dry_bryan_fritsch' +} + + +def dry_bryan_fritsch( + ncolumns=dry_bryan_fritsch_defaults['ncolumns'], + nlayers=dry_bryan_fritsch_defaults['nlayers'], + dt=dry_bryan_fritsch_defaults['dt'], + tmax=dry_bryan_fritsch_defaults['tmax'], + dumpfreq=dry_bryan_fritsch_defaults['dumpfreq'], + dirname=dry_bryan_fritsch_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # + + domain_width = 10000. # domain width (m) + domain_height = 10000. # domain height (m) + zc = 2000. # vertical centre of bubble (m) + rc = 2000. 
# radius of bubble (m) + Tdash = 2.0 # strength of temperature perturbation (K) + Tsurf = 300.0 # background theta value (K) + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 0 + u_eqn_type = 'vector_advection_form' + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + base_mesh = IntervalMesh(ncolumns, domain_width) + mesh = ExtrudedMesh(base_mesh, nlayers, layer_height=domain_height/nlayers) + domain = Domain(mesh, dt, "CG", element_order) + + # Equation + params = CompressibleParameters() + eqns = CompressibleEulerEquations( + domain, params, u_transport_option=u_eqn_type, + no_normal_flow_bc_ids=[1, 2] + ) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_vtus=False, dump_nc=True, + dumplist=['rho'] + ) + diagnostic_fields = [Perturbation('theta')] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes -- set up options for using recovery wrapper + boundary_methods = {'DG': BoundaryMethod.taylor, + 'HDiv': BoundaryMethod.taylor} + + recovery_spaces = RecoverySpaces( + domain, boundary_methods, use_vector_spaces=True + ) + + u_opts = recovery_spaces.HDiv_options + rho_opts = recovery_spaces.DG_options + theta_opts = recovery_spaces.theta_options + + transported_fields = [ + SSPRK3(domain, "rho", options=rho_opts), + SSPRK3(domain, "theta", options=theta_opts), + SSPRK3(domain, "u", options=u_opts) + ] + + transport_methods = [ + DGUpwind(eqns, field) for field in ["u", "rho", "theta"] + ] + + # Linear solver + linear_solver = CompressibleSolver(eqns) + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + rho0 = stepper.fields("rho") + theta0 = stepper.fields("theta") + + # spaces + Vt = domain.spaces("theta") + Vr = domain.spaces("DG") + x, z = SpatialCoordinate(mesh) + + # Define constant theta_e and water_t + theta_b = Function(Vt).interpolate(Constant(Tsurf)) + + # Set initial wind to be zero + zero = Constant(0.0, domain=mesh) + u0.project(as_vector([zero, zero])) + + # Calculate hydrostatic fields + compressible_hydrostatic_balance(eqns, theta_b, rho0, solve_for_rho=True) + + # make mean fields + rho_b = Function(Vr).assign(rho0) + + # define perturbation + xc = domain_width / 2 + r = sqrt((x - xc) ** 2 + (z - zc) ** 2) + theta_pert = Function(Vt).interpolate( + conditional( + r > rc, + 0.0, + Tdash * (cos(pi * r / (2.0 * rc))) ** 2 + ) + ) + + # define initial theta + theta0.interpolate(theta_b * (theta_pert / 300.0 + 1.0)) + + # find perturbed rho + gamma = TestFunction(Vr) + rho_trial = TrialFunction(Vr) + lhs = gamma * rho_trial * dx + rhs = gamma * (rho_b * theta_b / theta0) * dx + rho_problem = LinearVariationalProblem(lhs, rhs, rho0) + rho_solver = LinearVariationalSolver(rho_problem) + rho_solver.solve() + + stepper.set_reference_profiles([('rho', rho_b), ('theta', theta_b)]) + + # ------------------------------------------------------------------------ # + # Run + # 
------------------------------------------------------------------------ # + + stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncolumns', + help="The number of columns in the vertical slice mesh.", + type=int, + default=dry_bryan_fritsch_defaults['ncolumns'] + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=dry_bryan_fritsch_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=dry_bryan_fritsch_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=dry_bryan_fritsch_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=dry_bryan_fritsch_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=dry_bryan_fritsch_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + dry_bryan_fritsch(**vars(args)) diff --git a/examples/compressible_euler/mountain_hydrostatic.py b/examples/compressible_euler/mountain_hydrostatic.py new file mode 100644 index 000000000..1fc113a71 --- /dev/null +++ b/examples/compressible_euler/mountain_hydrostatic.py @@ -0,0 +1,305 @@ +""" +The hydrostatic 1 metre high mountain test case from Melvin et al, 2010: +``An inherently mass-conserving iterative semi-implicit semi-Lagrangian +discretization of the non-hydrostatic vertical-slice equations.'', QJRMS. + +This test describes a wave over a mountain in a hydrostatic atmosphere. + +The setup used here uses the order 1 finite elements. +""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + as_vector, VectorFunctionSpace, PeriodicIntervalMesh, ExtrudedMesh, + SpatialCoordinate, exp, pi, cos, Function, conditional, Mesh, Constant +) +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + TrapeziumRule, SUPGOptions, ZComponent, Perturbation, + CompressibleParameters, HydrostaticCompressibleEulerEquations, + CompressibleSolver, compressible_hydrostatic_balance, HydrostaticImbalance, + SpongeLayerParameters, MinKernel, MaxKernel, logger +) + +mountain_hydrostatic_defaults = { + 'ncolumns': 200, + 'nlayers': 120, + 'dt': 5.0, + 'tmax': 15000., + 'dumpfreq': 1500, + 'dirname': 'mountain_hydrostatic' +} + + +def mountain_hydrostatic( + ncolumns=mountain_hydrostatic_defaults['ncolumns'], + nlayers=mountain_hydrostatic_defaults['nlayers'], + dt=mountain_hydrostatic_defaults['dt'], + tmax=mountain_hydrostatic_defaults['tmax'], + dumpfreq=mountain_hydrostatic_defaults['dumpfreq'], + dirname=mountain_hydrostatic_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # + + domain_width = 240000. # width of domain in x direction, in m + domain_height = 50000. # height of model top, in m + a = 10000. # scale width of mountain, in m + hm = 1. # height of mountain, in m + zh = 5000. 
# height at which mesh is no longer distorted, in m + Tsurf = 250. # temperature of surface, in K + initial_wind = 20.0 # initial horizontal wind, in m/s + sponge_depth = 20000.0 # depth of sponge layer, in m + g = 9.80665 # acceleration due to gravity, in m/s^2 + cp = 1004. # specific heat capacity at constant pressure + sponge_mu = 0.15 # parameter for strength of sponge layer, in J/kg/K + exner_surf = 1.0 # maximum value of Exner pressure at surface + max_iterations = 10 # maximum number of hydrostatic balance iterations + tolerance = 1e-7 # tolerance for hydrostatic balance iteration + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + u_eqn_type = 'vector_invariant_form' + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + # Make normal extruded mesh which will be distorted to describe the mountain + base_mesh = PeriodicIntervalMesh(ncolumns, domain_width) + ext_mesh = ExtrudedMesh( + base_mesh, layers=nlayers, layer_height=domain_height/nlayers + ) + Vc = VectorFunctionSpace(ext_mesh, "DG", 2) + + # Describe the mountain + xc = domain_width/2. + x, z = SpatialCoordinate(ext_mesh) + zs = hm * a**2 / ((x - xc)**2 + a**2) + xexpr = as_vector( + [x, conditional(z < zh, z + cos(0.5 * pi * z / zh)**6 * zs, z)] + ) + + # Make new mesh + new_coords = Function(Vc).interpolate(xexpr) + mesh = Mesh(new_coords) + mesh._base_mesh = base_mesh # Force new mesh to inherit original base mesh + domain = Domain(mesh, dt, "CG", element_order) + + # Equation + parameters = CompressibleParameters(g=g, cp=cp) + sponge = SpongeLayerParameters( + H=domain_height, z_level=domain_height-sponge_depth, mubar=sponge_mu/dt + ) + eqns = HydrostaticCompressibleEulerEquations( + domain, parameters, sponge_options=sponge, u_transport_option=u_eqn_type + ) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_vtus=True, dump_nc=False + ) + diagnostic_fields = [ + ZComponent('u'), HydrostaticImbalance(eqns), + Perturbation('theta'), Perturbation('rho') + ] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes + theta_opts = SUPGOptions() + transported_fields = [ + TrapeziumRule(domain, "u"), + SSPRK3(domain, "rho"), + SSPRK3(domain, "theta", options=theta_opts) + ] + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "rho"), + DGUpwind(eqns, "theta", ibp=theta_opts.ibp) + ] + + # Linear solver + params = {'mat_type': 'matfree', + 'ksp_type': 'preonly', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.SCPC', + # Velocity mass operator is singular in the hydrostatic case. 
+ # So for reconstruction, we eliminate rho into u + 'pc_sc_eliminate_fields': '1, 0', + 'condensed_field': {'ksp_type': 'fgmres', + 'ksp_rtol': 1.0e-8, + 'ksp_atol': 1.0e-8, + 'ksp_max_it': 100, + 'pc_type': 'gamg', + 'pc_gamg_sym_graph': True, + 'mg_levels': {'ksp_type': 'gmres', + 'ksp_max_it': 5, + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu'}}} + + alpha = 0.51 # off-centering parameter + linear_solver = CompressibleSolver( + eqns, alpha, solver_parameters=params, + overwrite_solver_parameters=True + ) + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver, alpha=alpha + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + rho0 = stepper.fields("rho") + theta0 = stepper.fields("theta") + + # spaces + Vt = domain.spaces("theta") + Vr = domain.spaces("DG") + + # Thermodynamic constants required for setting initial conditions + # and reference profiles + N = parameters.N + + # N^2 = (g/theta)dtheta/dz => dtheta/dz = theta N^2g => theta=theta_0exp(N^2gz) + x, z = SpatialCoordinate(mesh) + thetab = Tsurf*exp(N**2*z/g) + theta_b = Function(Vt).interpolate(thetab) + + # Calculate hydrostatic exner + exner = Function(Vr) + rho_b = Function(Vr) + + # Set up kernels to evaluate global minima and maxima of fields + min_kernel = MinKernel() + max_kernel = MaxKernel() + + # First solve hydrostatic balance that gives Exner = 1 at bottom boundary + # This gives us a guess for the top boundary condition + bottom_boundary = Constant(exner_surf, domain=mesh) + logger.info(f'Solving hydrostatic with bottom Exner of {exner_surf}') + compressible_hydrostatic_balance( + eqns, theta_b, rho_b, exner, top=False, exner_boundary=bottom_boundary + ) + + # Solve hydrostatic balance again, but now use minimum value from first + # solve as the *top* boundary condition for Exner + top_value = min_kernel.apply(exner) + top_boundary = Constant(top_value, domain=mesh) + logger.info(f'Solving hydrostatic with top Exner of {top_value}') + compressible_hydrostatic_balance( + eqns, theta_b, rho_b, exner, top=True, exner_boundary=top_boundary + ) + + max_bottom_value = max_kernel.apply(exner) + + # Now we iterate, adjusting the top boundary condition, until this gives + # a maximum value of 1.0 at the surface + lower_top_guess = 0.9*top_value + upper_top_guess = 1.2*top_value + for i in range(max_iterations): + # If max bottom Exner value is equal to desired value, stop iteration + if abs(max_bottom_value - exner_surf) < tolerance: + break + + # Make new guess by average of previous guesses + top_guess = 0.5*(lower_top_guess + upper_top_guess) + top_boundary.assign(top_guess) + + logger.info( + f'Solving hydrostatic balance iteration {i}, with top Exner value ' + + f'of {top_guess}' + ) + + compressible_hydrostatic_balance( + eqns, theta_b, rho_b, exner, top=True, exner_boundary=top_boundary + ) + + max_bottom_value = max_kernel.apply(exner) + + # Adjust guesses based on new value + if max_bottom_value < exner_surf: + lower_top_guess = top_guess + else: + upper_top_guess = top_guess + + logger.info(f'Final max bottom Exner value of {max_bottom_value}') + + # Perform a final solve to obtain hydrostatically balanced rho + compressible_hydrostatic_balance( + eqns, theta_b, rho_b, exner, top=True, exner_boundary=top_boundary, + solve_for_rho=True + ) + + theta0.assign(theta_b) + 
rho0.assign(rho_b) + u0.project(as_vector([initial_wind, 0.0]), bcs=eqns.bcs['u']) + + stepper.set_reference_profiles([('rho', rho_b), ('theta', theta_b)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncolumns', + help="The number of columns in the vertical slice mesh.", + type=int, + default=mountain_hydrostatic_defaults['ncolumns'] + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=mountain_hydrostatic_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=mountain_hydrostatic_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=mountain_hydrostatic_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=mountain_hydrostatic_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=mountain_hydrostatic_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + mountain_hydrostatic(**vars(args)) diff --git a/examples/compressible_euler/skamarock_klemp_hydrostatic.py b/examples/compressible_euler/skamarock_klemp_hydrostatic.py new file mode 100644 index 000000000..f75ad9b10 --- /dev/null +++ b/examples/compressible_euler/skamarock_klemp_hydrostatic.py @@ -0,0 +1,204 @@ +""" +This example uses the hydrostatic compressible Euler equations to solve the +vertical slice gravity wave test case of Skamarock and Klemp, 1994: +``Efficiency and Accuracy of the Klemp-Wilhelmson Time-Splitting Technique'', +MWR. + +Potential temperature is transported using SUPG, and the degree 1 elements are +used. This also uses a mesh which is one cell thick in the y-direction. 
+""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + as_vector, SpatialCoordinate, PeriodicRectangleMesh, ExtrudedMesh, exp, sin, + Function, pi +) +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + TrapeziumRule, SUPGOptions, CourantNumber, Perturbation, + CompressibleParameters, HydrostaticCompressibleEulerEquations, + CompressibleSolver, compressible_hydrostatic_balance +) + +skamarock_klemp_hydrostatic_defaults = { + 'ncolumns': 150, + 'nlayers': 10, + 'dt': 25.0, + 'tmax': 60000., + 'dumpfreq': 1200, + 'dirname': 'skamarock_klemp_hydrostatic' +} + + +def skamarock_klemp_hydrostatic( + ncolumns=skamarock_klemp_hydrostatic_defaults['ncolumns'], + nlayers=skamarock_klemp_hydrostatic_defaults['nlayers'], + dt=skamarock_klemp_hydrostatic_defaults['dt'], + tmax=skamarock_klemp_hydrostatic_defaults['tmax'], + dumpfreq=skamarock_klemp_hydrostatic_defaults['dumpfreq'], + dirname=skamarock_klemp_hydrostatic_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Test case parameters + # ------------------------------------------------------------------------ # + + domain_width = 6.0e6 # Width of domain in x direction (m) + domain_length = 1.0e4 # Length of domain in y direction (m) + domain_height = 1.0e4 # Height of domain (m) + Tsurf = 300. # Temperature at surface (K) + wind_initial = 20. # Initial wind in x direction (m/s) + pert_width = 5.0e3 # Width parameter of perturbation (m) + deltaTheta = 1.0e-2 # Magnitude of theta perturbation (K) + N = 0.01 # Brunt-Vaisala frequency (1/s) + Omega = 0.5e-4 # Planetary rotation rate (1/s) + pressure_gradient_y = -1.0e-4*20 # Prescribed force in y direction (m/s^2) + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain -- 3D volume mesh + base_mesh = PeriodicRectangleMesh( + ncolumns, 1, domain_width, domain_length, quadrilateral=True + ) + mesh = ExtrudedMesh(base_mesh, nlayers, layer_height=domain_height/nlayers) + domain = Domain(mesh, dt, "RTCF", element_order) + + # Equation + parameters = CompressibleParameters(Omega=Omega) + balanced_pg = as_vector((0., pressure_gradient_y, 0.)) + eqns = HydrostaticCompressibleEulerEquations( + domain, parameters, extra_terms=[("u", balanced_pg)] + ) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_vtus=True, dump_nc=False, + dumplist=['u'], + ) + diagnostic_fields = [CourantNumber(), Perturbation('theta'), Perturbation('rho')] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes + theta_opts = SUPGOptions() + transported_fields = [ + TrapeziumRule(domain, "u"), + SSPRK3(domain, "rho"), + SSPRK3(domain, "theta", options=theta_opts) + ] + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "rho"), + DGUpwind(eqns, "theta", ibp=theta_opts.ibp) + ] + + # Linear solver + linear_solver = CompressibleSolver(eqns) + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver + ) + + # ------------------------------------------------------------------------ # + # Initial 
conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + rho0 = stepper.fields("rho") + theta0 = stepper.fields("theta") + + # spaces + Vt = domain.spaces("theta") + Vr = domain.spaces("DG") + + # Thermodynamic constants required for setting initial conditions + # and reference profiles + g = parameters.g + + x, _, z = SpatialCoordinate(mesh) + + # N^2 = (g/theta)dtheta/dz => dtheta/dz = theta N^2g => theta=theta_0exp(N^2gz) + thetab = Tsurf*exp(N**2*z/g) + + theta_b = Function(Vt).interpolate(thetab) + rho_b = Function(Vr) + + theta_pert = ( + deltaTheta * sin(pi*z/domain_height) + / (1 + (x - domain_width/2)**2 / pert_width**2) + ) + theta0.interpolate(theta_b + theta_pert) + + compressible_hydrostatic_balance(eqns, theta_b, rho_b, solve_for_rho=True) + + rho0.assign(rho_b) + u0.project(as_vector([wind_initial, 0.0, 0.0])) + + stepper.set_reference_profiles([('rho', rho_b), + ('theta', theta_b)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncolumns', + help="The number of columns in the vertical slice mesh.", + type=int, + default=skamarock_klemp_hydrostatic_defaults['ncolumns'] + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=skamarock_klemp_hydrostatic_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=skamarock_klemp_hydrostatic_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=skamarock_klemp_hydrostatic_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=skamarock_klemp_hydrostatic_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=skamarock_klemp_hydrostatic_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + skamarock_klemp_hydrostatic(**vars(args)) diff --git a/examples/compressible_euler/skamarock_klemp_nonhydrostatic.py b/examples/compressible_euler/skamarock_klemp_nonhydrostatic.py new file mode 100644 index 000000000..6a6700650 --- /dev/null +++ b/examples/compressible_euler/skamarock_klemp_nonhydrostatic.py @@ -0,0 +1,221 @@ +""" +This example uses the non-linear compressible Euler equations to solve the +vertical slice gravity wave test case of Skamarock and Klemp, 1994: +``Efficiency and Accuracy of the Klemp-Wilhelmson Time-Splitting Technique'', +MWR. + +Potential temperature is transported using SUPG, and the degree 1 elements are +used. 
+""" +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter + +from petsc4py import PETSc +PETSc.Sys.popErrorHandler() +import itertools +from firedrake import ( + as_vector, SpatialCoordinate, PeriodicIntervalMesh, ExtrudedMesh, exp, sin, + Function, pi, COMM_WORLD +) +import numpy as np +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + SUPGOptions, CourantNumber, Perturbation, Gradient, + CompressibleParameters, CompressibleEulerEquations, CompressibleSolver, + compressible_hydrostatic_balance, logger, RichardsonNumber, + RungeKuttaFormulation +) + +skamarock_klemp_nonhydrostatic_defaults = { + 'ncolumns': 150, + 'nlayers': 10, + 'dt': 6.0, + 'tmax': 3600., + 'dumpfreq': 300, + 'dirname': 'skamarock_klemp_nonhydrostatic' +} + + +def skamarock_klemp_nonhydrostatic( + ncolumns=skamarock_klemp_nonhydrostatic_defaults['ncolumns'], + nlayers=skamarock_klemp_nonhydrostatic_defaults['nlayers'], + dt=skamarock_klemp_nonhydrostatic_defaults['dt'], + tmax=skamarock_klemp_nonhydrostatic_defaults['tmax'], + dumpfreq=skamarock_klemp_nonhydrostatic_defaults['dumpfreq'], + dirname=skamarock_klemp_nonhydrostatic_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Test case parameters + # ------------------------------------------------------------------------ # + + domain_width = 3.0e5 # Width of domain (m) + domain_height = 1.0e4 # Height of domain (m) + Tsurf = 300. # Temperature at surface (K) + wind_initial = 20. # Initial wind in x direction (m/s) + pert_width = 5.0e3 # Width parameter of perturbation (m) + deltaTheta = 1.0e-2 # Magnitude of theta perturbation (K) + N = 0.01 # Brunt-Vaisala frequency (1/s) + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain -- 3D volume mesh + base_mesh = PeriodicIntervalMesh(ncolumns, domain_width) + mesh = ExtrudedMesh(base_mesh, nlayers, layer_height=domain_height/nlayers) + domain = Domain(mesh, dt, "CG", element_order) + + # Equation + parameters = CompressibleParameters() + eqns = CompressibleEulerEquations(domain, parameters) + + # I/O + points_x = np.linspace(0., domain_width, 100) + points_z = [domain_height/2.] 
+ points = np.array([p for p in itertools.product(points_x, points_z)]) + + # Dumping point data using legacy PointDataOutput is not supported in parallel + if COMM_WORLD.size == 1: + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, pddumpfreq=dumpfreq, + dump_vtus=True, dump_nc=False, + point_data=[('theta_perturbation', points)], + ) + else: + logger.warning( + 'Dumping point data using legacy PointDataOutput is not' + ' supported in parallel\nDisabling PointDataOutput' + ) + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, pddumpfreq=dumpfreq, + dump_vtus=True, dump_nc=True, + ) + + diagnostic_fields = [ + CourantNumber(), Gradient('u'), Perturbation('theta'), + Gradient('theta_perturbation'), Perturbation('rho'), + RichardsonNumber('theta', parameters.g/Tsurf), Gradient('theta') + ] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes + theta_opts = SUPGOptions() + transported_fields = [ + SSPRK3(domain, "u", subcycle_by_courant=0.25), + SSPRK3(domain, "rho", subcycle_by_courant=0.25, rk_formulation=RungeKuttaFormulation.linear), + SSPRK3(domain, "theta", subcycle_by_courant=0.25, options=theta_opts) + ] + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "rho", advective_then_flux=True), + DGUpwind(eqns, "theta", ibp=theta_opts.ibp) + ] + + # Linear solver + linear_solver = CompressibleSolver(eqns) + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + rho0 = stepper.fields("rho") + theta0 = stepper.fields("theta") + + # spaces + Vt = domain.spaces("theta") + Vr = domain.spaces("DG") + + # Thermodynamic constants required for setting initial conditions + # and reference profiles + g = parameters.g + + x, z = SpatialCoordinate(mesh) + + # N^2 = (g/theta)dtheta/dz => dtheta/dz = theta N^2g => theta=theta_0exp(N^2gz) + thetab = Tsurf*exp(N**2*z/g) + + theta_b = Function(Vt).interpolate(thetab) + rho_b = Function(Vr) + + # Calculate hydrostatic exner + compressible_hydrostatic_balance(eqns, theta_b, rho_b) + + theta_pert = ( + deltaTheta * sin(pi*z/domain_height) + / (1 + (x - domain_width/2)**2 / pert_width**2) + ) + theta0.interpolate(theta_b + theta_pert) + rho0.assign(rho_b) + u0.project(as_vector([wind_initial, 0.0])) + + stepper.set_reference_profiles([('rho', rho_b), ('theta', theta_b)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncolumns', + help="The number of columns in the vertical slice mesh.", + type=int, + default=skamarock_klemp_nonhydrostatic_defaults['ncolumns'] + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=skamarock_klemp_nonhydrostatic_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + 
default=skamarock_klemp_nonhydrostatic_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=skamarock_klemp_nonhydrostatic_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=skamarock_klemp_nonhydrostatic_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=skamarock_klemp_nonhydrostatic_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + skamarock_klemp_nonhydrostatic(**vars(args)) diff --git a/examples/compressible_euler/straka_bubble.py b/examples/compressible_euler/straka_bubble.py new file mode 100644 index 000000000..fed55a1ec --- /dev/null +++ b/examples/compressible_euler/straka_bubble.py @@ -0,0 +1,213 @@ +""" +The falling cold density current test of Straka et al, 1993: +``Numerical solutions of a non‐linear density current: A benchmark solution and +comparisons'', MiF. + +Diffusion is included in the velocity and potential temperature equations. The +degree 1 finite elements are used in this configuration. +""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + PeriodicIntervalMesh, ExtrudedMesh, SpatialCoordinate, Constant, pi, cos, + Function, sqrt, conditional, as_vector +) +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + TrapeziumRule, SUPGOptions, CourantNumber, Perturbation, + DiffusionParameters, InteriorPenaltyDiffusion, BackwardEuler, + CompressibleParameters, CompressibleEulerEquations, CompressibleSolver, + compressible_hydrostatic_balance +) + +straka_bubble_defaults = { + 'nlayers': 32, + 'dt': 1.0, + 'tmax': 900., + 'dumpfreq': 225, + 'dirname': 'straka_bubble' +} + + +def straka_bubble( + nlayers=straka_bubble_defaults['nlayers'], + dt=straka_bubble_defaults['dt'], + tmax=straka_bubble_defaults['tmax'], + dumpfreq=straka_bubble_defaults['dumpfreq'], + dirname=straka_bubble_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # + + domain_width = 51200. # domain width (m) + domain_height = 6400. # domain height (m) + zc = 3000. # vertical centre of perturbation (m) + xr = 4000. # horizontal radius of perturbation (m) + zr = 2000. # vertical radius of perturbation (m) + T_pert = -7.5 # strength of temperature perturbation (K) + Tsurf = 300.0 # background theta value (K) + kappa = 75. # diffusivity parameter (m^2/s) + mu0 = 10. 
# interior penalty parameter (1/m) + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + delta = domain_height/nlayers + ncolumns = 8 * nlayers + element_order = 1 + u_eqn_type = 'vector_advection_form' + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + base_mesh = PeriodicIntervalMesh(ncolumns, domain_width) + mesh = ExtrudedMesh(base_mesh, nlayers, layer_height=delta) + domain = Domain(mesh, dt, "CG", element_order) + + # Equation + parameters = CompressibleParameters() + diffusion_params = DiffusionParameters(kappa=kappa, mu=mu0/delta) + diffusion_options = [("u", diffusion_params), ("theta", diffusion_params)] + eqns = CompressibleEulerEquations( + domain, parameters, u_transport_option=u_eqn_type, + diffusion_options=diffusion_options + ) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_vtus=True, dump_nc=False, + dumplist=['u'] + ) + diagnostic_fields = [ + CourantNumber(), Perturbation('theta'), Perturbation('rho') + ] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes + theta_opts = SUPGOptions() + transported_fields = [ + TrapeziumRule(domain, "u"), + SSPRK3(domain, "rho"), + SSPRK3(domain, "theta", options=theta_opts) + ] + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "rho"), + DGUpwind(eqns, "theta", ibp=theta_opts.ibp) + ] + + # Linear solver + linear_solver = CompressibleSolver(eqns) + + # Diffusion schemes + diffusion_schemes = [ + BackwardEuler(domain, "u"), + BackwardEuler(domain, "theta") + ] + diffusion_methods = [ + InteriorPenaltyDiffusion(eqns, "u", diffusion_params), + InteriorPenaltyDiffusion(eqns, "theta", diffusion_params) + ] + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, + spatial_methods=transport_methods+diffusion_methods, + linear_solver=linear_solver, diffusion_schemes=diffusion_schemes + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + rho0 = stepper.fields("rho") + theta0 = stepper.fields("theta") + + # spaces + Vt = domain.spaces("theta") + Vr = domain.spaces("DG") + + # Isentropic background state + theta_b = Function(Vt).interpolate(Tsurf) + rho_b = Function(Vr) + exner = Function(Vr) + + # Calculate hydrostatic exner + compressible_hydrostatic_balance( + eqns, theta_b, rho_b, exner0=exner, solve_for_rho=True + ) + + x, z = SpatialCoordinate(mesh) + xc = 0.5*domain_width + r = sqrt(((x - xc)/xr)**2 + ((z - zc)/zr)**2) + T_pert_expr = conditional( + r > 1., + 0., + 0.5*T_pert*(1. 
+ cos(pi*r)) + ) + + # Set initial fields + zero = Constant(0.0, domain=mesh) + u0.project(as_vector([zero, zero])) + theta0.interpolate(theta_b + T_pert_expr*exner) + rho0.assign(rho_b) + + # Reference profiles + stepper.set_reference_profiles([('rho', rho_b), ('theta', theta_b)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=straka_bubble_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=straka_bubble_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=straka_bubble_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=straka_bubble_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=straka_bubble_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + straka_bubble(**vars(args)) diff --git a/examples/compressible_euler/test_compressible_euler_examples.py b/examples/compressible_euler/test_compressible_euler_examples.py new file mode 100644 index 000000000..384824e12 --- /dev/null +++ b/examples/compressible_euler/test_compressible_euler_examples.py @@ -0,0 +1,143 @@ +import pytest + + +def make_dirname(test_name): + from mpi4py import MPI + comm = MPI.COMM_WORLD + if comm.size > 1: + return f'pytest_{test_name}_parallel' + else: + return f'pytest_{test_name}' + + +def test_dcmip_3_1_gravity_wave(): + from dcmip_3_1_gravity_wave import dcmip_3_1_gravity_wave + test_name = 'dcmip_3_1_gravity_wave' + dcmip_3_1_gravity_wave( + ncells_per_edge=4, + nlayers=4, + dt=100, + tmax=200, + dumpfreq=2, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=2) +def test_dcmip_3_1_gravity_wave_parallel(): + test_dcmip_3_1_gravity_wave() + + +def test_dry_bryan_fritsch(): + from dry_bryan_fritsch import dry_bryan_fritsch + test_name = 'dry_bryan_fritsch' + dry_bryan_fritsch( + ncolumns=20, + nlayers=20, + dt=2.0, + tmax=20.0, + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=4) +def test_dry_bryan_fritsch_parallel(): + test_dry_bryan_fritsch() + + +# Hydrostatic equations not currently working +@pytest.mark.xfail +def test_mountain_hydrostatic(): + from mountain_hydrostatic import mountain_hydrostatic + test_name = 'mountain_hydrostatic' + mountain_hydrostatic( + ncolumns=20, + nlayers=10, + dt=5.0, + tmax=50.0, + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +# Hydrostatic equations not currently working +@pytest.mark.xfail +@pytest.mark.parallel(nprocs=4) +def test_mountain_hydrostatic_parallel(): + test_mountain_hydrostatic() + + +# Hydrostatic equations not currently working +@pytest.mark.xfail +def test_skamarock_klemp_hydrostatic(): + from skamarock_klemp_hydrostatic import skamarock_klemp_hydrostatic + test_name = 'skamarock_klemp_hydrostatic' + 
skamarock_klemp_hydrostatic( + ncolumns=30, + nlayers=5, + dt=6.0, + tmax=60.0, + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +# Hydrostatic equations not currently working +@pytest.mark.xfail +@pytest.mark.parallel(nprocs=2) +def test_skamarock_klemp_hydrostatic_parallel(): + test_skamarock_klemp_hydrostatic() + + +def test_skamarock_klemp_nonhydrostatic(): + from skamarock_klemp_nonhydrostatic import skamarock_klemp_nonhydrostatic + test_name = 'skamarock_klemp_nonhydrostatic' + skamarock_klemp_nonhydrostatic( + ncolumns=30, + nlayers=5, + dt=6.0, + tmax=60.0, + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=2) +def test_skamarock_klemp_nonhydrostatic_parallel(): + test_skamarock_klemp_nonhydrostatic() + + +def test_straka_bubble(): + from straka_bubble import straka_bubble + test_name = 'straka_bubble' + straka_bubble( + nlayers=6, + dt=4.0, + tmax=40.0, + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=3) +def test_straka_bubble_parallel(): + test_straka_bubble() + + +def test_unsaturated_bubble(): + from unsaturated_bubble import unsaturated_bubble + test_name = 'unsaturated_bubble' + unsaturated_bubble( + ncolumns=20, + nlayers=20, + dt=1.0, + tmax=10.0, + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=2) +def test_unsaturated_bubble_parallel(): + test_unsaturated_bubble() diff --git a/examples/compressible_euler/unsaturated_bubble.py b/examples/compressible_euler/unsaturated_bubble.py new file mode 100644 index 000000000..394e15f97 --- /dev/null +++ b/examples/compressible_euler/unsaturated_bubble.py @@ -0,0 +1,338 @@ +""" +A moist thermal in an unsaturated atmosphere, including a rain species. This +test is based on that of Grabowski and Clark, 1991: +``Cloud–environment interface instability: Rising thermal calculations in two +spatial dimensions'', JAS. + +and is described in Bendall et al, 2020: +``A compatible finite‐element discretisation for the moist compressible Euler +equations'', QJRMS. + +As the thermal rises, water vapour condenses into cloud and forms rain. +Limiters are applied to the transport of the water species. + +This configuration uses the lowest-order finite elements, and the recovery +wrapper to provide higher-order accuracy. 
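+
+As a usage sketch (the argument values here are illustrative rather than the
+defaults), the case can be run at reduced resolution directly from Python:
+
+    from unsaturated_bubble import unsaturated_bubble
+    unsaturated_bubble(ncolumns=90, nlayers=60, dt=2.0, tmax=600.0)
+
+or from the command line via the arguments parsed at the end of this file.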
+""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + PeriodicIntervalMesh, ExtrudedMesh, SpatialCoordinate, conditional, cos, pi, + sqrt, exp, TestFunction, dx, TrialFunction, Constant, Function, errornorm, + LinearVariationalProblem, LinearVariationalSolver, as_vector +) +from firedrake.slope_limiter.vertex_based_limiter import VertexBasedLimiter +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + Perturbation, RecoverySpaces, BoundaryMethod, Recoverer, Fallout, + Coalescence, SaturationAdjustment, EvaporationOfRain, thermodynamics, + CompressibleParameters, CompressibleEulerEquations, CompressibleSolver, + unsaturated_hydrostatic_balance, WaterVapour, CloudWater, Rain, + RelativeHumidity, ForwardEuler, MixedFSLimiter, ZeroLimiter +) + +unsaturated_bubble_defaults = { + 'ncolumns': 180, + 'nlayers': 120, + 'dt': 1.0, + 'tmax': 600., + 'dumpfreq': 300, + 'dirname': 'unsaturated_bubble' +} + + +def unsaturated_bubble( + ncolumns=unsaturated_bubble_defaults['ncolumns'], + nlayers=unsaturated_bubble_defaults['nlayers'], + dt=unsaturated_bubble_defaults['dt'], + tmax=unsaturated_bubble_defaults['tmax'], + dumpfreq=unsaturated_bubble_defaults['dumpfreq'], + dirname=unsaturated_bubble_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # + + domain_width = 3600. # domain width (m) + domain_height = 2400. # domain height (m) + zc = 800. # height of centre of perturbation (m) + r1 = 300. # outer radius of perturbation (m) + r2 = 200. # inner radius of perturbation (m) + Tsurf = 283.0 # surface temperature (K) + psurf = 85000. 
# surface pressure (Pa) + rel_hum_background = 0.2 # background relative humidity (dimensionless) + S = 1.3e-5 # height factor for theta profile (1/m) + max_outer_solve_count = 20 # max num outer iterations for initialisation + max_inner_solve_count = 10 # max num inner iterations for initialisation + tol_initialisation = 1e-10 # tolerance for initialisation + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 0 + u_eqn_type = 'vector_advection_form' + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + base_mesh = PeriodicIntervalMesh(ncolumns, domain_width) + mesh = ExtrudedMesh(base_mesh, nlayers, layer_height=domain_height/nlayers) + domain = Domain(mesh, dt, "CG", element_order) + + # Equation + params = CompressibleParameters() + tracers = [WaterVapour(), CloudWater(), Rain()] + eqns = CompressibleEulerEquations( + domain, params, active_tracers=tracers, u_transport_option=u_eqn_type + ) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_vtus=False, dump_nc=True, + dumplist=['cloud_water', 'rain'] + ) + diagnostic_fields = [ + RelativeHumidity(eqns), Perturbation('theta'), Perturbation('rho'), + Perturbation('water_vapour'), Perturbation('RelativeHumidity') + ] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes -- specify options for using recovery wrapper + boundary_methods = {'DG': BoundaryMethod.taylor, + 'HDiv': BoundaryMethod.taylor} + + recovery_spaces = RecoverySpaces(domain, boundary_method=boundary_methods, use_vector_spaces=True) + + u_opts = recovery_spaces.HDiv_options + rho_opts = recovery_spaces.DG_options + theta_opts = recovery_spaces.theta_options + + VDG1 = domain.spaces("DG1_equispaced") + limiter = VertexBasedLimiter(VDG1) + + transported_fields = [ + SSPRK3(domain, "u", options=u_opts), + SSPRK3(domain, "rho", options=rho_opts), + SSPRK3(domain, "theta", options=theta_opts), + SSPRK3(domain, "water_vapour", options=theta_opts, limiter=limiter), + SSPRK3(domain, "cloud_water", options=theta_opts, limiter=limiter), + SSPRK3(domain, "rain", options=theta_opts, limiter=limiter) + ] + + transport_methods = [ + DGUpwind(eqns, field) for field in + ["u", "rho", "theta", "water_vapour", "cloud_water", "rain"] + ] + + # Linear solver + linear_solver = CompressibleSolver(eqns) + + # Physics schemes + Vt = domain.spaces('theta') + rainfall_method = DGUpwind(eqns, 'rain', outflow=True) + zero_limiter = MixedFSLimiter( + eqns, + {'water_vapour': ZeroLimiter(Vt), 'cloud_water': ZeroLimiter(Vt)} + ) + physics_schemes = [ + (Fallout(eqns, 'rain', domain, rainfall_method), SSPRK3(domain)), + (Coalescence(eqns), ForwardEuler(domain)), + (EvaporationOfRain(eqns), ForwardEuler(domain)), + (SaturationAdjustment(eqns), ForwardEuler(domain, limiter=zero_limiter)) + ] + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver, physics_schemes=physics_schemes + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + rho0 = stepper.fields("rho") + theta0 = stepper.fields("theta") + 
water_v0 = stepper.fields("water_vapour") + water_c0 = stepper.fields("cloud_water") + water_r0 = stepper.fields("rain") + + # spaces + Vr = domain.spaces("DG") + x, z = SpatialCoordinate(mesh) + quadrature_degree = (4, 4) + dxp = dx(degree=(quadrature_degree)) + + physics_boundary_method = BoundaryMethod.extruded + + # Define constant theta_e and water_t + exner_surf = (psurf / eqns.parameters.p_0) ** eqns.parameters.kappa + theta_surf = thermodynamics.theta(eqns.parameters, Tsurf, psurf) + theta_d = Function(Vt).interpolate(theta_surf * exp(S*z)) + rel_hum = Function(Vt).assign(rel_hum_background) + + # Calculate hydrostatic fields + unsaturated_hydrostatic_balance( + eqns, stepper.fields, theta_d, rel_hum, + exner_boundary=Constant(exner_surf) + ) + + # make mean fields + theta_b = Function(Vt).assign(theta0) + rho_b = Function(Vr).assign(rho0) + water_vb = Function(Vt).assign(water_v0) + + # define perturbation to RH + xc = domain_width / 2 + r = sqrt((x - xc) ** 2 + (z - zc) ** 2) + + rel_hum_pert_expr = conditional( + r > r1, + 0.0, + conditional( + r > r2, + (1 - rel_hum_background) * cos(pi*(r - r2) / (2*(r1 - r2)))**2, + 1 - rel_hum_background + ) + ) + rel_hum.interpolate(rel_hum_background + rel_hum_pert_expr) + + # now need to find perturbed rho, theta_vd and r_v + # follow approach used in unsaturated hydrostatic setup + rho_averaged = Function(Vt) + rho_recoverer = Recoverer( + rho0, rho_averaged, boundary_method=physics_boundary_method + ) + rho_eval = Function(Vr) + water_v_eval = Function(Vt) + delta = 1.0 + + R_d = eqns.parameters.R_d + R_v = eqns.parameters.R_v + epsilon = R_d / R_v + + # make expressions for determining water_v0 + exner = thermodynamics.exner_pressure(eqns.parameters, rho_averaged, theta0) + p = thermodynamics.p(eqns.parameters, exner) + T = thermodynamics.T(eqns.parameters, theta0, exner, water_v0) + r_v_expr = thermodynamics.r_v(eqns.parameters, rel_hum, T, p) + + # make expressions to evaluate residual + exner_expr = thermodynamics.exner_pressure(eqns.parameters, rho_averaged, theta0) + p_expr = thermodynamics.p(eqns.parameters, exner_expr) + T_expr = thermodynamics.T(eqns.parameters, theta0, exner_expr, water_v0) + rel_hum_expr = thermodynamics.RH(eqns.parameters, water_v0, T_expr, p_expr) + rel_hum_eval = Function(Vt) + + # set-up rho problem to keep exner constant + gamma = TestFunction(Vr) + rho_trial = TrialFunction(Vr) + lhs = gamma * rho_trial * dxp + rhs = gamma * (rho_b * theta_b / theta0) * dxp + rho_problem = LinearVariationalProblem(lhs, rhs, rho_eval) + rho_solver = LinearVariationalSolver(rho_problem) + + for i in range(max_outer_solve_count): + # calculate averaged rho + rho_recoverer.project() + + rel_hum_eval.interpolate(rel_hum_expr) + if errornorm(rel_hum_eval, rel_hum) < tol_initialisation: + break + + # first solve for r_v + for _ in range(max_inner_solve_count): + water_v_eval.interpolate(r_v_expr) + water_v0.assign(water_v0 * (1 - delta) + delta * water_v_eval) + + # compute theta_vd + theta0.interpolate(theta_d * (1 + water_v0 / epsilon)) + + # test quality of solution by re-evaluating expression + rel_hum_eval.interpolate(rel_hum_expr) + if errornorm(rel_hum_eval, rel_hum) < tol_initialisation: + break + + # now solve for rho with theta_vd and w_v guesses + rho_solver.solve() + + # damp solution + rho0.assign(rho0 * (1 - delta) + delta * rho_eval) + + if i == max_outer_solve_count: + raise RuntimeError( + f'Hydrostatic balance solve has not converged within {i} iterations' + ) + + # Set wind, cloud and rain to be zero + zero 
= Constant(0.0, domain=mesh) + u0.project(as_vector([zero, zero])) + water_c0.interpolate(zero) + water_r0.interpolate(zero) + + # initialise reference profiles + stepper.set_reference_profiles( + [('rho', rho_b), ('theta', theta_b), ('water_vapour', water_vb)] + ) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncolumns', + help="The number of columns in the vertical slice mesh.", + type=int, + default=unsaturated_bubble_defaults['ncolumns'] + ) + parser.add_argument( + '--nlayers', + help="The number of layers for the mesh.", + type=int, + default=unsaturated_bubble_defaults['nlayers'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=unsaturated_bubble_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=unsaturated_bubble_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=unsaturated_bubble_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=unsaturated_bubble_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + unsaturated_bubble(**vars(args)) diff --git a/examples/shallow_water/linear_williamson_2.py b/examples/shallow_water/linear_williamson_2.py index e32a92e40..9d022bf11 100644 --- a/examples/shallow_water/linear_williamson_2.py +++ b/examples/shallow_water/linear_williamson_2.py @@ -1,82 +1,150 @@ """ -The Williamson 2 shallow-water test case (solid-body rotation), solved with a -discretisation of the linear shallow-water equations. +A linearised form of Test Case 2 (solid-body rotation) of Williamson et al 1992: +``A standard test set for numerical approximations to the shallow water +equations in spherical geometry'', JCP. -This uses an icosahedral mesh of the sphere. +This uses an icosahedral mesh of the sphere, and the linear shallow water +equations. """ -from gusto import * -from firedrake import IcosahedralSphereMesh, SpatialCoordinate, as_vector, pi -import sys +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import Function, SpatialCoordinate, as_vector, pi +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, DefaultTransport, + ForwardEuler, SteadyStateError, ShallowWaterParameters, + LinearShallowWaterEquations, GeneralIcosahedralSphereMesh, + ZonalComponent, MeridionalComponent, RelativeVorticity +) -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # +linear_williamson_2_defaults = { + 'ncells_per_edge': 16, # number of cells per icosahedron edge + 'dt': 900.0, # 15 minutes + 'tmax': 5.*24.*60.*60., # 5 days + 'dumpfreq': 96, # once per day with default options + 'dirname': 'linear_williamson_2' +} -dt = 3600. -day = 24.*60.*60. 
-if '--running-tests' in sys.argv: - tmax = dt - dumpfreq = 1 -else: - tmax = 5*day - dumpfreq = int(tmax / (5*dt)) -refinements = 3 # number of horizontal cells = 20*(4^refinements) +def linear_williamson_2( + ncells_per_edge=linear_williamson_2_defaults['ncells_per_edge'], + dt=linear_williamson_2_defaults['dt'], + tmax=linear_williamson_2_defaults['tmax'], + dumpfreq=linear_williamson_2_defaults['dumpfreq'], + dirname=linear_williamson_2_defaults['dirname'] +): -R = 6371220. -H = 2000. + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # -# ---------------------------------------------------------------------------- # -# Set up model objects -# ---------------------------------------------------------------------------- # + radius = 6371220. # planetary radius (m) + mean_depth = 2000. # reference depth (m) + u_max = 2*pi*radius/(12*24*60*60) # Max amplitude of the zonal wind (m/s) -# Domain -mesh = IcosahedralSphereMesh(radius=R, - refinement_level=refinements, degree=3) -x = SpatialCoordinate(mesh) -domain = Domain(mesh, dt, 'BDM', 1) - -# Equation -parameters = ShallowWaterParameters(H=H) -Omega = parameters.Omega -x = SpatialCoordinate(mesh) -fexpr = 2*Omega*x[2]/R -eqns = LinearShallowWaterEquations(domain, parameters, fexpr=fexpr) - -# I/O -output = OutputParameters( - dirname='linear_williamson_2', - dumpfreq=dumpfreq, -) -diagnostic_fields = [SteadyStateError('u'), SteadyStateError('D')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # -# Transport schemes -transport_schemes = [ForwardEuler(domain, "D")] -transport_methods = [DefaultTransport(eqns, "D")] + element_order = 1 -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transport_schemes, transport_methods) + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # -# ---------------------------------------------------------------------------- # -# Initial conditions -# ---------------------------------------------------------------------------- # + # Domain + mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2) + x, y, z = SpatialCoordinate(mesh) + domain = Domain(mesh, dt, 'BDM', element_order) + + # Equation + parameters = ShallowWaterParameters(H=mean_depth) + Omega = parameters.Omega + fexpr = 2*Omega*z/radius + eqns = LinearShallowWaterEquations(domain, parameters, fexpr=fexpr) + + # I/O + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_nc=False, dump_vtus=True + ) + diagnostic_fields = [SteadyStateError('u'), SteadyStateError('D'), + ZonalComponent('u'), MeridionalComponent('u'), + RelativeVorticity()] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) -u0 = stepper.fields("u") -D0 = stepper.fields("D") -u_max = 2*pi*R/(12*day) # Maximum amplitude of the zonal wind (m/s) -uexpr = as_vector([-u_max*x[1]/R, u_max*x[0]/R, 0.0]) -g = parameters.g -Dexpr = - ((R * Omega * u_max)*(x[2]*x[2]/(R*R)))/g -u0.project(uexpr) -D0.interpolate(Dexpr) + # Transport schemes + transport_schemes = [ForwardEuler(domain, "D")] + transport_methods = [DefaultTransport(eqns, "D")] -Dbar = Function(D0.function_space()).assign(H) 
-stepper.set_reference_profiles([('D', Dbar)])
+    # Time stepper
+    stepper = SemiImplicitQuasiNewton(
+        eqns, io, transport_schemes, transport_methods
+    )
+
+    # ------------------------------------------------------------------------ #
+    # Initial conditions
+    # ------------------------------------------------------------------------ #
+
+    g = parameters.g
+
+    u0 = stepper.fields("u")
+    D0 = stepper.fields("D")
+
+    uexpr = as_vector([-u_max*y/radius, u_max*x/radius, 0.0])
+    Dexpr = - ((radius*Omega*u_max) * (z/radius)**2) / g
+
+    u0.project(uexpr)
+    D0.interpolate(Dexpr)
+
+    Dbar = Function(D0.function_space()).assign(mean_depth)
+    stepper.set_reference_profiles([('D', Dbar)])
+
+    # ------------------------------------------------------------------------ #
+    # Run
+    # ------------------------------------------------------------------------ #
+
+    stepper.run(t=0, tmax=tmax)
 
 # ---------------------------------------------------------------------------- #
-# Run
+# MAIN
 # ---------------------------------------------------------------------------- #
 
-stepper.run(t=0, tmax=tmax)
+
+if __name__ == "__main__":
+
+    parser = ArgumentParser(
+        description=__doc__,
+        formatter_class=ArgumentDefaultsHelpFormatter
+    )
+    parser.add_argument(
+        '--ncells_per_edge',
+        help="The number of cells per edge of icosahedron",
+        type=int,
+        default=linear_williamson_2_defaults['ncells_per_edge']
+    )
+    parser.add_argument(
+        '--dt',
+        help="The time step in seconds.",
+        type=float,
+        default=linear_williamson_2_defaults['dt']
+    )
+    parser.add_argument(
+        "--tmax",
+        help="The end time for the simulation in seconds.",
+        type=float,
+        default=linear_williamson_2_defaults['tmax']
+    )
+    parser.add_argument(
+        '--dumpfreq',
+        help="The frequency at which to dump field output.",
+        type=int,
+        default=linear_williamson_2_defaults['dumpfreq']
+    )
+    parser.add_argument(
+        '--dirname',
+        help="The name of the directory to write to.",
+        type=str,
+        default=linear_williamson_2_defaults['dirname']
+    )
+    args, unknown = parser.parse_known_args()
+
+    linear_williamson_2(**vars(args))
diff --git a/examples/shallow_water/moist_convective_williamson_2.py b/examples/shallow_water/moist_convective_williamson_2.py
new file mode 100644
index 000000000..07fc7e96c
--- /dev/null
+++ b/examples/shallow_water/moist_convective_williamson_2.py
@@ -0,0 +1,240 @@
+"""
+A moist convective form of Test Case 2 (solid-body rotation with flow in
+geostrophic balance) of Williamson et al, 1992:
+``A standard test set for numerical approximations to the shallow water
+equations in spherical geometry'', JCP.
+
+Three moist variables (vapour, cloud liquid and rain) are used. The saturation
+function depends on height, with a temporally-constant background buoyancy/
+temperature field. Vapour is initialised very close to saturation and
+small overshoots will generate clouds.
+
+This example uses the icosahedral sphere mesh and degree 1 spaces.
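+
+For reference, the saturation mixing ratio used here takes the form
+q_sat = q0/(g*D) * exp(20*theta), where D is the depth field and theta is the
+fixed, latitude-dependent background temperature expression constructed in
+sat_func below.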
+""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import SpatialCoordinate, sin, cos, exp, Function +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + TrapeziumRule, ShallowWaterParameters, ShallowWaterEquations, + ZonalComponent, MeridionalComponent, SteadyStateError, lonlatr_from_xyz, + DG1Limiter, InstantRain, MoistConvectiveSWSolver, ForwardEuler, + RelativeVorticity, SWSaturationAdjustment, WaterVapour, CloudWater, Rain, + GeneralIcosahedralSphereMesh, xyz_vector_from_lonlatr +) + +moist_convect_williamson_2_defaults = { + 'ncells_per_edge': 16, # number of cells per icosahedron edge + 'dt': 900.0, # 15 minutes + 'tmax': 5.*24.*60.*60., # 5 days + 'dumpfreq': 96, # once per day with default options + 'dirname': 'moist_convective_williamson_2' +} + + +def moist_convect_williamson_2( + ncells_per_edge=moist_convect_williamson_2_defaults['ncells_per_edge'], + dt=moist_convect_williamson_2_defaults['dt'], + tmax=moist_convect_williamson_2_defaults['tmax'], + dumpfreq=moist_convect_williamson_2_defaults['dumpfreq'], + dirname=moist_convect_williamson_2_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # + + radius = 6371220. # planetary radius (m) + u_max = 20. # max amplitude of the zonal wind (m/s) + phi_0 = 3.0e4 # reference geopotential height (m^2/s^2) + epsilon = 1/300 # linear air expansion coeff (1/K) + theta_0 = epsilon*phi_0**2 # ref depth-integrated temperature (no units) + g = 9.80616 # acceleration due to gravity (m/s^2) + mean_depth = phi_0/g # reference depth (m) + xi = 0 # fraction of excess vapour/cloud not converted + q0 = 200 # saturation mixing ratio scaling (kg/kg) + beta1 = 1600 # depth-vaporisation factor (m) + gamma_v = 0.98 # vaporisation implicit factor + qprecip = 1e-4 # cloud to rain conversion threshold (kg/kg) + gamma_r = 1e-3 # rain-coalescence implicit factor + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + u_eqn_type = 'vector_advection_form' + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2) + domain = Domain(mesh, dt, 'BDM', element_order) + x, y, z = SpatialCoordinate(mesh) + _, phi, _ = lonlatr_from_xyz(x, y, z) + + # Equations + parameters = ShallowWaterParameters(H=mean_depth, g=g) + Omega = parameters.Omega + fexpr = 2*Omega*z/radius + + tracers = [ + WaterVapour(space='DG'), CloudWater(space='DG'), Rain(space='DG') + ] + + eqns = ShallowWaterEquations( + domain, parameters, fexpr=fexpr, u_transport_option=u_eqn_type, + active_tracers=tracers + ) + + # IO + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_nc=False, dump_vtus=True, + dumplist_latlon=['D', 'D_error'] + ) + diagnostic_fields = [ + SteadyStateError('u'), SteadyStateError('D'), + SteadyStateError('water_vapour'), ZonalComponent('u'), + MeridionalComponent('u'), RelativeVorticity() + ] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # define saturation function + def sat_func(x_in): + h = x_in.split()[1] + numerator = ( + 
theta_0 + sigma*((cos(phi))**2) + * ((w + sigma)*(cos(phi))**2 + 2*(phi_0 - w - sigma)) + ) + denominator = ( + phi_0**2 + (w + sigma)**2*(sin(phi))**4 + - 2*phi_0*(w + sigma)*(sin(phi))**2 + ) + theta = numerator/denominator + return q0/(g*h) * exp(20*(theta)) + + transport_methods = [DGUpwind(eqns, field_name) for field_name in eqns.field_names] + + limiter = DG1Limiter(domain.spaces('DG')) + + transported_fields = [ + TrapeziumRule(domain, "u"), + SSPRK3(domain, "D"), + SSPRK3(domain, "water_vapour", limiter=limiter), + SSPRK3(domain, "cloud_water", limiter=limiter), + SSPRK3(domain, "rain", limiter=limiter) + ] + + linear_solver = MoistConvectiveSWSolver(eqns) + + # Physics schemes + sat_adj = SWSaturationAdjustment( + eqns, sat_func, time_varying_saturation=True, + convective_feedback=True, beta1=beta1, gamma_v=gamma_v, + time_varying_gamma_v=False, parameters=parameters + ) + inst_rain = InstantRain( + eqns, qprecip, vapour_name="cloud_water", rain_name="rain", + gamma_r=gamma_r + ) + + physics_schemes = [ + (sat_adj, ForwardEuler(domain)), (inst_rain, ForwardEuler(domain)) + ] + + stepper = SemiImplicitQuasiNewton( + eqns, io, transport_schemes=transported_fields, + spatial_methods=transport_methods, linear_solver=linear_solver, + physics_schemes=physics_schemes + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + D0 = stepper.fields("D") + v0 = stepper.fields("water_vapour") + + uexpr = xyz_vector_from_lonlatr(u_max*cos(phi), 0, 0, (x, y, z)) + g = parameters.g + w = Omega*radius*u_max + (u_max**2)/2 + sigma = 0 + + Dexpr = mean_depth - (1/g)*(w)*((sin(phi))**2) + + # though this set-up has no buoyancy, we use the expression for theta to + # set up the initial vapour + numerator = ( + theta_0 + sigma*((cos(phi))**2) + * ((w + sigma)*(cos(phi))**2 + 2*(phi_0 - w - sigma)) + ) + denominator = ( + phi_0**2 + (w + sigma)**2*(sin(phi))**4 + - 2*phi_0*(w + sigma)*(sin(phi))**2 + ) + theta = numerator/denominator + + initial_msat = q0/(g*Dexpr) * exp(20*theta) + vexpr = (1 - xi) * initial_msat + + u0.project(uexpr) + D0.interpolate(Dexpr) + v0.interpolate(vexpr) + + # Set reference profiles + Dbar = Function(D0.function_space()).assign(mean_depth) + stepper.set_reference_profiles([('D', Dbar)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncells_per_edge', + help="The number of cells per edge of icosahedron", + type=int, + default=moist_convect_williamson_2_defaults['ncells_per_edge'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=moist_convect_williamson_2_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=moist_convect_williamson_2_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=moist_convect_williamson_2_defaults['dumpfreq'] 
+ ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=moist_convect_williamson_2_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + moist_convect_williamson_2(**vars(args)) diff --git a/examples/shallow_water/moist_thermal_williamson_5.py b/examples/shallow_water/moist_thermal_williamson_5.py new file mode 100644 index 000000000..9ce7da77e --- /dev/null +++ b/examples/shallow_water/moist_thermal_williamson_5.py @@ -0,0 +1,243 @@ +""" +The moist thermal form of Test Case 5 (flow over a mountain) of Williamson et +al, 1992: +``A standard test set for numerical approximations to the shallow water +equations in spherical geometry'', JCP. + +The initial conditions are taken from Zerroukat & Allen, 2015: +``A moist Boussinesq shallow water equations set for testing atmospheric +models'', JCP. + +Three moist variables (vapour, cloud liquid and rain) are used. This set of +equations involves an active buoyancy field. + +The example here uses the icosahedral sphere mesh and degree 1 spaces. An +explicit RK4 timestepper is used. +""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + SpatialCoordinate, as_vector, pi, sqrt, min_value, exp, cos, sin +) +from gusto import ( + Domain, IO, OutputParameters, Timestepper, RK4, DGUpwind, + ShallowWaterParameters, ShallowWaterEquations, Sum, + lonlatr_from_xyz, InstantRain, SWSaturationAdjustment, WaterVapour, + CloudWater, Rain, GeneralIcosahedralSphereMesh, RelativeVorticity, + ZonalComponent, MeridionalComponent +) + +moist_thermal_williamson_5_defaults = { + 'ncells_per_edge': 16, # number of cells per icosahedron edge + 'dt': 300.0, # 5 minutes + 'tmax': 50.*24.*60.*60., # 50 days + 'dumpfreq': 2880, # once per 10 days with default options + 'dirname': 'moist_thermal_williamson_5' +} + + +def moist_thermal_williamson_5( + ncells_per_edge=moist_thermal_williamson_5_defaults['ncells_per_edge'], + dt=moist_thermal_williamson_5_defaults['dt'], + tmax=moist_thermal_williamson_5_defaults['tmax'], + dumpfreq=moist_thermal_williamson_5_defaults['dumpfreq'], + dirname=moist_thermal_williamson_5_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # + + radius = 6371220. # planetary radius (m) + mean_depth = 5960 # reference depth (m) + g = 9.80616 # acceleration due to gravity (m/s^2) + u_max = 20. # max amplitude of the zonal wind (m/s) + epsilon = 1/300 # linear air expansion coeff (1/K) + theta_SP = -40*epsilon # value of theta at south pole (no units) + theta_EQ = 30*epsilon # value of theta at equator (no units) + theta_NP = -20*epsilon # value of theta at north pole (no units) + mu1 = 0.05 # scaling for theta with longitude (no units) + mu2 = 0.98 # proportion of qsat to make init qv (no units) + q0 = 135 # qsat scaling, gives init q_v of ~0.02, (kg/kg) + beta2 = 10*g # buoyancy-vaporisation factor (m/s^2) + nu = 20. # qsat factor in exponent (no units) + qprecip = 1e-4 # cloud to rain conversion threshold (kg/kg) + gamma_r = 1e-3 # rain-coalescence implicit factor + mountain_height = 2000. # height of mountain (m) + R0 = pi/9. # radius of mountain (rad) + lamda_c = -pi/2. # longitudinal centre of mountain (rad) + phi_c = pi/6. 
# latitudinal centre of mountain (rad) + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + u_eqn_type = 'vector_invariant_form' + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2) + domain = Domain(mesh, dt, "BDM", element_order) + x, y, z = SpatialCoordinate(mesh) + lamda, phi, _ = lonlatr_from_xyz(x, y, z) + + # Equation: coriolis + parameters = ShallowWaterParameters(H=mean_depth, g=g) + Omega = parameters.Omega + fexpr = 2*Omega*z/radius + + # Equation: topography + rsq = min_value(R0**2, (lamda - lamda_c)**2 + (phi - phi_c)**2) + r = sqrt(rsq) + tpexpr = mountain_height * (1 - r/R0) + + # Equation: moisture + tracers = [ + WaterVapour(space='DG'), CloudWater(space='DG'), Rain(space='DG') + ] + eqns = ShallowWaterEquations( + domain, parameters, fexpr=fexpr, bexpr=tpexpr, thermal=True, + active_tracers=tracers, u_transport_option=u_eqn_type + ) + + # I/O + output = OutputParameters( + dirname=dirname, dumplist_latlon=['D'], dumpfreq=dumpfreq, + dump_vtus=True, dump_nc=False, + dumplist=['D', 'b', 'water_vapour', 'cloud_water'] + ) + diagnostic_fields = [Sum('D', 'topography'), RelativeVorticity(), + ZonalComponent('u'), MeridionalComponent('u')] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Physics ------------------------------------------------------------------ + # Saturation function -- first define simple expression + def q_sat(b, D): + return (q0/(g*D + g*tpexpr)) * exp(nu*(1 - b/g)) + + # Function to pass to physics (takes mixed function as argument) + def phys_sat_func(x_in): + D = x_in.split()[1] + b = x_in.split()[2] + return q_sat(b, D) + + # Feedback proportionality is dependent on D and b + def gamma_v(x_in): + D = x_in.split()[1] + b = x_in.split()[2] + return 1.0 / (1.0 + nu*beta2/g*q_sat(b, D)) + + SWSaturationAdjustment( + eqns, phys_sat_func, time_varying_saturation=True, + parameters=parameters, thermal_feedback=True, + beta2=beta2, gamma_v=gamma_v, time_varying_gamma_v=True + ) + + InstantRain( + eqns, qprecip, vapour_name="cloud_water", rain_name="rain", + gamma_r=gamma_r + ) + + transport_methods = [ + DGUpwind(eqns, field_name) for field_name in eqns.field_names + ] + + # Timestepper + stepper = Timestepper( + eqns, RK4(domain), io, spatial_methods=transport_methods + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + D0 = stepper.fields("D") + b0 = stepper.fields("b") + v0 = stepper.fields("water_vapour") + c0 = stepper.fields("cloud_water") + r0 = stepper.fields("rain") + + uexpr = as_vector([-u_max*y/radius, u_max*x/radius, 0.0]) + + Dexpr = ( + mean_depth - tpexpr + - (radius * Omega * u_max + 0.5*u_max**2)*(z/radius)**2/g + ) + + # Expression for initial buoyancy - note the bracket around 1-mu + theta_expr = ( + 2/(pi**2) * ( + phi*(phi - pi/2)*theta_SP + - 2*(phi + pi/2) * (phi - pi/2)*(1 - mu1)*theta_EQ + + phi*(phi + pi/2)*theta_NP + ) + + mu1*theta_EQ*cos(phi)*sin(lamda) + ) + bexpr = g * (1 - theta_expr) + + # Expression for initial vapour depends on initial saturation + vexpr = mu2 * 
q_sat(bexpr, Dexpr) + + # Initialise (cloud and rain initially zero) + u0.project(uexpr) + D0.interpolate(Dexpr) + b0.interpolate(bexpr) + v0.interpolate(vexpr) + c0.assign(0.0) + r0.assign(0.0) + + # ----------------------------------------------------------------- # + # Run + # ----------------------------------------------------------------- # + + stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncells_per_edge', + help="The number of cells per edge of icosahedron", + type=int, + default=moist_thermal_williamson_5_defaults['ncells_per_edge'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=moist_thermal_williamson_5_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=moist_thermal_williamson_5_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=moist_thermal_williamson_5_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=moist_thermal_williamson_5_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + moist_thermal_williamson_5(**vars(args)) diff --git a/examples/shallow_water/shallow_water_1d.py b/examples/shallow_water/shallow_water_1d.py deleted file mode 100644 index ab50bd524..000000000 --- a/examples/shallow_water/shallow_water_1d.py +++ /dev/null @@ -1,69 +0,0 @@ -import numpy as np -import sys - -from firedrake import * -from gusto import * -from pyop2.mpi import MPI - -L = 2*pi -n = 128 -delta = L/n -mesh = PeriodicIntervalMesh(128, L) -dt = 0.0001 -if '--running-tests' in sys.argv: - T = 0.0005 -else: - T = 1 - -domain = Domain(mesh, dt, 'CG', 1) - -epsilon = 0.1 -parameters = ShallowWaterParameters(H=1/epsilon, g=1/epsilon) - -u_diffusion_opts = DiffusionParameters(kappa=1e-2) -v_diffusion_opts = DiffusionParameters(kappa=1e-2, mu=10/delta) -D_diffusion_opts = DiffusionParameters(kappa=1e-2, mu=10/delta) -diffusion_options = [("u", u_diffusion_opts), - ("v", v_diffusion_opts), - ("D", D_diffusion_opts)] - -eqns = ShallowWaterEquations_1d(domain, parameters, - fexpr=Constant(1/epsilon), - diffusion_options=diffusion_options) - -output = OutputParameters(dirname="1dsw_%s" % str(epsilon), - dumpfreq=50) -io = IO(domain, output) - -transport_methods = [DGUpwind(eqns, "u"), DGUpwind(eqns, "v"), - DGUpwind(eqns, "D")] - -diffusion_methods = [CGDiffusion(eqns, "u", u_diffusion_opts), - InteriorPenaltyDiffusion(eqns, "v", v_diffusion_opts), - InteriorPenaltyDiffusion(eqns, "D", D_diffusion_opts)] - -stepper = Timestepper(eqns, RK4(domain), io, - spatial_methods=transport_methods+diffusion_methods) - -D = stepper.fields("D") -x = SpatialCoordinate(mesh)[0] -hexpr = ( - sin(x - pi/2) * exp(-4*(x - pi/2)**2) - + sin(8*(x - pi)) * exp(-2*(x - pi)**2) -) -h = Function(D.function_space()).interpolate(hexpr) - -A = assemble(h*dx) - -# B must be the maximum value of h (across all ranks) -B = np.zeros(1) -COMM_WORLD.Allreduce(h.dat.data_ro.max(), B, MPI.MAX) - -C0 = 1/(1 - 2*pi*B[0]/A) -C1 = (1 - C0)/B[0] -H = parameters.H -D.interpolate(C1*hexpr + C0) - -D += parameters.H - 
-stepper.run(0, T) diff --git a/examples/shallow_water/shallow_water_1d_wave.py b/examples/shallow_water/shallow_water_1d_wave.py new file mode 100644 index 000000000..3e0b1582b --- /dev/null +++ b/examples/shallow_water/shallow_water_1d_wave.py @@ -0,0 +1,177 @@ +""" +A shallow water wave on a 1D periodic domain. The test is taken from +Haut & Wingate, 2014: +``An asymptotic parallel-in-time method for highly oscillatory PDEs'', SIAM JSC. + +The velocity includes a component normal to the domain, and diffusion terms are +included in the equations. + +This example uses an explicit RK4 timestepper to solve the equations. +""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +import numpy as np +from pyop2.mpi import MPI +from firedrake import ( + PeriodicIntervalMesh, Function, assemble, SpatialCoordinate, COMM_WORLD, + Constant, pi, sin, exp, dx +) +from gusto import ( + Domain, IO, OutputParameters, Timestepper, RK4, DGUpwind, + ShallowWaterParameters, ShallowWaterEquations_1d, CGDiffusion, + InteriorPenaltyDiffusion, DiffusionParameters +) + +shallow_water_1d_wave_defaults = { + 'ncells': 128, + 'dt': 0.0001, + 'tmax': 1.0, + 'dumpfreq': 1000, # 10 outputs with default options + 'dirname': 'shallow_water_1d_wave' +} + + +def shallow_water_1d_wave( + ncells=shallow_water_1d_wave_defaults['ncells'], + dt=shallow_water_1d_wave_defaults['dt'], + tmax=shallow_water_1d_wave_defaults['tmax'], + dumpfreq=shallow_water_1d_wave_defaults['dumpfreq'], + dirname=shallow_water_1d_wave_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # + + domain_length = 2*pi # length of domain (m) + kappa = 1.e-2 # diffusivity (m^2/s^2) + epsilon = 0.1 # scaling factor for depth, gravity and rotation + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + mesh = PeriodicIntervalMesh(ncells, domain_length) + domain = Domain(mesh, dt, 'CG', element_order) + + # Diffusion + delta = domain_length / ncells + u_diffusion_opts = DiffusionParameters(kappa=kappa) + v_diffusion_opts = DiffusionParameters(kappa=kappa, mu=10/delta) + D_diffusion_opts = DiffusionParameters(kappa=kappa, mu=10/delta) + diffusion_options = [ + ("u", u_diffusion_opts), + ("v", v_diffusion_opts), + ("D", D_diffusion_opts) + ] + + # Equation + parameters = ShallowWaterParameters(H=1/epsilon, g=1/epsilon) + eqns = ShallowWaterEquations_1d( + domain, parameters, fexpr=Constant(1/epsilon), + diffusion_options=diffusion_options + ) + + output = OutputParameters(dirname=dirname, dumpfreq=dumpfreq) + io = IO(domain, output) + + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "v"), + DGUpwind(eqns, "D") + ] + + diffusion_methods = [ + CGDiffusion(eqns, "u", u_diffusion_opts), + InteriorPenaltyDiffusion(eqns, "v", v_diffusion_opts), + InteriorPenaltyDiffusion(eqns, "D", D_diffusion_opts) + ] + + stepper = Timestepper( + eqns, RK4(domain), io, + spatial_methods=transport_methods+diffusion_methods + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # 
------------------------------------------------------------------------ # + + x = SpatialCoordinate(mesh)[0] + D = stepper.fields("D") + + # Spatially-varying part of initial condition + hexpr = ( + sin(x - pi/2) * exp(-4*(x - pi/2)**2) + + sin(8*(x - pi)) * exp(-2*(x - pi)**2) + ) + + # Make a function to include spatially-varying part + h = Function(D.function_space()).interpolate(hexpr) + + A = assemble(h*dx) + + # B must be the maximum value of h (across all ranks) + B = np.zeros(1) + COMM_WORLD.Allreduce(h.dat.data_ro.max(), B, MPI.MAX) + + # D is normalised form of h + C0 = 1/(1 - 2*pi*B[0]/A) + C1 = (1 - C0)/B[0] + D.interpolate(parameters.H + C1*hexpr + C0) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + stepper.run(0, tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncells', + help="The number of cells in the 1D domain", + type=int, + default=shallow_water_1d_wave_defaults['ncells'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=shallow_water_1d_wave_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=shallow_water_1d_wave_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=shallow_water_1d_wave_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=shallow_water_1d_wave_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + shallow_water_1d_wave(**vars(args)) diff --git a/examples/shallow_water/test_shallow_water_examples.py b/examples/shallow_water/test_shallow_water_examples.py new file mode 100644 index 000000000..eb64e94e0 --- /dev/null +++ b/examples/shallow_water/test_shallow_water_examples.py @@ -0,0 +1,129 @@ +import pytest + + +def make_dirname(test_name): + from mpi4py import MPI + comm = MPI.COMM_WORLD + if comm.size > 1: + return f'pytest_{test_name}_parallel' + else: + return f'pytest_{test_name}' + + +def test_linear_williamson_2(): + from linear_williamson_2 import linear_williamson_2 + test_name = 'linear_williamson_2' + linear_williamson_2( + ncells_per_edge=4, + dt=1800., + tmax=18000., + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=2) +def test_linear_williamson_2_parallel(): + test_linear_williamson_2() + + +def test_moist_convective_williamson_2(): + from moist_convective_williamson_2 import moist_convect_williamson_2 + test_name = 'moist_convective_williamson_2' + moist_convect_williamson_2( + ncells_per_edge=4, + dt=1800., + tmax=18000., + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=2) +def test_moist_convective_williamson_2_parallel(): + test_moist_convective_williamson_2() + + +def test_moist_thermal_williamson_5(): + from moist_thermal_williamson_5 import moist_thermal_williamson_5 + test_name = 'moist_thermal_williamson_5' + moist_thermal_williamson_5( + ncells_per_edge=4, + dt=1800., + tmax=18000., + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + 
+@pytest.mark.parallel(nprocs=2) +def test_moist_thermal_williamson_5_parallel(): + test_moist_thermal_williamson_5() + + +def test_shallow_water_1d_wave(): + from shallow_water_1d_wave import shallow_water_1d_wave + test_name = 'shallow_water_1d_wave' + shallow_water_1d_wave( + ncells=20, + dt=1.0e-4, + tmax=1.0e-3, + dumpfreq=2, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=4) +def test_shallow_water_1d_wave_parallel(): + test_shallow_water_1d_wave() + + +def test_thermal_williamson_2(): + from thermal_williamson_2 import thermal_williamson_2 + test_name = 'thermal_williamson_2' + thermal_williamson_2( + ncells_per_edge=4, + dt=1800., + tmax=18000., + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=4) +def test_thermal_williamson_2_parallel(): + test_thermal_williamson_2() + + +def test_williamson_2(): + from williamson_2 import williamson_2 + test_name = 'williamson_2' + williamson_2( + ncells_per_edge=4, + dt=1800., + tmax=18000., + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=4) +def test_williamson_2_parallel(): + test_williamson_2() + + +def test_williamson_5(): + from williamson_5 import williamson_5 + test_name = 'williamson_5' + williamson_5( + ncells_per_edge=4, + dt=1800., + tmax=18000., + dumpfreq=10, + dirname=make_dirname(test_name) + ) + + +@pytest.mark.parallel(nprocs=4) +def test_williamson_5_parallel(): + test_williamson_5() diff --git a/examples/shallow_water/thermal_williamson2.py b/examples/shallow_water/thermal_williamson2.py deleted file mode 100644 index 342ee8b07..000000000 --- a/examples/shallow_water/thermal_williamson2.py +++ /dev/null @@ -1,115 +0,0 @@ -from gusto import * -from firedrake import IcosahedralSphereMesh, SpatialCoordinate, sin, cos -import sys - -# ----------------------------------------------------------------- # -# Test case parameters -# ----------------------------------------------------------------- # - -dt = 4000 - -if '--running-tests' in sys.argv: - tmax = dt - dumpfreq = 1 -else: - day = 24*60*60 - tmax = 5*day - ndumps = 5 - dumpfreq = int(tmax / (ndumps*dt)) - -R = 6371220. 
-u_max = 20 -phi_0 = 3e4 -epsilon = 1/300 -theta_0 = epsilon*phi_0**2 -g = 9.80616 -H = phi_0/g - -# ----------------------------------------------------------------- # -# Set up model objects -# ----------------------------------------------------------------- # - -# Domain -mesh = IcosahedralSphereMesh(radius=R, refinement_level=3, degree=2) -degree = 1 -domain = Domain(mesh, dt, 'BDM', degree) -x = SpatialCoordinate(mesh) - -# Equations -params = ShallowWaterParameters(H=H, g=g) -Omega = params.Omega -fexpr = 2*Omega*x[2]/R -eqns = ShallowWaterEquations(domain, params, fexpr=fexpr, u_transport_option='vector_advection_form', thermal=True) - -# IO -dirname = "thermal_williamson2" -output = OutputParameters( - dirname=dirname, - dumpfreq=dumpfreq, - dumplist_latlon=['D', 'D_error'], -) - -diagnostic_fields = [RelativeVorticity(), PotentialVorticity(), - ShallowWaterKineticEnergy(), - ShallowWaterPotentialEnergy(params), - ShallowWaterPotentialEnstrophy(), - SteadyStateError('u'), SteadyStateError('D'), - SteadyStateError('b'), MeridionalComponent('u'), - ZonalComponent('u')] -io = IO(domain, output, diagnostic_fields=diagnostic_fields) - -# Transport schemes -transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "D", fixed_subcycles=2), - SSPRK3(domain, "b", fixed_subcycles=2)] -transport_methods = [DGUpwind(eqns, "u"), - DGUpwind(eqns, "D"), - DGUpwind(eqns, "b")] - -# Linear solver -linear_solver = ThermalSWSolver(eqns) - -# Time stepper -stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver) - -# ----------------------------------------------------------------- # -# Initial conditions -# ----------------------------------------------------------------- # - -u0 = stepper.fields("u") -D0 = stepper.fields("D") -b0 = stepper.fields("b") - -lamda, phi, _ = lonlatr_from_xyz(x[0], x[1], x[2]) - -uexpr = xyz_vector_from_lonlatr(u_max*cos(phi), 0, 0, x) -g = params.g -w = Omega*R*u_max + (u_max**2)/2 -sigma = w/10 - -Dexpr = H - (1/g)*(w + sigma)*((sin(phi))**2) - -numerator = theta_0 + sigma*((cos(phi))**2) * ((w + sigma)*(cos(phi))**2 + 2*(phi_0 - w - sigma)) - -denominator = phi_0**2 + (w + sigma)**2*(sin(phi))**4 - 2*phi_0*(w + sigma)*(sin(phi))**2 - -theta = numerator/denominator - -bexpr = params.g * (1 - theta) - -u0.project(uexpr) -D0.interpolate(Dexpr) -b0.interpolate(bexpr) - -# Set reference profiles -Dbar = Function(D0.function_space()).assign(H) -bbar = Function(b0.function_space()).interpolate(bexpr) -stepper.set_reference_profiles([('D', Dbar), ('b', bbar)]) - -# ----------------------------------------------------------------- # -# Run -# ----------------------------------------------------------------- # - -stepper.run(t=0, tmax=tmax) diff --git a/examples/shallow_water/thermal_williamson_2.py b/examples/shallow_water/thermal_williamson_2.py new file mode 100644 index 000000000..fe489ee32 --- /dev/null +++ b/examples/shallow_water/thermal_williamson_2.py @@ -0,0 +1,197 @@ +""" +The thermal form of Test Case 2 (solid-body rotation with geostrophically +balanced flow) of Williamson et al, 1992: +``A standard test set for numerical approximations to the shallow water +equations in spherical geometry'', JCP. + +The initial conditions are taken from Zerroukat & Allen, 2015: +``A moist Boussinesq shallow water equations set for testing atmospheric +models'', JCP. + +The example here uses the icosahedral sphere mesh and degree 1 spaces. 
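+
+As a usage sketch (the argument values here are illustrative rather than the
+defaults), the case can be run at reduced resolution directly from Python:
+
+    from thermal_williamson_2 import thermal_williamson_2
+    thermal_williamson_2(ncells_per_edge=8, dt=1800.0, tmax=86400.0)
+
+or from the command line via the arguments parsed at the end of this file.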
+""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import Function, SpatialCoordinate, sin, cos +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + TrapeziumRule, ShallowWaterParameters, ShallowWaterEquations, + RelativeVorticity, PotentialVorticity, SteadyStateError, + ZonalComponent, MeridionalComponent, ThermalSWSolver, + xyz_vector_from_lonlatr, lonlatr_from_xyz, GeneralIcosahedralSphereMesh +) + +thermal_williamson_2_defaults = { + 'ncells_per_edge': 16, # number of cells per icosahedron edge + 'dt': 900.0, # 15 minutes + 'tmax': 5.*24.*60.*60., # 5 days + 'dumpfreq': 96, # once per day with default options + 'dirname': 'thermal_williamson_2' +} + + +def thermal_williamson_2( + ncells_per_edge=thermal_williamson_2_defaults['ncells_per_edge'], + dt=thermal_williamson_2_defaults['dt'], + tmax=thermal_williamson_2_defaults['tmax'], + dumpfreq=thermal_williamson_2_defaults['dumpfreq'], + dirname=thermal_williamson_2_defaults['dirname'] +): + + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # + + radius = 6371220. # planetary radius (m) + u_max = 20. # max amplitude of the zonal wind (m/s) + phi_0 = 3.0e4 # reference geopotential height (m^2/s^2) + epsilon = 1/300 # linear air expansion coeff (1/K) + theta_0 = epsilon*phi_0**2 # ref depth-integrated temperature (no units) + g = 9.80616 # acceleration due to gravity (m/s^2) + mean_depth = phi_0/g # reference depth (m) + + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # + + element_order = 1 + u_eqn_type = 'vector_advection_form' + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2) + domain = Domain(mesh, dt, 'BDM', element_order) + x, y, z = SpatialCoordinate(mesh) + + # Equations + params = ShallowWaterParameters(H=mean_depth, g=g) + Omega = params.Omega + fexpr = 2*Omega*z/radius + eqns = ShallowWaterEquations( + domain, params, fexpr=fexpr, u_transport_option=u_eqn_type, thermal=True + ) + + # IO + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dumplist_latlon=['D', 'D_error'], + dump_vtus=False, dump_nc=True + ) + + diagnostic_fields = [ + RelativeVorticity(), PotentialVorticity(), + SteadyStateError('u'), SteadyStateError('D'), SteadyStateError('b'), + MeridionalComponent('u'), ZonalComponent('u') + ] + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport schemes + transported_fields = [ + TrapeziumRule(domain, "u"), + SSPRK3(domain, "D", fixed_subcycles=2), + SSPRK3(domain, "b", fixed_subcycles=2) + ] + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "D"), + DGUpwind(eqns, "b") + ] + + # Linear solver + linear_solver = ThermalSWSolver(eqns) + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + D0 = 
stepper.fields("D") + b0 = stepper.fields("b") + + _, phi, _ = lonlatr_from_xyz(x, y, z) + + uexpr = xyz_vector_from_lonlatr(u_max*cos(phi), 0, 0, (x, y, z)) + w = Omega*radius*u_max + (u_max**2)/2 + sigma = w/10 + + Dexpr = mean_depth - (1/g)*(w + sigma)*((sin(phi))**2) + + numerator = ( + theta_0 + sigma*((cos(phi))**2) + * ((w + sigma)*(cos(phi))**2 + 2*(phi_0 - w - sigma)) + ) + denominator = ( + phi_0**2 + (w + sigma)**2*(sin(phi))**4 + - 2*phi_0*(w + sigma)*(sin(phi))**2 + ) + + theta = numerator/denominator + bexpr = params.g * (1 - theta) + + u0.project(uexpr) + D0.interpolate(Dexpr) + b0.interpolate(bexpr) + + # Set reference profiles + Dbar = Function(D0.function_space()).assign(mean_depth) + bbar = Function(b0.function_space()).interpolate(bexpr) + stepper.set_reference_profiles([('D', Dbar), ('b', bbar)]) + + # ------------------------------------------------------------------------ # + # Run + # ------------------------------------------------------------------------ # + + stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncells_per_edge', + help="The number of cells per edge of icosahedron", + type=int, + default=thermal_williamson_2_defaults['ncells_per_edge'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=thermal_williamson_2_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=thermal_williamson_2_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=thermal_williamson_2_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=thermal_williamson_2_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + thermal_williamson_2(**vars(args)) diff --git a/examples/shallow_water/williamson_2.py b/examples/shallow_water/williamson_2.py index 16057b5b1..0bbd82851 100644 --- a/examples/shallow_water/williamson_2.py +++ b/examples/shallow_water/williamson_2.py @@ -1,101 +1,121 @@ """ -The Williamson 2 shallow-water test case (solid-body rotation), solved with a -discretisation of the non-linear shallow-water equations. +Test Case 2 (solid-body rotation with geostrophically-balanced flow) of +Williamson et al, 1992: +``A standard test set for numerical approximations to the shallow water +equations in spherical geometry'', JCP. -This uses an icosahedral mesh of the sphere, and runs a series of resolutions -to act as a convergence test. +The example here uses the icosahedral sphere mesh and degree 1 spaces. 
""" -from gusto import * -from firedrake import IcosahedralSphereMesh, SpatialCoordinate, sin, cos, pi -import sys +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import SpatialCoordinate, sin, cos, pi, Function +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + TrapeziumRule, ShallowWaterParameters, ShallowWaterEquations, + RelativeVorticity, PotentialVorticity, SteadyStateError, + ShallowWaterKineticEnergy, ShallowWaterPotentialEnergy, + ShallowWaterPotentialEnstrophy, rotated_lonlatr_coords, + ZonalComponent, MeridionalComponent, rotated_lonlatr_vectors, + GeneralIcosahedralSphereMesh +) + +williamson_2_defaults = { + 'ncells_per_edge': 16, # number of cells per icosahedron edge + 'dt': 900.0, # 15 minutes + 'tmax': 5.*24.*60.*60., # 5 days + 'dumpfreq': 96, # once per day with default options + 'dirname': 'williamson_2' +} + + +def williamson_2( + ncells_per_edge=williamson_2_defaults['ncells_per_edge'], + dt=williamson_2_defaults['dt'], + tmax=williamson_2_defaults['tmax'], + dumpfreq=williamson_2_defaults['dumpfreq'], + dirname=williamson_2_defaults['dirname'] +): -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -day = 24.*60.*60. -if '--running-tests' in sys.argv: - ref_dt = {3: 3000.} - tmax = 3000. - ndumps = 1 -else: - # setup resolution and timestepping parameters for convergence test - ref_dt = {3: 4000., 4: 2000., 5: 1000., 6: 500.} - tmax = 5*day - ndumps = 5 + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # -# setup shallow water parameters -R = 6371220. -H = 5960. -rotated_pole = (0.0, pi/3) + radius = 6371220. # planetary radius (m) + mean_depth = 5960. 
# reference depth (m) + rotate_pole_to = (0.0, pi/3) # location of North pole of mesh + u_max = 2*pi*radius/(12*24*60*60) # Max amplitude of the zonal wind (m/s) -# setup input that doesn't change with ref level or dt -parameters = ShallowWaterParameters(H=H) + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # -for ref_level, dt in ref_dt.items(): + element_order = 1 + u_eqn_type = 'vector_invariant_form' # ------------------------------------------------------------------------ # # Set up model objects # ------------------------------------------------------------------------ # # Domain - mesh = IcosahedralSphereMesh(radius=R, - refinement_level=ref_level, degree=2) - x = SpatialCoordinate(mesh) - domain = Domain(mesh, dt, 'BDM', 1, rotated_pole=rotated_pole) + mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2) + domain = Domain(mesh, dt, 'BDM', element_order, rotated_pole=rotate_pole_to) + xyz = SpatialCoordinate(mesh) # Equation + parameters = ShallowWaterParameters(H=mean_depth) Omega = parameters.Omega - _, lat, _ = rotated_lonlatr_coords(x, rotated_pole) - e_lon, _, _ = rotated_lonlatr_vectors(x, rotated_pole) + _, lat, _ = rotated_lonlatr_coords(xyz, rotate_pole_to) + e_lon, _, _ = rotated_lonlatr_vectors(xyz, rotate_pole_to) fexpr = 2*Omega*sin(lat) - eqns = ShallowWaterEquations(domain, parameters, fexpr=fexpr) + eqns = ShallowWaterEquations( + domain, parameters, fexpr=fexpr, u_transport_option=u_eqn_type) # I/O - dirname = "williamson_2_ref%s_dt%s" % (ref_level, dt) - dumpfreq = int(tmax / (ndumps*dt)) output = OutputParameters( - dirname=dirname, - dumpfreq=dumpfreq, + dirname=dirname, dumpfreq=dumpfreq, dump_nc=True, dumplist_latlon=['D', 'D_error'], - dump_nc=True, ) - - diagnostic_fields = [RelativeVorticity(), SteadyStateError('RelativeVorticity'), - PotentialVorticity(), - ShallowWaterKineticEnergy(), - ShallowWaterPotentialEnergy(parameters), - ShallowWaterPotentialEnstrophy(), - SteadyStateError('u'), SteadyStateError('D'), - MeridionalComponent('u', rotated_pole), - ZonalComponent('u', rotated_pole)] + diagnostic_fields = [ + RelativeVorticity(), SteadyStateError('RelativeVorticity'), + PotentialVorticity(), ShallowWaterKineticEnergy(), + ShallowWaterPotentialEnergy(parameters), + ShallowWaterPotentialEnstrophy(), + SteadyStateError('u'), SteadyStateError('D'), + MeridionalComponent('u', rotate_pole_to), + ZonalComponent('u', rotate_pole_to) + ] io = IO(domain, output, diagnostic_fields=diagnostic_fields) # Transport schemes - transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "D", fixed_subcycles=2)] - transport_methods = [DGUpwind(eqns, "u"), DGUpwind(eqns, "D")] + transported_fields = [ + TrapeziumRule(domain, "u"), + SSPRK3(domain, "D", fixed_subcycles=2)] + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "D") + ] # Time stepper - stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, transport_methods) + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods + ) # ------------------------------------------------------------------------ # # Initial conditions # ------------------------------------------------------------------------ # + g = parameters.g + u0 = stepper.fields("u") D0 = stepper.fields("D") - x = SpatialCoordinate(mesh) - u_max = 2*pi*R/(12*day) # Maximum amplitude of the zonal wind (m/s) + uexpr = u_max*cos(lat)*e_lon - g = 
parameters.g - Dexpr = H - (R * Omega * u_max + u_max*u_max/2.0)*(sin(lat))**2/g + Dexpr = mean_depth - (radius * Omega * u_max + 0.5*u_max**2)*(sin(lat))**2/g u0.project(uexpr) D0.interpolate(Dexpr) - Dbar = Function(D0.function_space()).assign(H) + Dbar = Function(D0.function_space()).assign(mean_depth) stepper.set_reference_profiles([('D', Dbar)]) # ------------------------------------------------------------------------ # @@ -103,3 +123,48 @@ # ------------------------------------------------------------------------ # stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncells_per_edge', + help="The number of cells per edge of icosahedron", + type=int, + default=williamson_2_defaults['ncells_per_edge'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=williamson_2_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=williamson_2_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=williamson_2_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + default=williamson_2_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + williamson_2(**vars(args)) diff --git a/examples/shallow_water/williamson_5.py b/examples/shallow_water/williamson_5.py index 366a5a903..e6cae36c7 100644 --- a/examples/shallow_water/williamson_5.py +++ b/examples/shallow_water/williamson_5.py @@ -1,82 +1,102 @@ """ -The Williamson 5 shallow-water test case (flow over topography), solved with a -discretisation of the non-linear shallow-water equations. +Test Case 5 (flow over a mountain) of Williamson et al, 1992: +``A standard test set for numerical approximations to the shallow water +equations in spherical geometry'', JCP. -This uses an icosahedral mesh of the sphere, and runs a series of resolutions. +The example here uses the icosahedral sphere mesh and degree 1 spaces. 
""" -from gusto import * -from firedrake import (IcosahedralSphereMesh, SpatialCoordinate, - as_vector, pi, sqrt, min_value) -import sys +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + SpatialCoordinate, as_vector, pi, sqrt, min_value, Function +) +from gusto import ( + Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, + ShallowWaterParameters, ShallowWaterEquations, Sum, + lonlatr_from_xyz, GeneralIcosahedralSphereMesh, ZonalComponent, + MeridionalComponent, RelativeVorticity, RungeKuttaFormulation +) + +williamson_5_defaults = { + 'ncells_per_edge': 16, # number of cells per icosahedron edge + 'dt': 900.0, # 15 minutes + 'tmax': 50.*24.*60.*60., # 50 days + 'dumpfreq': 960, # once per 10 days with default options + 'dirname': 'williamson_5' +} + + +def williamson_5( + ncells_per_edge=williamson_5_defaults['ncells_per_edge'], + dt=williamson_5_defaults['dt'], + tmax=williamson_5_defaults['tmax'], + dumpfreq=williamson_5_defaults['dumpfreq'], + dirname=williamson_5_defaults['dirname'] +): -# ---------------------------------------------------------------------------- # -# Test case parameters -# ---------------------------------------------------------------------------- # - -day = 24.*60.*60. -if '--running-tests' in sys.argv: - ref_dt = {3: 3000.} - tmax = 3000. - ndumps = 1 -else: - # setup resolution and timestepping parameters for convergence test - ref_dt = {3: 900., 4: 450., 5: 225., 6: 112.5} - tmax = 50*day - ndumps = 5 + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # -# setup shallow water parameters -R = 6371220. -H = 5960. + radius = 6371220. # planetary radius (m) + mean_depth = 5960 # reference depth (m) + g = 9.80616 # acceleration due to gravity (m/s^2) + u_max = 20. # max amplitude of the zonal wind (m/s) + mountain_height = 2000. # height of mountain (m) + R0 = pi/9. # radius of mountain (rad) + lamda_c = -pi/2. # longitudinal centre of mountain (rad) + phi_c = pi/6. # latitudinal centre of mountain (rad) -# setup input that doesn't change with ref level or dt -parameters = ShallowWaterParameters(H=H) + # ------------------------------------------------------------------------ # + # Our settings for this set up + # ------------------------------------------------------------------------ # -for ref_level, dt in ref_dt.items(): + element_order = 1 # ------------------------------------------------------------------------ # # Set up model objects # ------------------------------------------------------------------------ # # Domain - mesh = IcosahedralSphereMesh(radius=R, - refinement_level=ref_level, degree=2) - x = SpatialCoordinate(mesh) - domain = Domain(mesh, dt, 'BDM', 1) + mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2) + domain = Domain(mesh, dt, 'BDM', element_order) + x, y, z = SpatialCoordinate(mesh) + lamda, phi, _ = lonlatr_from_xyz(x, y, z) - # Equation + # Equation: coriolis + parameters = ShallowWaterParameters(H=mean_depth, g=g) Omega = parameters.Omega - fexpr = 2*Omega*x[2]/R - lamda, theta, _ = lonlatr_from_xyz(x[0], x[1], x[2]) - R0 = pi/9. - R0sq = R0**2 - lamda_c = -pi/2. - lsq = (lamda - lamda_c)**2 - theta_c = pi/6. 
- thsq = (theta - theta_c)**2 - rsq = min_value(R0sq, lsq+thsq) + fexpr = 2*Omega*z/radius + + # Equation: topography + rsq = min_value(R0**2, (lamda - lamda_c)**2 + (phi - phi_c)**2) r = sqrt(rsq) - bexpr = 2000 * (1 - r/R0) - eqns = ShallowWaterEquations(domain, parameters, fexpr=fexpr, bexpr=bexpr) + tpexpr = mountain_height * (1 - r/R0) + eqns = ShallowWaterEquations(domain, parameters, fexpr=fexpr, bexpr=tpexpr) # I/O - dirname = "williamson_5_ref%s_dt%s" % (ref_level, dt) - dumpfreq = int(tmax / (ndumps*dt)) output = OutputParameters( - dirname=dirname, - dumplist_latlon=['D'], - dumpfreq=dumpfreq, + dirname=dirname, dumplist_latlon=['D'], dumpfreq=dumpfreq, + dump_vtus=True, dump_nc=False, dumplist=['D', 'topography'] ) - diagnostic_fields = [Sum('D', 'topography')] + diagnostic_fields = [Sum('D', 'topography'), RelativeVorticity(), + MeridionalComponent('u'), ZonalComponent('u')] io = IO(domain, output, diagnostic_fields=diagnostic_fields) # Transport schemes - transported_fields = [TrapeziumRule(domain, "u"), - SSPRK3(domain, "D")] - transport_methods = [DGUpwind(eqns, "u"), DGUpwind(eqns, "D")] + transported_fields = [ + SSPRK3(domain, "u", subcycle_by_courant=0.25), + SSPRK3(domain, "D", subcycle_by_courant=0.25, rk_formulation=RungeKuttaFormulation.linear) + ] + transport_methods = [ + DGUpwind(eqns, "u"), + DGUpwind(eqns, "D", advective_then_flux=True) + ] # Time stepper - stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, transport_methods) + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods + ) # ------------------------------------------------------------------------ # # Initial conditions @@ -84,16 +104,16 @@ u0 = stepper.fields('u') D0 = stepper.fields('D') - u_max = 20. # Maximum amplitude of the zonal wind (m/s) - uexpr = as_vector([-u_max*x[1]/R, u_max*x[0]/R, 0.0]) - g = parameters.g - Rsq = R**2 - Dexpr = H - ((R * Omega * u_max + 0.5*u_max**2)*x[2]**2/Rsq)/g - bexpr + uexpr = as_vector([-u_max*y/radius, u_max*x/radius, 0.0]) + Dexpr = ( + mean_depth - tpexpr + - (radius*Omega*u_max + 0.5*u_max**2)*(z/radius)**2/g + ) u0.project(uexpr) D0.interpolate(Dexpr) - Dbar = Function(D0.function_space()).assign(H) + Dbar = Function(D0.function_space()).assign(mean_depth) stepper.set_reference_profiles([('D', Dbar)]) # ------------------------------------------------------------------------ # @@ -101,3 +121,48 @@ # ------------------------------------------------------------------------ # stepper.run(t=0, tmax=tmax) + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncells_per_edge', + help="The number of cells per edge of icosahedron", + type=int, + default=williamson_5_defaults['ncells_per_edge'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=williamson_5_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=williamson_5_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=williamson_5_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of the directory to write to.", + type=str, + 
default=williamson_5_defaults['dirname'] + ) + args, unknown = parser.parse_known_args() + + williamson_5(**vars(args)) diff --git a/examples/test_examples_run.py b/examples/test_examples_run.py deleted file mode 100644 index f69dec270..000000000 --- a/examples/test_examples_run.py +++ /dev/null @@ -1,38 +0,0 @@ -import pytest -from os.path import abspath, basename, dirname -import subprocess -import glob -import sys -import os - - -examples_dir = abspath(dirname(__file__)) -example_files = glob.glob("%s/*/*.py" % examples_dir) - - -@pytest.fixture(params=glob.glob("%s/*/*.py" % examples_dir), - ids=lambda x: basename(x)) -def example_file(request): - return abspath(request.param) - - -def test_example_runs(example_file, tmpdir, monkeypatch): - # This ensures that the test writes output in a temporary - # directory, rather than where pytest was run from. - monkeypatch.chdir(tmpdir) - subprocess.run( - [sys.executable, example_file, "--running-tests"], - check=True, - env=os.environ | {"PYOP2_CFLAGS": "-O0"} - ) - - -def test_example_runs_parallel(example_file, tmpdir, monkeypatch): - # This ensures that the test writes output in a temporary - # directory, rather than where pytest was run from. - monkeypatch.chdir(tmpdir) - subprocess.run( - ["mpiexec", "-n", "4", sys.executable, example_file, "--running-tests"], - check=True, - env=os.environ | {"PYOP2_CFLAGS": "-O0"} - ) diff --git a/figures/boussinesq/skamarock_klemp_compressible_bouss_final.png b/figures/boussinesq/skamarock_klemp_compressible_bouss_final.png new file mode 100644 index 000000000..4107d44ec Binary files /dev/null and b/figures/boussinesq/skamarock_klemp_compressible_bouss_final.png differ diff --git a/figures/boussinesq/skamarock_klemp_compressible_bouss_initial.png b/figures/boussinesq/skamarock_klemp_compressible_bouss_initial.png new file mode 100644 index 000000000..ef961cc01 Binary files /dev/null and b/figures/boussinesq/skamarock_klemp_compressible_bouss_initial.png differ diff --git a/figures/boussinesq/skamarock_klemp_incompressible_bouss_final.png b/figures/boussinesq/skamarock_klemp_incompressible_bouss_final.png new file mode 100644 index 000000000..622266ca1 Binary files /dev/null and b/figures/boussinesq/skamarock_klemp_incompressible_bouss_final.png differ diff --git a/figures/boussinesq/skamarock_klemp_incompressible_bouss_initial.png b/figures/boussinesq/skamarock_klemp_incompressible_bouss_initial.png new file mode 100644 index 000000000..ef961cc01 Binary files /dev/null and b/figures/boussinesq/skamarock_klemp_incompressible_bouss_initial.png differ diff --git a/figures/boussinesq/skamarock_klemp_linear_bouss_final.png b/figures/boussinesq/skamarock_klemp_linear_bouss_final.png new file mode 100644 index 000000000..c893cf013 Binary files /dev/null and b/figures/boussinesq/skamarock_klemp_linear_bouss_final.png differ diff --git a/figures/boussinesq/skamarock_klemp_linear_bouss_initial.png b/figures/boussinesq/skamarock_klemp_linear_bouss_initial.png new file mode 100644 index 000000000..ef961cc01 Binary files /dev/null and b/figures/boussinesq/skamarock_klemp_linear_bouss_initial.png differ diff --git a/figures/compressible_euler/dcmip_3_1_gravity_wave_final.png b/figures/compressible_euler/dcmip_3_1_gravity_wave_final.png new file mode 100644 index 000000000..d9864b820 Binary files /dev/null and b/figures/compressible_euler/dcmip_3_1_gravity_wave_final.png differ diff --git a/figures/compressible_euler/dcmip_3_1_gravity_wave_initial.png b/figures/compressible_euler/dcmip_3_1_gravity_wave_initial.png 
new file mode 100644 index 000000000..f6febb2e8 Binary files /dev/null and b/figures/compressible_euler/dcmip_3_1_gravity_wave_initial.png differ diff --git a/figures/compressible_euler/dry_bryan_fritsch.png b/figures/compressible_euler/dry_bryan_fritsch.png new file mode 100644 index 000000000..b763f4275 Binary files /dev/null and b/figures/compressible_euler/dry_bryan_fritsch.png differ diff --git a/figures/compressible_euler/skamarock_klemp_nonhydrostatic_final.png b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_final.png new file mode 100644 index 000000000..c1187a1cd Binary files /dev/null and b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_final.png differ diff --git a/figures/compressible_euler/skamarock_klemp_nonhydrostatic_initial.png b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_initial.png new file mode 100644 index 000000000..94c1eb09f Binary files /dev/null and b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_initial.png differ diff --git a/figures/compressible_euler/straka_bubble.png b/figures/compressible_euler/straka_bubble.png new file mode 100644 index 000000000..395bb9105 Binary files /dev/null and b/figures/compressible_euler/straka_bubble.png differ diff --git a/figures/compressible_euler/unsaturated_bubble_final.png b/figures/compressible_euler/unsaturated_bubble_final.png new file mode 100644 index 000000000..4f32dc099 Binary files /dev/null and b/figures/compressible_euler/unsaturated_bubble_final.png differ diff --git a/figures/compressible_euler/unsaturated_bubble_initial.png b/figures/compressible_euler/unsaturated_bubble_initial.png new file mode 100644 index 000000000..993a75dfb Binary files /dev/null and b/figures/compressible_euler/unsaturated_bubble_initial.png differ diff --git a/figures/shallow_water/linear_williamson_2_final.png b/figures/shallow_water/linear_williamson_2_final.png new file mode 100644 index 000000000..bd326d3e2 Binary files /dev/null and b/figures/shallow_water/linear_williamson_2_final.png differ diff --git a/figures/shallow_water/linear_williamson_2_initial.png b/figures/shallow_water/linear_williamson_2_initial.png new file mode 100644 index 000000000..992d3b5b3 Binary files /dev/null and b/figures/shallow_water/linear_williamson_2_initial.png differ diff --git a/figures/shallow_water/moist_convective_williamson_2_final.png b/figures/shallow_water/moist_convective_williamson_2_final.png new file mode 100644 index 000000000..c85b82415 Binary files /dev/null and b/figures/shallow_water/moist_convective_williamson_2_final.png differ diff --git a/figures/shallow_water/moist_convective_williamson_2_initial.png b/figures/shallow_water/moist_convective_williamson_2_initial.png new file mode 100644 index 000000000..07e171d25 Binary files /dev/null and b/figures/shallow_water/moist_convective_williamson_2_initial.png differ diff --git a/figures/shallow_water/moist_thermal_williamson_5_final.png b/figures/shallow_water/moist_thermal_williamson_5_final.png new file mode 100644 index 000000000..cc63024af Binary files /dev/null and b/figures/shallow_water/moist_thermal_williamson_5_final.png differ diff --git a/figures/shallow_water/moist_thermal_williamson_5_initial.png b/figures/shallow_water/moist_thermal_williamson_5_initial.png new file mode 100644 index 000000000..3d9c8486f Binary files /dev/null and b/figures/shallow_water/moist_thermal_williamson_5_initial.png differ diff --git a/figures/shallow_water/shallow_water_1d_wave.png b/figures/shallow_water/shallow_water_1d_wave.png new file mode 
100644 index 000000000..8496e625e Binary files /dev/null and b/figures/shallow_water/shallow_water_1d_wave.png differ diff --git a/figures/shallow_water/thermal_williamson_2_final.png b/figures/shallow_water/thermal_williamson_2_final.png new file mode 100644 index 000000000..705a1f436 Binary files /dev/null and b/figures/shallow_water/thermal_williamson_2_final.png differ diff --git a/figures/shallow_water/thermal_williamson_2_initial.png b/figures/shallow_water/thermal_williamson_2_initial.png new file mode 100644 index 000000000..0f53c1da5 Binary files /dev/null and b/figures/shallow_water/thermal_williamson_2_initial.png differ diff --git a/figures/shallow_water/williamson_2_final.png b/figures/shallow_water/williamson_2_final.png new file mode 100644 index 000000000..77fbdc8b3 Binary files /dev/null and b/figures/shallow_water/williamson_2_final.png differ diff --git a/figures/shallow_water/williamson_2_initial.png b/figures/shallow_water/williamson_2_initial.png new file mode 100644 index 000000000..dee716023 Binary files /dev/null and b/figures/shallow_water/williamson_2_initial.png differ diff --git a/figures/shallow_water/williamson_5_final.png b/figures/shallow_water/williamson_5_final.png new file mode 100644 index 000000000..dc23560b7 Binary files /dev/null and b/figures/shallow_water/williamson_5_final.png differ diff --git a/figures/shallow_water/williamson_5_initial.png b/figures/shallow_water/williamson_5_initial.png new file mode 100644 index 000000000..ae18f58f6 Binary files /dev/null and b/figures/shallow_water/williamson_5_initial.png differ diff --git a/gusto/core/__init__.py b/gusto/core/__init__.py index 163f9ddff..432c66b9d 100644 --- a/gusto/core/__init__.py +++ b/gusto/core/__init__.py @@ -1,11 +1,12 @@ -from gusto.core.configuration import * # noqa -from gusto.core.coordinates import * # noqa -from gusto.core.coord_transforms import * # noqa -from gusto.core.domain import * # noqa -from gusto.core.fields import * # noqa -from gusto.core.function_spaces import * # noqa -from gusto.core.io import * # noqa -from gusto.core.kernels import * # noqa -from gusto.core.labels import * # noqa -from gusto.core.logging import * # noqa -from gusto.core.meshes import * # noqa \ No newline at end of file +from gusto.core.configuration import * # noqa +from gusto.core.conservative_projection import * # noqa +from gusto.core.coordinates import * # noqa +from gusto.core.coord_transforms import * # noqa +from gusto.core.domain import * # noqa +from gusto.core.fields import * # noqa +from gusto.core.function_spaces import * # noqa +from gusto.core.io import * # noqa +from gusto.core.kernels import * # noqa +from gusto.core.labels import * # noqa +from gusto.core.logging import * # noqa +from gusto.core.meshes import * # noqa \ No newline at end of file diff --git a/gusto/core/configuration.py b/gusto/core/configuration.py index 252f25187..e994eb476 100644 --- a/gusto/core/configuration.py +++ b/gusto/core/configuration.py @@ -8,7 +8,8 @@ "IntegrateByParts", "TransportEquationType", "OutputParameters", "BoussinesqParameters", "CompressibleParameters", "ShallowWaterParameters", - "EmbeddedDGOptions", "RecoveryOptions", "SUPGOptions", "MixedFSOptions", + "EmbeddedDGOptions", "ConservativeEmbeddedDGOptions", "RecoveryOptions", + "ConservativeRecoveryOptions", "SUPGOptions", "MixedFSOptions", "SpongeLayerParameters", "DiffusionParameters", "BoundaryLayerParameters" ] @@ -164,6 +165,14 @@ class EmbeddedDGOptions(WrapperOptions): embedding_space = None +class 
ConservativeEmbeddedDGOptions(EmbeddedDGOptions):
+    """Specifies options for a conservative embedded DG method."""
+
+    project_back_method = 'conservative_project'
+    rho_name = None
+    orig_rho_space = None
+
+
 class RecoveryOptions(WrapperOptions):
     """Specifies options for a recovery wrapper method."""
 
@@ -177,6 +186,15 @@ class RecoveryOptions(WrapperOptions):
     broken_method = 'interpolate'
 
 
+class ConservativeRecoveryOptions(RecoveryOptions):
+    """Specifies options for a conservative recovery wrapper method."""
+
+    rho_name = None
+    orig_rho_space = None
+    project_high_method = 'conservative_project'
+    project_low_method = 'conservative_project'
+
+
 class SUPGOptions(WrapperOptions):
     """Specifies options for an SUPG scheme."""
 
@@ -185,6 +203,12 @@ class SUPGOptions(WrapperOptions):
     default = 1/sqrt(15)
     ibp = IntegrateByParts.TWICE
+
+    # Dictionary containing keys field_name and values term_labels
+    # field_name (str): name of the field for SUPG to be applied to
+    # term_label (list): labels of terms for test function to be altered
+    # by SUPG
+    suboptions = None
 
 
 class MixedFSOptions(WrapperOptions):
     """Specifies options for a mixed finite element formulation
@@ -192,7 +216,12 @@ class MixedFSOptions(WrapperOptions):
     prognostic variables."""
 
     name = "mixed_options"
-    suboptions = {}
+
+    # Dictionary containing keys field_name and values suboption
+    # field_name (str): name of the field for suboption to be applied to
+    # suboption (:class:`WrapperOptions`): Wrapper options to be applied
+    # to the provided field
+    suboptions = None
 
 
 class SpongeLayerParameters(Configuration):
diff --git a/gusto/core/conservative_projection.py b/gusto/core/conservative_projection.py
new file mode 100644
index 000000000..1ab1455d6
--- /dev/null
+++ b/gusto/core/conservative_projection.py
@@ -0,0 +1,93 @@
+"""
+This provides an operator for performing a conservative projection.
+
+The :class:`ConservativeProjector` provided in this module is an operator that
+projects a field such as a mixing ratio from one function space to another,
+weighted by a density field to ensure that mass is conserved by the projection.
+"""
+
+from firedrake import (Function, TestFunction, TrialFunction, lhs, rhs, inner,
+                       dx, LinearVariationalProblem, LinearVariationalSolver,
+                       Constant, assemble)
+import ufl
+
+__all__ = ["ConservativeProjector"]
+
+
+class ConservativeProjector(object):
+    """
+    Projects a field such that mass is conserved.
+
+    This object is designed for projecting fields such as mixing ratios of
+    tracer species from one function space to another, but weighted by density
+    such that mass is conserved by the projection.
+    """
+
+    def __init__(self, rho_source, rho_target, m_source, m_target,
+                 subtract_mean=False):
+        """
+        Args:
+            rho_source (:class:`Function`): the density to use for weighting the
+                source mixing ratio field. Can also be a :class:`ufl.Expr`.
+            rho_target (:class:`Function`): the density to use for weighting the
+                target mixing ratio field. Can also be a :class:`ufl.Expr`.
+            m_source (:class:`Function`): the source mixing ratio field. Can
+                also be a :class:`ufl.Expr`.
+            m_target (:class:`Function`): the target mixing ratio field to
+                compute.
+            subtract_mean (bool, optional): whether to solve the projection by
+                subtracting the mean value of m for both sides. This is more
+                expensive as it involves calculating the mean, but will ensure
+                preservation of a constant when projecting to a continuous
+                space. Defaults to False.
+
+        Raises:
+            RuntimeError: the geometric shape of the two rho fields must be equal.
+            RuntimeError: the geometric shape of the two m fields must be equal.
+        """
+
+        self.subtract_mean = subtract_mean
+
+        if not isinstance(rho_source, (ufl.core.expr.Expr, Function)):
+            raise ValueError("Can only recover UFL expression or Functions not '%s'" % type(rho_source))
+
+        if not isinstance(rho_target, (ufl.core.expr.Expr, Function)):
+            raise ValueError("Can only recover UFL expression or Functions not '%s'" % type(rho_target))
+
+        if not isinstance(m_source, (ufl.core.expr.Expr, Function)):
+            raise ValueError("Can only recover UFL expression or Functions not '%s'" % type(m_source))
+
+        # Check shape values
+        if m_source.ufl_shape != m_target.ufl_shape:
+            raise RuntimeError('Shape mismatch between source %s and target function spaces %s in project' % (m_source.ufl_shape, m_target.ufl_shape))
+
+        if rho_source.ufl_shape != rho_target.ufl_shape:
+            raise RuntimeError('Shape mismatch between source %s and target function spaces %s in project' % (rho_source.ufl_shape, rho_target.ufl_shape))
+
+        self.m_source = m_source
+        self.m_target = m_target
+
+        V = self.m_target.function_space()
+        mesh = V.mesh()
+
+        self.m_mean = Constant(0.0, domain=mesh)
+        self.volume = assemble(Constant(1.0, domain=mesh)*dx)
+
+        test = TestFunction(V)
+        m_trial = TrialFunction(V)
+        eqn = (rho_source*inner(test, m_source - self.m_mean)*dx
+               - rho_target*inner(test, m_trial - self.m_mean)*dx)
+        problem = LinearVariationalProblem(lhs(eqn), rhs(eqn), self.m_target)
+        self.solver = LinearVariationalSolver(problem)
+
+    def project(self):
+        """Apply the projection."""
+
+        # Compute mean value
+        if self.subtract_mean:
+            self.m_mean.assign(assemble(self.m_source*dx) / self.volume)
+
+        # Solve projection
+        self.solver.solve()
+
+        return self.m_target
diff --git a/gusto/core/coordinates.py b/gusto/core/coordinates.py
index 69aebbf13..28862e176 100644
--- a/gusto/core/coordinates.py
+++ b/gusto/core/coordinates.py
@@ -99,7 +99,7 @@ def register_space(self, domain, space_name):
         space = domain.spaces(space_name)
 
         # Use the appropriate scalar function space if the space is vector
-        if np.prod(space.ufl_element().value_shape) > 1:
+        if np.prod(space.value_shape) > 1:
             # TODO: get scalar space, and only compute coordinates if necessary
             logger.warning(f'Space {space_name} has more than one dimension, '
                            + 'and coordinates used for netCDF output have not '
diff --git a/gusto/core/io.py b/gusto/core/io.py
index a9a09f1fe..6f8f74712 100644
--- a/gusto/core/io.py
+++ b/gusto/core/io.py
@@ -13,14 +13,22 @@
 from pyop2.mpi import MPI
 import numpy as np
 from gusto.core.logging import logger, update_logfile_location
+from collections import namedtuple
 
-__all__ = ["pick_up_mesh", "IO"]
+__all__ = ["pick_up_mesh", "IO", "TimeData"]
 
 
 class GustoIOError(IOError):
     pass
 
 
+# A named tuple object encapsulating data about timing
+TimeData = namedtuple(
+    'TimeData',
+    ['t', 'step', 'initial_steps', 'last_ref_update_time']
+)
+
+
 def pick_up_mesh(output, mesh_name):
     """
     Picks up a checkpointed mesh.
This must be the first step of any model being @@ -531,7 +539,14 @@ def setup_dump(self, state_fields, t, pick_up=False): # dump initial fields if not pick_up: - self.dump(state_fields, t, step=1) + step = 1 + last_ref_update_time = None + initial_steps = None + time_data = TimeData( + t=t, step=step, initial_steps=initial_steps, + last_ref_update_time=last_ref_update_time + ) + self.dump(state_fields, time_data) def pick_up_from_checkpoint(self, state_fields): """ @@ -541,7 +556,10 @@ def pick_up_from_checkpoint(self, state_fields): state_fields (:class:`StateFields`): the model's field container. Returns: - float: the checkpointed model time. + tuple of (`time_data`, `reference_profiles`): where `time_data` + itself is a named tuple containing the timing data. + The `reference_profiles` are a list of (`field_name`, expr) + pairs describing the reference profile fields. """ # -------------------------------------------------------------------- # @@ -602,6 +620,13 @@ def pick_up_from_checkpoint(self, state_fields): except AttributeError: initial_steps = None + # Try to pick up number last_ref_update_time + # Not compulsory so errors allowed + try: + last_ref_update_time = chk.read_attribute("/", "last_ref_update_time") + except AttributeError: + last_ref_update_time = None + # Finally pick up time and step number t = chk.read_attribute("/", "time") step = chk.read_attribute("/", "step") @@ -632,6 +657,13 @@ def pick_up_from_checkpoint(self, state_fields): else: initial_steps = None + # Try to pick up last reference profile update time + # Not compulsory so errors allowed + if chk.has_attr("/", "last_ref_update_time"): + last_ref_update_time = chk.get_attr("/", "last_ref_update_time") + else: + last_ref_update_time = None + # Finally pick up time t = chk.get_attr("/", "time") step = chk.get_attr("/", "step") @@ -647,9 +679,14 @@ def pick_up_from_checkpoint(self, state_fields): if hasattr(diagnostic_field, "init_field_set"): diagnostic_field.init_field_set = True - return t, reference_profiles, step, initial_steps + time_data = TimeData( + t=t, step=step, initial_steps=initial_steps, + last_ref_update_time=last_ref_update_time + ) + + return time_data, reference_profiles - def dump(self, state_fields, t, step, initial_steps=None): + def dump(self, state_fields, time_data): """ Dumps all of the required model output. @@ -659,12 +696,20 @@ def dump(self, state_fields, t, step, initial_steps=None): Args: state_fields (:class:`StateFields`): the model's field container. - t (float): the simulation's current time. - step (int): the number of time steps. - initial_steps (int, optional): the number of initial time steps - completed by a multi-level time scheme. Defaults to None. + time_data (namedtuple): contains information relating to the time in + the simulation. 
The tuple is structured as follows: + - t: current time in s + - step: the index of the time step + - initial_steps: number of initial time steps completed by a + multi-level time scheme (could be None) + - last_ref_update_time: the last time in s that the reference + profiles were updated (could be None) """ output = self.output + t = time_data.t + step = time_data.step + initial_steps = time_data.initial_steps + last_ref_update_time = time_data.last_ref_update_time # Diagnostics: # Compute diagnostic fields @@ -688,6 +733,8 @@ def dump(self, state_fields, t, step, initial_steps=None): self.chkpt.write_attribute("/", "step", step) if initial_steps is not None: self.chkpt.write_attribute("/", "initial_steps", initial_steps) + if last_ref_update_time is not None: + self.chkpt.write_attribute("/", "last_ref_update_time", last_ref_update_time) else: with CheckpointFile(self.chkpt_path, 'w') as chk: chk.save_mesh(self.domain.mesh) @@ -697,6 +744,8 @@ def dump(self, state_fields, t, step, initial_steps=None): chk.set_attr("/", "step", step) if initial_steps is not None: chk.set_attr("/", "initial_steps", initial_steps) + if last_ref_update_time is not None: + chk.set_attr("/", "last_ref_update_time", last_ref_update_time) if (next(self.dumpcount) % output.dumpfreq) == 0: if output.dump_nc: diff --git a/gusto/core/labels.py b/gusto/core/labels.py index 5f928d6d2..15b687f65 100644 --- a/gusto/core/labels.py +++ b/gusto/core/labels.py @@ -97,9 +97,12 @@ def __call__(self, target, value=None): linearisation = Label("linearisation", validator=lambda value: type(value) in [LabelledForm, Term]) mass_weighted = Label("mass_weighted", validator=lambda value: type(value) in [LabelledForm, Term]) ibp_label = Label("ibp", validator=lambda value: type(value) == IntegrateByParts) +all_but_last = Label("all_but_last", validator=lambda value: type(value) in [LabelledForm, Term]) + # labels for terms in the equations time_derivative = Label("time_derivative") +nonlinear_time_derivative = Label("nonlinear_time_derivative") transport = Label("transport", validator=lambda value: type(value) == TransportEquationType) diffusion = Label("diffusion") diff --git a/gusto/core/logging.py b/gusto/core/logging.py index c6df8eab4..552dac2af 100644 --- a/gusto/core/logging.py +++ b/gusto/core/logging.py @@ -70,7 +70,7 @@ def capture_exceptions(exception_type, exception_value, traceback, logger=logger logconsole_level = os.environ.get("GUSTO_CONSOLE_LOG_LEVEL", INFO) log_level_list = [log_level, logfile_level, logconsole_level] log_levels = [ - logging.getLevelNamesMapping().get(x) if isinstance(x, str) + logging.getLevelName(x) if isinstance(x, str) else x for x in log_level_list ] diff --git a/gusto/equations/common_forms.py b/gusto/equations/common_forms.py index 91ce01368..8bb606511 100644 --- a/gusto/equations/common_forms.py +++ b/gusto/equations/common_forms.py @@ -14,7 +14,6 @@ "kinetic_energy_form", "advection_equation_circulation_form", "diffusion_form", "diffusion_form_1d", "linear_advection_form", "linear_continuity_form", - "linear_continuity_form_1d", "split_continuity_form", "tracer_conservative_form"] @@ -134,26 +133,7 @@ def linear_continuity_form(test, qbar, ubar): :class:`LabelledForm`: a labelled transport form. """ - L = qbar*test*div(ubar)*dx - form = transporting_velocity(L, ubar) - - return transport(form, TransportEquationType.conservative) - - -def linear_continuity_form_1d(test, qbar, ubar): - """ - The form corresponding to the linearised continuity transport operator. 
- - Args: - test (:class:`TestFunction`): the test function. - qbar (:class:`ufl.Expr`): the variable to be transported. - ubar (:class:`ufl.Expr`): the transporting velocity. - - Returns: - :class:`LabelledForm`: a labelled transport form. - """ - - L = qbar*test*ubar.dx(0)*dx + L = test*div(qbar*ubar)*dx form = transporting_velocity(L, ubar) return transport(form, TransportEquationType.conservative) diff --git a/gusto/equations/compressible_euler_equations.py b/gusto/equations/compressible_euler_equations.py index 99c300170..58a9902ce 100644 --- a/gusto/equations/compressible_euler_equations.py +++ b/gusto/equations/compressible_euler_equations.py @@ -282,7 +282,7 @@ class HydrostaticCompressibleEulerEquations(CompressibleEulerEquations): equations. """ - def __init__(self, domain, parameters, sponge=None, + def __init__(self, domain, parameters, sponge_options=None, extra_terms=None, space_names=None, linearisation_map='default', u_transport_option="vector_invariant_form", diffusion_options=None, @@ -295,8 +295,9 @@ def __init__(self, domain, parameters, sponge=None, mesh and the compatible function spaces. parameters (:class:`Configuration`, optional): an object containing the model's physical parameters. - sponge (:class:`ufl.Expr`, optional): an expression for a sponge - layer. Defaults to None. + sponge_options (:class:`SpongeLayerParameters`, optional): any + parameters for applying a sponge layer to the upper boundary. + Defaults to None. extra_terms (:class:`ufl.Expr`, optional): any extra terms to be included in the equation set. Defaults to None. space_names (dict, optional): a dictionary of strings for names of @@ -329,7 +330,7 @@ def __init__(self, domain, parameters, sponge=None, NotImplementedError: only mixing ratio tracers are implemented. """ - super().__init__(domain, parameters, sponge=sponge, + super().__init__(domain, parameters, sponge_options=sponge_options, extra_terms=extra_terms, space_names=space_names, linearisation_map=linearisation_map, u_transport_option=u_transport_option, @@ -338,42 +339,49 @@ def __init__(self, domain, parameters, sponge=None, active_tracers=active_tracers, max_quad_deg=max_quad_deg) + # Replace self.residual = self.residual.label_map( lambda t: t.has_label(time_derivative), - map_if_true=lambda t: hydrostatic(t, self.hydrostatic_projection(t)) + map_if_true=lambda t: self.hydrostatic_projection(t, 'u') ) + # Add an extra hydrostatic term + u_idx = self.field_names.index('u') + u = split(self.X)[u_idx] k = self.domain.k - u = split(self.X)[0] self.residual += hydrostatic( subject( prognostic( - -inner(k, self.tests[0]) * inner(k, u) * dx, "u"), - self.X)) + -inner(k, self.tests[u_idx]) * inner(k, u) * dx, "u"), + self.X + ) + ) - def hydrostatic_projection(self, t): + def hydrostatic_projection(self, term, field_name): """ Performs the 'hydrostatic' projection. Takes a term involving a vector prognostic variable and replaces the - prognostic with only its horizontal components. + prognostic with only its horizontal components. It also adds the + 'hydrostatic' label to that term. Args: - t (:class:`Term`): the term to perform the projection upon. + term (:class:`Term`): the term to perform the projection upon. + field_name (str): the prognostic field to make hydrostatic. Returns: :class:`LabelledForm`: the labelled form containing the new term. - - Raises: - AssertionError: spherical geometry is not yet implemented. """ - # TODO: make this more general, i.e. 
should work on the sphere - if self.domain.on_sphere: - raise NotImplementedError("The hydrostatic projection is not yet " - + "implemented for spherical geometry") - k = Constant((*self.domain.k, 0, 0)) - X = t.get(subject) - - new_subj = X - k * inner(X, k) - return replace_subject(new_subj)(t) + f_idx = self.field_names.index(field_name) + k = self.domain.k + X = term.get(subject) + field = split(X)[f_idx] + + new_subj = field - inner(field, k) * k + # In one step: + # - set up the replace_subject routine (which returns a function) + # - call that function on the supplied `term` argument, + # to replace the subject with the new hydrostatic subject + # - add the hydrostatic label + return replace_subject(new_subj, old_idx=f_idx)(term) diff --git a/gusto/equations/prognostic_equations.py b/gusto/equations/prognostic_equations.py index 326966d04..3c86cd28e 100644 --- a/gusto/equations/prognostic_equations.py +++ b/gusto/equations/prognostic_equations.py @@ -10,7 +10,8 @@ replace_subject, replace_trial_function ) from gusto.core import PrescribedFields -from gusto.core.labels import time_derivative, prognostic, linearisation, mass_weighted +from gusto.core.labels import (nonlinear_time_derivative, time_derivative, + prognostic, linearisation, mass_weighted) from gusto.equations.common_forms import ( advection_form, continuity_form, tracer_conservative_form ) @@ -169,8 +170,8 @@ def generate_mass_terms(self): ref_density_idx = self.field_names.index(self.active_tracers[j].density_name) ref_density = split(self.X)[ref_density_idx] q = prog*ref_density - mass_weighted_form = time_derivative(subject(prognostic(inner(q, test)*dx, - field_name), self.X)) + mass_weighted_form = nonlinear_time_derivative(time_derivative( + subject(prognostic(inner(q, test)*dx, field_name), self.X))) mass = mass_weighted(standard_mass_form, mass_weighted_form) if i == 0: @@ -310,6 +311,18 @@ def add_tracers_to_prognostics(self, domain, active_tracers): name of the active tracer. """ + # If there are any conservatively transported tracers, ensure + # that the reference density, if it is also an active tracer, + # is indexed earlier. + for i in range(len(active_tracers) - 1): + tracer = active_tracers[i] + if tracer.transport_eqn == TransportEquationType.tracer_conservative: + ref_density = next((x for x in active_tracers if x.name == tracer.density_name), tracer) + j = active_tracers.index(ref_density) + if j > i: + # Swap the indices of the tracer and the reference density + active_tracers[i], active_tracers[j] = active_tracers[j], active_tracers[i] + # Loop through tracer fields and add field names and spaces for tracer in active_tracers: if isinstance(tracer, ActiveTracer): diff --git a/gusto/equations/shallow_water_equations.py b/gusto/equations/shallow_water_equations.py index 7e7453656..854266952 100644 --- a/gusto/equations/shallow_water_equations.py +++ b/gusto/equations/shallow_water_equations.py @@ -9,7 +9,7 @@ advection_form, advection_form_1d, continuity_form, continuity_form_1d, vector_invariant_form, kinetic_energy_form, advection_equation_circulation_form, diffusion_form_1d, - linear_continuity_form, linear_continuity_form_1d + linear_continuity_form ) from gusto.equations.prognostic_equations import PrognosticEquationSet @@ -189,14 +189,15 @@ def __init__(self, domain, parameters, fexpr=None, bexpr=None, self.X) residual += topography_form - # thermal source terms not involving topography + # thermal source terms not involving topography. 
+ # label these as the equivalent pressure gradient term if self.thermal: n = FacetNormal(domain.mesh) - source_form = subject(prognostic(-D*div(b*w)*dx - - 0.5*b*div(D*w)*dx - + jump(b*w, n)*avg(D)*dS - + 0.5*jump(D*w, n)*avg(b)*dS, - 'u'), self.X) + source_form = pressure_gradient(subject(prognostic(-D*div(b*w)*dx + - 0.5*b*div(D*w)*dx + + jump(b*w, n)*avg(D)*dS + + 0.5*jump(D*w, n)*avg(b)*dS, + 'u'), self.X)) residual += source_form # -------------------------------------------------------------------- # @@ -360,7 +361,7 @@ def __init__(self, domain, parameters, # Transport term needs special linearisation if self.linearisation_map(D_adv.terms[0]): - linear_D_adv = linear_continuity_form_1d(phi, H, u_trial) + linear_D_adv = linear_continuity_form(phi, H, u_trial) # Add linearisation to D_adv D_adv = linearisation(D_adv, linear_D_adv) diff --git a/gusto/initialisation/hydrostatic_initialisation.py b/gusto/initialisation/hydrostatic_initialisation.py index f4be78451..53bf537e8 100644 --- a/gusto/initialisation/hydrostatic_initialisation.py +++ b/gusto/initialisation/hydrostatic_initialisation.py @@ -12,9 +12,10 @@ from gusto.recovery import Recoverer, BoundaryMethod -__all__ = ["boussinesq_hydrostatic_balance", - "compressible_hydrostatic_balance", "remove_initial_w", - "saturated_hydrostatic_balance", "unsaturated_hydrostatic_balance"] +__all__ = [ + "boussinesq_hydrostatic_balance", "compressible_hydrostatic_balance", + "saturated_hydrostatic_balance", "unsaturated_hydrostatic_balance" +] def boussinesq_hydrostatic_balance(equation, b0, p0, top=False, params=None): @@ -219,23 +220,6 @@ def compressible_hydrostatic_balance(equation, theta0, rho0, exner0=None, rho0.interpolate(thermodynamics.rho(parameters, theta0, exner)) -def remove_initial_w(u): - """ - Removes the vertical component of a velocity field. - - Args: - u (:class:`Function`): the velocity field to be altered. 
- """ - Vu = u.function_space() - Vv = FunctionSpace(Vu._ufl_domain, Vu.ufl_element()._elements[-1]) - bc = DirichletBC(Vu[0], 0.0, "bottom") - bc.apply(u) - uv = Function(Vv).project(u) - ustar = Function(u.function_space()).project(uv) - uin = Function(u.function_space()).assign(u - ustar) - u.assign(uin) - - def saturated_hydrostatic_balance(equation, state_fields, theta_e, mr_t, exner0=None, top=False, exner_boundary=Constant(1.0), diff --git a/gusto/physics/shallow_water_microphysics.py b/gusto/physics/shallow_water_microphysics.py index 8ffdcf7da..90cb5328d 100644 --- a/gusto/physics/shallow_water_microphysics.py +++ b/gusto/physics/shallow_water_microphysics.py @@ -233,8 +233,6 @@ def __init__(self, equation, saturation_curve, # Check for the correct fields assert vapour_name in equation.field_names, f"Field {vapour_name} does not exist in the equation set" assert cloud_name in equation.field_names, f"Field {cloud_name} does not exist in the equation set" - self.Vv_idx = equation.field_names.index(vapour_name) - self.Vc_idx = equation.field_names.index(cloud_name) if self.convective_feedback: assert "D" in equation.field_names, "Depth field must exist for convective feedback" @@ -244,21 +242,21 @@ def __init__(self, equation, saturation_curve, assert "b" in equation.field_names, "Buoyancy field must exist for thermal feedback" assert beta2 is not None, "If thermal feedback is used, beta2 parameter must be specified" - # Obtain function spaces and functions + # Obtain function spaces W = equation.function_space + self.Vv_idx = equation.field_names.index(vapour_name) + self.Vc_idx = equation.field_names.index(cloud_name) Vv = W.sub(self.Vv_idx) Vc = W.sub(self.Vc_idx) + # order for V_idxs is vapour, cloud V_idxs = [self.Vv_idx, self.Vc_idx] - # Source functions for both vapour and cloud - self.water_v = Function(Vv) - self.cloud = Function(Vc) - # depth needed if convective feedback if self.convective_feedback: self.VD_idx = equation.field_names.index("D") VD = W.sub(self.VD_idx) self.D = Function(VD) + # order for V_idxs is now vapour, cloud, depth V_idxs.append(self.VD_idx) # buoyancy needed if thermal feedback @@ -266,6 +264,7 @@ def __init__(self, equation, saturation_curve, self.Vb_idx = equation.field_names.index("b") Vb = W.sub(self.Vb_idx) self.b = Function(Vb) + # order for V_idxs is now vapour, cloud, depth, buoyancy V_idxs.append(self.Vb_idx) # tau is the timescale for condensation/evaporation (may or may not be the timestep) @@ -289,6 +288,8 @@ def __init__(self, equation, saturation_curve, self.saturation_curve = saturation_curve # Saturation adjustment expression, adjusted to stop negative values + self.water_v = Function(Vv) + self.cloud = Function(Vc) sat_adj_expr = (self.water_v - self.saturation_curve) / self.tau sat_adj_expr = conditional(sat_adj_expr < 0, max_value(sat_adj_expr, @@ -309,17 +310,22 @@ def __init__(self, equation, saturation_curve, self.gamma_v = gamma_v # Factors for multiplying source for different variables + # the order matches the order in V_idx (vapour, cloud, depth, buoyancy) factors = [self.gamma_v, -self.gamma_v] if convective_feedback: factors.append(self.gamma_v*beta1) if thermal_feedback: - factors.append(parameters.g*self.gamma_v*beta2) + factors.append(self.gamma_v*beta2) # Add terms to equations and make interpolators + # sources have the same order as V_idxs and factors self.source = [Function(Vc) for factor in factors] self.source_interpolators = [Interpolator(sat_adj_expr*factor, source) for factor, source in zip(factors, 
self.source)] + # test functions have the same order as factors and sources (vapour, + # cloud, depth, buoyancy) so that the correct test function multiplies + # each source term tests = [equation.tests[idx] for idx in V_idxs] # Add source terms to residual diff --git a/gusto/recovery/recovery.py b/gusto/recovery/recovery.py index ad8618470..b88c6ad6a 100644 --- a/gusto/recovery/recovery.py +++ b/gusto/recovery/recovery.py @@ -332,20 +332,20 @@ def find_eff_coords(V0): vec_DG1 = VectorFunctionSpace(mesh, DG1_element) x = SpatialCoordinate(mesh) - if isinstance(V0.ufl_element(), VectorElement) or V0.ufl_element().value_size > 1: + if isinstance(V0.ufl_element(), VectorElement) or V0.value_size > 1: eff_coords_list = [] V0_coords_list = [] # treat this separately for each component - for i in range(V0.ufl_element().value_size): + for i in range(V0.value_size): # fill an d-dimensional list with i-th coordinate - x_list = [x[i] for j in range(V0.ufl_element().value_size)] + x_list = [x[i] for j in range(V0.value_size)] # the i-th element in V0_coords_list is a vector with all components the i-th coord ith_V0_coords = Function(V0).project(as_vector(x_list)) V0_coords_list.append(ith_V0_coords) - for i in range(V0.ufl_element().value_size): + for i in range(V0.value_size): # slice through V0_coords_list to obtain the coords of the DOFs for that component x_list = [V0_coords[i] for V0_coords in V0_coords_list] diff --git a/gusto/recovery/recovery_options.py b/gusto/recovery/recovery_options.py index 936e30213..24c499c87 100644 --- a/gusto/recovery/recovery_options.py +++ b/gusto/recovery/recovery_options.py @@ -16,15 +16,15 @@ def __init__(self, domain, boundary_method=None, use_vector_spaces=False): Args: domain (:class:`Domain`): the model's domain object, containing the mesh and the compatible function spaces. - - boundary_method (:variable:'dict', optional): A dictionary containing the space - the boundary method is to be applied to along with specified method. Acceptable keys are "DG", - "HDiv" and "theta". acceptable values are (BoundaryMethod.taylor/hcurl/extruded), - passed as ('space', 'boundary method'). Defaults to None - - use_vector_spaces (bool, optional):. Determines if we need to use DG / CG - space for the embedded and recovery space for the HDiv field instead of the usual - HDiv, HCurl spaces. Defaults to False + boundary_method (:variable:'dict', optional): A dictionary + containing the space the boundary method is to be applied to + along with specified method. Acceptable keys are "DG", "HDiv" + and "theta". Acceptable values are + (BoundaryMethod.taylor/hcurl/extruded). Defaults to None. + use_vector_spaces (bool, optional):. Determines if we need to use + the vector DG1 / CG1 space for the embedded and recovery space + for the HDiv field instead of the usual HDiv, HCurl spaces. + Defaults to False. """ family = domain.family mesh = domain.mesh @@ -36,7 +36,7 @@ def __init__(self, domain, boundary_method=None, use_vector_spaces=False): valid_keys = ['DG', 'HDiv', 'theta'] if boundary_method is not None: - for key in boundary_method: + for key in boundary_method.keys(): if key not in valid_keys: raise KeyError(f'Recovery spaces: boundary method key {key} not valid. 
Valid keys are DG, HDiv, theta') @@ -47,7 +47,7 @@ def __init__(self, domain, boundary_method=None, use_vector_spaces=False): # Check if extruded and if so builds theta spaces if hasattr(mesh, "_base_mesh"): # check if boundary method is present - if hasattr(boundary_method, 'theta'): + if boundary_method is not None and 'theta' in boundary_method.keys(): theta_boundary_method = boundary_method['theta'] else: theta_boundary_method = None @@ -71,7 +71,7 @@ def __init__(self, domain, boundary_method=None, use_vector_spaces=False): # ---------------------------------------------------------------------- # Building the DG options # ---------------------------------------------------------------------- - if hasattr(boundary_method, 'DG'): + if boundary_method is not None and 'DG' in boundary_method.keys(): DG_boundary_method = boundary_method['DG'] else: DG_boundary_method = None @@ -91,7 +91,7 @@ def __init__(self, domain, boundary_method=None, use_vector_spaces=False): # Building HDiv options # ---------------------------------------------------------------------- - if hasattr(boundary_method, 'HDiv'): + if boundary_method is not None and 'HDiv' in boundary_method.keys(): HDiv_boundary_method = boundary_method['HDiv'] else: HDiv_boundary_method = None @@ -102,16 +102,17 @@ def __init__(self, domain, boundary_method=None, use_vector_spaces=False): HDiv_embedding_Space = Vu_DG1 HDiv_recovered_Space = Vu_CG1 + project_high_method = 'interpolate' else: - HDiv_embedding_Space = self.de_Rham.HDiv HDiv_recovered_Space = self.de_Rham.HCurl + project_high_method = 'project' self.HDiv_options = RecoveryOptions(embedding_space=HDiv_embedding_Space, recovered_space=HDiv_recovered_Space, injection_method='recover', - project_high_method='project', + project_high_method=project_high_method, project_low_method='project', broken_method='project', boundary_method=HDiv_boundary_method) diff --git a/gusto/recovery/reversible_recovery.py b/gusto/recovery/reversible_recovery.py index d9ad661d1..fc8fbd332 100644 --- a/gusto/recovery/reversible_recovery.py +++ b/gusto/recovery/reversible_recovery.py @@ -3,9 +3,12 @@ higher-order function space. """ +from gusto.core.conservative_projection import ConservativeProjector from firedrake import (Projector, Function, Interpolator) from .recovery import Recoverer +__all__ = ["ReversibleRecoverer", "ConservativeRecoverer"] + class ReversibleRecoverer(object): """ @@ -13,10 +16,11 @@ class ReversibleRecoverer(object): field into a higher-order discontinuous space. This uses the recovery operator, but with further adjustments to ensure reversibility. - :arg source_field: the source field. - :arg target_field: the target_field. - :arg reconstruct_opts: an object containing the various options for the - reconstruction. + Args: + source_field (:class:`Function`): the source field. + target_field (:class:`Function`): the target field. + reconstruct_opts (:class:`RecoveryOptions`): an object containing the + various options for the reconstruction. """ def __init__(self, source_field, target_field, reconstruct_opts): @@ -92,3 +96,63 @@ def project(self): self.q_corr_low.assign(self.q_low - self.q_corr_low) self.injector.interpolate() if self.interp_inj else self.injector.project() self.q_high.assign(self.q_corr_high + self.q_rec_high) + + +class ConservativeRecoverer(object): + """ + An object for performing a reconstruction of a low-order discontinuous + field into a higher-order discontinuous space, but such that mass is + conserved. 
This uses the recovery operator, but with further adjustments to + ensure both reversibility and mass conservation. + + Args: + source_field (:class:`Function`): the source field. + target_field (:class:`Function`): the target field. + source_density (:class:`Function`): the source density field. + target_density (:class:`Function`): the target density field. + reconstruct_opts (:class:`RecoveryOptions`): an object containing the + various options for the reconstruction. + """ + def __init__(self, source_field, target_field, source_density, + target_density, reconstruct_opts): + + self.opts = reconstruct_opts + + # Declare the fields used by the reconstructor + self.q_low = source_field + self.q_high = target_field + self.q_recovered = Function(self.opts.recovered_space) + self.q_corr_low = Function(source_field.function_space()) + self.q_corr_high = Function(target_field.function_space()) + self.q_rec_high = Function(target_field.function_space()) + + # -------------------------------------------------------------------- # + # Set up the operators for different transformations + # -------------------------------------------------------------------- # + + # Does recovery by first projecting into broken space then averaging + self.recoverer = Recoverer(self.q_low, self.q_recovered, + method=self.opts.broken_method, + boundary_method=self.opts.boundary_method) + + # Obtain the recovered field in the higher order space + self.projector_high = Projector(self.q_recovered, self.q_rec_high) + + # Obtain the correction in the lower order space + # Swap density arguments! + self.projector_low = ConservativeProjector(target_density, source_density, + self.q_rec_high, self.q_corr_low, + subtract_mean=True) + + # Final injection operator + # Should identify low order field in higher order space + self.injector = ConservativeProjector(source_density, target_density, + self.q_corr_low, self.q_corr_high) + + def project(self): + self.recoverer.project() + self.projector_high.project() + self.projector_low.project() + self.q_corr_low.assign(self.q_low - self.q_corr_low) + self.injector.project() + self.q_high.assign(self.q_corr_high + self.q_rec_high) diff --git a/gusto/solvers/linear_solvers.py b/gusto/solvers/linear_solvers.py index 8214d2ba2..72dcea7a9 100644 --- a/gusto/solvers/linear_solvers.py +++ b/gusto/solvers/linear_solvers.py @@ -27,7 +27,8 @@ from abc import ABCMeta, abstractmethod, abstractproperty -__all__ = ["BoussinesqSolver", "LinearTimesteppingSolver", "CompressibleSolver", "ThermalSWSolver", "MoistConvectiveSWSolver"] +__all__ = ["BoussinesqSolver", "LinearTimesteppingSolver", "CompressibleSolver", + "ThermalSWSolver", "MoistConvectiveSWSolver"] class TimesteppingSolver(object, metaclass=ABCMeta): @@ -374,6 +375,20 @@ def L_tr(f): python_context = self.hybridized_solver.snes.ksp.pc.getPythonContext() attach_custom_monitor(python_context, logging_ksp_monitor_true_residual) + @timed_function("Gusto:UpdateReferenceProfiles") + def update_reference_profiles(self): + """ + Updates the reference profiles. 
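The recover/project/correct/inject sequence used by both recoverers can be seen in miniature in plain NumPy. The sketch below is not Gusto or Firedrake code; it uses simple cell-mean operators on a 1D grid purely to show why projecting the corrected high-order field back to the low-order space returns the original field exactly.
```python
# A minimal NumPy analogue (not Gusto/Firedrake code) of the
# recover -> project -> correct -> inject sequence, assuming a 1D grid where
# the "low-order" space holds one value per coarse cell and the
# "high-order" space holds two values per coarse cell.
import numpy as np

def recover(q_low):
    # crude higher-order reconstruction: blend each cell value with its neighbours
    padded = np.concatenate([[q_low[0]], q_low, [q_low[-1]]])
    left = 0.75 * q_low + 0.25 * padded[:-2]
    right = 0.75 * q_low + 0.25 * padded[2:]
    return np.stack([left, right], axis=1).ravel()

def project(q_high):
    # high -> low: mean of the two sub-values in each coarse cell
    return q_high.reshape(-1, 2).mean(axis=1)

def inject(q_low):
    # low -> high: constant within each coarse cell (a right-inverse of project)
    return np.repeat(q_low, 2)

q_low = np.array([1.0, 4.0, 2.0, 3.0])
q_rec_high = recover(q_low)                  # recovered high-order field
q_corr_low = q_low - project(q_rec_high)     # low-order correction
q_high = q_rec_high + inject(q_corr_low)     # corrected high-order field

# Reversibility: projecting back recovers the original low-order field, so
# the (equal-weight) cell integrals are preserved as well.
assert np.allclose(project(q_high), q_low)
```
The conservative variant additionally weights the projections by the source and target densities through `ConservativeProjector`, so that a density-weighted mass is preserved rather than the field's own cell means.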
+ """ + + with timed_region("Gusto:HybridProjectRhobar"): + logger.info('Compressible linear solver: rho average solve') + self.rho_avg_solver.solve() + + with timed_region("Gusto:HybridProjectExnerbar"): + logger.info('Compressible linear solver: Exner average solve') + self.exner_avg_solver.solve() + @timed_function("Gusto:LinearSolve") def solve(self, xrhs, dy): """ @@ -387,15 +402,6 @@ def solve(self, xrhs, dy): """ self.xrhs.assign(xrhs) - # TODO: can we avoid computing these each time the solver is called? - with timed_region("Gusto:HybridProjectRhobar"): - logger.info('Compressible linear solver: rho average solve') - self.rho_avg_solver.solve() - - with timed_region("Gusto:HybridProjectExnerbar"): - logger.info('Compressible linear solver: Exner average solve') - self.exner_avg_solver.solve() - # Solve the hybridized system logger.info('Compressible linear solver: hybridized solve') self.hybridized_solver.solve() @@ -660,7 +666,7 @@ def _setup_solver(self): - beta_u * 0.5 * bbar * div(w*(D-Dbar)) * dx + beta_u * 0.5 * jump((D-Dbar)*w, n) * avg(bbar) * dS + inner(phi, (D - D_in)) * dx - + beta_d * phi * Dbar * div(u) * dx + + beta_d * phi * div(Dbar*u) * dx ) if 'coriolis' in equation.prescribed_fields._field_names: @@ -876,7 +882,7 @@ def _setup_solver(self): inner(w, (u - u_in)) * dx - beta_u * (D - Dbar) * div(w*g) * dx + inner(phi, (D - D_in)) * dx - + beta_d * phi * Dbar * div(u) * dx + + beta_d * phi * div(Dbar*u) * dx ) if 'coriolis' in equation.prescribed_fields._field_names: diff --git a/gusto/solvers/preconditioners.py b/gusto/solvers/preconditioners.py index a471b18bc..1e1e29940 100644 --- a/gusto/solvers/preconditioners.py +++ b/gusto/solvers/preconditioners.py @@ -8,7 +8,6 @@ from firedrake.petsc import PETSc from gusto.recovery.recovery_kernels import AverageKernel, AverageWeightings from pyop2.profiling import timed_region, timed_function -from pyop2.utils import as_tuple from functools import partial @@ -72,7 +71,7 @@ def initialize(self, pc): for i, Vi in enumerate(V): # Vector-valued spaces will have a non-empty value_shape - if Vi.ufl_element().value_shape: + if Vi.value_shape: self.vidx = i else: self.pidx = i @@ -144,7 +143,7 @@ def initialize(self, pc): if isinstance(subdom, str): neumann_subdomains |= set([subdom]) else: - neumann_subdomains |= set(as_tuple(subdom, int)) + neumann_subdomains |= set(subdom) # separate out the top and bottom bcs extruded_neumann_subdomains = neumann_subdomains & {"top", "bottom"} diff --git a/gusto/spatial_methods/transport_methods.py b/gusto/spatial_methods/transport_methods.py index dc2f9d6e2..f075eeadd 100644 --- a/gusto/spatial_methods/transport_methods.py +++ b/gusto/spatial_methods/transport_methods.py @@ -8,8 +8,10 @@ ) from firedrake.fml import Term, keep, drop from gusto.core.configuration import IntegrateByParts, TransportEquationType -from gusto.core.labels import (prognostic, transport, transporting_velocity, ibp_label, - mass_weighted) +from gusto.core.labels import ( + prognostic, transport, transporting_velocity, ibp_label, mass_weighted, + all_but_last +) from gusto.core.logging import logger from gusto.spatial_methods.spatial_methods import SpatialMethod @@ -83,6 +85,10 @@ def replace_form(self, equation): # Create new term new_term = Term(self.form.form, original_term.labels) + # Add all_but_last form + if hasattr(self, "all_but_last_form"): + new_term = all_but_last(new_term, self.all_but_last_form) + # Check if this is a conservative transport if original_term.has_label(mass_weighted): # Extract the original 
and discretised mass_weighted terms @@ -151,7 +157,8 @@ class DGUpwind(TransportMethod): transported variable at facets. """ def __init__(self, equation, variable, ibp=IntegrateByParts.ONCE, - vector_manifold_correction=False, outflow=False): + vector_manifold_correction=False, outflow=False, + advective_then_flux=False): """ Args: equation (:class:`PrognosticEquation`): the equation, which includes @@ -163,6 +170,15 @@ def __init__(self, equation, variable, ibp=IntegrateByParts.ONCE, vector manifold correction term. Defaults to False. outflow (bool, optional): whether to include outflow at the domain boundaries, through exterior facet terms. Defaults to False. + advective_then_flux (bool, optional): whether to use the advective- + then-flux formulation. This uses the advective form of the + transport equation for all but the last steps of some + (potentially subcycled) Runge-Kutta scheme, before using the + conservative form for the final step to deliver a mass- + conserving increment. This option only makes sense to use with + Runge-Kutta, and should be used with the "linear" Runge-Kutta + formulation. Defaults to False, in which case the conservative + form is used for every step. """ super().__init__(equation, variable) @@ -170,6 +186,13 @@ def __init__(self, equation, variable, ibp=IntegrateByParts.ONCE, self.vector_manifold_correction = vector_manifold_correction self.outflow = outflow + if (advective_then_flux + and self.transport_equation_type != TransportEquationType.conservative): + raise ValueError( + 'DG Upwind: advective_then_flux form can only be used with ' + + 'the conservative form of the transport equation' + ) + # -------------------------------------------------------------------- # # Determine appropriate form to use # -------------------------------------------------------------------- # @@ -177,36 +200,52 @@ def __init__(self, equation, variable, ibp=IntegrateByParts.ONCE, if equation.domain.mesh.topological_dimension() == 1 and len(equation.domain.spaces("HDiv").shape) == 0: assert not vector_manifold_correction if self.transport_equation_type == TransportEquationType.advective: - form = upwind_advection_form_1d(self.domain, self.test, - self.field, - ibp=ibp, outflow=outflow) + form = upwind_advection_form_1d( + self.domain, self.test, self.field, ibp=ibp, + outflow=outflow + ) elif self.transport_equation_type == TransportEquationType.conservative: - form = upwind_continuity_form_1d(self.domain, self.test, - self.field, - ibp=ibp, outflow=outflow) + form = upwind_continuity_form_1d( + self.domain, self.test, self.field, ibp=ibp, + outflow=outflow + ) else: if self.transport_equation_type == TransportEquationType.advective: if vector_manifold_correction: - form = vector_manifold_advection_form(self.domain, - self.test, - self.field, ibp=ibp, - outflow=outflow) + form = vector_manifold_advection_form( + self.domain, self.test, self.field, ibp=ibp, + outflow=outflow + ) else: - form = upwind_advection_form(self.domain, self.test, - self.field, - ibp=ibp, outflow=outflow) + form = upwind_advection_form( + self.domain, self.test, self.field, ibp=ibp, + outflow=outflow + ) elif self.transport_equation_type == TransportEquationType.conservative: if vector_manifold_correction: - form = vector_manifold_continuity_form(self.domain, - self.test, - self.field, ibp=ibp, - outflow=outflow) + form = vector_manifold_continuity_form( + self.domain, self.test, self.field, ibp=ibp, + outflow=outflow + ) else: - form = upwind_continuity_form(self.domain, self.test, - self.field, - 
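Putting the new option together with the time discretisation it is designed for, a transport setup might look like the sketch below. The `eqn` and `domain` objects are placeholders rather than code from a specific example file, and it is assumed that `RungeKuttaFormulation` is exposed at the top level of `gusto` alongside the existing scheme names.
```python
# Sketch only: combining advective_then_flux transport with the linear
# Runge-Kutta formulation, as the docstring above recommends.
from gusto import DGUpwind, SSPRK3, RungeKuttaFormulation

transport_method = DGUpwind(eqn, "rho", advective_then_flux=True)
transport_scheme = SSPRK3(domain, "rho",
                          rk_formulation=RungeKuttaFormulation.linear)
```
Per the docstring, the advective form is then used for all but the final (sub)step, with the conservative form applied last to deliver a mass-conserving increment.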
ibp=ibp, outflow=outflow) + form = upwind_continuity_form( + self.domain, self.test, self.field, ibp=ibp, + outflow=outflow + ) + + if advective_then_flux and vector_manifold_correction: + self.all_but_last_form = vector_manifold_advection_form( + self.domain, self.test, self.field, ibp=ibp, + outflow=outflow + ) + + elif advective_then_flux: + self.all_but_last_form = upwind_advection_form( + self.domain, self.test, self.field, ibp=ibp, + outflow=outflow + ) elif self.transport_equation_type == TransportEquationType.circulation: if outflow: diff --git a/gusto/time_discretisation/explicit_runge_kutta.py b/gusto/time_discretisation/explicit_runge_kutta.py index 64e9545e0..dabdf93ec 100644 --- a/gusto/time_discretisation/explicit_runge_kutta.py +++ b/gusto/time_discretisation/explicit_runge_kutta.py @@ -2,17 +2,46 @@ import numpy as np +from enum import Enum from firedrake import (Function, Constant, NonlinearVariationalProblem, NonlinearVariationalSolver) -from firedrake.fml import replace_subject, all_terms, drop, keep +from firedrake.fml import replace_subject, all_terms, drop, keep, Term from firedrake.utils import cached_property +from firedrake.formmanipulation import split_form -from gusto.core.labels import time_derivative +from gusto.core.labels import time_derivative, all_but_last from gusto.core.logging import logger from gusto.time_discretisation.time_discretisation import ExplicitTimeDiscretisation -__all__ = ["ForwardEuler", "ExplicitRungeKutta", "SSPRK3", "RK4", "Heun"] +__all__ = [ + "ForwardEuler", "ExplicitRungeKutta", "SSPRK3", "RK4", "Heun", + "RungeKuttaFormulation" +] + + +class RungeKuttaFormulation(Enum): + """ + Enumerator to describe the formulation of a Runge-Kutta scheme. + + The following Runge-Kutta methods for solving dy/dt = F(y) are encoded here: + - `increment`: \n + k_0 = F[y^n] \n + k_m = F[y^n - dt*Sum_{i=0}^{m-1} a_{m,i} * k_i], for m = 1 to M - 1 \n + y^{n+1} = y^n - dt*Sum_{i=0}^{M-1} b_i*k_i \n + - `predictor`: + y^0 = y^n \n + y^m = y^0 - dt*Sum_{i=0}^{m-1} a_{m,i} * F[y^i], for m = 1 to M - 1 \n + y^{n+1} = y^0 - dt*Sum_{i=0}^{m-1} b_i * F[y^i] \n + - `linear`: + y^0 = y^n \n + y^m = y^0 - dt*F[Sum_{i=0}^{m-1} a_{m,i} * y^i], for m = 1 to M - 1 \n + y^{n+1} = y^0 - dt*F[Sum_{i=0}^{m-1} b_i * y^i] \n + """ + + increment = 1595712 + predictor = 8234900 + linear = 269207 class ExplicitRungeKutta(ExplicitTimeDiscretisation): @@ -59,14 +88,14 @@ class ExplicitRungeKutta(ExplicitTimeDiscretisation): def __init__(self, domain, butcher_matrix, field_name=None, fixed_subcycles=None, subcycle_by_courant=None, - increment_form=True, solver_parameters=None, - limiter=None, options=None): + rk_formulation=RungeKuttaFormulation.increment, + solver_parameters=None, limiter=None, options=None): """ Args: domain (:class:`Domain`): the model's domain object, containing the mesh and the compatible function spaces. - butcher_matrix (numpy array): A matrix containing the coefficients of - a butcher tableau defining a given Runge Kutta time discretisation. + butcher_matrix (numpy array): A matrix containing the coefficients + of a butcher tableau defining a given Runge Kutta scheme. field_name (str, optional): name of the field to be evolved. Defaults to None. fixed_subcycles (int, optional): the fixed number of sub-steps to @@ -76,11 +105,11 @@ def __init__(self, domain, butcher_matrix, field_name=None, make the scheme perform adaptive sub-cycling based on the Courant number. The specified argument is the maximum Courant for one sub-cycle. 
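The increment and predictor formulations in the enum docstring are algebraically equivalent for explicit schemes: the stage values of the predictor form are exactly the arguments at which the increments are evaluated. The standalone NumPy sketch below illustrates this using the Butcher-matrix layout of the classes in this module (row `m` holding the coefficients for stage `m+1`, last row the final weights) and the usual `+dt` convention for `dy/dt = F(y)` used in the individual scheme docstrings.
```python
import numpy as np

def rk_increment(F, y, dt, butcher):
    # increment form: store k_m = F(stage value), combine the increments at the end
    M = butcher.shape[0]
    k = np.zeros(M)
    k[0] = F(y)
    for m in range(1, M):
        k[m] = F(y + dt * np.dot(butcher[m - 1, :m], k[:m]))
    return y + dt * np.dot(butcher[M - 1], k)

def rk_predictor(F, y, dt, butcher):
    # predictor form: store the stage values themselves, apply F when needed
    M = butcher.shape[0]
    ys = np.zeros(M)
    ys[0] = y
    for m in range(1, M):
        ys[m] = y + dt * np.dot(butcher[m - 1, :m], [F(v) for v in ys[:m]])
    return y + dt * np.dot(butcher[M - 1], [F(v) for v in ys])

F = lambda y: -y**2          # a simple nonlinear right-hand side
ssprk3 = np.array([[1., 0., 0.],
                   [1./4., 1./4., 0.],
                   [1./6., 1./6., 2./3.]])
y0, dt = 1.0, 0.1
assert np.isclose(rk_increment(F, y0, dt, ssprk3),
                  rk_predictor(F, y0, dt, ssprk3))
```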
Defaults to None, in which case adaptive - sub-cycling is not used. This option cannot be specified with the - `fixed_subcycles` argument. - increment_form (bool, optional): whether to write the RK scheme in - "increment form", solving for increments rather than updated - fields. Defaults to True. + sub-cycling is not used. This option cannot be specified with + the `fixed_subcycles` argument. + rk_formulation (:class:`RungeKuttaFormulation`, optional): + an enumerator object, describing the formulation of the Runge- + Kutta scheme. Defaults to the increment form. solver_parameters (dict, optional): dictionary of parameters to pass to the underlying solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to @@ -97,7 +126,7 @@ def __init__(self, domain, butcher_matrix, field_name=None, limiter=limiter, options=options) self.butcher_matrix = butcher_matrix self.nbutcher = int(np.shape(self.butcher_matrix)[0]) - self.increment_form = increment_form + self.rk_formulation = rk_formulation @property def nStages(self): @@ -114,37 +143,71 @@ def setup(self, equation, apply_bcs=True, *active_labels): """ super().setup(equation, apply_bcs, *active_labels) - if not self.increment_form: - self.field_i = [Function(self.fs) for i in range(self.nStages+1)] + if self.rk_formulation == RungeKuttaFormulation.predictor: + self.field_i = [Function(self.fs) for _ in range(self.nStages+1)] + elif self.rk_formulation == RungeKuttaFormulation.increment: + self.k = [Function(self.fs) for _ in range(self.nStages)] + elif self.rk_formulation == RungeKuttaFormulation.linear: + self.field_rhs = Function(self.fs) else: - self.k = [Function(self.fs) for i in range(self.nStages)] + raise NotImplementedError( + 'Runge-Kutta formulation is not implemented' + ) @cached_property def solver(self): - if self.increment_form: + if self.rk_formulation == RungeKuttaFormulation.increment: return super().solver - else: - # In this case, don't set snes_type to ksp only, as we do want the - # outer Newton iteration + + elif self.rk_formulation == RungeKuttaFormulation.predictor: solver_list = [] for stage in range(self.nStages): # setup linear solver using lhs and rhs defined in derived class problem = NonlinearVariationalProblem( self.lhs[stage].form - self.rhs[stage].form, - self.field_i[stage+1], bcs=self.bcs) + self.field_i[stage+1], bcs=self.bcs + ) solver_name = self.field_name+self.__class__.__name__+str(stage) solver = NonlinearVariationalSolver( problem, solver_parameters=self.solver_parameters, - options_prefix=solver_name) + options_prefix=solver_name + ) solver_list.append(solver) return solver_list + elif self.rk_formulation == RungeKuttaFormulation.linear: + problem = NonlinearVariationalProblem( + self.lhs - self.rhs[0], self.x1, bcs=self.bcs + ) + solver_name = self.field_name+self.__class__.__name__ + solver = NonlinearVariationalSolver( + problem, solver_parameters=self.solver_parameters, + options_prefix=solver_name + ) + + # Set up problem for final step + problem_last = NonlinearVariationalProblem( + self.lhs - self.rhs[1], self.x1, bcs=self.bcs + ) + solver_name = self.field_name+self.__class__.__name__+'_last' + solver_last = NonlinearVariationalSolver( + problem_last, solver_parameters=self.solver_parameters, + options_prefix=solver_name + ) + + return solver, solver_last + + else: + raise NotImplementedError( + 'Runge-Kutta formulation is not implemented' + ) + @cached_property def lhs(self): """Set up the discretisation's left hand side (the time derivative).""" - if 
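The `fixed_subcycles` and `subcycle_by_courant` arguments control how many sub-steps a single application of the scheme is split into. The following is a generic sketch of that logic, not Gusto's internal implementation.
```python
# Generic sketch of the two sub-cycling options described in the docstring:
# either a fixed number of sub-steps, or a number chosen so that each
# sub-step stays below a target Courant number.
import math

def n_subcycles(courant_number, fixed_subcycles=None, subcycle_by_courant=None):
    if fixed_subcycles is not None:
        return fixed_subcycles
    if subcycle_by_courant is not None:
        return max(1, math.ceil(courant_number / subcycle_by_courant))
    return 1

def apply_subcycled(step, y, dt, n):
    # apply the one-step scheme n times with timestep dt/n
    for _ in range(n):
        y = step(y, dt / n)
    return y

# e.g. a Courant number of 1.7 with a per-sub-step target of 0.5 gives 4 sub-steps
assert n_subcycles(1.7, subcycle_by_courant=0.5) == 4
```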
self.increment_form: + if self.rk_formulation == RungeKuttaFormulation.increment: l = self.residual.label_map( lambda t: t.has_label(time_derivative), map_if_true=replace_subject(self.x_out, self.idx), @@ -152,7 +215,7 @@ def lhs(self): return l.form - else: + elif self.rk_formulation == RungeKuttaFormulation.predictor: lhs_list = [] for stage in range(self.nStages): l = self.residual.label_map( @@ -163,11 +226,24 @@ def lhs(self): return lhs_list + if self.rk_formulation == RungeKuttaFormulation.linear: + l = self.residual.label_map( + lambda t: t.has_label(time_derivative), + map_if_true=replace_subject(self.x1, self.idx), + map_if_false=drop) + + return l.form + + else: + raise NotImplementedError( + 'Runge-Kutta formulation is not implemented' + ) + @cached_property def rhs(self): """Set up the time discretisation's right hand side.""" - if self.increment_form: + if self.rk_formulation == RungeKuttaFormulation.increment: r = self.residual.label_map( all_terms, map_if_true=replace_subject(self.x1, old_idx=self.idx)) @@ -179,7 +255,7 @@ def rhs(self): # If there are no active labels, we may have no terms at this point # So that we can still do xnp1 = xn, put in a zero term here - if self.increment_form and len(r.terms) == 0: + if len(r.terms) == 0: logger.warning('No terms detected for RHS of explicit problem. ' + 'Adding a zero term to avoid failure.') null_term = Constant(0.0)*self.residual.label_map( @@ -191,7 +267,7 @@ def rhs(self): return r.form - else: + elif self.rk_formulation == RungeKuttaFormulation.predictor: rhs_list = [] for stage in range(self.nStages): @@ -217,9 +293,57 @@ def rhs(self): return rhs_list + elif self.rk_formulation == RungeKuttaFormulation.linear: + + r = self.residual.label_map( + lambda t: t.has_label(time_derivative), + map_if_true=replace_subject(self.x0, old_idx=self.idx), + map_if_false=replace_subject(self.field_rhs, old_idx=self.idx) + ) + r = r.label_map( + lambda t: t.has_label(time_derivative), + map_if_true=keep, + map_if_false=lambda t: -self.dt*t + ) + + # Set up all-but-last RHS + if self.idx is not None: + # If original function is in mixed function space, then ensure + # correct test function in the all-but-last form + r_all_but_last = self.residual.label_map( + lambda t: t.has_label(all_but_last), + map_if_true=lambda t: + Term(split_form(t.get(all_but_last).form)[self.idx].form, + t.labels), + map_if_false=keep + ) + else: + r_all_but_last = self.residual.label_map( + lambda t: t.has_label(all_but_last), + map_if_true=lambda t: Term(t.get(all_but_last).form, t.labels), + map_if_false=keep + ) + r_all_but_last = r_all_but_last.label_map( + lambda t: t.has_label(time_derivative), + map_if_true=replace_subject(self.x0, old_idx=self.idx), + map_if_false=replace_subject(self.field_rhs, old_idx=self.idx) + ) + r_all_but_last = r_all_but_last.label_map( + lambda t: t.has_label(time_derivative), + map_if_true=keep, + map_if_false=lambda t: -self.dt*t + ) + + return r_all_but_last.form, r.form + + else: + raise NotImplementedError( + 'Runge-Kutta formulation is not implemented' + ) + def solve_stage(self, x0, stage): - if self.increment_form: + if self.rk_formulation == RungeKuttaFormulation.increment: self.x1.assign(x0) for i in range(stage): @@ -228,6 +352,10 @@ def solve_stage(self, x0, stage): evaluate(self.x1, self.dt) if self.limiter is not None: self.limiter.apply(self.x1) + + # Set initial guess for solver + if stage > 0: + self.x_out.assign(self.k[stage-1]) self.solver.solve() self.k[stage].assign(self.x_out) @@ -241,21 +369,21 @@ def 
solve_stage(self, x0, stage): if self.limiter is not None: self.limiter.apply(self.x1) - else: + elif self.rk_formulation == RungeKuttaFormulation.predictor: # Set initial field if stage == 0: self.field_i[0].assign(x0) - # Use x0 as a first guess (otherwise may not converge) - self.field_i[stage+1].assign(x0) + # Use previous stage value as a first guess (otherwise may not converge) + self.field_i[stage+1].assign(self.field_i[stage]) # Update field_i for physics / limiters for evaluate in self.evaluate_source: # TODO: not implemented! Here we need to evaluate the m-th term # in the i-th RHS with field_m raise NotImplementedError( - 'Physics not implemented with RK schemes that do not use ' - + 'the increment form') + 'Physics not implemented with RK schemes that use the ' + + 'predictor form') if self.limiter is not None: self.limiter.apply(self.field_i[stage]) @@ -267,6 +395,70 @@ def solve_stage(self, x0, stage): if self.limiter is not None: self.limiter.apply(self.x1) + elif self.rk_formulation == RungeKuttaFormulation.linear: + + # Set combined index of stage and subcycle + cycle_stage = self.nStages*self.subcycle_idx + stage + + if stage == 0 and self.subcycle_idx == 0: + self.field_lhs = [Function(self.fs) for _ in range(self.nStages*self.ncycles)] + self.field_lhs[0].assign(self.x0) + + # All-but-last form ------------------------------------------------ + if (cycle_stage + 1 < self.ncycles*self.nStages): + # Build up RHS field to be evaluated + self.field_rhs.assign(0.0) + for i in range(stage+1): + i_cycle_stage = self.nStages*self.subcycle_idx + i + self.field_rhs.assign( + self.field_rhs + + self.butcher_matrix[stage, i]*self.field_lhs[i_cycle_stage] + ) + + # Evaluate physics and apply limiter, if necessary + for evaluate in self.evaluate_source: + evaluate(self.field_rhs, self.dt) + if self.limiter is not None: + self.limiter.apply(self.field_rhs) + + # Use previous stage value as a first guess (otherwise may not converge) + self.x1.assign(self.field_lhs[cycle_stage]) + # Solve problem, placing solution in self.x1 + self.solver[0].solve() + + # Store LHS + self.field_lhs[cycle_stage+1].assign(self.x1) + + # Last stage and last subcycle ------------------------------------- + else: + # Build up RHS field to be evaluated + self.field_rhs.assign(0.0) + for i in range(self.ncycles*self.nStages): + j = i % self.nStages + self.field_rhs.assign( + self.field_rhs + + self.butcher_matrix[self.nStages-1, j]*self.field_lhs[i] + ) + + # Evaluate physics and apply limiter, if necessary + for evaluate in self.evaluate_source: + evaluate(self.field_rhs, self.original_dt) + if self.limiter is not None: + self.limiter.apply(self.field_rhs) + # Use x0 as a first guess (otherwise may not converge) + self.x1.assign(x0) + # Solve problem, placing solution in self.x1 + self.solver[1].solve() + + # Final application of limiter + if self.limiter is not None: + self.limiter.apply(self.x1) + + else: + raise NotImplementedError( + 'Runge-Kutta formulation is not implemented' + ) + def apply_cycle(self, x_out, x_in): """ Apply the time discretisation through a single sub-step. @@ -275,6 +467,8 @@ def apply_cycle(self, x_out, x_in): x_in (:class:`Function`): the input field. x_out (:class:`Function`): the output field to be computed. """ + + # TODO: is this limiter application necessary? 
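The `linear` branch above builds `field_rhs` as a weighted combination of stored stage values and then applies the right-hand-side operator once per stage. For a genuinely linear operator this reproduces the predictor form exactly, which the small standalone NumPy sketch below demonstrates (again using the `+dt` convention for `dy/dt = F(y)`).
```python
import numpy as np

A = np.array([[-1.0, 0.3], [0.2, -0.5]])   # a linear right-hand side F(y) = A @ y
F = lambda y: A @ y

butcher = np.array([[1., 0., 0.],
                    [1./4., 1./4., 0.],
                    [1./6., 1./6., 2./3.]])   # SSPRK3 coefficients
y0 = np.array([1.0, 2.0])
dt = 0.1
M = butcher.shape[0]

# predictor form: weighted sum of F applied to each previous stage value
ys = [y0]
for m in range(1, M):
    ys.append(y0 + dt * sum(butcher[m - 1, i] * F(ys[i]) for i in range(m)))

# linear form: F applied once to the weighted sum of previous stage values
ys_lin = [y0]
for m in range(1, M):
    rhs = sum(butcher[m - 1, i] * ys_lin[i] for i in range(m))
    ys_lin.append(y0 + dt * F(rhs))

assert all(np.allclose(a, b) for a, b in zip(ys, ys_lin))
```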
if self.limiter is not None: self.limiter.apply(x_in) @@ -294,9 +488,12 @@ class ForwardEuler(ExplicitRungeKutta): k0 = F[y^n] \n y^(n+1) = y^n + dt*k0 \n """ - def __init__(self, domain, field_name=None, fixed_subcycles=None, - subcycle_by_courant=None, increment_form=True, - solver_parameters=None, limiter=None, options=None): + def __init__( + self, domain, field_name=None, + fixed_subcycles=None, subcycle_by_courant=None, + rk_formulation=RungeKuttaFormulation.increment, + solver_parameters=None, limiter=None, options=None + ): """ Args: domain (:class:`Domain`): the model's domain object, containing the @@ -310,11 +507,11 @@ def __init__(self, domain, field_name=None, fixed_subcycles=None, make the scheme perform adaptive sub-cycling based on the Courant number. The specified argument is the maximum Courant for one sub-cycle. Defaults to None, in which case adaptive - sub-cycling is not used. This option cannot be specified with the - `fixed_subcycles` argument. - increment_form (bool, optional): whether to write the RK scheme in - "increment form", solving for increments rather than updated - fields. Defaults to True. + sub-cycling is not used. This option cannot be specified with + the `fixed_subcycles` argument. + rk_formulation (:class:`RungeKuttaFormulation`, optional): + an enumerator object, describing the formulation of the Runge- + Kutta scheme. Defaults to the increment form. solver_parameters (dict, optional): dictionary of parameters to pass to the underlying solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to @@ -324,11 +521,13 @@ def __init__(self, domain, field_name=None, fixed_subcycles=None, to control the "wrapper" methods, such as Embedded DG or a recovery method. Defaults to None. """ + butcher_matrix = np.array([1.]).reshape(1, 1) + super().__init__(domain, butcher_matrix, field_name=field_name, fixed_subcycles=fixed_subcycles, subcycle_by_courant=subcycle_by_courant, - increment_form=increment_form, + rk_formulation=rk_formulation, solver_parameters=solver_parameters, limiter=limiter, options=options) @@ -343,9 +542,12 @@ class SSPRK3(ExplicitRungeKutta): k2 = F[y^n + (1/4)*dt*(k0+k1)] \n y^(n+1) = y^n + (1/6)*dt*(k0 + k1 + 4*k2) \n """ - def __init__(self, domain, field_name=None, fixed_subcycles=None, - subcycle_by_courant=None, increment_form=True, - solver_parameters=None, limiter=None, options=None): + def __init__( + self, domain, field_name=None, + fixed_subcycles=None, subcycle_by_courant=None, + rk_formulation=RungeKuttaFormulation.increment, + solver_parameters=None, limiter=None, options=None + ): """ Args: domain (:class:`Domain`): the model's domain object, containing the @@ -359,11 +561,11 @@ def __init__(self, domain, field_name=None, fixed_subcycles=None, make the scheme perform adaptive sub-cycling based on the Courant number. The specified argument is the maximum Courant for one sub-cycle. Defaults to None, in which case adaptive - sub-cycling is not used. This option cannot be specified with the - `fixed_subcycles` argument. - increment_form (bool, optional): whether to write the RK scheme in - "increment form", solving for increments rather than updated - fields. Defaults to True. + sub-cycling is not used. This option cannot be specified with + the `fixed_subcycles` argument. + rk_formulation (:class:`RungeKuttaFormulation`, optional): + an enumerator object, describing the formulation of the Runge- + Kutta scheme. Defaults to the increment form. 
solver_parameters (dict, optional): dictionary of parameters to pass to the underlying solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to @@ -373,12 +575,16 @@ def __init__(self, domain, field_name=None, fixed_subcycles=None, to control the "wrapper" methods, such as Embedded DG or a recovery method. Defaults to None. """ - butcher_matrix = np.array([[1., 0., 0.], [1./4., 1./4., 0.], [1./6., 1./6., 2./3.]]) + butcher_matrix = np.array([ + [1., 0., 0.], + [1./4., 1./4., 0.], + [1./6., 1./6., 2./3.] + ]) super().__init__(domain, butcher_matrix, field_name=field_name, fixed_subcycles=fixed_subcycles, subcycle_by_courant=subcycle_by_courant, - increment_form=increment_form, + rk_formulation=rk_formulation, solver_parameters=solver_parameters, limiter=limiter, options=options) @@ -398,10 +604,12 @@ class RK4(ExplicitRungeKutta): where superscripts indicate the time-level. \n """ - def __init__(self, domain, field_name=None, fixed_subcycles=None, - subcycle_by_courant=None, increment_form=True, - solver_parameters=None, - limiter=None, options=None): + def __init__( + self, domain, field_name=None, + fixed_subcycles=None, subcycle_by_courant=None, + rk_formulation=RungeKuttaFormulation.increment, + solver_parameters=None, limiter=None, options=None + ): """ Args: domain (:class:`Domain`): the model's domain object, containing the @@ -415,11 +623,11 @@ def __init__(self, domain, field_name=None, fixed_subcycles=None, make the scheme perform adaptive sub-cycling based on the Courant number. The specified argument is the maximum Courant for one sub-cycle. Defaults to None, in which case adaptive - sub-cycling is not used. This option cannot be specified with the - `fixed_subcycles` argument. - increment_form (bool, optional): whether to write the RK scheme in - "increment form", solving for increments rather than updated - fields. Defaults to True. + sub-cycling is not used. This option cannot be specified with + the `fixed_subcycles` argument. + rk_formulation (:class:`RungeKuttaFormulation`, optional): + an enumerator object, describing the formulation of the Runge- + Kutta scheme. Defaults to the increment form. solver_parameters (dict, optional): dictionary of parameters to pass to the underlying solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to @@ -429,11 +637,16 @@ def __init__(self, domain, field_name=None, fixed_subcycles=None, to control the "wrapper" methods, such as Embedded DG or a recovery method. Defaults to None. """ - butcher_matrix = np.array([[0.5, 0., 0., 0.], [0., 0.5, 0., 0.], [0., 0., 1., 0.], [1./6., 1./3., 1./3., 1./6.]]) + butcher_matrix = np.array([ + [0.5, 0., 0., 0.], + [0., 0.5, 0., 0.], + [0., 0., 1., 0.], + [1./6., 1./3., 1./3., 1./6.] + ]) super().__init__(domain, butcher_matrix, field_name=field_name, fixed_subcycles=fixed_subcycles, subcycle_by_courant=subcycle_by_courant, - increment_form=increment_form, + rk_formulation=rk_formulation, solver_parameters=solver_parameters, limiter=limiter, options=options) @@ -451,9 +664,12 @@ class Heun(ExplicitRungeKutta): where superscripts indicate the time-level and subscripts indicate the stage number. 
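The reformatted Butcher matrices are unchanged numerically. As a sanity check, the RK4 coefficients laid out above reproduce the classical fourth-order update: one step on `dy/dt = y` agrees with `exp(dt)` to within the expected local truncation error (illustrative NumPy only).
```python
import numpy as np

rk4 = np.array([[0.5, 0., 0., 0.],
                [0., 0.5, 0., 0.],
                [0., 0., 1., 0.],
                [1./6., 1./3., 1./3., 1./6.]])

F = lambda y: y
y, dt = 1.0, 0.1

# build the four increments using the same row layout as the class above
k = [F(y)]
for m in range(1, 4):
    k.append(F(y + dt * np.dot(rk4[m - 1, :m], k[:m])))
y_new = y + dt * np.dot(rk4[3], k)

# local error of a fourth-order scheme is O(dt^5)
assert abs(y_new - np.exp(dt)) < dt**5
```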
""" - def __init__(self, domain, field_name=None, fixed_subcycles=None, - subcycle_by_courant=None, increment_form=True, - solver_parameters=None, limiter=None, options=None): + def __init__( + self, domain, field_name=None, + fixed_subcycles=None, subcycle_by_courant=None, + rk_formulation=RungeKuttaFormulation.increment, + solver_parameters=None, limiter=None, options=None + ): """ Args: domain (:class:`Domain`): the model's domain object, containing the @@ -469,9 +685,9 @@ def __init__(self, domain, field_name=None, fixed_subcycles=None, for one sub-cycle. Defaults to None, in which case adaptive sub-cycling is not used. This option cannot be specified with the `fixed_subcycles` argument. - increment_form (bool, optional): whether to write the RK scheme in - "increment form", solving for increments rather than updated - fields. Defaults to True. + rk_formulation (:class:`RungeKuttaFormulation`, optional): + an enumerator object, describing the formulation of the Runge- + Kutta scheme. Defaults to the increment form. solver_parameters (dict, optional): dictionary of parameters to pass to the underlying solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to @@ -481,10 +697,14 @@ def __init__(self, domain, field_name=None, fixed_subcycles=None, to control the "wrapper" methods, such as Embedded DG or a recovery method. Defaults to None. """ - butcher_matrix = np.array([[1., 0.], [0.5, 0.5]]) + + butcher_matrix = np.array([ + [1., 0.], + [0.5, 0.5] + ]) super().__init__(domain, butcher_matrix, field_name=field_name, fixed_subcycles=fixed_subcycles, subcycle_by_courant=subcycle_by_courant, - increment_form=increment_form, + rk_formulation=rk_formulation, solver_parameters=solver_parameters, limiter=limiter, options=options) diff --git a/gusto/time_discretisation/imex_runge_kutta.py b/gusto/time_discretisation/imex_runge_kutta.py index 283887e7e..fcaefe1ec 100644 --- a/gusto/time_discretisation/imex_runge_kutta.py +++ b/gusto/time_discretisation/imex_runge_kutta.py @@ -5,7 +5,9 @@ from firedrake.fml import replace_subject, all_terms, drop from firedrake.utils import cached_property from gusto.core.labels import time_derivative, implicit, explicit -from gusto.time_discretisation.time_discretisation import TimeDiscretisation +from gusto.time_discretisation.time_discretisation import ( + TimeDiscretisation, wrapper_apply +) import numpy as np @@ -58,7 +60,8 @@ class IMEXRungeKutta(TimeDiscretisation): # -------------------------------------------------------------------------- def __init__(self, domain, butcher_imp, butcher_exp, field_name=None, - solver_parameters=None, limiter=None, options=None): + linear_solver_parameters=None, nonlinear_solver_parameters=None, + limiter=None, options=None): """ Args: domain (:class:`Domain`): the model's domain object, containing the @@ -71,20 +74,39 @@ def __init__(self, domain, butcher_imp, butcher_exp, field_name=None, Runge Kutta time discretisation. field_name (str, optional): name of the field to be evolved. Defaults to None. - solver_parameters (dict, optional): dictionary of parameters to - pass to the underlying solver. Defaults to None. + linear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying linear solver. Defaults to None. + nonlinear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying nonlinear solver. Defaults to None. 
options (:class:`AdvectionOptions`, optional): an object containing options to either be passed to the spatial discretisation, or to control the "wrapper" methods, such as Embedded DG or a recovery method. Defaults to None. """ super().__init__(domain, field_name=field_name, - solver_parameters=solver_parameters, + solver_parameters=nonlinear_solver_parameters, options=options) self.butcher_imp = butcher_imp self.butcher_exp = butcher_exp self.nStages = int(np.shape(self.butcher_imp)[1]) + # Set default linear and nonlinear solver options if none passed in + if linear_solver_parameters is None: + self.linear_solver_parameters = {'snes_type': 'ksponly', + 'ksp_type': 'cg', + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu'} + else: + self.linear_solver_parameters = linear_solver_parameters + + if nonlinear_solver_parameters is None: + self.nonlinear_solver_parameters = {'snes_type': 'newtonls', + 'ksp_type': 'gmres', + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu'} + else: + self.nonlinear_solver_parameters = nonlinear_solver_parameters + def setup(self, equation, apply_bcs=True, *active_labels): """ Set up the time discretisation based on the equation. @@ -198,7 +220,7 @@ def solvers(self): # setup solver using residual defined in derived class problem = NonlinearVariationalProblem(self.res(stage), self.x_out, bcs=self.bcs) solver_name = self.field_name+self.__class__.__name__ + "%s" % (stage) - solvers.append(NonlinearVariationalSolver(problem, solver_parameters=self.solver_parameters, options_prefix=solver_name)) + solvers.append(NonlinearVariationalSolver(problem, solver_parameters=self.nonlinear_solver_parameters, options_prefix=solver_name)) return solvers @cached_property @@ -207,18 +229,30 @@ def final_solver(self): # setup solver using lhs and rhs defined in derived class problem = NonlinearVariationalProblem(self.final_res, self.x_out, bcs=self.bcs) solver_name = self.field_name+self.__class__.__name__ - return NonlinearVariationalSolver(problem, solver_parameters=self.solver_parameters, options_prefix=solver_name) + return NonlinearVariationalSolver(problem, solver_parameters=self.linear_solver_parameters, options_prefix=solver_name) + @wrapper_apply def apply(self, x_out, x_in): self.x1.assign(x_in) + self.x_out.assign(x_in) solver_list = self.solvers for stage in range(self.nStages): self.solver = solver_list[stage] + # Set initial solver guess + if (stage > 0): + self.x_out.assign(self.xs[stage-1]) self.solver.solve() - self.xs[stage].assign(self.x_out) + # Apply limiter + if self.limiter is not None: + self.limiter.apply(self.x_out) + self.xs[stage].assign(self.x_out) self.final_solver.solve() + + # Apply limiter + if self.limiter is not None: + self.limiter.apply(self.x_out) x_out.assign(self.x_out) @@ -233,7 +267,8 @@ class IMEX_Euler(IMEXRungeKutta): y_1 = y^n + dt*F[y_1] + dt*S[y_0] \n y^(n+1) = y^n + dt*F[y_1] + dt*S[y_0] """ - def __init__(self, domain, field_name=None, solver_parameters=None, + def __init__(self, domain, field_name=None, + linear_solver_parameters=None, nonlinear_solver_parameters=None, limiter=None, options=None): """ Args: @@ -241,8 +276,10 @@ def __init__(self, domain, field_name=None, solver_parameters=None, mesh and the compatible function spaces. field_name (str, optional): name of the field to be evolved. Defaults to None. - solver_parameters (dict, optional): dictionary of parameters to - pass to the underlying solver. Defaults to None. + linear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying linear solver. 
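With the IMEX schemes now distinguishing linear from nonlinear solves, custom PETSc options can be supplied separately for each. A usage sketch follows; the `domain` object is a placeholder and the dictionaries simply mirror the defaults set in `__init__` above.
```python
# Sketch: separate PETSc option dictionaries for the linear (final) solve
# and the nonlinear stage solves of an IMEX Runge-Kutta scheme.
from gusto import IMEX_SSP3

linear_params = {'snes_type': 'ksponly',
                 'ksp_type': 'cg',
                 'pc_type': 'bjacobi',
                 'sub_pc_type': 'ilu'}
nonlinear_params = {'snes_type': 'newtonls',
                    'ksp_type': 'gmres',
                    'pc_type': 'bjacobi',
                    'sub_pc_type': 'ilu'}

scheme = IMEX_SSP3(domain,
                   linear_solver_parameters=linear_params,
                   nonlinear_solver_parameters=nonlinear_params)
```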
Defaults to None. + nonlinear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying nonlinear solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to the evolving field to enforce monotonicity. Defaults to None. options (:class:`AdvectionOptions`, optional): an object containing @@ -253,7 +290,8 @@ def __init__(self, domain, field_name=None, solver_parameters=None, butcher_imp = np.array([[0., 0.], [0., 1.], [0., 1.]]) butcher_exp = np.array([[0., 0.], [1., 0.], [1., 0.]]) super().__init__(domain, butcher_imp, butcher_exp, field_name, - solver_parameters=solver_parameters, + linear_solver_parameters=linear_solver_parameters, + nonlinear_solver_parameters=nonlinear_solver_parameters, limiter=limiter, options=options) @@ -273,7 +311,8 @@ class IMEX_ARS3(IMEXRungeKutta): y^(n+1) = y^n + dt*(g*F[y_1]+(1-g)*F[y_2]) \n + dt*(0.5*S[y_1]+0.5*S[y_2]) """ - def __init__(self, domain, field_name=None, solver_parameters=None, + def __init__(self, domain, field_name=None, + linear_solver_parameters=None, nonlinear_solver_parameters=None, limiter=None, options=None): """ Args: @@ -281,8 +320,10 @@ def __init__(self, domain, field_name=None, solver_parameters=None, mesh and the compatible function spaces. field_name (str, optional): name of the field to be evolved. Defaults to None. - solver_parameters (dict, optional): dictionary of parameters to - pass to the underlying solver. Defaults to None. + linear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying linear solver. Defaults to None. + nonlinear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying nonlinear solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to the evolving field to enforce monotonicity. Defaults to None. options (:class:`AdvectionOptions`, optional): an object containing @@ -295,7 +336,8 @@ def __init__(self, domain, field_name=None, solver_parameters=None, butcher_exp = np.array([[0., 0., 0.], [g, 0., 0.], [g-1., 2.*(1.-g), 0.], [0., 0.5, 0.5]]) super().__init__(domain, butcher_imp, butcher_exp, field_name, - solver_parameters=solver_parameters, + linear_solver_parameters=linear_solver_parameters, + nonlinear_solver_parameters=nonlinear_solver_parameters, limiter=limiter, options=options) @@ -315,15 +357,19 @@ class IMEX_ARK2(IMEXRungeKutta): y^(n+1) = y^n + dt*(d*F[y_0]+d*F[y_1]+g*F[y_2]) \n + dt*(d*S[y_0]+d*S[y_1]+g*S[y_2]) """ - def __init__(self, domain, field_name=None, solver_parameters=None, limiter=None, options=None): + def __init__(self, domain, field_name=None, + linear_solver_parameters=None, nonlinear_solver_parameters=None, + limiter=None, options=None): """ Args: domain (:class:`Domain`): the model's domain object, containing the mesh and the compatible function spaces. field_name (str, optional): name of the field to be evolved. Defaults to None. - solver_parameters (dict, optional): dictionary of parameters to - pass to the underlying solver. Defaults to None. + linear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying linear solver. Defaults to None. + nonlinear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying nonlinear solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to the evolving field to enforce monotonicity. Defaults to None. 
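The IMEX Euler update documented above can be followed through by hand for a scalar problem with a stiff part F treated implicitly and an explicit source S. Solving the single implicit stage in closed form shows that the final update coincides with the stage value (illustrative Python only).
```python
# Scalar illustration of the IMEX Euler update:
#   y_1 = y^n + dt*F[y_1] + dt*S[y_0],  y^(n+1) = y^n + dt*F[y_1] + dt*S[y_0]
a, b = 50.0, 1.0
F = lambda y: -a * y       # stiff part, treated implicitly
S = lambda y: b            # non-stiff part, treated explicitly
y_n, dt = 1.0, 0.1

# (1 + a*dt) * y_1 = y^n + dt*S(y^n), solved directly for the linear F above
y_1 = (y_n + dt * S(y_n)) / (1.0 + a * dt)
y_np1 = y_n + dt * F(y_1) + dt * S(y_n)

# for IMEX Euler the final update equals the stage value
assert abs(y_np1 - y_1) < 1e-12
```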
options (:class:`AdvectionOptions`, optional): an object containing @@ -337,7 +383,8 @@ def __init__(self, domain, field_name=None, solver_parameters=None, limiter=None butcher_imp = np.array([[0., 0., 0.], [g, g, 0.], [d, d, g], [d, d, g]]) butcher_exp = np.array([[0., 0., 0.], [2.*g, 0., 0.], [1.-a, a, 0.], [d, d, g]]) super().__init__(domain, butcher_imp, butcher_exp, field_name, - solver_parameters=solver_parameters, + linear_solver_parameters=linear_solver_parameters, + nonlinear_solver_parameters=nonlinear_solver_parameters, limiter=limiter, options=options) @@ -355,15 +402,19 @@ class IMEX_SSP3(IMEXRungeKutta): y^(n+1) = y^n + dt*(1/6*F[y_1]+1/6*F[y_2]+2/3*F[y_3]) \n + dt*(1/6*S[y_1]+1/6*S[y_2]+2/3*S[y_3]) """ - def __init__(self, domain, field_name=None, solver_parameters=None, limiter=None, options=None): + def __init__(self, domain, field_name=None, + linear_solver_parameters=None, nonlinear_solver_parameters=None, + limiter=None, options=None): """ Args: domain (:class:`Domain`): the model's domain object, containing the mesh and the compatible function spaces. field_name (str, optional): name of the field to be evolved. Defaults to None. - solver_parameters (dict, optional): dictionary of parameters to - pass to the underlying solver. Defaults to None. + linear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying linear solver. Defaults to None. + nonlinear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying nonlinear solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to the evolving field to enforce monotonicity. Defaults to None. options (:class:`AdvectionOptions`, optional): an object containing @@ -375,7 +426,8 @@ def __init__(self, domain, field_name=None, solver_parameters=None, limiter=None butcher_imp = np.array([[g, 0., 0.], [1-2.*g, g, 0.], [0.5-g, 0., g], [(1./6.), (1./6.), (2./3.)]]) butcher_exp = np.array([[0., 0., 0.], [1., 0., 0.], [0.25, 0.25, 0.], [(1./6.), (1./6.), (2./3.)]]) super().__init__(domain, butcher_imp, butcher_exp, field_name, - solver_parameters=solver_parameters, + linear_solver_parameters=linear_solver_parameters, + nonlinear_solver_parameters=nonlinear_solver_parameters, limiter=limiter, options=options) @@ -393,15 +445,19 @@ class IMEX_Trap2(IMEXRungeKutta): y_3 = y^n + dt*(0.5*F[y_0]+0.5*F[y_3]) + dt*(0.5*S[y_0]+0.5*S[y_2]) \n y^(n+1) = y^n + dt*(0.5*F[y_0]+0.5*F[y_3]) + dt*(0.5*S[y_0] + 0.5*S[y_2]) \n """ - def __init__(self, domain, field_name=None, solver_parameters=None, limiter=None, options=None): + def __init__(self, domain, field_name=None, + linear_solver_parameters=None, nonlinear_solver_parameters=None, + limiter=None, options=None): """ Args: domain (:class:`Domain`): the model's domain object, containing the mesh and the compatible function spaces. field_name (str, optional): name of the field to be evolved. Defaults to None. - solver_parameters (dict, optional): dictionary of parameters to - pass to the underlying solver. Defaults to None. + linear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying linear solver. Defaults to None. + nonlinear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying nonlinear solver. Defaults to None. limiter (:class:`Limiter` object, optional): a limiter to apply to the evolving field to enforce monotonicity. Defaults to None. 
options (:class:`AdvectionOptions`, optional): an object containing @@ -413,5 +469,6 @@ def __init__(self, domain, field_name=None, solver_parameters=None, limiter=None butcher_imp = np.array([[0., 0., 0., 0.], [e, 0., 0., 0.], [0.5, 0., 0.5, 0.], [0.5, 0., 0., 0.5], [0.5, 0., 0., 0.5]]) butcher_exp = np.array([[0., 0., 0., 0.], [1., 0., 0., 0.], [0.5, 0.5, 0., 0.], [0.5, 0., 0.5, 0.], [0.5, 0., 0.5, 0.]]) super().__init__(domain, butcher_imp, butcher_exp, field_name, - solver_parameters=solver_parameters, + linear_solver_parameters=linear_solver_parameters, + nonlinear_solver_parameters=nonlinear_solver_parameters, limiter=limiter, options=options) diff --git a/gusto/time_discretisation/implicit_runge_kutta.py b/gusto/time_discretisation/implicit_runge_kutta.py index 4184dd3e5..8eb1c75ab 100644 --- a/gusto/time_discretisation/implicit_runge_kutta.py +++ b/gusto/time_discretisation/implicit_runge_kutta.py @@ -8,7 +8,9 @@ from firedrake.utils import cached_property from gusto.core.labels import time_derivative -from gusto.time_discretisation.time_discretisation import TimeDiscretisation +from gusto.time_discretisation.time_discretisation import ( + TimeDiscretisation, wrapper_apply +) __all__ = ["ImplicitRungeKutta", "ImplicitMidpoint", "QinZhang"] @@ -54,7 +56,7 @@ class ImplicitRungeKutta(TimeDiscretisation): # --------------------------------------------------------------------------- def __init__(self, domain, butcher_matrix, field_name=None, - solver_parameters=None, limiter=None, options=None,): + solver_parameters=None, options=None,): """ Args: domain (:class:`Domain`): the model's domain object, containing the @@ -66,8 +68,6 @@ def __init__(self, domain, butcher_matrix, field_name=None, Defaults to None. solver_parameters (dict, optional): dictionary of parameters to pass to the underlying solver. Defaults to None. - limiter (:class:`Limiter` object, optional): a limiter to apply to - the evolving field to enforce monotonicity. Defaults to None. 
options (:class:`AdvectionOptions`, optional): an object containing options to either be passed to the spatial discretisation, or to control the "wrapper" methods, such as Embedded DG or a @@ -75,7 +75,7 @@ def __init__(self, domain, butcher_matrix, field_name=None, """ super().__init__(domain, field_name=field_name, solver_parameters=solver_parameters, - limiter=limiter, options=options) + options=options) self.butcher_matrix = butcher_matrix self.nStages = int(np.shape(self.butcher_matrix)[1]) @@ -129,19 +129,21 @@ def solve_stage(self, x0, stage): for i in range(stage): self.x1.assign(self.x1 + self.butcher_matrix[stage, i]*self.dt*self.k[i]) - if self.limiter is not None: - self.limiter.apply(self.x1) - if self.idx is None and len(self.fs) > 1: self.xnph = tuple([self.dt*self.butcher_matrix[stage, stage]*a + b for a, b in zip(split(self.x_out), split(self.x1))]) else: self.xnph = self.x1 + self.butcher_matrix[stage, stage]*self.dt*self.x_out solver = self.solvers[stage] + # Set initial guess for solver + if (stage > 0): + self.x_out.assign(self.k[stage-1]) + solver.solve() self.k[stage].assign(self.x_out) + @wrapper_apply def apply(self, x_out, x_in): for i in range(self.nStages): @@ -151,9 +153,6 @@ def apply(self, x_out, x_in): for i in range(self.nStages): x_out.assign(x_out + self.butcher_matrix[self.nStages, i]*self.dt*self.k[i]) - if self.limiter is not None: - self.limiter.apply(x_out) - class ImplicitMidpoint(ImplicitRungeKutta): u""" @@ -166,7 +165,7 @@ class ImplicitMidpoint(ImplicitRungeKutta): y^(n+1) = y^n + dt*k0 \n """ def __init__(self, domain, field_name=None, solver_parameters=None, - limiter=None, options=None): + options=None): """ Args: domain (:class:`Domain`): the model's domain object, containing the @@ -175,8 +174,6 @@ def __init__(self, domain, field_name=None, solver_parameters=None, Defaults to None. solver_parameters (dict, optional): dictionary of parameters to pass to the underlying solver. Defaults to None. - limiter (:class:`Limiter` object, optional): a limiter to apply to - the evolving field to enforce monotonicity. Defaults to None. options (:class:`AdvectionOptions`, optional): an object containing options to either be passed to the spatial discretisation, or to control the "wrapper" methods, such as Embedded DG or a @@ -185,7 +182,7 @@ def __init__(self, domain, field_name=None, solver_parameters=None, butcher_matrix = np.array([[0.5], [1.]]) super().__init__(domain, butcher_matrix, field_name, solver_parameters=solver_parameters, - limiter=limiter, options=options) + options=options) class QinZhang(ImplicitRungeKutta): @@ -200,7 +197,7 @@ class QinZhang(ImplicitRungeKutta): y^(n+1) = y^n + 0.5*dt*(k0 + k1) \n """ def __init__(self, domain, field_name=None, solver_parameters=None, - limiter=None, options=None): + options=None): """ Args: domain (:class:`Domain`): the model's domain object, containing the @@ -209,8 +206,6 @@ def __init__(self, domain, field_name=None, solver_parameters=None, Defaults to None. solver_parameters (dict, optional): dictionary of parameters to pass to the underlying solver. Defaults to None. - limiter (:class:`Limiter` object, optional): a limiter to apply to - the evolving field to enforce monotonicity. Defaults to None. 
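For the implicit midpoint rule documented above, the stage equation `k0 = F[y^n + 0.5*dt*k0]` can be solved in closed form when F is linear, which makes the scheme easy to check by hand (illustrative Python only).
```python
# Scalar illustration of the implicit midpoint update:
#   k0 = F[y^n + 0.5*dt*k0],  y^(n+1) = y^n + dt*k0,  with F(y) = -a*y
a = 2.0
y_n, dt = 1.0, 0.1

# stage equation solved directly: k0 = -a*y_n / (1 + 0.5*a*dt)
k0 = -a * y_n / (1.0 + 0.5 * a * dt)
y_np1 = y_n + dt * k0

# equivalent rational form of the implicit midpoint rule for this problem
assert abs(y_np1 - y_n * (1 - 0.5 * a * dt) / (1 + 0.5 * a * dt)) < 1e-12
```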
options (:class:`AdvectionOptions`, optional): an object containing options to either be passed to the spatial discretisation, or to control the "wrapper" methods, such as Embedded DG or a @@ -219,4 +214,4 @@ def __init__(self, domain, field_name=None, solver_parameters=None, butcher_matrix = np.array([[0.25, 0], [0.5, 0.25], [0.5, 0.5]]) super().__init__(domain, butcher_matrix, field_name, solver_parameters=solver_parameters, - limiter=limiter, options=options) + options=options) diff --git a/gusto/time_discretisation/multi_level_schemes.py b/gusto/time_discretisation/multi_level_schemes.py index 47e3b9652..a11671cc4 100644 --- a/gusto/time_discretisation/multi_level_schemes.py +++ b/gusto/time_discretisation/multi_level_schemes.py @@ -144,6 +144,8 @@ def apply(self, x_out, *x_in): self.xnm1.assign(x_in[0]) self.x1.assign(x_in[1]) + # Set initial solver guess + self.x_out.assign(x_in[1]) solver.solve() x_out.assign(self.x_out) @@ -221,6 +223,8 @@ def apply(self, x_out, *x_in): self.xnm1.assign(x_in[0]) self.x1.assign(x_in[1]) + # Set initial solver guess + self.x_out.assign(x_in[1]) solver.solve() x_out.assign(self.x_out) @@ -352,6 +356,8 @@ def apply(self, x_out, *x_in): for n in range(self.nlevels): self.x[n].assign(x_in[n]) + # Set initial solver guess + self.x_out.assign(x_in[-1]) solver.solve() x_out.assign(self.x_out) @@ -507,5 +513,7 @@ def apply(self, x_out, *x_in): for n in range(self.nlevels): self.x[n].assign(x_in[n]) + # Set initial solver guess + self.x_out.assign(x_in[-1]) solver.solve() x_out.assign(self.x_out) diff --git a/gusto/time_discretisation/sdc.py b/gusto/time_discretisation/sdc.py index 45ff55fcc..0fe0c9f29 100644 --- a/gusto/time_discretisation/sdc.py +++ b/gusto/time_discretisation/sdc.py @@ -163,9 +163,9 @@ def __init__(self, base_scheme, domain, M, maxk, quad_type, node_type, qdelta_im # Get Q_delta matrices self.Qdelta_imp = genQDeltaCoeffs(qdelta_imp, form=formulation, - nodes=self.nodes, Q=self.Q) + nodes=self.nodes, Q=self.Q, nNodes=M, nodeType=node_type, quadType=quad_type) self.Qdelta_exp = genQDeltaCoeffs(qdelta_exp, form=formulation, - nodes=self.nodes, Q=self.Q) + nodes=self.nodes, Q=self.Q, nNodes=M, nodeType=node_type, quadType=quad_type) # Set default linear and nonlinear solver options if none passed in if linear_solver_parameters is None: @@ -526,7 +526,11 @@ def apply(self, x_out, x_in): self.fUnodes[m-1].assign(self.Urhs) self.compute_quad_final() # Compute y_(n+1) = y_n + sum(j=1,M) q_j*F(y_j) + self.U_fin.assign(self.Unodes[-1]) self.solver_fin.solve() + # Apply limiter if required + if self.limiter is not None: + self.limiter.apply(self.U_fin) x_out.assign(self.U_fin) else: # Take value at final quadrature node dtau_M diff --git a/gusto/time_discretisation/time_discretisation.py b/gusto/time_discretisation/time_discretisation.py index c0438295c..59a7a77f5 100644 --- a/gusto/time_discretisation/time_discretisation.py +++ b/gusto/time_discretisation/time_discretisation.py @@ -17,11 +17,11 @@ from firedrake.utils import cached_property from gusto.core.configuration import EmbeddedDGOptions, RecoveryOptions -from gusto.core.labels import time_derivative, prognostic, physics_label, mass_weighted +from gusto.core.labels import (time_derivative, prognostic, physics_label, + mass_weighted, nonlinear_time_derivative) from gusto.core.logging import logger, DEBUG, logging_ksp_monitor_true_residual from gusto.time_discretisation.wrappers import * - __all__ = ["TimeDiscretisation", "ExplicitTimeDiscretisation", "BackwardEuler", "ThetaMethod", 
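Several of the changes above seed the stage solvers with a better initial guess (the previous stage value or the incoming field). For an iterative nonlinear solve this does not change the converged answer, only the number of iterations needed to reach it, as the toy Newton iteration below illustrates.
```python
# Illustrative only: a better initial guess leaves the converged solution
# unchanged but reduces the number of Newton iterations required.
def newton(f, df, x0, tol=1e-12, max_it=50):
    x, its = x0, 0
    while abs(f(x)) > tol and its < max_it:
        x -= f(x) / df(x)
        its += 1
    return x, its

f = lambda x: x**3 - 2.0
df = lambda x: 3.0 * x**2

x_far, its_far = newton(f, df, x0=10.0)    # poor initial guess
x_near, its_near = newton(f, df, x0=1.3)   # guess close to the root

assert abs(x_far - x_near) < 1e-10 and its_near < its_far
```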
"TrapeziumRule", "TR_BDF2"] @@ -91,15 +91,17 @@ def __init__(self, domain, field_name=None, solver_parameters=None, self.wrapper.subwrappers.update({field: RecoveryWrapper(self, suboption)}) elif suboption.name == "supg": raise RuntimeError( - 'Time discretisation: suboption SUPG is currently not implemented within MixedOptions') + 'Time discretisation: suboption SUPG is not implemented within MixedOptions') else: raise RuntimeError( - f'Time discretisation: suboption wrapper {wrapper_name} not implemented') + f'Time discretisation: suboption wrapper {suboption.name} not implemented') + elif self.wrapper_name == "embedded_dg": self.wrapper = EmbeddedDGWrapper(self, options) elif self.wrapper_name == "recovered": self.wrapper = RecoveryWrapper(self, options) elif self.wrapper_name == "supg": + self.suboptions = options.suboptions self.wrapper = SUPGWrapper(self, options) else: raise RuntimeError( @@ -110,7 +112,7 @@ def __init__(self, domain, field_name=None, solver_parameters=None, # get default solver options if none passed in if solver_parameters is None: - self.solver_parameters = {'ksp_type': 'cg', + self.solver_parameters = {'ksp_type': 'gmres', 'pc_type': 'bjacobi', 'sub_pc_type': 'ilu'} else: @@ -131,26 +133,58 @@ def setup(self, equation, apply_bcs=True, *active_labels): self.residual = equation.residual if self.field_name is not None and hasattr(equation, "field_names"): - self.idx = equation.field_names.index(self.field_name) - self.fs = equation.spaces[self.idx] - self.residual = self.residual.label_map( - lambda t: t.get(prognostic) == self.field_name, - lambda t: Term( - split_form(t.form)[self.idx].form, - t.labels), - drop) + if isinstance(self.field_name, list): + # Multiple fields are being solved for simultaneously. + # This enables conservative transport to be implemented with SIQN. + # Use the full mixed space for self.fs, with the + # field_name, residual, and BCs being set up later. + self.fs = equation.function_space + self.idx = None + else: + self.idx = equation.field_names.index(self.field_name) + self.fs = equation.spaces[self.idx] + self.residual = self.residual.label_map( + lambda t: t.get(prognostic) == self.field_name, + lambda t: Term( + split_form(t.form)[self.idx].form, + t.labels), + drop) else: self.field_name = equation.field_name self.fs = equation.function_space self.idx = None - bcs = equation.bcs[self.field_name] - if len(active_labels) > 0: - self.residual = self.residual.label_map( - lambda t: any(t.has_label(time_derivative, *active_labels)), - map_if_false=drop) + if isinstance(self.field_name, list): + # Multiple fields are being solved for simultaneously. + # Keep all time derivative terms: + residual = self.residual.label_map( + lambda t: t.has_label(time_derivative), + map_if_false=drop) + + # Only keep active labels for prognostics in the list + # of simultaneously transported variables: + for subname in self.field_name: + field_residual = self.residual.label_map( + lambda t: t.get(prognostic) == subname, + map_if_false=drop) + + residual += field_residual.label_map( + lambda t: t.has_label(*active_labels), + map_if_false=drop) + + self.residual = residual + else: + self.residual = self.residual.label_map( + lambda t: any(t.has_label(time_derivative, *active_labels)), + map_if_false=drop) + + # Set the field name if using simultaneous transport. 
+ if isinstance(self.field_name, list): + self.field_name = equation.field_name + + bcs = equation.bcs[self.field_name] self.evaluate_source = [] self.physics_names = [] @@ -174,7 +208,10 @@ def setup(self, equation, apply_bcs=True, *active_labels): # timestepper should be used instead. if len(field_terms.label_map(lambda t: t.has_label(mass_weighted), map_if_false=drop)) > 0: if len(field_terms.label_map(lambda t: not t.has_label(mass_weighted), map_if_false=drop)) > 0: - raise ValueError(f"Mass-weighted and non-mass-weighted terms are present in a timestepping equation for {field}. As these terms cannot be solved for simultaneously, a split timestepping method should be used instead.") + raise ValueError('Mass-weighted and non-mass-weighted terms are present in a ' + + f'timestepping equation for {field}. As these terms cannot ' + + 'be solved for simultaneously, a split timestepping method ' + + 'should be used instead.') else: # Replace the terms with a mass_weighted label with the # mass_weighted form. It is important that the labels from @@ -182,12 +219,14 @@ def setup(self, equation, apply_bcs=True, *active_labels): self.residual = self.residual.label_map( lambda t: t.get(prognostic) == field and t.has_label(mass_weighted), map_if_true=lambda t: t.get(mass_weighted)) - # -------------------------------------------------------------------- # # Set up Wrappers # -------------------------------------------------------------------- # if self.wrapper is not None: + + wrapper_bcs = bcs if apply_bcs else None + if self.wrapper_name == "mixed_options": self.wrapper.wrapper_spaces = equation.spaces @@ -196,10 +235,11 @@ def setup(self, equation, apply_bcs=True, *active_labels): for field, subwrapper in self.wrapper.subwrappers.items(): if field not in equation.field_names: - raise ValueError(f"The option defined for {field} is for a field that does not exist in the equation set") + raise ValueError(f'The option defined for {field} is for a field ' + + 'that does not exist in the equation set.') field_idx = equation.field_names.index(field) - subwrapper.setup(equation.spaces[field_idx]) + subwrapper.setup(equation.spaces[field_idx], equation.bcs[field]) # Update the function space to that needed by the wrapper self.wrapper.wrapper_spaces[field_idx] = subwrapper.function_space @@ -216,23 +256,39 @@ def setup(self, equation, apply_bcs=True, *active_labels): else: if self.wrapper_name == "supg": - self.wrapper.setup() + if self.suboptions is not None: + for field_name, term_labels in self.suboptions.items(): + self.wrapper.setup(field_name) + new_test = self.wrapper.test + if term_labels is not None: + for term_label in term_labels: + self.residual = self.residual.label_map( + lambda t: t.get(prognostic) == field_name and t.has_label(term_label), + map_if_true=replace_test_function(new_test, old_idx=self.wrapper.idx)) + else: + self.residual = self.residual.label_map( + lambda t: t.get(prognostic) == field_name, + map_if_true=replace_test_function(new_test, old_idx=self.wrapper.idx)) + self.residual = self.wrapper.label_terms(self.residual) + else: + self.wrapper.setup(self.field_name) + new_test = self.wrapper.test + self.residual = self.residual.label_map( + all_terms, + map_if_true=replace_test_function(new_test)) + self.residual = self.wrapper.label_terms(self.residual) else: - self.wrapper.setup(self.fs) - self.fs = self.wrapper.function_space + self.wrapper.setup(self.fs, wrapper_bcs) + self.fs = self.wrapper.function_space + new_test = TestFunction(self.wrapper.test_space) + # Replace the 
original test function with the one from the wrapper + self.residual = self.residual.label_map( + all_terms, + map_if_true=replace_test_function(new_test)) + + self.residual = self.wrapper.label_terms(self.residual) if self.solver_parameters is None: self.solver_parameters = self.wrapper.solver_parameters - new_test = TestFunction(self.wrapper.test_space) - # SUPG has a special wrapper - if self.wrapper_name == "supg": - new_test = self.wrapper.test - - # Replace the original test function with the one from the wrapper - self.residual = self.residual.label_map( - all_terms, - map_if_true=replace_test_function(new_test)) - - self.residual = self.wrapper.label_terms(self.residual) # -------------------------------------------------------------------- # # Make boundary conditions @@ -240,10 +296,20 @@ def setup(self, equation, apply_bcs=True, *active_labels): if not apply_bcs: self.bcs = None - elif self.wrapper is not None: - # Transfer boundary conditions onto test function space - self.bcs = [DirichletBC(self.fs, bc.function_arg, bc.sub_domain) - for bc in bcs] + elif self.wrapper is not None and self.wrapper_name != "supg": + if self.wrapper_name == 'mixed_options': + # Define new Dirichlet BCs on the wrapper-modified + # mixed function space. + self.bcs = [] + for idx, field_name in enumerate(self.equation.field_names): + for bc in equation.bcs[field_name]: + self.bcs.append(DirichletBC(self.fs.sub(idx), + bc.function_arg, + bc.sub_domain)) + else: + # Transfer boundary conditions onto test function space + self.bcs = [DirichletBC(self.fs, bc.function_arg, bc.sub_domain) + for bc in bcs] else: self.bcs = bcs @@ -348,6 +414,15 @@ def __init__(self, domain, field_name=None, fixed_subcycles=None, self.fixed_subcycles = fixed_subcycles self.subcycle_by_courant = subcycle_by_courant + # get default solver options if none passed in + if solver_parameters is None: + self.solver_parameters = {'snes_type': 'ksponly', + 'ksp_type': 'cg', + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu'} + else: + self.solver_parameters = solver_parameters + def setup(self, equation, apply_bcs=True, *active_labels): """ Set up the time discretisation based on the equation. 
@@ -372,6 +447,19 @@ def setup(self, equation, apply_bcs=True, *active_labels): self.x0 = Function(self.fs) self.x1 = Function(self.fs) + # If the time_derivative term is nonlinear, we must use a nonlinear solver + if ( + len(self.residual.label_map( + lambda t: t.has_label(nonlinear_time_derivative), + map_if_false=drop + )) > 0 and self.solver_parameters.get('snes_type') == 'ksponly' + ): + message = ('Switching to newton line search' + + f' nonlinear solver for {self.field_name}' + + ' as the time derivative term is nonlinear') + logger.warning(message) + self.solver_parameters['snes_type'] = 'newtonls' + @cached_property def lhs(self): """Set up the discretisation's left hand side (the time derivative).""" @@ -388,8 +476,6 @@ def solver(self): # setup linear solver using lhs and rhs defined in derived class problem = NonlinearVariationalProblem(self.lhs - self.rhs, self.x_out, bcs=self.bcs) solver_name = self.field_name+self.__class__.__name__ - # If snes_type not specified by user, set this to ksp only to avoid outer Newton iteration - self.solver_parameters.setdefault('snes_type', 'ksponly') return NonlinearVariationalSolver(problem, solver_parameters=self.solver_parameters, options_prefix=solver_name) @@ -420,6 +506,7 @@ def apply(self, x_out, x_in): self.x0.assign(x_in) for i in range(self.ncycles): + self.subcycle_idx = i self.apply_cycle(self.x1, self.x0) self.x0.assign(self.x1) x_out.assign(self.x1) @@ -480,6 +567,7 @@ def rhs(self): return r.form + @wrapper_apply def apply(self, x_out, x_in): """ Apply the time discretisation to advance one whole time step. @@ -496,6 +584,8 @@ def apply(self, x_out, x_in): self.x_out.assign(x_in) self.x1.assign(x_in) + # Set initial solver guess + self.x_out.assign(x_in) self.solver.solve() x_out.assign(self.x_out) @@ -570,6 +660,7 @@ def rhs(self): return r.form + @wrapper_apply def apply(self, x_out, x_in): """ Apply the time discretisation to advance one whole time step. @@ -579,6 +670,8 @@ def apply(self, x_out, x_in): x_in (:class:`Function`): the input field. """ self.x1.assign(x_in) + # Set initial solver guess + self.x_out.assign(x_in) self.solver.solve() x_out.assign(self.x_out) @@ -613,7 +706,6 @@ def __init__(self, domain, field_name=None, solver_parameters=None, options=options) -# TODO: this should be implemented as an ImplicitRK class TR_BDF2(TimeDiscretisation): """ Implements the two stage implicit TR-BDF2 time stepping method, with a @@ -731,6 +823,7 @@ def solver_bdf2(self): return NonlinearVariationalSolver(problem, solver_parameters=self.solver_parameters, options_prefix=solver_name) + @wrapper_apply def apply(self, x_out, x_in): """ Apply the time discretisation to advance one whole time step. @@ -740,6 +833,12 @@ def apply(self, x_out, x_in): x_in (:class:`Function`): the input field(s). 
""" self.xn.assign(x_in) + + # Set initial solver guess + self.xnpg.assign(x_in) self.solver_tr.solve() + + # Set initial solver guess + self.x_out.assign(self.xnpg) self.solver_bdf2.solve() x_out.assign(self.x_out) diff --git a/gusto/time_discretisation/wrappers.py b/gusto/time_discretisation/wrappers.py index 8b1a5c4f1..2d388e998 100644 --- a/gusto/time_discretisation/wrappers.py +++ b/gusto/time_discretisation/wrappers.py @@ -11,8 +11,9 @@ ) from firedrake.fml import Term from gusto.core.configuration import EmbeddedDGOptions, RecoveryOptions, SUPGOptions -from gusto.recovery import Recoverer, ReversibleRecoverer +from gusto.recovery import Recoverer, ReversibleRecoverer, ConservativeRecoverer from gusto.core.labels import transporting_velocity +from gusto.core.conservative_projection import ConservativeProjector import ufl __all__ = ["EmbeddedDGWrapper", "RecoveryWrapper", "SUPGWrapper", "MixedFSWrapper"] @@ -34,6 +35,7 @@ def __init__(self, time_discretisation, wrapper_options): self.options = wrapper_options self.solver_parameters = None self.original_space = None + self.is_conservative = False @abstractmethod def setup(self, original_space): @@ -46,8 +48,8 @@ def setup(self, original_space): Args: original_space (:class:`FunctionSpace`): the space that the - prognostic variable is defined on. This is a subset space of - a mixed function space when using a MixedFSWrapper. + prognostic variable is defined on. This is a subset space of + a mixed function space when using a MixedFSWrapper. """ self.original_space = original_space @@ -85,8 +87,17 @@ class EmbeddedDGWrapper(Wrapper): the original space. """ - def setup(self, original_space): - """Sets up function spaces and fields needed for this wrapper.""" + def setup(self, original_space, post_apply_bcs): + """ + Sets up function spaces and fields needed for this wrapper. + + Args: + original_space (:class:`FunctionSpace`): the space that the + prognostic variable is defined on. + post_apply_bcs (list of :class:`DirichletBC`): list of Dirichlet + boundary condition objects to be passed to the projector used + in the post-apply step. 
+ """ assert isinstance(self.options, EmbeddedDGOptions), \ 'Embedded DG wrapper can only be used with Embedded DG Options' @@ -114,6 +125,7 @@ def setup(self, original_space): self.x_in = Function(self.function_space) self.x_out = Function(self.function_space) + self.x_in_orig = Function(original_space) if self.time_discretisation.idx is None: self.x_projected = Function(self.original_space) @@ -121,9 +133,23 @@ def setup(self, original_space): self.x_projected = Function(equation.spaces[self.time_discretisation.idx]) if self.options.project_back_method == 'project': - self.x_out_projector = Projector(self.x_out, self.x_projected) + self.x_out_projector = Projector(self.x_out, self.x_projected, + bcs=post_apply_bcs) elif self.options.project_back_method == 'recover': self.x_out_projector = Recoverer(self.x_out, self.x_projected) + elif self.options.project_back_method == 'conservative_project': + self.is_conservative = True + self.rho_name = self.options.rho_name + self.rho_in_orig = Function(self.options.orig_rho_space) + self.rho_out_orig = Function(self.options.orig_rho_space) + self.rho_in_embedded = Function(self.function_space) + self.rho_out_embedded = Function(self.function_space) + self.x_in_projector = ConservativeProjector( + self.rho_in_orig, self.rho_in_embedded, + self.x_in_orig, self.x_in) + self.x_out_projector = ConservativeProjector( + self.rho_out_embedded, self.rho_out_orig, + self.x_out, self.x_projected, subtract_mean=True) else: raise NotImplementedError( 'EmbeddedDG Wrapper: project_back_method' @@ -142,10 +168,15 @@ def pre_apply(self, x_in): x_in (:class:`Function`): the original input field. """ - try: - self.x_in.interpolate(x_in) - except NotImplementedError: - self.x_in.project(x_in) + self.x_in_orig.assign(x_in) + + if self.is_conservative: + self.x_in_projector.project() + else: + try: + self.x_in.interpolate(x_in) + except NotImplementedError: + self.x_in.project(x_in) def post_apply(self, x_out): """ @@ -169,8 +200,17 @@ class RecoveryWrapper(Wrapper): field is then returned to the original space. """ - def setup(self, original_space): - """Sets up function spaces and fields needed for this wrapper.""" + def setup(self, original_space, post_apply_bcs): + """ + Sets up function spaces and fields needed for this wrapper. + + Args: + original_space (:class:`FunctionSpace`): the space that the + prognostic variable is defined on. + post_apply_bcs (list of :class:`DirichletBC`): list of Dirichlet + boundary condition objects to be passed to the projector used + in the post-apply step. 
+ """ assert isinstance(self.options, RecoveryOptions), \ 'Recovery wrapper can only be used with Recovery Options' @@ -196,7 +236,7 @@ def setup(self, original_space): # Internal variables to be used # -------------------------------------------------------------------- # - self.x_in_tmp = Function(self.original_space) + self.x_in_orig = Function(self.original_space) self.x_in = Function(self.function_space) self.x_out = Function(self.function_space) @@ -206,17 +246,34 @@ def setup(self, original_space): self.x_projected = Function(equation.spaces[self.time_discretisation.idx]) # Operator to recover to higher discontinuous space - self.x_recoverer = ReversibleRecoverer(self.x_in_tmp, self.x_in, self.options) + if self.options.project_low_method == 'conservative_project': + self.is_conservative = True + self.rho_name = self.options.rho_name + self.rho_in_orig = Function(self.options.orig_rho_space) + self.rho_out_orig = Function(self.options.orig_rho_space) + self.rho_in_embedded = Function(self.function_space) + self.rho_out_embedded = Function(self.function_space) + self.x_recoverer = ConservativeRecoverer(self.x_in_orig, self.x_in, + self.rho_in_orig, + self.rho_in_embedded, + self.options) + else: + self.x_recoverer = ReversibleRecoverer(self.x_in_orig, self.x_in, self.options) # Operators for projecting back self.interp_back = (self.options.project_low_method == 'interpolate') if self.options.project_low_method == 'interpolate': self.x_out_projector = Interpolator(self.x_out, self.x_projected) elif self.options.project_low_method == 'project': - self.x_out_projector = Projector(self.x_out, self.x_projected) + self.x_out_projector = Projector(self.x_out, self.x_projected, + bcs=post_apply_bcs) elif self.options.project_low_method == 'recover': self.x_out_projector = Recoverer(self.x_out, self.x_projected, method=self.options.broken_method) + elif self.options.project_low_method == 'conservative_project': + self.x_out_projector = ConservativeProjector( + self.rho_out_embedded, self.rho_out_orig, + self.x_out, self.x_projected, subtract_mean=True) else: raise NotImplementedError( 'Recovery Wrapper: project_back_method' @@ -231,7 +288,7 @@ def pre_apply(self, x_in): x_in (:class:`Function`): the original input field. """ - self.x_in_tmp.assign(x_in) + self.x_in_orig.assign(x_in) self.x_recoverer.project() def post_apply(self, x_out): @@ -277,16 +334,22 @@ class SUPGWrapper(Wrapper): test function space that is used to solve the problem. 
""" - def setup(self): + def setup(self, field_name): """Sets up function spaces and fields needed for this wrapper.""" assert isinstance(self.options, SUPGOptions), \ 'SUPG wrapper can only be used with SUPG Options' domain = self.time_discretisation.domain + if self.options.suboptions is not None: + self.idx = self.time_discretisation.equation.field_names.index(field_name) + self.test_space = self.time_discretisation.equation.spaces[self.idx] + else: + self.idx = None + self.test_space = self.time_discretisation.fs self.function_space = self.time_discretisation.fs - self.test_space = self.function_space self.x_out = Function(self.function_space) + self.field_name = field_name # -------------------------------------------------------------------- # # Work out SUPG parameter @@ -303,10 +366,10 @@ def setup(self): default_vals = [self.options.default*self.time_discretisation.dt]*dim # check for directions is which the space is discontinuous # so that we don't apply supg in that direction - if is_cg(self.function_space): + if is_cg(self.test_space): vals = default_vals else: - space = self.function_space.ufl_element().sobolev_space + space = self.test_space.ufl_element().sobolev_space if space.name in ["HDiv", "DirectionalH"]: vals = [default_vals[i] if space[i].name == "H1" else 0. for i in range(dim)] @@ -324,8 +387,11 @@ def setup(self): # -------------------------------------------------------------------- # # Set up test function # -------------------------------------------------------------------- # + if self.options.suboptions is not None: + test = self.time_discretisation.equation.tests[self.idx] + else: + test = TestFunction(self.test_space) - test = TestFunction(self.test_space) uadv = Function(domain.spaces('HDiv')) self.test = test + dot(dot(uadv, self.tau), grad(test)) self.transporting_velocity = uadv @@ -396,6 +462,7 @@ def setup(self): self.function_space = MixedFunctionSpace(self.wrapper_spaces) self.x_in = Function(self.function_space) self.x_out = Function(self.function_space) + self.is_conservative = any([subwrapper.is_conservative for subwrapper in self.subwrappers.values()]) def pre_apply(self, x_in): """ @@ -410,6 +477,8 @@ def pre_apply(self, x_in): if field_name in self.subwrappers: subwrapper = self.subwrappers[field_name] + if subwrapper.is_conservative: + self.pre_update_rho(subwrapper) subwrapper.pre_apply(field) x_in_sub.assign(subwrapper.x_in) else: @@ -429,6 +498,34 @@ def post_apply(self, x_out): if field_name in self.subwrappers: subwrapper = self.subwrappers[field_name] subwrapper.x_out.assign(field) + if subwrapper.is_conservative: + self.post_update_rho(subwrapper) subwrapper.post_apply(x_out_sub) else: x_out_sub.assign(field) + + def pre_update_rho(self, subwrapper): + """ + Updates the stored density field for the pre-apply for the subwrapper. + + Args: + subwrapper (:class:`Wrapper`): the original input field. + """ + + rho_subwrapper = self.subwrappers[subwrapper.rho_name] + + subwrapper.rho_in_orig.assign(rho_subwrapper.x_in_orig) + subwrapper.rho_in_embedded.assign(rho_subwrapper.x_in) + + def post_update_rho(self, subwrapper): + """ + Updates the stored density field for the post-apply for the subwrapper. + + Args: + subwrapper (:class:`Wrapper`): the original input field. 
+ """ + + rho_subwrapper = self.subwrappers[subwrapper.rho_name] + + subwrapper.rho_out_orig.assign(rho_subwrapper.x_projected) + subwrapper.rho_out_embedded.assign(rho_subwrapper.x_out) diff --git a/gusto/timestepping/semi_implicit_quasi_newton.py b/gusto/timestepping/semi_implicit_quasi_newton.py index 1e524a100..7c4de236b 100644 --- a/gusto/timestepping/semi_implicit_quasi_newton.py +++ b/gusto/timestepping/semi_implicit_quasi_newton.py @@ -3,8 +3,10 @@ and GungHo dynamical cores. """ -from firedrake import (Function, Constant, TrialFunctions, DirichletBC, - LinearVariationalProblem, LinearVariationalSolver) +from firedrake import ( + Function, Constant, TrialFunctions, DirichletBC, div, Interpolator, + LinearVariationalProblem, LinearVariationalSolver +) from firedrake.fml import drop, replace_subject from pyop2.profiling import timed_stage from gusto.core import TimeLevelFields, StateFields @@ -35,8 +37,8 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, diffusion_schemes=None, physics_schemes=None, slow_physics_schemes=None, fast_physics_schemes=None, alpha=Constant(0.5), off_centred_u=False, - num_outer=2, num_inner=2, accelerator=False): - + num_outer=2, num_inner=2, accelerator=False, + predictor=None, reference_update_freq=None): """ Args: equation_set (:class:`PrognosticEquationSet`): the prognostic @@ -84,13 +86,37 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, implicit forcing (pressure gradient and Coriolis) terms, and the linear solve. Defaults to 2. Note that default used by the Met Office's ENDGame and GungHo models is 2. - accelerator (bool, optional): Whether to zero non-wind implicit forcings - for transport terms in order to speed up solver convergence + accelerator (bool, optional): Whether to zero non-wind implicit + forcings for transport terms in order to speed up solver + convergence. Defaults to False. + predictor (str, optional): a single string corresponding to the name + of a variable to transport using the divergence predictor. This + pre-multiplies that variable by (1 - beta*dt*div(u)) before the + transport step, and calculates its transport increment from the + transport of this variable. This can improve the stability of + the time stepper at large time steps, when not using an + advective-then-flux formulation. This is only suitable for the + use on the conservative variable (e.g. depth or density). + Defaults to None, in which case no predictor is used. + reference_update_freq (float, optional): frequency with which to + update the reference profile with the n-th time level state + fields. This variable corresponds to time in seconds, and + setting this to zero will update the reference profiles every + time step. Setting it to None turns off the update, and + reference profiles will remain at their initial values. + Defaults to None. 
""" self.num_outer = num_outer self.num_inner = num_inner self.alpha = alpha + self.predictor = predictor + self.accelerator = accelerator + self.reference_update_freq = reference_update_freq + self.to_update_ref_profile = False + + # Flag for if we have simultaneous transport + self.simult = False # default is to not offcentre transporting velocity but if it # is offcentred then use the same value as alpha @@ -125,15 +151,30 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, self.transported_fields = [] for scheme in transport_schemes: assert scheme.nlevels == 1, "multilevel schemes not supported as part of this timestepping loop" - assert scheme.field_name in equation_set.field_names - self.active_transport.append((scheme.field_name, scheme)) - self.transported_fields.append(scheme.field_name) - # Check that there is a corresponding transport method - method_found = False - for method in spatial_methods: - if scheme.field_name == method.variable and method.term_label == transport: - method_found = True - assert method_found, f'No transport method found for variable {scheme.field_name}' + if isinstance(scheme.field_name, list): + # This means that multiple fields are being transported simultaneously + self.simult = True + for subfield in scheme.field_name: + assert subfield in equation_set.field_names + + # Check that there is a corresponding transport method for + # each field in the list + method_found = False + for method in spatial_methods: + if subfield == method.variable and method.term_label == transport: + method_found = True + assert method_found, f'No transport method found for variable {scheme.field_name}' + self.active_transport.append((scheme.field_name, scheme)) + else: + assert scheme.field_name in equation_set.field_names + + # Check that there is a corresponding transport method + method_found = False + for method in spatial_methods: + if scheme.field_name == method.variable and method.term_label == transport: + method_found = True + self.active_transport.append((scheme.field_name, scheme)) + assert method_found, f'No transport method found for variable {scheme.field_name}' self.diffusion_schemes = [] if diffusion_schemes is not None: @@ -188,7 +229,14 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, self.linear_solver = linear_solver self.forcing = Forcing(equation_set, self.alpha) self.bcs = equation_set.bcs - self.accelerator = accelerator + + if self.predictor is not None: + V_DG = equation_set.domain.spaces('DG') + self.predictor_field_in = Function(V_DG) + div_factor = Constant(1.0) - (Constant(1.0) - self.alpha)*self.dt*div(self.x.n('u')) + self.predictor_interpolator = Interpolator( + self.x.star(predictor)*div_factor, self.predictor_field_in + ) def _apply_bcs(self): """ @@ -210,7 +258,11 @@ def transporting_velocity(self): def setup_fields(self): """Sets up time levels n, star, p and np1""" self.x = TimeLevelFields(self.equation, 1) - self.x.add_fields(self.equation, levels=("star", "p", "after_slow", "after_fast")) + if self.simult is True: + # If there is any simultaneous transport, add an extra 'simult' field: + self.x.add_fields(self.equation, levels=("star", "p", "simult", "after_slow", "after_fast")) + else: + self.x.add_fields(self.equation, levels=("star", "p", "after_slow", "after_fast")) for aux_eqn, _ in self.auxiliary_equations_and_schemes: self.x.add_fields(aux_eqn) # Prescribed fields for auxiliary eqns should come from prognostics of @@ -252,6 +304,63 @@ def copy_active_tracers(self, x_in, 
x_out): for name in self.tracers_to_copy: x_out(name).assign(x_in(name)) + def transport_fields(self, outer, xstar, xp): + """ + Transports all fields in xstar with a transport scheme + and places the result in xp. + + Args: + outer (int): the outer loop iteration number + xstar (:class:`Fields`): the collection of state fields to be + transported. + xp (:class:`Fields`): the collection of state fields resulting from + the transport. + """ + for name, scheme in self.active_transport: + if isinstance(name, list): + # Transport multiple fields from xstar simultaneously. + # We transport the mixed function space from xstar to xsimult, then + # extract the updated fields and pass them to xp; this avoids overwriting + # any previously transported fields. + logger.info(f'Semi-implicit Quasi Newton: Transport {outer}: ' + + f'Simultaneous transport of {name}') + scheme.apply(self.x.simult(self.field_name), xstar(self.field_name)) + for field_name in name: + xp(field_name).assign(self.x.simult(field_name)) + else: + logger.info(f'Semi-implicit Quasi Newton: Transport {outer}: {name}') + # transports a single field from xstar and puts the result in xp + if name == self.predictor: + # Pre-multiply this variable by (1 - dt*beta*div(u)) + V = xstar(name).function_space() + field_out = Function(V) + self.predictor_interpolator.interpolate() + scheme.apply(field_out, self.predictor_field_in) + + # xp is xstar plus the increment from the transported predictor + xp(name).assign(xstar(name) + field_out - self.predictor_field_in) + else: + # Standard transport + scheme.apply(xp(name), xstar(name)) + + def update_reference_profiles(self): + """ + Updates the reference profiles and if required also updates them in the + linear solver. + """ + + if self.reference_update_freq is not None: + if float(self.t) + self.reference_update_freq > self.last_ref_update_time: + self.equation.X_ref.assign(self.x.n(self.field_name)) + self.last_ref_update_time = float(self.t) + if hasattr(self.linear_solver, 'update_reference_profiles'): + self.linear_solver.update_reference_profiles() + + elif self.to_update_ref_profile: + if hasattr(self.linear_solver, 'update_reference_profiles'): + self.linear_solver.update_reference_profiles() + self.to_update_ref_profile = False + def timestep(self): """Defines the timestep""" xn = self.x.n @@ -264,6 +373,10 @@ def timestep(self): xrhs_phys = self.xrhs_phys dy = self.dy + # Update reference profiles -------------------------------------------- + self.update_reference_profiles() + + # Slow physics --------------------------------------------------------- x_after_slow(self.field_name).assign(xn(self.field_name)) if len(self.slow_physics_schemes) > 0: with timed_stage("Slow physics"): @@ -271,6 +384,7 @@ def timestep(self): for _, scheme in self.slow_physics_schemes: scheme.apply(x_after_slow(scheme.field_name), x_after_slow(scheme.field_name)) + # Explicit forcing ----------------------------------------------------- with timed_stage("Apply forcing terms"): logger.info('Semi-implicit Quasi Newton: Explicit forcing') # Put explicit forcing into xstar @@ -280,16 +394,16 @@ # the correct values xp(self.field_name).assign(xstar(self.field_name)) + # OUTER ---------------------------------------------------------------- for outer in range(self.num_outer): + # Transport -------------------------------------------------------- with timed_stage("Transport"): self.io.log_courant(self.fields, 'transporting_velocity', message=f'transporting velocity, outer iteration {outer}') -
for name, scheme in self.active_transport: - logger.info(f'Semi-implicit Quasi Newton: Transport {outer}: {name}') - # transports a field from xstar and puts result in xp - scheme.apply(xp(name), xstar(name)) + self.transport_fields(outer, xstar, xp) + # Fast physics ----------------------------------------------------- x_after_fast(self.field_name).assign(xp(self.field_name)) if len(self.fast_physics_schemes) > 0: with timed_stage("Fast physics"): @@ -302,8 +416,7 @@ def timestep(self): for inner in range(self.num_inner): - # TODO: this is where to update the reference state - + # Implicit forcing --------------------------------------------- with timed_stage("Apply forcing terms"): logger.info(f'Semi-implicit Quasi Newton: Implicit forcing {(outer, inner)}') self.forcing.apply(xp, xnp1, xrhs, "implicit") @@ -314,6 +427,7 @@ def timestep(self): xrhs -= xnp1(self.field_name) xrhs += xrhs_phys + # Linear solve ------------------------------------------------- with timed_stage("Implicit solve"): logger.info(f'Semi-implicit Quasi Newton: Mixed solve {(outer, inner)}') self.linear_solver.solve(xrhs, dy) # solves linear system and places result in dy @@ -353,10 +467,18 @@ def run(self, t, tmax, pick_up=False): pick_up: (bool): specify whether to pick_up from a previous run """ - if not pick_up: + if not pick_up and self.reference_update_freq is None: assert self.reference_profiles_initialised, \ 'Reference profiles for must be initialised to use Semi-Implicit Timestepper' + if not pick_up and self.reference_update_freq is not None: + # Force reference profiles to be updated on first time step + self.last_ref_update_time = float(t) - float(self.dt) + + elif not pick_up or (pick_up and self.reference_update_freq is None): + # Indicate that linear solver profile needs updating + self.to_update_ref_profile = True + super().run(t, tmax, pick_up=pick_up) @@ -440,11 +562,13 @@ def __init__(self, equation, alpha): # now we can set up the explicit and implicit problems explicit_forcing_problem = LinearVariationalProblem( - a.form, L_explicit.form, self.xF, bcs=bcs + a.form, L_explicit.form, self.xF, bcs=bcs, + constant_jacobian=True ) implicit_forcing_problem = LinearVariationalProblem( - a.form, L_implicit.form, self.xF, bcs=bcs + a.form, L_implicit.form, self.xF, bcs=bcs, + constant_jacobian=True ) self.solvers = {} diff --git a/gusto/timestepping/split_timestepper.py b/gusto/timestepping/split_timestepper.py index 21dd9db18..2006c73ac 100644 --- a/gusto/timestepping/split_timestepper.py +++ b/gusto/timestepping/split_timestepper.py @@ -1,13 +1,165 @@ """Split timestepping methods for generically solving terms separately.""" from firedrake import Projector -from firedrake.fml import Label +from firedrake.fml import Label, drop from pyop2.profiling import timed_stage +from gusto.core import TimeLevelFields, StateFields from gusto.core.labels import time_derivative, physics_label from gusto.time_discretisation.time_discretisation import ExplicitTimeDiscretisation -from gusto.timestepping.timestepper import Timestepper +from gusto.timestepping.timestepper import BaseTimestepper, Timestepper +from numpy import ones -__all__ = ["SplitPhysicsTimestepper", "SplitPrescribedTransport"] +__all__ = ["SplitTimestepper", "SplitPhysicsTimestepper", "SplitPrescribedTransport"] + + +class SplitTimestepper(BaseTimestepper): + """ + Implements a timeloop by applying separate schemes to different terms, e.g, physics + and individual dynamics components in a user-defined order. 
This allows a different + time discretisation to be applied to each defined component. Different terms can be + substepped by specifying weights; this enables Strang splitting to be applied. + """ + + def __init__(self, equation, term_splitting, dynamics_schemes, io, + weights=None, spatial_methods=None, physics_schemes=None): + """ + Args: + equation (:class:`PrognosticEquation`): the prognostic equation + term_splitting (list): a list of labels specifying the terms that should + be solved separately and the order to do so. + dynamics_schemes (dict): a dictionary of :class:`TimeDiscretisation` + objects for the dynamics terms, keyed by label. A scheme must be + provided for each non-physics label that is provided in the + term_splitting list. + io (:class:`IO`): the model's object for controlling input/output. + weights (array, optional): An array of weights for substepping + of any dynamics or physics scheme. The sum of weights for + each distinct label in term_splitting must be 1. + spatial_methods (iter, optional): a list of objects describing the + methods to use for discretising transport or diffusion terms + for each transported/diffused variable. Defaults to None, + in which case the terms follow the original discretisation in + the equation. + physics_schemes: (list, optional): a list of tuples of the form + (:class:`PhysicsParametrisation`, :class:`TimeDiscretisation`), + pairing physics parametrisations and timestepping schemes to use + for each. Timestepping schemes for physics must be explicit. + Defaults to None. + """ + + if spatial_methods is not None: + self.spatial_methods = spatial_methods + else: + self.spatial_methods = [] + + # If we have physics schemes, extract these. + if 'physics' in term_splitting: + if physics_schemes is None: + raise ValueError('Physics schemes need to be specified when applying ' + + 'a physics splitting in the SplitTimestepper') + else: + # Check that the weights are correct for physics: + count = 0 + if weights is not None: + for idx, term in enumerate(term_splitting): + if term == 'physics': + count += weights[idx] + if count != 1: + raise ValueError('Incorrect weights are specified for the ' + + 'physics schemes in the split timestepper.') + self.physics_schemes = physics_schemes + else: + self.physics_schemes = [] + + for parametrisation, phys_scheme in self.physics_schemes: + # Check that the supplied schemes for physics are valid + if hasattr(parametrisation, "explicit_only") and parametrisation.explicit_only: + assert isinstance(phys_scheme, ExplicitTimeDiscretisation), \ + ("Only explicit time discretisations can be used with " + + f"physics scheme {parametrisation.label.label}") + + self.term_splitting = term_splitting + self.dynamics_schemes = dynamics_schemes + + if weights is not None: + self.weights = weights + else: + self.weights = ones(len(self.term_splitting)) + + # Check that each dynamics label in term_splitting has a corresponding + # dynamics scheme + for term in self.term_splitting: + if term != 'physics': + assert term in self.dynamics_schemes, \ + f'The {term} terms do not have a specified scheme in the split timestepper' + + # Multilevel schemes are currently not supported for the dynamics terms.
+ for label, scheme in self.dynamics_schemes.items(): + assert scheme.nlevels == 1, \ + "Multilevel schemes are not currently implemented in the split timestepper" + + # As we handle physics in separate parametrisations, these are not + # passed to the super __init__ + super().__init__(equation, io) + + # Check that each dynamics term is specified by a label + # in the term_splitting list, but also that there are not + # multiple labels, i.e. there is a single specified time discretisation. + # When using weights, these should add to 1 for each term. + terms = self.equation.residual.label_map( + lambda t: any(t.has_label(time_derivative, physics_label)), map_if_true=drop + ) + for term in terms: + count = 0 + for idx, label in enumerate(self.term_splitting): + if term.has_label(Label(label)): + count += self.weights[idx] + if count != 1: + raise ValueError('The term_splitting list does not correctly cover ' + + 'the dynamics terms in the equation(s).') + + # Timesteps for each scheme in the term_splitting list + self.split_dts = [self.equation.domain.dt*weight for weight in self.weights] + + @property + def transporting_velocity(self): + return self.fields('u') + + def setup_fields(self): + self.x = TimeLevelFields(self.equation, 1) + self.fields = StateFields(self.x, self.equation.prescribed_fields, + *self.io.output.dumplist) + + def setup_scheme(self): + """Sets up transport, diffusion and physics schemes""" + # TODO: apply_bcs should be False for advection but this means + # tests with KGOs fail + self.setup_equation(self.equation) + + apply_bcs = True + for label, scheme in self.dynamics_schemes.items(): + scheme.setup(self.equation, apply_bcs, Label(label)) + self.setup_transporting_velocity(scheme) + if self.io.output.log_courant and label == 'transport': + scheme.courant_max = self.io.courant_max + + apply_bcs = False + for parametrisation, scheme in self.physics_schemes: + scheme.setup(self.equation, apply_bcs, parametrisation.label) + + def timestep(self): + + for idx, term in enumerate(self.term_splitting): + split_dt = self.split_dts[idx] + if term == 'physics': + with timed_stage("Physics"): + for _, scheme in self.physics_schemes: + scheme.dt = split_dt + scheme.apply(self.x.np1(scheme.field_name), self.x.np1(scheme.field_name)) + else: + scheme = self.dynamics_schemes[term] + scheme.dt = split_dt + scheme.apply(self.x.np1(scheme.field_name), self.x.np1(scheme.field_name)) class SplitPhysicsTimestepper(Timestepper): diff --git a/gusto/timestepping/timestepper.py b/gusto/timestepping/timestepper.py index 5aec7e9b4..5c906429b 100644 --- a/gusto/timestepping/timestepper.py +++ b/gusto/timestepping/timestepper.py @@ -2,10 +2,11 @@ from abc import ABCMeta, abstractmethod, abstractproperty from firedrake import Function, Projector, split -from firedrake.fml import drop, Term, all_terms +from firedrake.fml import drop, Term, all_terms, LabelledForm from pyop2.profiling import timed_stage from gusto.equations import PrognosticEquationSet from gusto.core import TimeLevelFields, StateFields +from gusto.core.io import TimeData from gusto.core.labels import transport, diffusion, prognostic, transporting_velocity from gusto.core.logging import logger from gusto.time_discretisation.time_discretisation import ExplicitTimeDiscretisation @@ -31,6 +32,7 @@ def __init__(self, equation, io): self.dt = self.equation.domain.dt self.t = self.equation.domain.t self.reference_profiles_initialised = False + self.last_ref_update_time = None self.setup_fields() self.setup_scheme() @@ -167,6 +169,25 @@ 
def setup_transporting_velocity(self, scheme): scheme.residual = transporting_velocity.update_value(scheme.residual, uadv) + # Now also replace transporting velocity in the terms that are + # contained in labels + for idx, t in enumerate(scheme.residual.terms): + if t.has_label(transporting_velocity): + for label in t.labels.keys(): + if type(t.labels[label]) is LabelledForm: + t.labels[label] = t.labels[label].label_map( + lambda s: s.has_label(transporting_velocity), + map_if_true=lambda s: + Term(ufl.replace( + s.form, + {s.get(transporting_velocity): uadv}), + s.labels + ) + ) + + scheme.residual.terms[idx].labels[label] = \ + transporting_velocity.update_value(t.labels[label], uadv) + def log_timestep(self): """ Logs the start of a time step. @@ -197,9 +218,14 @@ def run(self, t, tmax, pick_up=False): if pick_up: # Pick up fields, and return other info to be picked up - t, reference_profiles, self.step, initial_timesteps = self.io.pick_up_from_checkpoint(self.fields) - self.set_reference_profiles(reference_profiles) + time_data, reference_profiles = self.io.pick_up_from_checkpoint(self.fields) + t = time_data.t + self.step = time_data.step + initial_timesteps = time_data.initial_steps + last_ref_update_time = time_data.last_ref_update_time + self.set_reference_profiles(reference_profiles, last_ref_update_time) self.set_initial_timesteps(initial_timesteps) + else: self.step = 1 @@ -227,14 +253,19 @@ def run(self, t, tmax, pick_up=False): self.step += 1 with timed_stage("Dump output"): - self.io.dump(self.fields, float(self.t), self.step, self.get_initial_timesteps()) + time_data = TimeData( + t=float(self.t), step=self.step, + initial_steps=self.get_initial_timesteps(), + last_ref_update_time=self.last_ref_update_time + ) + self.io.dump(self.fields, time_data) if self.io.output.checkpoint and self.io.output.checkpoint_method == 'dumbcheckpoint': self.io.chkpt.close() logger.info(f'TIMELOOP complete. t={float(self.t):.5f}, {tmax=:.5f}') - def set_reference_profiles(self, reference_profiles): + def set_reference_profiles(self, reference_profiles, last_ref_update_time=None): """ Initialise the model's reference profiles. @@ -242,6 +273,8 @@ def set_reference_profiles(self, reference_profiles): where 'field_name' is the string giving the name of the reference profile field expr is the :class:`ufl.Expr` whose value is used to set the reference field. + last_ref_update_time (float, optional): the last time that the reference + profiles were updated. Defaults to None. 
""" for field_name, profile in reference_profiles: if field_name+'_bar' in self.fields: @@ -271,6 +304,8 @@ def set_reference_profiles(self, reference_profiles): # Don't need to do anything else as value in field container has already been set self.reference_profiles_initialised = True + self.last_ref_update_time = last_ref_update_time + class Timestepper(BaseTimestepper): """ diff --git a/integration-tests/data/simult_SIQN_order0_chkpt.h5 b/integration-tests/data/simult_SIQN_order0_chkpt.h5 new file mode 100644 index 000000000..0d1367606 Binary files /dev/null and b/integration-tests/data/simult_SIQN_order0_chkpt.h5 differ diff --git a/integration-tests/data/simult_SIQN_order1_chkpt.h5 b/integration-tests/data/simult_SIQN_order1_chkpt.h5 new file mode 100644 index 000000000..0309870ea Binary files /dev/null and b/integration-tests/data/simult_SIQN_order1_chkpt.h5 differ diff --git a/integration-tests/equations/test_boussinesq.py b/integration-tests/equations/test_boussinesq.py index 4d90e0d67..f2cea3175 100644 --- a/integration-tests/equations/test_boussinesq.py +++ b/integration-tests/equations/test_boussinesq.py @@ -128,6 +128,8 @@ def test_boussinesq(tmpdir, compressible): diff_array = new_variable.dat.data - check_variable.dat.data error = np.linalg.norm(diff_array) / np.linalg.norm(check_variable.dat.data) + test_type = 'compressible' if compressible else 'incompressible' + # Slack values chosen to be robust to different platforms assert error < 1e-10, f'Values for {variable} in ' + \ - 'Incompressible test do not match KGO values' + f'{test_type} test do not match KGO values' diff --git a/integration-tests/equations/test_coupled_transport.py b/integration-tests/equations/test_coupled_transport.py index 6103d008c..7923f0085 100644 --- a/integration-tests/equations/test_coupled_transport.py +++ b/integration-tests/equations/test_coupled_transport.py @@ -98,9 +98,14 @@ def test_conservative_coupled_transport(tmpdir, m_X_space, tracer_setup): 'f2': EmbeddedDGOptions()} opts = MixedFSOptions(suboptions=suboptions) - transport_scheme = SSPRK3(domain, options=opts, increment_form=False) + transport_scheme = SSPRK3( + domain, options=opts, + rk_formulation=RungeKuttaFormulation.predictor + ) else: - transport_scheme = SSPRK3(domain, increment_form=False) + transport_scheme = SSPRK3( + domain, rk_formulation=RungeKuttaFormulation.predictor + ) transport_method = [DGUpwind(eqn, 'f1'), DGUpwind(eqn, 'f2')] diff --git a/integration-tests/model/test_checkpointing.py b/integration-tests/model/test_checkpointing.py index 28549a66f..dc72fb1de 100644 --- a/integration-tests/model/test_checkpointing.py +++ b/integration-tests/model/test_checkpointing.py @@ -11,7 +11,7 @@ import pytest -def set_up_model_objects(mesh, dt, output, stepper_type): +def set_up_model_objects(mesh, dt, output, stepper_type, ref_update_freq): domain = Domain(mesh, dt, "CG", 1) @@ -40,7 +40,8 @@ def set_up_model_objects(mesh, dt, output, stepper_type): # build time stepper stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver) + linear_solver=linear_solver, + reference_update_freq=ref_update_freq) elif stepper_type == 'multi_level': scheme = AdamsBashforth(domain, order=2) @@ -92,9 +93,10 @@ def initialise_fields(eqns, stepper): stepper.set_reference_profiles([('rho', rho_b), ('theta', theta_b)]) -@pytest.mark.parametrize("stepper_type", ["multi_level", "semi_implicit"]) +@pytest.mark.parametrize("stepper_type, ref_update_freq", [ + ("multi_level", None), 
("semi_implicit", None), ("semi_implicit", 0.6)]) @pytest.mark.parametrize("checkpoint_method", ["dumbcheckpoint", "checkpointfile"]) -def test_checkpointing(tmpdir, stepper_type, checkpoint_method): +def test_checkpointing(tmpdir, stepper_type, checkpoint_method, ref_update_freq): mesh_name = 'checkpointing_mesh' @@ -128,8 +130,8 @@ def test_checkpointing(tmpdir, stepper_type, checkpoint_method): chkptfreq=2, ) - stepper_1, eqns_1 = set_up_model_objects(mesh, dt, output_1, stepper_type) - stepper_2, eqns_2 = set_up_model_objects(mesh, dt, output_2, stepper_type) + stepper_1, eqns_1 = set_up_model_objects(mesh, dt, output_1, stepper_type, ref_update_freq) + stepper_2, eqns_2 = set_up_model_objects(mesh, dt, output_2, stepper_type, ref_update_freq) initialise_fields(eqns_1, stepper_1) initialise_fields(eqns_2, stepper_2) @@ -163,7 +165,7 @@ def test_checkpointing(tmpdir, stepper_type, checkpoint_method): if checkpoint_method == 'checkpointfile': mesh = pick_up_mesh(output_3, mesh_name) - stepper_3, _ = set_up_model_objects(mesh, dt, output_3, stepper_type) + stepper_3, _ = set_up_model_objects(mesh, dt, output_3, stepper_type, ref_update_freq) stepper_3.io.pick_up_from_checkpoint(stepper_3.fields) # ------------------------------------------------------------------------ # @@ -192,7 +194,7 @@ def test_checkpointing(tmpdir, stepper_type, checkpoint_method): ) if checkpoint_method == 'checkpointfile': mesh = pick_up_mesh(output_3, mesh_name) - stepper_3, _ = set_up_model_objects(mesh, dt, output_3, stepper_type) + stepper_3, _ = set_up_model_objects(mesh, dt, output_3, stepper_type, ref_update_freq) stepper_3.run(t=2*dt, tmax=4*dt, pick_up=True) # ------------------------------------------------------------------------ # diff --git a/integration-tests/model/test_conservative_transport_with_physics.py b/integration-tests/model/test_conservative_transport_with_physics.py index 67860f3bc..1833e2216 100644 --- a/integration-tests/model/test_conservative_transport_with_physics.py +++ b/integration-tests/model/test_conservative_transport_with_physics.py @@ -60,8 +60,10 @@ def run_conservative_transport_with_physics(dirname): # Time stepper time_varying_velocity = False stepper = SplitPrescribedTransport( - eqn, SSPRK3(domain, increment_form=False), io, time_varying_velocity, - transport_method, physics_schemes=physics_schemes) + eqn, SSPRK3(domain, rk_formulation=RungeKuttaFormulation.predictor), + io, time_varying_velocity, transport_method, + physics_schemes=physics_schemes + ) # ------------------------------------------------------------------------ # # Initial conditions diff --git a/integration-tests/model/test_simultaneous_SIQN.py b/integration-tests/model/test_simultaneous_SIQN.py new file mode 100644 index 000000000..ea3a12327 --- /dev/null +++ b/integration-tests/model/test_simultaneous_SIQN.py @@ -0,0 +1,263 @@ +""" +This tests the use of simultaneous transport with the SIQN +timestepping method. A few timesteps are taken with the Bryan-Fritsch +bubble test case, which solves the Compressible Euler Equations. +The two tracers of water vapour and cloud water are being tranpsported +conservatively, which means they need to be transported simultaneously +with the density. +Degree 0 and 1 configurations are tested to ensure that the simultaneous +transport is working with the different wrappers. 
+""" + +from os.path import join, abspath, dirname +from firedrake import ( + PeriodicIntervalMesh, ExtrudedMesh, SpatialCoordinate, conditional, cos, pi, + sqrt, NonlinearVariationalProblem, NonlinearVariationalSolver, TestFunction, + dx, TrialFunction, Function, as_vector, LinearVariationalProblem, + LinearVariationalSolver, Constant, BrokenElement +) +from gusto import * +import pytest + + +def run_simult_SIQN(tmpdir, order): + + if order == 0: + ncolumns = 20 + nlayers = 20 + u_eqn_type = "vector_advection_form" + else: + ncolumns = 10 + nlayers = 10 + u_eqn_type = "vector_invariant_form" + + dt = 2.0 + tmax = 10.0 + + domain_width = 10000. # domain width, in m + domain_height = 10000. # domain height, in m + zc = 2000. # vertical centre of bubble, in m + rc = 2000. # radius of bubble, in m + Tdash = 2.0 # strength of temperature perturbation, in K + Tsurf = 320.0 # background theta_e value, in K + total_water = 0.02 # total moisture mixing ratio, in kg/kg + + # Domain + mesh_name = 'bryan_fritsch_mesh' + base_mesh = PeriodicIntervalMesh(ncolumns, domain_width) + mesh = ExtrudedMesh( + base_mesh, layers=nlayers, layer_height=domain_height/nlayers, name=mesh_name + ) + domain = Domain(mesh, dt, 'CG', order) + + # Set up the tracers and their transport schemes + V_rho = domain.spaces('DG') + V_theta = domain.spaces('theta') + + tracers = [WaterVapour(space='theta', + transport_eqn=TransportEquationType.tracer_conservative, + density_name='rho'), + CloudWater(space='theta', + transport_eqn=TransportEquationType.tracer_conservative, + density_name='rho')] + + # Equation + params = CompressibleParameters() + eqns = CompressibleEulerEquations( + domain, params, active_tracers=tracers, u_transport_option=u_eqn_type + ) + + # I/O + output_dirname = tmpdir+"/simult_SIQN_order"+str(order) + + output = OutputParameters( + dirname=output_dirname, dumpfreq=5, chkptfreq=5, checkpoint=True + ) + io = IO(domain, output) + + # Set up transport schemes + if order == 0: + VDG1 = domain.spaces("DG1_equispaced") + VCG1 = FunctionSpace(mesh, "CG", 1) + Vu_DG1 = VectorFunctionSpace(mesh, VDG1.ufl_element()) + Vu_CG1 = VectorFunctionSpace(mesh, "CG", 1) + + u_opts = RecoveryOptions(embedding_space=Vu_DG1, + recovered_space=Vu_CG1, + boundary_method=BoundaryMethod.taylor) + theta_opts = RecoveryOptions(embedding_space=VDG1, + recovered_space=VCG1) + + suboptions = {'rho': RecoveryOptions(embedding_space=VDG1, + recovered_space=VCG1, + boundary_method=BoundaryMethod.taylor), + 'water_vapour': ConservativeRecoveryOptions(embedding_space=VDG1, + recovered_space=VCG1, + rho_name="rho", + orig_rho_space=V_rho), + 'cloud_water': ConservativeRecoveryOptions(embedding_space=VDG1, + recovered_space=VCG1, + rho_name="rho", + orig_rho_space=V_rho)} + else: + theta_opts = EmbeddedDGOptions() + Vt_brok = FunctionSpace(mesh, BrokenElement(V_theta.ufl_element())) + suboptions = {'rho': EmbeddedDGOptions(embedding_space=Vt_brok), + 'water_vapour': ConservativeEmbeddedDGOptions(embedding_space=Vt_brok, + rho_name="rho", + orig_rho_space=V_rho), + 'cloud_water': ConservativeEmbeddedDGOptions(embedding_space=Vt_brok, + rho_name="rho", + orig_rho_space=V_rho)} + + transported_fields = [SSPRK3(domain, "theta", options=theta_opts)] + + mixed_opts = MixedFSOptions(suboptions=suboptions) + transported_fields.append(SSPRK3(domain, ["rho", "water_vapour", "cloud_water"], options=mixed_opts, rk_formulation=RungeKuttaFormulation.predictor)) + + if order == 0: + transported_fields.append(SSPRK3(domain, 'u', options=u_opts)) + else: + 
transported_fields.append(TrapeziumRule(domain, 'u')) + + transport_methods = [ + DGUpwind(eqns, field) for field in + ["u", "rho", "theta", "water_vapour", "cloud_water"] + ] + + # Linear solver + linear_solver = CompressibleSolver(eqns) + + # Physics schemes (condensation/evaporation) + physics_schemes = [(SaturationAdjustment(eqns), ForwardEuler(domain))] + + # Time stepper + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + linear_solver=linear_solver, physics_schemes=physics_schemes + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + rho0 = stepper.fields("rho") + theta0 = stepper.fields("theta") + water_v0 = stepper.fields("water_vapour") + water_c0 = stepper.fields("cloud_water") + + # spaces + Vt = domain.spaces("theta") + Vr = domain.spaces("DG") + x, z = SpatialCoordinate(mesh) + quadrature_degree = (4, 4) + dxp = dx(degree=(quadrature_degree)) + + # Define constant theta_e and water_t + theta_e = Function(Vt).assign(Tsurf) + water_t = Function(Vt).assign(total_water) + + # Calculate hydrostatic fields + saturated_hydrostatic_balance(eqns, stepper.fields, theta_e, water_t) + + # make mean fields + theta_b = Function(Vt).assign(theta0) + rho_b = Function(Vr).assign(rho0) + water_vb = Function(Vt).assign(water_v0) + water_cb = Function(Vt).assign(water_t - water_vb) + + # define perturbation + xc = domain_width / 2 + r = sqrt((x - xc) ** 2 + (z - zc) ** 2) + theta_pert = Function(Vt).interpolate( + conditional( + r > rc, + 0.0, + Tdash * (cos(pi * r / (2.0 * rc))) ** 2 + ) + ) + + # define initial theta + theta0.interpolate(theta_b * (theta_pert / 300.0 + 1.0)) + + # find perturbed rho + gamma = TestFunction(Vr) + rho_trial = TrialFunction(Vr) + a = gamma * rho_trial * dxp + L = gamma * (rho_b * theta_b / theta0) * dxp + rho_problem = LinearVariationalProblem(a, L, rho0) + rho_solver = LinearVariationalSolver(rho_problem) + rho_solver.solve() + + # find perturbed water_v + w_v = Function(Vt) + phi = TestFunction(Vt) + rho_averaged = Function(Vt) + rho_recoverer = Recoverer(rho0, rho_averaged) + rho_recoverer.project() + + exner = thermodynamics.exner_pressure(eqns.parameters, rho_averaged, theta0) + p = thermodynamics.p(eqns.parameters, exner) + T = thermodynamics.T(eqns.parameters, theta0, exner, r_v=w_v) + w_sat = thermodynamics.r_sat(eqns.parameters, T, p) + + w_functional = (phi * w_v * dxp - phi * w_sat * dxp) + w_problem = NonlinearVariationalProblem(w_functional, w_v) + w_solver = NonlinearVariationalSolver(w_problem) + w_solver.solve() + + water_v0.assign(w_v) + water_c0.assign(water_t - water_v0) + + # wind initially zero + u0.project(as_vector( + [Constant(0.0, domain=mesh), Constant(0.0, domain=mesh)] + )) + + stepper.set_reference_profiles( + [ + ('rho', rho_b), + ('theta', theta_b), + ('water_vapour', water_vb), + ('cloud_water', water_cb) + ] + ) + + # --------------------------------------------------------------------- # + # Run + # --------------------------------------------------------------------- # + + stepper.run(t=0, tmax=tmax) + + # State for checking checkpoints + checkpoint_name = 'simult_SIQN_order'+str(order)+'_chkpt.h5' + new_path = join(abspath(dirname(__file__)), '..', f'data/{checkpoint_name}') + check_output = OutputParameters(dirname=output_dirname, + checkpoint_pickup_filename=new_path, + checkpoint=True) + check_mesh = pick_up_mesh(check_output, 
mesh_name) + check_domain = Domain(check_mesh, dt, "CG", order) + check_eqn = CompressibleEulerEquations(check_domain, params, active_tracers=tracers, u_transport_option=u_eqn_type) + check_io = IO(check_domain, check_output) + check_stepper = SemiImplicitQuasiNewton(check_eqn, check_io, [], []) + check_stepper.io.pick_up_from_checkpoint(check_stepper.fields) + + return stepper, check_stepper + + +@pytest.mark.parametrize("order", [0, 1]) +def test_simult_SIQN(tmpdir, order): + + dirname = str(tmpdir) + stepper, check_stepper = run_simult_SIQN(dirname, order) + + for variable in ['u', 'rho', 'theta', 'water_vapour', 'cloud_water']: + new_variable = stepper.fields(variable) + check_variable = check_stepper.fields(variable) + diff_array = new_variable.dat.data - check_variable.dat.data + error = np.linalg.norm(diff_array) / np.linalg.norm(check_variable.dat.data) + + # Slack values chosen to be robust to different platforms + assert error < 1e-10, f'Values for {variable} in the ' + \ + f'order {order} elements test do not match KGO values' diff --git a/integration-tests/model/test_split_timestepper.py b/integration-tests/model/test_split_timestepper.py new file mode 100644 index 000000000..5f675cfd6 --- /dev/null +++ b/integration-tests/model/test_split_timestepper.py @@ -0,0 +1,127 @@ +""" +This script tests the split_timestepper using an advection-diffusion +equation with a physics parametrisation. Three different splittings are +tested, including splitting the dynamics and physics into two substeps +with different timestep sizes. +""" + +from firedrake import (SpatialCoordinate, PeriodicIntervalMesh, exp, as_vector, + norm, Constant, conditional, sqrt, VectorFunctionSpace) +from gusto import * +import pytest + + +def run_split_timestepper_adv_diff_physics(tmpdir, timestepper): + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + dt = 0.02 + tmax = 1.0 + L = 10 + mesh = PeriodicIntervalMesh(20, L) + domain = Domain(mesh, dt, "CG", 1) + + # Equation + diffusion_params = DiffusionParameters(kappa=0.75, mu=5) + V = domain.spaces("DG") + Vu = VectorFunctionSpace(mesh, "CG", 1) + + equation = AdvectionDiffusionEquation(domain, V, "f", Vu=Vu, + diffusion_parameters=diffusion_params) + spatial_methods = [DGUpwind(equation, "f"), + InteriorPenaltyDiffusion(equation, "f", diffusion_params)] + + x = SpatialCoordinate(mesh) + + # Add a source term to inject mass into the domain. + # Without the diffusion, this would simply add 0.1 + # units of mass equally across the domain. 
+ source_expression = -Constant(0.1) + + physics_schemes = [(SourceSink(equation, "f", source_expression), SSPRK3(domain))] + + # I/O + output = OutputParameters(dirname=str(tmpdir), dumpfreq=25) + io = IO(domain, output) + + # Time stepper + if timestepper == 'split1': + # Split with no defined weights + dynamics_schemes = {'transport': ImplicitMidpoint(domain), + 'diffusion': ForwardEuler(domain)} + term_splitting = ['transport', 'diffusion', 'physics'] + stepper = SplitTimestepper(equation, term_splitting, dynamics_schemes, + io, spatial_methods=spatial_methods, + physics_schemes=physics_schemes) + elif timestepper == 'split2': + # Transport split into two substeps + dynamics_schemes = {'transport': SSPRK3(domain), + 'diffusion': ForwardEuler(domain)} + term_splitting = ['diffusion', 'transport', 'physics', 'transport'] + weights = [1., 0.6, 1., 0.4] + stepper = SplitTimestepper(equation, term_splitting, dynamics_schemes, + io, weights=weights, spatial_methods=spatial_methods, + physics_schemes=physics_schemes) + else: + # Physics split into two substeps + dynamics_schemes = {'transport': SSPRK3(domain), + 'diffusion': SSPRK3(domain)} + term_splitting = ['physics', 'transport', 'diffusion', 'physics'] + weights = [1./3., 1., 1., 2./3.] + stepper = SplitTimestepper(equation, term_splitting, dynamics_schemes, + io, weights=weights, spatial_methods=spatial_methods, + physics_schemes=physics_schemes) + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + xc_init = 0.25*L + xc_end = 0.75*L + umax = 0.5*L/tmax + + # Get minimum distance on periodic interval to xc + x_init = conditional(sqrt((x[0] - xc_init)**2) < 0.5*L, + x[0] - xc_init, L + x[0] - xc_init) + + x_end = conditional(sqrt((x[0] - xc_end)**2) < 0.5*L, + x[0] - xc_end, L + x[0] - xc_end) + + f_init = 5.0 + f_end = f_init / 2.0 + f_width_init = L / 10.0 + f_width_end = f_width_init * 2.0 + f_init_expr = f_init*exp(-(x_init / f_width_init)**2) + + # The end Gaussian should be advected by half the domain + # length, be more spread out due to the dissipation, + # and includes more mass due to the source term. 
+    f_end_expr = 0.1 + f_end*exp(-(x_end / f_width_end)**2)
+
+    stepper.fields('f').interpolate(f_init_expr)
+    stepper.fields('u').interpolate(as_vector([Constant(umax)]))
+    f_end = stepper.fields('f_end', space=V)
+    f_end.interpolate(f_end_expr)
+
+    # ------------------------------------------------------------------------ #
+    # Run
+    # ------------------------------------------------------------------------ #
+
+    stepper.run(0, tmax=tmax)
+
+    error = norm(stepper.fields('f') - f_end) / norm(f_end)
+
+    return error
+
+
+@pytest.mark.parametrize("timestepper", ["split1", "split2", "split3"])
+def test_split_timestepper_adv_diff_physics(tmpdir, timestepper):
+
+    tol = 0.015
+    error = run_split_timestepper_adv_diff_physics(tmpdir, timestepper)
+    print(error)
+    assert error < tol, 'The split timestepper in the advection-diffusion ' + \
+        'equation with source physics has an error greater than ' + \
+        'the permitted tolerance'
diff --git a/integration-tests/model/test_time_discretisation.py b/integration-tests/model/test_time_discretisation.py
index 5146107b2..6d484bfd1 100644
--- a/integration-tests/model/test_time_discretisation.py
+++ b/integration-tests/model/test_time_discretisation.py
@@ -9,9 +9,12 @@ def run(timestepper, tmax, f_end):
 @pytest.mark.parametrize(
-    "scheme", ["ssprk3_increment", "TrapeziumRule", "ImplicitMidpoint",
-               "QinZhang", "RK4", "Heun", "BDF2", "TR_BDF2", "AdamsBashforth",
-               "Leapfrog", "AdamsMoulton", "AdamsMoulton", "ssprk3_predictor"])
+    "scheme", [
+        "ssprk3_increment", "TrapeziumRule", "ImplicitMidpoint", "QinZhang",
+        "RK4", "Heun", "BDF2", "TR_BDF2", "AdamsBashforth", "Leapfrog",
+        "AdamsMoulton", "AdamsMoulton", "ssprk3_predictor", "ssprk3_linear"
+    ]
+)
 def test_time_discretisation(tmpdir, scheme, tracer_setup):
     if (scheme == "AdamsBashforth"):
         # Tighter stability constraints
@@ -28,9 +31,11 @@ def test_time_discretisation(tmpdir, scheme, tracer_setup):
         eqn = AdvectionEquation(domain, V, "f")
     if scheme == "ssprk3_increment":
-        transport_scheme = SSPRK3(domain, increment_form=True)
+        transport_scheme = SSPRK3(domain, rk_formulation=RungeKuttaFormulation.increment)
     elif scheme == "ssprk3_predictor":
-        transport_scheme = SSPRK3(domain, increment_form=False)
+        transport_scheme = SSPRK3(domain, rk_formulation=RungeKuttaFormulation.predictor)
+    elif scheme == "ssprk3_linear":
+        transport_scheme = SSPRK3(domain, rk_formulation=RungeKuttaFormulation.linear)
     elif scheme == "TrapeziumRule":
         transport_scheme = TrapeziumRule(domain)
     elif scheme == "ImplicitMidpoint":
diff --git a/integration-tests/physics/test_sw_saturation_adjustment.py b/integration-tests/physics/test_sw_saturation_adjustment.py
index 3bec4bb0f..66c971519 100644
--- a/integration-tests/physics/test_sw_saturation_adjustment.py
+++ b/integration-tests/physics/test_sw_saturation_adjustment.py
@@ -88,13 +88,12 @@ def run_sw_cond_evap(dirname, process):
         v_true = Function(v0.function_space()).interpolate(sat*(0.96+0.005*pert))
         c_true = Function(c0.function_space()).interpolate(Constant(0.0))
         # gain buoyancy
-        factor = parameters.g*beta2
         sat_adj_expr = (v0 - sat) / dt
         sat_adj_expr = conditional(sat_adj_expr < 0,
                                    max_value(sat_adj_expr, -c0 / dt),
                                    min_value(sat_adj_expr, v0 / dt))
         # include factor of -1 in true solution to compare term to LHS in Gusto
-        b_true = Function(b0.function_space()).interpolate(-dt*sat_adj_expr*factor)
+        b_true = Function(b0.function_space()).interpolate(-dt*sat_adj_expr*beta2)
     elif process == "condensation":
         # vapour is above saturation
@@ -103,13 +102,12 @@ def
run_sw_cond_evap(dirname, process): v_true = Function(v0.function_space()).interpolate(Constant(sat)) c_true = Function(c0.function_space()).interpolate(v0 - sat) # lose buoyancy - factor = parameters.g*beta2 sat_adj_expr = (v0 - sat) / dt sat_adj_expr = conditional(sat_adj_expr < 0, max_value(sat_adj_expr, -c0 / dt), min_value(sat_adj_expr, v0 / dt)) # include factor of -1 in true solution to compare term to LHS in Gusto - b_true = Function(b0.function_space()).interpolate(-dt*sat_adj_expr*factor) + b_true = Function(b0.function_space()).interpolate(-dt*sat_adj_expr*beta2) c_init = Function(c0.function_space()).interpolate(c0) diff --git a/integration-tests/transport/test_advective_then_flux.py b/integration-tests/transport/test_advective_then_flux.py new file mode 100644 index 000000000..0e92938b7 --- /dev/null +++ b/integration-tests/transport/test_advective_then_flux.py @@ -0,0 +1,109 @@ +""" +Tests transport using a Runge-Kutta scheme with an advective-then-flux approach. +This should yield increments that are linear in the divergence (and thus +preserve a constant in divergence-free flow). +""" + +from gusto import * +from firedrake import ( + PeriodicRectangleMesh, cos, sin, SpatialCoordinate, + assemble, dx, pi, as_vector, errornorm, Function, div +) +import pytest + + +def setup_advective_then_flux(dirname, desirable_property): + + # ------------------------------------------------------------------------ # + # Model set up + # ------------------------------------------------------------------------ # + + # Time parameters + dt = 2. + + # Domain + domain_width = 2000. + ncells_1d = 10. + mesh = PeriodicRectangleMesh( + ncells_1d, ncells_1d, domain_width, domain_width, quadrilateral=True + ) + domain = Domain(mesh, dt, "RTCF", 1) + + # Equation + V_DG = domain.spaces('DG') + V_HDiv = domain.spaces("HDiv") + eqn = ContinuityEquation(domain, V_DG, "rho", Vu=V_HDiv) + + # IO + output = OutputParameters(dirname=dirname) + io = IO(domain, output) + + # Transport method + transport_scheme = SSPRK3( + domain, rk_formulation=RungeKuttaFormulation.linear, fixed_subcycles=3 + ) + transport_method = DGUpwind(eqn, "rho", advective_then_flux=True) + + # Timestepper + time_varying = False + stepper = PrescribedTransport( + eqn, transport_scheme, io, time_varying, transport_method + ) + + # ------------------------------------------------------------------------ # + # Initial Conditions + # ------------------------------------------------------------------------ # + + x, y = SpatialCoordinate(mesh) + + # Density is initially constant for both tests + rho_0 = 10.0 + + # Set the initial state from the configuration choice + if desirable_property == 'constancy': + # Divergence free velocity + num_steps = 5 + psi = Function(domain.spaces('H1')) + psi_expr = cos(2*pi*x/domain_width)*sin(2*pi*y/domain_width) + psi.interpolate(psi_expr) + u_expr = as_vector([-psi.dx(1), psi.dx(0)]) + + elif desirable_property == 'divergence_linearity': + # Divergent velocity + num_steps = 1 + u_expr = as_vector([ + cos(2*pi*x/domain_width)*sin(4*pi*y/domain_width), + -pi*sin(2*pi*x*y/(domain_width)**2) + ]) + + stepper.fields("rho").assign(Constant(rho_0)) + stepper.fields("u").project(u_expr) + + rho_true = Function(V_DG) + rho_true.interpolate(rho_0*(1.0 - dt*div(stepper.fields('u')))) + + return stepper, rho_true, dt, num_steps + + +@pytest.mark.parametrize("desirable_property", ["constancy", "divergence_linearity"]) +def test_advective_then_flux(tmpdir, desirable_property): + + # Setup and run + dirname = 
str(tmpdir)
+
+    stepper, rho_true, dt, num_steps = \
+        setup_advective_then_flux(dirname, desirable_property)
+
+    # Run for the prescribed number of timesteps
+    stepper.run(t=0, tmax=dt*num_steps)
+    rho = stepper.fields("rho")
+
+    # Check for divergence-linearity/constancy
+    assert errornorm(rho, rho_true) < 1e-11, \
+        "advective-then-flux form is not yielding the correct answer"
+
+    # Check for conservation
+    mass_initial = assemble(rho_true*dx)
+    mass_final = assemble(rho*dx)
+    assert abs(mass_final - mass_initial) < 1e-14, \
+        "advective-then-flux form is not conservative"
diff --git a/integration-tests/transport/test_dg_transport.py b/integration-tests/transport/test_dg_transport.py
index f581e2527..63a3898ac 100644
--- a/integration-tests/transport/test_dg_transport.py
+++ b/integration-tests/transport/test_dg_transport.py
@@ -14,7 +14,9 @@ def run(timestepper, tmax, f_end):
 @pytest.mark.parametrize("geometry", ["slice", "sphere"])
-@pytest.mark.parametrize("equation_form", ["advective", "continuity"])
+@pytest.mark.parametrize("equation_form", [
+    "advective", "continuity", "advective_then_flux"
+])
 def test_dg_transport_scalar(tmpdir, geometry, equation_form, tracer_setup):
     setup = tracer_setup(tmpdir, geometry)
     domain = setup.domain
@@ -25,8 +27,12 @@ def test_dg_transport_scalar(tmpdir, geometry, equation_form, tracer_setup):
     else:
         eqn = ContinuityEquation(domain, V, "f")
-    transport_scheme = SSPRK3(domain)
-    transport_method = DGUpwind(eqn, "f")
+    if equation_form == "advective_then_flux":
+        transport_method = DGUpwind(eqn, "f", advective_then_flux=True)
+        transport_scheme = SSPRK3(domain, rk_formulation=RungeKuttaFormulation.linear)
+    else:
+        transport_method = DGUpwind(eqn, "f")
+        transport_scheme = SSPRK3(domain)
     time_varying_velocity = False
     timestepper = PrescribedTransport(
diff --git a/integration-tests/transport/test_supg_transport.py b/integration-tests/transport/test_supg_transport.py
index d55edbd1d..656d6b75a 100644
--- a/integration-tests/transport/test_supg_transport.py
+++ b/integration-tests/transport/test_supg_transport.py
@@ -13,6 +13,59 @@ def run(timestepper, tmax, f_end):
     return norm(timestepper.fields("f") - f_end) / norm(f_end)
+def run_coupled(timestepper, tmax, f_end):
+    timestepper.run(0, tmax)
+    norm1 = norm(timestepper.fields("f1") - f_end) / norm(f_end)
+    norm2 = norm(timestepper.fields("f2") - f_end) / norm(f_end)
+    return norm1, norm2
+
+
+@pytest.mark.parametrize("scheme", ["ssprk", "implicit_midpoint"])
+def test_supg_transport_mixed_scalar(tmpdir, scheme, tracer_setup):
+    setup = tracer_setup(tmpdir, geometry="slice")
+    domain = setup.domain
+
+    ibp = IntegrateByParts.TWICE
+
+    opts = SUPGOptions(ibp=ibp)
+
+    tracer1 = ActiveTracer(name='f1', space="theta",
+                           variable_type=TracerVariableType.mixing_ratio,
+                           transport_eqn=TransportEquationType.advective)
+    tracer2 = ActiveTracer(name='f2', space="theta",
+                           variable_type=TracerVariableType.mixing_ratio,
+                           transport_eqn=TransportEquationType.conservative)
+    tracers = [tracer1, tracer2]
+    Vu = domain.spaces("HDiv")
+    eqn = CoupledTransportEquation(domain, active_tracers=tracers, Vu=Vu)
+    suboptions = {}
+    suboptions.update({'f1': [time_derivative, transport]})
+    suboptions.update({'f2': None})
+    opts = SUPGOptions(suboptions=suboptions)
+    transport_method = [DGUpwind(eqn, "f1", ibp=ibp), DGUpwind(eqn, "f2", ibp=ibp)]
+
+    if scheme == "ssprk":
+        transport_scheme = SSPRK3(domain, options=opts)
+    elif scheme == "implicit_midpoint":
+        transport_scheme = TrapeziumRule(domain, options=opts)
+
+    time_varying_velocity = False
+
timestepper = PrescribedTransport( + eqn, transport_scheme, setup.io, time_varying_velocity, transport_method + ) + + # Initial conditions + timestepper.fields("f1").interpolate(setup.f_init) + timestepper.fields("f2").interpolate(setup.f_init) + timestepper.fields("u").project(setup.uexpr) + + error1, error2 = run_coupled(timestepper, setup.tmax, setup.f_end) + assert error1 < setup.tol, \ + 'The transport error for f1 is greater than the permitted tolerance' + assert error2 < setup.tol, \ + 'The transport error for f2 is greater than the permitted tolerance' + + @pytest.mark.parametrize("equation_form", ["advective", "continuity"]) @pytest.mark.parametrize("scheme", ["ssprk", "implicit_midpoint"]) @pytest.mark.parametrize("space", ["CG", "theta"]) @@ -29,28 +82,26 @@ def test_supg_transport_scalar(tmpdir, equation_form, scheme, space, V = domain.spaces("theta") ibp = IntegrateByParts.TWICE - opts = SUPGOptions(ibp=ibp) - if equation_form == "advective": eqn = AdvectionEquation(domain, V, "f") else: eqn = ContinuityEquation(domain, V, "f") + opts = SUPGOptions(ibp=ibp) + transport_method = DGUpwind(eqn, "f", ibp=ibp) + if scheme == "ssprk": transport_scheme = SSPRK3(domain, options=opts) elif scheme == "implicit_midpoint": transport_scheme = TrapeziumRule(domain, options=opts) - transport_method = DGUpwind(eqn, "f", ibp=ibp) time_varying_velocity = False timestepper = PrescribedTransport( eqn, transport_scheme, setup.io, time_varying_velocity, transport_method ) - # Initial conditions timestepper.fields("f").interpolate(setup.f_init) timestepper.fields("u").project(setup.uexpr) - error = run(timestepper, setup.tmax, setup.f_end) assert error < setup.tol, \ 'The transport error is greater than the permitted tolerance' @@ -81,12 +132,14 @@ def test_supg_transport_vector(tmpdir, equation_form, scheme, space, else: eqn = ContinuityEquation(domain, V, "f") + opts = SUPGOptions(ibp=ibp) + transport_method = DGUpwind(eqn, "f", ibp=ibp) + if scheme == "ssprk": transport_scheme = SSPRK3(domain, options=opts) elif scheme == "implicit_midpoint": transport_scheme = TrapeziumRule(domain, options=opts) - transport_method = DGUpwind(eqn, "f", ibp=ibp) time_varying_velocity = False timestepper = PrescribedTransport( eqn, transport_scheme, setup.io, time_varying_velocity, transport_method diff --git a/integration-tests/transport/test_tracer_conservative_transport.py b/integration-tests/transport/test_tracer_conservative_transport.py new file mode 100644 index 000000000..bc12dca8d --- /dev/null +++ b/integration-tests/transport/test_tracer_conservative_transport.py @@ -0,0 +1,208 @@ +""" +Tests the conservative transport of a mixing ratio and dry density, both when +they are defined on the same and different function spaces. This checks +that there is conservation of the total species mass (dry density times the +mixing ratio) and that there is consistency (a constant field will remain +constant). +""" + +from gusto import * +from firedrake import ( + PeriodicIntervalMesh, ExtrudedMesh, exp, cos, sin, SpatialCoordinate, + assemble, dx, FunctionSpace, pi, min_value, as_vector, BrokenElement, + errornorm +) +import pytest + + +def setup_conservative_transport(dirname, pair_of_spaces, desirable_property): + + # Domain + Lx = 2000. + Hz = 2000. + + # Time parameters + dt = 2. + tmax = 2000. + + nlayers = 10. # horizontal layers + columns = 10. 
# number of columns + + # Define the spaces for the tracers + if pair_of_spaces == 'same_order_1': + rho_d_space = 'DG' + m_X_space = 'DG' + space_order = 1 + elif pair_of_spaces == 'diff_order_0': + rho_d_space = 'DG' + m_X_space = 'theta' + space_order = 0 + elif pair_of_spaces == 'diff_order_1': + rho_d_space = 'DG' + m_X_space = 'theta' + space_order = 1 + + period_mesh = PeriodicIntervalMesh(columns, Lx) + mesh = ExtrudedMesh(period_mesh, layers=nlayers, layer_height=Hz/nlayers) + domain = Domain(mesh, dt, "CG", space_order) + x, z = SpatialCoordinate(mesh) + + V_rho = domain.spaces(rho_d_space) + V_m_X = domain.spaces(m_X_space) + + m_X = ActiveTracer(name='m_X', space=m_X_space, + variable_type=TracerVariableType.mixing_ratio, + transport_eqn=TransportEquationType.tracer_conservative, + density_name='rho_d') + + rho_d = ActiveTracer(name='rho_d', space=rho_d_space, + variable_type=TracerVariableType.density, + transport_eqn=TransportEquationType.conservative) + + # Define m_X first to test that the tracers will be + # automatically re-ordered such that the density field + # is indexed before the mixing ratio. + tracers = [m_X, rho_d] + + # Equation + V = domain.spaces("HDiv") + eqn = CoupledTransportEquation(domain, active_tracers=tracers, Vu=V) + + # IO + output = OutputParameters(dirname=dirname) + io = IO(domain, output) + + if pair_of_spaces == 'diff_order_0': + VCG1 = FunctionSpace(mesh, 'CG', 1) + VDG1 = domain.spaces('DG1_equispaced') + + suboptions = { + 'rho_d': RecoveryOptions( + embedding_space=VDG1, + recovered_space=VCG1, + project_low_method='recover', + boundary_method=BoundaryMethod.taylor + ), + 'm_X': ConservativeRecoveryOptions( + embedding_space=VDG1, + recovered_space=VCG1, + boundary_method=BoundaryMethod.taylor, + rho_name='rho_d', + orig_rho_space=V_rho + ) + } + elif pair_of_spaces == 'diff_order_1': + Vt_brok = FunctionSpace(mesh, BrokenElement(V_m_X.ufl_element())) + suboptions = { + 'rho_d': EmbeddedDGOptions(embedding_space=Vt_brok), + 'm_X': ConservativeEmbeddedDGOptions( + rho_name='rho_d', + orig_rho_space=V_rho + ) + } + else: + suboptions = {} + + opts = MixedFSOptions(suboptions=suboptions) + + transport_scheme = SSPRK3( + domain, options=opts, rk_formulation=RungeKuttaFormulation.predictor + ) + transport_methods = [DGUpwind(eqn, "m_X"), DGUpwind(eqn, "rho_d")] + + # Timestepper + time_varying = True + stepper = PrescribedTransport( + eqn, transport_scheme, io, time_varying, transport_methods + ) + + # Initial Conditions + # Specify locations of the two Gaussians + xc1 = 5.*Lx/8. + zc1 = Hz/2. + + xc2 = 3.*Lx/8. + zc2 = Hz/2. + + def l2_dist(xc, zc): + return min_value(abs(x-xc), Lx-abs(x-xc))**2 + (z-zc)**2 + + lc = 2.*Lx/25. + m0 = 0.02 + + # Set the initial state from the configuration choice + if desirable_property == 'conservation': + f0 = 0.05 + + rho_t = 0.5 + rho_b = 1. + + rho_d_0 = rho_b + z*(rho_t-rho_b)/Hz + + g1 = f0*exp(-l2_dist(xc1, zc1)/(lc**2)) + g2 = f0*exp(-l2_dist(xc2, zc2)/(lc**2)) + + m_X_0 = m0 + g1 + g2 + + else: + f0 = 0.5 + rho_b = 0.5 + + g1 = f0*exp(-l2_dist(xc1, zc1)/(lc**2)) + g2 = f0*exp(-l2_dist(xc2, zc2)/(lc**2)) + + rho_d_0 = rho_b + g1 + g2 + + # Constant mass field + m_X_0 = m0 + 0*x + + # Set up the divergent, time-varying, velocity field + U = Lx/tmax + W = U/10. 
+ + def u_t(t): + xd = x - U*t + u = U - (W*pi*Lx/Hz)*cos(pi*t/tmax)*cos(2*pi*xd/Lx)*cos(pi*z/Hz) + w = 2*pi*W*cos(pi*t/tmax)*sin(2*pi*xd/Lx)*sin(pi*z/Hz) + + u_expr = as_vector((u, w)) + + return u_expr + + stepper.setup_prescribed_expr(u_t) + + stepper.fields("m_X").interpolate(m_X_0) + stepper.fields("rho_d").interpolate(rho_d_0) + stepper.fields("u").project(u_t(0)) + + m_X_init = Function(V_m_X) + rho_d_init = Function(V_rho) + + m_X_init.assign(stepper.fields("m_X")) + rho_d_init.assign(stepper.fields("rho_d")) + + return stepper, m_X_init, rho_d_init + + +@pytest.mark.parametrize("pair_of_spaces", ["same_order_1", "diff_order_0", "diff_order_1"]) +@pytest.mark.parametrize("desirable_property", ["consistency", "conservation"]) +def test_conservative_transport(tmpdir, pair_of_spaces, desirable_property): + + # Setup and run + dirname = str(tmpdir) + + stepper, m_X_0, rho_d_0 = \ + setup_conservative_transport(dirname, pair_of_spaces, desirable_property) + + # Run for five timesteps + stepper.run(t=0, tmax=10) + m_X = stepper.fields("m_X") + rho_d = stepper.fields("rho_d") + + # Perform the check + if desirable_property == 'consistency': + assert errornorm(m_X_0, m_X) < 2e-13, "conservative transport is not consistent" + else: + rho_X_init = assemble(m_X_0*rho_d_0*dx) + rho_X_final = assemble(m_X*rho_d*dx) + assert abs((rho_X_init - rho_X_final)/rho_X_init) < 1e-14, "conservative transport is not conservative" diff --git a/plotting/boussinesq/plot_skamarock_klemp_boussinesq.py b/plotting/boussinesq/plot_skamarock_klemp_boussinesq.py new file mode 100644 index 000000000..6ac9a11cd --- /dev/null +++ b/plotting/boussinesq/plot_skamarock_klemp_boussinesq.py @@ -0,0 +1,175 @@ +""" +Plots the Boussinesq Skamarock-Klemp gravity wave in a vertical slice. This can +plot the compressible, incompressible and linear cases. 
+ +This plots the initial conditions @ t = 0 s, with +(a) buoyancy perturbation, (b) buoyancy +and the final state @ t = 3600 s, with +(a) buoyancy perturbation, +(b) a 1D slice through the wave +""" +from os.path import abspath, dirname +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, tomplot_field_title, extract_gusto_coords, + extract_gusto_field, reshape_gusto_data, add_colorbar_fig +) + +# Can be incompressible/compressible/linear +test = 'skamarock_klemp_linear_bouss' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/boussinesq/{test}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['b_perturbation', 'b'] +init_colour_schemes = ['YlOrRd', 'Purples'] +init_field_labels = [r'$\Delta b$ (m s$^{-2}$)', r'$b$ (m s$^{-2}$)'] +init_contours = [np.linspace(0.0, 0.01, 11), np.linspace(0, 1, 11)] +init_contours_to_remove = [None, None] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_name = 'b_perturbation' +final_colour_scheme = 'RdBu_r' +final_field_label = r'$\Delta b$ (m s$^{-2}$)' +final_contours = np.linspace(-3.0e-3, 3.0e-3, 13) +final_contour_to_remove = 0.0 + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +contour_method = 'tricontour' +xlims = [0, 300.0] +ylims = [0, 10.0] + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(1, 2, figsize=(12, 6), sharex='all', sharey='all') +time_idx = 0 + +for i, (ax, field_name, field_label, colour_scheme, contours, to_remove) in \ + enumerate(zip(axarray.flatten(), init_field_names, init_field_labels, + init_colour_schemes, init_contours, init_contours_to_remove)): + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=to_remove) + cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines + ) + + add_colorbar_ax( + fig, cf, field_label, location='bottom', cbar_labelpad=-10 + ) + tomplot_field_title( + 
ax, f't = {time:.1f} s', minmax=True, field_data=field_data + ) + + # Labels ------------------------------------------------------------------- + if i == 0: + ax.set_ylabel(r'$z$ (km)', labelpad=-20) + ax.set_ylim(ylims) + ax.set_yticks(ylims) + ax.set_yticklabels(ylims) + + ax.set_xlabel(r'$x$ (km)', labelpad=-10) + ax.set_xlim(xlims) + ax.set_xticks(xlims) + ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.2) +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(2, 1, figsize=(8, 8), sharex='all') +time_idx = -1 + +# Data extraction ---------------------------------------------------------- +field_data = extract_gusto_field(data_file, final_field_name, time_idx=time_idx) +coords_X, coords_Y = extract_gusto_coords(data_file, final_field_name) +time = data_file['time'][time_idx] + +# Plot 2D data ----------------------------------------------------------------- +ax = axarray[0] + +cmap, lines = tomplot_cmap( + final_contours, final_colour_scheme, remove_contour=final_contour_to_remove +) +cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, final_contours, + cmap=cmap, line_contours=lines +) + +add_colorbar_fig( + fig, cf, final_field_label, ax_idxs=[0], location='right', cbar_labelpad=-40 +) +tomplot_field_title( + ax, f't = {time:.1f} s', minmax=True, field_data=field_data +) + +ax.set_ylabel(r'$z$ (km)', labelpad=-20) +ax.set_ylim(ylims) +ax.set_yticks(ylims) +ax.set_yticklabels(ylims) + +# Plot 1D data ----------------------------------------------------------------- +ax = axarray[1] + +field_data, coords_X, coords_Y = reshape_gusto_data(field_data, coords_X, coords_Y) + +# Determine midpoint index +mid_idx = np.floor_divide(np.shape(field_data)[1], 2) +slice_height = coords_Y[0, mid_idx] + +ax.plot(coords_X[:, mid_idx], field_data[:, mid_idx], color='black') + +tomplot_field_title( + ax, r'$z$' + f' = {slice_height} km' +) + +b_lims = [np.min(final_contours), np.max(final_contours)] + +ax.set_ylabel(final_field_label, labelpad=-20) +ax.set_ylim(b_lims) +ax.set_yticks(b_lims) +ax.set_yticklabels(b_lims) + +ax.set_xlabel(r'$x$ (km)', labelpad=-10) +ax.set_xlim(xlims) +ax.set_xticks(xlims) +ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(hspace=0.2) +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/compressible_euler/plot_dcmip_3_1_gravity_wave.py b/plotting/compressible_euler/plot_dcmip_3_1_gravity_wave.py new file mode 100644 index 000000000..1575bfaba --- /dev/null +++ b/plotting/compressible_euler/plot_dcmip_3_1_gravity_wave.py @@ -0,0 +1,277 @@ +""" +Plots the DCMIP 3-1 gravity wave test case. + +This plots the initial conditions @ t = 0 s, with +(a) zonal wind, (b) theta (c) theta perturbation: all on a lon-lat slice, +(d) zonal wind, (e) theta (f) theta perturbation: on a lat-z slice, + +and the final state @ t = 3600 s, with +(a) theta perturbation on a lon-lat slice, +(b) theta perturbation on a lon-z slice. 
+""" +from os.path import abspath, dirname +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, tomplot_field_title, extract_gusto_coords, + extract_gusto_field, reshape_gusto_data, extract_gusto_vertical_slice, + regrid_vertical_slice +) + +test = 'dcmip_3_1_gravity_wave' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/compressible_euler/{test}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['u_zonal', 'theta', 'theta_perturbation', + 'u_zonal', 'theta', 'theta_perturbation'] +init_colour_schemes = ['YlOrBr', 'PuRd', 'OrRd', + 'YlOrBr', 'PuRd', 'OrRd',] +init_field_labels = [r'$u$ (m s$^{-1}$)', r'$\theta$ (K)', r'$\Delta\theta$ (K)', + r'$u$ (m s$^{-1}$)', r'$\theta$ (K)', r'$\Delta\theta$ (K)'] +init_contours = [np.linspace(0, 25, 11), + np.linspace(300, 335, 13), + np.linspace(0.0, 1.0, 11), + np.linspace(0, 25, 11), + np.linspace(300, 335, 13), + np.linspace(0.0, 1.0, 11)] +init_contours_to_remove = [None, None, None, None, None, None] +init_slice_along = ['z', 'z', 'z', 'lon', 'lon', 'lon'] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_names = ['theta_perturbation', 'theta_perturbation'] +final_colour_schemes = ['RdBu_r', 'RdBu_r'] +final_field_labels = [r'$\Delta\theta$ (K)', r'$\Delta\theta$ (K)'] +final_contours = [np.linspace(-0.1, 0.1, 21), + np.linspace(-0.1, 0.1, 21)] +final_contours_to_remove = [0.0, 0.0] +final_slice_along = ['z', 'lat'] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +contour_method = 'tricontour' +lon_lims = [-180, 180] +lat_lims = [-90, 90] +z_lims = [0, 10] +level = 5 +slice_at_lon = 120.0 +slice_at_lat = 0.0 + +# 1D grids for vertical regridding +coords_lon_1d = np.linspace(-180, 180, 50) +coords_lat_1d = np.linspace(-90, 90, 50) +# Dictionary to hold plotting grids -- keys are "slice_along" values +plotting_grid = {'lat': coords_lon_1d, 'lon': coords_lat_1d} + +cbar_format = {'u_zonal': '1.0f', + 'theta': '1.0f', + 'theta_perturbation': '1.1f'} + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(2, 3, figsize=(18, 12), sharex='row', sharey='row') +time_idx = 0 + +for i, (ax, field_name, field_label, colour_scheme, contours, + to_remove, slice_along) in \ + enumerate(zip(axarray.flatten(), init_field_names, init_field_labels, 
+ init_colour_schemes, init_contours, + init_contours_to_remove, init_slice_along)): + + # Data extraction ---------------------------------------------------------- + time = data_file['time'][time_idx] + + if slice_along == 'z': + field_full = extract_gusto_field(data_file, field_name, time_idx) + coords_X_full, coords_Y_full, coords_Z_full = \ + extract_gusto_coords(data_file, field_name) + + # Reshape + field_full, coords_X_full, coords_Y_full, _ = \ + reshape_gusto_data(field_full, coords_X_full, + coords_Y_full, coords_Z_full) + + # Take level for a horizontal slice + field_data = field_full[:, level] + # Abuse of names for coord variables but simplifies code below + coords_X = coords_X_full[:, level] + coords_Y = coords_Y_full[:, level] + + else: + orig_field_data, orig_coords_X, orig_coords_Y, orig_coords_Z = \ + extract_gusto_vertical_slice( + data_file, field_name, time_idx, + slice_along=slice_along, slice_at=slice_at_lon + ) + + # Slices need regridding as points don't cleanly live along lon or lat = 0.0 + field_data, coords_X, coords_Y = \ + regrid_vertical_slice( + plotting_grid[slice_along], slice_along, slice_at_lon, + orig_coords_X, orig_coords_Y, orig_coords_Z, orig_field_data + ) + # Scale coordinates + coords_Y /= 1000. + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=to_remove) + cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines + ) + + add_colorbar_ax( + fig, cf, field_label, location='bottom', cbar_labelpad=-10, + cbar_format=cbar_format[field_name] + ) + if slice_along == 'z': + tomplot_field_title( + ax, '$z = $ 5 km', minmax=True, field_data=field_data + ) + elif slice_along == 'lon': + tomplot_field_title( + ax, r'$\lambda = $ 120 deg', minmax=True, field_data=field_data + ) + + # Labels ------------------------------------------------------------------- + if i == 0: + ax.set_ylabel(r'$\vartheta$ (deg)', labelpad=-20) + ax.set_ylim(lat_lims) + ax.set_yticks(lat_lims) + ax.set_yticklabels(lat_lims) + elif i == 3: + ax.set_ylabel(r'$z$ (km)', labelpad=-20) + ax.set_ylim(z_lims) + ax.set_yticks(z_lims) + ax.set_yticklabels(z_lims) + + if i < 3: + ax.set_xlabel(r'$\lambda$ (deg)', labelpad=-10) + ax.set_xlim(lon_lims) + ax.set_xticks(lon_lims) + ax.set_xticklabels(lon_lims) + else: + ax.set_xlabel(r'$\vartheta$ (deg)', labelpad=-10) + ax.set_xlim(lat_lims) + ax.set_xticks(lat_lims) + ax.set_xticklabels(lat_lims) + +# Save figure ------------------------------------------------------------------ +plt.suptitle(f't = {time:.1f} s', y=0.95) +fig.subplots_adjust(wspace=0.25, hspace=0.1) +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(1, 2, figsize=(12, 6)) +time_idx = -1 +time = data_file['time'][time_idx] + +for i, (ax, field_name, field_label, colour_scheme, contours, + to_remove, slice_along) in \ + enumerate(zip(axarray.flatten(), final_field_names, final_field_labels, + final_colour_schemes, final_contours, + final_contours_to_remove, final_slice_along)): + + # Data extraction ---------------------------------------------------------- + if slice_along == 'z': + field_full = 
extract_gusto_field(data_file, field_name, time_idx) + coords_X_full, coords_Y_full, coords_Z_full = \ + extract_gusto_coords(data_file, field_name) + + # Reshape + field_full, coords_X_full, coords_Y_full, _ = \ + reshape_gusto_data(field_full, coords_X_full, + coords_Y_full, coords_Z_full) + + # Take level for a horizontal slice + field_data = field_full[:, level] + # Abuse of names for coord variables but simplifies code below + coords_X = coords_X_full[:, level] + coords_Y = coords_Y_full[:, level] + + else: + orig_field_data, orig_coords_X, orig_coords_Y, orig_coords_Z = \ + extract_gusto_vertical_slice( + data_file, field_name, time_idx, + slice_along=slice_along, slice_at=slice_at_lat + ) + + # Slices need regridding as points don't cleanly live along lon or lat = 0.0 + field_data, coords_X, coords_Y = \ + regrid_vertical_slice( + plotting_grid[slice_along], slice_along, slice_at_lat, + orig_coords_X, orig_coords_Y, orig_coords_Z, orig_field_data + ) + # Scale coordinates + coords_Y /= 1000. + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=to_remove) + cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines + ) + + add_colorbar_ax( + fig, cf, field_label, location='bottom', cbar_labelpad=-10, + cbar_format=cbar_format[field_name] + ) + if slice_along == 'z': + tomplot_field_title( + ax, r'$z = $ 5 km', minmax=True, field_data=field_data + ) + elif slice_along == 'lat': + tomplot_field_title( + ax, r'$\vartheta = $ 0 deg', minmax=True, field_data=field_data + ) + + # Labels ------------------------------------------------------------------- + if i == 0: + ax.set_xlabel(r'$\lambda$ (deg)', labelpad=-10) + ax.set_xlim(lon_lims) + ax.set_xticks(lon_lims) + ax.set_xticklabels(lon_lims) + ax.set_ylabel(r'$\vartheta$ (deg)', labelpad=-20) + ax.set_ylim(lat_lims) + ax.set_yticks(lat_lims) + ax.set_yticklabels(lat_lims) + else: + ax.set_xlabel(r'$\lambda$ (deg)', labelpad=-10) + ax.set_xlim(lon_lims) + ax.set_xticks(lon_lims) + ax.set_xticklabels(lon_lims) + ax.set_ylabel(r'$z$ (km)', labelpad=-20) + ax.set_ylim(z_lims) + ax.set_yticks(z_lims) + ax.set_yticklabels(z_lims) + +# Save figure ------------------------------------------------------------------ +plt.suptitle(f't = {time:.0f} s') +fig.subplots_adjust(wspace=0.18) +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/compressible_euler/plot_dry_bryan_fritsch.py b/plotting/compressible_euler/plot_dry_bryan_fritsch.py new file mode 100644 index 000000000..f696ac23f --- /dev/null +++ b/plotting/compressible_euler/plot_dry_bryan_fritsch.py @@ -0,0 +1,93 @@ +""" +Plots the dry Bryan and Fritsch rising bubble test case. 
+ +This plots: +(a) theta perturbation @ t = 0 s, (b) theta perturbation @ t = 1000 s +""" +from os.path import abspath, dirname +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_cmap, plot_contoured_field, + add_colorbar_fig, tomplot_field_title, extract_gusto_coords, + extract_gusto_field +) + +test = 'dry_bryan_fritsch' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/compressible_euler/{test}' + +# ---------------------------------------------------------------------------- # +# Plot details +# ---------------------------------------------------------------------------- # +field_names = ['theta_perturbation', 'theta_perturbation'] +time_idxs = [0, -1] +cbars = [False, True] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +contours = np.linspace(-0.5, 2.5, 13) +colour_scheme = 'OrRd' +field_label = r'$\Delta \theta$ (K)' +contour_method = 'tricontour' +xlims = [0, 10] +ylims = [0, 10] + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(1, 2, figsize=(16, 6), sharex='all', sharey='all') + +for i, (ax, time_idx, field_name, cbar) in \ + enumerate(zip(axarray.flatten(), time_idxs, field_names, cbars)): + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=0.0) + cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines + ) + + if cbar: + add_colorbar_fig( + fig, cf, field_label, ax_idxs=[i], location='right' + ) + tomplot_field_title( + ax, f't = {time:.1f} s', minmax=True, field_data=field_data + ) + + # Labels ------------------------------------------------------------------- + if i == 0: + ax.set_ylabel(r'$z$ (km)', labelpad=-20) + ax.set_ylim(ylims) + ax.set_yticks(ylims) + ax.set_yticklabels(ylims) + + ax.set_xlabel(r'$x$ (km)', labelpad=-10) + ax.set_xlim(xlims) + ax.set_xticks(xlims) + ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.15) +plot_name = f'{plot_stem}.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/compressible_euler/plot_skamarock_klemp_nonhydrostatic.py 
b/plotting/compressible_euler/plot_skamarock_klemp_nonhydrostatic.py new file mode 100644 index 000000000..668d75dc8 --- /dev/null +++ b/plotting/compressible_euler/plot_skamarock_klemp_nonhydrostatic.py @@ -0,0 +1,173 @@ +""" +Plots the Skamarock-Klemp gravity wave in a vertical slice. + +This plots the initial conditions @ t = 0 s, with +(a) theta perturbation, (b) theta +and the final state @ t = 3600 s, with +(a) theta perturbation, +(b) a 1D slice through the wave +""" +from os.path import abspath, dirname +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, tomplot_field_title, extract_gusto_coords, + extract_gusto_field, reshape_gusto_data, add_colorbar_fig +) + +test = 'skamarock_klemp_nonhydrostatic' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/compressible_euler/{test}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['theta_perturbation', 'theta'] +init_colour_schemes = ['YlOrRd', 'Purples'] +init_field_labels = [r'$\Delta\theta$ (K)', r'$\theta$ (K)'] +init_contours = [np.linspace(0.0, 0.01, 11), np.linspace(300, 335, 8)] +init_contours_to_remove = [None, None] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_name = 'theta_perturbation' +final_colour_scheme = 'RdBu_r' +final_field_label = r'$\Delta\theta$ (K)' +final_contours = np.linspace(-3.0e-3, 3.0e-3, 13) +final_contour_to_remove = 0.0 + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +contour_method = 'tricontour' +xlims = [0, 300.0] +ylims = [0, 10.0] + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(1, 2, figsize=(12, 6), sharex='all', sharey='all') +time_idx = 0 + +for i, (ax, field_name, field_label, colour_scheme, contours, to_remove) in \ + enumerate(zip(axarray.flatten(), init_field_names, init_field_labels, + init_colour_schemes, init_contours, init_contours_to_remove)): + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=to_remove) + 
cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines + ) + + add_colorbar_ax( + fig, cf, field_label, location='bottom', cbar_labelpad=-10 + ) + tomplot_field_title( + ax, f't = {time:.1f} s', minmax=True, field_data=field_data + ) + + # Labels ------------------------------------------------------------------- + if i == 0: + ax.set_ylabel(r'$z$ (km)', labelpad=-20) + ax.set_ylim(ylims) + ax.set_yticks(ylims) + ax.set_yticklabels(ylims) + + ax.set_xlabel(r'$x$ (km)', labelpad=-10) + ax.set_xlim(xlims) + ax.set_xticks(xlims) + ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.2) +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(2, 1, figsize=(8, 8), sharex='all') +time_idx = -1 + +# Data extraction ---------------------------------------------------------- +field_data = extract_gusto_field(data_file, final_field_name, time_idx=time_idx) +coords_X, coords_Y = extract_gusto_coords(data_file, final_field_name) +time = data_file['time'][time_idx] + +# Plot 2D data ----------------------------------------------------------------- +ax = axarray[0] + +cmap, lines = tomplot_cmap( + final_contours, final_colour_scheme, remove_contour=final_contour_to_remove +) +cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, final_contours, + cmap=cmap, line_contours=lines +) + +add_colorbar_fig( + fig, cf, final_field_label, ax_idxs=[0], location='right', cbar_labelpad=-40 +) +tomplot_field_title( + ax, f't = {time:.1f} s', minmax=True, field_data=field_data +) + +ax.set_ylabel(r'$z$ (km)', labelpad=-20) +ax.set_ylim(ylims) +ax.set_yticks(ylims) +ax.set_yticklabels(ylims) + +# Plot 1D data ----------------------------------------------------------------- +ax = axarray[1] + +field_data, coords_X, coords_Y = reshape_gusto_data(field_data, coords_X, coords_Y) + +# Determine midpoint index +mid_idx = np.floor_divide(np.shape(field_data)[1], 2) +slice_height = coords_Y[0, mid_idx] + +ax.plot(coords_X[:, mid_idx], field_data[:, mid_idx], color='black') + +tomplot_field_title( + ax, r'$z$' + f' = {slice_height} km' +) + +theta_lims = [np.min(final_contours), np.max(final_contours)] + +ax.set_ylabel(final_field_label, labelpad=-20) +ax.set_ylim(theta_lims) +ax.set_yticks(theta_lims) +ax.set_yticklabels(theta_lims) + +ax.set_xlabel(r'$x$ (km)', labelpad=-10) +ax.set_xlim(xlims) +ax.set_xticks(xlims) +ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(hspace=0.2) +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/compressible_euler/plot_straka_bubble.py b/plotting/compressible_euler/plot_straka_bubble.py new file mode 100644 index 000000000..557de8bb6 --- /dev/null +++ b/plotting/compressible_euler/plot_straka_bubble.py @@ -0,0 +1,93 @@ +""" +Plots the Straka bubble test case. 
+ +This plots: +(a) theta perturbation @ t = 0 s, (b) theta perturbation @ t = 900 s +""" +from os.path import abspath, dirname +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_cmap, plot_contoured_field, + add_colorbar_fig, tomplot_field_title, extract_gusto_coords, + extract_gusto_field +) + +test = 'straka_bubble' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/compressible_euler/{test}' + +# ---------------------------------------------------------------------------- # +# Plot details +# ---------------------------------------------------------------------------- # +field_names = ['theta_perturbation', 'theta_perturbation'] +time_idxs = [0, -1] +cbars = [False, True] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +contours = np.linspace(-7.5, 0.5, 17) +colour_scheme = 'Blues_r' +field_label = r'$\Delta \theta$ (K)' +contour_method = 'tricontour' +xlims = [25.6, 38.4] +ylims = [0., 5.0] + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(1, 2, figsize=(18, 6), sharex='all', sharey='all') + +for i, (ax, time_idx, field_name, cbar) in \ + enumerate(zip(axarray.flatten(), time_idxs, field_names, cbars)): + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=0.0) + cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, negative_linestyles='solid' + ) + + if cbar: + add_colorbar_fig( + fig, cf, field_label, ax_idxs=[i], location='right' + ) + tomplot_field_title( + ax, f't = {time:.1f} s', minmax=True, field_data=field_data + ) + + # Labels ------------------------------------------------------------------- + if i == 0: + ax.set_ylabel(r'$z$ (km)', labelpad=-20) + ax.set_ylim(ylims) + ax.set_yticks(ylims) + ax.set_yticklabels(ylims) + + ax.set_xlabel(r'$x$ (km)', labelpad=-10) + ax.set_xlim(xlims) + ax.set_xticks(xlims) + ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.15) +plot_name = f'{plot_stem}.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/compressible_euler/plot_unsaturated_bubble.py 
b/plotting/compressible_euler/plot_unsaturated_bubble.py new file mode 100644 index 000000000..3d1240c02 --- /dev/null +++ b/plotting/compressible_euler/plot_unsaturated_bubble.py @@ -0,0 +1,155 @@ +""" +Plots the unsaturated moist rising bubble test case, which features rain. + +This plots the initial conditions @ t = 0 s, with +(a) theta perturbation, (b) relative humidity, +and the final state @ t = 600 s, with +(a) theta perturbation, (b) relative humidity and (c) rain mixing ratio +""" +from os.path import abspath, dirname +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, tomplot_field_title, extract_gusto_coords, + extract_gusto_field +) + +test = 'unsaturated_bubble' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/compressible_euler/{test}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['theta_perturbation', 'RelativeHumidity'] +init_colour_schemes = ['Reds', 'Blues'] +init_field_labels = [r'$\Delta\theta$ (K)', 'Relative Humidity'] +init_contours = [np.linspace(-0.25, 3.0, 14), np.linspace(0.0, 1.1, 12)] +init_contours_to_remove = [0.0, 0.2] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_names = ['theta_perturbation', 'RelativeHumidity', 'rain'] +final_colour_schemes = ['RdBu_r', 'Blues', 'Purples'] +final_field_labels = [r'$\Delta\theta$ (K)', 'Relative Humidity', r'$m_r$ (kg/kg)'] +final_contours = [np.linspace(-3.5, 3.5, 15), + np.linspace(0.0, 1.1, 12), + np.linspace(-2.5e-6, 5.0e-5, 12)] +final_contours_to_remove = [0.0, None, None] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +contour_method = 'tricontour' +xlims = [0, 3.6] +ylims = [0, 2.4] + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(1, 2, figsize=(12, 6), sharex='all', sharey='all') +time_idx = 0 + +for i, (ax, field_name, field_label, colour_scheme, contours, to_remove) in \ + enumerate(zip(axarray.flatten(), init_field_names, init_field_labels, + init_colour_schemes, init_contours, init_contours_to_remove)): + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] + + # Plot data 
---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=to_remove) + cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines + ) + + add_colorbar_ax( + fig, cf, field_label, location='bottom', cbar_labelpad=-10 + ) + tomplot_field_title( + ax, f't = {time:.1f} s', minmax=True, field_data=field_data + ) + + # Labels ------------------------------------------------------------------- + if i == 0: + ax.set_ylabel(r'$z$ (km)', labelpad=-20) + ax.set_ylim(ylims) + ax.set_yticks(ylims) + ax.set_yticklabels(ylims) + + ax.set_xlabel(r'$x$ (km)', labelpad=-10) + ax.set_xlim(xlims) + ax.set_xticks(xlims) + ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.15) +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(1, 3, figsize=(18, 6), sharex='all', sharey='all') +time_idx = -1 + +for i, (ax, field_name, field_label, colour_scheme, contours, to_remove) in \ + enumerate(zip(axarray.flatten(), final_field_names, final_field_labels, + final_colour_schemes, final_contours, + final_contours_to_remove)): + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=to_remove) + cf, lines = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines + ) + + add_colorbar_ax( + fig, cf, field_label, location='bottom', cbar_labelpad=-10 + ) + tomplot_field_title( + ax, f't = {time:.1f} s', minmax=True, field_data=field_data + ) + + # Labels ------------------------------------------------------------------- + if i == 0: + ax.set_ylabel(r'$z$ (km)', labelpad=-20) + ax.set_ylim(ylims) + ax.set_yticks(ylims) + ax.set_yticklabels(ylims) + + ax.set_xlabel(r'$x$ (km)', labelpad=-10) + ax.set_xlim(xlims) + ax.set_xticks(xlims) + ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.18) +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/shallow_water/plot_linear_williamson_2.py b/plotting/shallow_water/plot_linear_williamson_2.py new file mode 100644 index 000000000..4851e0185 --- /dev/null +++ b/plotting/shallow_water/plot_linear_williamson_2.py @@ -0,0 +1,172 @@ +""" +Plots the linear Williamson 2 test case. + +The initial conditions are plotted: +(a) the velocity field, (b) the depth field. + +And after 5 days, this plots: +(a) the relative vorticity field, (b) the error in the depth field. 
+""" +from os.path import abspath, dirname +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_contours, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, plot_field_quivers, tomplot_field_title, + extract_gusto_coords, extract_gusto_field, regrid_horizontal_slice +) + +test_name = 'linear_williamson_2' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test_name}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/{test_name}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['u', 'D'] +init_colour_schemes = ['Oranges', 'YlGnBu'] +init_field_labels = [r'$|u|$ (m s$^{-1}$)', r'$D$ (m)'] +init_contours_to_remove = [None, None, None] +init_contours = [np.linspace(0, 40, 9), + np.linspace(-2000, 0, 11)] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_names = ['RelativeVorticity', 'D_error'] +final_colour_schemes = ['RdBu_r', 'PiYG'] +final_field_labels = [r'$\zeta \ / $ s$^{-1}$', r'$\Delta D$ (m)'] +final_contours_to_remove = [0.0, 0.0] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +projection = ccrs.Robinson() +contour_method = 'contour' +xlims = [-180, 180] +ylims = [-90, 90] + +# We need to regrid onto lon-lat grid -- specify that here +lon_1d = np.linspace(-180.0, 180.0, 120) +lat_1d = np.linspace(-90, 90, 120) +lon_2d, lat_2d = np.meshgrid(lon_1d, lat_1d, indexing='ij') + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(15, 5)) +time_idx = 0 + +for i, (field_name, colour_scheme, field_label, contour_to_remove, contours) in \ + enumerate(zip( + init_field_names, init_colour_schemes, + init_field_labels, init_contours_to_remove, init_contours)): + + # Make axes + ax = fig.add_subplot(1, 2, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + if field_name == 'u': + zonal_data = extract_gusto_field(data_file, 'u_zonal', time_idx=time_idx) + meridional_data = extract_gusto_field(data_file, 'u_meridional', time_idx=time_idx) + field_data = np.sqrt(zonal_data**2 + meridional_data**2) + coords_X, coords_Y = extract_gusto_coords(data_file, 'u_zonal') + + else: + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) 
+ + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax(ax, cf, field_label, location='bottom', cbar_labelpad=-10) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + + # Add quivers -------------------------------------------------------------- + if field_name == 'u': + # Need to re-grid to lat-lon grid to get sensible looking quivers + regrid_zonal_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, zonal_data, + periodic_fix='sphere' + ) + regrid_meridional_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, meridional_data, + periodic_fix='sphere' + ) + plot_field_quivers( + ax, lon_2d, lat_2d, regrid_zonal_data, regrid_meridional_data, + spatial_filter_step=6, magnitude_filter=1.0, + projection=ccrs.PlateCarree() + ) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.25) +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(15, 5)) +time_idx = -1 + +for i, (field_name, colour_scheme, field_label, contour_to_remove) in \ + enumerate(zip( + final_field_names, final_colour_schemes, + final_field_labels, final_contours_to_remove)): + + # Make axes + ax = fig.add_subplot(1, 2, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) + + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + contours = tomplot_contours(field_data) + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax(ax, cf, field_label, location='bottom', cbar_labelpad=-10) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + +# Save figure ------------------------------------------------------------------ +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/shallow_water/plot_moist_convective_williamson_2.py b/plotting/shallow_water/plot_moist_convective_williamson_2.py new file mode 100644 index 000000000..3c0a4254e --- /dev/null +++ b/plotting/shallow_water/plot_moist_convective_williamson_2.py @@ -0,0 +1,186 @@ +""" +Plots the moist convective Williamson 2 test case. 
+ +The initial conditions are plotted: +(a) the velocity field, (b) the depth field, (c) water vapour field. + +And after 5 days, this plots: +(a) relative vorticity, (b) depth error (c) cloud. +""" +from os.path import abspath, dirname +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_contours, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, plot_field_quivers, tomplot_field_title, + extract_gusto_coords, extract_gusto_field, regrid_horizontal_slice +) + +test_name = 'moist_convective_williamson_2' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test_name}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/shallow_water/{test_name}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['u', 'D', 'water_vapour'] +init_colour_schemes = ['Oranges', 'YlGnBu', 'Purples'] +init_field_labels = [r'$|u|$ (m s$^{-1}$)', r'$D$ (m)', r'$m_v$ (kg kg$^{-1}$)'] +init_contours_to_remove = [None, None, None] +init_contours = [np.linspace(0, 20, 9), + np.linspace(1900, 3100, 13), + np.linspace(0, 0.012, 13)] +init_data_formats = ['1.0f', '1.0f', '1.2e'] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_names = ['RelativeVorticity', 'D_error', 'cloud_water'] +final_colour_schemes = ['RdBu_r', 'PiYG', 'Blues'] +final_field_labels = [r'$\zeta \ / $ s$^{-1}$', r'$\Delta D$ (m)', r'$m_{cl}$ (kg kg$^{-1}$)'] +final_contours_to_remove = [0.0, 0.0, None] +final_data_formats = ['1.1e', '1.1f', '1.2e'] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +projection = ccrs.Robinson() +contour_method = 'contour' +xlims = [-180, 180] +ylims = [-90, 90] + +# We need to regrid onto lon-lat grid -- specify that here +lon_1d = np.linspace(-180.0, 180.0, 120) +lat_1d = np.linspace(-90, 90, 120) +lon_2d, lat_2d = np.meshgrid(lon_1d, lat_1d, indexing='ij') + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(21, 5)) +time_idx = 0 + +for i, (field_name, colour_scheme, field_label, + contour_to_remove, contours, data_format) in \ + enumerate(zip( + init_field_names, init_colour_schemes, init_field_labels, + init_contours_to_remove, init_contours, init_data_formats)): + + # Make axes + ax = fig.add_subplot(1, 3, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + if field_name == 'u': + zonal_data = 
extract_gusto_field(data_file, 'u_zonal', time_idx=time_idx) + meridional_data = extract_gusto_field(data_file, 'u_meridional', time_idx=time_idx) + field_data = np.sqrt(zonal_data**2 + meridional_data**2) + coords_X, coords_Y = extract_gusto_coords(data_file, 'u_zonal') + + else: + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) + + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax( + ax, cf, field_label, location='bottom', cbar_labelpad=-10, + cbar_format=data_format + ) + tomplot_field_title( + ax, None, minmax=True, field_data=field_data, minmax_format=data_format + ) + + # Add quivers -------------------------------------------------------------- + if field_name == 'u': + # Need to re-grid to lat-lon grid to get sensible looking quivers + regrid_zonal_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, zonal_data, + periodic_fix='sphere' + ) + regrid_meridional_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, meridional_data, + periodic_fix='sphere' + ) + plot_field_quivers( + ax, lon_2d, lat_2d, regrid_zonal_data, regrid_meridional_data, + spatial_filter_step=6, magnitude_filter=1.0, + projection=ccrs.PlateCarree() + ) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.25) +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(21, 5)) +time_idx = -1 + +for i, (field_name, colour_scheme, field_label, contour_to_remove, data_format) in \ + enumerate(zip( + final_field_names, final_colour_schemes, + final_field_labels, final_contours_to_remove, final_data_formats)): + + # Make axes + ax = fig.add_subplot(1, 3, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) 
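+ # for the final state the contour levels are generated from the data itself, via tomplot_contours below, rather than from a fixed list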
+ + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + contours = tomplot_contours(field_data) + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax( + ax, cf, field_label, location='bottom', cbar_labelpad=-10, + cbar_format=data_format + ) + tomplot_field_title( + ax, None, minmax=True, field_data=field_data, minmax_format=data_format + ) + +# Save figure ------------------------------------------------------------------ +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/shallow_water/plot_moist_thermal_williamson_5.py b/plotting/shallow_water/plot_moist_thermal_williamson_5.py new file mode 100644 index 000000000..9c5bf8ecf --- /dev/null +++ b/plotting/shallow_water/plot_moist_thermal_williamson_5.py @@ -0,0 +1,195 @@ +""" +Plots the moist thermal Williamson 5 test case. + +The initial conditions are plotted: +(a) velocity, (b) depth field, +(c) buoyancy, (d) water vapour. + +And after 50 days, this plots: +(a) relative vorticity, (b) free-surface height, +(c) buoyancy, (d) cloud. +""" +from os.path import abspath, dirname +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, plot_field_quivers, tomplot_field_title, + extract_gusto_coords, extract_gusto_field, regrid_horizontal_slice +) + +test_name = 'moist_thermal_williamson_5' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test_name}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/shallow_water/{test_name}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['u', 'D', 'b', 'water_vapour'] +init_colour_schemes = ['Oranges', 'YlGnBu', 'PuRd_r', 'Purples'] +init_field_labels = [r'$|u|$ (m s$^{-1}$)', r'$D$ (m)', + r'$b$ (m s$^{-2}$)', r'$m_v$ (kg kg$^{-1}$)'] +init_contours_to_remove = [None, None, None, None] +init_contours = [np.linspace(0, 20, 9), + np.linspace(3800, 6000, 13), + np.linspace(8.8, 11.2, 13), + np.linspace(0.0, 0.02, 11)] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_names = ['RelativeVorticity', 'D_plus_topography', 'b', 'cloud_water'] +final_colour_schemes = ['RdBu_r', 'YlGnBu', 'PuRd_r', 'Blues'] +final_field_labels = [r'$\zeta \ / $ s$^{-1}$', r'$D+B$ (m)', + r'$b$ (m s$^{-2}$)', r'$m_{cl}$ (kg kg$^{-1}$)'] +final_contours_to_remove = [0.0, 
None, None, 0.0] +final_contours = [np.linspace(-5e-5, 5e-5, 11), + np.linspace(4800, 6000, 13), + np.linspace(8.8, 11.2, 13), + np.linspace(-5e-5, 5e-4, 12)] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +projection = ccrs.Robinson() +contour_method = 'contour' +xlims = [-180, 180] +ylims = [-90, 90] + +# We need to regrid onto lon-lat grid -- specify that here +lon_1d = np.linspace(-180.0, 180.0, 120) +lat_1d = np.linspace(-90, 90, 120) +lon_2d, lat_2d = np.meshgrid(lon_1d, lat_1d, indexing='ij') + +cbar_format = {'RelativeVorticity': '1.1e', + 'u': '1.0f', + 'D': '1.0f', + 'D_plus_topography': '1.0f', + 'b': '1.1f', + 'water_vapour': '1.2f', + 'cloud_water': '1.1e'} + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(15, 10)) +time_idx = 0 + +for i, (field_name, colour_scheme, field_label, contour_to_remove, contours) in \ + enumerate(zip( + init_field_names, init_colour_schemes, + init_field_labels, init_contours_to_remove, init_contours)): + + # Make axes + ax = fig.add_subplot(2, 2, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + if field_name == 'u': + zonal_data = extract_gusto_field(data_file, 'u_zonal', time_idx=time_idx) + meridional_data = extract_gusto_field(data_file, 'u_meridional', time_idx=time_idx) + field_data = np.sqrt(zonal_data**2 + meridional_data**2) + coords_X, coords_Y = extract_gusto_coords(data_file, 'u_zonal') + + else: + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) 
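+ # the periodic_fix='sphere' option below is intended to account for the periodic longitude coordinate when regridding spherical data onto the regular lon-lat grid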
+ + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax( + ax, cf, field_label, location='bottom', cbar_labelpad=-10, + cbar_format=cbar_format[field_name] + ) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + + # Add quivers -------------------------------------------------------------- + if field_name == 'u': + # Need to re-grid to lat-lon grid to get sensible looking quivers + regrid_zonal_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, zonal_data, + periodic_fix='sphere' + ) + regrid_meridional_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, meridional_data, + periodic_fix='sphere' + ) + plot_field_quivers( + ax, lon_2d, lat_2d, regrid_zonal_data, regrid_meridional_data, + spatial_filter_step=6, magnitude_filter=1.0, + projection=ccrs.PlateCarree() + ) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.25) +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(15, 10)) +time_idx = -1 + +for i, (field_name, colour_scheme, field_label, contour_to_remove, contours) in \ + enumerate(zip( + final_field_names, final_colour_schemes, + final_field_labels, final_contours_to_remove, final_contours)): + + # Make axes + ax = fig.add_subplot(2, 2, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) 
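+ # the colour-bar number format for each field is looked up from the cbar_format dictionary defined above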
+ + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax( + ax, cf, field_label, location='bottom', cbar_labelpad=-10, + cbar_format=cbar_format[field_name] + ) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + +# Save figure ------------------------------------------------------------------ +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/shallow_water/plot_shallow_water_1d_wave.py b/plotting/shallow_water/plot_shallow_water_1d_wave.py new file mode 100644 index 000000000..256ac1c1e --- /dev/null +++ b/plotting/shallow_water/plot_shallow_water_1d_wave.py @@ -0,0 +1,102 @@ +""" +Plots the fields from the 1D shallow water wave. + +This plots: +(a) u @ t = 0 s, (b) v @ t = 0 s, (c) D @ t = 0 s +(d) u @ t = 1 s, (e) v @ t = 1 s, (f) D @ t = 1 s +""" +from os.path import abspath, dirname +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +import pandas as pd +from tomplot import ( + set_tomplot_style, tomplot_field_title, extract_gusto_coords, + extract_gusto_field +) + +test = 'shallow_water_1d_wave' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/shallow_water/{test}' + +# ---------------------------------------------------------------------------- # +# Plot details +# ---------------------------------------------------------------------------- # +field_names = ['u', 'v', 'D', 'u', 'v', 'D'] +time_idxs = [0, 0, 0, -1, -1, -1] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +xlims = [0, 2*np.pi] +xlims_labels = [0, r'$2\pi$'] + +ylims = { + 'u': [-0.5, 0.5], + 'v': [-0.5, 0.5], + 'D': [8, 12] +} +field_labels = { + 'u': r'$u$ (m s$^{-1}$)', + 'v': r'$v$ (m s$^{-1}$)', + 'D': r'$D$ (m)' +} + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(2, 3, figsize=(16, 6), sharex='all', sharey='col') + +for i, (ax, time_idx, field_name) in \ + enumerate(zip(axarray.flatten(), time_idxs, field_names)): + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X = extract_gusto_coords(data_file, 
field_name) + time = data_file['time'][time_idx] + + # Get coordinates in order + data_frame = pd.DataFrame({'coords': coords_X, 'field': field_data}) + data_frame = data_frame.sort_values(by=['coords']) + coords_X = data_frame['coords'] + field_data = data_frame['field'] + + # Convert coordinates to m + coords_X *= 1000. + + # Plot data ---------------------------------------------------------------- + ax.plot(coords_X, field_data, color='black', linestyle='-', marker='') + + tomplot_field_title( + ax, f't = {time:.1f} s', minmax=True, field_data=field_data, + minmax_format='1.2f' + ) + + # Labels ------------------------------------------------------------------- + ax.set_ylabel(field_labels[field_name], labelpad=-15) + ax.set_ylim(ylims[field_name]) + ax.set_yticks(ylims[field_name]) + ax.set_yticklabels(ylims[field_name]) + + if i > 2: + ax.set_xlabel(r'$x$ (m)', labelpad=-10) + ax.set_xlim(xlims) + ax.set_xticks(xlims) + ax.set_xticklabels(xlims_labels) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.2) +plot_name = f'{plot_stem}.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/shallow_water/plot_thermal_williamson_2.py b/plotting/shallow_water/plot_thermal_williamson_2.py new file mode 100644 index 000000000..32ab14832 --- /dev/null +++ b/plotting/shallow_water/plot_thermal_williamson_2.py @@ -0,0 +1,173 @@ +""" +Plots the thermal Williamson 2 test case. + +The initial conditions are plotted: +(a) the velocity field, (b) the depth field, (c) buoyancy field. + +And after 5 days, this plots: +(a) relative vorticity, (b) depth error (c) buoyancy error. +""" +from os.path import abspath, dirname +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_contours, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, plot_field_quivers, tomplot_field_title, + extract_gusto_coords, extract_gusto_field, regrid_horizontal_slice +) + +test_name = 'thermal_williamson_2' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test_name}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/{test_name}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['u', 'D', 'b'] +init_colour_schemes = ['Oranges', 'YlGnBu', 'PuRd_r'] +init_field_labels = [r'$|u|$ (m s$^{-1}$)', r'$D$ (m)', r'$b$ (m s$^{-2}$)'] +init_contours_to_remove = [None, None, None] +init_contours = [np.linspace(0, 20, 9), + np.linspace(1900, 3100, 13), + np.linspace(9, 10, 11)] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_names = ['RelativeVorticity', 'D_error', 'b_error'] +final_colour_schemes = ['RdBu_r', 'PiYG', 'PuOr'] +final_field_labels = [r'$\zeta \ / $ s$^{-1}$', r'$\Delta D$ (m)', r'$\Delta 
b$ (m s$^{-2}$)'] +final_contours_to_remove = [0.0, 0.0, 0.0] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +projection = ccrs.Robinson() +contour_method = 'contour' +xlims = [-180, 180] +ylims = [-90, 90] + +# We need to regrid onto lon-lat grid -- specify that here +lon_1d = np.linspace(-180.0, 180.0, 120) +lat_1d = np.linspace(-90, 90, 120) +lon_2d, lat_2d = np.meshgrid(lon_1d, lat_1d, indexing='ij') + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(21, 5)) +time_idx = 0 + +for i, (field_name, colour_scheme, field_label, contour_to_remove, contours) in \ + enumerate(zip( + init_field_names, init_colour_schemes, + init_field_labels, init_contours_to_remove, init_contours)): + + # Make axes + ax = fig.add_subplot(1, 3, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + if field_name == 'u': + zonal_data = extract_gusto_field(data_file, 'u_zonal', time_idx=time_idx) + meridional_data = extract_gusto_field(data_file, 'u_meridional', time_idx=time_idx) + field_data = np.sqrt(zonal_data**2 + meridional_data**2) + coords_X, coords_Y = extract_gusto_coords(data_file, 'u_zonal') + + else: + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) 
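+ # for the velocity field, the zonal and meridional components are also regridded separately below so that quivers can be drawn on the regular lon-lat grid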
+ + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax(ax, cf, field_label, location='bottom', cbar_labelpad=-10) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + + # Add quivers -------------------------------------------------------------- + if field_name == 'u': + # Need to re-grid to lat-lon grid to get sensible looking quivers + regrid_zonal_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, zonal_data, + periodic_fix='sphere' + ) + regrid_meridional_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, meridional_data, + periodic_fix='sphere' + ) + plot_field_quivers( + ax, lon_2d, lat_2d, regrid_zonal_data, regrid_meridional_data, + spatial_filter_step=6, magnitude_filter=1.0, + projection=ccrs.PlateCarree() + ) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.25) +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(21, 5)) +time_idx = -1 + +for i, (field_name, colour_scheme, field_label, contour_to_remove) in \ + enumerate(zip( + final_field_names, final_colour_schemes, + final_field_labels, final_contours_to_remove)): + + # Make axes + ax = fig.add_subplot(1, 3, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) + + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + contours = tomplot_contours(field_data) + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax(ax, cf, field_label, location='bottom', cbar_labelpad=-10) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + +# Save figure ------------------------------------------------------------------ +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/shallow_water/plot_williamson_2.py b/plotting/shallow_water/plot_williamson_2.py new file mode 100644 index 000000000..1f8eaef57 --- /dev/null +++ b/plotting/shallow_water/plot_williamson_2.py @@ -0,0 +1,172 @@ +""" +Plots the Williamson 2 test case. + +The initial conditions are plotted: +(a) the velocity field, (b) the depth field. 
+ +And after 5 days, this plots: +(a) the relative vorticity field, (b) the error in the depth field. +""" +from os.path import abspath, dirname +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_contours, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, plot_field_quivers, tomplot_field_title, + extract_gusto_coords, extract_gusto_field, regrid_horizontal_slice +) + +test_name = 'williamson_2' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test_name}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/{test_name}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['u', 'D'] +init_colour_schemes = ['Oranges', 'YlGnBu'] +init_field_labels = [r'$|u|$ (m s$^{-1}$)', r'$D$ (m)'] +init_contours_to_remove = [None, None, None] +init_contours = [np.linspace(0, 40, 9), + np.linspace(4000, 6000, 9)] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_names = ['RelativeVorticity', 'D_error'] +final_colour_schemes = ['RdBu_r', 'PiYG'] +final_field_labels = [r'$\zeta \ / $ s$^{-1}$', r'$\Delta D$ (m)'] +final_contours_to_remove = [0.0, 0.0] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +projection = ccrs.Robinson() +contour_method = 'contour' +xlims = [-180, 180] +ylims = [-90, 90] + +# We need to regrid onto lon-lat grid -- specify that here +lon_1d = np.linspace(-180.0, 180.0, 120) +lat_1d = np.linspace(-90, 90, 120) +lon_2d, lat_2d = np.meshgrid(lon_1d, lat_1d, indexing='ij') + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(15, 5)) +time_idx = 0 + +for i, (field_name, colour_scheme, field_label, contour_to_remove, contours) in \ + enumerate(zip( + init_field_names, init_colour_schemes, + init_field_labels, init_contours_to_remove, init_contours)): + + # Make axes + ax = fig.add_subplot(1, 2, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + if field_name == 'u': + zonal_data = extract_gusto_field(data_file, 'u_zonal', time_idx=time_idx) + meridional_data = extract_gusto_field(data_file, 'u_meridional', time_idx=time_idx) + field_data = np.sqrt(zonal_data**2 + meridional_data**2) + coords_X, coords_Y = extract_gusto_coords(data_file, 'u_zonal') + + else: + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, 
field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) + + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax(ax, cf, field_label, location='bottom', cbar_labelpad=-10) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + + # Add quivers -------------------------------------------------------------- + if field_name == 'u': + # Need to re-grid to lat-lon grid to get sensible looking quivers + regrid_zonal_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, zonal_data, + periodic_fix='sphere' + ) + regrid_meridional_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, meridional_data, + periodic_fix='sphere' + ) + plot_field_quivers( + ax, lon_2d, lat_2d, regrid_zonal_data, regrid_meridional_data, + spatial_filter_step=6, magnitude_filter=1.0, + projection=ccrs.PlateCarree() + ) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.25) +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(15, 5)) +time_idx = -1 + +for i, (field_name, colour_scheme, field_label, contour_to_remove) in \ + enumerate(zip( + final_field_names, final_colour_schemes, + final_field_labels, final_contours_to_remove)): + + # Make axes + ax = fig.add_subplot(1, 2, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) + + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + contours = tomplot_contours(field_data) + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax(ax, cf, field_label, location='bottom', cbar_labelpad=-10) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + +# Save figure ------------------------------------------------------------------ +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/shallow_water/plot_williamson_5.py b/plotting/shallow_water/plot_williamson_5.py new file mode 100644 index 000000000..51ce0e802 --- /dev/null +++ b/plotting/shallow_water/plot_williamson_5.py @@ -0,0 +1,184 @@ +""" +Plots the Williamson 5 test case. 
+ +The initial conditions are plotted: +(a) the velocity field, (b) the depth field. + +And after 50 days, this plots: +(a) the relative vorticity field, (b) free-surface height. +""" +from os.path import abspath, dirname +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, plot_field_quivers, tomplot_field_title, + extract_gusto_coords, extract_gusto_field, regrid_horizontal_slice +) + +test_name = 'williamson_5' + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/{test_name}/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/shallow_water/{test_name}' + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['u', 'D'] +init_colour_schemes = ['Oranges', 'YlGnBu'] +init_field_labels = [r'$|u|$ (m s$^{-1}$)', r'$D$ (m)'] +init_contours_to_remove = [None, None, None] +init_contours = [np.linspace(0, 20, 9), + np.linspace(3800, 6000, 12)] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_names = ['RelativeVorticity', 'D_plus_topography'] +final_colour_schemes = ['RdBu_r', 'PiYG'] +final_field_labels = [r'$\zeta \ / $ s$^{-1}$', r'$D+B$ (m)'] +final_contours = [np.linspace(-1e-4, 1e-4, 21), + np.linspace(5000, 6000, 11)] +final_contours_to_remove = [0.0, None] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +projection = ccrs.Robinson() +contour_method = 'contour' +xlims = [-180, 180] +ylims = [-90, 90] + +cbar_format = {'RelativeVorticity': '1.1e', + 'u': '1.0f', + 'D': '1.0f', + 'D_plus_topography': '1.0f'} + +# We need to regrid onto lon-lat grid -- specify that here +lon_1d = np.linspace(-180.0, 180.0, 120) +lat_1d = np.linspace(-90, 90, 120) +lon_2d, lat_2d = np.meshgrid(lon_1d, lat_1d, indexing='ij') + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(15, 5)) +time_idx = 0 + +for i, (field_name, colour_scheme, field_label, contour_to_remove, contours) in \ + enumerate(zip( + init_field_names, init_colour_schemes, + init_field_labels, init_contours_to_remove, init_contours)): + + # Make axes + ax = fig.add_subplot(1, 2, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + if field_name == 'u': + zonal_data = extract_gusto_field(data_file, 'u_zonal', time_idx=time_idx) + meridional_data = extract_gusto_field(data_file, 'u_meridional', time_idx=time_idx) + 
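# the plotted velocity field is the wind speed, computed next from the zonal and meridional components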
field_data = np.sqrt(zonal_data**2 + meridional_data**2) + coords_X, coords_Y = extract_gusto_coords(data_file, 'u_zonal') + + else: + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) + + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax( + ax, cf, field_label, location='bottom', cbar_labelpad=-10, + cbar_format=cbar_format[field_name] + ) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + + # Add quivers -------------------------------------------------------------- + if field_name == 'u': + # Need to re-grid to lat-lon grid to get sensible looking quivers + regrid_zonal_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, zonal_data, + periodic_fix='sphere' + ) + regrid_meridional_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, meridional_data, + periodic_fix='sphere' + ) + plot_field_quivers( + ax, lon_2d, lat_2d, regrid_zonal_data, regrid_meridional_data, + spatial_filter_step=6, magnitude_filter=1.0, + projection=ccrs.PlateCarree() + ) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.25) +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig = plt.figure(figsize=(15, 5)) +time_idx = -1 + +for i, (field_name, colour_scheme, field_label, contours, contour_to_remove) in \ + enumerate(zip( + final_field_names, final_colour_schemes, + final_field_labels, final_contours, final_contours_to_remove)): + + # Make axes + ax = fig.add_subplot(1, 2, 1+i, projection=projection) + + # Data extraction ---------------------------------------------------------- + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) 
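+ # fixed contour levels (final_contours above) are used for the final state here, with the colour-bar format again taken from the cbar_format dictionary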
+ + # Regrid onto lon-lat grid + field_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, field_data, periodic_fix='sphere' + ) + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, lon_2d, lat_2d, field_data, contour_method, contours, + cmap=cmap, line_contours=lines, projection=projection + ) + + add_colorbar_ax( + ax, cf, field_label, location='bottom', cbar_labelpad=-10, + cbar_format=cbar_format[field_name] + ) + tomplot_field_title(ax, None, minmax=True, field_data=field_data) + +# Save figure ------------------------------------------------------------------ +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/unit-tests/recovery_tests/test_conservative_recovery.py b/unit-tests/recovery_tests/test_conservative_recovery.py new file mode 100644 index 000000000..796e75397 --- /dev/null +++ b/unit-tests/recovery_tests/test_conservative_recovery.py @@ -0,0 +1,112 @@ +""" +Test whether the conservative recovery process is working appropriately. +""" + +from firedrake import (PeriodicIntervalMesh, IntervalMesh, ExtrudedMesh, + SpatialCoordinate, FiniteElement, FunctionSpace, + TensorProductElement, Function, Constant, interval, norm, errornorm, + assemble) +from gusto import * +import numpy as np +import pytest + +np.random.seed(0) + + +@pytest.fixture +def mesh(geometry): + + L = 100. + H = 100. + + deltax = L / 5. + deltaz = H / 5. + nlayers = int(H/deltaz) + ncolumns = int(L/deltax) + + if geometry == "periodic": + m = PeriodicIntervalMesh(ncolumns, L) + elif geometry == "non-periodic": + m = IntervalMesh(ncolumns, L) + + extruded_mesh = ExtrudedMesh(m, layers=nlayers, layer_height=deltaz) + + return extruded_mesh + + +def expr(geometry, mesh, configuration): + + x, z = SpatialCoordinate(mesh) + + if configuration == 'rho_constant': + rho_expr = Constant(2.0) + if geometry == "periodic": + m_expr = np.random.randn() + np.random.randn() * z + elif geometry == "non-periodic": + m_expr = np.random.randn() + np.random.randn() * x + np.random.randn() * z + + elif configuration == 'm_constant': + m_expr = Constant(0.01) + if geometry == "periodic": + rho_expr = np.random.randn() + np.random.randn() * z + elif geometry == "non-periodic": + rho_expr = np.random.randn() + np.random.randn() * x + np.random.randn() * z + + return rho_expr, m_expr + + +@pytest.mark.parametrize("configuration", ["m_constant", "rho_constant"]) +@pytest.mark.parametrize("geometry", ["periodic", "non-periodic"]) +def test_conservative_recovery(geometry, mesh, configuration): + + rho_expr, m_expr = expr(geometry, mesh, configuration) + + # construct theta element + cell = mesh._base_mesh.ufl_cell().cellname() + w_hori = FiniteElement("DG", cell, 0) + w_vert = FiniteElement("CG", interval, 1) + theta_element = TensorProductElement(w_hori, w_vert) + + # spaces + DG0 = FunctionSpace(mesh, "DG", 0) + CG1 = FunctionSpace(mesh, "CG", 1) + DG1 = FunctionSpace(mesh, "DG", 1) + Vt = FunctionSpace(mesh, theta_element) + + # set up density + rho_DG1 = Function(DG1).interpolate(rho_expr) + rho_DG0 = Function(DG0).project(rho_DG1) + + # mixing ratio fields + m_Vt = Function(Vt).interpolate(m_expr) + m_DG1_approx = Function(DG1).interpolate(m_expr) + m_Vt_back = Function(Vt) + m_DG1 = Function(DG1) + + options =
ConservativeRecoveryOptions(embedding_space=DG1, + recovered_space=CG1, + boundary_method=BoundaryMethod.taylor) + + # make the recoverers and do the recovery + conservative_recoverer = ConservativeRecoverer(m_Vt, m_DG1, + rho_DG0, rho_DG1, options) + back_projector = ConservativeProjector(rho_DG1, rho_DG0, m_DG1, m_Vt_back, + subtract_mean=True) + + conservative_recoverer.project() + back_projector.project() + + # check various aspects of the process + m_high_diff = errornorm(m_DG1, m_DG1_approx) / norm(m_DG1_approx) + m_low_diff = errornorm(m_Vt_back, m_Vt) / norm(m_Vt) + mass_low = assemble(rho_DG0*m_Vt*dx) + mass_high = assemble(rho_DG1*m_DG1*dx) + + assert (mass_low - mass_high) / mass_high < 5e-14, \ + f'Conservative recovery on {geometry} vertical slice not conservative for {configuration} configuration' + assert m_low_diff < 2e-14, \ + f'Irreversible conservative recovery on {geometry} vertical slice for {configuration} configuration' + + if configuration in ['m_constant', 'rho_constant']: + assert m_high_diff < 2e-14, \ + f'Inaccurate conservative recovery on {geometry} vertical slice for {configuration} configuration' diff --git a/unit-tests/recovery_tests/test_reversible_recovery.py b/unit-tests/recovery_tests/test_reversible_recovery.py index 721b0158f..474e9ba0f 100644 --- a/unit-tests/recovery_tests/test_reversible_recovery.py +++ b/unit-tests/recovery_tests/test_reversible_recovery.py @@ -9,8 +9,8 @@ """ from firedrake import (IntervalMesh, CubedSphereMesh, IcosahedralSphereMesh, - SpatialCoordinate, FunctionSpace, - Function, norm, errornorm, as_vector) + SpatialCoordinate, FunctionSpace, Interpolator, + Projector, Function, norm, errornorm, as_vector) from gusto import * import numpy as np import pytest diff --git a/unit-tests/test_conservative_projection.py b/unit-tests/test_conservative_projection.py new file mode 100644 index 000000000..bb47c891e --- /dev/null +++ b/unit-tests/test_conservative_projection.py @@ -0,0 +1,64 @@ +""" +This tests the ConservativeProjector object, by projecting a mixing ratio from +DG1 to DG0, relative to different density fields, and checking that the tracer +mass is conserved. 
+""" + +from firedrake import (UnitSquareMesh, FunctionSpace, Constant, + Function, assemble, dx, sin, SpatialCoordinate) +from gusto import ConservativeProjector +import pytest + + +@pytest.mark.parametrize("projection", ["discontinuous", "continuous"]) +def test_conservative_projection(projection): + + # Set up mesh on plane + mesh = UnitSquareMesh(3, 3) + + # Function spaces and functions + DG0 = FunctionSpace(mesh, "DG", 0) + DG1 = FunctionSpace(mesh, "DG", 1) + + rho_DG0 = Function(DG0) + rho_DG1 = Function(DG1) + m_DG1 = Function(DG1) + + if projection == "continuous": + CG1 = FunctionSpace(mesh, "CG", 1) + m_CG1 = Function(CG1) + else: + m_DG0 = Function(DG0) + + # Projector object + if projection == "continuous": + projector = ConservativeProjector(rho_DG1, rho_DG0, m_DG1, m_CG1, + subtract_mean=True) + else: + projector = ConservativeProjector(rho_DG1, rho_DG0, m_DG1, m_DG0) + + # Initial conditions + x, y = SpatialCoordinate(mesh) + + rho_expr = Constant(1.0) + 0.5*x*y**2 + m_expr = Constant(2.0) + 0.6*sin(x) + + rho_DG1.interpolate(rho_expr) + m_DG1.interpolate(m_expr) + rho_DG0.project(rho_DG1) + + # Test projection + projector.project() + + tol = 1e-14 + mass_DG1 = assemble(rho_DG1*m_DG1*dx) + + if projection == "continuous": + mass_CG1 = assemble(rho_DG0*m_CG1*dx) + + assert abs(mass_CG1 - mass_DG1) < tol, "continuous projection is not conservative" + + else: + mass_DG0 = assemble(rho_DG0*m_DG0*dx) + + assert abs(mass_DG0 - mass_DG1) < tol, "discontinuous projection is not conservative"