diff --git a/.github/workflows/pip-install.yml b/.github/workflows/pip-install.yml
index fda2171b3..7cb534110 100644
--- a/.github/workflows/pip-install.yml
+++ b/.github/workflows/pip-install.yml
@@ -10,7 +10,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest, macos-latest, windows-latest]
+ os: [ubuntu-latest, macos-latest]
python: ['3.7', '3.8', '3.9', '3.10', '3.11']
steps:
- uses: actions/checkout@v3
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests-linux.yml
similarity index 50%
rename from .github/workflows/tests.yml
rename to .github/workflows/tests-linux.yml
index 7a9a52eae..726ec8a3e 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests-linux.yml
@@ -1,26 +1,24 @@
-name: tests
+name: tests-linux
on:
push:
- branches: [master, main]
+ branches: [main]
pull_request:
- branches: [master, main]
+ branches: [main]
workflow_dispatch:
jobs:
tests:
- name: 'Run tests on py${{ matrix.python-version }}'
- runs-on: ubuntu-latest
+ name: '${{ matrix.os }}:python-${{ matrix.python-version }}'
+ runs-on: ${{ matrix.os }}
strategy:
- fail-fast: false
matrix:
- include:
- - python-version: '3.6'
- - python-version: '3.7'
- - python-version: '3.8'
- - python-version: '3.9'
- - python-version: '3.10'
- - python-version: '3.11'
+ os: [ubuntu-20.04, ubuntu-22.04]
+ python-version: ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11']
+ exclude:
+ - os: ubuntu-22.04
+ python-version: '3.6'
+ fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -29,10 +27,8 @@ jobs:
- name: Set up Python
id: setup_python
- uses: conda-incubator/setup-miniconda@v2
+ uses: actions/setup-python@v4
with:
- activate-environment: vplanet
- environment-file: environment.yml
python-version: ${{ matrix.python-version }}
- name: Install
@@ -42,30 +38,32 @@ jobs:
run: |
python -m pip install -U pip
python -m pip install -e .
+ pip install pytest pytest-cov
sudo apt install lcov
- name: Run tests and generate coverage
if: steps.install.outcome == 'success'
shell: bash -l {0}
- run: |
- make coverage
+ run: make coverage
+
+      - name: Check test output created
+ id: check_test_file
+ uses: andstor/file-existence-action@v2
+ with:
+ files: "/home/runner/work/vplanet/vplanet/junit/test-results.xml"
+ fail: true
- name: Get unique id
- id: unique-id
- env:
- STRATEGY_CONTEXT: ${{ toJson(strategy) }}
- run: |
- export JOB_ID=`echo $STRATEGY_CONTEXT | md5sum`
- echo "::set-output name=id::$JOB_ID"
+ uses: Tiryoh/gha-jobid-action@v1
+ id: jobs
- name: Publish unit test results
- uses: EnricoMi/publish-unit-test-result-action@v1
+ uses: EnricoMi/publish-unit-test-result-action@v2
if: always()
with:
files: junit/test-*.xml
- comment_mode: update last
- name: CodeCov
- uses: codecov/codecov-action@v2.1.0
+ uses: codecov/codecov-action@v3
with:
files: ./.coverage
diff --git a/.github/workflows/tests-macos.yml b/.github/workflows/tests-macos.yml
new file mode 100644
index 000000000..eb315d46b
--- /dev/null
+++ b/.github/workflows/tests-macos.yml
@@ -0,0 +1,55 @@
+name: tests-macos
+on:
+ push:
+ branches: [main]
+ pull_request:
+ branches: [main]
+ workflow_dispatch:
+
+jobs:
+ tests:
+ name: '${{ matrix.os }}:python-${{ matrix.python-version }}'
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [macos-11, macos-12, macos-13]
+ python-version: ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
+ fail-fast: false
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Set up Python
+ id: setup_python
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ if: steps.setup_python.outcome == 'success'
+ run: |
+ python -m pip install -U pip
+ python -m pip install -e .
+ python -m pip install pytest pytest-cov
+
+ - name: Run tests
+ if: steps.setup_python.outcome == 'success'
+ run: make test
+
+      - name: Check test output created
+ id: check_test_file
+ uses: andstor/file-existence-action@v2
+ with:
+          files: "/Users/runner/work/vplanet/vplanet/junit/test-results.xml"
+ fail: true
+
+ - name: Get unique id
+ uses: Tiryoh/gha-jobid-action@v1
+ id: jobs
+
+ - name: Publish unit test results
+ uses: EnricoMi/publish-unit-test-result-action/composite@v2
+ if: always()
+ with:
+ files: junit/test-*.xml
diff --git a/README.md b/README.md
index 503a90834..fada597bc 100644
--- a/README.md
+++ b/README.md
@@ -5,32 +5,60 @@
VPLanet: The Virtual Planet Simulator
-
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
-
-
+
+
+
+
+
+
-
-
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
@@ -83,11 +111,11 @@ Many of these modules can be combined together to simulate numerous phenomena an
The [examples/](examples) directory contains input files and scripts for generating the figures in [Barnes et al. (2020)](https://ui.adsabs.harvard.edu/abs/2020PASP..132b4502B/abstract) and subsequent publications. The "examples" badge shows if all the examples can be built with the most recent version. The [Manual/](Manual) directory contains the pdf of [Barnes et al. (2020)](https://ui.adsabs.harvard.edu/abs/2020PASP..132b4502B/abstract), which describes the physics of the first 11 modules, validates the software against observations and/or past results, and uses figures from the [examples/](examples) directory.
-An ecosystem of support software is also publicly available. [VPLot](https://github.com/VirtualPlanetaryLaboratory/vplot) is both a command line tool to quickly plot the evolution of a single integration, and also includes matplotlib functions to generate publication-worthy figures. The [VSPACE](https://github.com/VirtualPlanetaryLaboratory/vspace) script generates input files for a parameter space sweep, which can then be performed on an arbitrary number of cores with [MultiPlanet](https://github.com/VirtualPlanetaryLaboratory/multi-planet). For large parameter sweeps, an enormous amount of data can be generated, which can slow analyses. To overcome this barrier, the [BigPlanet](https://github.com/VirtualPlanetaryLaboratory/bigplanet) code can both compress datasets into HDF5 format, including statistics of an integration, and tools to facilitate plotting. These three scripts can be executed from the command line to seamlessly [perform parameter sweeps](https://virtualplanetarylaboratory.github.io/vplanet/parametersweep.html). These Python scripts are optimized for [anaconda](https://www.anaconda.com/) distributions versions 3.5-3.9. The "wheels" badge indicates if you can download and install the executables with pip for these Python distributions.
+An ecosystem of support software is also publicly available. [VPLot](https://github.com/VirtualPlanetaryLaboratory/vplot) is both a command line tool to quickly plot the evolution of a single integration, and also includes matplotlib functions to generate publication-worthy figures. The [VSPACE](https://github.com/VirtualPlanetaryLaboratory/vspace) script generates input files for a parameter space sweep, which can then be performed on an arbitrary number of cores with [MultiPlanet](https://github.com/VirtualPlanetaryLaboratory/multi-planet). For large parameter sweeps, an enormous amount of data can be generated, which can slow analyses. To overcome this barrier, the [BigPlanet](https://github.com/VirtualPlanetaryLaboratory/bigplanet) code can both compress datasets into HDF5 format, including statistics of an integration, and tools to facilitate plotting. These three scripts can be executed from the command line to seamlessly [perform parameter sweeps](https://virtualplanetarylaboratory.github.io/vplanet/parametersweep.html). These Python scripts are optimized for [anaconda](https://www.anaconda.com/) distributions versions 3.7-3.9. The "wheels" badge indicates if you can download and install the executables with pip for these Python distributions on the latest Linux and Mac operating systems.
### Code Integrity
-Behind the scenes, the VPLanet team maintains code integrity through via various automatic checks at every merge into the main branch. You can see the status of these checks via the "badges" the GitHub logo above. Currently we perform 5 checks: documentation ("docs"), units tests ("tests"), memory checks via [valgrind](http://valgrind.org) ("memcheck"), confirmation that all [examples](examples/) are working ("examples"), and that the code is pip-installable on Linux, Mac, and Windows machines ("pip-install") for the Python distributions listed. The "coverage" badge shows the percentage of the code (by line number) that is currently tested by Codecov at every commit. We are committed to maintaining a stable tool for scientists to analyze any planetary system.
+Behind the scenes, the VPLanet team maintains code integrity through via various automatic checks at every merge into the main branch. You can see the status of these checks via the "badges" the GitHub logo above. Currently we perform 5 checks: documentation ("docs"), units tests ("tests"), memory checks via [valgrind](http://valgrind.org) ("memcheck"), confirmation that all [examples](examples/) are working ("examples"), and that the code is pip-installable on the latest Linux and Mac operating systems ("pip-install") for the Python distributions listed after the GitHub Actions badge. The percentage of the lines of code that are executed by the unit tests is shown with the "codecov" badge, with details available at our Codecov account. We are committed to maintaining a stable tool for scientists to analyze any planetary system.
### Community
diff --git a/src/flare.c b/src/flare.c
index 805db014d..1e28291e0 100644
--- a/src/flare.c
+++ b/src/flare.c
@@ -97,49 +97,6 @@ void ReadFlareFFD(BODY *body,
} else if (iFile > 0)
body[iFile - 1].iFlareFFD = FLARE_FFD_DAVENPORT;
}
-/*
-void ReadFlareSlopeUnits(BODY *body,
- CONTROL *control,
- FILES *files,
- OPTIONS *options,
- SYSTEM *system,
- int iFile) {
- /* This parameter cannot exist in primary file */
-/*int lTmp = -1;
-char cTmp[OPTLEN];
-
-AddOptionString(files->Infile[iFile].cIn,
- options->cName,
- cTmp,
- &lTmp,
- control->Io.iVerbose);
-if (lTmp >= 0) {
- NotPrimaryInput(iFile,
- options->cName,
- files->Infile[iFile].cIn,
- lTmp,
- control->Io.iVerbose);
- if (!memcmp(sLower(cTmp), "se", 2)) {
- body[iFile - 1].iFlareSlopeUnits = FLARE_SLOPE_SEC;
- } else if (!memcmp(sLower(cTmp), "mi", 2)) {
- body[iFile - 1].iFlareSlopeUnits = FLARE_SLOPE_MINUTE;
- } else if (!memcmp(sLower(cTmp), "ho", 2)) {
- body[iFile - 1].iFlareSlopeUnits = FLARE_SLOPE_HOUR;
- } else if (!memcmp(sLower(cTmp), "da", 2)) {
- body[iFile - 1].iFlareSlopeUnits = FLARE_SLOPE_DAY;
- } else {
- if (control->Io.iVerbose >= VERBERR)
- fprintf(stderr,
- "ERROR: Unknown argument to %s: %s. Options are SEC, MIN, HOUR "
- "or DAY.\n",
- options->cName,
- cTmp);
- LineExit(files->Infile[iFile].cIn, lTmp);
- }
- UpdateFoundOption(&files->Infile[iFile], options, lTmp, iFile);
-} else if (iFile > 0)
- body[iFile - 1].iFlareSlopeUnits = FLARE_SLOPE_DAY;
-}*/
void ReadFlareBandPass(BODY *body,
CONTROL *control,
@@ -1018,8 +975,8 @@ void FinalizeUpdateLXUVFlare(BODY *body, UPDATE *update, int *iEqn, int iVar,
// not need this.
/*void FinalizeUpdateFlareFreqMax(BODY *body, UPDATE *update, int *iEqn, int
iVar, int iBody, int iFoo) {
- /* No primary variables for FLARE yet*/
-/* update[iBody].iaModule[iVar][*iEqn] = FLARE;
+ // No primary variables for FLARE yet
+ // update[iBody].iaModule[iVar][*iEqn] = FLARE;
update[iBody].iNumFlareFreqMax = (*iEqn)++;
}
*/
diff --git a/src/options.c b/src/options.c
index 10901f10e..da151ceed 100644
--- a/src/options.c
+++ b/src/options.c
@@ -436,8 +436,6 @@ int iGetNumLines(char cFile[]) {
FILE *fp;
char cLine[LINE];
- fprintf(stderr,"File: %s\n",cFile);
-
fp = fopen(cFile, "r");
if (fp == NULL) {
fprintf(stderr, "Unable to open %s.\n", cFile);
@@ -445,9 +443,7 @@ int iGetNumLines(char cFile[]) {
}
memset(cLine, '\0', LINE);
- // fprintf(stderr,"File: %s\n",cFile);
while (fgets(cLine, LINE, fp) != NULL) {
- // fprintf(stderr,"iLine: %d, %s",iNumLines,cLine);
iNumLines++;
/* Check to see if line is too long. The maximum length of a line is set
@@ -489,8 +485,6 @@ void InitializeInput(INFILE *input) {
FILE *fp;
char cLine[LINE];
-fprintf(stderr,"File: %s\n",input->cIn);
-
fp = fopen(input->cIn, "r");
if (fp == NULL) {
fprintf(stderr, "Unable to open %s.\n", input->cIn);
@@ -503,23 +497,12 @@ fprintf(stderr,"File: %s\n",input->cIn);
input->cReactions[0] = 0;
*/
- // fprintf(stderr,"File: %s\n",input->cIn);
for (iLine = 0; iLine < input->iNumLines; iLine++) {
- /* Initialize bLineOK */
input->bLineOK[iLine] = 0;
-
- /* Now find those lines that are comments or blank
- for (iPos = 0; iPos < LINE; iPos++) {
- cLine[iPos] = '\0';
- }
- */
memset(cLine, '\0', LINE);
fgets(cLine, LINE, fp);
- // fprintf(stderr,"iLine: %d, %s",iLine,cLine);
- /* Check for # sign or blank line */
if (CheckComment(cLine, LINE)) {
- /* Line is OK */
input->bLineOK[iLine] = 1;
} else {
// Is it a blank line?
diff --git a/tests/maketest.py b/tests/maketest.py
index 87debeae4..ab5d62924 100644
--- a/tests/maketest.py
+++ b/tests/maketest.py
@@ -33,7 +33,6 @@ def Main(dir, initial=False):
dir_list = [dir]
for dirname in dir_list:
-
if dirname in skip_list:
continue
if dirname in initial_list:
@@ -144,14 +143,12 @@ def GetSNames(bodyfiles):
def ProcessLogFile(logfile, data, forward, backward, initial=False):
-
prop = ""
body = "system"
with open(logfile, "r+", errors="ignore") as log:
content = [line.strip() for line in log.readlines()]
for line in content:
-
if len(line) == 0:
continue
@@ -173,11 +170,9 @@ def ProcessLogFile(logfile, data, forward, backward, initial=False):
# if the line starts with a '(' that means its a variable we need to grab the units
if line.startswith("("):
-
if initial == True and prop == "final":
continue
else:
-
fv_param = line[1 : line.find(")")].strip()
# THIS IS FOR VARIABLES THAT START WITH NUMBERS CURRENTLY BUGGED FIX AT LATER DATE
@@ -259,7 +254,6 @@ def ProcessLogFile(logfile, data, forward, backward, initial=False):
def ProcessOutputfile(file, data, body, Output):
-
header = []
units = []
for k, v in Output.items():
@@ -288,9 +282,7 @@ def ProcessOutputfile(file, data, body, Output):
def ProcessUnits(data):
-
for k, v in data.items():
-
if "Order" in k:
continue
@@ -437,7 +429,6 @@ def ProcessUnits(data):
def WriteTest(data, dirname, stellar):
-
badchars = "- "
for i in badchars:
dirname = dirname.replace(i, "")
@@ -514,20 +505,22 @@ def WriteTest(data, dirname, stellar):
t.write(" pass")
t.write(" \n")
- print("Successfuly created new test file: "+dirname+"/"+test_file)
-
+    print("Successfully created new test file: " + dirname + "/" + test_file)
def Arguments():
parser = argparse.ArgumentParser(
- description="Extract data from Vplanet simulations"
+ description="Extract data from VPLanet simulations"
+ )
+ parser.add_argument(
+ "dir",
+ help="Name of directory you want to run. Use 'all' to recreate all tests.",
)
- parser.add_argument("dir", help="name of directory you want to run")
parser.add_argument(
"-i",
"--initial",
action="store_true",
- help="grabs initial data instead of final",
+ help="Only use initial data for test",
)
args = parser.parse_args()