Skip to content

Commit

Permalink
Get circleci configuration working for forks
Browse files Browse the repository at this point in the history
  • Loading branch information
Jesse Zoldak committed Aug 5, 2015
1 parent ba5da45 commit 8672455
Show file tree
Hide file tree
Showing 5 changed files with 351 additions and 217 deletions.
59 changes: 55 additions & 4 deletions circle.yml
Original file line number Diff line number Diff line change
@@ -1,12 +1,63 @@
machine:
python:
version: 2.7.3

general:
artifacts:
- "reports"
- "test_root/log"

dependencies:
override:
- pip install setuptools==0.6c11
- pip install distribute==0.6.49
- npm install
- bundle install

- pip install setuptools
- pip install -r requirements/edx/paver.txt

# Mirror what paver install_prereqs does.
# After a successful build, CircleCI will
# cache the virtualenv at that state, so that
# the next build will not need to install them
# from scratch again.
- pip install -r requirements/edx/pre.txt
- pip install -r requirements/edx/github.txt
- pip install -r requirements/edx/local.txt

# HACK: within base.txt stevedore had a
# dependency on a version range of pbr.
# Install a version which falls within that range.
- pip install pbr==0.9.0
- pip install -r requirements/edx/base.txt
- if [ -e requirements/edx/post.txt ]; then pip install -r requirements/edx/post.txt ; fi

- paver install_prereqs
- pip install coveralls

test:
override:
- ./scripts/circle.sh:
# Run tests for the system.
# all-tests.sh is the entry point for determining
# which tests to run.
# See the circleCI documentation regarding parallelism
# to understand how multiple containers can be used to
# run subsets of tests in parallel.
- ./scripts/all-tests.sh:
parallel: true

post:
- mkdir -p $CIRCLE_TEST_REPORTS/junit
# Copy the junit results up to be consumed by circleci,
# but only do this if there actually are results.
# Note that the greater than zero comparison is doing a
# string compare, but that should be fine for our purposes here.
# Do this on each of the containers that were used in
# the build so that all results are consolidated.
- "if [ $(find reports -type f | wc -l) -gt 0 ] ; then cp -r reports/. $CIRCLE_TEST_REPORTS/junit ; fi":
parallel: true

# If you have enabled coveralls for your repo, configure your COVERALLS_REPO_TOKEN
# as an Environment Variable in the Project Settings on CircleCI, and coverage
# data will automatically be sent to coveralls. See https://coveralls.io/
# If you have not set up coveralls then the following statement will
# print a message but not affect the pass/fail status of the build.
- if [ -z $COVERALLS_REPO_TOKEN ]; then echo "Coveralls token not defined."; else coveralls; fi
227 changes: 29 additions & 198 deletions scripts/all-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,206 +3,37 @@ set -e

###############################################################################
#
# edx-all-tests.sh
# all-tests.sh
#
# Execute all tests for edx-platform.
#
# This script can be called from a Jenkins
# multiconfiguration job that defines these environment
# variables:
#
# `TEST_SUITE` defines which kind of test to run.
# Possible values are:
#
# - "quality": Run the quality (pep8/pylint) checks
# - "lms-unit": Run the LMS Python unit tests
# - "cms-unit": Run the CMS Python unit tests
# - "js-unit": Run the JavaScript tests
# - "commonlib-unit": Run Python unit tests from the common/lib directory
# - "commonlib-js-unit": Run the JavaScript tests and the Python unit
# tests from the common/lib directory
# - "lms-acceptance": Run the acceptance (Selenium/Lettuce) tests for
# the LMS
# - "cms-acceptance": Run the acceptance (Selenium/Lettuce) tests for
# Studio
# - "bok-choy": Run acceptance tests that use the bok-choy framework
#
# `SHARD` is a number indicating which subset of the tests to build.
#
# For "bok-choy" and "lms-unit", the tests are put into shard groups
# using the nose 'attr' decorator (e.g. "@attr('shard_1')"). Anything with
# the 'shard_n' attribute will run in the nth shard. If there isn't a
# shard explicitly assigned, the test will run in the last shard (the one
# with the highest number).
#
# Jenkins configuration:
#
# - The edx-platform git repository is checked out by the Jenkins git plugin.
#
# - Jenkins logs in as user "jenkins"
#
# - The Jenkins file system root is "/home/jenkins"
#
# - An init script creates a virtualenv at "/home/jenkins/edx-venv"
# with some requirements pre-installed (such as scipy)
#
# Jenkins worker setup:
# See the edx/configuration repo for Jenkins worker provisioning scripts.
# The provisioning scripts install requirements that this script depends on!
# Execute tests for edx-platform. This script is designed to be the
# entry point for various CI systems.
#
###############################################################################

# Clean up previous builds
git clean -qxfd

source scripts/jenkins-common.sh

# Violations thresholds for failing the build
PYLINT_THRESHOLD=6200
JSHINT_THRESHOLD=3700

# If the environment variable 'SHARD' is not set, default to 'all'.
# This could happen if you are trying to use this script from
# jenkins and do not define 'SHARD' in your multi-config project.
# Note that you will still need to pass a value for 'TEST_SUITE'
# or else no tests will be executed.
SHARD=${SHARD:="all"}

case "$TEST_SUITE" in

"quality")
echo "Finding fixme's and storing report..."
paver find_fixme > fixme.log || { cat fixme.log; EXIT=1; }
echo "Finding pep8 violations and storing report..."
paver run_pep8 > pep8.log || { cat pep8.log; EXIT=1; }
echo "Finding pylint violations and storing in report..."
paver run_pylint -l $PYLINT_THRESHOLD > pylint.log || { cat pylint.log; EXIT=1; }
# Run quality task. Pass in the 'fail-under' percentage to diff-quality
paver run_quality -p 100

mkdir -p reports
echo "Finding jshint violations and storing report..."
PATH=$PATH:node_modules/.bin
paver run_jshint -l $JSHINT_THRESHOLD > jshint.log || { cat jshint.log; EXIT=1; }
echo "Running code complexity report (python)."
paver run_complexity || echo "Unable to calculate code complexity. Ignoring error."
# Need to create an empty test result so the post-build
# action doesn't fail the build.
cat > reports/quality.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="quality" tests="1" errors="0" failures="0" skip="0">
<testcase classname="quality" name="quality" time="0.604"></testcase>
</testsuite>
END
exit $EXIT
;;

"lms-unit")
case "$SHARD" in
"1")
paver test_system -s lms --extra_args="--attr='shard_1' --with-flaky" --cov_args="-p"
;;
"2")
paver test_system -s lms --extra_args="--attr='shard_1=False' --with-flaky" --cov_args="-p"
;;
*)
paver test_system -s lms --extra_args="--with-flaky" --cov_args="-p"
;;
esac
;;

"cms-unit")
paver test_system -s cms --extra_args="--with-flaky" --cov_args="-p"
;;

"commonlib-unit")
paver test_lib --extra_args="--with-flaky" --cov_args="-p"
;;

"js-unit")
# Runs js tests under coverage. If the tests fail or error, then this
# will exit the script without running coverage. If the tests pass,
# then it will proceed and generate the diff-coverage reports for js.
paver test_js --coverage || exit 1
paver diff_coverage
;;

"commonlib-js-unit")
paver test_js --coverage --skip_clean || { EXIT=1; }
paver test_lib --skip_clean --extra_args="--with-flaky" --cov_args="-p" || { EXIT=1; }

# This is to ensure that the build status of the shard is properly set.
# Because we are running two paver commands in a row, we need to capture
# their return codes in order to exit with a non-zero code if either of
# them fail. We put the || clause there because otherwise, when a paver
# command fails, this entire script will exit, and not run the second
# paver command in this case statement. So instead of exiting, the value
# of a variable named EXIT will be set to 1 if either of the paver
# commands fail. We then use this variable's value as our exit code.
# Note that by default the value of this variable EXIT is not set, so if
# neither command fails then the exit command resolves to simply exit
# which is considered successful.
exit $EXIT
;;

"lms-acceptance")
paver test_acceptance -s lms --extra_args="-v 3"
;;

"cms-acceptance")
paver test_acceptance -s cms --extra_args="-v 3"
;;

"bok-choy")
case "$SHARD" in

"all")
paver test_bokchoy
;;

"1")
paver test_bokchoy --extra_args="-a shard_1 --with-flaky"
;;

"2")
paver test_bokchoy --extra_args="-a 'shard_2' --with-flaky"
;;

"3")
paver test_bokchoy --extra_args="-a 'shard_3' --with-flaky"
;;

"4")
paver test_bokchoy --extra_args="-a 'shard_4' --with-flaky"
;;

"5")
paver test_bokchoy --extra_args="-a 'shard_5' --with-flaky"
;;

"6")
paver test_bokchoy --extra_args="-a shard_1=False,shard_2=False,shard_3=False,shard_4=False,shard_5=False --with-flaky"
;;

# Default case because if we later define another bok-choy shard on Jenkins
# (e.g. Shard 5) in the multi-config project and expand this file
# with an additional case condition, old branches without that commit
# would not execute any tests on the worker assigned to that shard
# and thus their build would fail.
# This way they will just report 1 test executed and passed.
*)
# Need to create an empty test result so the post-build
# action doesn't fail the build.
# May be unnecessary if we changed the "Skip if there are no test files"
# option to True in the jenkins job definitions.
mkdir -p reports/bok_choy
cat > reports/bok_choy/xunit.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="nosetests" tests="1" errors="0" failures="0" skip="0">
<testcase classname="acceptance.tests" name="shard_placeholder" time="0.001"></testcase>
</testsuite>
END
;;
esac
;;
esac
# Violation thresholds consumed by the CI test scripts
# (passed as the -l limit to paver run_pylint / run_jshint).
export PYLINT_THRESHOLD=6200
export JSHINT_THRESHOLD=3700

# Detect which CI system this build is running under and choose the
# matching test-runner script. Sets SCRIPT_TO_RUN; leaves it unset when
# no known CI environment is detected.
doCheckVars() {
# CIRCLECI is set in CircleCI build environments.
if [ -n "$CIRCLECI" ] ; then
SCRIPT_TO_RUN=scripts/circle-ci-tests.sh

# JENKINS_HOME is set on Jenkins workers; jenkins-common.sh performs
# Jenkins-specific environment setup before the tests run.
elif [ -n "$JENKINS_HOME" ] ; then
source scripts/jenkins-common.sh
SCRIPT_TO_RUN=scripts/generic-ci-tests.sh
fi
}

# Determine the CI system for the environment
doCheckVars

# Run appropriate CI system script
if [ -n "$SCRIPT_TO_RUN" ] ; then
$SCRIPT_TO_RUN

# Exit with the exit code of the called script
exit $?
else
echo "ERROR. Could not detect continuous integration system."
exit 1
fi
73 changes: 73 additions & 0 deletions scripts/circle-ci-tests.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
#!/usr/bin/env bash
###############################################################################
#
# circle-ci-tests.sh
#
# Execute tests for edx-platform on circleci.com
#
# Forks should configure parallelism, and use this script
# to define which tests to run in each of the containers.
#
# Each container is identified by CIRCLE_NODE_INDEX (0-based); the
# case statement below maps a container index to a test subset.
#
###############################################################################

# From the sh(1) man page of FreeBSD:
# Exit immediately if any untested command fails in non-interactive
# mode. The exit status of a command is considered to be explicitly
# tested if the command is part of the list used to control an if,
# elif, while, or until; if the command is the left hand operand of
# an “&&” or “||” operator; or if the command is a pipeline preceded
# by the ! operator. If a shell function is executed and its exit
# status is explicitly tested, all commands of the function are
# considered to be tested as well.
set -e

# Return status is that of the last command to fail in a
# piped command, or a zero if they all succeed.
set -o pipefail

# Tracks whether any quality check failed. The quality branch runs every
# check before exiting with this value, so one failure doesn't hide others.
EXIT=0

# NOTE(review): PYLINT_THRESHOLD and JSHINT_THRESHOLD are expected to be
# exported by the caller (scripts/all-tests.sh exports them); if this script
# is run directly they will be empty, leaving the -l flags without a value —
# confirm before standalone use.
case $CIRCLE_NODE_INDEX in
0) # run the quality metrics
echo "Finding fixme's and storing report..."
paver find_fixme > fixme.log || { cat fixme.log; EXIT=1; }

echo "Finding pep8 violations and storing report..."
paver run_pep8 > pep8.log || { cat pep8.log; EXIT=1; }

echo "Finding pylint violations and storing in report..."
# HACK: we need to print something to the console, otherwise circleci
# fails and aborts the job because nothing is displayed for > 10 minutes.
paver run_pylint -l $PYLINT_THRESHOLD | tee pylint.log || EXIT=1

# Run quality task. Pass in the 'fail-under' percentage to diff-quality
paver run_quality -p 100 || EXIT=1

mkdir -p reports
echo "Finding jshint violations and storing report..."
PATH=$PATH:node_modules/.bin
paver run_jshint -l $JSHINT_THRESHOLD > jshint.log || { cat jshint.log; EXIT=1; }
echo "Running code complexity report (python)."
paver run_complexity > reports/code_complexity.log || echo "Unable to calculate code complexity. Ignoring error."

exit $EXIT
;;

1) # run all of the lms unit tests
paver test_system -s lms --extra_args="--with-flaky" --cov_args="-p"
;;

2) # run all of the cms unit tests
paver test_system -s cms --extra_args="--with-flaky" --cov_args="-p"
;;

3) # run the commonlib unit tests
paver test_lib --extra_args="--with-flaky" --cov_args="-p"
;;

*)
# A container index beyond the cases above has nothing assigned to it;
# fail loudly so misconfigured parallelism is noticed rather than
# silently passing an empty container.
echo "No tests were executed in this container."
echo "Please adjust scripts/circle-ci-tests.sh to match your parallelism."
exit 1
;;
esac
15 changes: 0 additions & 15 deletions scripts/circle.sh

This file was deleted.

Loading

0 comments on commit 8672455

Please sign in to comment.