From c76feed4c2022a009a460b54b4ebbf40e5fb0557 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 23 Jul 2024 13:51:06 +0500 Subject: [PATCH 1/3] build: switch js & quality checks off of paver Co-authored-by: salman2013 --- .github/workflows/js-tests.yml | 10 +- .github/workflows/quality-checks.yml | 24 +- Makefile | 26 + pavelib/__init__.py | 2 +- pavelib/js_test.py | 143 ---- pavelib/paver_tests/conftest.py | 22 - pavelib/paver_tests/test_eslint.py | 54 -- pavelib/paver_tests/test_js_test.py | 148 ---- pavelib/paver_tests/test_paver_quality.py | 156 ---- pavelib/paver_tests/test_pii_check.py | 79 -- pavelib/paver_tests/test_stylelint.py | 36 - pavelib/paver_tests/test_timer.py | 190 ----- pavelib/paver_tests/test_xsslint.py | 120 --- pavelib/utils/test/suites/__init__.py | 5 - pavelib/utils/test/suites/js_suite.py | 109 --- pavelib/utils/test/suites/suite.py | 149 ---- pavelib/utils/test/utils.py | 91 -- scripts/generic-ci-tests.sh | 122 --- scripts/js_test.py | 492 +++++++++++ pavelib/quality.py => scripts/quality_test.py | 797 ++++++++---------- 20 files changed, 915 insertions(+), 1860 deletions(-) delete mode 100644 pavelib/js_test.py delete mode 100644 pavelib/paver_tests/conftest.py delete mode 100644 pavelib/paver_tests/test_eslint.py delete mode 100644 pavelib/paver_tests/test_js_test.py delete mode 100644 pavelib/paver_tests/test_paver_quality.py delete mode 100644 pavelib/paver_tests/test_pii_check.py delete mode 100644 pavelib/paver_tests/test_stylelint.py delete mode 100644 pavelib/paver_tests/test_timer.py delete mode 100644 pavelib/paver_tests/test_xsslint.py delete mode 100644 pavelib/utils/test/suites/__init__.py delete mode 100644 pavelib/utils/test/suites/js_suite.py delete mode 100644 pavelib/utils/test/suites/suite.py delete mode 100644 pavelib/utils/test/utils.py delete mode 100755 scripts/generic-ci-tests.sh create mode 100644 scripts/js_test.py rename pavelib/quality.py => scripts/quality_test.py (58%) diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index c9d2d7ab1191..eb8fb3ddc561 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -64,13 +64,15 @@ jobs: make base-requirements - uses: c-hive/gha-npm-cache@v1 + + - name: Install npm + run: npm ci + - name: Run JS Tests - env: - TEST_SUITE: js-unit - SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh run: | npm install -g jest - xvfb-run --auto-servernum ./scripts/all-tests.sh + xvfb-run --auto-servernum make test-js + make coverage-js - name: Save Job Artifacts uses: actions/upload-artifact@v4 diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 84610123493c..510059a9d62a 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -60,16 +60,30 @@ jobs: PIP_SRC: ${{ runner.temp }} run: | make test-requirements - + + - name: Install npm + env: + PIP_SRC: ${{ runner.temp }} + run: npm ci + + - name: Install python packages + env: + PIP_SRC: ${{ runner.temp }} + run: | + pip install -e . 
+ - name: Run Quality Tests env: - TEST_SUITE: quality - SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh PIP_SRC: ${{ runner.temp }} TARGET_BRANCH: ${{ github.base_ref }} run: | - ./scripts/all-tests.sh - + make pycodestyle + make eslint + make stylelint + make xsslint + make pii_check + make check_keywords + - name: Save Job Artifacts if: always() uses: actions/upload-artifact@v4 diff --git a/Makefile b/Makefile index 15bab5df67a9..0fc07aab8f13 100644 --- a/Makefile +++ b/Makefile @@ -204,3 +204,29 @@ migrate: migrate-lms migrate-cms # Part of https://github.com/openedx/wg-developer-experience/issues/136 ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip install` to work on ubuntu. sudo apt install libmysqlclient-dev libxmlsec1-dev + +eslint: ## check javascript for quality issues + python scripts/quality_test.py eslint + +stylelint: ## check css/scss for quality issues + python scripts/quality_test.py stylelint + +xsslint: ## check xss for quality issues + python scripts/quality_test.py xsslint + +pycodestyle: ## check python files for quality issues + pycodestyle . + +pii_check: ## check django models for pii annotations + python scripts/quality_test.py pii_check + +check_keywords: ## check django models for reserve keywords + python scripts/quality_test.py check_keywords + +test-js: ## run javascript tests + python scripts/js_test.py --option jstest + +coverage-js: ## run javascript coverage test + python scripts/js_test.py --option coverage + +quality: pycodestyle eslint stylelint xsslint pii_check check_keywords \ No newline at end of file diff --git a/pavelib/__init__.py b/pavelib/__init__.py index 875068166ff5..24f05618bdd7 100644 --- a/pavelib/__init__.py +++ b/pavelib/__init__.py @@ -3,4 +3,4 @@ """ -from . import assets, js_test, prereqs, quality +from . import assets diff --git a/pavelib/js_test.py b/pavelib/js_test.py deleted file mode 100644 index fb9c213499ac..000000000000 --- a/pavelib/js_test.py +++ /dev/null @@ -1,143 +0,0 @@ -""" -Javascript test tasks -""" - - -import os -import re -import sys - -from paver.easy import cmdopts, needs, sh, task - -from pavelib.utils.envs import Env -from pavelib.utils.test.suites import JestSnapshotTestSuite, JsTestSuite -from pavelib.utils.timer import timed - -try: - from pygments.console import colorize -except ImportError: - colorize = lambda color, text: text - -__test__ = False # do not collect - - -@task -@needs( - 'pavelib.prereqs.install_node_prereqs', - 'pavelib.utils.test.utils.clean_reports_dir', -) -@cmdopts([ - ("suite=", "s", "Test suite to run"), - ("mode=", "m", "dev or run"), - ("coverage", "c", "Run test under coverage"), - ("port=", "p", "Port to run test server on (dev mode only)"), - ('skip-clean', 'C', 'skip cleaning repository before running tests'), - ('skip_clean', None, 'deprecated in favor of skip-clean'), -], share_with=["pavelib.utils.tests.utils.clean_reports_dir"]) -@timed -def test_js(options): - """ - Run the JavaScript tests - """ - mode = getattr(options, 'mode', 'run') - port = None - skip_clean = getattr(options, 'skip_clean', False) - - if mode == 'run': - suite = getattr(options, 'suite', 'all') - coverage = getattr(options, 'coverage', False) - elif mode == 'dev': - suite = getattr(options, 'suite', None) - coverage = False - port = getattr(options, 'port', None) - else: - sys.stderr.write("Invalid mode. Please choose 'dev' or 'run'.") - return - - if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): - sys.stderr.write( - "Unknown test suite. 
Please choose from ({suites})\n".format( - suites=", ".join(Env.JS_TEST_ID_KEYS) - ) - ) - return - - if suite != 'jest-snapshot': - test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) - test_suite.run() - - if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in - test_suite = JestSnapshotTestSuite('jest') - test_suite.run() - - -@task -@cmdopts([ - ("suite=", "s", "Test suite to run"), - ("coverage", "c", "Run test under coverage"), -]) -@timed -def test_js_run(options): - """ - Run the JavaScript tests and print results to the console - """ - options.mode = 'run' - test_js(options) - - -@task -@cmdopts([ - ("suite=", "s", "Test suite to run"), - ("port=", "p", "Port to run test server on"), -]) -@timed -def test_js_dev(options): - """ - Run the JavaScript tests in your default browsers - """ - options.mode = 'dev' - test_js(options) - - -@task -@needs('pavelib.prereqs.install_coverage_prereqs') -@cmdopts([ - ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), -], share_with=['coverage']) -@timed -def diff_coverage(options): - """ - Build the diff coverage reports - """ - compare_branch = options.get('compare_branch', 'origin/master') - - # Find all coverage XML files (both Python and JavaScript) - xml_reports = [] - - for filepath in Env.REPORT_DIR.walk(): - if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): - xml_reports.append(filepath) - - if not xml_reports: - err_msg = colorize( - 'red', - "No coverage info found. Run `paver test` before running " - "`paver coverage`.\n" - ) - sys.stderr.write(err_msg) - else: - xml_report_str = ' '.join(xml_reports) - diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') - - # Generate the diff coverage reports (HTML and console) - # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 - sh( - "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} " - "--html-report {diff_html_path}".format( - xml_report_str=xml_report_str, - compare_branch=compare_branch, - diff_html_path=diff_html_path, - ) - ) - - print("\n") diff --git a/pavelib/paver_tests/conftest.py b/pavelib/paver_tests/conftest.py deleted file mode 100644 index 214a35e3fe85..000000000000 --- a/pavelib/paver_tests/conftest.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -Pytest fixtures for the pavelib unit tests. -""" - - -import os -from shutil import rmtree - -import pytest - -from pavelib.utils.envs import Env - - -@pytest.fixture(autouse=True, scope='session') -def delete_quality_junit_xml(): - """ - Delete the JUnit XML results files for quality check tasks run during the - unit tests. - """ - yield - if os.path.exists(Env.QUALITY_DIR): - rmtree(Env.QUALITY_DIR, ignore_errors=True) diff --git a/pavelib/paver_tests/test_eslint.py b/pavelib/paver_tests/test_eslint.py deleted file mode 100644 index 5802d7d0d21b..000000000000 --- a/pavelib/paver_tests/test_eslint.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Tests for Paver's Stylelint tasks. 
-""" - - -import unittest -from unittest.mock import patch - -import pytest -from paver.easy import BuildFailure, call_task - -import pavelib.quality - - -class TestPaverESLint(unittest.TestCase): - """ - For testing run_eslint - """ - - def setUp(self): - super().setUp() - - # Mock the paver @needs decorator - self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start() - self._mock_paver_needs.return_value = 0 - - # Mock shell commands - patcher = patch('pavelib.quality.sh') - self._mock_paver_sh = patcher.start() - - # Cleanup mocks - self.addCleanup(patcher.stop) - self.addCleanup(self._mock_paver_needs.stop) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_count_from_last_line') - def test_eslint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument - """ - run_eslint encounters an error parsing the eslint output log - """ - mock_count.return_value = None - with pytest.raises(BuildFailure): - call_task('pavelib.quality.run_eslint', args=['']) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_count_from_last_line') - def test_eslint_vanilla(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument - """ - eslint finds violations, but a limit was not set - """ - mock_count.return_value = 1 - pavelib.quality.run_eslint("") diff --git a/pavelib/paver_tests/test_js_test.py b/pavelib/paver_tests/test_js_test.py deleted file mode 100644 index 4b165a156674..000000000000 --- a/pavelib/paver_tests/test_js_test.py +++ /dev/null @@ -1,148 +0,0 @@ -"""Unit tests for the Paver JavaScript testing tasks.""" - -from unittest.mock import patch - -import ddt -from paver.easy import call_task - -import pavelib.js_test -from pavelib.utils.envs import Env - -from .utils import PaverTestCase - - -@ddt.ddt -class TestPaverJavaScriptTestTasks(PaverTestCase): - """ - Test the Paver JavaScript testing tasks. - """ - - EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = 'find {platform_root}/reports/javascript -type f -delete' - EXPECTED_KARMA_OPTIONS = ( - "{config_file} " - "--single-run={single_run} " - "--capture-timeout=60000 " - "--junitreportpath=" - "{platform_root}/reports/javascript/javascript_xunit-{suite}.xml " - "--browsers={browser}" - ) - EXPECTED_COVERAGE_OPTIONS = ( - ' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml' - ) - - EXPECTED_COMMANDS = [ - "make report_dir", - 'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads', - "find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;", - 'rm -rf test_root/log/auto_screenshots/*', - "rm -rf /tmp/mako_[cl]ms", - ] - - def setUp(self): - super().setUp() - - # Mock the paver @needs decorator - self._mock_paver_needs = patch.object(pavelib.js_test.test_js, 'needs').start() - self._mock_paver_needs.return_value = 0 - - # Cleanup mocks - self.addCleanup(self._mock_paver_needs.stop) - - @ddt.data( - [""], - ["--coverage"], - ["--suite=lms"], - ["--suite=lms --coverage"], - ) - @ddt.unpack - def test_test_js_run(self, options_string): - """ - Test the "test_js_run" task. 
- """ - options = self.parse_options_string(options_string) - self.reset_task_messages() - call_task("pavelib.js_test.test_js_run", options=options) - self.verify_messages(options=options, dev_mode=False) - - @ddt.data( - [""], - ["--port=9999"], - ["--suite=lms"], - ["--suite=lms --port=9999"], - ) - @ddt.unpack - def test_test_js_dev(self, options_string): - """ - Test the "test_js_run" task. - """ - options = self.parse_options_string(options_string) - self.reset_task_messages() - call_task("pavelib.js_test.test_js_dev", options=options) - self.verify_messages(options=options, dev_mode=True) - - def parse_options_string(self, options_string): - """ - Parse a string containing the options for a test run - """ - parameters = options_string.split(" ") - suite = "all" - if "--system=lms" in parameters: - suite = "lms" - elif "--system=common" in parameters: - suite = "common" - coverage = "--coverage" in parameters - port = None - if "--port=9999" in parameters: - port = 9999 - return { - "suite": suite, - "coverage": coverage, - "port": port, - } - - def verify_messages(self, options, dev_mode): - """ - Verify that the messages generated when running tests are as expected - for the specified options and dev_mode. - """ - is_coverage = options['coverage'] - port = options['port'] - expected_messages = [] - suites = Env.JS_TEST_ID_KEYS if options['suite'] == 'all' else [options['suite']] - - expected_messages.extend(self.EXPECTED_COMMANDS) - if not dev_mode and not is_coverage: - expected_messages.append(self.EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND.format( - platform_root=self.platform_root - )) - - command_template = ( - 'node --max_old_space_size=4096 node_modules/.bin/karma start {options}' - ) - - for suite in suites: - # Karma test command - if suite != 'jest-snapshot': - karma_config_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(suite)] - expected_test_tool_command = command_template.format( - options=self.EXPECTED_KARMA_OPTIONS.format( - config_file=karma_config_file, - single_run='false' if dev_mode else 'true', - suite=suite, - platform_root=self.platform_root, - browser=Env.KARMA_BROWSER, - ), - ) - if is_coverage: - expected_test_tool_command += self.EXPECTED_COVERAGE_OPTIONS.format( - platform_root=self.platform_root, - suite=suite - ) - if port: - expected_test_tool_command += f" --port={port}" - else: - expected_test_tool_command = 'jest' - - expected_messages.append(expected_test_tool_command) - - assert self.task_messages == expected_messages diff --git a/pavelib/paver_tests/test_paver_quality.py b/pavelib/paver_tests/test_paver_quality.py deleted file mode 100644 index 36d6dd59e172..000000000000 --- a/pavelib/paver_tests/test_paver_quality.py +++ /dev/null @@ -1,156 +0,0 @@ -""" # lint-amnesty, pylint: disable=django-not-configured -Tests for paver quality tasks -""" - - -import os -import shutil # lint-amnesty, pylint: disable=unused-import -import tempfile -import textwrap -import unittest -from unittest.mock import MagicMock, mock_open, patch # lint-amnesty, pylint: disable=unused-import - -import pytest # lint-amnesty, pylint: disable=unused-import -from ddt import data, ddt, file_data, unpack # lint-amnesty, pylint: disable=unused-import -from path import Path as path -from paver.easy import BuildFailure # lint-amnesty, pylint: disable=unused-import - -import pavelib.quality -from pavelib.paver_tests.utils import PaverTestCase, fail_on_eslint # lint-amnesty, pylint: disable=unused-import - -OPEN_BUILTIN = 'builtins.open' - - -@ddt -class 
TestPaverQualityViolations(unittest.TestCase): - """ - For testing the paver violations-counting tasks - """ - def setUp(self): - super().setUp() - self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with - self.f.close() - self.addCleanup(os.remove, self.f.name) - - def test_pep8_parser(self): - with open(self.f.name, 'w') as f: - f.write("hello\nhithere") - num = len(pavelib.quality._pep8_violations(f.name)) # pylint: disable=protected-access - assert num == 2 - - -class TestPaverReportViolationsCounts(unittest.TestCase): - """ - For testing utility functions for getting counts from reports for - run_eslint and run_xsslint. - """ - - def setUp(self): - super().setUp() - - # Temporary file infrastructure - self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with - self.f.close() - - # Cleanup various mocks and tempfiles - self.addCleanup(os.remove, self.f.name) - - def test_get_eslint_violations_count(self): - with open(self.f.name, 'w') as f: - f.write("3000 violations found") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access - assert actual_count == 3000 - - def test_get_eslint_violations_no_number_found(self): - with open(self.f.name, 'w') as f: - f.write("Not expected string regex") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access - assert actual_count is None - - def test_get_eslint_violations_count_truncated_report(self): - """ - A truncated report (i.e. last line is just a violation) - """ - with open(self.f.name, 'w') as f: - f.write("foo/bar/js/fizzbuzz.js: line 45, col 59, Missing semicolon.") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access - assert actual_count is None - - def test_generic_value(self): - """ - Default behavior is to look for an integer appearing at head of line - """ - with open(self.f.name, 'w') as f: - f.write("5.777 good to see you") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access - assert actual_count == 5 - - def test_generic_value_none_found(self): - """ - Default behavior is to look for an integer appearing at head of line - """ - with open(self.f.name, 'w') as f: - f.write("hello 5.777 good to see you") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access - assert actual_count is None - - def test_get_xsslint_counts_happy(self): - """ - Test happy path getting violation counts from xsslint report. - """ - report = textwrap.dedent(""" - test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos); - - javascript-concat-html: 310 violations - javascript-escape: 7 violations - - 2608 violations total - """) - with open(self.f.name, 'w') as f: - f.write(report) - counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access - self.assertDictEqual(counts, { - 'rules': { - 'javascript-concat-html': 310, - 'javascript-escape': 7, - }, - 'total': 2608, - }) - - def test_get_xsslint_counts_bad_counts(self): - """ - Test getting violation counts from truncated and malformed xsslint - report. 
- """ - report = textwrap.dedent(""" - javascript-concat-html: violations - """) - with open(self.f.name, 'w') as f: - f.write(report) - counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access - self.assertDictEqual(counts, { - 'rules': {}, - 'total': None, - }) - - -class TestPrepareReportDir(unittest.TestCase): - """ - Tests the report directory preparation - """ - - def setUp(self): - super().setUp() - self.test_dir = tempfile.mkdtemp() - self.test_file = tempfile.NamedTemporaryFile(delete=False, dir=self.test_dir) # lint-amnesty, pylint: disable=consider-using-with - self.addCleanup(os.removedirs, self.test_dir) - - def test_report_dir_with_files(self): - assert os.path.exists(self.test_file.name) - pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access - assert not os.path.exists(self.test_file.name) - - def test_report_dir_without_files(self): - os.remove(self.test_file.name) - pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access - assert os.listdir(path(self.test_dir)) == [] diff --git a/pavelib/paver_tests/test_pii_check.py b/pavelib/paver_tests/test_pii_check.py deleted file mode 100644 index d034360acde0..000000000000 --- a/pavelib/paver_tests/test_pii_check.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -Tests for Paver's PII checker task. -""" - -import shutil -import tempfile -import unittest -from unittest.mock import patch - -from path import Path as path -from paver.easy import call_task, BuildFailure - -import pavelib.quality -from pavelib.utils.envs import Env - - -class TestPaverPIICheck(unittest.TestCase): - """ - For testing the paver run_pii_check task - """ - def setUp(self): - super().setUp() - self.report_dir = path(tempfile.mkdtemp()) - self.addCleanup(shutil.rmtree, self.report_dir) - - @patch.object(pavelib.quality.run_pii_check, 'needs') - @patch('pavelib.quality.sh') - def test_pii_check_report_dir_override(self, mock_paver_sh, mock_needs): - """ - run_pii_check succeeds with proper report dir - """ - # Make the expected stdout files. - cms_stdout_report = self.report_dir / 'pii_check_cms.report' - cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n']) - lms_stdout_report = self.report_dir / 'pii_check_lms.report' - lms_stdout_report.write_lines(['Coverage found 66 uncovered models:\n']) - - mock_needs.return_value = 0 - call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)}) - mock_calls = [str(call) for call in mock_paver_sh.mock_calls] - assert len(mock_calls) == 2 - assert any('lms.envs.test' in call for call in mock_calls) - assert any('cms.envs.test' in call for call in mock_calls) - assert all(str(self.report_dir) in call for call in mock_calls) - metrics_file = Env.METRICS_DIR / 'pii' - assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n' - - @patch.object(pavelib.quality.run_pii_check, 'needs') - @patch('pavelib.quality.sh') - def test_pii_check_failed(self, mock_paver_sh, mock_needs): - """ - run_pii_check fails due to crossing the threshold. - """ - # Make the expected stdout files. - cms_stdout_report = self.report_dir / 'pii_check_cms.report' - cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n']) - lms_stdout_report = self.report_dir / 'pii_check_lms.report' - lms_stdout_report.write_lines([ - 'Coverage found 66 uncovered models:', - 'Coverage threshold not met! 
Needed 100.0, actually 95.0!', - ]) - - mock_needs.return_value = 0 - try: - with self.assertRaises(BuildFailure): - call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)}) - except SystemExit: - # Sometimes the BuildFailure raises a SystemExit, sometimes it doesn't, not sure why. - # As a hack, we just wrap it in try-except. - # This is not good, but these tests weren't even running for years, and we're removing this whole test - # suite soon anyway. - pass - mock_calls = [str(call) for call in mock_paver_sh.mock_calls] - assert len(mock_calls) == 2 - assert any('lms.envs.test' in call for call in mock_calls) - assert any('cms.envs.test' in call for call in mock_calls) - assert all(str(self.report_dir) in call for call in mock_calls) - metrics_file = Env.METRICS_DIR / 'pii' - assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n' diff --git a/pavelib/paver_tests/test_stylelint.py b/pavelib/paver_tests/test_stylelint.py deleted file mode 100644 index 3e1c79c93f28..000000000000 --- a/pavelib/paver_tests/test_stylelint.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Tests for Paver's Stylelint tasks. -""" - -from unittest.mock import MagicMock, patch - -import pytest -import ddt -from paver.easy import call_task - -from .utils import PaverTestCase - - -@ddt.ddt -class TestPaverStylelint(PaverTestCase): - """ - Tests for Paver's Stylelint tasks. - """ - @ddt.data( - [False], - [True], - ) - @ddt.unpack - def test_run_stylelint(self, should_pass): - """ - Verify that the quality task fails with Stylelint violations. - """ - if should_pass: - _mock_stylelint_violations = MagicMock(return_value=0) - with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations): - call_task('pavelib.quality.run_stylelint') - else: - _mock_stylelint_violations = MagicMock(return_value=100) - with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations): - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_stylelint') diff --git a/pavelib/paver_tests/test_timer.py b/pavelib/paver_tests/test_timer.py deleted file mode 100644 index 5ccbf74abcf9..000000000000 --- a/pavelib/paver_tests/test_timer.py +++ /dev/null @@ -1,190 +0,0 @@ -""" -Tests of the pavelib.utils.timer module. -""" - - -from datetime import datetime, timedelta -from unittest import TestCase - -from unittest.mock import MagicMock, patch - -from pavelib.utils import timer - - -@timer.timed -def identity(*args, **kwargs): - """ - An identity function used as a default task to test the timing of. - """ - return args, kwargs - - -MOCK_OPEN = MagicMock(spec=open) - - -@patch.dict('pavelib.utils.timer.__builtins__', open=MOCK_OPEN) -class TimedDecoratorTests(TestCase): - """ - Tests of the pavelib.utils.timer:timed decorator. 
- """ - def setUp(self): - super().setUp() - - patch_dumps = patch.object(timer.json, 'dump', autospec=True) - self.mock_dump = patch_dumps.start() - self.addCleanup(patch_dumps.stop) - - patch_makedirs = patch.object(timer.os, 'makedirs', autospec=True) - self.mock_makedirs = patch_makedirs.start() - self.addCleanup(patch_makedirs.stop) - - patch_datetime = patch.object(timer, 'datetime', autospec=True) - self.mock_datetime = patch_datetime.start() - self.addCleanup(patch_datetime.stop) - - patch_exists = patch.object(timer, 'exists', autospec=True) - self.mock_exists = patch_exists.start() - self.addCleanup(patch_exists.stop) - - MOCK_OPEN.reset_mock() - - def get_log_messages(self, task=identity, args=None, kwargs=None, raises=None): - """ - Return all timing messages recorded during the execution of ``task``. - """ - if args is None: - args = [] - if kwargs is None: - kwargs = {} - - if raises is None: - task(*args, **kwargs) - else: - self.assertRaises(raises, task, *args, **kwargs) - - return [ - call[0][0] # log_message - for call in self.mock_dump.call_args_list - ] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_times(self): - start = datetime(2016, 7, 20, 10, 56, 19) - end = start + timedelta(seconds=35.6) - - self.mock_datetime.utcnow.side_effect = [start, end] - - messages = self.get_log_messages() - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'duration' in messages[0] - assert 35.6 == messages[0]['duration'] - - assert 'started_at' in messages[0] - assert start.isoformat(' ') == messages[0]['started_at'] - - assert 'ended_at' in messages[0] - assert end.isoformat(' ') == messages[0]['ended_at'] - - @patch.object(timer, 'PAVER_TIMER_LOG', None) - def test_no_logs(self): - messages = self.get_log_messages() - assert len(messages) == 0 - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_arguments(self): - messages = self.get_log_messages(args=(1, 'foo'), kwargs=dict(bar='baz')) - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'args' in messages[0] - assert [repr(1), repr('foo')] == messages[0]['args'] - assert 'kwargs' in messages[0] - assert {'bar': repr('baz')} == messages[0]['kwargs'] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_task_name(self): - messages = self.get_log_messages() - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'task' in messages[0] - assert 'pavelib.paver_tests.test_timer.identity' == messages[0]['task'] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_exceptions(self): - - @timer.timed - def raises(): - """ - A task used for testing exception handling of the timed decorator. - """ - raise Exception('The Message!') - - messages = self.get_log_messages(task=raises, raises=Exception) - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'exception' in messages[0] - assert 'Exception: The Message!' 
== messages[0]['exception'] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log-%Y-%m-%d-%H-%M-%S.log') - def test_date_formatting(self): - start = datetime(2016, 7, 20, 10, 56, 19) - end = start + timedelta(seconds=35.6) - - self.mock_datetime.utcnow.side_effect = [start, end] - - messages = self.get_log_messages() - assert len(messages) == 1 - - MOCK_OPEN.assert_called_once_with('/tmp/some-log-2016-07-20-10-56-19.log', 'a') - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_nested_tasks(self): - - @timer.timed - def parent(): - """ - A timed task that calls another task - """ - identity() - - parent_start = datetime(2016, 7, 20, 10, 56, 19) - parent_end = parent_start + timedelta(seconds=60) - child_start = parent_start + timedelta(seconds=10) - child_end = parent_end - timedelta(seconds=10) - - self.mock_datetime.utcnow.side_effect = [parent_start, child_start, child_end, parent_end] - - messages = self.get_log_messages(task=parent) - assert len(messages) == 2 - - # Child messages first - assert 'duration' in messages[0] - assert 40 == messages[0]['duration'] - - assert 'started_at' in messages[0] - assert child_start.isoformat(' ') == messages[0]['started_at'] - - assert 'ended_at' in messages[0] - assert child_end.isoformat(' ') == messages[0]['ended_at'] - - # Parent messages after - assert 'duration' in messages[1] - assert 60 == messages[1]['duration'] - - assert 'started_at' in messages[1] - assert parent_start.isoformat(' ') == messages[1]['started_at'] - - assert 'ended_at' in messages[1] - assert parent_end.isoformat(' ') == messages[1]['ended_at'] diff --git a/pavelib/paver_tests/test_xsslint.py b/pavelib/paver_tests/test_xsslint.py deleted file mode 100644 index a9b4a41e1600..000000000000 --- a/pavelib/paver_tests/test_xsslint.py +++ /dev/null @@ -1,120 +0,0 @@ -""" -Tests for paver xsslint quality tasks -""" -from unittest.mock import patch - -import pytest -from paver.easy import call_task - -import pavelib.quality - -from .utils import PaverTestCase - - -class PaverXSSLintTest(PaverTestCase): - """ - Test run_xsslint with a mocked environment in order to pass in opts - """ - - def setUp(self): - super().setUp() - self.reset_task_messages() - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint encounters an error parsing the xsslint output log - """ - _mock_counts.return_value = {} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint') - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_vanilla(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds violations, but a limit was not set - """ - _mock_counts.return_value = {'total': 0} - call_task('pavelib.quality.run_xsslint') - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_invalid_thresholds_option(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint fails when thresholds option is poorly formatted - """ - _mock_counts.return_value = {'total': 0} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', 
options={"thresholds": "invalid"}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_invalid_thresholds_option_key(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint fails when thresholds option is poorly formatted - """ - _mock_counts.return_value = {'total': 0} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"invalid": 3}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_too_many_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds more violations than are allowed - """ - _mock_counts.return_value = {'total': 4} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 3}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_under_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds fewer violations than are allowed - """ - _mock_counts.return_value = {'total': 4} - # No System Exit is expected - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 5}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_rule_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint encounters an error parsing the xsslint output log for a - given rule threshold that was set. 
- """ - _mock_counts.return_value = {'total': 4} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_too_many_rule_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds more rule violations than are allowed - """ - _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_under_rule_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds fewer rule violations than are allowed - """ - _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}} - # No System Exit is expected - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 5}}'}) diff --git a/pavelib/utils/test/suites/__init__.py b/pavelib/utils/test/suites/__init__.py deleted file mode 100644 index 34ecd49c1c74..000000000000 --- a/pavelib/utils/test/suites/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -""" -TestSuite class and subclasses -""" -from .js_suite import JestSnapshotTestSuite, JsTestSuite -from .suite import TestSuite diff --git a/pavelib/utils/test/suites/js_suite.py b/pavelib/utils/test/suites/js_suite.py deleted file mode 100644 index 4e53d454fee5..000000000000 --- a/pavelib/utils/test/suites/js_suite.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Javascript test tasks -""" - - -from paver import tasks - -from pavelib.utils.envs import Env -from pavelib.utils.test import utils as test_utils -from pavelib.utils.test.suites.suite import TestSuite - -__test__ = False # do not collect - - -class JsTestSuite(TestSuite): - """ - A class for running JavaScript tests. 
- """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.run_under_coverage = kwargs.get('with_coverage', True) - self.mode = kwargs.get('mode', 'run') - self.report_dir = Env.JS_REPORT_DIR - self.opts = kwargs - - suite = args[0] - self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)] - - def __enter__(self): - super().__enter__() - if tasks.environment.dry_run: - tasks.environment.info("make report_dir") - else: - self.report_dir.makedirs_p() - if not self.skip_clean: - test_utils.clean_test_files() - - if self.mode == 'run' and not self.run_under_coverage: - test_utils.clean_dir(self.report_dir) - - @property - def _default_subsuites(self): - """ - Returns all JS test suites - """ - return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot'] - - -class JsTestSubSuite(TestSuite): - """ - Class for JS suites like cms, cms-squire, lms, common, - common-requirejs and xmodule - """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.test_id = args[0] - self.run_under_coverage = kwargs.get('with_coverage', True) - self.mode = kwargs.get('mode', 'run') - self.port = kwargs.get('port') - self.root = self.root + ' javascript' - self.report_dir = Env.JS_REPORT_DIR - - try: - self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)] - except ValueError: - self.test_conf_file = Env.KARMA_CONFIG_FILES[0] - - self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml' - self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml' - - @property - def cmd(self): - """ - Run the tests using karma runner. - """ - cmd = [ - "node", - "--max_old_space_size=4096", - "node_modules/.bin/karma", - "start", - self.test_conf_file, - "--single-run={}".format('false' if self.mode == 'dev' else 'true'), - "--capture-timeout=60000", - f"--junitreportpath={self.xunit_report}", - f"--browsers={Env.KARMA_BROWSER}", - ] - - if self.port: - cmd.append(f"--port={self.port}") - - if self.run_under_coverage: - cmd.extend([ - "--coverage", - f"--coveragereportpath={self.coverage_report}", - ]) - - return cmd - - -class JestSnapshotTestSuite(TestSuite): - """ - A class for running Jest Snapshot tests. - """ - @property - def cmd(self): - """ - Run the tests using Jest. - """ - return ["jest"] diff --git a/pavelib/utils/test/suites/suite.py b/pavelib/utils/test/suites/suite.py deleted file mode 100644 index 5a423c827c21..000000000000 --- a/pavelib/utils/test/suites/suite.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -A class used for defining and running test suites -""" - - -import os -import subprocess -import sys - -from paver import tasks - -from pavelib.utils.process import kill_process - -try: - from pygments.console import colorize -except ImportError: - colorize = lambda color, text: text - -__test__ = False # do not collect - - -class TestSuite: - """ - TestSuite is a class that defines how groups of tests run. - """ - def __init__(self, *args, **kwargs): - self.root = args[0] - self.subsuites = kwargs.get('subsuites', []) - self.failed_suites = [] - self.verbosity = int(kwargs.get('verbosity', 1)) - self.skip_clean = kwargs.get('skip_clean', False) - self.passthrough_options = kwargs.get('passthrough_options', []) - - def __enter__(self): - """ - This will run before the test suite is run with the run_suite_tests method. 
- If self.run_test is called directly, it should be run in a 'with' block to - ensure that the proper context is created. - - Specific setup tasks should be defined in each subsuite. - - i.e. Checking for and defining required directories. - """ - print(f"\nSetting up for {self.root}") - self.failed_suites = [] - - def __exit__(self, exc_type, exc_value, traceback): - """ - This is run after the tests run with the run_suite_tests method finish. - Specific clean up tasks should be defined in each subsuite. - - If self.run_test is called directly, it should be run in a 'with' block - to ensure that clean up happens properly. - - i.e. Cleaning mongo after the lms tests run. - """ - print(f"\nCleaning up after {self.root}") - - @property - def cmd(self): - """ - The command to run tests (as a string). For this base class there is none. - """ - return None - - @staticmethod - def is_success(exit_code): - """ - Determine if the given exit code represents a success of the test - suite. By default, only a zero counts as a success. - """ - return exit_code == 0 - - def run_test(self): - """ - Runs a self.cmd in a subprocess and waits for it to finish. - It returns False if errors or failures occur. Otherwise, it - returns True. - """ - cmd = " ".join(self.cmd) - - if tasks.environment.dry_run: - tasks.environment.info(cmd) - return - - sys.stdout.write(cmd) - - msg = colorize( - 'green', - '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40), - ) - - sys.stdout.write(msg) - sys.stdout.flush() - - if 'TEST_SUITE' not in os.environ: - os.environ['TEST_SUITE'] = self.root.replace("/", "_") - kwargs = {'shell': True, 'cwd': None} - process = None - - try: - process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with - return self.is_success(process.wait()) - except KeyboardInterrupt: - kill_process(process) - sys.exit(1) - - def run_suite_tests(self): - """ - Runs each of the suites in self.subsuites while tracking failures - """ - # Uses __enter__ and __exit__ for context - with self: - # run the tests for this class, and for all subsuites - if self.cmd: - passed = self.run_test() - if not passed: - self.failed_suites.append(self) - - for suite in self.subsuites: - suite.run_suite_tests() - if suite.failed_suites: - self.failed_suites.extend(suite.failed_suites) - - def report_test_results(self): - """ - Writes a list of failed_suites to sys.stderr - """ - if self.failed_suites: - msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48)) - msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n') - else: - msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48)) - - print(msg) - - def run(self): - """ - Runs the tests in the suite while tracking and reporting failures. 
- """ - self.run_suite_tests() - - if tasks.environment.dry_run: - return - - self.report_test_results() - - if self.failed_suites: - sys.exit(1) diff --git a/pavelib/utils/test/utils.py b/pavelib/utils/test/utils.py deleted file mode 100644 index 0851251e2222..000000000000 --- a/pavelib/utils/test/utils.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -Helper functions for test tasks -""" - - -import os - -from paver.easy import cmdopts, sh, task - -from pavelib.utils.envs import Env -from pavelib.utils.timer import timed - - -MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) - -COVERAGE_CACHE_BUCKET = "edx-tools-coverage-caches" -COVERAGE_CACHE_BASEPATH = "test_root/who_tests_what" -COVERAGE_CACHE_BASELINE = "who_tests_what.{}.baseline".format(os.environ.get('WTW_CONTEXT', 'all')) -WHO_TESTS_WHAT_DIFF = "who_tests_what.diff" - - -__test__ = False # do not collect - - -@task -@timed -def clean_test_files(): - """ - Clean fixture files used by tests and .pyc files - """ - sh("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") - # This find command removes all the *.pyc files that aren't in the .git - # directory. See this blog post for more details: - # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html - sh(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") - sh("rm -rf test_root/log/auto_screenshots/*") - sh("rm -rf /tmp/mako_[cl]ms") - - -@task -@timed -def ensure_clean_package_lock(): - """ - Ensure no untracked changes have been made in the current git context. - """ - sh(""" - git diff --name-only --exit-code package-lock.json || - (echo \"Dirty package-lock.json, run 'npm install' and commit the generated changes\" && exit 1) - """) - - -def clean_dir(directory): - """ - Delete all the files from the specified directory. - """ - # We delete the files but preserve the directory structure - # so that coverage.py has a place to put the reports. - sh(f'find {directory} -type f -delete') - - -@task -@cmdopts([ - ('skip-clean', 'C', 'skip cleaning repository before running tests'), - ('skip_clean', None, 'deprecated in favor of skip-clean'), -]) -@timed -def clean_reports_dir(options): - """ - Clean coverage files, to ensure that we don't use stale data to generate reports. - """ - if getattr(options, 'skip_clean', False): - print('--skip-clean is set, skipping...') - return - - # We delete the files but preserve the directory structure - # so that coverage.py has a place to put the reports. - reports_dir = Env.REPORT_DIR.makedirs_p() - clean_dir(reports_dir) - - -@task -@timed -def clean_mongo(): - """ - Clean mongo test databases - """ - sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format( - host=Env.MONGO_HOST, - port=MONGO_PORT_NUM, - repo_root=Env.REPO_ROOT, - )) diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh deleted file mode 100755 index 54b9cbb9d500..000000000000 --- a/scripts/generic-ci-tests.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env bash -set -e - -############################################################################### -# -# generic-ci-tests.sh -# -# Execute some tests for edx-platform. -# (Most other tests are run by invoking `pytest`, `pylint`, etc. directly) -# -# This script can be called from CI jobs that define -# these environment variables: -# -# `TEST_SUITE` defines which kind of test to run. 
-# Possible values are: -# -# - "quality": Run the quality (pycodestyle/pylint) checks -# - "js-unit": Run the JavaScript tests -# - "pavelib-js-unit": Run the JavaScript tests and the Python unit -# tests from the pavelib/lib directory -# -############################################################################### - -# Clean up previous builds -git clean -qxfd - -function emptyxunit { - - cat > "reports/$1.xml" < - - - -END - -} - -# if specified tox environment is supported, prepend paver commands -# with tox env invocation -if [ -z ${TOX_ENV+x} ] || [[ ${TOX_ENV} == 'null' ]]; then - echo "TOX_ENV: ${TOX_ENV}" - TOX="" -elif tox -l |grep -q "${TOX_ENV}"; then - if [[ "${TOX_ENV}" == 'quality' ]]; then - TOX="" - else - TOX="tox -r -e ${TOX_ENV} --" - fi -else - echo "${TOX_ENV} is not currently supported. Please review the" - echo "tox.ini file to see which environments are supported" - exit 1 -fi - -PAVER_ARGS="-v" -export SUBSET_JOB=$JOB_NAME - -function run_paver_quality { - QUALITY_TASK=$1 - shift - mkdir -p test_root/log/ - LOG_PREFIX="test_root/log/$QUALITY_TASK" - $TOX paver "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || { - echo "STDOUT (last 100 lines of $LOG_PREFIX.out.log):"; - tail -n 100 "$LOG_PREFIX.out.log" - echo "STDERR (last 100 lines of $LOG_PREFIX.err.log):"; - tail -n 100 "$LOG_PREFIX.err.log" - return 1; - } - return 0; -} - -case "$TEST_SUITE" in - - "quality") - EXIT=0 - - mkdir -p reports - - echo "Finding pycodestyle violations and storing report..." - run_paver_quality run_pep8 || { EXIT=1; } - echo "Finding ESLint violations and storing report..." - run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } - echo "Finding Stylelint violations and storing report..." - run_paver_quality run_stylelint || { EXIT=1; } - echo "Running xss linter report." - run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } - echo "Running PII checker on all Django models..." - run_paver_quality run_pii_check || { EXIT=1; } - echo "Running reserved keyword checker on all Django models..." - run_paver_quality check_keywords || { EXIT=1; } - - # Need to create an empty test result so the post-build - # action doesn't fail the build. - emptyxunit "stub" - exit "$EXIT" - ;; - - "js-unit") - $TOX paver test_js --coverage - $TOX paver diff_coverage - ;; - - "pavelib-js-unit") - EXIT=0 - $TOX paver test_js --coverage --skip-clean || { EXIT=1; } - paver test_lib --skip-clean $PAVER_ARGS || { EXIT=1; } - - # This is to ensure that the build status of the shard is properly set. - # Because we are running two paver commands in a row, we need to capture - # their return codes in order to exit with a non-zero code if either of - # them fail. We put the || clause there because otherwise, when a paver - # command fails, this entire script will exit, and not run the second - # paver command in this case statement. So instead of exiting, the value - # of a variable named EXIT will be set to 1 if either of the paver - # commands fail. We then use this variable's value as our exit code. - # Note that by default the value of this variable EXIT is not set, so if - # neither command fails then the exit command resolves to simply exit - # which is considered successful. 
- exit "$EXIT" - ;; -esac diff --git a/scripts/js_test.py b/scripts/js_test.py new file mode 100644 index 000000000000..69be37f602fe --- /dev/null +++ b/scripts/js_test.py @@ -0,0 +1,492 @@ +""" +Javascript test tasks +""" + +import click +import os +import re +import sys +import subprocess + +from path import Path as path + +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text + +__test__ = False # do not collect + + +class Env: + """ + Load information about the execution environment. + """ + + @staticmethod + def repo_root(): + """ + Get the root of the git repository (edx-platform). + + This sometimes fails on Docker Devstack, so it's been broken + down with some additional error handling. It usually starts + working within 30 seconds or so; for more details, see + https://openedx.atlassian.net/browse/PLAT-1629 and + https://github.com/docker/for-mac/issues/1509 + """ + + file_path = path(__file__) + attempt = 1 + while True: + try: + absolute_path = file_path.abspath() + break + except OSError: + print(f'Attempt {attempt}/180 to get an absolute path failed') + if attempt < 180: + attempt += 1 + sleep(1) + else: + print('Unable to determine the absolute path of the edx-platform repo, aborting') + raise + return absolute_path.parent.parent + + # Root of the git repository (edx-platform) + REPO_ROOT = repo_root() + + # Reports Directory + REPORT_DIR = REPO_ROOT / 'reports' + + # Detect if in a Docker container, and if so which one + FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') + USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' + + # Configured browser to use for the js test suites + SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox') + if USING_DOCKER: + KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker' + else: + KARMA_BROWSER = 'FirefoxNoUpdates' + + # Files used to run each of the js test suites + # TODO: Store this as a dict. Order seems to matter for some + # reason. See issue TE-415. + KARMA_CONFIG_FILES = [ + REPO_ROOT / 'cms/static/karma_cms.conf.js', + REPO_ROOT / 'cms/static/karma_cms_squire.conf.js', + REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js', + REPO_ROOT / 'lms/static/karma_lms.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js', + REPO_ROOT / 'common/static/karma_common.conf.js', + REPO_ROOT / 'common/static/karma_common_requirejs.conf.js', + ] + + JS_TEST_ID_KEYS = [ + 'cms', + 'cms-squire', + 'cms-webpack', + 'lms', + 'xmodule', + 'xmodule-webpack', + 'common', + 'common-requirejs', + 'jest-snapshot' + ] + + JS_REPORT_DIR = REPORT_DIR / 'javascript' + + # Service variant (lms, cms, etc.) configured with an environment variable + # We use this to determine which envs.json file to load. 
+ SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) + + # If service variant not configured in env, then pass the correct + # environment for lms / cms + if not SERVICE_VARIANT: # this will intentionally catch ""; + if any(i in sys.argv[1:] for i in ('cms', 'studio')): + SERVICE_VARIANT = 'cms' + else: + SERVICE_VARIANT = 'lms' + + +# def clean_test_files(): +# """ +# Clean fixture files used by tests and .pyc files +# """ +# # "git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads" +# subprocess.run("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") +# # This find command removes all the *.pyc files that aren't in the .git +# # directory. See this blog post for more details: +# # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html +# subprocess.run(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") +# subprocess.run("rm -rf test_root/log/auto_screenshots/*") +# subprocess.run("rm -rf /tmp/mako_[cl]ms") + + +# def clean_dir(directory): +# """ +# Delete all the files from the specified directory. +# """ +# # We delete the files but preserve the directory structure +# # so that coverage.py has a place to put the reports. +# subprocess.run(f'find {directory} -type f -delete') + + +# @task +# @cmdopts([ +# ('skip-clean', 'C', 'skip cleaning repository before running tests'), +# ('skip_clean', None, 'deprecated in favor of skip-clean'), +# ]) + +# def clean_reports_dir(options): +# """ +# Clean coverage files, to ensure that we don't use stale data to generate reports. +# """ +# if getattr(options, 'skip_clean', False): +# print('--skip-clean is set, skipping...') +# return + +# # We delete the files but preserve the directory structure +# # so that coverage.py has a place to put the reports. +# reports_dir = Env.REPORT_DIR.makedirs_p() +# clean_dir(reports_dir) + + +class TestSuite: + """ + TestSuite is a class that defines how groups of tests run. + """ + def __init__(self, *args, **kwargs): + self.root = args[0] + self.subsuites = kwargs.get('subsuites', []) + self.failed_suites = [] + self.verbosity = int(kwargs.get('verbosity', 1)) + self.skip_clean = kwargs.get('skip_clean', False) + self.passthrough_options = kwargs.get('passthrough_options', []) + + def __enter__(self): + """ + This will run before the test suite is run with the run_suite_tests method. + If self.run_test is called directly, it should be run in a 'with' block to + ensure that the proper context is created. + + Specific setup tasks should be defined in each subsuite. + + i.e. Checking for and defining required directories. + """ + print(f"\nSetting up for {self.root}") + self.failed_suites = [] + + def __exit__(self, exc_type, exc_value, traceback): + """ + This is run after the tests run with the run_suite_tests method finish. + Specific clean up tasks should be defined in each subsuite. + + If self.run_test is called directly, it should be run in a 'with' block + to ensure that clean up happens properly. + + i.e. Cleaning mongo after the lms tests run. + """ + print(f"\nCleaning up after {self.root}") + + @property + def cmd(self): + """ + The command to run tests (as a string). For this base class there is none. + """ + return None + + @staticmethod + def kill_process(proc): + """ + Kill the process `proc` created with `subprocess`. 
+ """ + p1_group = psutil.Process(proc.pid) + child_pids = p1_group.children(recursive=True) + + for child_pid in child_pids: + os.kill(child_pid.pid, signal.SIGKILL) + + @staticmethod + def is_success(exit_code): + """ + Determine if the given exit code represents a success of the test + suite. By default, only a zero counts as a success. + """ + return exit_code == 0 + + def run_test(self): + """ + Runs a self.cmd in a subprocess and waits for it to finish. + It returns False if errors or failures occur. Otherwise, it + returns True. + """ + # cmd = " ".join(self.cmd) + cmd = " ".join(str(part) for part in self.cmd) + sys.stdout.write(cmd) + + msg = colorize( + 'green', + '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40), + ) + + sys.stdout.write(msg) + sys.stdout.flush() + + if 'TEST_SUITE' not in os.environ: + os.environ['TEST_SUITE'] = self.root.replace("/", "_") + kwargs = {'shell': True, 'cwd': None} + process = None + + try: + process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with + return self.is_success(process.wait()) + except KeyboardInterrupt: + self.kill_process(process) + sys.exit(1) + + def run_suite_tests(self): + """ + Runs each of the suites in self.subsuites while tracking failures + """ + # Uses __enter__ and __exit__ for context + with self: + # run the tests for this class, and for all subsuites + if self.cmd: + passed = self.run_test() + if not passed: + self.failed_suites.append(self) + + for suite in self.subsuites: + suite.run_suite_tests() + if suite.failed_suites: + self.failed_suites.extend(suite.failed_suites) + + def report_test_results(self): + """ + Writes a list of failed_suites to sys.stderr + """ + if self.failed_suites: + msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48)) + msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n') + else: + msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48)) + + print(msg) + + def run(self): + """ + Runs the tests in the suite while tracking and reporting failures. + """ + self.run_suite_tests() + + # if tasks.environment.dry_run: + # return + + self.report_test_results() + + if self.failed_suites: + sys.exit(1) + + +class JsTestSuite(TestSuite): + """ + A class for running JavaScript tests. 
+ """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.report_dir = Env.JS_REPORT_DIR + self.opts = kwargs + + suite = args[0] + self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)] + + def __enter__(self): + super().__enter__() + self.report_dir.makedirs_p() + # self.report_dir.mkdir(exist_ok=True) + # if not self.skip_clean: + # test_utils.clean_test_files() + + # if self.mode == 'run' and not self.run_under_coverage: + # test_utils.clean_dir(self.report_dir) + + @property + def _default_subsuites(self): + """ + Returns all JS test suites + """ + return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot'] + + +class JsTestSubSuite(TestSuite): + """ + Class for JS suites like cms, cms-squire, lms, common, + common-requirejs and xmodule + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.test_id = args[0] + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.port = kwargs.get('port') + self.root = self.root + ' javascript' + self.report_dir = Env.JS_REPORT_DIR + + try: + self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)] + except ValueError: + self.test_conf_file = Env.KARMA_CONFIG_FILES[0] + + self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml' + self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml' + + @property + def cmd(self): + """ + Run the tests using karma runner. + """ + cmd = [ + "node", + "--max_old_space_size=4096", + "node_modules/.bin/karma", + "start", + self.test_conf_file, + "--single-run={}".format('false' if self.mode == 'dev' else 'true'), + "--capture-timeout=60000", + f"--junitreportpath={self.xunit_report}", + f"--browsers={Env.KARMA_BROWSER}", + ] + + if self.port: + cmd.append(f"--port={self.port}") + + if self.run_under_coverage: + cmd.extend([ + "--coverage", + f"--coveragereportpath={self.coverage_report}", + ]) + + return cmd + + +class JestSnapshotTestSuite(TestSuite): + """ + A class for running Jest Snapshot tests. + """ + @property + def cmd(self): + """ + Run the tests using Jest. + """ + return ["jest"] + + +def test_js(suite, mode, coverage, port, skip_clean): + """ + Run the JavaScript tests + """ + + if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): + sys.stderr.write( + "Unknown test suite. 
Please choose from ({suites})\n".format( + suites=", ".join(Env.JS_TEST_ID_KEYS) + ) + ) + return + + if suite != 'jest-snapshot': + test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) + test_suite.run() + + if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in + test_suite = JestSnapshotTestSuite('jest') + test_suite.run() + + +# @needs('pavelib.prereqs.install_coverage_prereqs') +# @cmdopts([ +# ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), +# ], share_with=['coverage']) + +def diff_coverage(): + """ + Build the diff coverage reports + """ + + compare_branch = 'origin/master' + + # Find all coverage XML files (both Python and JavaScript) + xml_reports = [] + for filepath in Env.REPORT_DIR.walk(): + if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): + xml_reports.append(filepath) + + if not xml_reports: + err_msg = colorize( + 'red', + "No coverage info found. Run `quality test` before running " + "`coverage test`.\n" + ) + sys.stderr.write(err_msg) + else: + xml_report_str = ' '.join(xml_reports) + diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') + + # Generate the diff coverage reports (HTML and console) + # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 + command = ( + f"diff-cover {xml_report_str}" + f"--diff-range-notation '..'" + f"--compare-branch={compare_branch} " + f"--html-report {diff_html_path}" + ) + subprocess.run(command, + shell=True, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True) + + +@click.command("main") +@click.option( + '--option', 'option', + help='Run javascript tests or coverage test as per given option' +) +@click.option( + '--s', 'suite', + default='all', + help='Test suite to run.' +) +@click.option( + '--m', 'mode', + default='run', + help='dev or run' +) +@click.option( + '--coverage', 'coverage', + default=True, + help='Run test under coverage' +) +@click.option( + '--p', 'port', + default=None, + help='Port to run test server on (dev mode only)' +) +@click.option( + '--C', 'skip_clean', + default=False, + help='skip cleaning repository before running tests' +) +def main(option, suite, mode, coverage, port, skip_clean): + if option == 'jstest': + test_js(suite, mode, coverage, port, skip_clean) + elif option == 'coverage': + diff_coverage() + + +if __name__ == "__main__": + main() diff --git a/pavelib/quality.py b/scripts/quality_test.py similarity index 58% rename from pavelib/quality.py rename to scripts/quality_test.py index 774179f45048..fb7d1e481eb9 100644 --- a/pavelib/quality.py +++ b/scripts/quality_test.py @@ -2,171 +2,188 @@ Check code quality using pycodestyle, pylint, and diff_quality. 
""" +import argparse +import glob import json import os import re -from datetime import datetime -from xml.sax.saxutils import quoteattr +import sys +import subprocess +import shutil +from pathlib import Path +from time import sleep -from paver.easy import BuildFailure, cmdopts, needs, sh, task +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text -from .utils.envs import Env -from .utils.timer import timed -ALL_SYSTEMS = 'lms,cms,common,openedx,pavelib,scripts' -JUNIT_XML_TEMPLATE = """ - -{failure_element} - -""" -JUNIT_XML_FAILURE_TEMPLATE = '' -START_TIME = datetime.utcnow() +class BuildFailure(Exception): + """Represents a problem with some part of the build's execution.""" -def write_junit_xml(name, message=None): +def fail_quality(name, message): """ - Write a JUnit results XML file describing the outcome of a quality check. + Fail the specified quality check. """ - if message: - failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message)) - else: - failure_element = '' - data = { - 'failure_count': 1 if message else 0, - 'failure_element': failure_element, - 'name': name, - 'seconds': (datetime.utcnow() - START_TIME).total_seconds(), - } - Env.QUALITY_DIR.makedirs_p() - filename = Env.QUALITY_DIR / f'{name}.xml' - with open(filename, 'w') as f: - f.write(JUNIT_XML_TEMPLATE.format(**data)) + print(name) + print(message) + sys.exit() -def fail_quality(name, message): +def _prepare_report_dir(dir_name): """ - Fail the specified quality check by generating the JUnit XML results file - and raising a ``BuildFailure``. + Sets a given directory to a created, but empty state """ - write_junit_xml(name, message) - raise BuildFailure(message) + if os.path.isdir(dir_name): + shutil.rmtree(dir_name) + os.makedirs(dir_name, exist_ok=True) -def top_python_dirs(dirname): +def repo_root(): """ - Find the directories to start from in order to find all the Python files in `dirname`. + Get the root of the git repository (edx-platform). + + This sometimes fails on Docker Devstack, so it's been broken + down with some additional error handling. It usually starts + working within 30 seconds or so; for more details, see + https://openedx.atlassian.net/browse/PLAT-1629 and + https://github.com/docker/for-mac/issues/1509 """ - top_dirs = [] - dir_init = os.path.join(dirname, "__init__.py") - if os.path.exists(dir_init): - top_dirs.append(dirname) + file_path = Path(__file__) + max_attempts = 180 + for attempt in range(1, max_attempts + 1): + try: + absolute_path = file_path.resolve(strict=True) + return absolute_path.parents[1] + except OSError: + print(f'Attempt {attempt}/{max_attempts} to get an absolute path failed') + if attempt < max_attempts: + sleep(1) + else: + print('Unable to determine the absolute path of the edx-platform repo, aborting') + raise RuntimeError('Could not determine the repository root after multiple attempts') - for directory in ['djangoapps', 'lib']: - subdir = os.path.join(dirname, directory) - subdir_init = os.path.join(subdir, "__init__.py") - if os.path.exists(subdir) and not os.path.exists(subdir_init): - dirs = os.listdir(subdir) - top_dirs.extend(d for d in dirs if os.path.isdir(os.path.join(subdir, d))) - modules_to_remove = ['__pycache__'] - for module in modules_to_remove: - if module in top_dirs: - top_dirs.remove(module) +def _get_report_contents(filename, report_name, last_line_only=False): + """ + Returns the contents of the given file. 
Use last_line_only to only return + the last line, which can be used for getting output from quality output + files. - return top_dirs + Arguments: + last_line_only: True to return the last line only, False to return a + string with full contents. + Returns: + String containing full contents of the report, or the last line. -def _get_pep8_violations(clean=True): - """ - Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string) - where violations_string is a string of all PEP 8 violations found, separated - by new lines. """ - report_dir = (Env.REPORT_DIR / 'pep8') - if clean: - report_dir.rmtree(ignore_errors=True) - report_dir.makedirs_p() - report = report_dir / 'pep8.report' + if os.path.isfile(filename): + with open(filename) as report_file: + if last_line_only: + lines = report_file.readlines() + for line in reversed(lines): + if line != '\n': + return line + return None + else: + return report_file.read() + else: + file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" + fail_quality(report_name, file_not_found_message) - # Make sure the metrics subdirectory exists - Env.METRICS_DIR.makedirs_p() - if not report.exists(): - sh(f'pycodestyle . | tee {report} -a') +def _get_count_from_last_line(filename, file_type): + """ + This will return the number in the last line of a file. + It is returning only the value (as a floating number). + """ + report_contents = _get_report_contents(filename, file_type, last_line_only=True) + if report_contents is None: + return 0 - violations_list = _pep8_violations(report) + last_line = report_contents.strip() + # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" + regex = r'^\d+' - return len(violations_list), violations_list + try: + return float(re.search(regex, last_line).group(0)) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + return None -def _pep8_violations(report_file): +def _get_stylelint_violations(): """ - Returns the list of all PEP 8 violations in the given report_file. + Returns the number of Stylelint violations. """ - with open(report_file) as f: - return f.readlines() + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + stylelint_report_dir = (REPORT_DIR / "stylelint") + stylelint_report = stylelint_report_dir / "stylelint.report" + _prepare_report_dir(stylelint_report_dir) + command = [ + 'node', 'node_modules/stylelint', + '*scss_files', + '--custom-formatter', 'stylelint-formatter-pretty/index.js' + ] + + with open(stylelint_report, 'w') as report_file: + subprocess.run( + command, + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) -@task -@cmdopts([ - ("system=", "s", "System to act on"), -]) -@timed -def run_pep8(options): # pylint: disable=unused-argument - """ - Run pycodestyle on system code. - Fail the task if any violations are found. 
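As an illustrative aside (not part of the patch), this is the parsing `_get_count_from_last_line` performs: it takes the last non-empty line of a report and reads the leading integer, so a compact eslint footer like the one used by the unit tests retained later in this series ("3000 violations found") yields 3000. The snippet below is a self-contained sketch of that logic on a sample string, not a call into the script itself.

    import re

    # Last non-empty line of a hypothetical compact-format eslint report.
    last_line = "3000 violations found"

    match = re.search(r'^\d+', last_line.strip())
    count = float(match.group(0)) if match else None
    assert count == 3000
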
- """ - (count, violations_list) = _get_pep8_violations() - violations_list = ''.join(violations_list) - - # Print number of violations to log - violations_count_str = f"Number of PEP 8 violations: {count}" - print(violations_count_str) - print(violations_list) - - # Also write the number of violations to a file - with open(Env.METRICS_DIR / "pep8", "w") as f: - f.write(violations_count_str + '\n\n') - f.write(violations_list) - - # Fail if any violations are found - if count: - failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str - failure_string += f"\n\nViolations:\n{violations_list}" - fail_quality('pep8', failure_string) - else: - write_junit_xml('pep8') - - -@task -@needs( - 'pavelib.prereqs.install_node_prereqs', - 'pavelib.utils.test.utils.ensure_clean_package_lock', -) -@cmdopts([ - ("limit=", "l", "limit for number of acceptable violations"), -]) -@timed -def run_eslint(options): + try: + return int(_get_count_from_last_line(stylelint_report, "stylelint")) + except TypeError: + fail_quality( + 'stylelint', + "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( + stylelint_report=stylelint_report + ) + ) + + +def run_eslint(): """ Runs eslint on static asset directories. If limit option is passed, fails build if more violations than the limit are found. """ - eslint_report_dir = (Env.REPORT_DIR / "eslint") + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + eslint_report_dir = REPORT_DIR / "eslint" eslint_report = eslint_report_dir / "eslint.report" _prepare_report_dir(eslint_report_dir) - violations_limit = int(getattr(options, 'limit', -1)) - - sh( - "node --max_old_space_size=4096 node_modules/.bin/eslint " - "--ext .js --ext .jsx --format=compact . | tee {eslint_report}".format( - eslint_report=eslint_report - ), - ignore_error=True - ) + violations_limit = 4950 + + command = [ + "node", + "--max_old_space_size=4096", + "node_modules/.bin/eslint", + "--ext", ".js", + "--ext", ".jsx", + "--format=compact", + "." + ] + + with open(eslint_report, 'w') as report_file: + subprocess.run( + command, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True, + check=False + ) try: num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) @@ -178,9 +195,6 @@ def run_eslint(options): ) ) - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) - # Fail if number of violations is greater than the limit if num_violations > violations_limit > -1: fail_quality( @@ -190,81 +204,231 @@ def run_eslint(options): ) ) else: - write_junit_xml('eslint') + print("successfully run eslint with violations") + print(num_violations) -def _get_stylelint_violations(): +def run_stylelint(): """ - Returns the number of Stylelint violations. + Runs stylelint on Sass files. + If limit option is passed, fails build if more violations than the limit are found. 
""" - stylelint_report_dir = (Env.REPORT_DIR / "stylelint") - stylelint_report = stylelint_report_dir / "stylelint.report" - _prepare_report_dir(stylelint_report_dir) - formatter = 'node_modules/stylelint-formatter-pretty' - - sh( - "stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}".format( - formatter=formatter, - stylelint_report=stylelint_report, - ), - ignore_error=True - ) - try: - return int(_get_count_from_last_line(stylelint_report, "stylelint")) - except TypeError: + violations_limit = 0 + num_violations = _get_stylelint_violations() + # Fail if number of violations is greater than the limit + if num_violations > violations_limit: fail_quality( 'stylelint', - "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( - stylelint_report=stylelint_report + "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, + violations_limit=violations_limit, ) ) + else: + print("successfully run stylelint with violations") + print(num_violations) -@task -@needs('pavelib.prereqs.install_node_prereqs') -@cmdopts([ - ("limit=", "l", "limit for number of acceptable violations"), -]) -@timed -def run_stylelint(options): +def _extract_missing_pii_annotations(filename): """ - Runs stylelint on Sass files. - If limit option is passed, fails build if more violations than the limit are found. + Returns the number of uncovered models from the stdout report of django_find_annotations. + + Arguments: + filename: Filename where stdout of django_find_annotations was captured. + + Returns: + three-tuple containing: + 1. The number of uncovered models, + 2. A bool indicating whether the coverage is still below the threshold, and + 3. The full report as a string. """ - violations_limit = 0 - num_violations = _get_stylelint_violations() + uncovered_models = 0 + pii_check_passed = True + if os.path.isfile(filename): + with open(filename) as report_file: + lines = report_file.readlines() - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "stylelint")) + # Find the count of uncovered models. + uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') + for line in lines: + uncovered_match = uncovered_regex.match(line) + if uncovered_match: + uncovered_models = int(uncovered_match.groups()[0]) + break - # Fail if number of violations is greater than the limit - if num_violations > violations_limit: + # Find a message which suggests the check failed. + failure_regex = re.compile(r'^Coverage threshold not met!') + for line in lines: + failure_match = failure_regex.match(line) + if failure_match: + pii_check_passed = False + break + + # Each line in lines already contains a newline. + full_log = ''.join(lines) + else: + fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') + + return (uncovered_models, pii_check_passed, full_log) + + +def run_pii_check(): + """ + Guarantee that all Django models are PII-annotated. 
+ """ + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + pii_report_name = 'pii' + default_report_dir = (REPORT_DIR / pii_report_name) + report_dir = default_report_dir + output_file = os.path.join(report_dir, 'pii_check_{}.report') + env_report = [] + pii_check_passed = True + + for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): + try: + print(f"Running {env_name} PII Annotation check and report") + print("-" * 45) + + run_output_file = str(output_file).format(env_name.lower()) + os.makedirs(report_dir, exist_ok=True) + + # Prepare the environment for the command + env = { + **os.environ, # Include the current environment variables + "DJANGO_SETTINGS_MODULE": env_settings_file # Set DJANGO_SETTINGS_MODULE for each environment + } + + command = [ + "code_annotations", + "django_find_annotations", + "--config_file", ".pii_annotations.yml", + "--report_path", str(report_dir), + "--app_name", env_name.lower() + ] + + # Run the command without shell=True + with open(run_output_file, 'w') as report_file: + subprocess.run( + command, + env=env, # Pass the environment with DJANGO_SETTINGS_MODULE + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) + + # Extract results + uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) + env_report.append(( + uncovered_model_count, + full_log, + )) + + except BuildFailure as error_message: + fail_quality(pii_report_name, f'FAILURE: {error_message}') + + # Update pii_check_passed based on the result of the current environment + if not pii_check_passed_env: + pii_check_passed = False + + # If the PII check failed in any environment, fail the task + if not pii_check_passed: + fail_quality('pii', full_log) + else: + print("Successfully ran pii_check") + + +def check_keywords(): + """ + Check Django model fields for names that conflict with a list of reserved keywords + """ + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + report_path = REPORT_DIR / 'reserved_keywords' + report_path.mkdir(parents=True, exist_ok=True) + + overall_status = True + for env_name, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: + report_file_path = report_path / f"{env_name}_reserved_keyword_report.csv" + override_file = os.path.join(REPO_ROOT, "db_keyword_overrides.yml") + try: + env = { + **os.environ, # Include the current environment variables + "DJANGO_SETTINGS_MODULE": env_settings_file # Set DJANGO_SETTINGS_MODULE for each environment + } + command = [ + "python", "manage.py", env_name, "check_reserved_keywords", + "--override_file", str(override_file), + "--report_path", str(report_path), + "--report_file", str(report_file_path) + ] + with open(report_file_path, 'w') as report_file: + subprocess.run( + command, + env=env, + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) + except BuildFailure: + overall_status = False + if not overall_status: fail_quality( - 'stylelint', - "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( - count=num_violations, - violations_limit=violations_limit, + 'keywords', + 'Failure: reserved keyword checker failed. 
Reports can be found here: {}'.format( + report_path ) ) else: - write_junit_xml('stylelint') + print("successfully run check_keywords") -@task -@needs('pavelib.prereqs.install_python_prereqs') -@cmdopts([ - ("thresholds=", "t", "json containing limit for number of acceptable violations per rule"), -]) -@timed -def run_xsslint(options): +def _get_xsslint_counts(filename): + """ + This returns a dict of violations from the xsslint report. + + Arguments: + filename: The name of the xsslint report. + + Returns: + A dict containing the following: + rules: A dict containing the count for each rule as follows: + violation-rule-id: N, where N is the number of violations + total: M, where M is the number of total violations + + """ + report_contents = _get_report_contents(filename, 'xsslint') + rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) + total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) + violations = {'rules': {}} + for violation_match in rule_count_regex.finditer(report_contents): + try: + violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) + except ValueError: + violations['rules'][violation_match.group('rule_id')] = None + try: + violations['total'] = int(total_count_regex.search(report_contents).group('count')) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + violations['total'] = None + return violations + + +def run_xsslint(): """ Runs xsslint/xss_linter.py on the codebase """ - thresholds_option = getattr(options, 'thresholds', '{}') try: - violation_thresholds = json.loads(thresholds_option) + thresholds_option = 'scripts/xsslint_thresholds.json' + # Read the JSON file + with open(thresholds_option, 'r') as file: + violation_thresholds = json.load(file) + except ValueError: violation_thresholds = None if isinstance(violation_thresholds, dict) is False or \ @@ -280,20 +444,25 @@ def run_xsslint(options): ) xsslint_script = "xss_linter.py" - xsslint_report_dir = (Env.REPORT_DIR / "xsslint") + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + xsslint_report_dir = (REPORT_DIR / "xsslint") xsslint_report = xsslint_report_dir / "xsslint.report" _prepare_report_dir(xsslint_report_dir) - sh( - "{repo_root}/scripts/xsslint/{xsslint_script} --rule-totals --config={cfg_module} >> {xsslint_report}".format( - repo_root=Env.REPO_ROOT, - xsslint_script=xsslint_script, - xsslint_report=xsslint_report, - cfg_module='scripts.xsslint_config' - ), - ignore_error=True - ) - + command = [ + f"{REPO_ROOT}/scripts/xsslint/{xsslint_script}", + "--rule-totals", + "--config=scripts.xsslint_config" + ] + with open(xsslint_report, 'w') as report_file: + subprocess.run( + command, + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) xsslint_counts = _get_xsslint_counts(xsslint_report) try: @@ -316,14 +485,7 @@ def run_xsslint(options): ) ) - metrics_report = (Env.METRICS_DIR / "xsslint") - # Record the metric - _write_metric(metrics_str, metrics_report) - # Print number of violations to log. - sh(f"cat {metrics_report}", ignore_error=True) - error_message = "" - # Test total violations against threshold. 
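As an illustrative aside (not part of the patch), `_get_xsslint_counts` turns the `--rule-totals` report into a dict with a per-rule breakdown plus a `total` key, which is what the threshold test below compares against. The sketch reproduces that parsing on the report shape used by the happy-path fixture in test_paver_quality.py later in this series; the regexes are written with the named groups `rule_id` and `count` that the helper's `group('rule_id')` / `group('count')` calls refer to.

    import re

    # Report shape produced by xss_linter.py --rule-totals (values illustrative,
    # matching the happy-path fixture in test_paver_quality.py).
    report = """
    javascript-concat-html: 310 violations
    javascript-escape: 7 violations

    2608 violations total
    """

    rule_count_regex = re.compile(r"^\s*(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
    total_count_regex = re.compile(r"^\s*(?P<count>\d+) violations total", re.MULTILINE)

    violations = {
        'rules': {m.group('rule_id'): int(m.group('count')) for m in rule_count_regex.finditer(report)},
        'total': int(total_count_regex.search(report).group('count')),
    }
    assert violations == {
        'rules': {'javascript-concat-html': 310, 'javascript-escape': 7},
        'total': 2608,
    }
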
if 'total' in list(violation_thresholds.keys()): if violation_thresholds['total'] < xsslint_counts['total']: @@ -359,244 +521,27 @@ def run_xsslint(options): ) ) else: - write_junit_xml('xsslint') - - -def _write_metric(metric, filename): - """ - Write a given metric to a given file - Used for things like reports/metrics/eslint, which will simply tell you the number of - eslint violations found - """ - Env.METRICS_DIR.makedirs_p() - - with open(filename, "w") as metric_file: - metric_file.write(str(metric)) - - -def _prepare_report_dir(dir_name): - """ - Sets a given directory to a created, but empty state - """ - dir_name.rmtree_p() - dir_name.mkdir_p() - - -def _get_report_contents(filename, report_name, last_line_only=False): - """ - Returns the contents of the given file. Use last_line_only to only return - the last line, which can be used for getting output from quality output - files. - - Arguments: - last_line_only: True to return the last line only, False to return a - string with full contents. - - Returns: - String containing full contents of the report, or the last line. - - """ - if os.path.isfile(filename): - with open(filename) as report_file: - if last_line_only: - lines = report_file.readlines() - for line in reversed(lines): - if line != '\n': - return line - return None - else: - return report_file.read() - else: - file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" - fail_quality(report_name, file_not_found_message) + print("successfully run xsslint") -def _get_count_from_last_line(filename, file_type): - """ - This will return the number in the last line of a file. - It is returning only the value (as a floating number). - """ - report_contents = _get_report_contents(filename, file_type, last_line_only=True) +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("command", choices=['eslint', 'stylelint', + 'xsslint', 'pii_check', 'check_keywords']) - if report_contents is None: - return 0 + argument = parser.parse_args() - last_line = report_contents.strip() - # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" - regex = r'^\d+' + if argument.command == 'eslint': + run_eslint() - try: - return float(re.search(regex, last_line).group(0)) - # An AttributeError will occur if the regex finds no matches. - # A ValueError will occur if the returned regex cannot be cast as a float. - except (AttributeError, ValueError): - return None + elif argument.command == 'stylelint': + run_stylelint() + elif argument.command == 'xsslint': + run_xsslint() -def _get_xsslint_counts(filename): - """ - This returns a dict of violations from the xsslint report. - - Arguments: - filename: The name of the xsslint report. 
- - Returns: - A dict containing the following: - rules: A dict containing the count for each rule as follows: - violation-rule-id: N, where N is the number of violations - total: M, where M is the number of total violations + elif argument.command == 'pii_check': + run_pii_check() - """ - report_contents = _get_report_contents(filename, 'xsslint') - rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) - total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) - violations = {'rules': {}} - for violation_match in rule_count_regex.finditer(report_contents): - try: - violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) - except ValueError: - violations['rules'][violation_match.group('rule_id')] = None - try: - violations['total'] = int(total_count_regex.search(report_contents).group('count')) - # An AttributeError will occur if the regex finds no matches. - # A ValueError will occur if the returned regex cannot be cast as a float. - except (AttributeError, ValueError): - violations['total'] = None - return violations - - -def _extract_missing_pii_annotations(filename): - """ - Returns the number of uncovered models from the stdout report of django_find_annotations. - - Arguments: - filename: Filename where stdout of django_find_annotations was captured. - - Returns: - three-tuple containing: - 1. The number of uncovered models, - 2. A bool indicating whether the coverage is still below the threshold, and - 3. The full report as a string. - """ - uncovered_models = 0 - pii_check_passed = True - if os.path.isfile(filename): - with open(filename) as report_file: - lines = report_file.readlines() - - # Find the count of uncovered models. - uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') - for line in lines: - uncovered_match = uncovered_regex.match(line) - if uncovered_match: - uncovered_models = int(uncovered_match.groups()[0]) - break - - # Find a message which suggests the check failed. - failure_regex = re.compile(r'^Coverage threshold not met!') - for line in lines: - failure_match = failure_regex.match(line) - if failure_match: - pii_check_passed = False - break - - # Each line in lines already contains a newline. - full_log = ''.join(lines) - else: - fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') - - return (uncovered_models, pii_check_passed, full_log) - - -@task -@needs('pavelib.prereqs.install_python_prereqs') -@cmdopts([ - ("report-dir=", "r", "Directory in which to put PII reports"), -]) -@timed -def run_pii_check(options): - """ - Guarantee that all Django models are PII-annotated. 
- """ - pii_report_name = 'pii' - default_report_dir = (Env.REPORT_DIR / pii_report_name) - report_dir = getattr(options, 'report_dir', default_report_dir) - output_file = os.path.join(report_dir, 'pii_check_{}.report') - env_report = [] - pii_check_passed = True - for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): - try: - print() - print(f"Running {env_name} PII Annotation check and report") - print("-" * 45) - run_output_file = str(output_file).format(env_name.lower()) - sh( - "mkdir -p {} && " # lint-amnesty, pylint: disable=duplicate-string-formatting-argument - "export DJANGO_SETTINGS_MODULE={}; " - "code_annotations django_find_annotations " - "--config_file .pii_annotations.yml --report_path {} --app_name {} " - "--lint --report --coverage | tee {}".format( - report_dir, env_settings_file, report_dir, env_name.lower(), run_output_file - ) - ) - uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) - env_report.append(( - uncovered_model_count, - full_log, - )) - - except BuildFailure as error_message: - fail_quality(pii_report_name, f'FAILURE: {error_message}') - - if not pii_check_passed_env: - pii_check_passed = False - - # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count. - uncovered_count, full_log = max(env_report, key=lambda r: r[0]) - - # Write metric file. - if uncovered_count is None: - uncovered_count = 0 - metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n" - _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name)) - - # Finally, fail the paver task if code_annotations suggests that the check failed. - if not pii_check_passed: - fail_quality('pii', full_log) - - -@task -@needs('pavelib.prereqs.install_python_prereqs') -@timed -def check_keywords(): - """ - Check Django model fields for names that conflict with a list of reserved keywords - """ - report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords') - sh(f"mkdir -p {report_path}") - - overall_status = True - for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: - report_file = f"{env}_reserved_keyword_report.csv" - override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") - try: - sh( - "export DJANGO_SETTINGS_MODULE={settings_file}; " - "python manage.py {app} check_reserved_keywords " - "--override_file {override_file} " - "--report_path {report_path} " - "--report_file {report_file}".format( - settings_file=env_settings_file, app=env, override_file=override_file, - report_path=report_path, report_file=report_file - ) - ) - except BuildFailure: - overall_status = False - - if not overall_status: - fail_quality( - 'keywords', - 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format( - report_path - ) - ) + elif argument.command == 'check_keywords': + check_keywords() From f3e619e8faea9bc360051a9addaf1f1d25882027 Mon Sep 17 00:00:00 2001 From: "Kyle D. 
McCormick" Date: Mon, 28 Oct 2024 10:46:28 -0400 Subject: [PATCH 2/3] temp: put back paver checks for testing --- .github/workflows/js-tests-paver.yml | 84 +++ .github/workflows/quality-checks-paver.yml | 82 +++ pavelib/__init__.py | 2 +- pavelib/js_test.py | 143 +++++ pavelib/paver_tests/conftest.py | 22 + pavelib/paver_tests/test_eslint.py | 54 ++ pavelib/paver_tests/test_js_test.py | 148 +++++ pavelib/paver_tests/test_paver_quality.py | 156 ++++++ pavelib/paver_tests/test_pii_check.py | 79 +++ pavelib/paver_tests/test_stylelint.py | 36 ++ pavelib/paver_tests/test_timer.py | 190 +++++++ pavelib/paver_tests/test_xsslint.py | 120 ++++ pavelib/quality.py | 602 +++++++++++++++++++++ pavelib/utils/test/suites/__init__.py | 5 + pavelib/utils/test/suites/js_suite.py | 109 ++++ pavelib/utils/test/suites/suite.py | 149 +++++ pavelib/utils/test/utils.py | 91 ++++ scripts/generic-ci-tests.sh | 122 +++++ 18 files changed, 2193 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/js-tests-paver.yml create mode 100644 .github/workflows/quality-checks-paver.yml create mode 100644 pavelib/js_test.py create mode 100644 pavelib/paver_tests/conftest.py create mode 100644 pavelib/paver_tests/test_eslint.py create mode 100644 pavelib/paver_tests/test_js_test.py create mode 100644 pavelib/paver_tests/test_paver_quality.py create mode 100644 pavelib/paver_tests/test_pii_check.py create mode 100644 pavelib/paver_tests/test_stylelint.py create mode 100644 pavelib/paver_tests/test_timer.py create mode 100644 pavelib/paver_tests/test_xsslint.py create mode 100644 pavelib/quality.py create mode 100644 pavelib/utils/test/suites/__init__.py create mode 100644 pavelib/utils/test/suites/js_suite.py create mode 100644 pavelib/utils/test/suites/suite.py create mode 100644 pavelib/utils/test/utils.py create mode 100755 scripts/generic-ci-tests.sh diff --git a/.github/workflows/js-tests-paver.yml b/.github/workflows/js-tests-paver.yml new file mode 100644 index 000000000000..566063fdfd22 --- /dev/null +++ b/.github/workflows/js-tests-paver.yml @@ -0,0 +1,84 @@ +name: Javascript tests PAVER + +on: + pull_request: + push: + branches: + - master + +jobs: + run_tests: + name: JS + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + node-version: [18, 20] + python-version: + - "3.11" + + steps: + - uses: actions/checkout@v4 + - name: Fetch master to compare coverage + run: git fetch --depth=1 origin master + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + + - name: Setup npm + run: npm i -g npm@10.5.x + + - name: Install Firefox 123.0 + run: | + sudo apt-get purge firefox + wget "https://ftp.mozilla.org/pub/firefox/releases/123.0/linux-x86_64/en-US/firefox-123.0.tar.bz2" + tar -xjf firefox-123.0.tar.bz2 + sudo mv firefox /opt/firefox + sudo ln -s /opt/firefox/firefox /usr/bin/firefox + + - name: Install Required System Packages + run: sudo apt-get update && sudo apt-get install libxmlsec1-dev ubuntu-restricted-extras xvfb + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Get pip cache dir + id: pip-cache-dir + run: | + echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + + - name: Cache pip dependencies + id: cache-dependencies + uses: actions/cache@v4 + with: + path: ${{ steps.pip-cache-dir.outputs.dir }} + key: ${{ runner.os }}-pip-${{ hashFiles('requirements/edx/base.txt') }} + restore-keys: ${{ runner.os }}-pip- + + - name: Install Required Python Dependencies + run: | 
+ make base-requirements + + - uses: c-hive/gha-npm-cache@v1 + - name: Run JS Tests + env: + TEST_SUITE: js-unit + SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh + run: | + npm install -g jest + xvfb-run --auto-servernum ./scripts/all-tests.sh + + - name: Save Job Artifacts + uses: actions/upload-artifact@v4 + with: + name: Build-Artifacts + path: | + reports/**/* + test_root/log/*.png + test_root/log/*.log + **/TEST-*.xml + overwrite: true diff --git a/.github/workflows/quality-checks-paver.yml b/.github/workflows/quality-checks-paver.yml new file mode 100644 index 000000000000..beb9fea8007f --- /dev/null +++ b/.github/workflows/quality-checks-paver.yml @@ -0,0 +1,82 @@ +name: Quality checks PAVER + +on: + pull_request: + push: + branches: + - master + - open-release/lilac.master + +jobs: + run_tests: + name: Quality Others + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-22.04] + python-version: + - "3.11" + node-version: [20] + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: Fetch base branch for comparison + run: git fetch --depth=1 origin ${{ github.base_ref }} + + - name: Install Required System Packages + run: sudo apt-get update && sudo apt-get install libxmlsec1-dev + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + + - name: Setup npm + run: npm i -g npm@8.5.x + + - name: Get pip cache dir + id: pip-cache-dir + run: | + echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + + - name: Cache pip dependencies + id: cache-dependencies + uses: actions/cache@v4 + with: + path: ${{ steps.pip-cache-dir.outputs.dir }} + key: ${{ runner.os }}-pip-${{ hashFiles('requirements/edx/testing.txt') }} + restore-keys: ${{ runner.os }}-pip- + + - name: Install Required Python Dependencies + env: + PIP_SRC: ${{ runner.temp }} + run: | + make test-requirements + + - name: Run Quality Tests + env: + TEST_SUITE: quality + SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh + PIP_SRC: ${{ runner.temp }} + TARGET_BRANCH: ${{ github.base_ref }} + run: | + ./scripts/all-tests.sh + + - name: Save Job Artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: Build-Artifacts + path: | + **/reports/**/* + test_root/log/**/*.log + *.log + overwrite: true diff --git a/pavelib/__init__.py b/pavelib/__init__.py index 24f05618bdd7..875068166ff5 100644 --- a/pavelib/__init__.py +++ b/pavelib/__init__.py @@ -3,4 +3,4 @@ """ -from . import assets +from . 
import assets, js_test, prereqs, quality diff --git a/pavelib/js_test.py b/pavelib/js_test.py new file mode 100644 index 000000000000..fb9c213499ac --- /dev/null +++ b/pavelib/js_test.py @@ -0,0 +1,143 @@ +""" +Javascript test tasks +""" + + +import os +import re +import sys + +from paver.easy import cmdopts, needs, sh, task + +from pavelib.utils.envs import Env +from pavelib.utils.test.suites import JestSnapshotTestSuite, JsTestSuite +from pavelib.utils.timer import timed + +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text + +__test__ = False # do not collect + + +@task +@needs( + 'pavelib.prereqs.install_node_prereqs', + 'pavelib.utils.test.utils.clean_reports_dir', +) +@cmdopts([ + ("suite=", "s", "Test suite to run"), + ("mode=", "m", "dev or run"), + ("coverage", "c", "Run test under coverage"), + ("port=", "p", "Port to run test server on (dev mode only)"), + ('skip-clean', 'C', 'skip cleaning repository before running tests'), + ('skip_clean', None, 'deprecated in favor of skip-clean'), +], share_with=["pavelib.utils.tests.utils.clean_reports_dir"]) +@timed +def test_js(options): + """ + Run the JavaScript tests + """ + mode = getattr(options, 'mode', 'run') + port = None + skip_clean = getattr(options, 'skip_clean', False) + + if mode == 'run': + suite = getattr(options, 'suite', 'all') + coverage = getattr(options, 'coverage', False) + elif mode == 'dev': + suite = getattr(options, 'suite', None) + coverage = False + port = getattr(options, 'port', None) + else: + sys.stderr.write("Invalid mode. Please choose 'dev' or 'run'.") + return + + if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): + sys.stderr.write( + "Unknown test suite. Please choose from ({suites})\n".format( + suites=", ".join(Env.JS_TEST_ID_KEYS) + ) + ) + return + + if suite != 'jest-snapshot': + test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) + test_suite.run() + + if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in + test_suite = JestSnapshotTestSuite('jest') + test_suite.run() + + +@task +@cmdopts([ + ("suite=", "s", "Test suite to run"), + ("coverage", "c", "Run test under coverage"), +]) +@timed +def test_js_run(options): + """ + Run the JavaScript tests and print results to the console + """ + options.mode = 'run' + test_js(options) + + +@task +@cmdopts([ + ("suite=", "s", "Test suite to run"), + ("port=", "p", "Port to run test server on"), +]) +@timed +def test_js_dev(options): + """ + Run the JavaScript tests in your default browsers + """ + options.mode = 'dev' + test_js(options) + + +@task +@needs('pavelib.prereqs.install_coverage_prereqs') +@cmdopts([ + ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), +], share_with=['coverage']) +@timed +def diff_coverage(options): + """ + Build the diff coverage reports + """ + compare_branch = options.get('compare_branch', 'origin/master') + + # Find all coverage XML files (both Python and JavaScript) + xml_reports = [] + + for filepath in Env.REPORT_DIR.walk(): + if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): + xml_reports.append(filepath) + + if not xml_reports: + err_msg = colorize( + 'red', + "No coverage info found. 
Run `paver test` before running " + "`paver coverage`.\n" + ) + sys.stderr.write(err_msg) + else: + xml_report_str = ' '.join(xml_reports) + diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') + + # Generate the diff coverage reports (HTML and console) + # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 + sh( + "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} " + "--html-report {diff_html_path}".format( + xml_report_str=xml_report_str, + compare_branch=compare_branch, + diff_html_path=diff_html_path, + ) + ) + + print("\n") diff --git a/pavelib/paver_tests/conftest.py b/pavelib/paver_tests/conftest.py new file mode 100644 index 000000000000..214a35e3fe85 --- /dev/null +++ b/pavelib/paver_tests/conftest.py @@ -0,0 +1,22 @@ +""" +Pytest fixtures for the pavelib unit tests. +""" + + +import os +from shutil import rmtree + +import pytest + +from pavelib.utils.envs import Env + + +@pytest.fixture(autouse=True, scope='session') +def delete_quality_junit_xml(): + """ + Delete the JUnit XML results files for quality check tasks run during the + unit tests. + """ + yield + if os.path.exists(Env.QUALITY_DIR): + rmtree(Env.QUALITY_DIR, ignore_errors=True) diff --git a/pavelib/paver_tests/test_eslint.py b/pavelib/paver_tests/test_eslint.py new file mode 100644 index 000000000000..5802d7d0d21b --- /dev/null +++ b/pavelib/paver_tests/test_eslint.py @@ -0,0 +1,54 @@ +""" +Tests for Paver's Stylelint tasks. +""" + + +import unittest +from unittest.mock import patch + +import pytest +from paver.easy import BuildFailure, call_task + +import pavelib.quality + + +class TestPaverESLint(unittest.TestCase): + """ + For testing run_eslint + """ + + def setUp(self): + super().setUp() + + # Mock the paver @needs decorator + self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start() + self._mock_paver_needs.return_value = 0 + + # Mock shell commands + patcher = patch('pavelib.quality.sh') + self._mock_paver_sh = patcher.start() + + # Cleanup mocks + self.addCleanup(patcher.stop) + self.addCleanup(self._mock_paver_needs.stop) + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_count_from_last_line') + def test_eslint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument + """ + run_eslint encounters an error parsing the eslint output log + """ + mock_count.return_value = None + with pytest.raises(BuildFailure): + call_task('pavelib.quality.run_eslint', args=['']) + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_count_from_last_line') + def test_eslint_vanilla(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument + """ + eslint finds violations, but a limit was not set + """ + mock_count.return_value = 1 + pavelib.quality.run_eslint("") diff --git a/pavelib/paver_tests/test_js_test.py b/pavelib/paver_tests/test_js_test.py new file mode 100644 index 000000000000..4b165a156674 --- /dev/null +++ b/pavelib/paver_tests/test_js_test.py @@ -0,0 +1,148 @@ +"""Unit tests for the Paver JavaScript testing tasks.""" + +from unittest.mock import patch + +import ddt +from paver.easy import call_task + +import pavelib.js_test +from pavelib.utils.envs import Env + +from .utils import PaverTestCase + 
+ +@ddt.ddt +class TestPaverJavaScriptTestTasks(PaverTestCase): + """ + Test the Paver JavaScript testing tasks. + """ + + EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = 'find {platform_root}/reports/javascript -type f -delete' + EXPECTED_KARMA_OPTIONS = ( + "{config_file} " + "--single-run={single_run} " + "--capture-timeout=60000 " + "--junitreportpath=" + "{platform_root}/reports/javascript/javascript_xunit-{suite}.xml " + "--browsers={browser}" + ) + EXPECTED_COVERAGE_OPTIONS = ( + ' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml' + ) + + EXPECTED_COMMANDS = [ + "make report_dir", + 'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads', + "find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;", + 'rm -rf test_root/log/auto_screenshots/*', + "rm -rf /tmp/mako_[cl]ms", + ] + + def setUp(self): + super().setUp() + + # Mock the paver @needs decorator + self._mock_paver_needs = patch.object(pavelib.js_test.test_js, 'needs').start() + self._mock_paver_needs.return_value = 0 + + # Cleanup mocks + self.addCleanup(self._mock_paver_needs.stop) + + @ddt.data( + [""], + ["--coverage"], + ["--suite=lms"], + ["--suite=lms --coverage"], + ) + @ddt.unpack + def test_test_js_run(self, options_string): + """ + Test the "test_js_run" task. + """ + options = self.parse_options_string(options_string) + self.reset_task_messages() + call_task("pavelib.js_test.test_js_run", options=options) + self.verify_messages(options=options, dev_mode=False) + + @ddt.data( + [""], + ["--port=9999"], + ["--suite=lms"], + ["--suite=lms --port=9999"], + ) + @ddt.unpack + def test_test_js_dev(self, options_string): + """ + Test the "test_js_run" task. + """ + options = self.parse_options_string(options_string) + self.reset_task_messages() + call_task("pavelib.js_test.test_js_dev", options=options) + self.verify_messages(options=options, dev_mode=True) + + def parse_options_string(self, options_string): + """ + Parse a string containing the options for a test run + """ + parameters = options_string.split(" ") + suite = "all" + if "--system=lms" in parameters: + suite = "lms" + elif "--system=common" in parameters: + suite = "common" + coverage = "--coverage" in parameters + port = None + if "--port=9999" in parameters: + port = 9999 + return { + "suite": suite, + "coverage": coverage, + "port": port, + } + + def verify_messages(self, options, dev_mode): + """ + Verify that the messages generated when running tests are as expected + for the specified options and dev_mode. 
+ """ + is_coverage = options['coverage'] + port = options['port'] + expected_messages = [] + suites = Env.JS_TEST_ID_KEYS if options['suite'] == 'all' else [options['suite']] + + expected_messages.extend(self.EXPECTED_COMMANDS) + if not dev_mode and not is_coverage: + expected_messages.append(self.EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND.format( + platform_root=self.platform_root + )) + + command_template = ( + 'node --max_old_space_size=4096 node_modules/.bin/karma start {options}' + ) + + for suite in suites: + # Karma test command + if suite != 'jest-snapshot': + karma_config_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(suite)] + expected_test_tool_command = command_template.format( + options=self.EXPECTED_KARMA_OPTIONS.format( + config_file=karma_config_file, + single_run='false' if dev_mode else 'true', + suite=suite, + platform_root=self.platform_root, + browser=Env.KARMA_BROWSER, + ), + ) + if is_coverage: + expected_test_tool_command += self.EXPECTED_COVERAGE_OPTIONS.format( + platform_root=self.platform_root, + suite=suite + ) + if port: + expected_test_tool_command += f" --port={port}" + else: + expected_test_tool_command = 'jest' + + expected_messages.append(expected_test_tool_command) + + assert self.task_messages == expected_messages diff --git a/pavelib/paver_tests/test_paver_quality.py b/pavelib/paver_tests/test_paver_quality.py new file mode 100644 index 000000000000..36d6dd59e172 --- /dev/null +++ b/pavelib/paver_tests/test_paver_quality.py @@ -0,0 +1,156 @@ +""" # lint-amnesty, pylint: disable=django-not-configured +Tests for paver quality tasks +""" + + +import os +import shutil # lint-amnesty, pylint: disable=unused-import +import tempfile +import textwrap +import unittest +from unittest.mock import MagicMock, mock_open, patch # lint-amnesty, pylint: disable=unused-import + +import pytest # lint-amnesty, pylint: disable=unused-import +from ddt import data, ddt, file_data, unpack # lint-amnesty, pylint: disable=unused-import +from path import Path as path +from paver.easy import BuildFailure # lint-amnesty, pylint: disable=unused-import + +import pavelib.quality +from pavelib.paver_tests.utils import PaverTestCase, fail_on_eslint # lint-amnesty, pylint: disable=unused-import + +OPEN_BUILTIN = 'builtins.open' + + +@ddt +class TestPaverQualityViolations(unittest.TestCase): + """ + For testing the paver violations-counting tasks + """ + def setUp(self): + super().setUp() + self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with + self.f.close() + self.addCleanup(os.remove, self.f.name) + + def test_pep8_parser(self): + with open(self.f.name, 'w') as f: + f.write("hello\nhithere") + num = len(pavelib.quality._pep8_violations(f.name)) # pylint: disable=protected-access + assert num == 2 + + +class TestPaverReportViolationsCounts(unittest.TestCase): + """ + For testing utility functions for getting counts from reports for + run_eslint and run_xsslint. 
+ """ + + def setUp(self): + super().setUp() + + # Temporary file infrastructure + self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with + self.f.close() + + # Cleanup various mocks and tempfiles + self.addCleanup(os.remove, self.f.name) + + def test_get_eslint_violations_count(self): + with open(self.f.name, 'w') as f: + f.write("3000 violations found") + actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access + assert actual_count == 3000 + + def test_get_eslint_violations_no_number_found(self): + with open(self.f.name, 'w') as f: + f.write("Not expected string regex") + actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access + assert actual_count is None + + def test_get_eslint_violations_count_truncated_report(self): + """ + A truncated report (i.e. last line is just a violation) + """ + with open(self.f.name, 'w') as f: + f.write("foo/bar/js/fizzbuzz.js: line 45, col 59, Missing semicolon.") + actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access + assert actual_count is None + + def test_generic_value(self): + """ + Default behavior is to look for an integer appearing at head of line + """ + with open(self.f.name, 'w') as f: + f.write("5.777 good to see you") + actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access + assert actual_count == 5 + + def test_generic_value_none_found(self): + """ + Default behavior is to look for an integer appearing at head of line + """ + with open(self.f.name, 'w') as f: + f.write("hello 5.777 good to see you") + actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access + assert actual_count is None + + def test_get_xsslint_counts_happy(self): + """ + Test happy path getting violation counts from xsslint report. + """ + report = textwrap.dedent(""" + test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos); + + javascript-concat-html: 310 violations + javascript-escape: 7 violations + + 2608 violations total + """) + with open(self.f.name, 'w') as f: + f.write(report) + counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access + self.assertDictEqual(counts, { + 'rules': { + 'javascript-concat-html': 310, + 'javascript-escape': 7, + }, + 'total': 2608, + }) + + def test_get_xsslint_counts_bad_counts(self): + """ + Test getting violation counts from truncated and malformed xsslint + report. 
+ """ + report = textwrap.dedent(""" + javascript-concat-html: violations + """) + with open(self.f.name, 'w') as f: + f.write(report) + counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access + self.assertDictEqual(counts, { + 'rules': {}, + 'total': None, + }) + + +class TestPrepareReportDir(unittest.TestCase): + """ + Tests the report directory preparation + """ + + def setUp(self): + super().setUp() + self.test_dir = tempfile.mkdtemp() + self.test_file = tempfile.NamedTemporaryFile(delete=False, dir=self.test_dir) # lint-amnesty, pylint: disable=consider-using-with + self.addCleanup(os.removedirs, self.test_dir) + + def test_report_dir_with_files(self): + assert os.path.exists(self.test_file.name) + pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access + assert not os.path.exists(self.test_file.name) + + def test_report_dir_without_files(self): + os.remove(self.test_file.name) + pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access + assert os.listdir(path(self.test_dir)) == [] diff --git a/pavelib/paver_tests/test_pii_check.py b/pavelib/paver_tests/test_pii_check.py new file mode 100644 index 000000000000..d034360acde0 --- /dev/null +++ b/pavelib/paver_tests/test_pii_check.py @@ -0,0 +1,79 @@ +""" +Tests for Paver's PII checker task. +""" + +import shutil +import tempfile +import unittest +from unittest.mock import patch + +from path import Path as path +from paver.easy import call_task, BuildFailure + +import pavelib.quality +from pavelib.utils.envs import Env + + +class TestPaverPIICheck(unittest.TestCase): + """ + For testing the paver run_pii_check task + """ + def setUp(self): + super().setUp() + self.report_dir = path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, self.report_dir) + + @patch.object(pavelib.quality.run_pii_check, 'needs') + @patch('pavelib.quality.sh') + def test_pii_check_report_dir_override(self, mock_paver_sh, mock_needs): + """ + run_pii_check succeeds with proper report dir + """ + # Make the expected stdout files. + cms_stdout_report = self.report_dir / 'pii_check_cms.report' + cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n']) + lms_stdout_report = self.report_dir / 'pii_check_lms.report' + lms_stdout_report.write_lines(['Coverage found 66 uncovered models:\n']) + + mock_needs.return_value = 0 + call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)}) + mock_calls = [str(call) for call in mock_paver_sh.mock_calls] + assert len(mock_calls) == 2 + assert any('lms.envs.test' in call for call in mock_calls) + assert any('cms.envs.test' in call for call in mock_calls) + assert all(str(self.report_dir) in call for call in mock_calls) + metrics_file = Env.METRICS_DIR / 'pii' + assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n' + + @patch.object(pavelib.quality.run_pii_check, 'needs') + @patch('pavelib.quality.sh') + def test_pii_check_failed(self, mock_paver_sh, mock_needs): + """ + run_pii_check fails due to crossing the threshold. + """ + # Make the expected stdout files. + cms_stdout_report = self.report_dir / 'pii_check_cms.report' + cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n']) + lms_stdout_report = self.report_dir / 'pii_check_lms.report' + lms_stdout_report.write_lines([ + 'Coverage found 66 uncovered models:', + 'Coverage threshold not met! 
Needed 100.0, actually 95.0!', + ]) + + mock_needs.return_value = 0 + try: + with self.assertRaises(BuildFailure): + call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)}) + except SystemExit: + # Sometimes the BuildFailure raises a SystemExit, sometimes it doesn't, not sure why. + # As a hack, we just wrap it in try-except. + # This is not good, but these tests weren't even running for years, and we're removing this whole test + # suite soon anyway. + pass + mock_calls = [str(call) for call in mock_paver_sh.mock_calls] + assert len(mock_calls) == 2 + assert any('lms.envs.test' in call for call in mock_calls) + assert any('cms.envs.test' in call for call in mock_calls) + assert all(str(self.report_dir) in call for call in mock_calls) + metrics_file = Env.METRICS_DIR / 'pii' + assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n' diff --git a/pavelib/paver_tests/test_stylelint.py b/pavelib/paver_tests/test_stylelint.py new file mode 100644 index 000000000000..3e1c79c93f28 --- /dev/null +++ b/pavelib/paver_tests/test_stylelint.py @@ -0,0 +1,36 @@ +""" +Tests for Paver's Stylelint tasks. +""" + +from unittest.mock import MagicMock, patch + +import pytest +import ddt +from paver.easy import call_task + +from .utils import PaverTestCase + + +@ddt.ddt +class TestPaverStylelint(PaverTestCase): + """ + Tests for Paver's Stylelint tasks. + """ + @ddt.data( + [False], + [True], + ) + @ddt.unpack + def test_run_stylelint(self, should_pass): + """ + Verify that the quality task fails with Stylelint violations. + """ + if should_pass: + _mock_stylelint_violations = MagicMock(return_value=0) + with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations): + call_task('pavelib.quality.run_stylelint') + else: + _mock_stylelint_violations = MagicMock(return_value=100) + with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations): + with pytest.raises(SystemExit): + call_task('pavelib.quality.run_stylelint') diff --git a/pavelib/paver_tests/test_timer.py b/pavelib/paver_tests/test_timer.py new file mode 100644 index 000000000000..5ccbf74abcf9 --- /dev/null +++ b/pavelib/paver_tests/test_timer.py @@ -0,0 +1,190 @@ +""" +Tests of the pavelib.utils.timer module. +""" + + +from datetime import datetime, timedelta +from unittest import TestCase + +from unittest.mock import MagicMock, patch + +from pavelib.utils import timer + + +@timer.timed +def identity(*args, **kwargs): + """ + An identity function used as a default task to test the timing of. + """ + return args, kwargs + + +MOCK_OPEN = MagicMock(spec=open) + + +@patch.dict('pavelib.utils.timer.__builtins__', open=MOCK_OPEN) +class TimedDecoratorTests(TestCase): + """ + Tests of the pavelib.utils.timer:timed decorator. 
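+
+    These tests patch ``timer.datetime``, ``json.dump``, ``os.makedirs`` and the
+    module-level ``open`` so that timing records are captured in memory instead
+    of being written to disk.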
+ """ + def setUp(self): + super().setUp() + + patch_dumps = patch.object(timer.json, 'dump', autospec=True) + self.mock_dump = patch_dumps.start() + self.addCleanup(patch_dumps.stop) + + patch_makedirs = patch.object(timer.os, 'makedirs', autospec=True) + self.mock_makedirs = patch_makedirs.start() + self.addCleanup(patch_makedirs.stop) + + patch_datetime = patch.object(timer, 'datetime', autospec=True) + self.mock_datetime = patch_datetime.start() + self.addCleanup(patch_datetime.stop) + + patch_exists = patch.object(timer, 'exists', autospec=True) + self.mock_exists = patch_exists.start() + self.addCleanup(patch_exists.stop) + + MOCK_OPEN.reset_mock() + + def get_log_messages(self, task=identity, args=None, kwargs=None, raises=None): + """ + Return all timing messages recorded during the execution of ``task``. + """ + if args is None: + args = [] + if kwargs is None: + kwargs = {} + + if raises is None: + task(*args, **kwargs) + else: + self.assertRaises(raises, task, *args, **kwargs) + + return [ + call[0][0] # log_message + for call in self.mock_dump.call_args_list + ] + + @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') + def test_times(self): + start = datetime(2016, 7, 20, 10, 56, 19) + end = start + timedelta(seconds=35.6) + + self.mock_datetime.utcnow.side_effect = [start, end] + + messages = self.get_log_messages() + assert len(messages) == 1 + + # I'm not using assertDictContainsSubset because it is + # removed in python 3.2 (because the arguments were backwards) + # and it wasn't ever replaced by anything *headdesk* + assert 'duration' in messages[0] + assert 35.6 == messages[0]['duration'] + + assert 'started_at' in messages[0] + assert start.isoformat(' ') == messages[0]['started_at'] + + assert 'ended_at' in messages[0] + assert end.isoformat(' ') == messages[0]['ended_at'] + + @patch.object(timer, 'PAVER_TIMER_LOG', None) + def test_no_logs(self): + messages = self.get_log_messages() + assert len(messages) == 0 + + @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') + def test_arguments(self): + messages = self.get_log_messages(args=(1, 'foo'), kwargs=dict(bar='baz')) + assert len(messages) == 1 + + # I'm not using assertDictContainsSubset because it is + # removed in python 3.2 (because the arguments were backwards) + # and it wasn't ever replaced by anything *headdesk* + assert 'args' in messages[0] + assert [repr(1), repr('foo')] == messages[0]['args'] + assert 'kwargs' in messages[0] + assert {'bar': repr('baz')} == messages[0]['kwargs'] + + @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') + def test_task_name(self): + messages = self.get_log_messages() + assert len(messages) == 1 + + # I'm not using assertDictContainsSubset because it is + # removed in python 3.2 (because the arguments were backwards) + # and it wasn't ever replaced by anything *headdesk* + assert 'task' in messages[0] + assert 'pavelib.paver_tests.test_timer.identity' == messages[0]['task'] + + @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') + def test_exceptions(self): + + @timer.timed + def raises(): + """ + A task used for testing exception handling of the timed decorator. + """ + raise Exception('The Message!') + + messages = self.get_log_messages(task=raises, raises=Exception) + assert len(messages) == 1 + + # I'm not using assertDictContainsSubset because it is + # removed in python 3.2 (because the arguments were backwards) + # and it wasn't ever replaced by anything *headdesk* + assert 'exception' in messages[0] + assert 'Exception: The Message!' 
== messages[0]['exception'] + + @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log-%Y-%m-%d-%H-%M-%S.log') + def test_date_formatting(self): + start = datetime(2016, 7, 20, 10, 56, 19) + end = start + timedelta(seconds=35.6) + + self.mock_datetime.utcnow.side_effect = [start, end] + + messages = self.get_log_messages() + assert len(messages) == 1 + + MOCK_OPEN.assert_called_once_with('/tmp/some-log-2016-07-20-10-56-19.log', 'a') + + @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') + def test_nested_tasks(self): + + @timer.timed + def parent(): + """ + A timed task that calls another task + """ + identity() + + parent_start = datetime(2016, 7, 20, 10, 56, 19) + parent_end = parent_start + timedelta(seconds=60) + child_start = parent_start + timedelta(seconds=10) + child_end = parent_end - timedelta(seconds=10) + + self.mock_datetime.utcnow.side_effect = [parent_start, child_start, child_end, parent_end] + + messages = self.get_log_messages(task=parent) + assert len(messages) == 2 + + # Child messages first + assert 'duration' in messages[0] + assert 40 == messages[0]['duration'] + + assert 'started_at' in messages[0] + assert child_start.isoformat(' ') == messages[0]['started_at'] + + assert 'ended_at' in messages[0] + assert child_end.isoformat(' ') == messages[0]['ended_at'] + + # Parent messages after + assert 'duration' in messages[1] + assert 60 == messages[1]['duration'] + + assert 'started_at' in messages[1] + assert parent_start.isoformat(' ') == messages[1]['started_at'] + + assert 'ended_at' in messages[1] + assert parent_end.isoformat(' ') == messages[1]['ended_at'] diff --git a/pavelib/paver_tests/test_xsslint.py b/pavelib/paver_tests/test_xsslint.py new file mode 100644 index 000000000000..a9b4a41e1600 --- /dev/null +++ b/pavelib/paver_tests/test_xsslint.py @@ -0,0 +1,120 @@ +""" +Tests for paver xsslint quality tasks +""" +from unittest.mock import patch + +import pytest +from paver.easy import call_task + +import pavelib.quality + +from .utils import PaverTestCase + + +class PaverXSSLintTest(PaverTestCase): + """ + Test run_xsslint with a mocked environment in order to pass in opts + """ + + def setUp(self): + super().setUp() + self.reset_task_messages() + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_xsslint_counts') + def test_xsslint_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric): + """ + run_xsslint encounters an error parsing the xsslint output log + """ + _mock_counts.return_value = {} + with pytest.raises(SystemExit): + call_task('pavelib.quality.run_xsslint') + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_xsslint_counts') + def test_xsslint_vanilla(self, _mock_counts, _mock_report_dir, _mock_write_metric): + """ + run_xsslint finds violations, but a limit was not set + """ + _mock_counts.return_value = {'total': 0} + call_task('pavelib.quality.run_xsslint') + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_xsslint_counts') + def test_xsslint_invalid_thresholds_option(self, _mock_counts, _mock_report_dir, _mock_write_metric): + """ + run_xsslint fails when thresholds option is poorly formatted + """ + _mock_counts.return_value = {'total': 0} + with pytest.raises(SystemExit): + call_task('pavelib.quality.run_xsslint', 
options={"thresholds": "invalid"}) + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_xsslint_counts') + def test_xsslint_invalid_thresholds_option_key(self, _mock_counts, _mock_report_dir, _mock_write_metric): + """ + run_xsslint fails when thresholds option is poorly formatted + """ + _mock_counts.return_value = {'total': 0} + with pytest.raises(SystemExit): + call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"invalid": 3}'}) + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_xsslint_counts') + def test_xsslint_too_many_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric): + """ + run_xsslint finds more violations than are allowed + """ + _mock_counts.return_value = {'total': 4} + with pytest.raises(SystemExit): + call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 3}'}) + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_xsslint_counts') + def test_xsslint_under_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric): + """ + run_xsslint finds fewer violations than are allowed + """ + _mock_counts.return_value = {'total': 4} + # No System Exit is expected + call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 5}'}) + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_xsslint_counts') + def test_xsslint_rule_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric): + """ + run_xsslint encounters an error parsing the xsslint output log for a + given rule threshold that was set. + """ + _mock_counts.return_value = {'total': 4} + with pytest.raises(SystemExit): + call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'}) + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_xsslint_counts') + def test_xsslint_too_many_rule_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric): + """ + run_xsslint finds more rule violations than are allowed + """ + _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}} + with pytest.raises(SystemExit): + call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'}) + + @patch.object(pavelib.quality, '_write_metric') + @patch.object(pavelib.quality, '_prepare_report_dir') + @patch.object(pavelib.quality, '_get_xsslint_counts') + def test_xsslint_under_rule_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric): + """ + run_xsslint finds fewer rule violations than are allowed + """ + _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}} + # No System Exit is expected + call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 5}}'}) diff --git a/pavelib/quality.py b/pavelib/quality.py new file mode 100644 index 000000000000..774179f45048 --- /dev/null +++ b/pavelib/quality.py @@ -0,0 +1,602 @@ +""" # lint-amnesty, pylint: disable=django-not-configured +Check code quality using pycodestyle, pylint, and diff_quality. 
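+
+Illustrative invocations (the same forms used by scripts/generic-ci-tests.sh):
+
+    paver run_pep8
+    paver run_eslint -l $ESLINT_THRESHOLD
+    paver run_stylelint
+    paver run_xsslint -t $XSSLINT_THRESHOLDS
+    paver run_pii_check
+    paver check_keywords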
+""" + +import json +import os +import re +from datetime import datetime +from xml.sax.saxutils import quoteattr + +from paver.easy import BuildFailure, cmdopts, needs, sh, task + +from .utils.envs import Env +from .utils.timer import timed + +ALL_SYSTEMS = 'lms,cms,common,openedx,pavelib,scripts' +JUNIT_XML_TEMPLATE = """ + +{failure_element} + +""" +JUNIT_XML_FAILURE_TEMPLATE = '' +START_TIME = datetime.utcnow() + + +def write_junit_xml(name, message=None): + """ + Write a JUnit results XML file describing the outcome of a quality check. + """ + if message: + failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message)) + else: + failure_element = '' + data = { + 'failure_count': 1 if message else 0, + 'failure_element': failure_element, + 'name': name, + 'seconds': (datetime.utcnow() - START_TIME).total_seconds(), + } + Env.QUALITY_DIR.makedirs_p() + filename = Env.QUALITY_DIR / f'{name}.xml' + with open(filename, 'w') as f: + f.write(JUNIT_XML_TEMPLATE.format(**data)) + + +def fail_quality(name, message): + """ + Fail the specified quality check by generating the JUnit XML results file + and raising a ``BuildFailure``. + """ + write_junit_xml(name, message) + raise BuildFailure(message) + + +def top_python_dirs(dirname): + """ + Find the directories to start from in order to find all the Python files in `dirname`. + """ + top_dirs = [] + + dir_init = os.path.join(dirname, "__init__.py") + if os.path.exists(dir_init): + top_dirs.append(dirname) + + for directory in ['djangoapps', 'lib']: + subdir = os.path.join(dirname, directory) + subdir_init = os.path.join(subdir, "__init__.py") + if os.path.exists(subdir) and not os.path.exists(subdir_init): + dirs = os.listdir(subdir) + top_dirs.extend(d for d in dirs if os.path.isdir(os.path.join(subdir, d))) + + modules_to_remove = ['__pycache__'] + for module in modules_to_remove: + if module in top_dirs: + top_dirs.remove(module) + + return top_dirs + + +def _get_pep8_violations(clean=True): + """ + Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string) + where violations_string is a string of all PEP 8 violations found, separated + by new lines. + """ + report_dir = (Env.REPORT_DIR / 'pep8') + if clean: + report_dir.rmtree(ignore_errors=True) + report_dir.makedirs_p() + report = report_dir / 'pep8.report' + + # Make sure the metrics subdirectory exists + Env.METRICS_DIR.makedirs_p() + + if not report.exists(): + sh(f'pycodestyle . | tee {report} -a') + + violations_list = _pep8_violations(report) + + return len(violations_list), violations_list + + +def _pep8_violations(report_file): + """ + Returns the list of all PEP 8 violations in the given report_file. + """ + with open(report_file) as f: + return f.readlines() + + +@task +@cmdopts([ + ("system=", "s", "System to act on"), +]) +@timed +def run_pep8(options): # pylint: disable=unused-argument + """ + Run pycodestyle on system code. + Fail the task if any violations are found. + """ + (count, violations_list) = _get_pep8_violations() + violations_list = ''.join(violations_list) + + # Print number of violations to log + violations_count_str = f"Number of PEP 8 violations: {count}" + print(violations_count_str) + print(violations_list) + + # Also write the number of violations to a file + with open(Env.METRICS_DIR / "pep8", "w") as f: + f.write(violations_count_str + '\n\n') + f.write(violations_list) + + # Fail if any violations are found + if count: + failure_string = "FAILURE: Too many PEP 8 violations. 
" + violations_count_str + failure_string += f"\n\nViolations:\n{violations_list}" + fail_quality('pep8', failure_string) + else: + write_junit_xml('pep8') + + +@task +@needs( + 'pavelib.prereqs.install_node_prereqs', + 'pavelib.utils.test.utils.ensure_clean_package_lock', +) +@cmdopts([ + ("limit=", "l", "limit for number of acceptable violations"), +]) +@timed +def run_eslint(options): + """ + Runs eslint on static asset directories. + If limit option is passed, fails build if more violations than the limit are found. + """ + + eslint_report_dir = (Env.REPORT_DIR / "eslint") + eslint_report = eslint_report_dir / "eslint.report" + _prepare_report_dir(eslint_report_dir) + violations_limit = int(getattr(options, 'limit', -1)) + + sh( + "node --max_old_space_size=4096 node_modules/.bin/eslint " + "--ext .js --ext .jsx --format=compact . | tee {eslint_report}".format( + eslint_report=eslint_report + ), + ignore_error=True + ) + + try: + num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) + except TypeError: + fail_quality( + 'eslint', + "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( + eslint_report=eslint_report + ) + ) + + # Record the metric + _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) + + # Fail if number of violations is greater than the limit + if num_violations > violations_limit > -1: + fail_quality( + 'eslint', + "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, violations_limit=violations_limit + ) + ) + else: + write_junit_xml('eslint') + + +def _get_stylelint_violations(): + """ + Returns the number of Stylelint violations. + """ + stylelint_report_dir = (Env.REPORT_DIR / "stylelint") + stylelint_report = stylelint_report_dir / "stylelint.report" + _prepare_report_dir(stylelint_report_dir) + formatter = 'node_modules/stylelint-formatter-pretty' + + sh( + "stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}".format( + formatter=formatter, + stylelint_report=stylelint_report, + ), + ignore_error=True + ) + + try: + return int(_get_count_from_last_line(stylelint_report, "stylelint")) + except TypeError: + fail_quality( + 'stylelint', + "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( + stylelint_report=stylelint_report + ) + ) + + +@task +@needs('pavelib.prereqs.install_node_prereqs') +@cmdopts([ + ("limit=", "l", "limit for number of acceptable violations"), +]) +@timed +def run_stylelint(options): + """ + Runs stylelint on Sass files. + If limit option is passed, fails build if more violations than the limit are found. 
+ """ + violations_limit = 0 + num_violations = _get_stylelint_violations() + + # Record the metric + _write_metric(num_violations, (Env.METRICS_DIR / "stylelint")) + + # Fail if number of violations is greater than the limit + if num_violations > violations_limit: + fail_quality( + 'stylelint', + "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, + violations_limit=violations_limit, + ) + ) + else: + write_junit_xml('stylelint') + + +@task +@needs('pavelib.prereqs.install_python_prereqs') +@cmdopts([ + ("thresholds=", "t", "json containing limit for number of acceptable violations per rule"), +]) +@timed +def run_xsslint(options): + """ + Runs xsslint/xss_linter.py on the codebase + """ + + thresholds_option = getattr(options, 'thresholds', '{}') + try: + violation_thresholds = json.loads(thresholds_option) + except ValueError: + violation_thresholds = None + if isinstance(violation_thresholds, dict) is False or \ + any(key not in ("total", "rules") for key in violation_thresholds.keys()): + + fail_quality( + 'xsslint', + """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n""" + """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """ + """with property names in double-quotes.""".format( + thresholds_option=thresholds_option + ) + ) + + xsslint_script = "xss_linter.py" + xsslint_report_dir = (Env.REPORT_DIR / "xsslint") + xsslint_report = xsslint_report_dir / "xsslint.report" + _prepare_report_dir(xsslint_report_dir) + + sh( + "{repo_root}/scripts/xsslint/{xsslint_script} --rule-totals --config={cfg_module} >> {xsslint_report}".format( + repo_root=Env.REPO_ROOT, + xsslint_script=xsslint_script, + xsslint_report=xsslint_report, + cfg_module='scripts.xsslint_config' + ), + ignore_error=True + ) + + xsslint_counts = _get_xsslint_counts(xsslint_report) + + try: + metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format( + xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total']) + ) + if 'rules' in xsslint_counts and any(xsslint_counts['rules']): + metrics_str += "\n" + rule_keys = sorted(xsslint_counts['rules'].keys()) + for rule in rule_keys: + metrics_str += "{rule} violations: {count}\n".format( + rule=rule, + count=int(xsslint_counts['rules'][rule]) + ) + except TypeError: + fail_quality( + 'xsslint', + "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format( + xsslint_script=xsslint_script, xsslint_report=xsslint_report + ) + ) + + metrics_report = (Env.METRICS_DIR / "xsslint") + # Record the metric + _write_metric(metrics_str, metrics_report) + # Print number of violations to log. + sh(f"cat {metrics_report}", ignore_error=True) + + error_message = "" + + # Test total violations against threshold. + if 'total' in list(violation_thresholds.keys()): + if violation_thresholds['total'] < xsslint_counts['total']: + error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( + count=xsslint_counts['total'], violations_limit=violation_thresholds['total'] + ) + + # Test rule violations against thresholds. + if 'rules' in violation_thresholds: + threshold_keys = sorted(violation_thresholds['rules'].keys()) + for threshold_key in threshold_keys: + if threshold_key not in xsslint_counts['rules']: + error_message += ( + "\nNumber of {xsslint_script} violations for {rule} could not be found in " + "{xsslint_report}." 
+ ).format( + xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report + ) + elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]: + error_message += \ + "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( + rule=threshold_key, count=xsslint_counts['rules'][threshold_key], + violations_limit=violation_thresholds['rules'][threshold_key], + ) + + if error_message: + fail_quality( + 'xsslint', + "FAILURE: XSSLinter Failed.\n{error_message}\n" + "See {xsslint_report} or run the following command to hone in on the problem:\n" + " ./scripts/xss-commit-linter.sh -h".format( + error_message=error_message, xsslint_report=xsslint_report + ) + ) + else: + write_junit_xml('xsslint') + + +def _write_metric(metric, filename): + """ + Write a given metric to a given file + Used for things like reports/metrics/eslint, which will simply tell you the number of + eslint violations found + """ + Env.METRICS_DIR.makedirs_p() + + with open(filename, "w") as metric_file: + metric_file.write(str(metric)) + + +def _prepare_report_dir(dir_name): + """ + Sets a given directory to a created, but empty state + """ + dir_name.rmtree_p() + dir_name.mkdir_p() + + +def _get_report_contents(filename, report_name, last_line_only=False): + """ + Returns the contents of the given file. Use last_line_only to only return + the last line, which can be used for getting output from quality output + files. + + Arguments: + last_line_only: True to return the last line only, False to return a + string with full contents. + + Returns: + String containing full contents of the report, or the last line. + + """ + if os.path.isfile(filename): + with open(filename) as report_file: + if last_line_only: + lines = report_file.readlines() + for line in reversed(lines): + if line != '\n': + return line + return None + else: + return report_file.read() + else: + file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" + fail_quality(report_name, file_not_found_message) + + +def _get_count_from_last_line(filename, file_type): + """ + This will return the number in the last line of a file. + It is returning only the value (as a floating number). + """ + report_contents = _get_report_contents(filename, file_type, last_line_only=True) + + if report_contents is None: + return 0 + + last_line = report_contents.strip() + # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" + regex = r'^\d+' + + try: + return float(re.search(regex, last_line).group(0)) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + return None + + +def _get_xsslint_counts(filename): + """ + This returns a dict of violations from the xsslint report. + + Arguments: + filename: The name of the xsslint report. 
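+
+    Example (mirroring the fixture used in the tests): a report containing the
+    lines "javascript-escape: 7 violations" and "2608 violations total" is
+    parsed into {'rules': {'javascript-escape': 7}, 'total': 2608}.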
+
+    Returns:
+        A dict containing the following:
+            rules: A dict containing the count for each rule as follows:
+                violation-rule-id: N, where N is the number of violations
+            total: M, where M is the number of total violations
+
+    """
+    report_contents = _get_report_contents(filename, 'xsslint')
+    rule_count_regex = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
+    total_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)
+    violations = {'rules': {}}
+    for violation_match in rule_count_regex.finditer(report_contents):
+        try:
+            violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count'))
+        except ValueError:
+            violations['rules'][violation_match.group('rule_id')] = None
+    try:
+        violations['total'] = int(total_count_regex.search(report_contents).group('count'))
+    # An AttributeError will occur if the regex finds no matches.
+    # A ValueError will occur if the returned regex cannot be cast as a float.
+    except (AttributeError, ValueError):
+        violations['total'] = None
+    return violations
+
+
+def _extract_missing_pii_annotations(filename):
+    """
+    Returns the number of uncovered models from the stdout report of django_find_annotations.
+
+    Arguments:
+        filename: Filename where stdout of django_find_annotations was captured.
+
+    Returns:
+        three-tuple containing:
+            1. The number of uncovered models,
+            2. A bool indicating whether the coverage is still below the threshold, and
+            3. The full report as a string.
+    """
+    uncovered_models = 0
+    pii_check_passed = True
+    if os.path.isfile(filename):
+        with open(filename) as report_file:
+            lines = report_file.readlines()
+
+            # Find the count of uncovered models.
+            uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered')
+            for line in lines:
+                uncovered_match = uncovered_regex.match(line)
+                if uncovered_match:
+                    uncovered_models = int(uncovered_match.groups()[0])
+                    break
+
+            # Find a message which suggests the check failed.
+            failure_regex = re.compile(r'^Coverage threshold not met!')
+            for line in lines:
+                failure_match = failure_regex.match(line)
+                if failure_match:
+                    pii_check_passed = False
+                    break
+
+            # Each line in lines already contains a newline.
+            full_log = ''.join(lines)
+    else:
+        fail_quality('pii', f'FAILURE: Log file could not be found: {filename}')
+
+    return (uncovered_models, pii_check_passed, full_log)
+
+
+@task
+@needs('pavelib.prereqs.install_python_prereqs')
+@cmdopts([
+    ("report-dir=", "r", "Directory in which to put PII reports"),
+])
+@timed
+def run_pii_check(options):
+    """
+    Guarantee that all Django models are PII-annotated.
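+
+    The task shells out to ``code_annotations django_find_annotations`` once for
+    cms.envs.test and once for lms.envs.test, parses the captured output for
+    lines such as "Coverage found 66 uncovered models:" (see
+    _extract_missing_pii_annotations above), and writes the worst environment's
+    uncovered-model count to the metrics file.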
+ """ + pii_report_name = 'pii' + default_report_dir = (Env.REPORT_DIR / pii_report_name) + report_dir = getattr(options, 'report_dir', default_report_dir) + output_file = os.path.join(report_dir, 'pii_check_{}.report') + env_report = [] + pii_check_passed = True + for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): + try: + print() + print(f"Running {env_name} PII Annotation check and report") + print("-" * 45) + run_output_file = str(output_file).format(env_name.lower()) + sh( + "mkdir -p {} && " # lint-amnesty, pylint: disable=duplicate-string-formatting-argument + "export DJANGO_SETTINGS_MODULE={}; " + "code_annotations django_find_annotations " + "--config_file .pii_annotations.yml --report_path {} --app_name {} " + "--lint --report --coverage | tee {}".format( + report_dir, env_settings_file, report_dir, env_name.lower(), run_output_file + ) + ) + uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) + env_report.append(( + uncovered_model_count, + full_log, + )) + + except BuildFailure as error_message: + fail_quality(pii_report_name, f'FAILURE: {error_message}') + + if not pii_check_passed_env: + pii_check_passed = False + + # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count. + uncovered_count, full_log = max(env_report, key=lambda r: r[0]) + + # Write metric file. + if uncovered_count is None: + uncovered_count = 0 + metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n" + _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name)) + + # Finally, fail the paver task if code_annotations suggests that the check failed. + if not pii_check_passed: + fail_quality('pii', full_log) + + +@task +@needs('pavelib.prereqs.install_python_prereqs') +@timed +def check_keywords(): + """ + Check Django model fields for names that conflict with a list of reserved keywords + """ + report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords') + sh(f"mkdir -p {report_path}") + + overall_status = True + for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: + report_file = f"{env}_reserved_keyword_report.csv" + override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") + try: + sh( + "export DJANGO_SETTINGS_MODULE={settings_file}; " + "python manage.py {app} check_reserved_keywords " + "--override_file {override_file} " + "--report_path {report_path} " + "--report_file {report_file}".format( + settings_file=env_settings_file, app=env, override_file=override_file, + report_path=report_path, report_file=report_file + ) + ) + except BuildFailure: + overall_status = False + + if not overall_status: + fail_quality( + 'keywords', + 'Failure: reserved keyword checker failed. 
Reports can be found here: {}'.format( + report_path + ) + ) diff --git a/pavelib/utils/test/suites/__init__.py b/pavelib/utils/test/suites/__init__.py new file mode 100644 index 000000000000..34ecd49c1c74 --- /dev/null +++ b/pavelib/utils/test/suites/__init__.py @@ -0,0 +1,5 @@ +""" +TestSuite class and subclasses +""" +from .js_suite import JestSnapshotTestSuite, JsTestSuite +from .suite import TestSuite diff --git a/pavelib/utils/test/suites/js_suite.py b/pavelib/utils/test/suites/js_suite.py new file mode 100644 index 000000000000..4e53d454fee5 --- /dev/null +++ b/pavelib/utils/test/suites/js_suite.py @@ -0,0 +1,109 @@ +""" +Javascript test tasks +""" + + +from paver import tasks + +from pavelib.utils.envs import Env +from pavelib.utils.test import utils as test_utils +from pavelib.utils.test.suites.suite import TestSuite + +__test__ = False # do not collect + + +class JsTestSuite(TestSuite): + """ + A class for running JavaScript tests. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.report_dir = Env.JS_REPORT_DIR + self.opts = kwargs + + suite = args[0] + self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)] + + def __enter__(self): + super().__enter__() + if tasks.environment.dry_run: + tasks.environment.info("make report_dir") + else: + self.report_dir.makedirs_p() + if not self.skip_clean: + test_utils.clean_test_files() + + if self.mode == 'run' and not self.run_under_coverage: + test_utils.clean_dir(self.report_dir) + + @property + def _default_subsuites(self): + """ + Returns all JS test suites + """ + return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot'] + + +class JsTestSubSuite(TestSuite): + """ + Class for JS suites like cms, cms-squire, lms, common, + common-requirejs and xmodule + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.test_id = args[0] + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.port = kwargs.get('port') + self.root = self.root + ' javascript' + self.report_dir = Env.JS_REPORT_DIR + + try: + self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)] + except ValueError: + self.test_conf_file = Env.KARMA_CONFIG_FILES[0] + + self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml' + self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml' + + @property + def cmd(self): + """ + Run the tests using karma runner. + """ + cmd = [ + "node", + "--max_old_space_size=4096", + "node_modules/.bin/karma", + "start", + self.test_conf_file, + "--single-run={}".format('false' if self.mode == 'dev' else 'true'), + "--capture-timeout=60000", + f"--junitreportpath={self.xunit_report}", + f"--browsers={Env.KARMA_BROWSER}", + ] + + if self.port: + cmd.append(f"--port={self.port}") + + if self.run_under_coverage: + cmd.extend([ + "--coverage", + f"--coveragereportpath={self.coverage_report}", + ]) + + return cmd + + +class JestSnapshotTestSuite(TestSuite): + """ + A class for running Jest Snapshot tests. + """ + @property + def cmd(self): + """ + Run the tests using Jest. 
+ """ + return ["jest"] diff --git a/pavelib/utils/test/suites/suite.py b/pavelib/utils/test/suites/suite.py new file mode 100644 index 000000000000..5a423c827c21 --- /dev/null +++ b/pavelib/utils/test/suites/suite.py @@ -0,0 +1,149 @@ +""" +A class used for defining and running test suites +""" + + +import os +import subprocess +import sys + +from paver import tasks + +from pavelib.utils.process import kill_process + +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text + +__test__ = False # do not collect + + +class TestSuite: + """ + TestSuite is a class that defines how groups of tests run. + """ + def __init__(self, *args, **kwargs): + self.root = args[0] + self.subsuites = kwargs.get('subsuites', []) + self.failed_suites = [] + self.verbosity = int(kwargs.get('verbosity', 1)) + self.skip_clean = kwargs.get('skip_clean', False) + self.passthrough_options = kwargs.get('passthrough_options', []) + + def __enter__(self): + """ + This will run before the test suite is run with the run_suite_tests method. + If self.run_test is called directly, it should be run in a 'with' block to + ensure that the proper context is created. + + Specific setup tasks should be defined in each subsuite. + + i.e. Checking for and defining required directories. + """ + print(f"\nSetting up for {self.root}") + self.failed_suites = [] + + def __exit__(self, exc_type, exc_value, traceback): + """ + This is run after the tests run with the run_suite_tests method finish. + Specific clean up tasks should be defined in each subsuite. + + If self.run_test is called directly, it should be run in a 'with' block + to ensure that clean up happens properly. + + i.e. Cleaning mongo after the lms tests run. + """ + print(f"\nCleaning up after {self.root}") + + @property + def cmd(self): + """ + The command to run tests (as a string). For this base class there is none. + """ + return None + + @staticmethod + def is_success(exit_code): + """ + Determine if the given exit code represents a success of the test + suite. By default, only a zero counts as a success. + """ + return exit_code == 0 + + def run_test(self): + """ + Runs a self.cmd in a subprocess and waits for it to finish. + It returns False if errors or failures occur. Otherwise, it + returns True. 
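+
+        In dry-run mode the joined command is only echoed through
+        tasks.environment.info and nothing is executed.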
+ """ + cmd = " ".join(self.cmd) + + if tasks.environment.dry_run: + tasks.environment.info(cmd) + return + + sys.stdout.write(cmd) + + msg = colorize( + 'green', + '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40), + ) + + sys.stdout.write(msg) + sys.stdout.flush() + + if 'TEST_SUITE' not in os.environ: + os.environ['TEST_SUITE'] = self.root.replace("/", "_") + kwargs = {'shell': True, 'cwd': None} + process = None + + try: + process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with + return self.is_success(process.wait()) + except KeyboardInterrupt: + kill_process(process) + sys.exit(1) + + def run_suite_tests(self): + """ + Runs each of the suites in self.subsuites while tracking failures + """ + # Uses __enter__ and __exit__ for context + with self: + # run the tests for this class, and for all subsuites + if self.cmd: + passed = self.run_test() + if not passed: + self.failed_suites.append(self) + + for suite in self.subsuites: + suite.run_suite_tests() + if suite.failed_suites: + self.failed_suites.extend(suite.failed_suites) + + def report_test_results(self): + """ + Writes a list of failed_suites to sys.stderr + """ + if self.failed_suites: + msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48)) + msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n') + else: + msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48)) + + print(msg) + + def run(self): + """ + Runs the tests in the suite while tracking and reporting failures. + """ + self.run_suite_tests() + + if tasks.environment.dry_run: + return + + self.report_test_results() + + if self.failed_suites: + sys.exit(1) diff --git a/pavelib/utils/test/utils.py b/pavelib/utils/test/utils.py new file mode 100644 index 000000000000..0851251e2222 --- /dev/null +++ b/pavelib/utils/test/utils.py @@ -0,0 +1,91 @@ +""" +Helper functions for test tasks +""" + + +import os + +from paver.easy import cmdopts, sh, task + +from pavelib.utils.envs import Env +from pavelib.utils.timer import timed + + +MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) + +COVERAGE_CACHE_BUCKET = "edx-tools-coverage-caches" +COVERAGE_CACHE_BASEPATH = "test_root/who_tests_what" +COVERAGE_CACHE_BASELINE = "who_tests_what.{}.baseline".format(os.environ.get('WTW_CONTEXT', 'all')) +WHO_TESTS_WHAT_DIFF = "who_tests_what.diff" + + +__test__ = False # do not collect + + +@task +@timed +def clean_test_files(): + """ + Clean fixture files used by tests and .pyc files + """ + sh("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") + # This find command removes all the *.pyc files that aren't in the .git + # directory. See this blog post for more details: + # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html + sh(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") + sh("rm -rf test_root/log/auto_screenshots/*") + sh("rm -rf /tmp/mako_[cl]ms") + + +@task +@timed +def ensure_clean_package_lock(): + """ + Ensure no untracked changes have been made in the current git context. + """ + sh(""" + git diff --name-only --exit-code package-lock.json || + (echo \"Dirty package-lock.json, run 'npm install' and commit the generated changes\" && exit 1) + """) + + +def clean_dir(directory): + """ + Delete all the files from the specified directory. 
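+
+    For example, clean_reports_dir() below calls this on Env.REPORT_DIR so that
+    stale coverage data is removed while the report tree itself is kept.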
+ """ + # We delete the files but preserve the directory structure + # so that coverage.py has a place to put the reports. + sh(f'find {directory} -type f -delete') + + +@task +@cmdopts([ + ('skip-clean', 'C', 'skip cleaning repository before running tests'), + ('skip_clean', None, 'deprecated in favor of skip-clean'), +]) +@timed +def clean_reports_dir(options): + """ + Clean coverage files, to ensure that we don't use stale data to generate reports. + """ + if getattr(options, 'skip_clean', False): + print('--skip-clean is set, skipping...') + return + + # We delete the files but preserve the directory structure + # so that coverage.py has a place to put the reports. + reports_dir = Env.REPORT_DIR.makedirs_p() + clean_dir(reports_dir) + + +@task +@timed +def clean_mongo(): + """ + Clean mongo test databases + """ + sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format( + host=Env.MONGO_HOST, + port=MONGO_PORT_NUM, + repo_root=Env.REPO_ROOT, + )) diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh new file mode 100755 index 000000000000..54b9cbb9d500 --- /dev/null +++ b/scripts/generic-ci-tests.sh @@ -0,0 +1,122 @@ +#!/usr/bin/env bash +set -e + +############################################################################### +# +# generic-ci-tests.sh +# +# Execute some tests for edx-platform. +# (Most other tests are run by invoking `pytest`, `pylint`, etc. directly) +# +# This script can be called from CI jobs that define +# these environment variables: +# +# `TEST_SUITE` defines which kind of test to run. +# Possible values are: +# +# - "quality": Run the quality (pycodestyle/pylint) checks +# - "js-unit": Run the JavaScript tests +# - "pavelib-js-unit": Run the JavaScript tests and the Python unit +# tests from the pavelib/lib directory +# +############################################################################### + +# Clean up previous builds +git clean -qxfd + +function emptyxunit { + + cat > "reports/$1.xml" < + + + +END + +} + +# if specified tox environment is supported, prepend paver commands +# with tox env invocation +if [ -z ${TOX_ENV+x} ] || [[ ${TOX_ENV} == 'null' ]]; then + echo "TOX_ENV: ${TOX_ENV}" + TOX="" +elif tox -l |grep -q "${TOX_ENV}"; then + if [[ "${TOX_ENV}" == 'quality' ]]; then + TOX="" + else + TOX="tox -r -e ${TOX_ENV} --" + fi +else + echo "${TOX_ENV} is not currently supported. Please review the" + echo "tox.ini file to see which environments are supported" + exit 1 +fi + +PAVER_ARGS="-v" +export SUBSET_JOB=$JOB_NAME + +function run_paver_quality { + QUALITY_TASK=$1 + shift + mkdir -p test_root/log/ + LOG_PREFIX="test_root/log/$QUALITY_TASK" + $TOX paver "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || { + echo "STDOUT (last 100 lines of $LOG_PREFIX.out.log):"; + tail -n 100 "$LOG_PREFIX.out.log" + echo "STDERR (last 100 lines of $LOG_PREFIX.err.log):"; + tail -n 100 "$LOG_PREFIX.err.log" + return 1; + } + return 0; +} + +case "$TEST_SUITE" in + + "quality") + EXIT=0 + + mkdir -p reports + + echo "Finding pycodestyle violations and storing report..." + run_paver_quality run_pep8 || { EXIT=1; } + echo "Finding ESLint violations and storing report..." + run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } + echo "Finding Stylelint violations and storing report..." + run_paver_quality run_stylelint || { EXIT=1; } + echo "Running xss linter report." + run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } + echo "Running PII checker on all Django models..." 
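+        # Each run_paver_quality call wraps "$TOX paver <task>", redirecting the
+        # task's stdout/stderr to test_root/log/<task>.out.log and .err.log and
+        # tailing both on failure; "|| { EXIT=1; }" records the failure but lets
+        # the remaining checks run.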
+ run_paver_quality run_pii_check || { EXIT=1; } + echo "Running reserved keyword checker on all Django models..." + run_paver_quality check_keywords || { EXIT=1; } + + # Need to create an empty test result so the post-build + # action doesn't fail the build. + emptyxunit "stub" + exit "$EXIT" + ;; + + "js-unit") + $TOX paver test_js --coverage + $TOX paver diff_coverage + ;; + + "pavelib-js-unit") + EXIT=0 + $TOX paver test_js --coverage --skip-clean || { EXIT=1; } + paver test_lib --skip-clean $PAVER_ARGS || { EXIT=1; } + + # This is to ensure that the build status of the shard is properly set. + # Because we are running two paver commands in a row, we need to capture + # their return codes in order to exit with a non-zero code if either of + # them fail. We put the || clause there because otherwise, when a paver + # command fails, this entire script will exit, and not run the second + # paver command in this case statement. So instead of exiting, the value + # of a variable named EXIT will be set to 1 if either of the paver + # commands fail. We then use this variable's value as our exit code. + # Note that by default the value of this variable EXIT is not set, so if + # neither command fails then the exit command resolves to simply exit + # which is considered successful. + exit "$EXIT" + ;; +esac From a6248e7143718b188ae5c1cc94a030cdcc64abba Mon Sep 17 00:00:00 2001 From: "Kyle D. McCormick" Date: Fri, 25 Oct 2024 14:41:04 -0400 Subject: [PATCH 3/3] temp: coverage-js violation --- xmodule/js/spec/capa/display_spec.js | 1105 -------------- xmodule/js/spec/capa/imageinput_spec.js | 138 -- xmodule/js/spec/video/async_process_spec.js | 81 - xmodule/js/spec/video/completion_spec.js | 89 -- xmodule/js/spec/video/events_spec.js | 104 -- xmodule/js/spec/video/general_spec.js | 239 --- xmodule/js/spec/video/html5_video_spec.js | 384 ----- xmodule/js/spec/video/initialize_spec.js | 330 ---- xmodule/js/spec/video/iterator_spec.js | 105 -- xmodule/js/spec/video/resizer_spec.js | 270 ---- xmodule/js/spec/video/sjson_spec.js | 67 - xmodule/js/spec/video/social_share_spec.js | 44 - .../js/spec/video/video_autoadvance_spec.js | 110 -- xmodule/js/spec/video/video_bumper_spec.js | 108 -- xmodule/js/spec/video/video_caption_spec.js | 1331 ----------------- .../js/spec/video/video_context_menu_spec.js | 439 ------ xmodule/js/spec/video/video_control_spec.js | 524 ------- .../video/video_events_bumper_plugin_spec.js | 158 -- .../js/spec/video/video_events_plugin_spec.js | 259 ---- .../js/spec/video/video_focus_grabber_spec.js | 108 -- .../js/spec/video/video_full_screen_spec.js | 99 -- .../video/video_play_pause_control_spec.js | 63 - .../spec/video/video_play_placeholder_spec.js | 149 -- .../video/video_play_skip_control_spec.js | 60 - xmodule/js/spec/video/video_player_spec.js | 1101 -------------- xmodule/js/spec/video/video_poster_spec.js | 43 - .../spec/video/video_progress_slider_spec.js | 326 ---- .../spec/video/video_quality_control_spec.js | 123 -- .../video/video_save_state_plugin_spec.js | 278 ---- .../js/spec/video/video_skip_control_spec.js | 47 - .../js/spec/video/video_speed_control_spec.js | 231 --- xmodule/js/spec/video/video_storage_spec.js | 86 -- .../video/video_transcript_feedback_spec.js | 271 ---- .../spec/video/video_volume_control_spec.js | 326 ---- 34 files changed, 9196 deletions(-) delete mode 100644 xmodule/js/spec/capa/display_spec.js delete mode 100644 xmodule/js/spec/capa/imageinput_spec.js delete mode 100644 xmodule/js/spec/video/async_process_spec.js delete mode 100644 
xmodule/js/spec/video/completion_spec.js delete mode 100644 xmodule/js/spec/video/events_spec.js delete mode 100644 xmodule/js/spec/video/general_spec.js delete mode 100644 xmodule/js/spec/video/html5_video_spec.js delete mode 100644 xmodule/js/spec/video/initialize_spec.js delete mode 100644 xmodule/js/spec/video/iterator_spec.js delete mode 100644 xmodule/js/spec/video/resizer_spec.js delete mode 100644 xmodule/js/spec/video/sjson_spec.js delete mode 100644 xmodule/js/spec/video/social_share_spec.js delete mode 100644 xmodule/js/spec/video/video_autoadvance_spec.js delete mode 100644 xmodule/js/spec/video/video_bumper_spec.js delete mode 100644 xmodule/js/spec/video/video_caption_spec.js delete mode 100644 xmodule/js/spec/video/video_context_menu_spec.js delete mode 100644 xmodule/js/spec/video/video_control_spec.js delete mode 100644 xmodule/js/spec/video/video_events_bumper_plugin_spec.js delete mode 100644 xmodule/js/spec/video/video_events_plugin_spec.js delete mode 100644 xmodule/js/spec/video/video_focus_grabber_spec.js delete mode 100644 xmodule/js/spec/video/video_full_screen_spec.js delete mode 100644 xmodule/js/spec/video/video_play_pause_control_spec.js delete mode 100644 xmodule/js/spec/video/video_play_placeholder_spec.js delete mode 100644 xmodule/js/spec/video/video_play_skip_control_spec.js delete mode 100644 xmodule/js/spec/video/video_player_spec.js delete mode 100644 xmodule/js/spec/video/video_poster_spec.js delete mode 100644 xmodule/js/spec/video/video_progress_slider_spec.js delete mode 100644 xmodule/js/spec/video/video_quality_control_spec.js delete mode 100644 xmodule/js/spec/video/video_save_state_plugin_spec.js delete mode 100644 xmodule/js/spec/video/video_skip_control_spec.js delete mode 100644 xmodule/js/spec/video/video_speed_control_spec.js delete mode 100644 xmodule/js/spec/video/video_storage_spec.js delete mode 100644 xmodule/js/spec/video/video_transcript_feedback_spec.js delete mode 100644 xmodule/js/spec/video/video_volume_control_spec.js diff --git a/xmodule/js/spec/capa/display_spec.js b/xmodule/js/spec/capa/display_spec.js deleted file mode 100644 index 385bd2d24333..000000000000 --- a/xmodule/js/spec/capa/display_spec.js +++ /dev/null @@ -1,1105 +0,0 @@ -/* - * decaffeinate suggestions: - * DS101: Remove unnecessary use of Array.from - * DS207: Consider shorter variations of null checks - * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md - */ -describe('Problem', function() { - const problem_content_default = readFixtures('problem_content.html'); - - beforeEach(function() { - // Stub MathJax - window.MathJax = { - Hub: jasmine.createSpyObj('MathJax.Hub', ['getAllJax', 'Queue']), - Callback: jasmine.createSpyObj('MathJax.Callback', ['After']) - }; - this.stubbedJax = {root: jasmine.createSpyObj('jax.root', ['toMathML'])}; - MathJax.Hub.getAllJax.and.returnValue([this.stubbedJax]); - window.update_schematics = function() {}; - spyOn(SR, 'readText'); - spyOn(SR, 'readTexts'); - - // Load this function from spec/helper.js - // Note that if your test fails with a message like: - // 'External request attempted for blah, which is not defined.' - // this msg is coming from the stubRequests function else clause. 
- jasmine.stubRequests(); - - loadFixtures('problem.html'); - - spyOn(Logger, 'log'); - spyOn($.fn, 'load').and.callFake(function(url, callback) { - $(this).html(readFixtures('problem_content.html')); - return callback(); - }); - }); - - describe('constructor', function() { - - it('set the element from html', function() { - this.problem999 = new Problem((`\ -
\ -
\ -
\ -
\ -`) - ); - expect(this.problem999.element_id).toBe('problem_999'); - }); - - it('set the element from loadFixtures', function() { - this.problem1 = new Problem($('.xblock-student_view')); - expect(this.problem1.element_id).toBe('problem_1'); - }); - }); - - describe('bind', function() { - beforeEach(function() { - spyOn(window, 'update_schematics'); - MathJax.Hub.getAllJax.and.returnValue([this.stubbedJax]); - this.problem = new Problem($('.xblock-student_view')); - }); - - it('set mathjax typeset', () => expect(MathJax.Hub.Queue).toHaveBeenCalled()); - - it('update schematics', () => expect(window.update_schematics).toHaveBeenCalled()); - - it('bind answer refresh on button click', function() { - expect($('div.action button')).toHandleWith('click', this.problem.refreshAnswers); - }); - - it('bind the submit button', function() { - expect($('.action .submit')).toHandleWith('click', this.problem.submit_fd); - }); - - it('bind the reset button', function() { - expect($('div.action button.reset')).toHandleWith('click', this.problem.reset); - }); - - it('bind the show button', function() { - expect($('.action .show')).toHandleWith('click', this.problem.show); - }); - - it('bind the save button', function() { - expect($('div.action button.save')).toHandleWith('click', this.problem.save); - }); - - it('bind the math input', function() { - expect($('input.math')).toHandleWith('keyup', this.problem.refreshMath); - }); - }); - - describe('bind_with_custom_input_id', function() { - beforeEach(function() { - spyOn(window, 'update_schematics'); - MathJax.Hub.getAllJax.and.returnValue([this.stubbedJax]); - this.problem = new Problem($('.xblock-student_view')); - return $(this).html(readFixtures('problem_content_1240.html')); - }); - - it('bind the submit button', function() { - expect($('.action .submit')).toHandleWith('click', this.problem.submit_fd); - }); - - it('bind the show button', function() { - expect($('div.action button.show')).toHandleWith('click', this.problem.show); - }); - }); - - - describe('renderProgressState', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - }); - - const testProgessData = function(problem, score, total_possible, attempts, graded, expected_progress_after_render) { - problem.el.data('problem-score', score); - problem.el.data('problem-total-possible', total_possible); - problem.el.data('attempts-used', attempts); - problem.el.data('graded', graded); - expect(problem.$('.problem-progress').html()).toEqual(""); - problem.renderProgressState(); - expect(problem.$('.problem-progress').html()).toEqual(expected_progress_after_render); - }; - - describe('with a status of "none"', function() { - it('reports the number of points possible and graded', function() { - testProgessData(this.problem, 0, 1, 0, "True", "1 point possible (graded)"); - }); - - it('displays the number of points possible when rendering happens with the content', function() { - testProgessData(this.problem, 0, 2, 0, "True", "2 points possible (graded)"); - }); - - it('reports the number of points possible and ungraded', function() { - testProgessData(this.problem, 0, 1, 0, "False", "1 point possible (ungraded)"); - }); - - it('displays ungraded if number of points possible is 0', function() { - testProgessData(this.problem, 0, 0, 0, "False", "0 points possible (ungraded)"); - }); - - it('displays ungraded if number of points possible is 0, even if graded value is True', function() { - testProgessData(this.problem, 0, 0, 0, "True", "0 points possible 
(ungraded)"); - }); - - it('reports the correct score with status none and >0 attempts', function() { - testProgessData(this.problem, 0, 1, 1, "True", "0/1 point (graded)"); - }); - - it('reports the correct score with >1 weight, status none, and >0 attempts', function() { - testProgessData(this.problem, 0, 2, 2, "True", "0/2 points (graded)"); - }); - }); - - describe('with any other valid status', function() { - - it('reports the current score', function() { - testProgessData(this.problem, 1, 1, 1, "True", "1/1 point (graded)"); - }); - - it('shows current score when rendering happens with the content', function() { - testProgessData(this.problem, 2, 2, 1, "True", "2/2 points (graded)"); - }); - - it('reports the current score even if problem is ungraded', function() { - testProgessData(this.problem, 1, 1, 1, "False", "1/1 point (ungraded)"); - }); - }); - - describe('with valid status and string containing an integer like "0" for detail', () => - // These tests are to address a failure specific to Chrome 51 and 52 + - it('shows 0 points possible for the detail', function() { - testProgessData(this.problem, 0, 0, 1, "False", "0 points possible (ungraded)"); - }) - ); - - describe('with a score of null (show_correctness == false)', function() { - it('reports the number of points possible and graded, results hidden', function() { - testProgessData(this.problem, null, 1, 0, "True", "1 point possible (graded, results hidden)"); - }); - - it('reports the number of points possible (plural) and graded, results hidden', function() { - testProgessData(this.problem, null, 2, 0, "True", "2 points possible (graded, results hidden)"); - }); - - it('reports the number of points possible and ungraded, results hidden', function() { - testProgessData(this.problem, null, 1, 0, "False", "1 point possible (ungraded, results hidden)"); - }); - - it('displays ungraded if number of points possible is 0, results hidden', function() { - testProgessData(this.problem, null, 0, 0, "False", "0 points possible (ungraded, results hidden)"); - }); - - it('displays ungraded if number of points possible is 0, even if graded value is True, results hidden', function() { - testProgessData(this.problem, null, 0, 0, "True", "0 points possible (ungraded, results hidden)"); - }); - - it('reports the correct score with status none and >0 attempts, results hidden', function() { - testProgessData(this.problem, null, 1, 1, "True", "1 point possible (graded, results hidden)"); - }); - - it('reports the correct score with >1 weight, status none, and >0 attempts, results hidden', function() { - testProgessData(this.problem, null, 2, 2, "True", "2 points possible (graded, results hidden)"); - }); - }); - }); - - describe('render', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.bind = this.problem.bind; - spyOn(this.problem, 'bind'); - }); - - describe('with content given', function() { - beforeEach(function() { - this.problem.render('Hello World'); - }); - - it('render the content', function() { - expect(this.problem.el.html()).toEqual('Hello World'); - }); - - it('re-bind the content', function() { - expect(this.problem.bind).toHaveBeenCalled(); - }); - }); - - describe('with no content given', function() { - beforeEach(function() { - spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({html: "Hello World"})); - this.problem.render(); - }); - - it('load the content via ajax', function() { - expect(this.problem.el.html()).toEqual('Hello World'); - }); - - 
it('re-bind the content', function() { - expect(this.problem.bind).toHaveBeenCalled(); - }); - }); - }); - - describe('submit_fd', function() { - beforeEach(function() { - // Insert an input of type file outside of the problem. - $('.xblock-student_view').after(''); - this.problem = new Problem($('.xblock-student_view')); - spyOn(this.problem, 'submit'); - }); - - it('submit method is called if input of type file is not in problem', function() { - this.problem.submit_fd(); - expect(this.problem.submit).toHaveBeenCalled(); - }); - }); - - describe('submit', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.problem.answers = 'foo=1&bar=2'; - }); - - it('log the problem_check event', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - this.problem.submit(); - expect(Logger.log).toHaveBeenCalledWith('problem_check', 'foo=1&bar=2'); - }); - - it('log the problem_graded event, after the problem is done grading.', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - const response = { - success: 'correct', - contents: 'mock grader response' - }; - callback(response); - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - this.problem.submit(); - expect(Logger.log).toHaveBeenCalledWith('problem_graded', ['foo=1&bar=2', 'mock grader response'], this.problem.id); - }); - - it('submit the answer for submit', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - this.problem.submit(); - expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_check', - 'foo=1&bar=2', jasmine.any(Function)); - }); - - describe('when the response is correct', () => - it('call render with returned content', function() { - const contents = '
Correctexcellent' +
-                    'Yepcorrect';
-                spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) {
-                    let promise;
-                    callback({success: 'correct', contents});
-                    promise = {
-                        always(callable) { return callable(); },
-                        done(callable) { return callable(); }
-                    };
-                    return promise;
-                });
-                this.problem.submit();
-                expect(this.problem.el).toHaveHtml(contents);
-                expect(window.SR.readTexts).toHaveBeenCalledWith(['Question 1: excellent', 'Question 2: correct']);
-            })
-        );
-
-        describe('when the response is incorrect', () =>
-            it('call render with returned content', function() {
-                const contents = 'Incorrectno, try again
'; - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - callback({success: 'incorrect', contents}); - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - this.problem.submit(); - expect(this.problem.el).toHaveHtml(contents); - expect(window.SR.readTexts).toHaveBeenCalledWith(['no, try again']); - }) - ); - - it('tests if the submit button is disabled while submitting and the text changes on the button', function() { - const self = this; - const curr_html = this.problem.el.html(); - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - // At this point enableButtons should have been called, making the submit button disabled with text 'submitting' - let promise; - expect(self.problem.submitButton).toHaveAttr('disabled'); - expect(self.problem.submitButtonLabel.text()).toBe('Submitting'); - callback({ - success: 'incorrect', // does not matter if correct or incorrect here - contents: curr_html - }); - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - // Make sure the submit button is enabled before submitting - $('#input_example_1').val('test').trigger('input'); - expect(this.problem.submitButton).not.toHaveAttr('disabled'); - this.problem.submit(); - // After submit, the button should not be disabled and should have text as 'Submit' - expect(this.problem.submitButtonLabel.text()).toBe('Submit'); - expect(this.problem.submitButton).not.toHaveAttr('disabled'); - }); - }); - - describe('submit button on problems', function() { - - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.submitDisabled = disabled => { - if (disabled) { - expect(this.problem.submitButton).toHaveAttr('disabled'); - } else { - expect(this.problem.submitButton).not.toHaveAttr('disabled'); - } - }; - }); - - describe('some basic tests for submit button', () => - it('should become enabled after a value is entered into the text box', function() { - $('#input_example_1').val('test').trigger('input'); - this.submitDisabled(false); - $('#input_example_1').val('').trigger('input'); - this.submitDisabled(true); - }) - ); - - describe('some advanced tests for submit button', function() { - const radioButtonProblemHtml = readFixtures('radiobutton_problem.html'); - const checkboxProblemHtml = readFixtures('checkbox_problem.html'); - - it('should become enabled after a checkbox is checked', function() { - $('#input_example_1').replaceWith(checkboxProblemHtml); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(true); - $('#input_1_1_1').click(); - this.submitDisabled(false); - $('#input_1_1_1').click(); - this.submitDisabled(true); - }); - - it('should become enabled after a radiobutton is checked', function() { - $('#input_example_1').replaceWith(radioButtonProblemHtml); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(true); - $('#input_1_1_1').attr('checked', true).trigger('click'); - this.submitDisabled(false); - $('#input_1_1_1').attr('checked', false).trigger('click'); - this.submitDisabled(true); - }); - - it('should become enabled after a value is selected in a selector', function() { - const html = `\ -
\ -`; - $('#input_example_1').replaceWith(html); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(true); - $("#problem_sel select").val("val2").trigger('change'); - this.submitDisabled(false); - $("#problem_sel select").val("val0").trigger('change'); - this.submitDisabled(true); - }); - - it('should become enabled after a radiobutton is checked and a value is entered into the text box', function() { - $(radioButtonProblemHtml).insertAfter('#input_example_1'); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(true); - $('#input_1_1_1').attr('checked', true).trigger('click'); - this.submitDisabled(true); - $('#input_example_1').val('111').trigger('input'); - this.submitDisabled(false); - $('#input_1_1_1').attr('checked', false).trigger('click'); - this.submitDisabled(true); - }); - - it('should become enabled if there are only hidden input fields', function() { - const html = `\ -\ -`; - $('#input_example_1').replaceWith(html); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(false); - }); - }); - }); - - describe('reset', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - }); - - it('log the problem_reset event', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.answers = 'foo=1&bar=2'; - this.problem.reset(); - expect(Logger.log).toHaveBeenCalledWith('problem_reset', 'foo=1&bar=2'); - }); - - it('POST to the problem reset page', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.reset(); - expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_reset', - { id: 'i4x://edX/101/problem/Problem1' }, jasmine.any(Function)); - }); - - it('render the returned content', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - callback({html: "Reset", success: true}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.reset(); - expect(this.problem.el.html()).toEqual('Reset'); - }); - - it('sends a message to the window SR element', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - callback({html: "Reset", success: true}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.reset(); - expect(window.SR.readText).toHaveBeenCalledWith('This problem has been reset.'); - }); - - it('shows a notification on error', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - callback({msg: "Error on reset.", success: false}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.reset(); - expect($('.notification-gentle-alert .notification-message').text()).toEqual("Error on reset."); - }); - - it('tests that reset does not enable submit or modify the text while resetting', function() { - const self = this; - const curr_html = this.problem.el.html(); - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - // enableButtons should have been called at this point to set them to all disabled - let promise; - expect(self.problem.submitButton).toHaveAttr('disabled'); - 
expect(self.problem.submitButtonLabel.text()).toBe('Submit');
-                callback({success: 'correct', html: curr_html});
-                promise =
-                    {always(callable) { return callable(); }};
-                return promise;
-            });
-            // Submit should be disabled
-            expect(this.problem.submitButton).toHaveAttr('disabled');
-            this.problem.reset();
-            // Submit should remain disabled
-            expect(self.problem.submitButton).toHaveAttr('disabled');
-            expect(self.problem.submitButtonLabel.text()).toBe('Submit');
-        });
-    });
-
-    describe('show problem with column in id', function() {
-        beforeEach(function () {
-            this.problem = new Problem($('.xblock-student_view'));
-            this.problem.el.prepend('
');
-        });
-
-        it('log the problem_show event', function() {
-            this.problem.show();
-            expect(Logger.log).toHaveBeenCalledWith('problem_show',
-                {problem: 'i4x://edX/101/problem/Problem1'});
-        });
-
-        it('fetch the answers', function() {
-            spyOn($, 'postWithPrefix');
-            this.problem.show();
-            expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_show',
-                jasmine.any(Function));
-        });
-
-        it('show the answers', function() {
-            spyOn($, 'postWithPrefix').and.callFake(
-                (url, callback) => callback({answers: {'1_1:11': 'One', '1_2:12': 'Two'}})
-            );
-            this.problem.show();
-            expect($("#answer_1_1\\:11")).toHaveHtml('One');
-            expect($("#answer_1_2\\:12")).toHaveHtml('Two');
-        });
-
-        it('disables the show answer button', function() {
-            spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {}}));
-            this.problem.show();
-            expect(this.problem.el.find('.show').attr('disabled')).toEqual('disabled');
-        });
-    });
-
-    describe('show', function() {
-        beforeEach(function() {
-            this.problem = new Problem($('.xblock-student_view'));
-            this.problem.el.prepend('
');
-        });
-
-        describe('when the answer has not yet shown', function() {
-            beforeEach(function() {
-                expect(this.problem.el.find('.show').attr('disabled')).not.toEqual('disabled');
-            });
-
-            it('log the problem_show event', function() {
-                this.problem.show();
-                expect(Logger.log).toHaveBeenCalledWith('problem_show',
-                    {problem: 'i4x://edX/101/problem/Problem1'});
-            });
-
-            it('fetch the answers', function() {
-                spyOn($, 'postWithPrefix');
-                this.problem.show();
-                expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_show',
-                    jasmine.any(Function));
-            });
-
-            it('show the answers', function() {
-                spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {'1_1': 'One', '1_2': 'Two'}}));
-                this.problem.show();
-                expect($('#answer_1_1')).toHaveHtml('One');
-                expect($('#answer_1_2')).toHaveHtml('Two');
-            });
-
-            it('disables the show answer button', function() {
-                spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {}}));
-                this.problem.show();
-                expect(this.problem.el.find('.show').attr('disabled')).toEqual('disabled');
-            });
-
-            describe('radio text question', function() {
-                const radio_text_xml=`\
\ -`; - beforeEach(function() { - // Append a radiotextresponse problem to the problem, so we can check it's javascript functionality - this.problem.el.prepend(radio_text_xml); - }); - - it('sets the correct class on the section for the correct choice', function() { - spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {"1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3"}})); - this.problem.show(); - - expect($('#forinput1_2_1_choiceinput_0bc').attr('class')).toEqual( - 'choicetextgroup_show_correct'); - expect($('#answer_1_2_1_choiceinput_0bc').text()).toEqual('3'); - expect($('#answer_1_2_1_choiceinput_1bc').text()).toEqual(''); - expect($('#answer_1_2_1_choiceinput_2bc').text()).toEqual(''); - }); - - it('Should not disable input fields', function() { - spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {"1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3"}})); - this.problem.show(); - expect($('input#1_2_1_choiceinput_0bc').attr('disabled')).not.toEqual('disabled'); - expect($('input#1_2_1_choiceinput_1bc').attr('disabled')).not.toEqual('disabled'); - expect($('input#1_2_1_choiceinput_2bc').attr('disabled')).not.toEqual('disabled'); - expect($('input#1_2_1').attr('disabled')).not.toEqual('disabled'); - }); - }); - - describe('imageinput', function() { - let el, height, width; - const imageinput_html = readFixtures('imageinput.underscore'); - - const DEFAULTS = { - id: '12345', - width: '300', - height: '400' - }; - - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.problem.el.prepend(_.template(imageinput_html)(DEFAULTS)); - }); - - const assertAnswer = (problem, data) => { - stubRequest(data); - problem.show(); - - $.each(data['answers'], (id, answer) => { - const img = getImage(answer); - el = $(`#inputtype_${id}`); - expect(img).toImageDiffEqual(el.find('canvas')[0]); - }); - }; - - var stubRequest = data => { - spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback(data)); - }; - - var getImage = (coords, c_width, c_height) => { - let ctx, reg; - const types = { - rectangle: coords => { - reg = /^\(([0-9]+),([0-9]+)\)-\(([0-9]+),([0-9]+)\)$/; - const rects = coords.replace(/\s*/g, '').split(/;/); - - $.each(rects, (index, rect) => { - const { abs } = Math; - const points = reg.exec(rect); - if (points) { - width = abs(points[3] - points[1]); - height = abs(points[4] - points[2]); - - return ctx.rect(points[1], points[2], width, height); - } - }); - - ctx.stroke(); - ctx.fill(); - }, - - regions: coords => { - const parseCoords = coords => { - reg = JSON.parse(coords); - - if (typeof reg[0][0][0] === "undefined") { - reg = [reg]; - } - - return reg; - }; - - return $.each(parseCoords(coords), (index, region) => { - ctx.beginPath(); - $.each(region, (index, point) => { - if (index === 0) { - return ctx.moveTo(point[0], point[1]); - } else { - return ctx.lineTo(point[0], point[1]); - } - }); - - ctx.closePath(); - ctx.stroke(); - ctx.fill(); - }); - } - }; - - const canvas = document.createElement('canvas'); - canvas.width = c_width || 100; - canvas.height = c_height || 100; - - if (canvas.getContext) { - ctx = canvas.getContext('2d'); - } else { - console.log('Canvas is not supported.'); - } - - ctx.fillStyle = 'rgba(255,255,255,.3)'; - ctx.strokeStyle = "#FF0000"; - ctx.lineWidth = "2"; - - $.each(coords, (key, value) => { - if ((types[key] != null) && value) { return types[key](value); } - }); - - return canvas; - }; - - it('rectangle is drawn 
correctly', function() { - assertAnswer(this.problem, { - 'answers': { - '12345': { - 'rectangle': '(10,10)-(30,30)', - 'regions': null - } - } - }); - }); - - it('region is drawn correctly', function() { - assertAnswer(this.problem, { - 'answers': { - '12345': { - 'rectangle': null, - 'regions': '[[10,10],[30,30],[70,30],[20,30]]' - } - } - }); - }); - - it('mixed shapes are drawn correctly', function() { - assertAnswer(this.problem, { - 'answers': {'12345': { - 'rectangle': '(10,10)-(30,30);(5,5)-(20,20)', - 'regions': `[ - [[50,50],[40,40],[70,30],[50,70]], - [[90,95],[95,95],[90,70],[70,70]] -]` - } - } - }); - }); - - it('multiple image inputs draw answers on separate canvases', function() { - const data = { - id: '67890', - width: '400', - height: '300' - }; - - this.problem.el.prepend(_.template(imageinput_html)(data)); - assertAnswer(this.problem, { - 'answers': { - '12345': { - 'rectangle': null, - 'regions': '[[10,10],[30,30],[70,30],[20,30]]' - }, - '67890': { - 'rectangle': '(10,10)-(30,30)', - 'regions': null - } - } - }); - }); - - it('dictionary with answers doesn\'t contain answer for current id', function() { - spyOn(console, 'log'); - stubRequest({'answers':{}}); - this.problem.show(); - el = $('#inputtype_12345'); - expect(el.find('canvas')).not.toExist(); - expect(console.log).toHaveBeenCalledWith('Answer is absent for image input with id=12345'); - }); - }); - }); - }); - - describe('save', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.problem.answers = 'foo=1&bar=2'; - }); - - it('log the problem_save event', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.save(); - expect(Logger.log).toHaveBeenCalledWith('problem_save', 'foo=1&bar=2'); - }); - - it('POST to save problem', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.save(); - expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_save', - 'foo=1&bar=2', jasmine.any(Function)); - }); - - it('tests that save does not enable the submit button or change the text when submit is originally disabled', function() { - const self = this; - const curr_html = this.problem.el.html(); - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - // enableButtons should have been called at this point and the submit button should be unaffected - let promise; - expect(self.problem.submitButton).toHaveAttr('disabled'); - expect(self.problem.submitButtonLabel.text()).toBe('Submit'); - callback({success: 'correct', html: curr_html}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - // Expect submit to be disabled and labeled properly at the start - expect(this.problem.submitButton).toHaveAttr('disabled'); - expect(this.problem.submitButtonLabel.text()).toBe('Submit'); - this.problem.save(); - // Submit button should have the same state after save has completed - expect(this.problem.submitButton).toHaveAttr('disabled'); - expect(this.problem.submitButtonLabel.text()).toBe('Submit'); - }); - - it('tests that save does not disable the submit button or change the text when submit is originally enabled', function() { - const self = this; - const curr_html = this.problem.el.html(); - spyOn($, 
'postWithPrefix').and.callFake(function(url, answers, callback) { - // enableButtons should have been called at this point, and the submit button should be disabled while submitting - let promise; - expect(self.problem.submitButton).toHaveAttr('disabled'); - expect(self.problem.submitButtonLabel.text()).toBe('Submit'); - callback({success: 'correct', html: curr_html}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - // Expect submit to be enabled and labeled properly at the start after adding an input - $('#input_example_1').val('test').trigger('input'); - expect(this.problem.submitButton).not.toHaveAttr('disabled'); - expect(this.problem.submitButtonLabel.text()).toBe('Submit'); - this.problem.save(); - // Submit button should have the same state after save has completed - expect(this.problem.submitButton).not.toHaveAttr('disabled'); - expect(this.problem.submitButtonLabel.text()).toBe('Submit'); - }); - }); - - describe('refreshMath', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - $('#input_example_1').val('E=mc^2'); - this.problem.refreshMath({target: $('#input_example_1').get(0)}); - }); - - it('should queue the conversion and MathML element update', function() { - expect(MathJax.Hub.Queue).toHaveBeenCalledWith(['Text', this.stubbedJax, 'E=mc^2'], - [this.problem.updateMathML, this.stubbedJax, $('#input_example_1').get(0)]); - }); -}); - - describe('updateMathML', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.stubbedJax.root.toMathML.and.returnValue(''); - }); - - describe('when there is no exception', function() { - beforeEach(function() { - this.problem.updateMathML(this.stubbedJax, $('#input_example_1').get(0)); - }); - - it('convert jax to MathML', () => expect($('#input_example_1_dynamath')).toHaveValue('')); - }); - - describe('when there is an exception', function() { - beforeEach(function() { - const error = new Error(); - error.restart = true; - this.stubbedJax.root.toMathML.and.throwError(error); - this.problem.updateMathML(this.stubbedJax, $('#input_example_1').get(0)); - }); - - it('should queue up the exception', function() { - expect(MathJax.Callback.After).toHaveBeenCalledWith([this.problem.refreshMath, this.stubbedJax], true); - }); - }); - }); - - describe('refreshAnswers', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.problem.el.html(`\ -