diff --git a/.github/workflows/js-tests-paver.yml b/.github/workflows/js-tests-paver.yml new file mode 100644 index 000000000000..566063fdfd22 --- /dev/null +++ b/.github/workflows/js-tests-paver.yml @@ -0,0 +1,84 @@ +name: Javascript tests PAVER + +on: + pull_request: + push: + branches: + - master + +jobs: + run_tests: + name: JS + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + node-version: [18, 20] + python-version: + - "3.11" + + steps: + - uses: actions/checkout@v4 + - name: Fetch master to compare coverage + run: git fetch --depth=1 origin master + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + + - name: Setup npm + run: npm i -g npm@10.5.x + + - name: Install Firefox 123.0 + run: | + sudo apt-get purge firefox + wget "https://ftp.mozilla.org/pub/firefox/releases/123.0/linux-x86_64/en-US/firefox-123.0.tar.bz2" + tar -xjf firefox-123.0.tar.bz2 + sudo mv firefox /opt/firefox + sudo ln -s /opt/firefox/firefox /usr/bin/firefox + + - name: Install Required System Packages + run: sudo apt-get update && sudo apt-get install libxmlsec1-dev ubuntu-restricted-extras xvfb + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Get pip cache dir + id: pip-cache-dir + run: | + echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + + - name: Cache pip dependencies + id: cache-dependencies + uses: actions/cache@v4 + with: + path: ${{ steps.pip-cache-dir.outputs.dir }} + key: ${{ runner.os }}-pip-${{ hashFiles('requirements/edx/base.txt') }} + restore-keys: ${{ runner.os }}-pip- + + - name: Install Required Python Dependencies + run: | + make base-requirements + + - uses: c-hive/gha-npm-cache@v1 + - name: Run JS Tests + env: + TEST_SUITE: js-unit + SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh + run: | + npm install -g jest + xvfb-run --auto-servernum ./scripts/all-tests.sh + + - name: Save Job Artifacts + uses: actions/upload-artifact@v4 + with: + name: Build-Artifacts + path: | + reports/**/* + test_root/log/*.png + test_root/log/*.log + **/TEST-*.xml + overwrite: true diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index c9d2d7ab1191..eb8fb3ddc561 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -64,13 +64,15 @@ jobs: make base-requirements - uses: c-hive/gha-npm-cache@v1 + + - name: Install npm + run: npm ci + - name: Run JS Tests - env: - TEST_SUITE: js-unit - SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh run: | npm install -g jest - xvfb-run --auto-servernum ./scripts/all-tests.sh + xvfb-run --auto-servernum make test-js + make coverage-js - name: Save Job Artifacts uses: actions/upload-artifact@v4 diff --git a/.github/workflows/quality-checks-paver.yml b/.github/workflows/quality-checks-paver.yml new file mode 100644 index 000000000000..beb9fea8007f --- /dev/null +++ b/.github/workflows/quality-checks-paver.yml @@ -0,0 +1,82 @@ +name: Quality checks PAVER + +on: + pull_request: + push: + branches: + - master + - open-release/lilac.master + +jobs: + run_tests: + name: Quality Others + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-22.04] + python-version: + - "3.11" + node-version: [20] + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: Fetch base branch for comparison + run: git fetch --depth=1 origin ${{ github.base_ref }} + + - name: Install Required System Packages + run: sudo apt-get update && sudo apt-get install libxmlsec1-dev + + - name: 
Setup Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + + - name: Setup npm + run: npm i -g npm@8.5.x + + - name: Get pip cache dir + id: pip-cache-dir + run: | + echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + + - name: Cache pip dependencies + id: cache-dependencies + uses: actions/cache@v4 + with: + path: ${{ steps.pip-cache-dir.outputs.dir }} + key: ${{ runner.os }}-pip-${{ hashFiles('requirements/edx/testing.txt') }} + restore-keys: ${{ runner.os }}-pip- + + - name: Install Required Python Dependencies + env: + PIP_SRC: ${{ runner.temp }} + run: | + make test-requirements + + - name: Run Quality Tests + env: + TEST_SUITE: quality + SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh + PIP_SRC: ${{ runner.temp }} + TARGET_BRANCH: ${{ github.base_ref }} + run: | + ./scripts/all-tests.sh + + - name: Save Job Artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: Build-Artifacts + path: | + **/reports/**/* + test_root/log/**/*.log + *.log + overwrite: true diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 84610123493c..510059a9d62a 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -60,16 +60,30 @@ jobs: PIP_SRC: ${{ runner.temp }} run: | make test-requirements - + + - name: Install npm + env: + PIP_SRC: ${{ runner.temp }} + run: npm ci + + - name: Install python packages + env: + PIP_SRC: ${{ runner.temp }} + run: | + pip install -e . + - name: Run Quality Tests env: - TEST_SUITE: quality - SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh PIP_SRC: ${{ runner.temp }} TARGET_BRANCH: ${{ github.base_ref }} run: | - ./scripts/all-tests.sh - + make pycodestyle + make eslint + make stylelint + make xsslint + make pii_check + make check_keywords + - name: Save Job Artifacts if: always() uses: actions/upload-artifact@v4 diff --git a/Makefile b/Makefile index 15bab5df67a9..0fc07aab8f13 100644 --- a/Makefile +++ b/Makefile @@ -204,3 +204,29 @@ migrate: migrate-lms migrate-cms # Part of https://github.com/openedx/wg-developer-experience/issues/136 ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip install` to work on ubuntu. sudo apt install libmysqlclient-dev libxmlsec1-dev + +eslint: ## check javascript for quality issues + python scripts/quality_test.py eslint + +stylelint: ## check css/scss for quality issues + python scripts/quality_test.py stylelint + +xsslint: ## check xss for quality issues + python scripts/quality_test.py xsslint + +pycodestyle: ## check python files for quality issues + pycodestyle . 
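+
+# Example (hypothetical local invocation of the targets added in this hunk):
+#   make pycodestyle          # python style checks only
+#   make quality              # pycodestyle, eslint, stylelint, xsslint, pii_check, check_keywords
+#   make test-js coverage-js  # karma suites, then the JS diff-coverage report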
+
+pii_check: ## check django models for pii annotations
+	python scripts/quality_test.py pii_check
+
+check_keywords: ## check django models for reserved keywords
+	python scripts/quality_test.py check_keywords
+
+test-js: ## run javascript tests
+	python scripts/js_test.py --option jstest
+
+coverage-js: ## run javascript coverage test
+	python scripts/js_test.py --option coverage
+
+quality: pycodestyle eslint stylelint xsslint pii_check check_keywords
\ No newline at end of file
diff --git a/scripts/js_test.py b/scripts/js_test.py
new file mode 100644
index 000000000000..69be37f602fe
--- /dev/null
+++ b/scripts/js_test.py
@@ -0,0 +1,492 @@
+"""
+Javascript test tasks
+"""
+
+import click
+import os
+import re
+import signal
+import sys
+import subprocess
+from time import sleep
+
+# psutil and signal are used by TestSuite.kill_process, and sleep is used by
+# Env.repo_root, further down in this file.
+import psutil
+from path import Path as path
+
+try:
+    from pygments.console import colorize
+except ImportError:
+    colorize = lambda color, text: text
+
+__test__ = False  # do not collect
+
+
+class Env:
+    """
+    Load information about the execution environment.
+    """
+
+    @staticmethod
+    def repo_root():
+        """
+        Get the root of the git repository (edx-platform).
+
+        This sometimes fails on Docker Devstack, so it's been broken
+        down with some additional error handling. It usually starts
+        working within 30 seconds or so; for more details, see
+        https://openedx.atlassian.net/browse/PLAT-1629 and
+        https://github.com/docker/for-mac/issues/1509
+        """
+
+        file_path = path(__file__)
+        attempt = 1
+        while True:
+            try:
+                absolute_path = file_path.abspath()
+                break
+            except OSError:
+                print(f'Attempt {attempt}/180 to get an absolute path failed')
+                if attempt < 180:
+                    attempt += 1
+                    sleep(1)
+                else:
+                    print('Unable to determine the absolute path of the edx-platform repo, aborting')
+                    raise
+        return absolute_path.parent.parent
+
+    # Root of the git repository (edx-platform)
+    REPO_ROOT = repo_root()
+
+    # Reports Directory
+    REPORT_DIR = REPO_ROOT / 'reports'
+
+    # Detect if in a Docker container, and if so which one
+    FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0')
+    USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0'
+
+    # Configured browser to use for the js test suites
+    SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox')
+    if USING_DOCKER:
+        KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker'
+    else:
+        KARMA_BROWSER = 'FirefoxNoUpdates'
+
+    # Files used to run each of the js test suites
+    # TODO: Store this as a dict. Order seems to matter for some
+    # reason. See issue TE-415.
+    KARMA_CONFIG_FILES = [
+        REPO_ROOT / 'cms/static/karma_cms.conf.js',
+        REPO_ROOT / 'cms/static/karma_cms_squire.conf.js',
+        REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js',
+        REPO_ROOT / 'lms/static/karma_lms.conf.js',
+        REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js',
+        REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js',
+        REPO_ROOT / 'common/static/karma_common.conf.js',
+        REPO_ROOT / 'common/static/karma_common_requirejs.conf.js',
+    ]
+
+    JS_TEST_ID_KEYS = [
+        'cms',
+        'cms-squire',
+        'cms-webpack',
+        'lms',
+        'xmodule',
+        'xmodule-webpack',
+        'common',
+        'common-requirejs',
+        'jest-snapshot'
+    ]
+
+    JS_REPORT_DIR = REPORT_DIR / 'javascript'
+
+    # Service variant (lms, cms, etc.) configured with an environment variable
+    # We use this to determine which envs.json file to load.
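+    # For example (hypothetical invocation): with SERVICE_VARIANT unset,
+    # `python scripts/js_test.py --option jstest --s cms` resolves the variant
+    # to 'cms' because 'cms' appears in sys.argv; anything else falls back to
+    # 'lms', as implemented just below.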
+ SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) + + # If service variant not configured in env, then pass the correct + # environment for lms / cms + if not SERVICE_VARIANT: # this will intentionally catch ""; + if any(i in sys.argv[1:] for i in ('cms', 'studio')): + SERVICE_VARIANT = 'cms' + else: + SERVICE_VARIANT = 'lms' + + +# def clean_test_files(): +# """ +# Clean fixture files used by tests and .pyc files +# """ +# # "git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads" +# subprocess.run("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") +# # This find command removes all the *.pyc files that aren't in the .git +# # directory. See this blog post for more details: +# # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html +# subprocess.run(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") +# subprocess.run("rm -rf test_root/log/auto_screenshots/*") +# subprocess.run("rm -rf /tmp/mako_[cl]ms") + + +# def clean_dir(directory): +# """ +# Delete all the files from the specified directory. +# """ +# # We delete the files but preserve the directory structure +# # so that coverage.py has a place to put the reports. +# subprocess.run(f'find {directory} -type f -delete') + + +# @task +# @cmdopts([ +# ('skip-clean', 'C', 'skip cleaning repository before running tests'), +# ('skip_clean', None, 'deprecated in favor of skip-clean'), +# ]) + +# def clean_reports_dir(options): +# """ +# Clean coverage files, to ensure that we don't use stale data to generate reports. +# """ +# if getattr(options, 'skip_clean', False): +# print('--skip-clean is set, skipping...') +# return + +# # We delete the files but preserve the directory structure +# # so that coverage.py has a place to put the reports. +# reports_dir = Env.REPORT_DIR.makedirs_p() +# clean_dir(reports_dir) + + +class TestSuite: + """ + TestSuite is a class that defines how groups of tests run. + """ + def __init__(self, *args, **kwargs): + self.root = args[0] + self.subsuites = kwargs.get('subsuites', []) + self.failed_suites = [] + self.verbosity = int(kwargs.get('verbosity', 1)) + self.skip_clean = kwargs.get('skip_clean', False) + self.passthrough_options = kwargs.get('passthrough_options', []) + + def __enter__(self): + """ + This will run before the test suite is run with the run_suite_tests method. + If self.run_test is called directly, it should be run in a 'with' block to + ensure that the proper context is created. + + Specific setup tasks should be defined in each subsuite. + + i.e. Checking for and defining required directories. + """ + print(f"\nSetting up for {self.root}") + self.failed_suites = [] + + def __exit__(self, exc_type, exc_value, traceback): + """ + This is run after the tests run with the run_suite_tests method finish. + Specific clean up tasks should be defined in each subsuite. + + If self.run_test is called directly, it should be run in a 'with' block + to ensure that clean up happens properly. + + i.e. Cleaning mongo after the lms tests run. + """ + print(f"\nCleaning up after {self.root}") + + @property + def cmd(self): + """ + The command to run tests (as a string). For this base class there is none. + """ + return None + + @staticmethod + def kill_process(proc): + """ + Kill the process `proc` created with `subprocess`. 
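+
+        Example (hypothetical usage; relies on the psutil and signal imports
+        at the top of this file):
+
+            proc = subprocess.Popen(cmd, shell=True)
+            ...
+            TestSuite.kill_process(proc)  # SIGKILLs every child of proc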
+ """ + p1_group = psutil.Process(proc.pid) + child_pids = p1_group.children(recursive=True) + + for child_pid in child_pids: + os.kill(child_pid.pid, signal.SIGKILL) + + @staticmethod + def is_success(exit_code): + """ + Determine if the given exit code represents a success of the test + suite. By default, only a zero counts as a success. + """ + return exit_code == 0 + + def run_test(self): + """ + Runs a self.cmd in a subprocess and waits for it to finish. + It returns False if errors or failures occur. Otherwise, it + returns True. + """ + # cmd = " ".join(self.cmd) + cmd = " ".join(str(part) for part in self.cmd) + sys.stdout.write(cmd) + + msg = colorize( + 'green', + '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40), + ) + + sys.stdout.write(msg) + sys.stdout.flush() + + if 'TEST_SUITE' not in os.environ: + os.environ['TEST_SUITE'] = self.root.replace("/", "_") + kwargs = {'shell': True, 'cwd': None} + process = None + + try: + process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with + return self.is_success(process.wait()) + except KeyboardInterrupt: + self.kill_process(process) + sys.exit(1) + + def run_suite_tests(self): + """ + Runs each of the suites in self.subsuites while tracking failures + """ + # Uses __enter__ and __exit__ for context + with self: + # run the tests for this class, and for all subsuites + if self.cmd: + passed = self.run_test() + if not passed: + self.failed_suites.append(self) + + for suite in self.subsuites: + suite.run_suite_tests() + if suite.failed_suites: + self.failed_suites.extend(suite.failed_suites) + + def report_test_results(self): + """ + Writes a list of failed_suites to sys.stderr + """ + if self.failed_suites: + msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48)) + msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n') + else: + msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48)) + + print(msg) + + def run(self): + """ + Runs the tests in the suite while tracking and reporting failures. + """ + self.run_suite_tests() + + # if tasks.environment.dry_run: + # return + + self.report_test_results() + + if self.failed_suites: + sys.exit(1) + + +class JsTestSuite(TestSuite): + """ + A class for running JavaScript tests. 
+ """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.report_dir = Env.JS_REPORT_DIR + self.opts = kwargs + + suite = args[0] + self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)] + + def __enter__(self): + super().__enter__() + self.report_dir.makedirs_p() + # self.report_dir.mkdir(exist_ok=True) + # if not self.skip_clean: + # test_utils.clean_test_files() + + # if self.mode == 'run' and not self.run_under_coverage: + # test_utils.clean_dir(self.report_dir) + + @property + def _default_subsuites(self): + """ + Returns all JS test suites + """ + return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot'] + + +class JsTestSubSuite(TestSuite): + """ + Class for JS suites like cms, cms-squire, lms, common, + common-requirejs and xmodule + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.test_id = args[0] + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.port = kwargs.get('port') + self.root = self.root + ' javascript' + self.report_dir = Env.JS_REPORT_DIR + + try: + self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)] + except ValueError: + self.test_conf_file = Env.KARMA_CONFIG_FILES[0] + + self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml' + self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml' + + @property + def cmd(self): + """ + Run the tests using karma runner. + """ + cmd = [ + "node", + "--max_old_space_size=4096", + "node_modules/.bin/karma", + "start", + self.test_conf_file, + "--single-run={}".format('false' if self.mode == 'dev' else 'true'), + "--capture-timeout=60000", + f"--junitreportpath={self.xunit_report}", + f"--browsers={Env.KARMA_BROWSER}", + ] + + if self.port: + cmd.append(f"--port={self.port}") + + if self.run_under_coverage: + cmd.extend([ + "--coverage", + f"--coveragereportpath={self.coverage_report}", + ]) + + return cmd + + +class JestSnapshotTestSuite(TestSuite): + """ + A class for running Jest Snapshot tests. + """ + @property + def cmd(self): + """ + Run the tests using Jest. + """ + return ["jest"] + + +def test_js(suite, mode, coverage, port, skip_clean): + """ + Run the JavaScript tests + """ + + if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): + sys.stderr.write( + "Unknown test suite. 
Please choose from ({suites})\n".format(
+                suites=", ".join(Env.JS_TEST_ID_KEYS)
+            )
+        )
+        return
+
+    if suite != 'jest-snapshot':
+        test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean)
+        test_suite.run()
+
+    if (suite == 'jest-snapshot') or (suite == 'all'):  # lint-amnesty, pylint: disable=consider-using-in
+        test_suite = JestSnapshotTestSuite('jest')
+        test_suite.run()
+
+
+# @needs('pavelib.prereqs.install_coverage_prereqs')
+# @cmdopts([
+#     ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
+# ], share_with=['coverage'])
+
+def diff_coverage():
+    """
+    Build the diff coverage reports
+    """
+
+    compare_branch = 'origin/master'
+
+    # Find all coverage XML files (both Python and JavaScript)
+    xml_reports = []
+    for filepath in Env.REPORT_DIR.walk():
+        if bool(re.match(r'^coverage.*\.xml$', filepath.basename())):
+            xml_reports.append(filepath)
+
+    if not xml_reports:
+        err_msg = colorize(
+            'red',
+            "No coverage info found. Run `quality test` before running "
+            "`coverage test`.\n"
+        )
+        sys.stderr.write(err_msg)
+    else:
+        xml_report_str = ' '.join(xml_reports)
+        diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html')
+
+        # Generate the diff coverage reports (HTML and console)
+        # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153
+        # Note the trailing space inside each f-string segment: without it the
+        # concatenated shell command would run the arguments together.
+        command = (
+            f"diff-cover {xml_report_str} "
+            f"--diff-range-notation '..' "
+            f"--compare-branch={compare_branch} "
+            f"--html-report {diff_html_path}"
+        )
+        subprocess.run(command,
+                       shell=True,
+                       check=False,
+                       stdout=subprocess.PIPE,
+                       stderr=subprocess.PIPE,
+                       text=True)
+
+
+@click.command("main")
+@click.option(
+    '--option', 'option',
+    help='Run javascript tests or coverage test as per given option'
+)
+@click.option(
+    '--s', 'suite',
+    default='all',
+    help='Test suite to run.'
+)
+@click.option(
+    '--m', 'mode',
+    default='run',
+    help='dev or run'
+)
+@click.option(
+    '--coverage', 'coverage',
+    default=True,
+    help='Run test under coverage'
+)
+@click.option(
+    '--p', 'port',
+    default=None,
+    help='Port to run test server on (dev mode only)'
+)
+@click.option(
+    '--C', 'skip_clean',
+    default=False,
+    help='skip cleaning repository before running tests'
+)
+def main(option, suite, mode, coverage, port, skip_clean):
+    if option == 'jstest':
+        test_js(suite, mode, coverage, port, skip_clean)
+    elif option == 'coverage':
+        diff_coverage()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/quality_test.py b/scripts/quality_test.py
new file mode 100644
index 000000000000..fb7d1e481eb9
--- /dev/null
+++ b/scripts/quality_test.py
@@ -0,0 +1,547 @@
+"""  # lint-amnesty, pylint: disable=django-not-configured
+Check code quality using pycodestyle, pylint, and diff_quality.
+"""
+
+import argparse
+import glob
+import json
+import os
+import re
+import sys
+import subprocess
+import shutil
+from pathlib import Path
+from time import sleep
+
+try:
+    from pygments.console import colorize
+except ImportError:
+    colorize = lambda color, text: text
+
+
+class BuildFailure(Exception):
+    """Represents a problem with some part of the build's execution."""
+
+
+def fail_quality(name, message):
+    """
+    Fail the specified quality check.
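+
+    For example, fail_quality('eslint', 'FAILURE: ...') prints both strings
+    and exits with status 1, so the surrounding CI job is marked failed.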
+ """ + print(name) + print(message) + sys.exit() + + +def _prepare_report_dir(dir_name): + """ + Sets a given directory to a created, but empty state + """ + if os.path.isdir(dir_name): + shutil.rmtree(dir_name) + os.makedirs(dir_name, exist_ok=True) + + +def repo_root(): + """ + Get the root of the git repository (edx-platform). + + This sometimes fails on Docker Devstack, so it's been broken + down with some additional error handling. It usually starts + working within 30 seconds or so; for more details, see + https://openedx.atlassian.net/browse/PLAT-1629 and + https://github.com/docker/for-mac/issues/1509 + """ + + file_path = Path(__file__) + max_attempts = 180 + for attempt in range(1, max_attempts + 1): + try: + absolute_path = file_path.resolve(strict=True) + return absolute_path.parents[1] + except OSError: + print(f'Attempt {attempt}/{max_attempts} to get an absolute path failed') + if attempt < max_attempts: + sleep(1) + else: + print('Unable to determine the absolute path of the edx-platform repo, aborting') + raise RuntimeError('Could not determine the repository root after multiple attempts') + + +def _get_report_contents(filename, report_name, last_line_only=False): + """ + Returns the contents of the given file. Use last_line_only to only return + the last line, which can be used for getting output from quality output + files. + + Arguments: + last_line_only: True to return the last line only, False to return a + string with full contents. + + Returns: + String containing full contents of the report, or the last line. + + """ + if os.path.isfile(filename): + with open(filename) as report_file: + if last_line_only: + lines = report_file.readlines() + for line in reversed(lines): + if line != '\n': + return line + return None + else: + return report_file.read() + else: + file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" + fail_quality(report_name, file_not_found_message) + + +def _get_count_from_last_line(filename, file_type): + """ + This will return the number in the last line of a file. + It is returning only the value (as a floating number). + """ + report_contents = _get_report_contents(filename, file_type, last_line_only=True) + if report_contents is None: + return 0 + + last_line = report_contents.strip() + # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" + regex = r'^\d+' + + try: + return float(re.search(regex, last_line).group(0)) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + return None + + +def _get_stylelint_violations(): + """ + Returns the number of Stylelint violations. 
+ """ + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + stylelint_report_dir = (REPORT_DIR / "stylelint") + stylelint_report = stylelint_report_dir / "stylelint.report" + _prepare_report_dir(stylelint_report_dir) + + command = [ + 'node', 'node_modules/stylelint', + '*scss_files', + '--custom-formatter', 'stylelint-formatter-pretty/index.js' + ] + + with open(stylelint_report, 'w') as report_file: + subprocess.run( + command, + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) + + try: + return int(_get_count_from_last_line(stylelint_report, "stylelint")) + except TypeError: + fail_quality( + 'stylelint', + "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( + stylelint_report=stylelint_report + ) + ) + + +def run_eslint(): + """ + Runs eslint on static asset directories. + If limit option is passed, fails build if more violations than the limit are found. + """ + + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + eslint_report_dir = REPORT_DIR / "eslint" + eslint_report = eslint_report_dir / "eslint.report" + _prepare_report_dir(eslint_report_dir) + violations_limit = 4950 + + command = [ + "node", + "--max_old_space_size=4096", + "node_modules/.bin/eslint", + "--ext", ".js", + "--ext", ".jsx", + "--format=compact", + "." + ] + + with open(eslint_report, 'w') as report_file: + subprocess.run( + command, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True, + check=False + ) + + try: + num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) + except TypeError: + fail_quality( + 'eslint', + "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( + eslint_report=eslint_report + ) + ) + + # Fail if number of violations is greater than the limit + if num_violations > violations_limit > -1: + fail_quality( + 'eslint', + "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, violations_limit=violations_limit + ) + ) + else: + print("successfully run eslint with violations") + print(num_violations) + + +def run_stylelint(): + """ + Runs stylelint on Sass files. + If limit option is passed, fails build if more violations than the limit are found. + """ + + violations_limit = 0 + num_violations = _get_stylelint_violations() + # Fail if number of violations is greater than the limit + if num_violations > violations_limit: + fail_quality( + 'stylelint', + "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, + violations_limit=violations_limit, + ) + ) + else: + print("successfully run stylelint with violations") + print(num_violations) + + +def _extract_missing_pii_annotations(filename): + """ + Returns the number of uncovered models from the stdout report of django_find_annotations. + + Arguments: + filename: Filename where stdout of django_find_annotations was captured. + + Returns: + three-tuple containing: + 1. The number of uncovered models, + 2. A bool indicating whether the coverage is still below the threshold, and + 3. The full report as a string. + """ + uncovered_models = 0 + pii_check_passed = True + if os.path.isfile(filename): + with open(filename) as report_file: + lines = report_file.readlines() + + # Find the count of uncovered models. 
+            uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered')
+            for line in lines:
+                uncovered_match = uncovered_regex.match(line)
+                if uncovered_match:
+                    uncovered_models = int(uncovered_match.groups()[0])
+                    break
+
+            # Find a message which suggests the check failed.
+            failure_regex = re.compile(r'^Coverage threshold not met!')
+            for line in lines:
+                failure_match = failure_regex.match(line)
+                if failure_match:
+                    pii_check_passed = False
+                    break
+
+            # Each line in lines already contains a newline.
+            full_log = ''.join(lines)
+    else:
+        fail_quality('pii', f'FAILURE: Log file could not be found: {filename}')
+
+    return (uncovered_models, pii_check_passed, full_log)
+
+
+def run_pii_check():
+    """
+    Guarantee that all Django models are PII-annotated.
+    """
+    REPO_ROOT = repo_root()
+    REPORT_DIR = REPO_ROOT / 'reports'
+    pii_report_name = 'pii'
+    default_report_dir = (REPORT_DIR / pii_report_name)
+    report_dir = default_report_dir
+    output_file = os.path.join(report_dir, 'pii_check_{}.report')
+    env_report = []
+    pii_check_passed = True
+
+    for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")):
+        try:
+            print(f"Running {env_name} PII Annotation check and report")
+            print("-" * 45)
+
+            run_output_file = str(output_file).format(env_name.lower())
+            os.makedirs(report_dir, exist_ok=True)
+
+            # Prepare the environment for the command
+            env = {
+                **os.environ,  # Include the current environment variables
+                "DJANGO_SETTINGS_MODULE": env_settings_file  # Set DJANGO_SETTINGS_MODULE for each environment
+            }
+
+            command = [
+                "code_annotations",
+                "django_find_annotations",
+                "--config_file", ".pii_annotations.yml",
+                "--report_path", str(report_dir),
+                "--app_name", env_name.lower()
+            ]
+
+            # Run the command without shell=True
+            with open(run_output_file, 'w') as report_file:
+                subprocess.run(
+                    command,
+                    env=env,  # Pass the environment with DJANGO_SETTINGS_MODULE
+                    check=True,
+                    stdout=report_file,
+                    stderr=subprocess.STDOUT,
+                    text=True
+                )
+
+            # Extract results
+            uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file)
+            env_report.append((
+                uncovered_model_count,
+                full_log,
+            ))
+
+        except subprocess.CalledProcessError as error:
+            # subprocess.run(check=True) raises CalledProcessError on a
+            # non-zero exit; BuildFailure is never raised by this block.
+            fail_quality(pii_report_name, f'FAILURE: {error}')
+
+        # Update pii_check_passed based on the result of the current environment
+        if not pii_check_passed_env:
+            pii_check_passed = False
+
+    # If the PII check failed in any environment, fail the task
+    if not pii_check_passed:
+        fail_quality('pii', full_log)
+    else:
+        print("Successfully ran pii_check")
+
+
+def check_keywords():
+    """
+    Check Django model fields for names that conflict with a list of reserved keywords
+    """
+    REPO_ROOT = repo_root()
+    REPORT_DIR = REPO_ROOT / 'reports'
+    report_path = REPORT_DIR / 'reserved_keywords'
+    report_path.mkdir(parents=True, exist_ok=True)
+
+    overall_status = True
+    for env_name, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]:
+        report_file_path = report_path / f"{env_name}_reserved_keyword_report.csv"
+        override_file = os.path.join(REPO_ROOT, "db_keyword_overrides.yml")
+        try:
+            env = {
+                **os.environ,  # Include the current environment variables
+                "DJANGO_SETTINGS_MODULE": env_settings_file  # Set DJANGO_SETTINGS_MODULE for each environment
+            }
+            command = [
+                "python", "manage.py", env_name, "check_reserved_keywords",
+                "--override_file", str(override_file),
+                "--report_path", str(report_path),
+                "--report_file", str(report_file_path)
+            ]
+            with open(report_file_path, 'w') as report_file:
+                subprocess.run(
+                    command,
+                    env=env,
+                    check=True,
+                    stdout=report_file,
+                    stderr=subprocess.STDOUT,
+                    text=True
+                )
+        except subprocess.CalledProcessError:
+            # check=True raises CalledProcessError (not BuildFailure) when the
+            # keyword checker exits non-zero.
+            overall_status = False
+
+    if not overall_status:
+        fail_quality(
+            'keywords',
+            'Failure: reserved keyword checker failed. Reports can be found here: {}'.format(
+                report_path
+            )
+        )
+    else:
+        print("Successfully ran check_keywords")
+
+
+def _get_xsslint_counts(filename):
+    """
+    This returns a dict of violations from the xsslint report.
+
+    Arguments:
+        filename: The name of the xsslint report.
+
+    Returns:
+        A dict containing the following:
+            rules: A dict containing the count for each rule as follows:
+                violation-rule-id: N, where N is the number of violations
+            total: M, where M is the number of total violations
+
+    """
+    report_contents = _get_report_contents(filename, 'xsslint')
+    rule_count_regex = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
+    total_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)
+    violations = {'rules': {}}
+    for violation_match in rule_count_regex.finditer(report_contents):
+        try:
+            violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count'))
+        except ValueError:
+            violations['rules'][violation_match.group('rule_id')] = None
+    try:
+        violations['total'] = int(total_count_regex.search(report_contents).group('count'))
+    # An AttributeError will occur if the regex finds no matches.
+    # A ValueError will occur if the returned regex cannot be cast as a float.
+    except (AttributeError, ValueError):
+        violations['total'] = None
+    return violations
+
+
+def run_xsslint():
+    """
+    Runs xsslint/xss_linter.py on the codebase
+    """
+
+    try:
+        thresholds_option = 'scripts/xsslint_thresholds.json'
+        # Read the JSON file
+        with open(thresholds_option, 'r') as file:
+            violation_thresholds = json.load(file)
+
+    except (OSError, ValueError):
+        # Catch a missing/unreadable file as well as malformed JSON, so the
+        # friendly failure message below is reached in either case.
+        violation_thresholds = None
+    if isinstance(violation_thresholds, dict) is False or \
+            any(key not in ("total", "rules") for key in violation_thresholds.keys()):
+
+        fail_quality(
+            'xsslint',
+            """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n"""
+            """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """
+            """with property names in double-quotes.""".format(
+                thresholds_option=thresholds_option
+            )
+        )
+
+    xsslint_script = "xss_linter.py"
+    REPO_ROOT = repo_root()
+    REPORT_DIR = REPO_ROOT / 'reports'
+    xsslint_report_dir = (REPORT_DIR / "xsslint")
+    xsslint_report = xsslint_report_dir / "xsslint.report"
+    _prepare_report_dir(xsslint_report_dir)
+
+    command = [
+        f"{REPO_ROOT}/scripts/xsslint/{xsslint_script}",
+        "--rule-totals",
+        "--config=scripts.xsslint_config"
+    ]
+    with open(xsslint_report, 'w') as report_file:
+        subprocess.run(
+            command,
+            check=True,
+            stdout=report_file,
+            stderr=subprocess.STDOUT,
+            text=True
+        )
+    xsslint_counts = _get_xsslint_counts(xsslint_report)
+
+    try:
+        metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format(
+            xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total'])
+        )
+        if 'rules' in xsslint_counts and any(xsslint_counts['rules']):
+            metrics_str += "\n"
+            rule_keys = sorted(xsslint_counts['rules'].keys())
+            for rule in rule_keys:
+                metrics_str += "{rule} violations: {count}\n".format(
+                    rule=rule,
+                    count=int(xsslint_counts['rules'][rule])
+                )
+    except TypeError:
+        fail_quality(
+            'xsslint',
+            "FAILURE: Number of {xsslint_script} 
violations could not be found in {xsslint_report}".format( + xsslint_script=xsslint_script, xsslint_report=xsslint_report + ) + ) + + error_message = "" + # Test total violations against threshold. + if 'total' in list(violation_thresholds.keys()): + if violation_thresholds['total'] < xsslint_counts['total']: + error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( + count=xsslint_counts['total'], violations_limit=violation_thresholds['total'] + ) + + # Test rule violations against thresholds. + if 'rules' in violation_thresholds: + threshold_keys = sorted(violation_thresholds['rules'].keys()) + for threshold_key in threshold_keys: + if threshold_key not in xsslint_counts['rules']: + error_message += ( + "\nNumber of {xsslint_script} violations for {rule} could not be found in " + "{xsslint_report}." + ).format( + xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report + ) + elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]: + error_message += \ + "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( + rule=threshold_key, count=xsslint_counts['rules'][threshold_key], + violations_limit=violation_thresholds['rules'][threshold_key], + ) + + if error_message: + fail_quality( + 'xsslint', + "FAILURE: XSSLinter Failed.\n{error_message}\n" + "See {xsslint_report} or run the following command to hone in on the problem:\n" + " ./scripts/xss-commit-linter.sh -h".format( + error_message=error_message, xsslint_report=xsslint_report + ) + ) + else: + print("successfully run xsslint") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("command", choices=['eslint', 'stylelint', + 'xsslint', 'pii_check', 'check_keywords']) + + argument = parser.parse_args() + + if argument.command == 'eslint': + run_eslint() + + elif argument.command == 'stylelint': + run_stylelint() + + elif argument.command == 'xsslint': + run_xsslint() + + elif argument.command == 'pii_check': + run_pii_check() + + elif argument.command == 'check_keywords': + check_keywords() diff --git a/xmodule/js/spec/capa/display_spec.js b/xmodule/js/spec/capa/display_spec.js deleted file mode 100644 index 385bd2d24333..000000000000 --- a/xmodule/js/spec/capa/display_spec.js +++ /dev/null @@ -1,1105 +0,0 @@ -/* - * decaffeinate suggestions: - * DS101: Remove unnecessary use of Array.from - * DS207: Consider shorter variations of null checks - * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md - */ -describe('Problem', function() { - const problem_content_default = readFixtures('problem_content.html'); - - beforeEach(function() { - // Stub MathJax - window.MathJax = { - Hub: jasmine.createSpyObj('MathJax.Hub', ['getAllJax', 'Queue']), - Callback: jasmine.createSpyObj('MathJax.Callback', ['After']) - }; - this.stubbedJax = {root: jasmine.createSpyObj('jax.root', ['toMathML'])}; - MathJax.Hub.getAllJax.and.returnValue([this.stubbedJax]); - window.update_schematics = function() {}; - spyOn(SR, 'readText'); - spyOn(SR, 'readTexts'); - - // Load this function from spec/helper.js - // Note that if your test fails with a message like: - // 'External request attempted for blah, which is not defined.' - // this msg is coming from the stubRequests function else clause. 
- jasmine.stubRequests(); - - loadFixtures('problem.html'); - - spyOn(Logger, 'log'); - spyOn($.fn, 'load').and.callFake(function(url, callback) { - $(this).html(readFixtures('problem_content.html')); - return callback(); - }); - }); - - describe('constructor', function() { - - it('set the element from html', function() { - this.problem999 = new Problem((`\ -
\ -
\ -
\ -
\ -`) - ); - expect(this.problem999.element_id).toBe('problem_999'); - }); - - it('set the element from loadFixtures', function() { - this.problem1 = new Problem($('.xblock-student_view')); - expect(this.problem1.element_id).toBe('problem_1'); - }); - }); - - describe('bind', function() { - beforeEach(function() { - spyOn(window, 'update_schematics'); - MathJax.Hub.getAllJax.and.returnValue([this.stubbedJax]); - this.problem = new Problem($('.xblock-student_view')); - }); - - it('set mathjax typeset', () => expect(MathJax.Hub.Queue).toHaveBeenCalled()); - - it('update schematics', () => expect(window.update_schematics).toHaveBeenCalled()); - - it('bind answer refresh on button click', function() { - expect($('div.action button')).toHandleWith('click', this.problem.refreshAnswers); - }); - - it('bind the submit button', function() { - expect($('.action .submit')).toHandleWith('click', this.problem.submit_fd); - }); - - it('bind the reset button', function() { - expect($('div.action button.reset')).toHandleWith('click', this.problem.reset); - }); - - it('bind the show button', function() { - expect($('.action .show')).toHandleWith('click', this.problem.show); - }); - - it('bind the save button', function() { - expect($('div.action button.save')).toHandleWith('click', this.problem.save); - }); - - it('bind the math input', function() { - expect($('input.math')).toHandleWith('keyup', this.problem.refreshMath); - }); - }); - - describe('bind_with_custom_input_id', function() { - beforeEach(function() { - spyOn(window, 'update_schematics'); - MathJax.Hub.getAllJax.and.returnValue([this.stubbedJax]); - this.problem = new Problem($('.xblock-student_view')); - return $(this).html(readFixtures('problem_content_1240.html')); - }); - - it('bind the submit button', function() { - expect($('.action .submit')).toHandleWith('click', this.problem.submit_fd); - }); - - it('bind the show button', function() { - expect($('div.action button.show')).toHandleWith('click', this.problem.show); - }); - }); - - - describe('renderProgressState', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - }); - - const testProgessData = function(problem, score, total_possible, attempts, graded, expected_progress_after_render) { - problem.el.data('problem-score', score); - problem.el.data('problem-total-possible', total_possible); - problem.el.data('attempts-used', attempts); - problem.el.data('graded', graded); - expect(problem.$('.problem-progress').html()).toEqual(""); - problem.renderProgressState(); - expect(problem.$('.problem-progress').html()).toEqual(expected_progress_after_render); - }; - - describe('with a status of "none"', function() { - it('reports the number of points possible and graded', function() { - testProgessData(this.problem, 0, 1, 0, "True", "1 point possible (graded)"); - }); - - it('displays the number of points possible when rendering happens with the content', function() { - testProgessData(this.problem, 0, 2, 0, "True", "2 points possible (graded)"); - }); - - it('reports the number of points possible and ungraded', function() { - testProgessData(this.problem, 0, 1, 0, "False", "1 point possible (ungraded)"); - }); - - it('displays ungraded if number of points possible is 0', function() { - testProgessData(this.problem, 0, 0, 0, "False", "0 points possible (ungraded)"); - }); - - it('displays ungraded if number of points possible is 0, even if graded value is True', function() { - testProgessData(this.problem, 0, 0, 0, "True", "0 points possible 
(ungraded)"); - }); - - it('reports the correct score with status none and >0 attempts', function() { - testProgessData(this.problem, 0, 1, 1, "True", "0/1 point (graded)"); - }); - - it('reports the correct score with >1 weight, status none, and >0 attempts', function() { - testProgessData(this.problem, 0, 2, 2, "True", "0/2 points (graded)"); - }); - }); - - describe('with any other valid status', function() { - - it('reports the current score', function() { - testProgessData(this.problem, 1, 1, 1, "True", "1/1 point (graded)"); - }); - - it('shows current score when rendering happens with the content', function() { - testProgessData(this.problem, 2, 2, 1, "True", "2/2 points (graded)"); - }); - - it('reports the current score even if problem is ungraded', function() { - testProgessData(this.problem, 1, 1, 1, "False", "1/1 point (ungraded)"); - }); - }); - - describe('with valid status and string containing an integer like "0" for detail', () => - // These tests are to address a failure specific to Chrome 51 and 52 + - it('shows 0 points possible for the detail', function() { - testProgessData(this.problem, 0, 0, 1, "False", "0 points possible (ungraded)"); - }) - ); - - describe('with a score of null (show_correctness == false)', function() { - it('reports the number of points possible and graded, results hidden', function() { - testProgessData(this.problem, null, 1, 0, "True", "1 point possible (graded, results hidden)"); - }); - - it('reports the number of points possible (plural) and graded, results hidden', function() { - testProgessData(this.problem, null, 2, 0, "True", "2 points possible (graded, results hidden)"); - }); - - it('reports the number of points possible and ungraded, results hidden', function() { - testProgessData(this.problem, null, 1, 0, "False", "1 point possible (ungraded, results hidden)"); - }); - - it('displays ungraded if number of points possible is 0, results hidden', function() { - testProgessData(this.problem, null, 0, 0, "False", "0 points possible (ungraded, results hidden)"); - }); - - it('displays ungraded if number of points possible is 0, even if graded value is True, results hidden', function() { - testProgessData(this.problem, null, 0, 0, "True", "0 points possible (ungraded, results hidden)"); - }); - - it('reports the correct score with status none and >0 attempts, results hidden', function() { - testProgessData(this.problem, null, 1, 1, "True", "1 point possible (graded, results hidden)"); - }); - - it('reports the correct score with >1 weight, status none, and >0 attempts, results hidden', function() { - testProgessData(this.problem, null, 2, 2, "True", "2 points possible (graded, results hidden)"); - }); - }); - }); - - describe('render', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.bind = this.problem.bind; - spyOn(this.problem, 'bind'); - }); - - describe('with content given', function() { - beforeEach(function() { - this.problem.render('Hello World'); - }); - - it('render the content', function() { - expect(this.problem.el.html()).toEqual('Hello World'); - }); - - it('re-bind the content', function() { - expect(this.problem.bind).toHaveBeenCalled(); - }); - }); - - describe('with no content given', function() { - beforeEach(function() { - spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({html: "Hello World"})); - this.problem.render(); - }); - - it('load the content via ajax', function() { - expect(this.problem.el.html()).toEqual('Hello World'); - }); - - 
it('re-bind the content', function() { - expect(this.problem.bind).toHaveBeenCalled(); - }); - }); - }); - - describe('submit_fd', function() { - beforeEach(function() { - // Insert an input of type file outside of the problem. - $('.xblock-student_view').after(''); - this.problem = new Problem($('.xblock-student_view')); - spyOn(this.problem, 'submit'); - }); - - it('submit method is called if input of type file is not in problem', function() { - this.problem.submit_fd(); - expect(this.problem.submit).toHaveBeenCalled(); - }); - }); - - describe('submit', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.problem.answers = 'foo=1&bar=2'; - }); - - it('log the problem_check event', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - this.problem.submit(); - expect(Logger.log).toHaveBeenCalledWith('problem_check', 'foo=1&bar=2'); - }); - - it('log the problem_graded event, after the problem is done grading.', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - const response = { - success: 'correct', - contents: 'mock grader response' - }; - callback(response); - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - this.problem.submit(); - expect(Logger.log).toHaveBeenCalledWith('problem_graded', ['foo=1&bar=2', 'mock grader response'], this.problem.id); - }); - - it('submit the answer for submit', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - this.problem.submit(); - expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_check', - 'foo=1&bar=2', jasmine.any(Function)); - }); - - describe('when the response is correct', () => - it('call render with returned content', function() { - const contents = '

Correctexcellent

' + - '

Yepcorrect

'; - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - callback({success: 'correct', contents}); - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - this.problem.submit(); - expect(this.problem.el).toHaveHtml(contents); - expect(window.SR.readTexts).toHaveBeenCalledWith(['Question 1: excellent', 'Question 2: correct']); - }) - ); - - describe('when the response is incorrect', () => - it('call render with returned content', function() { - const contents = '

Incorrectno, try again

'; - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - callback({success: 'incorrect', contents}); - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - this.problem.submit(); - expect(this.problem.el).toHaveHtml(contents); - expect(window.SR.readTexts).toHaveBeenCalledWith(['no, try again']); - }) - ); - - it('tests if the submit button is disabled while submitting and the text changes on the button', function() { - const self = this; - const curr_html = this.problem.el.html(); - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - // At this point enableButtons should have been called, making the submit button disabled with text 'submitting' - let promise; - expect(self.problem.submitButton).toHaveAttr('disabled'); - expect(self.problem.submitButtonLabel.text()).toBe('Submitting'); - callback({ - success: 'incorrect', // does not matter if correct or incorrect here - contents: curr_html - }); - promise = { - always(callable) { return callable(); }, - done(callable) { return callable(); } - }; - return promise; - }); - // Make sure the submit button is enabled before submitting - $('#input_example_1').val('test').trigger('input'); - expect(this.problem.submitButton).not.toHaveAttr('disabled'); - this.problem.submit(); - // After submit, the button should not be disabled and should have text as 'Submit' - expect(this.problem.submitButtonLabel.text()).toBe('Submit'); - expect(this.problem.submitButton).not.toHaveAttr('disabled'); - }); - }); - - describe('submit button on problems', function() { - - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.submitDisabled = disabled => { - if (disabled) { - expect(this.problem.submitButton).toHaveAttr('disabled'); - } else { - expect(this.problem.submitButton).not.toHaveAttr('disabled'); - } - }; - }); - - describe('some basic tests for submit button', () => - it('should become enabled after a value is entered into the text box', function() { - $('#input_example_1').val('test').trigger('input'); - this.submitDisabled(false); - $('#input_example_1').val('').trigger('input'); - this.submitDisabled(true); - }) - ); - - describe('some advanced tests for submit button', function() { - const radioButtonProblemHtml = readFixtures('radiobutton_problem.html'); - const checkboxProblemHtml = readFixtures('checkbox_problem.html'); - - it('should become enabled after a checkbox is checked', function() { - $('#input_example_1').replaceWith(checkboxProblemHtml); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(true); - $('#input_1_1_1').click(); - this.submitDisabled(false); - $('#input_1_1_1').click(); - this.submitDisabled(true); - }); - - it('should become enabled after a radiobutton is checked', function() { - $('#input_example_1').replaceWith(radioButtonProblemHtml); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(true); - $('#input_1_1_1').attr('checked', true).trigger('click'); - this.submitDisabled(false); - $('#input_1_1_1').attr('checked', false).trigger('click'); - this.submitDisabled(true); - }); - - it('should become enabled after a value is selected in a selector', function() { - const html = `\ -
- -
\ -`; - $('#input_example_1').replaceWith(html); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(true); - $("#problem_sel select").val("val2").trigger('change'); - this.submitDisabled(false); - $("#problem_sel select").val("val0").trigger('change'); - this.submitDisabled(true); - }); - - it('should become enabled after a radiobutton is checked and a value is entered into the text box', function() { - $(radioButtonProblemHtml).insertAfter('#input_example_1'); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(true); - $('#input_1_1_1').attr('checked', true).trigger('click'); - this.submitDisabled(true); - $('#input_example_1').val('111').trigger('input'); - this.submitDisabled(false); - $('#input_1_1_1').attr('checked', false).trigger('click'); - this.submitDisabled(true); - }); - - it('should become enabled if there are only hidden input fields', function() { - const html = `\ -\ -`; - $('#input_example_1').replaceWith(html); - this.problem.submitAnswersAndSubmitButton(true); - this.submitDisabled(false); - }); - }); - }); - - describe('reset', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - }); - - it('log the problem_reset event', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.answers = 'foo=1&bar=2'; - this.problem.reset(); - expect(Logger.log).toHaveBeenCalledWith('problem_reset', 'foo=1&bar=2'); - }); - - it('POST to the problem reset page', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.reset(); - expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_reset', - { id: 'i4x://edX/101/problem/Problem1' }, jasmine.any(Function)); - }); - - it('render the returned content', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - callback({html: "Reset", success: true}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.reset(); - expect(this.problem.el.html()).toEqual('Reset'); - }); - - it('sends a message to the window SR element', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - callback({html: "Reset", success: true}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.reset(); - expect(window.SR.readText).toHaveBeenCalledWith('This problem has been reset.'); - }); - - it('shows a notification on error', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - callback({msg: "Error on reset.", success: false}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.reset(); - expect($('.notification-gentle-alert .notification-message').text()).toEqual("Error on reset."); - }); - - it('tests that reset does not enable submit or modify the text while resetting', function() { - const self = this; - const curr_html = this.problem.el.html(); - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - // enableButtons should have been called at this point to set them to all disabled - let promise; - expect(self.problem.submitButton).toHaveAttr('disabled'); - 
expect(self.problem.submitButtonLabel.text()).toBe('Submit'); - callback({success: 'correct', html: curr_html}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - // Submit should be disabled - expect(this.problem.submitButton).toHaveAttr('disabled'); - this.problem.reset(); - // Submit should remain disabled - expect(self.problem.submitButton).toHaveAttr('disabled'); - expect(self.problem.submitButtonLabel.text()).toBe('Submit'); - }); - }); - - describe('show problem with column in id', function() { - beforeEach(function () { - this.problem = new Problem($('.xblock-student_view')); - this.problem.el.prepend('
'); - }); - - it('log the problem_show event', function() { - this.problem.show(); - expect(Logger.log).toHaveBeenCalledWith('problem_show', - {problem: 'i4x://edX/101/problem/Problem1'}); - }); - - it('fetch the answers', function() { - spyOn($, 'postWithPrefix'); - this.problem.show(); - expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_show', - jasmine.any(Function)); - }); - - it('show the answers', function() { - spyOn($, 'postWithPrefix').and.callFake( - (url, callback) => callback({answers: {'1_1:11': 'One', '1_2:12': 'Two'}}) - ); - this.problem.show(); - expect($("#answer_1_1\\:11")).toHaveHtml('One'); - expect($("#answer_1_2\\:12")).toHaveHtml('Two'); - }); - - it('disables the show answer button', function() { - spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {}})); - this.problem.show(); - expect(this.problem.el.find('.show').attr('disabled')).toEqual('disabled'); - }); - }); - - describe('show', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.problem.el.prepend('
'); - }); - - describe('when the answer has not yet shown', function() { - beforeEach(function() { - expect(this.problem.el.find('.show').attr('disabled')).not.toEqual('disabled'); - }); - - it('log the problem_show event', function() { - this.problem.show(); - expect(Logger.log).toHaveBeenCalledWith('problem_show', - {problem: 'i4x://edX/101/problem/Problem1'}); - }); - - it('fetch the answers', function() { - spyOn($, 'postWithPrefix'); - this.problem.show(); - expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_show', - jasmine.any(Function)); - }); - - it('show the answers', function() { - spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {'1_1': 'One', '1_2': 'Two'}})); - this.problem.show(); - expect($('#answer_1_1')).toHaveHtml('One'); - expect($('#answer_1_2')).toHaveHtml('Two'); - }); - - it('disables the show answer button', function() { - spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {}})); - this.problem.show(); - expect(this.problem.el.find('.show').attr('disabled')).toEqual('disabled'); - }); - - describe('radio text question', function() { - const radio_text_xml=`\ -
- <!-- radiotextresponse fixture: a choicetextgroup with choiceinputs 1_2_1_choiceinput_0bc, _1bc and _2bc; original markup not preserved in this diff -->
\
-`;
- beforeEach(function() {
- // Append a radiotextresponse problem to the problem, so we can check its JavaScript functionality
- this.problem.el.prepend(radio_text_xml);
- });
-
- it('sets the correct class on the section for the correct choice', function() {
- spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {"1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3"}}));
- this.problem.show();
-
- expect($('#forinput1_2_1_choiceinput_0bc').attr('class')).toEqual(
- 'choicetextgroup_show_correct');
- expect($('#answer_1_2_1_choiceinput_0bc').text()).toEqual('3');
- expect($('#answer_1_2_1_choiceinput_1bc').text()).toEqual('');
- expect($('#answer_1_2_1_choiceinput_2bc').text()).toEqual('');
- });
-
- it('Should not disable input fields', function() {
- spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback({answers: {"1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3"}}));
- this.problem.show();
- expect($('input#1_2_1_choiceinput_0bc').attr('disabled')).not.toEqual('disabled');
- expect($('input#1_2_1_choiceinput_1bc').attr('disabled')).not.toEqual('disabled');
- expect($('input#1_2_1_choiceinput_2bc').attr('disabled')).not.toEqual('disabled');
- expect($('input#1_2_1').attr('disabled')).not.toEqual('disabled');
- });
- });
-
- describe('imageinput', function() {
- let el, height, width;
- const imageinput_html = readFixtures('imageinput.underscore');
-
- const DEFAULTS = {
- id: '12345',
- width: '300',
- height: '400'
- };
-
- beforeEach(function() {
- this.problem = new Problem($('.xblock-student_view'));
- this.problem.el.prepend(_.template(imageinput_html)(DEFAULTS));
- });
-
- const assertAnswer = (problem, data) => {
- stubRequest(data);
- problem.show();
-
- $.each(data['answers'], (id, answer) => {
- const img = getImage(answer);
- el = $(`#inputtype_${id}`);
- expect(img).toImageDiffEqual(el.find('canvas')[0]);
- });
- };
-
- var stubRequest = data => {
- spyOn($, 'postWithPrefix').and.callFake((url, callback) => callback(data));
- };
-
- var getImage = (coords, c_width, c_height) => {
- let ctx, reg;
- const types = {
- // a 'rectangle' answer is a string like '(x1,y1)-(x2,y2)'; several
- // rectangles may be joined with ';'
- rectangle: coords => {
- reg = /^\(([0-9]+),([0-9]+)\)-\(([0-9]+),([0-9]+)\)$/;
- const rects = coords.replace(/\s*/g, '').split(/;/);
-
- $.each(rects, (index, rect) => {
- const { abs } = Math;
- const points = reg.exec(rect);
- if (points) {
- width = abs(points[3] - points[1]);
- height = abs(points[4] - points[2]);
-
- return ctx.rect(points[1], points[2], width, height);
- }
- });
-
- ctx.stroke();
- ctx.fill();
- },
-
- // a 'regions' answer is JSON: an array of [x,y] polygon vertices,
- // or an array of such polygons
- regions: coords => {
- const parseCoords = coords => {
- reg = JSON.parse(coords);
-
- if (typeof reg[0][0][0] === "undefined") {
- reg = [reg];
- }
-
- return reg;
- };
-
- return $.each(parseCoords(coords), (index, region) => {
- ctx.beginPath();
- $.each(region, (index, point) => {
- if (index === 0) {
- return ctx.moveTo(point[0], point[1]);
- } else {
- return ctx.lineTo(point[0], point[1]);
- }
- });
-
- ctx.closePath();
- ctx.stroke();
- ctx.fill();
- });
- }
- };
-
- const canvas = document.createElement('canvas');
- canvas.width = c_width || 100;
- canvas.height = c_height || 100;
-
- if (canvas.getContext) {
- ctx = canvas.getContext('2d');
- } else {
- // the specs assume 2d canvas support; ctx stays undefined without it
- console.log('Canvas is not supported.');
- }
-
- ctx.fillStyle = 'rgba(255,255,255,.3)';
- ctx.strokeStyle = "#FF0000";
- ctx.lineWidth = "2";
-
- $.each(coords, (key, value) => {
- if ((types[key] != null) && value) { return types[key](value); }
- });
-
- return canvas;
- };
-
- it('rectangle is drawn 
correctly', function() { - assertAnswer(this.problem, { - 'answers': { - '12345': { - 'rectangle': '(10,10)-(30,30)', - 'regions': null - } - } - }); - }); - - it('region is drawn correctly', function() { - assertAnswer(this.problem, { - 'answers': { - '12345': { - 'rectangle': null, - 'regions': '[[10,10],[30,30],[70,30],[20,30]]' - } - } - }); - }); - - it('mixed shapes are drawn correctly', function() { - assertAnswer(this.problem, { - 'answers': {'12345': { - 'rectangle': '(10,10)-(30,30);(5,5)-(20,20)', - 'regions': `[ - [[50,50],[40,40],[70,30],[50,70]], - [[90,95],[95,95],[90,70],[70,70]] -]` - } - } - }); - }); - - it('multiple image inputs draw answers on separate canvases', function() { - const data = { - id: '67890', - width: '400', - height: '300' - }; - - this.problem.el.prepend(_.template(imageinput_html)(data)); - assertAnswer(this.problem, { - 'answers': { - '12345': { - 'rectangle': null, - 'regions': '[[10,10],[30,30],[70,30],[20,30]]' - }, - '67890': { - 'rectangle': '(10,10)-(30,30)', - 'regions': null - } - } - }); - }); - - it('dictionary with answers doesn\'t contain answer for current id', function() { - spyOn(console, 'log'); - stubRequest({'answers':{}}); - this.problem.show(); - el = $('#inputtype_12345'); - expect(el.find('canvas')).not.toExist(); - expect(console.log).toHaveBeenCalledWith('Answer is absent for image input with id=12345'); - }); - }); - }); - }); - - describe('save', function() { - beforeEach(function() { - this.problem = new Problem($('.xblock-student_view')); - this.problem.answers = 'foo=1&bar=2'; - }); - - it('log the problem_save event', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.save(); - expect(Logger.log).toHaveBeenCalledWith('problem_save', 'foo=1&bar=2'); - }); - - it('POST to save problem', function() { - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - let promise; - promise = - {always(callable) { return callable(); }}; - return promise; - }); - this.problem.save(); - expect($.postWithPrefix).toHaveBeenCalledWith('/problem/Problem1/problem_save', - 'foo=1&bar=2', jasmine.any(Function)); - }); - - it('tests that save does not enable the submit button or change the text when submit is originally disabled', function() { - const self = this; - const curr_html = this.problem.el.html(); - spyOn($, 'postWithPrefix').and.callFake(function(url, answers, callback) { - // enableButtons should have been called at this point and the submit button should be unaffected - let promise; - expect(self.problem.submitButton).toHaveAttr('disabled'); - expect(self.problem.submitButtonLabel.text()).toBe('Submit'); - callback({success: 'correct', html: curr_html}); - promise = - {always(callable) { return callable(); }}; - return promise; - }); - // Expect submit to be disabled and labeled properly at the start - expect(this.problem.submitButton).toHaveAttr('disabled'); - expect(this.problem.submitButtonLabel.text()).toBe('Submit'); - this.problem.save(); - // Submit button should have the same state after save has completed - expect(this.problem.submitButton).toHaveAttr('disabled'); - expect(this.problem.submitButtonLabel.text()).toBe('Submit'); - }); - - it('tests that save does not disable the submit button or change the text when submit is originally enabled', function() { - const self = this; - const curr_html = this.problem.el.html(); - spyOn($, 
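- // (unlike the preceding test, this one first enables the Submit button by
- // firing an 'input' event, then verifies that save() leaves it enabled)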
'postWithPrefix').and.callFake(function(url, answers, callback) {
- // enableButtons should have been called at this point, and the submit button should be disabled while submitting
- let promise;
- expect(self.problem.submitButton).toHaveAttr('disabled');
- expect(self.problem.submitButtonLabel.text()).toBe('Submit');
- callback({success: 'correct', html: curr_html});
- promise =
- {always(callable) { return callable(); }};
- return promise;
- });
- // Expect submit to be enabled and labeled properly at the start after adding an input
- $('#input_example_1').val('test').trigger('input');
- expect(this.problem.submitButton).not.toHaveAttr('disabled');
- expect(this.problem.submitButtonLabel.text()).toBe('Submit');
- this.problem.save();
- // Submit button should have the same state after save has completed
- expect(this.problem.submitButton).not.toHaveAttr('disabled');
- expect(this.problem.submitButtonLabel.text()).toBe('Submit');
- });
- });
-
- describe('refreshMath', function() {
- beforeEach(function() {
- this.problem = new Problem($('.xblock-student_view'));
- $('#input_example_1').val('E=mc^2');
- this.problem.refreshMath({target: $('#input_example_1').get(0)});
- });
-
- it('should queue the conversion and MathML element update', function() {
- expect(MathJax.Hub.Queue).toHaveBeenCalledWith(['Text', this.stubbedJax, 'E=mc^2'],
- [this.problem.updateMathML, this.stubbedJax, $('#input_example_1').get(0)]);
- });
- });
-
- describe('updateMathML', function() {
- beforeEach(function() {
- this.problem = new Problem($('.xblock-student_view'));
- this.stubbedJax.root.toMathML.and.returnValue('');
- });
-
- describe('when there is no exception', function() {
- beforeEach(function() {
- this.problem.updateMathML(this.stubbedJax, $('#input_example_1').get(0));
- });
-
- it('convert jax to MathML', () => expect($('#input_example_1_dynamath')).toHaveValue(''));
- });
-
- describe('when there is an exception', function() {
- beforeEach(function() {
- const error = new Error();
- error.restart = true;
- this.stubbedJax.root.toMathML.and.throwError(error);
- this.problem.updateMathML(this.stubbedJax, $('#input_example_1').get(0));
- });
-
- it('should queue up the exception', function() {
- expect(MathJax.Callback.After).toHaveBeenCalledWith([this.problem.refreshMath, this.stubbedJax], true);
- });
- });
- });
-
- describe('refreshAnswers', function() {
- beforeEach(function() {
- this.problem = new Problem($('.xblock-student_view'));
- this.problem.el.html(`\
-