diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 6af98a350..646472188 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -21,7 +21,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
-          python-version: "3.7"
+          python-version: "3.8"
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
@@ -44,10 +44,10 @@ jobs:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-      - name: Set up Python 3.7
+      - name: Set up Python 3.8
         uses: actions/setup-python@v4
         with:
-          python-version: "3.7"
+          python-version: "3.8"
       - name: Upgrade pip
         run: python -m pip install --upgrade pip
       - name: Install dependencies
diff --git a/.github/workflows/update_spaces.yml b/.github/workflows/update_spaces.yml
index 7ecfe1860..b4d7dcec7 100644
--- a/.github/workflows/update_spaces.yml
+++ b/.github/workflows/update_spaces.yml
@@ -16,7 +16,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v2
         with:
-          python-version: "3.7"
+          python-version: "3.8"
       - name: Set up default Git config
         run: |
           git config --global user.name evaluate-bot
diff --git a/setup.py b/setup.py
index c861731ac..1f49460f1 100644
--- a/setup.py
+++ b/setup.py
@@ -87,7 +87,7 @@
     "rouge_score>=0.1.2",
     "sacrebleu",
     "sacremoses",
-    "scipy",
+    "scipy>=1.10.0",
     "seqeval",
     "scikit-learn",
     "jiwer",
@@ -139,7 +139,7 @@
     entry_points={"console_scripts": ["evaluate-cli=evaluate.commands.evaluate_cli:main"]},
     install_requires=REQUIRED_PKGS,
     extras_require=EXTRAS_REQUIRE,
-    python_requires=">=3.7.0",
+    python_requires=">=3.8.0",
     classifiers=[
         "Development Status :: 5 - Production/Stable",
         "Intended Audience :: Developers",
@@ -148,7 +148,6 @@
         "License :: OSI Approved :: Apache Software License",
         "Operating System :: OS Independent",
         "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.7",
         "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
diff --git a/tests/test_evaluator.py b/tests/test_evaluator.py
index b0e4d5a6f..259b5c7b9 100644
--- a/tests/test_evaluator.py
+++ b/tests/test_evaluator.py
@@ -359,8 +359,8 @@ def test_bootstrap(self):
             random_state=0,
         )
         self.assertAlmostEqual(results["accuracy"]["score"], 0.666666, 5)
-        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][0], 0.33333, 5)
-        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][1], 0.666666, 5)
+        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][0], 0.33557, 5)
+        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][1], 1.0, 5)
         self.assertAlmostEqual(results["accuracy"]["standard_error"], 0.22498, 5)
 
     def test_perf(self):
@@ -394,8 +394,8 @@ def test_bootstrap_and_perf(self):
             random_state=0,
         )
         self.assertAlmostEqual(results["accuracy"]["score"], 0.666666, 5)
-        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][0], 0.333333, 5)
-        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][1], 0.666666, 5)
+        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][0], 0.33557, 5)
+        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][1], 1.0, 5)
         self.assertAlmostEqual(results["accuracy"]["standard_error"], 0.22498285, 5)
         self.assertAlmostEqual(results["total_time_in_seconds"], 0.1, 1)
         self.assertAlmostEqual(results["samples_per_second"], len(data) / results["total_time_in_seconds"], 5)
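
The test updates track the new scipy floor: the evaluator's bootstrap strategy is built on scipy.stats.bootstrap, and its confidence intervals come out differently under scipy>=1.10, which is why the expected bounds move from (0.33333, 0.666666) to (0.33557, 1.0). A minimal sketch of the underlying call, assuming a hypothetical three-sample fixture with accuracy 2/3; the test's actual data and resampling settings are not part of this diff:

```python
# Illustrative only: the fixture below is a guessed stand-in for the
# test's dataset, not the data the evaluator actually resamples.
import numpy as np
from scipy.stats import bootstrap

# Hypothetical per-example correctness for three samples, mean = 2/3,
# matching the score the test asserts.
correct = np.array([1, 1, 0])

res = bootstrap(
    (correct,),
    np.mean,                 # accuracy = mean of per-example correctness
    confidence_level=0.95,
    method="BCa",            # scipy's default interval method
    random_state=0,
)
print(res.confidence_interval)  # (low, high), the values the test checks
print(res.standard_error)
```

On a sample this small the resample distribution is coarse, so an upper bound of 1.0 is plausible once the scipy version changes; the asserted values themselves should come from an actual run under the pinned scipy, not from this sketch.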