diff --git a/docs/resources/cli-reference.md b/docs/resources/cli-reference.md
index 4a82cbd1b..3359d13f2 100644
--- a/docs/resources/cli-reference.md
+++ b/docs/resources/cli-reference.md
@@ -274,6 +274,7 @@ launchable subset [OPTIONS] TESTRUNNER ...
 | `--ignore-new-tests` | Ignore tests that were not recognized by the subset service and are therefore assumed to be new tests. This option is useful if you want to prevent new tests (with unknown execution time) from increasing subset execution time, but it also means that it might take longer for new tests to be recognized (since they were not run in the subset). To maintain consistency between inputs to and outputs from `launchable subset`, these tests will be added to the end of the `--rest` file output (if that option is used) | No |
 | `--get-tests-from-previous-sessions` | Let the server generate the full list of tests from which to create a subset of tests. Intended for use with `--output-exclusion-rules`, otherwise new tests might be skipped accidentally. See [zero-input-subsetting](../features/predictive-test-selection/requesting-and-running-a-subset-of-tests/subsetting-with-the-launchable-cli/zero-input-subsetting/ "mention") | No |
 | `--output-exclusion-rules` | Output a list of tests to _exclude_ instead of a list of tests to _include_. See [zero-input-subsetting](../features/predictive-test-selection/requesting-and-running-a-subset-of-tests/subsetting-with-the-launchable-cli/zero-input-subsetting/ "mention") | No |
+| `--ignore-flaky-tests-above` | Ignore tests whose flakiness score is higher than the value set with this option. You can confirm flakiness scores in the WebApp. | No |
 
 Exactly how this command generates the subset and what's required to do this depends on test runners. For available supported `TESTRUNNER`s, see [Integrations](integrations/).
 
diff --git a/launchable/commands/subset.py b/launchable/commands/subset.py
index 00d7ea9cf..a1631a78b 100644
--- a/launchable/commands/subset.py
+++ b/launchable/commands/subset.py
@@ -119,6 +119,12 @@
     help="outputs the exclude test list. Switch the subset and rest.",
     is_flag=True,
 )
+@click.option(
+    "--ignore-flaky-tests-above",
+    "ignore_flaky_tests_above",
+    help='Ignore flaky tests above the value set by this option. You can confirm flaky scores in WebApp',
+    type=click.FloatRange(min=0, max=1.0),
+)
 @click.pass_context
 def subset(
     context: click.core.Context,
@@ -136,6 +142,7 @@ def subset(
     is_observation: bool,
     is_get_tests_from_previous_sessions: bool,
     is_output_exclusion_rules: bool,
+    ignore_flaky_tests_above: Optional[float],
 ):
 
     if is_observation and is_get_tests_from_previous_sessions:
@@ -304,6 +311,9 @@ def get_payload(
             else:
                 payload['useServerSideOptimizationTarget'] = True
 
+            # NOTE(review): FloatRange(min=0, ...) permits an explicit 0.0, so test
+            # for None rather than truthiness — a bare `if ignore_flaky_tests_above:`
+            # would silently drop a user-supplied threshold of 0.
+            if ignore_flaky_tests_above is not None:
+                payload["dropFlakinessThreshold"] = ignore_flaky_tests_above
+
             return payload
 
         def run(self):
diff --git a/tests/commands/test_subset.py b/tests/commands/test_subset.py
index ccbd29f8f..3175915fa 100644
--- a/tests/commands/test_subset.py
+++ b/tests/commands/test_subset.py
@@ -208,6 +208,48 @@ def test_subset_targetless(self):
         payload = json.loads(gzip.decompress(responses.calls[0].request.body).decode())
         self.assertTrue(payload.get('useServerSideOptimizationTarget'))
 
+    @responses.activate
+    @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token})
+    def test_subset_ignore_flaky_tests_above(self):
+        pipe = "test_aaa.py\ntest_bbb.py\ntest_ccc.py\ntest_flaky.py"
+        responses.replace(
+            responses.POST,
+            "{}/intake/organizations/{}/workspaces/{}/subset".format(
+                get_base_url(),
+                self.organization,
+                self.workspace),
+            json={
+                "testPaths": [
+                    [{"type": "file", "name": "test_aaa.py"}],
+                    [{"type": "file", "name": "test_bbb.py"}],
+
+                ],
+                "testRunner": "file",
+                "rest": [
+                    [{"type": "file", "name": "test_ccc.py"}],
+                ],
+                "subsettingId": 123,
+                "summary": {
+                    "subset": {"duration": 20, "candidates": 2, "rate": 67},
+                    "rest": {"duration": 10, "candidates": 1, "rate": 33}
+                },
+            },
+            status=200)
+
+        result = self.cli(
+            "subset",
+            "--session",
+            self.session,
+            "--ignore-flaky-tests-above",
+            0.05,
+            "file",
+            input=pipe,
+            mix_stderr=False)
+        self.assertEqual(result.exit_code, 0)
+
+        payload = json.loads(gzip.decompress(responses.calls[0].request.body).decode())
+        self.assertEqual(payload.get('dropFlakinessThreshold'), 0.05)
+
     @responses.activate
     @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token})
     def test_subset_with_get_tests_from_previous_full_runs(self):