From 7660e8a3fb9add0efcec1b47d903b06dbabfdf40 Mon Sep 17 00:00:00 2001
From: Matthieu Caneill
Date: Thu, 8 Aug 2024 12:32:43 +0200
Subject: [PATCH] Add JSON formatter for machine-readable output (#68)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The ability to save and use results from dbt-score in other tooling
opens new possibilities. To that end, add a new formatter, which uses
`stdout` to print a JSON document displaying `dbt-score`'s results.

Example:

```shell
$ dbt_score lint -f json
{
  "models": {
    "model1": {
      "score": 5.0,
      "badge": "🥈",
      "pass": true,
      "results": {
        "dbt_score.rules.generic.columns_have_description": {
          "result": "OK",
          "severity": "medium",
          "message": null
        },
        "dbt_score.rules.generic.has_description": {
          "result": "WARN",
          "severity": "medium",
          "message": "Model lacks a description."
        }
      }
    }
  },
  "project": {
    "score": 5.0,
    "badge": "🥈",
    "pass": true
  }
}
```

---------

Co-authored-by: Jochem van Dooren
---
 CHANGELOG.md                                |   3 +-
 docs/reference/formatters/json_formatter.md |   3 +
 src/dbt_score/cli.py                        |   4 +-
 src/dbt_score/formatters/json_formatter.py  | 106 ++++++++++++++++++++
 src/dbt_score/lint.py                       |   2 +
 tests/formatters/test_json_formatter.py     |  64 ++++++++++++
 6 files changed, 179 insertions(+), 3 deletions(-)
 create mode 100644 docs/reference/formatters/json_formatter.md
 create mode 100644 src/dbt_score/formatters/json_formatter.py
 create mode 100644 tests/formatters/test_json_formatter.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 323d145..9f4cc1f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,7 +9,8 @@ and this project adheres to
 ## [Unreleased]
 
 - Add null check before calling `project_evaluated` in the `evaluate` method to
-  prevent errors when no models are found. See PR #64.
+  prevent errors when no models are found. (#64)
+- Add JSON formatter for machine-readable output. (#68)
 
 ## [0.3.0] - 2024-06-20
 
diff --git a/docs/reference/formatters/json_formatter.md b/docs/reference/formatters/json_formatter.md
new file mode 100644
index 0000000..5b7cdc2
--- /dev/null
+++ b/docs/reference/formatters/json_formatter.md
@@ -0,0 +1,3 @@
+# JSON formatter
+
+::: dbt_score.formatters.json_formatter
diff --git a/src/dbt_score/cli.py b/src/dbt_score/cli.py
index 9694b5e..8b439ae 100644
--- a/src/dbt_score/cli.py
+++ b/src/dbt_score/cli.py
@@ -39,8 +39,8 @@ def cli() -> None:
     "--format",
     "-f",
     help="Output format. Plain is suitable for terminals, manifest for rich "
-    "documentation.",
-    type=click.Choice(["plain", "manifest", "ascii"]),
+    "documentation, json for machine-readable output.",
+    type=click.Choice(["plain", "manifest", "ascii", "json"]),
     default="plain",
 )
 @click.option(
diff --git a/src/dbt_score/formatters/json_formatter.py b/src/dbt_score/formatters/json_formatter.py
new file mode 100644
index 0000000..9e6f8b3
--- /dev/null
+++ b/src/dbt_score/formatters/json_formatter.py
@@ -0,0 +1,106 @@
+"""JSON formatter.
+
+Shape of the JSON output:
+
+{
+  "models": {
+    "model_foo": {
+      "score": 5.0,
+      "badge": "🥈",
+      "pass": true,
+      "results": {
+        "rule1": {
+          "result": "OK",
+          "severity": "medium",
+          "message": null
+        },
+        "rule2": {
+          "result": "WARN",
+          "severity": "medium",
+          "message": "Model lacks a description."
+        }
+      }
+    },
+    "model_bar": {
+      "score": 0.0,
+      "badge": "🥉",
+      "pass": false,
+      "results": {
+        "rule1": {
+          "result": "ERR",
+          "message": "Exception message"
+        }
+      }
+    }
+  },
+  "project": {
+    "score": 2.5,
+    "badge": "🥉",
+    "pass": false
+  }
+}
+"""
+
+
+import json
+from typing import Any
+
+from dbt_score.evaluation import ModelResultsType
+from dbt_score.formatters import Formatter
+from dbt_score.models import Model
+from dbt_score.rule import RuleViolation
+from dbt_score.scoring import Score
+
+
+class JSONFormatter(Formatter):
+    """Formatter for JSON output."""
+
+    def __init__(self, *args: Any, **kwargs: Any):
+        """Instantiate formatter."""
+        super().__init__(*args, **kwargs)
+        self._model_results: dict[str, dict[str, Any]] = {}
+        self._project_results: dict[str, Any]
+
+    def model_evaluated(
+        self, model: Model, results: ModelResultsType, score: Score
+    ) -> None:
+        """Callback when a model has been evaluated."""
+        self._model_results[model.name] = {
+            "score": score.value,
+            "badge": score.badge,
+            "pass": score.value >= self._config.fail_any_model_under,
+            "results": {},
+        }
+        for rule, result in results.items():
+            severity = rule.severity.name.lower()
+            if result is None:
+                self._model_results[model.name]["results"][rule.source()] = {
+                    "result": "OK",
+                    "severity": severity,
+                    "message": None,
+                }
+            elif isinstance(result, RuleViolation):
+                self._model_results[model.name]["results"][rule.source()] = {
+                    "result": "WARN",
+                    "severity": severity,
+                    "message": result.message,
+                }
+            else:
+                self._model_results[model.name]["results"][rule.source()] = {
+                    "result": "ERR",
+                    "severity": severity,
+                    "message": str(result),
+                }
+
+    def project_evaluated(self, score: Score) -> None:
+        """Callback when a project has been evaluated."""
+        self._project_results = {
+            "score": score.value,
+            "badge": score.badge,
+            "pass": score.value >= self._config.fail_project_under,
+        }
+        document = {
+            "models": self._model_results,
+            "project": self._project_results,
+        }
+        print(json.dumps(document, indent=2, ensure_ascii=False))
diff --git a/src/dbt_score/lint.py b/src/dbt_score/lint.py
index 5b6d95f..53d09f0 100644
--- a/src/dbt_score/lint.py
+++ b/src/dbt_score/lint.py
@@ -7,6 +7,7 @@ from dbt_score.evaluation import Evaluation
 from dbt_score.formatters.ascii_formatter import ASCIIFormatter
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
+from dbt_score.formatters.json_formatter import JSONFormatter
 from dbt_score.formatters.manifest_formatter import ManifestFormatter
 from dbt_score.models import ManifestLoader
 from dbt_score.rule_registry import RuleRegistry
@@ -32,6 +33,7 @@ def lint_dbt_project(
         "plain": HumanReadableFormatter,
         "manifest": ManifestFormatter,
         "ascii": ASCIIFormatter,
+        "json": JSONFormatter,
     }
 
     formatter = formatters[format](manifest_loader=manifest_loader, config=config)
diff --git a/tests/formatters/test_json_formatter.py b/tests/formatters/test_json_formatter.py
new file mode 100644
index 0000000..696d746
--- /dev/null
+++ b/tests/formatters/test_json_formatter.py
@@ -0,0 +1,64 @@
+"""Unit tests for the JSON formatter."""
+
+from typing import Type
+
+from dbt_score.formatters.json_formatter import JSONFormatter
+from dbt_score.rule import Rule, RuleViolation
+from dbt_score.scoring import Score
+
+
+def test_json_formatter(
+    capsys,
+    default_config,
+    manifest_loader,
+    model1,
+    rule_severity_low,
+    rule_severity_medium,
+    rule_severity_critical,
+):
+    """Ensure the formatter has the correct output after model evaluation."""
+    formatter = JSONFormatter(manifest_loader=manifest_loader, config=default_config)
+    results: dict[Type[Rule], RuleViolation | Exception | None] = {
+        rule_severity_low: None,
+        rule_severity_medium: Exception("Oh noes"),
+        rule_severity_critical: RuleViolation("Error"),
+    }
+    formatter.model_evaluated(model1, results, Score(10.0, "🥇"))
+    formatter.project_evaluated(Score(10.0, "🥇"))
+    stdout = capsys.readouterr().out
+    print()
+    assert (
+        stdout
+        == """{
+  "models": {
+    "model1": {
+      "score": 10.0,
+      "badge": "🥇",
+      "pass": true,
+      "results": {
+        "tests.conftest.rule_severity_low": {
+          "result": "OK",
+          "severity": "low",
+          "message": null
+        },
+        "tests.conftest.rule_severity_medium": {
+          "result": "ERR",
+          "severity": "medium",
+          "message": "Oh noes"
+        },
+        "tests.conftest.rule_severity_critical": {
+          "result": "WARN",
+          "severity": "critical",
+          "message": "Error"
+        }
+      }
+    }
+  },
+  "project": {
+    "score": 10.0,
+    "badge": "🥇",
+    "pass": true
+  }
+}
+"""
+    )
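
Downstream tooling can consume the document straight from `stdout`. As a minimal sketch (assuming `jq` is available; the field names follow the example output in the commit message above), models that do not pass could be listed like this:

```shell
# Hypothetical post-processing step: print each failing model with its score.
$ dbt_score lint -f json | jq -r '.models | to_entries[] | select(.value.pass | not) | "\(.key): \(.value.score)"'
```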