Skip to content

Commit

Permalink
Add option to output error markers with diagnostic context (Pull request #673)
Browse files Browse the repository at this point in the history

This fixes #531
  • Loading branch information
hzeller authored Mar 22, 2021
2 parents 0226108 + 31a5ecd commit f333f20
Show file tree
Hide file tree
Showing 16 changed files with 301 additions and 54 deletions.
1 change: 1 addition & 0 deletions common/analysis/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,7 @@ cc_library(
"//common/text:text_structure",
"//common/text:token_info",
"//common/text:token_stream_view",
"//common/util:spacer",
"@com_google_absl//absl/status",
"@com_google_absl//absl/strings",
],
Expand Down
17 changes: 14 additions & 3 deletions common/analysis/file_analyzer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
#include "common/text/text_structure.h"
#include "common/text/token_info.h"
#include "common/text/token_stream_view.h"
#include "common/util/spacer.h"

namespace verible {

Expand Down Expand Up @@ -135,15 +136,23 @@ std::vector<std::string> FileAnalyzer::TokenErrorMessages() const {
}

std::string FileAnalyzer::LinterTokenErrorMessage(
const RejectedToken& error_token) const {
const RejectedToken& error_token, bool diagnostic_context) const {
const LineColumnMap& line_column_map = Data().GetLineColumnMap();
const absl::string_view base_text = Data().Contents();

std::ostringstream output_stream;
output_stream << filename_ << ':';
if (!error_token.token_info.isEOF()) {
const auto left = line_column_map(error_token.token_info.left(base_text));
output_stream << left << ": " << error_token.phase << " error, rejected \""
<< error_token.token_info.text() << "\" (syntax-error).";
if (diagnostic_context) {
const auto& lines = Data().Lines();
if (left.line < static_cast<int>(lines.size())) {
output_stream << "\n" << lines[left.line] << std::endl;
output_stream << verible::Spacer(left.column) << "^";
}
}
} else {
const int file_size = base_text.length();
const auto end = line_column_map(file_size);
Expand All @@ -157,11 +166,13 @@ std::string FileAnalyzer::LinterTokenErrorMessage(
return output_stream.str();
}

std::vector<std::string> FileAnalyzer::LinterTokenErrorMessages() const {
std::vector<std::string> FileAnalyzer::LinterTokenErrorMessages(
bool diagnostic_context) const {
std::vector<std::string> messages;
messages.reserve(rejected_tokens_.size());
for (const auto& rejected_token : rejected_tokens_) {
messages.push_back(LinterTokenErrorMessage(rejected_token));
messages.push_back(
LinterTokenErrorMessage(rejected_token, diagnostic_context));
}
return messages;
}
Expand Down
12 changes: 9 additions & 3 deletions common/analysis/file_analyzer.h
Original file line number Diff line number Diff line change
Expand Up @@ -94,9 +94,15 @@ class FileAnalyzer : public TextStructure {
std::vector<std::string> TokenErrorMessages() const;

// Diagnostic message for rejected tokens for linter.
std::string LinterTokenErrorMessage(const RejectedToken&) const;

std::vector<std::string> LinterTokenErrorMessages() const;
// Second argument is the show_context option. When enabled, an
// additional diagnostic line with a marker pointing at the offending
// token is appended to the error message.
std::string LinterTokenErrorMessage(const RejectedToken&, bool) const;

// First argument is the show_context option. When enabled, an
// additional diagnostic line with a marker pointing at the offending
// token is appended to each error message.
std::vector<std::string> LinterTokenErrorMessages(bool) const;

const std::vector<RejectedToken>& GetRejectedTokens() const {
return rejected_tokens_;
Expand Down
97 changes: 93 additions & 4 deletions common/analysis/file_analyzer_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -63,13 +63,36 @@ TEST(FileAnalyzerTest, TokenErrorMessageSameLine) {
EXPECT_EQ(message, "token: \"w0rld\" at 2:5-9");
}
{
constexpr bool with_diagnostic_context = false;
const auto message = analyzer.LinterTokenErrorMessage(
{error_token, AnalysisPhase::kParsePhase});
{error_token, AnalysisPhase::kParsePhase}, with_diagnostic_context);
EXPECT_TRUE(absl::StrContains(
message, "hello.txt:2:5: syntax error, rejected \"w0rld\""));
}
}

// Verify that an error token confined to a single line is reported
// correctly, including the echoed source line and caret marker when
// diagnostic context is requested.
TEST(FileAnalyzerTest, TokenErrorMessageSameLineWithContext) {
  const std::string text("hello, world\nbye w0rld\n");
  FakeFileAnalyzer analyzer(text, "hello.txt");
  // "w0rld" on line 2: byte offset 17, length 5.
  const TokenInfo error_token(1, analyzer.Data().Contents().substr(17, 5));

  // Plain token error message, no context.
  EXPECT_EQ(analyzer.TokenErrorMessage(error_token),
            "token: \"w0rld\" at 2:5-9");

  // Linter-style message with diagnostic context: the offending source
  // line is echoed, followed by a caret pointing at the rejected token.
  const auto linter_message = analyzer.LinterTokenErrorMessage(
      {error_token, AnalysisPhase::kParsePhase}, /*diagnostic_context=*/true);
  EXPECT_TRUE(
      absl::StrContains(linter_message,
                        "hello.txt:2:5: syntax error, rejected \"w0rld\" "
                        "(syntax-error).\n"
                        "bye w0rld\n"
                        "    ^"));
}

// Verify that an error token on one character is reported correctly.
TEST(FileAnalyzerTest, TokenErrorMessageOneChar) {
const std::string text("hello, world\nbye w0rld\n");
Expand All @@ -80,13 +103,35 @@ TEST(FileAnalyzerTest, TokenErrorMessageOneChar) {
EXPECT_EQ(message, "token: \",\" at 1:6");
}
{
constexpr bool with_diagnostic_context = false;
const auto message = analyzer.LinterTokenErrorMessage(
{error_token, AnalysisPhase::kParsePhase});
{error_token, AnalysisPhase::kParsePhase}, with_diagnostic_context);
EXPECT_TRUE(absl::StrContains(
message, "hello.txt:1:6: syntax error, rejected \",\""));
}
}

// Verify that a single-character error token is reported correctly,
// including the echoed source line and caret marker when diagnostic
// context is requested.
TEST(FileAnalyzerTest, TokenErrorMessageOneCharWithContext) {
  const std::string text("hello, world\nbye w0rld\n");
  FakeFileAnalyzer analyzer(text, "hello.txt");
  // The ',' on line 1: byte offset 5, length 1.
  const TokenInfo error_token(1, analyzer.Data().Contents().substr(5, 1));

  // Plain token error message, no context.
  EXPECT_EQ(analyzer.TokenErrorMessage(error_token), "token: \",\" at 1:6");

  // Linter-style message with diagnostic context: the offending source
  // line is echoed, followed by a caret pointing at the rejected token.
  const auto linter_message = analyzer.LinterTokenErrorMessage(
      {error_token, AnalysisPhase::kParsePhase}, /*diagnostic_context=*/true);
  EXPECT_TRUE(
      absl::StrContains(linter_message,
                        "hello.txt:1:6: syntax error, rejected \",\" "
                        "(syntax-error).\n"
                        "hello, world\n"
                        "     ^"));
}

// Verify that an error token spanning multiple lines is reported correctly.
TEST(FileAnalyzerTest, TokenErrorMessageDifferentLine) {
const std::string text("hello, world\nbye w0rld\n");
Expand All @@ -97,12 +142,36 @@ TEST(FileAnalyzerTest, TokenErrorMessageDifferentLine) {
EXPECT_EQ(message, "token: \"world\nbye\" at 1:8-2:3");
}
{
constexpr bool with_diagnostic_context = false;
const auto message = analyzer.LinterTokenErrorMessage(
{error_token, AnalysisPhase::kParsePhase});
{error_token, AnalysisPhase::kParsePhase}, with_diagnostic_context);
EXPECT_TRUE(absl::StrContains(
message, "hello.txt:1:8: syntax error, rejected \"world\nbye\""));
}
}

// Verify that an error token spanning multiple lines is reported
// correctly; with diagnostic context enabled, only the line where the
// token starts is echoed, with the caret at the token's first column.
TEST(FileAnalyzerTest, TokenErrorMessageDifferentLineWithContext) {
  const std::string text("hello, world\nbye w0rld\n");
  FakeFileAnalyzer analyzer(text, "hello.txt");
  // "world\nbye": byte offset 7, length 9, crossing the line break.
  const TokenInfo error_token(1, analyzer.Data().Contents().substr(7, 9));

  // Plain token error message, no context.
  EXPECT_EQ(analyzer.TokenErrorMessage(error_token),
            "token: \"world\nbye\" at 1:8-2:3");

  // Linter-style message with diagnostic context: the line containing the
  // start of the token is echoed, followed by the caret marker.
  const auto linter_message = analyzer.LinterTokenErrorMessage(
      {error_token, AnalysisPhase::kParsePhase}, /*diagnostic_context=*/true);
  EXPECT_TRUE(
      absl::StrContains(linter_message,
                        "hello.txt:1:8: syntax error, rejected \"world\nbye\" "
                        "(syntax-error).\n"
                        "hello, world\n"
                        "       ^"));
}

//
// Verify that an error at EOF reported correctly.
TEST(FileAnalyzerTest, TokenErrorMessageEOF) {
Expand All @@ -114,8 +183,28 @@ TEST(FileAnalyzerTest, TokenErrorMessageEOF) {
EXPECT_EQ(message, "token: <<EOF>> at 3:1");
}
{
constexpr bool with_diagnostic_context = false;
const auto message = analyzer.LinterTokenErrorMessage(
{error_token, AnalysisPhase::kParsePhase}, with_diagnostic_context);
EXPECT_TRUE(absl::StrContains(
message, "unbalanced.txt:3:1: syntax error (unexpected EOF)"));
}
}

//
// Verify that an error at EOF reported correctly.
TEST(FileAnalyzerTest, TokenErrorMessageEOFWithContext) {
const std::string text("hello, world\nbye w0rld (\n");
const TokenInfo error_token(TokenInfo::EOFToken());
FakeFileAnalyzer analyzer(text, "unbalanced.txt");
{
const auto message = analyzer.TokenErrorMessage(error_token);
EXPECT_EQ(message, "token: <<EOF>> at 3:1");
}
{
constexpr bool with_diagnostic_context = true;
const auto message = analyzer.LinterTokenErrorMessage(
{error_token, AnalysisPhase::kParsePhase});
{error_token, AnalysisPhase::kParsePhase}, with_diagnostic_context);
EXPECT_TRUE(absl::StrContains(
message, "unbalanced.txt:3:1: syntax error (unexpected EOF)"));
}
Expand Down
10 changes: 9 additions & 1 deletion common/analysis/lint_rule_status.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

#include <algorithm>
#include <functional>
#include <iostream>
#include <iterator>
#include <ostream>
#include <string>
Expand All @@ -28,6 +29,7 @@
#include "common/text/syntax_tree_context.h"
#include "common/text/token_info.h"
#include "common/text/tree_utils.h"
#include "common/util/spacer.h"

namespace verible {

Expand Down Expand Up @@ -73,7 +75,8 @@ struct LintViolationWithStatus {

void LintStatusFormatter::FormatLintRuleStatuses(
std::ostream* stream, const std::vector<LintRuleStatus>& statuses,
absl::string_view base, absl::string_view path) const {
absl::string_view base, absl::string_view path,
const std::vector<absl::string_view>& lines) const {
std::set<LintViolationWithStatus> violations;

// TODO(fangism): rewrite as a linear time merge of pre-ordered sub-sequences
Expand All @@ -87,6 +90,11 @@ void LintStatusFormatter::FormatLintRuleStatuses(
FormatViolation(stream, *violation.violation, base, path,
violation.status->url, violation.status->lint_rule_name);
*stream << std::endl;
auto cursor = line_column_map_(violation.violation->token.left(base));
if (cursor.line < static_cast<int>(lines.size())) {
*stream << lines[cursor.line] << std::endl;
*stream << verible::Spacer(cursor.column) << "^" << std::endl;
}
}
}

Expand Down
6 changes: 4 additions & 2 deletions common/analysis/lint_rule_status.h
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,8 @@ class LintStatusFormatter {
absl::string_view base,
absl::string_view path) const;

// Formats, sorts and outputs status to stream.
// Formats, sorts and outputs statuses to the stream, additionally printing
// the offending source line with a column marker when lines are provided.
// The violations contained in the statuses are sorted by their occurrence
// in the code and are not grouped by the status object.
// Path is the file path of original file. This is needed because it is not
Expand All @@ -132,7 +133,8 @@ class LintStatusFormatter {
void FormatLintRuleStatuses(std::ostream* stream,
const std::vector<LintRuleStatus>& statuses,
absl::string_view base,
absl::string_view path) const;
absl::string_view path,
const std::vector<absl::string_view>& lines) const;

// Formats and outputs violation on stream.
// Path is file path of original file and url is a link to the ratified rule
Expand Down
43 changes: 39 additions & 4 deletions common/analysis/lint_rule_status_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ TEST(LintRuleStatusFormatterTest, NoOutput) {
RunLintStatusTest(test);
}

void RunLintStatusesTest(const LintStatusTest& test) {
void RunLintStatusesTest(const LintStatusTest& test, bool show_context) {
// Dummy tree so we have something for test cases to point at
SymbolPtr root = Node();

Expand Down Expand Up @@ -172,8 +172,21 @@ void RunLintStatusesTest(const LintStatusTest& test) {
std::ostringstream ss;

LintStatusFormatter formatter(test.text);
formatter.FormatLintRuleStatuses(&ss, statuses, test.text, test.path);
auto result_parts = absl::StrSplit(ss.str(), '\n');
const std::vector<absl::string_view> lines;
if (!show_context) {
formatter.FormatLintRuleStatuses(&ss, statuses, test.text, test.path, {});
} else {
formatter.FormatLintRuleStatuses(&ss, statuses, test.text, test.path,
absl::StrSplit(test.text, '\n'));
std::cout << ss.str() << std::endl;
}
std::vector<std::string> result_parts;
if (!show_context)
result_parts = absl::StrSplit(ss.str(), '\n');
else {
result_parts = absl::StrSplit(ss.str(), "^\n");
}

auto part_iterator = result_parts.begin();

for (const auto& violation_test : test.violations) {
Expand All @@ -200,7 +213,29 @@ TEST(LintRuleStatusFormatterTest, MultipleStatusesSimpleOutput) {
{"reason2", TokenInfo(dont_care_tag, text.substr(21, 4)),
"some/path/to/somewhere.fvg:2:4: reason2 http://foobar [test-rule]"}}};

RunLintStatusesTest(test);
RunLintStatusesTest(test, false);
}

// Verify that with show_context enabled, each formatted violation is
// followed by the offending source line (the caret-marker line is stripped
// by the "^\n" split in RunLintStatusesTest, which leaves the marker's
// leading indent as trailing spaces in the expected text).
TEST(LintRuleStatusFormatterTestWithContext, MultipleStatusesSimpleOutput) {
  // NOTE(review): the unused dummy `SymbolPtr root = Node();` was removed;
  // unlike other tests here, the violations anchor into `text`, not a tree.
  static const int dont_care_tag = 0;
  constexpr absl::string_view text(
      "This is some code\n"
      "That you are looking at right now\n"
      "It is nice code, make no mistake\n"
      "Very nice");
  LintStatusTest test = {
      "test-rule",
      "http://foobar",
      "some/path/to/somewhere.fvg",
      text,
      // reason1 points at "This " (1:1); reason2 at "look" (2:4), hence the
      // three trailing spaces of marker indent after the echoed line.
      {{"reason1", TokenInfo(dont_care_tag, text.substr(0, 5)),
        "some/path/to/somewhere.fvg:1:1: reason1 http://foobar "
        "[test-rule]\nThis is some code\n"},
       {"reason2", TokenInfo(dont_care_tag, text.substr(21, 4)),
        "some/path/to/somewhere.fvg:2:4: reason2 http://foobar "
        "[test-rule]\nThat you are looking at right now\n   "}}};
  RunLintStatusesTest(test, true);
}

} // namespace
Expand Down
Loading

0 comments on commit f333f20

Please sign in to comment.