generate-auto-docs.py
"""Generate some documentation automatically.
This script generates partial documentation sections (i.e. the content of
`/docs/source/_partials/`) by importing SQLFluff and extracting data about
rules and dialects.
It should run before every docs generation so that those partial .rst files
can then be correctly referenced by other sections of the docs. For example
this file builds the file `/docs/source/_partials/rule_summaries.rst`, which
is then inserted into `/docs/source/reference/rules.rst` using the directive
`.. include:: ../_partials/rule_summaries.rst`.
This script is referenced in the `Makefile` and the `make.bat` file to ensure
it is run at the appropriate moment.
"""
import json
from collections import defaultdict
from pathlib import Path

import sqlfluff
from sqlfluff.core.plugin.host import get_plugin_manager

base_path = Path(__file__).parent.absolute()
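# base_path is the docs directory (this script's own location); all of the
# output paths below are resolved relative to it.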
##########################################
# Generate rule documentation dynamically.
##########################################

autogen_header = """..
    NOTE: This file is generated by the generate-auto-docs.py script.
    Don't edit this by hand

"""
table_header = f"""
+{'-' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+
|{'Bundle' : <42}|{'Rule Name' : <50}|{'Code' : <30}|{'Aliases' : <20}|
+{'=' * 42}+{'=' * 50}+{'=' * 30}+{'=' * 20}+
"""
# Extract all the rules.
print("Rule Docs Generation: Reading Rules...")
rule_bundles = defaultdict(list)
rule_list = []
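# rule_bundles groups rules by the first segment of their dotted name (e.g. a
# rule named "layout.spacing" lands in the "layout" bundle); rule_list is a
# flat list of (code, name) pairs.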
for plugin_rules in get_plugin_manager().hook.get_rules():
    for rule in plugin_rules:
        _bundle_name = rule.name.split(".")[0]
        rule_bundles[_bundle_name].append(rule)
        rule_list.append((rule.code, rule.name))

# Write them into a json file for use by redirects.
print("Rule Docs Generation: Writing Rule JSON...")
with open(base_path / "source/_partials/rule_list.json", "w", encoding="utf8") as f:
    json.dump(rule_list, f)

# Write them into the table. Bundle by bundle.
print("Rule Docs Generation: Writing Rule Table...")
with open(base_path / "source/_partials/rule_table.rst", "w", encoding="utf8") as f:
    f.write(autogen_header)
    f.write(table_header)
    for bundle in sorted(rule_bundles.keys()):
        # Set the bundle name to the ref.
        _bundle_name = f":ref:`bundle_{bundle}`"
        for idx, rule in enumerate(rule_bundles[bundle]):
            step = 1  # The number of aliases per line.
            aliases = ", ".join(rule.aliases[:step]) + (
                "," if len(rule.aliases) > step else ""
            )
            name_ref = f":sqlfluff:ref:`{rule.name}`"
            code_ref = f":sqlfluff:ref:`{rule.code}`"
            f.write(
                f"| {_bundle_name : <40} | {name_ref : <48} "
                f"| {code_ref : <28} | {aliases : <18} |\n"
            )
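            # Any aliases beyond the first wrap onto continuation rows below,
            # leaving the other three cells blank.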
            j = 1
            while True:
                if not rule.aliases[j:]:
                    break
                aliases = ", ".join(rule.aliases[j : j + step]) + (
                    "," if len(rule.aliases[j:]) > step else ""
                )
                f.write(f"|{' ' * 42}|{' ' * 50}|{' ' * 30}| {aliases : <18} |\n")
                j += step
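            # Within a bundle, the separator leaves the first column open so
            # the bundle cell visually spans all of its rules; the last rule
            # closes the section with a full horizontal rule.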
            if idx + 1 < len(rule_bundles[bundle]):
                f.write(f"|{' ' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+\n")
            else:
                f.write(f"+{'-' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+\n")
            # Unset the bundle name so we don't repeat it.
            _bundle_name = ""
    f.write("\n\n")

# Write each of the summary files.
print("Rule Docs Generation: Writing Rule Summaries...")
with open(base_path / "source/_partials/rule_summaries.rst", "w", encoding="utf8") as f:
    f.write(autogen_header)
    for bundle in sorted(rule_bundles.keys()):
        if "sql" in bundle:
            # This accounts for things like "TSQL"
            header_name = bundle.upper()
        else:
            header_name = bundle.capitalize()
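        # RST headings need an underline at least as long as the heading text;
        # the "+ 7" below accounts for the appended " bundle" (7 characters).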
        # Write the bundle header.
        f.write(
            f".. _bundle_{bundle}:\n\n"
            f"{header_name} bundle\n"
            f"{'-' * (len(bundle) + 7)}\n\n"
        )
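        # Each rule is documented with the custom `sqlfluff:rule` Sphinx
        # directive (the same domain targeted by the `:sqlfluff:ref:` roles
        # above), taking the rule code and name as its arguments.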
        for rule in rule_bundles[bundle]:
            f.write(
                f".. sqlfluff:rule:: {rule.code}\n"
                f"    {rule.name}\n\n"
            )
            # Separate off the heading so we can bold it.
            heading, _, doc_body = rule.__doc__.partition("\n")
            underline_char = '"'
            f.write(f"    {heading}\n")
            f.write(f"    {underline_char * len(heading)}\n\n")
            f.write("    " + doc_body)
            f.write("\n\n")

print("Rule Docs Generation: Done")

# Extract all the dialects.
print("Dialect Docs Generation: Reading Dialects...")
# We make a dictionary of all of them first, because we want to force the ANSI
# one to be first.
dialect_dict = {dialect.label: dialect for dialect in sqlfluff.list_dialects()}
dialect_list = [dialect_dict["ansi"]] + [
    dialect for dialect_name, dialect in dialect_dict.items() if dialect_name != "ansi"
]

# Write each of the summary files.
print("Dialect Docs Generation: Writing Dialect Summaries...")
with open(
    base_path / "source/_partials/dialect_summaries.rst", "w", encoding="utf8"
) as f:
    f.write(autogen_header)
    for dialect in dialect_list:
        f.write(
            f".. _{dialect.label}_dialect_ref:\n\n"
            f"{dialect.name}\n{'-' * len(dialect.name)}\n\n"
            f"**Label**: ``{dialect.label}``\n\n"
        )
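        # Cross-link each dialect to its parent via the anchor written above;
        # ANSI is the root dialect, so it gets no "Inherits from" line.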
        if dialect.label != "ansi":
            f.write(
                f"**Inherits from**: :ref:`{dialect.inherits_from}_dialect_ref`\n\n"
            )
        if dialect.docstring:
            f.write(dialect.docstring + "\n\n")