diff --git a/dashboard/src2/components/NavigationItems.vue b/dashboard/src2/components/NavigationItems.vue
index 03055b9b4a..8a88848b9b 100644
--- a/dashboard/src2/components/NavigationItems.vue
+++ b/dashboard/src2/components/NavigationItems.vue
@@ -14,6 +14,7 @@ import WalletCards from '~icons/lucide/wallet-cards';
import Settings from '~icons/lucide/settings';
import App from '~icons/lucide/layout-grid';
import DatabaseZap from '~icons/lucide/database-zap';
+import Activity from '~icons/lucide/activity';
import Logs from '~icons/lucide/scroll-text';
import Globe from '~icons/lucide/globe';
import Notification from '~icons/lucide/inbox';
@@ -129,9 +130,18 @@ export default {
icon: () => h(Logs),
route: '/log-browser',
isActive: routeName === 'Log Browser'
+ },
+ {
+ name: 'DB Analyzer',
+ icon: () => h(Activity),
+ route: '/database-analyzer',
+ isActive: routeName === 'DB Analyzer',
+ condition: this.$team.doc?.is_desk_user
}
- ],
- isActive: ['SQL Playground', 'Log Browser'].includes(routeName),
+ ].filter(item => item.condition ?? true),
+ isActive: ['SQL Playground', 'DB Analyzer', 'Log Browser'].includes(
+ routeName
+ ),
disabled: enforce2FA
},
{
diff --git a/dashboard/src2/components/ToggleContent.vue b/dashboard/src2/components/ToggleContent.vue
new file mode 100644
index 0000000000..01a1e049fc
--- /dev/null
+++ b/dashboard/src2/components/ToggleContent.vue
@@ -0,0 +1,51 @@
+<!-- template markup not recoverable; surviving bindings: -->
+  {{ label }}
+  {{ subLabel }}
diff --git a/dashboard/src2/components/devtools/database/DatabaseAddIndexButton.vue b/dashboard/src2/components/devtools/database/DatabaseAddIndexButton.vue
new file mode 100644
index 0000000000..49ec1289cd
--- /dev/null
+++ b/dashboard/src2/components/devtools/database/DatabaseAddIndexButton.vue
@@ -0,0 +1,76 @@
+<!-- component markup not recoverable -->
diff --git a/dashboard/src2/components/devtools/database/DatabaseProcessKillButton.vue b/dashboard/src2/components/devtools/database/DatabaseProcessKillButton.vue
new file mode 100644
index 0000000000..89a72ae51e
--- /dev/null
+++ b/dashboard/src2/components/devtools/database/DatabaseProcessKillButton.vue
@@ -0,0 +1,55 @@
+<!-- component markup not recoverable; success toast text: -->
+  Process Killed
diff --git a/dashboard/src2/components/devtools/database/DatabaseTableSchemaDialog.vue b/dashboard/src2/components/devtools/database/DatabaseTableSchemaDialog.vue
index 6e88a09148..c60a9e6a05 100644
--- a/dashboard/src2/components/devtools/database/DatabaseTableSchemaDialog.vue
+++ b/dashboard/src2/components/devtools/database/DatabaseTableSchemaDialog.vue
@@ -20,7 +20,10 @@
v-if="selectedSchema"
/>
-
+<!-- changed dialog markup not recoverable -->
diff --git a/dashboard/src2/pages/devtools/database/DatabaseAnalyzer.vue b/dashboard/src2/pages/devtools/database/DatabaseAnalyzer.vue
new file mode 100644
index 0000000000..8a8ad6ae0f
--- /dev/null
+++ b/dashboard/src2/pages/devtools/database/DatabaseAnalyzer.vue
@@ -0,0 +1,668 @@
+<!-- page markup largely not recoverable; the surviving labels and bindings follow -->
+  Database Size Breakup
+    View Details
+    Optimize Table
+    Data Size {{ this.databaseSizeBreakup.data_size }} MB
+    Index Size {{ this.databaseSizeBreakup.index_size }} MB
+    Free Space {{ this.databaseSizeBreakup.free_size }} MB
+  Refresh
+  Loading Database Processes
+  @click="
+    () => {
+      this.isIndexSuggestionTriggered = true;
+      this.$resources.suggestDatabaseIndexes.submit();
+    }
+  "
+  :loading="this.$resources.suggestDatabaseIndexes.loading"
+  >Suggest Indexes
+  This may take a while to analyze
+  Select a site to get started
+  Loading Table Schemas
diff --git a/dashboard/src2/pages/devtools/database/DatabaseSQLPlayground.vue b/dashboard/src2/pages/devtools/database/DatabaseSQLPlayground.vue
index 40401b823d..db6dd68da2 100644
--- a/dashboard/src2/pages/devtools/database/DatabaseSQLPlayground.vue
+++ b/dashboard/src2/pages/devtools/database/DatabaseSQLPlayground.vue
@@ -143,6 +143,7 @@
:site="this.site"
:tableSchemas="$resources.tableSchemas?.data?.message?.data ?? {}"
v-model="showTableSchemasDialog"
+ :showSQLActions="true"
@runSQLQuery="runSQLQueryForViewingTable"
/>
@@ -156,7 +157,7 @@
import { toast } from 'vue-sonner';
import Header from '../../../components/Header.vue';
import { Tabs, Breadcrumbs } from 'frappe-ui';
-import SQLResultTable from '../../../components/devtools/database/SQLResultTable.vue';
+import SQLResultTable from '../../../components/devtools/database/ResultTable.vue';
import SQLCodeEditor from '../../../components/devtools/database/SQLCodeEditor.vue';
import { confirmDialog } from '../../../utils/components';
import DatabaseSQLPlaygroundLog from '../../../components/devtools/database/DatabaseSQLPlaygroundLog.vue';
@@ -264,7 +265,7 @@ export default {
for (const tableName in tableSchemas) {
childrenSchemas[tableName] = {
self: { label: tableName, type: 'table' },
- children: tableSchemas[tableName].map(x => ({
+ children: tableSchemas[tableName].columns.map(x => ({
label: x.column,
type: 'column',
detail: x.data_type
diff --git a/dashboard/src2/router.js b/dashboard/src2/router.js
index 3113d083bd..19afb7b733 100644
--- a/dashboard/src2/router.js
+++ b/dashboard/src2/router.js
@@ -291,6 +291,11 @@ let router = createRouter({
component: () =>
import('./pages/devtools/database/DatabaseSQLPlayground.vue')
},
+ {
+ path: '/database-analyzer',
+ name: 'DB Analyzer',
+ component: () => import('./pages/devtools/database/DatabaseAnalyzer.vue')
+ },
{
path: '/log-browser/:mode?/:docName?/:logId?',
name: 'Log Browser',
diff --git a/press/agent.py b/press/agent.py
index 34ea88f61f..3e37165c80 100644
--- a/press/agent.py
+++ b/press/agent.py
@@ -1161,13 +1161,18 @@ def get_site_apps(self, site):
apps: list[str] = [line.split()[0] for line in raw_apps_list["data"].splitlines() if line]
return apps
- def fetch_database_table_schema(self, site):
+ def fetch_database_table_schema(
+ self, site, include_table_size: bool = False, include_index_info: bool = False
+ ):
return self.create_agent_job(
"Fetch Database Table Schema",
f"benches/{site.bench}/sites/{site.name}/database/schema",
bench=site.bench,
site=site.name,
- data={},
+ data={
+ "include_table_size": include_table_size,
+ "include_index_info": include_index_info,
+ },
reference_doctype="Site",
reference_name=site.name,
)
@@ -1178,6 +1183,47 @@ def run_sql_query_in_database(self, site, query, commit):
data={"query": query, "commit": commit, "as_dict": False},
)
+ def get_summarized_performance_report_of_database(self, site):
+ return self.post(
+ f"benches/{site.bench}/sites/{site.name}/database/performance-report",
+ data={"mariadb_root_password": get_mariadb_root_password(site)},
+ )
+
+ def analyze_slow_queries(self, site, normalized_queries: list[dict]):
+ """
+ normalized_queries format:
+ [
+ {
+ "example": "",
+ "normalized" : "",
+ }
+ ]
+ """
+ # TODO: move this to an agent job
+ return self.post(
+ f"benches/{site.bench}/sites/{site.name}/database/analyze-slow-queries",
+ data={
+ "queries": normalized_queries,
+ "mariadb_root_password": get_mariadb_root_password(site),
+ },
+ )
+
+ def fetch_database_processes(self, site):
+ return self.post(
+ f"benches/{site.bench}/sites/{site.name}/database/processes",
+ data={
+ "mariadb_root_password": get_mariadb_root_password(site),
+ },
+ )
+
+ def kill_database_process(self, site, id):
+ return self.post(
+ f"benches/{site.bench}/sites/{site.name}/database/kill-process/{id}",
+ data={
+ "mariadb_root_password": get_mariadb_root_password(site),
+ },
+ )
+
class AgentCallbackException(Exception):
pass
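
All four new endpoints POST the MariaDB root password directly to the agent instead of going through an agent job (the TODO in `analyze_slow_queries` acknowledges this). A minimal sketch of how a caller might chain the two process endpoints; the process-list field names (`id`, `time`) are assumptions, not confirmed by this diff:

```python
# Usage sketch, not part of the diff. Assumes `site` is a Site doc
# (has .server, .bench, .name), as elsewhere in press.
from press.agent import Agent

def kill_long_running_queries(site, max_time_s: int = 60):
    agent = Agent(site.server)
    # direct POST, same pattern as get_summarized_performance_report_of_database()
    processes = agent.fetch_database_processes(site) or []
    for process in processes:
        # "id" and "time" are assumed fields of the agent's process payload
        if process.get("time", 0) > max_time_s:
            agent.kill_database_process(site, process["id"])
```
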
diff --git a/press/api/dboptimize.py b/press/api/dboptimize.py
deleted file mode 100644
index 37209e0408..0000000000
--- a/press/api/dboptimize.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import json
-
-import frappe
-
-from press.api.site import protected
-from press.press.report.mariadb_slow_queries.db_optimizer import (
- ColumnStat,
- DBExplain,
- DBOptimizer,
- DBTable,
-)
-from press.press.report.mariadb_slow_queries.mariadb_slow_queries import (
- OptimizeDatabaseQuery,
- _fetch_column_stats,
- _fetch_table_stats,
-)
-from press.utils import log_error
-
-
-@frappe.whitelist()
-@protected("Site")
-def mariadb_analyze_query(name, row):
- return analyze_query(row=row, site=name)
-
-
-def analyze_query(row, site):
- # if mariadb_analyze_query_already_exists(site, row["query"]):
- # frappe.throw("The query seems to have already been optimized")
- doc = frappe.get_doc(
- {
- "doctype": "MariaDB Analyze Query",
- "site": site,
- "tables_in_query": [],
- }
- )
- doc.status = "Running"
-
- query = row["example"]
- doc.query = query
- doc.normalized_query = row["query"]
-
- if not query.lower().startswith(("select", "update", "delete")):
- doc.status = "Failure"
- doc.save(ignore_permissions=True)
- frappe.db.commit()
- return None
-
- doc.save(ignore_permissions=True)
- frappe.db.commit()
-
- analyzer = OptimizeDatabaseQuery(site, query)
- explain_output = analyzer.fetch_explain() or []
- doc.explain_output = json.dumps(explain_output)
- explain_output = [DBExplain.from_frappe_ouput(e) for e in explain_output]
-
- optimizer = DBOptimizer(query=analyzer.query, explain_plan=explain_output)
- for table in optimizer.tables_examined:
- stats = _fetch_table_stats(analyzer.site, table)
- doc.append("tables_in_query", {"table": table, "table_statistics": json.dumps(stats)})
-
- if not stats:
- # Old framework version
- doc.status = "Failure"
- doc.save(ignore_permissions=True)
- frappe.db.commit()
- return None
-
- # This is an agent job. Remaining is processed in the callback.
- _fetch_column_stats(analyzer.site, table, doc.get_title())
-
- doc.save(ignore_permissions=True)
- return doc.status
-
-
-def check_if_all_fetch_column_stats_was_successful(doc):
- return all(item.status == "Success" for item in doc.tables_in_query)
-
-
-def fetch_column_stats_update(job, response_data):
- request_data_json = json.loads(job.request_data)
- doc_name = request_data_json["doc_name"]
- table = request_data_json["table"]
-
- if job.status == "Success":
- column_statistics = response_data["steps"][0]["data"]["output"]
- doc = frappe.get_doc("MariaDB Analyze Query", doc_name)
- for item in doc.tables_in_query:
- if item.table == table:
- item.column_statistics = column_statistics
- item.status = "Success"
- doc.save()
- frappe.db.commit()
- if check_if_all_fetch_column_stats_was_successful(doc):
- doc.status = "Success"
- doc.save()
- frappe.db.commit()
- # Persists within doctype
- save_suggested_index(doc)
- elif job.status == "Failure":
- doc = frappe.get_doc("MariaDB Analyze Query", doc_name)
- for item in doc.tables_in_query:
- if item.table == table:
- item.status = "Failure"
- doc.save()
-
- doc.status = "Failure"
- doc.save()
- frappe.db.commit()
-
-
-def save_suggested_index(doc):
- explain_output = json.loads(doc.explain_output)
- explain_output = [DBExplain.from_frappe_ouput(e) for e in explain_output]
- optimizer = DBOptimizer(query=doc.query, explain_plan=explain_output)
- for item in doc.tables_in_query:
- stats = json.loads(item.table_statistics)
- if not stats:
- # Old framework version
- return
- db_table = DBTable.from_frappe_ouput(stats)
- column_stats = json.loads(item.column_statistics)
- column_stats = [ColumnStat.from_frappe_ouput(c) for c in column_stats]
- db_table.update_cardinality(column_stats)
- optimizer.update_table_data(db_table)
- index = optimizer.suggest_index()
- doc.suggested_index = f"{index.table}.{index.column}"
- doc.save()
-
-
-@frappe.whitelist()
-@protected("Site")
-def get_status_of_mariadb_analyze_query(name, query):
- filters = {"site": name, "query": query}
- doc = frappe.get_all(
- "MariaDB Analyze Query",
- filters=filters,
- fields=["status", "suggested_index"],
- limit=1,
- )
- if doc:
- return doc[0]
- return None
-
-
-def mariadb_analyze_query_already_exists(site, normalized_query):
- if frappe.db.exists("MariaDB Analyze Query", {"site": site, "normalized_query": normalized_query}):
- return True
- return False
-
-
-@frappe.whitelist()
-@protected("Site")
-def mariadb_analyze_query_already_running_for_site(name):
- if frappe.db.exists("MariaDB Analyze Query", {"site": name, "status": "Running"}):
- return True
- return False
-
-
-@frappe.whitelist()
-@protected("Site")
-def get_suggested_index(name, normalized_query):
- return frappe.get_value(
- "MariaDB Analyze Query",
- {"site": name, "status": "Success", "normalized_query": normalized_query},
- ["site", "normalized_query", "suggested_index"],
- as_dict=True,
- )
-
-
-def delete_all_occurences_of_mariadb_analyze_query(job):
- try:
- if job.status == "Success" or job.status == "Failure":
- frappe.db.delete("MariaDB Analyze Query", {"site": job.site})
- frappe.db.commit()
- except Exception as e:
- log_error("Deleting all occurrences of MariaDB Analyze Query Failed", data=e)
diff --git a/press/api/server.py b/press/api/server.py
index df37dbd7ab..e36746ef47 100644
--- a/press/api/server.py
+++ b/press/api/server.py
@@ -300,6 +300,42 @@ def analytics(name, query, timezone, duration):
f"""node_memory_MemTotal_bytes{{instance="{name}",job="node"}} - node_memory_MemFree_bytes{{instance="{name}",job="node"}} - (node_memory_Cached_bytes{{instance="{name}",job="node"}} + node_memory_Buffers_bytes{{instance="{name}",job="node"}})""",
lambda x: "Used",
),
+ "database_uptime": (
+ f"""mysql_up{{instance="{name}",job="mariadb"}}""",
+ lambda x: "Uptime",
+ ),
+ "database_commands_count": (
+ f"""sum(round(increase(mysql_global_status_commands_total{{instance='{name}', command=~"select|update|insert|delete|begin|commit|rollback"}}[{timegrain}s]))) by (command)""",
+ lambda x: x["command"],
+ ),
+ "database_connections": (
+ f"""{{__name__=~"mysql_global_status_threads_connected|mysql_global_variables_max_connections", instance="{name}"}}""",
+ lambda x: "Max Connections"
+ if x["__name__"] == "mysql_global_variables_max_connections"
+ else "Connected Clients",
+ ),
+ "innodb_bp_size": (
+ f"""mysql_global_variables_innodb_buffer_pool_size{{instance='{name}'}}""",
+ lambda x: "Buffer Pool Size",
+ ),
+ "innodb_bp_size_of_total_ram": (
+ f"""avg by (instance) ((mysql_global_variables_innodb_buffer_pool_size{{instance=~"{name}"}} * 100)) / on (instance) (avg by (instance) (node_memory_MemTotal_bytes{{instance=~"{name}"}}))""",
+ lambda x: "Buffer Pool Size of Total Ram",
+ ),
+ "innodb_bp_miss_percent": (
+ f"""
+avg by (instance) (
+ rate(mysql_global_status_innodb_buffer_pool_reads{{instance=~"{name}"}}[{timegrain}s])
+ /
+ rate(mysql_global_status_innodb_buffer_pool_read_requests{{instance=~"{name}"}}[{timegrain}s])
+)
+""",
+ lambda x: "Buffer Pool Miss Percentage",
+ ),
+ "innodb_avg_row_lock_time": (
+ f"""(rate(mysql_global_status_innodb_row_lock_time{{instance="{name}"}}[{timegrain}s]) / 1000)/rate(mysql_global_status_innodb_row_lock_waits{{instance="{name}"}}[{timegrain}s])""",
+ lambda x: "Avg Row Lock Time",
+ ),
}
return prometheus_query(query_map[query][0], query_map[query][1], timezone, timespan, timegrain)
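
Each new entry keeps the existing `query_map` shape: a PromQL template plus a lambda that labels each returned series. A sketch of resolving one of the new keys through the `analytics` function shown above; the instance name and duration value are assumptions about what the dashboard already sends:

```python
# Sketch only: how the dashboard side would resolve one of the new keys.
from press.api.server import analytics

series = analytics(
    name="m1.frappe.cloud",        # hypothetical database server instance
    query="database_connections",  # key added in this diff
    timezone="UTC",
    duration="1h",                 # assumed duration format
)
# The labeling lambda maps each Prometheus series to a legend label:
# "Max Connections" for mysql_global_variables_max_connections,
# "Connected Clients" for mysql_global_status_threads_connected.
```
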
diff --git a/press/fixtures/agent_job_type.json b/press/fixtures/agent_job_type.json
index b6b044d9e4..a4b52ccdaa 100644
--- a/press/fixtures/agent_job_type.json
+++ b/press/fixtures/agent_job_type.json
@@ -2260,5 +2260,35 @@
"step_name": "Modify Database User Permissions"
}
]
+ },
+ {
+ "disabled_auto_retry": 1,
+ "docstatus": 0,
+ "doctype": "Agent Job Type",
+ "max_retry_count": 1,
+ "modified": "2024-10-28 14:49:19.894247",
+ "name": "Fetch Database Table Schema",
+ "request_method": "POST",
+ "request_path": "/benches/{bench}/sites/{site}/database/schema",
+ "steps": [
+ {
+ "step_name": "Fetch Database Table Schema"
+ }
+ ]
+ },
+ {
+ "disabled_auto_retry": 1,
+ "docstatus": 0,
+ "doctype": "Agent Job Type",
+ "max_retry_count": 3,
+ "modified": "2024-12-19 17:21:14.136650",
+ "name": "Analyze Slow Queries",
+ "request_method": "POST",
+ "request_path": "/benches/{bench}/sites/{site}/database/analyze-slow-queries",
+ "steps": [
+ {
+ "step_name": "Analyze Slow Queries"
+ }
+ ]
}
]
\ No newline at end of file
diff --git a/press/press/doctype/agent_job/agent_job.py b/press/press/doctype/agent_job/agent_job.py
index 69ab83a916..f0c7589f62 100644
--- a/press/press/doctype/agent_job/agent_job.py
+++ b/press/press/doctype/agent_job/agent_job.py
@@ -879,10 +879,6 @@ def process_job_updates(job_name: str, response_data: dict | None = None): # no
start = now_datetime()
try:
- from press.api.dboptimize import (
- delete_all_occurences_of_mariadb_analyze_query,
- fetch_column_stats_update,
- )
from press.press.doctype.agent_job.agent_job_notifications import (
send_job_failure_notification,
)
@@ -912,6 +908,7 @@ def process_job_updates(job_name: str, response_data: dict | None = None): # no
process_archive_site_job_update,
process_complete_setup_wizard_job_update,
process_create_user_job_update,
+ process_fetch_database_table_schema_job_update,
process_install_app_site_job_update,
process_migrate_site_job_update,
process_move_site_to_bench_job_update,
@@ -923,9 +920,6 @@ def process_job_updates(job_name: str, response_data: dict | None = None): # no
process_uninstall_app_site_job_update,
)
from press.press.doctype.site_backup.site_backup import process_backup_site_job_update
- from press.press.doctype.site_database_table_schema.site_database_table_schema import (
- SiteDatabaseTableSchema,
- )
from press.press.doctype.site_domain.site_domain import process_new_host_job_update
from press.press.doctype.site_update.site_update import (
process_update_site_job_update,
@@ -1001,23 +995,8 @@ def process_job_updates(job_name: str, response_data: dict | None = None): # no
AppPatch.process_patch_app(job)
elif job.job_type == "Run Remote Builder":
DeployCandidate.process_run_build(job, response_data)
- elif job.job_type == "Column Statistics":
- frappe.enqueue(
- fetch_column_stats_update,
- queue="default",
- timeout=None,
- is_async=True,
- now=False,
- job_name="Fetch Column Updates Through Enque",
- enqueue_after_commit=False,
- at_front=False,
- job=job,
- response_data=response_data,
- )
elif job.job_type == "Create User":
process_create_user_job_update(job)
- elif job.job_type == "Add Database Index":
- delete_all_occurences_of_mariadb_analyze_query(job)
elif job.job_type == "Complete Setup Wizard":
process_complete_setup_wizard_job_update(job)
elif job.job_type == "Update Bench In Place":
@@ -1025,7 +1004,7 @@ def process_job_updates(job_name: str, response_data: dict | None = None): # no
elif job.job_type == "Recover Update In Place":
Bench.process_recover_update_inplace(job)
elif job.job_type == "Fetch Database Table Schema":
- SiteDatabaseTableSchema.process_job_update(job)
+ process_fetch_database_table_schema_job_update(job)
elif job.job_type in [
"Create Database User",
"Remove Database User",
diff --git a/press/press/doctype/mariadb_analyze_query/__init__.py b/press/press/doctype/mariadb_analyze_query/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/press/press/doctype/mariadb_analyze_query/mariadb_analyze_query.js b/press/press/doctype/mariadb_analyze_query/mariadb_analyze_query.js
deleted file mode 100644
index f6228a4cd9..0000000000
--- a/press/press/doctype/mariadb_analyze_query/mariadb_analyze_query.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright (c) 2024, Frappe and contributors
-// For license information, please see license.txt
-
-// frappe.ui.form.on("MariaDB Analyze Query", {
-// refresh(frm) {
-
-// },
-// });
diff --git a/press/press/doctype/mariadb_analyze_query/mariadb_analyze_query.json b/press/press/doctype/mariadb_analyze_query/mariadb_analyze_query.json
deleted file mode 100644
index 17e02dffe0..0000000000
--- a/press/press/doctype/mariadb_analyze_query/mariadb_analyze_query.json
+++ /dev/null
@@ -1,92 +0,0 @@
-{
- "actions": [],
- "allow_rename": 1,
- "creation": "2024-07-21 16:44:56.074012",
- "doctype": "DocType",
- "engine": "InnoDB",
- "field_order": [
- "site",
- "normalized_query",
- "column_break_holt",
- "query",
- "status",
- "section_break_cdwy",
- "explain_output",
- "tables_in_query",
- "suggested_index"
- ],
- "fields": [
- {
- "fieldname": "query",
- "fieldtype": "Long Text",
- "label": "Query",
- "reqd": 1
- },
- {
- "fieldname": "site",
- "fieldtype": "Data",
- "in_list_view": 1,
- "label": "Site",
- "reqd": 1
- },
- {
- "fieldname": "tables_in_query",
- "fieldtype": "Table",
- "label": "Tables In Query",
- "options": "MariaDB Analyze Query Tables"
- },
- {
- "fieldname": "suggested_index",
- "fieldtype": "Data",
- "label": "Suggested Index"
- },
- {
- "fieldname": "explain_output",
- "fieldtype": "Code",
- "label": "Explain Output"
- },
- {
- "fieldname": "status",
- "fieldtype": "Data",
- "label": "Status"
- },
- {
- "fieldname": "column_break_holt",
- "fieldtype": "Column Break"
- },
- {
- "fieldname": "normalized_query",
- "fieldtype": "Long Text",
- "label": "Normalized Query",
- "reqd": 1
- },
- {
- "fieldname": "section_break_cdwy",
- "fieldtype": "Section Break"
- }
- ],
- "index_web_pages_for_search": 1,
- "links": [],
- "modified": "2024-07-29 17:32:21.578471",
- "modified_by": "Administrator",
- "module": "Press",
- "name": "MariaDB Analyze Query",
- "owner": "Administrator",
- "permissions": [
- {
- "create": 1,
- "delete": 1,
- "email": 1,
- "export": 1,
- "print": 1,
- "read": 1,
- "report": 1,
- "role": "System Manager",
- "share": 1,
- "write": 1
- }
- ],
- "sort_field": "creation",
- "sort_order": "DESC",
- "states": []
-}
\ No newline at end of file
diff --git a/press/press/doctype/mariadb_analyze_query/mariadb_analyze_query.py b/press/press/doctype/mariadb_analyze_query/mariadb_analyze_query.py
deleted file mode 100644
index cb963a42af..0000000000
--- a/press/press/doctype/mariadb_analyze_query/mariadb_analyze_query.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2024, Frappe and contributors
-# For license information, please see license.txt
-
-# import frappe
-from frappe.model.document import Document
-
-
-class MariaDBAnalyzeQuery(Document):
- # begin: auto-generated types
- # This code is auto-generated. Do not modify anything in this block.
-
- from typing import TYPE_CHECKING
-
- if TYPE_CHECKING:
- from frappe.types import DF
- from press.press.doctype.mariadb_analyze_query_tables.mariadb_analyze_query_tables import (
- MariaDBAnalyzeQueryTables,
- )
-
- explain_output: DF.Code | None
- normalized_query: DF.LongText
- query: DF.LongText
- site: DF.Data
- status: DF.Data | None
- suggested_index: DF.Data | None
- tables_in_query: DF.Table[MariaDBAnalyzeQueryTables]
- # end: auto-generated types
-
- pass
diff --git a/press/press/doctype/mariadb_analyze_query/test_mariadb_analyze_query.py b/press/press/doctype/mariadb_analyze_query/test_mariadb_analyze_query.py
deleted file mode 100644
index 082e41b5b7..0000000000
--- a/press/press/doctype/mariadb_analyze_query/test_mariadb_analyze_query.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (c) 2024, Frappe and Contributors
-# See license.txt
-
-# import frappe
-from frappe.tests.utils import FrappeTestCase
-
-
-class TestMariaDBAnalyzeQuery(FrappeTestCase):
- pass
diff --git a/press/press/doctype/mariadb_analyze_query_tables/__init__.py b/press/press/doctype/mariadb_analyze_query_tables/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/press/press/doctype/mariadb_analyze_query_tables/mariadb_analyze_query_tables.json b/press/press/doctype/mariadb_analyze_query_tables/mariadb_analyze_query_tables.json
deleted file mode 100644
index dc8bb19960..0000000000
--- a/press/press/doctype/mariadb_analyze_query_tables/mariadb_analyze_query_tables.json
+++ /dev/null
@@ -1,52 +0,0 @@
-{
- "actions": [],
- "allow_rename": 1,
- "creation": "2024-07-21 16:49:47.144539",
- "doctype": "DocType",
- "editable_grid": 1,
- "engine": "InnoDB",
- "field_order": [
- "table",
- "table_statistics",
- "column_statistics",
- "status"
- ],
- "fields": [
- {
- "fieldname": "table",
- "fieldtype": "Data",
- "in_list_view": 1,
- "label": "Table"
- },
- {
- "fieldname": "column_statistics",
- "fieldtype": "Code",
- "in_list_view": 1,
- "label": "Column Statistics"
- },
- {
- "fieldname": "table_statistics",
- "fieldtype": "Code",
- "in_list_view": 1,
- "label": "Table Statistics"
- },
- {
- "fieldname": "status",
- "fieldtype": "Data",
- "in_list_view": 1,
- "label": "Status"
- }
- ],
- "index_web_pages_for_search": 1,
- "istable": 1,
- "links": [],
- "modified": "2024-07-24 11:46:38.741427",
- "modified_by": "Administrator",
- "module": "Press",
- "name": "MariaDB Analyze Query Tables",
- "owner": "Administrator",
- "permissions": [],
- "sort_field": "creation",
- "sort_order": "DESC",
- "states": []
-}
\ No newline at end of file
diff --git a/press/press/doctype/mariadb_analyze_query_tables/mariadb_analyze_query_tables.py b/press/press/doctype/mariadb_analyze_query_tables/mariadb_analyze_query_tables.py
deleted file mode 100644
index c0a058d29d..0000000000
--- a/press/press/doctype/mariadb_analyze_query_tables/mariadb_analyze_query_tables.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (c) 2024, Frappe and contributors
-# For license information, please see license.txt
-
-# import frappe
-from frappe.model.document import Document
-
-
-class MariaDBAnalyzeQueryTables(Document):
- # begin: auto-generated types
- # This code is auto-generated. Do not modify anything in this block.
-
- from typing import TYPE_CHECKING
-
- if TYPE_CHECKING:
- from frappe.types import DF
-
- column_statistics: DF.Code | None
- parent: DF.Data
- parentfield: DF.Data
- parenttype: DF.Data
- status: DF.Data | None
- table: DF.Data | None
- table_statistics: DF.Code | None
- # end: auto-generated types
-
- pass
diff --git a/press/press/doctype/site/site.py b/press/press/doctype/site/site.py
index 39fd53254d..191c7d4990 100644
--- a/press/press/doctype/site/site.py
+++ b/press/press/doctype/site/site.py
@@ -13,6 +13,8 @@
import dateutil.parser
import frappe
+import frappe.data
+import frappe.utils
import pytz
import requests
from frappe import _
@@ -68,6 +70,9 @@
from press.press.doctype.site_activity.site_activity import log_site_activity
from press.press.doctype.site_analytics.site_analytics import create_site_analytics
from press.press.doctype.site_plan.site_plan import get_plan_config
+from press.press.report.mariadb_slow_queries.mariadb_slow_queries import (
+ get_doctype_name,
+)
from press.utils import (
convert,
fmt_timedelta,
@@ -2265,10 +2270,84 @@ def get_update_information(self):
out.update_available = any([app["update_available"] for app in out.apps])
return out
- @frappe.whitelist()
- def optimize_tables(self):
+ def fetch_running_optimize_tables_job(self):
+ return frappe.db.exists(
+ "Agent Job",
+ {
+ "site": self.name,
+ "job_type": "Optimize Tables",
+ "status": ["in", ["Undelivered", "Running", "Pending"]],
+ },
+ )
+
+ @dashboard_whitelist()
+ def optimize_tables(self, ignore_checks: bool = False):
+ if not ignore_checks:
+ # check for running `Optimize Tables` agent job
+ if job := self.fetch_running_optimize_tables_job():
+ return {
+ "success": True,
+ "message": "Optimize Tables job is already running on this site.",
+ "job_name": job,
+ }
+ # check if `Optimize Tables` has run within the last hour
+ recent_agent_job_name = frappe.db.exists(
+ "Agent Job",
+ {
+ "site": self.name,
+ "job_type": "Optimize Tables",
+ "status": ["not in", ["Failure", "Delivery Failure"]],
+ "creation": [">", frappe.utils.add_to_date(frappe.utils.now_datetime(), hours=-1)],
+ },
+ )
+ if recent_agent_job_name:
+ return {
+ "success": False,
+ "message": "Optimize Tables job has already run in the last 1 hour. Try later.",
+ "job_name": None,
+ }
+
+ agent = Agent(self.server)
+ job_name = agent.optimize_tables(self).name
+ return {
+ "success": True,
+ "message": "Optimize Tables has been triggered on this site.",
+ "job_name": job_name,
+ }
+
+ @dashboard_whitelist()
+ def get_database_performance_report(self):
+ from press.press.report.mariadb_slow_queries.mariadb_slow_queries import get_data as get_slow_queries
+
agent = Agent(self.server)
- agent.optimize_tables(self)
+ result = agent.get_summarized_performance_report_of_database(self)
+ # fetch slow queries from the last 7 days
+ slow_queries = get_slow_queries(
+ frappe._dict(
+ {
+ "database": self.database_name,
+ "start_datetime": frappe.utils.add_to_date(None, days=-7),
+ "stop_datetime": frappe.utils.now_datetime(),
+ "search_pattern": ".*",
+ "max_lines": 2000,
+ "normalize_queries": True,
+ }
+ )
+ )
+ # remove `parent` & `creation` indexes from unused_indexes
+ result["unused_indexes"] = [
+ index
+ for index in result.get("unused_indexes", [])
+ if index["index_name"] not in ["parent", "creation"]
+ ]
+
+ # cast all float values to int
+ for query in slow_queries:
+ for key, value in query.items():
+ if isinstance(value, float):
+ query[key] = int(value)
+ result["slow_queries"] = slow_queries
+ return result
@property
def server_logs(self):
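
`optimize_tables` now returns a uniform `{success, message, job_name}` contract instead of silently firing the agent job. A short sketch of consuming it (hypothetical site name):

```python
# Sketch of the optimize_tables() return contract defined above.
import frappe

site = frappe.get_doc("Site", "demo.frappe.cloud")  # hypothetical
result = site.optimize_tables()

if not result["success"]:
    print(result["message"])  # blocked: ran within the last hour
elif result["job_name"]:
    # either the already-running job or the one just triggered
    print(frappe.get_doc("Agent Job", result["job_name"]).status)
```
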
@@ -2625,19 +2704,66 @@ def forcefully_remove_site(self, bench):
@dashboard_whitelist()
def fetch_database_table_schema(self, reload=False):
- if not frappe.db.exists("Site Database Table Schema", {"site": self.name}):
- frappe.get_doc({"doctype": "Site Database Table Schema", "site": self.name}).insert(
- ignore_permissions=True
- )
+ """
+ Store the schema dump in the Redis cache
+ """
+ key_for_schema = f"database_table_schema__data:{self.name}"
+ key_for_schema_status = (
+ f"database_table_schema__status:{self.name}" # 1 - loading, 2 - done, None - not available
+ )
+
+ if reload:
+ frappe.cache().delete_value(key_for_schema)
+ frappe.cache().delete_value(key_for_schema_status)
+
+ status = frappe.utils.cint(frappe.cache().get_value(key_for_schema_status))
+ if status:
+ if status == 1:
+ return {
+ "loading": True,
+ "data": [],
+ }
+ if status == 2:
+ return {
+ "loading": False,
+ "data": json.loads(frappe.cache().get_value(key_for_schema)),
+ }
- doc = frappe.get_doc("Site Database Table Schema", {"site": self.name})
- loading, data = doc.fetch(reload)
+ # Check whether an agent job created within the last 5 minutes is still pending/running
+ # Prevents duplicate agent job creation due to a race condition
+ if not frappe.db.exists(
+ "Agent Job",
+ {
+ "job_type": "Fetch Database Table Schema",
+ "site": self.name,
+ "status": ["in", ["Undelivered", "Pending", "Running"]],
+ "creation": (">", frappe.utils.add_to_date(None, minutes=-5)),
+ },
+ ):
+ # mark the schema status as loading, then create the agent job
+ frappe.cache().set_value(key_for_schema_status, 1, expires_in_sec=600)
+ Agent(self.server).fetch_database_table_schema(
+ self, include_index_info=True, include_table_size=True
+ )
return {
- "loading": loading,
- "data": data,
- "last_updated": doc.last_updated,
+ "loading": True,
+ "data": [],
}
+ @dashboard_whitelist()
+ def fetch_database_processes(self):
+ agent = Agent(self.server)
+ if agent.should_skip_requests():
+ return None
+ return agent.fetch_database_processes(self)
+
+ @dashboard_whitelist()
+ def kill_database_process(self, id):
+ agent = Agent(self.server)
+ if agent.should_skip_requests():
+ return None
+ return agent.kill_database_process(self, id)
+
@dashboard_whitelist()
def run_sql_query_in_database(self, query: str, commit: bool):
if not query:
@@ -2656,6 +2782,52 @@ def run_sql_query_in_database(self, query: str, commit: bool):
doc.insert(ignore_permissions=True)
return response
+ @dashboard_whitelist()
+ def suggest_database_indexes(self):
+ from press.press.report.mariadb_slow_queries.mariadb_slow_queries import get_data as get_slow_queries
+
+ # fetch slow queries from the last 7 days
+ slow_queries = get_slow_queries(
+ frappe._dict(
+ {
+ "database": self.database_name,
+ "start_datetime": frappe.utils.add_to_date(None, days=-7),
+ "stop_datetime": frappe.utils.now_datetime(),
+ "search_pattern": ".*",
+ "max_lines": 2000,
+ "normalize_queries": True,
+ }
+ )
+ )
+ slow_queries = [{"example": x["example"], "normalized": x["query"]} for x in slow_queries]
+ agent = Agent(self.server)
+ return agent.analyze_slow_queries(self, slow_queries)
+
+ @dashboard_whitelist()
+ def add_database_index(self, table, column):
+ record = frappe.db.exists(
+ "Agent Job",
+ {
+ "site": self.name,
+ "status": ["in", ["Undelivered", "Running", "Pending"]],
+ "job_type": "Add Database Index",
+ },
+ )
+ if record:
+ return {
+ "success": False,
+ "message": "There is already a job running for adding database index. Please wait until finished.",
+ "job_name": record,
+ }
+ doctype = get_doctype_name(table)
+ agent = Agent(self.server)
+ job = agent.add_database_index(self, doctype=doctype, columns=[column])
+ return {
+ "success": True,
+ "message": "Database index will be added on site.",
+ "job_name": job.name,
+ }
+
def site_cleanup_after_archive(site):
delete_site_domains(site)
@@ -2689,6 +2861,52 @@ def release_name(name):
frappe.rename_doc("Site", name, new_name)
+def process_fetch_database_table_schema_job_update(job):
+ key_for_schema = f"database_table_schema__data:{job.site}"
+ key_for_schema_status = (
+ f"database_table_schema__status:{job.site}" # 1 - loading, 2 - done, None - not available
+ )
+
+ if job.status == "Pending":
+ return
+
+ if job.status == "Success":
+ """
+ Support old agent versions
+ Remove this once all agents are updated
+ """
+ data = json.loads(job.data)
+ is_old_agent = False
+
+ if len(data) > 0 and isinstance(data[next(iter(data.keys()))], list):
+ is_old_agent = True
+
+ if is_old_agent:
+ data_copy = data.copy()
+ data = {}
+ for key, value in data_copy.items():
+ data[key] = {
+ "columns": value,
+ "size": {
+ "data_length": 0,
+ "index_length": 0,
+ "total_size": 0,
+ }, # old agent api doesn't have size info
+ }
+ for column in data[key]["columns"]:
+ column["index_info"] = {
+ "index_usage": {x: 0 for x in column["indexes"]}, # just fill some dummy value
+ "indexes": column["indexes"],
+ "is_indexed": len(column["indexes"]) > 0,
+ }
+
+ frappe.cache().set_value(key_for_schema, json.dumps(data), expires_in_sec=6000)
+ frappe.cache().set_value(key_for_schema_status, 2, expires_in_sec=6000)
+ else:
+ frappe.cache().delete_value(key_for_schema)
+ frappe.cache().delete_value(key_for_schema_status)
+
+
def process_new_site_job_update(job): # noqa: C901
site_status = frappe.get_value("Site", job.site, "status", for_update=True)
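
The schema fetch is now a two-phase, cache-backed flow: the first call seeds status `1` (loading) and enqueues the agent job; `process_fetch_database_table_schema_job_update` then writes the schema under status `2`. A sketch of polling it to completion; the interval and timeout are assumptions (the dashboard polls via frappe-ui resources instead):

```python
# Sketch: polling the cache-backed schema fetch until the agent job's
# callback stores the result.
import time
import frappe

def wait_for_table_schema(site_name: str, timeout_s: int = 300) -> dict:
    site = frappe.get_doc("Site", site_name)
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        result = site.fetch_database_table_schema()
        if not result["loading"]:
            # {table: {"columns": [...], "size": {...}}} per the callback above
            return result["data"]
        time.sleep(5)  # assumed poll interval
    raise TimeoutError(f"schema for {site_name} not ready in {timeout_s}s")
```
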
diff --git a/press/press/doctype/site_database_table_schema/__init__.py b/press/press/doctype/site_database_table_schema/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/press/press/doctype/site_database_table_schema/site_database_table_schema.js b/press/press/doctype/site_database_table_schema/site_database_table_schema.js
deleted file mode 100644
index 4747337e12..0000000000
--- a/press/press/doctype/site_database_table_schema/site_database_table_schema.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright (c) 2024, Frappe and contributors
-// For license information, please see license.txt
-
-// frappe.ui.form.on("Site Database Table Schema", {
-// refresh(frm) {
-
-// },
-// });
diff --git a/press/press/doctype/site_database_table_schema/site_database_table_schema.json b/press/press/doctype/site_database_table_schema/site_database_table_schema.json
deleted file mode 100644
index 7917ec3755..0000000000
--- a/press/press/doctype/site_database_table_schema/site_database_table_schema.json
+++ /dev/null
@@ -1,91 +0,0 @@
-{
- "actions": [],
- "creation": "2024-10-28 11:45:28.634612",
- "doctype": "DocType",
- "engine": "InnoDB",
- "field_order": [
- "site",
- "column_break_mufi",
- "agent_job",
- "section_break_heok",
- "schema_json"
- ],
- "fields": [
- {
- "fieldname": "site",
- "fieldtype": "Link",
- "in_list_view": 1,
- "in_standard_filter": 1,
- "label": "Site",
- "options": "Site",
- "reqd": 1,
- "unique": 1
- },
- {
- "fieldname": "agent_job",
- "fieldtype": "Link",
- "in_list_view": 1,
- "in_standard_filter": 1,
- "label": "Agent Job",
- "options": "Agent Job"
- },
- {
- "fieldname": "column_break_mufi",
- "fieldtype": "Column Break"
- },
- {
- "fieldname": "section_break_heok",
- "fieldtype": "Section Break"
- },
- {
- "default": "{}",
- "fieldname": "schema_json",
- "fieldtype": "Long Text",
- "label": "Schema JSON",
- "reqd": 1
- }
- ],
- "index_web_pages_for_search": 1,
- "links": [],
- "modified": "2024-10-28 15:43:58.768773",
- "modified_by": "Administrator",
- "module": "Press",
- "name": "Site Database Table Schema",
- "owner": "Administrator",
- "permissions": [
- {
- "create": 1,
- "delete": 1,
- "email": 1,
- "export": 1,
- "print": 1,
- "read": 1,
- "report": 1,
- "role": "System Manager",
- "share": 1,
- "write": 1
- },
- {
- "email": 1,
- "export": 1,
- "print": 1,
- "read": 1,
- "report": 1,
- "role": "Press Admin",
- "share": 1
- },
- {
- "email": 1,
- "export": 1,
- "print": 1,
- "read": 1,
- "report": 1,
- "role": "Press Member",
- "share": 1
- }
- ],
- "sort_field": "creation",
- "sort_order": "DESC",
- "states": [],
- "title_field": "site"
-}
\ No newline at end of file
diff --git a/press/press/doctype/site_database_table_schema/site_database_table_schema.py b/press/press/doctype/site_database_table_schema/site_database_table_schema.py
deleted file mode 100644
index cf13d99e5c..0000000000
--- a/press/press/doctype/site_database_table_schema/site_database_table_schema.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright (c) 2024, Frappe and contributors
-# For license information, please see license.txt
-from __future__ import annotations
-
-import json
-from typing import TYPE_CHECKING
-
-import frappe
-from frappe.model.document import Document
-
-from press.agent import Agent
-
-if TYPE_CHECKING:
- from press.press.doctype.site_migration.site_migration import AgentJob
-
-
-class SiteDatabaseTableSchema(Document):
- # begin: auto-generated types
- # This code is auto-generated. Do not modify anything in this block.
-
- from typing import TYPE_CHECKING
-
- if TYPE_CHECKING:
- from frappe.types import DF
-
- agent_job: DF.Link | None
- schema_json: DF.LongText
- site: DF.Link
- # end: auto-generated types
-
- def fetch(self, reload=False) -> tuple[bool, dict]:
- """
- This function will return the schema of the database table
-
- Args:
- reload: bool - if True, it will fetch the schema from the server again
-
- Returns:
- tuple[bool, list]
- - 1st element: bool - Loading status
- - 2nd element: dict - Dictionary of table schemas
- Example -
- {
- "__Auth": [
- {
- "column": "doctype",
- "data_type": "varchar",
- "default": "NULL",
- "indexes": [
- "PRIMARY"
- ],
- "is_nullable": false
- },
- ....
- ],
- ....
- }
- """
- if len(self.schema) > 0 and not reload:
- return False, self.schema
-
- if self.agent_job is not None and frappe.get_value("Agent Job", self.agent_job, "status") in [
- "Undelivered",
- "Pending",
- "Running",
- ]:
- return True, {}
-
- self.schema_json = "{}"
- site = frappe.get_doc("Site", self.site)
- self.agent_job = Agent(site.server).fetch_database_table_schema(site).name
- self.save(ignore_permissions=True)
-
- return True, {}
-
- @property
- def last_updated(self) -> str:
- return self.modified or self.creation
-
- @property
- def schema(self) -> dict:
- try:
- return json.loads(self.schema_json)
- except frappe.DoesNotExistError:
- return {}
-
- @staticmethod
- def process_job_update(job: "AgentJob"):
- if job.status != "Success":
- return
- response_data = json.loads(job.data) or {}
- if response_data and frappe.db.exists("Site Database Table Schema", {"site": job.site}):
- doc = frappe.get_doc("Site Database Table Schema", {"site": job.site})
- doc.schema_json = json.dumps(response_data)
- doc.save(ignore_permissions=True)
diff --git a/press/press/doctype/site_database_table_schema/test_site_database_table_schema.py b/press/press/doctype/site_database_table_schema/test_site_database_table_schema.py
deleted file mode 100644
index b75f628d81..0000000000
--- a/press/press/doctype/site_database_table_schema/test_site_database_table_schema.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2024, Frappe and Contributors
-# See license.txt
-
-# import frappe
-from frappe.tests import UnitTestCase
-
-# On IntegrationTestCase, the doctype test records and all
-# link-field test record depdendencies are recursively loaded
-# Use these module variables to add/remove to/from that list
-EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
-IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"]
-
-
-class TestSiteDatabaseTableSchema(UnitTestCase):
- """
- Unit tests for SiteDatabaseTableSchema.
- Use this class for testing individual functions and methods.
- """
-
- pass
diff --git a/press/press/report/mariadb_slow_queries/db_optimizer.py b/press/press/report/mariadb_slow_queries/db_optimizer.py
deleted file mode 100644
index 36f6ad5afb..0000000000
--- a/press/press/report/mariadb_slow_queries/db_optimizer.py
+++ /dev/null
@@ -1,352 +0,0 @@
-"""Basic DB optimizer for Frappe Framework based app.
-
-This is largely based on heuristics and known good practices for indexing.
-"""
-
-from collections import defaultdict
-from dataclasses import dataclass
-from typing import Literal
-import re
-import frappe
-from frappe.utils import cint, cstr, flt
-from sql_metadata import Parser
-
-# Any index that reads more than 30% table on average is not "useful"
-INDEX_SCORE_THRESHOLD = 0.3
-# Anything reading less than this percent of table is considered optimal
-OPTIMIZATION_THRESHOLD = 0.1
-
-
-@dataclass
-class DBExplain:
- # refer: https://mariadb.com/kb/en/explain/
- # Anything not explicitly encoded here is likely not supported.
- select_type: Literal["SIMPLE", "PRIMARY", "SUBQUERY", "UNION", "DERIVED"]
- table: str
- scan_type: Literal[ # What type of scan will be performed
- "ALL", # Full table scan
- "CONST", # Single row will be read
- "EQ_REF", # A single row is found from *unique* index
- "REF", # Index is used, but MIGHT hit more than 1 rows as it's non-unique
- "RANGE", # The table will be accessed with a key over one or more value ranges.
- "INDEX_MERGE", # multiple indexes are used and merged smartly. Equivalent to RANGE
- "INDEX_SUBQUERY",
- "INDEX", # Full index scan is performed. Similar to full table scan in case of large number of rows.
- "REF_OR_NULL",
- "UNIQUE_SUBQUERY",
- "FULLTEXT", # Full text index is used,
- ]
- possible_keys: list[str] | None = None # possible indexes that can be used
- key: str | None = None # This index is being used
- key_len: int | None = None # How many prefix bytes from index are being used
- ref: str | None = None # is reference constant or some other column
- rows: int = 0 # roughly how many rows will be examined
- extra: str | None = None
-
- @classmethod
- def from_frappe_ouput(cls, data) -> "DBExplain":
- return cls(
- select_type=cstr(data["select_type"]).upper(),
- table=data["table"],
- scan_type=cstr(data["type"]).upper(),
- possible_keys=data["possible_keys"],
- key=data["key"],
- key_len=cint(data["key_len"]) if data["key_len"] else None,
- ref=data["ref"],
- rows=cint(data["rows"]),
- extra=data.get("Extra"),
- )
-
-
-@dataclass
-class DBColumn:
- name: str
- cardinality: int | None
- is_nullable: bool
- default: str
- data_type: str
-
- @classmethod
- def from_frappe_ouput(cls, data) -> "DBColumn":
- "Parse DBColumn from output of describe-database-table command in Frappe"
- return cls(
- name=data["column"],
- cardinality=data.get("cardinality"),
- is_nullable=data["is_nullable"],
- default=data["default"],
- data_type=data["type"],
- )
-
-
-@dataclass
-class DBIndex:
- name: str
- column: str
- table: str
- unique: bool | None = None
- cardinality: int | None = None
- sequence: int = 1
- nullable: bool = True
- _score: float = 0.0
-
- def __eq__(self, other: "DBIndex") -> bool:
- return (
- self.column == other.column
- and self.sequence == other.sequence
- and self.table == other.table
- )
-
- def __repr__(self):
- return f"DBIndex(`{self.table}`.`{self.column}`)"
-
- @classmethod
- def from_frappe_ouput(cls, data, table) -> "DBIndex":
- "Parse DBIndex from output of describe-database-table command in Frappe"
- return cls(
- name=data["name"],
- table=table,
- unique=data["unique"],
- cardinality=data["cardinality"],
- sequence=data["sequence"],
- nullable=data["nullable"],
- column=data["column"],
- )
-
-
-@dataclass
-class ColumnStat:
- column_name: str
- avg_frequency: float
- avg_length: float
- nulls_ratio: float | None = None
- histogram: list[float] = None
-
- def __post_init__(self):
- if not self.histogram:
- self.histogram = []
-
- @classmethod
- def from_frappe_ouput(cls, data) -> "ColumnStat":
- return cls(
- column_name=data["column_name"],
- avg_frequency=data["avg_frequency"],
- avg_length=data["avg_length"],
- nulls_ratio=data["nulls_ratio"],
- histogram=[flt(bin) for bin in data["histogram"].split(",")]
- if data["histogram"]
- else [],
- )
-
-
-@dataclass
-class DBTable:
- name: str
- total_rows: int
- schema: list[DBColumn] | None = None
- indexes: list[DBIndex] | None = None
-
- def __post_init__(self):
- if not self.schema:
- self.schema = []
- if not self.indexes:
- self.indexes = []
-
- def update_cardinality(self, column_stats: list[ColumnStat]) -> None:
- """Estimate cardinality using mysql.column_stat"""
- for column_stat in column_stats:
- for col in self.schema:
- if (
- col.name == column_stat.column_name
- and not col.cardinality
- and column_stat.avg_frequency
- ):
- # "hack" or "math" - average frequency is on average how frequently a row value appears.
- # Avg = total_rows / cardinality, so...
- col.cardinality = self.total_rows / column_stat.avg_frequency
-
- @classmethod
- def from_frappe_ouput(cls, data) -> "DBTable":
- "Parse DBTable from output of describe-database-table command in Frappe"
- table_name = data["table_name"]
- return cls(
- name=table_name,
- total_rows=data["total_rows"],
- schema=[DBColumn.from_frappe_ouput(c) for c in data["schema"]],
- indexes=[DBIndex.from_frappe_ouput(i, table_name) for i in data["indexes"]],
- )
-
- def has_column(self, column: str) -> bool:
- for col in self.schema:
- if col.name == column:
- return True
- return False
-
-
-@dataclass
-class DBOptimizer:
- query: str # raw query in string format
- explain_plan: list[DBExplain] = None
- tables: dict[str, DBTable] = None
- parsed_query: Parser = None
-
- def __post_init__(self):
- if not self.explain_plan:
- self.explain_plan = []
- if not self.tables:
- self.tables = {}
- for explain_entry in self.explain_plan:
- explain_entry.select_type = explain_entry.select_type.upper()
- explain_entry.scan_type = explain_entry.scan_type.upper()
- self.parsed_query = Parser(re.sub(r'"(\S+)"', r"'\1'", self.query))
-
- @property
- def tables_examined(self) -> list[str]:
- return self.parsed_query.tables
-
- def update_table_data(self, table: DBTable):
- self.tables[table.name] = table
-
- def potential_indexes(self) -> list[DBIndex]:
- """Get all columns that can potentially be indexed to speed up this query."""
-
- possible_indexes = []
-
- # Where claus columns using these operators benefit from index
- # 1. = (equality)
- # 2. >, <, >=, <=
- # 3. LIKE 'xyz%' (Prefix search)
- # 4. BETWEEN (for date[time] fields)
- # 5. IN (similar to equality)
- if where_columns := self.parsed_query.columns_dict.get("where"):
- # TODO: Apply some heuristics here, not all columns in where clause are actually useful
- possible_indexes.extend(where_columns)
-
- # Join clauses - Both sides of join should ideally be indexed. One will *usually* be primary key.
- if join_columns := self.parsed_query.columns_dict.get("join"):
- possible_indexes.extend(join_columns)
-
- # Top N query variant - Order by column can possibly speed up the query
- if order_by_columns := self.parsed_query.columns_dict.get("order_by"):
- if self.parsed_query.limit_and_offset:
- possible_indexes.extend(order_by_columns)
-
- possible_db_indexes = [self._convert_to_db_index(i) for i in possible_indexes]
- possible_db_indexes = [
- i for i in possible_db_indexes if i.column not in ("*", "name")
- ]
- possible_db_indexes.sort(key=lambda i: (i.table, i.column))
-
- return self._remove_existing_indexes(possible_db_indexes)
-
- def _convert_to_db_index(self, column: str) -> DBIndex:
- column_name, table = None, None
-
- if "." in column:
- table, column_name = column.split(".")
- else:
- column_name = column
- for table_name, db_table in self.tables.items():
- if db_table.has_column(column):
- table = table_name
- break
- return DBIndex(column=column_name, name=column_name, table=table)
-
- def _remove_existing_indexes(self, potential_indexes: list[DBIndex]) -> list[DBIndex]:
- """Given list of potential index candidates remove the ones that already exist.
-
- This also removes multi-column indexes for parts that are applicable to query.
- Example: If multi-col index A+B+C exists and query utilizes A+B then
- A+B are removed from potential indexes.
- """
-
- def remove_maximum_indexes(idx: list[DBIndex]):
- """Try to remove entire index from potential indexes, if not possible, reduce one part and try again until no parts are left."""
- if not idx:
- return
- matched_sub_index = []
- for idx_part in list(idx):
- matching_part = [
- i
- for i in potential_indexes
- if i.column == idx_part.column and i.table == idx_part.table
- ]
- if not matching_part:
- # pop and recurse
- idx.pop()
- return remove_maximum_indexes(idx)
- else:
- matched_sub_index.extend(matching_part)
-
- # Every part matched now, lets remove those parts
- for i in matched_sub_index:
- potential_indexes.remove(i)
-
- # Reconstruct multi-col index
- for table in self.tables.values():
- merged_indexes = defaultdict(list)
- for index in table.indexes:
- merged_indexes[index.name].append(index)
-
- for idx in merged_indexes.values():
- idx.sort(key=lambda x: x.sequence)
-
- for idx in merged_indexes.values():
- remove_maximum_indexes(idx)
- return potential_indexes
-
- def suggest_index(self) -> DBIndex | None:
- """Suggest best possible column to index given query and table stats."""
- if missing_tables := (set(self.tables_examined) - set(self.tables.keys())):
- frappe.throw("DBTable infomation missing for: " + ", ".join(missing_tables))
-
- potential_indexes = self.potential_indexes()
-
- for index in list(potential_indexes):
- table = self.tables[index.table]
-
- # Data type is not easily indexable - skip
- column = [c for c in table.schema if c.name == index.column][0]
- if "text" in column.data_type.lower() or "json" in column.data_type.lower():
- potential_indexes.remove(index)
- # Update cardinality from column so scoring can be done
- index.cardinality = column.cardinality
-
- for index in potential_indexes:
- index._score = self.index_score(index)
-
- potential_indexes.sort(key=lambda i: i._score)
- if (
- potential_indexes
- and (best_index := potential_indexes[0])
- and best_index._score < INDEX_SCORE_THRESHOLD
- ):
- return best_index
-
- def index_score(self, index: DBIndex) -> float:
- """Score an index from 0 to 1 based on usefulness.
-
- A score of 0.5 indicates on average this index will read 50% of the table. (e.g. checkboxes)"""
- table = self.tables[index.table]
-
- cardinality = index.cardinality or 2
- total_rows = table.total_rows or cardinality or 1
-
- # We assume most unique values are evenly distributed, this is
- # definitely not the case IRL but it should be good enough assumptions
- # Score is rouhgly what percentage of table we will end up reading on typical query
- rows_fetched_on_average = (table.total_rows or cardinality) / cardinality
- return rows_fetched_on_average / total_rows
-
- def can_be_optimized(self) -> bool:
- """Return true if it's worth optimizeing.
-
- Few cases can not be optimized any further. E.g. ref/eq_ref/cost type
- of queries. Assume that anything that reads <10% of table already is
- not possible to truly optimize with these heuristics."""
- for explain in self.explain_plan:
- for table in self.tables.values():
- if table.name != explain.table:
- continue
- if (explain.rows / table.total_rows) > OPTIMIZATION_THRESHOLD:
- return True
- return False
diff --git a/press/press/report/mariadb_slow_queries/mariadb_slow_queries.json b/press/press/report/mariadb_slow_queries/mariadb_slow_queries.json
index 5897aaffd9..d32316c08f 100644
--- a/press/press/report/mariadb_slow_queries/mariadb_slow_queries.json
+++ b/press/press/report/mariadb_slow_queries/mariadb_slow_queries.json
@@ -1,15 +1,15 @@
{
"add_total_row": 0,
"columns": [],
- "creation": "2021-11-01 19:16:08.357082",
- "disable_prepared_report": 0,
+ "creation": "2024-12-23 11:36:40.301426",
"disabled": 0,
"docstatus": 0,
"doctype": "Report",
"filters": [],
"idx": 0,
"is_standard": "Yes",
- "modified": "2022-11-08 17:10:41.382656",
+ "letterhead": null,
+ "modified": "2024-12-23 11:36:40.301426",
"modified_by": "Administrator",
"module": "Press",
"name": "MariaDB Slow Queries",
@@ -25,5 +25,6 @@
{
"role": "Site Manager"
}
- ]
+ ],
+ "timeout": 0
}
\ No newline at end of file
diff --git a/press/press/report/mariadb_slow_queries/mariadb_slow_queries.py b/press/press/report/mariadb_slow_queries/mariadb_slow_queries.py
index 902c4cd70c..a546e699db 100644
--- a/press/press/report/mariadb_slow_queries/mariadb_slow_queries.py
+++ b/press/press/report/mariadb_slow_queries/mariadb_slow_queries.py
@@ -3,28 +3,16 @@
from __future__ import annotations
-import json
import re
from collections import defaultdict
-from dataclasses import dataclass
import frappe
import requests
import sqlparse
from frappe.core.doctype.access_log.access_log import make_access_log
from frappe.utils import convert_utc_to_timezone, get_system_timezone
-from frappe.utils.caching import redis_cache
from frappe.utils.password import get_decrypted_password
-from press.agent import Agent
-from press.api.site import protected
-from press.press.report.mariadb_slow_queries.db_optimizer import (
- DBExplain,
- DBIndex,
- DBOptimizer,
- DBTable,
-)
-
def execute(filters=None):
frappe.only_for(["System Manager", "Site Manager", "Press Admin", "Press Member"])
@@ -89,15 +77,6 @@ def execute(filters=None):
},
)
- if filters.analyze:
- columns.append(
- {
- "fieldname": "suggested_index",
- "label": frappe._("Suggest Index"),
- "fieldtype": "Data",
- },
- )
-
data = get_data(filters)
return columns, data
@@ -120,13 +99,13 @@ def get_data(filters):
get_system_timezone(),
)
+ # Keep only DML/DQL statements (drops `SET ...` and other administrative commands)
+ dql_stmt = ["SELECT", "UPDATE", "DELETE", "INSERT"]
+ rows = [x for x in rows if any(x["query"].startswith(stmt) for stmt in dql_stmt)]
+
if filters.normalize_queries:
rows = summarize_by_query(rows)
- # You can not analyze a query unless it has been normalized.
- if filters.analyze:
- rows = analyze_queries(rows, filters.site)
-
return rows
@@ -211,111 +190,5 @@ def summarize_by_query(data):
return result
-def analyze_queries(data, site):
- # TODO: handle old framework and old agents and general failures
- for row in data:
- query = row["example"]
- if not query.lower().startswith(("select", "update", "delete")):
- continue
- analyzer = OptimizeDatabaseQuery(site, query)
- if index := analyzer.analyze():
- row["suggested_index"] = f"{index.table}.{index.column}"
- return data
-
-
-@dataclass
-class OptimizeDatabaseQuery:
- site: str
- query: str
-
- def analyze(self) -> DBIndex | None:
- explain_output = self.fetch_explain() or []
-
- explain_output = [DBExplain.from_frappe_ouput(e) for e in explain_output]
- optimizer = DBOptimizer(query=self.query, explain_plan=explain_output)
- tables = optimizer.tables_examined
-
- for table in tables:
- stats = _fetch_table_stats(self.site, table)
- if not stats:
- # Old framework version
- return None
- db_table = DBTable.from_frappe_ouput(stats)
- column_stats = _fetch_column_stats(self.site, table)
- if not column_stats:
- # Failing due to large size, TODO: move this to a job
- return None
- db_table.update_cardinality(column_stats)
- optimizer.update_table_data(db_table)
-
- return optimizer.suggest_index()
-
- def fetch_explain(self) -> list[dict]:
- site = frappe.get_cached_doc("Site", self.site)
- db_server_name = frappe.db.get_value("Server", site.server, "database_server", cache=True)
- database_server = frappe.get_cached_doc("Database Server", db_server_name)
- agent = Agent(database_server.name, "Database Server")
-
- data = {
- "schema": site.database_name,
- "query": self.query,
- "private_ip": database_server.private_ip,
- "mariadb_root_password": database_server.get_password("mariadb_root_password"),
- }
-
- return agent.post("database/explain", data=data)
-
-
-@redis_cache(ttl=60 * 5)
-def _fetch_table_stats(site: str, table: str):
- site = frappe.get_cached_doc("Site", site)
- agent = Agent(site.server)
- return agent.describe_database_table(
- site,
- doctype=get_doctype_name(table),
- columns=[],
- )
-
-
-@redis_cache(ttl=60 * 5)
-def _fetch_column_stats(site, table, doc_name):
- site = frappe.get_cached_doc("Site", site)
- db_server_name = frappe.db.get_value("Server", site.server, "database_server", cache=True)
- database_server = frappe.get_cached_doc("Database Server", db_server_name)
- agent = Agent(database_server.name, "Database Server")
-
- data = {
- # "site": site,
- "doc_name": doc_name,
- "schema": site.database_name,
- "table": table,
- "private_ip": database_server.private_ip,
- "mariadb_root_password": database_server.get_password("mariadb_root_password"),
- }
- agent.create_agent_job("Column Statistics", "/database/column-stats", data)
-
-
def get_doctype_name(table_name: str) -> str:
return table_name.removeprefix("tab")
-
-
-@frappe.whitelist()
-@protected("Site")
-def add_suggested_index(name, indexes):
- if isinstance(indexes, str):
- indexes = json.loads(indexes)
- frappe.enqueue(_add_suggested_index, indexes=indexes, site_name=name)
-
-
-def _add_suggested_index(site_name, indexes):
- if not indexes:
- frappe.throw("No index suggested")
-
- for index in indexes:
- table, column = index.split(".")
- doctype = get_doctype_name(table)
-
- site = frappe.get_cached_doc("Site", site_name)
- agent = Agent(site.server)
- agent.add_database_index(site, doctype=doctype, columns=[column])
- frappe.msgprint(f"Index {index} added on site {site_name} successfully", realtime=True)
diff --git a/press/press/report/mariadb_slow_queries/test_db_optimizer.py b/press/press/report/mariadb_slow_queries/test_db_optimizer.py
deleted file mode 100644
index 65eead5535..0000000000
--- a/press/press/report/mariadb_slow_queries/test_db_optimizer.py
+++ /dev/null
@@ -1,618 +0,0 @@
-# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
-# License: MIT. See LICENSE
-
-import json
-
-from frappe.tests.utils import FrappeTestCase
-
-from press.press.report.mariadb_slow_queries.db_optimizer import (
- DBExplain,
- DBOptimizer,
- DBTable,
-)
-
-
-class TestDBOptimizer(FrappeTestCase):
- def test_basic_index_existence_analysis(self):
- def possible_indexes(q):
- user = DBTable.from_frappe_ouput(USER_TABLE)
- has_role = DBTable.from_frappe_ouput(HAS_ROLE_TABLE)
- return [
- i.column
- for i in DBOptimizer(
- query=q,
- tables={"tabUser": user, "tabHas Role": has_role},
- ).potential_indexes()
- ]
-
- self.assertEqual(
- ["creation"],
- possible_indexes("select `name` from `tabUser` order by `creation` desc limit 1"),
- )
-
- self.assertEqual(
- ["full_name"],
- possible_indexes("select `name` from `tabUser` where full_name = 'xyz'"),
- )
-
- self.assertIn(
- "user",
- possible_indexes(
- "select `name` from `tabUser` u join `tabHas Role` h on h.user = u.name"
- ),
- )
-
- def test_suggestion_using_table_stats(self):
- user = DBTable.from_frappe_ouput(USER_TABLE)
- has_role = DBTable.from_frappe_ouput(HAS_ROLE_TABLE)
-
- tables = {"tabUser": user, "tabHas Role": has_role}
- self.assertEqual(user.total_rows, 92)
-
- # This should suggest adding api_key as it definitely has highest cardinality.
- optimizer = DBOptimizer(
- query="select name from tabUser where enabled = 1 and api_key = 'xyz'", tables=tables
- )
- self.assertIn("api_key", [i.column for i in optimizer.potential_indexes()])
-
- index = optimizer.suggest_index()
- self.assertEqual(index.column, "api_key")
-
- # This should suggest nothing as modified is already indexed
- optimizer = DBOptimizer(
- query="select name from tabUser order by modified asc",
- tables=tables,
- )
- self.assertIsNone(optimizer.suggest_index())
-
- # This should suggest nothing as modified is already indexed
- optimizer = DBOptimizer(
- query="select name from tabUser u join `tabHas Role` r on r.parent = u.name where r.role='System Manager'",
- tables=tables,
- )
- index = optimizer.suggest_index()
- self.assertEqual(index.column, "role")
- self.assertEqual(index.table, "tabHas Role")
-
- def test_complex_sub_query_aliases(self):
- """Check if table identification is correct for subqueries."""
-
- q = """SELECT *,
- (SELECT COUNT(*) FROM `tabHD Ticket Comment` WHERE `tabHD Ticket Comment`.`reference_ticket`=`tabHD Ticket`.`name`) `count_comment`,
- (SELECT COUNT(*) FROM `tabCommunication` WHERE `tabCommunication`.`reference_doctype`='HD Ticket' AND `tabCommunication`.`reference_name`=`tabHD Ticket`.`name`) `count_msg`,
- FROM `tabHD Ticket`
- WHERE `agent_group`='L2'
- ORDER BY `modified` DESC
- LIMIT 20
- """
- explain = [DBExplain.from_frappe_ouput(e) for e in json.loads(EXPLAIN_OUTPUT)]
- optimizer = DBOptimizer(query=q, explain_plan=explain)
- optimizer.update_table_data(DBTable.from_frappe_ouput(HD_TICKET_TABLE))
- optimizer.update_table_data(DBTable.from_frappe_ouput(HD_TICKET_COMMENT_TABLE))
- optimizer.update_table_data(DBTable.from_frappe_ouput(COMMUNICATION_TABLE))
-
- self.assertTrue(optimizer.can_be_optimized())
- index = optimizer.suggest_index()
- self.assertEqual(index.table, "tabHD Ticket Comment")
- self.assertEqual(index.column, "reference_ticket")
-
-
-# Table stats extracted using describe-database-table for testing.
-
-USER_TABLE = {
- "table_name": "tabUser",
- "total_rows": 92,
- "schema": [
- {
- "column": "name",
- "type": "varchar(140)",
- "is_nullable": False,
- "default": None,
- "cardinality": 91,
- },
- {"column": "creation", "type": "datetime(6)", "is_nullable": True, "default": None},
- {
- "column": "modified",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- "cardinality": 91,
- },
- {
- "column": "modified_by",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- },
- {"column": "owner", "type": "varchar(140)", "is_nullable": True, "default": None},
- {"column": "docstatus", "type": "int(1)", "is_nullable": False, "default": "0"},
- {"column": "idx", "type": "int(8)", "is_nullable": False, "default": "0"},
- {"column": "enabled", "type": "int(1)", "is_nullable": False, "default": "1"},
- {"column": "email", "type": "varchar(140)", "is_nullable": False, "default": ""},
- {
- "column": "first_name",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- "cardinality": 88,
- },
- {
- "column": "reset_password_key",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- "cardinality": 84,
- },
- {
- "column": "user_type",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": "System User",
- "cardinality": 2,
- },
- {
- "column": "api_key",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- "cardinality": 70,
- },
- {"column": "api_secret", "type": "text", "is_nullable": True, "default": None},
- {"column": "_user_tags", "type": "text", "is_nullable": True, "default": None},
- {"column": "_comments", "type": "text", "is_nullable": True, "default": None},
- {"column": "_assign", "type": "text", "is_nullable": True, "default": None},
- {"column": "_liked_by", "type": "text", "is_nullable": True, "default": None},
- ],
- "indexes": [
- {
- "unique": True,
- "cardinality": 91,
- "name": "PRIMARY",
- "sequence": 1,
- "nullable": False,
- "column": "name",
- "type": "BTREE",
- },
- {
- "unique": True,
- "cardinality": 91,
- "name": "username",
- "sequence": 1,
- "nullable": True,
- "column": "username",
- "type": "BTREE",
- },
- {
- "unique": False,
- "cardinality": 91,
- "name": "modified",
- "sequence": 1,
- "nullable": True,
- "column": "modified",
- "type": "BTREE",
- },
- {
- "unique": False,
- "cardinality": 91,
- "name": "reset_password_key_index",
- "sequence": 1,
- "nullable": True,
- "column": "reset_password_key",
- "type": "BTREE",
- },
- ],
-}
-
-
-HAS_ROLE_TABLE = {
- "table_name": "tabHas Role",
- "total_rows": 96,
- "schema": [
- {
- "column": "name",
- "type": "varchar(140)",
- "is_nullable": "NO",
- "default": None,
- "cardinality": 92,
- },
- {"column": "creation", "type": "datetime(6)", "is_nullable": "YES", "default": None},
- {"column": "modified", "type": "datetime(6)", "is_nullable": "YES", "default": None},
- {
- "column": "modified_by",
- "type": "varchar(140)",
- "is_nullable": "YES",
- "default": None,
- },
- {"column": "owner", "type": "varchar(140)", "is_nullable": "YES", "default": None},
- {"column": "docstatus", "type": "int(1)", "is_nullable": "NO", "default": "0"},
- {"column": "idx", "type": "int(8)", "is_nullable": "NO", "default": "0"},
- {
- "column": "role",
- "type": "varchar(140)",
- "is_nullable": "YES",
- "default": None,
- "cardinality": 78,
- },
- {
- "column": "parent",
- "type": "varchar(140)",
- "is_nullable": "YES",
- "default": None,
- "cardinality": 92,
- },
- {
- "column": "parentfield",
- "type": "varchar(140)",
- "is_nullable": "YES",
- "default": None,
- },
- {
- "column": "parenttype",
- "type": "varchar(140)",
- "is_nullable": "YES",
- "default": None,
- },
- ],
- "indexes": [
- {
- "unique": True,
- "cardinality": 92,
- "name": "PRIMARY",
- "sequence": 1,
- "nullable": "",
- "column": "name",
- "type": "BTREE",
- },
- {
- "unique": False,
- "cardinality": 92,
- "name": "parent",
- "sequence": 1,
- "nullable": "YES",
- "column": "parent",
- "type": "BTREE",
- },
- ],
-}
-
-
-HD_TICKET_TABLE = {
- "table_name": "tabHD Ticket",
- "total_rows": 3820,
- "schema": [
- {
- "column": "name",
- "type": "bigint(20)",
- "is_nullable": False,
- "default": None,
- "cardinality": 3529,
- },
- {"column": "creation", "type": "datetime(6)", "is_nullable": True, "default": None},
- {
- "column": "modified",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- "cardinality": 3529,
- },
- {
- "column": "modified_by",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- },
- {"column": "owner", "type": "varchar(140)", "is_nullable": True, "default": None},
- {"column": "docstatus", "type": "int(1)", "is_nullable": False, "default": "0"},
- {"column": "idx", "type": "int(8)", "is_nullable": False, "default": "0"},
- {"column": "subject", "type": "varchar(140)", "is_nullable": True, "default": None},
- {"column": "raised_by", "type": "varchar(140)", "is_nullable": True, "default": None},
- {
- "column": "status",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": "Open",
- "cardinality": 8,
- },
- {"column": "priority", "type": "varchar(140)", "is_nullable": True, "default": None},
- {
- "column": "ticket_type",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "agent_group",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": "L1",
- "cardinality": 9,
- },
- {
- "column": "ticket_split_from",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- },
- {"column": "description", "type": "longtext", "is_nullable": True, "default": None},
- {"column": "template", "type": "varchar(140)", "is_nullable": True, "default": None},
- {"column": "sla", "type": "varchar(140)", "is_nullable": True, "default": None},
- {
- "column": "response_by",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "response_by_variance",
- "type": "decimal(21,9)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "agreement_status",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "resolution_by",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "resolution_by_variance",
- "type": "decimal(21,9)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "service_level_agreement_creation",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "on_hold_since",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "total_hold_time",
- "type": "decimal(21,9)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "first_response_time",
- "type": "decimal(21,9)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "first_responded_on",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "avg_response_time",
- "type": "decimal(21,9)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "resolution_details",
- "type": "longtext",
- "is_nullable": True,
- "default": None,
- },
- {"column": "opening_date", "type": "date", "is_nullable": True, "default": None},
- {"column": "opening_time", "type": "time(6)", "is_nullable": True, "default": None},
- {
- "column": "resolution_date",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "resolution_time",
- "type": "decimal(21,9)",
- "is_nullable": True,
- "default": None,
- },
- {
- "column": "user_resolution_time",
- "type": "decimal(21,9)",
- "is_nullable": True,
- "default": None,
- },
- {"column": "contact", "type": "varchar(140)", "is_nullable": True, "default": None},
- {"column": "customer", "type": "varchar(140)", "is_nullable": True, "default": None},
- {
- "column": "email_account",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- },
- {"column": "attachment", "type": "text", "is_nullable": True, "default": None},
- {"column": "_user_tags", "type": "text", "is_nullable": True, "default": None},
- {"column": "_comments", "type": "text", "is_nullable": True, "default": None},
- {"column": "_assign", "type": "text", "is_nullable": True, "default": None},
- {"column": "_liked_by", "type": "text", "is_nullable": True, "default": None},
- {"column": "_seen", "type": "text", "is_nullable": True, "default": None},
- ],
- "indexes": [
- {
- "unique": True,
- "cardinality": 3529,
- "name": "PRIMARY",
- "sequence": 1,
- "nullable": False,
- "column": "name",
- "type": "BTREE",
- },
- {
- "unique": False,
- "cardinality": 8,
- "name": "status",
- "sequence": 1,
- "nullable": True,
- "column": "status",
- "type": "BTREE",
- },
- {
- "unique": False,
- "cardinality": 3529,
- "name": "modified",
- "sequence": 1,
- "nullable": True,
- "column": "modified",
- "type": "BTREE",
- },
- ],
-}
-
-
-HD_TICKET_COMMENT_TABLE = {
- "table_name": "tabHD Ticket Comment",
- "total_rows": 2683,
- "schema": [
- {
- "column": "name",
- "type": "varchar(140)",
- "is_nullable": False,
- "default": None,
- "cardinality": 2683,
- },
- {"column": "creation", "type": "datetime(6)", "is_nullable": True, "default": None},
- {
- "column": "modified",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- "cardinality": 2345,
- },
- {
- "column": "reference_ticket",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- "cardinality": 1379,
- },
- {
- "column": "commented_by",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- },
- {"column": "content", "type": "longtext", "is_nullable": True, "default": None},
- {"column": "is_pinned", "type": "int(1)", "is_nullable": False, "default": "0"},
- ],
- "indexes": [
- {
- "unique": True,
- "cardinality": 2345,
- "name": "PRIMARY",
- "sequence": 1,
- "nullable": False,
- "column": "name",
- "type": "BTREE",
- },
- {
- "unique": False,
- "cardinality": 2345,
- "name": "modified",
- "sequence": 1,
- "nullable": True,
- "column": "modified",
- "type": "BTREE",
- },
- ],
-}
-
-
-COMMUNICATION_TABLE = {
- "table_name": "tabCommunication",
- "total_rows": 20727,
- "schema": [
- {
- "column": "name",
- "type": "varchar(140)",
- "is_nullable": False,
- "default": None,
- "cardinality": 19713,
- },
- {"column": "creation", "type": "datetime(6)", "is_nullable": True, "default": None},
- {
- "column": "modified",
- "type": "datetime(6)",
- "is_nullable": True,
- "default": None,
- "cardinality": 19713,
- },
- {
- "column": "reference_doctype",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- "cardinality": 1,
- },
- {
- "column": "reference_name",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- "cardinality": 3798,
- },
- {
- "column": "reference_owner",
- "type": "varchar(140)",
- "is_nullable": True,
- "default": None,
- "cardinality": 1314,
- },
- ],
- "indexes": [
- {
- "unique": True,
- "cardinality": 19713,
- "name": "PRIMARY",
- "sequence": 1,
- "nullable": False,
- "column": "name",
- "type": "BTREE",
- },
- {
- "unique": False,
- "cardinality": 19713,
- "name": "modified",
- "sequence": 1,
- "nullable": True,
- "column": "modified",
- "type": "BTREE",
- },
- {
- "unique": False,
- "cardinality": 2,
- "name": "reference_doctype_reference_name_index",
- "sequence": 1,
- "nullable": True,
- "column": "reference_doctype",
- "type": "BTREE",
- },
- {
- "unique": False,
- "cardinality": 9856,
- "name": "reference_doctype_reference_name_index",
- "sequence": 2,
- "nullable": True,
- "column": "reference_name",
- "type": "BTREE",
- },
- ],
-}
-
-
-EXPLAIN_OUTPUT = """[{"Extra": "", "id": 1, "key": "modified", "key_len": "9", "possible_keys": null, "ref": null, "rows": "20", "select_type": "PRIMARY", "table": "tabHD Ticket", "type": "index"}, {"Extra": "Using index condition; Using where", "id": 4, "key": "reference_doctype_reference_name_index", "key_len": "563", "possible_keys": "reference_doctype_reference_name_index", "ref": "const", "rows": "10236", "select_type": "DEPENDENT SUBQUERY", "table": "tabCommunication", "type": "ref"}, {"Extra": "Using index condition; Using where", "id": 3, "key": "reference_doctype_reference_name_index", "key_len": "563", "possible_keys": "reference_doctype_reference_name_index", "ref": "const", "rows": "10236", "select_type": "DEPENDENT SUBQUERY", "table": "tabCommunication", "type": "ref"}, {"Extra": "Using where; Using index", "id": 2, "key": "reference_ticket_index", "key_len": "563", "possible_keys": "reference_ticket_index", "ref": null, "rows": "2823", "select_type": "DEPENDENT SUBQUERY", "table": "tabHD Ticket Comment", "type": "index"}]"""