From 7498b211231d36f07de4e01b479c03af4b2b176b Mon Sep 17 00:00:00 2001
From: Kould <2435992353@qq.com>
Date: Sun, 3 Nov 2024 17:40:45 +0800
Subject: [PATCH] Feat/View (#236)

* feat: impl `View` Encode & Decode

* chore: add `ReferenceSerialization` for `DataValue::Tuple` & fix subquery plan on `ReferenceSerialization`

* feat: impl `CreateView`

* feat: impl `View` as data source

* test: add test for `View`

* chore: add feature `is_sorted`

* chore: version up
---
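For reviewers, a minimal end-to-end sketch of the feature this patch adds. This is a hedged illustration, not part of the diff: it assumes the `DataBaseBuilder::path(..).build()` / `run(..)` API shown in the README and mirrored by `DBTransaction::run` below; the path and SQL are made up, and `tests/slt/view.slt` carries the authoritative cases.

    use fnck_sql::db::DataBaseBuilder;
    use fnck_sql::errors::DatabaseError;

    fn main() -> Result<(), DatabaseError> {
        // Assumed API, mirroring the README example; `run` is assumed to
        // return (schema, tuples) as DBTransaction::run does in this patch.
        let fnck_sql = DataBaseBuilder::path("./data").build()?;
        fnck_sql.run("create table t1 (c1 int primary key, c2 int)")?;
        // New in this patch: the view's bound plan is stored via
        // `ReferenceSerialization` and spliced back in as a data source.
        fnck_sql.run("create view v1 as select c1, c2 from t1")?;
        let (_schema, _tuples) = fnck_sql.run("select * from v1")?;
        Ok(())
    }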
 Cargo.toml                                  |   2 +-
 README.md                                   |   2 +
 rust-toolchain                              |   2 +-
 src/binder/aggregate.rs                     |   2 +-
 src/binder/alter_table.rs                   |   4 +-
 src/binder/analyze.rs                       |  20 +-
 src/binder/copy.rs                          |  17 +-
 src/binder/create_index.rs                  |  14 +-
 src/binder/create_table.rs                  |  22 +-
 src/binder/create_view.rs                   |  62 +++++
 src/binder/delete.rs                        |  23 +-
 src/binder/describe.rs                      |   2 +-
 src/binder/distinct.rs                      |   2 +-
 src/binder/drop_table.rs                    |   7 +-
 src/binder/explain.rs                       |   2 +-
 src/binder/expr.rs                          |  84 ++++---
 src/binder/insert.rs                        |  10 +-
 src/binder/mod.rs                           | 159 ++++++++++--
 src/binder/select.rs                        | 105 +++++---
 src/binder/show.rs                          |   2 +-
 src/binder/truncate.rs                      |   2 +-
 src/binder/update.rs                        |   2 +-
 src/catalog/column.rs                       |  61 ++++-
 src/catalog/mod.rs                          |   1 +
 src/catalog/table.rs                        |  10 +-
 src/catalog/view.rs                         |  19 ++
 src/db.rs                                   |  26 +-
 src/errors.rs                               |   4 +
 src/execution/ddl/add_column.rs             |   7 +-
 src/execution/ddl/create_index.rs           |   4 +-
 src/execution/ddl/create_table.rs           |   4 +-
 src/execution/ddl/create_view.rs            |  35 +++
 src/execution/ddl/drop_column.rs            |   8 +-
 src/execution/ddl/drop_table.rs             |   4 +-
 src/execution/ddl/mod.rs                    |   1 +
 src/execution/ddl/truncate.rs               |   4 +-
 src/execution/dml/analyze.rs                |  18 +-
 src/execution/dml/copy_from_file.rs         |  49 ++--
 src/execution/dml/delete.rs                 |   7 +-
 src/execution/dml/insert.rs                 |  13 +-
 src/execution/dml/update.rs                 |   9 +-
 src/execution/dql/aggregate/hash_agg.rs     |   7 +-
 src/execution/dql/aggregate/simple_agg.rs   |   4 +-
 src/execution/dql/describe.rs               |  15 +-
 src/execution/dql/dummy.rs                  |   8 +-
 src/execution/dql/explain.rs                |   8 +-
 src/execution/dql/filter.rs                 |   4 +-
 src/execution/dql/function_scan.rs          |   8 +-
 src/execution/dql/index_scan.rs             |   4 +-
 src/execution/dql/join/hash_join.rs         |  42 ++--
 src/execution/dql/join/nested_loop_join.rs  |  55 +++--
 src/execution/dql/limit.rs                  |   4 +-
 src/execution/dql/projection.rs             |   4 +-
 src/execution/dql/seq_scan.rs               |   4 +-
 src/execution/dql/show_table.rs             |   4 +-
 src/execution/dql/sort.rs                   |   4 +-
 src/execution/dql/union.rs                  |   4 +-
 src/execution/dql/values.rs                 |   8 +-
 src/execution/mod.rs                        |  12 +-
 src/expression/evaluator.rs                 |  14 +-
 src/expression/mod.rs                       |  25 +-
 src/expression/range_detacher.rs            |   3 +-
 src/function/numbers.rs                     |   2 +-
 src/optimizer/core/cm_sketch.rs             |   3 +-
 src/optimizer/core/histogram.rs             |   5 +-
 src/optimizer/core/memo.rs                  |   3 +-
 src/optimizer/core/statistics_meta.rs       |  22 +-
 .../rule/normalization/column_pruning.rs    |   1 +
 .../normalization/compilation_in_advance.rs |   2 +
 .../rule/normalization/pushdown_limit.rs    |   2 +-
 .../rule/normalization/simplification.rs    |  24 +-
 src/planner/mod.rs                          | 224 +++++++++--------
 src/planner/operator/create_view.rs         |  22 ++
 src/planner/operator/drop_table.rs          |   6 +-
 src/planner/operator/mod.rs                 |   6 +
 src/planner/operator/table_scan.rs          |   2 +-
 src/serdes/column.rs                        |  73 ++++--
 src/serdes/data_value.rs                    |  99 +++++++-
 src/serdes/mod.rs                           |  55 ++++-
 src/storage/mod.rs                          | 205 +++++++++++-----
 src/storage/rocksdb.rs                      |   6 +-
 src/storage/table_codec.rs                  | 227 +++++++++++++++---
 src/types/index.rs                          |   7 +-
 src/types/tuple.rs                          |   2 +-
 src/types/tuple_builder.rs                  |   2 +-
 src/types/value.rs                          |  11 +-
 src/utils/lru.rs                            |   4 +-
 tests/macros-test/src/main.rs               |   6 +-
 tests/slt/crdb/delete.slt                   |  40 +--
 tests/slt/sql_2016/F031_02.slt              |   5 +-
 tests/slt/sql_2016/F031_16.slt              |   5 +-
 tests/slt/sql_2016/F081.slt                 |  10 +-
 tests/slt/sql_2016/F131_01.slt              |  38 +--
 tests/slt/sql_2016/F131_02.slt              |   8 +-
 tests/slt/sql_2016/F131_03.slt              |  17 +-
 tests/slt/sql_2016/F131_04.slt              | 110 ++++---
 tests/slt/view.slt                          |  64 +++++
 97 files changed, 1688 insertions(+), 702 deletions(-)
 create mode 100644 src/binder/create_view.rs
 create mode 100644 src/catalog/view.rs
 create mode 100644 src/execution/ddl/create_view.rs
 create mode 100644 src/planner/operator/create_view.rs
 create mode 100644 tests/slt/view.slt

diff --git a/Cargo.toml b/Cargo.toml
index 44a49ecb..00d40e95 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,7 +2,7 @@
 [package]
 name = "fnck_sql"
-version = "0.0.3"
+version = "0.0.4"
 edition = "2021"
 authors = ["Kould ", "Xwg "]
 description = "SQL as a Function for Rust"
diff --git a/README.md b/README.md
index f2155246..90989db1 100755
--- a/README.md
+++ b/README.md
@@ -185,9 +185,11 @@ let fnck_sql = DataBaseBuilder::path("./data")
 - Create
   - [x] Table
   - [x] Index: Unique\Normal\Composite
+  - [x] View
 - Drop
   - [x] Table
   - [ ] Index
+  - [ ] View
 - Alert
   - [x] Add Column
   - [x] Drop Column
diff --git a/rust-toolchain b/rust-toolchain
index 5f739953..4e42420d 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-nightly-2024-10-18
\ No newline at end of file
+nightly-2024-10-10
\ No newline at end of file
diff --git a/src/binder/aggregate.rs b/src/binder/aggregate.rs
index f333a0c2..3cd1e63a 100644
--- a/src/binder/aggregate.rs
+++ b/src/binder/aggregate.rs
@@ -14,7 +14,7 @@ use crate::{
 
 use super::{Binder, QueryBindStep};
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub fn bind_aggregate(
         &mut self,
         children: LogicalPlan,
diff --git a/src/binder/alter_table.rs b/src/binder/alter_table.rs
index 798bf963..f30a5063 100644
--- a/src/binder/alter_table.rs
+++ b/src/binder/alter_table.rs
@@ -12,7 +12,7 @@ use crate::planner::operator::Operator;
 use crate::planner::LogicalPlan;
 use crate::storage::Transaction;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_alter_table(
         &mut self,
         name: &ObjectName,
@@ -21,7 +21,7 @@
         let table_name: Arc<String> = Arc::new(lower_case_name(name)?);
         let table = self
             .context
-            .table(table_name.clone())
+            .table(table_name.clone())?
             .ok_or(DatabaseError::TableNotFound)?;
         let plan = match operation {
             AlterTableOperation::AddColumn {
diff --git a/src/binder/analyze.rs b/src/binder/analyze.rs
index 321e0559..3511fa28 100644
--- a/src/binder/analyze.rs
+++ b/src/binder/analyze.rs
@@ -1,4 +1,4 @@
-use crate::binder::{lower_case_name, Binder};
+use crate::binder::{lower_case_name, Binder, Source};
 use crate::errors::DatabaseError;
 use crate::planner::operator::analyze::AnalyzeOperator;
 use crate::planner::operator::table_scan::TableScanOperator;
@@ -8,16 +8,24 @@ use crate::storage::Transaction;
 use sqlparser::ast::ObjectName;
 use std::sync::Arc;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_analyze(&mut self, name: &ObjectName) -> Result<LogicalPlan, DatabaseError> {
         let table_name = Arc::new(lower_case_name(name)?);
 
-        let table_catalog = self
+        let table = self
             .context
-            .table_and_bind(table_name.clone(), None, None)?;
-        let index_metas = table_catalog.indexes.clone();
+            .source_and_bind(table_name.clone(), None, None, true)?
+            .and_then(|source| {
+                if let Source::Table(table) = source {
+                    Some(table)
+                } else {
+                    None
+                }
+            })
+            .ok_or(DatabaseError::TableNotFound)?;
+        let index_metas = table.indexes.clone();
 
-        let scan_op = TableScanOperator::build(table_name.clone(), table_catalog);
+        let scan_op = TableScanOperator::build(table_name.clone(), table);
 
         Ok(LogicalPlan::new(
             Operator::Analyze(AnalyzeOperator {
                 table_name,
diff --git a/src/binder/copy.rs b/src/binder/copy.rs
index 59f2284c..1ea6caf4 100644
--- a/src/binder/copy.rs
+++ b/src/binder/copy.rs
@@ -11,18 +11,7 @@ use serde::{Deserialize, Serialize};
 use serde_macros::ReferenceSerialization;
 use sqlparser::ast::{CopyOption, CopySource, CopyTarget};
 
-#[derive(
-    Debug,
-    PartialEq,
-    PartialOrd,
-    Ord,
-    Hash,
-    Eq,
-    Clone,
-    Serialize,
-    Deserialize,
-    ReferenceSerialization,
-)]
+#[derive(Debug, PartialEq, PartialOrd, Ord, Hash, Eq, Clone, ReferenceSerialization)]
 pub struct ExtSource {
     pub path: PathBuf,
     pub format: FileFormat,
@@ -73,7 +62,7 @@ impl FromStr for ExtSource {
     }
 }
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(super) fn bind_copy(
         &mut self,
         source: CopySource,
@@ -91,7 +80,7 @@
             }
         };
 
-        if let Some(table) = self.context.table(Arc::new(table_name.to_string())) {
+        if let Some(table) = self.context.table(Arc::new(table_name.to_string()))? {
             let schema_ref = table.schema_ref().clone();
             let ext_source = ExtSource {
                 path: match target {
diff --git a/src/binder/create_index.rs b/src/binder/create_index.rs
index 7ef0da7e..65a04b32 100644
--- a/src/binder/create_index.rs
+++ b/src/binder/create_index.rs
@@ -1,4 +1,4 @@
-use crate::binder::{lower_case_name, Binder};
+use crate::binder::{lower_case_name, Binder, Source};
 use crate::errors::DatabaseError;
 use crate::expression::ScalarExpression;
 use crate::planner::operator::create_index::CreateIndexOperator;
@@ -10,7 +10,7 @@ use crate::types::index::IndexType;
 use sqlparser::ast::{ObjectName, OrderByExpr};
 use std::sync::Arc;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_create_index(
         &mut self,
         table_name: &ObjectName,
@@ -29,10 +29,14 @@
             IndexType::Composite
         };
 
-        let table = self
+        let source = self
             .context
-            .table_and_bind(table_name.clone(), None, None)?;
-        let plan = TableScanOperator::build(table_name.clone(), table);
+            .source_and_bind(table_name.clone(), None, None, false)?
+            .ok_or(DatabaseError::SourceNotFound)?;
+        let plan = match source {
+            Source::Table(table) => TableScanOperator::build(table_name.clone(), table),
+            Source::View(view) => LogicalPlan::clone(&view.plan),
+        };
         let mut columns = Vec::with_capacity(exprs.len());
 
         for expr in exprs {
diff --git a/src/binder/create_table.rs b/src/binder/create_table.rs
index ba17262c..44c9ec8b 100644
--- a/src/binder/create_table.rs
+++ b/src/binder/create_table.rs
@@ -14,7 +14,7 @@ use crate::planner::LogicalPlan;
 use crate::storage::Transaction;
 use crate::types::LogicalType;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     // TODO: TableConstraint
     pub(crate) fn bind_create_table(
         &mut self,
@@ -62,9 +62,9 @@
                     .find(|column| column.name() == column_name)
                 {
                     if *is_primary {
-                        column.desc.is_primary = true;
+                        column.desc_mut().is_primary = true;
                     } else {
-                        column.desc.is_unique = true;
+                        column.desc_mut().is_unique = true;
                     }
                 }
             }
@@ -73,7 +73,7 @@
             }
         }
 
-        if columns.iter().filter(|col| col.desc.is_primary).count() != 1 {
+        if columns.iter().filter(|col| col.desc().is_primary).count() != 1 {
             return Err(DatabaseError::InvalidTable(
                 "The primary key field must exist and have at least one".to_string(),
             ));
@@ -158,6 +158,7 @@ mod tests {
         let storage = RocksStorage::new(temp_dir.path())?;
         let transaction = storage.transaction()?;
         let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?);
+        let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?);
         let scala_functions = Default::default();
         let table_functions = Default::default();
 
@@ -165,6 +166,7 @@ mod tests {
         let mut binder = Binder::new(
             BinderContext::new(
                 &table_cache,
+                &view_cache,
                 &transaction,
                 &scala_functions,
                 &table_functions,
@@ -179,16 +181,16 @@ mod tests {
             Operator::CreateTable(op) => {
                 debug_assert_eq!(op.table_name, Arc::new("t1".to_string()));
                 debug_assert_eq!(op.columns[0].name(), "id");
-                debug_assert_eq!(op.columns[0].nullable, false);
+                debug_assert_eq!(op.columns[0].nullable(), false);
                 debug_assert_eq!(
-                    op.columns[0].desc,
-                    ColumnDesc::new(LogicalType::Integer, true, false, None)?
+                    op.columns[0].desc(),
+                    &ColumnDesc::new(LogicalType::Integer, true, false, None)?
                 );
                 debug_assert_eq!(op.columns[1].name(), "name");
-                debug_assert_eq!(op.columns[1].nullable, true);
+                debug_assert_eq!(op.columns[1].nullable(), true);
                 debug_assert_eq!(
-                    op.columns[1].desc,
-                    ColumnDesc::new(
+                    op.columns[1].desc(),
+                    &ColumnDesc::new(
                         LogicalType::Varchar(Some(10), CharLengthUnits::Characters),
                         false,
                         false,
                        None,
                    )?
                );
diff --git a/src/binder/create_view.rs b/src/binder/create_view.rs
new file mode 100644
index 00000000..1531d1fa
--- /dev/null
+++ b/src/binder/create_view.rs
@@ -0,0 +1,62 @@
+use crate::binder::{lower_case_name, lower_ident, Binder};
+use crate::catalog::view::View;
+use crate::catalog::{ColumnCatalog, ColumnRef};
+use crate::errors::DatabaseError;
+use crate::expression::{AliasType, ScalarExpression};
+use crate::planner::operator::create_view::CreateViewOperator;
+use crate::planner::operator::Operator;
+use crate::planner::LogicalPlan;
+use crate::storage::Transaction;
+use itertools::Itertools;
+use sqlparser::ast::{Ident, ObjectName, Query};
+use std::sync::Arc;
+use ulid::Ulid;
+
+impl<T: Transaction> Binder<'_, '_, T> {
+    pub(crate) fn bind_create_view(
+        &mut self,
+        or_replace: &bool,
+        name: &ObjectName,
+        columns: &[Ident],
+        query: &Query,
+    ) -> Result<LogicalPlan, DatabaseError> {
+        let view_name = Arc::new(lower_case_name(name)?);
+        let mut plan = self.bind_query(query)?;
+
+        if !columns.is_empty() {
+            let mapping_schema = plan.output_schema();
+            let exprs = columns
+                .iter()
+                .enumerate()
+                .map(|(i, ident)| {
+                    let mapping_column = &mapping_schema[i];
+                    let mut column = ColumnCatalog::new(
+                        lower_ident(ident),
+                        mapping_column.nullable(),
+                        mapping_column.desc().clone(),
+                    );
+                    column.set_ref_table(view_name.clone(), Ulid::new(), true);
+
+                    ScalarExpression::Alias {
+                        expr: Box::new(ScalarExpression::ColumnRef(mapping_column.clone())),
+                        alias: AliasType::Expr(Box::new(ScalarExpression::ColumnRef(
+                            ColumnRef::from(column),
+                        ))),
+                    }
+                })
+                .collect_vec();
+            plan = self.bind_project(plan, exprs)?;
+        }
+
+        Ok(LogicalPlan::new(
+            Operator::CreateView(CreateViewOperator {
+                view: View {
+                    name: view_name,
+                    plan: Box::new(plan),
+                },
+                or_replace: *or_replace,
+            }),
+            vec![],
+        ))
+    }
+}
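The column-list branch of `bind_create_view` above wraps every output column in a `ScalarExpression::Alias`, so `CREATE VIEW v (a, b) AS SELECT ...` renames the underlying columns (each alias column gets a fresh `Ulid` and `is_temp = true` via `set_ref_table`). A hedged sketch of the observable behaviour, with illustrative SQL driven through the assumed `run` API from the sketch at the top; `tests/slt/view.slt` holds the real cases:

    // Illustrative only; names are examples, not taken from the test suite.
    const VIEW_COLUMN_LIST_DEMO: [&str; 3] = [
        "create table t1 (c1 int primary key, c2 int)",
        // The view exposes `a`/`b` instead of `c1`/`c2`.
        "create view v1 (a, b) as select c1, c2 from t1",
        "select a, b from v1",
    ];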
diff --git a/src/binder/delete.rs b/src/binder/delete.rs
index ad4841d0..192e21e6 100644
--- a/src/binder/delete.rs
+++ b/src/binder/delete.rs
@@ -1,4 +1,4 @@
-use crate::binder::{lower_case_name, Binder};
+use crate::binder::{lower_case_name, Binder, Source};
 use crate::errors::DatabaseError;
 use crate::planner::operator::delete::DeleteOperator;
 use crate::planner::operator::table_scan::TableScanOperator;
@@ -8,7 +8,7 @@ use crate::storage::Transaction;
 use sqlparser::ast::{Expr, TableAlias, TableFactor, TableWithJoins};
 use std::sync::Arc;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_delete(
         &mut self,
         from: &TableWithJoins,
@@ -23,15 +23,20 @@
             table_alias = Some(Arc::new(name.value.to_lowercase()));
             alias_idents = Some(columns);
         }
-        let table_catalog =
-            self.context
-                .table_and_bind(table_name.clone(), table_alias.clone(), None)?;
-        let primary_key_column = table_catalog
-            .columns()
-            .find(|column| column.desc.is_primary)
+        let source = self
+            .context
+            .source_and_bind(table_name.clone(), table_alias.as_ref(), None, false)?
+            .ok_or(DatabaseError::SourceNotFound)?;
+        let schema_buf = self.table_schema_buf.entry(table_name.clone()).or_default();
+        let primary_key_column = source
+            .columns(schema_buf)
+            .find(|column| column.desc().is_primary)
             .cloned()
             .unwrap();
-        let mut plan = TableScanOperator::build(table_name.clone(), table_catalog);
+        let mut plan = match source {
+            Source::Table(table) => TableScanOperator::build(table_name.clone(), table),
+            Source::View(view) => LogicalPlan::clone(&view.plan),
+        };
 
         if let Some(alias_idents) = alias_idents {
             plan =
diff --git a/src/binder/describe.rs b/src/binder/describe.rs
index 159d4ad1..2a99791b 100644
--- a/src/binder/describe.rs
+++ b/src/binder/describe.rs
@@ -7,7 +7,7 @@ use crate::storage::Transaction;
 use sqlparser::ast::ObjectName;
 use std::sync::Arc;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_describe(
         &mut self,
         name: &ObjectName,
diff --git a/src/binder/distinct.rs b/src/binder/distinct.rs
index 2d03989d..fa88d1eb 100644
--- a/src/binder/distinct.rs
+++ b/src/binder/distinct.rs
@@ -4,7 +4,7 @@ use crate::planner::operator::aggregate::AggregateOperator;
 use crate::planner::LogicalPlan;
 use crate::storage::Transaction;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub fn bind_distinct(
         &mut self,
         children: LogicalPlan,
diff --git a/src/binder/drop_table.rs b/src/binder/drop_table.rs
index 39dc19dd..ec31dfce 100644
--- a/src/binder/drop_table.rs
+++ b/src/binder/drop_table.rs
@@ -7,7 +7,7 @@ use crate::storage::Transaction;
 use sqlparser::ast::ObjectName;
 use std::sync::Arc;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_drop_table(
         &mut self,
         name: &ObjectName,
@@ -15,13 +15,12 @@
     ) -> Result<LogicalPlan, DatabaseError> {
         let table_name = Arc::new(lower_case_name(name)?);
 
-        let plan = LogicalPlan::new(
+        Ok(LogicalPlan::new(
             Operator::DropTable(DropTableOperator {
                 table_name,
                 if_exists: *if_exists,
             }),
             vec![],
-        );
-        Ok(plan)
+        ))
     }
 }
diff --git a/src/binder/explain.rs b/src/binder/explain.rs
index 3fe29c32..8620e1cd 100644
--- a/src/binder/explain.rs
+++ b/src/binder/explain.rs
@@ -4,7 +4,7 @@ use crate::planner::operator::Operator;
 use crate::planner::LogicalPlan;
 use crate::storage::Transaction;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_explain(&mut self, plan: LogicalPlan) -> Result<LogicalPlan, DatabaseError> {
         Ok(LogicalPlan::new(Operator::Explain, vec![plan]))
     }
 }
diff --git a/src/binder/expr.rs b/src/binder/expr.rs
index 9d8ff31a..e8f67f2e 100644
--- a/src/binder/expr.rs
+++ b/src/binder/expr.rs
@@ -1,4 +1,4 @@
-use crate::catalog::{ColumnCatalog, ColumnRef};
+use crate::catalog::{ColumnCatalog, ColumnRef, TableName};
 use crate::errors::DatabaseError;
 use crate::expression;
 use crate::expression::agg::AggKind;
@@ -7,6 +7,7 @@ use sqlparser::ast::{
     BinaryOperator, CharLengthUnits, DataType, Expr, Function, FunctionArg, FunctionArgExpr,
     Ident, Query, UnaryOperator,
 };
+use std::collections::HashMap;
 use std::slice;
 use std::sync::Arc;
 
@@ -15,7 +16,7 @@ use crate::expression::function::scala::{ArcScalarFunctionImpl, ScalarFunction};
 use crate::expression::function::table::{ArcTableFunctionImpl, TableFunction};
 use crate::expression::function::FunctionSummary;
 use crate::expression::{AliasType, ScalarExpression};
-use crate::planner::LogicalPlan;
+use crate::planner::{LogicalPlan, SchemaOutput};
 use crate::storage::Transaction;
 use crate::types::value::{DataValue, Utf8Type};
 use crate::types::{ColumnId, LogicalType};
@@ -39,7 +40,7 @@ macro_rules! try_default {
     };
 }
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<'a, T: Transaction> Binder<'a, '_, T> {
     pub(crate) fn bind_expr(&mut self, expr: &Expr) -> Result<ScalarExpression, DatabaseError> {
         match expr {
             Expr::Identifier(ident) => {
@@ -231,7 +232,7 @@
         sub_query: LogicalPlan,
     ) -> Result<(ScalarExpression, LogicalPlan), DatabaseError> {
         let mut alias_column = ColumnCatalog::clone(&column);
-        alias_column.set_ref_table(self.context.temp_table(), ColumnId::new());
+        alias_column.set_ref_table(self.context.temp_table(), ColumnId::new(), true);
 
         let alias_expr = ScalarExpression::Alias {
             expr: Box::new(ScalarExpression::ColumnRef(column)),
@@ -249,6 +250,7 @@
     ) -> Result<(LogicalPlan, ColumnRef), DatabaseError> {
         let BinderContext {
             table_cache,
+            view_cache,
             transaction,
             scala_functions,
             table_functions,
@@ -258,6 +260,7 @@
         let mut binder = Binder::new(
             BinderContext::new(
                 table_cache,
+                view_cache,
                 *transaction,
                 scala_functions,
                 table_functions,
@@ -324,46 +327,53 @@
             try_default!(&full_name.0, full_name.1);
         }
         if let Some(table) = full_name.0.or(bind_table_name) {
-            let table_catalog = self.context.bind_table(&table, self.parent)?;
-
-            let column_catalog = table_catalog
-                .get_column_by_name(&full_name.1)
-                .ok_or_else(|| DatabaseError::NotFound("column", full_name.1))?;
-            Ok(ScalarExpression::ColumnRef(column_catalog.clone()))
+            let source = self.context.bind_source(&table, self.parent)?;
+            let schema_buf = self.table_schema_buf.entry(Arc::new(table)).or_default();
+
+            Ok(ScalarExpression::ColumnRef(
+                source
+                    .column(&full_name.1, schema_buf)
+                    .ok_or_else(|| DatabaseError::NotFound("column", full_name.1.to_string()))?,
+            ))
         } else {
-            let op = |got_column: &mut Option<ScalarExpression>, context: &BinderContext<'a, T>| {
-                for ((_, alias, _), table_catalog) in context.bind_table.iter() {
-                    if got_column.is_some() {
-                        break;
-                    }
-                    if let Some(alias) = alias {
-                        *got_column = self.context.expr_aliases.iter().find_map(
-                            |((alias_table, alias_column), expr)| {
-                                matches!(
-                                    alias_table
-                                        .as_ref()
-                                        .map(|table_name| table_name == alias.as_ref()
-                                            && alias_column == &full_name.1),
-                                    Some(true)
-                                )
-                                .then(|| expr.clone())
-                            },
-                        );
-                    } else if let Some(column_catalog) =
-                        table_catalog.get_column_by_name(&full_name.1)
-                    {
-                        *got_column = Some(ScalarExpression::ColumnRef(column_catalog.clone()));
+            let op =
+                |got_column: &mut Option<ScalarExpression>,
+                 context: &BinderContext<'a, T>,
+                 table_schema_buf: &mut HashMap<TableName, Option<SchemaOutput>>| {
+                    for ((table_name, alias, _), source) in context.bind_table.iter() {
+                        if got_column.is_some() {
+                            break;
+                        }
+                        if let Some(alias) = alias {
+                            *got_column = self.context.expr_aliases.iter().find_map(
+                                |((alias_table, alias_column), expr)| {
+                                    matches!(
+                                        alias_table
+                                            .as_ref()
+                                            .map(|table_name| table_name == alias.as_ref()
+                                                && alias_column == &full_name.1),
+                                        Some(true)
+                                    )
+                                    .then(|| expr.clone())
+                                },
+                            );
+                        } else if let Some(column) = {
+                            let schema_buf =
+                                table_schema_buf.entry(table_name.clone()).or_default();
+                            source.column(&full_name.1, schema_buf)
+                        } {
+                            *got_column = Some(ScalarExpression::ColumnRef(column));
+                        }
                     }
-                }
-            };
+                };
             // handle col syntax
             let mut got_column = None;
-            op(&mut got_column, &self.context);
+            op(&mut got_column, &self.context, &mut self.table_schema_buf);
             if let Some(parent) = self.parent {
-                op(&mut got_column, &parent.context);
+                op(&mut got_column, &parent.context, &mut self.table_schema_buf);
             }
-            Ok(got_column.ok_or_else(|| DatabaseError::NotFound("column", full_name.1))?)
+            Ok(got_column.ok_or(DatabaseError::NotFound("column", full_name.1))?)
         }
     }
diff --git a/src/binder/insert.rs b/src/binder/insert.rs
index 6047cc09..87e8491f 100644
--- a/src/binder/insert.rs
+++ b/src/binder/insert.rs
@@ -12,7 +12,7 @@ use sqlparser::ast::{Expr, Ident, ObjectName};
 use std::slice;
 use std::sync::Arc;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_insert(
         &mut self,
         name: &ObjectName,
@@ -25,14 +25,16 @@
         self.context.allow_default = true;
         let table_name = Arc::new(lower_case_name(name)?);
 
-        let table = self
+        let source = self
             .context
-            .table_and_bind(table_name.clone(), None, None)?;
+            .source_and_bind(table_name.clone(), None, None, false)?
+            .ok_or(DatabaseError::TableNotFound)?;
         let mut _schema_ref = None;
         let values_len = expr_rows[0].len();
 
         if idents.is_empty() {
-            let temp_schema_ref = table.schema_ref().clone();
+            let schema_buf = self.table_schema_buf.entry(table_name.clone()).or_default();
+            let temp_schema_ref = source.schema_ref(schema_buf);
             if values_len > temp_schema_ref.len() {
                 return Err(DatabaseError::ValuesLenMismatch(
                     temp_schema_ref.len(),
diff --git a/src/binder/mod.rs b/src/binder/mod.rs
index a8c24f0b..1c06401a 100644
--- a/src/binder/mod.rs
+++ b/src/binder/mod.rs
@@ -4,6 +4,7 @@
 mod analyze;
 pub mod copy;
 mod create_index;
 mod create_table;
+mod create_view;
 mod delete;
 mod describe;
 mod distinct;
@@ -21,13 +22,15 @@ use std::collections::{BTreeMap, HashMap, HashSet};
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
 
-use crate::catalog::{TableCatalog, TableName};
+use crate::catalog::view::View;
+use crate::catalog::{ColumnRef, TableCatalog, TableName};
 use crate::db::{ScalaFunctions, TableFunctions};
 use crate::errors::DatabaseError;
 use crate::expression::ScalarExpression;
 use crate::planner::operator::join::JoinType;
-use crate::planner::LogicalPlan;
-use crate::storage::{TableCache, Transaction};
+use crate::planner::{LogicalPlan, SchemaOutput};
+use crate::storage::{TableCache, Transaction, ViewCache};
+use crate::types::tuple::SchemaRef;
 
 pub enum InputRefType {
     AggCall,
@@ -44,6 +47,7 @@ pub fn command_type(stmt: &Statement) -> Result<CommandType, DatabaseError> {
     match stmt {
         Statement::CreateTable { .. }
         | Statement::CreateIndex { .. }
+        | Statement::CreateView { .. }
         | Statement::AlterTable { .. }
         | Statement::Drop { .. } => Ok(CommandType::DDL),
         Statement::Query(_)
@@ -80,15 +84,21 @@
     InSubQuery(bool, LogicalPlan),
 }
 
+#[derive(Debug, Clone)]
+pub enum Source<'a> {
+    Table(&'a TableCatalog),
+    View(&'a View),
+}
+
 #[derive(Clone)]
 pub struct BinderContext<'a, T: Transaction> {
     pub(crate) scala_functions: &'a ScalaFunctions,
     pub(crate) table_functions: &'a TableFunctions,
     pub(crate) table_cache: &'a TableCache,
+    pub(crate) view_cache: &'a ViewCache,
     pub(crate) transaction: &'a T,
     // Tips: When there are multiple tables and Wildcard, use BTreeMap to ensure that the order of the output tables is certain.
-    pub(crate) bind_table:
-        BTreeMap<(TableName, Option<TableName>, Option<JoinType>), &'a TableCatalog>,
+    pub(crate) bind_table: BTreeMap<(TableName, Option<TableName>, Option<JoinType>), Source<'a>>,
     // alias
     expr_aliases: BTreeMap<(Option<TableName>, String), ScalarExpression>,
     table_aliases: HashMap<String, TableName>,
@@ -105,9 +115,53 @@ pub struct BinderContext<'a, T: Transaction> {
     pub(crate) allow_default: bool,
 }
 
+impl Source<'_> {
+    pub(crate) fn column(
+        &self,
+        name: &str,
+        schema_buf: &mut Option<SchemaOutput>,
+    ) -> Option<ColumnRef> {
+        match self {
+            Source::Table(table) => table.get_column_by_name(name),
+            Source::View(view) => schema_buf
+                .get_or_insert_with(|| view.plan.output_schema_direct())
+                .columns()
+                .find(|column| column.name() == name),
+        }
+        .cloned()
+    }
+
+    pub(crate) fn columns<'a>(
+        &'a self,
+        schema_buf: &'a mut Option<SchemaOutput>,
+    ) -> Box<dyn Iterator<Item = &'a ColumnRef> + 'a> {
+        match self {
+            Source::Table(table) => Box::new(table.columns()),
+            Source::View(view) => Box::new(
+                schema_buf
+                    .get_or_insert_with(|| view.plan.output_schema_direct())
+                    .columns(),
+            ),
+        }
+    }
+
+    pub(crate) fn schema_ref(&self, schema_buf: &mut Option<SchemaOutput>) -> SchemaRef {
+        match self {
+            Source::Table(table) => table.schema_ref().clone(),
+            Source::View(view) => {
+                match schema_buf.get_or_insert_with(|| view.plan.output_schema_direct()) {
+                    SchemaOutput::Schema(schema) => Arc::new(schema.clone()),
+                    SchemaOutput::SchemaRef(schema_ref) => schema_ref.clone(),
+                }
+            }
+        }
+    }
+}
+
 impl<'a, T: Transaction> BinderContext<'a, T> {
     pub fn new(
         table_cache: &'a TableCache,
+        view_cache: &'a ViewCache,
         transaction: &'a T,
         scala_functions: &'a ScalaFunctions,
         table_functions: &'a TableFunctions,
@@ -117,6 +171,7 @@
             scala_functions,
             table_functions,
             table_cache,
+            view_cache,
             transaction,
             bind_table: Default::default(),
             expr_aliases: Default::default(),
@@ -161,7 +216,7 @@
         self.sub_queries.remove(&self.bind_step)
     }
 
-    pub fn table(&self, table_name: TableName) -> Option<&TableCatalog> {
+    pub fn table(&self, table_name: TableName) -> Result<Option<&TableCatalog>, DatabaseError> {
         if let Some(real_name) = self.table_aliases.get(table_name.as_ref()) {
             self.transaction.table(self.table_cache, real_name.clone())
         } else {
             self.transaction.table(self.table_cache, table_name.clone())
         }
     }
 
-    pub fn table_and_bind(
+    pub fn view(&self, view_name: TableName) -> Result<Option<&View>, DatabaseError> {
+        if let Some(real_name) = self.table_aliases.get(view_name.as_ref()) {
+            self.transaction.view(
+                self.view_cache,
+                real_name.clone(),
+                (self.transaction, self.table_cache),
+            )
+        } else {
+            self.transaction.view(
+                self.view_cache,
+                view_name.clone(),
+                (self.transaction, self.table_cache),
+            )
+        }
+    }
+
+    #[allow(unused_assignments)]
+    pub fn source_and_bind(
         &mut self,
         table_name: TableName,
-        alias: Option<TableName>,
+        alias: Option<&TableName>,
         join_type: Option<JoinType>,
-    ) -> Result<&TableCatalog, DatabaseError> {
-        let table = if let Some(real_name) = self.table_aliases.get(table_name.as_ref()) {
+        only_table: bool,
+    ) -> Result<Option<Source<'a>>, DatabaseError> {
+        let mut source = None;
+
+        source = if let Some(real_name) = self.table_aliases.get(table_name.as_ref()) {
             self.transaction.table(self.table_cache, real_name.clone())
         } else {
             self.transaction.table(self.table_cache, table_name.clone())
-        }
-        .ok_or(DatabaseError::TableNotFound)?;
-
-        self.bind_table
-            .insert((table_name.clone(), alias, join_type), table);
-
-        Ok(table)
+        }?
+        .map(Source::Table);
+
+        if source.is_none() && !only_table {
+            source = if let Some(real_name) = self.table_aliases.get(table_name.as_ref()) {
+                self.transaction.view(
+                    self.view_cache,
+                    real_name.clone(),
+                    (self.transaction, self.table_cache),
+                )
+            } else {
+                self.transaction.view(
+                    self.view_cache,
+                    table_name.clone(),
+                    (self.transaction, self.table_cache),
+                )
+            }?
+            .map(Source::View);
+        }
+        if let Some(source) = &source {
+            self.bind_table.insert(
+                (table_name.clone(), alias.cloned(), join_type),
+                source.clone(),
+            );
+        }
+        Ok(source)
     }
 
-    /// get table from bindings
-    pub fn bind_table<'b: 'a>(
+    pub fn bind_source<'b: 'a>(
         &self,
         table_name: &str,
         parent: Option<&'b Binder<'a, 'b, T>>,
-    ) -> Result<&TableCatalog, DatabaseError> {
-        if let Some(table_catalog) = self.bind_table.iter().find(|((t, alias, _), _)| {
+    ) -> Result<&Source, DatabaseError> {
+        if let Some(source) = self.bind_table.iter().find(|((t, alias, _), _)| {
             t.as_str() == table_name
                 || matches!(alias.as_ref().map(|a| a.as_str() == table_name), Some(true))
         }) {
-            Ok(table_catalog.1)
+            Ok(source.1)
         } else if let Some(binder) = parent {
-            binder.context.bind_table(table_name, binder.parent)
+            binder.context.bind_source(table_name, binder.parent)
         } else {
             Err(DatabaseError::InvalidTable(table_name.into()))
         }
@@ -238,12 +331,17 @@
 
 pub struct Binder<'a, 'b, T: Transaction> {
     context: BinderContext<'a, T>,
+    table_schema_buf: HashMap<TableName, Option<SchemaOutput>>,
     pub(crate) parent: Option<&'b Binder<'a, 'b, T>>,
 }
 
 impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
     pub fn new(context: BinderContext<'a, T>, parent: Option<&'b Binder<'a, 'b, T>>) -> Self {
-        Binder { context, parent }
+        Binder {
+            context,
+            table_schema_buf: Default::default(),
+            parent,
+        }
     }
 
     pub fn bind(&mut self, stmt: &Statement) -> Result<LogicalPlan, DatabaseError> {
@@ -329,6 +427,13 @@
                 unique,
                 ..
             } => self.bind_create_index(table_name, name, columns, *if_not_exists, *unique)?,
+            Statement::CreateView {
+                or_replace,
+                name,
+                columns,
+                query,
+                ..
+            } => self.bind_create_view(or_replace, name, columns, query)?,
             _ => return Err(DatabaseError::UnsupportedStmt(stmt.to_string())),
         };
         Ok(plan)
@@ -386,7 +491,7 @@ pub mod test {
     use crate::errors::DatabaseError;
     use crate::planner::LogicalPlan;
     use crate::storage::rocksdb::RocksStorage;
-    use crate::storage::{Storage, TableCache, Transaction};
+    use crate::storage::{Storage, TableCache, Transaction, ViewCache};
     use crate::types::ColumnId;
     use crate::types::LogicalType::Integer;
     use crate::utils::lru::ShardingLruCache;
@@ -399,6 +504,7 @@ pub mod test {
     pub(crate) struct TableState<S: Storage> {
         pub(crate) table: TableCatalog,
         pub(crate) table_cache: Arc<TableCache>,
+        pub(crate) view_cache: Arc<ViewCache>,
         pub(crate) storage: S,
     }
 
@@ -410,6 +516,7 @@ pub mod test {
             let mut binder = Binder::new(
                 BinderContext::new(
                     &self.table_cache,
+                    &self.view_cache,
                     &transaction,
                     &scala_functions,
                     &table_functions,
@@ -430,11 +537,12 @@ pub mod test {
     pub(crate) fn build_t1_table() -> Result<TableState<RocksStorage>, DatabaseError> {
         let temp_dir = TempDir::new().expect("unable to create temporary working directory");
         let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?);
+        let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?);
         let storage = build_test_catalog(&table_cache, temp_dir.path())?;
         let table = {
             let transaction = storage.transaction()?;
             transaction
-                .table(&table_cache, Arc::new("t1".to_string()))
+                .table(&table_cache, Arc::new("t1".to_string()))?
                 .unwrap()
                 .clone()
         };
@@ -442,6 +550,7 @@ pub mod test {
         Ok(TableState {
             table,
             table_cache,
+            view_cache,
             storage,
         })
     }
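Taken together, `source_and_bind` above resolves a name in a fixed order: the alias map first, then the table cache, and only when nothing matched and `only_table` is false, the view cache. `bind_analyze` passes `only_table = true`, so views never reach ANALYZE. A hedged sketch of the observable effect (illustrative names; syntax assumed from the existing binder):

    // Illustrative only: tables shadow views during binding.
    const SOURCE_RESOLUTION_DEMO: [&str; 2] = [
        "create table t (id int primary key)",
        "create view tv as select id from t",
    ];
    // Binding `select * from tv` goes: alias map -> table cache (miss) ->
    // view cache (hit), yielding Source::View whose stored plan is cloned in.
    // Binding an ANALYZE of `tv` passes only_table = true, so the view cache
    // is skipped and the binder reports DatabaseError::TableNotFound instead
    // (see bind_analyze above).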
diff --git a/src/binder/select.rs b/src/binder/select.rs
index a1647948..851b409f 100644
--- a/src/binder/select.rs
+++ b/src/binder/select.rs
@@ -14,7 +14,9 @@ use crate::{
     types::value::DataValue,
 };
 
-use super::{lower_case_name, lower_ident, Binder, BinderContext, QueryBindStep, SubQueryType};
+use super::{
+    lower_case_name, lower_ident, Binder, BinderContext, QueryBindStep, Source, SubQueryType,
+};
 use crate::catalog::{ColumnCatalog, ColumnRef, ColumnSummary, TableName};
 use crate::errors::DatabaseError;
@@ -25,7 +27,7 @@ use crate::planner::operator::insert::InsertOperator;
 use crate::planner::operator::join::JoinCondition;
 use crate::planner::operator::sort::{SortField, SortOperator};
 use crate::planner::operator::union::UnionOperator;
-use crate::planner::LogicalPlan;
+use crate::planner::{LogicalPlan, SchemaOutput};
 use crate::storage::Transaction;
 use crate::types::tuple::{Schema, SchemaRef};
 use crate::types::{ColumnId, LogicalType};
@@ -312,7 +314,7 @@
 
             self.context
                 .bind_table
-                .insert((table_name, table_alias, joint_type), table);
+                .insert((table_name, table_alias, joint_type), Source::Table(table));
 
             plan
         } else {
             unreachable!()
@@ -353,7 +355,11 @@
         for (alias, column) in aliases_with_columns {
             let mut alias_column = ColumnCatalog::clone(&column);
             alias_column.set_name(alias.clone());
-            alias_column.set_ref_table(table_alias.clone(), column.id().unwrap_or(ColumnId::new()));
+            alias_column.set_ref_table(
+                table_alias.clone(),
+                column.id().unwrap_or(ColumnId::new()),
+                false,
+            );
 
             let alias_column_expr = ScalarExpression::Alias {
                 expr: Box::new(ScalarExpression::ColumnRef(column)),
@@ -387,16 +393,19 @@
             alias_idents = Some(columns);
         }
 
-        let table_catalog =
-            self.context
-                .table_and_bind(table_name.clone(), table_alias.clone(), join_type)?;
-        let mut scan_op = TableScanOperator::build(table_name.clone(), table_catalog);
+        let source = self
+            .context
+            .source_and_bind(table_name.clone(), table_alias.as_ref(), join_type, false)?
+            .ok_or(DatabaseError::SourceNotFound)?;
+        let mut plan = match source {
+            Source::Table(table) => TableScanOperator::build(table_name.clone(), table),
+            Source::View(view) => LogicalPlan::clone(&view.plan),
+        };
 
         if let Some(idents) = alias_idents {
-            scan_op = self.bind_alias(scan_op, idents, table_alias.unwrap(), table_name.clone())?;
+            plan = self.bind_alias(plan, idents, table_alias.unwrap(), table_name.clone())?;
         }
-
-        Ok(scan_op)
+        Ok(plan)
     }
 
     /// Normalize select item.
@@ -417,7 +426,7 @@
                 SelectItem::UnnamedExpr(expr) => select_items.push(self.bind_expr(expr)?),
                 SelectItem::ExprWithAlias { expr, alias } => {
                     let expr = self.bind_expr(expr)?;
-                    let alias_name = alias.to_string();
+                    let alias_name = alias.value.to_lowercase();
 
                     self.context
                         .add_alias(None, alias_name.clone(), expr.clone());
@@ -437,7 +446,11 @@
                     let mut join_used = HashSet::with_capacity(self.context.using.len());
 
                     for (table_name, alias, _) in self.context.bind_table.keys() {
-                        self.bind_table_column_refs(
+                        let schema_buf =
+                            self.table_schema_buf.entry(table_name.clone()).or_default();
+                        Self::bind_table_column_refs(
+                            &self.context,
+                            schema_buf,
                             &mut select_items,
                             alias.as_ref().unwrap_or(table_name).clone(),
                             Some(&mut join_used),
@@ -445,9 +458,14 @@
                     }
                 }
                 SelectItem::QualifiedWildcard(table_name, _) => {
-                    self.bind_table_column_refs(
+                    let table_name = Arc::new(lower_case_name(table_name)?);
+                    let schema_buf = self.table_schema_buf.entry(table_name.clone()).or_default();
+
+                    Self::bind_table_column_refs(
+                        &self.context,
+                        schema_buf,
                         &mut select_items,
-                        Arc::new(lower_case_name(table_name)?),
+                        table_name,
                         None,
                     )?;
                 }
@@ -457,8 +475,10 @@
         Ok(select_items)
     }
 
+    #[allow(unused_assignments)]
     fn bind_table_column_refs(
-        &self,
+        context: &BinderContext<'a, T>,
+        schema_buf: &mut Option<SchemaOutput>,
         exprs: &mut Vec<ScalarExpression>,
         table_name: TableName,
         mut join_used: Option<&mut HashSet<String>>,
     ) -> Result<(), DatabaseError> {
             context.using.contains(column_name)
                 && matches!(join_used.map(|used| used.contains(column_name)), Some(true))
         };
-        for (_, alias_expr) in self.context.expr_aliases.iter().filter(|(_, expr)| {
+        for (_, alias_expr) in context.expr_aliases.iter().filter(|(_, expr)| {
             if let ScalarExpression::ColumnRef(col) = expr.unpack_alias_ref() {
                 let column_name = col.name();
 
                 if Some(&table_name) == col.table_name()
-                    && !fn_used(column_name, &self.context, join_used.as_deref())
+                    && !fn_used(column_name, context, join_used.as_deref())
                 {
                     if let Some(used) = join_used.as_mut() {
                         used.insert(column_name.to_string());
             return Ok(());
         }
 
-        let table = self
-            .context
-            .table(table_name.clone())
-            .ok_or(DatabaseError::TableNotFound)?;
-        for column in table.columns() {
+        let mut source = None;
+
+        source = context.table(table_name.clone())?.map(Source::Table);
+        if source.is_none() {
+            source = context.view(table_name)?.map(Source::View);
+        }
+        for column in source
+            .ok_or(DatabaseError::SourceNotFound)?
+            .columns(schema_buf)
+        {
             let column_name = column.name();
-            if fn_used(column_name, &self.context, join_used.as_deref()) {
+            if fn_used(column_name, context, join_used.as_deref()) {
                 continue;
             }
             let expr = ScalarExpression::ColumnRef(column.clone());
@@ -532,6 +557,7 @@
         };
         let BinderContext {
             table_cache,
+            view_cache,
             transaction,
             scala_functions,
             table_functions,
@@ -541,6 +567,7 @@
         let mut binder = Binder::new(
             BinderContext::new(
                 table_cache,
+                view_cache,
                 *transaction,
                 scala_functions,
                 table_functions,
@@ -714,30 +741,36 @@
         let mut left_table_force_nullable = false;
         let mut left_table = None;
 
-        for ((_, _, join_option), table) in bind_tables {
+        for ((table_name, _, join_option), table) in bind_tables {
             if let Some(join_type) = join_option {
                 let (left_force_nullable, right_force_nullable) = joins_nullable(join_type);
-                table_force_nullable.push((table, right_force_nullable));
+                table_force_nullable.push((table_name, table, right_force_nullable));
                 left_table_force_nullable = left_force_nullable;
             } else {
-                left_table = Some(table);
+                left_table = Some((table_name, table));
             }
         }
 
-        if let Some(table) = left_table {
-            table_force_nullable.push((table, left_table_force_nullable));
+        if let Some((table_name, table)) = left_table {
+            table_force_nullable.push((table_name, table, left_table_force_nullable));
         }
 
         for column in select_items {
             if let ScalarExpression::ColumnRef(col) = column {
                 let _ = table_force_nullable
                     .iter()
-                    .find(|(table, _)| table.contains_column(col.name()))
-                    .map(|(_, nullable)| {
-                        let mut new_col = ColumnCatalog::clone(col);
-                        new_col.nullable = *nullable;
-
-                        *col = ColumnRef::from(new_col);
+                    .find(|(table_name, source, _)| {
+                        let schema_buf = self
+                            .table_schema_buf
+                            .entry((*table_name).clone())
+                            .or_default();
+
+                        source.column(col.name(), schema_buf).is_some()
+                    })
+                    .map(|(_, _, nullable)| {
+                        if let Some(new_column) = col.nullable_for_join(*nullable) {
+                            *col = new_column;
+                        }
                     });
             }
         }
@@ -852,7 +885,7 @@
         right_schema: &Schema,
     ) -> Result<(), DatabaseError> {
         let fn_contains = |schema: &Schema, summary: &ColumnSummary| {
-            schema.iter().any(|column| summary == &column.summary)
+            schema.iter().any(|column| summary == column.summary())
         };
         let fn_or_contains =
             |left_schema: &Schema, right_schema: &Schema, summary: &ColumnSummary| {
diff --git a/src/binder/show.rs b/src/binder/show.rs
index b3b54b40..9d9b0536 100644
--- a/src/binder/show.rs
+++ b/src/binder/show.rs
@@ -4,7 +4,7 @@ use crate::planner::operator::Operator;
 use crate::planner::LogicalPlan;
 use crate::storage::Transaction;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_show_tables(&mut self) -> Result<LogicalPlan, DatabaseError> {
         Ok(LogicalPlan::new(Operator::Show, vec![]))
     }
 }
diff --git a/src/binder/truncate.rs b/src/binder/truncate.rs
index 2dbd9e86..a1a0ee98 100644
--- a/src/binder/truncate.rs
+++ b/src/binder/truncate.rs
@@ -7,7 +7,7 @@ use crate::storage::Transaction;
 use sqlparser::ast::ObjectName;
 use std::sync::Arc;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_truncate(
         &mut self,
         name: &ObjectName,
diff --git a/src/binder/update.rs b/src/binder/update.rs
index fe803a3f..9ab28994 100644
--- a/src/binder/update.rs
+++ b/src/binder/update.rs
@@ -10,7 +10,7 @@ use sqlparser::ast::{Assignment, Expr, TableFactor, TableWithJoins};
 use std::slice;
 use std::sync::Arc;
 
-impl<'a, 'b, T: Transaction> Binder<'a, 'b, T> {
+impl<T: Transaction> Binder<'_, '_, T> {
     pub(crate) fn bind_update(
         &mut self,
         to: &TableWithJoins,
diff --git a/src/catalog/column.rs b/src/catalog/column.rs
index 524ffd36..5bf8fced 100644
--- a/src/catalog/column.rs
+++ b/src/catalog/column.rs
@@ -4,7 +4,6 @@ use crate::expression::ScalarExpression;
 use crate::types::tuple::EMPTY_TUPLE;
 use crate::types::value::ValueRef;
 use crate::types::{ColumnId, LogicalType};
-use serde::{Deserialize, Serialize};
 use serde_macros::ReferenceSerialization;
 use sqlparser::ast::CharLengthUnits;
 use std::hash::Hash;
@@ -30,26 +29,40 @@ impl From<ColumnCatalog> for ColumnRef {
 
 #[derive(Debug, Clone, Hash, Eq, PartialEq, ReferenceSerialization)]
 pub struct ColumnCatalog {
-    pub summary: ColumnSummary,
-    pub nullable: bool,
-    pub desc: ColumnDesc,
+    summary: ColumnSummary,
+    nullable: bool,
+    desc: ColumnDesc,
+    in_join: bool,
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq)]
+#[derive(Debug, Clone, Hash, Eq, PartialEq)]
 pub enum ColumnRelation {
     None,
     Table {
         column_id: ColumnId,
         table_name: TableName,
+        is_temp: bool,
     },
 }
 
-#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize, ReferenceSerialization)]
+#[derive(Debug, Clone, Hash, Eq, PartialEq, ReferenceSerialization)]
 pub struct ColumnSummary {
     pub name: String,
     pub relation: ColumnRelation,
 }
 
+impl ColumnRef {
+    pub(crate) fn nullable_for_join(&self, nullable: bool) -> Option<ColumnRef> {
+        if self.nullable == nullable {
+            return None;
+        }
+        let mut temp = ColumnCatalog::clone(self);
+        temp.nullable = nullable;
+        temp.in_join = true;
+        Some(ColumnRef::from(temp))
+    }
+}
+
 impl ColumnCatalog {
     pub fn new(column_name: String, nullable: bool, column_desc: ColumnDesc) -> ColumnCatalog {
         ColumnCatalog {
@@ -59,6 +72,21 @@ impl ColumnCatalog {
             },
             nullable,
             desc: column_desc,
+            in_join: false,
         }
     }
 
+    pub(crate) fn direct_new(
+        summary: ColumnSummary,
+        nullable: bool,
+        column_desc: ColumnDesc,
+        in_join: bool,
+    ) -> ColumnCatalog {
+        ColumnCatalog {
+            summary,
+            nullable,
+            desc: column_desc,
+            in_join,
+        }
+    }
+
@@ -77,6 +105,7 @@ impl ColumnCatalog {
                 None,
             )
             .unwrap(),
+            in_join: false,
         }
     }
 
@@ -84,6 +113,10 @@ impl ColumnCatalog {
         &self.summary
     }
 
+    pub fn summary_mut(&mut self) -> &mut ColumnSummary {
+        &mut self.summary
+    }
+
     pub fn id(&self) -> Option<ColumnId> {
         match &self.summary.relation {
             ColumnRelation::None => None,
@@ -113,13 +146,22 @@ impl ColumnCatalog {
         self.summary.name = name;
     }
 
-    pub fn set_ref_table(&mut self, table_name: TableName, column_id: ColumnId) {
+    pub fn set_ref_table(&mut self, table_name: TableName, column_id: ColumnId, is_temp: bool) {
         self.summary.relation = ColumnRelation::Table {
             column_id,
             table_name,
+            is_temp,
         };
     }
 
+    pub fn in_join(&self) -> bool {
+        self.in_join
+    }
+
+    pub fn nullable(&self) -> bool {
+        self.nullable
+    }
+
     pub fn datatype(&self) -> &LogicalType {
         &self.desc.column_datatype
     }
@@ -132,10 +174,13 @@ impl ColumnCatalog {
             .transpose()
     }
 
-    #[allow(dead_code)]
     pub(crate) fn desc(&self) -> &ColumnDesc {
         &self.desc
     }
+
+    pub(crate) fn desc_mut(&mut self) -> &mut ColumnDesc {
+        &mut self.desc
+    }
 }
 
 /// The descriptor of a column.
diff --git a/src/catalog/mod.rs b/src/catalog/mod.rs
index 27e63f94..5408fc1a 100644
--- a/src/catalog/mod.rs
+++ b/src/catalog/mod.rs
@@ -5,3 +5,4 @@ pub(crate) use self::table::*;
 
 pub mod column;
 pub mod table;
+pub mod view;
diff --git a/src/catalog/table.rs b/src/catalog/table.rs
index 81e9bfe2..23c2601d 100644
--- a/src/catalog/table.rs
+++ b/src/catalog/table.rs
@@ -4,7 +4,7 @@ use crate::types::index::{IndexMeta, IndexMetaRef, IndexType};
 use crate::types::tuple::SchemaRef;
 use crate::types::{ColumnId, LogicalType};
 use itertools::Itertools;
-use serde::{Deserialize, Serialize};
+use serde_macros::ReferenceSerialization;
 use std::collections::BTreeMap;
 use std::sync::Arc;
 use std::{slice, vec};
@@ -24,7 +24,7 @@ pub struct TableCatalog {
 }
 
 //TODO: can add some like Table description and other information as attributes
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, ReferenceSerialization)]
 pub struct TableMeta {
     pub(crate) table_name: TableName,
 }
@@ -52,6 +52,7 @@ impl TableCatalog {
             .map(|(_, i)| &self.schema_ref[*i])
     }
 
+    #[allow(dead_code)]
     pub(crate) fn contains_column(&self, name: &str) -> bool {
         self.column_idxs.contains_key(name)
     }
@@ -76,7 +77,7 @@ impl TableCatalog {
         self.schema_ref
             .iter()
             .enumerate()
-            .find(|(_, column)| column.desc.is_primary)
+            .find(|(_, column)| column.desc().is_primary)
             .ok_or(DatabaseError::PrimaryKeyNotFound)
     }
 
@@ -97,9 +98,10 @@ impl TableCatalog {
         }
         let col_id = generator.generate().unwrap();
 
-        col.summary.relation = ColumnRelation::Table {
+        col.summary_mut().relation = ColumnRelation::Table {
             column_id: col_id,
             table_name: self.name.clone(),
+            is_temp: false,
         };
 
         self.column_idxs
diff --git a/src/catalog/view.rs b/src/catalog/view.rs
new file mode 100644
index 00000000..b6e42cbb
--- /dev/null
+++ b/src/catalog/view.rs
@@ -0,0 +1,19 @@
+use crate::catalog::TableName;
+use crate::planner::LogicalPlan;
+use serde_macros::ReferenceSerialization;
+use std::fmt;
+use std::fmt::Formatter;
+
+#[derive(Debug, Clone, Hash, Eq, PartialEq, ReferenceSerialization)]
+pub struct View {
+    pub name: TableName,
+    pub plan: Box<LogicalPlan>,
+}
+
+impl fmt::Display for View {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "View {}: {}", self.name, self.plan.explain(0))?;
+
+        Ok(())
+    }
+}
diff --git a/src/db.rs b/src/db.rs
index fdfcda52..0b8f0ded 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -1,5 +1,4 @@
 use crate::binder::{command_type, Binder, BinderContext, CommandType};
-use crate::catalog::TableCatalog;
 use crate::errors::DatabaseError;
 use crate::execution::{build_write, try_collect};
 use crate::expression::function::scala::ScalarFunctionImpl;
@@ -17,7 +16,7 @@ use crate::optimizer::rule::normalization::NormalizationRuleImpl;
 use crate::parser::parse_sql;
 use crate::planner::LogicalPlan;
 use crate::storage::rocksdb::RocksStorage;
-use crate::storage::{StatisticsMetaCache, Storage, TableCache, Transaction};
+use crate::storage::{StatisticsMetaCache, Storage, TableCache, Transaction, ViewCache};
 use crate::types::tuple::{SchemaRef, Tuple};
 use crate::utils::lru::ShardingLruCache;
 use ahash::HashMap;
@@ -79,6 +78,7 @@ impl DataBaseBuilder {
         let storage = RocksStorage::new(self.path)?;
         let meta_cache = Arc::new(ShardingLruCache::new(256, 8, RandomState::new())?);
         let table_cache = Arc::new(ShardingLruCache::new(48, 4, RandomState::new())?);
+        let view_cache = Arc::new(ShardingLruCache::new(12, 4, RandomState::new())?);
 
         Ok(Database {
             storage,
@@ -87,6 +87,7 @@ impl DataBaseBuilder {
             mdl: Arc::new(RwLock::new(())),
             meta_cache,
             table_cache,
+            view_cache,
         })
     }
 }
@@ -97,7 +98,8 @@ pub struct Database<S: Storage> {
     table_functions: Arc<TableFunctions>,
     mdl: Arc<RwLock<()>>,
     pub(crate) meta_cache: Arc<StatisticsMetaCache>,
-    pub(crate) table_cache: Arc<ShardingLruCache<String, TableCatalog>>,
+    pub(crate) table_cache: Arc<TableCache>,
+    pub(crate) view_cache: Arc<ViewCache>,
 }
 
 impl<S: Storage> Database<S> {
@@ -118,6 +120,7 @@
         let mut plan = Self::build_plan(
             stmt,
             &self.table_cache,
+            &self.view_cache,
             &self.meta_cache,
             &transaction,
             &self.scala_functions,
@@ -127,7 +130,7 @@
         let schema = plan.output_schema().clone();
         let iterator = build_write(
             plan,
-            (&self.table_cache, &self.meta_cache),
+            (&self.table_cache, &self.view_cache, &self.meta_cache),
             &mut transaction,
         );
         let tuples = try_collect(iterator)?;
@@ -148,12 +151,14 @@
             _guard: guard,
             meta_cache: self.meta_cache.clone(),
             table_cache: self.table_cache.clone(),
+            view_cache: self.view_cache.clone(),
         })
     }
 
     pub(crate) fn build_plan(
         stmt: &Statement,
         table_cache: &TableCache,
+        view_cache: &ViewCache,
         meta_cache: &StatisticsMetaCache,
         transaction: &<S as Storage>::TransactionType<'_>,
         scala_functions: &ScalaFunctions,
@@ -162,6 +167,7 @@
         let mut binder = Binder::new(
             BinderContext::new(
                 table_cache,
+                view_cache,
                 transaction,
                 scala_functions,
                 table_functions,
@@ -273,7 +279,8 @@ pub struct DBTransaction<'a, S: Storage + 'a> {
     table_functions: Arc<TableFunctions>,
     _guard: ArcRwLockReadGuard<RawRwLock, ()>,
     pub(crate) meta_cache: Arc<StatisticsMetaCache>,
-    pub(crate) table_cache: Arc<ShardingLruCache<String, TableCatalog>>,
+    pub(crate) table_cache: Arc<TableCache>,
+    pub(crate) view_cache: Arc<ViewCache>,
 }
 
 impl<S: Storage> DBTransaction<'_, S> {
@@ -291,6 +298,7 @@
         let mut plan = Database::<S>::build_plan(
             stmt,
             &self.table_cache,
+            &self.view_cache,
             &self.meta_cache,
             &self.inner,
             &self.scala_functions,
@@ -298,7 +306,11 @@
         )?;
         let schema = plan.output_schema().clone();
 
-        let executor = build_write(plan, (&self.table_cache, &self.meta_cache), &mut self.inner);
+        let executor = build_write(
+            plan,
+            (&self.table_cache, &self.view_cache, &self.meta_cache),
+            &mut self.inner,
+        );
 
         Ok((schema, try_collect(executor)?))
     }
@@ -408,7 +420,7 @@ pub(crate) mod test {
             ColumnDesc::new(LogicalType::Integer, false, false, None).unwrap(),
         );
         let number_column_id = schema[0].id().unwrap();
-        column.set_ref_table(Arc::new("a".to_string()), number_column_id);
+        column.set_ref_table(Arc::new("a".to_string()), number_column_id, false);
 
         debug_assert_eq!(schema, Arc::new(vec![ColumnRef::from(column)]));
         debug_assert_eq!(
diff --git a/src/errors.rs b/src/errors.rs
index 1d3b280c..e903fd45 100644
--- a/src/errors.rs
+++ b/src/errors.rs
@@ -120,6 +120,8 @@ pub enum DatabaseError {
     ),
     #[error("the number of caches cannot be divisible by the number of shards")]
     ShardingNotAlign,
+    #[error("the source not found")]
+    SourceNotFound,
     #[error("the table already exists")]
     TableExists,
     #[error("the table not found")]
@@ -150,4 +152,6 @@ pub enum DatabaseError {
     UnsupportedStmt(String),
     #[error("values length not match, expect {0}, got {1}")]
     ValuesLenMismatch(usize, usize),
+    #[error("the view already exists")]
+    ViewExists,
 }
diff --git a/src/execution/ddl/add_column.rs b/src/execution/ddl/add_column.rs
index 863d2efc..09bd0398 100644
--- a/src/execution/ddl/add_column.rs
+++ b/src/execution/ddl/add_column.rs
@@ -1,6 +1,6 @@
 use crate::execution::{build_read, Executor, WriteExecutor};
 use crate::planner::LogicalPlan;
-use crate::storage::{StatisticsMetaCache, TableCache};
+use crate::storage::{StatisticsMetaCache, TableCache, ViewCache};
 use crate::types::index::{Index, IndexType};
 use crate::types::tuple::Tuple;
 use crate::types::tuple_builder::TupleBuilder;
@@ -28,7 +28,7 @@ impl From<(AddColumnOperator, LogicalPlan)> for AddColumn {
 impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for AddColumn {
     fn execute_mut(
         mut self,
-        cache: (&'a TableCache, &'a StatisticsMetaCache),
+        cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache),
         transaction: &'a mut T,
     ) -> Executor<'a> {
         Box::new(
@@ -76,8 +76,7 @@
                 // Unique Index
                 if let (Some(unique_values), Some(unique_meta)) = (
                     unique_values,
-                    transaction
-                        .table(cache.0, table_name.clone())
+                    throw!(transaction.table(cache.0, table_name.clone()))
                         .and_then(|table| table.get_unique_index(&col_id))
                         .cloned(),
                 ) {
diff --git a/src/execution/ddl/create_index.rs b/src/execution/ddl/create_index.rs
index 94c6dd13..b73394c2 100644
--- a/src/execution/ddl/create_index.rs
+++ b/src/execution/ddl/create_index.rs
@@ -4,7 +4,7 @@ use crate::execution::{build_read, Executor, WriteExecutor};
 use crate::expression::ScalarExpression;
 use crate::planner::operator::create_index::CreateIndexOperator;
 use crate::planner::LogicalPlan;
-use crate::storage::{StatisticsMetaCache, TableCache, Transaction};
+use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache};
 use crate::throw;
 use crate::types::index::Index;
 use crate::types::tuple::Tuple;
@@ -28,7 +28,7 @@ impl From<(CreateIndexOperator, LogicalPlan)> for CreateIndex {
 impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for CreateIndex {
     fn execute_mut(
         mut self,
-        cache: (&'a TableCache, &'a StatisticsMetaCache),
+        cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache),
         transaction: &'a mut T,
     ) -> Executor<'a> {
         Box::new(
diff --git a/src/execution/ddl/create_table.rs b/src/execution/ddl/create_table.rs
index 1ad9555b..c2d2a7f4 100644
--- a/src/execution/ddl/create_table.rs
+++ b/src/execution/ddl/create_table.rs
@@ -1,6 +1,6 @@
 use crate::execution::{Executor, WriteExecutor};
 use crate::planner::operator::create_table::CreateTableOperator;
-use crate::storage::{StatisticsMetaCache, TableCache, Transaction};
+use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache};
 use crate::throw;
 use crate::types::tuple_builder::TupleBuilder;
 
@@ -17,7 +17,7 @@ impl From<CreateTableOperator> for CreateTable {
 impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for CreateTable {
     fn execute_mut(
         self,
-        (table_cache, _): (&'a TableCache, &'a StatisticsMetaCache),
+        (table_cache, _, _): (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache),
         transaction: &'a mut T,
     ) -> Executor<'a> {
         Box::new(
diff --git a/src/execution/ddl/create_view.rs b/src/execution/ddl/create_view.rs
new file mode 100644
index 00000000..5fe81863
--- /dev/null
+++ b/src/execution/ddl/create_view.rs
@@ -0,0 +1,35 @@
+use crate::execution::{Executor, WriteExecutor};
+use crate::planner::operator::create_view::CreateViewOperator;
+use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache};
+use crate::throw;
+use crate::types::tuple_builder::TupleBuilder;
+
+pub struct CreateView {
+    op: CreateViewOperator,
+}
+
+impl From<CreateViewOperator> for CreateView {
+    fn from(op: CreateViewOperator) -> Self {
+        CreateView { op }
+    }
+}
+
+impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for CreateView {
+    fn execute_mut(
+        self,
+        (_, view_cache, _): (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache),
+        transaction: &'a mut T,
+    ) -> Executor<'a> {
+        Box::new(
+            #[coroutine]
+            move || {
+                let CreateViewOperator { view, or_replace } = self.op;
+
+                let result_tuple = TupleBuilder::build_result(format!("{}", view.name));
+                throw!(transaction.create_view(view_cache, view, or_replace));
+
+                yield Ok(result_tuple);
+            },
+        )
+    }
+}
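The executor above defers the actual write to `Transaction::create_view`, with `or_replace` deciding whether an existing definition may be overwritten; `DatabaseError::ViewExists` (added in src/errors.rs above) is the natural failure otherwise. A hedged illustration, assuming standard `CREATE [OR REPLACE] VIEW` syntax from sqlparser:

    // Illustrative only; statement names are examples.
    const OR_REPLACE_DEMO: [&str; 2] = [
        "create view v1 as select c1 from t1",
        // Re-running the first statement without OR REPLACE is expected to
        // surface the view-exists error; this variant swaps the stored plan.
        "create or replace view v1 as select c2 from t1",
    ];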
diff --git a/src/execution/ddl/drop_column.rs b/src/execution/ddl/drop_column.rs
index 73802989..5aee0f16 100644
--- a/src/execution/ddl/drop_column.rs
+++ b/src/execution/ddl/drop_column.rs
@@ -2,7 +2,7 @@ use crate::errors::DatabaseError;
 use crate::execution::{build_read, Executor, WriteExecutor};
 use crate::planner::operator::alter_table::drop_column::DropColumnOperator;
 use crate::planner::LogicalPlan;
-use crate::storage::{StatisticsMetaCache, TableCache, Transaction};
+use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache};
 use crate::throw;
 use crate::types::tuple::Tuple;
 use crate::types::tuple_builder::TupleBuilder;
@@ -24,7 +24,7 @@ impl From<(DropColumnOperator, LogicalPlan)> for DropColumn {
 impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for DropColumn {
     fn execute_mut(
         mut self,
-        cache: (&'a TableCache, &'a StatisticsMetaCache),
+        cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache),
         transaction: &'a mut T,
     ) -> Executor<'a> {
         Box::new(
@@ -41,7 +41,7 @@
                     .iter()
                     .enumerate()
                     .find(|(_, column)| column.name() == column_name)
-                    .map(|(i, column)| (i, column.desc.is_primary))
+                    .map(|(i, column)| (i, column.desc().is_primary))
                 {
                     if is_primary {
                         throw!(Err(DatabaseError::InvalidColumn(
@@ -69,7 +69,7 @@
                     for tuple in tuples {
                         throw!(transaction.append_tuple(&table_name, tuple, &types, true));
                     }
-                    throw!(transaction.drop_column(cache.0, cache.1, &table_name, &column_name));
+                    throw!(transaction.drop_column(cache.0, cache.2, &table_name, &column_name));
 
                     yield Ok(TupleBuilder::build_result("1".to_string()));
                 } else if if_exists {
diff --git a/src/execution/ddl/drop_table.rs b/src/execution/ddl/drop_table.rs
index 3e949bf2..e80b862f 100644
--- a/src/execution/ddl/drop_table.rs
+++ b/src/execution/ddl/drop_table.rs
@@ -1,6 +1,6 @@
 use crate::execution::{Executor, WriteExecutor};
 use crate::planner::operator::drop_table::DropTableOperator;
-use crate::storage::{StatisticsMetaCache, TableCache, Transaction};
+use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache};
 use crate::throw;
 use crate::types::tuple_builder::TupleBuilder;
 
@@ -17,7 +17,7 @@ impl From<DropTableOperator> for DropTable {
 impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for DropTable {
     fn execute_mut(
         self,
-        (table_cache, _): (&'a TableCache, &'a StatisticsMetaCache),
+        (table_cache, _, _): (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache),
         transaction: &'a mut T,
     ) -> Executor<'a> {
         Box::new(
diff --git a/src/execution/ddl/mod.rs b/src/execution/ddl/mod.rs
index 384bd2d7..5ead54b6 100644
--- a/src/execution/ddl/mod.rs
+++ b/src/execution/ddl/mod.rs
@@ -1,6 +1,7 @@
 pub mod add_column;
 pub(crate) mod create_index;
 pub(crate) mod create_table;
+pub(crate) mod create_view;
 pub mod drop_column;
 pub(crate) mod drop_table;
 pub(crate) mod truncate;
diff --git a/src/execution/ddl/truncate.rs b/src/execution/ddl/truncate.rs
index 0d57ac1e..d1e00f84 100644
--- a/src/execution/ddl/truncate.rs
+++ b/src/execution/ddl/truncate.rs
@@ -1,6 +1,6 @@
 use crate::execution::{Executor, WriteExecutor};
 use crate::planner::operator::truncate::TruncateOperator;
-use crate::storage::{StatisticsMetaCache, TableCache, Transaction};
+use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache};
 use crate::throw;
 use crate::types::tuple_builder::TupleBuilder;
 
@@ -17,7 +17,7 @@ impl From<TruncateOperator> for Truncate {
 impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Truncate {
     fn execute_mut(
         self,
-        _: (&'a TableCache, &'a StatisticsMetaCache),
+        _: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache),
         transaction: &'a mut T,
     ) -> Executor<'a> {
         Box::new(
diff --git a/src/execution/ddl/truncate.rs b/src/execution/ddl/truncate.rs index 0d57ac1e..d1e00f84 100644 --- a/src/execution/ddl/truncate.rs +++ b/src/execution/ddl/truncate.rs @@ -1,6 +1,6 @@ use crate::execution::{Executor, WriteExecutor}; use crate::planner::operator::truncate::TruncateOperator; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple_builder::TupleBuilder; @@ -17,7 +17,7 @@ impl From<TruncateOperator> for Truncate { impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Truncate { fn execute_mut( self, - _: (&'a TableCache, &'a StatisticsMetaCache), + _: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a mut T, ) -> Executor<'a> { Box::new( diff --git a/src/execution/dml/analyze.rs b/src/execution/dml/analyze.rs index 7ecf5ccf..ab3918af 100644 --- a/src/execution/dml/analyze.rs +++ b/src/execution/dml/analyze.rs @@ -6,7 +6,7 @@ use crate::optimizer::core::histogram::HistogramBuilder; use crate::optimizer::core::statistics_meta::StatisticsMeta; use crate::planner::operator::analyze::AnalyzeOperator; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::index::IndexMetaRef; use crate::types::tuple::Tuple; @@ -53,7 +53,7 @@ impl From<(AnalyzeOperator, LogicalPlan)> for Analyze { impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Analyze { fn execute_mut( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a mut T, ) -> Executor<'a> { Box::new( @@ -67,8 +67,7 @@ impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Analyze { let schema = input.output_schema().clone(); let mut builders = Vec::with_capacity(index_metas.len()); - let table = throw!(transaction - .table(cache.0, table_name.clone()) + let table = throw!(throw!(transaction.table(cache.0, table_name.clone())) .cloned() .ok_or(DatabaseError::TableNotFound)); @@ -122,7 +121,7 @@ impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Analyze { ty: Utf8Type::Variable(None), unit: CharLengthUnits::Characters, })); - throw!(transaction.save_table_meta(cache.1, &table_name, path_str, meta)); + throw!(transaction.save_table_meta(cache.2, &table_name, path_str, meta)); throw!(fs::rename(&temp_path, &path).map_err(DatabaseError::IO)); active_index_paths.insert(index_file); @@ -133,7 +132,7 @@ impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Analyze { let entry: DirEntry = throw!(entry.map_err(DatabaseError::IO)); if !active_index_paths.remove(&entry.file_name()) { - throw!(fs::remove_file(&entry.path()).map_err(DatabaseError::IO)); + throw!(fs::remove_file(entry.path()).map_err(DatabaseError::IO)); } } @@ -159,6 +158,7 @@ mod test { use crate::errors::DatabaseError; use crate::execution::dml::analyze::{DEFAULT_NUM_OF_BUCKETS, DEFAULT_STATISTICS_META_PATH}; use crate::optimizer::core::statistics_meta::StatisticsMeta; + use crate::storage::rocksdb::RocksTransaction; use std::ffi::OsStr; use std::fs; use tempfile::TempDir; @@ -196,17 +196,17 @@ mod test { } paths.sort(); - let statistics_meta_pk_index = StatisticsMeta::from_file(&paths[0])?; + let statistics_meta_pk_index = StatisticsMeta::from_file::<RocksTransaction>(&paths[0])?; assert_eq!(statistics_meta_pk_index.index_id(), 0); assert_eq!(statistics_meta_pk_index.histogram().values_len(), 101); - let statistics_meta_b_index = StatisticsMeta::from_file(&paths[1])?; + let statistics_meta_b_index = StatisticsMeta::from_file::<RocksTransaction>(&paths[1])?; assert_eq!(statistics_meta_b_index.index_id(), 1); assert_eq!(statistics_meta_b_index.histogram().values_len(), 101); - let statistics_meta_p_index = StatisticsMeta::from_file(&paths[2])?; + let statistics_meta_p_index = StatisticsMeta::from_file::<RocksTransaction>(&paths[2])?; assert_eq!(statistics_meta_p_index.index_id(), 2); assert_eq!(statistics_meta_p_index.histogram().values_len(), 101);
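The `analyze` hunk above also shows the new double `throw!` around `transaction.table(..)`, which indicates the accessor's return type changed from `Option<&TableCatalog>` to `Result<Option<&TableCatalog>, DatabaseError>`: the inner `throw!` unwraps the `Result`, the outer one unwraps the `ok_or(..)` conversion of the `Option`. The macro itself is defined elsewhere in the crate; an assumed sketch of its shape, for readers of these coroutine-based executors:

    // Assumed shape of `crate::throw!`: `?` is unavailable inside a coroutine
    // body, so the error is yielded to the caller and the coroutine returns.
    macro_rules! throw {
        ($result:expr) => {
            match $result {
                Ok(value) => value,
                Err(err) => {
                    yield Err(err);
                    return;
                }
            }
        };
    }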
diff --git a/src/execution/dml/copy_from_file.rs b/src/execution/dml/copy_from_file.rs index 5f53b22c..8e12ea1a 100644 --- a/src/execution/dml/copy_from_file.rs +++ b/src/execution/dml/copy_from_file.rs @@ -2,7 +2,7 @@ use crate::binder::copy::FileFormat; use crate::errors::DatabaseError; use crate::execution::{Executor, WriteExecutor}; use crate::planner::operator::copy_from_file::CopyFromFileOperator; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple::{types, Tuple}; use crate::types::tuple_builder::TupleBuilder; @@ -26,7 +26,7 @@ impl From<CopyFromFileOperator> for CopyFromFile { impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for CopyFromFile { fn execute_mut( self, - _: (&'a TableCache, &'a StatisticsMetaCache), + _: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a mut T, ) -> Executor<'a> { Box::new( @@ -131,45 +131,50 @@ mod tests { write!(file, "{}", csv).expect("failed to write file"); let columns = vec![ - ColumnRef::from(ColumnCatalog { - summary: ColumnSummary { + ColumnRef::from(ColumnCatalog::direct_new( + ColumnSummary { name: "a".to_string(), relation: ColumnRelation::Table { column_id: Ulid::new(), table_name: Arc::new("t1".to_string()), + is_temp: false, }, }, - nullable: false, - desc: ColumnDesc::new(LogicalType::Integer, true, false, None).unwrap(), - }), - ColumnRef::from(ColumnCatalog { - summary: ColumnSummary { + false, + ColumnDesc::new(LogicalType::Integer, true, false, None)?, + false, + )), + ColumnRef::from(ColumnCatalog::direct_new( + ColumnSummary { name: "b".to_string(), relation: ColumnRelation::Table { column_id: Ulid::new(), table_name: Arc::new("t1".to_string()), + is_temp: false, }, }, - nullable: false, - desc: ColumnDesc::new(LogicalType::Float, false, false, None).unwrap(), - }), - ColumnRef::from(ColumnCatalog { - summary: ColumnSummary { + false, + ColumnDesc::new(LogicalType::Float, false, false, None)?, + false, + )), + ColumnRef::from(ColumnCatalog::direct_new( + ColumnSummary { name: "c".to_string(), relation: ColumnRelation::Table { column_id: Ulid::new(), table_name: Arc::new("t1".to_string()), + is_temp: false, }, }, - nullable: false, - desc: ColumnDesc::new( + false, + ColumnDesc::new( LogicalType::Varchar(Some(10), CharLengthUnits::Characters), false, false, None, - ) - .unwrap(), - }), + )?, + false, + )), ]; let op = CopyFromFileOperator { @@ -196,8 +201,10 @@ mod tests { let storage = db.storage; let mut transaction = storage.transaction()?; - let mut coroutine = - executor.execute_mut((&db.table_cache, &db.meta_cache), &mut transaction); + let mut coroutine = executor.execute_mut( + (&db.table_cache, &db.view_cache, &db.meta_cache), + &mut transaction, ); let tuple = match Pin::new(&mut coroutine).resume(()) { CoroutineState::Yielded(tuple) => tuple, CoroutineState::Complete(()) => unreachable!(), diff --git a/src/execution/dml/delete.rs b/src/execution/dml/delete.rs index 9eaa9c62..6ab02785 100644 --- a/src/execution/dml/delete.rs +++ b/src/execution/dml/delete.rs @@ -5,7 +5,7 @@ use crate::errors::DatabaseError; use crate::execution::{build_read, Executor, WriteExecutor}; use crate::expression::ScalarExpression; use crate::planner::operator::delete::DeleteOperator; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use
crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::index::{Index, IndexId, IndexType}; use crate::types::tuple::Tuple; @@ -30,7 +30,7 @@ impl From<(DeleteOperator, LogicalPlan)> for Delete { impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Delete { fn execute_mut( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a mut T, ) -> Executor<'a> { Box::new( @@ -42,8 +42,7 @@ impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Delete { } = self; let schema = input.output_schema().clone(); - let table = throw!(transaction - .table(cache.0, table_name.clone()) + let table = throw!(throw!(transaction.table(cache.0, table_name.clone())) .cloned() .ok_or(DatabaseError::TableNotFound)); let mut tuple_ids = Vec::new(); diff --git a/src/execution/dml/insert.rs b/src/execution/dml/insert.rs index 954e6c12..fe81b67b 100644 --- a/src/execution/dml/insert.rs +++ b/src/execution/dml/insert.rs @@ -4,7 +4,7 @@ use crate::execution::dql::projection::Projection; use crate::execution::{build_read, Executor, WriteExecutor}; use crate::planner::operator::insert::InsertOperator; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::index::Index; use crate::types::tuple::Tuple; @@ -63,7 +63,7 @@ impl ColumnCatalog { impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Insert { fn execute_mut( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a mut T, ) -> Executor<'a> { Box::new( @@ -81,11 +81,12 @@ impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Insert { let pk_key = throw!(schema .iter() - .find(|col| col.desc.is_primary) + .find(|col| col.desc().is_primary) .map(|col| col.key(is_mapping_by_name)) - .ok_or_else(|| DatabaseError::NotNull)); + .ok_or(DatabaseError::NotNull)); - if let Some(table_catalog) = transaction.table(cache.0, table_name.clone()).cloned() + if let Some(table_catalog) = + throw!(transaction.table(cache.0, table_name.clone())).cloned() { let types = table_catalog.types(); let mut coroutine = build_read(input, cache, transaction); @@ -112,7 +113,7 @@ impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Insert { } value.unwrap_or_else(|| Arc::new(DataValue::none(col.datatype()))) }; - if value.is_null() && !col.nullable { + if value.is_null() && !col.nullable() { yield Err(DatabaseError::NotNull); return; } diff --git a/src/execution/dml/update.rs b/src/execution/dml/update.rs index bb26690f..93d2f966 100644 --- a/src/execution/dml/update.rs +++ b/src/execution/dml/update.rs @@ -3,7 +3,7 @@ use crate::execution::dql::projection::Projection; use crate::execution::{build_read, Executor, WriteExecutor}; use crate::planner::operator::update::UpdateOperator; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::index::Index; use crate::types::tuple::types; @@ -35,7 +35,7 @@ impl From<(UpdateOperator, LogicalPlan, LogicalPlan)> for Update { impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Update { fn execute_mut( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a 
StatisticsMetaCache), transaction: &'a mut T, ) -> Executor<'a> { Box::new( @@ -51,7 +51,8 @@ impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Update { let input_schema = input.output_schema().clone(); let types = types(&input_schema); - if let Some(table_catalog) = transaction.table(cache.0, table_name.clone()).cloned() + if let Some(table_catalog) = + throw!(transaction.table(cache.0, table_name.clone())).cloned() { let mut value_map = HashMap::new(); let mut tuples = Vec::new(); @@ -95,7 +96,7 @@ impl<'a, T: Transaction + 'a> WriteExecutor<'a, T> for Update { for (i, column) in input_schema.iter().enumerate() { if let Some(value) = value_map.get(&column.id()) { - if column.desc.is_primary { + if column.desc().is_primary { let old_key = tuple.id.replace(value.clone()).unwrap(); throw!(transaction.remove_tuple(&table_name, &old_key)); diff --git a/src/execution/dql/aggregate/hash_agg.rs b/src/execution/dql/aggregate/hash_agg.rs index dd1d3b3f..484bd08d 100644 --- a/src/execution/dql/aggregate/hash_agg.rs +++ b/src/execution/dql/aggregate/hash_agg.rs @@ -5,7 +5,7 @@ use crate::execution::{build_read, Executor, ReadExecutor}; use crate::expression::ScalarExpression; use crate::planner::operator::aggregate::AggregateOperator; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple::{SchemaRef, Tuple}; use crate::types::value::ValueRef; @@ -42,7 +42,7 @@ impl From<(AggregateOperator, LogicalPlan)> for HashAggExecutor { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for HashAggExecutor { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( @@ -184,6 +184,7 @@ mod test { #[test] fn test_hash_agg() -> Result<(), DatabaseError> { let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let temp_dir = TempDir::new().expect("unable to create temporary working directory"); @@ -241,7 +242,7 @@ mod test { let tuples = try_collect( HashAggExecutor::from((operator, input)) - .execute((&table_cache, &meta_cache), &transaction), + .execute((&table_cache, &view_cache, &meta_cache), &transaction), )?; println!( diff --git a/src/execution/dql/aggregate/simple_agg.rs b/src/execution/dql/aggregate/simple_agg.rs index e3d159a0..368cbc54 100644 --- a/src/execution/dql/aggregate/simple_agg.rs +++ b/src/execution/dql/aggregate/simple_agg.rs @@ -3,7 +3,7 @@ use crate::execution::{build_read, Executor, ReadExecutor}; use crate::expression::ScalarExpression; use crate::planner::operator::aggregate::AggregateOperator; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple::Tuple; use crate::types::value::ValueRef; @@ -28,7 +28,7 @@ impl From<(AggregateOperator, LogicalPlan)> for SimpleAggExecutor { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for SimpleAggExecutor { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( diff --git 
a/src/execution/dql/describe.rs b/src/execution/dql/describe.rs index 0518d830..1b8a0a17 100644 --- a/src/execution/dql/describe.rs +++ b/src/execution/dql/describe.rs @@ -2,7 +2,7 @@ use crate::catalog::{ColumnCatalog, TableName}; use crate::execution::DatabaseError; use crate::execution::{Executor, ReadExecutor}; use crate::planner::operator::describe::DescribeOperator; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple::Tuple; use crate::types::value::{DataValue, Utf8Type, ValueRef}; @@ -43,19 +43,18 @@ impl From<DescribeOperator> for Describe { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Describe { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( #[coroutine] move || { - let table = throw!(transaction - .table(cache.0, self.table_name.clone()) + let table = throw!(throw!(transaction.table(cache.0, self.table_name.clone())) .ok_or(DatabaseError::TableNotFound)); let key_fn = |column: &ColumnCatalog| { - if column.desc.is_primary { + if column.desc().is_primary { PRIMARY_KEY_TYPE.clone() - } else if column.desc.is_unique { + } else if column.desc().is_unique { UNIQUE_KEY_TYPE.clone() } else { EMPTY_KEY_TYPE.clone() @@ -65,7 +64,7 @@ impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Describe { for column in table.columns() { let datatype = column.datatype(); let default = column - .desc + .desc() .default .as_ref() .map(|expr| format!("{}", expr)) @@ -87,7 +86,7 @@ unit: CharLengthUnits::Characters, }), Arc::new(DataValue::Utf8 { - value: Some(column.nullable.to_string()), + value: Some(column.nullable().to_string()), ty: Utf8Type::Variable(None), unit: CharLengthUnits::Characters, }),
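`Describe` above is also migrated from direct field access (`column.desc`, `column.nullable`) to accessor calls, a pattern repeated at every call site in this patch; the fields of `ColumnCatalog` have evidently become private. The accessors are defined in src/catalog/column.rs, outside this excerpt; a plausible sketch of their shape:

    impl ColumnCatalog {
        pub fn summary(&self) -> &ColumnSummary { &self.summary }
        pub fn nullable(&self) -> bool { self.nullable }
        pub fn desc(&self) -> &ColumnDesc { &self.desc }
    }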
diff --git a/src/execution/dql/dummy.rs b/src/execution/dql/dummy.rs index 57e267c0..3c4acd40 100644 --- a/src/execution/dql/dummy.rs +++ b/src/execution/dql/dummy.rs @@ -1,11 +1,15 @@ use crate::execution::{Executor, ReadExecutor}; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::types::tuple::Tuple; pub struct Dummy {} impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Dummy { - fn execute(self, _: (&'a TableCache, &'a StatisticsMetaCache), _: &T) -> Executor<'a> { + fn execute( + self, + _: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), + _: &'a T, + ) -> Executor<'a> { Box::new( #[coroutine] move || { diff --git a/src/execution/dql/explain.rs b/src/execution/dql/explain.rs index f1badc7b..8c5fbdf0 100644 --- a/src/execution/dql/explain.rs +++ b/src/execution/dql/explain.rs @@ -1,6 +1,6 @@ use crate::execution::{Executor, ReadExecutor}; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::types::tuple::Tuple; use crate::types::value::{DataValue, Utf8Type}; use sqlparser::ast::CharLengthUnits; @@ -17,7 +17,11 @@ impl From<LogicalPlan> for Explain { } impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Explain { - fn execute(self, _: (&'a TableCache, &'a StatisticsMetaCache), _: &T) -> Executor<'a> { + fn execute( + self, + _: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), + _: &'a T, + ) -> Executor<'a> { Box::new( #[coroutine] move || { diff --git a/src/execution/dql/filter.rs b/src/execution/dql/filter.rs index f72098c3..a4132142 100644 --- a/src/execution/dql/filter.rs +++ b/src/execution/dql/filter.rs @@ -2,7 +2,7 @@ use crate::execution::{build_read, Executor, ReadExecutor}; use crate::expression::ScalarExpression; use crate::planner::operator::filter::FilterOperator; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use std::ops::Coroutine; use std::ops::CoroutineState; @@ -22,7 +22,7 @@ impl From<(FilterOperator, LogicalPlan)> for Filter { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Filter { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( diff --git a/src/execution/dql/function_scan.rs b/src/execution/dql/function_scan.rs index a9cfa5bc..410312e3 100644 --- a/src/execution/dql/function_scan.rs +++ b/src/execution/dql/function_scan.rs @@ -1,7 +1,7 @@ use crate::execution::{Executor, ReadExecutor}; use crate::expression::function::table::TableFunction; use crate::planner::operator::function_scan::FunctionScanOperator; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; pub struct FunctionScan { @@ -17,7 +17,11 @@ impl From<FunctionScanOperator> for FunctionScan { } impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for FunctionScan { - fn execute(self, _: (&'a TableCache, &'a StatisticsMetaCache), _: &T) -> Executor<'a> { + fn execute( + self, + _: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), + _: &'a T, + ) -> Executor<'a> { Box::new( #[coroutine] move || { diff --git a/src/execution/dql/index_scan.rs b/src/execution/dql/index_scan.rs index 7ace9b4d..e1027280 100644 --- a/src/execution/dql/index_scan.rs +++ b/src/execution/dql/index_scan.rs @@ -1,7 +1,7 @@ use crate::execution::{Executor, ReadExecutor}; use crate::expression::range_detacher::Range; use crate::planner::operator::table_scan::TableScanOperator; -use crate::storage::{Iter, StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{Iter, StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::index::IndexMetaRef; @@ -29,7 +29,7 @@ impl From<(TableScanOperator, IndexMetaRef, Range)> for IndexScan { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for IndexScan { fn execute( self, - (table_cache, _): (&'a TableCache, &'a StatisticsMetaCache), + (table_cache, _, _): (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new(
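The hash join hunk below replaces the clone-and-mutate nullability fixup with a new `nullable_for_join` helper. Judging from its two call styles, `if let Some(..)` here and `unwrap_or_else(|| column.clone())` in nested_loop_join further down, it plausibly returns `Some` only when a changed copy had to be allocated. A sketch of the assumed semantics; `set_nullable` is a hypothetical setter standing in for whatever private mutation the real helper performs:

    impl ColumnRef {
        pub fn nullable_for_join(&self, nullable: bool) -> Option<ColumnRef> {
            (self.nullable() != nullable).then(|| {
                let mut column = ColumnCatalog::clone(self);
                column.set_nullable(nullable); // hypothetical
                ColumnRef::from(column)
            })
        }
    }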
diff --git a/src/execution/dql/join/hash_join.rs b/src/execution/dql/join/hash_join.rs index a77b68ba..b5746e74 100644 --- a/src/execution/dql/join/hash_join.rs +++ b/src/execution/dql/join/hash_join.rs @@ -1,11 +1,11 @@ -use crate::catalog::{ColumnCatalog, ColumnRef}; +use crate::catalog::ColumnRef; use crate::errors::DatabaseError; use crate::execution::dql::join::joins_nullable; use crate::execution::{build_read, Executor, ReadExecutor}; use crate::expression::ScalarExpression; use crate::planner::operator::join::{JoinCondition, JoinOperator, JoinType}; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple::{Schema, SchemaRef, Tuple}; use crate::types::value::{DataValue, ValueRef, NULL_VALUE}; @@ -44,7 +44,7 @@ impl From<(JoinOperator, LogicalPlan, LogicalPlan)> for HashJoin { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for HashJoin { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( @@ -142,10 +142,9 @@ impl HashJoinStatus { let fn_process = |schema: &mut Vec<ColumnRef>, force_nullable| { for column in schema.iter_mut() { - let mut temp = ColumnCatalog::clone(column); - temp.nullable = force_nullable; - - *column = ColumnRef::from(temp); + if let Some(new_column) = column.nullable_for_join(force_nullable) { + *column = new_column; + } } }; let (left_force_nullable, right_force_nullable) = joins_nullable(&ty); @@ -529,6 +528,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right) = build_join_values(); @@ -539,8 +539,8 @@ }, join_type: JoinType::Inner, }; - let executor = - HashJoin::from((op, left, right)).execute((&table_cache, &meta_cache), &transaction); + let executor = HashJoin::from((op, left, right)) .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; debug_assert_eq!(tuples.len(), 3); @@ -567,6 +567,7 @@ let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right) = build_join_values(); @@ -580,7 +581,9 @@ //Outer { let executor = HashJoin::from((op.clone(), left.clone(), right.clone())); - let tuples = try_collect(executor.execute((&table_cache, &meta_cache), &transaction))?; + let tuples = try_collect( + executor.execute((&table_cache, &view_cache, &meta_cache), &transaction), + )?; debug_assert_eq!(tuples.len(), 4); @@ -605,8 +608,9 @@ { let mut executor = HashJoin::from((op.clone(), left.clone(), right.clone())); executor.ty = JoinType::LeftSemi; - let mut tuples = - try_collect(executor.execute((&table_cache, &meta_cache), &transaction))?; + let mut tuples = try_collect( + executor.execute((&table_cache, &view_cache, &meta_cache), &transaction), + )?; debug_assert_eq!(tuples.len(), 2); tuples.sort_by_key(|tuple| { @@ -628,7 +632,9 @@ { let mut executor = HashJoin::from((op, left, right)); executor.ty = JoinType::LeftAnti; - let tuples = try_collect(executor.execute((&table_cache, &meta_cache), &transaction))?; + let tuples = try_collect( + executor.execute((&table_cache, &view_cache, &meta_cache), &transaction), + )?; debug_assert_eq!(tuples.len(), 1); debug_assert_eq!( @@ -646,6 +652,7 @@ let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache =
Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right) = build_join_values(); @@ -656,8 +663,8 @@ mod test { }, join_type: JoinType::RightOuter, }; - let executor = - HashJoin::from((op, left, right)).execute((&table_cache, &meta_cache), &transaction); + let executor = HashJoin::from((op, left, right)) + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; debug_assert_eq!(tuples.len(), 4); @@ -688,6 +695,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right) = build_join_values(); @@ -698,8 +706,8 @@ mod test { }, join_type: JoinType::Full, }; - let executor = - HashJoin::from((op, left, right)).execute((&table_cache, &meta_cache), &transaction); + let executor = HashJoin::from((op, left, right)) + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; debug_assert_eq!(tuples.len(), 5); diff --git a/src/execution/dql/join/nested_loop_join.rs b/src/execution/dql/join/nested_loop_join.rs index 5344988e..faaa7176 100644 --- a/src/execution/dql/join/nested_loop_join.rs +++ b/src/execution/dql/join/nested_loop_join.rs @@ -2,14 +2,14 @@ //! [`JoinType::LeftSemi`], [`JoinType::LeftAnti`], [`JoinType::RightOuter`], [`JoinType::Cross`], [`JoinType::Full`]. use super::joins_nullable; -use crate::catalog::{ColumnCatalog, ColumnRef}; +use crate::catalog::ColumnRef; use crate::errors::DatabaseError; use crate::execution::dql::projection::Projection; use crate::execution::{build_read, Executor, ReadExecutor}; use crate::expression::ScalarExpression; use crate::planner::operator::join::{JoinCondition, JoinOperator, JoinType}; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple::{Schema, SchemaRef, Tuple}; use crate::types::value::{DataValue, NULL_VALUE}; @@ -128,7 +128,7 @@ impl From<(JoinOperator, LogicalPlan, LogicalPlan)> for NestedLoopJoin { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for NestedLoopJoin { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( @@ -168,9 +168,13 @@ impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for NestedLoopJoin { throw!(eq_cond.equals(&left_tuple, &right_tuple)), ) { (None, true) if matches!(ty, JoinType::RightOuter) => { + has_matched = true; Self::emit_tuple(&right_tuple, &left_tuple, ty, true) } - (None, true) => Self::emit_tuple(&left_tuple, &right_tuple, ty, true), + (None, true) => { + has_matched = true; + Self::emit_tuple(&left_tuple, &right_tuple, ty, true) + } (Some(filter), true) => { let new_tuple = Self::merge_tuple(&left_tuple, &right_tuple, &ty); let value = throw!(filter.eval(&new_tuple, &output_schema_ref)); @@ -363,14 +367,18 @@ impl NestedLoopJoin { let mut join_schema = vec![]; for column in left_schema.iter() { - let mut temp = ColumnCatalog::clone(column); - temp.nullable = left_force_nullable; - join_schema.push(ColumnRef::from(temp)); + join_schema.push( + column + .nullable_for_join(left_force_nullable) 
+ .unwrap_or_else(|| column.clone()), + ); } for column in right_schema.iter() { - let mut temp = ColumnCatalog::clone(column); - temp.nullable = right_force_nullable; - join_schema.push(ColumnRef::from(temp)); + join_schema.push( + column + .nullable_for_join(right_force_nullable) + .unwrap_or_else(|| column.clone()), + ); } Arc::new(join_schema) } @@ -533,6 +541,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right, filter) = build_join_values(true); let op = JoinOperator { @@ -543,7 +552,7 @@ mod test { join_type: JoinType::Inner, }; let executor = NestedLoopJoin::from((op, left, right)) - .execute((&table_cache, &meta_cache), &transaction); + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; let mut expected_set = HashSet::with_capacity(1); @@ -561,6 +570,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right, filter) = build_join_values(true); let op = JoinOperator { @@ -571,7 +581,7 @@ mod test { join_type: JoinType::LeftOuter, }; let executor = NestedLoopJoin::from((op, left, right)) - .execute((&table_cache, &meta_cache), &transaction); + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; debug_assert_eq!( @@ -601,6 +611,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right, filter) = build_join_values(true); let op = JoinOperator { @@ -611,7 +622,7 @@ mod test { join_type: JoinType::Cross, }; let executor = NestedLoopJoin::from((op, left, right)) - .execute((&table_cache, &meta_cache), &transaction); + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; let mut expected_set = HashSet::with_capacity(1); @@ -630,6 +641,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right, _) = build_join_values(true); let op = JoinOperator { @@ -640,7 +652,7 @@ mod test { join_type: JoinType::Cross, }; let executor = NestedLoopJoin::from((op, left, right)) - .execute((&table_cache, &meta_cache), &transaction); + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; let mut expected_set = HashSet::with_capacity(3); @@ -662,6 +674,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = 
Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right, _) = build_join_values(false); let op = JoinOperator { @@ -672,7 +685,7 @@ mod test { join_type: JoinType::Cross, }; let executor = NestedLoopJoin::from((op, left, right)) - .execute((&table_cache, &meta_cache), &transaction); + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; debug_assert_eq!(tuples.len(), 16); @@ -686,6 +699,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right, filter) = build_join_values(true); let op = JoinOperator { @@ -696,7 +710,7 @@ mod test { join_type: JoinType::LeftSemi, }; let executor = NestedLoopJoin::from((op, left, right)) - .execute((&table_cache, &meta_cache), &transaction); + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; let mut expected_set = HashSet::with_capacity(1); @@ -713,6 +727,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right, filter) = build_join_values(true); let op = JoinOperator { @@ -723,7 +738,7 @@ mod test { join_type: JoinType::LeftAnti, }; let executor = NestedLoopJoin::from((op, left, right)) - .execute((&table_cache, &meta_cache), &transaction); + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; let mut expected_set = HashSet::with_capacity(3); @@ -742,6 +757,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right, filter) = build_join_values(true); let op = JoinOperator { @@ -752,7 +768,7 @@ mod test { join_type: JoinType::RightOuter, }; let executor = NestedLoopJoin::from((op, left, right)) - .execute((&table_cache, &meta_cache), &transaction); + .execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; let mut expected_set = HashSet::with_capacity(4); @@ -776,6 +792,7 @@ mod test { let storage = RocksStorage::new(temp_dir.path())?; let transaction = storage.transaction()?; let meta_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); + let view_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let table_cache = Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?); let (keys, left, right, filter) = build_join_values(true); let op = JoinOperator { @@ -786,7 +803,7 @@ mod test { join_type: JoinType::Full, }; let executor = NestedLoopJoin::from((op, left, right)) - .execute((&table_cache, &meta_cache), &transaction); + 
.execute((&table_cache, &view_cache, &meta_cache), &transaction); let tuples = try_collect(executor)?; debug_assert_eq!( diff --git a/src/execution/dql/limit.rs b/src/execution/dql/limit.rs index 46a8d69a..cbb0eb1c 100644 --- a/src/execution/dql/limit.rs +++ b/src/execution/dql/limit.rs @@ -1,7 +1,7 @@ use crate::execution::{build_read, Executor, ReadExecutor}; use crate::planner::operator::limit::LimitOperator; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use std::ops::Coroutine; use std::ops::CoroutineState; use std::pin::Pin; @@ -25,7 +25,7 @@ impl From<(LimitOperator, LogicalPlan)> for Limit { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Limit { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( diff --git a/src/execution/dql/projection.rs b/src/execution/dql/projection.rs index e0ac6e85..7ec0e056 100644 --- a/src/execution/dql/projection.rs +++ b/src/execution/dql/projection.rs @@ -4,7 +4,7 @@ use crate::execution::{build_read, Executor, ReadExecutor}; use crate::expression::ScalarExpression; use crate::planner::operator::project::ProjectOperator; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple::Tuple; use crate::types::value::ValueRef; @@ -26,7 +26,7 @@ impl From<(ProjectOperator, LogicalPlan)> for Projection { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Projection { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( diff --git a/src/execution/dql/seq_scan.rs b/src/execution/dql/seq_scan.rs index 2eb309a5..2a304dc6 100644 --- a/src/execution/dql/seq_scan.rs +++ b/src/execution/dql/seq_scan.rs @@ -1,6 +1,6 @@ use crate::execution::{Executor, ReadExecutor}; use crate::planner::operator::table_scan::TableScanOperator; -use crate::storage::{Iter, StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{Iter, StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; pub(crate) struct SeqScan { @@ -16,7 +16,7 @@ impl From<TableScanOperator> for SeqScan { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for SeqScan { fn execute( self, - (table_cache, _): (&'a TableCache, &'a StatisticsMetaCache), + (table_cache, _, _): (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new(
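All of these operators speak the same coroutine protocol: the executor yields `Result<Tuple, DatabaseError>` values until complete. The join tests above consume them through `try_collect`, and the copy_from_file test drives `resume` by hand; written out, the drain loop looks roughly like this sketch, using the `Executor` alias from src/execution/mod.rs below:

    use std::ops::{Coroutine, CoroutineState};
    use std::pin::Pin;

    fn drain(mut executor: Executor<'_>) -> Result<Vec<Tuple>, DatabaseError> {
        let mut tuples = Vec::new();
        // The boxed coroutine is Unpin, so Pin::new suffices, as in the tests.
        while let CoroutineState::Yielded(result) = Pin::new(&mut executor).resume(()) {
            tuples.push(result?);
        }
        Ok(tuples)
    }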
diff --git a/src/execution/dql/show_table.rs b/src/execution/dql/show_table.rs index d03af33d..9b773c7f 100644 --- a/src/execution/dql/show_table.rs +++ b/src/execution/dql/show_table.rs @@ -1,6 +1,6 @@ use crate::catalog::TableMeta; use crate::execution::{Executor, ReadExecutor}; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple::Tuple; use crate::types::value::{DataValue, Utf8Type}; @@ -12,7 +12,7 @@ pub struct ShowTables; impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for ShowTables { fn execute( self, - _: (&'a TableCache, &'a StatisticsMetaCache), + _: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( diff --git a/src/execution/dql/sort.rs b/src/execution/dql/sort.rs index 51635f85..fdd1212b 100644 --- a/src/execution/dql/sort.rs +++ b/src/execution/dql/sort.rs @@ -2,7 +2,7 @@ use crate::errors::DatabaseError; use crate::execution::{build_read, Executor, ReadExecutor}; use crate::planner::operator::sort::{SortField, SortOperator}; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::throw; use crate::types::tuple::{Schema, Tuple}; use itertools::Itertools; @@ -227,7 +227,7 @@ impl From<(SortOperator, LogicalPlan)> for Sort { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Sort { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( diff --git a/src/execution/dql/union.rs b/src/execution/dql/union.rs index b34e2e79..9cb3409e 100644 --- a/src/execution/dql/union.rs +++ b/src/execution/dql/union.rs @@ -1,6 +1,6 @@ use crate::execution::{build_read, Executor, ReadExecutor}; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use std::ops::Coroutine; use std::ops::CoroutineState; use std::pin::Pin; @@ -22,7 +22,7 @@ impl From<(LogicalPlan, LogicalPlan)> for Union { impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Union { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { Box::new( diff --git a/src/execution/dql/values.rs b/src/execution/dql/values.rs index 22f58eef..84a73dd8 100644 --- a/src/execution/dql/values.rs +++ b/src/execution/dql/values.rs @@ -1,6 +1,6 @@ use crate::execution::{Executor, ReadExecutor}; use crate::planner::operator::values::ValuesOperator; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use crate::types::tuple::Tuple; pub struct Values { @@ -14,7 +14,11 @@ impl From<ValuesOperator> for Values { } impl<'a, T: Transaction + 'a> ReadExecutor<'a, T> for Values { - fn execute(self, _: (&'a TableCache, &'a StatisticsMetaCache), _: &T) -> Executor<'a> { + fn execute( + self, + _: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), + _: &'a T, + ) -> Executor<'a> { Box::new( #[coroutine] move || { diff --git a/src/execution/mod.rs b/src/execution/mod.rs index 7cd5c75c..bf783963 100644 --- a/src/execution/mod.rs +++ b/src/execution/mod.rs @@ -8,6 +8,7 @@ use self::dql::join::nested_loop_join::NestedLoopJoin; use crate::errors::DatabaseError; use crate::execution::ddl::create_index::CreateIndex; use crate::execution::ddl::create_table::CreateTable; +use crate::execution::ddl::create_view::CreateView; use crate::execution::ddl::drop_column::DropColumn; use crate::execution::ddl::drop_table::DropTable; use crate::execution::ddl::truncate::Truncate; @@ -35,7 +36,7 @@ use crate::execution::dql::values::Values; use crate::planner::operator::join::JoinCondition; use crate::planner::operator::{Operator, PhysicalOption}; use crate::planner::LogicalPlan; -use crate::storage::{StatisticsMetaCache, TableCache, Transaction}; +use crate::storage::{StatisticsMetaCache, TableCache, Transaction, ViewCache}; use
crate::types::index::IndexInfo; use crate::types::tuple::Tuple; use std::ops::{Coroutine, CoroutineState}; @@ -47,7 +48,7 @@ pub type Executor<'a> = pub trait ReadExecutor<'a, T: Transaction + 'a> { fn execute( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a>; } @@ -55,14 +56,14 @@ pub trait ReadExecutor<'a, T: Transaction + 'a> { pub trait WriteExecutor<'a, T: Transaction + 'a> { fn execute_mut( self, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a mut T, ) -> Executor<'a>; } pub fn build_read<'a, T: Transaction + 'a>( plan: LogicalPlan, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a T, ) -> Executor<'a> { let LogicalPlan { @@ -149,7 +150,7 @@ pub fn build_read<'a, T: Transaction + 'a>( pub fn build_write<'a, T: Transaction + 'a>( plan: LogicalPlan, - cache: (&'a TableCache, &'a StatisticsMetaCache), + cache: (&'a TableCache, &'a ViewCache, &'a StatisticsMetaCache), transaction: &'a mut T, ) -> Executor<'a> { let LogicalPlan { @@ -190,6 +191,7 @@ pub fn build_write<'a, T: Transaction + 'a>( CreateIndex::from((op, input)).execute_mut(cache, transaction) } + Operator::CreateView(op) => CreateView::from(op).execute_mut(cache, transaction), Operator::DropTable(op) => DropTable::from(op).execute_mut(cache, transaction), Operator::Truncate(op) => Truncate::from(op).execute_mut(cache, transaction), Operator::CopyFromFile(op) => CopyFromFile::from(op).execute_mut(cache, transaction), diff --git a/src/expression/evaluator.rs b/src/expression/evaluator.rs index bdbd1b29..d09d69cb 100644 --- a/src/expression/evaluator.rs +++ b/src/expression/evaluator.rs @@ -64,7 +64,7 @@ impl ScalarExpression { tul_col.table_name().is_none() && tul_col.name() == alias } AliasType::Expr(alias_expr) => { - alias_expr.output_column().summary == tul_col.summary + alias_expr.output_column().summary() == tul_col.summary() } }) .map(|(i, _)| &tuple.values[i]) @@ -267,13 +267,11 @@ impl ScalarExpression { unit: CharLengthUnits::Characters, })) } - ScalarExpression::Reference { pos, .. } => { - return Ok(tuple - .values - .get(*pos) - .unwrap_or_else(|| &NULL_VALUE) - .clone()); - } + ScalarExpression::Reference { pos, .. } => Ok(tuple + .values + .get(*pos) + .unwrap_or_else(|| &NULL_VALUE) + .clone()), ScalarExpression::Tuple(exprs) => { let mut values = Vec::with_capacity(exprs.len()); diff --git a/src/expression/mod.rs b/src/expression/mod.rs index 0bd5e674..252a38b7 100644 --- a/src/expression/mod.rs +++ b/src/expression/mod.rs @@ -1347,7 +1347,7 @@ mod test { let mut reference_tables = ReferenceTables::new(); let c3_column_id = { let table = transaction - .table(&table_cache, Arc::new("t1".to_string())) + .table(&table_cache, Arc::new("t1".to_string()))? 
.unwrap(); *table.get_column_id_by_name("c3").unwrap() }; @@ -1376,30 +1376,33 @@ )?; fn_assert( &mut cursor, - ScalarExpression::ColumnRef(ColumnRef::from(ColumnCatalog { - summary: ColumnSummary { + ScalarExpression::ColumnRef(ColumnRef::from(ColumnCatalog::direct_new( + ColumnSummary { name: "c3".to_string(), relation: ColumnRelation::Table { column_id: c3_column_id, table_name: Arc::new("t1".to_string()), + is_temp: false, }, }, - nullable: false, - desc: ColumnDesc::new(LogicalType::Integer, false, false, None).unwrap(), - })), + false, + ColumnDesc::new(LogicalType::Integer, false, false, None)?, + false, + ))), Some((&transaction, &table_cache)), &mut reference_tables, )?; fn_assert( &mut cursor, - ScalarExpression::ColumnRef(ColumnRef::from(ColumnCatalog { - summary: ColumnSummary { + ScalarExpression::ColumnRef(ColumnRef::from(ColumnCatalog::direct_new( + ColumnSummary { name: "c4".to_string(), relation: ColumnRelation::None, }, - nullable: false, - desc: ColumnDesc::new(LogicalType::Boolean, false, false, None).unwrap(), - })), + false, + ColumnDesc::new(LogicalType::Boolean, false, false, None)?, + false, + ))), Some((&transaction, &table_cache)), &mut reference_tables, )?; diff --git a/src/expression/range_detacher.rs b/src/expression/range_detacher.rs index bdbf8f89..764afdff 100644 --- a/src/expression/range_detacher.rs +++ b/src/expression/range_detacher.rs @@ -3,7 +3,6 @@ use crate::expression::{BinaryOperator, ScalarExpression}; use crate::types::value::{DataValue, ValueRef, NULL_VALUE}; use crate::types::ColumnId; use itertools::Itertools; -use serde::{Deserialize, Serialize}; use serde_macros::ReferenceSerialization; use std::cmp::Ordering; use std::collections::Bound; @@ -14,7 +13,7 @@ use std::{fmt, mem}; /// Used to represent binary relationships between fields and constants /// Tips: The NotEq case is ignored because it makes expression composition very complex /// - [`Range::Scope`]: -#[derive(Debug, PartialEq, Eq, Clone, Hash, Serialize, Deserialize, ReferenceSerialization)] +#[derive(Debug, PartialEq, Eq, Clone, Hash, ReferenceSerialization)] pub enum Range { Scope { min: Bound<ValueRef>, diff --git a/src/function/numbers.rs b/src/function/numbers.rs index 7680c739..24ef5237 100644 --- a/src/function/numbers.rs +++ b/src/function/numbers.rs @@ -64,7 +64,7 @@ impl TableFunctionImpl for Numbers { if value.logical_type() != LogicalType::Integer { value = Arc::new(DataValue::clone(&value).cast(&LogicalType::Integer)?); } - let num = value.i32().ok_or_else(|| DatabaseError::NotNull)?; + let num = value.i32().ok_or(DatabaseError::NotNull)?; Ok(Box::new((0..num).map(|i| { Ok(Tuple { diff --git a/src/optimizer/core/cm_sketch.rs b/src/optimizer/core/cm_sketch.rs index d056b61e..b5b3516b 100644 --- a/src/optimizer/core/cm_sketch.rs +++ b/src/optimizer/core/cm_sketch.rs @@ -4,7 +4,6 @@ use crate::serdes::{ReferenceSerialization, ReferenceTables}; use crate::storage::{TableCache, Transaction}; use crate::types::value::DataValue; use rand::RngCore; -use serde::{Deserialize, Serialize}; use siphasher::sip::SipHasher13; use std::borrow::Borrow; use std::hash::{Hash, Hasher}; @@ -15,7 +14,7 @@ use std::{cmp, mem}; pub(crate) type FastHasher = SipHasher13; // https://github.com/jedisct1/rust-count-min-sketch -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone)] pub struct CountMinSketch<K> { counters: Vec<Vec<usize>>, offsets: Vec<usize>,
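These optimizer hunks drop serde's `Serialize`/`Deserialize` in favor of the project's `ReferenceSerialization` derive, which can resolve catalog references through a transaction and a `ReferenceTables` side table instead of serializing whole column definitions inline. Reconstructed from the call sites visible in this patch (`self.encode(&mut file, true, &mut ReferenceTables::new())`, `Self::decode::<T, _>(&mut file, None, ..)`, and the `Some((&transaction, &table_cache))` drive argument in the expression test above), the trait plausibly looks like:

    pub trait ReferenceSerialization: Sized {
        fn encode<W: Write>(
            &self,
            writer: &mut W,
            is_direct: bool,
            reference_tables: &mut ReferenceTables,
        ) -> Result<(), DatabaseError>;

        fn decode<T: Transaction, R: Read>(
            reader: &mut R,
            drive: Option<(&T, &TableCache)>,
            reference_tables: &ReferenceTables,
        ) -> Result<Self, DatabaseError>;
    }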
diff --git a/src/optimizer/core/histogram.rs b/src/optimizer/core/histogram.rs index 1fcc777f..cd04751b 100644 --- a/src/optimizer/core/histogram.rs +++ b/src/optimizer/core/histogram.rs @@ -8,7 +8,6 @@ use crate::types::index::{IndexId, IndexMeta}; use crate::types::value::{DataValue, ValueRef}; use crate::types::LogicalType; use ordered_float::OrderedFloat; -use serde::{Deserialize, Serialize}; use serde_macros::ReferenceSerialization; use std::collections::Bound; use std::sync::Arc; @@ -25,7 +24,7 @@ pub struct HistogramBuilder { } // Equal depth histogram -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ReferenceSerialization)] +#[derive(Debug, Clone, PartialEq, ReferenceSerialization)] pub struct Histogram { index_id: IndexId, @@ -40,7 +39,7 @@ pub struct Histogram { correlation: f64, } -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ReferenceSerialization)] +#[derive(Debug, Clone, PartialEq, ReferenceSerialization)] struct Bucket { lower: ValueRef, upper: ValueRef, diff --git a/src/optimizer/core/memo.rs b/src/optimizer/core/memo.rs index b6ba9715..6b7bebb0 100644 --- a/src/optimizer/core/memo.rs +++ b/src/optimizer/core/memo.rs @@ -118,7 +118,7 @@ mod tests { let transaction = database.storage.transaction()?; let c1_column_id = { transaction - .table(&database.table_cache, Arc::new("t1".to_string())) + .table(&database.table_cache, Arc::new("t1".to_string()))? .unwrap() .get_column_id_by_name("c1") .unwrap() @@ -128,6 +128,7 @@ mod tests { let mut binder = Binder::new( BinderContext::new( &database.table_cache, + &database.view_cache, &transaction, &scala_functions, &table_functions, diff --git a/src/optimizer/core/statistics_meta.rs b/src/optimizer/core/statistics_meta.rs index 72bf29e4..4686a590 100644 --- a/src/optimizer/core/statistics_meta.rs +++ b/src/optimizer/core/statistics_meta.rs @@ -3,10 +3,10 @@ use crate::errors::DatabaseError; use crate::expression::range_detacher::Range; use crate::optimizer::core::cm_sketch::CountMinSketch; use crate::optimizer::core::histogram::Histogram; +use crate::serdes::{ReferenceSerialization, ReferenceTables}; use crate::storage::{StatisticsMetaCache, Transaction}; use crate::types::index::IndexId; use crate::types::value::DataValue; -use serde::{Deserialize, Serialize}; use serde_macros::ReferenceSerialization; use std::fs::OpenOptions; use std::io::Write; @@ -35,17 +35,16 @@ impl<'a, T: Transaction> StatisticMetaLoader<'a, T> { return Ok(Some(statistics_meta)); } if let Some(path) = self.tx.table_meta_path(table_name.as_str(), index_id)? { - Ok(Some( - self.cache - .get_or_insert(key, |_| StatisticsMeta::from_file(path))?, - )) + Ok(Some(self.cache.get_or_insert(key, |_| { + StatisticsMeta::from_file::<T>(path) + })?)) } else { Ok(None) } } } -#[derive(Debug, Serialize, Deserialize, ReferenceSerialization)] +#[derive(Debug, ReferenceSerialization)] pub struct StatisticsMeta { index_id: IndexId, histogram: Histogram, @@ -86,20 +85,20 @@ impl StatisticsMeta { .read(true) .truncate(false) .open(path)?; - bincode::serialize_into(&mut file, self)?; + self.encode(&mut file, true, &mut ReferenceTables::new())?; file.flush()?; Ok(()) } - pub fn from_file(path: impl AsRef<Path>) -> Result<Self, DatabaseError> { - let file = OpenOptions::new() + pub fn from_file<T: Transaction>(path: impl AsRef<Path>) -> Result<Self, DatabaseError> { + let mut file = OpenOptions::new() .create(true) .write(true) .read(true) .truncate(false) .open(path)?; - Ok(bincode::deserialize_from(file)?) + Self::decode::<T, _>(&mut file, None, &ReferenceTables::new()) } } @@ -108,6 +107,7 @@ mod tests { use crate::errors::DatabaseError; use crate::optimizer::core::histogram::HistogramBuilder; use crate::optimizer::core::statistics_meta::StatisticsMeta; + use crate::storage::rocksdb::RocksTransaction; use crate::types::index::{IndexMeta, IndexType}; use crate::types::value::DataValue; use crate::types::LogicalType; @@ -154,7 +154,7 @@ mod tests { let path = temp_dir.path().join("meta"); StatisticsMeta::new(histogram.clone(), sketch.clone()).to_file(path.clone())?; - let statistics_meta = StatisticsMeta::from_file(path)?; + let statistics_meta = StatisticsMeta::from_file::<RocksTransaction>(path)?; debug_assert_eq!(histogram, statistics_meta.histogram); debug_assert_eq!(
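One subtlety in the new `from_file`: since `drive` is fixed to `None`, nothing infers the transaction type, so callers must pin it with a turbofish, exactly as the tests in this patch do. A round-trip sketch reusing the names from the test above; `RocksTransaction` only selects the codec, no database is opened:

    let meta = StatisticsMeta::new(histogram, sketch);
    meta.to_file(&path)?;
    let loaded = StatisticsMeta::from_file::<RocksTransaction>(&path)?;
    assert_eq!(loaded.index_id(), meta.index_id());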
diff --git a/src/optimizer/rule/normalization/column_pruning.rs b/src/optimizer/rule/normalization/column_pruning.rs index 1bd6e8eb..9af5a47d 100644 --- a/src/optimizer/rule/normalization/column_pruning.rs +++ b/src/optimizer/rule/normalization/column_pruning.rs @@ -150,6 +150,7 @@ impl ColumnPruning { // DDL Single Plan Operator::CreateTable(_) | Operator::CreateIndex(_) + | Operator::CreateView(_) | Operator::DropTable(_) | Operator::Truncate(_) | Operator::Show diff --git a/src/optimizer/rule/normalization/compilation_in_advance.rs b/src/optimizer/rule/normalization/compilation_in_advance.rs index db8f7e43..4f4ec4d2 100644 --- a/src/optimizer/rule/normalization/compilation_in_advance.rs +++ b/src/optimizer/rule/normalization/compilation_in_advance.rs @@ -104,6 +104,7 @@ impl ExpressionRemapper { | Operator::DropColumn(_) | Operator::CreateTable(_) | Operator::CreateIndex(_) + | Operator::CreateView(_) | Operator::DropTable(_) | Operator::Truncate(_) | Operator::CopyFromFile(_) @@ -205,6 +206,7 @@ impl EvaluatorBind { | Operator::DropColumn(_) | Operator::CreateTable(_) | Operator::CreateIndex(_) + | Operator::CreateView(_) | Operator::DropTable(_) | Operator::Truncate(_) | Operator::CopyFromFile(_) diff --git a/src/optimizer/rule/normalization/pushdown_limit.rs b/src/optimizer/rule/normalization/pushdown_limit.rs index 3d804160..9a7b3e6e 100644 --- a/src/optimizer/rule/normalization/pushdown_limit.rs +++ b/src/optimizer/rule/normalization/pushdown_limit.rs @@ -67,7 +67,7 @@ impl NormalizationRule for LimitProjectTranspose { /// Add extra limits below JOIN: /// 1. For LEFT OUTER and RIGHT OUTER JOIN, we push limits to the left and right sides, -/// respectively. +/// respectively. /// /// TODO: 2. For INNER and CROSS JOIN, we push limits to both the left and right sides /// TODO: if join condition is empty.
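The simplification test below, like the copy_from_file and expression tests earlier, switches from literal `ColumnCatalog { .. }` construction to a `ColumnCatalog::direct_new(..)` constructor, another consequence of the fields going private. Assumed usage, pieced together from those call sites; the trailing flag's name and meaning are not visible in this excerpt:

    let column = ColumnCatalog::direct_new(
        ColumnSummary {
            name: "c1".to_string(),
            relation: ColumnRelation::None,
        },
        false,                                                      // nullable
        ColumnDesc::new(LogicalType::Integer, true, false, None)?,  // desc; `new` now returns Result
        false,                                                      // trailing flag as passed in the tests
    );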
diff --git a/src/optimizer/rule/normalization/simplification.rs b/src/optimizer/rule/normalization/simplification.rs index 1ccb81d6..d55c315f 100644 --- a/src/optimizer/rule/normalization/simplification.rs +++ b/src/optimizer/rule/normalization/simplification.rs @@ -250,38 +250,42 @@ mod test { ) .find_best::<RocksTransaction>(None)?; if let Operator::Filter(filter_op) = best_plan.childrens[0].clone().operator { - let c1_col = ColumnCatalog { - summary: ColumnSummary { + let c1_col = ColumnCatalog::direct_new( + ColumnSummary { name: "c1".to_string(), relation: ColumnRelation::Table { column_id: *table_state.column_id_by_name("c1"), table_name: Arc::new("t1".to_string()), + is_temp: false, }, }, - nullable: false, - desc: ColumnDesc { + false, + ColumnDesc { column_datatype: LogicalType::Integer, is_primary: true, is_unique: false, default: None, }, - }; - let c2_col = ColumnCatalog { - summary: ColumnSummary { + false, + ); + let c2_col = ColumnCatalog::direct_new( + ColumnSummary { name: "c2".to_string(), relation: ColumnRelation::Table { column_id: *table_state.column_id_by_name("c2"), table_name: Arc::new("t1".to_string()), + is_temp: false, }, }, - nullable: false, - desc: ColumnDesc { + false, + ColumnDesc { column_datatype: LogicalType::Integer, is_primary: false, is_unique: true, default: None, }, - }; + false, + ); // -(c1 + 1) > c2 => c1 < -c2 - 1 debug_assert_eq!( diff --git a/src/planner/mod.rs b/src/planner/mod.rs index 00f5eac1..805f7229 100644 --- a/src/planner/mod.rs +++ b/src/planner/mod.rs @@ -5,11 +5,17 @@ use crate::planner::operator::join::JoinType; use crate::planner::operator::union::UnionOperator; use crate::planner::operator::values::ValuesOperator; use crate::planner::operator::{Operator, PhysicalOption}; -use crate::types::tuple::SchemaRef; +use crate::types::tuple::{Schema, SchemaRef}; use itertools::Itertools; use serde_macros::ReferenceSerialization; use std::sync::Arc; +#[derive(Debug, Clone)] +pub(crate) enum SchemaOutput { + Schema(Schema), + SchemaRef(SchemaRef), +} + #[derive(Debug, PartialEq, Eq, Clone, Hash, ReferenceSerialization)] pub struct LogicalPlan { pub(crate) operator: Operator, @@ -19,6 +25,15 @@ pub struct LogicalPlan { pub(crate) _output_schema_ref: Option<SchemaRef>, } +impl SchemaOutput { + pub(crate) fn columns(&self) -> impl Iterator<Item = &ColumnRef> { + match self { + SchemaOutput::Schema(schema) => schema.iter(), + SchemaOutput::SchemaRef(schema_ref) => schema_ref.iter(), + } + } +} + impl LogicalPlan { pub fn new(operator: Operator, childrens: Vec<LogicalPlan>) -> Self { Self { @@ -48,105 +63,120 @@ impl LogicalPlan { tables } - pub fn output_schema(&mut self) -> &SchemaRef { - self._output_schema_ref - .get_or_insert_with(|| match &self.operator { - Operator::Filter(_) | Operator::Sort(_) | Operator::Limit(_) => { - self.childrens[0].output_schema().clone() - } - Operator::Aggregate(op) => { - let out_columns = op - .agg_calls - .iter() - .chain(op.groupby_exprs.iter()) - .map(|expr| expr.output_column()) - .collect_vec(); - Arc::new(out_columns) + pub(crate) fn _output_schema_direct( + operator: &Operator, + childrens: &[LogicalPlan], + ) -> SchemaOutput { + match operator { + Operator::Filter(_) | Operator::Sort(_) | Operator::Limit(_) => { + childrens[0].output_schema_direct() + } + Operator::Aggregate(op) => SchemaOutput::Schema( + op.agg_calls + .iter() + .chain(op.groupby_exprs.iter()) + .map(|expr| expr.output_column()) + .collect_vec(), + ), + Operator::Join(op) => { + if matches!(op.join_type, JoinType::LeftSemi | JoinType::LeftAnti) { + return childrens[0].output_schema_direct(); }
- Operator::Join(op) => { - if matches!(op.join_type, JoinType::LeftSemi | JoinType::LeftAnti) { - return self.childrens[0].output_schema().clone(); + let mut columns = Vec::new(); + + for plan in childrens.iter() { + for column in plan.output_schema_direct().columns() { + columns.push(column.clone()); } - let out_columns = self - .childrens - .iter_mut() - .flat_map(|children| Vec::clone(children.output_schema())) - .collect_vec(); - Arc::new(out_columns) - } - Operator::Project(op) => { - let out_columns = op - .exprs - .iter() - .map(|expr| expr.output_column()) - .collect_vec(); - Arc::new(out_columns) } - Operator::TableScan(op) => { - let out_columns = op - .columns - .iter() - .map(|(_, column)| column.clone()) - .collect_vec(); - Arc::new(out_columns) - } - // FIXME: redundant clone - Operator::FunctionScan(op) => op.table_function.output_schema().clone(), - Operator::Values(ValuesOperator { schema_ref, .. }) - | Operator::Union(UnionOperator { - left_schema_ref: schema_ref, - .. - }) => schema_ref.clone(), - Operator::Dummy => Arc::new(vec![]), - Operator::Show => Arc::new(vec![ColumnRef::from(ColumnCatalog::new_dummy( - "TABLE".to_string(), - ))]), - Operator::Explain => Arc::new(vec![ColumnRef::from(ColumnCatalog::new_dummy( - "PLAN".to_string(), - ))]), - Operator::Describe(_) => Arc::new(vec![ - ColumnRef::from(ColumnCatalog::new_dummy("FIELD".to_string())), - ColumnRef::from(ColumnCatalog::new_dummy("TYPE".to_string())), - ColumnRef::from(ColumnCatalog::new_dummy("LEN".to_string())), - ColumnRef::from(ColumnCatalog::new_dummy("NULL".to_string())), - ColumnRef::from(ColumnCatalog::new_dummy("Key".to_string())), - ColumnRef::from(ColumnCatalog::new_dummy("DEFAULT".to_string())), - ]), - Operator::Insert(_) => Arc::new(vec![ColumnRef::from(ColumnCatalog::new_dummy( - "INSERTED".to_string(), - ))]), - Operator::Update(_) => Arc::new(vec![ColumnRef::from(ColumnCatalog::new_dummy( - "UPDATED".to_string(), - ))]), - Operator::Delete(_) => Arc::new(vec![ColumnRef::from(ColumnCatalog::new_dummy( - "DELETED".to_string(), - ))]), - Operator::Analyze(_) => Arc::new(vec![ColumnRef::from(ColumnCatalog::new_dummy( - "STATISTICS_META_PATH".to_string(), - ))]), - Operator::AddColumn(_) => Arc::new(vec![ColumnRef::from( - ColumnCatalog::new_dummy("ADD COLUMN SUCCESS".to_string()), - )]), - Operator::DropColumn(_) => Arc::new(vec![ColumnRef::from( - ColumnCatalog::new_dummy("DROP COLUMN SUCCESS".to_string()), - )]), - Operator::CreateTable(_) => Arc::new(vec![ColumnRef::from( - ColumnCatalog::new_dummy("CREATE TABLE SUCCESS".to_string()), - )]), - Operator::CreateIndex(_) => Arc::new(vec![ColumnRef::from( - ColumnCatalog::new_dummy("CREATE INDEX SUCCESS".to_string()), - )]), - Operator::DropTable(_) => Arc::new(vec![ColumnRef::from( - ColumnCatalog::new_dummy("DROP TABLE SUCCESS".to_string()), - )]), - Operator::Truncate(_) => Arc::new(vec![ColumnRef::from(ColumnCatalog::new_dummy( - "TRUNCATE TABLE SUCCESS".to_string(), - ))]), - Operator::CopyFromFile(_) => Arc::new(vec![ColumnRef::from( - ColumnCatalog::new_dummy("COPY FROM SOURCE".to_string()), - )]), - Operator::CopyToFile(_) => todo!(), - }) + SchemaOutput::Schema(columns) + } + Operator::Project(op) => SchemaOutput::Schema( + op.exprs + .iter() + .map(|expr| expr.output_column()) + .collect_vec(), + ), + Operator::TableScan(op) => SchemaOutput::Schema( + op.columns + .iter() + .map(|(_, column)| column.clone()) + .collect_vec(), + ), + Operator::FunctionScan(op) => { + SchemaOutput::SchemaRef(op.table_function.output_schema().clone()) + 
} + Operator::Values(ValuesOperator { schema_ref, .. }) + | Operator::Union(UnionOperator { + left_schema_ref: schema_ref, + .. + }) => SchemaOutput::SchemaRef(schema_ref.clone()), + Operator::Dummy => SchemaOutput::Schema(vec![]), + Operator::Show => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("TABLE".to_string()), + )]), + Operator::Explain => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("PLAN".to_string()), + )]), + Operator::Describe(_) => SchemaOutput::Schema(vec![ + ColumnRef::from(ColumnCatalog::new_dummy("FIELD".to_string())), + ColumnRef::from(ColumnCatalog::new_dummy("TYPE".to_string())), + ColumnRef::from(ColumnCatalog::new_dummy("LEN".to_string())), + ColumnRef::from(ColumnCatalog::new_dummy("NULL".to_string())), + ColumnRef::from(ColumnCatalog::new_dummy("Key".to_string())), + ColumnRef::from(ColumnCatalog::new_dummy("DEFAULT".to_string())), + ]), + Operator::Insert(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("INSERTED".to_string()), + )]), + Operator::Update(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("UPDATED".to_string()), + )]), + Operator::Delete(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("DELETED".to_string()), + )]), + Operator::Analyze(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("STATISTICS_META_PATH".to_string()), + )]), + Operator::AddColumn(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("ADD COLUMN SUCCESS".to_string()), + )]), + Operator::DropColumn(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("DROP COLUMN SUCCESS".to_string()), + )]), + Operator::CreateTable(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("CREATE TABLE SUCCESS".to_string()), + )]), + Operator::CreateIndex(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("CREATE INDEX SUCCESS".to_string()), + )]), + Operator::CreateView(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("CREATE VIEW SUCCESS".to_string()), + )]), + Operator::DropTable(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("DROP TABLE SUCCESS".to_string()), + )]), + Operator::Truncate(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("TRUNCATE TABLE SUCCESS".to_string()), + )]), + Operator::CopyFromFile(_) => SchemaOutput::Schema(vec![ColumnRef::from( + ColumnCatalog::new_dummy("COPY FROM SOURCE".to_string()), + )]), + Operator::CopyToFile(_) => todo!(), + } + } + + pub(crate) fn output_schema_direct(&self) -> SchemaOutput { + Self::_output_schema_direct(&self.operator, &self.childrens) + } + + pub fn output_schema(&mut self) -> &SchemaRef { + self._output_schema_ref.get_or_insert_with(|| { + match Self::_output_schema_direct(&self.operator, &self.childrens) { + SchemaOutput::Schema(schema) => Arc::new(schema), + SchemaOutput::SchemaRef(schema_ref) => schema_ref.clone(), + } + }) } pub fn explain(&self, indentation: usize) -> String { diff --git a/src/planner/operator/create_view.rs b/src/planner/operator/create_view.rs new file mode 100644 index 00000000..3623b3db --- /dev/null +++ b/src/planner/operator/create_view.rs @@ -0,0 +1,22 @@ +use crate::catalog::view::View; +use serde_macros::ReferenceSerialization; +use std::fmt; +use std::fmt::Formatter; + +#[derive(Debug, PartialEq, Eq, Clone, Hash, ReferenceSerialization)] +pub struct CreateViewOperator { + pub view: View, + 
pub or_replace: bool, +} + +impl fmt::Display for CreateViewOperator { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!( + f, + "Create View as {}, Or Replace: {}", + self.view, self.or_replace + )?; + + Ok(()) + } +} diff --git a/src/planner/operator/drop_table.rs b/src/planner/operator/drop_table.rs index e3c5b212..6a8e0519 100644 --- a/src/planner/operator/drop_table.rs +++ b/src/planner/operator/drop_table.rs @@ -12,7 +12,11 @@ pub struct DropTableOperator { impl fmt::Display for DropTableOperator { fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "Drop {}, If Exists: {}", self.table_name, self.if_exists)?; + write!( + f, + "Drop Table {}, If Exists: {}", + self.table_name, self.if_exists + )?; Ok(()) } diff --git a/src/planner/operator/mod.rs b/src/planner/operator/mod.rs index 7913299d..095d69f6 100644 --- a/src/planner/operator/mod.rs +++ b/src/planner/operator/mod.rs @@ -5,6 +5,7 @@ pub mod copy_from_file; pub mod copy_to_file; pub mod create_index; pub mod create_table; +pub mod create_view; pub mod delete; pub mod describe; pub mod drop_table; @@ -34,6 +35,7 @@ use crate::planner::operator::copy_from_file::CopyFromFileOperator; use crate::planner::operator::copy_to_file::CopyToFileOperator; use crate::planner::operator::create_index::CreateIndexOperator; use crate::planner::operator::create_table::CreateTableOperator; +use crate::planner::operator::create_view::CreateViewOperator; use crate::planner::operator::delete::DeleteOperator; use crate::planner::operator::describe::DescribeOperator; use crate::planner::operator::drop_table::DropTableOperator; @@ -77,6 +79,7 @@ pub enum Operator { DropColumn(DropColumnOperator), CreateTable(CreateTableOperator), CreateIndex(CreateIndexOperator), + CreateView(CreateViewOperator), DropTable(DropTableOperator), Truncate(TruncateOperator), // Copy @@ -164,6 +167,7 @@ impl Operator { | Operator::DropColumn(_) | Operator::CreateTable(_) | Operator::CreateIndex(_) + | Operator::CreateView(_) | Operator::DropTable(_) | Operator::Truncate(_) | Operator::CopyFromFile(_) @@ -240,6 +244,7 @@ impl Operator { | Operator::DropColumn(_) | Operator::CreateTable(_) | Operator::CreateIndex(_) + | Operator::CreateView(_) | Operator::DropTable(_) | Operator::Truncate(_) | Operator::CopyFromFile(_) @@ -272,6 +277,7 @@ impl fmt::Display for Operator { Operator::DropColumn(op) => write!(f, "{}", op), Operator::CreateTable(op) => write!(f, "{}", op), Operator::CreateIndex(op) => write!(f, "{}", op), + Operator::CreateView(op) => write!(f, "{}", op), Operator::DropTable(op) => write!(f, "{}", op), Operator::Truncate(op) => write!(f, "{}", op), Operator::CopyFromFile(op) => write!(f, "{}", op), diff --git a/src/planner/operator/table_scan.rs b/src/planner/operator/table_scan.rs index 6f3934bf..67cd2772 100644 --- a/src/planner/operator/table_scan.rs +++ b/src/planner/operator/table_scan.rs @@ -30,7 +30,7 @@ impl TableScanOperator { .columns() .enumerate() .map(|(i, column)| { - if column.desc.is_primary { + if column.desc().is_primary { primary_key_option = column.id(); } diff --git a/src/serdes/column.rs b/src/serdes/column.rs index bcd51273..8c85779d 100644 --- a/src/serdes/column.rs +++ b/src/serdes/column.rs @@ -13,10 +13,20 @@ impl ReferenceSerialization for ColumnRef { is_direct: bool, reference_tables: &mut ReferenceTables, ) -> Result<(), DatabaseError> { - self.summary.encode(writer, is_direct, reference_tables)?; - if is_direct || matches!(self.summary.relation, ColumnRelation::None) { - self.nullable.encode(writer, is_direct, 
reference_tables)?; - self.desc.encode(writer, is_direct, reference_tables)?; + self.summary().encode(writer, is_direct, reference_tables)?; + self.in_join() + .then(|| self.nullable()) + .encode(writer, is_direct, reference_tables)?; + + if is_direct + || !matches!( + self.summary().relation, + ColumnRelation::Table { is_temp: false, .. } + ) + { + self.nullable() + .encode(writer, is_direct, reference_tables)?; + self.desc().encode(writer, is_direct, reference_tables)?; } Ok(()) @@ -28,17 +38,19 @@ impl ReferenceSerialization for ColumnRef { reference_tables: &ReferenceTables, ) -> Result { let summary = ColumnSummary::decode(reader, drive, reference_tables)?; + let nullable_for_join = Option::::decode(reader, drive, reference_tables)?; if let ( ColumnRelation::Table { column_id, table_name, + is_temp: false, }, Some((transaction, table_cache)), ) = (&summary.relation, drive) { let table = transaction - .table(table_cache, table_name.clone()) + .table(table_cache, table_name.clone())? .ok_or(DatabaseError::TableNotFound)?; let column = table .get_column_by_id(column_id) @@ -46,16 +58,21 @@ impl ReferenceSerialization for ColumnRef { "column id: {} not found", column_id )))?; - Ok(column.clone()) + Ok(nullable_for_join + .and_then(|nullable| column.nullable_for_join(nullable)) + .unwrap_or_else(|| column.clone())) } else { - let nullable = bool::decode(reader, drive, reference_tables)?; + let mut nullable = bool::decode(reader, drive, reference_tables)?; let desc = ColumnDesc::decode(reader, drive, reference_tables)?; + let mut in_join = false; + if let Some(nullable_for_join) = nullable_for_join { + in_join = true; + nullable = nullable_for_join; + } - Ok(Self(Arc::new(ColumnCatalog { - summary, - nullable, - desc, - }))) + Ok(Self(Arc::new(ColumnCatalog::direct_new( + summary, nullable, desc, in_join, + )))) } } } @@ -74,10 +91,11 @@ impl ReferenceSerialization for ColumnRelation { ColumnRelation::Table { column_id, table_name, + is_temp, } => { writer.write_all(&[1])?; - column_id.encode(writer, is_direct, reference_tables)?; + is_temp.encode(writer, is_direct, reference_tables)?; reference_tables.push_or_replace(table_name).encode( writer, @@ -102,6 +120,7 @@ impl ReferenceSerialization for ColumnRelation { 0 => ColumnRelation::None, 1 => { let column_id = ColumnId::decode(reader, drive, reference_tables)?; + let is_temp = bool::decode(reader, drive, reference_tables)?; let table_name = reference_tables .get(::decode( reader, @@ -113,6 +132,7 @@ impl ReferenceSerialization for ColumnRelation { ColumnRelation::Table { column_id, table_name, + is_temp, } } _ => unreachable!(), @@ -154,28 +174,30 @@ pub(crate) mod test { let mut reference_tables = ReferenceTables::new(); let c3_column_id = { let table = transaction - .table(&table_cache, Arc::new("t1".to_string())) + .table(&table_cache, Arc::new("t1".to_string()))? 
.unwrap(); *table.get_column_id_by_name("c3").unwrap() }; { - let ref_column = ColumnRef(Arc::new(ColumnCatalog { - summary: ColumnSummary { + let ref_column = ColumnRef(Arc::new(ColumnCatalog::direct_new( + ColumnSummary { name: "c3".to_string(), relation: ColumnRelation::Table { column_id: c3_column_id, table_name: table_name.clone(), + is_temp: false, }, }, - nullable: false, - desc: ColumnDesc { + false, + ColumnDesc { column_datatype: LogicalType::Integer, is_primary: false, is_unique: false, default: None, }, - })); + false, + ))); ref_column.encode(&mut cursor, false, &mut reference_tables)?; cursor.seek(SeekFrom::Start(0))?; @@ -200,13 +222,13 @@ pub(crate) mod test { cursor.seek(SeekFrom::Start(0))?; } { - let not_ref_column = ColumnRef(Arc::new(ColumnCatalog { - summary: ColumnSummary { + let not_ref_column = ColumnRef(Arc::new(ColumnCatalog::direct_new( + ColumnSummary { name: "c3".to_string(), relation: ColumnRelation::None, }, - nullable: false, - desc: ColumnDesc { + false, + ColumnDesc { column_datatype: LogicalType::Integer, is_primary: false, is_unique: false, @@ -214,7 +236,8 @@ pub(crate) mod test { Some(42), )))), }, - })); + false, + ))); not_ref_column.encode(&mut cursor, false, &mut reference_tables)?; cursor.seek(SeekFrom::Start(0))?; @@ -240,6 +263,7 @@ pub(crate) mod test { relation: ColumnRelation::Table { column_id: Ulid::new(), table_name: Arc::new("t1".to_string()), + is_temp: false, }, }; summary.encode(&mut cursor, false, &mut reference_tables)?; @@ -275,6 +299,7 @@ pub(crate) mod test { let table_relation = ColumnRelation::Table { column_id: Ulid::new(), table_name: Arc::new("t1".to_string()), + is_temp: false, }; table_relation.encode(&mut cursor, false, &mut reference_tables)?; cursor.seek(SeekFrom::Start(0))?; diff --git a/src/serdes/data_value.rs b/src/serdes/data_value.rs index 01cc18a2..8bf606d2 100644 --- a/src/serdes/data_value.rs +++ b/src/serdes/data_value.rs @@ -1,7 +1,7 @@ use crate::errors::DatabaseError; use crate::serdes::{ReferenceSerialization, ReferenceTables}; use crate::storage::{TableCache, Transaction}; -use crate::types::value::DataValue; +use crate::types::value::{DataValue, ValueRef}; use crate::types::LogicalType; use std::io::{Read, Write}; @@ -20,6 +20,10 @@ impl ReferenceSerialization for DataValue { if self.is_null() { return Ok(()); } + if let DataValue::Tuple(values) = self { + values.encode(writer, is_direct, reference_tables)?; + return Ok(()); + } if logical_type.raw_len().is_none() { let mut bytes = Vec::new(); self.to_raw(&mut bytes)? @@ -40,16 +44,89 @@ impl ReferenceSerialization for DataValue { let logical_type = LogicalType::decode(reader, drive, reference_tables)?; if bool::decode(reader, drive, reference_tables)? 
{ - Ok(DataValue::none(&logical_type)) - } else { - let value_len = match logical_type.raw_len() { - None => usize::decode(reader, drive, reference_tables)?, - Some(len) => len, - }; - let mut buf = vec![0u8; value_len]; - reader.read_exact(&mut buf)?; - - Ok(DataValue::from_raw(&buf, &logical_type)) + return Ok(DataValue::none(&logical_type)); + } + if matches!(logical_type, LogicalType::Tuple) { + return Ok(DataValue::Tuple(Option::<Vec<ValueRef>>::decode( + reader, + drive, + reference_tables, + )?)); } + let value_len = match logical_type.raw_len() { + None => usize::decode(reader, drive, reference_tables)?, + Some(len) => len, + }; + let mut buf = vec![0u8; value_len]; + reader.read_exact(&mut buf)?; + + Ok(DataValue::from_raw(&buf, &logical_type)) + } +} + +#[cfg(test)] +pub(crate) mod test { + use crate::errors::DatabaseError; + use crate::serdes::{ReferenceSerialization, ReferenceTables}; + use crate::storage::rocksdb::RocksTransaction; + use crate::types::value::{DataValue, Utf8Type}; + use sqlparser::ast::CharLengthUnits; + use std::io::{Cursor, Seek, SeekFrom}; + use std::sync::Arc; + + #[test] + fn test_serialization() -> Result<(), DatabaseError> { + let source_0 = DataValue::Int32(None); + let source_1 = DataValue::Int32(Some(32)); + let source_2 = DataValue::Utf8 { + value: None, + ty: Utf8Type::Variable(None), + unit: CharLengthUnits::Characters, + }; + let source_3 = DataValue::Utf8 { + value: Some("hello".to_string()), + ty: Utf8Type::Variable(None), + unit: CharLengthUnits::Characters, + }; + let source_4 = DataValue::Tuple(None); + let source_5 = DataValue::Tuple(Some(vec![ + Arc::new(DataValue::Int32(None)), + Arc::new(DataValue::Int32(Some(42))), + ])); + + let mut reference_tables = ReferenceTables::new(); + let mut bytes = Vec::new(); + let mut cursor = Cursor::new(&mut bytes); + + source_0.encode(&mut cursor, false, &mut reference_tables)?; + source_1.encode(&mut cursor, false, &mut reference_tables)?; + source_2.encode(&mut cursor, false, &mut reference_tables)?; + source_3.encode(&mut cursor, false, &mut reference_tables)?; + source_4.encode(&mut cursor, false, &mut reference_tables)?; + source_5.encode(&mut cursor, false, &mut reference_tables)?; + + cursor.seek(SeekFrom::Start(0))?; + + let decoded_0 = + DataValue::decode::<RocksTransaction, _>(&mut cursor, None, &reference_tables).unwrap(); + let decoded_1 = + DataValue::decode::<RocksTransaction, _>(&mut cursor, None, &reference_tables).unwrap(); + let decoded_2 = + DataValue::decode::<RocksTransaction, _>(&mut cursor, None, &reference_tables).unwrap(); + let decoded_3 = + DataValue::decode::<RocksTransaction, _>(&mut cursor, None, &reference_tables).unwrap(); + let decoded_4 = + DataValue::decode::<RocksTransaction, _>(&mut cursor, None, &reference_tables).unwrap(); + let decoded_5 = + DataValue::decode::<RocksTransaction, _>(&mut cursor, None, &reference_tables).unwrap(); + + assert_eq!(source_0, decoded_0); + assert_eq!(source_1, decoded_1); + assert_eq!(source_2, decoded_2); + assert_eq!(source_3, decoded_3); + assert_eq!(source_4, decoded_4); + assert_eq!(source_5, decoded_5); + + Ok(()) } }
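Review note: the `DataValue::Tuple` arms added above bypass the `raw_len()`-based fixed/variable-length path and recurse through `Option<Vec<ValueRef>>`'s own `ReferenceSerialization` impl, since tuple elements are themselves `DataValue`s and can nest. A self-contained sketch of this null-flag plus length-prefixed framing, simplified to `i32` elements (the exact flag byte and framing are illustrative; the real impl also threads `drive` and `ReferenceTables` through every element):

```rust
use std::io::{self, Cursor, Read, Write};

// Encode an optional tuple: presence flag, element count, then elements.
fn encode_tuple<W: Write>(w: &mut W, values: &Option<Vec<i32>>) -> io::Result<()> {
    w.write_all(&[values.is_some() as u8])?;
    if let Some(values) = values {
        w.write_all(&(values.len() as u32).to_le_bytes())?;
        for v in values {
            w.write_all(&v.to_le_bytes())?;
        }
    }
    Ok(())
}

fn decode_tuple<R: Read>(r: &mut R) -> io::Result<Option<Vec<i32>>> {
    let mut flag = [0u8; 1];
    r.read_exact(&mut flag)?;
    if flag[0] == 0 {
        return Ok(None); // mirrors DataValue::Tuple(None)
    }
    let mut len = [0u8; 4];
    r.read_exact(&mut len)?;
    let len = u32::from_le_bytes(len) as usize;
    let mut values = Vec::with_capacity(len);
    for _ in 0..len {
        let mut buf = [0u8; 4];
        r.read_exact(&mut buf)?;
        values.push(i32::from_le_bytes(buf));
    }
    Ok(Some(values))
}

fn main() -> io::Result<()> {
    let source = Some(vec![1, 42]);
    let mut bytes = Vec::new();
    encode_tuple(&mut bytes, &source)?;
    assert_eq!(decode_tuple(&mut Cursor::new(bytes))?, source);
    Ok(())
}
```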
implement_serialization_by_bincode { @@ -82,7 +84,7 @@ pub trait ReferenceSerialization: Sized { ) -> Result; } -#[derive(Default)] +#[derive(Debug, Default, Eq, PartialEq)] pub struct ReferenceTables { tables: Vec, } @@ -113,4 +115,55 @@ impl ReferenceTables { self.tables.push(table_name.clone()); self.tables.len() - 1 } + + pub fn to_raw(&self, mut writer: W) -> io::Result<()> { + writer.write_all(&(self.tables.len() as u32).to_le_bytes())?; + for table_name in self.tables.iter() { + writer.write_all(&(table_name.len() as u32).to_le_bytes())?; + writer.write_all(table_name.as_bytes())? + } + + Ok(()) + } + + pub fn from_raw(reader: &mut R) -> io::Result { + let mut bytes = [0u8; 4]; + reader.read_exact(&mut bytes)?; + let tables_len = u32::from_le_bytes(bytes) as usize; + let mut tables = Vec::with_capacity(tables_len); + + for _ in 0..tables_len { + let mut bytes = [0u8; 4]; + reader.read_exact(&mut bytes)?; + let len = u32::from_le_bytes(bytes) as usize; + let mut bytes = vec![0u8; len]; + reader.read_exact(&mut bytes)?; + tables.push(Arc::new(String::from_utf8(bytes).unwrap())); + } + + Ok(ReferenceTables { tables }) + } +} + +#[cfg(test)] +mod tests { + use crate::serdes::ReferenceTables; + use std::io; + use std::io::{Seek, SeekFrom}; + use std::sync::Arc; + + #[test] + fn test_to_raw() -> io::Result<()> { + let reference_tables = ReferenceTables { + tables: vec![Arc::new("t1".to_string()), Arc::new("t2".to_string())], + }; + + let mut cursor = io::Cursor::new(Vec::new()); + reference_tables.to_raw(&mut cursor)?; + + cursor.seek(SeekFrom::Start(0))?; + assert_eq!(reference_tables, ReferenceTables::from_raw(&mut cursor)?); + + Ok(()) + } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 60f43519..f271e6fe 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -1,6 +1,7 @@ pub mod rocksdb; mod table_codec; +use crate::catalog::view::View; use crate::catalog::{ColumnCatalog, ColumnRef, TableCatalog, TableMeta, TableName}; use crate::errors::DatabaseError; use crate::expression::range_detacher::Range; @@ -22,7 +23,8 @@ use std::{mem, slice}; use ulid::Generator; pub(crate) type StatisticsMetaCache = ShardingLruCache<(TableName, IndexId), StatisticsMeta>; -pub(crate) type TableCache = ShardingLruCache; +pub(crate) type TableCache = ShardingLruCache; +pub(crate) type ViewCache = ShardingLruCache; pub trait Storage: Clone { type TransactionType<'a>: Transaction @@ -54,7 +56,7 @@ pub trait Transaction: Sized { debug_assert!(columns.iter().map(|(i, _)| i).all_unique()); let table = self - .table(table_cache, table_name.clone()) + .table(table_cache, table_name.clone())? .ok_or(DatabaseError::TableNotFound)?; let table_types = table.types(); if columns.is_empty() { @@ -94,7 +96,7 @@ pub trait Transaction: Sized { debug_assert!(columns.iter().map(|(i, _)| i).all_unique()); let table = self - .table(table_cache, table_name.clone()) + .table(table_cache, table_name.clone())? 
diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 60f43519..f271e6fe 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -1,6 +1,7 @@ pub mod rocksdb; mod table_codec; +use crate::catalog::view::View; use crate::catalog::{ColumnCatalog, ColumnRef, TableCatalog, TableMeta, TableName}; use crate::errors::DatabaseError; use crate::expression::range_detacher::Range; @@ -22,7 +23,8 @@ use std::{mem, slice}; use ulid::Generator; pub(crate) type StatisticsMetaCache = ShardingLruCache<(TableName, IndexId), StatisticsMeta>; -pub(crate) type TableCache = ShardingLruCache<String, TableCatalog>; +pub(crate) type TableCache = ShardingLruCache<TableName, TableCatalog>; +pub(crate) type ViewCache = ShardingLruCache<TableName, View>; pub trait Storage: Clone { type TransactionType<'a>: Transaction @@ -54,7 +56,7 @@ pub trait Transaction: Sized { debug_assert!(columns.iter().map(|(i, _)| i).all_unique()); let table = self - .table(table_cache, table_name.clone()) + .table(table_cache, table_name.clone())? .ok_or(DatabaseError::TableNotFound)?; let table_types = table.types(); if columns.is_empty() { @@ -94,7 +96,7 @@ debug_assert!(columns.iter().map(|(i, _)| i).all_unique()); let table = self - .table(table_cache, table_name.clone()) + .table(table_cache, table_name.clone())? .ok_or(DatabaseError::TableNotFound)?; let table_types = table.types(); let table_name = table.name.as_str(); @@ -133,7 +135,7 @@ column_ids: Vec<ColumnId>, ty: IndexType, ) -> Result<IndexMetaRef, DatabaseError> { - if let Some(mut table) = self.table(table_cache, table_name.clone()).cloned() { + if let Some(mut table) = self.table(table_cache, table_name.clone())?.cloned() { let index_meta = table.add_index_meta(index_name, column_ids, ty)?; let (key, value) = TableCodec::encode_index_meta(table_name, index_meta)?; self.set(key, value)?; @@ -215,8 +217,8 @@ column: &ColumnCatalog, if_not_exists: bool, ) -> Result<ColumnId, DatabaseError> { - if let Some(mut table) = self.table(table_cache, table_name.clone()).cloned() { - if !column.nullable && column.default_value()?.is_none() { + if let Some(mut table) = self.table(table_cache, table_name.clone())?.cloned() { + if !column.nullable() && column.default_value()?.is_none() { return Err(DatabaseError::NeedNullAbleOrDefault); } @@ -232,7 +234,7 @@ let mut generator = Generator::new(); let col_id = table.add_column(column.clone(), &mut generator)?; - if column.desc.is_unique { + if column.desc().is_unique { let meta_ref = table.add_index_meta( format!("uk_{}", column.name()), vec![col_id], @@ -260,7 +262,7 @@ table_name: &TableName, column_name: &str, ) -> Result<(), DatabaseError> { - if let Some(table_catalog) = self.table(table_cache, table_name.clone()).cloned() { + if let Some(table_catalog) = self.table(table_cache, table_name.clone())?.cloned() { let column = table_catalog.get_column_by_name(column_name).unwrap(); let (key, _) = TableCodec::encode_column(column, &mut ReferenceTables::new())?; @@ -273,7 +275,7 @@ let (index_meta_key, _) = TableCodec::encode_index_meta(table_name, index_meta)?; self.remove(&index_meta_key)?; - let (index_min, index_max) = TableCodec::index_bound(table_name, &index_meta.id); + let (index_min, index_max) = TableCodec::index_bound(table_name, &index_meta.id)?; self._drop_data(&index_min, &index_max)?; self.remove_table_meta(meta_cache, table_name, index_meta.id)?; @@ -286,6 +288,23 @@ } } + fn create_view( + &mut self, + view_cache: &ViewCache, + view: View, + or_replace: bool, + ) -> Result<(), DatabaseError> { + let (view_key, value) = TableCodec::encode_view(&view)?; + + if !or_replace && self.get(&view_key)?.is_some() { + return Err(DatabaseError::ViewExists); + } + self.set(view_key, value)?; + let _ = view_cache.put(view.name.clone(), view); + + Ok(()) + } +
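Review note: `create_view` above is a plain check-then-set against the KV store: probe the encoded view key, refuse with `DatabaseError::ViewExists` unless `OR REPLACE` was requested, then overwrite the value and refresh the cache. A stand-in sketch with a `BTreeMap` in place of the storage engine (the error type and literal key bytes here are simplified assumptions):

```rust
use std::collections::BTreeMap;

#[derive(Debug, PartialEq)]
enum DbError {
    ViewExists, // stands in for DatabaseError::ViewExists
}

fn create_view(
    kv: &mut BTreeMap<Vec<u8>, Vec<u8>>,
    key: Vec<u8>,
    value: Vec<u8>,
    or_replace: bool,
) -> Result<(), DbError> {
    if !or_replace && kv.contains_key(&key) {
        return Err(DbError::ViewExists);
    }
    kv.insert(key, value); // last writer wins under OR REPLACE
    Ok(())
}

fn main() {
    let mut kv = BTreeMap::new();
    let key = b"View\x00v1".to_vec(); // hypothetical encoded view key
    assert!(create_view(&mut kv, key.clone(), vec![1], false).is_ok());
    assert_eq!(
        create_view(&mut kv, key.clone(), vec![2], false),
        Err(DbError::ViewExists)
    );
    assert!(create_view(&mut kv, key, vec![2], true).is_ok());
}
```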
fn create_table( &mut self, table_cache: &TableCache, @@ -315,7 +334,7 @@ self.set(key, value)?; } debug_assert_eq!(reference_tables.len(), 1); - table_cache.put(table_name.to_string(), table_catalog); + table_cache.put(table_name.clone(), table_catalog); Ok(table_name) } @@ -326,7 +345,7 @@ table_name: TableName, if_exists: bool, ) -> Result<(), DatabaseError> { - if self.table(table_cache, table_name.clone()).is_none() { + if self.table(table_cache, table_name.clone())?.is_none() { if if_exists { return Ok(()); } else { @@ -360,19 +379,40 @@ Ok(()) } + fn view<'a>( + &'a self, + view_cache: &'a ViewCache, + view_name: TableName, + drive: (&Self, &TableCache), + ) -> Result<Option<&'a View>, DatabaseError> { + if let Some(view) = view_cache.get(&view_name) { + return Ok(Some(view)); + } + let Some(bytes) = self.get(&TableCodec::encode_view_key(&view_name))? else { + return Ok(None); + }; + Ok(Some(view_cache.get_or_insert(view_name.clone(), |_| { + TableCodec::decode_view(&bytes, drive) + })?)) + } + fn table<'a>( &'a self, table_cache: &'a TableCache, table_name: TableName, - ) -> Option<&TableCatalog> { - table_cache - .get_or_insert(table_name.to_string(), |_| { - // `TableCache` is not theoretically used in `table_collect` because ColumnCatalog should not depend on other Column - let (columns, indexes) = self.table_collect(table_name.clone())?; + ) -> Result<Option<&'a TableCatalog>, DatabaseError> { + if let Some(table) = table_cache.get(&table_name) { + return Ok(Some(table)); + } - TableCatalog::reload(table_name.clone(), columns, indexes) + // Tips: theoretically `TableCache` is not used in `table_collect`, because a `ColumnCatalog` should not depend on other columns + self.table_collect(&table_name)? + .map(|(columns, indexes)| { + table_cache.get_or_insert(table_name.clone(), |_| { + TableCatalog::reload(table_name, columns, indexes) + }) }) - .ok() + .transpose() } fn table_metas(&self) -> Result<Vec<TableMeta>, DatabaseError> { let mut iter = self.range(Bound::Included(&min), Bound::Included(&max))?; while let Some((_, value)) = iter.try_next().ok().flatten() { - let meta = TableCodec::decode_root_table(&value)?; + let meta = TableCodec::decode_root_table::<Self>(&value)?; metas.push(meta); } @@ -430,25 +470,29 @@ Ok(()) } - fn meta_loader<'a>(&'a self, meta_cache: &'a StatisticsMetaCache) -> StatisticMetaLoader<Self> + fn meta_loader<'a>( + &'a self, + meta_cache: &'a StatisticsMetaCache, + ) -> StatisticMetaLoader<'a, Self> where Self: Sized, { StatisticMetaLoader::new(self, meta_cache) } + #[allow(clippy::type_complexity)] fn table_collect( &self, - table_name: TableName, - ) -> Result<(Vec<ColumnRef>, Vec<IndexMetaRef>), DatabaseError> { - let (table_min, table_max) = TableCodec::table_bound(&table_name); + table_name: &TableName, + ) -> Result<Option<(Vec<ColumnRef>, Vec<IndexMetaRef>)>, DatabaseError> { + let (table_min, table_max) = TableCodec::table_bound(table_name); let mut column_iter = self.range(Bound::Included(&table_min), Bound::Included(&table_max))?; let mut columns = Vec::new(); let mut index_metas = Vec::new(); let mut reference_tables = ReferenceTables::new(); - let _ = reference_tables.push_or_replace(&table_name); + let _ = reference_tables.push_or_replace(table_name); // Tips: only `Column`, `IndexMeta`, `TableMeta` while let Some((key, value)) = column_iter.try_next().ok().flatten() { @@ -459,11 +503,11 @@ &reference_tables, )?); } else { - index_metas.push(Arc::new(TableCodec::decode_index_meta(&value)?)); + index_metas.push(Arc::new(TableCodec::decode_index_meta::<Self>(&value)?)); } } - Ok((columns, index_metas)) + Ok((!columns.is_empty()).then_some((columns, index_metas))) } fn _drop_data(&mut self, min: &[u8], max: &[u8]) -> Result<(), DatabaseError> { @@ -489,15 +533,15 @@ let table_name = table.name.clone(); let index_column = table .columns() - .filter(|column| column.desc.is_primary || column.desc.is_unique) + .filter(|column| column.desc().is_primary || column.desc().is_unique) .map(|column| (column.id().unwrap(), column.clone())) .collect_vec(); for (col_id, col) in index_column { - let is_primary = col.desc.is_primary; + let is_primary = col.desc().is_primary; let index_ty = if is_primary { IndexType::PrimaryKey - } else if col.desc.is_unique { + } else if col.desc().is_unique { IndexType::Unique } else { continue; @@ -947,7 +991,7
@@ impl Iter for IndexIter<'_, T> { let (bound_min, bound_max) = if matches!(index_meta.ty, IndexType::PrimaryKey) { TableCodec::tuple_bound(table_name) } else { - TableCodec::index_bound(table_name, &index_meta.id) + TableCodec::index_bound(table_name, &index_meta.id)? }; let check_bound = |value: &mut Bound>, bound: Vec| { if matches!(value, Bound::Unbounded) { @@ -994,9 +1038,9 @@ pub trait Iter { #[cfg(test)] mod test { - use crate::catalog::{ - ColumnCatalog, ColumnDesc, ColumnRef, ColumnRelation, ColumnSummary, TableCatalog, - }; + use crate::binder::test::build_t1_table; + use crate::catalog::view::View; + use crate::catalog::{ColumnCatalog, ColumnDesc, ColumnRef, ColumnRelation, ColumnSummary}; use crate::db::test::build_table; use crate::errors::DatabaseError; use crate::expression::range_detacher::Range; @@ -1086,7 +1130,7 @@ mod test { table_cache: &TableCache| -> Result<(), DatabaseError> { let table = transaction - .table(&table_cache, Arc::new("t1".to_string())) + .table(&table_cache, Arc::new("t1".to_string()))? .unwrap(); let c1_column_id = *table.get_column_id_by_name("c1").unwrap(); let c2_column_id = *table.get_column_id_by_name("c2").unwrap(); @@ -1108,54 +1152,57 @@ mod test { let mut column_iter = table.columns(); let c1_column = column_iter.next().unwrap(); - assert_eq!(c1_column.nullable, false); + assert_eq!(c1_column.nullable(), false); assert_eq!( c1_column.summary(), &ColumnSummary { name: "c1".to_string(), relation: ColumnRelation::Table { column_id: c1_column_id, - table_name: Arc::new("t1".to_string()) + table_name: Arc::new("t1".to_string()), + is_temp: false, }, } ); assert_eq!( - c1_column.desc, - ColumnDesc::new(LogicalType::Integer, true, false, None)? + c1_column.desc(), + &ColumnDesc::new(LogicalType::Integer, true, false, None)? ); let c2_column = column_iter.next().unwrap(); - assert_eq!(c2_column.nullable, false); + assert_eq!(c2_column.nullable(), false); assert_eq!( c2_column.summary(), &ColumnSummary { name: "c2".to_string(), relation: ColumnRelation::Table { column_id: c2_column_id, - table_name: Arc::new("t1".to_string()) + table_name: Arc::new("t1".to_string()), + is_temp: false, }, } ); assert_eq!( - c2_column.desc, - ColumnDesc::new(LogicalType::Boolean, false, false, None)? + c2_column.desc(), + &ColumnDesc::new(LogicalType::Boolean, false, false, None)? ); let c3_column = column_iter.next().unwrap(); - assert_eq!(c3_column.nullable, false); + assert_eq!(c3_column.nullable(), false); assert_eq!( c3_column.summary(), &ColumnSummary { name: "c3".to_string(), relation: ColumnRelation::Table { column_id: c3_column_id, - table_name: Arc::new("t1".to_string()) + table_name: Arc::new("t1".to_string()), + is_temp: false, }, } ); assert_eq!( - c3_column.desc, - ColumnDesc::new(LogicalType::Integer, false, false, None)? + c3_column.desc(), + &ColumnDesc::new(LogicalType::Integer, false, false, None)? ); Ok(()) @@ -1250,7 +1297,7 @@ mod test { build_table(&table_cache, &mut transaction)?; let (c2_column_id, c3_column_id) = { let t1_table = transaction - .table(&table_cache, Arc::new("t1".to_string())) + .table(&table_cache, Arc::new("t1".to_string()))? .unwrap(); ( @@ -1278,7 +1325,7 @@ mod test { table_cache: &TableCache| -> Result<(), DatabaseError> { let table = transaction - .table(&table_cache, Arc::new("t1".to_string())) + .table(&table_cache, Arc::new("t1".to_string()))? 
.unwrap(); let i1_meta = table.indexes[1].clone(); @@ -1324,7 +1371,7 @@ mod test { fn test_index_insert_delete() -> Result<(), DatabaseError> { fn build_index_iter<'a>( transaction: &'a RocksTransaction<'a>, - table_cache: &'a Arc>, + table_cache: &'a Arc, index_column_id: ColumnId, ) -> Result>, DatabaseError> { transaction.read_by_index( @@ -1354,7 +1401,7 @@ mod test { build_table(&table_cache, &mut transaction)?; let t1_table = transaction - .table(&table_cache, Arc::new("t1".to_string())) + .table(&table_cache, Arc::new("t1".to_string()))? .unwrap(); let c3_column_id = *t1_table.get_column_id_by_name("c3").unwrap(); @@ -1403,7 +1450,7 @@ mod test { assert_eq!(index_iter.next_tuple()?.unwrap(), tuples[2]); assert_eq!(index_iter.next_tuple()?.unwrap(), tuples[1]); - let (min, max) = TableCodec::index_bound("t1", &1); + let (min, max) = TableCodec::index_bound("t1", &1)?; let mut iter = transaction.range(Bound::Included(&min), Bound::Included(&max))?; let (_, value) = iter.try_next()?.unwrap(); @@ -1421,7 +1468,7 @@ mod test { assert_eq!(index_iter.next_tuple()?.unwrap(), tuples[2]); assert_eq!(index_iter.next_tuple()?.unwrap(), tuples[1]); - let (min, max) = TableCodec::index_bound("t1", &1); + let (min, max) = TableCodec::index_bound("t1", &1)?; let mut iter = transaction.range(Bound::Included(&min), Bound::Included(&max))?; let (_, value) = iter.try_next()?.unwrap(); @@ -1461,7 +1508,9 @@ mod test { ); } { - let table = transaction.table(&table_cache, table_name.clone()).unwrap(); + let table = transaction + .table(&table_cache, table_name.clone())? + .unwrap(); assert!(table.contains_column("c4")); let mut new_column = ColumnCatalog::new( @@ -1469,9 +1518,10 @@ mod test { true, ColumnDesc::new(LogicalType::Integer, false, false, None)?, ); - new_column.summary.relation = ColumnRelation::Table { + new_column.summary_mut().relation = ColumnRelation::Table { column_id: *table.get_column_id_by_name("c4").unwrap(), table_name: table_name.clone(), + is_temp: false, }; assert_eq!( table.get_column_by_name("c4"), @@ -1480,11 +1530,56 @@ mod test { } transaction.drop_column(&table_cache, &meta_cache, &table_name, "c4")?; { - let table = transaction.table(&table_cache, table_name.clone()).unwrap(); + let table = transaction + .table(&table_cache, table_name.clone())? + .unwrap(); assert!(!table.contains_column("c4")); assert!(table.get_column_by_name("c4").is_none()); } Ok(()) } + + #[test] + fn test_view_create_drop() -> Result<(), DatabaseError> { + let table_state = build_t1_table()?; + + let view_name = Arc::new("v1".to_string()); + let view = View { + name: view_name.clone(), + plan: Box::new( + table_state.plan("select c1, c3 from t1 inner join t2 on c1 = c3 and c1 > 1")?, + ), + }; + let mut transaction = table_state.storage.transaction()?; + transaction.create_view(&table_state.view_cache, view.clone(), true)?; + + assert_eq!( + &view, + transaction + .view( + &table_state.view_cache, + view_name.clone(), + (&transaction, &table_state.table_cache) + )? + .unwrap() + ); + assert_eq!( + &view, + transaction + .view( + &table_state.view_cache, + view_name.clone(), + ( + &transaction, + &Arc::new(ShardingLruCache::new(4, 1, RandomState::new())?) + ) + )? 
+ .unwrap() + ); + + // TODO: Drop View + + Ok(()) + } } diff --git a/src/storage/rocksdb.rs b/src/storage/rocksdb.rs index e4bc9e6d..5e436aa7 100644 --- a/src/storage/rocksdb.rs +++ b/src/storage/rocksdb.rs @@ -180,7 +180,7 @@ mod test { false, )?; - let table_catalog = transaction.table(&table_cache, Arc::new("test".to_string())); + let table_catalog = transaction.table(&table_cache, Arc::new("test".to_string()))?; debug_assert!(table_catalog.is_some()); debug_assert!(table_catalog .unwrap() @@ -242,7 +242,7 @@ mod test { let table_name = Arc::new("t1".to_string()); let table = transaction - .table(&fnck_sql.table_cache, table_name.clone()) + .table(&fnck_sql.table_cache, table_name.clone())? .unwrap() .clone(); let a_column_id = table.get_column_id_by_name("a").unwrap(); @@ -300,7 +300,7 @@ mod test { let transaction = fnck_sql.storage.transaction().unwrap(); let table = transaction - .table(&fnck_sql.table_cache, Arc::new("t1".to_string())) + .table(&fnck_sql.table_cache, Arc::new("t1".to_string()))? .unwrap() .clone(); let columns = table.columns().cloned().enumerate().collect_vec(); diff --git a/src/storage/table_codec.rs b/src/storage/table_codec.rs index 44ba244f..826882c9 100644 --- a/src/storage/table_codec.rs +++ b/src/storage/table_codec.rs @@ -1,7 +1,8 @@ +use crate::catalog::view::View; use crate::catalog::{ColumnRef, ColumnRelation, TableMeta}; use crate::errors::DatabaseError; use crate::serdes::{ReferenceSerialization, ReferenceTables}; -use crate::storage::Transaction; +use crate::storage::{TableCache, Transaction}; use crate::types::index::{Index, IndexId, IndexMeta, IndexType}; use crate::types::tuple::{Schema, Tuple, TupleId}; use crate::types::value::DataValue; @@ -17,6 +18,8 @@ const BOUND_MAX_TAG: u8 = 1; lazy_static! { static ref ROOT_BYTES: Vec = b"Root".to_vec(); + static ref VIEW_BYTES: Vec = b"View".to_vec(); + static ref EMPTY_REFERENCE_TABLES: ReferenceTables = ReferenceTables::new(); } #[derive(Clone)] @@ -28,6 +31,7 @@ enum CodecType { IndexMeta, Index, Statistics, + View, Tuple, Root, } @@ -54,9 +58,9 @@ impl TableCodec { /// TableName + Type /// - /// Tips: Root full key = key_prefix - fn key_prefix(ty: CodecType, table_name: &str) -> Vec { - let mut table_bytes = table_name.to_string().into_bytes(); + /// Tips: Root & View full key = key_prefix + fn key_prefix(ty: CodecType, name: &str) -> Vec { + let mut table_bytes = name.to_string().into_bytes(); match ty { CodecType::Column => { @@ -79,6 +83,13 @@ impl TableCodec { bytes.push(BOUND_MIN_TAG); bytes.append(&mut table_bytes); + return bytes; + } + CodecType::View => { + let mut bytes = VIEW_BYTES.clone(); + bytes.push(BOUND_MIN_TAG); + bytes.append(&mut table_bytes); + return bytes; } } @@ -108,17 +119,21 @@ impl TableCodec { (op(BOUND_MIN_TAG), op(BOUND_MAX_TAG)) } - pub fn index_bound(table_name: &str, index_id: &IndexId) -> (Vec, Vec) { - let op = |bound_id| { - let mut key_prefix = Self::key_prefix(CodecType::Index, table_name); + pub fn index_bound( + table_name: &str, + index_id: &IndexId, + ) -> Result<(Vec, Vec), DatabaseError> { + let op = |bound_id| -> Result, DatabaseError> { + let mut key_prefix = Cursor::new(Self::key_prefix(CodecType::Index, table_name)); + key_prefix.seek(SeekFrom::End(0))?; - key_prefix.push(BOUND_MIN_TAG); - key_prefix.append(&mut index_id.to_be_bytes().to_vec()); - key_prefix.push(bound_id); - key_prefix + key_prefix.write_all(&[BOUND_MIN_TAG])?; + key_prefix.write_all(&index_id.to_be_bytes()[..])?; + key_prefix.write_all(&[bound_id])?; + Ok(key_prefix.into_inner()) }; 
- (op(BOUND_MIN_TAG), op(BOUND_MAX_TAG)) + Ok((op(BOUND_MIN_TAG)?, op(BOUND_MAX_TAG)?)) } pub fn all_index_bound(table_name: &str) -> (Vec, Vec) { @@ -175,6 +190,17 @@ impl TableCodec { (op(BOUND_MIN_TAG), op(BOUND_MAX_TAG)) } + pub fn view_bound() -> (Vec, Vec) { + let op = |bound_id| { + let mut key_prefix = VIEW_BYTES.clone(); + + key_prefix.push(bound_id); + key_prefix + }; + + (op(BOUND_MIN_TAG), op(BOUND_MAX_TAG)) + } + /// Key: {TableName}{TUPLE_TAG}{BOUND_MIN_TAG}{RowID}(Sorted) /// Value: Tuple pub fn encode_tuple( @@ -217,18 +243,23 @@ impl TableCodec { table_name: &str, index_meta: &IndexMeta, ) -> Result<(Bytes, Bytes), DatabaseError> { - let mut key_prefix = Self::key_prefix(CodecType::IndexMeta, table_name); - key_prefix.push(BOUND_MIN_TAG); - key_prefix.append(&mut index_meta.id.to_be_bytes().to_vec()); + let mut key_prefix = Cursor::new(Self::key_prefix(CodecType::IndexMeta, table_name)); + key_prefix.seek(SeekFrom::End(0))?; + + key_prefix.write_all(&[BOUND_MIN_TAG])?; + key_prefix.write_all(&index_meta.id.to_be_bytes()[..])?; + + let mut value_bytes = Cursor::new(Vec::new()); + index_meta.encode(&mut value_bytes, true, &mut ReferenceTables::new())?; Ok(( - Bytes::from(key_prefix), - Bytes::from(bincode::serialize(&index_meta)?), + Bytes::from(key_prefix.into_inner()), + Bytes::from(value_bytes.into_inner()), )) } - pub fn decode_index_meta(bytes: &[u8]) -> Result { - Ok(bincode::deserialize(bytes)?) + pub fn decode_index_meta(bytes: &[u8]) -> Result { + IndexMeta::decode::(&mut Cursor::new(bytes), None, &EMPTY_REFERENCE_TABLES) } /// NonUnique Index: @@ -311,7 +342,8 @@ impl TableCodec { if let ColumnRelation::Table { column_id, table_name, - } = &col.summary.relation + is_temp: false, + } = &col.summary().relation { let mut key_prefix = Cursor::new(Self::key_prefix(CodecType::Column, table_name)); key_prefix.seek(SeekFrom::End(0))?; @@ -365,25 +397,72 @@ impl TableCodec { Ok(String::from_utf8(bytes.to_vec())?) 
} + /// Key: View{BOUND_MIN_TAG}{ViewName} + /// Value: View + pub fn encode_view(view: &View) -> Result<(Bytes, Bytes), DatabaseError> { + let key = Self::encode_view_key(&view.name); + + let mut reference_tables = ReferenceTables::new(); + let mut bytes = vec![0u8; 4]; + let reference_tables_pos = { + let mut value = Cursor::new(&mut bytes); + value.seek(SeekFrom::End(0))?; + view.encode(&mut value, false, &mut reference_tables)?; + let pos = value.position() as usize; + reference_tables.to_raw(&mut bytes)?; + pos + }; + bytes[..4].copy_from_slice(&(reference_tables_pos as u32).to_le_bytes()); + + Ok((Bytes::from(key), Bytes::from(bytes))) + } + + pub fn encode_view_key(view_name: &str) -> Vec { + Self::key_prefix(CodecType::View, view_name) + } + + pub fn decode_view( + bytes: &[u8], + drive: (&T, &TableCache), + ) -> Result { + let mut cursor = Cursor::new(bytes); + let reference_tables_pos = { + let mut bytes = [0u8; 4]; + cursor.read_exact(&mut bytes)?; + u32::from_le_bytes(bytes) as u64 + }; + cursor.seek(SeekFrom::Start(reference_tables_pos))?; + let reference_tables = ReferenceTables::from_raw(&mut cursor)?; + cursor.seek(SeekFrom::Start(4))?; + + View::decode(&mut cursor, Some(drive), &reference_tables) + } + /// Key: Root{BOUND_MIN_TAG}{TableName} /// Value: TableMeta pub fn encode_root_table(meta: &TableMeta) -> Result<(Bytes, Bytes), DatabaseError> { let key = Self::encode_root_table_key(&meta.table_name); - Ok((Bytes::from(key), Bytes::from(bincode::serialize(meta)?))) + let mut meta_bytes = Cursor::new(Vec::new()); + meta.encode(&mut meta_bytes, true, &mut ReferenceTables::new())?; + Ok((Bytes::from(key), Bytes::from(meta_bytes.into_inner()))) } pub fn encode_root_table_key(table_name: &str) -> Vec { Self::key_prefix(CodecType::Root, table_name) } - pub fn decode_root_table(bytes: &[u8]) -> Result { - Ok(bincode::deserialize(bytes)?) 
+ pub fn decode_root_table(bytes: &[u8]) -> Result { + let mut bytes = Cursor::new(bytes); + + TableMeta::decode::(&mut bytes, None, &EMPTY_REFERENCE_TABLES) } } #[cfg(test)] mod tests { + use crate::binder::test::build_t1_table; + use crate::catalog::view::View; use crate::catalog::{ ColumnCatalog, ColumnDesc, ColumnRef, ColumnRelation, TableCatalog, TableMeta, }; @@ -391,6 +470,7 @@ mod tests { use crate::serdes::ReferenceTables; use crate::storage::rocksdb::RocksTransaction; use crate::storage::table_codec::TableCodec; + use crate::storage::Storage; use crate::types::index::{Index, IndexMeta, IndexType}; use crate::types::tuple::Tuple; use crate::types::value::DataValue; @@ -455,7 +535,7 @@ mod tests { }) .unwrap(); - let table_meta = TableCodec::decode_root_table(&bytes).unwrap(); + let table_meta = TableCodec::decode_root_table::(&bytes).unwrap(); debug_assert_eq!(table_meta.table_name.as_str(), table_catalog.name.as_str()); } @@ -481,7 +561,10 @@ mod tests { }; let (_, bytes) = TableCodec::encode_index_meta(&"T1".to_string(), &index_meta)?; - debug_assert_eq!(TableCodec::decode_index_meta(&bytes)?, index_meta); + debug_assert_eq!( + TableCodec::decode_index_meta::(&bytes)?, + index_meta + ); Ok(()) } @@ -509,9 +592,10 @@ mod tests { false, ColumnDesc::new(LogicalType::Boolean, false, false, None).unwrap(), ); - col.summary.relation = ColumnRelation::Table { + col.summary_mut().relation = ColumnRelation::Table { column_id: Ulid::new(), table_name: Arc::new("t1".to_string()), + is_temp: false, }; let col = ColumnRef::from(col); @@ -520,14 +604,67 @@ mod tests { let (_, bytes) = TableCodec::encode_column(&col, &mut reference_tables).unwrap(); let mut cursor = Cursor::new(bytes.as_ref()); let decode_col = - TableCodec::decode_column::(&mut cursor, &reference_tables) - .unwrap(); + TableCodec::decode_column::(&mut cursor, &reference_tables)?; debug_assert_eq!(decode_col, col); Ok(()) } + #[test] + fn test_table_codec_view() -> Result<(), DatabaseError> { + let table_state = build_t1_table()?; + // Subquery + { + let plan = table_state + .plan("select * from t1 where c1 in (select c1 from t1 where c1 > 1)")?; + println!("{:#?}", plan); + let view = View { + name: Arc::new("view_subquery".to_string()), + plan: Box::new(plan), + }; + let (_, bytes) = TableCodec::encode_view(&view)?; + let transaction = table_state.storage.transaction()?; + + debug_assert_eq!( + view, + TableCodec::decode_view(&bytes, (&transaction, &table_state.table_cache))? + ); + } + // No Join + { + let plan = table_state.plan("select * from t1 where c1 > 1")?; + let view = View { + name: Arc::new("view_filter".to_string()), + plan: Box::new(plan), + }; + let (_, bytes) = TableCodec::encode_view(&view)?; + let transaction = table_state.storage.transaction()?; + + debug_assert_eq!( + view, + TableCodec::decode_view(&bytes, (&transaction, &table_state.table_cache))? + ); + } + // Join + { + let plan = table_state.plan("select * from t1 left join t2 on c1 = c3")?; + let view = View { + name: Arc::new("view_join".to_string()), + plan: Box::new(plan), + }; + let (_, bytes) = TableCodec::encode_view(&view)?; + let transaction = table_state.storage.transaction()?; + + debug_assert_eq!( + view, + TableCodec::decode_view(&bytes, (&transaction, &table_state.table_cache))? 
+ ); + } + + Ok(()) + } + #[test] fn test_table_codec_column_bound() { let mut set = BTreeSet::new(); @@ -543,9 +680,10 @@ mod tests { }, ); - col.summary.relation = ColumnRelation::Table { + col.summary_mut().relation = ColumnRelation::Table { column_id: Ulid::from(col_id as u128), table_name: Arc::new(table_name.to_string()), + is_temp: false, }; let (key, _) = @@ -663,7 +801,7 @@ mod tests { println!("{:#?}", set); - let (min, max) = TableCodec::index_bound(&table_catalog.name, &1); + let (min, max) = TableCodec::index_bound(&table_catalog.name, &1).unwrap(); println!("{:?}", min); println!("{:?}", max); @@ -771,7 +909,7 @@ mod tests { #[test] fn test_root_codec_name_bound() { let mut set = BTreeSet::new(); - let op = |table_name: &str| TableCodec::encode_root_table_key(&table_name.to_string()); + let op = |table_name: &str| TableCodec::encode_root_table_key(table_name); set.insert(b"A".to_vec()); @@ -794,4 +932,31 @@ mod tests { debug_assert_eq!(vec[1], &op("T1")); debug_assert_eq!(vec[2], &op("T2")); } + + #[test] + fn test_view_codec_name_bound() { + let mut set = BTreeSet::new(); + let op = |view_name: &str| TableCodec::encode_view_key(view_name); + + set.insert(b"A".to_vec()); + + set.insert(op("V0")); + set.insert(op("V1")); + set.insert(op("V2")); + + set.insert(b"Z".to_vec()); + + let (min, max) = TableCodec::view_bound(); + + let vec = set + .range::, (Bound<&Vec>, Bound<&Vec>)>(( + Bound::Included(&min), + Bound::Included(&max), + )) + .collect_vec(); + + debug_assert_eq!(vec[0], &op("V0")); + debug_assert_eq!(vec[1], &op("V1")); + debug_assert_eq!(vec[2], &op("V2")); + } } diff --git a/src/types/index.rs b/src/types/index.rs index 57af89f1..a64c5df7 100644 --- a/src/types/index.rs +++ b/src/types/index.rs @@ -4,7 +4,6 @@ use crate::expression::range_detacher::Range; use crate::expression::ScalarExpression; use crate::types::value::ValueRef; use crate::types::{ColumnId, LogicalType}; -use serde::{Deserialize, Serialize}; use serde_macros::ReferenceSerialization; use std::fmt; use std::fmt::Formatter; @@ -13,9 +12,7 @@ use std::sync::Arc; pub type IndexId = u32; pub type IndexMetaRef = Arc; -#[derive( - Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize, ReferenceSerialization, -)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, ReferenceSerialization)] pub enum IndexType { PrimaryKey, Unique, @@ -29,7 +26,7 @@ pub struct IndexInfo { pub(crate) range: Option, } -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize, ReferenceSerialization)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, ReferenceSerialization)] pub struct IndexMeta { pub id: IndexId, pub column_ids: Vec, diff --git a/src/types/tuple.rs b/src/types/tuple.rs index 68edcc19..54b1b427 100644 --- a/src/types/tuple.rs +++ b/src/types/tuple.rs @@ -101,7 +101,7 @@ impl Tuple { id_option: &mut Option>, projection_i: &mut usize, ) { - if tuple_columns[*projection_i].desc.is_primary { + if tuple_columns[*projection_i].desc().is_primary { let _ = id_option.replace(tuple_values[*projection_i].clone()); } *projection_i += 1; diff --git a/src/types/tuple_builder.rs b/src/types/tuple_builder.rs index 40835e4b..87bb1089 100644 --- a/src/types/tuple_builder.rs +++ b/src/types/tuple_builder.rs @@ -40,7 +40,7 @@ impl<'a> TupleBuilder<'a> { .cast(self.schema[i].datatype())?, ); - if primary_key.is_none() && self.schema[i].desc.is_primary { + if primary_key.is_none() && self.schema[i].desc().is_primary { primary_key = Some(data_value.clone()); } values.push(data_value); diff --git a/src/types/value.rs 
b/src/types/value.rs index ce316bd0..01d727b2 100644 --- a/src/types/value.rs +++ b/src/types/value.rs @@ -6,7 +6,6 @@ use lazy_static::lazy_static; use ordered_float::OrderedFloat; use rust_decimal::prelude::{FromPrimitive, ToPrimitive}; use rust_decimal::Decimal; -use serde::{Deserialize, Serialize}; use sqlparser::ast::CharLengthUnits; use std::cmp::Ordering; use std::fmt::Formatter; @@ -33,13 +32,13 @@ const ENCODE_MARKER: u8 = 0xFF; pub type ValueRef = Arc; -#[derive(Clone, Serialize, Deserialize)] +#[derive(Clone)] pub enum Utf8Type { Variable(Option), Fixed(u32), } -#[derive(Clone, Serialize, Deserialize)] +#[derive(Clone)] pub enum DataValue { Null, Boolean(Option), @@ -241,11 +240,7 @@ impl Hash for DataValue { Date64(v) => v.hash(state), Time(v) => v.hash(state), Decimal(v) => v.hash(state), - Tuple(values) => { - for v in values { - v.hash(state) - } - } + Tuple(values) => values.hash(state), } } } diff --git a/src/utils/lru.rs b/src/utils/lru.rs index 97cd073f..e22d0d03 100644 --- a/src/utils/lru.rs +++ b/src/utils/lru.rs @@ -256,6 +256,7 @@ impl LruCache { } #[inline] + #[allow(clippy::manual_inspect)] pub fn put(&mut self, key: K, value: V) -> Option { let node = NodeReadPtr(Box::leak(Box::new(Node::new(key, value))).into()); let old_node = self.inner.remove(&KeyRef(node)).map(|node| { @@ -323,9 +324,8 @@ impl LruCache { } else { let value = fn_once(&key)?; let node = NodeReadPtr(Box::leak(Box::new(Node::new(key, value))).into()); - let _ignore = self.inner.remove(&KeyRef(node)).map(|node| { + self.inner.remove(&KeyRef(node)).inspect(|&node| { self.detach(node); - node }); self.expulsion(); self.attach(node); diff --git a/tests/macros-test/src/main.rs b/tests/macros-test/src/main.rs index 4cf05949..05743dd2 100644 --- a/tests/macros-test/src/main.rs +++ b/tests/macros-test/src/main.rs @@ -178,18 +178,20 @@ mod test { true, ColumnDesc::new(LogicalType::Integer, false, false, None)?, ); - c1.summary.relation = ColumnRelation::Table { + c1.summary_mut().relation = ColumnRelation::Table { column_id: function_schema[0].id().unwrap(), table_name: table_name.clone(), + is_temp: false, }; let mut c2 = ColumnCatalog::new( "c2".to_string(), true, ColumnDesc::new(LogicalType::Integer, false, false, None)?, ); - c2.summary.relation = ColumnRelation::Table { + c2.summary_mut().relation = ColumnRelation::Table { column_id: function_schema[1].id().unwrap(), table_name: table_name.clone(), + is_temp: false, }; assert_eq!( diff --git a/tests/slt/crdb/delete.slt b/tests/slt/crdb/delete.slt index 01f88d16..32bc80a1 100644 --- a/tests/slt/crdb/delete.slt +++ b/tests/slt/crdb/delete.slt @@ -24,31 +24,31 @@ SELECT * FROM kv 5 6 7 8 -# TODO: View +# TODO: Drop View # statement ok # drop view if exists kview -# statement ok -# CREATE VIEW kview AS SELECT k,v FROM kv +statement ok +CREATE VIEW kview AS SELECT k,v FROM kv -# query II -# SELECT * FROM kview -# ---- -# 1 2 -# 3 4 -# 5 6 -# 7 8 +query II +SELECT * FROM kview +---- +1 2 +3 4 +5 6 +7 8 -# statement error 1002 -# DELETE FROM kview +statement error +DELETE FROM kview -# query II -# SELECT * FROM kview -# ---- -# 1 2 -# 3 4 -# 5 6 -# 7 8 +query II +SELECT * FROM kview +---- +1 2 +3 4 +5 6 +7 8 statement ok DELETE FROM kv WHERE k=3 OR v=6 @@ -167,7 +167,7 @@ statement ok drop table if exists family statement ok -CREATE TABLE family ( x INT PRIMARY KEY, y INT) +CREATE TABLE family ( x INT PRIMARY KEY, y INT) statement ok INSERT INTO family VALUES (1, 1), (2, 2), (3, 3) diff --git a/tests/slt/sql_2016/F031_02.slt b/tests/slt/sql_2016/F031_02.slt 
index bdc41fa5..2c3fcfd0 100644 --- a/tests/slt/sql_2016/F031_02.slt +++ b/tests/slt/sql_2016/F031_02.slt @@ -1,8 +1,7 @@ # F031-02: CREATE VIEW statement -# TODO: Support `VIEW` - statement ok CREATE TABLE TABLE_F031_02_01_01 ( ID INT PRIMARY KEY, A INTEGER ); -# CREATE VIEW VIEW_F031_02_01_01 AS SELECT A FROM TABLE_F031_02_01_01 +statement ok +CREATE VIEW VIEW_F031_02_01_01 AS SELECT A FROM TABLE_F031_02_01_01 diff --git a/tests/slt/sql_2016/F031_16.slt b/tests/slt/sql_2016/F031_16.slt index 92273d0c..92ef25dc 100644 --- a/tests/slt/sql_2016/F031_16.slt +++ b/tests/slt/sql_2016/F031_16.slt @@ -1,10 +1,11 @@ # F031-16: DROP VIEW statement: RESTRICT clause -# TODO: Support `VIEW` & `CREATE VIEW` & `DROP VIEW` +# TODO: Support `DROP VIEW` statement ok CREATE TABLE TABLE_F031_16_01_01 ( ID INT PRIMARY KEY, A INTEGER ); -# CREATE VIEW VIEW_F031_16_01_01 AS SELECT A FROM TABLE_F031_16_01_01; +statement ok +CREATE VIEW VIEW_F031_16_01_01 AS SELECT A FROM TABLE_F031_16_01_01; # DROP VIEW VIEW_F031_16_01_01 diff --git a/tests/slt/sql_2016/F081.slt b/tests/slt/sql_2016/F081.slt index 3353fda9..48d86f90 100644 --- a/tests/slt/sql_2016/F081.slt +++ b/tests/slt/sql_2016/F081.slt @@ -1,13 +1,13 @@ # F081: UNION and EXCEPT in views -# TODO: VIEW & (UNION & EXCEPT with VIEW) - statement ok CREATE TABLE TABLE_F081_01_011 ( ID INT PRIMARY KEY, A INTEGER ); statement ok CREATE TABLE TABLE_F081_01_012 ( ID INT PRIMARY KEY, A INTEGER ); +# TODO: Except +# statement ok # CREATE VIEW VIEW_F081_01_01 AS SELECT A FROM TABLE_F081_01_011 EXCEPT SELECT A FROM TABLE_F081_01_012 statement ok @@ -16,7 +16,8 @@ CREATE TABLE TABLE_F081_01_021 ( ID INT PRIMARY KEY, A INTEGER ); statement ok CREATE TABLE TABLE_F081_01_022 ( ID INT PRIMARY KEY, A INTEGER ); -# CREATE VIEW VIEW_F081_01_02 AS SELECT A FROM TABLE_F081_01_021 UNION ALL SELECT A FROM TABLE_F081_01_022 +statement ok +CREATE VIEW VIEW_F081_01_02 AS SELECT A FROM TABLE_F081_01_021 UNION ALL SELECT A FROM TABLE_F081_01_022 statement ok CREATE TABLE TABLE_F081_01_031 ( ID INT PRIMARY KEY, A INTEGER ); @@ -24,4 +25,5 @@ CREATE TABLE TABLE_F081_01_031 ( ID INT PRIMARY KEY, A INTEGER ); statement ok CREATE TABLE TABLE_F081_01_032 ( ID INT PRIMARY KEY, A INTEGER ); -# CREATE VIEW VIEW_F081_01_03 AS SELECT A FROM TABLE_F081_01_031 UNION SELECT A FROM TABLE_F081_01_032 +statement ok +CREATE VIEW VIEW_F081_01_03 AS SELECT A FROM TABLE_F081_01_031 UNION SELECT A FROM TABLE_F081_01_032 diff --git a/tests/slt/sql_2016/F131_01.slt b/tests/slt/sql_2016/F131_01.slt index 66876ea6..bc6bbe13 100644 --- a/tests/slt/sql_2016/F131_01.slt +++ b/tests/slt/sql_2016/F131_01.slt @@ -1,45 +1,55 @@ # F131-01: WHERE, GROUP BY, and HAVING clauses supported in queries with grouped views -# TODO: WHERE, GROUP BY, and HAVING clauses supported in queries with grouped views - statement ok CREATE TABLE TABLE_F131_01_01_01 ( ID INT PRIMARY KEY, A INTEGER ); -# CREATE VIEW VIEW_F131_01_01_01 AS SELECT A FROM TABLE_F131_01_01_01 GROUP BY A; +statement ok +CREATE VIEW VIEW_F131_01_01_01 AS SELECT A FROM TABLE_F131_01_01_01 GROUP BY A; -# SELECT A FROM VIEW_F131_01_01_01 +query I +SELECT A FROM VIEW_F131_01_01_01 statement ok CREATE TABLE TABLE_F131_01_01_02 ( ID INT PRIMARY KEY, A INTEGER ); -# CREATE VIEW VIEW_F131_01_01_02 AS SELECT A FROM TABLE_F131_01_01_02 GROUP BY A; +statement ok +CREATE VIEW VIEW_F131_01_01_02 AS SELECT A FROM TABLE_F131_01_01_02 GROUP BY A; -# SELECT A FROM VIEW_F131_01_01_02 GROUP BY A +query I +SELECT A FROM VIEW_F131_01_01_02 GROUP BY A statement ok CREATE TABLE 
diff --git a/tests/slt/sql_2016/F131_01.slt b/tests/slt/sql_2016/F131_01.slt
index 66876ea6..bc6bbe13 100644
--- a/tests/slt/sql_2016/F131_01.slt
+++ b/tests/slt/sql_2016/F131_01.slt
@@ -1,45 +1,55 @@
 # F131-01: WHERE, GROUP BY, and HAVING clauses supported in queries with grouped views
 
-# TODO: WHERE, GROUP BY, and HAVING clauses supported in queries with grouped views
-
 statement ok
 CREATE TABLE TABLE_F131_01_01_01 ( ID INT PRIMARY KEY, A INTEGER );
 
-# CREATE VIEW VIEW_F131_01_01_01 AS SELECT A FROM TABLE_F131_01_01_01 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_01_01_01 AS SELECT A FROM TABLE_F131_01_01_01 GROUP BY A;
 
-# SELECT A FROM VIEW_F131_01_01_01
+query I
+SELECT A FROM VIEW_F131_01_01_01
 
 statement ok
 CREATE TABLE TABLE_F131_01_01_02 ( ID INT PRIMARY KEY, A INTEGER );
 
-# CREATE VIEW VIEW_F131_01_01_02 AS SELECT A FROM TABLE_F131_01_01_02 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_01_01_02 AS SELECT A FROM TABLE_F131_01_01_02 GROUP BY A;
 
-# SELECT A FROM VIEW_F131_01_01_02 GROUP BY A
+query I
+SELECT A FROM VIEW_F131_01_01_02 GROUP BY A
 
 statement ok
 CREATE TABLE TABLE_F131_01_01_03 ( ID INT PRIMARY KEY, A INTEGER );
 
-# CREATE VIEW VIEW_F131_01_01_03 AS SELECT A FROM TABLE_F131_01_01_03 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_01_01_03 AS SELECT A FROM TABLE_F131_01_01_03 GROUP BY A;
 
-# SELECT A FROM VIEW_F131_01_01_03 GROUP BY A HAVING A = 2
+query I
+SELECT A FROM VIEW_F131_01_01_03 GROUP BY A HAVING A = 2
 
 statement ok
 CREATE TABLE TABLE_F131_01_01_04 ( ID INT PRIMARY KEY, A INTEGER );
 
-# CREATE VIEW VIEW_F131_01_01_04 AS SELECT A FROM TABLE_F131_01_01_04 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_01_01_04 AS SELECT A FROM TABLE_F131_01_01_04 GROUP BY A;
 
-# SELECT A FROM VIEW_F131_01_01_04 WHERE A = 1
+query I
+SELECT A FROM VIEW_F131_01_01_04 WHERE A = 1
 
 statement ok
 CREATE TABLE TABLE_F131_01_01_05 ( ID INT PRIMARY KEY, A INTEGER );
 
-# CREATE VIEW VIEW_F131_01_01_05 AS SELECT A FROM TABLE_F131_01_01_05 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_01_01_05 AS SELECT A FROM TABLE_F131_01_01_05 GROUP BY A;
 
-# SELECT A FROM VIEW_F131_01_01_05 WHERE A = 1 GROUP BY A
+query I
+SELECT A FROM VIEW_F131_01_01_05 WHERE A = 1 GROUP BY A
 
 statement ok
 CREATE TABLE TABLE_F131_01_01_06 ( ID INT PRIMARY KEY, A INTEGER );
 
-# CREATE VIEW VIEW_F131_01_01_06 AS SELECT A FROM TABLE_F131_01_01_06 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_01_01_06 AS SELECT A FROM TABLE_F131_01_01_06 GROUP BY A;
 
-# SELECT A FROM VIEW_F131_01_01_06 WHERE A = 1 GROUP BY A HAVING A = 2
+query I
+SELECT A FROM VIEW_F131_01_01_06 WHERE A = 1 GROUP BY A HAVING A = 2
diff --git a/tests/slt/sql_2016/F131_02.slt b/tests/slt/sql_2016/F131_02.slt
index 9fbae914..a5a75e86 100644
--- a/tests/slt/sql_2016/F131_02.slt
+++ b/tests/slt/sql_2016/F131_02.slt
@@ -1,13 +1,13 @@
 # F131-02: Multiple tables supported in queries with grouped views
 
-# TODO Multiple tables supported in queries with grouped views
-
 statement ok
 CREATE TABLE TABLE_F131_02_01_011 ( ID INT PRIMARY KEY, A INTEGER );
 
 statement ok
 CREATE TABLE TABLE_F131_02_01_012 ( ID INT PRIMARY KEY, A INTEGER );
 
-# CREATE VIEW VIEW_F131_02_01_01 AS SELECT A FROM TABLE_F131_02_01_011 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_02_01_01 AS SELECT A FROM TABLE_F131_02_01_011 GROUP BY A;
 
-# SELECT A FROM VIEW_F131_02_01_01 JOIN TABLE_F131_02_01_012 USING ( A )
+query I
+SELECT A FROM VIEW_F131_02_01_01 JOIN TABLE_F131_02_01_012 USING ( A )
diff --git a/tests/slt/sql_2016/F131_03.slt b/tests/slt/sql_2016/F131_03.slt
index 1338a3bd..e552a367 100644
--- a/tests/slt/sql_2016/F131_03.slt
+++ b/tests/slt/sql_2016/F131_03.slt
@@ -1,17 +1,22 @@
 # F131-03: Set functions supported in queries with grouped views
 
-# TODO: Set functions supported in queries with grouped views
-
 statement ok
 CREATE TABLE TABLE_F131_03_01_011 ( ID INT PRIMARY KEY, A INTEGER, B INTEGER );
 
-# CREATE VIEW VIEW_F131_03_01_01 AS SELECT A, MIN ( B ) AS C FROM TABLE_F131_03_01_011 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_03_01_01 AS SELECT A, MIN ( B ) AS C FROM TABLE_F131_03_01_011 GROUP BY A;
 
-# SELECT SUM ( C ) FROM VIEW_F131_03_01_01
+query I
+SELECT SUM ( C ) FROM VIEW_F131_03_01_01
+----
+null
 
 statement ok
 CREATE TABLE TABLE_F131_03_01_021 ( ID INT PRIMARY KEY, A INTEGER, B INTEGER );
 
-# CREATE VIEW VIEW_F131_03_01_02 AS SELECT A, MIN ( B ) AS C FROM TABLE_F131_03_01_021 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_03_01_02 AS SELECT A, MIN ( B ) AS C FROM TABLE_F131_03_01_021 GROUP BY A;
 
-# SELECT SUM ( C ) FROM VIEW_F131_03_01_02 GROUP BY A
+query II
+SELECT A, SUM ( C ) FROM VIEW_F131_03_01_02 GROUP BY A
+----
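The expected `null` under `SELECT SUM ( C )` above is standard aggregate behavior: over zero input rows, `SUM` yields NULL rather than 0 (the grouped view's base table is empty). The same rule expressed as a fold into an `Option` accumulator, as a sketch rather than the engine's actual aggregate code:

    fn sum(values: impl IntoIterator<Item = i64>) -> Option<i64> {
        // None models SQL NULL: it survives only if no row is ever seen.
        values
            .into_iter()
            .fold(None, |acc, v| Some(acc.unwrap_or(0) + v))
    }

    fn main() {
        assert_eq!(sum(std::iter::empty()), None); // zero rows -> NULL
        assert_eq!(sum([1, 2, 3]), Some(6));
    }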
diff --git a/tests/slt/sql_2016/F131_04.slt b/tests/slt/sql_2016/F131_04.slt
index 36089bc4..8b7916a5 100644
--- a/tests/slt/sql_2016/F131_04.slt
+++ b/tests/slt/sql_2016/F131_04.slt
@@ -1,14 +1,13 @@
 # F131-04: Subqueries with GROUP BY and HAVING clauses and grouped views
 
-# TODO: Subqueries with GROUP BY and HAVING clauses and grouped views
-
 statement ok
 CREATE TABLE TABLE_F131_04_01_011 ( ID INT PRIMARY KEY, A INTEGER );
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_012 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B < ( SELECT MAX ( A ) FROM TABLE_F131_04_01_011 GROUP BY A ) FROM TABLE_F131_04_01_012
+query B
+SELECT B < ( SELECT A FROM TABLE_F131_04_01_011 GROUP BY A ) FROM TABLE_F131_04_01_012
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_021 ( ID INT PRIMARY KEY, A INTEGER );
@@ -16,7 +15,8 @@ CREATE TABLE TABLE_F131_04_01_021 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_022 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B < ( SELECT MAX ( A ) FROM TABLE_F131_04_01_021 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_022
+query B
+SELECT B < ( SELECT A FROM TABLE_F131_04_01_021 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_022
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_031 ( ID INT PRIMARY KEY, A INTEGER );
@@ -24,7 +24,8 @@ CREATE TABLE TABLE_F131_04_01_031 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_032 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B <= ( SELECT MAX ( A ) FROM TABLE_F131_04_01_031 GROUP BY A ) FROM TABLE_F131_04_01_032
+query B
+SELECT B <= ( SELECT A FROM TABLE_F131_04_01_031 GROUP BY A ) FROM TABLE_F131_04_01_032
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_041 ( ID INT PRIMARY KEY, A INTEGER );
@@ -32,7 +33,8 @@ CREATE TABLE TABLE_F131_04_01_041 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_042 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B <= ( SELECT MAX ( A ) FROM TABLE_F131_04_01_041 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_042
+query B
+SELECT B <= ( SELECT A FROM TABLE_F131_04_01_041 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_042
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_051 ( ID INT PRIMARY KEY, A INTEGER );
@@ -40,7 +42,8 @@ CREATE TABLE TABLE_F131_04_01_051 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_052 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B <> ( SELECT MAX ( A ) FROM TABLE_F131_04_01_051 GROUP BY A ) FROM TABLE_F131_04_01_052
+query B
+SELECT B <> ( SELECT A FROM TABLE_F131_04_01_051 GROUP BY A ) FROM TABLE_F131_04_01_052
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_061 ( ID INT PRIMARY KEY, A INTEGER );
@@ -48,7 +51,8 @@ CREATE TABLE TABLE_F131_04_01_061 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_062 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B <> ( SELECT MAX ( A ) FROM TABLE_F131_04_01_061 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_062
+query B
+SELECT B <> ( SELECT A FROM TABLE_F131_04_01_061 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_062
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_071 ( ID INT PRIMARY KEY, A INTEGER );
@@ -56,7 +60,8 @@ CREATE TABLE TABLE_F131_04_01_071 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_072 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B = ( SELECT MAX ( A ) FROM TABLE_F131_04_01_071 GROUP BY A ) FROM TABLE_F131_04_01_072
+query B
+SELECT B = ( SELECT A FROM TABLE_F131_04_01_071 GROUP BY A ) FROM TABLE_F131_04_01_072
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_081 ( ID INT PRIMARY KEY, A INTEGER );
@@ -64,7 +69,8 @@ CREATE TABLE TABLE_F131_04_01_081 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_082 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B = ( SELECT MAX ( A ) FROM TABLE_F131_04_01_081 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_082
+query B
+SELECT B = ( SELECT A FROM TABLE_F131_04_01_081 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_082
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_091 ( ID INT PRIMARY KEY, A INTEGER );
@@ -72,7 +78,8 @@ CREATE TABLE TABLE_F131_04_01_091 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_092 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B > ( SELECT MAX ( A ) FROM TABLE_F131_04_01_091 GROUP BY A ) FROM TABLE_F131_04_01_092
+query B
+SELECT B > ( SELECT A FROM TABLE_F131_04_01_091 GROUP BY A ) FROM TABLE_F131_04_01_092
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_101 ( ID INT PRIMARY KEY, A INTEGER );
@@ -80,7 +87,8 @@ CREATE TABLE TABLE_F131_04_01_101 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_102 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B > ( SELECT MAX ( A ) FROM TABLE_F131_04_01_101 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_102
+query B
+SELECT B > ( SELECT A FROM TABLE_F131_04_01_101 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_102
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_111 ( ID INT PRIMARY KEY, A INTEGER );
@@ -88,7 +96,8 @@ CREATE TABLE TABLE_F131_04_01_111 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_112 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B >= ( SELECT MAX ( A ) FROM TABLE_F131_04_01_111 GROUP BY A ) FROM TABLE_F131_04_01_112
+query B
+SELECT B >= ( SELECT A FROM TABLE_F131_04_01_111 GROUP BY A ) FROM TABLE_F131_04_01_112
 
 statement ok
 CREATE TABLE TABLE_F131_04_01_121 ( ID INT PRIMARY KEY, A INTEGER );
@@ -96,7 +105,8 @@ CREATE TABLE TABLE_F131_04_01_121 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_01_122 ( ID INT PRIMARY KEY, B INTEGER );
 
-# SELECT B >= ( SELECT MAX ( A ) FROM TABLE_F131_04_01_121 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_122
+query B
+SELECT B >= ( SELECT A FROM TABLE_F131_04_01_121 GROUP BY A HAVING A = 5 ) FROM TABLE_F131_04_01_122
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_011 ( ID INT PRIMARY KEY, A INTEGER );
@@ -104,9 +114,11 @@ CREATE TABLE TABLE_F131_04_02_011 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_012 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_01 AS SELECT A FROM TABLE_F131_04_02_011 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_04_02_01 AS SELECT A FROM TABLE_F131_04_02_011 GROUP BY A;
 
-# SELECT B < ( SELECT MAX ( A ) FROM VIEW_F131_04_02_01 ) FROM TABLE_F131_04_02_012
+query B
+SELECT B < ( SELECT A FROM VIEW_F131_04_02_01 ) FROM TABLE_F131_04_02_012
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_021 ( ID INT PRIMARY KEY, A INTEGER );
@@ -114,9 +126,11 @@ CREATE TABLE TABLE_F131_04_02_021 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_022 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_02 AS SELECT A FROM TABLE_F131_04_02_021 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_04_02_02 AS SELECT A FROM TABLE_F131_04_02_021 GROUP BY A;
 
-# SELECT B <= ( SELECT MAX ( A ) FROM VIEW_F131_04_02_02 ) FROM TABLE_F131_04_02_022
+query B
+SELECT B <= ( SELECT A FROM VIEW_F131_04_02_02 ) FROM TABLE_F131_04_02_022
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_031 ( ID INT PRIMARY KEY, A INTEGER );
@@ -124,9 +138,11 @@ CREATE TABLE TABLE_F131_04_02_031 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_032 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_03 AS SELECT A FROM TABLE_F131_04_02_031 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_04_02_03 AS SELECT A FROM TABLE_F131_04_02_031 GROUP BY A;
 
-# SELECT B <> ( SELECT MAX ( A ) FROM VIEW_F131_04_02_03 ) FROM TABLE_F131_04_02_032
+query B
+SELECT B <> ( SELECT A FROM VIEW_F131_04_02_03 ) FROM TABLE_F131_04_02_032
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_041 ( ID INT PRIMARY KEY, A INTEGER );
@@ -134,9 +150,11 @@ CREATE TABLE TABLE_F131_04_02_041 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_042 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_04 AS SELECT A FROM TABLE_F131_04_02_041 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_04_02_04 AS SELECT A FROM TABLE_F131_04_02_041 GROUP BY A;
 
-# SELECT B = ( SELECT MAX ( A ) FROM VIEW_F131_04_02_04 ) FROM TABLE_F131_04_02_042
+query B
+SELECT B = ( SELECT A FROM VIEW_F131_04_02_04 ) FROM TABLE_F131_04_02_042
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_051 ( ID INT PRIMARY KEY, A INTEGER );
@@ -144,9 +162,11 @@ CREATE TABLE TABLE_F131_04_02_051 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_052 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_05 AS SELECT A FROM TABLE_F131_04_02_051 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_04_02_05 AS SELECT A FROM TABLE_F131_04_02_051 GROUP BY A;
 
-# SELECT B > ( SELECT MAX ( A ) FROM VIEW_F131_04_02_05 ) FROM TABLE_F131_04_02_052
+query B
+SELECT B > ( SELECT A FROM VIEW_F131_04_02_05 ) FROM TABLE_F131_04_02_052
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_061 ( ID INT PRIMARY KEY, A INTEGER );
@@ -154,9 +174,11 @@ CREATE TABLE TABLE_F131_04_02_061 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_062 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_06 AS SELECT A FROM TABLE_F131_04_02_061 GROUP BY A;
+statement ok
+CREATE VIEW VIEW_F131_04_02_06 AS SELECT A FROM TABLE_F131_04_02_061 GROUP BY A;
 
-# SELECT B >= ( SELECT MAX ( A ) FROM VIEW_F131_04_02_06 ) FROM TABLE_F131_04_02_062
+query B
+SELECT B >= ( SELECT A FROM VIEW_F131_04_02_06 ) FROM TABLE_F131_04_02_062
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_071 ( ID INT PRIMARY KEY, A INTEGER );
@@ -164,9 +186,11 @@ CREATE TABLE TABLE_F131_04_02_071 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_072 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_07 AS SELECT A FROM TABLE_F131_04_02_071 GROUP BY A HAVING A = 5;
+statement ok
+CREATE VIEW VIEW_F131_04_02_07 AS SELECT A FROM TABLE_F131_04_02_071 GROUP BY A HAVING A = 5;
 
-# SELECT B < ( SELECT MAX ( A ) FROM VIEW_F131_04_02_07 ) FROM TABLE_F131_04_02_072
+query B
+SELECT B < ( SELECT A FROM VIEW_F131_04_02_07 ) FROM TABLE_F131_04_02_072
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_081 ( ID INT PRIMARY KEY, A INTEGER );
@@ -174,9 +198,11 @@ CREATE TABLE TABLE_F131_04_02_081 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_082 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_08 AS SELECT A FROM TABLE_F131_04_02_081 GROUP BY A HAVING A = 5;
+statement ok
+CREATE VIEW VIEW_F131_04_02_08 AS SELECT A FROM TABLE_F131_04_02_081 GROUP BY A HAVING A = 5;
 
-# SELECT B <= ( SELECT MAX ( A ) FROM VIEW_F131_04_02_08 ) FROM TABLE_F131_04_02_082
+query B
+SELECT B <= ( SELECT A FROM VIEW_F131_04_02_08 ) FROM TABLE_F131_04_02_082
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_091 ( ID INT PRIMARY KEY, A INTEGER );
@@ -184,9 +210,11 @@ CREATE TABLE TABLE_F131_04_02_091 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_092 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_09 AS SELECT A FROM TABLE_F131_04_02_091 GROUP BY A HAVING A = 5;
+statement ok
+CREATE VIEW VIEW_F131_04_02_09 AS SELECT A FROM TABLE_F131_04_02_091 GROUP BY A HAVING A = 5;
 
-# SELECT B <> ( SELECT MAX ( A ) FROM VIEW_F131_04_02_09 ) FROM TABLE_F131_04_02_092
+query B
+SELECT B <> ( SELECT A FROM VIEW_F131_04_02_09 ) FROM TABLE_F131_04_02_092
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_101 ( ID INT PRIMARY KEY, A INTEGER );
@@ -194,9 +222,11 @@ CREATE TABLE TABLE_F131_04_02_101 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_102 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_10 AS SELECT A FROM TABLE_F131_04_02_101 GROUP BY A HAVING A = 5;
+statement ok
+CREATE VIEW VIEW_F131_04_02_10 AS SELECT A FROM TABLE_F131_04_02_101 GROUP BY A HAVING A = 5;
 
-# SELECT B = ( SELECT MAX ( A ) FROM VIEW_F131_04_02_10 ) FROM TABLE_F131_04_02_102
+query B
+SELECT B = ( SELECT A FROM VIEW_F131_04_02_10 ) FROM TABLE_F131_04_02_102
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_111 ( ID INT PRIMARY KEY, A INTEGER );
@@ -204,9 +234,11 @@ CREATE TABLE TABLE_F131_04_02_111 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_112 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_11 AS SELECT A FROM TABLE_F131_04_02_111 GROUP BY A HAVING A = 5;
+statement ok
+CREATE VIEW VIEW_F131_04_02_11 AS SELECT A FROM TABLE_F131_04_02_111 GROUP BY A HAVING A = 5;
 
-# SELECT B > ( SELECT MAX ( A ) FROM VIEW_F131_04_02_11 ) FROM TABLE_F131_04_02_112
+query B
+SELECT B > ( SELECT A FROM VIEW_F131_04_02_11 ) FROM TABLE_F131_04_02_112
 
 statement ok
 CREATE TABLE TABLE_F131_04_02_121 ( ID INT PRIMARY KEY, A INTEGER );
@@ -214,6 +246,8 @@ CREATE TABLE TABLE_F131_04_02_121 ( ID INT PRIMARY KEY, A INTEGER );
 statement ok
 CREATE TABLE TABLE_F131_04_02_122 ( ID INT PRIMARY KEY, B INTEGER );
 
-# CREATE VIEW VIEW_F131_04_02_12 AS SELECT A FROM TABLE_F131_04_02_121 GROUP BY A HAVING A = 5;
+statement ok
+CREATE VIEW VIEW_F131_04_02_12 AS SELECT A FROM TABLE_F131_04_02_121 GROUP BY A HAVING A = 5;
 
-# SELECT B >= ( SELECT MAX ( A ) FROM VIEW_F131_04_02_12 ) FROM TABLE_F131_04_02_122
+query B
+SELECT B >= ( SELECT A FROM VIEW_F131_04_02_12 ) FROM TABLE_F131_04_02_122
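Every F131-04 case compares `B` against a scalar subquery, and the patch rewrites `MAX ( A )` to a bare `A`. The comparison is only well-defined while the subquery yields at most one row; on these empty tables both forms produce zero rows, which a scalar context treats as NULL. A toy evaluator for that rule (a sketch of standard SQL semantics, not the engine's code):

    // Zero rows -> NULL; one row -> that value; more than one row is an
    // error in a scalar context under standard SQL.
    fn scalar_subquery(rows: &[i64]) -> Result<Option<i64>, &'static str> {
        match rows {
            [] => Ok(None),
            [v] => Ok(Some(*v)),
            _ => Err("scalar subquery returned more than one row"),
        }
    }

    fn main() {
        assert_eq!(scalar_subquery(&[]), Ok(None)); // comparison yields NULL
        assert_eq!(scalar_subquery(&[5]), Ok(Some(5)));
        assert!(scalar_subquery(&[1, 2]).is_err());
    }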
diff --git a/tests/slt/view.slt b/tests/slt/view.slt
new file mode 100644
index 00000000..a62c9764
--- /dev/null
+++ b/tests/slt/view.slt
@@ -0,0 +1,64 @@
+statement ok
+create table t1(id int primary key, a int, b int, c int)
+
+statement ok
+insert into t1 values(0, 0, 0, 0)
+
+statement ok
+insert into t1 values(1, 1, 1, 1)
+
+statement ok
+insert into t1 values(2, 2, 2, 2)
+
+statement ok
+create view v1 as select * from t1
+
+statement error
+create view v1 (c0, c1, c2) as select * from t1
+
+statement ok
+create or replace view v1 (c0, c1, c2) as select * from t1
+
+statement ok
+create view v2 as select * from t1 where a != 1
+
+query III
+select * from v1
+----
+0 0 0
+1 1 1
+2 2 2
+
+query IIII
+select * from v2
+----
+0 0 0 0
+2 2 2 2
+
+query IIIIIII
+select * from v1 left join v2
+----
+0 0 0 0 0 0 0
+0 0 0 2 2 2 2
+1 1 1 0 0 0 0
+1 1 1 2 2 2 2
+2 2 2 0 0 0 0
+2 2 2 2 2 2 2
+
+query IIIIIII
+select * from v1 left join v2 where v1.c0 = v2.id
+----
+0 0 0 0 0 0 0
+2 2 2 2 2 2 2
+
+query I rowsort
+select sum(c0) from v1
+----
+3
+
+query II rowsort
+select c1, sum(c0) from v1 group by c1
+----
+0 0
+1 1
+2 2
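The new view.slt pins down the create-versus-replace contract: a second plain `CREATE VIEW v1` is rejected, while `CREATE OR REPLACE VIEW` rebinds the name (here also renaming the exposed columns to `c0`, `c1`, `c2`). The name-collision rule in miniature (a toy catalog, not FnckSQL's executor):

    use std::collections::HashMap;

    // Plain CREATE fails on a duplicate name; CREATE OR REPLACE overwrites
    // the stored definition.
    fn create_view(
        views: &mut HashMap<String, String>,
        name: &str,
        query: &str,
        or_replace: bool,
    ) -> Result<(), String> {
        if views.contains_key(name) && !or_replace {
            return Err(format!("view `{name}` already exists"));
        }
        views.insert(name.to_string(), query.to_string());
        Ok(())
    }

    fn main() {
        let mut views = HashMap::new();
        create_view(&mut views, "v1", "select * from t1", false).unwrap();
        // Mirrors the `statement error` case: duplicate plain CREATE fails.
        assert!(create_view(&mut views, "v1", "select * from t1", false).is_err());
        // Mirrors `create or replace view v1 (c0, c1, c2) ...`: replace succeeds.
        create_view(&mut views, "v1", "select * from t1", true).unwrap();
    }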