From d05d70d949cfb4c0baa58de40c3e439fedf7f016 Mon Sep 17 00:00:00 2001 From: Andy Pfister Date: Tue, 15 Oct 2024 11:09:21 +0200 Subject: [PATCH 1/4] Move `insert` to client class --- ext/tiny_tds/client.c | 198 ++++++++++++++++++++++++++++++++++++------ ext/tiny_tds/result.c | 34 -------- test/client_test.rb | 49 +++++++++++ test/result_test.rb | 36 +------- 4 files changed, 223 insertions(+), 94 deletions(-) diff --git a/ext/tiny_tds/client.c b/ext/tiny_tds/client.c index 03a1b4b5..fa9547f0 100644 --- a/ext/tiny_tds/client.c +++ b/ext/tiny_tds/client.c @@ -15,12 +15,6 @@ VALUE opt_escape_regex, opt_escape_dblquote; tinytds_client_wrapper *cwrap; \ Data_Get_Struct(self, tinytds_client_wrapper, cwrap) -#define REQUIRE_OPEN_CLIENT(cwrap) \ - if (cwrap->closed || cwrap->userdata->closed) { \ - rb_raise(cTinyTdsError, "closed connection"); \ - return Qnil; \ - } - // Lib Backend (Helpers) @@ -55,6 +49,128 @@ VALUE rb_tinytds_raise_error(DBPROCESS *dbproc, tinytds_errordata error) { return Qnil; } +static void rb_tinytds_client_reset_userdata(tinytds_client_userdata *userdata) { + userdata->timing_out = 0; + userdata->dbsql_sent = 0; + userdata->dbsqlok_sent = 0; + userdata->dbcancel_sent = 0; + userdata->nonblocking = 0; + // the following is mainly done for consistency since the values are reset accordingly in nogvl_setup/cleanup. + // the nonblocking_errors array does not need to be freed here. That is done as part of nogvl_cleanup. 
+ userdata->nonblocking_errors_length = 0; + userdata->nonblocking_errors_size = 0; +} + +static VALUE rb_tinytds_send_sql_to_server(tinytds_client_wrapper *cwrap, VALUE sql) { + rb_tinytds_client_reset_userdata(cwrap->userdata); + + if (cwrap->closed || cwrap->userdata->closed) { \ + rb_raise(cTinyTdsError, "closed connection"); \ + return Qnil; \ + } + + dbcmd(cwrap->client, StringValueCStr(sql)); + if (dbsqlsend(cwrap->client) == FAIL) { + rb_raise(cTinyTdsError, "failed dbsqlsend() function"); + } + + cwrap->userdata->dbsql_sent = 1; +} + +// code part used to invoke FreeTDS functions with releasing the Ruby GVL +// basically, while FreeTDS is interacting with the SQL server, other Ruby code can be executed +#define NOGVL_DBCALL(_dbfunction, _client) ( \ + (RETCODE)(intptr_t)rb_thread_call_without_gvl( \ + (void *(*)(void *))_dbfunction, _client, \ + (rb_unblock_function_t*)dbcancel_ubf, _client ) \ +) + +static void dbcancel_ubf(DBPROCESS *client) { + GET_CLIENT_USERDATA(client); + dbcancel(client); + userdata->dbcancel_sent = 1; +} + +static void nogvl_setup(DBPROCESS *client) { + GET_CLIENT_USERDATA(client); + userdata->nonblocking = 1; + userdata->nonblocking_errors_length = 0; + userdata->nonblocking_errors = malloc(ERRORS_STACK_INIT_SIZE * sizeof(tinytds_errordata)); + userdata->nonblocking_errors_size = ERRORS_STACK_INIT_SIZE; +} + +static void nogvl_cleanup(DBPROCESS *client) { + GET_CLIENT_USERDATA(client); + userdata->nonblocking = 0; + userdata->timing_out = 0; + /* + Now that the blocking operation is done, we can finally throw any + exceptions based on errors from SQL Server. 
+ */ + short int i; + for (i = 0; i < userdata->nonblocking_errors_length; i++) { + tinytds_errordata error = userdata->nonblocking_errors[i]; + + // lookahead to drain any info messages ahead of raising error + if (!error.is_message) { + short int j; + for (j = i; j < userdata->nonblocking_errors_length; j++) { + tinytds_errordata msg_error = userdata->nonblocking_errors[j]; + if (msg_error.is_message) { + rb_tinytds_raise_error(client, msg_error); + } + } + } + + rb_tinytds_raise_error(client, error); + } + + free(userdata->nonblocking_errors); + userdata->nonblocking_errors_length = 0; + userdata->nonblocking_errors_size = 0; +} + +static RETCODE nogvl_dbnextrow(DBPROCESS * client) { + int retcode = FAIL; + nogvl_setup(client); + retcode = NOGVL_DBCALL(dbnextrow, client); + nogvl_cleanup(client); + return retcode; +} + +static RETCODE nogvl_dbresults(DBPROCESS *client) { + int retcode = FAIL; + nogvl_setup(client); + retcode = NOGVL_DBCALL(dbresults, client); + nogvl_cleanup(client); + return retcode; +} + +static RETCODE nogvl_dbsqlexec(DBPROCESS *client) { + int retcode = FAIL; + nogvl_setup(client); + retcode = NOGVL_DBCALL(dbsqlexec, client); + nogvl_cleanup(client); + return retcode; +} + +static RETCODE nogvl_dbsqlok(DBPROCESS *client) { + int retcode = FAIL; + GET_CLIENT_USERDATA(client); + nogvl_setup(client); + retcode = NOGVL_DBCALL(dbsqlok, client); + nogvl_cleanup(client); + userdata->dbsqlok_sent = 1; + return retcode; +} + +static RETCODE rb_tinytds_result_ok_helper(DBPROCESS *client) { + GET_CLIENT_USERDATA(client); + if (userdata->dbsqlok_sent == 0) { + userdata->dbsqlok_retcode = nogvl_dbsqlok(client); + } + return userdata->dbsqlok_retcode; +} // Lib Backend (Memory Management & Handlers) static void push_userdata_error(tinytds_client_userdata *userdata, tinytds_errordata error) { @@ -207,18 +323,6 @@ static int handle_interrupt(void *ptr) { return INT_CONTINUE; } -static void rb_tinytds_client_reset_userdata(tinytds_client_userdata *userdata) 
{ - userdata->timing_out = 0; - userdata->dbsql_sent = 0; - userdata->dbsqlok_sent = 0; - userdata->dbcancel_sent = 0; - userdata->nonblocking = 0; - // the following is mainly done for consistency since the values are reset accordingly in nogvl_setup/cleanup. - // the nonblocking_errors array does not need to be freed here. That is done as part of nogvl_cleanup. - userdata->nonblocking_errors_length = 0; - userdata->nonblocking_errors_size = 0; -} - static void rb_tinytds_client_mark(void *ptr) { tinytds_client_wrapper *cwrap = (tinytds_client_wrapper *)ptr; if (cwrap) { @@ -295,13 +399,7 @@ static VALUE rb_tinytds_execute(VALUE self, VALUE sql) { VALUE result; GET_CLIENT_WRAPPER(self); - rb_tinytds_client_reset_userdata(cwrap->userdata); - REQUIRE_OPEN_CLIENT(cwrap); - dbcmd(cwrap->client, StringValueCStr(sql)); - if (dbsqlsend(cwrap->client) == FAIL) { - rb_raise(cTinyTdsError, "failed dbsqlsend() function"); - } - cwrap->userdata->dbsql_sent = 1; + rb_tinytds_send_sql_to_server(cwrap, sql); result = rb_tinytds_new_result_obj(cwrap); rb_iv_set(result, "@query_options", rb_funcall(rb_iv_get(self, "@query_options"), intern_dup, 0)); { @@ -312,6 +410,55 @@ static VALUE rb_tinytds_execute(VALUE self, VALUE sql) { } } +static VALUE rb_tiny_tds_insert(VALUE self, VALUE sql) { + VALUE identity = Qnil; + GET_CLIENT_WRAPPER(self); + rb_tinytds_send_sql_to_server(cwrap, sql); + + RETCODE dbsqlok_rc = rb_tinytds_result_ok_helper(cwrap->client); + if (dbsqlok_rc == SUCCEED) { + /* + This is to just process each result set. Commands such as backup and + restore are not done when the first result set is returned, so we need to + exhaust the result sets before it is complete. + */ + while (nogvl_dbresults(cwrap->client) == SUCCEED) { + /* + If we don't loop through each row for calls to TinyTds::Result.do that + actually do return result sets, we will trigger error 20019 about trying + to execute a new command with pending results. Oh well. 
+ */ + while (nogvl_dbnextrow(cwrap->client) != NO_MORE_ROWS); + } + } + + dbcancel(cwrap->client); + cwrap->userdata->dbcancel_sent = 1; + cwrap->userdata->dbsql_sent = 0; + + // prepare second query to fetch last identity + dbcmd(cwrap->client, cwrap->identity_insert_sql); + + if ( + nogvl_dbsqlexec(cwrap->client) != FAIL + && nogvl_dbresults(cwrap->client) != FAIL + && DBROWS(cwrap->client) != FAIL + ) { + while (nogvl_dbnextrow(cwrap->client) != NO_MORE_ROWS) { + int col = 1; + BYTE *data = dbdata(cwrap->client, col); + DBINT data_len = dbdatlen(cwrap->client, col); + int null_val = ((data == NULL) && (data_len == 0)); + + if (!null_val) { + identity = LL2NUM(*(DBBIGINT *)data); + } + } + } + + return identity; +} + static VALUE rb_tinytds_charset(VALUE self) { GET_CLIENT_WRAPPER(self); return cwrap->charset; @@ -451,6 +598,7 @@ void init_tinytds_client() { rb_define_method(cTinyTdsClient, "dead?", rb_tinytds_dead, 0); rb_define_method(cTinyTdsClient, "sqlsent?", rb_tinytds_sqlsent, 0); rb_define_method(cTinyTdsClient, "execute", rb_tinytds_execute, 1); + rb_define_method(cTinyTdsClient, "insert", rb_tiny_tds_insert, 1); rb_define_method(cTinyTdsClient, "charset", rb_tinytds_charset, 0); rb_define_method(cTinyTdsClient, "encoding", rb_tinytds_encoding, 0); rb_define_method(cTinyTdsClient, "escape", rb_tinytds_escape, 1); diff --git a/ext/tiny_tds/result.c b/ext/tiny_tds/result.c index fc17ce07..28f853c0 100644 --- a/ext/tiny_tds/result.c +++ b/ext/tiny_tds/result.c @@ -132,14 +132,6 @@ static RETCODE nogvl_dbsqlok(DBPROCESS *client) { return retcode; } -static RETCODE nogvl_dbsqlexec(DBPROCESS *client) { - int retcode = FAIL; - nogvl_setup(client); - retcode = NOGVL_DBCALL(dbsqlexec, client); - nogvl_cleanup(client); - return retcode; -} - static RETCODE nogvl_dbresults(DBPROCESS *client) { int retcode = FAIL; nogvl_setup(client); @@ -543,31 +535,6 @@ static VALUE rb_tinytds_result_return_code(VALUE self) { } } -static VALUE rb_tinytds_result_insert(VALUE self) 
{ - GET_RESULT_WRAPPER(self); - if (rwrap->client) { - VALUE identity = Qnil; - rb_tinytds_result_exec_helper(rwrap->client); - dbcmd(rwrap->client, rwrap->cwrap->identity_insert_sql); - if (nogvl_dbsqlexec(rwrap->client) != FAIL - && nogvl_dbresults(rwrap->client) != FAIL - && DBROWS(rwrap->client) != FAIL) { - while (nogvl_dbnextrow(rwrap->client) != NO_MORE_ROWS) { - int col = 1; - BYTE *data = dbdata(rwrap->client, col); - DBINT data_len = dbdatlen(rwrap->client, col); - int null_val = ((data == NULL) && (data_len == 0)); - if (!null_val) - identity = LL2NUM(*(DBBIGINT *)data); - } - } - return identity; - } else { - return Qnil; - } -} - - // Lib Init void init_tinytds_result() { @@ -584,7 +551,6 @@ void init_tinytds_result() { rb_define_method(cTinyTdsResult, "do", rb_tinytds_result_do, 0); rb_define_method(cTinyTdsResult, "affected_rows", rb_tinytds_result_affected_rows, 0); rb_define_method(cTinyTdsResult, "return_code", rb_tinytds_result_return_code, 0); - rb_define_method(cTinyTdsResult, "insert", rb_tinytds_result_insert, 0); /* Intern String Helpers */ intern_new = rb_intern("new"); intern_utc = rb_intern("utc"); diff --git a/test/client_test.rb b/test/client_test.rb index fff406f6..3083afce 100644 --- a/test/client_test.rb +++ b/test/client_test.rb @@ -269,4 +269,53 @@ class ClientTest < TinyTds::TestCase ).must_equal 'user' end end + + describe "#insert" do + before do + @client = new_connection + end + + it 'has an #insert method that cancels result rows and returns IDENTITY natively' do + rollback_transaction(@client) do + text = 'test scope identity rows native' + @client.execute("DELETE FROM [datatypes] WHERE [varchar_50] = '#{text}'").do + @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')").do + sql_identity = @client.execute(@client.identity_sql).each.first['Ident'] + native_identity = @client.insert("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')") + + assert_equal(sql_identity + 1, native_identity) + 
assert_client_works(@client) + end + end + + it 'returns bigint for #insert when needed' do + return if sqlserver_azure? # We can not alter clustered index like this test does. + # 'CREATE TABLE' command is not allowed within a multi-statement transaction + # and and sp_helpindex creates a temporary table #spindtab. + + rollback_transaction(@client) do + seed = 9223372036854775805 + @client.execute("DELETE FROM [datatypes]").do + id_constraint_name = @client.execute("EXEC sp_helpindex [datatypes]").detect { |row| row['index_keys'] == 'id' }['index_name'] + @client.execute("ALTER TABLE [datatypes] DROP CONSTRAINT [#{id_constraint_name}]").do + @client.execute("ALTER TABLE [datatypes] DROP COLUMN [id]").do + @client.execute("ALTER TABLE [datatypes] ADD [id] [bigint] NOT NULL IDENTITY(1,1) PRIMARY KEY").do + @client.execute("DBCC CHECKIDENT ('datatypes', RESEED, #{seed})").do + identity = @client.insert("INSERT INTO [datatypes] ([varchar_50]) VALUES ('something')") + + assert_equal(seed, identity) + assert_client_works(@client) + end + end + + it "throws an error if client is closed" do + @client.close + assert @client.closed? 
+ + action = lambda { @client.insert('SELECT 1 as [one]') } + assert_raise_tinytds_error(action) do |e| + assert_match %r{closed connection}i, e.message + end + end + end end diff --git a/test/result_test.rb b/test/result_test.rb index 1d054f92..2b771f9e 100644 --- a/test/result_test.rb +++ b/test/result_test.rb @@ -170,34 +170,6 @@ class ResultTest < TinyTds::TestCase end end - it 'has an #insert method that cancels result rows and returns IDENTITY natively' do - rollback_transaction(@client) do - text = 'test scope identity rows native' - @client.execute("DELETE FROM [datatypes] WHERE [varchar_50] = '#{text}'").do - @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')").do - sql_identity = @client.execute(@client.identity_sql).each.first['Ident'] - native_identity = @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')").insert - assert_equal sql_identity + 1, native_identity - end - end - - it 'returns bigint for #insert when needed' do - return if sqlserver_azure? # We can not alter clustered index like this test does. - # 'CREATE TABLE' command is not allowed within a multi-statement transaction - # and and sp_helpindex creates a temporary table #spindtab. 
- rollback_transaction(@client) do - seed = 9223372036854775805 - @client.execute("DELETE FROM [datatypes]").do - id_constraint_name = @client.execute("EXEC sp_helpindex [datatypes]").detect { |row| row['index_keys'] == 'id' }['index_name'] - @client.execute("ALTER TABLE [datatypes] DROP CONSTRAINT [#{id_constraint_name}]").do - @client.execute("ALTER TABLE [datatypes] DROP COLUMN [id]").do - @client.execute("ALTER TABLE [datatypes] ADD [id] [bigint] NOT NULL IDENTITY(1,1) PRIMARY KEY").do - @client.execute("DBCC CHECKIDENT ('datatypes', RESEED, #{seed})").do - identity = @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('something')").insert - assert_equal seed, identity - end - end - it 'must be able to begin/commit transactions with raw sql' do rollback_transaction(@client) do @client.execute("BEGIN TRANSACTION").do @@ -292,12 +264,6 @@ class ResultTest < TinyTds::TestCase @client.execute(@query1).do _(@client.sqlsent?).must_equal false _(@client.canceled?).must_equal true - # With insert method. 
- rollback_transaction(@client) do - @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('test')").insert - _(@client.sqlsent?).must_equal false - _(@client.canceled?).must_equal true - end # With first @client.execute("SELECT [id] FROM [datatypes]").each(:first => true) _(@client.sqlsent?).must_equal false @@ -712,7 +678,7 @@ def assert_followup_query def insert_and_select_datatype(datatype) rollback_transaction(@client) do @client.execute("DELETE FROM [datatypes] WHERE [#{datatype}] IS NOT NULL").do - id = @client.execute("INSERT INTO [datatypes] ([#{datatype}]) VALUES (N'#{@big_text}')").insert + id = @client.insert("INSERT INTO [datatypes] ([#{datatype}]) VALUES (N'#{@big_text}')") found_text = find_value id, datatype flunk "Large #{datatype} data with a length of #{@big_text.length} did not match found text with length of #{found_text.length}" unless @big_text == found_text end From 0598fdefbaa654b5d9575f076fac04693e3f181d Mon Sep 17 00:00:00 2001 From: Andy Pfister Date: Tue, 15 Oct 2024 11:31:56 +0200 Subject: [PATCH 2/4] Move `do` to client class --- ext/tiny_tds/client.c | 86 ++++++++++++++++++++++++----------------- ext/tiny_tds/result.c | 34 ----------------- test/client_test.rb | 67 +++++++++++++++++++++++--------- test/result_test.rb | 89 ++++++++++++++----------------------------- test/schema_test.rb | 2 +- test/test_helper.rb | 38 +++++++++--------- test/thread_test.rb | 2 +- 7 files changed, 151 insertions(+), 167 deletions(-) diff --git a/ext/tiny_tds/client.c b/ext/tiny_tds/client.c index fa9547f0..650ff8cd 100644 --- a/ext/tiny_tds/client.c +++ b/ext/tiny_tds/client.c @@ -61,22 +61,6 @@ static void rb_tinytds_client_reset_userdata(tinytds_client_userdata *userdata) userdata->nonblocking_errors_size = 0; } -static VALUE rb_tinytds_send_sql_to_server(tinytds_client_wrapper *cwrap, VALUE sql) { - rb_tinytds_client_reset_userdata(cwrap->userdata); - - if (cwrap->closed || cwrap->userdata->closed) { \ - rb_raise(cTinyTdsError, "closed 
connection"); \ - return Qnil; \ - } - - dbcmd(cwrap->client, StringValueCStr(sql)); - if (dbsqlsend(cwrap->client) == FAIL) { - rb_raise(cTinyTdsError, "failed dbsqlsend() function"); - } - - cwrap->userdata->dbsql_sent = 1; -} - // code part used to invoke FreeTDS functions with releasing the Ruby GVL // basically, while FreeTDS is interacting with the SQL server, other Ruby code can be executed #define NOGVL_DBCALL(_dbfunction, _client) ( \ @@ -164,14 +148,55 @@ static RETCODE nogvl_dbsqlok(DBPROCESS *client) { return retcode; } -static RETCODE rb_tinytds_result_ok_helper(DBPROCESS *client) { +// some additional helpers interacting with the SQL server +static VALUE rb_tinytds_send_sql_to_server(tinytds_client_wrapper *cwrap, VALUE sql) { + rb_tinytds_client_reset_userdata(cwrap->userdata); + + if (cwrap->closed || cwrap->userdata->closed) { \ + rb_raise(cTinyTdsError, "closed connection"); \ + return Qnil; \ + } + + dbcmd(cwrap->client, StringValueCStr(sql)); + if (dbsqlsend(cwrap->client) == FAIL) { + rb_raise(cTinyTdsError, "failed dbsqlsend() function"); + } + + cwrap->userdata->dbsql_sent = 1; +} + +static RETCODE rb_tiny_tds_client_ok_helper(DBPROCESS *client) { GET_CLIENT_USERDATA(client); if (userdata->dbsqlok_sent == 0) { userdata->dbsqlok_retcode = nogvl_dbsqlok(client); } + return userdata->dbsqlok_retcode; } +static void rb_tinytds_result_exec_helper(DBPROCESS *client) { + RETCODE dbsqlok_rc = rb_tiny_tds_client_ok_helper(client); + GET_CLIENT_USERDATA(client); + if (dbsqlok_rc == SUCCEED) { + /* + This is to just process each result set. Commands such as backup and + restore are not done when the first result set is returned, so we need to + exhaust the result sets before it is complete. + */ + while (nogvl_dbresults(client) == SUCCEED) { + /* + If we don't loop through each row for calls to TinyTds::Client.do that + actually do return result sets, we will trigger error 20019 about trying + to execute a new command with pending results. Oh well. 
+ */ + while (dbnextrow(client) != NO_MORE_ROWS); + } + } + dbcancel(client); + userdata->dbcancel_sent = 1; + userdata->dbsql_sent = 0; +} + // Lib Backend (Memory Management & Handlers) static void push_userdata_error(tinytds_client_userdata *userdata, tinytds_errordata error) { // reallocate memory for the array as needed @@ -414,23 +439,7 @@ static VALUE rb_tiny_tds_insert(VALUE self, VALUE sql) { VALUE identity = Qnil; GET_CLIENT_WRAPPER(self); rb_tinytds_send_sql_to_server(cwrap, sql); - - RETCODE dbsqlok_rc = rb_tinytds_result_ok_helper(cwrap->client); - if (dbsqlok_rc == SUCCEED) { - /* - This is to just process each result set. Commands such as backup and - restore are not done when the first result set is returned, so we need to - exhaust the result sets before it is complete. - */ - while (nogvl_dbresults(cwrap->client) == SUCCEED) { - /* - If we don't loop through each row for calls to TinyTds::Result.do that - actually do return result sets, we will trigger error 20019 about trying - to execute a new command with pending results. Oh well. 
- */ - while (nogvl_dbnextrow(cwrap->client) != NO_MORE_ROWS); - } - } + rb_tinytds_result_exec_helper(cwrap->client); dbcancel(cwrap->client); cwrap->userdata->dbcancel_sent = 1; @@ -459,6 +468,14 @@ static VALUE rb_tiny_tds_insert(VALUE self, VALUE sql) { return identity; } +static VALUE rb_tiny_tds_do(VALUE self, VALUE sql) { + GET_CLIENT_WRAPPER(self); + rb_tinytds_send_sql_to_server(cwrap, sql); + rb_tinytds_result_exec_helper(cwrap->client); + + return LONG2NUM((long)dbcount(cwrap->client)); +} + static VALUE rb_tinytds_charset(VALUE self) { GET_CLIENT_WRAPPER(self); return cwrap->charset; @@ -599,6 +616,7 @@ void init_tinytds_client() { rb_define_method(cTinyTdsClient, "sqlsent?", rb_tinytds_sqlsent, 0); rb_define_method(cTinyTdsClient, "execute", rb_tinytds_execute, 1); rb_define_method(cTinyTdsClient, "insert", rb_tiny_tds_insert, 1); + rb_define_method(cTinyTdsClient, "do", rb_tiny_tds_do, 1); rb_define_method(cTinyTdsClient, "charset", rb_tinytds_charset, 0); rb_define_method(cTinyTdsClient, "encoding", rb_tinytds_encoding, 0); rb_define_method(cTinyTdsClient, "escape", rb_tinytds_escape, 1); diff --git a/ext/tiny_tds/result.c b/ext/tiny_tds/result.c index 28f853c0..bab5c55e 100644 --- a/ext/tiny_tds/result.c +++ b/ext/tiny_tds/result.c @@ -173,29 +173,6 @@ static RETCODE rb_tinytds_result_ok_helper(DBPROCESS *client) { return userdata->dbsqlok_retcode; } -static void rb_tinytds_result_exec_helper(DBPROCESS *client) { - RETCODE dbsqlok_rc = rb_tinytds_result_ok_helper(client); - GET_CLIENT_USERDATA(client); - if (dbsqlok_rc == SUCCEED) { - /* - This is to just process each result set. Commands such as backup and - restore are not done when the first result set is returned, so we need to - exhaust the result sets before it is complete. 
- */ - while (nogvl_dbresults(client) == SUCCEED) { - /* - If we don't loop through each row for calls to TinyTds::Result.do that - actually do return result sets, we will trigger error 20019 about trying - to execute a new command with pending results. Oh well. - */ - while (dbnextrow(client) != NO_MORE_ROWS); - } - } - dbcancel(client); - userdata->dbcancel_sent = 1; - userdata->dbsql_sent = 0; -} - static VALUE rb_tinytds_result_fetch_row(VALUE self, ID timezone, int symbolize_keys, int as_array) { VALUE row; /* Storing Values */ @@ -506,16 +483,6 @@ static VALUE rb_tinytds_result_cancel(VALUE self) { return Qtrue; } -static VALUE rb_tinytds_result_do(VALUE self) { - GET_RESULT_WRAPPER(self); - if (rwrap->client) { - rb_tinytds_result_exec_helper(rwrap->client); - return LONG2NUM((long)dbcount(rwrap->client)); - } else { - return Qnil; - } -} - static VALUE rb_tinytds_result_affected_rows(VALUE self) { GET_RESULT_WRAPPER(self); if (rwrap->client) { @@ -548,7 +515,6 @@ void init_tinytds_result() { rb_define_method(cTinyTdsResult, "fields", rb_tinytds_result_fields, 0); rb_define_method(cTinyTdsResult, "each", rb_tinytds_result_each, -1); rb_define_method(cTinyTdsResult, "cancel", rb_tinytds_result_cancel, 0); - rb_define_method(cTinyTdsResult, "do", rb_tinytds_result_do, 0); rb_define_method(cTinyTdsResult, "affected_rows", rb_tinytds_result_affected_rows, 0); rb_define_method(cTinyTdsResult, "return_code", rb_tinytds_result_return_code, 0); /* Intern String Helpers */ diff --git a/test/client_test.rb b/test/client_test.rb index 3083afce..faf2ebae 100644 --- a/test/client_test.rb +++ b/test/client_test.rb @@ -98,7 +98,7 @@ class ClientTest < TinyTds::TestCase it 'raises TinyTds exception with long query past :timeout option' do client = new_connection :timeout => 1 - action = lambda { client.execute("WaitFor Delay '00:00:02'").do } + action = lambda { client.do("WaitFor Delay '00:00:02'") } assert_raise_tinytds_error(action) do |e| assert_equal 20003, 
e.db_error_number assert_equal 6, e.severity @@ -111,21 +111,21 @@ class ClientTest < TinyTds::TestCase it 'must not timeout per sql batch when not under transaction' do client = new_connection :timeout => 2 - client.execute("WaitFor Delay '00:00:01'").do - client.execute("WaitFor Delay '00:00:01'").do - client.execute("WaitFor Delay '00:00:01'").do + client.do("WaitFor Delay '00:00:01'") + client.do("WaitFor Delay '00:00:01'") + client.do("WaitFor Delay '00:00:01'") close_client(client) end it 'must not timeout per sql batch when under transaction' do client = new_connection :timeout => 2 begin - client.execute("BEGIN TRANSACTION").do - client.execute("WaitFor Delay '00:00:01'").do - client.execute("WaitFor Delay '00:00:01'").do - client.execute("WaitFor Delay '00:00:01'").do + client.do("BEGIN TRANSACTION") + client.do("WaitFor Delay '00:00:01'") + client.do("WaitFor Delay '00:00:01'") + client.do("WaitFor Delay '00:00:01'") ensure - client.execute("COMMIT TRANSACTION").do + client.do("COMMIT TRANSACTION") close_client(client) end end @@ -134,7 +134,7 @@ class ClientTest < TinyTds::TestCase begin client = new_connection timeout: 2, port: 1234, host: ENV['TOXIPROXY_HOST'] assert_client_works(client) - action = lambda { client.execute("waitfor delay '00:00:05'").do } + action = lambda { client.do("waitfor delay '00:00:05'") } # Use toxiproxy to close the TCP socket after 1 second. # We want TinyTds to execute the statement, hit the timeout configured above, and then not be able to use the network to cancel @@ -157,7 +157,7 @@ class ClientTest < TinyTds::TestCase begin client = new_connection timeout: 2, port: 1234, host: ENV['TOXIPROXY_HOST'] assert_client_works(client) - action = lambda { client.execute("waitfor delay '00:00:05'").do } + action = lambda { client.do("waitfor delay '00:00:05'") } # Use toxiproxy to close the network connection after 1 second. 
# We want TinyTds to execute the statement, hit the timeout configured above, and then not be able to use the network to cancel @@ -278,8 +278,8 @@ class ClientTest < TinyTds::TestCase it 'has an #insert method that cancels result rows and returns IDENTITY natively' do rollback_transaction(@client) do text = 'test scope identity rows native' - @client.execute("DELETE FROM [datatypes] WHERE [varchar_50] = '#{text}'").do - @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')").do + @client.do("DELETE FROM [datatypes] WHERE [varchar_50] = '#{text}'") + @client.do("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')") sql_identity = @client.execute(@client.identity_sql).each.first['Ident'] native_identity = @client.insert("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')") @@ -295,12 +295,12 @@ class ClientTest < TinyTds::TestCase rollback_transaction(@client) do seed = 9223372036854775805 - @client.execute("DELETE FROM [datatypes]").do + @client.do("DELETE FROM [datatypes]") id_constraint_name = @client.execute("EXEC sp_helpindex [datatypes]").detect { |row| row['index_keys'] == 'id' }['index_name'] - @client.execute("ALTER TABLE [datatypes] DROP CONSTRAINT [#{id_constraint_name}]").do - @client.execute("ALTER TABLE [datatypes] DROP COLUMN [id]").do - @client.execute("ALTER TABLE [datatypes] ADD [id] [bigint] NOT NULL IDENTITY(1,1) PRIMARY KEY").do - @client.execute("DBCC CHECKIDENT ('datatypes', RESEED, #{seed})").do + @client.do("ALTER TABLE [datatypes] DROP CONSTRAINT [#{id_constraint_name}]") + @client.do("ALTER TABLE [datatypes] DROP COLUMN [id]") + @client.do("ALTER TABLE [datatypes] ADD [id] [bigint] NOT NULL IDENTITY(1,1) PRIMARY KEY") + @client.do("DBCC CHECKIDENT ('datatypes', RESEED, #{seed})") identity = @client.insert("INSERT INTO [datatypes] ([varchar_50]) VALUES ('something')") assert_equal(seed, identity) @@ -318,4 +318,35 @@ class ClientTest < TinyTds::TestCase end end end + + describe "#do" do + before do + 
@client = new_connection + end + + it 'has a #do method that cancels result rows and returns affected rows natively' do + rollback_transaction(@client) do + text = 'test affected rows native' + count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").each.first['count'] + deleted_rows = @client.do("DELETE FROM [datatypes]") + assert_equal count, deleted_rows, 'should have deleted rows equal to count' + inserted_rows = @client.do("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')") + assert_equal 1, inserted_rows, 'should have inserted row for one above' + updated_rows = @client.do("UPDATE [datatypes] SET [varchar_50] = NULL WHERE [varchar_50] = '#{text}'") + assert_equal 1, updated_rows, 'should have updated row for one above' + end + end + + it 'allows native affected rows using #do to work under transaction' do + rollback_transaction(@client) do + text = 'test affected rows native in transaction' + @client.do("BEGIN TRANSACTION") + @client.do("DELETE FROM [datatypes]") + inserted_rows = @client.do("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')") + assert_equal 1, inserted_rows, 'should have inserted row for one above' + updated_rows = @client.do("UPDATE [datatypes] SET [varchar_50] = NULL WHERE [varchar_50] = '#{text}'") + assert_equal 1, updated_rows, 'should have updated row for one above' + end + end + end end diff --git a/test/result_test.rb b/test/result_test.rb index 2b771f9e..03c8df57 100644 --- a/test/result_test.rb +++ b/test/result_test.rb @@ -77,8 +77,8 @@ class ResultTest < TinyTds::TestCase it 'allows sql concat + to work' do rollback_transaction(@client) do - @client.execute("DELETE FROM [datatypes]").do - @client.execute("INSERT INTO [datatypes] ([char_10], [varchar_50]) VALUES ('1', '2')").do + @client.do("DELETE FROM [datatypes]") + @client.do("INSERT INTO [datatypes] ([char_10], [varchar_50]) VALUES ('1', '2')") result = @client.execute("SELECT TOP (1) [char_10] + 'test' + [varchar_50] AS [test] FROM 
[datatypes]").each.first['test'] _(result).must_equal "1 test2" end @@ -112,8 +112,8 @@ class ResultTest < TinyTds::TestCase it 'must delete, insert and find data' do rollback_transaction(@client) do text = 'test insert and delete' - @client.execute("DELETE FROM [datatypes] WHERE [varchar_50] IS NOT NULL").do - @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')").do + @client.do("DELETE FROM [datatypes] WHERE [varchar_50] IS NOT NULL") + @client.do("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')") row = @client.execute("SELECT [varchar_50] FROM [datatypes] WHERE [varchar_50] IS NOT NULL").each.first assert row assert_equal text, row['varchar_50'] @@ -123,8 +123,8 @@ class ResultTest < TinyTds::TestCase it 'must insert and find unicode data' do rollback_transaction(@client) do text = '😍' - @client.execute("DELETE FROM [datatypes] WHERE [nvarchar_50] IS NOT NULL").do - @client.execute("INSERT INTO [datatypes] ([nvarchar_50]) VALUES (N'#{text}')").do + @client.do("DELETE FROM [datatypes] WHERE [nvarchar_50] IS NOT NULL") + @client.do("INSERT INTO [datatypes] ([nvarchar_50]) VALUES (N'#{text}')") row = @client.execute("SELECT [nvarchar_50] FROM [datatypes] WHERE [nvarchar_50] IS NOT NULL").each.first assert_equal text, row['nvarchar_50'] end @@ -133,48 +133,23 @@ class ResultTest < TinyTds::TestCase it 'must delete and update with affected rows support and insert with identity support in native sql' do rollback_transaction(@client) do text = 'test affected rows sql' - @client.execute("DELETE FROM [datatypes]").do + @client.do("DELETE FROM [datatypes]") afrows = @client.execute("SELECT @@ROWCOUNT AS AffectedRows").each.first['AffectedRows'] _(['Fixnum', 'Integer']).must_include afrows.class.name - @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')").do + @client.do("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')") pk1 = @client.execute(@client.identity_sql).each.first['Ident'] _(['Fixnum', 
'Integer']).must_include pk1.class.name, 'we it be able to CAST to bigint' - @client.execute("UPDATE [datatypes] SET [varchar_50] = NULL WHERE [varchar_50] = '#{text}'").do + @client.do("UPDATE [datatypes] SET [varchar_50] = NULL WHERE [varchar_50] = '#{text}'") afrows = @client.execute("SELECT @@ROWCOUNT AS AffectedRows").each.first['AffectedRows'] assert_equal 1, afrows end end - it 'has a #do method that cancels result rows and returns affected rows natively' do - rollback_transaction(@client) do - text = 'test affected rows native' - count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").each.first['count'] - deleted_rows = @client.execute("DELETE FROM [datatypes]").do - assert_equal count, deleted_rows, 'should have deleted rows equal to count' - inserted_rows = @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')").do - assert_equal 1, inserted_rows, 'should have inserted row for one above' - updated_rows = @client.execute("UPDATE [datatypes] SET [varchar_50] = NULL WHERE [varchar_50] = '#{text}'").do - assert_equal 1, updated_rows, 'should have updated row for one above' - end - end - - it 'allows native affected rows using #do to work under transaction' do - rollback_transaction(@client) do - text = 'test affected rows native in transaction' - @client.execute("BEGIN TRANSACTION").do - @client.execute("DELETE FROM [datatypes]").do - inserted_rows = @client.execute("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')").do - assert_equal 1, inserted_rows, 'should have inserted row for one above' - updated_rows = @client.execute("UPDATE [datatypes] SET [varchar_50] = NULL WHERE [varchar_50] = '#{text}'").do - assert_equal 1, updated_rows, 'should have updated row for one above' - end - end - it 'must be able to begin/commit transactions with raw sql' do rollback_transaction(@client) do - @client.execute("BEGIN TRANSACTION").do - @client.execute("DELETE FROM [datatypes]").do - @client.execute("COMMIT TRANSACTION").do + 
@client.do("BEGIN TRANSACTION") + @client.do("DELETE FROM [datatypes]") + @client.do("COMMIT TRANSACTION") count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").each.first['count'] assert_equal 0, count end @@ -182,9 +157,9 @@ class ResultTest < TinyTds::TestCase it 'must be able to begin/rollback transactions with raw sql' do load_current_schema - @client.execute("BEGIN TRANSACTION").do - @client.execute("DELETE FROM [datatypes]").do - @client.execute("ROLLBACK TRANSACTION").do + @client.do("BEGIN TRANSACTION") + @client.do("DELETE FROM [datatypes]") + @client.do("ROLLBACK TRANSACTION") count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").each.first['count'] _(count).wont_equal 0 end @@ -199,8 +174,6 @@ class ResultTest < TinyTds::TestCase it 'always returns an array for fields for all sql' do result = @client.execute("USE [tinytdstest]") _(result.fields).must_equal [] - result.do - _(result.fields).must_equal [] end it 'returns fields even when no results are found' do @@ -260,10 +233,6 @@ class ResultTest < TinyTds::TestCase result.cancel _(@client.sqlsent?).must_equal false _(@client.canceled?).must_equal true - # With do method. 
- @client.execute(@query1).do - _(@client.sqlsent?).must_equal false - _(@client.canceled?).must_equal true # With first @client.execute("SELECT [id] FROM [datatypes]").each(:first => true) _(@client.sqlsent?).must_equal false @@ -277,9 +246,9 @@ class ResultTest < TinyTds::TestCase it 'has properly encoded column names with symbol keys' do col_name = "öäüß" - @client.execute("DROP TABLE [test_encoding]").do rescue nil - @client.execute("CREATE TABLE [dbo].[test_encoding] ( [id] int NOT NULL IDENTITY(1,1) PRIMARY KEY, [#{col_name}] [nvarchar](10) NOT NULL )").do - @client.execute("INSERT INTO [test_encoding] ([#{col_name}]) VALUES (N'#{col_name}')").do + @client.do("DROP TABLE [test_encoding]") rescue nil + @client.do("CREATE TABLE [dbo].[test_encoding] ( [id] int NOT NULL IDENTITY(1,1) PRIMARY KEY, [#{col_name}] [nvarchar](10) NOT NULL )") + @client.do("INSERT INTO [test_encoding] ([#{col_name}]) VALUES (N'#{col_name}')") result = @client.execute("SELECT [#{col_name}] FROM [test_encoding]") row = result.each(:as => :hash, :symbolize_keys => true).first assert_instance_of Symbol, result.fields.first @@ -521,7 +490,7 @@ class ResultTest < TinyTds::TestCase after { File.delete(backup_file) if File.exist?(backup_file) } it 'must not cancel the query until complete' do - @client.execute("BACKUP DATABASE tinytdstest TO DISK = '#{backup_file}'").do + @client.do("BACKUP DATABASE tinytdstest TO DISK = '#{backup_file}'") end end unless sqlserver_azure? 
@@ -547,7 +516,7 @@ class ResultTest < TinyTds::TestCase before do @big_text = 'x' * 2_000_000 @old_textsize = @client.execute("SELECT @@TEXTSIZE AS [textsize]").each.first['textsize'].inspect - @client.execute("SET TEXTSIZE #{(@big_text.length * 2) + 1}").do + @client.do("SET TEXTSIZE #{(@big_text.length * 2) + 1}") end it 'must insert and select large varchar_max' do @@ -590,7 +559,7 @@ class ResultTest < TinyTds::TestCase messages.clear msg = "Test #{severity} severity" state = rand(1..255) - @client.execute("RAISERROR(N'#{msg}', #{severity}, #{state})").do + @client.do("RAISERROR(N'#{msg}', #{severity}, #{state})") m = messages.first assert_equal 1, messages.length, 'there should be one message after one raiserror' assert_equal msg, m.message, 'message text' @@ -602,7 +571,7 @@ class ResultTest < TinyTds::TestCase it 'calls the provided message handler for `print` messages' do messages.clear msg = 'hello' - @client.execute("PRINT '#{msg}'").do + @client.do("PRINT '#{msg}'") m = messages.first assert_equal 1, messages.length, 'there should be one message after one print statement' assert_equal msg, m.message, 'message text' @@ -610,7 +579,7 @@ class ResultTest < TinyTds::TestCase it 'must raise an error preceded by a `print` message' do messages.clear - action = lambda { @client.execute("EXEC tinytds_TestPrintWithError").do } + action = lambda { @client.do("EXEC tinytds_TestPrintWithError") } assert_raise_tinytds_error(action) do |e| assert_equal 'hello', messages.first.message, 'message text' @@ -622,13 +591,13 @@ class ResultTest < TinyTds::TestCase it 'calls the provided message handler for each of a series of `print` messages' do messages.clear - @client.execute("EXEC tinytds_TestSeveralPrints").do + @client.do("EXEC tinytds_TestSeveralPrints") assert_equal ['hello 1', 'hello 2', 'hello 3'], messages.map { |e| e.message }, 'message list' end it 'should flush info messages before raising error in cases of timeout' do @client = new_connection timeout: 1, 
message_handler: Proc.new { |m| messages << m } - action = lambda { @client.execute("print 'hello'; waitfor delay '00:00:02'").do } + action = lambda { @client.do("print 'hello'; waitfor delay '00:00:02'") } messages.clear assert_raise_tinytds_error(action) do |e| assert_match %r{timed out}i, e.message, 'ignore if non-english test run' @@ -640,7 +609,7 @@ class ResultTest < TinyTds::TestCase it 'should print info messages before raising error in cases of timeout' do @client = new_connection timeout: 1, message_handler: Proc.new { |m| messages << m } - action = lambda { @client.execute("raiserror('hello', 1, 1) with nowait; waitfor delay '00:00:02'").do } + action = lambda { @client.do("raiserror('hello', 1, 1) with nowait; waitfor delay '00:00:02'") } messages.clear assert_raise_tinytds_error(action) do |e| assert_match %r{timed out}i, e.message, 'ignore if non-english test run' @@ -653,12 +622,12 @@ class ResultTest < TinyTds::TestCase it 'must not raise an error when severity is 10 or less' do (1..10).to_a.each do |severity| - @client.execute("RAISERROR(N'Test #{severity} severity', #{severity}, 1)").do + @client.do("RAISERROR(N'Test #{severity} severity', #{severity}, 1)") end end it 'raises an error when severity is greater than 10' do - action = lambda { @client.execute("RAISERROR(N'Test 11 severity', 11, 1)").do } + action = lambda { @client.do("RAISERROR(N'Test 11 severity', 11, 1)") } assert_raise_tinytds_error(action) do |e| assert_equal "Test 11 severity", e.message assert_equal 11, e.severity @@ -677,7 +646,7 @@ def assert_followup_query def insert_and_select_datatype(datatype) rollback_transaction(@client) do - @client.execute("DELETE FROM [datatypes] WHERE [#{datatype}] IS NOT NULL").do + @client.do("DELETE FROM [datatypes] WHERE [#{datatype}] IS NOT NULL") id = @client.insert("INSERT INTO [datatypes] ([#{datatype}]) VALUES (N'#{@big_text}')") found_text = find_value id, datatype flunk "Large #{datatype} data with a length of #{@big_text.length} did 
not match found text with length of #{found_text.length}" unless @big_text == found_text diff --git a/test/schema_test.rb b/test/schema_test.rb index a1128d7b..f4fadf50 100644 --- a/test/schema_test.rb +++ b/test/schema_test.rb @@ -117,7 +117,7 @@ class SchemaTest < TinyTds::TestCase assert_utf8_encoding find_value(182, :ntext) # If this test fails, try setting the "text size" in your freetds.conf. See: http://www.freetds.org/faq.html#textdata large_value = "x" * 5000 - large_value_id = @client.execute("INSERT INTO [datatypes] ([ntext]) VALUES (N'#{large_value}')").insert + large_value_id = @client.insert("INSERT INTO [datatypes] ([ntext]) VALUES (N'#{large_value}')") assert_equal large_value, find_value(large_value_id, :ntext) end diff --git a/test/test_helper.rb b/test/test_helper.rb index 82ca1a6d..d49348bd 100755 --- a/test/test_helper.rb +++ b/test/test_helper.rb @@ -46,20 +46,20 @@ def close_client(client=@client) def new_connection(options={}) client = TinyTds::Client.new(connection_options(options)) if sqlserver_azure? 
- client.execute('SET ANSI_NULLS ON').do - client.execute('SET CURSOR_CLOSE_ON_COMMIT OFF').do - client.execute('SET ANSI_NULL_DFLT_ON ON').do - client.execute('SET IMPLICIT_TRANSACTIONS OFF').do - client.execute('SET ANSI_PADDING ON').do - client.execute('SET QUOTED_IDENTIFIER ON').do - client.execute('SET ANSI_WARNINGS ON').do + client.do('SET ANSI_NULLS ON') + client.do('SET CURSOR_CLOSE_ON_COMMIT OFF') + client.do('SET ANSI_NULL_DFLT_ON ON') + client.do('SET IMPLICIT_TRANSACTIONS OFF') + client.do('SET ANSI_PADDING ON') + client.do('SET QUOTED_IDENTIFIER ON') + client.do('SET ANSI_WARNINGS ON') else - client.execute('SET ANSI_DEFAULTS ON').do - client.execute('SET CURSOR_CLOSE_ON_COMMIT OFF').do - client.execute('SET IMPLICIT_TRANSACTIONS OFF').do + client.do('SET ANSI_DEFAULTS ON') + client.do('SET CURSOR_CLOSE_ON_COMMIT OFF') + client.do('SET IMPLICIT_TRANSACTIONS OFF') end - client.execute('SET TEXTSIZE 2147483647').do - client.execute('SET CONCAT_NULL_YIELDS_NULL ON').do + client.do('SET TEXTSIZE 2147483647') + client.do('SET CONCAT_NULL_YIELDS_NULL ON') client end @@ -142,11 +142,11 @@ def load_current_schema loader = new_connection schema_file = File.expand_path File.join(File.dirname(__FILE__), 'schema', "#{current_schema}.sql") schema_sql = File.open(schema_file,"rb:UTF-8") { |f|f.read } - loader.execute(drop_sql).do - loader.execute(schema_sql).do - loader.execute(sp_sql).do - loader.execute(sp_error_sql).do - loader.execute(sp_several_prints_sql).do + loader.do(drop_sql) + loader.do(schema_sql) + loader.do(sp_sql) + loader.do(sp_error_sql) + loader.do(sp_several_prints_sql) loader.close true end @@ -210,10 +210,10 @@ def utc_offset end def rollback_transaction(client) - client.execute("BEGIN TRANSACTION").do + client.do("BEGIN TRANSACTION") yield ensure - client.execute("ROLLBACK TRANSACTION").do + client.do("ROLLBACK TRANSACTION") end def init_toxiproxy diff --git a/test/thread_test.rb b/test/thread_test.rb index a79353f2..99b7c336 100644 --- 
a/test/thread_test.rb +++ b/test/thread_test.rb @@ -27,7 +27,7 @@ class ThreadTest < TinyTds::TestCase start = Time.new threads << Thread.new do ts = Time.new - @pool.with { |c| c.execute(@query).do } + @pool.with { |c| c.do(@query) } te = Time.new @logger.info "Thread #{i} finished in #{te - ts} thread seconds, #{te - start} real seconds" end From 62fd9cd3d75f52602a3bba38df59c1fdfe0772b6 Mon Sep 17 00:00:00 2001 From: Andy Pfister Date: Tue, 15 Oct 2024 17:09:57 +0200 Subject: [PATCH 3/4] Refactor `execute` to fetch an entire result object --- README.md | 172 ++++------- ext/tiny_tds/client.c | 406 +++++++++++++++++++++++--- ext/tiny_tds/result.c | 558 ------------------------------------ ext/tiny_tds/result.h | 32 --- ext/tiny_tds/tiny_tds_ext.c | 1 - ext/tiny_tds/tiny_tds_ext.h | 1 - lib/tiny_tds.rb | 2 +- lib/tiny_tds/client.rb | 7 +- lib/tiny_tds/result.rb | 4 + test/result_test.rb | 319 +++++++-------------- test/test_helper.rb | 5 +- 11 files changed, 532 insertions(+), 975 deletions(-) delete mode 100644 ext/tiny_tds/result.c delete mode 100644 ext/tiny_tds/result.h diff --git a/README.md b/README.md index de93787e..21e10afa 100644 --- a/README.md +++ b/README.md @@ -116,8 +116,8 @@ opts[:message_handler] = Proc.new { |m| puts m.message } client = TinyTds::Client.new opts # => Changed database context to 'master'. # => Changed language setting to us_english. -client.execute("print 'hello world!'").do -# => hello world! +client.do("print 'hello world!'") +# => -1 (no affected rows) ``` Use the `#active?` method to determine if a connection is good. The implementation of this method may change but it should always guarantee that a connection is good. Current it checks for either a closed or dead connection. @@ -147,169 +147,99 @@ Send a SQL string to the database and return a TinyTds::Result object. 
result = client.execute("SELECT * FROM [datatypes]") ``` +## Sending queries and receiving results -## TinyTds::Result Usage +The client implements three different methods to send queries to a SQL server. -A result object is returned by the client's execute command. It is important that you either return the data from the query, most likely with the #each method, or that you cancel the results before asking the client to execute another SQL batch. Failing to do so will yield an error. - -Calling #each on the result will lazily load each row from the database. +`client.insert` will execute the query and return the last identifier. ```ruby -result.each do |row| - # By default each row is a hash. - # The keys are the fields, as you'd expect. - # The values are pre-built Ruby primitives mapped from their corresponding types. -end +client.insert("INSERT INTO [datatypes] ([varchar_50]) VALUES ('text')") +# => 363 ``` -A result object has a `#fields` accessor. It can be called before the result rows are iterated over. Even if no rows are returned, #fields will still return the column names you expected. Any SQL that does not return columned data will always return an empty array for `#fields`. It is important to remember that if you access the `#fields` before iterating over the results, the columns will always follow the default query option's `:symbolize_keys` setting at the client's level and will ignore the query options passed to each. +`client.do` will execute the query and tell you how many rows were affected. ```ruby -result = client.execute("USE [tinytdstest]") -result.fields # => [] -result.do - -result = client.execute("SELECT [id] FROM [datatypes]") -result.fields # => ["id"] -result.cancel -result = client.execute("SELECT [id] FROM [datatypes]") -result.each(:symbolize_keys => true) -result.fields # => [:id] +client.do("DELETE FROM [datatypes] WHERE [varchar_50] = 'text'") +# 1 ``` -You can cancel a result object's data from being loading by the server. 
+Both `do` and `insert` will not serialize any results sent by the SQL server, making them extremely fast and memory-efficient for large operations. + +`client.execute` will execute the query and return you a `TinyTds::Result` object. ```ruby -result = client.execute("SELECT * FROM [super_big_table]") -result.cancel +client.execute("SELECT [id] FROM [datatypes]") +# => +# #11}, +# {"id"=>12}, +# {"id"=>21}, +# {"id"=>31}, ``` -You can use results cancelation in conjunction with results lazy loading, no problem. +A result object has a `fields` accessor. Even if no rows are returned, `fields` will still return the column names you expected. Any SQL that does not return columned data will always return an empty array for `fields`. ```ruby -result = client.execute("SELECT * FROM [super_big_table]") -result.each_with_index do |row, i| - break if row > 10 -end -result.cancel +result = client.execute("USE [tinytdstest]") +result.fields # => [] + +result = client.execute("SELECT [id] FROM [datatypes]") +result.fields # => ["id"] ``` -If the SQL executed by the client returns affected rows, you can easily find out how many. +You can retrieve the results by accessing the `rows` property on the result. ```ruby -result.each -result.affected_rows # => 24 +result.rows +# => +# [{"id"=>11}, +# {"id"=>12}, +# {"id"=>21}, +# ... ``` -This pattern is so common for UPDATE and DELETE statements that the #do method cancels any need for loading the result data and returns the `#affected_rows`. +The result object also has `affected_rows`, which usually also corresponds to the length of items in `rows`. But if you execute a `DELETE` statement with `execute`, `rows` is likely empty but `affected_rows` will still list a couple of items. 
```ruby result = client.execute("DELETE FROM [datatypes]") -result.do # => 72 +# # +result.count +# 0 +result.affected_rows +# 75 ``` -Likewise for `INSERT` statements, the #insert method cancels any need for loading the result data and executes a `SCOPE_IDENTITY()` for the primary key. - -```ruby -result = client.execute("INSERT INTO [datatypes] ([xml]) VALUES ('
')") -result.insert # => 420 -``` +But as mentioned earlier, best use `do` when you are only interested in the `affected_rows`. -The result object can handle multiple result sets form batched SQL or stored procedures. It is critical to remember that when calling each with a block for the first time will return each "row" of each result set. Calling each a second time with a block will yield each "set". +The result object can handle multiple result sets form batched SQL or stored procedures. ```ruby sql = ["SELECT TOP (1) [id] FROM [datatypes]", "SELECT TOP (2) [bigint] FROM [datatypes] WHERE [bigint] IS NOT NULL"].join(' ') -set1, set2 = client.execute(sql).each +set1, set2 = client.execute(sql).rows set1 # => [{"id"=>11}] set2 # => [{"bigint"=>-9223372036854775807}, {"bigint"=>9223372036854775806}] - -result = client.execute(sql) - -result.each do |rowset| - # First time data loading, yields each row from each set. - # 1st: {"id"=>11} - # 2nd: {"bigint"=>-9223372036854775807} - # 3rd: {"bigint"=>9223372036854775806} -end - -result.each do |rowset| - # Second time over (if columns cached), yields each set. - # 1st: [{"id"=>11}] - # 2nd: [{"bigint"=>-9223372036854775807}, {"bigint"=>9223372036854775806}] -end -``` - -Use the `#sqlsent?` and `#canceled?` query methods on the client to determine if an active SQL batch still needs to be processed and or if data results were canceled from the last result object. These values reset to true and false respectively for the client at the start of each `#execute` and new result object. Or if all rows are processed normally, `#sqlsent?` will return false. To demonstrate, lets assume we have 100 rows in the result object. - -```ruby -client.sqlsent? # = false -client.canceled? # = false - -result = client.execute("SELECT * FROM [super_big_table]") - -client.sqlsent? # = true -client.canceled? # = false - -result.each do |row| - # Assume we break after 20 rows with 80 still pending. 
- break if row["id"] > 20 -end - -client.sqlsent? # = true -client.canceled? # = false - -result.cancel - -client.sqlsent? # = false -client.canceled? # = true -``` - -It is possible to get the return code after executing a stored procedure from either the result or client object. - -```ruby -client.return_code # => nil - -result = client.execute("EXEC tinytds_TestReturnCodes") -result.do -result.return_code # => 420 -client.return_code # => 420 ``` - ## Query Options -Every `TinyTds::Result` object can pass query options to the #each method. The defaults are defined and configurable by setting options in the `TinyTds::Client.default_query_options` hash. The default values are: - -* :as => :hash - Object for each row yielded. Can be set to :array. -* :symbolize_keys => false - Row hash keys. Defaults to shared/frozen string keys. -* :cache_rows => true - Successive calls to #each returns the cached rows. -* :timezone => :local - Local to the Ruby client or :utc for UTC. -* :empty_sets => true - Include empty results set in queries that return multiple result sets. +You can pass query options to `execute`. The defaults are defined and configurable by setting options in the `TinyTds::Client.default_query_options` hash. The default values are: -Each result gets a copy of the default options you specify at the client level and can be overridden by passing an options hash to the #each method. For example +* `as: :hash` - Object for each row yielded. Can be set to :array. +* `empty_sets: true` - Include empty results set in queries that return multiple result sets. +* `timezone: :local` - Local to the Ruby client or :utc for UTC. ```ruby -result.each(:as => :array, :cache_rows => false) do |row| - # Each row is now an array of values ordered by #fields. - # Rows are yielded and forgotten about, freeing memory. 
-end +result = client.execute("SELECT [datetime2_2] FROM [datatypes] WHERE [id] = 74", as: :array, timezone: :utc, empty_sets: true) +# => # ``` -Besides the standard query options, the result object can take one additional option. Using `:first => true` will only load the first row of data and cancel all remaining results. - -```ruby -result = client.execute("SELECT * FROM [super_big_table]") -result.each(:first => true) # => [{'id' => 24}] -``` - - -## Row Caching - -By default row caching is turned on because the SQL Server adapter for ActiveRecord would not work without it. I hope to find some time to create some performance patches for ActiveRecord that would allow it to take advantages of lazily created yielded rows from result objects. Currently only TinyTDS and the Mysql2 gem allow such a performance gain. - - ## Encoding Error Handling TinyTDS takes an opinionated stance on how we handle encoding errors. First, we treat errors differently on reads vs. writes. Our opinion is that if you are reading bad data due to your client's encoding option, you would rather just find `?` marks in your strings vs being blocked with exceptions. This is how things wold work via ODBC or SMS. On the other hand, writes will raise an exception. In this case we raise the SYBEICONVO/2402 error message which has a description of `Error converting characters into server's character set. Some character(s) could not be converted.`. Even though the severity of this message is only a `4` and TinyTDS will automatically strip/ignore unknown characters, we feel you should know that you are inserting bad encodings. In this way, a transaction can be rolled back, etc. Remember, any database write that has bad characters due to the client encoding will still be written to the database, but it is up to you rollback said write if needed. Most ORMs like ActiveRecord handle this scenario just fine. 
diff --git a/ext/tiny_tds/client.c b/ext/tiny_tds/client.c index 650ff8cd..74e95a68 100644 --- a/ext/tiny_tds/client.c +++ b/ext/tiny_tds/client.c @@ -8,6 +8,12 @@ static ID intern_source_eql, intern_severity_eql, intern_db_error_number_eql, in static ID intern_new, intern_dup, intern_transpose_iconv_encoding, intern_local_offset, intern_gsub, intern_call; VALUE opt_escape_regex, opt_escape_dblquote; +static ID id_ivar_fields, id_ivar_rows, id_ivar_return_code, id_ivar_affected_rows, id_ivar_default_query_options, intern_bigd, intern_divide; +static ID sym_as, sym_array, sym_timezone, sym_empty_sets, sym_local, sym_utc, intern_utc, intern_local, intern_as, intern_empty_sets, intern_timezone; +static VALUE cTinyTdsResult, cKernel, cDate; + +rb_encoding *binaryEncoding; +VALUE opt_onek, opt_onebil, opt_float_zero, opt_four, opt_tenk; // Lib Macros @@ -15,6 +21,22 @@ VALUE opt_escape_regex, opt_escape_dblquote; tinytds_client_wrapper *cwrap; \ Data_Get_Struct(self, tinytds_client_wrapper, cwrap) +#ifdef _WIN32 + #define LONG_LONG_FORMAT "I64d" +#else + #define LONG_LONG_FORMAT "lld" +#endif + +#define ENCODED_STR_NEW(_data, _len) ({ \ + VALUE _val = rb_str_new((char *)_data, (long)_len); \ + rb_enc_associate(_val, cwrap->encoding); \ + _val; \ +}) +#define ENCODED_STR_NEW2(_data2) ({ \ + VALUE _val = rb_str_new2((char *)_data2); \ + rb_enc_associate(_val, cwrap->encoding); \ + _val; \ +}) // Lib Backend (Helpers) @@ -149,12 +171,11 @@ static RETCODE nogvl_dbsqlok(DBPROCESS *client) { } // some additional helpers interacting with the SQL server -static VALUE rb_tinytds_send_sql_to_server(tinytds_client_wrapper *cwrap, VALUE sql) { +static void rb_tinytds_send_sql_to_server(tinytds_client_wrapper *cwrap, VALUE sql) { rb_tinytds_client_reset_userdata(cwrap->userdata); - if (cwrap->closed || cwrap->userdata->closed) { \ - rb_raise(cTinyTdsError, "closed connection"); \ - return Qnil; \ + if (cwrap->closed || cwrap->userdata->closed) { + rb_raise(cTinyTdsError, "closed 
connection"); } dbcmd(cwrap->client, StringValueCStr(sql)); @@ -174,9 +195,15 @@ static RETCODE rb_tiny_tds_client_ok_helper(DBPROCESS *client) { return userdata->dbsqlok_retcode; } +static void rb_tinytds_client_cancel_results(DBPROCESS * client) { + GET_CLIENT_USERDATA(client); + dbcancel(client); + userdata->dbcancel_sent = 1; + userdata->dbsql_sent = 0; +} + static void rb_tinytds_result_exec_helper(DBPROCESS *client) { RETCODE dbsqlok_rc = rb_tiny_tds_client_ok_helper(client); - GET_CLIENT_USERDATA(client); if (dbsqlok_rc == SUCCEED) { /* This is to just process each result set. Commands such as backup and @@ -192,9 +219,8 @@ static void rb_tinytds_result_exec_helper(DBPROCESS *client) { while (dbnextrow(client) != NO_MORE_ROWS); } } - dbcancel(client); - userdata->dbcancel_sent = 1; - userdata->dbsql_sent = 0; + + rb_tinytds_client_cancel_results(client); } // Lib Backend (Memory Management & Handlers) @@ -420,19 +446,308 @@ static VALUE rb_tinytds_sqlsent(VALUE self) { return cwrap->userdata->dbsql_sent ? Qtrue : Qfalse; } -static VALUE rb_tinytds_execute(VALUE self, VALUE sql) { - VALUE result; +static VALUE rb_tinytds_result_fetch_value(VALUE self, ID timezone, unsigned int number_of_fields, int field_index) { + GET_CLIENT_WRAPPER(self); + + VALUE val = Qnil; + + int col = field_index + 1; + int coltype = dbcoltype(cwrap->client, col); + BYTE *data = dbdata(cwrap->client, col); + DBINT data_len = dbdatlen(cwrap->client, col); + int null_val = ((data == NULL) && (data_len == 0)); + + if (!null_val) { + switch(coltype) { + case SYBINT1: + val = INT2FIX(*(DBTINYINT *)data); + break; + case SYBINT2: + val = INT2FIX(*(DBSMALLINT *)data); + break; + case SYBINT4: + val = INT2NUM(*(DBINT *)data); + break; + case SYBINT8: + val = LL2NUM(*(DBBIGINT *)data); + break; + case SYBBIT: + val = *(int *)data ? 
Qtrue : Qfalse; + break; + case SYBNUMERIC: + case SYBDECIMAL: { + DBTYPEINFO *data_info = dbcoltypeinfo(cwrap->client, col); + int data_slength = (int)data_info->precision + (int)data_info->scale + 1; + char converted_decimal[data_slength]; + dbconvert(cwrap->client, coltype, data, data_len, SYBVARCHAR, (BYTE *)converted_decimal, -1); + val = rb_funcall(cKernel, intern_bigd, 1, rb_str_new2((char *)converted_decimal)); + break; + } + case SYBFLT8: { + double col_to_double = *(double *)data; + val = (col_to_double == 0.000000) ? opt_float_zero : rb_float_new(col_to_double); + break; + } + case SYBREAL: { + float col_to_float = *(float *)data; + val = (col_to_float == 0.0) ? opt_float_zero : rb_float_new(col_to_float); + break; + } + case SYBMONEY: { + DBMONEY *money = (DBMONEY *)data; + char converted_money[25]; + long long money_value = ((long long)money->mnyhigh << 32) | money->mnylow; + sprintf(converted_money, "%" LONG_LONG_FORMAT, money_value); + val = rb_funcall(cKernel, intern_bigd, 2, rb_str_new2(converted_money), opt_four); + val = rb_funcall(val, intern_divide, 1, opt_tenk); + break; + } + case SYBMONEY4: { + DBMONEY4 *money = (DBMONEY4 *)data; + char converted_money[20]; + sprintf(converted_money, "%f", money->mny4 / 10000.0); + val = rb_funcall(cKernel, intern_bigd, 1, rb_str_new2(converted_money)); + break; + } + case SYBBINARY: + case SYBIMAGE: + val = rb_str_new((char *)data, (long)data_len); + rb_enc_associate(val, binaryEncoding); + break; + case 36: { // SYBUNIQUE + char converted_unique[37]; + dbconvert(cwrap->client, coltype, data, 37, SYBVARCHAR, (BYTE *)converted_unique, -1); + val = ENCODED_STR_NEW2(converted_unique); + break; + } + case SYBDATETIME4: { + DBDATETIME new_data; + dbconvert(cwrap->client, coltype, data, data_len, SYBDATETIME, (BYTE *)&new_data, sizeof(new_data)); + data = (BYTE *)&new_data; + data_len = sizeof(new_data); + } + case SYBDATETIME: { + DBDATEREC dr; + dbdatecrack(cwrap->client, &dr, (DBDATETIME *)data); + if (dr.year 
+ dr.month + dr.day + dr.hour + dr.minute + dr.second + dr.millisecond != 0) { + val = rb_funcall(rb_cTime, timezone, 7, INT2NUM(dr.year), INT2NUM(dr.month), INT2NUM(dr.day), INT2NUM(dr.hour), INT2NUM(dr.minute), INT2NUM(dr.second), INT2NUM(dr.millisecond*1000)); + } + break; + } + case SYBMSDATE: + case SYBMSTIME: + case SYBMSDATETIME2: + case SYBMSDATETIMEOFFSET: { + DBDATEREC2 dr2; + dbanydatecrack(cwrap->client, &dr2, coltype, data); + switch(coltype) { + case SYBMSDATE: { + val = rb_funcall(cDate, intern_new, 3, INT2NUM(dr2.year), INT2NUM(dr2.month), INT2NUM(dr2.day)); + break; + } + case SYBMSTIME: { + VALUE rational_nsec = rb_Rational(INT2NUM(dr2.nanosecond), opt_onek); + val = rb_funcall(rb_cTime, timezone, 7, INT2NUM(1900), INT2NUM(1), INT2NUM(1), INT2NUM(dr2.hour), INT2NUM(dr2.minute), INT2NUM(dr2.second), rational_nsec); + break; + } + case SYBMSDATETIME2: { + VALUE rational_nsec = rb_Rational(INT2NUM(dr2.nanosecond), opt_onek); + val = rb_funcall(rb_cTime, timezone, 7, INT2NUM(dr2.year), INT2NUM(dr2.month), INT2NUM(dr2.day), INT2NUM(dr2.hour), INT2NUM(dr2.minute), INT2NUM(dr2.second), rational_nsec); + break; + } + case SYBMSDATETIMEOFFSET: { + long long numerator = ((long)dr2.second * (long long)1000000000) + (long long)dr2.nanosecond; + VALUE rational_sec = rb_Rational(LL2NUM(numerator), opt_onebil); + val = rb_funcall(rb_cTime, intern_new, 7, INT2NUM(dr2.year), INT2NUM(dr2.month), INT2NUM(dr2.day), INT2NUM(dr2.hour), INT2NUM(dr2.minute), rational_sec, INT2NUM(dr2.tzone*60)); + break; + } + } + break; + } + case SYBCHAR: + case SYBTEXT: + val = ENCODED_STR_NEW(data, data_len); + break; + case 98: { // SYBVARIANT + if (data_len == 4) { + val = INT2NUM(*(DBINT *)data); + break; + } else { + val = ENCODED_STR_NEW(data, data_len); + break; + } + } + default: + val = ENCODED_STR_NEW(data, data_len); + break; + } + } + + return val; + } + +static VALUE get_default_query_option(VALUE key) { + return rb_hash_aref(rb_ivar_get(cTinyTdsClient, 
id_ivar_default_query_options), key); +} + +static VALUE rb_tinytds_return_code(VALUE self) { + GET_CLIENT_WRAPPER(self); + if (cwrap->client && dbhasretstat(cwrap->client)) { + return LONG2NUM((long)dbretstatus(cwrap->client)); + } else { + return Qnil; + } +} + +static VALUE rb_tinytds_affected_rows(DBPROCESS * client) { + return LONG2NUM((long)dbcount(client)); +} + +static VALUE rb_tinytds_execute(int argc, VALUE *argv, VALUE self) { + VALUE sql; // The required argument (non-keyword) + VALUE kwds; // A hash to store keyword arguments + ID kw_table[3]; // ID array to hold keys for keyword arguments + VALUE kw_values[3]; // VALUE array to hold values of keyword arguments + + // Define the keyword argument names + kw_table[0] = intern_as; + kw_table[1] = intern_empty_sets; + kw_table[2] = intern_timezone; + + // Extract the SQL argument (1st argument) and keyword arguments (kwargs) + rb_scan_args(argc, argv, "1:", &sql, &kwds); + rb_get_kwargs(kwds, kw_table, 0, 3, kw_values); + + kw_values[0] = kw_values[0] == Qundef ? get_default_query_option(sym_as) : kw_values[0]; + kw_values[1] = kw_values[1] == Qundef ? get_default_query_option(sym_empty_sets) : kw_values[1]; + kw_values[2] = kw_values[2] == Qundef ? 
get_default_query_option(sym_timezone) : kw_values[2]; + + unsigned int as_array = 0; + if (kw_values[0] == sym_array) { + as_array = 1; + } + + unsigned int empty_sets = 0; + if (kw_values[1] == Qtrue) { + empty_sets = 1; + } + + VALUE timezone; + if (kw_values[2] == sym_local) { + timezone = intern_local; + } else if (kw_values[2] == sym_utc) { + timezone = intern_utc; + } else { + rb_warn(":timezone option must be :utc or :local - defaulting to :local"); + timezone = intern_local; + } GET_CLIENT_WRAPPER(self); rb_tinytds_send_sql_to_server(cwrap, sql); - result = rb_tinytds_new_result_obj(cwrap); - rb_iv_set(result, "@query_options", rb_funcall(rb_iv_get(self, "@query_options"), intern_dup, 0)); - { - GET_RESULT_WRAPPER(result); - rwrap->local_offset = rb_funcall(cTinyTdsClient, intern_local_offset, 0); - rwrap->encoding = cwrap->encoding; - return result; + + VALUE result = rb_obj_alloc(cTinyTdsResult); + VALUE rows = rb_ary_new(); + rb_ivar_set(result, id_ivar_rows, rows); + + unsigned int field_index; + unsigned int number_of_result_sets = 0; + + VALUE key; + + unsigned int number_of_fields = 0; + + // if a user makes a nested query (e.g. "SELECT 1 as [one]; SELECT 2 as [two];") + // this will loop multiple times + // our fields data structure then will get to be an array of arrays + // and rows will be an array of arrays or hashes + // we track this loop using number_of_result_sets + while ((rb_tiny_tds_client_ok_helper(cwrap->client) == SUCCEED) && (dbresults(cwrap->client) == SUCCEED)) { + unsigned int has_rows = (DBROWS(cwrap->client) == SUCCEED) ? 
1 : 0; + + if (has_rows || empty_sets || number_of_result_sets == 0) { + number_of_fields = dbnumcols(cwrap->client); + VALUE fields = rb_ary_new2(number_of_fields); + + for (field_index = 0; field_index < number_of_fields; field_index++) { + char *colname = dbcolname(cwrap->client, field_index+1); + VALUE field = rb_obj_freeze(ENCODED_STR_NEW2(colname)); + rb_ary_store(fields, field_index, field); + } + + if (number_of_result_sets == 0) { + rb_ivar_set(result, id_ivar_fields, fields); + } else if (number_of_result_sets == 1) { + // we encounter our second loop, so we shuffle the fields around + VALUE multi_result_sets_fields = rb_ary_new(); + + rb_ary_store(multi_result_sets_fields, 0, rb_ivar_get(result, id_ivar_fields)); + rb_ary_store(multi_result_sets_fields, 1, fields); + + rb_ivar_set(result, id_ivar_fields, multi_result_sets_fields); + } else { + rb_ary_push(rb_ivar_get(result, id_ivar_fields), fields); + } + } else { + // it could be that + // there are no rows to be processed + // the user does not want empty sets to be included in their results (our default actually) + // or we are not in the first iteration of the result loop (we always want to fill out fields on the first iteration) + // in any case, through number_of_fields we signal the next loop that we do not want to fetch results + number_of_fields = 0; + } + + if ((has_rows || empty_sets) && number_of_fields > 0) { + VALUE rows = rb_ary_new(); + + while (nogvl_dbnextrow(cwrap->client) != NO_MORE_ROWS) { + VALUE row = as_array ? 
rb_ary_new2(number_of_fields) : rb_hash_new(); + + for (field_index = 0; field_index < number_of_fields; field_index++) { + VALUE val = rb_tinytds_result_fetch_value(self, timezone, number_of_fields, field_index); + + if (as_array) { + rb_ary_store(row, field_index, val); + } else { + if (number_of_result_sets > 0) { + key = rb_ary_entry(rb_ary_entry(rb_ivar_get(result, id_ivar_fields), number_of_result_sets), field_index); + } else { + key = rb_ary_entry(rb_ivar_get(result, id_ivar_fields), field_index); + } + + // for our current row, add a pair with the field name from our fields array and the parsed value + rb_hash_aset(row, key, val); + } + } + + rb_ary_push(rows, row); + } + + // if we have only one set of results, we overwrite @rows with our rows object here + if (number_of_result_sets == 0) { + rb_ivar_set(result, id_ivar_rows, rows); + } else if (number_of_result_sets == 1) { + // when encountering the second result set, we have to adjust @rows to be an array of arrays + VALUE multi_result_set_results = rb_ary_new(); + + rb_ary_store(multi_result_set_results, 0, rb_ivar_get(result, id_ivar_rows)); + rb_ary_store(multi_result_set_results, 1, rows); + + rb_ivar_set(result, id_ivar_rows, multi_result_set_results); + } else { + // when encountering two or more result sets, the structure of @rows has already been adjusted + // to be an array of arrays (with the previous condition) + rb_ary_push(rb_ivar_get(result, id_ivar_rows), rows); + } + + number_of_result_sets++; + } } + + rb_ivar_set(result, id_ivar_affected_rows, rb_tinytds_affected_rows(cwrap->client)); + rb_ivar_set(result, id_ivar_return_code, rb_tinytds_return_code(self)); + rb_tinytds_client_cancel_results(cwrap->client); + + return result; } static VALUE rb_tiny_tds_insert(VALUE self, VALUE sql) { @@ -441,10 +756,6 @@ static VALUE rb_tiny_tds_insert(VALUE self, VALUE sql) { rb_tinytds_send_sql_to_server(cwrap, sql); rb_tinytds_result_exec_helper(cwrap->client); - dbcancel(cwrap->client); - 
cwrap->userdata->dbcancel_sent = 1; - cwrap->userdata->dbsql_sent = 0; - // prepare second query to fetch last identity dbcmd(cwrap->client, cwrap->identity_insert_sql); @@ -473,7 +784,7 @@ static VALUE rb_tiny_tds_do(VALUE self, VALUE sql) { rb_tinytds_send_sql_to_server(cwrap, sql); rb_tinytds_result_exec_helper(cwrap->client); - return LONG2NUM((long)dbcount(cwrap->client)); + return rb_tinytds_affected_rows(cwrap->client); } static VALUE rb_tinytds_charset(VALUE self) { @@ -496,16 +807,6 @@ static VALUE rb_tinytds_escape(VALUE self, VALUE string) { return new_string; } -/* Duplicated in result.c */ -static VALUE rb_tinytds_return_code(VALUE self) { - GET_CLIENT_WRAPPER(self); - if (cwrap->client && dbhasretstat(cwrap->client)) { - return LONG2NUM((long)dbretstatus(cwrap->client)); - } else { - return Qnil; - } -} - static VALUE rb_tinytds_identity_sql(VALUE self) { GET_CLIENT_WRAPPER(self); return rb_str_new2(cwrap->identity_insert_sql); @@ -614,7 +915,7 @@ void init_tinytds_client() { rb_define_method(cTinyTdsClient, "canceled?", rb_tinytds_canceled, 0); rb_define_method(cTinyTdsClient, "dead?", rb_tinytds_dead, 0); rb_define_method(cTinyTdsClient, "sqlsent?", rb_tinytds_sqlsent, 0); - rb_define_method(cTinyTdsClient, "execute", rb_tinytds_execute, 1); + rb_define_method(cTinyTdsClient, "execute", rb_tinytds_execute, -1); rb_define_method(cTinyTdsClient, "insert", rb_tiny_tds_insert, 1); rb_define_method(cTinyTdsClient, "do", rb_tiny_tds_do, 1); rb_define_method(cTinyTdsClient, "charset", rb_tinytds_charset, 0); @@ -653,6 +954,45 @@ void init_tinytds_client() { /* Escape Regexp Global */ opt_escape_regex = rb_funcall(rb_cRegexp, intern_new, 1, rb_str_new2("\\\'")); opt_escape_dblquote = rb_str_new2("''"); + rb_global_variable(&opt_escape_regex); rb_global_variable(&opt_escape_dblquote); + + intern_bigd = rb_intern("BigDecimal"); + intern_divide = rb_intern("/"); + id_ivar_fields = rb_intern("@fields"); + id_ivar_rows = rb_intern("@rows"); + 
id_ivar_default_query_options = rb_intern("@default_query_options"); + id_ivar_return_code = rb_intern("@return_code"); + id_ivar_affected_rows = rb_intern("@affected_rows"); + + intern_as = rb_intern("as"); + intern_empty_sets = rb_intern("empty_sets"); + intern_timezone = rb_intern("timezone"); + intern_utc = rb_intern("utc"); + intern_local = rb_intern("local"); + + cTinyTdsClient = rb_const_get(mTinyTds, rb_intern("Client")); + cTinyTdsResult = rb_const_get(mTinyTds, rb_intern("Result")); + cKernel = rb_const_get(rb_cObject, rb_intern("Kernel")); + cDate = rb_const_get(rb_cObject, rb_intern("Date")); + + opt_float_zero = rb_float_new((double)0); + opt_four = INT2NUM(4); + opt_onek = INT2NUM(1000); + opt_tenk = INT2NUM(10000); + opt_onebil = INT2NUM(1000000000); + + binaryEncoding = rb_enc_find("binary"); + + rb_global_variable(&cTinyTdsResult); + rb_global_variable(&opt_float_zero); + + /* Symbol Helpers */ + sym_as = ID2SYM(intern_as); + sym_array = ID2SYM(rb_intern("array")); + sym_timezone = ID2SYM(intern_timezone); + sym_empty_sets = ID2SYM(intern_empty_sets); + sym_local = ID2SYM(intern_local); + sym_utc = ID2SYM(intern_utc); } diff --git a/ext/tiny_tds/result.c b/ext/tiny_tds/result.c deleted file mode 100644 index bab5c55e..00000000 --- a/ext/tiny_tds/result.c +++ /dev/null @@ -1,558 +0,0 @@ - -#include -#include - -// File Types/Vars - -VALUE cTinyTdsResult; -extern VALUE mTinyTds, cTinyTdsClient, cTinyTdsError; -VALUE cKernel, cDate; -VALUE opt_decimal_zero, opt_float_zero, opt_one, opt_zero, opt_four, opt_19hdr, opt_onek, opt_tenk, opt_onemil, opt_onebil; -static ID intern_new, intern_utc, intern_local, intern_localtime, intern_merge, - intern_civil, intern_new_offset, intern_plus, intern_divide, intern_bigd; -static ID sym_symbolize_keys, sym_as, sym_array, sym_cache_rows, sym_first, sym_timezone, sym_local, sym_utc, sym_empty_sets; - - -// Lib Macros - -rb_encoding *binaryEncoding; -#define ENCODED_STR_NEW(_data, _len) ({ \ - VALUE _val = 
rb_str_new((char *)_data, (long)_len); \ - rb_enc_associate(_val, rwrap->encoding); \ - _val; \ -}) -#define ENCODED_STR_NEW2(_data2) ({ \ - VALUE _val = rb_str_new2((char *)_data2); \ - rb_enc_associate(_val, rwrap->encoding); \ - _val; \ -}) - -#ifdef _WIN32 - #define LONG_LONG_FORMAT "I64d" -#else - #define LONG_LONG_FORMAT "lld" -#endif - - -// Lib Backend (Memory Management) - -static void rb_tinytds_result_mark(void *ptr) { - tinytds_result_wrapper *rwrap = (tinytds_result_wrapper *)ptr; - if (rwrap) { - rb_gc_mark(rwrap->local_offset); - rb_gc_mark(rwrap->fields); - rb_gc_mark(rwrap->fields_processed); - rb_gc_mark(rwrap->results); - rb_gc_mark(rwrap->dbresults_retcodes); - } -} - -static void rb_tinytds_result_free(void *ptr) { - xfree(ptr); -} - -VALUE rb_tinytds_new_result_obj(tinytds_client_wrapper *cwrap) { - VALUE obj; - tinytds_result_wrapper *rwrap; - obj = Data_Make_Struct(cTinyTdsResult, tinytds_result_wrapper, rb_tinytds_result_mark, rb_tinytds_result_free, rwrap); - rwrap->cwrap = cwrap; - rwrap->client = cwrap->client; - rwrap->local_offset = Qnil; - rwrap->fields = rb_ary_new(); - rwrap->fields_processed = rb_ary_new(); - rwrap->results = Qnil; - rwrap->dbresults_retcodes = rb_ary_new(); - rwrap->number_of_results = 0; - rwrap->number_of_fields = 0; - rwrap->number_of_rows = 0; - rb_obj_call_init(obj, 0, NULL); - return obj; -} - -// No GVL Helpers - -#define NOGVL_DBCALL(_dbfunction, _client) ( \ - (RETCODE)(intptr_t)rb_thread_call_without_gvl( \ - (void *(*)(void *))_dbfunction, _client, \ - (rb_unblock_function_t*)dbcancel_ubf, _client ) \ -) - -static void dbcancel_ubf(DBPROCESS *client) { - GET_CLIENT_USERDATA(client); - dbcancel(client); - userdata->dbcancel_sent = 1; -} - -static void nogvl_setup(DBPROCESS *client) { - GET_CLIENT_USERDATA(client); - userdata->nonblocking = 1; - userdata->nonblocking_errors_length = 0; - userdata->nonblocking_errors = malloc(ERRORS_STACK_INIT_SIZE * sizeof(tinytds_errordata)); - 
userdata->nonblocking_errors_size = ERRORS_STACK_INIT_SIZE; -} - -static void nogvl_cleanup(DBPROCESS *client) { - GET_CLIENT_USERDATA(client); - userdata->nonblocking = 0; - userdata->timing_out = 0; - /* - Now that the blocking operation is done, we can finally throw any - exceptions based on errors from SQL Server. - */ - short int i; - for (i = 0; i < userdata->nonblocking_errors_length; i++) { - tinytds_errordata error = userdata->nonblocking_errors[i]; - - // lookahead to drain any info messages ahead of raising error - if (!error.is_message) { - short int j; - for (j = i; j < userdata->nonblocking_errors_length; j++) { - tinytds_errordata msg_error = userdata->nonblocking_errors[j]; - if (msg_error.is_message) { - rb_tinytds_raise_error(client, msg_error); - } - } - } - - rb_tinytds_raise_error(client, error); - } - - free(userdata->nonblocking_errors); - userdata->nonblocking_errors_length = 0; - userdata->nonblocking_errors_size = 0; -} - -static RETCODE nogvl_dbsqlok(DBPROCESS *client) { - int retcode = FAIL; - GET_CLIENT_USERDATA(client); - nogvl_setup(client); - retcode = NOGVL_DBCALL(dbsqlok, client); - nogvl_cleanup(client); - userdata->dbsqlok_sent = 1; - return retcode; -} - -static RETCODE nogvl_dbresults(DBPROCESS *client) { - int retcode = FAIL; - nogvl_setup(client); - retcode = NOGVL_DBCALL(dbresults, client); - nogvl_cleanup(client); - return retcode; -} - -static RETCODE nogvl_dbnextrow(DBPROCESS * client) { - int retcode = FAIL; - nogvl_setup(client); - retcode = NOGVL_DBCALL(dbnextrow, client); - nogvl_cleanup(client); - return retcode; -} - -// Lib Backend (Helpers) - -static RETCODE rb_tinytds_result_dbresults_retcode(VALUE self) { - VALUE ruby_rc; - RETCODE db_rc; - GET_RESULT_WRAPPER(self); - ruby_rc = rb_ary_entry(rwrap->dbresults_retcodes, rwrap->number_of_results); - if (NIL_P(ruby_rc)) { - db_rc = nogvl_dbresults(rwrap->client); - ruby_rc = INT2FIX(db_rc); - rb_ary_store(rwrap->dbresults_retcodes, rwrap->number_of_results, ruby_rc); 
- } else { - db_rc = FIX2INT(ruby_rc); - } - return db_rc; -} - -static RETCODE rb_tinytds_result_ok_helper(DBPROCESS *client) { - GET_CLIENT_USERDATA(client); - if (userdata->dbsqlok_sent == 0) { - userdata->dbsqlok_retcode = nogvl_dbsqlok(client); - } - return userdata->dbsqlok_retcode; -} - -static VALUE rb_tinytds_result_fetch_row(VALUE self, ID timezone, int symbolize_keys, int as_array) { - VALUE row; - /* Storing Values */ - unsigned int i; - /* Wrapper And Local Vars */ - GET_RESULT_WRAPPER(self); - /* Create Empty Row */ - row = as_array ? rb_ary_new2(rwrap->number_of_fields) : rb_hash_new(); - for (i = 0; i < rwrap->number_of_fields; i++) { - VALUE val = Qnil; - int col = i+1; - int coltype = dbcoltype(rwrap->client, col); - BYTE *data = dbdata(rwrap->client, col); - DBINT data_len = dbdatlen(rwrap->client, col); - int null_val = ((data == NULL) && (data_len == 0)); - if (!null_val) { - switch(coltype) { - case SYBINT1: - val = INT2FIX(*(DBTINYINT *)data); - break; - case SYBINT2: - val = INT2FIX(*(DBSMALLINT *)data); - break; - case SYBINT4: - val = INT2NUM(*(DBINT *)data); - break; - case SYBINT8: - val = LL2NUM(*(DBBIGINT *)data); - break; - case SYBBIT: - val = *(int *)data ? Qtrue : Qfalse; - break; - case SYBNUMERIC: - case SYBDECIMAL: { - DBTYPEINFO *data_info = dbcoltypeinfo(rwrap->client, col); - int data_slength = (int)data_info->precision + (int)data_info->scale + 1; - char converted_decimal[data_slength]; - dbconvert(rwrap->client, coltype, data, data_len, SYBVARCHAR, (BYTE *)converted_decimal, -1); - val = rb_funcall(cKernel, intern_bigd, 1, rb_str_new2((char *)converted_decimal)); - break; - } - case SYBFLT8: { - double col_to_double = *(double *)data; - val = (col_to_double == 0.000000) ? opt_float_zero : rb_float_new(col_to_double); - break; - } - case SYBREAL: { - float col_to_float = *(float *)data; - val = (col_to_float == 0.0) ? 
opt_float_zero : rb_float_new(col_to_float); - break; - } - case SYBMONEY: { - DBMONEY *money = (DBMONEY *)data; - char converted_money[25]; - long long money_value = ((long long)money->mnyhigh << 32) | money->mnylow; - sprintf(converted_money, "%" LONG_LONG_FORMAT, money_value); - val = rb_funcall(cKernel, intern_bigd, 2, rb_str_new2(converted_money), opt_four); - val = rb_funcall(val, intern_divide, 1, opt_tenk); - break; - } - case SYBMONEY4: { - DBMONEY4 *money = (DBMONEY4 *)data; - char converted_money[20]; - sprintf(converted_money, "%f", money->mny4 / 10000.0); - val = rb_funcall(cKernel, intern_bigd, 1, rb_str_new2(converted_money)); - break; - } - case SYBBINARY: - case SYBIMAGE: - val = rb_str_new((char *)data, (long)data_len); - #ifdef HAVE_RUBY_ENCODING_H - rb_enc_associate(val, binaryEncoding); - #endif - break; - case 36: { // SYBUNIQUE - char converted_unique[37]; - dbconvert(rwrap->client, coltype, data, 37, SYBVARCHAR, (BYTE *)converted_unique, -1); - val = ENCODED_STR_NEW2(converted_unique); - break; - } - case SYBDATETIME4: { - DBDATETIME new_data; - dbconvert(rwrap->client, coltype, data, data_len, SYBDATETIME, (BYTE *)&new_data, sizeof(new_data)); - data = (BYTE *)&new_data; - data_len = sizeof(new_data); - } - case SYBDATETIME: { - DBDATEREC dr; - dbdatecrack(rwrap->client, &dr, (DBDATETIME *)data); - if (dr.year + dr.month + dr.day + dr.hour + dr.minute + dr.second + dr.millisecond != 0) { - val = rb_funcall(rb_cTime, timezone, 7, INT2NUM(dr.year), INT2NUM(dr.month), INT2NUM(dr.day), INT2NUM(dr.hour), INT2NUM(dr.minute), INT2NUM(dr.second), INT2NUM(dr.millisecond*1000)); - } - break; - } - case SYBMSDATE: - case SYBMSTIME: - case SYBMSDATETIME2: - case SYBMSDATETIMEOFFSET: { - DBDATEREC2 dr2; - dbanydatecrack(rwrap->client, &dr2, coltype, data); - switch(coltype) { - case SYBMSDATE: { - val = rb_funcall(cDate, intern_new, 3, INT2NUM(dr2.year), INT2NUM(dr2.month), INT2NUM(dr2.day)); - break; - } - case SYBMSTIME: { - VALUE rational_nsec = 
rb_Rational(INT2NUM(dr2.nanosecond), opt_onek); - val = rb_funcall(rb_cTime, timezone, 7, INT2NUM(1900), INT2NUM(1), INT2NUM(1), INT2NUM(dr2.hour), INT2NUM(dr2.minute), INT2NUM(dr2.second), rational_nsec); - break; - } - case SYBMSDATETIME2: { - VALUE rational_nsec = rb_Rational(INT2NUM(dr2.nanosecond), opt_onek); - val = rb_funcall(rb_cTime, timezone, 7, INT2NUM(dr2.year), INT2NUM(dr2.month), INT2NUM(dr2.day), INT2NUM(dr2.hour), INT2NUM(dr2.minute), INT2NUM(dr2.second), rational_nsec); - break; - } - case SYBMSDATETIMEOFFSET: { - long long numerator = ((long)dr2.second * (long long)1000000000) + (long long)dr2.nanosecond; - VALUE rational_sec = rb_Rational(LL2NUM(numerator), opt_onebil); - val = rb_funcall(rb_cTime, intern_new, 7, INT2NUM(dr2.year), INT2NUM(dr2.month), INT2NUM(dr2.day), INT2NUM(dr2.hour), INT2NUM(dr2.minute), rational_sec, INT2NUM(dr2.tzone*60)); - break; - } - } - break; - } - case SYBCHAR: - case SYBTEXT: - val = ENCODED_STR_NEW(data, data_len); - break; - case 98: { // SYBVARIANT - if (data_len == 4) { - val = INT2NUM(*(DBINT *)data); - break; - } else { - val = ENCODED_STR_NEW(data, data_len); - break; - } - } - default: - val = ENCODED_STR_NEW(data, data_len); - break; - } - } - if (as_array) { - rb_ary_store(row, i, val); - } else { - VALUE key; - if (rwrap->number_of_results == 0) { - key = rb_ary_entry(rwrap->fields, i); - } else { - key = rb_ary_entry(rb_ary_entry(rwrap->fields, rwrap->number_of_results), i); - } - rb_hash_aset(row, key, val); - } - } - return row; -} - - -// TinyTds::Client (public) - -static VALUE rb_tinytds_result_fields(VALUE self) { - RETCODE dbsqlok_rc, dbresults_rc; - VALUE fields_processed; - GET_RESULT_WRAPPER(self); - dbsqlok_rc = rb_tinytds_result_ok_helper(rwrap->client); - dbresults_rc = rb_tinytds_result_dbresults_retcode(self); - fields_processed = rb_ary_entry(rwrap->fields_processed, rwrap->number_of_results); - if ((dbsqlok_rc == SUCCEED) && (dbresults_rc == SUCCEED) && (fields_processed == Qnil)) { - /* 
Default query options. */ - int symbolize_keys = 0; - VALUE qopts = rb_iv_get(self, "@query_options"); - if (rb_hash_aref(qopts, sym_symbolize_keys) == Qtrue) - symbolize_keys = 1; - /* Set number_of_fields count for this result set. */ - rwrap->number_of_fields = dbnumcols(rwrap->client); - if (rwrap->number_of_fields > 0) { - /* Create fields for this result set. */ - unsigned int fldi = 0; - VALUE fields = rb_ary_new2(rwrap->number_of_fields); - for (fldi = 0; fldi < rwrap->number_of_fields; fldi++) { - char *colname = dbcolname(rwrap->client, fldi+1); - VALUE field = symbolize_keys ? rb_str_intern(ENCODED_STR_NEW2(colname)) : rb_obj_freeze(ENCODED_STR_NEW2(colname)); - rb_ary_store(fields, fldi, field); - } - /* Store the fields. */ - if (rwrap->number_of_results == 0) { - rwrap->fields = fields; - } else if (rwrap->number_of_results == 1) { - VALUE multi_rs_fields = rb_ary_new(); - rb_ary_store(multi_rs_fields, 0, rwrap->fields); - rb_ary_store(multi_rs_fields, 1, fields); - rwrap->fields = multi_rs_fields; - } else { - rb_ary_store(rwrap->fields, rwrap->number_of_results, fields); - } - } - rb_ary_store(rwrap->fields_processed, rwrap->number_of_results, Qtrue); - } - return rwrap->fields; -} - -static VALUE rb_tinytds_result_each(int argc, VALUE * argv, VALUE self) { - /* Local Vars */ - VALUE qopts, opts, block; - ID timezone; - int symbolize_keys = 0, as_array = 0, cache_rows = 0, first = 0, empty_sets = 0; - tinytds_client_userdata *userdata; - GET_RESULT_WRAPPER(self); - userdata = (tinytds_client_userdata *)dbgetuserdata(rwrap->client); - /* Merge Options Hash To Query Options. Populate Opts & Block Var. 
*/ - qopts = rb_iv_get(self, "@query_options"); - if (rb_scan_args(argc, argv, "01&", &opts, &block) == 1) - qopts = rb_funcall(qopts, intern_merge, 1, opts); - rb_iv_set(self, "@query_options", qopts); - /* Locals From Options */ - if (rb_hash_aref(qopts, sym_first) == Qtrue) - first = 1; - if (rb_hash_aref(qopts, sym_symbolize_keys) == Qtrue) - symbolize_keys = 1; - if (rb_hash_aref(qopts, sym_as) == sym_array) - as_array = 1; - if (rb_hash_aref(qopts, sym_cache_rows) == Qtrue) - cache_rows = 1; - if (rb_hash_aref(qopts, sym_timezone) == sym_local) { - timezone = intern_local; - } else if (rb_hash_aref(qopts, sym_timezone) == sym_utc) { - timezone = intern_utc; - } else { - rb_warn(":timezone option must be :utc or :local - defaulting to :local"); - timezone = intern_local; - } - if (rb_hash_aref(qopts, sym_empty_sets) == Qtrue) - empty_sets = 1; - /* Make The Results Or Yield Existing */ - if (NIL_P(rwrap->results)) { - RETCODE dbsqlok_rc, dbresults_rc; - rwrap->results = rb_ary_new(); - dbsqlok_rc = rb_tinytds_result_ok_helper(rwrap->client); - dbresults_rc = rb_tinytds_result_dbresults_retcode(self); - while ((dbsqlok_rc == SUCCEED) && (dbresults_rc == SUCCEED)) { - int has_rows = (DBROWS(rwrap->client) == SUCCEED) ? 1 : 0; - if (has_rows || empty_sets || (rwrap->number_of_results == 0)) - rb_tinytds_result_fields(self); - if ((has_rows || empty_sets) && rwrap->number_of_fields > 0) { - /* Create rows for this result set. */ - unsigned long rowi = 0; - VALUE result = rb_ary_new(); - while (nogvl_dbnextrow(rwrap->client) != NO_MORE_ROWS) { - VALUE row = rb_tinytds_result_fetch_row(self, timezone, symbolize_keys, as_array); - if (cache_rows) - rb_ary_store(result, rowi, row); - if (!NIL_P(block)) - rb_yield(row); - if (first) { - dbcanquery(rwrap->client); - userdata->dbcancel_sent = 1; - } - rowi++; - } - rwrap->number_of_rows = rowi; - /* Store the result. 
*/ - if (cache_rows) { - if (rwrap->number_of_results == 0) { - rwrap->results = result; - } else if (rwrap->number_of_results == 1) { - VALUE multi_resultsets = rb_ary_new(); - rb_ary_store(multi_resultsets, 0, rwrap->results); - rb_ary_store(multi_resultsets, 1, result); - rwrap->results = multi_resultsets; - } else { - rb_ary_store(rwrap->results, rwrap->number_of_results, result); - } - } - // If we find results increment the counter that helpers use and setup the next loop. - rwrap->number_of_results = rwrap->number_of_results + 1; - dbresults_rc = rb_tinytds_result_dbresults_retcode(self); - rb_ary_store(rwrap->fields_processed, rwrap->number_of_results, Qnil); - } else { - // If we do not find results, side step the rb_tinytds_result_dbresults_retcode helper and - // manually populate its memoized array while nullifing any memoized fields too before loop. - dbresults_rc = nogvl_dbresults(rwrap->client); - rb_ary_store(rwrap->dbresults_retcodes, rwrap->number_of_results, INT2FIX(dbresults_rc)); - rb_ary_store(rwrap->fields_processed, rwrap->number_of_results, Qnil); - } - } - if (dbresults_rc == FAIL) - rb_warn("TinyTDS: Something in the dbresults() while loop set the return code to FAIL.\n"); - userdata->dbsql_sent = 0; - } else if (!NIL_P(block)) { - unsigned long i; - for (i = 0; i < rwrap->number_of_rows; i++) { - rb_yield(rb_ary_entry(rwrap->results, i)); - } - } - return rwrap->results; -} - -static VALUE rb_tinytds_result_cancel(VALUE self) { - tinytds_client_userdata *userdata; - GET_RESULT_WRAPPER(self); - userdata = (tinytds_client_userdata *)dbgetuserdata(rwrap->client); - if (rwrap->client && !userdata->dbcancel_sent) { - rb_tinytds_result_ok_helper(rwrap->client); - dbcancel(rwrap->client); - userdata->dbcancel_sent = 1; - userdata->dbsql_sent = 0; - } - return Qtrue; -} - -static VALUE rb_tinytds_result_affected_rows(VALUE self) { - GET_RESULT_WRAPPER(self); - if (rwrap->client) { - return LONG2NUM((long)dbcount(rwrap->client)); - } else { - 
return Qnil; - } -} - -/* Duplicated in client.c */ -static VALUE rb_tinytds_result_return_code(VALUE self) { - GET_RESULT_WRAPPER(self); - if (rwrap->client && dbhasretstat(rwrap->client)) { - return LONG2NUM((long)dbretstatus(rwrap->client)); - } else { - return Qnil; - } -} - -// Lib Init - -void init_tinytds_result() { - /* Data Classes */ - cKernel = rb_const_get(rb_cObject, rb_intern("Kernel")); - cDate = rb_const_get(rb_cObject, rb_intern("Date")); - /* Define TinyTds::Result */ - cTinyTdsResult = rb_define_class_under(mTinyTds, "Result", rb_cObject); - rb_undef_alloc_func(cTinyTdsResult); - /* Define TinyTds::Result Public Methods */ - rb_define_method(cTinyTdsResult, "fields", rb_tinytds_result_fields, 0); - rb_define_method(cTinyTdsResult, "each", rb_tinytds_result_each, -1); - rb_define_method(cTinyTdsResult, "cancel", rb_tinytds_result_cancel, 0); - rb_define_method(cTinyTdsResult, "affected_rows", rb_tinytds_result_affected_rows, 0); - rb_define_method(cTinyTdsResult, "return_code", rb_tinytds_result_return_code, 0); - /* Intern String Helpers */ - intern_new = rb_intern("new"); - intern_utc = rb_intern("utc"); - intern_local = rb_intern("local"); - intern_merge = rb_intern("merge"); - intern_localtime = rb_intern("localtime"); - intern_civil = rb_intern("civil"); - intern_new_offset = rb_intern("new_offset"); - intern_plus = rb_intern("+"); - intern_divide = rb_intern("/"); - intern_bigd = rb_intern("BigDecimal"); - /* Symbol Helpers */ - sym_symbolize_keys = ID2SYM(rb_intern("symbolize_keys")); - sym_as = ID2SYM(rb_intern("as")); - sym_array = ID2SYM(rb_intern("array")); - sym_cache_rows = ID2SYM(rb_intern("cache_rows")); - sym_first = ID2SYM(rb_intern("first")); - sym_local = ID2SYM(intern_local); - sym_utc = ID2SYM(intern_utc); - sym_timezone = ID2SYM(rb_intern("timezone")); - sym_empty_sets = ID2SYM(rb_intern("empty_sets")); - /* Data Conversion Options */ - opt_decimal_zero = rb_str_new2("0.0"); - rb_global_variable(&opt_decimal_zero); - 
opt_float_zero = rb_float_new((double)0); - rb_global_variable(&opt_float_zero); - opt_one = INT2NUM(1); - opt_zero = INT2NUM(0); - opt_four = INT2NUM(4); - opt_19hdr = INT2NUM(1900); - opt_onek = INT2NUM(1000); - opt_tenk = INT2NUM(10000); - opt_onemil = INT2NUM(1000000); - opt_onebil = INT2NUM(1000000000); - /* Encoding */ - #ifdef HAVE_RUBY_ENCODING_H - binaryEncoding = rb_enc_find("binary"); - #endif -} diff --git a/ext/tiny_tds/result.h b/ext/tiny_tds/result.h deleted file mode 100644 index 2450ef05..00000000 --- a/ext/tiny_tds/result.h +++ /dev/null @@ -1,32 +0,0 @@ - -#ifndef TINYTDS_RESULT_H -#define TINYTDS_RESULT_H - -void init_tinytds_result(); -VALUE rb_tinytds_new_result_obj(tinytds_client_wrapper *cwrap); - -typedef struct { - tinytds_client_wrapper *cwrap; - DBPROCESS *client; - VALUE local_offset; - VALUE fields; - VALUE fields_processed; - VALUE results; - rb_encoding *encoding; - VALUE dbresults_retcodes; - unsigned int number_of_results; - unsigned int number_of_fields; - unsigned long number_of_rows; -} tinytds_result_wrapper; - - -// Lib Macros - -#define GET_RESULT_WRAPPER(self) \ - tinytds_result_wrapper *rwrap; \ - Data_Get_Struct(self, tinytds_result_wrapper, rwrap) - - - - -#endif diff --git a/ext/tiny_tds/tiny_tds_ext.c b/ext/tiny_tds/tiny_tds_ext.c index fc68694b..b6eadc3c 100644 --- a/ext/tiny_tds/tiny_tds_ext.c +++ b/ext/tiny_tds/tiny_tds_ext.c @@ -10,6 +10,5 @@ void Init_tiny_tds() { mTinyTds = rb_define_module("TinyTds"); cTinyTdsError = rb_const_get(mTinyTds, rb_intern("Error")); init_tinytds_client(); - init_tinytds_result(); } diff --git a/ext/tiny_tds/tiny_tds_ext.h b/ext/tiny_tds/tiny_tds_ext.h index 55494981..9e799221 100644 --- a/ext/tiny_tds/tiny_tds_ext.h +++ b/ext/tiny_tds/tiny_tds_ext.h @@ -12,6 +12,5 @@ #include #include -#include #endif diff --git a/lib/tiny_tds.rb b/lib/tiny_tds.rb index 52247915..9d5b5451 100644 --- a/lib/tiny_tds.rb +++ b/lib/tiny_tds.rb @@ -6,8 +6,8 @@ require 'tiny_tds/version' require 
'tiny_tds/error' require 'tiny_tds/client' -require 'tiny_tds/result' require 'tiny_tds/gem' +require 'tiny_tds/result' # Support multiple ruby versions, fat binaries under Windows. if RUBY_PLATFORM =~ /mingw|mswin/ && RUBY_VERSION =~ /(\d+.\d+)/ diff --git a/lib/tiny_tds/client.rb b/lib/tiny_tds/client.rb index 5565a164..e8ff161a 100644 --- a/lib/tiny_tds/client.rb +++ b/lib/tiny_tds/client.rb @@ -3,10 +3,8 @@ class Client @default_query_options = { as: :hash, - symbolize_keys: false, - cache_rows: true, - timezone: :local, - empty_sets: true + empty_sets: true, + timezone: :local } attr_reader :query_options @@ -45,7 +43,6 @@ def initialize(opts = {}) end opts[:username] = parse_username(opts) - @query_options = self.class.default_query_options.dup opts[:password] = opts[:password].to_s if opts[:password] && opts[:password].to_s.strip != '' opts[:appname] ||= 'TinyTds' opts[:tds_version] = tds_versions_setter(opts) diff --git a/lib/tiny_tds/result.rb b/lib/tiny_tds/result.rb index 618c044b..92c332bd 100644 --- a/lib/tiny_tds/result.rb +++ b/lib/tiny_tds/result.rb @@ -1,7 +1,11 @@ module TinyTds class Result + attr_reader :affected_rows, :fields, :rows, :return_code include Enumerable + def each(&bk) + rows.each(&bk) + end end end diff --git a/test/result_test.rb b/test/result_test.rb index 03c8df57..9135565e 100644 --- a/test/result_test.rb +++ b/test/result_test.rb @@ -20,101 +20,37 @@ class ResultTest < TinyTds::TestCase assert result.respond_to?(:each) end - it 'returns all results for #each with no block' do - result = @client.execute(@query1) - data = result.each - row = data.first - assert_instance_of Array, data - assert_equal 1, data.size - assert_instance_of Hash, row, 'hash is the default query option' - end - it 'returns all results for #each with a block yielding a row at a time' do result = @client.execute(@query1) data = result.each do |row| assert_instance_of Hash, row, 'hash is the default query option' end - assert_instance_of Array, data - end - 
- it 'allows successive calls to each returning the same data' do - result = @client.execute(@query1) - data = result.each - result.each - assert_equal data.object_id, result.each.object_id - assert_equal data.first.object_id, result.each.first.object_id - end - it 'returns hashes with string keys' do - result = @client.execute(@query1) - row = result.each(:as => :hash, :symbolize_keys => false).first - assert_instance_of Hash, row - assert_equal ['one'], row.keys - assert_equal ['one'], result.fields - end - - it 'returns hashes with symbol keys' do - result = @client.execute(@query1) - row = result.each(:as => :hash, :symbolize_keys => true).first - assert_instance_of Hash, row - assert_equal [:one], row.keys - assert_equal [:one], result.fields - end - - it 'returns arrays with string fields' do - result = @client.execute(@query1) - row = result.each(:as => :array, :symbolize_keys => false).first - assert_instance_of Array, row - assert_equal ['one'], result.fields + assert_instance_of Array, data end - it 'returns arrays with symbol fields' do - result = @client.execute(@query1) - row = result.each(:as => :array, :symbolize_keys => true).first + it 'returns arrays' do + results = @client.execute(@query1, as: :array) + row = results.first assert_instance_of Array, row - assert_equal [:one], result.fields + assert_equal ['one'], results.fields end it 'allows sql concat + to work' do rollback_transaction(@client) do @client.do("DELETE FROM [datatypes]") @client.do("INSERT INTO [datatypes] ([char_10], [varchar_50]) VALUES ('1', '2')") - result = @client.execute("SELECT TOP (1) [char_10] + 'test' + [varchar_50] AS [test] FROM [datatypes]").each.first['test'] + result = @client.execute("SELECT TOP (1) [char_10] + 'test' + [varchar_50] AS [test] FROM [datatypes]").first['test'] _(result).must_equal "1 test2" end end - it 'must be able to turn :cache_rows option off' do - result = @client.execute(@query1) - local = [] - result.each(:cache_rows => false) do |row| - 
local << row - end - assert local.first, 'should have iterated over each row' - assert_equal [], result.each, 'should not have been cached' - assert_equal ['one'], result.fields, 'should still cache field names' - end - - it 'must be able to get the first result row only' do - load_current_schema - big_query = "SELECT [id] FROM [datatypes]" - one = @client.execute(big_query).each(:first => true) - many = @client.execute(big_query).each - assert many.size > 1 - assert one.size == 1 - end - - it 'copes with no results when using first option' do - data = @client.execute("SELECT [id] FROM [datatypes] WHERE [id] = -1").each(:first => true) - assert_equal [], data - end - it 'must delete, insert and find data' do rollback_transaction(@client) do text = 'test insert and delete' @client.do("DELETE FROM [datatypes] WHERE [varchar_50] IS NOT NULL") @client.do("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')") - row = @client.execute("SELECT [varchar_50] FROM [datatypes] WHERE [varchar_50] IS NOT NULL").each.first + row = @client.execute("SELECT [varchar_50] FROM [datatypes] WHERE [varchar_50] IS NOT NULL").first assert row assert_equal text, row['varchar_50'] end @@ -125,7 +61,7 @@ class ResultTest < TinyTds::TestCase text = '😍' @client.do("DELETE FROM [datatypes] WHERE [nvarchar_50] IS NOT NULL") @client.do("INSERT INTO [datatypes] ([nvarchar_50]) VALUES (N'#{text}')") - row = @client.execute("SELECT [nvarchar_50] FROM [datatypes] WHERE [nvarchar_50] IS NOT NULL").each.first + row = @client.execute("SELECT [nvarchar_50] FROM [datatypes] WHERE [nvarchar_50] IS NOT NULL").first assert_equal text, row['nvarchar_50'] end end @@ -134,13 +70,13 @@ class ResultTest < TinyTds::TestCase rollback_transaction(@client) do text = 'test affected rows sql' @client.do("DELETE FROM [datatypes]") - afrows = @client.execute("SELECT @@ROWCOUNT AS AffectedRows").each.first['AffectedRows'] + afrows = @client.execute("SELECT @@ROWCOUNT AS AffectedRows").first['AffectedRows'] 
_(['Fixnum', 'Integer']).must_include afrows.class.name @client.do("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')") - pk1 = @client.execute(@client.identity_sql).each.first['Ident'] + pk1 = @client.execute(@client.identity_sql).first['Ident'] _(['Fixnum', 'Integer']).must_include pk1.class.name, 'we it be able to CAST to bigint' @client.do("UPDATE [datatypes] SET [varchar_50] = NULL WHERE [varchar_50] = '#{text}'") - afrows = @client.execute("SELECT @@ROWCOUNT AS AffectedRows").each.first['AffectedRows'] + afrows = @client.execute("SELECT @@ROWCOUNT AS AffectedRows").first['AffectedRows'] assert_equal 1, afrows end end @@ -150,7 +86,7 @@ class ResultTest < TinyTds::TestCase @client.do("BEGIN TRANSACTION") @client.do("DELETE FROM [datatypes]") @client.do("COMMIT TRANSACTION") - count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").each.first['count'] + count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").first['count'] assert_equal 0, count end end @@ -160,15 +96,13 @@ class ResultTest < TinyTds::TestCase @client.do("BEGIN TRANSACTION") @client.do("DELETE FROM [datatypes]") @client.do("ROLLBACK TRANSACTION") - count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").each.first['count'] + count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").first['count'] _(count).wont_equal 0 end it 'has a #fields accessor with logic default and valid outcome' do result = @client.execute(@query1) _(result.fields).must_equal ['one'] - result.each - _(result.fields).must_equal ['one'] end it 'always returns an array for fields for all sql' do @@ -181,18 +115,6 @@ class ResultTest < TinyTds::TestCase # Fields before each. 
result = @client.execute(no_results_query) _(result.fields).must_equal ['id', 'varchar_50'] - result.each - _(result.fields).must_equal ['id', 'varchar_50'] - # Each then fields - result = @client.execute(no_results_query) - result.each - _(result.fields).must_equal ['id', 'varchar_50'] - end - - it 'allows the result to be canceled before reading' do - result = @client.execute(@query1) - result.cancel - @client.execute(@query1).each end it 'works in tandem with the client when needing to find out if client has sql sent and result is canceled or not' do @@ -200,27 +122,28 @@ class ResultTest < TinyTds::TestCase @client = TinyTds::Client.new(connection_options) _(@client.sqlsent?).must_equal false _(@client.canceled?).must_equal false + # With active result before and after cancel. result = @client.execute(@query1) - _(@client.sqlsent?).must_equal true - _(@client.canceled?).must_equal false - result.cancel _(@client.sqlsent?).must_equal false _(@client.canceled?).must_equal true - assert result.cancel, 'must be safe to call again' + # With each and no block. @client.execute(@query1).each _(@client.sqlsent?).must_equal false - _(@client.canceled?).must_equal false + _(@client.canceled?).must_equal true + # With each and block. @client.execute(@query1).each do |row| - _(@client.sqlsent?).must_equal true, 'when iterating over each row in a block' - _(@client.canceled?).must_equal false + _(@client.sqlsent?).must_equal false + _(@client.canceled?).must_equal true end + _(@client.sqlsent?).must_equal false - _(@client.canceled?).must_equal false + _(@client.canceled?).must_equal true + # With each and block canceled half way thru. 
- count = @client.execute("SELECT COUNT([id]) AS [count] FROM [datatypes]").each[0]['count'] + count = @client.execute("SELECT COUNT([id]) AS [count] FROM [datatypes]").first['count'] assert count > 10, 'since we want to cancel early for test' result = @client.execute("SELECT [id] FROM [datatypes]") index = 0 @@ -228,43 +151,20 @@ class ResultTest < TinyTds::TestCase break if index > 10 index += 1 end - _(@client.sqlsent?).must_equal true - _(@client.canceled?).must_equal false - result.cancel + _(@client.sqlsent?).must_equal false _(@client.canceled?).must_equal true - # With first - @client.execute("SELECT [id] FROM [datatypes]").each(:first => true) - _(@client.sqlsent?).must_equal false - _(@client.canceled?).must_equal true - end - - it 'use same string object for hash keys' do - data = @client.execute("SELECT [id], [bigint] FROM [datatypes]").each - assert_equal data.first.keys.map { |r| r.object_id }, data.last.keys.map { |r| r.object_id } - end - - it 'has properly encoded column names with symbol keys' do - col_name = "öäüß" - @client.do("DROP TABLE [test_encoding]") rescue nil - @client.do("CREATE TABLE [dbo].[test_encoding] ( [id] int NOT NULL IDENTITY(1,1) PRIMARY KEY, [#{col_name}] [nvarchar](10) NOT NULL )") - @client.do("INSERT INTO [test_encoding] ([#{col_name}]) VALUES (N'#{col_name}')") - result = @client.execute("SELECT [#{col_name}] FROM [test_encoding]") - row = result.each(:as => :hash, :symbolize_keys => true).first - assert_instance_of Symbol, result.fields.first - assert_equal col_name.to_sym, result.fields.first - assert_instance_of Symbol, row.keys.first - assert_equal col_name.to_sym, row.keys.first end it 'allows #return_code to work with stored procedures and reset per sql batch' do assert_nil @client.return_code + result = @client.execute("EXEC tinytds_TestReturnCodes") - assert_equal [{ "one" => 1 }], result.each + assert_equal [{ "one" => 1 }], result.rows assert_equal 420, @client.return_code assert_equal 420, result.return_code + 
result = @client.execute('SELECT 1 as [one]') - result.each assert_nil @client.return_code assert_nil result.return_code end @@ -293,31 +193,31 @@ class ResultTest < TinyTds::TestCase it 'handles a command buffer with double selects' do result = @client.execute(@double_select) - result_sets = result.each - assert_equal 2, result_sets.size - assert_equal [{ 'rs1' => 1 }], result_sets.first - assert_equal [{ 'rs2' => 2 }], result_sets.last + assert_equal 2, result.count + assert_equal [{ 'rs1' => 1 }], result.rows.first + assert_equal [{ 'rs2' => 2 }], result.rows.last assert_equal [['rs1'], ['rs2']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' + # As array - result = @client.execute(@double_select) - result_sets = result.each(:as => :array) - assert_equal 2, result_sets.size - assert_equal [[1]], result_sets.first - assert_equal [[2]], result_sets.last + result = @client.execute(@double_select, as: :array) + assert_equal 2, result.count + assert_equal [[1]], result.rows.first + assert_equal [[2]], result.rows.last assert_equal [['rs1'], ['rs2']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' end it 'yields each row for each result set' do data = [] - result_sets = @client.execute(@double_select).each { |row| data << row } - assert_equal data.first, result_sets.first[0] - assert_equal data.last, result_sets.last[0] + + result = @client.execute(@double_select) + result.each { |row| data << row } + + assert_equal data.first, result.rows.first + assert_equal data.last, result.rows.last end it 'works from a stored procedure' do - results1, results2 = @client.execute("EXEC sp_helpconstraint '[datatypes]'").each + results1, results2 = @client.execute("EXEC sp_helpconstraint '[datatypes]'").rows assert_equal [{ "Object Name" => "[datatypes]" }], results1 constraint_info = results2.first assert constraint_info.key?("constraint_keys") @@ -340,74 +240,64 @@ class ResultTest < 
TinyTds::TestCase it 'handles a basic empty result set' do result = @client.execute(@empty_select) - assert_equal [], result.each + assert_equal [], result.to_a assert_equal ['rs1'], result.fields end it 'includes empty result sets by default - using 1st empty buffer' do result = @client.execute(@triple_select_1st_empty) - result_sets = result.each - assert_equal 3, result_sets.size - assert_equal [], result_sets[0] - assert_equal [{ 'rs2' => 2 }], result_sets[1] - assert_equal [{ 'rs3' => 3 }], result_sets[2] + assert_equal 3, result.count + assert_equal [], result.rows[0] + assert_equal [{ 'rs2' => 2 }], result.rows[1] + assert_equal [{ 'rs3' => 3 }], result.rows[2] assert_equal [['rs1'], ['rs2'], ['rs3']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' + # As array - result = @client.execute(@triple_select_1st_empty) - result_sets = result.each(:as => :array) - assert_equal 3, result_sets.size - assert_equal [], result_sets[0] - assert_equal [[2]], result_sets[1] - assert_equal [[3]], result_sets[2] + result = @client.execute(@triple_select_1st_empty, as: :array) + assert_equal 3, result.count + assert_equal [], result.rows[0] + assert_equal [[2]], result.rows[1] + assert_equal [[3]], result.rows[2] assert_equal [['rs1'], ['rs2'], ['rs3']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' end it 'includes empty result sets by default - using 2nd empty buffer' do result = @client.execute(@triple_select_2nd_empty) - result_sets = result.each - assert_equal 3, result_sets.size - assert_equal [{ 'rs1' => 1 }], result_sets[0] - assert_equal [], result_sets[1] - assert_equal [{ 'rs3' => 3 }], result_sets[2] + assert_equal 3, result.count + assert_equal [{ 'rs1' => 1 }], result.rows[0] + assert_equal [], result.rows[1] + assert_equal [{ 'rs3' => 3 }], result.rows[2] assert_equal [['rs1'], ['rs2'], ['rs3']], result.fields - assert_equal result.each.object_id, 
result.each.object_id, 'same cached rows' + # As array - result = @client.execute(@triple_select_2nd_empty) - result_sets = result.each(:as => :array) - assert_equal 3, result_sets.size - assert_equal [[1]], result_sets[0] - assert_equal [], result_sets[1] - assert_equal [[3]], result_sets[2] + result = @client.execute(@triple_select_2nd_empty, as: :array) + assert_equal 3, result.count + assert_equal [[1]], result.rows[0] + assert_equal [], result.rows[1] + assert_equal [[3]], result.rows[2] assert_equal [['rs1'], ['rs2'], ['rs3']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' end it 'includes empty result sets by default - using 3rd empty buffer' do result = @client.execute(@triple_select_3rd_empty) - result_sets = result.each - assert_equal 3, result_sets.size - assert_equal [{ 'rs1' => 1 }], result_sets[0] - assert_equal [{ 'rs2' => 2 }], result_sets[1] - assert_equal [], result_sets[2] + assert_equal 3, result.count + assert_equal [{ 'rs1' => 1 }], result.rows[0] + assert_equal [{ 'rs2' => 2 }], result.rows[1] + assert_equal [], result.rows[2] assert_equal [['rs1'], ['rs2'], ['rs3']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' + # As array - result = @client.execute(@triple_select_3rd_empty) - result_sets = result.each(:as => :array) - assert_equal 3, result_sets.size - assert_equal [[1]], result_sets[0] - assert_equal [[2]], result_sets[1] - assert_equal [], result_sets[2] + result = @client.execute(@triple_select_3rd_empty, as: :array) + assert_equal 3, result.count + assert_equal [[1]], result.rows[0] + assert_equal [[2]], result.rows[1] + assert_equal [], result.rows[2] assert_equal [['rs1'], ['rs2'], ['rs3']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' end end describe 'using :empty_sets FALSE' do - before do close_client @old_query_option_value = TinyTds::Client.default_query_options[:empty_sets] @@ 
-421,66 +311,55 @@ class ResultTest < TinyTds::TestCase it 'handles a basic empty result set' do result = @client.execute(@empty_select) - assert_equal [], result.each + assert_equal [], result.rows assert_equal ['rs1'], result.fields end it 'must not include empty result sets by default - using 1st empty buffer' do result = @client.execute(@triple_select_1st_empty) - result_sets = result.each - assert_equal 2, result_sets.size - assert_equal [{ 'rs2' => 2 }], result_sets[0] - assert_equal [{ 'rs3' => 3 }], result_sets[1] + assert_equal 2, result.count + assert_equal [{ 'rs2' => 2 }], result.rows[0] + assert_equal [{ 'rs3' => 3 }], result.rows[1] assert_equal [['rs2'], ['rs3']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' + # As array - result = @client.execute(@triple_select_1st_empty) - result_sets = result.each(:as => :array) - assert_equal 2, result_sets.size - assert_equal [[2]], result_sets[0] - assert_equal [[3]], result_sets[1] + result = @client.execute(@triple_select_1st_empty, as: :array) + assert_equal 2, result.count + assert_equal [[2]], result.rows[0] + assert_equal [[3]], result.rows[1] assert_equal [['rs2'], ['rs3']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' end it 'must not include empty result sets by default - using 2nd empty buffer' do result = @client.execute(@triple_select_2nd_empty) - result_sets = result.each - assert_equal 2, result_sets.size - assert_equal [{ 'rs1' => 1 }], result_sets[0] - assert_equal [{ 'rs3' => 3 }], result_sets[1] + assert_equal 2, result.count + assert_equal [{ 'rs1' => 1 }], result.rows[0] + assert_equal [{ 'rs3' => 3 }], result.rows[1] assert_equal [['rs1'], ['rs3']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' + # As array - result = @client.execute(@triple_select_2nd_empty) - result_sets = result.each(:as => :array) - assert_equal 2, result_sets.size - 
assert_equal [[1]], result_sets[0] - assert_equal [[3]], result_sets[1] + result = @client.execute(@triple_select_2nd_empty, as: :array) + assert_equal 2, result.count + assert_equal [[1]], result.rows[0] + assert_equal [[3]], result.rows[1] assert_equal [['rs1'], ['rs3']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' end it 'must not include empty result sets by default - using 3rd empty buffer' do result = @client.execute(@triple_select_3rd_empty) - result_sets = result.each - assert_equal 2, result_sets.size - assert_equal [{ 'rs1' => 1 }], result_sets[0] - assert_equal [{ 'rs2' => 2 }], result_sets[1] + assert_equal 2, result.count + assert_equal [{ 'rs1' => 1 }], result.rows[0] + assert_equal [{ 'rs2' => 2 }], result.rows[1] assert_equal [['rs1'], ['rs2']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' + # As array - result = @client.execute(@triple_select_3rd_empty) - result_sets = result.each(:as => :array) - assert_equal 2, result_sets.size - assert_equal [[1]], result_sets[0] - assert_equal [[2]], result_sets[1] + result = @client.execute(@triple_select_3rd_empty, as: :array) + assert_equal 2, result.count + assert_equal [[1]], result.rows[0] + assert_equal [[2]], result.rows[1] assert_equal [['rs1'], ['rs2']], result.fields - assert_equal result.each.object_id, result.each.object_id, 'same cached rows' end - end - end describe 'Complex query with multiple results sets but no actual results' do @@ -535,7 +414,7 @@ class ResultTest < TinyTds::TestCase it 'copes with nil or empty buffer' do assert_raises(TypeError) { @client.execute(nil) } - assert_equal [], @client.execute('').each + assert_equal [], @client.execute('').rows end describe 'using :message_handler option' do diff --git a/test/test_helper.rb b/test/test_helper.rb index d49348bd..251807db 100755 --- a/test/test_helper.rb +++ b/test/test_helper.rb @@ -85,7 +85,7 @@ def connection_timeout end def 
assert_client_works(client) - _(client.execute("SELECT 'client_works' as [client_works]").each).must_equal [{'client_works' => 'client_works'}] + _(client.execute("SELECT 'client_works' as [client_works]").rows).must_equal [{'client_works' => 'client_works'}] end def assert_new_connections_work @@ -196,9 +196,8 @@ def sp_several_prints_sql end def find_value(id, column, query_options={}) - query_options[:timezone] ||= :utc sql = "SELECT [#{column}] FROM [datatypes] WHERE [id] = #{id}" - @client.execute(sql).each(query_options).first[column.to_s] + @client.execute(sql, timezone: (query_options[:timezone] || :utc)).first[column.to_s] end def local_offset From 1a3773a9a7beab243878eae3a54d5c4ffa76c4fa Mon Sep 17 00:00:00 2001 From: Andy Pfister Date: Wed, 18 Dec 2024 13:53:07 +0100 Subject: [PATCH 4/4] Ensure test database data is loaded before running tests --- test/client_test.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/client_test.rb b/test/client_test.rb index faf2ebae..83d007dc 100644 --- a/test/client_test.rb +++ b/test/client_test.rb @@ -2,6 +2,10 @@ require 'test_helper' class ClientTest < TinyTds::TestCase + before do + @@current_schema_loaded ||= load_current_schema + end + describe 'with valid credentials' do before do @client = new_connection @@ -327,7 +331,7 @@ class ClientTest < TinyTds::TestCase it 'has a #do method that cancels result rows and returns affected rows natively' do rollback_transaction(@client) do text = 'test affected rows native' - count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").each.first['count'] + count = @client.execute("SELECT COUNT(*) AS [count] FROM [datatypes]").first['count'] deleted_rows = @client.do("DELETE FROM [datatypes]") assert_equal count, deleted_rows, 'should have deleted rows equal to count' inserted_rows = @client.do("INSERT INTO [datatypes] ([varchar_50]) VALUES ('#{text}')")