diff --git a/.ci/bindingtester/.known_testers.py.swp b/.ci/bindingtester/.known_testers.py.swp new file mode 100644 index 0000000..4bfa18e Binary files /dev/null and b/.ci/bindingtester/.known_testers.py.swp differ diff --git a/.ci/bindingtester/__init__.py b/.ci/bindingtester/__init__.py new file mode 100644 index 0000000..1f6e1e3 --- /dev/null +++ b/.ci/bindingtester/__init__.py @@ -0,0 +1,118 @@ +# +# __init__.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import math +import sys +import os + +#sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')] + +import util + +FDB_API_VERSION = 710 + +LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'simple': { + 'format': '%(message)s' + } + }, + 'handlers': { + 'console': { + 'level': 'NOTSET', + 'class': 'logging.StreamHandler', + 'stream': sys.stdout, + 'formatter': 'simple' + } + }, + 'loggers': { + 'foundationdb.bindingtester': { + 'level': 'INFO', + 'handlers': ['console'] + } + } +} + + +class Result: + def __init__(self, subspace, key, values): + self.subspace_tuple = util.subspace_to_tuple(subspace) + self.key_tuple = subspace.unpack(key) + self.values = values + + def key(self, specification): + return self.key_tuple[specification.key_start_index:] + + @staticmethod + def elements_equal(el1, el2): + if type(el1) != type(el2): + return False + + if isinstance(el1, tuple): + return Result.tuples_match(el1, el2) + + if isinstance(el1, float) and math.isnan(el1): + return math.isnan(el2) + + return el1 == el2 + + @staticmethod + def tuples_match(t1, t2): + if len(t1) != len(t2): + return False + + return all([Result.elements_equal(x,y) for x,y in zip(t1, t2)]) + + def matches_key(self, rhs, specification): + if not isinstance(rhs, Result): + return False + + return Result.tuples_match(self.key(specification), rhs.key(specification)) + + def matches(self, rhs, specification): + if not self.matches_key(rhs, specification): + return False + + for value in self.values: + for rValue in rhs.values: + if value == rValue: + return True + + return False + + def matches_global_error_filter(self, specification): + return any([specification.matches_global_error_filter(v) for v in self.values]) + + # A non-unique sequence of numbers used to align results from different testers + def sequence_num(self, specification): + if specification.ordering_index is not None: + return self.key_tuple[specification.ordering_index] + + return None + + def __str__(self): + if len(self.values) == 1: + value_str = repr(self.values[0]) + else: + value_str = repr(self.values) + + return '%s = %s' % (repr(self.subspace_tuple + self.key_tuple), value_str) diff --git a/.ci/bindingtester/__pycache__/__init__.cpython-311.pyc b/.ci/bindingtester/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..32fb33d Binary files /dev/null and 
b/.ci/bindingtester/__pycache__/__init__.cpython-311.pyc differ diff --git a/.ci/bindingtester/__pycache__/known_testers.cpython-311.pyc b/.ci/bindingtester/__pycache__/known_testers.cpython-311.pyc new file mode 100644 index 0000000..4b55d4c Binary files /dev/null and b/.ci/bindingtester/__pycache__/known_testers.cpython-311.pyc differ diff --git a/.ci/bindingtester/__pycache__/util.cpython-311.pyc b/.ci/bindingtester/__pycache__/util.cpython-311.pyc new file mode 100644 index 0000000..e45959e Binary files /dev/null and b/.ci/bindingtester/__pycache__/util.cpython-311.pyc differ diff --git a/.ci/bindingtester/bindingtester.py b/.ci/bindingtester/bindingtester.py new file mode 100755 index 0000000..2856d35 --- /dev/null +++ b/.ci/bindingtester/bindingtester.py @@ -0,0 +1,521 @@ +#!/usr/bin/env python3 +# +# bindingtester.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import sys +import subprocess +import struct +import random +import argparse +import math +import os +import copy +import traceback +from threading import Timer, Event + +import logging.config + +from collections import OrderedDict +from functools import reduce + +sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')] + +from bindingtester import FDB_API_VERSION +from bindingtester import Result + +from bindingtester import util +from bindingtester.tests import Test, InstructionSet + +from bindingtester.known_testers import Tester + +import fdb +import fdb.tuple + +fdb.api_version(FDB_API_VERSION) + + +class ResultSet(object): + def __init__(self, specification): + self.specification = specification + self.tester_results = OrderedDict() + + def add(self, name, results): + num = 1 + base_name = name + while name in self.tester_results: + num += 1 + name = '%s (%d)' % (base_name, num) + + self.tester_results[name] = results + + @staticmethod + def _min_tuple(t1, t2): + return t1 if fdb.tuple.compare(t1, t2) < 0 else t2 + + def check_for_errors(self): + if len(self.tester_results) == 1: + return (0, False) + + util.get_logger().info('Comparing results from \'%s\'...' % repr(util.subspace_to_tuple(self.specification.subspace))) + + num_errors = 0 + has_filtered_error = False + + # Tracks the current result being evaluated for each tester + indices = [0 for i in range(len(self.tester_results))] + + name_length = max([len(name) for name in self.tester_results.keys()]) + + while True: + # Gets the next result for each tester + results = {i: r[indices[i]] for i, r in enumerate(self.tester_results.values()) if len(r) > indices[i]} + if len(results) == 0: + break + + # Attempt to 'align' the results. If two results have matching sequence numbers, then they should be compared. + # Only those testers which have a result matching the minimum current sequence number will be included. 
All + # others are considered to have not produced a result and will be evaluated in a future iteration. + sequence_nums = [r.sequence_num(self.specification) for r in results.values()] + if any([s is not None for s in sequence_nums]): + results = {i: r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums)} + + # If these results aren't using sequence numbers, then we match two results based on whether they share the same key + else: + min_key = reduce(ResultSet._min_tuple, [r.key(self.specification) for r in results.values()]) + results = {i: r for i, r in results.items() if Result.tuples_match(r.key(self.specification), min_key)} + + # Increment the indices for those testers which produced a result in this iteration + for i in results.keys(): + indices[i] += 1 + + # Fill in 'None' values for testers that didn't produce a result and generate an output string describing the results + all_results = {i: results[i] if i in results else None for i in range(len(self.tester_results))} + result_keys = list(self.tester_results.keys()) + result_str = '\n'.join([' %-*s - %s' % (name_length, result_keys[i], r) for i, r in all_results.items()]) + + result_list = list(results.values()) + + # If any of our results matches the global error filter, we ignore the result + if any(r.matches_global_error_filter(self.specification) for r in result_list): + has_filtered_error = True + + # The result is considered correct if every tester produced a value and all the values meet the matching criteria + if len(results) < len(all_results) or not all(result_list[0].matches(r, self.specification) for r in result_list): + util.get_logger().error('\nIncorrect result: \n%s' % result_str) + num_errors += 1 + else: + util.get_logger().debug('\nCorrect result: \n%s' % result_str) + + if num_errors > 0: + util.get_logger().error('') + else: + util.get_logger().debug('') + + return (num_errors, has_filtered_error) + + +def choose_api_version(selected_api_version, tester_min_version, tester_max_version, test_min_version, test_max_version): + if selected_api_version is not None: + if selected_api_version < tester_min_version or selected_api_version > tester_max_version: + raise Exception('Not all testers support the API version %d (min=%d, max=%d)' % + (selected_api_version, tester_min_version, tester_max_version)) + elif selected_api_version < test_min_version or selected_api_version > test_max_version: + raise Exception('API version %d is not supported by the specified test (min=%d, max=%d)' % + (selected_api_version, test_min_version, test_max_version)) + + api_version = selected_api_version + else: + min_version = max(tester_min_version, test_min_version) + max_version = min(tester_max_version, test_max_version) + + if min_version > max_version: + raise Exception( + 'Not all testers support the API versions required by the specified test' + '(tester: min=%d, max=%d; test: min=%d, max=%d)' % (tester_min_version, tester_max_version, test_min_version, test_max_version)) + + if random.random() < 0.7: + api_version = max_version + elif random.random() < 0.7: + api_version = min_version + elif random.random() < 0.9: + api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430, + 440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700, 710] if v >= min_version and v <= max_version]) + else: + api_version = random.randint(min_version, max_version) + + return api_version + + +class TestRunner(object): + def __init__(self, args): + self.args = copy.copy(args) + 
+ self.db = fdb.open(self.args.cluster_file) + self.test_seed = random.randint(0, 0xffffffff) + + self.testers = [Tester.get_test(self.args.test1)] + if self.args.test2 is not None: + self.testers.append(Tester.get_test(self.args.test2)) + + self.test = Test.create_test(self.args.test_name, fdb.Subspace((self.args.output_subspace,))) + + if self.test is None: + raise Exception('the test \'%s\' could not be found' % self.args.test_name) + + min_api_version = max([tester.min_api_version for tester in self.testers]) + max_api_version = min([tester.max_api_version for tester in self.testers]) + self.args.api_version = choose_api_version(self.args.api_version, min_api_version, max_api_version, + self.test.min_api_version, self.test.max_api_version) + + util.get_logger().info('\nCreating test at API version %d' % self.args.api_version) + + max_int_bits = min([tester.max_int_bits for tester in self.testers]) + if self.args.max_int_bits is None: + self.args.max_int_bits = max_int_bits + elif self.args.max_int_bits > max_int_bits: + raise Exception('The specified testers support at most %d-bit ints, but --max-int-bits was set to %d' % + (max_int_bits, self.args.max_int_bits)) + + self.args.no_threads = self.args.no_threads or any([not tester.threads_enabled for tester in self.testers]) + if self.args.no_threads and self.args.concurrency > 1: + raise Exception('Not all testers support concurrency') + + # Test types should be intersection of all tester supported types + self.args.types = list(reduce(lambda t1, t2: filter(t1.__contains__, t2), map(lambda tester: tester.types, self.testers))) + + self.args.no_directory_snapshot_ops = self.args.no_directory_snapshot_ops or any([not tester.directory_snapshot_ops_enabled for tester in self.testers]) + self.args.no_tenants = self.args.no_tenants or any([not tester.tenants_enabled for tester in self.testers]) or self.args.api_version < 710 + + def print_test(self): + test_instructions = self._generate_test() + + for top_level_subspace, top_level_thread in test_instructions.items(): + for subspace, thread in top_level_thread.get_threads(top_level_subspace).items(): + util.get_logger().error('\nThread at prefix %r:' % util.subspace_to_tuple(subspace)) + if self.args.print_all: + instructions = thread + offset = 0 + else: + instructions = thread.core_instructions() + offset = thread.core_test_begin + + for i, instruction in enumerate(instructions): + if self.args.print_all or (instruction.operation != 'SWAP' and instruction.operation != 'PUSH'): + util.get_logger().error(' %d. 
%r' % (i + offset, instruction)) + + util.get_logger().error('') + + def run_test(self): + test_instructions = self._generate_test() + expected_results = self.test.get_expected_results() + + tester_results = {s.subspace: ResultSet(s) for s in self.test.get_result_specifications()} + for subspace, results in expected_results.items(): + tester_results[subspace].add('expected', results) + + tester_errors = {} + + for tester in self.testers: + self._insert_instructions(test_instructions) + self.test.pre_run(self.db, self.args) + return_code = self._run_tester(tester) + if return_code != 0: + util.get_logger().error('Test of type %s failed to complete successfully with random seed %d and %d operations\n' % + (self.args.test_name, self.args.seed, self.args.num_ops)) + return 2 + + tester_errors[tester] = self.test.validate(self.db, self.args) + + for spec in self.test.get_result_specifications(): + tester_results[spec.subspace].add(tester.name, self._get_results(spec.subspace)) + + return_code = self._validate_results(tester_errors, tester_results) + util.get_logger().info('Completed %s test with random seed %d and %d operations\n' % (self.args.test_name, self.args.seed, self.args.num_ops)) + + return return_code + + def insert_test(self): + test_instructions = self._generate_test() + self._insert_instructions(test_instructions) + + def _generate_test(self): + util.get_logger().info('Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)...' % + (self.args.test_name, self.args.seed, self.args.num_ops, self.args.concurrency)) + + random.seed(self.test_seed) + + if self.args.concurrency == 1: + self.test.setup(self.args) + test_instructions = {fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),)): self.test.generate(self.args, 0)} + else: + test_instructions = {} + main_thread = InstructionSet() + for i in range(self.args.concurrency): + # thread_spec = fdb.Subspace(('thread_spec', i)) + thread_spec = b'thread_spec%d' % i + main_thread.push_args(thread_spec) + main_thread.append('START_THREAD') + self.test.setup(self.args) + test_instructions[fdb.Subspace((thread_spec,))] = self.test.generate(self.args, i) + + test_instructions[fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),))] = main_thread + + return test_instructions + + def _insert_instructions(self, test_instructions): + util.get_logger().info('\nInserting test into database...') + del self.db[:] + + while True: + tr = self.db.create_transaction() + try: + tr.options.set_special_key_space_enable_writes() + del tr[b'\xff\xff/management/tenant_map/' : b'\xff\xff/management/tenant_map0'] + tr.commit().wait() + break + except fdb.FDBError as e: + tr.on_error(e).wait() + + for subspace, thread in test_instructions.items(): + thread.insert_operations(self.db, subspace) + + def _run_tester(self, test): + params = test.cmd.split(' ') + [self.args.instruction_prefix, str(self.args.api_version)] + if self.args.cluster_file is not None: + params += [self.args.cluster_file] + + util.get_logger().info('\nRunning tester \'%s\'...' 
% ' '.join(params)) + sys.stdout.flush() + proc = subprocess.Popen(params) + timed_out = Event() + + def killProc(): + proc.kill() + timed_out.set() + + timer = Timer(self.args.timeout, killProc) + try: + timer.start() + ret_code = proc.wait() + except Exception as e: + raise Exception('Unable to run tester (%s)' % e) + finally: + timer.cancel() + + if ret_code != 0: + signal_name = str(ret_code) + if ret_code < 0: + signal_name = util.signal_number_to_name(-ret_code) + + reason = 'exit code: %s' % (signal_name,) + if timed_out.is_set(): + reason = 'timed out after %d seconds' % (self.args.timeout,) + util.get_logger().error('\n\'%s\' did not complete succesfully (%s)' % (params[0], reason)) + + util.get_logger().info('') + return ret_code + + def _get_results(self, subspace, instruction_index=None): + util.get_logger().info('Reading results from \'%s\'...' % repr(util.subspace_to_tuple(subspace))) + + results = [] + next_key = subspace.range().start + while True: + next_results = self.db.get_range(next_key, subspace.range().stop, 1000) + if len(next_results) == 0: + break + + results += [Result(subspace, kv.key, (kv.value,)) for kv in next_results] + next_key = fdb.KeySelector.first_greater_than(next_results[-1].key) + + return results + + def _validate_results(self, tester_errors, tester_results): + util.get_logger().info('') + + num_incorrect = 0 + has_filtered_error = False + for r in tester_results.values(): + (count, filtered_error) = r.check_for_errors() + num_incorrect += count + has_filtered_error = has_filtered_error or filtered_error + + num_errors = sum([len(e) for e in tester_errors.values()]) + + for tester, errors in tester_errors.items(): + if len(errors) > 0: + util.get_logger().error('The %s tester reported errors:\n' % tester.name) + for i, error in enumerate(errors): + util.get_logger().error(' %d. %s' % (i + 1, error)) + + log_message = '\nTest with seed %d and concurrency %d had %d incorrect result(s) and %d error(s) at API version %d' %\ + (self.args.seed, self.args.concurrency, num_incorrect, num_errors, self.args.api_version) + if num_errors == 0 and (num_incorrect == 0 or has_filtered_error): + util.get_logger().info(log_message) + if has_filtered_error: + util.get_logger().info("Test had permissible non-deterministic errors; disregarding results...") + return 0 + else: + util.get_logger().error(log_message) + return 1 + + +def bisect(test_runner, args): + util.get_logger().info('') + + lower_bound = 0 + upper_bound = args.num_ops + + while True: + test_runner.args.num_ops = int((lower_bound + upper_bound) / 2) + result = test_runner.run_test() + + if lower_bound == upper_bound: + if result != 0: + util.get_logger().error('Found minimal failing test with %d operations' % lower_bound) + if args.print_test: + test_runner.print_test() + + return 0 + elif upper_bound < args.num_ops: + util.get_logger().error('Error finding minimal failing test for seed %d. The failure may not be deterministic' % args.seed) + return 1 + else: + util.get_logger().error('No failing test found for seed %d with %d ops. Try specifying a larger --num-ops parameter.' 
+ % (args.seed, args.num_ops)) + return 0 + + elif result == 0: + util.get_logger().info('Test with %d operations succeeded\n' % test_runner.args.num_ops) + lower_bound = test_runner.args.num_ops + 1 + + else: + util.get_logger().info('Test with %d operations failed with error code %d\n' % (test_runner.args.num_ops, result)) + upper_bound = test_runner.args.num_ops + + +def parse_args(argv): + parser = argparse.ArgumentParser(description='FoundationDB Binding API Tester') + parser.add_argument('--test-name', default='scripted', + help='The name of the test to run. Must be the name of a test specified in the tests folder. (default=\'scripted\')') + + parser.add_argument(metavar='tester1', dest='test1', help='Name of the first tester to invoke') + parser.add_argument('--compare', metavar='tester2', nargs='?', type=str, default=None, const='python', dest='test2', + help='When specified, a second tester will be run and compared against the first. This flag takes an optional argument ' + 'for the second tester to invoke (default = \'python\').') + parser.add_argument('--print-test', action='store_true', + help='Instead of running a test, prints the set of instructions generated for that test. Unless --all is specified, all ' + 'setup, finalization, PUSH, and SWAP instructions will be excluded.') + parser.add_argument('--all', dest='print_all', action='store_true', help='Causes --print-test to print all instructions.') + parser.add_argument('--bisect', action='store_true', + help='Run the specified test varying the number of operations until a minimal failing test is found. Does not work for ' + 'concurrent tests.') + parser.add_argument('--insert-only', action='store_true', help='Insert the test instructions into the database, but do not run it.') + parser.add_argument('--concurrency', type=int, default=1, help='Number of concurrent test threads to run. (default = 1).') + parser.add_argument('--num-ops', type=int, default=100, help='The number of operations to generate per thread (default = 100)') + parser.add_argument('--seed', type=int, help='The random seed to use for generating the test') + parser.add_argument('--max-int-bits', type=int, default=None, + help='Maximum number of bits to use for int types in testers. By default, the largest value supported by the testers being ' + 'run will be chosen.') + parser.add_argument('--api-version', default=None, type=int, + help='The API version that the testers should use. Not supported in scripted mode. (default = random version supported by ' + 'all testers)') + parser.add_argument('--cluster-file', type=str, default=None, help='The cluster file for the cluster being connected to. (default None)') + parser.add_argument('--timeout', type=int, default=600, help='The timeout in seconds for running each individual tester. (default 600)') + parser.add_argument('--enable-client-trace-logging', nargs='?', type=str, default=None, const='.', + help='Enables trace file output. This flag takes an optional argument specifying the output directory (default = \'.\').') + parser.add_argument('--instruction-prefix', type=str, default='test_spec', + help='The prefix under which the main thread of test instructions are inserted (default=\'test_spec\').') + parser.add_argument('--output-subspace', type=str, default='tester_output', + help='The string used to create the output subspace for the testers. The subspace will be of the form (,). 
' + '(default=\'tester_output\')') + + parser.add_argument('--logging-level', type=str, default='INFO', + choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], help='Specifies the level of detail in the tester output (default=\'INFO\').') + + # SOMEDAY: this applies only to the scripted test. Should we invoke test files specifically (as in circus), + # or invoke them here and allow tests to add arguments? + parser.add_argument('--no-threads', action='store_true', help='Disables the START_THREAD instruction in the scripted test.') + + parser.add_argument('--no-directory-snapshot-ops', action='store_true', help='Disables snapshot operations for directory instructions.') + + parser.add_argument('--no-tenants', action='store_true', help='Disables tenant operations.') + + return parser.parse_args(argv) + + +def validate_args(args): + if args.insert_only and args.bisect: + raise Exception('--bisect cannot be used with --insert-only') + if args.print_all and not args.print_test: + raise Exception('cannot specify --all without --print-test') + if args.bisect and not args.seed: + raise Exception('--seed must be specified if using --bisect') + if args.concurrency < 1: + raise Exception('--concurrency must be a positive integer') + if args.concurrency > 1 and args.test2: + raise Exception('--compare cannot be used with concurrent tests') + + +def main(argv): + args = parse_args(argv) + try: + from bindingtester import LOGGING + + logging.config.dictConfig(LOGGING) + util.initialize_logger_level(args.logging_level) + + validate_args(args) + + if args.seed is None: + args.seed = random.randint(0, 0xffffffff) + + random.seed(args.seed) + + if args.enable_client_trace_logging is not None: + fdb.options.set_trace_enable(args.enable_client_trace_logging) + + test_runner = TestRunner(args) + + if args.bisect: + return bisect(test_runner, args) + + if args.print_test: + return test_runner.print_test() + + if args.insert_only: + return test_runner.insert_test() + + return test_runner.run_test() + + except Exception as e: + util.get_logger().error('\nERROR: %s' % e) + util.get_logger().debug(traceback.format_exc()) + exit(3) + + except BaseException: + util.get_logger().error('\nERROR: %s' % sys.exc_info()[0]) + util.get_logger().info(traceback.format_exc()) + exit(3) + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/.ci/bindingtester/known_testers.py b/.ci/bindingtester/known_testers.py new file mode 100644 index 0000000..2da2ca0 --- /dev/null +++ b/.ci/bindingtester/known_testers.py @@ -0,0 +1,69 @@ +# +# known_testers.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os + +MAX_API_VERSION = 710 +COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple'] +ALL_TYPES = COMMON_TYPES + ['versionstamp'] + + +class Tester: + def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES, directory_snapshot_ops_enabled=True, tenants_enabled=False): + self.name = name + self.cmd = cmd + self.max_int_bits = max_int_bits + self.min_api_version = min_api_version + self.max_api_version = max_api_version + self.threads_enabled = threads_enabled + self.types = types + self.directory_snapshot_ops_enabled = directory_snapshot_ops_enabled + self.tenants_enabled = tenants_enabled + + def supports_api_version(self, api_version): + return api_version >= self.min_api_version and api_version <= self.max_api_version + + @classmethod + def get_test(cls, test_name_or_args): + if test_name_or_args in testers: + return testers[test_name_or_args] + else: + return Tester(test_name_or_args.split(' ')[0], test_name_or_args) + + +def _absolute_path(path): + return os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', path) + + +_java_cmd = 'java -ea -cp %s:%s com.apple.foundationdb.test.' % ( + _absolute_path('java/foundationdb-client.jar'), + _absolute_path('java/foundationdb-tests.jar')) + +# We could set min_api_version lower on some of these if the testers were updated to support them +testers = { + 'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True), + 'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True), + 'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 2040, 23, MAX_API_VERSION), + 'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True), + 'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True), + 'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 2040, 200, MAX_API_VERSION, types=ALL_TYPES), + 'flow': Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION, directory_snapshot_ops_enabled=False), + 'erlang': Tester('erlang', '/opt/fdb/test/tester.es', 2040, MAX_API_VERSION, MAX_API_VERSION, types=ALL_TYPES), +} diff --git a/.ci/bindingtester/run_binding_tester.sh b/.ci/bindingtester/run_binding_tester.sh new file mode 100644 index 0000000..06c3f0a --- /dev/null +++ b/.ci/bindingtester/run_binding_tester.sh @@ -0,0 +1,383 @@ +#!/usr/bin/env bash +###################################################### +# +# FoundationDB Binding Test Script +# +# Test script for running FoundationDB binding tests +# +# Defines: +# +# Author: Alvin Moore +# Date: 16-04-28 +# Version: 1.0 +###################################################### + +# Defines +SCRIPTDIR=$( cd "${BASH_SOURCE[0]%\/*}" && pwd ) +CWD=$(pwd) +OSNAME="$(uname -s)" +DEBUGLEVEL="${DEBUGLEVEL:-1}" +DISPLAYERROR="${DISPLAYERROR:-0}" +OPERATIONS="${OPERATIONS:-1000}" +HCAOPERATIONS="${HCAOPERATIONS:-100}" +CONCURRENCY="${CONCURRENCY:-5}" +BREAKONERROR="${BREAKONERROR:-0}" +RUNSCRIPTS="${RUNSCRIPTS:-1}" +RUNTESTS="${RUNTESTS:-1}" +RANDOMTEST="${RANDOMTEST:-0}" +# BINDINGTESTS="${BINDINGTESTS:-python python3 java java_async ruby go flow}" +BINDINGTESTS="${BINDINGTESTS:-python python3 java java_async go flow}" 
+LOGLEVEL="${LOGLEVEL:-INFO}" +_BINDINGTESTS=(${BINDINGTESTS}) +DISABLEDTESTS=() +TESTFILE="${SCRIPTDIR}/bindingtester.py" +TESTTYPES=('API' 'Concurrent API' 'Directory' 'Directory HCA') +TESTTOTAL="${#TESTTYPES[@]}" +TESTINDEX="${TESTINDEX:-$TESTTOTAL}" +LOGSTDOUT="${LOGSTDOUT:-0}" +CONSOLELOG="${CONSOLELOG:-${CWD}/console.log}" +VERSION="1.6" + +# Display syntax +if [ "${#}" -lt 2 ] +then + echo 'run_binding_tester.sh ' + echo ' cycles: number of cycles to run test (0 => unlimitted)' + echo '' + echo ' Modifiable Environment Variables:' + echo ' CONCURRENCY: number of concurrent requests' + echo ' OPERATIONS: number of operations per test' + echo ' HCAOPERATIONS: number of HCA operations per test' + echo ' BINDINGTESTS: lists of binding tests to run' + echo ' BREAKONERROR: stop on first error, if positive number' + echo " TESTINDEX: (0-${TESTTOTAL}) ${TESTTYPES[*]}" + echo ' RANDOMTEST: select a single random test, if positive number' + echo ' LOGLEVEL: ERROR, WARNING, INFO, DEBUG' + echo '' + echo " version: ${VERSION}" + exit 1 +fi + +# Read arguments +MAXCYCLES="${1}" +ERRORFILE="${2}" + +function logError() +{ + local status=0 + + if [ "$#" -lt 3 ] + then + echo "runCommand [args ...]" + let status="${status} + 1" + else + local message="${1}" + local output="${2}" + local command="${3}" + shift + shift + shift + + let errorTotal="${errorTotal} + 1" + + # Display the error, if enabled + if [ "${DISPLAYERROR}" -gt 0 ] + then + printf '%-16s Error #%3d:\n' "$(date '+%F %H-%M-%S')" "${errorTotal}" + echo "Message: '${message}'" + echo "Command: '${command} ${@}'" + echo "Error: ${output}" + fi + + # Create the file, if not present + if [[ ! -f "${ERRORFILE}" ]] + then + dir=$(dirname "${ERRORFILE}") + + if [ ! -d "${dir}" ] && ! mkdir -p "${dir}" + then + echo "Failed to create directory: ${dir} for error file: ${ERRORFILE}" + let status="${status} + 1" + printf '\n%-16s Error #%3d:\n' "$(date '+%F %H-%M-%S')" "${errorTotal}" + echo "Message: '${message}'" + echo "Command: '${command} ${@}'" + echo "Error: ${output}" + fi + fi + + # Initialize the error log, if first error + if [[ "${errorTotal}" -eq 1 ]] + then + : + fi + + # Write the error to the log + if [[ "${status}" -eq 0 ]] + then + printf '\n%-16s Error #%3d:\n' "$(date '+%F %H-%M-%S')" "${errorTotal}" >> "${ERRORFILE}" + echo "Message: '${message}'" >> "${ERRORFILE}" + echo "Command: '${command} ${@}'" >> "${ERRORFILE}" + echo -n "Error:" >> "${ERRORFILE}" + echo "${output}" >> "${ERRORFILE}" + echo '----------------------------------------------------------------------------------------------------' >> "${ERRORFILE}" + fi + fi + + return "${status}" +} + +function runCommand() +{ + local status=0 + + if [ "$#" -lt 2 ] + then + echo "runCommand [args ...]" + let status="${status} + 1" + else + local message="${1}" + local command="${2}" + local time="${SECONDS}" + shift + shift + + if [ "${DEBUGLEVEL}" -gt 2 ]; then + printf "%-16s %-70s \n" "" "${command} ${*}" + fi + + if [ "${DEBUGLEVEL}" -gt 1 ]; then + printf "%-16s %-40s " "" "${message}" + fi + + if [ "${LOGSTDOUT}" -gt 0 ] ; then + printf "Running command: ${command} ${*}\n\n" >> "${CONSOLELOG}" + "${command}" "${@}" 2>&1 >> "${CONSOLELOG}" + result=$? + output=$(cat "${CONSOLELOG}") + else + output=$("${command}" "${@}" 2>&1) + result=$? + fi + let time="${SECONDS} - ${time}" + + # Check return code + if [ "${result}" -ne 0 ] + then + if [ "${DEBUGLEVEL}" -gt 0 ]; then + echo "failed after ${time} seconds." 
+ fi + let status="${status} + 1" + logError "${message}" "${output}" "${command}" "${@}" + elif [ "${DEBUGLEVEL}" -gt 0 ];then + echo "passed in ${time} seconds." + fi + fi + + return "${status}" +} + +function runScriptedTest() +{ + local status=0 + + if [ "$#" -lt 1 ] + then + echo "runScriptedTest " + let status="${status} + 1" + else + local test="${1}" + + if ! runCommand "Scripting ${test} ..." 'python3' '-u' "${TESTFILE}" "${test}" --test-name scripted --logging-level "${LOGLEVEL}" + then + let status="${status} + 1" + fi + fi + + return "${status}" +} + +function runTest() +{ + local status=0 + + if [ "$#" -lt 1 ] + then + echo "runTest " + let status="${status} + 1" + else + local test="${1}" + + if [ "${DEBUGLEVEL}" -gt 0 ]; then + printf "%-16s %-40s \n" "$(date '+%F %H-%M-%S')" "Testing ${test}" + fi + + # API + if ([[ "${TESTINDEX}" -eq 0 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[0]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name api --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}" + then + let status="${status} + 1" + fi + + # Concurrent API + if ([[ "${TESTINDEX}" -eq 1 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[1]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name api --concurrency "${CONCURRENCY}" --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}" + then + let status="${status} + 1" + fi + + # Directory + if ([[ "${TESTINDEX}" -eq 2 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[2]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name directory --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}" + then + let status="${status} + 1" + fi + + # Directory HCA + if ([[ "${TESTINDEX}" -eq 3 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! 
runCommand " ${TESTTYPES[3]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name directory_hca --concurrency "${CONCURRENCY}" --num-ops "${HCAOPERATIONS}" --logging-level "${LOGLEVEL}" + then + let status="${status} + 1" + fi + fi + + return "${status}" +} + +# Initialize the variables +status=0 +cycles=0 +rundate="$(date +%F_%H-%M-%S)" +errorTotal=0 + + +# Select a random test, if enabled +if [ "${RANDOMTEST}" -gt 0 ] +then + let testIndex="${RANDOM} % ${#_BINDINGTESTS[@]}" + randomTest="${_BINDINGTESTS[$testIndex]}" + # Remove the random test from the list of binding tests + _BINDINGTESTS=("${_BINDINGTESTS[@]/${randomTest}}") + DISABLEDTESTS+=("${_BINDINGTESTS[@]}") + _BINDINGTESTS=("${randomTest}") + + # Choose a random test + let TESTINDEX="${RANDOM} % ${TESTTOTAL}" + + # Select scripted or tests, if enabled + if [ "${RUNSCRIPTS}" -gt 0 ] && [ "${RUNTESTS}" -gt 0 ]; then + # Select scripted tests, if 1 out of 100 + if [ $((${RANDOM} % 100)) -eq 0 ]; then + RUNTESTS=0 + else + RUNSCRIPTS=0 + fi + fi +fi + +# Determine the name of the test type +# from the test index +if [ "${TESTINDEX}" -lt "${TESTTOTAL}" ]; then + TESTNAME="${TESTTYPES[$TESTINDEX]}" +else + TESTNAME="All Tests" + TESTINDEX="${TESTTOTAL}" +fi + +if [ "${DEBUGLEVEL}" -gt 0 ] +then + echo '' + echo '' + echo '*******************************************************************************************' + echo '' + printf "%-16s %-40s \n" "$(date '+%F %H-%M-%S')" "FoundationDb Binding Tester" + printf "%-20s Host OS: %-40s \n" "" "${OSNAME}" + printf "%-20s Max Cycles: %-40s \n" "" "${MAXCYCLES}" + printf "%-20s Operations: %-40s \n" "" "${OPERATIONS}" + printf "%-20s HCA Operations: %-40s \n" "" "${HCAOPERATIONS}" + printf "%-20s Concurrency: %-40s \n" "" "${CONCURRENCY}" + printf "%-20s Tests: (%2d) %-40s \n" "" "${#_BINDINGTESTS[@]}" "${_BINDINGTESTS[*]}" + printf "%-20s Disabled: (%2d) %-40s \n" "" "${#DISABLEDTESTS[@]}" "${DISABLEDTESTS[*]}" + printf "%-20s Error Log: %-40s \n" "" "${ERRORFILE}" + printf "%-20s Log Level: %-40s \n" "" "${LOGLEVEL}" + printf "%-20s Random Test: %-40s \n" "" "${RANDOMTEST}" + printf "%-20s Test Type: (%d) %-40s \n" "" "${TESTINDEX}" "${TESTNAME}" + printf "%-20s Run Scripts: %-40s \n" "" "${RUNSCRIPTS}" + printf "%-20s Run Tests: %-40s \n" "" "${RUNTESTS}" + printf "%-20s Debug Level: %-40s \n" "" "${DEBUGLEVEL}" + printf "%-20s Script Version: %-40s \n" "" "${VERSION}" + echo '' +fi + +# Run the scripted tests, if enabled +if [ "${RUNSCRIPTS}" -gt 0 ] +then + if [ "${DEBUGLEVEL}" -gt 0 ]; then + printf "%-16s %-40s \n" "$(date '+%F %H-%M-%S')" "Running scripted tests" + fi + + for test in "${_BINDINGTESTS[@]}" + do + # Run the specified scripted test + if ! runScriptedTest "${test}" + then + let status="${status} + 1" + + # Break Stop the test, if enabled + if [[ "${BREAKONERROR}" -ne 0 ]] + then + break + fi + fi + done +fi + +# Run the individual tests, if enabled +while [[ "${RUNTESTS}" -gt 0 ]] && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ([[ "${cycles}" -lt "${MAXCYCLES}" ]] || [[ "${MAXCYCLES}" -eq 0 ]]) +do + let cycles="${cycles} + 1" + if [ "${DEBUGLEVEL}" -gt 0 ]; then + printf "\n%-16s Cycle #%3d \n" "$(date '+%F %H-%M-%S')" "${cycles}" + fi + + for test in "${_BINDINGTESTS[@]}" + do + # Run the specified test + if ! 
runTest "${test}" + then + let status="${status} + 1" + + # Break Stop the test, if enabled + if [[ "${BREAKONERROR}" -ne 0 ]] + then + break + fi + fi + done +done + +# Final report +if [ "${status}" -eq 0 ] +then + if [ "${DEBUGLEVEL}" -gt 0 ]; then + printf "\n%-16s Successfully completed ${cycles} cycles of the FDB binding tester for ${#_BINDINGTESTS[@]} binding tests in %d seconds.\n" "$(date '+%F %H-%M-%S')" "${SECONDS}" + fi +elif [ "${DEBUGLEVEL}" -gt 0 ]; then + printf "\n%-16s Failed to complete all ${cycles} cycles of the FDB binding tester for ${#_BINDINGTESTS[@]} binding tests in %d seconds.\n" "$(date '+%F %H-%M-%S')" "${SECONDS}" +fi + +if [ "${DEBUGLEVEL}" -gt 0 ] +then + echo '' + echo '' + echo '*******************************************************************************************' + echo '' + printf "%-16s %-40s \n" "$(date '+%F %H-%M-%S')" "Binding Tester Results" + printf "%-20s Cycles: %-40s \n" "" "${cycles}" + printf "%-20s Failed Tests: %-40s \n" "" "${status}" + printf "%-20s Errors: %-40s \n" "" "${errorTotal}" + printf "%-20s Tests: (%2d) %-40s \n" "" "${#_BINDINGTESTS[@]}" "${_BINDINGTESTS[*]}" + printf "%-20s Version: %-40s \n" "" "${VERSION}" +fi + +# Ensure that status is a returnable number +if [[ "${status}" -ne 0 ]]; then + status=1 +fi + +exit "${status}" diff --git a/.ci/bindingtester/run_tester_loop.sh b/.ci/bindingtester/run_tester_loop.sh new file mode 100755 index 0000000..2bdcee4 --- /dev/null +++ b/.ci/bindingtester/run_tester_loop.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +LOGGING_LEVEL=WARNING + +function run() { + echo "Running $1 api" + ./bindingtester.py $1 --test-name api --cluster-file fdb.cluster --compare --num-ops 1000 --logging-level $LOGGING_LEVEL + echo "Running $1 concurrent api" + ./bindingtester.py $1 --test-name api --cluster-file fdb.cluster --num-ops 1000 --concurrency 5 --logging-level $LOGGING_LEVEL + echo "Running $1 directory" + ./bindingtester.py $1 --test-name directory --cluster-file fdb.cluster --compare --num-ops 1000 --logging-level $LOGGING_LEVEL + echo "Running $1 directory hca" + ./bindingtester.py $1 --test-name directory_hca --cluster-file fdb.cluster --num-ops 100 --concurrency 5 --logging-level $LOGGING_LEVEL +} + +function scripted() { + echo "Running $1 scripted" + ./bindingtester.py $1 --test-name scripted --cluster-file fdb.cluster --logging-level $LOGGING_LEVEL +} + +function run_scripted() { + scripted python + scripted python3 + scripted ruby + scripted java + scripted java_async + scripted go + scripted flow +} + +run_scripted + +i=1 +while `true`; do + echo "Pass $i" + i=$((i+1)) + run python + run python3 + run ruby + run java + run java_async + run go + run flow +done diff --git a/.ci/bindingtester/spec/bindingApiTester.md b/.ci/bindingtester/spec/bindingApiTester.md new file mode 100644 index 0000000..46dec89 --- /dev/null +++ b/.ci/bindingtester/spec/bindingApiTester.md @@ -0,0 +1,404 @@ +Overview +-------- + +Your API test program must implement a simple stack machine that exercises the +FoundationDB API. The program is invoked with two or three arguments. The first +argument is a prefix that is the first element of a tuple, the second is the +API version, and the third argument is the path to a cluster file. If the +third argument is not specified, your program may assume that `fdb.open()` will +succeed with no arguments (an fdb.cluster file will exist in the current +directory). Otherwise, your program should connect to the cluster specified +by the given cluster file. 
+ +Your stack machine should begin reading the range returned by the tuple range +method of prefix and execute each instruction (stored in the value of the key) +until the range has been exhausted. When this stack machine (along with any +additional stack machines created as part of the test) have finished running, +your program should terminate. + +Upon successful termination, your program should exit with code 0. If your +program or any of your stack machines failed to run correctly, then it should +exit with a nonzero exit code. + +Instructions are also stored as packed tuples and should be expanded with the +tuple unpack method. The first element of the instruction tuple represents an +operation, and will always be returned as a unicode string. An operation may have +a second element which provides additional data, which may be of any tuple type. + +Your stack machine must maintain a small amount of state while executing +instructions: + + - A global transaction map from byte string to Transactions. This map is + shared by all tester 'threads'. + + - A stack of data items of mixed types and their associated metadata. At a + minimum, each item should be stored with the 0-based instruction number + which resulted in it being put onto the stack. Your stack must support push + and pop operations. It may be helpful if it supports random access, clear + and a peek operation. The stack is initialized to be empty. + + - A current FDB transaction name (stored as a byte string). The transaction + name should be initialized to the prefix that instructions are being read + from. + + - A last seen FDB version, which is a 64-bit integer. + + +Data Operations +--------------- + +#### PUSH <item> + + Pushes the provided item onto the stack. + +#### DUP + + Duplicates the top item on the stack. The instruction number for the + duplicate item should be the same as the original. + +#### EMPTY_STACK + + Discards all items in the stack. + +#### SWAP + + Pops the top item off of the stack as INDEX. Swaps the items in the stack at + depth 0 and depth INDEX. Does not modify the instruction numbers of the + swapped items. + +#### POP + + Pops and discards the top item on the stack. + +#### SUB + + Pops the top two items off of the stack as A and B and then pushes the + difference (A-B) onto the stack. A and B may be assumed to be integers. + +#### CONCAT + + Pops the top two items off the stack as A and B and then pushes the + concatenation of A and B onto the stack. A and B can be assumed to + be of the same type and will be either byte strings or unicode strings. + +#### LOG_STACK + + Pops the top item off the stack as PREFIX. Using a new transaction with normal + retry logic, inserts a key-value pair into the database for each item in the + stack of the form: + + PREFIX + tuple.pack((stackIndex, instructionNumber)) = tuple.pack((item,)) + + where stackIndex is the current index of the item in the stack. The oldest + item in the stack should have stackIndex 0. + + If the byte string created by tuple packing the item exceeds 40000 bytes, + then the value should be truncated to the first 40000 bytes of the packed + tuple. + + When finished, the stack should be empty. Note that because the stack may be + large, it may be necessary to commit the transaction every so often (e.g. + after every 100 sets) to avoid transaction_too_old errors. + +FoundationDB Operations +----------------------- + +All of these operations map to a portion of the FoundationDB API. 
When an +operation applies to a transaction, it should use the transaction stored in +the global transaction map corresponding to the current transaction name. Certain +instructions will be followed by one or both of _SNAPSHOT and _DATABASE to +indicate that they may appear with these variations. _SNAPSHOT operations should +perform the operation as a snapshot read. _DATABASE operations should (if +possible) make use of the methods available directly on the FoundationDB +database object, rather than the currently open transaction. + +If your binding does not support operations directly on a database object, you +should simulate it using an anonymous transaction. Remember that set and clear +operations must immediately commit (with appropriate retry behavior!). + +Any error that bubbles out of these operations must be caught. In the event of +an error, you must push the packed tuple of the byte string `"ERROR"` and the +error code (as a byte string, not an integer). + +Some operations may allow you to push future values onto the stack. When popping +objects from the stack, the future MUST BE waited on and errors caught before +any operations that use the result of the future. + +Whether or not you choose to push a future, any operation that supports optional +futures must apply the following rules to the result: + + - If the result is an error, then its value is to be converted to an error + string as defined above + + - If the result is void (i.e. the future was just a signal of + completion), then its value should be the byte string + `"RESULT_NOT_PRESENT"` + + - If the result is from a GET operation in which no result was + returned, then its value is to be converted to the byte string + `"RESULT_NOT_PRESENT"` + +#### NEW_TRANSACTION + + Creates a new transaction and stores it in the global transaction map + under the currently used transaction name. + +#### USE_TRANSACTION + + Pop the top item off of the stack as TRANSACTION_NAME. Begin using the + transaction stored at TRANSACTION_NAME in the transaction map for future + operations. If no entry exists in the map for the given name, a new + transaction should be inserted. + +#### ON_ERROR + + Pops the top item off of the stack as ERROR_CODE. Passes ERROR_CODE in a + language-appropriate way to the on_error method of current transaction + object and blocks on the future. If on_error re-raises the error, bubbles + the error out as indicated above. May optionally push a future onto the + stack. + +#### GET (_SNAPSHOT, _DATABASE) + + Pops the top item off of the stack as KEY and then looks up KEY in the + database using the get() method. May optionally push a future onto the + stack. + +#### GET_ESTIMATED_RANGE_SIZE + + Pops the top two items off of the stack as BEGIN_KEY and END_KEY to + construct a key range. Then call the `getEstimatedRangeSize` API of + the language binding. Make sure the API returns without error. Finally + push the string "GOT_ESTIMATED_RANGE_SIZE" onto the stack. + +#### GET_KEY (_SNAPSHOT, _DATABASE) + + Pops the top four items off of the stack as KEY, OR_EQUAL, OFFSET, PREFIX + and then constructs a key selector. This key selector is then resolved + using the get_key() method to yield RESULT. If RESULT starts with PREFIX, + then RESULT is pushed onto the stack. Otherwise, if RESULT < PREFIX, PREFIX + is pushed onto the stack. If RESULT > PREFIX, then strinc(PREFIX) is pushed + onto the stack. May optionally push a future onto the stack. 
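As a hedged illustration, the clamping rule above could be written in Python roughly as follows; `clamp_get_key_result` is an assumed helper name, and `result` is taken to be the already-resolved byte string returned by the binding's get_key().

```python
# Sketch of the GET_KEY clamping rule. `result` is the resolved byte string
# from get_key(); `prefix` is the popped PREFIX value.
import fdb
fdb.api_version(710)

def clamp_get_key_result(result, prefix):
    if result.startswith(prefix):
        return result               # selector resolved inside the prefix range
    elif result < prefix:
        return prefix               # clamp to the start of the prefix range
    else:
        return fdb.strinc(prefix)   # clamp to just past the end of the prefix range
```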
+ +#### GET_RANGE (_SNAPSHOT, _DATABASE) + + Pops the top five items off of the stack as BEGIN_KEY, END_KEY, LIMIT, + REVERSE and STREAMING_MODE. Performs a range read in a language-appropriate + way using these parameters. The resulting range of n key-value pairs are + packed into a tuple as [k1,v1,k2,v2,...,kn,vn], and this single packed value + is pushed onto the stack. + +#### GET_RANGE_STARTS_WITH (_SNAPSHOT, _DATABASE) + + Pops the top four items off of the stack as PREFIX, LIMIT, REVERSE and + STREAMING_MODE. Performs a prefix range read in a language-appropriate way + using these parameters. Output is pushed onto the stack as with GET_RANGE. + +#### GET_RANGE_SELECTOR (_SNAPSHOT, _DATABASE) + + Pops the top ten items off of the stack as BEGIN_KEY, BEGIN_OR_EQUAL, + BEGIN_OFFSET, END_KEY, END_OR_EQUAL, END_OFFSET, LIMIT, REVERSE, + STREAMING_MODE, and PREFIX. Constructs key selectors BEGIN and END from + the first six parameters, and then performs a range read in a language- + appropriate way using BEGIN, END, LIMIT, REVERSE and STREAMING_MODE. Output + is pushed onto the stack as with GET_RANGE, excluding any keys that do not + begin with PREFIX. + +#### GET_READ_VERSION (_SNAPSHOT) + + Gets the current read version and stores it in the internal stack machine + state as the last seen version. Pushed the string "GOT_READ_VERSION" onto + the stack. + +#### GET_VERSIONSTAMP + + Calls get_versionstamp and pushes the resulting future onto the stack. + +#### SET (_DATABASE) + + Pops the top two items off of the stack as KEY and VALUE. Sets KEY to have + the value VALUE. A SET_DATABASE call may optionally push a future onto the + stack. + +#### SET_READ_VERSION + + Sets the current transaction read version to the internal state machine last + seen version. + +#### CLEAR (_DATABASE) + + Pops the top item off of the stack as KEY and then clears KEY from the + database. A CLEAR_DATABASE call may optionally push a future onto the stack. + +#### CLEAR_RANGE (_DATABASE) + + Pops the top two items off of the stack as BEGIN_KEY and END_KEY. Clears the + range of keys from BEGIN_KEY to END_KEY in the database. A + CLEAR_RANGE_DATABASE call may optionally push a future onto the stack. + +#### CLEAR_RANGE_STARTS_WITH (_DATABASE) + + Pops the top item off of the stack as PREFIX and then clears all keys from + the database that begin with PREFIX. A CLEAR_RANGE_STARTS_WITH_DATABASE call + may optionally push a future onto the stack. + +#### ATOMIC_OP (_DATABASE) + + Pops the top three items off of the stack as OPTYPE, KEY, and VALUE. + Performs the atomic operation described by OPTYPE upon KEY with VALUE. An + ATOMIC_OP_DATABASE call may optionally push a future onto the stack. + +#### READ_CONFLICT_RANGE and WRITE_CONFLICT_RANGE + + Pops the top two items off of the stack as BEGIN_KEY and END_KEY. Adds a + read conflict range or write conflict range from BEGIN_KEY to END_KEY. + Pushes the byte string "SET_CONFLICT_RANGE" onto the stack. + +#### READ_CONFLICT_KEY and WRITE_CONFLICT_KEY + + Pops the top item off of the stack as KEY. Adds KEY as a read conflict key + or write conflict key. Pushes the byte string "SET_CONFLICT_KEY" onto the + stack. + +#### DISABLE_WRITE_CONFLICT + + Sets the NEXT_WRITE_NO_WRITE_CONFLICT_RANGE transaction option on the + current transaction. Does not modify the stack. + +#### COMMIT + + Commits the current transaction (with no retry behavior). May optionally + push a future onto the stack. + +#### RESET + + Resets the current transaction. 
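As an illustrative sketch (not a required implementation), a Python tester might flatten the result of a range read for the GET_RANGE family of instructions above as shown below, where `kv_pairs` is whatever iterable of key-value pairs the binding's range read returned:

```python
# Sketch: flatten n key-value pairs into one packed tuple [k1,v1,...,kn,vn].
import fdb
import fdb.tuple
fdb.api_version(710)

def pack_range_result(kv_pairs):
    flat = []
    for kv in kv_pairs:
        flat.append(kv.key)
        flat.append(kv.value)
    return fdb.tuple.pack(tuple(flat))
```

The single packed value returned here is what gets pushed onto the stack.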
+ +#### CANCEL + + Cancels the current transaction. + +#### GET_COMMITTED_VERSION + + Gets the committed version from the current transaction and stores it in the + internal stack machine state as the last seen version. Pushes the byte + string "GOT_COMMITTED_VERSION" onto the stack. + +#### GET_APPROXIMATE_SIZE + + Calls get_approximate_size and pushes the byte string "GOT_APPROXIMATE_SIZE" + onto the stack. Note bindings may issue GET_RANGE calls with different + limits, so these bindings can obtain different sizes back. + +#### WAIT_FUTURE + + Pops the top item off the stack and pushes it back on. If the top item on + the stack is a future, this will have the side effect of waiting on the + result of the future and pushing the result on the stack. Does not change + the instruction number of the item. + +Tuple Operations +---------------- + +#### TUPLE_PACK + + Pops the top item off of the stack as N. Pops the next N items off of the + stack and packs them as the tuple [item0,item1,...,itemN], and then pushes + this single packed value onto the stack. + +#### TUPLE_PACK_WITH_VERSIONSTAMP + + Pops the top item off of the stack as a byte string prefix. Pops the next item + off of the stack as N. Pops the next N items off of the stack and packs them + as the tuple [item0,item1,...,itemN], with the provided prefix and tries to + append the position of the first incomplete versionstamp as if the byte + string were to be used as a key in a SET_VERSIONSTAMP_KEY atomic op. If there + are no incomplete versionstamp instances, then this pushes the literal byte + string 'ERROR: NONE' to the stack. If there is more than one, then this pushes + the literal byte string 'ERROR: MULTIPLE'. If there is exactly one, then it pushes + the literal byte string 'OK' and then pushes the packed tuple. (Languages that + do not contain a 'Versionstamp' tuple-type do not have to implement this + operation.) + +#### TUPLE_UNPACK + + Pops the top item off of the stack as PACKED, and then unpacks PACKED into a + tuple. For each element of the tuple, packs it as a new tuple and pushes it + onto the stack. + +#### TUPLE_RANGE + + Pops the top item off of the stack as N. Pops the next N items off of the + stack, and passes these items as a tuple (or array, or language-appropriate + structure) to the tuple range method. Pushes the begin and end elements of + the returned range onto the stack. + +#### TUPLE_SORT + + Pops the top item off of the stack as N. Pops the next N items off of the + stack as packed tuples (i.e., byte strings), unpacks them, sorts the tuples, + repacks them into byte strings, and then pushes these packed tuples onto + the stack so that the final top of the stack now has the greatest + element. If the binding has some kind of tuple comparison function, it should + use that to sort. Otherwise, it should sort them lexicographically by + their byte representation. The choice of function should not affect final sort order. + +#### ENCODE_FLOAT + + Pops the top item off of the stack. This will be a byte-string of length 4 + containing the IEEE 754 encoding of a float in big-endian order. + This is then converted into a float and pushed onto the stack. + +#### ENCODE_DOUBLE + + Pops the top item off of the stack. This will be a byte-string of length 8 + containing the IEEE 754 encoding of a double in big-endian order. + This is then converted into a double and pushed onto the stack. + +#### DECODE_FLOAT + + Pops the top item off of the stack. This will be a single-precision float. 
+ This is converted into a (4 byte) byte-string of its IEEE 754 representation + in big-endian order, and pushed onto the stack. + +#### DECODE_DOUBLE + + Pops the top item off of the stack. This will be a double-precision float. + This is converted into a (8 byte) byte-string its IEEE 754 representation + in big-endian order, and pushed onto the stack. + + +Thread Operations +----------------- + +#### START_THREAD + + Pops the top item off of the stack as PREFIX. Creates a new stack machine + instance operating on the same database as the current stack machine, but + operating on PREFIX. The new stack machine should have independent internal + state. The new stack machine should begin executing instructions concurrent + with the current stack machine through a language-appropriate mechanism. + +#### WAIT_EMPTY + + Pops the top item off of the stack as PREFIX. Blocks execution until the + range with prefix PREFIX is not present in the database. This should be + implemented as a polling loop inside of a language- and binding-appropriate + retryable construct which synthesizes FoundationDB error 1020 when the range + is not empty. Pushes the string "WAITED_FOR_EMPTY" onto the stack when + complete. + +Miscellaneous +------------- + +#### UNIT_TESTS + + This is called during the scripted test to allow bindings to test features + which aren't supported by the stack tester. Things currently tested in the + UNIT_TESTS section: + + Transaction options + Watches + Cancellation + Retry limits + Timeouts diff --git a/.ci/bindingtester/spec/directoryLayerTester.md b/.ci/bindingtester/spec/directoryLayerTester.md new file mode 100644 index 0000000..18489b6 --- /dev/null +++ b/.ci/bindingtester/spec/directoryLayerTester.md @@ -0,0 +1,241 @@ +Overview +-------- + +The directory layer is tested by adding some additional instructions and state to +the existing stack tester. Each 'thread' of the stack tester should have its own +directory testing state. + +Additional State and Initialization +----------------------------------- + +Your tester should store three additional pieces of state. + +* directory list - The items in this list should be accessible by index. The list +should support an append operation. It will be required to store Subspaces, +DirectorySubspaces, and DirectoryLayers. + +* directory list index - an index into the directory list of the currently active +directory. + +* error index - the index to use when the directory at directory list index is not +present + +At the beginning of the test, the list should contain just the default directory +layer. The directory index and error index should both be set to 0. + +Popping Tuples +------------- + +Some instructions will require you to pop N tuples. To do this, repeat the +following procedure N times: + +Pop 1 item off the stack as M. Pop M items off the stack as +tuple = [item1, ..., itemM]. + +Errors +------ + +In the even that you encounter an error when performing a directory layer +operation, you should push the byte string: `"DIRECTORY_ERROR"` onto the stack. If +the operation being performed was supposed to append an item to the directory +list, then a null entry should be appended instead. + +New Instructions +---------------- + +Below are the new instructions that must be implemented to test the directory +layer. Some instructions specify that the current directory should be used +for the operation. In that case, use the object in the directory list specified +by the current directory list index. 
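For illustration, the per-thread directory-testing state could be kept in a small Python structure like the sketch below; the class and method names are assumptions, and only the three pieces of state and the error behaviour come from this specification.

```python
# Sketch of per-thread directory tester state; names are illustrative.
import fdb
fdb.api_version(710)

class DirectoryTestState:
    def __init__(self):
        self.dir_list = [fdb.directory]   # starts with just the default directory layer
        self.dir_index = 0                # index of the currently active directory
        self.error_index = 0              # index to fall back to when an entry is null

    def current(self):
        return self.dir_list[self.dir_index]

    def handle_error(self, stack, inst_num, appends_to_list):
        stack.append((inst_num, b'DIRECTORY_ERROR'))
        if appends_to_list:
            self.dir_list.append(None)    # null placeholder for the failed append
```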
Operations that are not defined for a +particular object will not be called (e.g. a DirectoryLayer will never be asked +to pack a key). + +Directory/Subspace/Layer Creation +--------------------------------- + +#### DIRECTORY_CREATE_SUBSPACE + + Pop 1 tuple off the stack as [path]. Pop 1 additional item as [raw_prefix]. + Create a subspace with path as the prefix tuple and the specified + raw_prefix. Append it to the directory list. + +#### DIRECTORY_CREATE_LAYER + + Pop 3 items off the stack as [index1, index2, allow_manual_prefixes]. Let + node_subspace be the object in the directory list at index1 and + content_subspace be the object in the directory list at index2. Create a new + directory layer with the specified node_subspace and content_subspace. If + allow_manual_prefixes is 1, then enable manual prefixes on the directory + layer. Append the resulting directory layer to the directory list. + + If either of the two specified subspaces is null, then do not create a + directory layer and instead push null onto the directory list. + +#### DIRECTORY_CREATE_OR_OPEN[_DATABASE] + + Use the current directory for this operation. + + Pop 1 tuple off the stack as [path]. Pop 1 additional item as [layer]. + create_or_open a directory with the specified path and layer. If layer is + null, use the default value for that parameter. + +#### DIRECTORY_CREATE[_DATABASE] + + Pop 1 tuple off the stack as [path]. Pop 2 additional items as + [layer, prefix]. create a directory with the specified path, layer, + and prefix. If either of layer or prefix is null, use the default value for + that parameter (layer='', prefix=null). + +#### DIRECTORY_OPEN[_DATABASE|_SNAPSHOT] + + Use the current directory for this operation. + + Pop 1 tuple off the stack as [path]. Pop 1 additional item as [layer]. Open + a directory with the specified path and layer. If layer is null, use the + default value (layer=''). + +Directory Management +-------------------- + +#### DIRECTORY_CHANGE + + Pop the top item off the stack as [index]. Set the current directory list + index to index. In the event that the directory at this new index is null + (as the result of a previous error), set the directory list index to the + error index. + +#### DIRECTORY_SET_ERROR_INDEX + + Pop the top item off the stack as [error_index]. Set the current error index + to error_index. + +Directory Operations +-------------------- + +#### DIRECTORY_MOVE[_DATABASE] + + Use the current directory for this operation. + + Pop 2 tuples off the stack as [old_path, new_path]. Call move with the + specified old_path and new_path. Append the result onto the directory list. + +#### DIRECTORY_MOVE_TO[_DATABASE] + + Use the current directory for this operation. + + Pop 1 tuple off the stack as [new_absolute_path]. Call moveTo with the + specified new_absolute_path. Append the result onto the directory list. + +#### DIRECTORY_REMOVE[_DATABASE] + + Use the current directory for this operation. + + Pop 1 item off the stack as [count] (either 0 or 1). If count is 1, pop 1 + tuple off the stack as [path]. Call remove, passing it path if one was + popped. + +#### DIRECTORY_REMOVE_IF_EXISTS[_DATABASE] + + Use the current directory for this operation. + + Pop 1 item off the stack as [count] (either 0 or 1). If count is 1, pop 1 + tuple off the stack as [path]. Call remove_if_exists, passing it path if one + was popped. + +#### DIRECTORY_LIST[_DATABASE|_SNAPSHOT] + + Use the current directory for this operation. + + Pop 1 item off the stack as [count] (either 0 or 1).
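This count-then-optional-path convention is shared by DIRECTORY_REMOVE, DIRECTORY_REMOVE_IF_EXISTS, DIRECTORY_LIST, and DIRECTORY_EXISTS. A rough sketch of decoding it, reusing the illustrative pop_tuples helper from the earlier sketch (pop_optional_path is not part of the spec, and the directory method calls shown are the Python binding's; other bindings will differ):

```python
# Illustrative sketch only.
def pop_optional_path(stack):
    count = stack.pop()                  # either 0 or 1
    if count == 1:
        return pop_tuples(stack, 1)[0]   # the popped tuple is the path
    return None                          # no path was popped

# e.g. for DIRECTORY_REMOVE_IF_EXISTS on the current directory:
#   path = pop_optional_path(stack)
#   if path is None:
#       current_dir.remove_if_exists(tr)
#   else:
#       current_dir.remove_if_exists(tr, path)
```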
If count is 1, pop 1 + tuple off the stack as [path]. Call list, passing it path if one was popped. + Pack the resulting list of directories using the tuple layer and push the + packed string onto the stack. + +#### DIRECTORY_EXISTS[_DATABASE|_SNAPSHOT] + + Use the current directory for this operation. + + Pop 1 item off the stack as [count] (either 0 or 1). If count is 1, pop 1 + tuple off the stack as [path]. Call exists, passing it path if one + was popped. Push 1 onto the stack if the path exists and 0 if it does not. + +Subspace Operations +------------------- + +#### DIRECTORY_PACK_KEY + + Use the current directory for this operation. + + Pop 1 tuple off the stack as [key_tuple]. Pack key_tuple and push the result + onto the stack. + +#### DIRECTORY_UNPACK_KEY + + Use the current directory for this operation. + + Pop 1 item off the stack as [key]. Unpack key and push the resulting tuple + onto the stack one item at a time. + +#### DIRECTORY_RANGE + + Use the current directory for this operation. + + Pop 1 tuple off the stack as [tuple]. Create a range using tuple and push + range.begin and range.end onto the stack. + +#### DIRECTORY_CONTAINS + + Use the current directory for this operation. + + Pop 1 item off the stack as [key]. Check if the current directory contains + the specified key. Push 1 if it does and 0 if it doesn't. + +#### DIRECTORY_OPEN_SUBSPACE + + Use the current directory for this operation. + + Pop 1 tuple off the stack as [tuple]. Open the subspace of the current + directory specified by tuple and push it onto the directory list. + +Directory Logging +-------------------- + +#### DIRECTORY_LOG_SUBSPACE + + Use the current directory for this operation. + + Pop 1 item off the stack as [prefix]. Let key equal + prefix + tuple.pack([dir_index]). Set key to be the result of calling + directory.key() in the current transaction. + +#### DIRECTORY_LOG_DIRECTORY + + Use the current directory for this operation. + + Pop 1 item off the stack as [raw_prefix]. Create a subspace log_subspace + with path (dir_index) and the specified raw_prefix. Set: + + tr[log_subspace[u'path']] = the tuple packed path of the directory. + + tr[log_subspace[u'layer']] = the tuple packed layer of the directory. + + tr[log_subspace[u'exists']] = the packed tuple containing a 1 if the + directory exists and 0 if it doesn't. + + tr[log_subspace[u'children']] = the tuple packed list of children of the + directory. + + Where log_subspace[u<str>] is the subspace packed tuple containing only the + single specified unicode string <str>. + +Other +----- + +#### DIRECTORY_STRIP_PREFIX + + Use the current directory for this operation. + + Pop 1 item off the stack as [byte_array]. Call .key() on the current + subspace and store the result as [prefix]. Throw an error if the popped + array does not start with prefix. Otherwise, remove the prefix from the + popped array and push the result onto the stack. diff --git a/.ci/bindingtester/spec/tenantTester.md b/.ci/bindingtester/spec/tenantTester.md new file mode 100644 index 0000000..2ba54a7 --- /dev/null +++ b/.ci/bindingtester/spec/tenantTester.md @@ -0,0 +1,77 @@ +Overview +-------- + +Tenant testing is an optional extension to the core binding tester that enables +testing of the tenant API. This testing is enabled by adding some additional +instructions and modifying the behavior of some existing instructions.
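The sections below define the required state and instruction semantics. As a rough preview, the active-tenant handling in a tester might look like the sketch that follows; the TenantState class is illustrative and not part of the spec, and it assumes the Python binding's open_tenant/create_transaction calls (adapt to your binding's API).

```python
# Illustrative sketch only; TenantState is not part of the spec.
class TenantState:
    def __init__(self, db):
        self.db = db
        self.active_tenant = None  # unset: operate directly on the database

    def set_active(self, tenant_name):    # TENANT_SET_ACTIVE
        self.active_tenant = self.db.open_tenant(tenant_name)

    def clear_active(self):               # TENANT_CLEAR_ACTIVE
        self.active_tenant = None

    def new_transaction(self):            # NEW_TRANSACTION with tenants enabled
        source = self.active_tenant if self.active_tenant is not None else self.db
        return source.create_transaction()
```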
+ +Additional State and Initialization +----------------------------------- + +Your tester should store an additional piece of state tracking the active tenant +that is to be used to create transactions. This tenant must support an unset +state, in which case transactions will be created directly on the database. + +New Instructions +---------------- + +The tenant API introduces some new operations: + +#### TENANT_CREATE + + Pops the top item off of the stack as TENANT_NAME. Creates a new tenant + in the database with the name TENANT_NAME. May optionally push a future + onto the stack. + +#### TENANT_DELETE + + Pops the top item off of the stack as TENANT_NAME. Deletes the tenant with + the name TENANT_NAME from the database. May optionally push a future onto + the stack. + +#### TENANT_SET_ACTIVE + + Pops the top item off of the stack as TENANT_NAME. Opens the tenant with + name TENANT_NAME and stores it as the active tenant. + +#### TENANT_CLEAR_ACTIVE + + Unsets the active tenant. + +Updates to Existing Instructions +-------------------------------- + +Some existing operations in the binding tester will have slightly modified +behavior when tenants are enabled. + +#### NEW_TRANSACTION + + When creating a new transaction, the active tenant should be used. If no active + tenant is set, then the transaction should be created as normal using the + database. + +#### _TENANT suffix + + Similar to the _DATABASE suffix, an operation with the _TENANT suffix indicates + that the operation should be performed on the current active tenant object. If + there is no active tenant, then the operation should be performed on the database + as if _DATABASE was specified. In any case where the operation suffixed with + _DATABASE is allowed to push a future onto the stack, the same operation suffixed + with _TENANT is also allowed to push a future onto the stack. + + If your binding does not support operations directly on a tenant object, you should + simulate it using an anonymous transaction. Remember that set and clear operations + must immediately commit (with appropriate retry behavior!). + + Operations that can include the _TENANT prefix are: + + GET_TENANT + GET_KEY_TENANT + GET_RANGE_TENANT + GET_RANGE_STARTS_WITH_TENANT + GET_RANGE_SELECTOR_TENANT + SET_TENANT + CLEAR_TENANT + CLEAR_RANGE_TENANT + CLEAR_RANGE_STARTS_WITH_TENANT + ATOMIC_OP_TENANT diff --git a/.ci/bindingtester/tests/.api.py.swp b/.ci/bindingtester/tests/.api.py.swp new file mode 100644 index 0000000..b725aa3 Binary files /dev/null and b/.ci/bindingtester/tests/.api.py.swp differ diff --git a/.ci/bindingtester/tests/__init__.py b/.ci/bindingtester/tests/__init__.py new file mode 100644 index 0000000..46d686e --- /dev/null +++ b/.ci/bindingtester/tests/__init__.py @@ -0,0 +1,219 @@ +# +# __init__.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import math +import re +import struct + +import fdb + +from bindingtester import FDB_API_VERSION +from bindingtester import util + +fdb.api_version(FDB_API_VERSION) + + +class ResultSpecification(object): + def __init__(self, subspace, key_start_index=0, ordering_index=None, global_error_filter=None): + self.subspace = subspace + self.key_start_index = key_start_index + self.ordering_index = ordering_index + + if global_error_filter is not None: + error_str = b'|'.join([b'%d' % e for e in global_error_filter]) + self.error_regex = re.compile(rb'\x01+ERROR\x00\xff*\x01' + error_str + rb'\x00') + else: + self.error_regex = None + + def matches_global_error_filter(self, str): + if self.error_regex is None: + return False + + return self.error_regex.search(str) is not None + + +class Test(object): + def __init__(self, subspace, min_api_version=0, max_api_version=int(1e9)): + self.subspace = subspace + self.min_api_version = min_api_version + self.max_api_version = max_api_version + + # Returns nothing + def setup(self, args): + pass + + # Returns an instance of TestInstructions + def generate(self, args, thread_number): + pass + + # Returns nothing + def pre_run(self, db, args): + pass + + # Returns a list of ResultSpecifications to read data from and compare with other testers + def get_result_specifications(self): + return [] + + # Returns a dict { subspace => results } of results that the test is expected to have. + # Compared against subspaces returned by get_result_subspaces. A subspace omitted from this dictionary + # can still be compared against other testers if it is added to the list returned by get_result_subspaces. + def get_expected_results(self): + return {} + + # Returns a list of error strings + def validate(self, db, args): + return [] + + def versionstamp_key(self, raw_bytes, version_pos): + if hasattr(self, 'api_version') and self.api_version < 520: + return raw_bytes + struct.pack(' InstructionSets + def get_threads(self, subspace): + pass + + def insert_operations(self, db, subspace): + pass + + +class InstructionSet(TestInstructions, list): + def __init__(self): + TestInstructions.__init__(self) + list.__init__(self) + + self.core_test_begin = 0 + self.core_test_end = None + + def push_args(self, *args): + self.extend([PushInstruction(arg) for arg in reversed(args)]) + + def append(self, instruction): + if isinstance(instruction, Instruction): + list.append(self, instruction) + else: + list.append(self, Instruction(instruction)) + + def get_threads(self, subspace): + return {subspace: self} + + def setup_complete(self): + self.core_test_begin = len(self) + + def begin_finalization(self): + self.core_test_end = len(self) + + def core_instructions(self): + return self[self.core_test_begin: self.core_test_end] + + @fdb.transactional + def _insert_operations_transactional(self, tr, subspace, start, count): + for i, instruction in enumerate(self[start: start + count]): + tr[subspace.pack((start + i,))] = instruction.to_value() + + def insert_operations(self, db, subspace): + for i in range(0, int(math.ceil(len(self) / 5000.0))): + self._insert_operations_transactional(db, subspace, i * 5000, 5000) + + +class ThreadedInstructionSet(TestInstructions): + def __init__(self): + super(ThreadedInstructionSet, self).__init__() + self.threads = {} + + def get_threads(self, subspace): + result = dict(self.threads) + if None in self.threads: + result[subspace] = result[None] + del result[None] + + return result + + def insert_operations(self, db, subspace): + for thread_subspace, 
thread in self.threads.items(): + if thread_subspace is None: + thread_subspace = subspace + + thread.insert_operations(db, thread_subspace) + + def create_thread(self, subspace=None, thread_instructions=None): + if subspace in self.threads: + raise 'An instruction set with the subspace %r has already been created' % util.subspace_to_tuple(subspace) + + if thread_instructions == None: + thread_instructions = InstructionSet() + + self.threads[subspace] = thread_instructions + return thread_instructions + + +util.import_subclasses(__file__, 'bindingtester.tests') diff --git a/.ci/bindingtester/tests/__pycache__/__init__.cpython-311.pyc b/.ci/bindingtester/tests/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..eea2ca5 Binary files /dev/null and b/.ci/bindingtester/tests/__pycache__/__init__.cpython-311.pyc differ diff --git a/.ci/bindingtester/tests/__pycache__/api.cpython-311.pyc b/.ci/bindingtester/tests/__pycache__/api.cpython-311.pyc new file mode 100644 index 0000000..238e5d6 Binary files /dev/null and b/.ci/bindingtester/tests/__pycache__/api.cpython-311.pyc differ diff --git a/.ci/bindingtester/tests/__pycache__/directory.cpython-311.pyc b/.ci/bindingtester/tests/__pycache__/directory.cpython-311.pyc new file mode 100644 index 0000000..616d712 Binary files /dev/null and b/.ci/bindingtester/tests/__pycache__/directory.cpython-311.pyc differ diff --git a/.ci/bindingtester/tests/__pycache__/directory_hca.cpython-311.pyc b/.ci/bindingtester/tests/__pycache__/directory_hca.cpython-311.pyc new file mode 100644 index 0000000..75aa843 Binary files /dev/null and b/.ci/bindingtester/tests/__pycache__/directory_hca.cpython-311.pyc differ diff --git a/.ci/bindingtester/tests/__pycache__/directory_state_tree.cpython-311.pyc b/.ci/bindingtester/tests/__pycache__/directory_state_tree.cpython-311.pyc new file mode 100644 index 0000000..f5398e4 Binary files /dev/null and b/.ci/bindingtester/tests/__pycache__/directory_state_tree.cpython-311.pyc differ diff --git a/.ci/bindingtester/tests/__pycache__/directory_util.cpython-311.pyc b/.ci/bindingtester/tests/__pycache__/directory_util.cpython-311.pyc new file mode 100644 index 0000000..30498a9 Binary files /dev/null and b/.ci/bindingtester/tests/__pycache__/directory_util.cpython-311.pyc differ diff --git a/.ci/bindingtester/tests/__pycache__/scripted.cpython-311.pyc b/.ci/bindingtester/tests/__pycache__/scripted.cpython-311.pyc new file mode 100644 index 0000000..af64dcf Binary files /dev/null and b/.ci/bindingtester/tests/__pycache__/scripted.cpython-311.pyc differ diff --git a/.ci/bindingtester/tests/__pycache__/test_util.cpython-311.pyc b/.ci/bindingtester/tests/__pycache__/test_util.cpython-311.pyc new file mode 100644 index 0000000..16b5b79 Binary files /dev/null and b/.ci/bindingtester/tests/__pycache__/test_util.cpython-311.pyc differ diff --git a/.ci/bindingtester/tests/__pycache__/tuple.cpython-311.pyc b/.ci/bindingtester/tests/__pycache__/tuple.cpython-311.pyc new file mode 100644 index 0000000..99cdd4c Binary files /dev/null and b/.ci/bindingtester/tests/__pycache__/tuple.cpython-311.pyc differ diff --git a/.ci/bindingtester/tests/api.py b/.ci/bindingtester/tests/api.py new file mode 100644 index 0000000..fd495fa --- /dev/null +++ b/.ci/bindingtester/tests/api.py @@ -0,0 +1,673 @@ +# +# api.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. 
and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import random +import struct + +import fdb +import fdb.tuple + +from bindingtester import FDB_API_VERSION +from bindingtester import util +from bindingtester.tests import Test, Instruction, InstructionSet, ResultSpecification +from bindingtester.tests import test_util + +fdb.api_version(FDB_API_VERSION) + + +class ApiTest(Test): + def __init__(self, subspace): + super(ApiTest, self).__init__(subspace) + self.workspace = self.subspace['workspace'] # The keys and values here must match between subsequent runs of the same test + self.scratch = self.subspace['scratch'] # The keys and values here can differ between runs + self.stack_subspace = self.subspace['stack'] + + self.versionstamped_values = self.scratch['versionstamped_values'] + self.versionstamped_values_2 = self.scratch['versionstamped_values_2'] + self.versionstamped_keys = self.scratch['versionstamped_keys'] + + def setup(self, args): + self.stack_size = 0 + self.string_depth = 0 + self.key_depth = 0 + self.max_keys = 1000 + + self.has_version = False + self.can_set_version = True + self.can_get_commit_version = False + self.can_use_key_selectors = True + + self.generated_keys = [] + self.outstanding_ops = [] + self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types) + self.api_version = args.api_version + self.allocated_tenants = set() + + def add_stack_items(self, num): + self.stack_size += num + self.string_depth = 0 + self.key_depth = 0 + + def add_strings(self, num): + self.stack_size += num + self.string_depth += num + self.key_depth = 0 + + def add_keys(self, num): + self.stack_size += num + self.string_depth += num + self.key_depth += num + + def remove(self, num): + self.stack_size -= num + self.string_depth = max(0, self.string_depth - num) + self.key_depth = max(0, self.key_depth - num) + + self.outstanding_ops = [i for i in self.outstanding_ops if i[0] <= self.stack_size] + + def ensure_string(self, instructions, num): + while self.string_depth < num: + instructions.push_args(self.random.random_string(random.randint(0, 100))) + self.add_strings(1) + + self.remove(num) + + def choose_key(self): + if random.random() < float(len(self.generated_keys)) / self.max_keys: + tup = random.choice(self.generated_keys) + if random.random() < 0.3: + return self.workspace.pack(tup[0:random.randint(0, len(tup))]) + + return self.workspace.pack(tup) + + tup = self.random.random_tuple(5) + self.generated_keys.append(tup) + + return self.workspace.pack(tup) + + def ensure_key(self, instructions, num): + while self.key_depth < num: + instructions.push_args(self.choose_key()) + self.add_keys(1) + + self.remove(num) + + def ensure_key_value(self, instructions): + if self.string_depth == 0: + instructions.push_args(self.choose_key(), self.random.random_string(random.randint(0, 100))) + + elif self.string_depth == 1 or self.key_depth == 0: + self.ensure_key(instructions, 1) + self.remove(1) + + else: + self.remove(2) + + def 
preload_database(self, instructions, num): + for i in range(num): + self.ensure_key_value(instructions) + instructions.append('SET') + + if i % 100 == 99: + test_util.blocking_commit(instructions) + + test_util.blocking_commit(instructions) + self.add_stack_items(1) + + def wait_for_reads(self, instructions): + while len(self.outstanding_ops) > 0 and self.outstanding_ops[-1][0] <= self.stack_size: + read = self.outstanding_ops.pop() + # print '%d. waiting for read at instruction %r' % (len(instructions), read) + test_util.to_front(instructions, self.stack_size - read[0]) + instructions.append('WAIT_FUTURE') + + def choose_tenant(self, new_tenant_probability): + if len(self.allocated_tenants) == 0 or random.random() < new_tenant_probability: + return self.random.random_string(random.randint(0, 30)) + else: + return random.choice(list(self.allocated_tenants)) + + def generate(self, args, thread_number): + instructions = InstructionSet() + + op_choices = ['NEW_TRANSACTION', 'COMMIT'] + + reads = ['GET', 'GET_KEY', 'GET_RANGE', 'GET_RANGE_STARTS_WITH', 'GET_RANGE_SELECTOR'] + mutations = ['SET', 'CLEAR', 'CLEAR_RANGE', 'CLEAR_RANGE_STARTS_WITH', 'ATOMIC_OP'] + snapshot_reads = [x + '_SNAPSHOT' for x in reads] + database_reads = [x + '_DATABASE' for x in reads] + database_mutations = [x + '_DATABASE' for x in mutations] + mutations += ['VERSIONSTAMP'] + versions = ['GET_READ_VERSION', 'SET_READ_VERSION', 'GET_COMMITTED_VERSION'] + snapshot_versions = ['GET_READ_VERSION_SNAPSHOT'] + tuples = ['TUPLE_PACK', 'TUPLE_UNPACK', 'TUPLE_RANGE', 'TUPLE_SORT', 'SUB', 'ENCODE_FLOAT', 'ENCODE_DOUBLE', 'DECODE_DOUBLE', 'DECODE_FLOAT'] + if 'versionstamp' in args.types: + tuples.append('TUPLE_PACK_WITH_VERSIONSTAMP') + resets = ['ON_ERROR', 'RESET', 'CANCEL'] + read_conflicts = ['READ_CONFLICT_RANGE', 'READ_CONFLICT_KEY'] + write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT'] + txn_sizes = ['GET_APPROXIMATE_SIZE'] + storage_metrics = ['GET_ESTIMATED_RANGE_SIZE', 'GET_RANGE_SPLIT_POINTS'] + tenants = ['TENANT_CREATE', 'TENANT_DELETE', 'TENANT_SET_ACTIVE', 'TENANT_CLEAR_ACTIVE'] + + op_choices += reads + op_choices += mutations + op_choices += snapshot_reads + op_choices += database_reads + op_choices += database_mutations + op_choices += versions + op_choices += snapshot_versions + op_choices += tuples + op_choices += read_conflicts + op_choices += write_conflicts + op_choices += resets + op_choices += txn_sizes + op_choices += storage_metrics + + if not args.no_tenants: + op_choices += tenants + + idempotent_atomic_ops = ['BIT_AND', 'BIT_OR', 'MAX', 'MIN', 'BYTE_MIN', 'BYTE_MAX'] + atomic_ops = idempotent_atomic_ops + ['ADD', 'BIT_XOR', 'APPEND_IF_FITS'] + + if args.concurrency > 1: + self.max_keys = random.randint(100, 1000) + else: + self.max_keys = random.randint(100, 10000) + + instructions.append('NEW_TRANSACTION') + instructions.append('GET_READ_VERSION') + + self.preload_database(instructions, self.max_keys) + + instructions.setup_complete() + + for i in range(args.num_ops): + op = random.choice(op_choices) + index = len(instructions) + read_performed = False + + # print 'Adding instruction %s at %d' % (op, index) + + if args.concurrency == 1 and (op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']): + self.wait_for_reads(instructions) + test_util.blocking_commit(instructions) + self.can_get_commit_version = False + self.add_stack_items(1) + + if op in resets or op == 'NEW_TRANSACTION': + if args.concurrency == 1: + 
self.wait_for_reads(instructions) + + self.outstanding_ops = [] + + if op == 'NEW_TRANSACTION': + instructions.append(op) + self.can_get_commit_version = True + self.can_set_version = True + self.can_use_key_selectors = True + + elif op == 'ON_ERROR': + instructions.push_args(random.randint(0, 5000)) + instructions.append(op) + + self.outstanding_ops.append((self.stack_size, len(instructions) - 1)) + if args.concurrency == 1: + self.wait_for_reads(instructions) + + instructions.append('NEW_TRANSACTION') + self.can_get_commit_version = True + self.can_set_version = True + self.can_use_key_selectors = True + self.add_strings(1) + + elif op == 'GET' or op == 'GET_SNAPSHOT' or op == 'GET_DATABASE': + self.ensure_key(instructions, 1) + instructions.append(op) + self.add_strings(1) + self.can_set_version = False + read_performed = True + + elif op == 'GET_KEY' or op == 'GET_KEY_SNAPSHOT' or op == 'GET_KEY_DATABASE': + if op.endswith('_DATABASE') or self.can_use_key_selectors: + self.ensure_key(instructions, 1) + instructions.push_args(self.workspace.key()) + instructions.push_args(*self.random.random_selector_params()) + test_util.to_front(instructions, 3) + instructions.append(op) + + # Don't add key here because we may be outside of our prefix + self.add_strings(1) + self.can_set_version = False + read_performed = True + + elif op == 'GET_RANGE' or op == 'GET_RANGE_SNAPSHOT' or op == 'GET_RANGE_DATABASE': + self.ensure_key(instructions, 2) + range_params = self.random.random_range_params() + instructions.push_args(*range_params) + test_util.to_front(instructions, 4) + test_util.to_front(instructions, 4) + instructions.append(op) + + if range_params[0] >= 1 and range_params[0] <= 1000: # avoid adding a string if the limit is large + self.add_strings(1) + else: + self.add_stack_items(1) + + self.can_set_version = False + read_performed = True + + elif op == 'GET_RANGE_STARTS_WITH' or op == 'GET_RANGE_STARTS_WITH_SNAPSHOT' or op == 'GET_RANGE_STARTS_WITH_DATABASE': + # TODO: not tested well + self.ensure_key(instructions, 1) + range_params = self.random.random_range_params() + instructions.push_args(*range_params) + test_util.to_front(instructions, 3) + instructions.append(op) + + if range_params[0] >= 1 and range_params[0] <= 1000: # avoid adding a string if the limit is large + self.add_strings(1) + else: + self.add_stack_items(1) + + self.can_set_version = False + read_performed = True + + elif op == 'GET_RANGE_SELECTOR' or op == 'GET_RANGE_SELECTOR_SNAPSHOT' or op == 'GET_RANGE_SELECTOR_DATABASE': + if op.endswith('_DATABASE') or self.can_use_key_selectors: + self.ensure_key(instructions, 2) + instructions.push_args(self.workspace.key()) + range_params = self.random.random_range_params() + instructions.push_args(*range_params) + instructions.push_args(*self.random.random_selector_params()) + test_util.to_front(instructions, 6) + instructions.push_args(*self.random.random_selector_params()) + test_util.to_front(instructions, 9) + instructions.append(op) + + if range_params[0] >= 1 and range_params[0] <= 1000: # avoid adding a string if the limit is large + self.add_strings(1) + else: + self.add_stack_items(1) + + self.can_set_version = False + read_performed = True + + elif op == 'GET_READ_VERSION' or op == 'GET_READ_VERSION_SNAPSHOT': + instructions.append(op) + self.has_version = self.can_set_version + self.add_strings(1) + + elif op == 'SET' or op == 'SET_DATABASE': + self.ensure_key_value(instructions) + instructions.append(op) + if op == 'SET_DATABASE': + self.add_stack_items(1) + + elif 
op == 'SET_READ_VERSION': + if self.has_version and self.can_set_version: + instructions.append(op) + self.can_set_version = False + + elif op == 'CLEAR' or op == 'CLEAR_DATABASE': + self.ensure_key(instructions, 1) + instructions.append(op) + if op == 'CLEAR_DATABASE': + self.add_stack_items(1) + + elif op == 'CLEAR_RANGE' or op == 'CLEAR_RANGE_DATABASE': + # Protect against inverted range + key1 = self.workspace.pack(self.random.random_tuple(5)) + key2 = self.workspace.pack(self.random.random_tuple(5)) + + if key1 > key2: + key1, key2 = key2, key1 + + instructions.push_args(key1, key2) + + instructions.append(op) + if op == 'CLEAR_RANGE_DATABASE': + self.add_stack_items(1) + + elif op == 'CLEAR_RANGE_STARTS_WITH' or op == 'CLEAR_RANGE_STARTS_WITH_DATABASE': + self.ensure_key(instructions, 1) + instructions.append(op) + if op == 'CLEAR_RANGE_STARTS_WITH_DATABASE': + self.add_stack_items(1) + + elif op == 'ATOMIC_OP' or op == 'ATOMIC_OP_DATABASE': + self.ensure_key_value(instructions) + if op == 'ATOMIC_OP' or args.concurrency > 1: + instructions.push_args(random.choice(atomic_ops)) + else: + instructions.push_args(random.choice(idempotent_atomic_ops)) + + instructions.append(op) + if op == 'ATOMIC_OP_DATABASE': + self.add_stack_items(1) + + elif op == 'VERSIONSTAMP': + rand_str1 = self.random.random_string(100) + key1 = self.versionstamped_values.pack((rand_str1,)) + key2 = self.versionstamped_values_2.pack((rand_str1,)) + + split = random.randint(0, 70) + prefix = self.random.random_string(20 + split) + if prefix.endswith(b'\xff'): + # Necessary to make sure that the SET_VERSIONSTAMPED_VALUE check + # correctly finds where the version is supposed to fit in. + prefix += b'\x00' + suffix = self.random.random_string(70 - split) + rand_str2 = prefix + fdb.tuple.Versionstamp._UNSET_TR_VERSION + suffix + key3 = self.versionstamped_keys.pack() + rand_str2 + index = len(self.versionstamped_keys.pack()) + len(prefix) + key3 = self.versionstamp_key(key3, index) + + instructions.push_args('SET_VERSIONSTAMPED_VALUE', + key1, + self.versionstamp_value(fdb.tuple.Versionstamp._UNSET_TR_VERSION + rand_str2)) + instructions.append('ATOMIC_OP') + + if args.api_version >= 520: + instructions.push_args('SET_VERSIONSTAMPED_VALUE', key2, self.versionstamp_value(rand_str2, len(prefix))) + instructions.append('ATOMIC_OP') + + instructions.push_args('SET_VERSIONSTAMPED_KEY', key3, rand_str1) + instructions.append('ATOMIC_OP') + self.can_use_key_selectors = False + + elif op == 'READ_CONFLICT_RANGE' or op == 'WRITE_CONFLICT_RANGE': + self.ensure_key(instructions, 2) + instructions.append(op) + self.add_strings(1) + + elif op == 'READ_CONFLICT_KEY' or op == 'WRITE_CONFLICT_KEY': + self.ensure_key(instructions, 1) + instructions.append(op) + self.add_strings(1) + + elif op == 'DISABLE_WRITE_CONFLICT': + instructions.append(op) + + elif op == 'COMMIT': + if args.concurrency == 1 or i < self.max_keys or random.random() < 0.9: + if args.concurrency == 1: + self.wait_for_reads(instructions) + test_util.blocking_commit(instructions) + self.can_get_commit_version = False + self.add_stack_items(1) + self.can_set_version = True + self.can_use_key_selectors = True + else: + instructions.append(op) + self.add_strings(1) + + elif op == 'RESET': + instructions.append(op) + self.can_get_commit_version = False + self.can_set_version = True + self.can_use_key_selectors = True + + elif op == 'CANCEL': + instructions.append(op) + self.can_set_version = False + + elif op == 'GET_COMMITTED_VERSION': + if self.can_get_commit_version: 
+ do_commit = random.random() < 0.5 + + if do_commit: + instructions.append('COMMIT') + instructions.append('WAIT_FUTURE') + self.add_stack_items(1) + + instructions.append(op) + + self.has_version = True + self.add_strings(1) + + if do_commit: + instructions.append('RESET') + self.can_get_commit_version = False + self.can_set_version = True + self.can_use_key_selectors = True + + elif op == 'GET_APPROXIMATE_SIZE': + instructions.append(op) + self.add_strings(1) + + elif op == 'TUPLE_PACK' or op == 'TUPLE_RANGE': + tup = self.random.random_tuple(10) + instructions.push_args(len(tup), *tup) + instructions.append(op) + if op == 'TUPLE_PACK': + self.add_strings(1) + else: + self.add_strings(2) + + elif op == 'TUPLE_PACK_WITH_VERSIONSTAMP': + tup = (self.random.random_string(20),) + self.random.random_tuple(10, incomplete_versionstamps=True) + prefix = self.versionstamped_keys.pack() + instructions.push_args(prefix, len(tup), *tup) + instructions.append(op) + self.add_strings(1) + + versionstamp_param = prefix + fdb.tuple.pack(tup) + first_incomplete = versionstamp_param.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + second_incomplete = -1 if first_incomplete < 0 else \ + versionstamp_param.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION, first_incomplete + len(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + 1) + + # If there is exactly one incomplete versionstamp, perform the versionstamp operation. + if first_incomplete >= 0 and second_incomplete < 0: + rand_str = self.random.random_string(100) + + instructions.push_args(rand_str) + test_util.to_front(instructions, 1) + instructions.push_args('SET_VERSIONSTAMPED_KEY') + instructions.append('ATOMIC_OP') + + if self.api_version >= 520: + version_value_key_2 = self.versionstamped_values_2.pack((rand_str,)) + versionstamped_value = self.versionstamp_value(fdb.tuple.pack(tup), first_incomplete - len(prefix)) + instructions.push_args('SET_VERSIONSTAMPED_VALUE', version_value_key_2, versionstamped_value) + instructions.append('ATOMIC_OP') + + version_value_key = self.versionstamped_values.pack((rand_str,)) + instructions.push_args('SET_VERSIONSTAMPED_VALUE', version_value_key, + self.versionstamp_value(fdb.tuple.Versionstamp._UNSET_TR_VERSION + fdb.tuple.pack(tup))) + instructions.append('ATOMIC_OP') + self.can_use_key_selectors = False + + elif op == 'TUPLE_UNPACK': + tup = self.random.random_tuple(10) + instructions.push_args(len(tup), *tup) + instructions.append('TUPLE_PACK') + instructions.append(op) + self.add_strings(len(tup)) + + elif op == 'TUPLE_SORT': + tups = self.random.random_tuple_list(10, 30) + for tup in tups: + instructions.push_args(len(tup), *tup) + instructions.append('TUPLE_PACK') + instructions.push_args(len(tups)) + instructions.append(op) + self.add_strings(len(tups)) + + # Use SUB to test if integers are correctly unpacked + elif op == 'SUB': + a = self.random.random_int() // 2 + b = self.random.random_int() // 2 + instructions.push_args(0, a, b) + instructions.append(op) + instructions.push_args(1) + instructions.append('SWAP') + instructions.append(op) + instructions.push_args(1) + instructions.append('TUPLE_PACK') + self.add_stack_items(1) + + elif op == 'ENCODE_FLOAT': + f = self.random.random_float(8) + f_bytes = struct.pack('>f', f) + instructions.push_args(f_bytes) + instructions.append(op) + self.add_stack_items(1) + + elif op == 'ENCODE_DOUBLE': + d = self.random.random_float(11) + d_bytes = struct.pack('>d', d) + instructions.push_args(d_bytes) + instructions.append(op) + self.add_stack_items(1) + + elif op == 
'DECODE_FLOAT': + f = self.random.random_float(8) + instructions.push_args(fdb.tuple.SingleFloat(f)) + instructions.append(op) + self.add_strings(1) + + elif op == 'DECODE_DOUBLE': + d = self.random.random_float(11) + instructions.push_args(d) + instructions.append(op) + self.add_strings(1) + elif op == 'GET_ESTIMATED_RANGE_SIZE': + # Protect against inverted range and identical keys + key1 = self.workspace.pack(self.random.random_tuple(1)) + key2 = self.workspace.pack(self.random.random_tuple(1)) + + while key1 == key2: + key1 = self.workspace.pack(self.random.random_tuple(1)) + key2 = self.workspace.pack(self.random.random_tuple(1)) + + if key1 > key2: + key1, key2 = key2, key1 + + instructions.push_args(key1, key2) + instructions.append(op) + self.add_strings(1) + elif op == 'GET_RANGE_SPLIT_POINTS': + # Protect against inverted range and identical keys + key1 = self.workspace.pack(self.random.random_tuple(1)) + key2 = self.workspace.pack(self.random.random_tuple(1)) + + while key1 == key2: + key1 = self.workspace.pack(self.random.random_tuple(1)) + key2 = self.workspace.pack(self.random.random_tuple(1)) + + if key1 > key2: + key1, key2 = key2, key1 + + # TODO: randomize chunkSize but should not exceed 100M(shard limit) + chunkSize = 10000000 # 10M + instructions.push_args(key1, key2, chunkSize) + instructions.append(op) + self.add_strings(1) + elif op == 'TENANT_CREATE': + tenant_name = self.choose_tenant(0.8) + self.allocated_tenants.add(tenant_name) + instructions.push_args(tenant_name) + instructions.append(op) + self.add_strings(1) + elif op == 'TENANT_DELETE': + tenant_name = self.choose_tenant(0.2) + if tenant_name in self.allocated_tenants: + self.allocated_tenants.remove(tenant_name) + instructions.push_args(tenant_name) + instructions.append(op) + self.add_strings(1) + elif op == 'TENANT_SET_ACTIVE': + tenant_name = self.choose_tenant(0.8) + instructions.push_args(tenant_name) + instructions.append(op) + elif op == 'TENANT_CLEAR_ACTIVE': + instructions.append(op) + else: + assert False, 'Unknown operation: ' + op + + if read_performed and op not in database_reads: + self.outstanding_ops.append((self.stack_size, len(instructions) - 1)) + + if args.concurrency == 1 and (op in database_reads or op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']): + instructions.append('WAIT_FUTURE') + + instructions.begin_finalization() + + if not args.no_tenants: + instructions.append('TENANT_CLEAR_ACTIVE') + + if args.concurrency == 1: + self.wait_for_reads(instructions) + test_util.blocking_commit(instructions) + self.add_stack_items(1) + + instructions.append('NEW_TRANSACTION') + instructions.push_args(self.stack_subspace.key()) + instructions.append('LOG_STACK') + + test_util.blocking_commit(instructions) + + return instructions + + @fdb.transactional + def check_versionstamps(self, tr, begin_key, limit): + next_begin = None + incorrect_versionstamps = 0 + for k, v in tr.get_range(begin_key, self.versionstamped_values.range().stop, limit=limit): + next_begin = k + b'\x00' + random_id = self.versionstamped_values.unpack(k)[0] + versioned_value = v[10:].replace(fdb.tuple.Versionstamp._UNSET_TR_VERSION, v[:10], 1) + + versioned_key = self.versionstamped_keys.pack() + versioned_value + if tr[versioned_key] != random_id: + util.get_logger().error(' INCORRECT VERSIONSTAMP:') + util.get_logger().error(' %s != %s', repr(tr[versioned_key]), repr(random_id)) + incorrect_versionstamps += 1 + + if self.api_version >= 520: + k2 = self.versionstamped_values_2.pack((random_id,)) + if 
tr[k2] != versioned_value: + util.get_logger().error(' INCORRECT VERSIONSTAMP:') + util.get_logger().error(' %s != %s', repr(tr[k2]), repr(versioned_value)) + incorrect_versionstamps += 1 + + return (next_begin, incorrect_versionstamps) + + def validate(self, db, args): + errors = [] + + begin = self.versionstamped_values.range().start + incorrect_versionstamps = 0 + + while begin is not None: + (begin, current_incorrect_versionstamps) = self.check_versionstamps(db, begin, 100) + incorrect_versionstamps += current_incorrect_versionstamps + + if incorrect_versionstamps > 0: + errors.append('There were %d failed version stamp operations' % incorrect_versionstamps) + + return errors + + def get_result_specifications(self): + return [ + ResultSpecification(self.workspace, global_error_filter=[1007, 1009, 1021]), + ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021]) + ] diff --git a/.ci/bindingtester/tests/directory.py b/.ci/bindingtester/tests/directory.py new file mode 100644 index 0000000..e6a51d0 --- /dev/null +++ b/.ci/bindingtester/tests/directory.py @@ -0,0 +1,424 @@ +# +# directory.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import random + +import fdb + +from bindingtester import FDB_API_VERSION +from bindingtester import util + +from bindingtester.tests import Test, Instruction, InstructionSet, ResultSpecification +from bindingtester.tests import test_util, directory_util + +from bindingtester.tests.directory_state_tree import DirectoryStateTreeNode + +fdb.api_version(FDB_API_VERSION) + + +class DirectoryTest(Test): + + def __init__(self, subspace): + super(DirectoryTest, self).__init__(subspace) + self.stack_subspace = subspace['stack'] + self.directory_log = subspace['directory_log']['directory'] + self.subspace_log = subspace['directory_log']['subspace'] + self.prefix_log = subspace['prefix_log'] + + self.prepopulated_dirs = [] + self.next_path = 1 + + def ensure_default_directory_subspace(self, instructions, path): + directory_util.create_default_directory_subspace(instructions, path, self.random) + + child = self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=True)) + self.dir_list.append(child) + self.dir_index = directory_util.DEFAULT_DIRECTORY_INDEX + + def generate_layer(self, allow_partition=True): + if random.random() < 0.7: + return b'' + else: + choice = random.randint(0, 3) + if choice == 0 and allow_partition: + return b'partition' + elif choice == 1: + return b'test_layer' + else: + return self.random.random_string(random.randint(0, 5)) + + def setup(self, args): + self.dir_index = 0 + self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types) + + def generate(self, args, thread_number): + instructions = InstructionSet() + + op_choices = ['NEW_TRANSACTION', 'COMMIT'] + + general = ['DIRECTORY_CREATE_SUBSPACE', 'DIRECTORY_CREATE_LAYER'] + + op_choices += general + + directory_mutations = ['DIRECTORY_CREATE_OR_OPEN', 'DIRECTORY_CREATE', 'DIRECTORY_MOVE', 'DIRECTORY_MOVE_TO', + 'DIRECTORY_REMOVE', 'DIRECTORY_REMOVE_IF_EXISTS'] + directory_reads = ['DIRECTORY_EXISTS', 'DIRECTORY_OPEN', 'DIRECTORY_LIST'] + + directory_db_mutations = [x + '_DATABASE' for x in directory_mutations] + directory_db_reads = [x + '_DATABASE' for x in directory_reads] + directory_snapshot_reads = [x + '_SNAPSHOT' for x in directory_reads] + + directory = [] + directory += directory_mutations + directory += directory_reads + directory += directory_db_mutations + directory += directory_db_reads + + if not args.no_directory_snapshot_ops: + directory += directory_snapshot_reads + + subspace = ['DIRECTORY_PACK_KEY', 'DIRECTORY_UNPACK_KEY', 'DIRECTORY_RANGE', 'DIRECTORY_CONTAINS', 'DIRECTORY_OPEN_SUBSPACE'] + + instructions.append('NEW_TRANSACTION') + + default_path = 'default%d' % self.next_path + self.next_path += 1 + self.dir_list = directory_util.setup_directories(instructions, default_path, self.random) + self.root = self.dir_list[0] + + instructions.push_args(0) + instructions.append('DIRECTORY_CHANGE') + + # Generate some directories that we are going to create in advance. This tests that other bindings + # are compatible with the Python implementation + self.prepopulated_dirs = [(generate_path(min_length=1), self.generate_layer()) for i in range(5)] + + for path, layer in self.prepopulated_dirs: + instructions.push_args(layer) + instructions.push_args(*test_util.with_length(path)) + instructions.append('DIRECTORY_OPEN') + self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=False, is_partition=(layer==b'partition')))) + # print('%d. 
Selected %s, dir=%s, dir_id=%s, has_known_prefix=%s, dir_list_len=%d' \ + # % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), self.dir_list[-1].dir_id, False, len(self.dir_list)-1)) + + instructions.setup_complete() + + for i in range(args.num_ops): + if random.random() < 0.5: + while True: + self.dir_index = random.randrange(0, len(self.dir_list)) + if not self.dir_list[self.dir_index].state.is_partition or not self.dir_list[self.dir_index].state.deleted: + break + + instructions.push_args(self.dir_index) + instructions.append('DIRECTORY_CHANGE') + + dir_entry = self.dir_list[self.dir_index] + + choices = op_choices[:] + if dir_entry.state.is_directory: + choices += directory + if dir_entry.state.is_subspace: + choices += subspace + + op = random.choice(choices) + + # print('%d. Selected %s, dir=%d, dir_id=%d, has_known_prefix=%d, dir_list_len=%d' \ + # % (len(instructions), op, self.dir_index, dir_entry.dir_id, dir_entry.state.has_known_prefix, len(self.dir_list))) + + if op.endswith('_DATABASE') or op.endswith('_SNAPSHOT'): + root_op = op[0:-9] + else: + root_op = op + + if root_op == 'NEW_TRANSACTION': + instructions.append(op) + + elif root_op == 'COMMIT': + test_util.blocking_commit(instructions) + + elif root_op == 'DIRECTORY_CREATE_SUBSPACE': + path = generate_path() + instructions.push_args(generate_prefix(require_unique=False, is_partition=True)) + instructions.push_args(*test_util.with_length(path)) + instructions.append(op) + self.dir_list.append(DirectoryStateTreeNode(False, True, has_known_prefix=True)) + + elif root_op == 'DIRECTORY_CREATE_LAYER': + indices = [] + + prefixes = [generate_prefix(require_unique=args.concurrency==1, is_partition=True) for i in range(2)] + for i in range(2): + instructions.push_args(prefixes[i]) + instructions.push_args(*test_util.with_length(generate_path())) + instructions.append('DIRECTORY_CREATE_SUBSPACE') + indices.append(len(self.dir_list)) + self.dir_list.append(DirectoryStateTreeNode(False, True, has_known_prefix=True)) + + instructions.push_args(random.choice([0, 1])) + instructions.push_args(*indices) + instructions.append(op) + self.dir_list.append(DirectoryStateTreeNode.get_layer(prefixes[0])) + + elif root_op == 'DIRECTORY_CREATE_OR_OPEN': + # Because allocated prefixes are non-deterministic, we cannot have overlapping + # transactions that allocate/remove these prefixes in a comparison test + if op.endswith('_DATABASE') and args.concurrency == 1: + test_util.blocking_commit(instructions) + + path = generate_path() + # Partitions that use the high-contention allocator can result in non-determinism if they fail to commit, + # so we disallow them in comparison tests + op_args = test_util.with_length(path) + (self.generate_layer(allow_partition=args.concurrency>1),) + directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log) + + if not op.endswith('_DATABASE') and args.concurrency == 1: + test_util.blocking_commit(instructions) + + child_entry = dir_entry.get_descendent(path) + if child_entry is None: + child_entry = DirectoryStateTreeNode(True, True) + + child_entry.state.has_known_prefix = False + self.dir_list.append(dir_entry.add_child(path, child_entry)) + + elif root_op == 'DIRECTORY_CREATE': + layer = self.generate_layer() + is_partition = layer == b'partition' + + prefix = generate_prefix(require_unique=is_partition and args.concurrency==1, is_partition=is_partition, min_length=0) + + # Because allocated prefixes are non-deterministic, we 
cannot have overlapping + # transactions that allocate/remove these prefixes in a comparison test + if op.endswith('_DATABASE') and args.concurrency == 1: # and allow_empty_prefix: + test_util.blocking_commit(instructions) + + path = generate_path() + op_args = test_util.with_length(path) + (layer, prefix) + if prefix is None: + directory_util.push_instruction_and_record_prefix( + instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log) + else: + instructions.push_args(*op_args) + instructions.append(op) + + if not op.endswith('_DATABASE') and args.concurrency == 1: # and allow_empty_prefix: + test_util.blocking_commit(instructions) + + child_entry = dir_entry.get_descendent(path) + if child_entry is None: + child_entry = DirectoryStateTreeNode(True, True, has_known_prefix=bool(prefix)) + elif not bool(prefix): + child_entry.state.has_known_prefix = False + + if is_partition: + child_entry.state.is_partition = True + + self.dir_list.append(dir_entry.add_child(path, child_entry)) + + elif root_op == 'DIRECTORY_OPEN': + path = generate_path() + instructions.push_args(self.generate_layer()) + instructions.push_args(*test_util.with_length(path)) + instructions.append(op) + + child_entry = dir_entry.get_descendent(path) + if child_entry is None: + self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False)) + else: + self.dir_list.append(dir_entry.add_child(path, child_entry)) + + elif root_op == 'DIRECTORY_MOVE': + old_path = generate_path() + new_path = generate_path() + instructions.push_args(*(test_util.with_length(old_path) + test_util.with_length(new_path))) + instructions.append(op) + + child_entry = dir_entry.get_descendent(old_path) + if child_entry is None: + self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False)) + else: + self.dir_list.append(dir_entry.add_child(new_path, child_entry)) + + # Make sure that the default directory subspace still exists after moving the specified directory + if dir_entry.state.is_directory and not dir_entry.state.is_subspace and old_path == ('',): + self.ensure_default_directory_subspace(instructions, default_path) + + elif root_op == 'DIRECTORY_MOVE_TO': + new_path = generate_path() + instructions.push_args(*test_util.with_length(new_path)) + instructions.append(op) + + child_entry = dir_entry.get_descendent(()) + if child_entry is None: + self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False)) + else: + self.dir_list.append(dir_entry.add_child(new_path, child_entry)) + + # Make sure that the default directory subspace still exists after moving the current directory + self.ensure_default_directory_subspace(instructions, default_path) + + elif root_op == 'DIRECTORY_REMOVE' or root_op == 'DIRECTORY_REMOVE_IF_EXISTS': + # Because allocated prefixes are non-deterministic, we cannot have overlapping + # transactions that allocate/remove these prefixes in a comparison test + if op.endswith('_DATABASE') and args.concurrency == 1: + test_util.blocking_commit(instructions) + + path = () + count = random.randint(0, 1) + if count == 1: + path = generate_path() + instructions.push_args(*test_util.with_length(path)) + + instructions.push_args(count) + instructions.append(op) + + dir_entry.delete(path) + + # Make sure that the default directory subspace still exists after removing the specified directory + if path == () or (dir_entry.state.is_directory and not dir_entry.state.is_subspace and path == ('',)): + self.ensure_default_directory_subspace(instructions, 
default_path) + + elif root_op == 'DIRECTORY_LIST' or root_op == 'DIRECTORY_EXISTS': + path = () + count = random.randint(0, 1) + if count == 1: + path = generate_path() + instructions.push_args(*test_util.with_length(path)) + instructions.push_args(count) + instructions.append(op) + + elif root_op == 'DIRECTORY_PACK_KEY': + t = self.random.random_tuple(5) + instructions.push_args(*test_util.with_length(t)) + instructions.append(op) + instructions.append('DIRECTORY_STRIP_PREFIX') + + elif root_op == 'DIRECTORY_UNPACK_KEY' or root_op == 'DIRECTORY_CONTAINS': + if not dir_entry.state.has_known_prefix or random.random() < 0.2 or root_op == 'DIRECTORY_UNPACK_KEY': + t = self.random.random_tuple(5) + instructions.push_args(*test_util.with_length(t)) + instructions.append('DIRECTORY_PACK_KEY') + instructions.append(op) + else: + instructions.push_args(fdb.tuple.pack(self.random.random_tuple(5))) + instructions.append(op) + + elif root_op == 'DIRECTORY_RANGE' or root_op == 'DIRECTORY_OPEN_SUBSPACE': + t = self.random.random_tuple(5) + instructions.push_args(*test_util.with_length(t)) + instructions.append(op) + if root_op == 'DIRECTORY_OPEN_SUBSPACE': + self.dir_list.append(DirectoryStateTreeNode(False, True, dir_entry.state.has_known_prefix)) + else: + test_util.to_front(instructions, 1) + instructions.append('DIRECTORY_STRIP_PREFIX') + test_util.to_front(instructions, 1) + instructions.append('DIRECTORY_STRIP_PREFIX') + + instructions.begin_finalization() + + test_util.blocking_commit(instructions) + + instructions.append('NEW_TRANSACTION') + + for i, dir_entry in enumerate(self.dir_list): + instructions.push_args(i) + instructions.append('DIRECTORY_CHANGE') + if dir_entry.state.is_directory: + instructions.push_args(self.directory_log.key()) + instructions.append('DIRECTORY_LOG_DIRECTORY') + if dir_entry.state.has_known_prefix and dir_entry.state.is_subspace: + # print('%d. Logging subspace: %d' % (i, dir_entry.dir_id)) + instructions.push_args(self.subspace_log.key()) + instructions.append('DIRECTORY_LOG_SUBSPACE') + if (i + 1) % 100 == 0: + test_util.blocking_commit(instructions) + + test_util.blocking_commit(instructions) + + instructions.push_args(self.stack_subspace.key()) + instructions.append('LOG_STACK') + + test_util.blocking_commit(instructions) + return instructions + + def pre_run(self, db, args): + for (path, layer) in self.prepopulated_dirs: + try: + util.get_logger().debug('Prepopulating directory: %r (layer=%r)' % (path, layer)) + fdb.directory.create_or_open(db, path, layer) + except Exception as e: + util.get_logger().debug('Could not create directory %r: %r' % (path, e)) + pass + + def validate(self, db, args): + errors = [] + # This check doesn't work in the current test because of the way we use partitions. + # If a partition is created, allocates a prefix, and then is removed, subsequent prefix + # allocations could collide with prior ones. We can get around this by not allowing + # a removed directory (or partition) to be used, but that weakens the test in another way. 
+ # errors += directory_util.check_for_duplicate_prefixes(db, self.prefix_log) + return errors + + def get_result_specifications(self): + return [ + ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021]), + ResultSpecification(self.directory_log, ordering_index=0), + ResultSpecification(self.subspace_log, ordering_index=0) + ] + + +# Utility functions + + +def generate_path(min_length=0): + length = int(random.random() * random.random() * (4 - min_length)) + min_length + path = () + for i in range(length): + if random.random() < 0.05: + path = path + ('',) + else: + path = path + (random.choice(['1', '2', '3']),) + + return path + + +def generate_prefix(require_unique=False, is_partition=False, min_length=1): + fixed_prefix = b'abcdefg' + if not require_unique and min_length == 0 and random.random() < 0.8: + return None + elif require_unique or is_partition or min_length > len(fixed_prefix) or random.random() < 0.5: + if require_unique: + min_length = max(min_length, 16) + + length = random.randint(min_length, min_length+5) + if length == 0: + return b'' + + if not is_partition: + first = random.randint(ord('\x1d'), 255) % 255 + return bytes([first] + [random.randrange(0, 256) for i in range(0, length - 1)]) + else: + return bytes([random.randrange(ord('\x02'), ord('\x14')) for i in range(0, length)]) + else: + prefix = fixed_prefix + generated = prefix[0:random.randrange(min_length, len(prefix))] + return generated diff --git a/.ci/bindingtester/tests/directory_hca.py b/.ci/bindingtester/tests/directory_hca.py new file mode 100644 index 0000000..ab706d0 --- /dev/null +++ b/.ci/bindingtester/tests/directory_hca.py @@ -0,0 +1,131 @@ +# +# directory_hca.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import random + +import fdb + +from bindingtester import FDB_API_VERSION +from bindingtester import util + +from bindingtester.tests import Test, Instruction, InstructionSet, ResultSpecification +from bindingtester.tests import test_util, directory_util + +fdb.api_version(FDB_API_VERSION) + + +class DirectoryHcaTest(Test): + def __init__(self, subspace): + super(DirectoryHcaTest, self).__init__(subspace) + self.coordination = subspace['coordination'] + self.prefix_log = subspace['prefix_log'] + self.next_path = 1 + + def setup(self, args): + self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types) + self.transactions = [b'tr%d' % i for i in range(3)] # SOMEDAY: parameterize this number? 
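+        # Counter for the coordination barriers used to synchronize the
+        # concurrently running tester threads; barrier() below writes a key
+        # under the next barrier number, clears this thread's key under the
+        # current one, and then waits for the current barrier range to empty.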
+ self.barrier_num = 0 + + self.max_directories_per_transaction = 30 + if args.api_version < 300: + if args.concurrency > 8: + raise Exception('Directory HCA test does not support concurrency larger than 8 with API version less than 300') + + self.max_directories_per_transaction = 8.0 / args.concurrency + + def commit_transactions(self, instructions, args): + for tr in self.transactions: + if random.random() < 0.8 or args.api_version < 300: + instructions.push_args(tr) + instructions.append('USE_TRANSACTION') + test_util.blocking_commit(instructions) + + def barrier(self, instructions, thread_number, thread_ending=False): + if not thread_ending: + instructions.push_args(self.coordination[(self.barrier_num + 1)][thread_number].key(), b'') + instructions.append('SET_DATABASE') + instructions.append('WAIT_FUTURE') + + instructions.push_args(self.coordination[self.barrier_num][thread_number].key()) + instructions.append('CLEAR_DATABASE') + instructions.append('WAIT_FUTURE') + instructions.push_args(self.coordination[self.barrier_num].key()) + instructions.append('WAIT_EMPTY') + + self.barrier_num += 1 + + def generate(self, args, thread_number): + instructions = InstructionSet() + + instructions.append('NEW_TRANSACTION') + + default_path = 'default%d' % self.next_path + self.next_path += 1 + dir_list = directory_util.setup_directories(instructions, default_path, self.random) + num_dirs = len(dir_list) + + instructions.push_args(directory_util.DEFAULT_DIRECTORY_INDEX) + instructions.append('DIRECTORY_CHANGE') + + instructions.setup_complete() + + current_op = 0 + + while current_op < args.num_ops: + if args.concurrency > 1: + self.barrier(instructions, thread_number) + + instructions.push_args(random.choice(self.transactions)) + instructions.append('USE_TRANSACTION') + + if thread_number == 0 and args.concurrency > 1: + num_directories = 1 + else: + num_directories = int(max(1, pow(random.random(), 4) * min(self.max_directories_per_transaction, args.num_ops - current_op))) + + for i in range(num_directories): + path = (self.random.random_unicode_str(16),) + op_args = test_util.with_length(path) + (b'', None) + directory_util.push_instruction_and_record_prefix(instructions, 'DIRECTORY_CREATE', + op_args, path, num_dirs, self.random, self.prefix_log) + num_dirs += 1 + + current_op += num_directories + + if args.concurrency > 1: + self.barrier(instructions, thread_number, thread_ending=(current_op >= args.num_ops)) + + if thread_number == 0: + self.commit_transactions(instructions, args) + + return instructions + + @fdb.transactional + def pre_run(self, tr, args): + if args.concurrency > 1: + for i in range(args.concurrency): + tr[self.coordination[0][i]] = b'' + + def validate(self, db, args): + errors = [] + errors += directory_util.check_for_duplicate_prefixes(db, self.prefix_log) + errors += directory_util.validate_hca_state(db) + + return errors diff --git a/.ci/bindingtester/tests/directory_state_tree.py b/.ci/bindingtester/tests/directory_state_tree.py new file mode 100644 index 0000000..508d18f --- /dev/null +++ b/.ci/bindingtester/tests/directory_state_tree.py @@ -0,0 +1,259 @@ +import sys + +class TreeNodeState: + def __init__(self, node, dir_id, is_directory, is_subspace, has_known_prefix, root, is_partition): + self.dir_id = dir_id + self.is_directory = is_directory + self.is_subspace = is_subspace + self.has_known_prefix = has_known_prefix + self.root = root + self.is_partition = is_partition + + self.parents = { node } + self.children = {} + self.deleted = False + +# Represents an 
element of the directory hierarchy. As a result of various operations (e.g. moves) that +# may or may not have succeeded, a node can represent multiple possible states. +class DirectoryStateTreeNode: + # A cache of directory layers. We mustn't have multiple entries for the same layer + layers = {} + + # Because our operations may be applied to the default directory in the case that + # the current directory failed to open/create, we compute the result of each operation + # as if it was performed on the current directory and the default directory. + default_directory = None + + # Used for debugging + dir_id = 0 + + @classmethod + def reset(cls): + cls.dir_id = 0 + cls.layers = {} + cls.default_directory = None + + @classmethod + def set_default_directory(cls, default_directory): + cls.default_directory = default_directory + + @classmethod + def get_layer(cls, node_subspace_prefix): + if node_subspace_prefix not in DirectoryStateTreeNode.layers: + DirectoryStateTreeNode.layers[node_subspace_prefix] = DirectoryStateTreeNode(True, False, has_known_prefix=False) + + return DirectoryStateTreeNode.layers[node_subspace_prefix] + + def __init__(self, is_directory, is_subspace, has_known_prefix=True, root=None, is_partition=False): + self.state = TreeNodeState(self, DirectoryStateTreeNode.dir_id + 1, is_directory, is_subspace, has_known_prefix, + root or self, is_partition) + DirectoryStateTreeNode.dir_id += 1 + + def __repr__(self): + return '{DirEntry %d: %d}' % (self.state.dir_id, self.state.has_known_prefix) + + def _get_descendent(self, subpath, default): + if not subpath: + if default is not None: + self._merge(default) + return self + + default_child = None + if default is not None: + default_child = default.state.children.get(subpath[0]) + + self_child = self.state.children.get(subpath[0]) + + if self_child is None: + if default_child is None: + return None + else: + return default_child._get_descendent(subpath[1:], None) + + return self_child._get_descendent(subpath[1:], default_child) + + def get_descendent(self, subpath): + return self._get_descendent(subpath, DirectoryStateTreeNode.default_directory) + + def add_child(self, subpath, child): + child.state.root = self.state.root + if DirectoryStateTreeNode.default_directory: + # print('Adding child %r to default directory at %r' % (child, subpath)) + child = DirectoryStateTreeNode.default_directory._add_child_impl(subpath, child) + # print('Added %r' % child) + + # print('Adding child %r to directory at %r' % (child, subpath)) + c = self._add_child_impl(subpath, child) + + # print('Added %r' % c) + return c + + def _add_child_impl(self, subpath, child): + # print('%d, %d. Adding child %r (recursive): %r' % (self.state.dir_id, child.state.dir_id, child, subpath)) + if len(subpath) == 0: + # print('%d, %d. Setting child: %d, %d' % (self.state.dir_id, child.state.dir_id, self.state.has_known_prefix, child.state.has_known_prefix)) + self._merge(child) + return self + else: + if not subpath[0] in self.state.children: + # print('%d, %d. Path %r was absent from %r (%r)' % (self.state.dir_id, child.state.dir_id, subpath[0:1], self, self.state.children)) + subdir = DirectoryStateTreeNode(True, True, root=self.state.root) + self.state.children[subpath[0]] = subdir + else: + subdir = self.state.children[subpath[0]] + # print('%d, %d. 
Path was present' % (self.state.dir_id, child.state.dir_id)) + + if len(subpath) > 1: + subdir.state.has_known_prefix = False + + return subdir._add_child_impl(subpath[1:], child) + + def _merge(self, other): + if self.state.dir_id == other.state.dir_id: + return + + self.dir_id = other.dir_id + self.state.dir_id = min(other.state.dir_id, self.state.dir_id) + self.state.is_directory = self.state.is_directory and other.state.is_directory + self.state.is_subspace = self.state.is_subspace and other.state.is_subspace + self.state.has_known_prefix = self.state.has_known_prefix and other.state.has_known_prefix + self.state.deleted = self.state.deleted or other.state.deleted + self.state.is_partition = self.state.is_partition or other.state.is_partition + + other_children = other.state.children.copy() + other_parents = other.state.parents.copy() + + for node in other_parents: + node.state = self.state + self.state.parents.add(node) + + for c in other_children: + if c not in self.state.children: + self.state.children[c] = other_children[c] + else: + self.state.children[c]._merge(other_children[c]) + + def _delete_impl(self): + if not self.state.deleted: + self.state.deleted = True + for c in self.state.children.values(): + c._delete_impl() + + def delete(self, path): + child = self.get_descendent(path) + if child: + child._delete_impl() + +def validate_dir(dir, root): + if dir.state.is_directory: + assert dir.state.root == root + else: + assert dir.state.root == dir + +def run_test(): + all_entries = [] + + root = DirectoryStateTreeNode.get_layer('\xfe') + all_entries.append(root) + + default_dir = root.add_child(('default',), DirectoryStateTreeNode(True, True, has_known_prefix=True)) + DirectoryStateTreeNode.set_default_directory(default_dir) + all_entries.append(default_dir) + + all_entries.append(default_dir.add_child(('1',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + all_entries.append(default_dir.add_child(('1', '1'), DirectoryStateTreeNode(True, False, has_known_prefix=True))) + all_entries.append(default_dir.add_child(('2',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + all_entries.append(default_dir.add_child(('3',), DirectoryStateTreeNode(True, True, has_known_prefix=False))) + all_entries.append(default_dir.add_child(('5',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + all_entries.append(default_dir.add_child(('3', '1'), DirectoryStateTreeNode(True, True, has_known_prefix=False))) + all_entries.append(default_dir.add_child(('1', '3'), DirectoryStateTreeNode(True, True, has_known_prefix=False))) + + entry = all_entries[-1] + child_entries = [] + child_entries.append(entry.add_child(('1',), DirectoryStateTreeNode(True, False, has_known_prefix=True))) + child_entries.append(entry.add_child(('2',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + child_entries.append(entry.add_child(('3',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + child_entries.append(entry.add_child(('4',), DirectoryStateTreeNode(True, False, has_known_prefix=False))) + child_entries.append(entry.add_child(('5',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + + all_entries.append(root.add_child(('1', '2'), DirectoryStateTreeNode(True, True, has_known_prefix=False))) + all_entries.append(root.add_child(('2',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + all_entries.append(root.add_child(('3',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + all_entries.append(root.add_child(('1', '3',), 
DirectoryStateTreeNode(True, True, has_known_prefix=True))) + + # This directory was merged with the default, but both have readable prefixes + entry = root.get_descendent(('2',)) + assert entry.state.has_known_prefix + + entry = all_entries[-1] + all_entries.append(entry.add_child(('1',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + all_entries.append(entry.add_child(('2',), DirectoryStateTreeNode(True, True, has_known_prefix=False))) + all_entries.append(entry.add_child(('3',), DirectoryStateTreeNode(True, False, has_known_prefix=True))) + + entry_to_move = all_entries[-1] + + all_entries.append(entry.add_child(('5',), DirectoryStateTreeNode(True, False, has_known_prefix=True))) + child_entries.append(entry.add_child(('6',), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + + all_entries.extend(child_entries) + + # This directory has an unknown prefix + entry = root.get_descendent(('1', '2')) + assert not entry.state.has_known_prefix + + # This directory was default created and should have an unknown prefix + # It will merge with the default directory's child, which is not a subspace + entry = root.get_descendent(('1',)) + assert not entry.state.has_known_prefix + assert not entry.state.is_subspace + + # Multiple merges will have made this prefix unreadable + entry = root.get_descendent(('2',)) + assert not entry.state.has_known_prefix + + # Merge with default directory's child that has an unknown prefix + entry = root.get_descendent(('3',)) + assert not entry.state.has_known_prefix + + # Merge with default directory's child that has an unknown prefix and merged children + entry = root.get_descendent(('1', '3')) + assert set(entry.state.children.keys()) == {'1', '2', '3', '4', '5', '6'} + + # This child entry should be the combination of ['default', '3'], ['default', '1', '3'], and ['1', '3'] + entry = entry.get_descendent(('3',)) + assert not entry.state.has_known_prefix + assert not entry.state.is_subspace + + # Verify the merge of the children + assert not child_entries[0].state.has_known_prefix + assert not child_entries[0].state.is_subspace + + assert not child_entries[1].state.has_known_prefix + assert child_entries[1].state.is_subspace + + assert not child_entries[2].state.has_known_prefix + assert not child_entries[2].state.is_subspace + + assert not child_entries[3].state.has_known_prefix + assert not child_entries[3].state.is_subspace + + assert child_entries[4].state.has_known_prefix + assert not child_entries[4].state.is_subspace + + assert child_entries[5].state.has_known_prefix + assert child_entries[5].state.is_subspace + + entry = root.add_child(('3',), entry_to_move) + all_entries.append(entry) + + # Test moving an entry + assert not entry.state.has_known_prefix + assert not entry.state.is_subspace + assert list(entry.state.children.keys()) == ['1'] + + for e in all_entries: + validate_dir(e, root) + +if __name__ == '__main__': + sys.exit(run_test()) + diff --git a/.ci/bindingtester/tests/directory_util.py b/.ci/bindingtester/tests/directory_util.py new file mode 100644 index 0000000..831527e --- /dev/null +++ b/.ci/bindingtester/tests/directory_util.py @@ -0,0 +1,142 @@ +# +# directory_util.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import random +import struct + +import fdb + +from bindingtester import FDB_API_VERSION +from bindingtester import util + +from bindingtester.tests import test_util +from bindingtester.tests.directory_state_tree import DirectoryStateTreeNode + +fdb.api_version(FDB_API_VERSION) + +DEFAULT_DIRECTORY_INDEX = 4 +DEFAULT_DIRECTORY_PREFIX = b'default' +DIRECTORY_ERROR_STRING = b'DIRECTORY_ERROR' + +def setup_directories(instructions, default_path, random): + # Clients start with the default directory layer in the directory list + DirectoryStateTreeNode.reset() + dir_list = [DirectoryStateTreeNode.get_layer(b'\xfe')] + + instructions.push_args(0, b'\xfe') + instructions.append('DIRECTORY_CREATE_SUBSPACE') + dir_list.append(DirectoryStateTreeNode(False, True)) + + instructions.push_args(0, b'') + instructions.append('DIRECTORY_CREATE_SUBSPACE') + dir_list.append(DirectoryStateTreeNode(False, True)) + + instructions.push_args(1, 2, 1) + instructions.append('DIRECTORY_CREATE_LAYER') + dir_list.append(DirectoryStateTreeNode.get_layer(b'\xfe')) + + create_default_directory_subspace(instructions, default_path, random) + dir_list.append(dir_list[0].add_child((default_path,), DirectoryStateTreeNode(True, True, has_known_prefix=True))) + DirectoryStateTreeNode.set_default_directory(dir_list[-1]) + + instructions.push_args(DEFAULT_DIRECTORY_INDEX) + instructions.append('DIRECTORY_SET_ERROR_INDEX') + + return dir_list + + +def create_default_directory_subspace(instructions, path, random): + test_util.blocking_commit(instructions) + instructions.push_args(3) + instructions.append('DIRECTORY_CHANGE') + prefix = random.random_string(16) + instructions.push_args(1, path, b'', b'%s-%s' % (DEFAULT_DIRECTORY_PREFIX, prefix)) + instructions.append('DIRECTORY_CREATE_DATABASE') + + instructions.push_args(DEFAULT_DIRECTORY_INDEX) + instructions.append('DIRECTORY_CHANGE') + + +def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_index, random, subspace): + if not op.endswith('_DATABASE'): + instructions.push_args(1, *test_util.with_length(path)) + instructions.append('DIRECTORY_EXISTS') + + # This op must leave the stack in the state it is in at this point, with the exception + # that it may leave an error on the stack + instructions.push_args(*op_args) + instructions.append(op) + + if not op.endswith('_DATABASE'): + instructions.push_args(dir_index) + instructions.append('DIRECTORY_CHANGE') + + instructions.push_args(1, b'', random.random_string(16), b'') + instructions.append('DIRECTORY_PACK_KEY') + test_util.to_front(instructions, 3) # move the existence result up to the front of the stack + + t = util.subspace_to_tuple(subspace) + instructions.push_args(len(t) + 3, *t) + + instructions.append('TUPLE_PACK') # subspace[][][random.random_string(16)] = b'' + instructions.append('SET') + + instructions.push_args(DEFAULT_DIRECTORY_INDEX) + instructions.append('DIRECTORY_CHANGE') + + +def check_for_duplicate_prefixes(db, subspace): + last_prefix = None + start_key = subspace[0].range().start + + duplicates = set() + count = 0 + while True: + prefixes = db.get_range(start_key, 
subspace[0].range().stop, limit=1000) + if len(prefixes) == 0: + break + + start_key = fdb.KeySelector.first_greater_than(prefixes[-1].key) + + prefixes = [subspace[0].unpack(kv.key)[0] for kv in prefixes] + prefixes = [p for p in prefixes if not (p.startswith(DEFAULT_DIRECTORY_PREFIX) or p == DIRECTORY_ERROR_STRING)] + count += len(prefixes) + + prefixes = [last_prefix] + prefixes + duplicates.update([p for i, p in enumerate(prefixes[1:]) if p == prefixes[i]]) + last_prefix = prefixes[-1] + + util.get_logger().info('Checked %d directory prefixes for duplicates' % count) + return ['The prefix %r was allocated multiple times' % d[:-2] for d in set(duplicates)] + + +def validate_hca_state(db): + hca = fdb.Subspace((b'\xfe', b'hca'), b'\xfe') + counters = hca[0] + recent = hca[1] + + last_counter = db.get_range(counters.range().start, counters.range().stop, limit=1, reverse=True) + [(start, reported_count)] = [(counters.unpack(kv.key)[0], struct.unpack('<q', kv.value)[0]) for kv in last_counter] or [(None, 0)] + + actual_count = len(db[recent[start]:recent.range().stop]) + if actual_count > reported_count: + return ['The HCA reports %d prefixes allocated in current window, but it actually allocated %d' % (reported_count, actual_count)] + + return [] diff --git a/.ci/bindingtester/tests/scripted.py b/.ci/bindingtester/tests/scripted.py new file mode 100644 index 0000000..c250b9d --- /dev/null +++ b/.ci/bindingtester/tests/scripted.py @@ -0,0 +1,410 @@ +# +# scripted.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import random + +import fdb + +from bindingtester import FDB_API_VERSION +from bindingtester import Result + +from bindingtester.tests import Test, Instruction, ThreadedInstructionSet, ResultSpecification +from bindingtester.tests import test_util + +fdb.api_version(FDB_API_VERSION) + +# SOMEDAY: This should probably be broken up into smaller tests + + +class ScriptedTest(Test): + TEST_API_VERSION = 710 + + def __init__(self, subspace): + super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION) + self.workspace = self.subspace['workspace'] + self.results_subspace = self.subspace['results'] + # self.thread_subspace = self.subspace['threads'] # TODO: update START_THREAD so that we can create threads in subspaces + + def setup(self, args): + if args.concurrency > 1: + raise Exception('Scripted tests cannot be run with a concurrency greater than 1') + + # SOMEDAY: this is only a limitation because we don't know how many operations the bisection should start with + # it should be fixable.
+ # + # We also need to enable the commented out support for num_ops in this file and make it so the default value runs + # the entire test + if args.bisect: + raise Exception('Scripted tests cannot be bisected') + + self.api_version = args.api_version + + def generate(self, args, thread_number): + self.results = [] + + test_instructions = ThreadedInstructionSet() + main_thread = test_instructions.create_thread() + + foo = [self.workspace.pack((b'foo%d' % i,)) for i in range(0, 6)] + + main_thread.append('NEW_TRANSACTION') + main_thread.push_args(1020) + main_thread.append('ON_ERROR') + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + main_thread.append('GET_READ_VERSION') + main_thread.push_args(foo[1], b'bar') + main_thread.append('SET') + main_thread.push_args(foo[1]) + main_thread.append('GET') + self.add_result(main_thread, args, b'bar') + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.push_args(2000) + main_thread.append('ON_ERROR') + self.add_result(main_thread, args, test_util.error_string(2000)) + + main_thread.append('NEW_TRANSACTION') + main_thread.push_args(0) + main_thread.append('ON_ERROR') + self.add_result(main_thread, args, test_util.error_string(2000)) + + main_thread.append('NEW_TRANSACTION') + main_thread.push_args(foo[1]) + main_thread.append('DUP') + main_thread.append('DUP') + main_thread.append('GET') + self.add_result(main_thread, args, b'bar') + main_thread.append('CLEAR') + main_thread.append('GET_SNAPSHOT') + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + main_thread.push_args(foo[1]) + main_thread.append('GET_DATABASE') + self.add_result(main_thread, args, b'bar') + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.append('SET_READ_VERSION') + main_thread.push_args(foo[1]) + main_thread.append('DUP') + main_thread.append('GET') + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + main_thread.append('CLEAR') + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, test_util.error_string(1020)) + + main_thread.push_args(foo[1]) + main_thread.append('GET_SNAPSHOT') + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + main_thread.push_args(foo[1]) + main_thread.append('CLEAR') + main_thread.append('COMMIT') + main_thread.append('WAIT_FUTURE') + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + main_thread.append('GET_COMMITTED_VERSION') + main_thread.append('RESET') + main_thread.append('EMPTY_STACK') + + main_thread.append('NEW_TRANSACTION') + main_thread.push_args(1, b'bar', foo[1], foo[2], b'bar2', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5') + main_thread.append('SWAP') + main_thread.append('SET') + main_thread.append('SET') + main_thread.append('SET') + main_thread.append('SET') + main_thread.append('SET_DATABASE') + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.append('SET_READ_VERSION') + main_thread.push_args(foo[2]) + main_thread.append('GET') + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.append('NEW_TRANSACTION') + main_thread.push_args(b'', 0, -1, b'') + main_thread.append('GET_KEY') + self.add_result(main_thread, args, b'') + + main_thread.append('NEW_TRANSACTION') + main_thread.append('GET_READ_VERSION_SNAPSHOT') + main_thread.push_args(b'random', foo[1], foo[3], 0, 1, 1) + main_thread.append('POP') + main_thread.append('GET_RANGE') + 
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], b'bar2', foo[1], b'bar'))) + main_thread.push_args(foo[1], foo[3], 1, 1, 0) + main_thread.append('GET_RANGE_SNAPSHOT') + self.add_result(main_thread, args, fdb.tuple.pack((foo[2], b'bar2'))) + main_thread.push_args(foo[1], foo[3], 0, 0, 4) + main_thread.append('GET_RANGE_DATABASE') + self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2'))) + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.push_args(foo[3], foo[5]) + main_thread.append('CLEAR_RANGE') + main_thread.push_args(foo[1], 0, 3, b'') + main_thread.append('GET_KEY') + self.add_result(main_thread, args, foo[5]) + main_thread.push_args(foo[1], 1, 2, b'') + main_thread.append('GET_KEY_SNAPSHOT') + self.add_result(main_thread, args, foo[5]) + main_thread.push_args(foo[5], 0, -2, b'') + main_thread.append('GET_KEY_DATABASE') + self.add_result(main_thread, args, foo[2]) + main_thread.push_args(self.workspace.key(), 2, 0, 2) + main_thread.append('GET_RANGE_STARTS_WITH') + self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2'))) + main_thread.push_args(self.workspace.key(), 4, 0, 3) + main_thread.append('GET_RANGE_STARTS_WITH_SNAPSHOT') + self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[5], b'bar5'))) + main_thread.push_args(self.workspace.key(), 3, 1, -1) + main_thread.append('GET_RANGE_STARTS_WITH_DATABASE') + self.add_result(main_thread, args, fdb.tuple.pack((foo[5], b'bar5', foo[4], b'bar4', foo[3], b'bar3'))) + main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, b'') + main_thread.append('GET_RANGE_SELECTOR') + self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2'))) + main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, b'') + main_thread.append('GET_RANGE_SELECTOR_SNAPSHOT') + self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[5], b'bar5'))) + main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, b'') + main_thread.append('GET_RANGE_SELECTOR_DATABASE') + self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[3], b'bar3'))) + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.push_args(self.workspace.key()) + main_thread.append('CLEAR_RANGE_STARTS_WITH') + main_thread.push_args(self.workspace.key(), 0, 0, -1) + main_thread.append('GET_RANGE_STARTS_WITH') + self.add_result(main_thread, args, b'') + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.append('SET_READ_VERSION') + main_thread.push_args(foo[1]) + main_thread.append('GET') + self.add_result(main_thread, args, b'bar') + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.push_args(foo[1], b'bar', foo[2], b'bar2', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5') + main_thread.append('SET') + main_thread.append('SET') + main_thread.append('SET') + main_thread.append('SET') + main_thread.append('SET') + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.push_args(foo[2]) + main_thread.append('CLEAR_DATABASE') + main_thread.append('WAIT_FUTURE') + main_thread.push_args(self.workspace.key(), 0, 0, -1) + main_thread.append('GET_RANGE_STARTS_WITH_DATABASE') + self.add_result(main_thread, args, 
fdb.tuple.pack((foo[1], b'bar', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5'))) + + main_thread.push_args(foo[3], foo[5]) + main_thread.append('CLEAR_RANGE_DATABASE') + main_thread.append('WAIT_FUTURE') + main_thread.push_args(self.workspace.key(), 0, 0, -1) + main_thread.append('GET_RANGE_STARTS_WITH_DATABASE') + self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[5], b'bar5'))) + + main_thread.push_args(self.workspace.key()) + main_thread.append('CLEAR_RANGE_STARTS_WITH_DATABASE') + main_thread.append('WAIT_FUTURE') + main_thread.push_args(self.workspace.key(), 0, 0, -1) + main_thread.append('GET_RANGE_STARTS_WITH_DATABASE') + self.add_result(main_thread, args, b'') + + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + + main_thread.append('NEW_TRANSACTION') + main_thread.push_args(foo[1], foo[5], 0, 0, 0) + main_thread.append('GET_RANGE') + self.add_result(main_thread, args, test_util.error_string(2210)) + main_thread.push_args(foo[1], foo[5], 0, 0, 0) + main_thread.append('GET_RANGE_DATABASE') + self.add_result(main_thread, args, test_util.error_string(2210)) + + self.append_range_test(main_thread, args, 100, 256) + self.append_range_test(main_thread, args, 1000, 8) + + main_thread.append('EMPTY_STACK') + tup = (0, b'foo', -1093, 'unicode\u9348test', 0xffffffff + 100, b'bar\x00\xff') + main_thread.push_args(*test_util.with_length(tup)) + main_thread.append('TUPLE_PACK') + main_thread.append('DUP') + self.add_result(main_thread, args, fdb.tuple.pack(tup)) + main_thread.append('TUPLE_UNPACK') + for item in reversed(tup): + self.add_result(main_thread, args, fdb.tuple.pack((item,))) + + main_thread.push_args(0xffffffff, -100) + main_thread.append('SUB') + main_thread.push_args(1) + main_thread.append('TUPLE_PACK') + self.add_result(main_thread, args, fdb.tuple.pack((0xffffffff + 100,))) + + main_thread.append('EMPTY_STACK') + main_thread.push_args(*test_util.with_length(tup)) + main_thread.append('TUPLE_RANGE') + rng = fdb.tuple.range(tup) + self.add_result(main_thread, args, rng.stop) + self.add_result(main_thread, args, rng.start) + + stampKey = b'stampedXXXXXXXXXXsuffix' + stampKeyIndex = stampKey.find(b'XXXXXXXXXX') + main_thread.push_args('SET_VERSIONSTAMPED_KEY', self.versionstamp_key(stampKey, stampKeyIndex), b'stampedBar') + main_thread.append('ATOMIC_OP') + main_thread.push_args('SET_VERSIONSTAMPED_VALUE', b'stampedValue', self.versionstamp_value(b'XXXXXXXXXX')) + main_thread.append('ATOMIC_OP') + + if self.api_version >= 520: + stampValue = b'stampedXXXXXXXXXXsuffix' + stampValueIndex = stampValue.find(b'XXXXXXXXXX') + main_thread.push_args('SET_VERSIONSTAMPED_VALUE', b'stampedValue2', self.versionstamp_value(stampValue, stampValueIndex)) + main_thread.append('ATOMIC_OP') + + main_thread.push_args(b'suffix') + main_thread.append('GET_VERSIONSTAMP') + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + main_thread.push_args(b'stamped') + main_thread.append('CONCAT') + main_thread.append('CONCAT') + main_thread.append('GET') + self.add_result(main_thread, args, b'stampedBar') + + main_thread.push_args(b'stampedValue', b'suffix') + main_thread.append('GET') + main_thread.push_args(b'stamped') + main_thread.append('CONCAT') + main_thread.append('CONCAT') + main_thread.append('GET') + self.add_result(main_thread, args, b'stampedBar') + + if self.api_version >= 520: + main_thread.push_args(b'stampedValue2') + main_thread.append('GET') + main_thread.append('GET') 
+ self.add_result(main_thread, args, b'stampedBar') + + main_thread.append('GET_VERSIONSTAMP') + test_util.blocking_commit(main_thread) + self.add_result(main_thread, args, b'RESULT_NOT_PRESENT') + self.add_result(main_thread, args, test_util.error_string(2021)) + + main_thread.push_args(b'sentinel') + main_thread.append('UNIT_TESTS') + self.add_result(main_thread, args, b'sentinel') + + if not args.no_threads: + wait_key = b'waitKey' + # threads = [self.thread_subspace[i] for i in range(0, 2)] + threads = [b'thread_spec%d' % i for i in range(0, 2)] + for thread_spec in threads: + main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), b'') + main_thread.append('SET_DATABASE') + main_thread.append('WAIT_FUTURE') + + for thread_spec in threads: + main_thread.push_args(thread_spec) + # if len(main_thread) < args.num_ops: + main_thread.append('START_THREAD') + thread = test_instructions.create_thread(fdb.Subspace((thread_spec,))) + thread.append('NEW_TRANSACTION') + thread.push_args(foo[1], foo[1], b'bar%s' % thread_spec, self.workspace.pack( + (wait_key, thread_spec)), self.workspace.pack((wait_key, thread_spec))) + thread.append('GET') + thread.append('POP') + thread.append('SET') + thread.append('CLEAR') + test_util.blocking_commit(thread) + thread.append('POP') + thread.append('CLEAR_DATABASE') + thread.push_args(self.workspace.pack((wait_key,))) + thread.append('WAIT_EMPTY') + + thread.append('NEW_TRANSACTION') + thread.push_args(foo[1]) + thread.append('GET') + self.add_result(thread, args, b'barthread_spec0', b'barthread_spec1') + + main_thread.append('EMPTY_STACK') + # if len(main_thread) > args.num_ops: + # main_thread[args.num_ops:] = [] + + return test_instructions + + def get_result_specifications(self): + return [ + ResultSpecification(self.results_subspace, ordering_index=0, global_error_filter=[1007, 1009, 1021]) + ] + + def get_expected_results(self): + return {self.results_subspace: self.results} + + def append_range_test(self, instructions, args, num_pairs, kv_length): + instructions.append('NEW_TRANSACTION') + + instructions.push_args(self.workspace.key()) + instructions.append('CLEAR_RANGE_STARTS_WITH') + + kvpairs = [] + for i in range(0, num_pairs * 2): + kvpairs.append(self.workspace.pack((b'foo', bytes([random.randint(0, 254) for i in range(0, kv_length)])))) + + kvpairs = list(set(kvpairs)) + if len(kvpairs) % 2 == 1: + kvpairs = kvpairs[:-1] + kvpairs.sort() + + instructions.push_args(*kvpairs) + for i in range(0, len(kvpairs) // 2): + instructions.append('SET') + if i % 100 == 99: + test_util.blocking_commit(instructions) + self.add_result(instructions, args, b'RESULT_NOT_PRESENT') + + foo_range = self.workspace.range((b'foo',)) + instructions.push_args(foo_range.start, foo_range.stop, 0, 0, -1) + instructions.append('GET_RANGE') + self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs))) + instructions.push_args(self.workspace.key(), 0, 0, -1) + instructions.append('GET_RANGE_STARTS_WITH') + self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs))) + instructions.push_args(foo_range.start, 0, 1, foo_range.stop, 0, 1, 0, 0, -1, b'') + instructions.append('GET_RANGE_SELECTOR') + self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs))) + test_util.blocking_commit(instructions) + self.add_result(instructions, args, b'RESULT_NOT_PRESENT') + + def add_result(self, instructions, args, *values): + key = self.results_subspace.pack((len(self.results),)) + instructions.push_args(key) + instructions.append('SET_DATABASE') + + # if 
len(instructions) <= args.num_ops: + self.results.append(Result(self.results_subspace, key, values)) + + instructions.append('POP') diff --git a/.ci/bindingtester/tests/test_util.py b/.ci/bindingtester/tests/test_util.py new file mode 100644 index 0000000..b930cb1 --- /dev/null +++ b/.ci/bindingtester/tests/test_util.py @@ -0,0 +1,204 @@ +# +# test_util.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import random +import uuid +import unicodedata +import ctypes +import math + +import fdb +import fdb.tuple + +from bindingtester import util +from bindingtester import FDB_API_VERSION +from bindingtester.known_testers import COMMON_TYPES + + +class RandomGenerator(object): + def __init__(self, max_int_bits=64, api_version=FDB_API_VERSION, types=COMMON_TYPES): + self.max_int_bits = max_int_bits + self.api_version = api_version + self.types = list(types) + + def random_unicode_str(self, length): + return ''.join(self.random_unicode_char() for i in range(0, length)) + + def random_int(self): + num_bits = random.randint(0, self.max_int_bits) # This way, we test small numbers with higher probability + + max_value = (1 << num_bits) - 1 + min_value = -max_value - 1 + num = random.randint(min_value, max_value) + + # util.get_logger().debug('generating int (%d): %d - %s' % (num_bits, num, repr(fdb.tuple.pack((num,))))) + return num + + def random_float(self, exp_bits): + if random.random() < 0.05: + # Choose a special value. + return random.choice([float('-nan'), float('-inf'), -0.0, 0.0, float('inf'), float('nan')]) + else: + # Choose a value from all over the range of acceptable floats for this precision. 
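+            # exp_bits is 8 for single-precision values and 11 for doubles (see the
+            # calls in random_tuple below); letting the exponent run about 10 below
+            # the normal range also produces occasional subnormal values.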
+ sign = -1 if random.random() < 0.5 else 1 + exponent = random.randint(-(1 << (exp_bits - 1)) - 10, (1 << (exp_bits - 1) - 1)) + mantissa = random.random() + + result = sign * math.pow(2, exponent) * mantissa + if random.random() < 0.05: + result = float(int(result)) + + return result + + def random_tuple(self, max_size, incomplete_versionstamps=False): + size = random.randint(1, max_size) + tup = [] + + for i in range(size): + choice = random.choice(self.types) + if choice == 'int': + tup.append(self.random_int()) + elif choice == 'null': + tup.append(None) + elif choice == 'bytes': + tup.append(self.random_string(random.randint(0, 100))) + elif choice == 'string': + tup.append(self.random_unicode_str(random.randint(0, 100))) + elif choice == 'uuid': + tup.append(uuid.uuid4()) + elif choice == 'bool': + b = random.random() < 0.5 + if self.api_version < 500: + tup.append(int(b)) + else: + tup.append(b) + elif choice == 'float': + tup.append(fdb.tuple.SingleFloat(self.random_float(8))) + elif choice == 'double': + tup.append(self.random_float(11)) + elif choice == 'tuple': + length = random.randint(0, max_size - size) + if length == 0: + tup.append(()) + else: + tup.append(self.random_tuple(length)) + elif choice == 'versionstamp': + if incomplete_versionstamps and random.random() < 0.5: + tr_version = fdb.tuple.Versionstamp._UNSET_TR_VERSION + else: + tr_version = self.random_string(10) + user_version = random.randint(0, 0xffff) + tup.append(fdb.tuple.Versionstamp(tr_version, user_version)) + else: + assert false + + return tuple(tup) + + def random_tuple_list(self, max_size, max_list_size): + size = random.randint(1, max_list_size) + tuples = [] + + for i in range(size): + to_add = self.random_tuple(max_size) + tuples.append(to_add) + if len(to_add) > 1 and random.random() < 0.25: + # Add a smaller one to test prefixes. + smaller_size = random.randint(1, len(to_add)) + tuples.append(to_add[:smaller_size]) + else: + non_empty = [x for x in enumerate(to_add) if (isinstance(x[1], list) or isinstance(x[1], tuple)) and len(x[1]) > 0] + if len(non_empty) > 0 and random.random() < 0.25: + # Add a smaller list to test prefixes of nested structures. + idx, choice = random.choice(non_empty) + smaller_size = random.randint(0, len(to_add[idx])) + tuples.append(to_add[:idx] + (choice[:smaller_size],) + to_add[idx + 1:]) + + random.shuffle(tuples) + return tuples + + def random_range_params(self): + if random.random() < 0.75: + limit = random.randint(1, 1e3) + elif random.random() < 0.75: + limit = 0 + else: + limit = random.randint(1e8, (1 << 31) - 1) + + return (limit, random.randint(0, 1), random.randint(-2, 4)) + + def random_selector_params(self): + if random.random() < 0.9: + offset = random.randint(-20, 20) + else: + offset = random.randint(-1000, 1000) + + return (random.randint(0, 1), offset) + + def random_string(self, length): + if length == 0: + return b'' + + return bytes([random.randint(0, 254)] + [random.randint(0, 255) for i in range(0, length - 1)]) + + def random_unicode_char(self): + while True: + if random.random() < 0.05: + # Choose one of these special character sequences. 
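+                # These include supplementary-plane characters and multi-codepoint
+                # sequences (ZWJ emoji, regional-indicator pairs), which exercise
+                # unicode handling in the tuple layer.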
+ specials = ['\U0001f4a9', '\U0001f63c', '\U0001f3f3\ufe0f\u200d\U0001f308', '\U0001f1f5\U0001f1f2', '\uf8ff', + '\U0002a2b2', '\u05e9\u05dc\u05d5\u05dd'] + return random.choice(specials) + c = random.randint(0, 0xffff) + if unicodedata.category(chr(c))[0] in 'LMNPSZ': + return chr(c) + + +def error_string(error_code): + return fdb.tuple.pack((b'ERROR', bytes(str(error_code), 'utf-8'))) + + +def blocking_commit(instructions): + instructions.append('COMMIT') + instructions.append('WAIT_FUTURE') + instructions.append('RESET') + + +def to_front(instructions, index): + if index == 0: + pass + elif index == 1: + instructions.push_args(1) + instructions.append('SWAP') + elif index == 2: + instructions.push_args(index - 1) + instructions.append('SWAP') + instructions.push_args(index) + instructions.append('SWAP') + else: + instructions.push_args(index - 1) + instructions.append('SWAP') + instructions.push_args(index) + instructions.append('SWAP') + instructions.push_args(index - 1) + instructions.append('SWAP') + to_front(instructions, index - 1) + + +def with_length(tup): + return (len(tup),) + tup diff --git a/.ci/bindingtester/tests/tuple.py b/.ci/bindingtester/tests/tuple.py new file mode 100644 index 0000000..3f2305f --- /dev/null +++ b/.ci/bindingtester/tests/tuple.py @@ -0,0 +1,91 @@ +# +# tuple.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import random +import struct + +import fdb +import fdb.tuple + +from bindingtester import FDB_API_VERSION +from bindingtester import util +from bindingtester.tests import Test, Instruction, InstructionSet, ResultSpecification +from bindingtester.tests import test_util + +fdb.api_version(FDB_API_VERSION) + +class TupleTest(Test): + def __init__(self, subspace): + super(TupleTest, self).__init__(subspace) + self.workspace = self.subspace['workspace'] # The keys and values here must match between subsequent runs of the same test + self.stack_subspace = self.subspace['stack'] + + def setup(self, args): + self.max_int_bits = args.max_int_bits + self.api_version = args.api_version + + def generate(self, args, thread_number): + instructions = InstructionSet() + + min_value = -2**self.max_int_bits+1 + max_value = 2**self.max_int_bits-1 + + instructions.append('NEW_TRANSACTION') + + # Test integer encoding + mutations = 0 + for i in range(0, self.max_int_bits+1): + for sign in [-1, 1]: + sign_str = '' if sign == 1 else '-' + for offset in range(-10, 11): + val = (2**i) * sign + offset + if val >= min_value and val <= max_value: + if offset == 0: + add_str = '' + elif offset > 0: + add_str = '+%d' % offset + else: + add_str = '%d' % offset + + instructions.push_args(1, val) + instructions.append('TUPLE_PACK') + instructions.push_args(self.workspace.pack(('%s2^%d%s' % (sign_str, i, add_str),))) + instructions.append('SET') + mutations += 1 + + if mutations >= 5000: + test_util.blocking_commit(instructions) + mutations = 0 + + instructions.begin_finalization() + + test_util.blocking_commit(instructions) + instructions.push_args(self.stack_subspace.key()) + instructions.append('LOG_STACK') + + test_util.blocking_commit(instructions) + + return instructions + + def get_result_specifications(self): + return [ + ResultSpecification(self.workspace, global_error_filter=[1007, 1009, 1021]), + ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021]), + ] diff --git a/.ci/bindingtester/util.py b/.ci/bindingtester/util.py new file mode 100644 index 0000000..82bc0e3 --- /dev/null +++ b/.ci/bindingtester/util.py @@ -0,0 +1,76 @@ +# +# util.py +# +# This source file is part of the FoundationDB open source project +# +# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import logging +import signal +import os +import glob + +import fdb + + +def initialize_logger_level(logging_level): + logger = get_logger() + + assert logging_level in ['DEBUG', 'INFO', 'WARNING', 'ERROR'] + + if logging_level == 'DEBUG': + logger.setLevel(logging.DEBUG) + elif logging_level == 'INFO': + logger.setLevel(logging.INFO) + elif logging_level == 'WARNING': + logger.setLevel(logging.WARNING) + elif logging_level == 'ERROR': + logger.setLevel(logging.ERROR) + + +def get_logger(): + return logging.getLogger('foundationdb.bindingtester') + + +# Attempts to get the name associated with a process termination signal +def signal_number_to_name(signal_num): + name = [] + for key in signal.__dict__.keys(): + if key.startswith('SIG') and getattr(signal, key) == signal_num: + name.append(key) + if len(name) == 1: + return name[0] + else: + return str(signal_num) + + +def import_subclasses(filename, module_path): + for f in glob.glob(os.path.join(os.path.dirname(filename), '*.py')): + fn = os.path.basename(f) + if fn == '__init__.py': + continue + __import__('%s.%s' % (module_path, os.path.splitext(fn)[0])) + + +# Attempts to unpack a subspace +# This throws an exception if the subspace cannot be unpacked as a tuple +# As a result, the binding tester cannot use subspaces that have non-tuple raw prefixes +def subspace_to_tuple(subspace): + try: + return fdb.tuple.unpack(subspace.key()) + except Exception as e: + get_logger().debug(e) + raise Exception('The binding tester does not support subspaces with non-tuple raw prefixes') diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8b5b64c..63d6f04 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,7 +2,7 @@ name: CI on: push: - branches: [ main ] + branches: [ main, dev ] pull_request: permissions: @@ -13,8 +13,8 @@ jobs: otp_fdb_matrix: strategy: matrix: - otp-version: ['22', '23', '24'] - fdb-version: ['6.3.22'] + otp-version: ['24', '25'] + fdb-version: ['7.1.43'] runs-on: ubuntu-latest env: FDB_VERSION: ${{ matrix.fdb-version }} @@ -22,6 +22,7 @@ jobs: DEBUG: 0 # Set to 1 for even more verbose rebar3 logging DIAGNOSTIC: 0 + ImageOS: ubuntu20 steps: - name: Check out repository code uses: actions/checkout@v2 @@ -53,15 +54,8 @@ jobs: strategy: matrix: # erlef/setup-beam action does not support macos yet - os: [ubuntu-latest, windows-latest] - fdb-version: ['6.2.30', '6.3.22'] - # Windows builds are not being published beyond 6.3.9 right now - exclude: - - os: windows-latest - fdb-version: '6.3.22' - include: - - os: windows-latest - fdb-version: '6.3.9' + os: [ubuntu-latest] + fdb-version: ['7.1.43'] runs-on: ${{ matrix.os }} env: FDB_VERSION: ${{ matrix.fdb-version }} @@ -121,27 +115,20 @@ jobs: strategy: matrix: test-name: [api, directory, directory_hca, tuple] - api-version: [610, 620, 630] + api-version: [710] include: # The `scripted` test only supports the latest API version - test-name: scripted - api-version: 630 + api-version: 710 container: - image: apache/couchdbci-debian:erlfdb-erlang-24.1.5.0-fdb-6.3.18-1 + image: debian:testing-slim services: foundationdb: - image: foundationdb/foundationdb:6.3.18 + image: foundationdb/foundationdb:7.1.43 env: # This profile just ensures we use the FDB server in the service container REBAR_PROFILE: devcontainer steps: - - name: Create FDB cluster file - env: - # This needs to match the name of the service above so the script can do - # a DNS lookup to write down the coordinator IP in the fdb.cluster file - FDB_COORDINATOR: foundationdb - shell: 
bash - run: /usr/local/bin/create_cluster_file.bash - name: Initialize FDB database run: fdbcli --exec "configure new single ssd" - name: Check out repository code @@ -157,11 +144,18 @@ jobs: TEST_NAME: ${{ matrix.test-name }} API_VERSION: ${{ matrix.api-version }} COVER_ENABLED: true - ERL_LIBS: _build/devcontainer+test/lib/erlfdb/ + ERL_LIBS: _build/test/lib/erlfdb/ run: | - mkdir -p /usr/src/erlfdb/test/ - ln -s $GITHUB_WORKSPACE/test/tester.es /usr/src/erlfdb/test/tester.es - /usr/src/foundationdb/bindings/bindingtester/bindingtester.py erlang \ + mkdir -p /opt/fdb + cd /opt/fdb + apt update + apt install python3 + apt install python3.11-venv + python3 -m venv py + ./py/bin/pip install -Iv foundationdb==7.1.43 + mkdir test + ln -s $GITHUB_WORKSPACE/test/tester.es /opt/fdb/test/tester.es + ./py/bin/python $GITHUB_WORKSPACE/.ci/bindingtester/bindingtester.py erlang \ --test-name $TEST_NAME \ --api-version $API_VERSION \ --instruction-prefix $TEST_NAME \ diff --git a/.gitignore b/.gitignore index c7b7b0c..786e19e 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,7 @@ _build/ # Artifacts when an umbrella app (e.g. CouchDB) builds erlfdb using rebar2 .rebar/ ebin/ + +# ccls +.ccls +.ccls-cache \ No newline at end of file diff --git a/c_src/atom_names.h b/c_src/atom_names.h index 1e524b3..39f26af 100644 --- a/c_src/atom_names.h +++ b/c_src/atom_names.h @@ -35,38 +35,6 @@ ATOM_MAP(invalid_future_type); ATOM_MAP(writes_not_allowed); -// Network Options -ATOM_MAP(local_address); -ATOM_MAP(cluster_file); -ATOM_MAP(trace_enable); -ATOM_MAP(trace_format); -ATOM_MAP(trace_roll_size); -ATOM_MAP(trace_max_logs_size); -ATOM_MAP(trace_log_group); -ATOM_MAP(knob); -ATOM_MAP(tls_plugin); -ATOM_MAP(tls_cert_bytes); -ATOM_MAP(tls_cert_path); -ATOM_MAP(tls_key_bytes); -ATOM_MAP(tls_key_path); -ATOM_MAP(tls_verify_peers); -ATOM_MAP(client_buggify_enable); -ATOM_MAP(client_buggify_disable); -ATOM_MAP(client_buggify_section_activated_probability); -ATOM_MAP(client_buggify_section_fired_probability); -ATOM_MAP(tls_ca_bytes); -ATOM_MAP(tls_ca_path); -ATOM_MAP(tls_password); -ATOM_MAP(disable_multi_version_client_api); -ATOM_MAP(callbacks_on_external_threads); -ATOM_MAP(external_client_library); -ATOM_MAP(external_client_directory); -ATOM_MAP(disable_local_client); -ATOM_MAP(disable_client_statistics_logging); -ATOM_MAP(enable_slow_task_profiling); -ATOM_MAP(enable_run_loop_profiling); - - // Database Options ATOM_MAP(location_cache_size); ATOM_MAP(max_watches); diff --git a/c_src/main.c b/c_src/main.c index 8cb03b0..9725b9c 100644 --- a/c_src/main.c +++ b/c_src/main.c @@ -441,6 +441,7 @@ erlfdb_network_set_option(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) FDBNetworkOption option; ErlNifBinary value; fdb_error_t err; + int option_value; if(st->lib_state != ErlFDB_API_SELECTED) { return enif_make_badarg(env); @@ -450,70 +451,12 @@ erlfdb_network_set_option(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) return enif_make_badarg(env); } - if(IS_ATOM(argv[0], local_address)) { - option = FDB_NET_OPTION_LOCAL_ADDRESS; - } else if(IS_ATOM(argv[0], cluster_file)) { - option = FDB_NET_OPTION_CLUSTER_FILE; - } else if(IS_ATOM(argv[0], trace_enable)) { - option = FDB_NET_OPTION_TRACE_ENABLE; - } else if(IS_ATOM(argv[0], trace_format)) { - option = FDB_NET_OPTION_TRACE_FORMAT; - } else if(IS_ATOM(argv[0], trace_roll_size)) { - option = FDB_NET_OPTION_TRACE_ROLL_SIZE; - } else if(IS_ATOM(argv[0], trace_max_logs_size)) { - option = FDB_NET_OPTION_TRACE_MAX_LOGS_SIZE; - } else if(IS_ATOM(argv[0], 
trace_log_group)) { - option = FDB_NET_OPTION_TRACE_LOG_GROUP; - } else if(IS_ATOM(argv[0], knob)) { - option = FDB_NET_OPTION_KNOB; - } else if(IS_ATOM(argv[0], tls_plugin)) { - option = FDB_NET_OPTION_TLS_PLUGIN; - } else if(IS_ATOM(argv[0], tls_cert_bytes)) { - option = FDB_NET_OPTION_TLS_CERT_BYTES; - } else if(IS_ATOM(argv[0], tls_cert_path)) { - option = FDB_NET_OPTION_TLS_CERT_PATH; - } else if(IS_ATOM(argv[0], tls_key_bytes)) { - option = FDB_NET_OPTION_TLS_KEY_BYTES; - } else if(IS_ATOM(argv[0], tls_key_path)) { - option = FDB_NET_OPTION_TLS_KEY_PATH; - } else if(IS_ATOM(argv[0], tls_verify_peers)) { - option = FDB_NET_OPTION_TLS_VERIFY_PEERS; - } else if(IS_ATOM(argv[0], client_buggify_enable)) { - option = FDB_NET_OPTION_CLIENT_BUGGIFY_ENABLE; - } else if(IS_ATOM(argv[0], client_buggify_disable)) { - option = FDB_NET_OPTION_CLIENT_BUGGIFY_DISABLE; - } else if(IS_ATOM(argv[0], client_buggify_section_activated_probability)) { - option = FDB_NET_OPTION_CLIENT_BUGGIFY_SECTION_ACTIVATED_PROBABILITY; - } else if(IS_ATOM(argv[0], client_buggify_section_fired_probability)) { - option = FDB_NET_OPTION_CLIENT_BUGGIFY_SECTION_FIRED_PROBABILITY; - } else if(IS_ATOM(argv[0], tls_ca_bytes)) { - option = FDB_NET_OPTION_TLS_CA_BYTES; - } else if(IS_ATOM(argv[0], tls_password)) { - option = FDB_NET_OPTION_TLS_PASSWORD; - } else if(IS_ATOM(argv[0], disable_multi_version_client_api)) { - option = FDB_NET_OPTION_DISABLE_MULTI_VERSION_CLIENT_API; - } else if(IS_ATOM(argv[0], callbacks_on_external_threads)) { - option = FDB_NET_OPTION_CALLBACKS_ON_EXTERNAL_THREADS; - } else if(IS_ATOM(argv[0], external_client_library)) { - option = FDB_NET_OPTION_EXTERNAL_CLIENT_LIBRARY; - } else if(IS_ATOM(argv[0], external_client_directory)) { - option = FDB_NET_OPTION_EXTERNAL_CLIENT_DIRECTORY; - } else if(IS_ATOM(argv[0], disable_local_client)) { - option = FDB_NET_OPTION_DISABLE_LOCAL_CLIENT; - } else if(IS_ATOM(argv[0], disable_client_statistics_logging)) { - option = FDB_NET_OPTION_DISABLE_CLIENT_STATISTICS_LOGGING; - } else if(IS_ATOM(argv[0], enable_slow_task_profiling)) { - option = FDB_NET_OPTION_ENABLE_SLOW_TASK_PROFILING; - } - #if FDB_API_VERSION >= 630 - else if(IS_ATOM(argv[0], enable_run_loop_profiling)) { - option = FDB_NET_OPTION_ENABLE_RUN_LOOP_PROFILING; - } - #endif - else { + if(!enif_get_int(env, argv[0], &option_value)) { return enif_make_badarg(env); } + // this cast is unsafe, but we guarantee it in the Erlang layer + option = option_value; if(!enif_inspect_binary(env, argv[1], &value)) { return enif_make_badarg(env); } diff --git a/include/fdb_options.hrl b/include/fdb_options.hrl new file mode 100644 index 0000000..a1972a9 --- /dev/null +++ b/include/fdb_options.hrl @@ -0,0 +1,515 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +%% Semi-automatically generated from the fdb_c_options.g.h + +-ifndef(FDB_C_OPTIONS_G_H). +-define(FDB_C_OPTIONS_G_H, true). + +%% Deprecated +%% Parameter: (String) IP:PORT +-define(FDB_NET_OPTION_LOCAL_ADDRESS, 10). 
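+%% Usage note: with the change in c_src/main.c, the NIF now accepts the option
+%% code as a plain integer, so these constants can be passed through from Erlang
+%% unchanged. Assuming a wrapper along these lines exists in the erlfdb API (the
+%% exact function name may differ):
+%%   ok = erlfdb:set_network_option(?FDB_NET_OPTION_TRACE_ENABLE, <<"/tmp/fdb-trace">>)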
+ +%% Deprecated +%% Parameter: (String) path to cluster file +-define(FDB_NET_OPTION_CLUSTER_FILE, 20). + +%% Enables trace output to a file in a directory of the clients choosing +%% Parameter: (String) path to output directory (or NULL for current working directory) +-define(FDB_NET_OPTION_TRACE_ENABLE, 30). + +%% Sets the maximum size in bytes of a single trace output file. This value should be in the range ``[0, INT64_MAX]``. If the value is set to 0, there is no limit on individual file size. The default is a maximum size of 10,485,760 bytes. +%% Parameter: (Int) max size of a single trace output file +-define(FDB_NET_OPTION_TRACE_ROLL_SIZE, 31). + +%% Sets the maximum size of all the trace output files put together. This value should be in the range ``[0, INT64_MAX]``. If the value is set to 0, there is no limit on the total size of the files. The default is a maximum size of 104,857,600 bytes. If the default roll size is used, this means that a maximum of 10 trace files will be written at a time. +%% Parameter: (Int) max total size of trace files +-define(FDB_NET_OPTION_TRACE_MAX_LOGS_SIZE, 32). + +%% Sets the 'LogGroup' attribute with the specified value for all events in the trace output files. The default log group is 'default'. +%% Parameter: (String) value of the LogGroup attribute +-define(FDB_NET_OPTION_TRACE_LOG_GROUP, 33). + +%% Select the format of the log files. xml (the default) and json are supported. +%% Parameter: (String) Format of trace files +-define(FDB_NET_OPTION_TRACE_FORMAT, 34). + +%% Select clock source for trace files. now (the default) or realtime are supported. +%% Parameter: (String) Trace clock source +-define(FDB_NET_OPTION_TRACE_CLOCK_SOURCE, 35). + +%% Once provided, this string will be used to replace the port/PID in the log file names. +%% Parameter: (String) The identifier that will be part of all trace file names +-define(FDB_NET_OPTION_TRACE_FILE_IDENTIFIER, 36). + +%% Set file suffix for partially written log files. +%% Parameter: (String) Append this suffix to partially written log files. When a log file is complete, it is renamed to remove the suffix. No separator is added between the file and the suffix. If you want to add a file extension, you should include the separator - e.g. '.tmp' instead of 'tmp' to add the 'tmp' extension. +-define(FDB_NET_OPTION_TRACE_PARTIAL_FILE_SUFFIX, 39). + +%% Set internal tuning or debugging knobs +%% Parameter: (String) knob_name=knob_value +-define(FDB_NET_OPTION_KNOB, 40). + +%% Deprecated +%% Parameter: (String) file path or linker-resolved name +-define(FDB_NET_OPTION_TLS_PLUGIN, 41). + +%% Set the certificate chain +%% Parameter: (Bytes) certificates +-define(FDB_NET_OPTION_TLS_CERT_BYTES, 42). + +%% Set the file from which to load the certificate chain +%% Parameter: (String) file path +-define(FDB_NET_OPTION_TLS_CERT_PATH, 43). + +%% Set the private key corresponding to your own certificate +%% Parameter: (Bytes) key +-define(FDB_NET_OPTION_TLS_KEY_BYTES, 45). + +%% Set the file from which to load the private key corresponding to your own certificate +%% Parameter: (String) file path +-define(FDB_NET_OPTION_TLS_KEY_PATH, 46). + +%% Set the peer certificate field verification criteria +%% Parameter: (Bytes) verification pattern +-define(FDB_NET_OPTION_TLS_VERIFY_PEERS, 47). + +%% +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_BUGGIFY_ENABLE, 48). + +%% +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_BUGGIFY_DISABLE, 49). 
+ +%% Set the probability of a BUGGIFY section being active for the current execution. Only applies to code paths first traversed AFTER this option is changed. +%% Parameter: (Int) probability expressed as a percentage between 0 and 100 +-define(FDB_NET_OPTION_BUGGIFY_SECTION_ACTIVATED_PROBABILITY, 50). + +%% Set the probability of an active BUGGIFY section being fired +%% Parameter: (Int) probability expressed as a percentage between 0 and 100 +-define(FDB_NET_OPTION_BUGGIFY_SECTION_FIRED_PROBABILITY, 51). + +%% Set the ca bundle +%% Parameter: (Bytes) ca bundle +-define(FDB_NET_OPTION_TLS_CA_BYTES, 52). + +%% Set the file from which to load the certificate authority bundle +%% Parameter: (String) file path +-define(FDB_NET_OPTION_TLS_CA_PATH, 53). + +%% Set the passphrase for encrypted private key. Password should be set before setting the key for the password to be used. +%% Parameter: (String) key passphrase +-define(FDB_NET_OPTION_TLS_PASSWORD, 54). + +%% Disables the multi-version client API and instead uses the local client directly. Must be set before setting up the network. +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_DISABLE_MULTI_VERSION_CLIENT_API, 60). + +%% If set, callbacks from external client libraries can be called from threads created by the FoundationDB client library. Otherwise, callbacks will be called from either the thread used to add the callback or the network thread. Setting this option can improve performance when connected using an external client, but may not be safe to use in all environments. Must be set before setting up the network. WARNING: This feature is considered experimental at this time. +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_CALLBACKS_ON_EXTERNAL_THREADS, 61). + +%% Adds an external client library for use by the multi-version client API. Must be set before setting up the network. +%% Parameter: (String) path to client library +-define(FDB_NET_OPTION_EXTERNAL_CLIENT_LIBRARY, 62). + +%% Searches the specified path for dynamic libraries and adds them to the list of client libraries for use by the multi-version client API. Must be set before setting up the network. +%% Parameter: (String) path to directory containing client libraries +-define(FDB_NET_OPTION_EXTERNAL_CLIENT_DIRECTORY, 63). + +%% Prevents connections through the local client, allowing only connections through externally loaded client libraries. +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_DISABLE_LOCAL_CLIENT, 64). + +%% Spawns multiple worker threads for each version of the client that is loaded. Setting this to a number greater than one implies disable_local_client. +%% Parameter: (Int) Number of client threads to be spawned. Each cluster will be serviced by a single client thread. +-define(FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION, 65). + +%% Retain temporary external client library copies that are created for enabling multi-threading. +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_RETAIN_CLIENT_LIBRARY_COPIES, 67). + +%% Disables logging of client statistics, such as sampled transaction activity. +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_DISABLE_CLIENT_STATISTICS_LOGGING, 70). + +%% Deprecated +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_ENABLE_SLOW_TASK_PROFILING, 71). + +%% Enables debugging feature to perform run loop profiling. Requires trace logging to be enabled. WARNING: this feature is not recommended for use in production. 
+%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_ENABLE_RUN_LOOP_PROFILING, 71). + +%% Enable client buggify - will make requests randomly fail (intended for client testing) +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_CLIENT_BUGGIFY_ENABLE, 80). + +%% Disable client buggify +%% Parameter: Option takes no parameter +-define(FDB_NET_OPTION_CLIENT_BUGGIFY_DISABLE, 81). + +%% Set the probability of a CLIENT_BUGGIFY section being active for the current execution. +%% Parameter: (Int) probability expressed as a percentage between 0 and 100 +-define(FDB_NET_OPTION_CLIENT_BUGGIFY_SECTION_ACTIVATED_PROBABILITY, 82). + +%% Set the probability of an active CLIENT_BUGGIFY section being fired. A section will only fire if it was activated +%% Parameter: (Int) probability expressed as a percentage between 0 and 100 +-define(FDB_NET_OPTION_CLIENT_BUGGIFY_SECTION_FIRED_PROBABILITY, 83). + +%% Set a tracer to run on the client. Should be set to the same value as the tracer set on the server. +%% Parameter: (String) Distributed tracer type. Choose from none, log_file, or network_lossy +-define(FDB_NET_OPTION_DISTRIBUTED_CLIENT_TRACER, 90). + +%% This option is set automatically to communicate the list of supported clients to the active client. +%% Parameter: (String) [release version],[source version],[protocol version];... This is a hidden parameter and should not be used directly by applications. +-define(FDB_NET_OPTION_SUPPORTED_CLIENT_VERSIONS, 1000). + +%% This option is set automatically on all clients loaded externally using the multi-version API. +%% Parameter: Option takes no parameter This is a hidden parameter and should not be used directly by applications. +-define(FDB_NET_OPTION_EXTERNAL_CLIENT, 1001). + +%% This option tells a child on a multiversion client what transport ID to use. +%% Parameter: (Int) Transport ID for the child connection This is a hidden parameter and should not be used directly by applications. +-define(FDB_NET_OPTION_EXTERNAL_CLIENT_TRANSPORT_ID, 1002). + +%% Set the size of the client location cache. Raising this value can boost performance in very large databases where clients access data in a near-random pattern. Defaults to 100000. +%% Parameter: (Int) Max location cache entries +-define(FDB_DB_OPTION_LOCATION_CACHE_SIZE, 10). + +%% Set the maximum number of watches allowed to be outstanding on a database connection. Increasing this number could result in increased resource usage. Reducing this number will not cancel any outstanding watches. Defaults to 10000 and cannot be larger than 1000000. +%% Parameter: (Int) Max outstanding watches +-define(FDB_DB_OPTION_MAX_WATCHES, 20). + +%% Specify the machine ID that was passed to fdbserver processes running on the same machine as this client, for better location-aware load balancing. +%% Parameter: (String) Hexadecimal ID +-define(FDB_DB_OPTION_MACHINE_ID, 21). + +%% Specify the datacenter ID that was passed to fdbserver processes running in the same datacenter as this client, for better location-aware load balancing. +%% Parameter: (String) Hexadecimal ID +-define(FDB_DB_OPTION_DATACENTER_ID, 22). + +%% Snapshot read operations will see the results of writes done in the same transaction. This is the default behavior. +%% Parameter: Option takes no parameter +-define(FDB_DB_OPTION_SNAPSHOT_RYW_ENABLE, 26). + +%% Snapshot read operations will not see the results of writes done in the same transaction. This was the default behavior prior to API version 300. 
+%% Parameter: Option takes no parameter +-define(FDB_DB_OPTION_SNAPSHOT_RYW_DISABLE, 27). + +%% Sets the maximum escaped length of key and value fields to be logged to the trace file via the LOG_TRANSACTION option. This sets the ``transaction_logging_max_field_length`` option of each transaction created by this database. See the transaction option description for more information. +%% Parameter: (Int) Maximum length of escaped key and value fields. +-define(FDB_DB_OPTION_TRANSACTION_LOGGING_MAX_FIELD_LENGTH, 405). + +%% Set a timeout in milliseconds which, when elapsed, will cause each transaction automatically to be cancelled. This sets the ``timeout`` option of each transaction created by this database. See the transaction option description for more information. Using this option requires that the API version is 610 or higher. +%% Parameter: (Int) value in milliseconds of timeout +-define(FDB_DB_OPTION_TRANSACTION_TIMEOUT, 500). + +%% Set a maximum number of retries after which additional calls to ``onError`` will throw the most recently seen error code. This sets the ``retry_limit`` option of each transaction created by this database. See the transaction option description for more information. +%% Parameter: (Int) number of times to retry +-define(FDB_DB_OPTION_TRANSACTION_RETRY_LIMIT, 501). + +%% Set the maximum amount of backoff delay incurred in the call to ``onError`` if the error is retryable. This sets the ``max_retry_delay`` option of each transaction created by this database. See the transaction option description for more information. +%% Parameter: (Int) value in milliseconds of maximum delay +-define(FDB_DB_OPTION_TRANSACTION_MAX_RETRY_DELAY, 502). + +%% Set the maximum transaction size in bytes. This sets the ``size_limit`` option on each transaction created by this database. See the transaction option description for more information. +%% Parameter: (Int) value in bytes +-define(FDB_DB_OPTION_TRANSACTION_SIZE_LIMIT, 503). + +%% The read version will be committed, and usually will be the latest committed, but might not be the latest committed in the event of a simultaneous fault and misbehaving clock. +%% Parameter: Option takes no parameter +-define(FDB_DB_OPTION_TRANSACTION_CAUSAL_READ_RISKY, 504). + +%% Deprecated. Addresses returned by get_addresses_for_key include the port when enabled. As of api version 630, this option is enabled by default and setting this has no effect. +%% Parameter: Option takes no parameter +-define(FDB_DB_OPTION_TRANSACTION_INCLUDE_PORT_IN_ADDRESS, 505). + +%% Allows ``get`` operations to read from sections of keyspace that have become unreadable because of versionstamp operations. This sets the ``bypass_unreadable`` option of each transaction created by this database. See the transaction option description for more information. +%% Parameter: Option takes no parameter +-define(FDB_DB_OPTION_TRANSACTION_BYPASS_UNREADABLE, 700). + +%% Use configuration database. +%% Parameter: Option takes no parameter +-define(FDB_DB_OPTION_USE_CONFIG_DATABASE, 800). + +%% An integer between 0 and 100 (default is 0) expressing the probability that a client will verify it can't read stale data whenever it detects a recovery. +%% Parameter: Option takes no parameter +-define(FDB_DB_OPTION_TEST_CAUSAL_READ_RISKY, 900). + +%% The transaction, if not self-conflicting, may be committed a second time after commit succeeds, in the event of a fault +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_CAUSAL_WRITE_RISKY, 10). 
+ +%% The read version will be committed, and usually will be the latest committed, but might not be the latest committed in the event of a simultaneous fault and misbehaving clock. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_CAUSAL_READ_RISKY, 20). + +%% +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_CAUSAL_READ_DISABLE, 21). + +%% Addresses returned by get_addresses_for_key include the port when enabled. As of api version 630, this option is enabled by default and setting this has no effect. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_INCLUDE_PORT_IN_ADDRESS, 23). + +%% The next write performed on this transaction will not generate a write conflict range. As a result, other transactions which read the key(s) being modified by the next write will not conflict with this transaction. Care needs to be taken when using this option on a transaction that is shared between multiple threads. When setting this option, write conflict ranges will be disabled on the next write operation, regardless of what thread it is on. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_NEXT_WRITE_NO_WRITE_CONFLICT_RANGE, 30). + +%% Committing this transaction will bypass the normal load balancing across commit proxies and go directly to the specifically nominated 'first commit proxy'. +%% Parameter: Option takes no parameter This is a hidden parameter and should not be used directly by applications. +-define(FDB_TR_OPTION_COMMIT_ON_FIRST_PROXY, 40). + +%% +%% Parameter: Option takes no parameter This is a hidden parameter and should not be used directly by applications. +-define(FDB_TR_OPTION_CHECK_WRITES_ENABLE, 50). + +%% Reads performed by a transaction will not see any prior mutations that occured in that transaction, instead seeing the value which was in the database at the transaction's read version. This option may provide a small performance benefit for the client, but also disables a number of client-side optimizations which are beneficial for transactions which tend to read and write the same keys within a single transaction. It is an error to set this option after performing any reads or writes on the transaction. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, 51). + +%% Deprecated +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_READ_AHEAD_DISABLE, 52). + +%% +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_DURABILITY_DATACENTER, 110). + +%% +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_DURABILITY_RISKY, 120). + +%% Deprecated +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_DURABILITY_DEV_NULL_IS_WEB_SCALE, 130). + +%% Specifies that this transaction should be treated as highest priority and that lower priority transactions should block behind this one. Use is discouraged outside of low-level tools +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE, 200). + +%% Specifies that this transaction should be treated as low priority and that default priority transactions will be processed first. Batch priority transactions will also be throttled at load levels smaller than for other types of transactions and may be fully cut off in the event of machine failures. Useful for doing batch work simultaneously with latency-sensitive work +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_PRIORITY_BATCH, 201). + +%% This is a write-only transaction which sets the initial configuration. 
This option is designed for use by database system tools only. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_INITIALIZE_NEW_DATABASE, 300). + +%% Allows this transaction to read and modify system keys (those that start with the byte 0xFF). Implies raw_access. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_ACCESS_SYSTEM_KEYS, 301). + +%% Allows this transaction to read system keys (those that start with the byte 0xFF). Implies raw_access. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_READ_SYSTEM_KEYS, 302). + +%% Allows this transaction to access the raw key-space when tenant mode is on. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_RAW_ACCESS, 303). + +%% +%% Parameter: Option takes no parameter This is a hidden parameter and should not be used directly by applications. +-define(FDB_TR_OPTION_DEBUG_DUMP, 400). + +%% +%% Parameter: (String) Optional transaction name +-define(FDB_TR_OPTION_DEBUG_RETRY_LOGGING, 401). + +%% Deprecated +%% Parameter: (String) String identifier to be used in the logs when tracing this transaction. The identifier must not exceed 100 characters. +-define(FDB_TR_OPTION_TRANSACTION_LOGGING_ENABLE, 402). + +%% Sets a client provided identifier for the transaction that will be used in scenarios like tracing or profiling. Client trace logging or transaction profiling must be separately enabled. +%% Parameter: (String) String identifier to be used when tracing or profiling this transaction. The identifier must not exceed 100 characters. +-define(FDB_TR_OPTION_DEBUG_TRANSACTION_IDENTIFIER, 403). + +%% Enables tracing for this transaction and logs results to the client trace logs. The DEBUG_TRANSACTION_IDENTIFIER option must be set before using this option, and client trace logging must be enabled to get log output. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_LOG_TRANSACTION, 404). + +%% Sets the maximum escaped length of key and value fields to be logged to the trace file via the LOG_TRANSACTION option, after which the field will be truncated. A negative value disables truncation. +%% Parameter: (Int) Maximum length of escaped key and value fields. +-define(FDB_TR_OPTION_TRANSACTION_LOGGING_MAX_FIELD_LENGTH, 405). + +%% Sets an identifier for server tracing of this transaction. When committed, this identifier triggers logging when each part of the transaction authority encounters it, which is helpful in diagnosing slowness in misbehaving clusters. The identifier is randomly generated. When there is also a debug_transaction_identifier, both IDs are logged together. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_SERVER_REQUEST_TRACING, 406). + +%% Set a timeout in milliseconds which, when elapsed, will cause the transaction automatically to be cancelled. Valid parameter values are ``[0, INT_MAX]``. If set to 0, will disable all timeouts. All pending and any future uses of the transaction will throw an exception. The transaction can be used again after it is reset. Prior to API version 610, like all other transaction options, the timeout must be reset after a call to ``onError``. If the API version is 610 or greater, the timeout is not reset after an ``onError`` call. This allows the user to specify a longer timeout on specific transactions than the default timeout specified through the ``transaction_timeout`` database option without the shorter database timeout cancelling transactions that encounter a retryable error. 
Note that at all API versions, it is safe and legal to set the timeout each time the transaction begins, so most code written assuming the older behavior can be upgraded to the newer behavior without requiring any modification, and the caller is not required to implement special logic in retry loops to only conditionally set this option. +%% Parameter: (Int) value in milliseconds of timeout +-define(FDB_TR_OPTION_TIMEOUT, 500). + +%% Set a maximum number of retries after which additional calls to ``onError`` will throw the most recently seen error code. Valid parameter values are ``[-1, INT_MAX]``. If set to -1, will disable the retry limit. Prior to API version 610, like all other transaction options, the retry limit must be reset after a call to ``onError``. If the API version is 610 or greater, the retry limit is not reset after an ``onError`` call. Note that at all API versions, it is safe and legal to set the retry limit each time the transaction begins, so most code written assuming the older behavior can be upgraded to the newer behavior without requiring any modification, and the caller is not required to implement special logic in retry loops to only conditionally set this option. +%% Parameter: (Int) number of times to retry +-define(FDB_TR_OPTION_RETRY_LIMIT, 501). + +%% Set the maximum amount of backoff delay incurred in the call to ``onError`` if the error is retryable. Defaults to 1000 ms. Valid parameter values are ``[0, INT_MAX]``. If the maximum retry delay is less than the current retry delay of the transaction, then the current retry delay will be clamped to the maximum retry delay. Prior to API version 610, like all other transaction options, the maximum retry delay must be reset after a call to ``onError``. If the API version is 610 or greater, the retry limit is not reset after an ``onError`` call. Note that at all API versions, it is safe and legal to set the maximum retry delay each time the transaction begins, so most code written assuming the older behavior can be upgraded to the newer behavior without requiring any modification, and the caller is not required to implement special logic in retry loops to only conditionally set this option. +%% Parameter: (Int) value in milliseconds of maximum delay +-define(FDB_TR_OPTION_MAX_RETRY_DELAY, 502). + +%% Set the transaction size limit in bytes. The size is calculated by combining the sizes of all keys and values written or mutated, all key ranges cleared, and all read and write conflict ranges. (In other words, it includes the total size of all data included in the request to the cluster to commit the transaction.) Large transactions can cause performance problems on FoundationDB clusters, so setting this limit to a smaller value than the default can help prevent the client from accidentally degrading the cluster's performance. This value must be at least 32 and cannot be set to higher than 10,000,000, the default transaction size limit. +%% Parameter: (Int) value in bytes +-define(FDB_TR_OPTION_SIZE_LIMIT, 503). + +%% Snapshot read operations will see the results of writes done in the same transaction. This is the default behavior. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_SNAPSHOT_RYW_ENABLE, 600). + +%% Snapshot read operations will not see the results of writes done in the same transaction. This was the default behavior prior to API version 300. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_SNAPSHOT_RYW_DISABLE, 601). 
+ +%% The transaction can read and write to locked databases, and is responsible for checking that it took the lock. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_LOCK_AWARE, 700). + +%% By default, operations that are performed on a transaction while it is being committed will not only fail themselves, but they will attempt to fail other in-flight operations (such as the commit) as well. This behavior is intended to help developers discover situations where operations could be unintentionally executed after the transaction has been reset. Setting this option removes that protection, causing only the offending operation to fail. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_USED_DURING_COMMIT_PROTECTION_DISABLE, 701). + +%% The transaction can read from locked databases. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_READ_LOCK_AWARE, 702). + +%% No other transactions will be applied before this transaction within the same commit version. +%% Parameter: Option takes no parameter This is a hidden parameter and should not be used directly by applications. +-define(FDB_TR_OPTION_FIRST_IN_BATCH, 710). + +%% This option should only be used by tools which change the database configuration. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_USE_PROVISIONAL_PROXIES, 711). + +%% The transaction can retrieve keys that are conflicting with other transactions. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_REPORT_CONFLICTING_KEYS, 712). + +%% By default, the special key space will only allow users to read from exactly one module (a subspace in the special key space). Use this option to allow reading from zero or more modules. Users who set this option should be prepared for new modules, which may have different behaviors than the modules they're currently reading. For example, a new module might block or return an error. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_SPECIAL_KEY_SPACE_RELAXED, 713). + +%% By default, users are not allowed to write to special keys. Enable this option will implicitly enable all options required to achieve the configuration change. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, 714). + +%% Adds a tag to the transaction that can be used to apply manual targeted throttling. At most 5 tags can be set on a transaction. +%% Parameter: (String) String identifier used to associated this transaction with a throttling group. Must not exceed 16 characters. +-define(FDB_TR_OPTION_TAG, 800). + +%% Adds a tag to the transaction that can be used to apply manual or automatic targeted throttling. At most 5 tags can be set on a transaction. +%% Parameter: (String) String identifier used to associated this transaction with a throttling group. Must not exceed 16 characters. +-define(FDB_TR_OPTION_AUTO_THROTTLE_TAG, 801). + +%% Adds a parent to the Span of this transaction. Used for transaction tracing. A span can be identified with any 16 bytes +%% Parameter: (Bytes) A byte string of length 16 used to associate the span of this transaction with a parent +-define(FDB_TR_OPTION_SPAN_PARENT, 900). + +%% Asks storage servers for how many bytes a clear key range contains. Otherwise uses the location cache to roughly estimate this. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_EXPENSIVE_CLEAR_COST_ESTIMATION_ENABLE, 1000). 
+ +%% Allows ``get`` operations to read from sections of keyspace that have become unreadable because of versionstamp operations. These reads will view versionstamp operations as if they were set operations that did not fill in the versionstamp. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_BYPASS_UNREADABLE, 1100). + +%% Allows this transaction to use cached GRV from the database context. Defaults to off. Upon first usage, starts a background updater to periodically update the cache to avoid stale read versions. +%% Parameter: Option takes no parameter +-define(FDB_TR_OPTION_USE_GRV_CACHE, 1101). + +%% Specifically instruct this transaction to NOT use cached GRV. Primarily used for the read version cache's background updater to avoid attempting to read a cached entry in specific situations. +%% Parameter: Option takes no parameter This is a hidden parameter and should not be used directly by applications. +-define(FDB_TR_OPTION_SKIP_GRV_CACHE, 1102). + +%% Client intends to consume the entire range and would like it all transferred as early as possible. +-define(FDB_STREAMING_MODE_WANT_ALL, -2). + +%% The default. The client doesn't know how much of the range it is likely to used and wants different performance concerns to be balanced. Only a small portion of data is transferred to the client initially (in order to minimize costs if the client doesn't read the entire range), and as the caller iterates over more items in the range larger batches will be transferred in order to minimize latency. After enough iterations, the iterator mode will eventually reach the same byte limit as ``WANT_ALL`` +-define(FDB_STREAMING_MODE_ITERATOR, -1). + +%% Infrequently used. The client has passed a specific row limit and wants that many rows delivered in a single batch. Because of iterator operation in client drivers make request batches transparent to the user, consider ``WANT_ALL`` StreamingMode instead. A row limit must be specified if this mode is used. +-define(FDB_STREAMING_MODE_EXACT, 0). + +%% Infrequently used. Transfer data in batches small enough to not be much more expensive than reading individual rows, to minimize cost if iteration stops early. +-define(FDB_STREAMING_MODE_SMALL, 1). + +%% Infrequently used. Transfer data in batches sized in between small and large. +-define(FDB_STREAMING_MODE_MEDIUM, 2). + +%% Infrequently used. Transfer data in batches large enough to be, in a high-concurrency environment, nearly as efficient as possible. If the client stops iteration early, some disk and network bandwidth may be wasted. The batch size may still be too small to allow a single client to get high throughput from the database, so if that is what you need consider the SERIAL StreamingMode. +-define(FDB_STREAMING_MODE_LARGE, 3). + +%% Transfer data in batches large enough that an individual client can get reasonable read bandwidth from the database. If the client stops iteration early, considerable disk and network bandwidth may be wasted. +-define(FDB_STREAMING_MODE_SERIAL, 4). + +%% Performs an addition of little-endian integers. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The integers to be added must be stored in a little-endian representation. They can be signed in two's complement representation or unsigned. 
You can add to an integer at a known offset in the value by prepending the appropriate number of zero bytes to ``param`` and padding with zero bytes to match the length of the value. However, this offset technique requires that you know the addition will not cause the integer field within the value to overflow. +-define(FDB_MUTATION_TYPE_ADD, 2). + +%% Deprecated +-define(FDB_MUTATION_TYPE_AND, 6). + +%% Performs a bitwise ``and`` operation. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. +-define(FDB_MUTATION_TYPE_BIT_AND, 6). + +%% Deprecated +-define(FDB_MUTATION_TYPE_OR, 7). + +%% Performs a bitwise ``or`` operation. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. +-define(FDB_MUTATION_TYPE_BIT_OR, 7). + +%% Deprecated +-define(FDB_MUTATION_TYPE_XOR, 8). + +%% Performs a bitwise ``xor`` operation. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. +-define(FDB_MUTATION_TYPE_BIT_XOR, 8). + +%% Appends ``param`` to the end of the existing value already in the database at the given key (or creates the key and sets the value to ``param`` if the key is empty). This will only append the value if the final concatenated value size is less than or equal to the maximum value size (i.e. if it fits). WARNING: No error is surfaced back to the user if the final value is too large because the mutation will not be applied until after the transaction has been committed. Therefore, it is only safe to use this mutation type if one can guarantee that one will keep the total value size under the maximum size. +-define(FDB_MUTATION_TYPE_APPEND_IF_FITS, 9). + +%% Performs a little-endian comparison of byte strings. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The larger of the two values is then stored in the database. +-define(FDB_MUTATION_TYPE_MAX, 12). + +%% Performs a little-endian comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The smaller of the two values is then stored in the database. +-define(FDB_MUTATION_TYPE_MIN, 13). + +%% Transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. 
The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java, Python, and Go bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes. +-define(FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_KEY, 14). + +%% Transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java, Python, and Go bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset. +-define(FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE, 15). + +%% Performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the smaller of the two values is then stored in the database. +-define(FDB_MUTATION_TYPE_BYTE_MIN, 16). + +%% Performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the larger of the two values is then stored in the database. +-define(FDB_MUTATION_TYPE_BYTE_MAX, 17). + +%% Performs an atomic ``compare and clear`` operation. If the existing value in the database is equal to the given value, then given key is cleared. +-define(FDB_MUTATION_TYPE_COMPARE_AND_CLEAR, 20). + +%% Used to add a read conflict range +-define(FDB_CONFLICT_RANGE_TYPE_READ, 0). + +%% Used to add a write conflict range +-define(FDB_CONFLICT_RANGE_TYPE_WRITE, 1). + +%% Returns ``true`` if the error indicates the operations in the transactions should be retried because of transient error. +-define(FDB_ERROR_PREDICATE_RETRYABLE, 50000). + +%% Returns ``true`` if the error indicates the transaction may have succeeded, though not in a way the system can verify. +-define(FDB_ERROR_PREDICATE_MAYBE_COMMITTED, 50001). + +%% Returns ``true`` if the error indicates the transaction has not committed, though in a way that can be retried. +-define(FDB_ERROR_PREDICATE_RETRYABLE_NOT_COMMITTED, 50002). + +-endif. diff --git a/src/erlfdb_nif.erl b/src/erlfdb_nif.erl index 4e70303..1c2568f 100644 --- a/src/erlfdb_nif.erl +++ b/src/erlfdb_nif.erl @@ -62,7 +62,7 @@ error_predicate/2 ]). --define(DEFAULT_API_VERSION, 620). +-define(DEFAULT_API_VERSION, 710). -type error() :: {erlfdb_error, Code :: integer()}. 
-type future() :: {erlfdb_future, reference(), reference()}. @@ -87,10 +87,13 @@ local_address | cluster_file | trace_enable - | trace_format | trace_roll_size | trace_max_logs_size | trace_log_group + | trace_format + | trace_clock_source + | trace_file_identifier + | trace_partial_file_suffix | knob | tls_plugin | tls_cert_bytes @@ -110,10 +113,16 @@ | external_client_library | external_client_directory | disable_local_client + | client_threads_per_version + | retain_client_library_copies | disable_client_statistics_logging | enable_slow_task_profiling - % API version 630+ - | enable_run_loop_profiling. + | enable_run_loop_profiling + | buggify_enable + | buggify_disable + | buggify_section_activated_probability + | buggify_section_fired_probability + | distributed_client_tracer. -type database_option() :: location_cache_size @@ -485,12 +494,13 @@ select_api_version(Version) when is_integer(Version), Version > 0 -> -spec network_set_option(Option :: network_option(), Value :: option_value()) -> ok | error(). network_set_option(Name, Value) -> + Option = erlfdb_nif_option:to_network_option(Name), BinValue = case Value of B when is_binary(B) -> B; I when is_integer(I) -> <<I:8/little-unsigned-integer-unit:8>> end, - erlfdb_network_set_option(Name, BinValue). + erlfdb_network_set_option(Option, BinValue). % Sentinel Check erlfdb_can_initialize() -> ?NOT_LOADED. diff --git a/src/erlfdb_nif_option.erl b/src/erlfdb_nif_option.erl new file mode 100644 index 0000000..8a1b8e4 --- /dev/null +++ b/src/erlfdb_nif_option.erl @@ -0,0 +1,100 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +%% Semi-automatically generated + +-module(erlfdb_nif_option). + +-export([to_network_option/1]). + +-include("fdb_options.hrl").
+ +to_network_option(local_address) -> + ?FDB_NET_OPTION_LOCAL_ADDRESS; +to_network_option(cluster_file) -> + ?FDB_NET_OPTION_CLUSTER_FILE; +to_network_option(trace_enable) -> + ?FDB_NET_OPTION_TRACE_ENABLE; +to_network_option(trace_roll_size) -> + ?FDB_NET_OPTION_TRACE_ROLL_SIZE; +to_network_option(trace_max_logs_size) -> + ?FDB_NET_OPTION_TRACE_MAX_LOGS_SIZE; +to_network_option(trace_log_group) -> + ?FDB_NET_OPTION_TRACE_LOG_GROUP; +to_network_option(trace_format) -> + ?FDB_NET_OPTION_TRACE_FORMAT; +to_network_option(trace_clock_source) -> + ?FDB_NET_OPTION_TRACE_CLOCK_SOURCE; +to_network_option(trace_file_identifier) -> + ?FDB_NET_OPTION_TRACE_FILE_IDENTIFIER; +to_network_option(trace_partial_file_suffix) -> + ?FDB_NET_OPTION_TRACE_PARTIAL_FILE_SUFFIX; +to_network_option(knob) -> + ?FDB_NET_OPTION_KNOB; +to_network_option(tls_plugin) -> + ?FDB_NET_OPTION_TLS_PLUGIN; +to_network_option(tls_cert_bytes) -> + ?FDB_NET_OPTION_TLS_CERT_BYTES; +to_network_option(tls_cert_path) -> + ?FDB_NET_OPTION_TLS_CERT_PATH; +to_network_option(tls_key_bytes) -> + ?FDB_NET_OPTION_TLS_KEY_BYTES; +to_network_option(tls_key_path) -> + ?FDB_NET_OPTION_TLS_KEY_PATH; +to_network_option(tls_verify_peers) -> + ?FDB_NET_OPTION_TLS_VERIFY_PEERS; +to_network_option(client_buggify_enable) -> + ?FDB_NET_OPTION_CLIENT_BUGGIFY_ENABLE; +to_network_option(client_buggify_disable) -> + ?FDB_NET_OPTION_CLIENT_BUGGIFY_DISABLE; +to_network_option(client_buggify_section_activated_probability) -> + ?FDB_NET_OPTION_CLIENT_BUGGIFY_SECTION_ACTIVATED_PROBABILITY; +to_network_option(client_buggify_section_fired_probability) -> + ?FDB_NET_OPTION_CLIENT_BUGGIFY_SECTION_FIRED_PROBABILITY; +to_network_option(tls_ca_bytes) -> + ?FDB_NET_OPTION_TLS_CA_BYTES; +to_network_option(tls_ca_path) -> + ?FDB_NET_OPTION_TLS_CA_PATH; +to_network_option(tls_password) -> + ?FDB_NET_OPTION_TLS_PASSWORD; +to_network_option(disable_multi_version_client_api) -> + ?FDB_NET_OPTION_DISABLE_MULTI_VERSION_CLIENT_API; +to_network_option(callbacks_on_external_threads) -> + ?FDB_NET_OPTION_CALLBACKS_ON_EXTERNAL_THREADS; +to_network_option(external_client_library) -> + ?FDB_NET_OPTION_EXTERNAL_CLIENT_LIBRARY; +to_network_option(external_client_directory) -> + ?FDB_NET_OPTION_EXTERNAL_CLIENT_DIRECTORY; +to_network_option(disable_local_client) -> + ?FDB_NET_OPTION_DISABLE_LOCAL_CLIENT; +to_network_option(client_threads_per_version) -> + ?FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION; +to_network_option(retain_client_library_copies) -> + ?FDB_NET_OPTION_RETAIN_CLIENT_LIBRARY_COPIES; +to_network_option(disable_client_statistics_logging) -> + ?FDB_NET_OPTION_DISABLE_CLIENT_STATISTICS_LOGGING; +to_network_option(enable_slow_task_profiling) -> + ?FDB_NET_OPTION_ENABLE_SLOW_TASK_PROFILING; +to_network_option(enable_run_loop_profiling) -> + ?FDB_NET_OPTION_ENABLE_RUN_LOOP_PROFILING; +to_network_option(buggify_enable) -> + ?FDB_NET_OPTION_BUGGIFY_ENABLE; +to_network_option(buggify_disable) -> + ?FDB_NET_OPTION_BUGGIFY_DISABLE; +to_network_option(buggify_section_activated_probability) -> + ?FDB_NET_OPTION_BUGGIFY_SECTION_ACTIVATED_PROBABILITY; +to_network_option(buggify_section_fired_probability) -> + ?FDB_NET_OPTION_BUGGIFY_SECTION_FIRED_PROBABILITY; +to_network_option(distributed_client_tracer) -> + ?FDB_NET_OPTION_DISTRIBUTED_CLIENT_TRACER; +to_network_option(_) -> + error(badarg).
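
Reviewer note (not part of the patch): a minimal sketch of how the pieces above fit together. The atom option is resolved to its integer code via erlfdb_nif_option:to_network_option/1, and integer parameters are encoded as 8-byte little-endian binaries before crossing the NIF boundary, as network_set_option/2 now does. The module and function names below are hypothetical, and the example assumes include/fdb_options.hrl is on the include path.

%% Illustrative sketch only; not part of this change set.
-module(erlfdb_option_example).
-export([demo/0]).

-include("fdb_options.hrl").

demo() ->
    %% The NIF now receives an integer option code, so adding a new option
    %% only needs a constant in fdb_options.hrl plus a mapping clause in
    %% erlfdb_nif_option, with no change to the C code.
    ?FDB_NET_OPTION_TRACE_LOG_GROUP = erlfdb_nif_option:to_network_option(trace_log_group),
    %% Integer parameters cross the NIF boundary as 8-byte little-endian
    %% binaries, the same encoding network_set_option/2 builds above.
    Encoded = <<10485760:8/little-unsigned-integer-unit:8>>,
    {trace_log_group, Encoded}.

Moving the atom-to-integer mapping into the Erlang layer keeps the C NIF free of per-option atom comparisons, which is why the long IS_ATOM chain could be deleted; the trade-off, as the in-code comment notes, is that the cast from the supplied integer is only as safe as the Erlang-side mapping that produces it.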