diff --git a/poetry.lock b/poetry.lock
index bae008dc..f6fe451d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1193,7 +1193,7 @@ hiredis = ["hiredis (>=0.1.3)"]
 
 [[package]]
 name = "redisbench-admin"
-version = "0.6.12"
+version = "0.6.21"
 description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
 category = "main"
 optional = false
@@ -2419,8 +2419,8 @@ redis-py-cluster = [
     {file = "redis_py_cluster-2.1.3-py2.py3-none-any.whl", hash = "sha256:38f08850fde469ffd76bced7309721114acc487e52b76f374a0502c34c69b4ec"},
 ]
 redisbench-admin = [
-    {file = "redisbench-admin-0.6.12.tar.gz", hash = "sha256:f801fc39accad0b9a53cda64d3986173004eabefe05c27a74651d0dfab409811"},
-    {file = "redisbench_admin-0.6.12-py3-none-any.whl", hash = "sha256:ab1aff4f19c3dbbf07fb17fbfeab742941611aedf77fb78394a0786cefe1220d"},
+    {file = "redisbench-admin-0.6.21.tar.gz", hash = "sha256:1e356555ff3aacff658acc1cb46aa6b3031fed7d48dedc60d91e2c4d1fd44950"},
+    {file = "redisbench_admin-0.6.21-py3-none-any.whl", hash = "sha256:dce08e800beb1f23da859ecb9ed33db5edd31cfcd8a387eb9489345d741f4f9d"},
 ]
 redistimeseries = [
     {file = "redistimeseries-1.4.3-py2-none-any.whl", hash = "sha256:f6a828acbdf440ca66caf8ac0193ed122bd5a2e0e936bb9989ccabefb3e5b509"},
diff --git a/pyproject.toml b/pyproject.toml
index 6aa4e4a4..4d414c93 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "redis-benchmarks-specification"
-version = "0.1.20"
+version = "0.1.21"
 description = "The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute."
 authors = ["filipecosta90 ","Redis Performance Group "]
 readme = "Readme.md"
@@ -16,7 +16,7 @@ argparse = "^1.4.0"
 Flask-HTTPAuth = "^4.4.0"
 PyYAML = "^5.4.1"
 docker = "^5.0.0"
-redisbench-admin = "^0.6.12"
+redisbench-admin = "^0.6.21"
 #redisbench-admin = {path = "../redisbench-admin", develop = true}
 psutil = "^5.8.0"
 tox-docker = "^3.1.0"
diff --git a/redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py b/redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py
index 3153569d..607cc11b 100644
--- a/redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py
+++ b/redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py
@@ -18,11 +18,13 @@
     get_start_time_vars,
     prepare_benchmark_parameters,
 )
+from redisbench_admin.utils.benchmark_config import (
+    get_final_benchmark_config,
+)
 from redisbench_admin.run.redistimeseries import timeseries_test_sucess_flow
 from redisbench_admin.run.run import calculate_client_tool_duration_and_check
 from redisbench_admin.utils.benchmark_config import (
     extract_redis_dbconfig_parameters,
-    get_final_benchmark_config,
 )
 from redisbench_admin.utils.local import get_local_run_full_filename
 from redisbench_admin.utils.results import post_process_benchmark_results
@@ -311,318 +313,347 @@ def process_self_contained_coordinator_stream(
     topologies_map,
     running_platform,
 ):
-    stream_id, testDetails = newTestInfo[0][1][0]
-    stream_id = stream_id.decode()
-    logging.info("Received work . Stream id {}.".format(stream_id))
+    stream_id = "n/a"
     overall_result = False
     total_test_suite_runs = 0
-
-    if b"git_hash" in testDetails:
-        (
-            build_variant_name,
-            metadata,
-            build_artifacts,
-            git_hash,
-            git_branch,
-            git_version,
-            run_image,
-            use_git_timestamp,
-            git_timestamp_ms,
-        ) = extract_build_info_from_streamdata(testDetails)
-
-        overall_result = True
-        for test_file in testsuite_spec_files:
-            redis_containers = []
-            client_containers = []
-
-            with open(test_file, "r") as stream:
-                benchmark_config, test_name = get_final_benchmark_config(
-                    None, stream, ""
-                )
-
-            (
-                _,
-                _,
-                redis_configuration_parameters,
-                _,
-            ) = extract_redis_dbconfig_parameters(benchmark_config, "dbconfig")
-            build_variants = extract_build_variant_variations(benchmark_config)
-            if build_variants is not None:
-                logging.info("Detected build variant filter")
-                if build_variant_name not in build_variants:
+    try:
+        stream_id, testDetails = newTestInfo[0][1][0]
+        stream_id = stream_id.decode()
+        logging.info("Received work . Stream id {}.".format(stream_id))
+
+        if b"git_hash" in testDetails:
+            (
+                build_variant_name,
+                metadata,
+                build_artifacts,
+                git_hash,
+                git_branch,
+                git_version,
+                run_image,
+                use_git_timestamp,
+                git_timestamp_ms,
+            ) = extract_build_info_from_streamdata(testDetails)
+
+            overall_result = True
+            for test_file in testsuite_spec_files:
+                redis_containers = []
+                client_containers = []
+
+                with open(test_file, "r") as stream:
+                    result, benchmark_config, test_name = get_final_benchmark_config(
+                        None, stream, ""
+                    )
+                    if result is False:
                         logging.error(
-                    "Skipping {} given it's not part of build-variants for this test-suite {}".format(
-                        build_variant_name, build_variants
+                            "Skipping {} given there were errors while calling get_final_benchmark_config()".format(
+                                test_file
                             )
                         )
                         continue
-                else:
-                    logging.error(
-                        "Running build variant {} given it's present on the build-variants spec {}".format(
-                            build_variant_name, build_variants
+                (
+                    _,
+                    _,
+                    redis_configuration_parameters,
+                    _,
+                    _,
+                ) = extract_redis_dbconfig_parameters(benchmark_config, "dbconfig")
+                build_variants = extract_build_variant_variations(benchmark_config)
+                if build_variants is not None:
+                    logging.info("Detected build variant filter")
+                    if build_variant_name not in build_variants:
+                        logging.error(
+                            "Skipping {} given it's not part of build-variants for this test-suite {}".format(
+                                build_variant_name, build_variants
+                            )
                         )
-            for topology_spec_name in benchmark_config["redis-topologies"]:
-                test_result = False
-                try:
-                    current_cpu_pos = 0
-                    ceil_db_cpu_limit = extract_db_cpu_limit(
-                        topologies_map, topology_spec_name
-                    )
-                    temporary_dir = tempfile.mkdtemp(dir=home)
-                    temporary_dir_client = tempfile.mkdtemp(dir=home)
-                    logging.info(
-                        "Using local temporary dir to persist redis build artifacts. Path: {}".format(
Path: {}".format( - temporary_dir + continue + else: + logging.error( + "Running build variant {} given it's present on the build-variants spec {}".format( + build_variant_name, build_variants + ) ) - ) - tf_github_org = "redis" - tf_github_repo = "redis" - tf_triggering_env = "ci" - - restore_build_artifacts_from_test_details( - build_artifacts, conn, temporary_dir, testDetails - ) - port = 6379 - mnt_point = "/mnt/redis/" - command = generate_standalone_redis_server_args( - "{}redis-server".format(mnt_point), - port, - mnt_point, - redis_configuration_parameters, - ) - command_str = " ".join(command) - db_cpuset_cpus, current_cpu_pos = generate_cpuset_cpus( - ceil_db_cpu_limit, current_cpu_pos - ) - logging.info( - "Running redis-server on docker image {} (cpuset={}) with the following args: {}".format( - run_image, db_cpuset_cpus, command_str + for topology_spec_name in benchmark_config["redis-topologies"]: + test_result = False + try: + current_cpu_pos = 0 + ceil_db_cpu_limit = extract_db_cpu_limit( + topologies_map, topology_spec_name ) - ) - container = docker_client.containers.run( - image=run_image, - volumes={ - temporary_dir: {"bind": mnt_point, "mode": "rw"}, - }, - auto_remove=True, - privileged=True, - working_dir=mnt_point, - command=command_str, - network_mode="host", - detach=True, - cpuset_cpus=db_cpuset_cpus, - ) - redis_containers.append(container) - r = redis.StrictRedis(port=6379) - r.ping() - ceil_client_cpu_limit = extract_client_cpu_limit( - benchmark_config - ) - client_cpuset_cpus, current_cpu_pos = generate_cpuset_cpus( - ceil_client_cpu_limit, current_cpu_pos - ) - client_mnt_point = "/mnt/client/" - benchmark_tool_workdir = client_mnt_point + temporary_dir = tempfile.mkdtemp(dir=home) + temporary_dir_client = tempfile.mkdtemp(dir=home) + logging.info( + "Using local temporary dir to persist redis build artifacts. 
Path: {}".format( + temporary_dir + ) + ) + tf_github_org = "redis" + tf_github_repo = "redis" + tf_triggering_env = "ci" - if "preload_tool" in benchmark_config["dbconfig"]: - data_prepopulation_step( - benchmark_config, - benchmark_tool_workdir, - client_cpuset_cpus, - docker_client, - git_hash, + restore_build_artifacts_from_test_details( + build_artifacts, conn, temporary_dir, testDetails + ) + port = 6379 + mnt_point = "/mnt/redis/" + command = generate_standalone_redis_server_args( + "{}redis-server".format(mnt_point), port, - temporary_dir, - test_name, + mnt_point, + redis_configuration_parameters, + ) + command_str = " ".join(command) + db_cpuset_cpus, current_cpu_pos = generate_cpuset_cpus( + ceil_db_cpu_limit, current_cpu_pos ) + logging.info( + "Running redis-server on docker image {} (cpuset={}) with the following args: {}".format( + run_image, db_cpuset_cpus, command_str + ) + ) + container = docker_client.containers.run( + image=run_image, + volumes={ + temporary_dir: {"bind": mnt_point, "mode": "rw"}, + }, + auto_remove=True, + privileged=True, + working_dir=mnt_point, + command=command_str, + network_mode="host", + detach=True, + cpuset_cpus=db_cpuset_cpus, + ) + redis_containers.append(container) + r = redis.StrictRedis(port=6379) + r.ping() + ceil_client_cpu_limit = extract_client_cpu_limit( + benchmark_config + ) + client_cpuset_cpus, current_cpu_pos = generate_cpuset_cpus( + ceil_client_cpu_limit, current_cpu_pos + ) + client_mnt_point = "/mnt/client/" + benchmark_tool_workdir = client_mnt_point + + if "preload_tool" in benchmark_config["dbconfig"]: + data_prepopulation_step( + benchmark_config, + benchmark_tool_workdir, + client_cpuset_cpus, + docker_client, + git_hash, + port, + temporary_dir, + test_name, + ) - benchmark_tool = extract_client_tool(benchmark_config) - # backwards compatible - if benchmark_tool is None: - benchmark_tool = "redis-benchmark" - full_benchmark_path = "/usr/local/bin/{}".format(benchmark_tool) - - # setup the benchmark - ( - start_time, - start_time_ms, - start_time_str, - ) = get_start_time_vars() - local_benchmark_output_filename = get_local_run_full_filename( - start_time_str, - git_hash, - test_name, - "oss-standalone", - ) - logging.info( - "Will store benchmark json output to local file {}".format( - local_benchmark_output_filename + benchmark_tool = extract_client_tool(benchmark_config) + # backwards compatible + if benchmark_tool is None: + benchmark_tool = "redis-benchmark" + full_benchmark_path = "/usr/local/bin/{}".format( + benchmark_tool ) - ) - if "memtier_benchmark" not in benchmark_tool: - # prepare the benchmark command + + # setup the benchmark ( - benchmark_command, - benchmark_command_str, - ) = prepare_benchmark_parameters( - benchmark_config, - full_benchmark_path, - port, - "localhost", - local_benchmark_output_filename, - False, - benchmark_tool_workdir, - False, + start_time, + start_time_ms, + start_time_str, + ) = get_start_time_vars() + local_benchmark_output_filename = ( + get_local_run_full_filename( + start_time_str, + git_hash, + test_name, + "oss-standalone", + ) ) - else: - ( - _, - benchmark_command_str, - ) = prepare_memtier_benchmark_parameters( - benchmark_config["clientconfig"], - full_benchmark_path, - port, - "localhost", - local_benchmark_output_filename, - benchmark_tool_workdir, + logging.info( + "Will store benchmark json output to local file {}".format( + local_benchmark_output_filename + ) ) + if "memtier_benchmark" not in benchmark_tool: + # prepare the benchmark command + ( + 
+                                benchmark_command,
+                                benchmark_command_str,
+                            ) = prepare_benchmark_parameters(
+                                benchmark_config,
+                                full_benchmark_path,
+                                port,
+                                "localhost",
+                                local_benchmark_output_filename,
+                                False,
+                                benchmark_tool_workdir,
+                                False,
+                            )
+                        else:
+                            (
+                                _,
+                                benchmark_command_str,
+                            ) = prepare_memtier_benchmark_parameters(
+                                benchmark_config["clientconfig"],
+                                full_benchmark_path,
+                                port,
+                                "localhost",
+                                local_benchmark_output_filename,
+                                benchmark_tool_workdir,
+                            )
-                    client_container_image = extract_client_container_image(
-                        benchmark_config
-                    )
-                    logging.info(
-                        "Using docker image {} as benchmark client image (cpuset={}) with the following args: {}".format(
-                            client_container_image,
-                            client_cpuset_cpus,
-                            benchmark_command_str,
+                        client_container_image = extract_client_container_image(
+                            benchmark_config
                         )
-                    )
-                    # run the benchmark
-                    benchmark_start_time = datetime.datetime.now()
-
-                    client_container_stdout = docker_client.containers.run(
-                        image=client_container_image,
-                        volumes={
-                            temporary_dir_client: {
-                                "bind": client_mnt_point,
-                                "mode": "rw",
+                        logging.info(
+                            "Using docker image {} as benchmark client image (cpuset={}) with the following args: {}".format(
+                                client_container_image,
+                                client_cpuset_cpus,
+                                benchmark_command_str,
+                            )
+                        )
+                        # run the benchmark
+                        benchmark_start_time = datetime.datetime.now()
+
+                        client_container_stdout = docker_client.containers.run(
+                            image=client_container_image,
+                            volumes={
+                                temporary_dir_client: {
+                                    "bind": client_mnt_point,
+                                    "mode": "rw",
+                                },
                             },
-                        },
-                        auto_remove=False,
-                        privileged=True,
-                        working_dir=benchmark_tool_workdir,
-                        command=benchmark_command_str,
-                        network_mode="host",
-                        detach=False,
-                        cpuset_cpus=client_cpuset_cpus,
-                    )
+                            auto_remove=False,
+                            privileged=True,
+                            working_dir=benchmark_tool_workdir,
+                            command=benchmark_command_str,
+                            network_mode="host",
+                            detach=False,
+                            cpuset_cpus=client_cpuset_cpus,
+                        )
-                    benchmark_end_time = datetime.datetime.now()
-                    benchmark_duration_seconds = (
-                        calculate_client_tool_duration_and_check(
-                            benchmark_end_time, benchmark_start_time
+                        benchmark_end_time = datetime.datetime.now()
+                        benchmark_duration_seconds = (
+                            calculate_client_tool_duration_and_check(
+                                benchmark_end_time, benchmark_start_time
+                            )
                         )
-                    )
-                    logging.info("output {}".format(client_container_stdout))
-                    r.shutdown(save=False)
-                    datapoint_time_ms = start_time_ms
-                    if use_git_timestamp is True and git_timestamp_ms is not None:
-                        datapoint_time_ms = git_timestamp_ms
-                    post_process_benchmark_results(
-                        benchmark_tool,
-                        local_benchmark_output_filename,
-                        datapoint_time_ms,
-                        start_time_str,
-                        client_container_stdout,
-                        None,
-                    )
-                    full_result_path = local_benchmark_output_filename
-                    if "memtier_benchmark" in benchmark_tool:
-                        full_result_path = "{}/{}".format(
-                            temporary_dir_client, local_benchmark_output_filename
+                        logging.info("output {}".format(client_container_stdout))
+                        r.shutdown(save=False)
+                        datapoint_time_ms = start_time_ms
+                        if (
+                            use_git_timestamp is True
+                            and git_timestamp_ms is not None
+                        ):
+                            datapoint_time_ms = git_timestamp_ms
+                        post_process_benchmark_results(
+                            benchmark_tool,
+                            local_benchmark_output_filename,
+                            datapoint_time_ms,
+                            start_time_str,
+                            client_container_stdout,
+                            None,
+                        )
+                        full_result_path = local_benchmark_output_filename
+                        if "memtier_benchmark" in benchmark_tool:
+                            full_result_path = "{}/{}".format(
+                                temporary_dir_client,
+                                local_benchmark_output_filename,
+                            )
+                        logging.critical(
+                            "Reading results json from {}".format(full_result_path)
                         )
-                    logging.critical(
-                        "Reading results json from {}".format(full_result_path)
-                    )
-
-                    with open(
-                        full_result_path,
-                        "r",
-                    ) as json_file:
-                        results_dict = json.load(json_file)
-                    logging.info("Final JSON result {}".format(results_dict))
-                    dataset_load_duration_seconds = 0
-                    logging.error(
-                        "Using datapoint_time_ms: {}".format(datapoint_time_ms)
-                    )
+                        with open(
+                            full_result_path,
+                            "r",
+                        ) as json_file:
+                            results_dict = json.load(json_file)
+                        logging.info(
+                            "Final JSON result {}".format(results_dict)
+                        )
+                        dataset_load_duration_seconds = 0
-                    timeseries_test_sucess_flow(
-                        datasink_push_results_redistimeseries,
-                        git_version,
-                        benchmark_config,
-                        benchmark_duration_seconds,
-                        dataset_load_duration_seconds,
-                        None,
-                        topology_spec_name,
-                        "oss-standalone",
-                        None,
-                        results_dict,
-                        rts,
-                        datapoint_time_ms,
-                        test_name,
-                        git_branch,
-                        tf_github_org,
-                        tf_github_repo,
-                        tf_triggering_env,
-                        metadata,
-                        build_variant_name,
-                        running_platform,
-                    )
-                    test_result = True
-                    total_test_suite_runs = total_test_suite_runs + 1
+                        logging.error(
+                            "Using datapoint_time_ms: {}".format(datapoint_time_ms)
+                        )
-                except:
-                    logging.critical(
-                        "Some unexpected exception was caught "
-                        "during local work. Failing test...."
-                    )
-                    logging.critical(sys.exc_info()[0])
-                    print("-" * 60)
-                    traceback.print_exc(file=sys.stdout)
-                    print("-" * 60)
-                    test_result = False
-                # tear-down
-                logging.info("Tearing down setup")
-                for container in redis_containers:
-                    try:
-                        container.stop()
-                    except docker.errors.NotFound:
-                        logging.info(
-                            "When trying to stop DB container with id {} and image {} it was already stopped".format(
-                                container.id, container.image
-                            )
+                        timeseries_test_sucess_flow(
+                            datasink_push_results_redistimeseries,
+                            git_version,
+                            benchmark_config,
+                            benchmark_duration_seconds,
+                            dataset_load_duration_seconds,
+                            None,
+                            topology_spec_name,
+                            "oss-standalone",
+                            None,
+                            results_dict,
+                            rts,
+                            datapoint_time_ms,
+                            test_name,
+                            git_branch,
+                            tf_github_org,
+                            tf_github_repo,
+                            tf_triggering_env,
+                            metadata,
+                            build_variant_name,
+                            running_platform,
                         )
-                        pass
+                        test_result = True
+                        total_test_suite_runs = total_test_suite_runs + 1
-                for container in client_containers:
-                    if type(container) == Container:
+                    except:
+                        logging.critical(
+                            "Some unexpected exception was caught "
+                            "during local work. Failing test...."
+                        )
+                        logging.critical(sys.exc_info()[0])
+                        print("-" * 60)
+                        traceback.print_exc(file=sys.stdout)
+                        print("-" * 60)
+                        test_result = False
+                    # tear-down
+                    logging.info("Tearing down setup")
+                    for container in redis_containers:
                         try:
                             container.stop()
                         except docker.errors.NotFound:
                             logging.info(
-                                "When trying to stop Client container with id {} and image {} it was already stopped".format(
+                                "When trying to stop DB container with id {} and image {} it was already stopped".format(
                                     container.id, container.image
                                 )
                             )
                             pass
-                shutil.rmtree(temporary_dir, ignore_errors=True)
-                overall_result &= test_result
+                    for container in client_containers:
+                        if type(container) == Container:
+                            try:
+                                container.stop()
+                            except docker.errors.NotFound:
+                                logging.info(
+                                    "When trying to stop Client container with id {} and image {} it was already stopped".format(
+                                        container.id, container.image
+                                    )
+                                )
+                                pass
+                    shutil.rmtree(temporary_dir, ignore_errors=True)
-    else:
-        logging.error("Missing commit information within received message.")
+                    overall_result &= test_result
+
+        else:
+            logging.error("Missing commit information within received message.")
+    except:
+        logging.critical(
+            "Some unexpected exception was caught "
+            "during local work on stream {}. Failing test....".format(stream_id)
Failing test....".format(stream_id) + ) + logging.critical(sys.exc_info()[0]) + print("-" * 60) + traceback.print_exc(file=sys.stdout) + print("-" * 60) + overall_result = False return stream_id, overall_result, total_test_suite_runs diff --git a/utils/tests/test_commands.py b/utils/tests/test_commands.py deleted file mode 100644 index d53914db..00000000 --- a/utils/tests/test_commands.py +++ /dev/null @@ -1,12 +0,0 @@ -import json - -from redis_benchmarks_specification.commands.commands import generate_command_groups - - -def test_generate_command_groups(): - with open( - "./redis_benchmarks_specification/setups/topologies/topologies.yml", "r" - ) as json_fd: - commands_json = json.load(json_fd) - command_groups = generate_command_groups(commands_json) - assert "server" in command_groups.keys() diff --git a/utils/tests/test_data/dump.rdb b/utils/tests/test_data/dump.rdb index a51ac607..2c8e78d9 100644 Binary files a/utils/tests/test_data/dump.rdb and b/utils/tests/test_data/dump.rdb differ