From be027678055a849300c019c5c1910131ae49cb89 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Fri, 26 Sep 2025 16:17:14 -0700 Subject: [PATCH 01/18] Checkpoint --- doc/performance-test-requirements.md | 79 +++ sdk/core/azure_core_test/Cargo.toml | 1 + sdk/core/azure_core_test/src/lib.rs | 5 +- sdk/core/azure_core_test/src/perf/README.md | 1 + sdk/core/azure_core_test/src/perf/mod.rs | 235 +++++++++ sdk/core/azure_core_test/src/perf/tests.rs | 548 ++++++++++++++++++++ 6 files changed, 868 insertions(+), 1 deletion(-) create mode 100644 doc/performance-test-requirements.md create mode 100644 sdk/core/azure_core_test/src/perf/README.md create mode 100644 sdk/core/azure_core_test/src/perf/mod.rs create mode 100644 sdk/core/azure_core_test/src/perf/tests.rs diff --git a/doc/performance-test-requirements.md b/doc/performance-test-requirements.md new file mode 100644 index 0000000000..6b0a3dd38b --- /dev/null +++ b/doc/performance-test-requirements.md @@ -0,0 +1,79 @@ +# Requirements for performance tests + +Each performance test consists of three phases: + +1) Warmup +1) Test operation +1) Cleanup + +## Common test inputs + +* Duration of the test in seconds +* Number of iterations of the main test loop +* Parallel - number of operations to execute in parallel +* Disable test cleanup +* Test Proxy servers. +* Results file - location to write test outputs +* Warmup - Duration of the warmup in seconds. +* TLS + * Allow untrusted TLS certificates +* Advanced options + * Print job statistics (?) + * Track latency and print per-operation latency statistics + * Target throughput (operations/second) (?) +* Language specific options + * Max I/O completion threads + * Minimum number of asynchronous I/O threads in the thread pool + * Minimum number of worker threads the thread pool creates on demand + * Sync - run a synchronous version of the test + +## Expected test outputs + +Each test is expected to generate the following elements: + +* Package Versions - a set of packages tested and their versions. +* Operations per second - Double precision float +* Standard Output of the test +* Standard Error of the test +* Exception - Text of any exceptions thrown during the test. +* Average CPU Use during the test - Double precision float. +* Average memory use during the test - Double precision float. + +## Perf Test Harness + +Each performance test defines a `get_metadata()` function which returns a `TestMetadata` structure. + +A `TestMetadata` structure contains the following fields + +```rust +pub struct TestMetadata { + name: &'static str + description: &'static str + options: &'static[&'static TestOption] +} +``` + +A `TestOptions` defines a set of options for the test which will be merged with the common test inputs to define the command line for the performance test. + +```rust +pub struct TestOption { + /// The name of the test option. This is used as the key in the `TestArguments` map. + name: &'static str, + + long_activator: &str, + + short_activator:&str, + + /// Display message - displayed in the --help message. + display_message: &[str], + + /// Expected argument count + expected_args_len: u16, + + /// Required + mandatory: bool, + + /// Argument value is sensitive and should be sanitized. 
+ sensitive: bool, +} +``` diff --git a/sdk/core/azure_core_test/Cargo.toml b/sdk/core/azure_core_test/Cargo.toml index 13dc3eb703..34c6cdf22b 100644 --- a/sdk/core/azure_core_test/Cargo.toml +++ b/sdk/core/azure_core_test/Cargo.toml @@ -23,6 +23,7 @@ async-trait.workspace = true azure_core = { workspace = true, features = ["test"] } azure_core_test_macros.workspace = true azure_identity.workspace = true +clap.workspace = true dotenvy = "0.15.7" futures.workspace = true rand.workspace = true diff --git a/sdk/core/azure_core_test/src/lib.rs b/sdk/core/azure_core_test/src/lib.rs index 76a936ca32..19a2cd23d1 100644 --- a/sdk/core/azure_core_test/src/lib.rs +++ b/sdk/core/azure_core_test/src/lib.rs @@ -7,6 +7,7 @@ pub mod credentials; #[cfg(doctest)] mod docs; pub mod http; +pub mod perf; pub mod proxy; pub mod recorded; mod recording; @@ -14,7 +15,7 @@ mod recording; mod root_readme; pub mod stream; pub mod tracing; - +use crate::perf::PerfRunner; use azure_core::Error; pub use azure_core::{error::ErrorKind, test::TestMode}; pub use proxy::{matchers::*, sanitizers::*}; @@ -36,6 +37,7 @@ pub struct TestContext { module_name: &'static str, name: &'static str, recording: Option, + _performance: Option, } impl TestContext { @@ -59,6 +61,7 @@ impl TestContext { module_name: test_module, name, recording: None, + _performance: None, }) } diff --git a/sdk/core/azure_core_test/src/perf/README.md b/sdk/core/azure_core_test/src/perf/README.md new file mode 100644 index 0000000000..09b20269d9 --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/README.md @@ -0,0 +1 @@ +# Performance Tests diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs new file mode 100644 index 0000000000..f3ca1c6a67 --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -0,0 +1,235 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#![doc = include_str!("README.md")] + +use clap::{parser::MatchesError, ArgMatches}; +use std::any::Any; + +/// Metadata about a performance test. +#[derive(Debug, Clone)] +pub struct TestMetadata { + /// The name of the test. + pub name: &'static str, + /// A brief description of the test. + pub description: &'static str, + /// The set of test options supported by this test. + pub options: &'static [&'static TestOption], +} + +/// #A `TestOptions` defines a set of options for the test which will be merged with the common test inputs to define the command line for the performance test. +#[derive(Debug, Default)] +pub struct TestOption { + /// The name of the test option. This is used as the key in the `TestArguments` map. + pub name: &'static str, + + /// The short form activator for this argument e.g., `-t`. Does not include the hyphen. + pub short_activator: char, + + /// The long form activator for this argument e.g., `--test-option`. Does not include the hyphens. + pub long_activator: &'static str, + + /// Display message - displayed in the --help message. + pub display_message: &'static str, + + /// Expected argument count + pub expected_args_len: usize, + + /// Required + pub mandatory: bool, + + /// Argument value is sensitive and should be sanitized. 
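+    /// For example, a credential such as an account key or connection string passed on
+    /// the command line should be marked sensitive so it is not echoed in logs or results.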
+ pub sensitive: bool, +} + +#[derive(Debug)] +#[allow(dead_code)] +struct PerfRunnerOptions { + no_cleanup: bool, + iterations: u32, + parallel: u32, + test: Option, + duration: u32, + warmup: u32, + test_results_filename: String, +} + +impl PerfRunnerOptions {} + +impl From<&ArgMatches> for PerfRunnerOptions { + fn from(matches: &ArgMatches) -> Self { + Self { + no_cleanup: matches.get_flag("no-cleanup"), + iterations: *matches + .get_one::("iterations") + .expect("defaulted by clap"), + parallel: *matches + .get_one::("parallel") + .expect("defaulted by clap"), + test: matches.get_one::("test").cloned(), + duration: *matches + .get_one::("duration") + .expect("defaulted by clap"), + warmup: *matches.get_one::("warmup").expect("defaulted by clap"), + test_results_filename: matches + .get_one::("test-results") + .expect("defaulted by clap") + .to_string(), + } + } +} + +/// Context information required by performance tests. +#[derive(Debug)] +pub struct PerfRunner { + options: PerfRunnerOptions, + arguments: ArgMatches, +} + +impl PerfRunner { + pub fn new(tests: Vec) -> azure_core::Result { + let command = Self::get_command_from_metadata(tests); + let arguments = command.get_matches(); + Ok(Self { + options: PerfRunnerOptions::from(&arguments), + arguments, + }) + } + + #[cfg(test)] + pub fn with_command_line( + tests: Vec, + args: Vec<&str>, + ) -> azure_core::Result { + let command = Self::get_command_from_metadata(tests); + let arguments = command.try_get_matches_from(args).map_err(|e| { + azure_core::error::Error::with_error( + azure_core::error::ErrorKind::Other, + e, + "Failed to parse command line arguments.", + ) + })?; + Ok(Self { + options: PerfRunnerOptions::from(&arguments), + arguments, + }) + } + + /// Gets a reference to a typed argument by its id. + pub fn try_get_one(&self, id: &str) -> Result, MatchesError> + where + T: Any + Clone + Send + Sync + 'static, + { + self.arguments.try_get_one::(id) + } + + pub fn try_get_one_subcommand( + &self, + subcommand: &str, + id: &str, + ) -> Result, MatchesError> + where + T: Any + Clone + Send + Sync + 'static, + { + let subcommand = self.arguments.subcommand_matches(subcommand); + if let Some(subcommand) = subcommand { + subcommand.try_get_one::(id) + } else { + Ok(None) + } + } + + #[allow(dead_code)] + async fn run_test(&self, test: F) -> azure_core::Result<()> + where + F: Fn(u32, u32) -> Fut, + Fut: std::future::Future>, + { + test(self.options.iterations, self.options.parallel).await + } + + // * Disable test cleanup + // * Test Proxy servers. + // * TLS + // * Allow untrusted TLS certificates + // * Advanced options + // * Print job statistics (?) + // * Track latency and print per-operation latency statistics + // * Target throughput (operations/second) (?) + // * Language specific options + // * Max I/O completion threads + // * Minimum number of asynchronous I/O threads in the thread pool + // * Minimum number of worker threads the thread pool creates on demand + // * Sync - run a synchronous version of the test + + /// Constructs a `clap::Command` from the provided test metadata. 
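+    ///
+    /// Global options such as `--iterations`, `--parallel`, `--duration`, and `--warmup`
+    /// apply to the runner as a whole, while each `TestMetadata` entry becomes its own
+    /// subcommand carrying the test-specific options. An illustrative invocation
+    /// (argument values chosen arbitrarily) would look like:
+    ///
+    /// ```text
+    /// perf-tests --parallel 4 --duration 30 basic_test --test-option value
+    /// ```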
+ fn get_command_from_metadata(tests: Vec) -> clap::Command { + let mut command = clap::Command::new("perf-tests") + .about("Run performance tests for the Azure SDK for Rust") + .arg( + clap::arg!(--iterations "The number of iterations to run each test") + .required(false) + .default_value("1") + .value_parser(clap::value_parser!(u32)) + .global(true), + ) + .arg( + clap::arg!(--parallel "The number of concurrent tasks to use when running each test") + .required(false) + .default_value("1") + .value_parser(clap::value_parser!(u32)) + .global(true), + ) + .arg( + clap::arg!(--test "The name of the test to run. If not specified, all tests will be run.") + .required(false) + .global(true), + ) + .arg( + clap::arg!(--duration "The duration of each test in seconds") + .required(false) + .default_value("30") + .value_parser(clap::value_parser!(u32)) + .global(true), + ) + .arg( + clap::arg!(--warmup "The duration of the warmup period in seconds") + .required(false) + .default_value("5") + .value_parser(clap::value_parser!(u32)) + .global(true), + ).arg( + clap::arg!(--"test-results" "The file to write test results to") + .required(false) + .default_value("./tests/results.json") + .global(true), + ) + .arg(clap::arg!(--"no-cleanup" "Disable test cleanup") + .required(false).global(true)) + ; + for test in &tests { + let mut subcommand = clap::Command::new(test.name).about(test.description); + for option in test.options { + let mut arg = clap::Arg::new(option.name) + .help(option.display_message) + .long(option.long_activator) + .num_args(option.expected_args_len..=option.expected_args_len) + .required(option.mandatory) + .global(false); + if option.short_activator != '\0' { + arg = arg.short(option.short_activator); + } + if option.sensitive { + arg = arg.hide(true); + } + subcommand = subcommand.arg(arg); + } + command = command.subcommand(subcommand); + } + + command + } +} + +#[cfg(test)] +mod tests; diff --git a/sdk/core/azure_core_test/src/perf/tests.rs b/sdk/core/azure_core_test/src/perf/tests.rs new file mode 100644 index 0000000000..7312eaf691 --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/tests.rs @@ -0,0 +1,548 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +use super::*; +use std::error::Error; + +// Helper function to create a basic test metadata for testing +fn create_basic_test_metadata() -> TestMetadata { + TestMetadata { + name: "basic_test", + description: "A basic test for testing purposes", + options: &[&TestOption { + name: "test-option", + short_activator: 't', + long_activator: "test-option", + display_message: "Test option for basic test", + expected_args_len: 1, + mandatory: false, + sensitive: false, + }], + } +} + +// Helper function to create test metadata with multiple options +fn create_complex_test_metadata() -> TestMetadata { + TestMetadata { + name: "complex_test", + description: "A complex test with multiple options", + options: &[ + &TestOption { + name: "mandatory-option", + short_activator: 'm', + long_activator: "mandatory", + display_message: "Mandatory option", + expected_args_len: 1, + mandatory: true, + sensitive: false, + }, + &TestOption { + name: "sensitive-option", + short_activator: 's', + long_activator: "sensitive", + display_message: "Sensitive option", + expected_args_len: 1, + mandatory: false, + sensitive: true, + }, + &TestOption { + name: "flag-option", + short_activator: 'f', + long_activator: "flag", + display_message: "Flag option", + expected_args_len: 0, + mandatory: false, + sensitive: false, + }, + ], + } +} + +// Helper function to create test metadata without short activators +fn create_no_short_activator_test_metadata() -> TestMetadata { + TestMetadata { + name: "no_short_test", + description: "Test without short activators", + options: &[&TestOption { + name: "long-only", + short_activator: '\0', + long_activator: "long-only", + display_message: "Long activator only", + expected_args_len: 1, + mandatory: false, + sensitive: false, + }], + } +} + +#[test] +fn test_perf_runner_new_with_empty_tests() { + let tests = vec![]; + let result = PerfRunner::with_command_line(tests, vec!["perf-tests"]); + + assert!( + result.is_ok(), + "PerfRunner::new should succeed with empty tests" + ); + let runner = result.unwrap(); + + // Test default values + assert_eq!(runner.options.iterations, 1); + assert_eq!(runner.options.parallel, 1); + assert_eq!(runner.options.duration, 30); + assert_eq!(runner.options.warmup, 5); + assert_eq!(runner.options.test_results_filename, "./tests/results.json"); + assert!(!runner.options.no_cleanup); + assert!(runner.options.test.is_none()); +} + +#[test] +fn test_perf_runner_new_with_single_test() { + let tests = vec![create_basic_test_metadata()]; + let result = PerfRunner::with_command_line(tests, vec!["perf-tests"]); + + assert!( + result.is_ok(), + "PerfRunner::new should succeed with single test" + ); + let runner = result.unwrap(); + + // Verify default values are set + assert_eq!(runner.options.iterations, 1); + assert_eq!(runner.options.parallel, 1); + assert_eq!(runner.options.duration, 30); + assert_eq!(runner.options.warmup, 5); +} + +#[test] +fn test_perf_runner_new_with_multiple_tests() { + let tests = vec![ + create_basic_test_metadata(), + create_complex_test_metadata(), + create_no_short_activator_test_metadata(), + ]; + let result = PerfRunner::with_command_line(tests, vec!["perf-tests"]); + + assert!( + result.is_ok(), + "PerfRunner::new should succeed with multiple tests" + ); + let _runner = result.unwrap(); +} + +#[test] +fn test_perf_runner_with_command_line_default_args() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + 
"PerfRunner::with_command_line should succeed with default args" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.iterations, 1); + assert_eq!(runner.options.parallel, 1); + assert_eq!(runner.options.duration, 30); + assert_eq!(runner.options.warmup, 5); + assert!(!runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_with_command_line_custom_iterations() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "10"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom iterations" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.iterations, 10); +} + +#[test] +fn test_perf_runner_with_command_line_custom_parallel() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--parallel", "5"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom parallel" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.parallel, 5); +} + +#[test] +fn test_perf_runner_with_command_line_custom_duration() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--duration", "60"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom duration" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.duration, 60); +} + +#[test] +fn test_perf_runner_with_command_line_custom_warmup() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--warmup", "10"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom warmup" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.warmup, 10); +} + +#[test] +fn test_perf_runner_with_command_line_test_results_file() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--test-results", "/tmp/results.json"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom test results file" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.test_results_filename, "/tmp/results.json"); +} + +#[test] +fn test_perf_runner_with_command_line_no_cleanup() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--no-cleanup"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with no-cleanup flag" + ); + + let runner = result.unwrap(); + assert!(runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_with_command_line_test_name() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--test", "my_test"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with test name" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.test.as_ref().unwrap(), "my_test"); +} + +#[test] +fn test_perf_runner_with_command_line_all_options() { + let tests = vec![create_basic_test_metadata()]; + let args = vec![ + "perf-tests", + "--iterations", + "20", + "--parallel", + "8", + "--duration", + "120", + "--warmup", + "15", + 
"--test-results", + "/custom/results.json", + "--test", + "specific_test", + "--no-cleanup", + ]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with all options" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.iterations, 20); + assert_eq!(runner.options.parallel, 8); + assert_eq!(runner.options.duration, 120); + assert_eq!(runner.options.warmup, 15); + assert_eq!(runner.options.test_results_filename, "/custom/results.json"); + assert_eq!(runner.options.test.as_ref().unwrap(), "specific_test"); + assert!(runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_command_line_help() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--help"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_err(), + "PerfRunner::with_command_line should fail with help flag" + ); + + println!("{}", result.as_ref().err().unwrap().source().unwrap()); + + let error = result.err().unwrap(); + assert_eq!(error.kind(), &azure_core::error::ErrorKind::Other); + assert!(error.to_string().contains("Failed to parse")); +} + +#[test] +fn test_perf_runner_with_subcommand() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "basic_test", "--test-option", "value"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with subcommand" + ); + + let runner = result.unwrap(); + let option_value: Option<&String> = runner + .try_get_one_subcommand("basic_test", "test-option") + .ok() + .flatten(); + assert!(option_value.is_some()); + assert_eq!(option_value.unwrap(), "value"); +} + +#[test] +fn test_perf_runner_with_subcommand_short_activator() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "basic_test", "-t", "short_value"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with short activator" + ); + + let runner = result.unwrap(); + let option_value: Option<&String> = runner + .try_get_one_subcommand("basic_test", "test-option") + .ok() + .flatten(); + assert!(option_value.is_some()); + assert_eq!(option_value.unwrap(), "short_value"); +} + +#[test] +fn test_perf_runner_with_complex_subcommand() { + let tests = vec![create_complex_test_metadata()]; + let args = vec![ + "perf-tests", + "complex_test", + "--mandatory", + "required_value", + "--sensitive", + "secret_value", + "--flag", + ]; + + println!( + "Help: {}", + PerfRunner::with_command_line(tests.clone(), vec!["perf-tests", "--help"]) + .unwrap_err() + .source() + .unwrap() + ); + println!( + "Help2 : {}", + PerfRunner::with_command_line(tests.clone(), vec!["perf-tests", "complex_test", "--help"]) + .unwrap_err() + .source() + .unwrap() + ); + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with complex subcommand" + ); + + let runner = result.unwrap(); + + let mandatory_value: Result, _> = + runner.try_get_one_subcommand("complex_test", "mandatory-option"); + println!("{:?}", mandatory_value); + assert!(mandatory_value.is_ok()); + let mandatory_value = mandatory_value.unwrap(); + assert!(mandatory_value.is_some()); + assert_eq!(mandatory_value.unwrap(), "required_value"); + + let sensitive_value: Option<&String> = runner + .try_get_one_subcommand("complex_test", 
"sensitive-option") + .ok() + .flatten(); + assert!(sensitive_value.is_some()); + assert_eq!(sensitive_value.unwrap(), "secret_value"); + + let flag_value = runner + .try_get_one_subcommand("complex_test", "flag-option") + .ok() + .flatten(); + assert!(flag_value.is_some()); + let flag_value: bool = *flag_value.unwrap(); + assert!(flag_value); +} + +#[test] +fn test_perf_runner_with_no_short_activator() { + let tests = vec![create_no_short_activator_test_metadata()]; + let args = vec!["perf-tests", "no_short_test", "--long-only", "value"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with long-only activator" + ); + + let runner = result.unwrap(); + let option_value: Option<&String> = runner + .try_get_one_subcommand("no_short_test", "long-only") + .ok() + .flatten(); + assert!(option_value.is_some()); + assert_eq!(option_value.unwrap(), "value"); +} + +#[test] +fn test_perf_runner_get_one_nonexistent() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests"]; + + let runner = PerfRunner::with_command_line(tests, args).unwrap(); + let result: Result, _> = runner.try_get_one("nonexistent"); + assert!(result.is_err()); +} + +#[test] +fn test_perf_runner_get_one_different_types() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "42"]; + + let runner = PerfRunner::with_command_line(tests, args).unwrap(); + + // Test getting u32 value + let iterations: Option<&u32> = runner.try_get_one("iterations").ok().flatten(); + assert!(iterations.is_some()); + assert_eq!(*iterations.unwrap(), 42); + + // Test getting wrong type returns None + let iterations_as_string: Option<&String> = runner.try_get_one("iterations").ok().flatten(); + assert!(iterations_as_string.is_none()); +} + +#[test] +fn test_perf_runner_options_debug() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "5"]; + + let runner = PerfRunner::with_command_line(tests, args).unwrap(); + + // Test that Debug is implemented for PerfRunner + let debug_output = format!("{:?}", runner); + assert!(debug_output.contains("PerfRunner")); + assert!(debug_output.contains("options")); + + // Test that PerfRunnerOptions Debug works + let options_debug = format!("{:?}", runner.options); + assert!(options_debug.contains("PerfRunnerOptions")); + assert!(options_debug.contains("iterations: 5")); + + let options = PerfRunnerOptions::from(&runner.arguments); + assert_eq!(options.iterations, 5); +} + +#[test] +fn test_test_option_debug_and_default() { + let option = TestOption::default(); + + // Test default values + assert_eq!(option.name, ""); + assert_eq!(option.short_activator, '\0'); + assert_eq!(option.long_activator, ""); + assert_eq!(option.display_message, ""); + assert_eq!(option.expected_args_len, 0); + assert!(!option.mandatory); + assert!(!option.sensitive); + + // Test Debug implementation + let debug_output = format!("{:?}", option); + assert!(debug_output.contains("TestOption")); +} + +#[test] +fn test_perf_runner_with_invalid_numeric_value() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "not_a_number"]; + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_err(), + "PerfRunner::with_command_line should fail with invalid numeric value" + ); +} + +#[test] +fn test_perf_runner_with_missing_mandatory_option() { + let tests = 
vec![create_complex_test_metadata()]; + let args = vec!["perf-tests", "complex_test"]; // Missing mandatory option + + let result = PerfRunner::with_command_line(tests, args); + assert!( + result.is_err(), + "PerfRunner::with_command_line should fail with missing mandatory option" + ); +} + +#[test] +fn test_perf_runner_with_multiple_tests_and_subcommands() { + let tests = vec![create_basic_test_metadata(), create_complex_test_metadata()]; + + // Test with first subcommand + let args = vec!["perf-tests", "basic_test", "--test-option", "value1"]; + let result = PerfRunner::with_command_line(tests.clone(), args); + assert!(result.is_ok()); + + let runner = result.unwrap(); + let option_value: Option<&String> = runner + .try_get_one_subcommand("basic_test", "test-option") + .ok() + .flatten(); + assert_eq!(option_value.unwrap(), "value1"); + + // Test with second subcommand + let args = vec!["perf-tests", "complex_test", "--mandatory", "required"]; + let result = PerfRunner::with_command_line(tests, args); + assert!(result.is_ok()); + + let runner = result.unwrap(); + let mandatory_value: Option<&String> = runner + .try_get_one_subcommand("complex_test", "mandatory-option") + .ok() + .flatten(); + assert_eq!(mandatory_value.unwrap(), "required"); +} From fde683f6a6d0692275aedf7787f8090512f95bff Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Wed, 1 Oct 2025 16:12:32 -0700 Subject: [PATCH 02/18] Test structure works; parallel doesn't --- sdk/core/azure_core_test/src/lib.rs | 3 - .../src/perf/{tests.rs => config_tests.rs} | 340 ++++++++++++------ .../src/perf/framework_tests.rs | 111 ++++++ sdk/core/azure_core_test/src/perf/mod.rs | 262 +++++++++++--- 4 files changed, 556 insertions(+), 160 deletions(-) rename sdk/core/azure_core_test/src/perf/{tests.rs => config_tests.rs} (56%) create mode 100644 sdk/core/azure_core_test/src/perf/framework_tests.rs diff --git a/sdk/core/azure_core_test/src/lib.rs b/sdk/core/azure_core_test/src/lib.rs index 19a2cd23d1..752f10731b 100644 --- a/sdk/core/azure_core_test/src/lib.rs +++ b/sdk/core/azure_core_test/src/lib.rs @@ -15,7 +15,6 @@ mod recording; mod root_readme; pub mod stream; pub mod tracing; -use crate::perf::PerfRunner; use azure_core::Error; pub use azure_core::{error::ErrorKind, test::TestMode}; pub use proxy::{matchers::*, sanitizers::*}; @@ -37,7 +36,6 @@ pub struct TestContext { module_name: &'static str, name: &'static str, recording: Option, - _performance: Option, } impl TestContext { @@ -61,7 +59,6 @@ impl TestContext { module_name: test_module, name, recording: None, - _performance: None, }) } diff --git a/sdk/core/azure_core_test/src/perf/tests.rs b/sdk/core/azure_core_test/src/perf/config_tests.rs similarity index 56% rename from sdk/core/azure_core_test/src/perf/tests.rs rename to sdk/core/azure_core_test/src/perf/config_tests.rs index 7312eaf691..b2571bb6a1 100644 --- a/sdk/core/azure_core_test/src/perf/tests.rs +++ b/sdk/core/azure_core_test/src/perf/config_tests.rs @@ -1,15 +1,30 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +//! Tests for configuration of the performance test runner. +//! +//! These tests cover various scenarios for initializing the `PerfRunner` with different sets of +//! command-line arguments and test metadata. They ensure that the runner correctly parses +//! arguments, handles defaults, and manages errors appropriately. +//! 
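+//! Most tests here follow the same pattern: build a `PerfRunner` from a synthetic
+//! command line with `with_command_line` and assert on the parsed options. A minimal
+//! sketch (using the helper defined below):
+//!
+//! ```ignore
+//! let runner = PerfRunner::with_command_line(
+//!     env!("CARGO_MANIFEST_DIR"),
+//!     file!(),
+//!     vec![create_basic_test_metadata()],
+//!     vec!["perf-tests", "--iterations", "10"],
+//! )?;
+//! assert_eq!(runner.options.iterations, 10);
+//! ```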
use super::*; -use std::error::Error; +use std::{env, error::Error}; + +fn create_failed_test(_runner: &PerfRunner) -> CreatePerfTestReturn { + Box::pin(async { + Err(azure_core::Error::with_message( + azure_core::error::ErrorKind::Other, + "Intentional failure to create test instance", + )) + }) +} // Helper function to create a basic test metadata for testing fn create_basic_test_metadata() -> TestMetadata { TestMetadata { name: "basic_test", description: "A basic test for testing purposes", - options: &[&TestOption { + options: vec![TestOption { name: "test-option", short_activator: 't', long_activator: "test-option", @@ -18,6 +33,7 @@ fn create_basic_test_metadata() -> TestMetadata { mandatory: false, sensitive: false, }], + create_test: create_failed_test, } } @@ -26,8 +42,8 @@ fn create_complex_test_metadata() -> TestMetadata { TestMetadata { name: "complex_test", description: "A complex test with multiple options", - options: &[ - &TestOption { + options: vec![ + TestOption { name: "mandatory-option", short_activator: 'm', long_activator: "mandatory", @@ -36,7 +52,7 @@ fn create_complex_test_metadata() -> TestMetadata { mandatory: true, sensitive: false, }, - &TestOption { + TestOption { name: "sensitive-option", short_activator: 's', long_activator: "sensitive", @@ -45,16 +61,15 @@ fn create_complex_test_metadata() -> TestMetadata { mandatory: false, sensitive: true, }, - &TestOption { + TestOption { name: "flag-option", short_activator: 'f', long_activator: "flag", display_message: "Flag option", - expected_args_len: 0, - mandatory: false, - sensitive: false, + ..Default::default() }, ], + create_test: create_failed_test, } } @@ -63,7 +78,7 @@ fn create_no_short_activator_test_metadata() -> TestMetadata { TestMetadata { name: "no_short_test", description: "Test without short activators", - options: &[&TestOption { + options: vec![TestOption { name: "long-only", short_activator: '\0', long_activator: "long-only", @@ -72,13 +87,19 @@ fn create_no_short_activator_test_metadata() -> TestMetadata { mandatory: false, sensitive: false, }], + create_test: create_failed_test, } } #[test] fn test_perf_runner_new_with_empty_tests() { let tests = vec![]; - let result = PerfRunner::with_command_line(tests, vec!["perf-tests"]); + let result = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests, + vec!["perf-tests"], + ); assert!( result.is_ok(), @@ -89,17 +110,21 @@ fn test_perf_runner_new_with_empty_tests() { // Test default values assert_eq!(runner.options.iterations, 1); assert_eq!(runner.options.parallel, 1); - assert_eq!(runner.options.duration, 30); - assert_eq!(runner.options.warmup, 5); + assert_eq!(runner.options.duration, Duration::seconds(30)); + assert_eq!(runner.options.warmup, Duration::seconds(5)); assert_eq!(runner.options.test_results_filename, "./tests/results.json"); assert!(!runner.options.no_cleanup); - assert!(runner.options.test.is_none()); } #[test] fn test_perf_runner_new_with_single_test() { let tests = vec![create_basic_test_metadata()]; - let result = PerfRunner::with_command_line(tests, vec!["perf-tests"]); + let result = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests, + vec!["perf-tests"], + ); assert!( result.is_ok(), @@ -110,8 +135,8 @@ fn test_perf_runner_new_with_single_test() { // Verify default values are set assert_eq!(runner.options.iterations, 1); assert_eq!(runner.options.parallel, 1); - assert_eq!(runner.options.duration, 30); - assert_eq!(runner.options.warmup, 5); + 
assert_eq!(runner.options.duration, Duration::seconds(30)); + assert_eq!(runner.options.warmup, Duration::seconds(5)); } #[test] @@ -121,7 +146,12 @@ fn test_perf_runner_new_with_multiple_tests() { create_complex_test_metadata(), create_no_short_activator_test_metadata(), ]; - let result = PerfRunner::with_command_line(tests, vec!["perf-tests"]); + let result = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests, + vec!["perf-tests"], + ); assert!( result.is_ok(), @@ -135,7 +165,7 @@ fn test_perf_runner_with_command_line_default_args() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with default args" @@ -144,8 +174,8 @@ fn test_perf_runner_with_command_line_default_args() { let runner = result.unwrap(); assert_eq!(runner.options.iterations, 1); assert_eq!(runner.options.parallel, 1); - assert_eq!(runner.options.duration, 30); - assert_eq!(runner.options.warmup, 5); + assert_eq!(runner.options.duration, Duration::seconds(30)); + assert_eq!(runner.options.warmup, Duration::seconds(5)); assert!(!runner.options.no_cleanup); } @@ -154,7 +184,7 @@ fn test_perf_runner_with_command_line_custom_iterations() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--iterations", "10"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with custom iterations" @@ -169,7 +199,7 @@ fn test_perf_runner_with_command_line_custom_parallel() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--parallel", "5"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with custom parallel" @@ -184,14 +214,14 @@ fn test_perf_runner_with_command_line_custom_duration() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--duration", "60"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with custom duration" ); let runner = result.unwrap(); - assert_eq!(runner.options.duration, 60); + assert_eq!(runner.options.duration, Duration::seconds(60)); } #[test] @@ -199,14 +229,14 @@ fn test_perf_runner_with_command_line_custom_warmup() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--warmup", "10"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with custom warmup" ); let runner = result.unwrap(); - assert_eq!(runner.options.warmup, 10); + assert_eq!(runner.options.warmup, Duration::seconds(10)); } #[test] @@ -214,7 +244,7 @@ fn test_perf_runner_with_command_line_test_results_file() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--test-results", "/tmp/results.json"]; - let result = 
PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with custom test results file" @@ -229,7 +259,7 @@ fn test_perf_runner_with_command_line_no_cleanup() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--no-cleanup"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with no-cleanup flag" @@ -239,21 +269,6 @@ fn test_perf_runner_with_command_line_no_cleanup() { assert!(runner.options.no_cleanup); } -#[test] -fn test_perf_runner_with_command_line_test_name() { - let tests = vec![create_basic_test_metadata()]; - let args = vec!["perf-tests", "--test", "my_test"]; - - let result = PerfRunner::with_command_line(tests, args); - assert!( - result.is_ok(), - "PerfRunner::with_command_line should succeed with test name" - ); - - let runner = result.unwrap(); - assert_eq!(runner.options.test.as_ref().unwrap(), "my_test"); -} - #[test] fn test_perf_runner_with_command_line_all_options() { let tests = vec![create_basic_test_metadata()]; @@ -269,12 +284,10 @@ fn test_perf_runner_with_command_line_all_options() { "15", "--test-results", "/custom/results.json", - "--test", - "specific_test", "--no-cleanup", ]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with all options" @@ -283,10 +296,9 @@ fn test_perf_runner_with_command_line_all_options() { let runner = result.unwrap(); assert_eq!(runner.options.iterations, 20); assert_eq!(runner.options.parallel, 8); - assert_eq!(runner.options.duration, 120); - assert_eq!(runner.options.warmup, 15); + assert_eq!(runner.options.duration, Duration::seconds(120)); + assert_eq!(runner.options.warmup, Duration::seconds(15)); assert_eq!(runner.options.test_results_filename, "/custom/results.json"); - assert_eq!(runner.options.test.as_ref().unwrap(), "specific_test"); assert!(runner.options.no_cleanup); } @@ -295,7 +307,7 @@ fn test_perf_runner_command_line_help() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--help"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_err(), "PerfRunner::with_command_line should fail with help flag" @@ -313,17 +325,19 @@ fn test_perf_runner_with_subcommand() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "basic_test", "--test-option", "value"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with subcommand" ); let runner = result.unwrap(); - let option_value: Option<&String> = runner - .try_get_one_subcommand("basic_test", "test-option") - .ok() - .flatten(); + + let selected_test = runner + .get_selected_test_name() + .expect("A test should be selected"); + assert_eq!(selected_test, "basic_test"); + let option_value: Option<&String> = runner.try_get_test_arg("test-option").ok().flatten(); assert!(option_value.is_some()); 
assert_eq!(option_value.unwrap(), "value"); } @@ -333,17 +347,14 @@ fn test_perf_runner_with_subcommand_short_activator() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "basic_test", "-t", "short_value"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with short activator" ); let runner = result.unwrap(); - let option_value: Option<&String> = runner - .try_get_one_subcommand("basic_test", "test-option") - .ok() - .flatten(); + let option_value: Option<&String> = runner.try_get_test_arg("test-option").ok().flatten(); assert!(option_value.is_some()); assert_eq!(option_value.unwrap(), "short_value"); } @@ -363,20 +374,30 @@ fn test_perf_runner_with_complex_subcommand() { println!( "Help: {}", - PerfRunner::with_command_line(tests.clone(), vec!["perf-tests", "--help"]) - .unwrap_err() - .source() - .unwrap() + PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests.clone(), + vec!["perf-tests", "--help"] + ) + .unwrap_err() + .source() + .unwrap() ); println!( "Help2 : {}", - PerfRunner::with_command_line(tests.clone(), vec!["perf-tests", "complex_test", "--help"]) - .unwrap_err() - .source() - .unwrap() + PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests.clone(), + vec!["perf-tests", "complex_test", "--help"] + ) + .unwrap_err() + .source() + .unwrap() ); - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with complex subcommand" @@ -384,25 +405,19 @@ fn test_perf_runner_with_complex_subcommand() { let runner = result.unwrap(); - let mandatory_value: Result, _> = - runner.try_get_one_subcommand("complex_test", "mandatory-option"); + let mandatory_value: Result> = runner.try_get_test_arg("mandatory-option"); println!("{:?}", mandatory_value); assert!(mandatory_value.is_ok()); let mandatory_value = mandatory_value.unwrap(); assert!(mandatory_value.is_some()); assert_eq!(mandatory_value.unwrap(), "required_value"); - let sensitive_value: Option<&String> = runner - .try_get_one_subcommand("complex_test", "sensitive-option") - .ok() - .flatten(); + let sensitive_value: Option<&String> = + runner.try_get_test_arg("sensitive-option").ok().flatten(); assert!(sensitive_value.is_some()); assert_eq!(sensitive_value.unwrap(), "secret_value"); - let flag_value = runner - .try_get_one_subcommand("complex_test", "flag-option") - .ok() - .flatten(); + let flag_value = runner.try_get_test_arg("flag-option").ok().flatten(); assert!(flag_value.is_some()); let flag_value: bool = *flag_value.unwrap(); assert!(flag_value); @@ -413,17 +428,14 @@ fn test_perf_runner_with_no_short_activator() { let tests = vec![create_no_short_activator_test_metadata()]; let args = vec!["perf-tests", "no_short_test", "--long-only", "value"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_ok(), "PerfRunner::with_command_line should succeed with long-only activator" ); let runner = result.unwrap(); - let option_value: Option<&String> = runner - .try_get_one_subcommand("no_short_test", "long-only") - .ok() - .flatten(); + let option_value: Option<&String> = 
runner.try_get_test_arg("long-only").ok().flatten(); assert!(option_value.is_some()); assert_eq!(option_value.unwrap(), "value"); } @@ -433,8 +445,9 @@ fn test_perf_runner_get_one_nonexistent() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests"]; - let runner = PerfRunner::with_command_line(tests, args).unwrap(); - let result: Result, _> = runner.try_get_one("nonexistent"); + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args).unwrap(); + let result: Result> = runner.try_get_global_arg("nonexistent"); assert!(result.is_err()); } @@ -443,15 +456,17 @@ fn test_perf_runner_get_one_different_types() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--iterations", "42"]; - let runner = PerfRunner::with_command_line(tests, args).unwrap(); + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args).unwrap(); // Test getting u32 value - let iterations: Option<&u32> = runner.try_get_one("iterations").ok().flatten(); + let iterations: Option<&u32> = runner.try_get_global_arg("iterations").ok().flatten(); assert!(iterations.is_some()); assert_eq!(*iterations.unwrap(), 42); // Test getting wrong type returns None - let iterations_as_string: Option<&String> = runner.try_get_one("iterations").ok().flatten(); + let iterations_as_string: Option<&String> = + runner.try_get_global_arg("iterations").ok().flatten(); assert!(iterations_as_string.is_none()); } @@ -460,7 +475,8 @@ fn test_perf_runner_options_debug() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--iterations", "5"]; - let runner = PerfRunner::with_command_line(tests, args).unwrap(); + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args).unwrap(); // Test that Debug is implemented for PerfRunner let debug_output = format!("{:?}", runner); @@ -499,7 +515,7 @@ fn test_perf_runner_with_invalid_numeric_value() { let tests = vec![create_basic_test_metadata()]; let args = vec!["perf-tests", "--iterations", "not_a_number"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_err(), "PerfRunner::with_command_line should fail with invalid numeric value" @@ -511,7 +527,7 @@ fn test_perf_runner_with_missing_mandatory_option() { let tests = vec![create_complex_test_metadata()]; let args = vec!["perf-tests", "complex_test"]; // Missing mandatory option - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!( result.is_err(), "PerfRunner::with_command_line should fail with missing mandatory option" @@ -524,25 +540,143 @@ fn test_perf_runner_with_multiple_tests_and_subcommands() { // Test with first subcommand let args = vec!["perf-tests", "basic_test", "--test-option", "value1"]; - let result = PerfRunner::with_command_line(tests.clone(), args); + let result = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests.clone(), args); assert!(result.is_ok()); let runner = result.unwrap(); - let option_value: Option<&String> = runner - .try_get_one_subcommand("basic_test", "test-option") - .ok() - .flatten(); + let option_value: Option<&String> = runner.try_get_test_arg("test-option").ok().flatten(); assert_eq!(option_value.unwrap(), "value1"); // Test with second subcommand let args = vec!["perf-tests", 
"complex_test", "--mandatory", "required"]; - let result = PerfRunner::with_command_line(tests, args); + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); assert!(result.is_ok()); let runner = result.unwrap(); - let mandatory_value: Option<&String> = runner - .try_get_one_subcommand("complex_test", "mandatory-option") - .ok() - .flatten(); + let mandatory_value: Option<&String> = + runner.try_get_test_arg("mandatory-option").ok().flatten(); assert_eq!(mandatory_value.unwrap(), "required"); } + +struct ComplexTest {} + +#[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] +impl PerfTest for ComplexTest { + async fn setup(&self, _context: &TestContext) -> azure_core::Result<()> { + println!("Setting up ComplexTest..."); + // Simulate some async setup work + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + Ok(()) + } + + async fn cleanup(&self, _context: &TestContext) -> azure_core::Result<()> { + println!("Cleaning up ComplexTest..."); + // Simulate some async cleanup work + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + Ok(()) + } + + async fn run(&self /*, _context: &TestContext*/) -> azure_core::Result<()> { + // Simulate some async test work + println!("Running ComplexTest..."); + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + Ok(()) + } +} + +fn complex_test_create(_runner: &PerfRunner) -> CreatePerfTestReturn { + Box::pin(async { Ok(Box::new(ComplexTest {}) as Box) }) +} + +#[tokio::test] +async fn test_perf_runner_with_test_functions() { + let tests = vec![TestMetadata { + name: "complex_test", + description: "A complex test with multiple options", + options: vec![ + TestOption { + name: "mandatory-option", + short_activator: 'm', + long_activator: "mandatory", + display_message: "Mandatory option", + expected_args_len: 1, + mandatory: true, + sensitive: false, + }, + TestOption { + name: "sensitive-option", + short_activator: 's', + long_activator: "sensitive", + display_message: "Sensitive option", + expected_args_len: 1, + mandatory: false, + sensitive: true, + }, + TestOption { + name: "flag-option", + short_activator: 'f', + long_activator: "flag", + display_message: "Flag option", + expected_args_len: 0, + mandatory: false, + sensitive: false, + }, + ], + create_test: complex_test_create, + }]; + let args = vec![ + "perf-tests", + "complex_test", + "--mandatory", + "required_value", + "--sensitive", + "secret_value", + "--flag", + ]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with complex subcommand" + ); + + let runner = result.unwrap(); + + let mandatory_value: Result> = runner.try_get_test_arg("mandatory-option"); + println!("{:?}", mandatory_value); + assert!(mandatory_value.is_ok()); + let mandatory_value = mandatory_value.unwrap(); + assert!(mandatory_value.is_some()); + assert_eq!(mandatory_value.unwrap(), "required_value"); + + let sensitive_value: Option<&String> = + runner.try_get_test_arg("sensitive-option").ok().flatten(); + assert!(sensitive_value.is_some()); + assert_eq!(sensitive_value.unwrap(), "secret_value"); + + let flag_value = runner.try_get_test_arg("flag-option").ok().flatten(); + assert!(flag_value.is_some()); + let flag_value: bool = *flag_value.unwrap(); + assert!(flag_value); + + let perf_tests_impl = (runner.tests[0].create_test)(&runner) 
+ .await + .expect("Failed to create test instance"); + + let crate_dir = env!("CARGO_MANIFEST_DIR"); + + let test_context = TestContext::new(crate_dir, crate_dir, runner.tests[0].name) + .expect("Failed to create TestContext"); + + perf_tests_impl + .setup(&test_context) + .await + .expect("Setup failed"); + perf_tests_impl.run(/*&context */).await.expect("Run failed"); + perf_tests_impl + .cleanup(&test_context) + .await + .expect("Cleanup failed"); +} diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs new file mode 100644 index 0000000000..265176a792 --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -0,0 +1,111 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +//! Tests for functioning of the performance test runner. +//! +//! These tests cover various scenarios for running the `PerfRunner` with different options and measurements. +//! +use super::*; +use std::boxed::Box; + +#[tokio::test] +async fn test_perf_runner_with_no_tests() { + let args = vec!["perf_test", "--iterations", "1", "--duration", "1"]; + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), vec![], args).unwrap(); + + let result = runner.run().await; + assert!(result.is_err()); +} + +fn create_fibonacci1_test(runner: &PerfRunner) -> CreatePerfTestReturn { + struct Fibonacci1Test { + count: u32, + } + + impl Fibonacci1Test { + fn fibonacci(n: u32) -> u32 { + if n <= 1 { + n + } else { + Self::fibonacci(n - 1) + Self::fibonacci(n - 2) + } + } + } + + #[async_trait::async_trait] + impl PerfTest for Fibonacci1Test { + async fn setup(&self, _context: &TestContext) -> azure_core::Result<()> { + Ok(()) + } + async fn run(&self /*, _context: &TestContext*/) -> azure_core::Result<()> { + let _result = Self::fibonacci(self.count); + Ok(()) + } + async fn cleanup(&self, _context: &TestContext) -> azure_core::Result<()> { + Ok(()) + } + } + // Manually handle the Result instead of using ? because this function does not return a Result. + let count: Option<&String> = match runner.try_get_test_arg("count") { + Ok(v) => v, + Err(e) => { + // Return a future that immediately yields the error. 
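+            // (`create_test` has the signature `fn(&PerfRunner) -> CreatePerfTestReturn`,
+            // i.e. it returns a pinned, boxed future rather than a `Result`, so failures
+            // must be reported from the returned future itself.)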
+ return Box::pin(async move { Err(e) }); + } + }; + println!("Fibonacci1Test with count: {:?}", count); + let count = count.expect("count argument is mandatory"); + let count = match count.parse::() { + Ok(v) => v, + Err(e) => { + let err = azure_core::Error::with_message( + azure_core::error::ErrorKind::Other, + format!("invalid count argument: {}", e), + ); + return Box::pin(async move { Err(err) }); + } + }; + Box::pin(async move { Ok(Box::new(Fibonacci1Test { count }) as Box) }) +} + +#[tokio::test] +async fn test_perf_runner_with_single_test() { + let args = vec![ + "perf_test", + "--iterations", + "1", + "--parallel", + "10", + "--duration", + "1", + "--warmup", + "1", + "basic_test", + "-c", + "10", + ]; + let runner = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + vec![TestMetadata { + name: "basic_test", + description: "A basic test for testing purposes", + options: vec![TestOption { + name: "count", + mandatory: true, + short_activator: 'c', + expected_args_len: 1, + display_message: "The Fibonacci number to compute", + ..Default::default() + }], + create_test: create_fibonacci1_test, + }], + args, + ) + .unwrap(); + + let result = runner.run().await; + assert!(result.is_ok()); + println!("Result: {:?}", result); +} diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index f3ca1c6a67..1a3bdf4119 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -3,22 +3,48 @@ #![doc = include_str!("README.md")] -use clap::{parser::MatchesError, ArgMatches}; -use std::any::Any; +use azure_core::{time::Duration, Error, Result}; +use clap::ArgMatches; +use std::{ + any::Any, + future::Future, + pin::Pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; +use tokio::select; + +use crate::TestContext; + +#[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] +pub trait PerfTest: Send + Sync { + async fn setup(&self, context: &TestContext) -> azure_core::Result<()>; + async fn run(&self /*context: &TestContext*/) -> azure_core::Result<()>; + async fn cleanup(&self, context: &TestContext) -> azure_core::Result<()>; +} + +pub type CreatePerfTestReturn = + Pin>>>>; /// Metadata about a performance test. #[derive(Debug, Clone)] pub struct TestMetadata { - /// The name of the test. + /// The name of the test suite. pub name: &'static str, - /// A brief description of the test. + /// A brief description of the test suite. pub description: &'static str, /// The set of test options supported by this test. - pub options: &'static [&'static TestOption], + pub options: Vec, + + /// A function used to create the performance test. + pub create_test: fn(&PerfRunner) -> CreatePerfTestReturn, } /// #A `TestOptions` defines a set of options for the test which will be merged with the common test inputs to define the command line for the performance test. -#[derive(Debug, Default)] +#[derive(Debug, Default, Clone)] pub struct TestOption { /// The name of the test option. This is used as the key in the `TestArguments` map. 
pub name: &'static str, @@ -47,10 +73,9 @@ pub struct TestOption { struct PerfRunnerOptions { no_cleanup: bool, iterations: u32, - parallel: u32, - test: Option, - duration: u32, - warmup: u32, + parallel: usize, + duration: Duration, + warmup: Duration, test_results_filename: String, } @@ -64,13 +89,16 @@ impl From<&ArgMatches> for PerfRunnerOptions { .get_one::("iterations") .expect("defaulted by clap"), parallel: *matches - .get_one::("parallel") - .expect("defaulted by clap"), - test: matches.get_one::("test").cloned(), - duration: *matches - .get_one::("duration") + .get_one::("parallel") .expect("defaulted by clap"), - warmup: *matches.get_one::("warmup").expect("defaulted by clap"), + duration: Duration::seconds( + *matches + .get_one::("duration") + .expect("defaulted by clap"), + ), + warmup: Duration::seconds( + *matches.get_one::("warmup").expect("defaulted by clap"), + ), test_results_filename: matches .get_one::("test-results") .expect("defaulted by clap") @@ -83,25 +111,38 @@ impl From<&ArgMatches> for PerfRunnerOptions { #[derive(Debug)] pub struct PerfRunner { options: PerfRunnerOptions, + #[allow(dead_code)] + tests: Vec, arguments: ArgMatches, + package_dir: &'static str, + module_name: &'static str, } impl PerfRunner { - pub fn new(tests: Vec) -> azure_core::Result { - let command = Self::get_command_from_metadata(tests); + pub fn new( + package_dir: &'static str, + module_name: &'static str, + tests: Vec, + ) -> azure_core::Result { + let command = Self::get_command_from_metadata(&tests); let arguments = command.get_matches(); Ok(Self { options: PerfRunnerOptions::from(&arguments), + tests, arguments, + package_dir, + module_name, }) } #[cfg(test)] pub fn with_command_line( + package_dir: &'static str, + module_name: &'static str, tests: Vec, args: Vec<&str>, ) -> azure_core::Result { - let command = Self::get_command_from_metadata(tests); + let command = Self::get_command_from_metadata(&tests); let arguments = command.try_get_matches_from(args).map_err(|e| { azure_core::error::Error::with_error( azure_core::error::ErrorKind::Other, @@ -111,41 +152,155 @@ impl PerfRunner { })?; Ok(Self { options: PerfRunnerOptions::from(&arguments), + tests, arguments, + package_dir, + module_name, }) } /// Gets a reference to a typed argument by its id. 
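+    ///
+    /// Global arguments are the ones shared by every test (for example `--iterations`);
+    /// a typical lookup is `runner.try_get_global_arg::<u32>("iterations")`.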
- pub fn try_get_one<T>(&self, id: &str) -> Result<Option<&T>, MatchesError>
+ pub fn try_get_global_arg<T>(&self, id: &str) -> Result<Option<&T>>
 where
 T: Any + Clone + Send + Sync + 'static,
 {
- self.arguments.try_get_one::<T>(id)
+ self.arguments.try_get_one::<T>(id).map_err(|e| {
+ Error::with_error(
+ azure_core::error::ErrorKind::Other,
+ e,
+ format!("Failed to get argument '{}'.", id),
+ )
+ })
 }
 
- pub fn try_get_one_subcommand<T>(
- &self,
- subcommand: &str,
- id: &str,
- ) -> Result<Option<&T>, MatchesError>
+ pub fn try_get_test_arg<T>(&self, id: &str) -> Result<Option<&T>>
 where
 T: Any + Clone + Send + Sync + 'static,
 {
- let subcommand = self.arguments.subcommand_matches(subcommand);
- if let Some(subcommand) = subcommand {
- subcommand.try_get_one::<T>(id)
+ if let Some((_, args)) = self.arguments.subcommand() {
+ args.try_get_one::<T>(id).map_err(|e| {
+ Error::with_error(
+ azure_core::error::ErrorKind::Other,
+ e,
+ format!("Failed to get argument '{}' for test.", id),
+ )
+ })
 } else {
 Ok(None)
 }
 }
 
- #[allow(dead_code)]
- async fn run_test<F, Fut>(&self, test: F) -> azure_core::Result<()>
- where
- F: Fn(u32, u32) -> Fut,
- Fut: std::future::Future<Output = azure_core::Result<()>>,
- {
- test(self.options.iterations, self.options.parallel).await
+ pub fn get_selected_test_name(&self) -> Result<&str> {
+ match self.arguments.subcommand_name() {
+ Some(name) => Ok(name),
+ None => Err(Error::with_message(
+ azure_core::error::ErrorKind::Other,
+ "No test was selected.",
+ )),
+ }
+ }
+
+ pub async fn run(&self) -> azure_core::Result<()> {
+ // We can only run tests if there was a test selected.
+ let test_name = self.get_selected_test_name()?;
+
+ let test = self
+ .tests
+ .iter()
+ .find(|t| t.name == test_name)
+ .ok_or_else(|| {
+ Error::with_message(
+ azure_core::error::ErrorKind::Other,
+ format!("Test '{}' not found.", test_name),
+ )
+ })?;
+ let test_instance = (test.create_test)(self).await?;
+ let test_instance: Arc<dyn PerfTest> = Arc::from(test_instance);
+
+ let context = TestContext::new(self.package_dir, self.module_name, test.name)?;
+
+ for iteration in 0..self.options.iterations {
+ println!(
+ "Running test iteration {}/{}",
+ iteration + 1,
+ self.options.iterations
+ );
+
+ println!("========== Starting test setup ==========");
+ test_instance.setup(&context).await?;
+
+ println!("========== Starting test warmup ==========");
+
+ self.run_test_for(Arc::clone(&test_instance), &context, self.options.warmup)
+ .await?;
+
+ println!("========== Starting test run ==========");
+ println!("Running test for {} seconds", self.options.duration);
+ println!("Parallelism: {}", self.options.parallel);
+ let iteration_count = self
+ .run_test_for(Arc::clone(&test_instance), &context, self.options.duration)
+ .await?;
+ if !self.options.no_cleanup {
+ println!("========== Starting test cleanup ==========");
+ test_instance.cleanup(&context).await?;
+ }
+ println!("========== Starting test cleanup ==========");
+ test_instance.cleanup(&context).await?;
+
+ println!(
+ "Completed test iteration {}/{} - {} iterations run in {} seconds - {} iterations/second",
+ iteration + 1,
+ self.options.iterations,
+ iteration_count,
+ self.options.duration.as_seconds_f64(),
+ iteration_count as f64 / self.options.duration.as_seconds_f64()
+ );
+ println!(
+ "Completed test iteration {}/{} - {} iterations run in {} seconds - {} seconds/iteration",
+ iteration + 1,
+ self.options.iterations,
+ iteration_count,
+ self.options.duration.as_seconds_f64(),
+ self.options.duration.as_seconds_f64() / iteration_count as f64
+ );
+ }
+ Ok(())
+ }
+ pub async fn run_test_for(
+ &self,
+ test_instance: Arc<dyn PerfTest>,
_context: &TestContext, + duration: Duration, + ) -> azure_core::Result { + let iteration_count = Arc::new(AtomicU64::new(0)); + let mut tasks = Vec::with_capacity(self.options.parallel); + for _ in 0..self.options.parallel { + let test_instance_clone = Arc::clone(&test_instance); + let ic = Arc::clone(&iteration_count); + let task: tokio::task::JoinHandle> = tokio::spawn(async move { + loop { + if ic.load(Ordering::SeqCst) % 1000 == 0 { + println!("Iteration {}", ic.load(Ordering::SeqCst)); + } + test_instance_clone.run().await?; + ic.fetch_add(1, Ordering::SeqCst); + } + #[allow(unreachable_code)] + Ok(()) + }); + tasks.push(task); + } + let timeout = std::time::Duration::from_secs_f64(duration.as_seconds_f64()); + select!( + _ = futures::future::join_all(tasks) => { + println!("All tasks completed unexpectedly."); + // All tasks completed (should not happen in normal operation). + } + _ = tokio::time::sleep(timeout) => { + println!("Duration elapsed, stopping tasks."); + } + ); + Ok(iteration_count.load(Ordering::SeqCst)) } // * Disable test cleanup @@ -163,7 +318,7 @@ impl PerfRunner { // * Sync - run a synchronous version of the test /// Constructs a `clap::Command` from the provided test metadata. - fn get_command_from_metadata(tests: Vec) -> clap::Command { + fn get_command_from_metadata(tests: &[TestMetadata]) -> clap::Command { let mut command = clap::Command::new("perf-tests") .about("Run performance tests for the Azure SDK for Rust") .arg( @@ -171,45 +326,41 @@ impl PerfRunner { .required(false) .default_value("1") .value_parser(clap::value_parser!(u32)) - .global(true), + .global(false), ) .arg( clap::arg!(--parallel "The number of concurrent tasks to use when running each test") .required(false) .default_value("1") - .value_parser(clap::value_parser!(u32)) - .global(true), - ) - .arg( - clap::arg!(--test "The name of the test to run. 
If not specified, all tests will be run.") - .required(false) - .global(true), + .value_parser(clap::value_parser!(usize)) + .global(false), ) .arg( clap::arg!(--duration "The duration of each test in seconds") .required(false) .default_value("30") - .value_parser(clap::value_parser!(u32)) - .global(true), + .value_parser(clap::value_parser!(i64)) + .global(false), ) .arg( clap::arg!(--warmup "The duration of the warmup period in seconds") .required(false) .default_value("5") - .value_parser(clap::value_parser!(u32)) - .global(true), - ).arg( + .value_parser(clap::value_parser!(i64)) + .global(false), + ) + .arg( clap::arg!(--"test-results" "The file to write test results to") .required(false) .default_value("./tests/results.json") - .global(true), + .global(false), ) .arg(clap::arg!(--"no-cleanup" "Disable test cleanup") .required(false).global(true)) ; - for test in &tests { + for test in tests { let mut subcommand = clap::Command::new(test.name).about(test.description); - for option in test.options { + for option in test.options.iter() { let mut arg = clap::Arg::new(option.name) .help(option.display_message) .long(option.long_activator) @@ -232,4 +383,7 @@ impl PerfRunner { } #[cfg(test)] -mod tests; +mod config_tests; + +#[cfg(test)] +mod framework_tests; From 8d58f99881dccbf5a03ef2094c2e307474e217b0 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Thu, 2 Oct 2025 13:26:47 -0700 Subject: [PATCH 03/18] Added progress tracker --- .../azure_core_test/src/perf/config_tests.rs | 5 +- .../src/perf/framework_tests.rs | 13 ++- sdk/core/azure_core_test/src/perf/mod.rs | 92 ++++++++++--------- 3 files changed, 61 insertions(+), 49 deletions(-) diff --git a/sdk/core/azure_core_test/src/perf/config_tests.rs b/sdk/core/azure_core_test/src/perf/config_tests.rs index b2571bb6a1..564d7b7849 100644 --- a/sdk/core/azure_core_test/src/perf/config_tests.rs +++ b/sdk/core/azure_core_test/src/perf/config_tests.rs @@ -674,7 +674,10 @@ async fn test_perf_runner_with_test_functions() { .setup(&test_context) .await .expect("Setup failed"); - perf_tests_impl.run(/*&context */).await.expect("Run failed"); + perf_tests_impl + .run(/*&test_context*/) + .await + .expect("Run failed"); perf_tests_impl .cleanup(&test_context) .await diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs index 265176a792..b6de810b80 100644 --- a/sdk/core/azure_core_test/src/perf/framework_tests.rs +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -40,6 +40,11 @@ fn create_fibonacci1_test(runner: &PerfRunner) -> CreatePerfTestReturn { } async fn run(&self /*, _context: &TestContext*/) -> azure_core::Result<()> { let _result = Self::fibonacci(self.count); + // This is a CPU bound test, so yield to allow other tasks to run. Otherwise we jam the tokio scheduler. + // Note that this significantly reduces the performance of the test, but it is necessary to allow parallelism. + // + // In a real-world scenario, the test would be doing async work (e.g. network I/O) which would yield naturally. 
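+ //
+ // A possible alternative for purely CPU-bound work (not used here; shown only as a
+ // hedged sketch) would be to hand the computation to the blocking thread pool, e.g.
+ // `tokio::task::spawn_blocking(move || Self::fibonacci(count)).await`, which keeps the
+ // async worker threads free without an explicit yield.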
+ tokio::task::yield_now().await; Ok(()) } async fn cleanup(&self, _context: &TestContext) -> azure_core::Result<()> { @@ -76,12 +81,12 @@ async fn test_perf_runner_with_single_test() { "--iterations", "1", "--parallel", - "10", + "30", "--duration", - "1", + "10", "--warmup", "1", - "basic_test", + "fibonacci1", "-c", "10", ]; @@ -89,7 +94,7 @@ async fn test_perf_runner_with_single_test() { env!("CARGO_MANIFEST_DIR"), file!(), vec![TestMetadata { - name: "basic_test", + name: "fibonacci1", description: "A basic test for testing purposes", options: vec![TestOption { name: "count", diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index 1a3bdf4119..397e3c89a0 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -3,6 +3,7 @@ #![doc = include_str!("README.md")] +use crate::TestContext; use azure_core::{time::Duration, Error, Result}; use clap::ArgMatches; use std::{ @@ -14,15 +15,13 @@ use std::{ Arc, }, }; -use tokio::select; - -use crate::TestContext; +use tokio::{select, task::JoinSet}; #[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?Send))] #[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] pub trait PerfTest: Send + Sync { async fn setup(&self, context: &TestContext) -> azure_core::Result<()>; - async fn run(&self /*context: &TestContext*/) -> azure_core::Result<()>; + async fn run(&self /*, context: &TestContext*/) -> azure_core::Result<()>; async fn cleanup(&self, context: &TestContext) -> azure_core::Result<()>; } @@ -76,6 +75,7 @@ struct PerfRunnerOptions { parallel: usize, duration: Duration, warmup: Duration, + disable_progress: bool, test_results_filename: String, } @@ -91,6 +91,7 @@ impl From<&ArgMatches> for PerfRunnerOptions { parallel: *matches .get_one::("parallel") .expect("defaulted by clap"), + disable_progress: matches.get_flag("no-progress"), duration: Duration::seconds( *matches .get_one::("duration") @@ -116,6 +117,7 @@ pub struct PerfRunner { arguments: ArgMatches, package_dir: &'static str, module_name: &'static str, + progress: Arc, } impl PerfRunner { @@ -132,6 +134,7 @@ impl PerfRunner { arguments, package_dir, module_name, + progress: Arc::new(AtomicU64::new(0)), }) } @@ -156,6 +159,7 @@ impl PerfRunner { arguments, package_dir, module_name, + progress: Arc::new(AtomicU64::new(0)), }) } @@ -229,32 +233,26 @@ impl PerfRunner { println!("========== Starting test setup =========="); test_instance.setup(&context).await?; - println!("========== Starting test warmup =========="); + println!( + "========== Starting test warmup for {} ==========", + self.options.warmup + ); - self.run_test_for(Arc::clone(&test_instance), &context, self.options.warmup) + self.run_test_for(Arc::clone(&test_instance), test.name, self.options.warmup) .await?; - println!("========== Starting test run =========="); - println!("Running test for {} seconds", self.options.duration); - println!("Parallelism: {}", self.options.parallel); - let iteration_count = self - .run_test_for(Arc::clone(&test_instance), &context, self.options.duration) + println!( + "========== Starting test run for {} ==========", + self.options.duration + ); + self.run_test_for(Arc::clone(&test_instance), test.name, self.options.duration) .await?; if !self.options.no_cleanup { println!("========== Starting test cleanup =========="); test_instance.cleanup(&context).await?; } - println!("========== Starting test cleanup =========="); - test_instance.cleanup(&context).await?; - println!( - "Completed 
test iteration {}/{} - {} iterations run in {} seconds - {} iterations/second", - iteration + 1, - self.options.iterations, - iteration_count, - self.options.duration.as_seconds_f64(), - iteration_count as f64 / self.options.duration.as_seconds_f64() - ); + let iteration_count = self.progress.load(Ordering::SeqCst); println!( "Completed test iteration {}/{} - {} iterations run in {} seconds - {} seconds/iteration", iteration + 1, @@ -263,44 +261,49 @@ impl PerfRunner { self.options.duration.as_seconds_f64(), self.options.duration.as_seconds_f64() / iteration_count as f64 ); + let operations_per_second = + self.options.duration.as_seconds_f64() / iteration_count as f64; + let duration_per_operation = Duration::seconds_f64(operations_per_second); + println!("{} seconds/operation", duration_per_operation); } Ok(()) } pub async fn run_test_for( &self, test_instance: Arc, - _context: &TestContext, + _test_name: &str, duration: Duration, - ) -> azure_core::Result { - let iteration_count = Arc::new(AtomicU64::new(0)); - let mut tasks = Vec::with_capacity(self.options.parallel); + ) -> azure_core::Result<()> { + let mut tasks: JoinSet> = JoinSet::new(); for _ in 0..self.options.parallel { let test_instance_clone = Arc::clone(&test_instance); - let ic = Arc::clone(&iteration_count); - let task: tokio::task::JoinHandle> = tokio::spawn(async move { + let progress = self.progress.clone(); + // let package_dir = self.package_dir; + // let module_name = self.module_name; + tasks.spawn(async move { + // let context = + // TestContext::new(package_dir, module_name, " test_name_copy.as_str()")?; + loop { - if ic.load(Ordering::SeqCst) % 1000 == 0 { - println!("Iteration {}", ic.load(Ordering::SeqCst)); - } - test_instance_clone.run().await?; - ic.fetch_add(1, Ordering::SeqCst); + test_instance_clone.run(/*&context*/).await?; + progress.fetch_add(1, Ordering::SeqCst); } - #[allow(unreachable_code)] - Ok(()) }); - tasks.push(task); } - let timeout = std::time::Duration::from_secs_f64(duration.as_seconds_f64()); + let start = tokio::time::Instant::now(); + let timeout = tokio::time::Duration::from_secs_f64(duration.as_seconds_f64()); select!( - _ = futures::future::join_all(tasks) => { - println!("All tasks completed unexpectedly."); - // All tasks completed (should not happen in normal operation). 
- } - _ = tokio::time::sleep(timeout) => { - println!("Duration elapsed, stopping tasks."); - } + _ = tokio::time::sleep(timeout) => {println!("Timeout reached, stopping test tasks: {:?}", start.elapsed());}, + _ = tasks.join_all() => {println!("All test tasks completed: {:?}", start.elapsed());}, + _ = async { + loop { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + println!("{:?} elapsed: {} per operation.", start.elapsed(), Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); + } + }, if !self.options.disable_progress => {}, ); - Ok(iteration_count.load(Ordering::SeqCst)) + println!("Task time elapsed: {:?}", start.elapsed()); + Ok(()) } // * Disable test cleanup @@ -335,6 +338,7 @@ impl PerfRunner { .value_parser(clap::value_parser!(usize)) .global(false), ) + .arg(clap::arg!(--"no-progress" "Disable progress reporting").required(false).global(false)) .arg( clap::arg!(--duration "The duration of each test in seconds") .required(false) From b168643ceda0e91ccbbf8a5a65b2fe1dc82a522e Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Thu, 2 Oct 2025 14:43:20 -0700 Subject: [PATCH 04/18] Added KeyVault test and aligned tracker output with that of C++ --- sdk/core/azure_core_test/src/perf/mod.rs | 5 +- .../Cargo.toml | 5 + .../perf/get_secret.rs | 123 ++++++++++++++++++ 3 files changed, 132 insertions(+), 1 deletion(-) create mode 100644 sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index 397e3c89a0..7aaa1061ea 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -298,7 +298,10 @@ impl PerfRunner { _ = async { loop { tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - println!("{:?} elapsed: {} per operation.", start.elapsed(), Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); + println!("{:?} elapsed: {} op/sec, {} sec/ operation.", + start.elapsed(), + self.progress.load(Ordering::SeqCst) as f64 / start.elapsed().as_secs_f64(), + Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); } }, if !self.options.disable_progress => {}, ); diff --git a/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml b/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml index 857666e90c..321ad9ed2f 100644 --- a/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml +++ b/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml @@ -40,3 +40,8 @@ rustc_version.workspace = true [lints] workspace = true + +[[test]] +name = "performance_tests" +path = "perf/get_secret.rs" +harness = false diff --git a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs new file mode 100644 index 0000000000..75905e6e05 --- /dev/null +++ b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs @@ -0,0 +1,123 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +use std::sync::OnceLock; + +use azure_core::Result; +use azure_core_test::{ + perf::{CreatePerfTestReturn, PerfRunner, PerfTest, TestMetadata, TestOption}, + TestContext, +}; +use azure_security_keyvault_secrets::{models::SetSecretParameters, SecretClient}; +use rand::{distr::Alphanumeric, Rng}; +struct GetSecrets { + vault_url: String, + random_key_name: OnceLock, + client: OnceLock, +} + +impl GetSecrets { + fn test_metadata() -> TestMetadata { + TestMetadata { + name: "get_secret", + description: "Get a secret from Key Vault", + options: vec![TestOption { + name: "vault_url", + display_message: "The URL of the Key Vault to use in the test", + mandatory: true, + short_activator: 'u', + long_activator: "vault-url", + expected_args_len: 1, + ..Default::default() + }], + create_test: Self::create_new_test, + } + } + + fn create_new_test(runner: &PerfRunner) -> CreatePerfTestReturn { + let vault_url_ref: Option<&String> = match runner.try_get_test_arg("vault_url") { + Ok(v) => v, + Err(e) => { + // Return a future that immediately yields the error. + return Box::pin(async move { Err(e) }); + } + }; + // Own the String so the future can be 'static. + let vault_url = vault_url_ref + .expect("vault_url argument is mandatory") + .clone(); + Box::pin(async move { + Ok(Box::new(GetSecrets { + vault_url, + random_key_name: OnceLock::new(), + client: OnceLock::new(), + }) as Box) + }) + } + + fn create_random_key_name() -> String { + let random_suffix: String = rand::rng() + .sample_iter(&Alphanumeric) + .take(8) + .map(char::from) + .collect(); + format!("perf-{}", random_suffix) + } + + fn get_random_key_name(&self) -> &String { + self.random_key_name + .get_or_init(Self::create_random_key_name) + } +} + +#[cfg_attr(target_arch="wasm32", async_trait::async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] +impl PerfTest for GetSecrets { + async fn setup(&self, _context: &TestContext) -> azure_core::Result<()> { + let credential = azure_identity::DeveloperToolsCredential::new(None)?; + let client = SecretClient::new(self.vault_url.as_str(), credential.clone(), None)?; + self.client.get_or_init(|| client); + + self.client + .get() + .unwrap() + .set_secret( + self.get_random_key_name(), + SetSecretParameters { + value: Some("secret_value".into()), + ..Default::default() + } + .try_into()?, + None, + ) + .await?; + Ok(()) + } + async fn cleanup(&self, _context: &TestContext) -> azure_core::Result<()> { + Ok(()) + } + async fn run(&self) -> Result<()> { + let _secret = self + .client + .get() + .unwrap() + .get_secret(self.get_random_key_name(), None) + .await? 
+ .into_body() + .await?; + Ok(()) + } +} + +#[tokio::main] +async fn main() -> azure_core::Result<()> { + let runner = PerfRunner::new( + env!("CARGO_MANIFEST_DIR"), + "foo", + vec![GetSecrets::test_metadata()], + )?; + + runner.run().await?; + + Ok(()) +} From 9e603ac97ca58a8ac0054ab1ba94dde337166198 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Thu, 2 Oct 2025 16:06:24 -0700 Subject: [PATCH 05/18] Cleaned up test creation logic --- sdk/core/azure_core/CHANGELOG.md | 1 - .../src/perf/framework_tests.rs | 38 +++++++++---------- sdk/core/azure_core_test/src/perf/mod.rs | 15 ++++---- .../perf/get_secret.rs | 24 +++++------- 4 files changed, 34 insertions(+), 44 deletions(-) diff --git a/sdk/core/azure_core/CHANGELOG.md b/sdk/core/azure_core/CHANGELOG.md index 6225987745..8b79b35be1 100644 --- a/sdk/core/azure_core/CHANGELOG.md +++ b/sdk/core/azure_core/CHANGELOG.md @@ -16,7 +16,6 @@ ### Breaking Changes - - Changed `ClientOptions::retry` from `Option` to `RetryOptions`. - Changed `DeserializeWith::deserialize_with()` to be sync. - Changed `Pipeline::send()` to return a `Result`. diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs index b6de810b80..0410062274 100644 --- a/sdk/core/azure_core_test/src/perf/framework_tests.rs +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -51,27 +51,25 @@ fn create_fibonacci1_test(runner: &PerfRunner) -> CreatePerfTestReturn { Ok(()) } } - // Manually handle the Result instead of using ? because this function does not return a Result. - let count: Option<&String> = match runner.try_get_test_arg("count") { - Ok(v) => v, - Err(e) => { - // Return a future that immediately yields the error. - return Box::pin(async move { Err(e) }); - } - }; - println!("Fibonacci1Test with count: {:?}", count); - let count = count.expect("count argument is mandatory"); - let count = match count.parse::() { - Ok(v) => v, - Err(e) => { - let err = azure_core::Error::with_message( + + // Helper function to handle the async creation of the test. + async fn create_test(runner: PerfRunner) -> Result> { + let count: Option<&String> = runner.try_get_test_arg("count")?; + + println!("Fibonacci1Test with count: {:?}", count); + let count = count.expect("count argument is mandatory"); + let count = count.parse::().map_err(|e| { + azure_core::Error::with_error( azure_core::error::ErrorKind::Other, - format!("invalid count argument: {}", e), - ); - return Box::pin(async move { Err(err) }); - } - }; - Box::pin(async move { Ok(Box::new(Fibonacci1Test { count }) as Box) }) + e, + "Invalid count argument", + ) + })?; + Ok(Box::new(Fibonacci1Test { count }) as Box) + } + + // Return a pinned future that creates the test. + Box::pin(create_test(runner.clone())) } #[tokio::test] diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index 7aaa1061ea..cbb9dee41c 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -2,6 +2,7 @@ // Licensed under the MIT License. 
#![doc = include_str!("README.md")] +#![cfg(not(target_arch = "wasm32"))] use crate::TestContext; use azure_core::{time::Duration, Error, Result}; @@ -17,8 +18,7 @@ use std::{ }; use tokio::{select, task::JoinSet}; -#[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?Send))] -#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] +#[async_trait::async_trait] pub trait PerfTest: Send + Sync { async fn setup(&self, context: &TestContext) -> azure_core::Result<()>; async fn run(&self /*, context: &TestContext*/) -> azure_core::Result<()>; @@ -67,8 +67,7 @@ pub struct TestOption { pub sensitive: bool, } -#[derive(Debug)] -#[allow(dead_code)] +#[derive(Debug, Clone)] struct PerfRunnerOptions { no_cleanup: bool, iterations: u32, @@ -76,6 +75,7 @@ struct PerfRunnerOptions { duration: Duration, warmup: Duration, disable_progress: bool, + #[allow(dead_code)] test_results_filename: String, } @@ -109,10 +109,9 @@ impl From<&ArgMatches> for PerfRunnerOptions { } /// Context information required by performance tests. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct PerfRunner { options: PerfRunnerOptions, - #[allow(dead_code)] tests: Vec, arguments: ArgMatches, package_dir: &'static str, @@ -264,7 +263,7 @@ impl PerfRunner { let operations_per_second = self.options.duration.as_seconds_f64() / iteration_count as f64; let duration_per_operation = Duration::seconds_f64(operations_per_second); - println!("{} seconds/operation", duration_per_operation); + println!("{:4} seconds/operation", duration_per_operation); } Ok(()) } @@ -298,7 +297,7 @@ impl PerfRunner { _ = async { loop { tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - println!("{:?} elapsed: {} op/sec, {} sec/ operation.", + println!("{:<10?} elapsed: {:.5} op/sec, {:4} sec/operation.", start.elapsed(), self.progress.load(Ordering::SeqCst) as f64 / start.elapsed().as_secs_f64(), Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); diff --git a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs index 75905e6e05..d9587679f9 100644 --- a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs +++ b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs @@ -35,24 +35,19 @@ impl GetSecrets { } fn create_new_test(runner: &PerfRunner) -> CreatePerfTestReturn { - let vault_url_ref: Option<&String> = match runner.try_get_test_arg("vault_url") { - Ok(v) => v, - Err(e) => { - // Return a future that immediately yields the error. - return Box::pin(async move { Err(e) }); - } - }; - // Own the String so the future can be 'static. - let vault_url = vault_url_ref - .expect("vault_url argument is mandatory") - .clone(); - Box::pin(async move { + async fn create_secret_client(runner: PerfRunner) -> Result> { + let vault_url_ref: Option<&String> = runner.try_get_test_arg("vault_url")?; + let vault_url = vault_url_ref + .expect("vault_url argument is mandatory") + .clone(); Ok(Box::new(GetSecrets { vault_url, random_key_name: OnceLock::new(), client: OnceLock::new(), }) as Box) - }) + } + + Box::pin(create_secret_client(runner.clone())) } fn create_random_key_name() -> String { @@ -103,8 +98,7 @@ impl PerfTest for GetSecrets { .unwrap() .get_secret(self.get_random_key_name(), None) .await? 
- .into_body() - .await?; + .into_body()?; Ok(()) } } From fd7c7ccb14d328e56d2da0d807e8ff26ca6912e1 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Fri, 3 Oct 2025 14:41:55 -0700 Subject: [PATCH 06/18] Renamed perf structures; cleaned up perf traces; added initial storage performance tests --- .../azure_core_test/src/perf/config_tests.rs | 32 +++--- .../src/perf/framework_tests.rs | 4 +- sdk/core/azure_core_test/src/perf/mod.rs | 28 +++-- .../perf/get_secret.rs | 20 +++- sdk/storage/azure_storage_blob/Cargo.toml | 5 + .../azure_storage_blob/perf/list_blob_test.rs | 106 ++++++++++++++++++ .../azure_storage_blob/perf/perf_tests.rs | 21 ++++ 7 files changed, 183 insertions(+), 33 deletions(-) create mode 100644 sdk/storage/azure_storage_blob/perf/list_blob_test.rs create mode 100644 sdk/storage/azure_storage_blob/perf/perf_tests.rs diff --git a/sdk/core/azure_core_test/src/perf/config_tests.rs b/sdk/core/azure_core_test/src/perf/config_tests.rs index 564d7b7849..ab4424a8ff 100644 --- a/sdk/core/azure_core_test/src/perf/config_tests.rs +++ b/sdk/core/azure_core_test/src/perf/config_tests.rs @@ -20,11 +20,11 @@ fn create_failed_test(_runner: &PerfRunner) -> CreatePerfTestReturn { } // Helper function to create a basic test metadata for testing -fn create_basic_test_metadata() -> TestMetadata { - TestMetadata { +fn create_basic_test_metadata() -> PerfTestMetadata { + PerfTestMetadata { name: "basic_test", description: "A basic test for testing purposes", - options: vec![TestOption { + options: vec![PerfTestOption { name: "test-option", short_activator: 't', long_activator: "test-option", @@ -38,12 +38,12 @@ fn create_basic_test_metadata() -> TestMetadata { } // Helper function to create test metadata with multiple options -fn create_complex_test_metadata() -> TestMetadata { - TestMetadata { +fn create_complex_test_metadata() -> PerfTestMetadata { + PerfTestMetadata { name: "complex_test", description: "A complex test with multiple options", options: vec![ - TestOption { + PerfTestOption { name: "mandatory-option", short_activator: 'm', long_activator: "mandatory", @@ -52,7 +52,7 @@ fn create_complex_test_metadata() -> TestMetadata { mandatory: true, sensitive: false, }, - TestOption { + PerfTestOption { name: "sensitive-option", short_activator: 's', long_activator: "sensitive", @@ -61,7 +61,7 @@ fn create_complex_test_metadata() -> TestMetadata { mandatory: false, sensitive: true, }, - TestOption { + PerfTestOption { name: "flag-option", short_activator: 'f', long_activator: "flag", @@ -74,11 +74,11 @@ fn create_complex_test_metadata() -> TestMetadata { } // Helper function to create test metadata without short activators -fn create_no_short_activator_test_metadata() -> TestMetadata { - TestMetadata { +fn create_no_short_activator_test_metadata() -> PerfTestMetadata { + PerfTestMetadata { name: "no_short_test", description: "Test without short activators", - options: vec![TestOption { + options: vec![PerfTestOption { name: "long-only", short_activator: '\0', long_activator: "long-only", @@ -494,7 +494,7 @@ fn test_perf_runner_options_debug() { #[test] fn test_test_option_debug_and_default() { - let option = TestOption::default(); + let option = PerfTestOption::default(); // Test default values assert_eq!(option.name, ""); @@ -592,11 +592,11 @@ fn complex_test_create(_runner: &PerfRunner) -> CreatePerfTestReturn { #[tokio::test] async fn test_perf_runner_with_test_functions() { - let tests = vec![TestMetadata { + let tests = vec![PerfTestMetadata { name: "complex_test", description: "A 
complex test with multiple options", options: vec![ - TestOption { + PerfTestOption { name: "mandatory-option", short_activator: 'm', long_activator: "mandatory", @@ -605,7 +605,7 @@ async fn test_perf_runner_with_test_functions() { mandatory: true, sensitive: false, }, - TestOption { + PerfTestOption { name: "sensitive-option", short_activator: 's', long_activator: "sensitive", @@ -614,7 +614,7 @@ async fn test_perf_runner_with_test_functions() { mandatory: false, sensitive: true, }, - TestOption { + PerfTestOption { name: "flag-option", short_activator: 'f', long_activator: "flag", diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs index 0410062274..6d7597b172 100644 --- a/sdk/core/azure_core_test/src/perf/framework_tests.rs +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -91,10 +91,10 @@ async fn test_perf_runner_with_single_test() { let runner = PerfRunner::with_command_line( env!("CARGO_MANIFEST_DIR"), file!(), - vec![TestMetadata { + vec![PerfTestMetadata { name: "fibonacci1", description: "A basic test for testing purposes", - options: vec![TestOption { + options: vec![PerfTestOption { name: "count", mandatory: true, short_activator: 'c', diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index cbb9dee41c..219181b8e8 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -30,13 +30,13 @@ pub type CreatePerfTestReturn = /// Metadata about a performance test. #[derive(Debug, Clone)] -pub struct TestMetadata { +pub struct PerfTestMetadata { /// The name of the test suite. pub name: &'static str, /// A brief description of the test suite. pub description: &'static str, /// The set of test options supported by this test. - pub options: Vec, + pub options: Vec, /// A function used to create the performance test. pub create_test: fn(&PerfRunner) -> CreatePerfTestReturn, @@ -44,7 +44,7 @@ pub struct TestMetadata { /// #A `TestOptions` defines a set of options for the test which will be merged with the common test inputs to define the command line for the performance test. #[derive(Debug, Default, Clone)] -pub struct TestOption { +pub struct PerfTestOption { /// The name of the test option. This is used as the key in the `TestArguments` map. 
pub name: &'static str, @@ -112,7 +112,7 @@ impl From<&ArgMatches> for PerfRunnerOptions { #[derive(Debug, Clone)] pub struct PerfRunner { options: PerfRunnerOptions, - tests: Vec, + tests: Vec, arguments: ArgMatches, package_dir: &'static str, module_name: &'static str, @@ -123,7 +123,7 @@ impl PerfRunner { pub fn new( package_dir: &'static str, module_name: &'static str, - tests: Vec, + tests: Vec, ) -> azure_core::Result { let command = Self::get_command_from_metadata(&tests); let arguments = command.get_matches(); @@ -141,7 +141,7 @@ impl PerfRunner { pub fn with_command_line( package_dir: &'static str, module_name: &'static str, - tests: Vec, + tests: Vec, args: Vec<&str>, ) -> azure_core::Result { let command = Self::get_command_from_metadata(&tests); @@ -273,6 +273,7 @@ impl PerfRunner { _test_name: &str, duration: Duration, ) -> azure_core::Result<()> { + self.progress.store(0, Ordering::SeqCst); let mut tasks: JoinSet> = JoinSet::new(); for _ in 0..self.options.parallel { let test_instance_clone = Arc::clone(&test_instance); @@ -283,6 +284,7 @@ impl PerfRunner { // let context = // TestContext::new(package_dir, module_name, " test_name_copy.as_str()")?; + tokio::task::yield_now().await; loop { test_instance_clone.run(/*&context*/).await?; progress.fetch_add(1, Ordering::SeqCst); @@ -295,12 +297,16 @@ impl PerfRunner { _ = tokio::time::sleep(timeout) => {println!("Timeout reached, stopping test tasks: {:?}", start.elapsed());}, _ = tasks.join_all() => {println!("All test tasks completed: {:?}", start.elapsed());}, _ = async { + let mut last_count = 0; loop { tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - println!("{:<10?} elapsed: {:.5} op/sec, {:4} sec/operation.", - start.elapsed(), - self.progress.load(Ordering::SeqCst) as f64 / start.elapsed().as_secs_f64(), - Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); + let current_total = self.progress.load(Ordering::SeqCst); + // println!("{:<10?} elapsed: {:.5} op/sec, {:4} sec/operation.", + // start.elapsed(), + // self.progress.load(Ordering::SeqCst) as f64 / start.elapsed().as_secs_f64(), + // Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); + println!("Current {:3}, Total {:5} {:4}", current_total - last_count, current_total, Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); + last_count = current_total; } }, if !self.options.disable_progress => {}, ); @@ -323,7 +329,7 @@ impl PerfRunner { // * Sync - run a synchronous version of the test /// Constructs a `clap::Command` from the provided test metadata. - fn get_command_from_metadata(tests: &[TestMetadata]) -> clap::Command { + fn get_command_from_metadata(tests: &[PerfTestMetadata]) -> clap::Command { let mut command = clap::Command::new("perf-tests") .about("Run performance tests for the Azure SDK for Rust") .arg( diff --git a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs index d9587679f9..cb0ae18733 100644 --- a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs +++ b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs @@ -1,11 +1,23 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +//! Keyvault Secrets performance tests. +//! +//! This test measures the performance of getting a secret from Azure Key Vault. +//! 
It sets up a secret in the Key Vault during the setup phase and then repeatedly retrieves it +//! during the run phase. The test can be configured with the vault URL via command line arguments +//! to target different Key Vault instances. +//! +//! To run the test, use the following command line arguments: +//! +//! cargo test --package azure_security_keyvault_secrets --test performance_tests -- --duration 10 --parallel 20 get_secret -u https://.vault.azure.net/ +//! + use std::sync::OnceLock; use azure_core::Result; use azure_core_test::{ - perf::{CreatePerfTestReturn, PerfRunner, PerfTest, TestMetadata, TestOption}, + perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata, PerfTestOption}, TestContext, }; use azure_security_keyvault_secrets::{models::SetSecretParameters, SecretClient}; @@ -17,11 +29,11 @@ struct GetSecrets { } impl GetSecrets { - fn test_metadata() -> TestMetadata { - TestMetadata { + fn test_metadata() -> PerfTestMetadata { + PerfTestMetadata { name: "get_secret", description: "Get a secret from Key Vault", - options: vec![TestOption { + options: vec![PerfTestOption { name: "vault_url", display_message: "The URL of the Key Vault to use in the test", mandatory: true, diff --git a/sdk/storage/azure_storage_blob/Cargo.toml b/sdk/storage/azure_storage_blob/Cargo.toml index 6ef074ae46..9cbe122431 100644 --- a/sdk/storage/azure_storage_blob/Cargo.toml +++ b/sdk/storage/azure_storage_blob/Cargo.toml @@ -37,3 +37,8 @@ azure_storage_blob_test.path = "../azure_storage_blob_test" futures.workspace = true tokio = { workspace = true, features = ["macros"] } tracing.workspace = true + +[[test]] +name = "performance_tests" +path = "perf/perf_tests.rs" +harness = false diff --git a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs new file mode 100644 index 0000000000..8152460d10 --- /dev/null +++ b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs @@ -0,0 +1,106 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +use azure_core::{Bytes, Result}; +use azure_core_test::{ + perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata, PerfTestOption}, + TestContext, +}; +use azure_identity::DeveloperToolsCredential; +use azure_storage_blob::BlobContainerClient; +use futures::TryStreamExt; + +pub struct ListBlobTest { + count: u32, + client: BlobContainerClient, +} + +impl ListBlobTest { + fn create_list_blob_test(runner: &PerfRunner) -> CreatePerfTestReturn { + async fn create_test(runner: PerfRunner) -> Result> { + let count: Option<&String> = runner.try_get_test_arg("count")?; + + println!("ListBlobTest with count: {:?}", count); + let count = count.expect("count argument is mandatory").parse::()?; + println!("Parsed count: {}", count); + + let endpoint: Option<&String> = runner.try_get_test_arg("endpoint")?; + let endpoint = endpoint.expect("endpoint argument is mandatory").clone(); + println!("Using endpoint: {}", endpoint); + + let container_name = format!("perf-container-{}", uuid::Uuid::new_v4()); + let credential = DeveloperToolsCredential::new(None)?; + let client = BlobContainerClient::new(&endpoint, container_name, credential, None)?; + + Ok(Box::new(ListBlobTest { count, client }) as Box) + } + // Here you would create and return an instance of your performance test. 
+ // For example: + Box::pin(create_test(runner.clone())) + } + + pub fn test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "list_blob", + description: "List blobs in a container", + options: vec![ + PerfTestOption { + name: "count", + display_message: "The number of blobs to list", + mandatory: true, + short_activator: 'c', + long_activator: "count", + expected_args_len: 1, + ..Default::default() + }, + PerfTestOption { + name: "endpoint", + display_message: "The endpoint of the blob storage", + mandatory: true, + short_activator: 'e', + long_activator: "endpoint", + expected_args_len: 1, + ..Default::default() + }, + ], + create_test: Self::create_list_blob_test, + } + } +} + +#[async_trait::async_trait] +impl PerfTest for ListBlobTest { + async fn setup(&self, _context: &TestContext) -> azure_core::Result<()> { + // Setup code before running the test + + let result = self.client.create_container(None).await?; + + for i in 0..self.count { + let blob_name = format!("blob-{}", i); + let blob_client = self.client.blob_client(blob_name); + + let body = vec![0u8; 1024 * 1024]; // 1 MB blob + let body_bytes = Bytes::from(body); + + let result = blob_client.upload(body_bytes.into(), true, 5, None).await?; + } + + Ok(()) + } + + async fn run(&self) -> azure_core::Result<()> { + // The actual performance test code + + let mut iterator = self.client.list_blobs(None)?; + while let Some(blob_segment) = iterator.try_next().await? { + let body = blob_segment.into_body()?; + } + + Ok(()) + } + + async fn cleanup(&self, _context: &TestContext) -> azure_core::Result<()> { + // Cleanup code after running the test + Ok(()) + } +} diff --git a/sdk/storage/azure_storage_blob/perf/perf_tests.rs b/sdk/storage/azure_storage_blob/perf/perf_tests.rs new file mode 100644 index 0000000000..9786572c6f --- /dev/null +++ b/sdk/storage/azure_storage_blob/perf/perf_tests.rs @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/// list_blob performance test. 
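+///
+/// A typical invocation (the endpoint below is a placeholder; adjust the options to your
+/// environment) might look like:
+///
+/// ```text
+/// cargo test --package azure_storage_blob --test performance_tests -- --duration 10 --parallel 8 list_blob -c 100 -e https://<account>.blob.core.windows.net/
+/// ```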
+mod list_blob_test; + +use azure_core_test::perf::PerfRunner; +use list_blob_test::ListBlobTest; + +#[tokio::main] +async fn main() -> azure_core::Result<()> { + let runner = PerfRunner::new( + env!("CARGO_MANIFEST_DIR"), + "foo", + vec![ListBlobTest::test_metadata()], + )?; + + runner.run().await?; + + Ok(()) +} From 90fb6606ad989ded3e92afc8f980eb50f869e632 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Fri, 3 Oct 2025 15:16:50 -0700 Subject: [PATCH 07/18] Cleaned up warnings --- sdk/storage/azure_storage_blob/perf/list_blob_test.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs index 8152460d10..0794f2a6e8 100644 --- a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs +++ b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs @@ -73,7 +73,7 @@ impl PerfTest for ListBlobTest { async fn setup(&self, _context: &TestContext) -> azure_core::Result<()> { // Setup code before running the test - let result = self.client.create_container(None).await?; + let _result = self.client.create_container(None).await?; for i in 0..self.count { let blob_name = format!("blob-{}", i); @@ -82,7 +82,7 @@ impl PerfTest for ListBlobTest { let body = vec![0u8; 1024 * 1024]; // 1 MB blob let body_bytes = Bytes::from(body); - let result = blob_client.upload(body_bytes.into(), true, 5, None).await?; + let _result = blob_client.upload(body_bytes.into(), true, 5, None).await?; } Ok(()) @@ -93,7 +93,7 @@ impl PerfTest for ListBlobTest { let mut iterator = self.client.list_blobs(None)?; while let Some(blob_segment) = iterator.try_next().await? { - let body = blob_segment.into_body()?; + let _body = blob_segment.into_body()?; } Ok(()) From 51bbcf189107e752715f146c5ffd8c0501f62233 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Fri, 3 Oct 2025 15:39:46 -0700 Subject: [PATCH 08/18] Don't fail tests if no test is selected --- sdk/core/azure_core_test/src/perf/mod.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index 219181b8e8..f2e484328d 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -205,7 +205,13 @@ impl PerfRunner { pub async fn run(&self) -> azure_core::Result<()> { // We can only run tests if there was a test selected. 
- let test_name = self.get_selected_test_name()?; + let test_name = match self.get_selected_test_name() { + Ok(name) => name, + Err(e) => { + eprintln!("Error getting selected test name: {}", e); + return Ok(()); + } + }; let test = self .tests From 84a650029d65c6ba73ed2bb2f836466a298a6f02 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Mon, 6 Oct 2025 15:34:19 -0700 Subject: [PATCH 09/18] Start hooking test context into perf logic --- sdk/core/azure_core_test/src/lib.rs | 2 + .../azure_core_test/src/perf/config_tests.rs | 18 +++-- .../src/perf/framework_tests.rs | 6 +- sdk/core/azure_core_test/src/perf/mod.rs | 79 ++++++++++++++----- sdk/core/azure_core_test/src/recording.rs | 13 ++- .../perf/get_secret.rs | 10 +-- sdk/storage/azure_storage_blob/assets.json | 2 +- .../azure_storage_blob/perf/list_blob_test.rs | 8 +- .../azure_storage_blob/perf/perf_tests.rs | 2 +- 9 files changed, 93 insertions(+), 47 deletions(-) diff --git a/sdk/core/azure_core_test/src/lib.rs b/sdk/core/azure_core_test/src/lib.rs index 752f10731b..960954761f 100644 --- a/sdk/core/azure_core_test/src/lib.rs +++ b/sdk/core/azure_core_test/src/lib.rs @@ -38,6 +38,8 @@ pub struct TestContext { recording: Option, } +unsafe impl Send for TestContext {} + impl TestContext { pub(crate) fn new( crate_dir: &'static str, diff --git a/sdk/core/azure_core_test/src/perf/config_tests.rs b/sdk/core/azure_core_test/src/perf/config_tests.rs index ab4424a8ff..0609c9ee42 100644 --- a/sdk/core/azure_core_test/src/perf/config_tests.rs +++ b/sdk/core/azure_core_test/src/perf/config_tests.rs @@ -564,21 +564,21 @@ struct ComplexTest {} #[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?send))] #[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] impl PerfTest for ComplexTest { - async fn setup(&self, _context: &TestContext) -> azure_core::Result<()> { + async fn setup(&self, _context: Arc) -> azure_core::Result<()> { println!("Setting up ComplexTest..."); // Simulate some async setup work tokio::time::sleep(std::time::Duration::from_millis(100)).await; Ok(()) } - async fn cleanup(&self, _context: &TestContext) -> azure_core::Result<()> { + async fn cleanup(&self, _context: Arc) -> azure_core::Result<()> { println!("Cleaning up ComplexTest..."); // Simulate some async cleanup work tokio::time::sleep(std::time::Duration::from_millis(100)).await; Ok(()) } - async fn run(&self /*, _context: &TestContext*/) -> azure_core::Result<()> { + async fn run(&self, _context: Arc) -> azure_core::Result<()> { // Simulate some async test work println!("Running ComplexTest..."); tokio::time::sleep(std::time::Duration::from_millis(200)).await; @@ -667,19 +667,21 @@ async fn test_perf_runner_with_test_functions() { let crate_dir = env!("CARGO_MANIFEST_DIR"); - let test_context = TestContext::new(crate_dir, crate_dir, runner.tests[0].name) - .expect("Failed to create TestContext"); + let test_context = Arc::new( + TestContext::new(crate_dir, crate_dir, runner.tests[0].name) + .expect("Failed to create TestContext"), + ); perf_tests_impl - .setup(&test_context) + .setup(test_context.clone()) .await .expect("Setup failed"); perf_tests_impl - .run(/*&test_context*/) + .run(test_context.clone()) .await .expect("Run failed"); perf_tests_impl - .cleanup(&test_context) + .cleanup(test_context.clone()) .await .expect("Cleanup failed"); } diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs index 6d7597b172..d5b375c12b 100644 --- 
a/sdk/core/azure_core_test/src/perf/framework_tests.rs +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -35,10 +35,10 @@ fn create_fibonacci1_test(runner: &PerfRunner) -> CreatePerfTestReturn { #[async_trait::async_trait] impl PerfTest for Fibonacci1Test { - async fn setup(&self, _context: &TestContext) -> azure_core::Result<()> { + async fn setup(&self, _context: Arc) -> azure_core::Result<()> { Ok(()) } - async fn run(&self /*, _context: &TestContext*/) -> azure_core::Result<()> { + async fn run(&self, _context: Arc) -> azure_core::Result<()> { let _result = Self::fibonacci(self.count); // This is a CPU bound test, so yield to allow other tasks to run. Otherwise we jam the tokio scheduler. // Note that this significantly reduces the performance of the test, but it is necessary to allow parallelism. @@ -47,7 +47,7 @@ fn create_fibonacci1_test(runner: &PerfRunner) -> CreatePerfTestReturn { tokio::task::yield_now().await; Ok(()) } - async fn cleanup(&self, _context: &TestContext) -> azure_core::Result<()> { + async fn cleanup(&self, _context: Arc) -> azure_core::Result<()> { Ok(()) } } diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index f2e484328d..97d11b45e2 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -18,11 +18,25 @@ use std::{ }; use tokio::{select, task::JoinSet}; +/// A trait representing a performance test. +/// +/// Performance tests have three phases: +/// 1. `setup`: Prepare the test environment. This is called once per iteration. +/// 2. `run`: Execute the performance test. This is called repeatedly for the duration of the test. +/// 3. `cleanup`: Clean up the test environment. This is called once +/// +/// Note that the "run" phase will be executed in parallel across multiple tasks, so it must be thread-safe. #[async_trait::async_trait] pub trait PerfTest: Send + Sync { - async fn setup(&self, context: &TestContext) -> azure_core::Result<()>; - async fn run(&self /*, context: &TestContext*/) -> azure_core::Result<()>; - async fn cleanup(&self, context: &TestContext) -> azure_core::Result<()>; + /// Set up the test environment. + /// + /// Performs whatever steps are needed to set up the test environment. This method is called once per iteration of the test. + /// + /// # Arguments + /// - `context`: An `Arc` to a `TestContext` that provides context information for the test. 
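+ ///
+ /// # Example
+ ///
+ /// A minimal no-op sketch (the `NoOpTest` type is hypothetical and shown only to
+ /// illustrate the shape of an implementation):
+ ///
+ /// ```ignore
+ /// struct NoOpTest;
+ ///
+ /// #[async_trait::async_trait]
+ /// impl PerfTest for NoOpTest {
+ ///     async fn setup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { Ok(()) }
+ ///     async fn run(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { Ok(()) }
+ ///     async fn cleanup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { Ok(()) }
+ /// }
+ /// ```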
+ async fn setup(&self, context: Arc) -> azure_core::Result<()>; + async fn run(&self, context: Arc) -> azure_core::Result<()>; + async fn cleanup(&self, context: Arc) -> azure_core::Result<()>; } pub type CreatePerfTestReturn = @@ -226,7 +240,18 @@ impl PerfRunner { let test_instance = (test.create_test)(self).await?; let test_instance: Arc = Arc::from(test_instance); - let context = TestContext::new(self.package_dir, self.module_name, test.name)?; + let test_mode = crate::TestMode::current()?; + + let context = Arc::new( + crate::recorded::start( + test_mode, + self.package_dir, + self.module_name, + test.name, + None, + ) + .await?, + ); for iteration in 0..self.options.iterations { println!( @@ -236,25 +261,45 @@ impl PerfRunner { ); println!("========== Starting test setup =========="); - test_instance.setup(&context).await?; + test_instance.setup(context.clone()).await?; println!( "========== Starting test warmup for {} ==========", self.options.warmup ); - self.run_test_for(Arc::clone(&test_instance), test.name, self.options.warmup) + let mut test_contexts = Vec::new(); + for _ in 0..self.options.parallel { + let context = Arc::new( + crate::recorded::start( + test_mode, + self.package_dir, + self.module_name, + test.name, + None, + ) + .await?, + ); + test_contexts.push(context); + } + + self.run_test_for(test_instance.clone(), &test_contexts, self.options.warmup) .await?; println!( "========== Starting test run for {} ==========", self.options.duration ); - self.run_test_for(Arc::clone(&test_instance), test.name, self.options.duration) - .await?; + + self.run_test_for( + Arc::clone(&test_instance), + &test_contexts, + self.options.duration, + ) + .await?; if !self.options.no_cleanup { println!("========== Starting test cleanup =========="); - test_instance.cleanup(&context).await?; + test_instance.cleanup(context.clone()).await?; } let iteration_count = self.progress.load(Ordering::SeqCst); @@ -276,27 +321,23 @@ impl PerfRunner { pub async fn run_test_for( &self, test_instance: Arc, - _test_name: &str, + test_contexts: &[Arc], duration: Duration, ) -> azure_core::Result<()> { + // Reset the performance measurements before starting the test. 
self.progress.store(0, Ordering::SeqCst); let mut tasks: JoinSet> = JoinSet::new(); - for _ in 0..self.options.parallel { + (0..self.options.parallel).for_each(|i| { let test_instance_clone = Arc::clone(&test_instance); let progress = self.progress.clone(); - // let package_dir = self.package_dir; - // let module_name = self.module_name; + let test_context = test_contexts[i].clone(); tasks.spawn(async move { - // let context = - // TestContext::new(package_dir, module_name, " test_name_copy.as_str()")?; - - tokio::task::yield_now().await; loop { - test_instance_clone.run(/*&context*/).await?; + test_instance_clone.run(test_context.clone()).await?; progress.fetch_add(1, Ordering::SeqCst); } }); - } + }); let start = tokio::time::Instant::now(); let timeout = tokio::time::Duration::from_secs_f64(duration.as_seconds_f64()); select!( diff --git a/sdk/core/azure_core_test/src/recording.rs b/sdk/core/azure_core_test/src/recording.rs index 4b572268ce..9c115e0bf9 100644 --- a/sdk/core/azure_core_test/src/recording.rs +++ b/sdk/core/azure_core_test/src/recording.rs @@ -36,7 +36,6 @@ use rand::{ use rand_chacha::ChaCha20Rng; use std::{ borrow::Cow, - cell::OnceCell, collections::HashMap, env, sync::{Arc, Mutex, OnceLock, RwLock}, @@ -51,8 +50,8 @@ pub struct Recording { #[allow(dead_code)] span: EnteredSpan, proxy: Option>, - test_mode_policy: OnceCell>, - recording_policy: OnceCell>, + test_mode_policy: OnceLock>, + recording_policy: OnceLock>, service_directory: String, recording_file: String, recording_assets_file: Option, @@ -361,8 +360,8 @@ impl Recording { test_mode, span, proxy, - test_mode_policy: OnceCell::new(), - recording_policy: OnceCell::new(), + test_mode_policy: OnceLock::new(), + recording_policy: OnceLock::new(), service_directory: service_directory.into(), recording_file, recording_assets_file, @@ -380,8 +379,8 @@ impl Recording { test_mode: TestMode::Playback, span: span.entered(), proxy: None, - test_mode_policy: OnceCell::new(), - recording_policy: OnceCell::new(), + test_mode_policy: OnceLock::new(), + recording_policy: OnceLock::new(), service_directory: String::from("sdk/core"), recording_file: String::from("none"), recording_assets_file: None, diff --git a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs index cb0ae18733..72d70b92d0 100644 --- a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs +++ b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs @@ -13,7 +13,7 @@ //! cargo test --package azure_security_keyvault_secrets --test performance_tests -- --duration 10 --parallel 20 get_secret -u https://.vault.azure.net/ //! 
-use std::sync::OnceLock; +use std::sync::{Arc, OnceLock}; use azure_core::Result; use azure_core_test::{ @@ -80,7 +80,7 @@ impl GetSecrets { #[cfg_attr(target_arch="wasm32", async_trait::async_trait(?Send))] #[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] impl PerfTest for GetSecrets { - async fn setup(&self, _context: &TestContext) -> azure_core::Result<()> { + async fn setup(&self, _context: Arc) -> azure_core::Result<()> { let credential = azure_identity::DeveloperToolsCredential::new(None)?; let client = SecretClient::new(self.vault_url.as_str(), credential.clone(), None)?; self.client.get_or_init(|| client); @@ -100,10 +100,10 @@ impl PerfTest for GetSecrets { .await?; Ok(()) } - async fn cleanup(&self, _context: &TestContext) -> azure_core::Result<()> { + async fn cleanup(&self, _context: Arc) -> azure_core::Result<()> { Ok(()) } - async fn run(&self) -> Result<()> { + async fn run(&self, _context: Arc) -> Result<()> { let _secret = self .client .get() @@ -119,7 +119,7 @@ impl PerfTest for GetSecrets { async fn main() -> azure_core::Result<()> { let runner = PerfRunner::new( env!("CARGO_MANIFEST_DIR"), - "foo", + file!(), vec![GetSecrets::test_metadata()], )?; diff --git a/sdk/storage/azure_storage_blob/assets.json b/sdk/storage/azure_storage_blob/assets.json index 3bb1e158e9..e095ad34cf 100644 --- a/sdk/storage/azure_storage_blob/assets.json +++ b/sdk/storage/azure_storage_blob/assets.json @@ -1,6 +1,6 @@ { "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "rust", - "Tag": "rust/azure_storage_blob_fc6c153d44", + "Tag": "rust/azure_storage_blob_4dd8ebabce", "TagPrefix": "rust/azure_storage_blob" } diff --git a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs index 0794f2a6e8..19b34e1107 100644 --- a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs +++ b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs @@ -1,6 +1,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
+use std::sync::Arc; + use azure_core::{Bytes, Result}; use azure_core_test::{ perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata, PerfTestOption}, @@ -70,7 +72,7 @@ impl ListBlobTest { #[async_trait::async_trait] impl PerfTest for ListBlobTest { - async fn setup(&self, _context: &TestContext) -> azure_core::Result<()> { + async fn setup(&self, _context: Arc) -> azure_core::Result<()> { // Setup code before running the test let _result = self.client.create_container(None).await?; @@ -88,7 +90,7 @@ impl PerfTest for ListBlobTest { Ok(()) } - async fn run(&self) -> azure_core::Result<()> { + async fn run(&self, _context: Arc) -> azure_core::Result<()> { // The actual performance test code let mut iterator = self.client.list_blobs(None)?; @@ -99,7 +101,7 @@ impl PerfTest for ListBlobTest { Ok(()) } - async fn cleanup(&self, _context: &TestContext) -> azure_core::Result<()> { + async fn cleanup(&self, _context: Arc) -> azure_core::Result<()> { // Cleanup code after running the test Ok(()) } diff --git a/sdk/storage/azure_storage_blob/perf/perf_tests.rs b/sdk/storage/azure_storage_blob/perf/perf_tests.rs index 9786572c6f..ea01bc9069 100644 --- a/sdk/storage/azure_storage_blob/perf/perf_tests.rs +++ b/sdk/storage/azure_storage_blob/perf/perf_tests.rs @@ -11,7 +11,7 @@ use list_blob_test::ListBlobTest; async fn main() -> azure_core::Result<()> { let runner = PerfRunner::new( env!("CARGO_MANIFEST_DIR"), - "foo", + file!(), vec![ListBlobTest::test_metadata()], )?; From 113b883d3b07dce05951085703f453f769dcdabe Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Wed, 8 Oct 2025 14:01:49 -0700 Subject: [PATCH 10/18] Generate output json file with perf output --- sdk/storage/perf-resources.bicep | 115 +++++++++++++++++++++++++++++++ sdk/storage/perf-tests.yml | 35 ++++++++++ sdk/storage/perf.yml | 38 ++++++++++ 3 files changed, 188 insertions(+) create mode 100644 sdk/storage/perf-resources.bicep create mode 100644 sdk/storage/perf-tests.yml create mode 100644 sdk/storage/perf.yml diff --git a/sdk/storage/perf-resources.bicep b/sdk/storage/perf-resources.bicep new file mode 100644 index 0000000000..8a802cb3e4 --- /dev/null +++ b/sdk/storage/perf-resources.bicep @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +param baseName string = resourceGroup().name +param testApplicationOid string +param location string = resourceGroup().location + +var blobDataContributorRoleId = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' +var blobDataOwnerRoleId = 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b' +var encryption = { + keySource: 'Microsoft.Storage' + services: { + blob: { + enabled: true + } + file: { + enabled: true + } + } +} +var networkAcls = { + bypass: 'AzureServices' + defaultAction: 'Allow' + ipRules: [] + virtualNetworkRules: [] +} + +resource blobDataContributor 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(blobDataContributorRoleId, resourceGroup().id) + properties: { + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataContributorRoleId) + principalId: testApplicationOid + } +} + +resource blobDataOwner 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(blobDataOwnerRoleId, resourceGroup().id) + properties: { + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataOwnerRoleId) + principalId: testApplicationOid + } +} + +resource storage 'Microsoft.Storage/storageAccounts@2024-01-01' = { + name: '${baseName}blob' + location: location + kind: 'BlockBlobStorage' + sku: { + name: 'Premium_LRS' + } + properties: { + accessTier: 'Hot' + allowSharedKeyAccess: false + encryption: encryption + networkAcls: networkAcls + supportsHttpsTrafficOnly: true + } +} + +output AZURE_STORAGE_ACCOUNT_NAME string = storage.name + +// param baseName string = resourceGroup().name +// param location string = resourceGroup().location +// param testApplicationOid string + +// var blobDataContributorRoleId = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' +// var blobDataOwnerRoleId = 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b' + +// var networkAcls = { +// bypass: 'AzureServices' +// defaultAction: 'Allow' +// ipRules: [] +// virtualNetworkRules: [] +// } + +// resource blobDataContributor 'Microsoft.Authorization/roleAssignments@2022-04-01' = { +// name: guid(blobDataContributorRoleId, resourceGroup().id) +// properties: { +// roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataContributorRoleId) +// principalId: testApplicationOid +// } +// } + +// resource blobDataOwner 'Microsoft.Authorization/roleAssignments@2022-04-01' = { +// name: guid(blobDataOwnerRoleId, resourceGroup().id) +// properties: { +// roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataOwnerRoleId) +// principalId: testApplicationOid +// } +// } + +// resource storageAccount 'Microsoft.Storage/storageAccounts@2019-06-01' = { +// name: '${baseName}blob' +// location: location +// kind: 'BlockBlobStorage' +// sku: { +// name: 'Premium_LRS' +// } +// properties: { +// allowSharedKeyAccess: false +// publicNetworkAccess: 'SecuredByPerimeter' +// supportsHttpsTrafficOnly: true +// networkAcls: networkAcls +// } +// } + +// var name = storageAccount.name +// var key = storageAccount.listKeys().keys[0].value +// var connectionString = 'DefaultEndpointsProtocol=https;AccountName=${name};AccountKey=${key}' + +// output AZURE_STORAGE_ACCOUNT_NAME string = name +// output AZURE_STORAGE_ACCOUNT_KEY string = key +// output AZURE_STORAGE_CONNECTION_STRING string = connectionString +// output STANDARD_STORAGE_CONNECTION_STRING string = connectionString +// output STORAGE_CONNECTION_STRING string = connectionString diff --git a/sdk/storage/perf-tests.yml b/sdk/storage/perf-tests.yml new file mode 100644 index 0000000000..e070cadfae --- 
/dev/null +++ b/sdk/storage/perf-tests.yml @@ -0,0 +1,35 @@ +Service: storage-blob + +Project: azure-storage-blobs-perf + +PrimaryPackage: azure_storage_blobs + +PackageVersions: +- azure_storage_blobs: 12.6.2 + azure_core: 1.7.2 +- azure_storage_blobs: source + azure_core: source + +Tests: +- Test: download + Class: DownloadBlob + Arguments: + - --size 10240 --parallel 64 + - --size 10485760 --parallel 32 + - --size 1073741824 --parallel 1 --warmup 60 --duration 60 + - --size 1073741824 --parallel 8 --warmup 60 --duration 60 + +- Test: upload + Class: UploadBlob + Arguments: + - --size 10240 --parallel 64 + - --size 10485760 --parallel 32 + - --size 1073741824 --parallel 1 --warmup 60 --duration 60 + - --size 1073741824 --parallel 8 --warmup 60 --duration 60 + +- Test: list-blobs + Class: ListBlob + Arguments: + - --count 5 --parallel 64 + - --count 500 --parallel 32 + - --count 50000 --parallel 32 --warmup 60 --duration 60 diff --git a/sdk/storage/perf.yml b/sdk/storage/perf.yml new file mode 100644 index 0000000000..a43c7c913d --- /dev/null +++ b/sdk/storage/perf.yml @@ -0,0 +1,38 @@ +parameters: +- name: PackageVersions + displayName: PackageVersions (regex of package versions to run) + type: string + default: '12|source' +- name: Tests + displayName: Tests (regex of tests to run) + type: string + default: '^(download|upload|list-blobs)$' +- name: Arguments + displayName: Arguments (regex of arguments to run) + type: string + default: '(10240)|(10485760)|(1073741824)|(5 )|(500 )|(50000 )' +- name: Iterations + displayName: Iterations (times to run each test) + type: number + default: '5' +- name: Profile + type: boolean + default: false +- name: AdditionalArguments + displayName: AdditionalArguments (passed to PerfAutomation) + type: string + default: ' ' + +extends: + template: /eng/pipelines/templates/jobs/perf.yml + parameters: + ServiceDirectory: storage/azure_storage_blobs + PackageVersions: ${{ parameters.PackageVersions }} + Tests: ${{ parameters.Tests }} + Arguments: ${{ parameters.Arguments }} + Iterations: ${{ parameters.Iterations }} + AdditionalArguments: ${{ parameters.AdditionalArguments }} + Profile: ${{ parameters.Profile }} + EnvVars: + # This is set in the InstallLanguageSteps + VCPKG_BINARY_SOURCES_SECRET: $(VCPKG_BINARY_SOURCES_SECRET) From e5b24c007eb0ef8f85433a1efa5d0105ccb57680 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Wed, 8 Oct 2025 14:30:27 -0700 Subject: [PATCH 11/18] Updated tests --- sdk/core/azure_core_test/src/perf/mod.rs | 90 +++++++++++++++++-- .../perf/get_secret.rs | 41 +++++---- 2 files changed, 108 insertions(+), 23 deletions(-) diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index 97d11b45e2..91d1603d3a 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -7,8 +7,10 @@ use crate::TestContext; use azure_core::{time::Duration, Error, Result}; use clap::ArgMatches; +use serde::Serialize; use std::{ any::Any, + fmt::Display, future::Future, pin::Pin, sync::{ @@ -81,6 +83,17 @@ pub struct PerfTestOption { pub sensitive: bool, } +#[derive(Debug, Clone, Default, Serialize)] +#[allow(dead_code)] +struct PerfTestOutputs { + // * Package Versions - a set of packages tested and their versions. 
+ pub package_versions: Vec, + pub test_name: String, + pub operations_per_second: f64, + pub average_cpu_use: Option, + pub average_memory_use: Option, +} + #[derive(Debug, Clone)] struct PerfRunnerOptions { no_cleanup: bool, @@ -89,10 +102,25 @@ struct PerfRunnerOptions { duration: Duration, warmup: Duration, disable_progress: bool, - #[allow(dead_code)] test_results_filename: String, } +impl Display for PerfRunnerOptions { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "PerfRunnerOptions {{ no_cleanup: {}, iterations: {}, parallel: {}, duration: {}, warmup: {}, disable_progress: {}, test_results_filename: '{}' }}", + self.no_cleanup, + self.iterations, + self.parallel, + self.duration, + self.warmup, + self.disable_progress, + self.test_results_filename + ) + } +} + impl PerfRunnerOptions {} impl From<&ArgMatches> for PerfRunnerOptions { @@ -134,13 +162,27 @@ pub struct PerfRunner { } impl PerfRunner { + /// Run the performance tests in `tests` using the current process command line. + /// + /// # Arguments + /// + /// * package_dir - The directory containing the package with the tests. Typically `env!("CARGO_PACKAGE_DIR")` + /// * module_name - the name of the module containing the test, typically `file!()` + /// * tests - the set of tests to configure. + /// pub fn new( package_dir: &'static str, module_name: &'static str, tests: Vec, ) -> azure_core::Result { let command = Self::get_command_from_metadata(&tests); - let arguments = command.get_matches(); + let arguments = command.try_get_matches().map_err(|e| { + azure_core::error::Error::with_error( + azure_core::error::ErrorKind::Other, + e, + "Failed to parse command line arguments.", + ) + })?; Ok(Self { options: PerfRunnerOptions::from(&arguments), tests, @@ -151,7 +193,7 @@ impl PerfRunner { }) } - #[cfg(test)] + /// Run the performance tests in `tests` with the command line specified in `args` pub fn with_command_line( package_dir: &'static str, module_name: &'static str, @@ -253,6 +295,8 @@ impl PerfRunner { .await?, ); + println!("Test Configuration: {:#}", self.options); + for iteration in 0..self.options.iterations { println!( "Running test iteration {}/{}", @@ -304,17 +348,51 @@ impl PerfRunner { let iteration_count = self.progress.load(Ordering::SeqCst); println!( - "Completed test iteration {}/{} - {} iterations run in {} seconds - {} seconds/iteration", + "Completed test iteration {}/{} - {} iterations run in {} seconds - {} iterations/second, {} seconds/iteration", iteration + 1, self.options.iterations, iteration_count, self.options.duration.as_seconds_f64(), + iteration_count as f64 / self.options.duration.as_seconds_f64(), self.options.duration.as_seconds_f64() / iteration_count as f64 ); let operations_per_second = + iteration_count as f64 / self.options.duration.as_seconds_f64(); + let seconds_per_operation = self.options.duration.as_seconds_f64() / iteration_count as f64; - let duration_per_operation = Duration::seconds_f64(operations_per_second); - println!("{:4} seconds/operation", duration_per_operation); + let duration_per_operation = Duration::seconds_f64(seconds_per_operation); + println!("{operations_per_second:4} operations/second, {duration_per_operation:4} seconds/operation"); + + if !self.options.test_results_filename.is_empty() { + // Write out the results to a file. 
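// As a rough sketch of what the write below emits (serde's default field
// names; `None` serializes as `null`; the values shown are illustrative):
//
//   {
//     "package_versions": ["<package dir>"],
//     "test_name": "get_secret",
//     "operations_per_second": 1234.5,
//     "average_cpu_use": null,
//     "average_memory_use": null
//   }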
+ println!( + "Writing test results to {}", + self.options.test_results_filename + ); + let results = PerfTestOutputs { + test_name: test.name.to_string(), + package_versions: vec![self.package_dir.to_string()], + operations_per_second, + average_cpu_use: None, + average_memory_use: None, + }; + + let json = serde_json::to_string_pretty(&results).map_err(|e| { + Error::with_error( + azure_core::error::ErrorKind::Other, + e, + "Failed to serialize test results to JSON.", + ) + })?; + println!("Test results: {}", json); + std::fs::write(&self.options.test_results_filename, json).map_err(|e| { + Error::with_error( + azure_core::error::ErrorKind::Io, + e, + "Failed to write test results to file.", + ) + })?; + } } Ok(()) } diff --git a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs index 72d70b92d0..c6f3028a4e 100644 --- a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs +++ b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs @@ -18,10 +18,11 @@ use std::sync::{Arc, OnceLock}; use azure_core::Result; use azure_core_test::{ perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata, PerfTestOption}, - TestContext, + Recording, TestContext, +}; +use azure_security_keyvault_secrets::{ + models::SetSecretParameters, SecretClient, SecretClientOptions, }; -use azure_security_keyvault_secrets::{models::SetSecretParameters, SecretClient}; -use rand::{distr::Alphanumeric, Rng}; struct GetSecrets { vault_url: String, random_key_name: OnceLock, @@ -62,34 +63,39 @@ impl GetSecrets { Box::pin(create_secret_client(runner.clone())) } - fn create_random_key_name() -> String { - let random_suffix: String = rand::rng() - .sample_iter(&Alphanumeric) - .take(8) - .map(char::from) - .collect(); + fn create_random_key_name(recording: &Recording) -> String { + let random_suffix: String = recording.random_string::<8>(Some("perf-")); format!("perf-{}", random_suffix) } - fn get_random_key_name(&self) -> &String { + fn get_random_key_name(&self, recording: &Recording) -> &String { self.random_key_name - .get_or_init(Self::create_random_key_name) + .get_or_init(|| Self::create_random_key_name(recording)) } } #[cfg_attr(target_arch="wasm32", async_trait::async_trait(?Send))] #[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] impl PerfTest for GetSecrets { - async fn setup(&self, _context: Arc) -> azure_core::Result<()> { - let credential = azure_identity::DeveloperToolsCredential::new(None)?; - let client = SecretClient::new(self.vault_url.as_str(), credential.clone(), None)?; + async fn setup(&self, context: Arc) -> azure_core::Result<()> { + let recording = context.recording(); + let credential = recording.credential(); + + let mut client_options = SecretClientOptions::default(); + recording.instrument(&mut client_options.client_options); + + let client = SecretClient::new( + self.vault_url.as_str(), + credential.clone(), + Some(client_options), + )?; self.client.get_or_init(|| client); self.client .get() .unwrap() .set_secret( - self.get_random_key_name(), + self.get_random_key_name(recording), SetSecretParameters { value: Some("secret_value".into()), ..Default::default() @@ -103,12 +109,13 @@ impl PerfTest for GetSecrets { async fn cleanup(&self, _context: Arc) -> azure_core::Result<()> { Ok(()) } - async fn run(&self, _context: Arc) -> Result<()> { + async fn run(&self, context: Arc) -> Result<()> { + let recording = context.recording(); let _secret = self .client .get() .unwrap() 
- .get_secret(self.get_random_key_name(), None) + .get_secret(self.get_random_key_name(recording), None) .await? .into_body()?; Ok(()) From 13e5e2d1384f553e6462213bf609a632dce5c144 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Wed, 8 Oct 2025 16:32:20 -0700 Subject: [PATCH 12/18] Updates to get perf automation to work --- sdk/core/azure_core_test/src/perf/mod.rs | 15 +++++--- .../azure_storage_blob/perf/list_blob_test.rs | 11 ++++-- sdk/storage/perf-tests.yml | 36 +++++++++---------- 3 files changed, 38 insertions(+), 24 deletions(-) diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index 91d1603d3a..b2ae1e7afc 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -430,7 +430,13 @@ impl PerfRunner { // start.elapsed(), // self.progress.load(Ordering::SeqCst) as f64 / start.elapsed().as_secs_f64(), // Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); - println!("Current {:3}, Total {:5} {:4}", current_total - last_count, current_total, Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); + if start.elapsed().as_secs_f64() != 0f64 && current_total != 0 { + println!("Current {:3}, Total {:5} {:4}", current_total - last_count, current_total, Duration::seconds_f64( start.elapsed().as_secs_f64() / current_total as f64 )); + } + else{ + println!("Current {:3}, Total {:5} ---", current_total - last_count, current_total); + } + last_count = current_total; } }, if !self.options.disable_progress => {}, @@ -464,12 +470,13 @@ impl PerfRunner { .value_parser(clap::value_parser!(u32)) .global(false), ) + .arg(clap::arg!(--sync).global(true).required(false)) .arg( clap::arg!(--parallel "The number of concurrent tasks to use when running each test") .required(false) .default_value("1") .value_parser(clap::value_parser!(usize)) - .global(false), + .global(true), ) .arg(clap::arg!(--"no-progress" "Disable progress reporting").required(false).global(false)) .arg( @@ -477,14 +484,14 @@ impl PerfRunner { .required(false) .default_value("30") .value_parser(clap::value_parser!(i64)) - .global(false), + .global(true), ) .arg( clap::arg!(--warmup "The duration of the warmup period in seconds") .required(false) .default_value("5") .value_parser(clap::value_parser!(i64)) - .global(false), + .global(true), ) .arg( clap::arg!(--"test-results" "The file to write test results to") diff --git a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs index 19b34e1107..4e8311f196 100644 --- a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs +++ b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs @@ -27,7 +27,14 @@ impl ListBlobTest { println!("Parsed count: {}", count); let endpoint: Option<&String> = runner.try_get_test_arg("endpoint")?; - let endpoint = endpoint.expect("endpoint argument is mandatory").clone(); + let endpoint = match endpoint { + Some(e) => e.clone(), + None => format!( + "https://{}.blob.core.windows.net", + std::env::var("AZURE_STORAGE_ACCOUNT_NAME") + .expect("AZURE_STORAGE_ACCOUNT_NAME is not set") + ), + }; println!("Using endpoint: {}", endpoint); let container_name = format!("perf-container-{}", uuid::Uuid::new_v4()); @@ -58,7 +65,7 @@ impl ListBlobTest { PerfTestOption { name: "endpoint", display_message: "The endpoint of the blob storage", - mandatory: true, + mandatory: false, short_activator: 'e', long_activator: "endpoint", 
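// When --endpoint is omitted, create_list_blob_test (above) falls back to
// https://{AZURE_STORAGE_ACCOUNT_NAME}.blob.core.windows.net, which is why
// this option is no longer mandatory.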
expected_args_len: 1, diff --git a/sdk/storage/perf-tests.yml b/sdk/storage/perf-tests.yml index e070cadfae..39fe0005ab 100644 --- a/sdk/storage/perf-tests.yml +++ b/sdk/storage/perf-tests.yml @@ -2,33 +2,33 @@ Service: storage-blob Project: azure-storage-blobs-perf -PrimaryPackage: azure_storage_blobs +PrimaryPackage: azure_storage_blob PackageVersions: -- azure_storage_blobs: 12.6.2 +- azure_storage_blob: 0.6.0 azure_core: 1.7.2 -- azure_storage_blobs: source +- azure_storage_blob: source azure_core: source Tests: -- Test: download - Class: DownloadBlob - Arguments: - - --size 10240 --parallel 64 - - --size 10485760 --parallel 32 - - --size 1073741824 --parallel 1 --warmup 60 --duration 60 - - --size 1073741824 --parallel 8 --warmup 60 --duration 60 +# - Test: download +# Class: DownloadBlob +# Arguments: +# - --size 10240 --parallel 64 +# - --size 10485760 --parallel 32 +# - --size 1073741824 --parallel 1 --warmup 60 --duration 60 +# - --size 1073741824 --parallel 8 --warmup 60 --duration 60 -- Test: upload - Class: UploadBlob - Arguments: - - --size 10240 --parallel 64 - - --size 10485760 --parallel 32 - - --size 1073741824 --parallel 1 --warmup 60 --duration 60 - - --size 1073741824 --parallel 8 --warmup 60 --duration 60 +# - Test: upload +# Class: UploadBlob +# Arguments: +# - --size 10240 --parallel 64 +# - --size 10485760 --parallel 32 +# - --size 1073741824 --parallel 1 --warmup 60 --duration 60 +# - --size 1073741824 --parallel 8 --warmup 60 --duration 60 - Test: list-blobs - Class: ListBlob + Class: list_blob Arguments: - --count 5 --parallel 64 - --count 500 --parallel 32 From 830cabcd44e91dd89531f281617192befda10097 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Wed, 8 Oct 2025 16:57:23 -0700 Subject: [PATCH 13/18] Removed specific versioned packages --- sdk/storage/perf-tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/storage/perf-tests.yml b/sdk/storage/perf-tests.yml index 39fe0005ab..ea7e63f9a4 100644 --- a/sdk/storage/perf-tests.yml +++ b/sdk/storage/perf-tests.yml @@ -5,8 +5,8 @@ Project: azure-storage-blobs-perf PrimaryPackage: azure_storage_blob PackageVersions: -- azure_storage_blob: 0.6.0 - azure_core: 1.7.2 +# - azure_storage_blob: 0.6.0 +# azure_core: 1.7.2 - azure_storage_blob: source azure_core: source From cf5e9fe4ac6f1cf4e1d85009c88ba84e524f0c21 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Thu, 9 Oct 2025 14:42:45 -0700 Subject: [PATCH 14/18] Cleaned up some test declaration logic; added start to perf autoring readme --- sdk/core/azure_core_test/src/perf/README.md | 117 ++++++++++++++++++ .../azure_core_test/src/perf/config_tests.rs | 6 +- .../src/perf/framework_tests.rs | 28 +++-- sdk/core/azure_core_test/src/perf/mod.rs | 29 +++-- .../Cargo.toml | 2 +- .../perf/get_secret.rs | 11 +- sdk/storage/azure_storage_blob/Cargo.toml | 2 +- .../azure_storage_blob/perf/list_blob_test.rs | 12 +- 8 files changed, 169 insertions(+), 38 deletions(-) diff --git a/sdk/core/azure_core_test/src/perf/README.md b/sdk/core/azure_core_test/src/perf/README.md index 09b20269d9..07d31ad300 100644 --- a/sdk/core/azure_core_test/src/perf/README.md +++ b/sdk/core/azure_core_test/src/perf/README.md @@ -1 +1,118 @@ # Performance Tests + +The Azure SDK defines a standardized set of performance tests which use a test framework defined by the [PerfAutomation tool](https://github.com/Azure/azure-sdk-tools/tree/main/tools/perf-automation). + +Performance tests are defined in a "perf" directory under the package root. 
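For instance, the Key Vault secrets package in this repository lays its perf test out as follows (other packages follow the same shape):

```text
sdk/keyvault/azure_security_keyvault_secrets/
├── Cargo.toml           # declares the `perf` test target (see below)
└── perf/
    └── get_secret.rs    # test entry point: metadata, PerfTest impl, main()
```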
+ +By convention, all performance tests are named "perf" and are invoked via: + +```bash +cargo test --package --test perf -- +``` + +where `package name` is the name of the rust package, `perf test name` is the name of the test you want to run, and `perf test arguments` is the arguments to that test. + +Each performance test has the following standardized parameters: + +* `--iterations ` - the number of iterations to run the test for. Default: 1 +* `--sync` - Run only synchronous tests. (ignored) +* `--parallel ` - the number of concurrent tasks to use when running each test. Default: 1 +* `--no-progress` - disable the once per second progress report. +* `--duration ` - the duration of each test in seconds. Default: 30 +* `--warmup ` - the duration of the warmup period in seconds. Default: 5 +* `--test-results ` - the file to write test results to (Default: tests/results.json) +* `--help` - show help. + +Each test has its own set of parameters which are specific to the test. + +## Test authoring + +Performance tests have three phases: + +1) Setup - Establish any resources needed to run the test. +1) Run - Actually perform the test. +1) Cleanup - Cleanup any resources used by the test. + +Each is defined by functions on the `PerfTest` trait. + +### Test Metadata + +Tests are defined by an instance of a `PerfTestMetadata` structure, which defines the name of the test, and other information about the test. + +A perf test has a name (`get_secret`, `list_blobs`, `upload_blob`, etc), a short description, a set of test options, and a pointer to a function which returns an instance of the test. + +Each perf test also has a set of command line options that are specific to the individual test, these are defined by a `PerfTestOptions` structure. It contains fields like help text for the option, activators + +Here is an example of test metadata for a performance test: + +```rust +PerfTestMetadata { + name: "get_secret", + description: "Get a secret from Key Vault", + options: vec![PerfTestOption { + name: "vault_url", + display_message: "The URL of the Key Vault to use in the test", + mandatory: true, + short_activator: 'u', + long_activator: "vault-url", + expected_args_len: 1, + ..Default::default() + }], + create_test: Self::create_new_test, +} +``` + +This defines a test named `get_secret` with a single required "vault_url" option. + +For this test, the `create_new_test` function looks like: + +```rust +fn create_new_test(runner: PerfRunner) -> CreatePerfTestReturn { + async move { + let vault_url_ref: Option<&String> = runner.try_get_test_arg("vault_url")?; + let vault_url = vault_url_ref + .expect("vault_url argument is mandatory") + .clone(); + Ok(Box::new(GetSecrets { + vault_url, + random_key_name: OnceLock::new(), + client: OnceLock::new(), + }) as Box) + } + .boxed() +} +``` + +### Declaring Tests + +The process of authoring tests starts with the cargo.toml file for your package. + +Add the following to the `cargo.toml` file: + +```toml +[[test]] +name = "perf" +path = "perf/get_secret.rs" +harness = false +``` + +This declares a test named `perf` (which is required for the perf automation tests) located in a directory named `perf` in a module named `get_secret.rs`. It also declares the test as *not* requiring the standard test harness - that's because the test defines its own test harness. 
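When a package bundles several perf tests, the pattern the storage package uses in this patch is to keep the single `perf` target and point it at an entry file that declares the other test modules and exposes one `main()`. A sketch (the `mod` line is inferred from `perf_tests.rs` importing `list_blob_test::ListBlobTest`):

```toml
[[test]]
name = "perf"
path = "perf/perf_tests.rs"   # entry point: `mod list_blob_test;` plus main()
harness = false
```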
+ +The contents of the test file should have the following: + +```rust +#[tokio::main] +async fn main() -> azure_core::Result<()> { + let runner = PerfRunner::new( + env!("CARGO_MANIFEST_DIR"), + file!(), + vec![GetSecrets::test_metadata()], + )?; + + runner.run().await?; + + Ok(()) +} +``` + +This declares a perf test runner with the defined test metadata and runs the performance test. If your performance test has more than one performance test, then it should be added to the final parameter to the `PerfRunner::new()` function. diff --git a/sdk/core/azure_core_test/src/perf/config_tests.rs b/sdk/core/azure_core_test/src/perf/config_tests.rs index 0609c9ee42..94cd19d415 100644 --- a/sdk/core/azure_core_test/src/perf/config_tests.rs +++ b/sdk/core/azure_core_test/src/perf/config_tests.rs @@ -10,7 +10,7 @@ use super::*; use std::{env, error::Error}; -fn create_failed_test(_runner: &PerfRunner) -> CreatePerfTestReturn { +fn create_failed_test(_runner: PerfRunner) -> CreatePerfTestReturn { Box::pin(async { Err(azure_core::Error::with_message( azure_core::error::ErrorKind::Other, @@ -586,7 +586,7 @@ impl PerfTest for ComplexTest { } } -fn complex_test_create(_runner: &PerfRunner) -> CreatePerfTestReturn { +fn complex_test_create(_runner: PerfRunner) -> CreatePerfTestReturn { Box::pin(async { Ok(Box::new(ComplexTest {}) as Box) }) } @@ -661,7 +661,7 @@ async fn test_perf_runner_with_test_functions() { let flag_value: bool = *flag_value.unwrap(); assert!(flag_value); - let perf_tests_impl = (runner.tests[0].create_test)(&runner) + let perf_tests_impl = (runner.tests[0].create_test)(runner.clone()) .await .expect("Failed to create test instance"); diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs index d5b375c12b..15f4f56da4 100644 --- a/sdk/core/azure_core_test/src/perf/framework_tests.rs +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -18,7 +18,7 @@ async fn test_perf_runner_with_no_tests() { assert!(result.is_err()); } -fn create_fibonacci1_test(runner: &PerfRunner) -> CreatePerfTestReturn { +fn create_fibonacci1_test(runner: PerfRunner) -> CreatePerfTestReturn { struct Fibonacci1Test { count: u32, } @@ -52,8 +52,25 @@ fn create_fibonacci1_test(runner: &PerfRunner) -> CreatePerfTestReturn { } } - // Helper function to handle the async creation of the test. - async fn create_test(runner: PerfRunner) -> Result> { + // // Helper function to handle the async creation of the test. + // async fn create_test(runner: PerfRunner) -> Result> { + // let count: Option<&String> = runner.try_get_test_arg("count")?; + + // println!("Fibonacci1Test with count: {:?}", count); + // let count = count.expect("count argument is mandatory"); + // let count = count.parse::().map_err(|e| { + // azure_core::Error::with_error( + // azure_core::error::ErrorKind::Other, + // e, + // "Invalid count argument", + // ) + // })?; + // Ok(Box::new(Fibonacci1Test { count }) as Box) + // } + + // Return a pinned future that creates the test. + // Box::pin(create_test(runner.clone())) + Box::pin(async move { let count: Option<&String> = runner.try_get_test_arg("count")?; println!("Fibonacci1Test with count: {:?}", count); @@ -66,10 +83,7 @@ fn create_fibonacci1_test(runner: &PerfRunner) -> CreatePerfTestReturn { ) })?; Ok(Box::new(Fibonacci1Test { count }) as Box) - } - - // Return a pinned future that creates the test. 
- Box::pin(create_test(runner.clone())) + }) } #[tokio::test] diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index b2ae1e7afc..5ad671179c 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -44,6 +44,10 @@ pub trait PerfTest: Send + Sync { pub type CreatePerfTestReturn = Pin>>>>; +/// Type alias for an async function that creates a PerfTest instance. +/// Takes a PerfRunner reference and returns a future that resolves to a PerfTest trait object. +pub type CreatePerfTestFn = fn(PerfRunner) -> CreatePerfTestReturn; + /// Metadata about a performance test. #[derive(Debug, Clone)] pub struct PerfTestMetadata { @@ -54,8 +58,9 @@ pub struct PerfTestMetadata { /// The set of test options supported by this test. pub options: Vec, - /// A function used to create the performance test. - pub create_test: fn(&PerfRunner) -> CreatePerfTestReturn, + /// An async function used to create the performance test. + /// Takes a PerfRunner reference and returns a future that resolves to a PerfTest trait object. + pub create_test: CreatePerfTestFn, } /// #A `TestOptions` defines a set of options for the test which will be merged with the common test inputs to define the command line for the performance test. @@ -86,8 +91,6 @@ pub struct PerfTestOption { #[derive(Debug, Clone, Default, Serialize)] #[allow(dead_code)] struct PerfTestOutputs { - // * Package Versions - a set of packages tested and their versions. - pub package_versions: Vec, pub test_name: String, pub operations_per_second: f64, pub average_cpu_use: Option, @@ -176,13 +179,14 @@ impl PerfRunner { tests: Vec, ) -> azure_core::Result { let command = Self::get_command_from_metadata(&tests); - let arguments = command.try_get_matches().map_err(|e| { - azure_core::error::Error::with_error( - azure_core::error::ErrorKind::Other, - e, - "Failed to parse command line arguments.", - ) - })?; + let arguments = command.try_get_matches(); + let arguments = match arguments { + Ok(a) => a, + Err(e) => { + eprintln!("{}", e); + std::process::exit(1); + } + }; Ok(Self { options: PerfRunnerOptions::from(&arguments), tests, @@ -279,7 +283,7 @@ impl PerfRunner { format!("Test '{}' not found.", test_name), ) })?; - let test_instance = (test.create_test)(self).await?; + let test_instance = (test.create_test)(self.clone()).await?; let test_instance: Arc = Arc::from(test_instance); let test_mode = crate::TestMode::current()?; @@ -371,7 +375,6 @@ impl PerfRunner { ); let results = PerfTestOutputs { test_name: test.name.to_string(), - package_versions: vec![self.package_dir.to_string()], operations_per_second, average_cpu_use: None, average_memory_use: None, diff --git a/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml b/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml index 321ad9ed2f..7cb7cf216e 100644 --- a/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml +++ b/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml @@ -42,6 +42,6 @@ rustc_version.workspace = true workspace = true [[test]] -name = "performance_tests" +name = "perf" path = "perf/get_secret.rs" harness = false diff --git a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs index c6f3028a4e..d238940e42 100644 --- a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs +++ b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs @@ -23,6 +23,7 @@ use azure_core_test::{ use 
azure_security_keyvault_secrets::{ models::SetSecretParameters, SecretClient, SecretClientOptions, }; +use futures::FutureExt; struct GetSecrets { vault_url: String, random_key_name: OnceLock, @@ -47,8 +48,8 @@ impl GetSecrets { } } - fn create_new_test(runner: &PerfRunner) -> CreatePerfTestReturn { - async fn create_secret_client(runner: PerfRunner) -> Result> { + fn create_new_test(runner: PerfRunner) -> CreatePerfTestReturn { + async move { let vault_url_ref: Option<&String> = runner.try_get_test_arg("vault_url")?; let vault_url = vault_url_ref .expect("vault_url argument is mandatory") @@ -59,8 +60,7 @@ impl GetSecrets { client: OnceLock::new(), }) as Box) } - - Box::pin(create_secret_client(runner.clone())) + .boxed() } fn create_random_key_name(recording: &Recording) -> String { @@ -74,8 +74,7 @@ impl GetSecrets { } } -#[cfg_attr(target_arch="wasm32", async_trait::async_trait(?Send))] -#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] +#[async_trait::async_trait] impl PerfTest for GetSecrets { async fn setup(&self, context: Arc) -> azure_core::Result<()> { let recording = context.recording(); diff --git a/sdk/storage/azure_storage_blob/Cargo.toml b/sdk/storage/azure_storage_blob/Cargo.toml index 9cbe122431..b60cf310e6 100644 --- a/sdk/storage/azure_storage_blob/Cargo.toml +++ b/sdk/storage/azure_storage_blob/Cargo.toml @@ -39,6 +39,6 @@ tokio = { workspace = true, features = ["macros"] } tracing.workspace = true [[test]] -name = "performance_tests" +name = "perf" path = "perf/perf_tests.rs" harness = false diff --git a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs index 4e8311f196..0f12eb6f28 100644 --- a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs +++ b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs @@ -3,14 +3,14 @@ use std::sync::Arc; -use azure_core::{Bytes, Result}; +use azure_core::Bytes; use azure_core_test::{ perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata, PerfTestOption}, TestContext, }; use azure_identity::DeveloperToolsCredential; use azure_storage_blob::BlobContainerClient; -use futures::TryStreamExt; +use futures::{FutureExt, TryStreamExt}; pub struct ListBlobTest { count: u32, @@ -18,8 +18,8 @@ pub struct ListBlobTest { } impl ListBlobTest { - fn create_list_blob_test(runner: &PerfRunner) -> CreatePerfTestReturn { - async fn create_test(runner: PerfRunner) -> Result> { + fn create_list_blob_test(runner: PerfRunner) -> CreatePerfTestReturn { + async move { let count: Option<&String> = runner.try_get_test_arg("count")?; println!("ListBlobTest with count: {:?}", count); @@ -43,9 +43,7 @@ impl ListBlobTest { Ok(Box::new(ListBlobTest { count, client }) as Box) } - // Here you would create and return an instance of your performance test. 
- // For example: - Box::pin(create_test(runner.clone())) + .boxed() } pub fn test_metadata() -> PerfTestMetadata { From 008e3401c040ed20d9953a8c868b509f826c46da Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Thu, 9 Oct 2025 14:50:50 -0700 Subject: [PATCH 15/18] Removed commented out bicep logic --- sdk/storage/perf-resources.bicep | 55 -------------------------------- 1 file changed, 55 deletions(-) diff --git a/sdk/storage/perf-resources.bicep b/sdk/storage/perf-resources.bicep index 8a802cb3e4..542f7b958d 100644 --- a/sdk/storage/perf-resources.bicep +++ b/sdk/storage/perf-resources.bicep @@ -58,58 +58,3 @@ resource storage 'Microsoft.Storage/storageAccounts@2024-01-01' = { } output AZURE_STORAGE_ACCOUNT_NAME string = storage.name - -// param baseName string = resourceGroup().name -// param location string = resourceGroup().location -// param testApplicationOid string - -// var blobDataContributorRoleId = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' -// var blobDataOwnerRoleId = 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b' - -// var networkAcls = { -// bypass: 'AzureServices' -// defaultAction: 'Allow' -// ipRules: [] -// virtualNetworkRules: [] -// } - -// resource blobDataContributor 'Microsoft.Authorization/roleAssignments@2022-04-01' = { -// name: guid(blobDataContributorRoleId, resourceGroup().id) -// properties: { -// roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataContributorRoleId) -// principalId: testApplicationOid -// } -// } - -// resource blobDataOwner 'Microsoft.Authorization/roleAssignments@2022-04-01' = { -// name: guid(blobDataOwnerRoleId, resourceGroup().id) -// properties: { -// roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataOwnerRoleId) -// principalId: testApplicationOid -// } -// } - -// resource storageAccount 'Microsoft.Storage/storageAccounts@2019-06-01' = { -// name: '${baseName}blob' -// location: location -// kind: 'BlockBlobStorage' -// sku: { -// name: 'Premium_LRS' -// } -// properties: { -// allowSharedKeyAccess: false -// publicNetworkAccess: 'SecuredByPerimeter' -// supportsHttpsTrafficOnly: true -// networkAcls: networkAcls -// } -// } - -// var name = storageAccount.name -// var key = storageAccount.listKeys().keys[0].value -// var connectionString = 'DefaultEndpointsProtocol=https;AccountName=${name};AccountKey=${key}' - -// output AZURE_STORAGE_ACCOUNT_NAME string = name -// output AZURE_STORAGE_ACCOUNT_KEY string = key -// output AZURE_STORAGE_CONNECTION_STRING string = connectionString -// output STANDARD_STORAGE_CONNECTION_STRING string = connectionString -// output STORAGE_CONNECTION_STRING string = connectionString From 9266de96222bfdea695678842936efacb84b4090 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Thu, 9 Oct 2025 14:52:44 -0700 Subject: [PATCH 16/18] Removed commented out test logic --- .../src/perf/framework_tests.rs | 24 ++++--------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs index 15f4f56da4..8b4cc0a5b1 100644 --- a/sdk/core/azure_core_test/src/perf/framework_tests.rs +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -5,6 +5,8 @@ //! //! These tests cover various scenarios for running the `PerfRunner` with different options and measurements. //! 
+use futures::FutureExt; + use super::*; use std::boxed::Box; @@ -52,25 +54,8 @@ fn create_fibonacci1_test(runner: PerfRunner) -> CreatePerfTestReturn { } } - // // Helper function to handle the async creation of the test. - // async fn create_test(runner: PerfRunner) -> Result> { - // let count: Option<&String> = runner.try_get_test_arg("count")?; - - // println!("Fibonacci1Test with count: {:?}", count); - // let count = count.expect("count argument is mandatory"); - // let count = count.parse::().map_err(|e| { - // azure_core::Error::with_error( - // azure_core::error::ErrorKind::Other, - // e, - // "Invalid count argument", - // ) - // })?; - // Ok(Box::new(Fibonacci1Test { count }) as Box) - // } - // Return a pinned future that creates the test. - // Box::pin(create_test(runner.clone())) - Box::pin(async move { + async move { let count: Option<&String> = runner.try_get_test_arg("count")?; println!("Fibonacci1Test with count: {:?}", count); @@ -83,7 +68,8 @@ fn create_fibonacci1_test(runner: PerfRunner) -> CreatePerfTestReturn { ) })?; Ok(Box::new(Fibonacci1Test { count }) as Box) - }) + } + .boxed() } #[tokio::test] From 1d2dd32cd5aa454b3024cec6cce0385b9ddd1ff4 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Thu, 9 Oct 2025 15:22:40 -0700 Subject: [PATCH 17/18] Test fixes --- sdk/core/azure_core_test/src/perf/config_tests.rs | 2 +- .../azure_core_test/src/perf/framework_tests.rs | 14 +++----------- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/sdk/core/azure_core_test/src/perf/config_tests.rs b/sdk/core/azure_core_test/src/perf/config_tests.rs index 94cd19d415..114ea51565 100644 --- a/sdk/core/azure_core_test/src/perf/config_tests.rs +++ b/sdk/core/azure_core_test/src/perf/config_tests.rs @@ -507,7 +507,7 @@ fn test_test_option_debug_and_default() { // Test Debug implementation let debug_output = format!("{:?}", option); - assert!(debug_output.contains("TestOption")); + assert!(debug_output.contains("PerfTestOption")); } #[test] diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs index 8b4cc0a5b1..f632020b18 100644 --- a/sdk/core/azure_core_test/src/perf/framework_tests.rs +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -10,16 +10,6 @@ use futures::FutureExt; use super::*; use std::boxed::Box; -#[tokio::test] -async fn test_perf_runner_with_no_tests() { - let args = vec!["perf_test", "--iterations", "1", "--duration", "1"]; - let runner = - PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), vec![], args).unwrap(); - - let result = runner.run().await; - assert!(result.is_err()); -} - fn create_fibonacci1_test(runner: PerfRunner) -> CreatePerfTestReturn { struct Fibonacci1Test { count: u32, @@ -82,6 +72,8 @@ async fn test_perf_runner_with_single_test() { "30", "--duration", "10", + "--test-results", + "", "--warmup", "1", "fibonacci1", @@ -109,6 +101,6 @@ async fn test_perf_runner_with_single_test() { .unwrap(); let result = runner.run().await; - assert!(result.is_ok()); println!("Result: {:?}", result); + assert!(result.is_ok()); } From 26b347e5dac93dfbd096679f12239637d99c5cd2 Mon Sep 17 00:00:00 2001 From: Larry Osterman Date: Thu, 9 Oct 2025 16:32:55 -0700 Subject: [PATCH 18/18] PR feedback --- doc/performance-test-requirements.md | 79 ------------ sdk/core/azure_core_test/src/lib.rs | 2 - sdk/core/azure_core_test/src/perf/README.md | 68 ++++++++--- .../azure_core_test/src/perf/config_tests.rs | 18 +-- .../src/perf/framework_tests.rs | 2 +- 
sdk/core/azure_core_test/src/perf/mod.rs | 112 +++++++++--------- sdk/core/azure_core_test/src/recording.rs | 4 + .../perf/get_secret.rs | 2 +- .../azure_storage_blob/perf/list_blob_test.rs | 4 +- 9 files changed, 125 insertions(+), 166 deletions(-) delete mode 100644 doc/performance-test-requirements.md diff --git a/doc/performance-test-requirements.md b/doc/performance-test-requirements.md deleted file mode 100644 index 6b0a3dd38b..0000000000 --- a/doc/performance-test-requirements.md +++ /dev/null @@ -1,79 +0,0 @@ -# Requirements for performance tests - -Each performance test consists of three phases: - -1) Warmup -1) Test operation -1) Cleanup - -## Common test inputs - -* Duration of the test in seconds -* Number of iterations of the main test loop -* Parallel - number of operations to execute in parallel -* Disable test cleanup -* Test Proxy servers. -* Results file - location to write test outputs -* Warmup - Duration of the warmup in seconds. -* TLS - * Allow untrusted TLS certificates -* Advanced options - * Print job statistics (?) - * Track latency and print per-operation latency statistics - * Target throughput (operations/second) (?) -* Language specific options - * Max I/O completion threads - * Minimum number of asynchronous I/O threads in the thread pool - * Minimum number of worker threads the thread pool creates on demand - * Sync - run a synchronous version of the test - -## Expected test outputs - -Each test is expected to generate the following elements: - -* Package Versions - a set of packages tested and their versions. -* Operations per second - Double precision float -* Standard Output of the test -* Standard Error of the test -* Exception - Text of any exceptions thrown during the test. -* Average CPU Use during the test - Double precision float. -* Average memory use during the test - Double precision float. - -## Perf Test Harness - -Each performance test defines a `get_metadata()` function which returns a `TestMetadata` structure. - -A `TestMetadata` structure contains the following fields - -```rust -pub struct TestMetadata { - name: &'static str - description: &'static str - options: &'static[&'static TestOption] -} -``` - -A `TestOptions` defines a set of options for the test which will be merged with the common test inputs to define the command line for the performance test. - -```rust -pub struct TestOption { - /// The name of the test option. This is used as the key in the `TestArguments` map. - name: &'static str, - - long_activator: &str, - - short_activator:&str, - - /// Display message - displayed in the --help message. - display_message: &[str], - - /// Expected argument count - expected_args_len: u16, - - /// Required - mandatory: bool, - - /// Argument value is sensitive and should be sanitized. - sensitive: bool, -} -``` diff --git a/sdk/core/azure_core_test/src/lib.rs b/sdk/core/azure_core_test/src/lib.rs index 960954761f..752f10731b 100644 --- a/sdk/core/azure_core_test/src/lib.rs +++ b/sdk/core/azure_core_test/src/lib.rs @@ -38,8 +38,6 @@ pub struct TestContext { recording: Option, } -unsafe impl Send for TestContext {} - impl TestContext { pub(crate) fn new( crate_dir: &'static str, diff --git a/sdk/core/azure_core_test/src/perf/README.md b/sdk/core/azure_core_test/src/perf/README.md index 07d31ad300..b2129254b5 100644 --- a/sdk/core/azure_core_test/src/perf/README.md +++ b/sdk/core/azure_core_test/src/perf/README.md @@ -7,7 +7,7 @@ Performance tests are defined in a "perf" directory under the package root. 
By convention, all performance tests are named "perf" and are invoked via: ```bash -cargo test --package --test perf -- +cargo test --package --test perf -- {perf test name} {perf test arguments} ``` where `package name` is the name of the rust package, `perf test name` is the name of the test you want to run, and `perf test arguments` is the arguments to that test. @@ -29,9 +29,9 @@ Each test has its own set of parameters which are specific to the test. Performance tests have three phases: -1) Setup - Establish any resources needed to run the test. -1) Run - Actually perform the test. -1) Cleanup - Cleanup any resources used by the test. +1.. Setup - Establish any resources needed to run the test. +2.. Run - Actually perform the test. +3.. Cleanup - Cleanup any resources used by the test. Each is defined by functions on the `PerfTest` trait. @@ -83,6 +83,27 @@ fn create_new_test(runner: PerfRunner) -> CreatePerfTestReturn { } ``` +### Test invocation + +The final piece of code which is necessary to run the performance tests is logic to hook up the tests with a test runner. + +```rust +#[tokio::main] +async fn main() -> azure_core::Result<()> { + let runner = PerfRunner::new( + env!("CARGO_MANIFEST_DIR"), + file!(), + vec![GetSecrets::test_metadata()], + )?; + + runner.run().await?; + + Ok(()) +} +``` + +This declares a perf test runner with a set of defined test metadata and runs the performance test. If your performance test suite has more than one performance test, then it should be added to the final parameter to the `PerfRunner::new()` function. + ### Declaring Tests The process of authoring tests starts with the cargo.toml file for your package. @@ -98,21 +119,34 @@ harness = false This declares a test named `perf` (which is required for the perf automation tests) located in a directory named `perf` in a module named `get_secret.rs`. It also declares the test as *not* requiring the standard test harness - that's because the test defines its own test harness. -The contents of the test file should have the following: +After this, to invoke your perf test, you simply use: -```rust -#[tokio::main] -async fn main() -> azure_core::Result<()> { - let runner = PerfRunner::new( - env!("CARGO_MANIFEST_DIR"), - file!(), - vec![GetSecrets::test_metadata()], - )?; +```bash +cargo test --package azure_storage_blob --test perf -- +``` - runner.run().await?; +For example, - Ok(()) -} +```bash +cargo test --package azure_storage_blob --test perf -- list_blob --help +``` + +returns the help text for the `list_blob`test: + +```text +List blobs in a container + +Usage: perf-070114707c71388a.exe list_blob [OPTIONS] --count + +Options: + -c, --count The number of blobs to list + -e, --endpoint The endpoint of the blob storage + --sync + --parallel The number of concurrent tasks to use when running each test [default: 1] + --duration The duration of each test in seconds [default: 30] + --warmup The duration of the warmup period in seconds [default: 5] + --no-cleanup Disable test cleanup + -h, --help Print help ``` -This declares a perf test runner with the defined test metadata and runs the performance test. If your performance test has more than one performance test, then it should be added to the final parameter to the `PerfRunner::new()` function. +Note that some of these test options are not specific to the `list_blobs` test. This is to allow test options to be provided in any order in the command line. 
diff --git a/sdk/core/azure_core_test/src/perf/config_tests.rs b/sdk/core/azure_core_test/src/perf/config_tests.rs index 114ea51565..d321b1a5dd 100644 --- a/sdk/core/azure_core_test/src/perf/config_tests.rs +++ b/sdk/core/azure_core_test/src/perf/config_tests.rs @@ -26,7 +26,7 @@ fn create_basic_test_metadata() -> PerfTestMetadata { description: "A basic test for testing purposes", options: vec![PerfTestOption { name: "test-option", - short_activator: 't', + short_activator: Some('t'), long_activator: "test-option", display_message: "Test option for basic test", expected_args_len: 1, @@ -45,7 +45,7 @@ fn create_complex_test_metadata() -> PerfTestMetadata { options: vec![ PerfTestOption { name: "mandatory-option", - short_activator: 'm', + short_activator: Some('m'), long_activator: "mandatory", display_message: "Mandatory option", expected_args_len: 1, @@ -54,7 +54,7 @@ fn create_complex_test_metadata() -> PerfTestMetadata { }, PerfTestOption { name: "sensitive-option", - short_activator: 's', + short_activator: None, long_activator: "sensitive", display_message: "Sensitive option", expected_args_len: 1, @@ -63,7 +63,7 @@ fn create_complex_test_metadata() -> PerfTestMetadata { }, PerfTestOption { name: "flag-option", - short_activator: 'f', + short_activator: Some('f'), long_activator: "flag", display_message: "Flag option", ..Default::default() @@ -80,7 +80,7 @@ fn create_no_short_activator_test_metadata() -> PerfTestMetadata { description: "Test without short activators", options: vec![PerfTestOption { name: "long-only", - short_activator: '\0', + short_activator: None, long_activator: "long-only", display_message: "Long activator only", expected_args_len: 1, @@ -498,7 +498,7 @@ fn test_test_option_debug_and_default() { // Test default values assert_eq!(option.name, ""); - assert_eq!(option.short_activator, '\0'); + assert_eq!(option.short_activator, None); assert_eq!(option.long_activator, ""); assert_eq!(option.display_message, ""); assert_eq!(option.expected_args_len, 0); @@ -598,7 +598,7 @@ async fn test_perf_runner_with_test_functions() { options: vec![ PerfTestOption { name: "mandatory-option", - short_activator: 'm', + short_activator: Some('m'), long_activator: "mandatory", display_message: "Mandatory option", expected_args_len: 1, @@ -607,7 +607,7 @@ async fn test_perf_runner_with_test_functions() { }, PerfTestOption { name: "sensitive-option", - short_activator: 's', + short_activator: Some('s'), long_activator: "sensitive", display_message: "Sensitive option", expected_args_len: 1, @@ -616,7 +616,7 @@ async fn test_perf_runner_with_test_functions() { }, PerfTestOption { name: "flag-option", - short_activator: 'f', + short_activator: Some('f'), long_activator: "flag", display_message: "Flag option", expected_args_len: 0, diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs index f632020b18..e40a0ccec7 100644 --- a/sdk/core/azure_core_test/src/perf/framework_tests.rs +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -89,7 +89,7 @@ async fn test_perf_runner_with_single_test() { options: vec![PerfTestOption { name: "count", mandatory: true, - short_activator: 'c', + short_activator: Some('c'), expected_args_len: 1, display_message: "The Fibonacci number to compute", ..Default::default() diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs index 5ad671179c..a11e414ab4 100644 --- a/sdk/core/azure_core_test/src/perf/mod.rs +++ 
b/sdk/core/azure_core_test/src/perf/mod.rs @@ -5,11 +5,14 @@ #![cfg(not(target_arch = "wasm32"))] use crate::TestContext; -use azure_core::{time::Duration, Error, Result}; +use azure_core::{ + error::{ErrorKind, ResultExt}, + time::Duration, + Error, Result, +}; use clap::ArgMatches; use serde::Serialize; use std::{ - any::Any, fmt::Display, future::Future, pin::Pin, @@ -63,14 +66,14 @@ pub struct PerfTestMetadata { pub create_test: CreatePerfTestFn, } -/// #A `TestOptions` defines a set of options for the test which will be merged with the common test inputs to define the command line for the performance test. +/// A `PerfTestOptions` defines a set of options for the test which will be merged with the common test inputs to define the command line for the performance test. #[derive(Debug, Default, Clone)] pub struct PerfTestOption { /// The name of the test option. This is used as the key in the `TestArguments` map. pub name: &'static str, /// The short form activator for this argument e.g., `-t`. Does not include the hyphen. - pub short_activator: char, + pub short_activator: Option, /// The long form activator for this argument e.g., `--test-option`. Does not include the hyphens. pub long_activator: &'static str, @@ -101,7 +104,7 @@ struct PerfTestOutputs { struct PerfRunnerOptions { no_cleanup: bool, iterations: u32, - parallel: usize, + parallel: u32, duration: Duration, warmup: Duration, disable_progress: bool, @@ -124,8 +127,6 @@ impl Display for PerfRunnerOptions { } } -impl PerfRunnerOptions {} - impl From<&ArgMatches> for PerfRunnerOptions { fn from(matches: &ArgMatches) -> Self { Self { @@ -134,7 +135,7 @@ impl From<&ArgMatches> for PerfRunnerOptions { .get_one::("iterations") .expect("defaulted by clap"), parallel: *matches - .get_one::("parallel") + .get_one::("parallel") .expect("defaulted by clap"), disable_progress: matches.get_flag("no-progress"), duration: Duration::seconds( @@ -177,7 +178,7 @@ impl PerfRunner { package_dir: &'static str, module_name: &'static str, tests: Vec, - ) -> azure_core::Result { + ) -> Result { let command = Self::get_command_from_metadata(&tests); let arguments = command.try_get_matches(); let arguments = match arguments { @@ -205,13 +206,10 @@ impl PerfRunner { args: Vec<&str>, ) -> azure_core::Result { let command = Self::get_command_from_metadata(&tests); - let arguments = command.try_get_matches_from(args).map_err(|e| { - azure_core::error::Error::with_error( - azure_core::error::ErrorKind::Other, - e, - "Failed to parse command line arguments.", - ) - })?; + let arguments = command + .try_get_matches_from(args) + .with_context(ErrorKind::Other, "Failed to parse command line arguments.")?; + Ok(Self { options: PerfRunnerOptions::from(&arguments), tests, @@ -225,34 +223,38 @@ impl PerfRunner { /// Gets a reference to a typed argument by its id. pub fn try_get_global_arg(&self, id: &str) -> Result> where - T: Any + Clone + Send + Sync + 'static, + T: Clone + Send + Sync + 'static, { - self.arguments.try_get_one::(id).map_err(|e| { - Error::with_error( - azure_core::error::ErrorKind::Other, - e, - format!("Failed to get argument '{}'.", id), - ) - }) + self.arguments.try_get_one::(id).with_context( + ErrorKind::Other, + format!("Failed to get argument '{}'.", id), + ) } + /// Gets a reference to a typed argument for the selected test by its id. + /// + /// # Arguments + /// + /// * `id` - The id of the argument to get. + /// + /// # Returns + /// + /// A reference to the argument if it exists, or None. 
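    /// # Example
    ///
    /// Illustrative; mirrors the usage in the perf tests elsewhere in this patch.
    ///
    /// ```ignore
    /// let count: Option<&String> = runner.try_get_test_arg("count")?;
    /// let count = count.expect("count argument is mandatory");
    /// ```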
pub fn try_get_test_arg(&self, id: &str) -> Result> where - T: Any + Clone + Send + Sync + 'static, + T: Clone + Send + Sync + 'static, { if let Some((_, args)) = self.arguments.subcommand() { - args.try_get_one::(id).map_err(|e| { - Error::with_error( - azure_core::error::ErrorKind::Other, - e, - format!("Failed to get argument '{}' for test.", id), - ) - }) + args.try_get_one::(id).with_context( + ErrorKind::Other, + format!("Failed to get argument '{}' for test.", id), + ) } else { Ok(None) } } + /// Gets the name of the selected test. pub fn get_selected_test_name(&self) -> Result<&str> { match self.arguments.subcommand_name() { Some(name) => Ok(name), @@ -263,6 +265,16 @@ impl PerfRunner { } } + /// Runs the selected performance test. + /// + /// This will run the selected test for the configured number of iterations, parallel tasks, and duration. + /// + /// If no test has been selected, this will print an error message and return Ok(()). + /// + /// # Returns + /// + /// A result indicating the success or failure of the test run. + /// pub async fn run(&self) -> azure_core::Result<()> { // We can only run tests if there was a test selected. let test_name = match self.get_selected_test_name() { @@ -380,21 +392,14 @@ impl PerfRunner { average_memory_use: None, }; - let json = serde_json::to_string_pretty(&results).map_err(|e| { - Error::with_error( - azure_core::error::ErrorKind::Other, - e, - "Failed to serialize test results to JSON.", - ) - })?; + let json = serde_json::to_string_pretty(&results).with_context( + ErrorKind::DataConversion, + "Failed to serialize test results to JSON.", + )?; + println!("Test results: {}", json); - std::fs::write(&self.options.test_results_filename, json).map_err(|e| { - Error::with_error( - azure_core::error::ErrorKind::Io, - e, - "Failed to write test results to file.", - ) - })?; + std::fs::write(&self.options.test_results_filename, json) + .with_context(ErrorKind::Io, "Failed to write test results to file.")?; } } Ok(()) @@ -411,7 +416,7 @@ impl PerfRunner { (0..self.options.parallel).for_each(|i| { let test_instance_clone = Arc::clone(&test_instance); let progress = self.progress.clone(); - let test_context = test_contexts[i].clone(); + let test_context = test_contexts[i as usize].clone(); tasks.spawn(async move { loop { test_instance_clone.run(test_context.clone()).await?; @@ -429,10 +434,7 @@ impl PerfRunner { loop { tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; let current_total = self.progress.load(Ordering::SeqCst); - // println!("{:<10?} elapsed: {:.5} op/sec, {:4} sec/operation.", - // start.elapsed(), - // self.progress.load(Ordering::SeqCst) as f64 / start.elapsed().as_secs_f64(), - // Duration::seconds_f64( start.elapsed().as_secs_f64() / self.progress.load(Ordering::SeqCst) as f64 )); + if start.elapsed().as_secs_f64() != 0f64 && current_total != 0 { println!("Current {:3}, Total {:5} {:4}", current_total - last_count, current_total, Duration::seconds_f64( start.elapsed().as_secs_f64() / current_total as f64 )); } @@ -448,7 +450,7 @@ impl PerfRunner { Ok(()) } - // * Disable test cleanup + // Future command line switches: // * Test Proxy servers. 
// * TLS // * Allow untrusted TLS certificates @@ -478,7 +480,7 @@ impl PerfRunner { clap::arg!(--parallel "The number of concurrent tasks to use when running each test") .required(false) .default_value("1") - .value_parser(clap::value_parser!(usize)) + .value_parser(clap::value_parser!(u32)) .global(true), ) .arg(clap::arg!(--"no-progress" "Disable progress reporting").required(false).global(false)) @@ -514,8 +516,8 @@ impl PerfRunner { .num_args(option.expected_args_len..=option.expected_args_len) .required(option.mandatory) .global(false); - if option.short_activator != '\0' { - arg = arg.short(option.short_activator); + if let Some(short_activator) = option.short_activator { + arg = arg.short(short_activator); } if option.sensitive { arg = arg.hide(true); diff --git a/sdk/core/azure_core_test/src/recording.rs b/sdk/core/azure_core_test/src/recording.rs index 9c115e0bf9..62bb6c3f78 100644 --- a/sdk/core/azure_core_test/src/recording.rs +++ b/sdk/core/azure_core_test/src/recording.rs @@ -60,6 +60,10 @@ pub struct Recording { rand: OnceLock>, } +// It's not 100% clear to me that Recording is Send, but it seems to be. +// TODO: See if there's a way to remove this explicit unsafe impl. +unsafe impl Send for Recording {} + impl Recording { /// Adds a [`Sanitizer`] to sanitize PII for the current test. pub async fn add_sanitizer(&self, sanitizer: S) -> azure_core::Result<()> diff --git a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs index d238940e42..962505a781 100644 --- a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs +++ b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs @@ -39,7 +39,7 @@ impl GetSecrets { name: "vault_url", display_message: "The URL of the Key Vault to use in the test", mandatory: true, - short_activator: 'u', + short_activator: Some('u'), long_activator: "vault-url", expected_args_len: 1, ..Default::default() diff --git a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs index 0f12eb6f28..74e381d21e 100644 --- a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs +++ b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs @@ -55,7 +55,7 @@ impl ListBlobTest { name: "count", display_message: "The number of blobs to list", mandatory: true, - short_activator: 'c', + short_activator: Some('c'), long_activator: "count", expected_args_len: 1, ..Default::default() @@ -64,7 +64,7 @@ impl ListBlobTest { name: "endpoint", display_message: "The endpoint of the blob storage", mandatory: false, - short_activator: 'e', + short_activator: Some('e'), long_activator: "endpoint", expected_args_len: 1, ..Default::default()