diff --git a/sdk/core/azure_core/CHANGELOG.md b/sdk/core/azure_core/CHANGELOG.md
index 6225987745..8b79b35be1 100644
--- a/sdk/core/azure_core/CHANGELOG.md
+++ b/sdk/core/azure_core/CHANGELOG.md
@@ -16,7 +16,6 @@
 
 ### Breaking Changes
 
-- Changed `ClientOptions::retry` from `Option<RetryOptions>` to `RetryOptions`.
 - Changed `DeserializeWith::deserialize_with()` to be sync.
 - Changed `Pipeline::send()` to return a `Result`.
diff --git a/sdk/core/azure_core_test/Cargo.toml b/sdk/core/azure_core_test/Cargo.toml
index 13dc3eb703..34c6cdf22b 100644
--- a/sdk/core/azure_core_test/Cargo.toml
+++ b/sdk/core/azure_core_test/Cargo.toml
@@ -23,6 +23,7 @@ async-trait.workspace = true
 azure_core = { workspace = true, features = ["test"] }
 azure_core_test_macros.workspace = true
 azure_identity.workspace = true
+clap.workspace = true
 dotenvy = "0.15.7"
 futures.workspace = true
 rand.workspace = true
diff --git a/sdk/core/azure_core_test/src/lib.rs b/sdk/core/azure_core_test/src/lib.rs
index 76a936ca32..752f10731b 100644
--- a/sdk/core/azure_core_test/src/lib.rs
+++ b/sdk/core/azure_core_test/src/lib.rs
@@ -7,6 +7,7 @@ pub mod credentials;
 #[cfg(doctest)]
 mod docs;
 pub mod http;
+pub mod perf;
 pub mod proxy;
 pub mod recorded;
 mod recording;
@@ -14,7 +15,6 @@ mod recording;
 mod root_readme;
 pub mod stream;
 pub mod tracing;
-
 use azure_core::Error;
 pub use azure_core::{error::ErrorKind, test::TestMode};
 pub use proxy::{matchers::*, sanitizers::*};
diff --git a/sdk/core/azure_core_test/src/perf/README.md b/sdk/core/azure_core_test/src/perf/README.md
new file mode 100644
index 0000000000..b2129254b5
--- /dev/null
+++ b/sdk/core/azure_core_test/src/perf/README.md
@@ -0,0 +1,152 @@
+# Performance Tests
+
+The Azure SDK defines a standardized set of performance tests which use a test framework defined by the [PerfAutomation tool](https://github.com/Azure/azure-sdk-tools/tree/main/tools/perf-automation).
+
+Performance tests are defined in a "perf" directory under the package root.
+
+By convention, all performance tests are named "perf" and are invoked via:
+
+```bash
+cargo test --package {package name} --test perf -- {perf test name} {perf test arguments}
+```
+
+where `package name` is the name of the Rust package, `perf test name` is the name of the test you want to run, and `perf test arguments` are the arguments to that test.
+
+Each performance test has the following standardized parameters:
+
+* `--iterations <iterations>` - the number of iterations to run the test for. Default: 1
+* `--sync` - run only synchronous tests (ignored).
+* `--parallel <parallel>` - the number of concurrent tasks to use when running each test. Default: 1
+* `--no-progress` - disable the once-per-second progress report.
+* `--duration <duration>` - the duration of each test in seconds. Default: 30
+* `--warmup <warmup>` - the duration of the warmup period in seconds. Default: 5
+* `--test-results <test-results>` - the file to write test results to. Default: ./tests/results.json
+* `--help` - show help.
+
+Each test also has its own set of parameters which are specific to that test.
+
+## Test authoring
+
+Performance tests have three phases:
+
+1. Setup - establish any resources needed to run the test.
+2. Run - actually perform the test.
+3. Cleanup - clean up any resources used by the test.
+
+Each phase is defined by a method on the `PerfTest` trait.
+
+### Test Metadata
+
+Tests are defined by an instance of a `PerfTestMetadata` structure, which defines the name of the test and other information about it.
+
+A perf test has a name (`get_secret`, `list_blobs`, `upload_blob`, etc.), a short description, a set of test options, and a pointer to a function which returns an instance of the test.
+
+Each perf test also has a set of command-line options that are specific to the individual test; these are defined by `PerfTestOption` structures. Each option describes the option's name, its help text, its short and long command-line activators, the number of arguments it expects, whether it is mandatory, and whether its value is sensitive.
+
+Here is an example of test metadata for a performance test:
+
+```rust
+PerfTestMetadata {
+    name: "get_secret",
+    description: "Get a secret from Key Vault",
+    options: vec![PerfTestOption {
+        name: "vault_url",
+        display_message: "The URL of the Key Vault to use in the test",
+        mandatory: true,
+        short_activator: Some('u'),
+        long_activator: "vault-url",
+        expected_args_len: 1,
+        ..Default::default()
+    }],
+    create_test: Self::create_new_test,
+}
+```
+
+This defines a test named `get_secret` with a single required "vault_url" option.
+
+For this test, the `create_new_test` function looks like:
+
+```rust
+fn create_new_test(runner: PerfRunner) -> CreatePerfTestReturn {
+    async move {
+        let vault_url_ref: Option<&String> = runner.try_get_test_arg("vault_url")?;
+        let vault_url = vault_url_ref
+            .expect("vault_url argument is mandatory")
+            .clone();
+        Ok(Box::new(GetSecrets {
+            vault_url,
+            random_key_name: OnceLock::new(),
+            client: OnceLock::new(),
+        }) as Box<dyn PerfTest>)
+    }
+    .boxed()
+}
+```
+
+### Test invocation
+
+The final piece of code necessary to run the performance tests is the logic that hooks the tests up to a test runner.
+
+```rust
+#[tokio::main]
+async fn main() -> azure_core::Result<()> {
+    let runner = PerfRunner::new(
+        env!("CARGO_MANIFEST_DIR"),
+        file!(),
+        vec![GetSecrets::test_metadata()],
+    )?;
+
+    runner.run().await?;
+
+    Ok(())
+}
+```
+
+This declares a perf test runner with a set of defined test metadata and runs the performance test. If your performance test suite has more than one performance test, each test's metadata should be added to the final parameter of the `PerfRunner::new()` function.
+
+### Declaring Tests
+
+The process of authoring tests starts with the `Cargo.toml` file for your package.
+
+Add the following to the `Cargo.toml` file:
+
+```toml
+[[test]]
+name = "perf"
+path = "perf/get_secret.rs"
+harness = false
+```
+
+This declares a test named `perf` (which is required for the perf automation tests) located in a directory named `perf` in a source file named `get_secret.rs`. It also declares the test as *not* requiring the standard test harness, because the test defines its own test harness.
+
+After this, to invoke your perf test, you simply use:
+
+```bash
+cargo test --package azure_storage_blob --test perf -- {perf test name} {perf test arguments}
+```
+
+For example,
+
+```bash
+cargo test --package azure_storage_blob --test perf -- list_blob --help
+```
+
+returns the help text for the `list_blob` test:
+
+```text
+List blobs in a container
+
+Usage: perf-070114707c71388a.exe list_blob [OPTIONS] --count <COUNT>
+
+Options:
+  -c, --count <COUNT>        The number of blobs to list
+  -e, --endpoint <ENDPOINT>  The endpoint of the blob storage
+      --sync
+      --parallel <parallel>  The number of concurrent tasks to use when running each test [default: 1]
+      --duration <duration>  The duration of each test in seconds [default: 30]
+      --warmup <warmup>      The duration of the warmup period in seconds [default: 5]
+      --no-cleanup           Disable test cleanup
+  -h, --help                 Print help
+```
+
+Note that some of these options are not specific to the `list_blob` test. This is to allow test options to be provided in any order on the command line.
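+
+### Example test implementation
+
+For reference, here is a minimal sketch of a `PerfTest` implementation wired up to its metadata. It is illustrative only: the `NoOpTest` type, its `no_op` name, and its empty option list are hypothetical placeholders, but the trait methods and metadata fields are the ones described above.
+
+```rust
+use std::sync::Arc;
+
+use azure_core_test::{
+    perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata},
+    TestContext,
+};
+use futures::FutureExt;
+
+struct NoOpTest;
+
+#[async_trait::async_trait]
+impl PerfTest for NoOpTest {
+    async fn setup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> {
+        // Create any resources (containers, secrets, etc.) the test needs. Called once per iteration.
+        Ok(())
+    }
+
+    async fn run(&self, _context: Arc<TestContext>) -> azure_core::Result<()> {
+        // The code being measured. Called repeatedly, potentially from many parallel tasks.
+        Ok(())
+    }
+
+    async fn cleanup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> {
+        // Tear down anything created in `setup`.
+        Ok(())
+    }
+}
+
+impl NoOpTest {
+    fn create_new_test(_runner: PerfRunner) -> CreatePerfTestReturn {
+        async move { Ok(Box::new(NoOpTest) as Box<dyn PerfTest>) }.boxed()
+    }
+
+    fn test_metadata() -> PerfTestMetadata {
+        PerfTestMetadata {
+            name: "no_op",
+            description: "A do-nothing test that measures framework overhead",
+            options: vec![],
+            create_test: Self::create_new_test,
+        }
+    }
+}
+```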
diff --git a/sdk/core/azure_core_test/src/perf/config_tests.rs b/sdk/core/azure_core_test/src/perf/config_tests.rs new file mode 100644 index 0000000000..d321b1a5dd --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/config_tests.rs @@ -0,0 +1,687 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +//! Tests for configuration of the performance test runner. +//! +//! These tests cover various scenarios for initializing the `PerfRunner` with different sets of +//! command-line arguments and test metadata. They ensure that the runner correctly parses +//! arguments, handles defaults, and manages errors appropriately. +//! +use super::*; +use std::{env, error::Error}; + +fn create_failed_test(_runner: PerfRunner) -> CreatePerfTestReturn { + Box::pin(async { + Err(azure_core::Error::with_message( + azure_core::error::ErrorKind::Other, + "Intentional failure to create test instance", + )) + }) +} + +// Helper function to create a basic test metadata for testing +fn create_basic_test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "basic_test", + description: "A basic test for testing purposes", + options: vec![PerfTestOption { + name: "test-option", + short_activator: Some('t'), + long_activator: "test-option", + display_message: "Test option for basic test", + expected_args_len: 1, + mandatory: false, + sensitive: false, + }], + create_test: create_failed_test, + } +} + +// Helper function to create test metadata with multiple options +fn create_complex_test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "complex_test", + description: "A complex test with multiple options", + options: vec![ + PerfTestOption { + name: "mandatory-option", + short_activator: Some('m'), + long_activator: "mandatory", + display_message: "Mandatory option", + expected_args_len: 1, + mandatory: true, + sensitive: false, + }, + PerfTestOption { + name: "sensitive-option", + short_activator: None, + long_activator: "sensitive", + display_message: "Sensitive option", + expected_args_len: 1, + mandatory: false, + sensitive: true, + }, + PerfTestOption { + name: "flag-option", + short_activator: Some('f'), + long_activator: "flag", + display_message: "Flag option", + ..Default::default() + }, + ], + create_test: create_failed_test, + } +} + +// Helper function to create test metadata without short activators +fn create_no_short_activator_test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "no_short_test", + description: "Test without short activators", + options: vec![PerfTestOption { + name: "long-only", + short_activator: None, + long_activator: "long-only", + display_message: "Long activator only", + expected_args_len: 1, + mandatory: false, + sensitive: false, + }], + create_test: create_failed_test, + } +} + +#[test] +fn test_perf_runner_new_with_empty_tests() { + let tests = vec![]; + let result = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests, + vec!["perf-tests"], + ); + + assert!( + result.is_ok(), + "PerfRunner::new should succeed with empty tests" + ); + let runner = result.unwrap(); + + // Test default values + assert_eq!(runner.options.iterations, 1); + assert_eq!(runner.options.parallel, 1); + assert_eq!(runner.options.duration, Duration::seconds(30)); + assert_eq!(runner.options.warmup, Duration::seconds(5)); + assert_eq!(runner.options.test_results_filename, "./tests/results.json"); + assert!(!runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_new_with_single_test() 
{ + let tests = vec![create_basic_test_metadata()]; + let result = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests, + vec!["perf-tests"], + ); + + assert!( + result.is_ok(), + "PerfRunner::new should succeed with single test" + ); + let runner = result.unwrap(); + + // Verify default values are set + assert_eq!(runner.options.iterations, 1); + assert_eq!(runner.options.parallel, 1); + assert_eq!(runner.options.duration, Duration::seconds(30)); + assert_eq!(runner.options.warmup, Duration::seconds(5)); +} + +#[test] +fn test_perf_runner_new_with_multiple_tests() { + let tests = vec![ + create_basic_test_metadata(), + create_complex_test_metadata(), + create_no_short_activator_test_metadata(), + ]; + let result = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests, + vec!["perf-tests"], + ); + + assert!( + result.is_ok(), + "PerfRunner::new should succeed with multiple tests" + ); + let _runner = result.unwrap(); +} + +#[test] +fn test_perf_runner_with_command_line_default_args() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with default args" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.iterations, 1); + assert_eq!(runner.options.parallel, 1); + assert_eq!(runner.options.duration, Duration::seconds(30)); + assert_eq!(runner.options.warmup, Duration::seconds(5)); + assert!(!runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_with_command_line_custom_iterations() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "10"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom iterations" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.iterations, 10); +} + +#[test] +fn test_perf_runner_with_command_line_custom_parallel() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--parallel", "5"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom parallel" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.parallel, 5); +} + +#[test] +fn test_perf_runner_with_command_line_custom_duration() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--duration", "60"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom duration" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.duration, Duration::seconds(60)); +} + +#[test] +fn test_perf_runner_with_command_line_custom_warmup() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--warmup", "10"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom warmup" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.warmup, Duration::seconds(10)); +} + +#[test] +fn test_perf_runner_with_command_line_test_results_file() { + let 
tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--test-results", "/tmp/results.json"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom test results file" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.test_results_filename, "/tmp/results.json"); +} + +#[test] +fn test_perf_runner_with_command_line_no_cleanup() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--no-cleanup"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with no-cleanup flag" + ); + + let runner = result.unwrap(); + assert!(runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_with_command_line_all_options() { + let tests = vec![create_basic_test_metadata()]; + let args = vec![ + "perf-tests", + "--iterations", + "20", + "--parallel", + "8", + "--duration", + "120", + "--warmup", + "15", + "--test-results", + "/custom/results.json", + "--no-cleanup", + ]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with all options" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.iterations, 20); + assert_eq!(runner.options.parallel, 8); + assert_eq!(runner.options.duration, Duration::seconds(120)); + assert_eq!(runner.options.warmup, Duration::seconds(15)); + assert_eq!(runner.options.test_results_filename, "/custom/results.json"); + assert!(runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_command_line_help() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--help"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_err(), + "PerfRunner::with_command_line should fail with help flag" + ); + + println!("{}", result.as_ref().err().unwrap().source().unwrap()); + + let error = result.err().unwrap(); + assert_eq!(error.kind(), &azure_core::error::ErrorKind::Other); + assert!(error.to_string().contains("Failed to parse")); +} + +#[test] +fn test_perf_runner_with_subcommand() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "basic_test", "--test-option", "value"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with subcommand" + ); + + let runner = result.unwrap(); + + let selected_test = runner + .get_selected_test_name() + .expect("A test should be selected"); + assert_eq!(selected_test, "basic_test"); + let option_value: Option<&String> = runner.try_get_test_arg("test-option").ok().flatten(); + assert!(option_value.is_some()); + assert_eq!(option_value.unwrap(), "value"); +} + +#[test] +fn test_perf_runner_with_subcommand_short_activator() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "basic_test", "-t", "short_value"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with short activator" + ); + + let runner = result.unwrap(); + let option_value: Option<&String> = 
runner.try_get_test_arg("test-option").ok().flatten(); + assert!(option_value.is_some()); + assert_eq!(option_value.unwrap(), "short_value"); +} + +#[test] +fn test_perf_runner_with_complex_subcommand() { + let tests = vec![create_complex_test_metadata()]; + let args = vec![ + "perf-tests", + "complex_test", + "--mandatory", + "required_value", + "--sensitive", + "secret_value", + "--flag", + ]; + + println!( + "Help: {}", + PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests.clone(), + vec!["perf-tests", "--help"] + ) + .unwrap_err() + .source() + .unwrap() + ); + println!( + "Help2 : {}", + PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests.clone(), + vec!["perf-tests", "complex_test", "--help"] + ) + .unwrap_err() + .source() + .unwrap() + ); + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with complex subcommand" + ); + + let runner = result.unwrap(); + + let mandatory_value: Result> = runner.try_get_test_arg("mandatory-option"); + println!("{:?}", mandatory_value); + assert!(mandatory_value.is_ok()); + let mandatory_value = mandatory_value.unwrap(); + assert!(mandatory_value.is_some()); + assert_eq!(mandatory_value.unwrap(), "required_value"); + + let sensitive_value: Option<&String> = + runner.try_get_test_arg("sensitive-option").ok().flatten(); + assert!(sensitive_value.is_some()); + assert_eq!(sensitive_value.unwrap(), "secret_value"); + + let flag_value = runner.try_get_test_arg("flag-option").ok().flatten(); + assert!(flag_value.is_some()); + let flag_value: bool = *flag_value.unwrap(); + assert!(flag_value); +} + +#[test] +fn test_perf_runner_with_no_short_activator() { + let tests = vec![create_no_short_activator_test_metadata()]; + let args = vec!["perf-tests", "no_short_test", "--long-only", "value"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with long-only activator" + ); + + let runner = result.unwrap(); + let option_value: Option<&String> = runner.try_get_test_arg("long-only").ok().flatten(); + assert!(option_value.is_some()); + assert_eq!(option_value.unwrap(), "value"); +} + +#[test] +fn test_perf_runner_get_one_nonexistent() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests"]; + + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args).unwrap(); + let result: Result> = runner.try_get_global_arg("nonexistent"); + assert!(result.is_err()); +} + +#[test] +fn test_perf_runner_get_one_different_types() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "42"]; + + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args).unwrap(); + + // Test getting u32 value + let iterations: Option<&u32> = runner.try_get_global_arg("iterations").ok().flatten(); + assert!(iterations.is_some()); + assert_eq!(*iterations.unwrap(), 42); + + // Test getting wrong type returns None + let iterations_as_string: Option<&String> = + runner.try_get_global_arg("iterations").ok().flatten(); + assert!(iterations_as_string.is_none()); +} + +#[test] +fn test_perf_runner_options_debug() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "5"]; + + let runner = + 
PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args).unwrap(); + + // Test that Debug is implemented for PerfRunner + let debug_output = format!("{:?}", runner); + assert!(debug_output.contains("PerfRunner")); + assert!(debug_output.contains("options")); + + // Test that PerfRunnerOptions Debug works + let options_debug = format!("{:?}", runner.options); + assert!(options_debug.contains("PerfRunnerOptions")); + assert!(options_debug.contains("iterations: 5")); + + let options = PerfRunnerOptions::from(&runner.arguments); + assert_eq!(options.iterations, 5); +} + +#[test] +fn test_test_option_debug_and_default() { + let option = PerfTestOption::default(); + + // Test default values + assert_eq!(option.name, ""); + assert_eq!(option.short_activator, None); + assert_eq!(option.long_activator, ""); + assert_eq!(option.display_message, ""); + assert_eq!(option.expected_args_len, 0); + assert!(!option.mandatory); + assert!(!option.sensitive); + + // Test Debug implementation + let debug_output = format!("{:?}", option); + assert!(debug_output.contains("PerfTestOption")); +} + +#[test] +fn test_perf_runner_with_invalid_numeric_value() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "not_a_number"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_err(), + "PerfRunner::with_command_line should fail with invalid numeric value" + ); +} + +#[test] +fn test_perf_runner_with_missing_mandatory_option() { + let tests = vec![create_complex_test_metadata()]; + let args = vec!["perf-tests", "complex_test"]; // Missing mandatory option + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_err(), + "PerfRunner::with_command_line should fail with missing mandatory option" + ); +} + +#[test] +fn test_perf_runner_with_multiple_tests_and_subcommands() { + let tests = vec![create_basic_test_metadata(), create_complex_test_metadata()]; + + // Test with first subcommand + let args = vec!["perf-tests", "basic_test", "--test-option", "value1"]; + let result = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests.clone(), args); + assert!(result.is_ok()); + + let runner = result.unwrap(); + let option_value: Option<&String> = runner.try_get_test_arg("test-option").ok().flatten(); + assert_eq!(option_value.unwrap(), "value1"); + + // Test with second subcommand + let args = vec!["perf-tests", "complex_test", "--mandatory", "required"]; + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!(result.is_ok()); + + let runner = result.unwrap(); + let mandatory_value: Option<&String> = + runner.try_get_test_arg("mandatory-option").ok().flatten(); + assert_eq!(mandatory_value.unwrap(), "required"); +} + +struct ComplexTest {} + +#[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] +impl PerfTest for ComplexTest { + async fn setup(&self, _context: Arc) -> azure_core::Result<()> { + println!("Setting up ComplexTest..."); + // Simulate some async setup work + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + Ok(()) + } + + async fn cleanup(&self, _context: Arc) -> azure_core::Result<()> { + println!("Cleaning up ComplexTest..."); + // Simulate some async cleanup work + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + Ok(()) + } 
+ + async fn run(&self, _context: Arc) -> azure_core::Result<()> { + // Simulate some async test work + println!("Running ComplexTest..."); + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + Ok(()) + } +} + +fn complex_test_create(_runner: PerfRunner) -> CreatePerfTestReturn { + Box::pin(async { Ok(Box::new(ComplexTest {}) as Box) }) +} + +#[tokio::test] +async fn test_perf_runner_with_test_functions() { + let tests = vec![PerfTestMetadata { + name: "complex_test", + description: "A complex test with multiple options", + options: vec![ + PerfTestOption { + name: "mandatory-option", + short_activator: Some('m'), + long_activator: "mandatory", + display_message: "Mandatory option", + expected_args_len: 1, + mandatory: true, + sensitive: false, + }, + PerfTestOption { + name: "sensitive-option", + short_activator: Some('s'), + long_activator: "sensitive", + display_message: "Sensitive option", + expected_args_len: 1, + mandatory: false, + sensitive: true, + }, + PerfTestOption { + name: "flag-option", + short_activator: Some('f'), + long_activator: "flag", + display_message: "Flag option", + expected_args_len: 0, + mandatory: false, + sensitive: false, + }, + ], + create_test: complex_test_create, + }]; + let args = vec![ + "perf-tests", + "complex_test", + "--mandatory", + "required_value", + "--sensitive", + "secret_value", + "--flag", + ]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with complex subcommand" + ); + + let runner = result.unwrap(); + + let mandatory_value: Result> = runner.try_get_test_arg("mandatory-option"); + println!("{:?}", mandatory_value); + assert!(mandatory_value.is_ok()); + let mandatory_value = mandatory_value.unwrap(); + assert!(mandatory_value.is_some()); + assert_eq!(mandatory_value.unwrap(), "required_value"); + + let sensitive_value: Option<&String> = + runner.try_get_test_arg("sensitive-option").ok().flatten(); + assert!(sensitive_value.is_some()); + assert_eq!(sensitive_value.unwrap(), "secret_value"); + + let flag_value = runner.try_get_test_arg("flag-option").ok().flatten(); + assert!(flag_value.is_some()); + let flag_value: bool = *flag_value.unwrap(); + assert!(flag_value); + + let perf_tests_impl = (runner.tests[0].create_test)(runner.clone()) + .await + .expect("Failed to create test instance"); + + let crate_dir = env!("CARGO_MANIFEST_DIR"); + + let test_context = Arc::new( + TestContext::new(crate_dir, crate_dir, runner.tests[0].name) + .expect("Failed to create TestContext"), + ); + + perf_tests_impl + .setup(test_context.clone()) + .await + .expect("Setup failed"); + perf_tests_impl + .run(test_context.clone()) + .await + .expect("Run failed"); + perf_tests_impl + .cleanup(test_context.clone()) + .await + .expect("Cleanup failed"); +} diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs new file mode 100644 index 0000000000..e40a0ccec7 --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -0,0 +1,106 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +//! Tests for functioning of the performance test runner. +//! +//! These tests cover various scenarios for running the `PerfRunner` with different options and measurements. +//! 
+use futures::FutureExt; + +use super::*; +use std::boxed::Box; + +fn create_fibonacci1_test(runner: PerfRunner) -> CreatePerfTestReturn { + struct Fibonacci1Test { + count: u32, + } + + impl Fibonacci1Test { + fn fibonacci(n: u32) -> u32 { + if n <= 1 { + n + } else { + Self::fibonacci(n - 1) + Self::fibonacci(n - 2) + } + } + } + + #[async_trait::async_trait] + impl PerfTest for Fibonacci1Test { + async fn setup(&self, _context: Arc) -> azure_core::Result<()> { + Ok(()) + } + async fn run(&self, _context: Arc) -> azure_core::Result<()> { + let _result = Self::fibonacci(self.count); + // This is a CPU bound test, so yield to allow other tasks to run. Otherwise we jam the tokio scheduler. + // Note that this significantly reduces the performance of the test, but it is necessary to allow parallelism. + // + // In a real-world scenario, the test would be doing async work (e.g. network I/O) which would yield naturally. + tokio::task::yield_now().await; + Ok(()) + } + async fn cleanup(&self, _context: Arc) -> azure_core::Result<()> { + Ok(()) + } + } + + // Return a pinned future that creates the test. + async move { + let count: Option<&String> = runner.try_get_test_arg("count")?; + + println!("Fibonacci1Test with count: {:?}", count); + let count = count.expect("count argument is mandatory"); + let count = count.parse::().map_err(|e| { + azure_core::Error::with_error( + azure_core::error::ErrorKind::Other, + e, + "Invalid count argument", + ) + })?; + Ok(Box::new(Fibonacci1Test { count }) as Box) + } + .boxed() +} + +#[tokio::test] +async fn test_perf_runner_with_single_test() { + let args = vec![ + "perf_test", + "--iterations", + "1", + "--parallel", + "30", + "--duration", + "10", + "--test-results", + "", + "--warmup", + "1", + "fibonacci1", + "-c", + "10", + ]; + let runner = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + vec![PerfTestMetadata { + name: "fibonacci1", + description: "A basic test for testing purposes", + options: vec![PerfTestOption { + name: "count", + mandatory: true, + short_activator: Some('c'), + expected_args_len: 1, + display_message: "The Fibonacci number to compute", + ..Default::default() + }], + create_test: create_fibonacci1_test, + }], + args, + ) + .unwrap(); + + let result = runner.run().await; + println!("Result: {:?}", result); + assert!(result.is_ok()); +} diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs new file mode 100644 index 0000000000..a11e414ab4 --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -0,0 +1,538 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#![doc = include_str!("README.md")] +#![cfg(not(target_arch = "wasm32"))] + +use crate::TestContext; +use azure_core::{ + error::{ErrorKind, ResultExt}, + time::Duration, + Error, Result, +}; +use clap::ArgMatches; +use serde::Serialize; +use std::{ + fmt::Display, + future::Future, + pin::Pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; +use tokio::{select, task::JoinSet}; + +/// A trait representing a performance test. +/// +/// Performance tests have three phases: +/// 1. `setup`: Prepare the test environment. This is called once per iteration. +/// 2. `run`: Execute the performance test. This is called repeatedly for the duration of the test. +/// 3. `cleanup`: Clean up the test environment. This is called once +/// +/// Note that the "run" phase will be executed in parallel across multiple tasks, so it must be thread-safe. 
+#[async_trait::async_trait]
+pub trait PerfTest: Send + Sync {
+    /// Set up the test environment.
+    ///
+    /// Performs whatever steps are needed to set up the test environment. This method is called once per iteration of the test.
+    ///
+    /// # Arguments
+    /// - `context`: An `Arc` to a `TestContext` that provides context information for the test.
+    async fn setup(&self, context: Arc<TestContext>) -> azure_core::Result<()>;
+    async fn run(&self, context: Arc<TestContext>) -> azure_core::Result<()>;
+    async fn cleanup(&self, context: Arc<TestContext>) -> azure_core::Result<()>;
+}
+
+pub type CreatePerfTestReturn =
+    Pin<Box<dyn Future<Output = Result<Box<dyn PerfTest>>>>>;
+
+/// Type alias for an async function that creates a PerfTest instance.
+/// Takes a PerfRunner reference and returns a future that resolves to a PerfTest trait object.
+pub type CreatePerfTestFn = fn(PerfRunner) -> CreatePerfTestReturn;
+
+/// Metadata about a performance test.
+#[derive(Debug, Clone)]
+pub struct PerfTestMetadata {
+    /// The name of the test suite.
+    pub name: &'static str,
+    /// A brief description of the test suite.
+    pub description: &'static str,
+    /// The set of test options supported by this test.
+    pub options: Vec<PerfTestOption>,
+
+    /// An async function used to create the performance test.
+    /// Takes a PerfRunner reference and returns a future that resolves to a PerfTest trait object.
+    pub create_test: CreatePerfTestFn,
+}
+
+/// A `PerfTestOption` defines a test-specific option which will be merged with the common test inputs to define the command line for the performance test.
+#[derive(Debug, Default, Clone)]
+pub struct PerfTestOption {
+    /// The name of the test option. This is used as the key in the `TestArguments` map.
+    pub name: &'static str,
+
+    /// The short form activator for this argument, e.g. `-t`. Does not include the hyphen.
+    pub short_activator: Option<char>,
+
+    /// The long form activator for this argument, e.g. `--test-option`. Does not include the hyphens.
+    pub long_activator: &'static str,
+
+    /// Display message - displayed in the --help message.
+    pub display_message: &'static str,
+
+    /// Expected argument count
+    pub expected_args_len: usize,
+
+    /// Required
+    pub mandatory: bool,
+
+    /// Argument value is sensitive and should be sanitized.
+ pub sensitive: bool, +} + +#[derive(Debug, Clone, Default, Serialize)] +#[allow(dead_code)] +struct PerfTestOutputs { + pub test_name: String, + pub operations_per_second: f64, + pub average_cpu_use: Option, + pub average_memory_use: Option, +} + +#[derive(Debug, Clone)] +struct PerfRunnerOptions { + no_cleanup: bool, + iterations: u32, + parallel: u32, + duration: Duration, + warmup: Duration, + disable_progress: bool, + test_results_filename: String, +} + +impl Display for PerfRunnerOptions { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "PerfRunnerOptions {{ no_cleanup: {}, iterations: {}, parallel: {}, duration: {}, warmup: {}, disable_progress: {}, test_results_filename: '{}' }}", + self.no_cleanup, + self.iterations, + self.parallel, + self.duration, + self.warmup, + self.disable_progress, + self.test_results_filename + ) + } +} + +impl From<&ArgMatches> for PerfRunnerOptions { + fn from(matches: &ArgMatches) -> Self { + Self { + no_cleanup: matches.get_flag("no-cleanup"), + iterations: *matches + .get_one::("iterations") + .expect("defaulted by clap"), + parallel: *matches + .get_one::("parallel") + .expect("defaulted by clap"), + disable_progress: matches.get_flag("no-progress"), + duration: Duration::seconds( + *matches + .get_one::("duration") + .expect("defaulted by clap"), + ), + warmup: Duration::seconds( + *matches.get_one::("warmup").expect("defaulted by clap"), + ), + test_results_filename: matches + .get_one::("test-results") + .expect("defaulted by clap") + .to_string(), + } + } +} + +/// Context information required by performance tests. +#[derive(Debug, Clone)] +pub struct PerfRunner { + options: PerfRunnerOptions, + tests: Vec, + arguments: ArgMatches, + package_dir: &'static str, + module_name: &'static str, + progress: Arc, +} + +impl PerfRunner { + /// Run the performance tests in `tests` using the current process command line. + /// + /// # Arguments + /// + /// * package_dir - The directory containing the package with the tests. Typically `env!("CARGO_PACKAGE_DIR")` + /// * module_name - the name of the module containing the test, typically `file!()` + /// * tests - the set of tests to configure. + /// + pub fn new( + package_dir: &'static str, + module_name: &'static str, + tests: Vec, + ) -> Result { + let command = Self::get_command_from_metadata(&tests); + let arguments = command.try_get_matches(); + let arguments = match arguments { + Ok(a) => a, + Err(e) => { + eprintln!("{}", e); + std::process::exit(1); + } + }; + Ok(Self { + options: PerfRunnerOptions::from(&arguments), + tests, + arguments, + package_dir, + module_name, + progress: Arc::new(AtomicU64::new(0)), + }) + } + + /// Run the performance tests in `tests` with the command line specified in `args` + pub fn with_command_line( + package_dir: &'static str, + module_name: &'static str, + tests: Vec, + args: Vec<&str>, + ) -> azure_core::Result { + let command = Self::get_command_from_metadata(&tests); + let arguments = command + .try_get_matches_from(args) + .with_context(ErrorKind::Other, "Failed to parse command line arguments.")?; + + Ok(Self { + options: PerfRunnerOptions::from(&arguments), + tests, + arguments, + package_dir, + module_name, + progress: Arc::new(AtomicU64::new(0)), + }) + } + + /// Gets a reference to a typed argument by its id. 
+ pub fn try_get_global_arg(&self, id: &str) -> Result> + where + T: Clone + Send + Sync + 'static, + { + self.arguments.try_get_one::(id).with_context( + ErrorKind::Other, + format!("Failed to get argument '{}'.", id), + ) + } + + /// Gets a reference to a typed argument for the selected test by its id. + /// + /// # Arguments + /// + /// * `id` - The id of the argument to get. + /// + /// # Returns + /// + /// A reference to the argument if it exists, or None. + pub fn try_get_test_arg(&self, id: &str) -> Result> + where + T: Clone + Send + Sync + 'static, + { + if let Some((_, args)) = self.arguments.subcommand() { + args.try_get_one::(id).with_context( + ErrorKind::Other, + format!("Failed to get argument '{}' for test.", id), + ) + } else { + Ok(None) + } + } + + /// Gets the name of the selected test. + pub fn get_selected_test_name(&self) -> Result<&str> { + match self.arguments.subcommand_name() { + Some(name) => Ok(name), + None => Err(Error::with_message( + azure_core::error::ErrorKind::Other, + "No test was selected.", + )), + } + } + + /// Runs the selected performance test. + /// + /// This will run the selected test for the configured number of iterations, parallel tasks, and duration. + /// + /// If no test has been selected, this will print an error message and return Ok(()). + /// + /// # Returns + /// + /// A result indicating the success or failure of the test run. + /// + pub async fn run(&self) -> azure_core::Result<()> { + // We can only run tests if there was a test selected. + let test_name = match self.get_selected_test_name() { + Ok(name) => name, + Err(e) => { + eprintln!("Error getting selected test name: {}", e); + return Ok(()); + } + }; + + let test = self + .tests + .iter() + .find(|t| t.name == test_name) + .ok_or_else(|| { + Error::with_message( + azure_core::error::ErrorKind::Other, + format!("Test '{}' not found.", test_name), + ) + })?; + let test_instance = (test.create_test)(self.clone()).await?; + let test_instance: Arc = Arc::from(test_instance); + + let test_mode = crate::TestMode::current()?; + + let context = Arc::new( + crate::recorded::start( + test_mode, + self.package_dir, + self.module_name, + test.name, + None, + ) + .await?, + ); + + println!("Test Configuration: {:#}", self.options); + + for iteration in 0..self.options.iterations { + println!( + "Running test iteration {}/{}", + iteration + 1, + self.options.iterations + ); + + println!("========== Starting test setup =========="); + test_instance.setup(context.clone()).await?; + + println!( + "========== Starting test warmup for {} ==========", + self.options.warmup + ); + + let mut test_contexts = Vec::new(); + for _ in 0..self.options.parallel { + let context = Arc::new( + crate::recorded::start( + test_mode, + self.package_dir, + self.module_name, + test.name, + None, + ) + .await?, + ); + test_contexts.push(context); + } + + self.run_test_for(test_instance.clone(), &test_contexts, self.options.warmup) + .await?; + + println!( + "========== Starting test run for {} ==========", + self.options.duration + ); + + self.run_test_for( + Arc::clone(&test_instance), + &test_contexts, + self.options.duration, + ) + .await?; + if !self.options.no_cleanup { + println!("========== Starting test cleanup =========="); + test_instance.cleanup(context.clone()).await?; + } + + let iteration_count = self.progress.load(Ordering::SeqCst); + println!( + "Completed test iteration {}/{} - {} iterations run in {} seconds - {} iterations/second, {} seconds/iteration", + iteration + 1, + 
self.options.iterations, + iteration_count, + self.options.duration.as_seconds_f64(), + iteration_count as f64 / self.options.duration.as_seconds_f64(), + self.options.duration.as_seconds_f64() / iteration_count as f64 + ); + let operations_per_second = + iteration_count as f64 / self.options.duration.as_seconds_f64(); + let seconds_per_operation = + self.options.duration.as_seconds_f64() / iteration_count as f64; + let duration_per_operation = Duration::seconds_f64(seconds_per_operation); + println!("{operations_per_second:4} operations/second, {duration_per_operation:4} seconds/operation"); + + if !self.options.test_results_filename.is_empty() { + // Write out the results to a file. + println!( + "Writing test results to {}", + self.options.test_results_filename + ); + let results = PerfTestOutputs { + test_name: test.name.to_string(), + operations_per_second, + average_cpu_use: None, + average_memory_use: None, + }; + + let json = serde_json::to_string_pretty(&results).with_context( + ErrorKind::DataConversion, + "Failed to serialize test results to JSON.", + )?; + + println!("Test results: {}", json); + std::fs::write(&self.options.test_results_filename, json) + .with_context(ErrorKind::Io, "Failed to write test results to file.")?; + } + } + Ok(()) + } + pub async fn run_test_for( + &self, + test_instance: Arc, + test_contexts: &[Arc], + duration: Duration, + ) -> azure_core::Result<()> { + // Reset the performance measurements before starting the test. + self.progress.store(0, Ordering::SeqCst); + let mut tasks: JoinSet> = JoinSet::new(); + (0..self.options.parallel).for_each(|i| { + let test_instance_clone = Arc::clone(&test_instance); + let progress = self.progress.clone(); + let test_context = test_contexts[i as usize].clone(); + tasks.spawn(async move { + loop { + test_instance_clone.run(test_context.clone()).await?; + progress.fetch_add(1, Ordering::SeqCst); + } + }); + }); + let start = tokio::time::Instant::now(); + let timeout = tokio::time::Duration::from_secs_f64(duration.as_seconds_f64()); + select!( + _ = tokio::time::sleep(timeout) => {println!("Timeout reached, stopping test tasks: {:?}", start.elapsed());}, + _ = tasks.join_all() => {println!("All test tasks completed: {:?}", start.elapsed());}, + _ = async { + let mut last_count = 0; + loop { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let current_total = self.progress.load(Ordering::SeqCst); + + if start.elapsed().as_secs_f64() != 0f64 && current_total != 0 { + println!("Current {:3}, Total {:5} {:4}", current_total - last_count, current_total, Duration::seconds_f64( start.elapsed().as_secs_f64() / current_total as f64 )); + } + else{ + println!("Current {:3}, Total {:5} ---", current_total - last_count, current_total); + } + + last_count = current_total; + } + }, if !self.options.disable_progress => {}, + ); + println!("Task time elapsed: {:?}", start.elapsed()); + Ok(()) + } + + // Future command line switches: + // * Test Proxy servers. + // * TLS + // * Allow untrusted TLS certificates + // * Advanced options + // * Print job statistics (?) + // * Track latency and print per-operation latency statistics + // * Target throughput (operations/second) (?) + // * Language specific options + // * Max I/O completion threads + // * Minimum number of asynchronous I/O threads in the thread pool + // * Minimum number of worker threads the thread pool creates on demand + // * Sync - run a synchronous version of the test + + /// Constructs a `clap::Command` from the provided test metadata. 
+ fn get_command_from_metadata(tests: &[PerfTestMetadata]) -> clap::Command { + let mut command = clap::Command::new("perf-tests") + .about("Run performance tests for the Azure SDK for Rust") + .arg( + clap::arg!(--iterations "The number of iterations to run each test") + .required(false) + .default_value("1") + .value_parser(clap::value_parser!(u32)) + .global(false), + ) + .arg(clap::arg!(--sync).global(true).required(false)) + .arg( + clap::arg!(--parallel "The number of concurrent tasks to use when running each test") + .required(false) + .default_value("1") + .value_parser(clap::value_parser!(u32)) + .global(true), + ) + .arg(clap::arg!(--"no-progress" "Disable progress reporting").required(false).global(false)) + .arg( + clap::arg!(--duration "The duration of each test in seconds") + .required(false) + .default_value("30") + .value_parser(clap::value_parser!(i64)) + .global(true), + ) + .arg( + clap::arg!(--warmup "The duration of the warmup period in seconds") + .required(false) + .default_value("5") + .value_parser(clap::value_parser!(i64)) + .global(true), + ) + .arg( + clap::arg!(--"test-results" "The file to write test results to") + .required(false) + .default_value("./tests/results.json") + .global(false), + ) + .arg(clap::arg!(--"no-cleanup" "Disable test cleanup") + .required(false).global(true)) + ; + for test in tests { + let mut subcommand = clap::Command::new(test.name).about(test.description); + for option in test.options.iter() { + let mut arg = clap::Arg::new(option.name) + .help(option.display_message) + .long(option.long_activator) + .num_args(option.expected_args_len..=option.expected_args_len) + .required(option.mandatory) + .global(false); + if let Some(short_activator) = option.short_activator { + arg = arg.short(short_activator); + } + if option.sensitive { + arg = arg.hide(true); + } + subcommand = subcommand.arg(arg); + } + command = command.subcommand(subcommand); + } + + command + } +} + +#[cfg(test)] +mod config_tests; + +#[cfg(test)] +mod framework_tests; diff --git a/sdk/core/azure_core_test/src/recording.rs b/sdk/core/azure_core_test/src/recording.rs index 4b572268ce..62bb6c3f78 100644 --- a/sdk/core/azure_core_test/src/recording.rs +++ b/sdk/core/azure_core_test/src/recording.rs @@ -36,7 +36,6 @@ use rand::{ use rand_chacha::ChaCha20Rng; use std::{ borrow::Cow, - cell::OnceCell, collections::HashMap, env, sync::{Arc, Mutex, OnceLock, RwLock}, @@ -51,8 +50,8 @@ pub struct Recording { #[allow(dead_code)] span: EnteredSpan, proxy: Option>, - test_mode_policy: OnceCell>, - recording_policy: OnceCell>, + test_mode_policy: OnceLock>, + recording_policy: OnceLock>, service_directory: String, recording_file: String, recording_assets_file: Option, @@ -61,6 +60,10 @@ pub struct Recording { rand: OnceLock>, } +// It's not 100% clear to me that Recording is Send, but it seems to be. +// TODO: See if there's a way to remove this explicit unsafe impl. +unsafe impl Send for Recording {} + impl Recording { /// Adds a [`Sanitizer`] to sanitize PII for the current test. 
pub async fn add_sanitizer(&self, sanitizer: S) -> azure_core::Result<()> @@ -361,8 +364,8 @@ impl Recording { test_mode, span, proxy, - test_mode_policy: OnceCell::new(), - recording_policy: OnceCell::new(), + test_mode_policy: OnceLock::new(), + recording_policy: OnceLock::new(), service_directory: service_directory.into(), recording_file, recording_assets_file, @@ -380,8 +383,8 @@ impl Recording { test_mode: TestMode::Playback, span: span.entered(), proxy: None, - test_mode_policy: OnceCell::new(), - recording_policy: OnceCell::new(), + test_mode_policy: OnceLock::new(), + recording_policy: OnceLock::new(), service_directory: String::from("sdk/core"), recording_file: String::from("none"), recording_assets_file: None, diff --git a/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml b/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml index 857666e90c..7cb7cf216e 100644 --- a/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml +++ b/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml @@ -40,3 +40,8 @@ rustc_version.workspace = true [lints] workspace = true + +[[test]] +name = "perf" +path = "perf/get_secret.rs" +harness = false diff --git a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs new file mode 100644 index 0000000000..962505a781 --- /dev/null +++ b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +//! Keyvault Secrets performance tests. +//! +//! This test measures the performance of getting a secret from Azure Key Vault. +//! It sets up a secret in the Key Vault during the setup phase and then repeatedly retrieves it +//! during the run phase. The test can be configured with the vault URL via command line arguments +//! to target different Key Vault instances. +//! +//! To run the test, use the following command line arguments: +//! +//! cargo test --package azure_security_keyvault_secrets --test performance_tests -- --duration 10 --parallel 20 get_secret -u https://.vault.azure.net/ +//! 
+ +use std::sync::{Arc, OnceLock}; + +use azure_core::Result; +use azure_core_test::{ + perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata, PerfTestOption}, + Recording, TestContext, +}; +use azure_security_keyvault_secrets::{ + models::SetSecretParameters, SecretClient, SecretClientOptions, +}; +use futures::FutureExt; +struct GetSecrets { + vault_url: String, + random_key_name: OnceLock, + client: OnceLock, +} + +impl GetSecrets { + fn test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "get_secret", + description: "Get a secret from Key Vault", + options: vec![PerfTestOption { + name: "vault_url", + display_message: "The URL of the Key Vault to use in the test", + mandatory: true, + short_activator: Some('u'), + long_activator: "vault-url", + expected_args_len: 1, + ..Default::default() + }], + create_test: Self::create_new_test, + } + } + + fn create_new_test(runner: PerfRunner) -> CreatePerfTestReturn { + async move { + let vault_url_ref: Option<&String> = runner.try_get_test_arg("vault_url")?; + let vault_url = vault_url_ref + .expect("vault_url argument is mandatory") + .clone(); + Ok(Box::new(GetSecrets { + vault_url, + random_key_name: OnceLock::new(), + client: OnceLock::new(), + }) as Box) + } + .boxed() + } + + fn create_random_key_name(recording: &Recording) -> String { + let random_suffix: String = recording.random_string::<8>(Some("perf-")); + format!("perf-{}", random_suffix) + } + + fn get_random_key_name(&self, recording: &Recording) -> &String { + self.random_key_name + .get_or_init(|| Self::create_random_key_name(recording)) + } +} + +#[async_trait::async_trait] +impl PerfTest for GetSecrets { + async fn setup(&self, context: Arc) -> azure_core::Result<()> { + let recording = context.recording(); + let credential = recording.credential(); + + let mut client_options = SecretClientOptions::default(); + recording.instrument(&mut client_options.client_options); + + let client = SecretClient::new( + self.vault_url.as_str(), + credential.clone(), + Some(client_options), + )?; + self.client.get_or_init(|| client); + + self.client + .get() + .unwrap() + .set_secret( + self.get_random_key_name(recording), + SetSecretParameters { + value: Some("secret_value".into()), + ..Default::default() + } + .try_into()?, + None, + ) + .await?; + Ok(()) + } + async fn cleanup(&self, _context: Arc) -> azure_core::Result<()> { + Ok(()) + } + async fn run(&self, context: Arc) -> Result<()> { + let recording = context.recording(); + let _secret = self + .client + .get() + .unwrap() + .get_secret(self.get_random_key_name(recording), None) + .await? 
+ .into_body()?; + Ok(()) + } +} + +#[tokio::main] +async fn main() -> azure_core::Result<()> { + let runner = PerfRunner::new( + env!("CARGO_MANIFEST_DIR"), + file!(), + vec![GetSecrets::test_metadata()], + )?; + + runner.run().await?; + + Ok(()) +} diff --git a/sdk/storage/azure_storage_blob/Cargo.toml b/sdk/storage/azure_storage_blob/Cargo.toml index 6ef074ae46..b60cf310e6 100644 --- a/sdk/storage/azure_storage_blob/Cargo.toml +++ b/sdk/storage/azure_storage_blob/Cargo.toml @@ -37,3 +37,8 @@ azure_storage_blob_test.path = "../azure_storage_blob_test" futures.workspace = true tokio = { workspace = true, features = ["macros"] } tracing.workspace = true + +[[test]] +name = "perf" +path = "perf/perf_tests.rs" +harness = false diff --git a/sdk/storage/azure_storage_blob/assets.json b/sdk/storage/azure_storage_blob/assets.json index 3bb1e158e9..e095ad34cf 100644 --- a/sdk/storage/azure_storage_blob/assets.json +++ b/sdk/storage/azure_storage_blob/assets.json @@ -1,6 +1,6 @@ { "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "rust", - "Tag": "rust/azure_storage_blob_fc6c153d44", + "Tag": "rust/azure_storage_blob_4dd8ebabce", "TagPrefix": "rust/azure_storage_blob" } diff --git a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs new file mode 100644 index 0000000000..74e381d21e --- /dev/null +++ b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs @@ -0,0 +1,113 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +use std::sync::Arc; + +use azure_core::Bytes; +use azure_core_test::{ + perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata, PerfTestOption}, + TestContext, +}; +use azure_identity::DeveloperToolsCredential; +use azure_storage_blob::BlobContainerClient; +use futures::{FutureExt, TryStreamExt}; + +pub struct ListBlobTest { + count: u32, + client: BlobContainerClient, +} + +impl ListBlobTest { + fn create_list_blob_test(runner: PerfRunner) -> CreatePerfTestReturn { + async move { + let count: Option<&String> = runner.try_get_test_arg("count")?; + + println!("ListBlobTest with count: {:?}", count); + let count = count.expect("count argument is mandatory").parse::()?; + println!("Parsed count: {}", count); + + let endpoint: Option<&String> = runner.try_get_test_arg("endpoint")?; + let endpoint = match endpoint { + Some(e) => e.clone(), + None => format!( + "https://{}.blob.core.windows.net", + std::env::var("AZURE_STORAGE_ACCOUNT_NAME") + .expect("AZURE_STORAGE_ACCOUNT_NAME is not set") + ), + }; + println!("Using endpoint: {}", endpoint); + + let container_name = format!("perf-container-{}", uuid::Uuid::new_v4()); + let credential = DeveloperToolsCredential::new(None)?; + let client = BlobContainerClient::new(&endpoint, container_name, credential, None)?; + + Ok(Box::new(ListBlobTest { count, client }) as Box) + } + .boxed() + } + + pub fn test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "list_blob", + description: "List blobs in a container", + options: vec![ + PerfTestOption { + name: "count", + display_message: "The number of blobs to list", + mandatory: true, + short_activator: Some('c'), + long_activator: "count", + expected_args_len: 1, + ..Default::default() + }, + PerfTestOption { + name: "endpoint", + display_message: "The endpoint of the blob storage", + mandatory: false, + short_activator: Some('e'), + long_activator: "endpoint", + expected_args_len: 1, + ..Default::default() + }, + ], + create_test: 
Self::create_list_blob_test, + } + } +} + +#[async_trait::async_trait] +impl PerfTest for ListBlobTest { + async fn setup(&self, _context: Arc) -> azure_core::Result<()> { + // Setup code before running the test + + let _result = self.client.create_container(None).await?; + + for i in 0..self.count { + let blob_name = format!("blob-{}", i); + let blob_client = self.client.blob_client(blob_name); + + let body = vec![0u8; 1024 * 1024]; // 1 MB blob + let body_bytes = Bytes::from(body); + + let _result = blob_client.upload(body_bytes.into(), true, 5, None).await?; + } + + Ok(()) + } + + async fn run(&self, _context: Arc) -> azure_core::Result<()> { + // The actual performance test code + + let mut iterator = self.client.list_blobs(None)?; + while let Some(blob_segment) = iterator.try_next().await? { + let _body = blob_segment.into_body()?; + } + + Ok(()) + } + + async fn cleanup(&self, _context: Arc) -> azure_core::Result<()> { + // Cleanup code after running the test + Ok(()) + } +} diff --git a/sdk/storage/azure_storage_blob/perf/perf_tests.rs b/sdk/storage/azure_storage_blob/perf/perf_tests.rs new file mode 100644 index 0000000000..ea01bc9069 --- /dev/null +++ b/sdk/storage/azure_storage_blob/perf/perf_tests.rs @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/// list_blob performance test. +mod list_blob_test; + +use azure_core_test::perf::PerfRunner; +use list_blob_test::ListBlobTest; + +#[tokio::main] +async fn main() -> azure_core::Result<()> { + let runner = PerfRunner::new( + env!("CARGO_MANIFEST_DIR"), + file!(), + vec![ListBlobTest::test_metadata()], + )?; + + runner.run().await?; + + Ok(()) +} diff --git a/sdk/storage/perf-resources.bicep b/sdk/storage/perf-resources.bicep new file mode 100644 index 0000000000..542f7b958d --- /dev/null +++ b/sdk/storage/perf-resources.bicep @@ -0,0 +1,60 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +param baseName string = resourceGroup().name +param testApplicationOid string +param location string = resourceGroup().location + +var blobDataContributorRoleId = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' +var blobDataOwnerRoleId = 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b' +var encryption = { + keySource: 'Microsoft.Storage' + services: { + blob: { + enabled: true + } + file: { + enabled: true + } + } +} +var networkAcls = { + bypass: 'AzureServices' + defaultAction: 'Allow' + ipRules: [] + virtualNetworkRules: [] +} + +resource blobDataContributor 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(blobDataContributorRoleId, resourceGroup().id) + properties: { + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataContributorRoleId) + principalId: testApplicationOid + } +} + +resource blobDataOwner 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(blobDataOwnerRoleId, resourceGroup().id) + properties: { + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataOwnerRoleId) + principalId: testApplicationOid + } +} + +resource storage 'Microsoft.Storage/storageAccounts@2024-01-01' = { + name: '${baseName}blob' + location: location + kind: 'BlockBlobStorage' + sku: { + name: 'Premium_LRS' + } + properties: { + accessTier: 'Hot' + allowSharedKeyAccess: false + encryption: encryption + networkAcls: networkAcls + supportsHttpsTrafficOnly: true + } +} + +output AZURE_STORAGE_ACCOUNT_NAME string = storage.name diff --git a/sdk/storage/perf-tests.yml b/sdk/storage/perf-tests.yml new file mode 100644 index 0000000000..ea7e63f9a4 --- /dev/null +++ b/sdk/storage/perf-tests.yml @@ -0,0 +1,35 @@ +Service: storage-blob + +Project: azure-storage-blobs-perf + +PrimaryPackage: azure_storage_blob + +PackageVersions: +# - azure_storage_blob: 0.6.0 +# azure_core: 1.7.2 +- azure_storage_blob: source + azure_core: source + +Tests: +# - Test: download +# Class: DownloadBlob +# Arguments: +# - --size 10240 --parallel 64 +# - --size 10485760 --parallel 32 +# - --size 1073741824 --parallel 1 --warmup 60 --duration 60 +# - --size 1073741824 --parallel 8 --warmup 60 --duration 60 + +# - Test: upload +# Class: UploadBlob +# Arguments: +# - --size 10240 --parallel 64 +# - --size 10485760 --parallel 32 +# - --size 1073741824 --parallel 1 --warmup 60 --duration 60 +# - --size 1073741824 --parallel 8 --warmup 60 --duration 60 + +- Test: list-blobs + Class: list_blob + Arguments: + - --count 5 --parallel 64 + - --count 500 --parallel 32 + - --count 50000 --parallel 32 --warmup 60 --duration 60 diff --git a/sdk/storage/perf.yml b/sdk/storage/perf.yml new file mode 100644 index 0000000000..a43c7c913d --- /dev/null +++ b/sdk/storage/perf.yml @@ -0,0 +1,38 @@ +parameters: +- name: PackageVersions + displayName: PackageVersions (regex of package versions to run) + type: string + default: '12|source' +- name: Tests + displayName: Tests (regex of tests to run) + type: string + default: '^(download|upload|list-blobs)$' +- name: Arguments + displayName: Arguments (regex of arguments to run) + type: string + default: '(10240)|(10485760)|(1073741824)|(5 )|(500 )|(50000 )' +- name: Iterations + displayName: Iterations (times to run each test) + type: number + default: '5' +- name: Profile + type: boolean + default: false +- name: AdditionalArguments + displayName: AdditionalArguments (passed to PerfAutomation) + type: string + default: ' ' + +extends: + template: /eng/pipelines/templates/jobs/perf.yml + parameters: + ServiceDirectory: 
storage/azure_storage_blobs + PackageVersions: ${{ parameters.PackageVersions }} + Tests: ${{ parameters.Tests }} + Arguments: ${{ parameters.Arguments }} + Iterations: ${{ parameters.Iterations }} + AdditionalArguments: ${{ parameters.AdditionalArguments }} + Profile: ${{ parameters.Profile }} + EnvVars: + # This is set in the InstallLanguageSteps + VCPKG_BINARY_SOURCES_SECRET: $(VCPKG_BINARY_SOURCES_SECRET)