Skip to content

feat: add profile_with_tracy feature which plays nicely with bevy's bevy/trace_tracy feature #393

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Mar 26, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@ rhai = ["bevy_mod_scripting_rhai", "bevy_mod_scripting_functions/rhai_bindings"]
## rune
# rune = ["bevy_mod_scripting_rune"]

### Profiling
profile_with_tracy = ["bevy/trace_tracy"]

[dependencies]
bevy = { workspace = true }
bevy_mod_scripting_core = { workspace = true }
Expand Down Expand Up @@ -85,6 +88,7 @@ ladfile_builder = { path = "crates/ladfile_builder", version = "0.2.6" }
script_integration_test_harness = { workspace = true }
test_utils = { workspace = true }
libtest-mimic = "0.8"
tracing-tracy = "0.11"

[workspace]
members = [
Expand Down
41 changes: 34 additions & 7 deletions benches/benchmarks.rs
Original file line number Diff line number Diff line change
@@ -1,14 +1,18 @@
use std::{path::PathBuf, time::Duration};

use bevy::utils::HashMap;
use bevy::log::tracing_subscriber;
use bevy::log::tracing_subscriber::layer::SubscriberExt;
use bevy::utils::{tracing, HashMap};
use criterion::{criterion_main, measurement::Measurement, BenchmarkGroup, Criterion};
use script_integration_test_harness::{run_lua_benchmark, run_rhai_benchmark};
use std::{path::PathBuf, sync::LazyLock, time::Duration};
use test_utils::{discover_all_tests, Test};

extern crate bevy_mod_scripting;
extern crate script_integration_test_harness;
extern crate test_utils;

static ENABLE_PROFILING: LazyLock<bool> =
LazyLock::new(|| std::env::var("ENABLE_PROFILING").is_ok());

pub trait BenchmarkExecutor {
fn benchmark_group(&self) -> String;
fn benchmark_name(&self) -> String;
Expand Down Expand Up @@ -89,10 +93,33 @@ fn script_benchmarks(criterion: &mut Criterion) {
}
}

/// Runs `f`, optionally installing a global `tracing` subscriber with a Tracy
/// layer first.
///
/// Profiling is active when the `ENABLE_PROFILING` env var is set (cached in
/// the `ENABLE_PROFILING` static); the `bool` passed to `f` reports whether it
/// was enabled.
fn maybe_with_profiler(f: impl Fn(bool)) {
    if *ENABLE_PROFILING {
        println!("profiling enabled, make sure to run tracy. If using it across windows/WSL you can use something like `tracy-capture.exe -o output.tracy -a localhost` on windows");
        // Install the subscriber globally *before* bevy initializes, so bevy
        // doesn't set its own subscriber first and the Tracy layer sees all
        // spans emitted by the benchmarks.
        let subscriber = tracing_subscriber::Registry::default();
        let tracy_layer = tracing_tracy::TracyLayer::default();

        let subscriber = subscriber.with(tracy_layer);

        tracing::subscriber::set_global_default(subscriber)
            .expect("a global tracing subscriber was already installed");

        // NOTE: removed leftover smoke-test statements:
        //   `let _ = tracing_tracy::client::span!("test2");` dropped its guard
        //   immediately (zero-length span), and `tracing::info_span!("test");`
        //   created a span that was never entered, which records nothing.

        f(true);
    } else {
        f(false);
    }
}

/// Benchmark entry point invoked by `criterion_main!`.
///
/// Builds a criterion instance (CLI-configurable, 10s measurement time per
/// group) and runs every discovered script benchmark, optionally under the
/// tracy profiler via [`maybe_with_profiler`].
pub fn benches() {
    maybe_with_profiler(|_profiler| {
        let mut criterion = criterion::Criterion::default()
            .configure_from_args()
            .measurement_time(Duration::from_secs(10));

        script_benchmarks(&mut criterion);
    });
}
criterion_main!(benches);
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ impl DynamicScriptFunction {
args: I,
context: FunctionCallContext,
) -> Result<ScriptValue, InteropError> {
profiling::scope!("Dynamic Call ", self.name().to_string());
profiling::scope!("Dynamic Call ", self.name().deref());
let args = args.into_iter().collect::<VecDeque<_>>();
// should we be inlining call errors into the return value?
let return_val = (self.func)(context, args);
Expand Down Expand Up @@ -159,7 +159,7 @@ impl DynamicScriptFunctionMut {
args: I,
context: FunctionCallContext,
) -> Result<ScriptValue, InteropError> {
profiling::scope!("Dynamic Call Mut", self.name().to_string());
profiling::scope!("Dynamic Call Mut", self.name().deref());
let args = args.into_iter().collect::<VecDeque<_>>();
// should we be inlining call errors into the return value?
let mut write = self.func.write();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -318,6 +318,7 @@ pub fn run_lua_benchmark<M: criterion::measurement::Measurement>(
label: &str,
criterion: &mut criterion::BenchmarkGroup<M>,
) -> Result<(), String> {
use bevy::utils::tracing;
use bevy_mod_scripting_lua::mlua::Function;

let plugin = make_test_lua_plugin();
Expand All @@ -333,6 +334,7 @@ pub fn run_lua_benchmark<M: criterion::measurement::Measurement>(
if let Some(pre_bencher) = &pre_bencher {
pre_bencher.call::<()>(()).unwrap();
}
tracing::info_span!("profiling_iter", label);
c.iter(|| {
bencher.call::<()>(()).unwrap();
})
Expand All @@ -348,6 +350,7 @@ pub fn run_rhai_benchmark<M: criterion::measurement::Measurement>(
label: &str,
criterion: &mut criterion::BenchmarkGroup<M>,
) -> Result<(), String> {
use bevy::utils::tracing;
use bevy_mod_scripting_rhai::rhai::Dynamic;

let plugin = make_test_rhai_plugin();
Expand All @@ -367,6 +370,8 @@ pub fn run_rhai_benchmark<M: criterion::measurement::Measurement>(
.call_fn::<Dynamic>(&mut ctxt.scope, &ctxt.ast, "pre_bench", ARGS)
.unwrap();
}
tracing::info_span!("profiling_iter", label);

c.iter(|| {
let _ = runtime
.call_fn::<Dynamic>(&mut ctxt.scope, &ctxt.ast, "bench", ARGS)
Expand Down
2 changes: 1 addition & 1 deletion crates/testing_crates/test_utils/src/test_data.rs
Original file line number Diff line number Diff line change
Expand Up @@ -346,7 +346,7 @@ pub fn setup_integration_test<F: FnOnce(&mut World, &mut TypeRegistry)>(init: F)
HierarchyPlugin,
DiagnosticsPlugin,
LogPlugin {
filter: "bevy_mod_scripting_core=debug".to_string(),
filter: "bevy_mod_scripting_core=trace".to_string(),
..Default::default()
},
));
Expand Down
71 changes: 53 additions & 18 deletions crates/xtask/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,7 @@ enum Feature {
// Rune,

// Profiling
#[strum(serialize = "bevy/trace_tracy")]
Tracy,
ProfileWithTracy,
}

#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, strum::EnumIter)]
Expand Down Expand Up @@ -101,10 +100,10 @@ impl IntoFeatureGroup for Feature {
Feature::MluaAsync
| Feature::MluaMacros
| Feature::MluaSerialize
| Feature::UnsafeLuaModules
| Feature::Tracy => FeatureGroup::ForExternalCrate,
Feature::BevyBindings | Feature::CoreFunctions => FeatureGroup::BMSFeature,
// don't use wildcard here, we want to be explicit
| Feature::UnsafeLuaModules => FeatureGroup::ForExternalCrate,
Feature::BevyBindings | Feature::CoreFunctions | Feature::ProfileWithTracy => {
FeatureGroup::BMSFeature
} // don't use wildcard here, we want to be explicit
}
}
}
Expand All @@ -119,7 +118,6 @@ impl Default for Features {
Feature::Lua54,
Feature::CoreFunctions,
Feature::BevyBindings,
Feature::Tracy,
])
}
}
Expand Down Expand Up @@ -356,8 +354,19 @@ impl App {
cmd.arg("--publish");
}
}
Xtasks::Bench {} => {
Xtasks::Bench {
name,
enable_profiling: profile,
} => {
cmd.arg("bench");

if let Some(name) = name {
cmd.arg("--name").arg(name);
}

if profile {
cmd.arg("--profile");
}
}
}

Expand Down Expand Up @@ -652,7 +661,14 @@ enum Xtasks {
publish: bool,
},
/// Runs criterion benchmarks generates json required to be published by bencher and generates html performance report
Bench {},
Bench {
/// Whether or not to enable tracy profiling
#[clap(long, default_value = "false", help = "Enable tracy profiling")]
enable_profiling: bool,
/// The name argument passed to `cargo bench`, can be used in combination with profile to selectively profile benchmarks
#[clap(long, help = "The name argument passed to `cargo bench`")]
name: Option<String>,
},
}

#[derive(Serialize, Clone)]
Expand Down Expand Up @@ -731,7 +747,10 @@ impl Xtasks {
} => Self::codegen(app_settings, output_dir, bevy_features),
Xtasks::Install { binary } => Self::install(app_settings, binary),
Xtasks::Bencher { publish } => Self::bencher(app_settings, publish),
Xtasks::Bench {} => Self::bench(app_settings),
Xtasks::Bench {
name,
enable_profiling,
} => Self::bench(app_settings, enable_profiling, name),
}?;

Ok("".into())
Expand Down Expand Up @@ -1231,18 +1250,34 @@ impl Xtasks {
Ok(())
}

fn bench(app_settings: GlobalArgs) -> Result<()> {
fn bench(app_settings: GlobalArgs, profile: bool, name: Option<String>) -> Result<()> {
log::info!("Profiling enabled: {profile}");

let mut features = vec![
Feature::Lua54,
Feature::Rhai,
Feature::CoreFunctions,
Feature::BevyBindings,
];

if profile {
std::env::set_var("ENABLE_PROFILING", "1");
// features.push(Feature::BevyTracy);
features.push(Feature::ProfileWithTracy);
}

let args = if let Some(name) = name {
vec!["--".to_owned(), name]
} else {
vec![]
};

Self::run_workspace_command(
// run with just lua54
&app_settings.with_features(Features::new(vec![
Feature::Lua54,
Feature::Rhai,
Feature::CoreFunctions,
Feature::BevyBindings,
])),
&app_settings.with_features(Features::new(features)),
"bench",
"Failed to run benchmarks",
Vec::<String>::default(),
args,
None,
)
.with_context(|| "when executing criterion benchmarks")?;
Expand Down
Loading