Skip to content

Commit fed6500

Browse files
authored
feat: add profile_with_tracy feature which plays nicely with bevy's bevy/trace_tracy feature (#393)
# Summary Adds `xtask` utilities for profiling a specific benchmark, as well as a `profile_with_tracy` feature which plays nicely with bevy's `bevy/trace_tracy` feature. I tried using an independent `profiling` setup, but I think this one is the least confusing and most in line with what bevy consumers will already be familiar with.
1 parent 1c91ba8 commit fed6500

File tree

6 files changed

+99
-28
lines changed

6 files changed

+99
-28
lines changed

Cargo.toml

+4
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,9 @@ rhai = ["bevy_mod_scripting_rhai", "bevy_mod_scripting_functions/rhai_bindings"]
5353
## rune
5454
# rune = ["bevy_mod_scripting_rune"]
5555

56+
### Profiling
57+
profile_with_tracy = ["bevy/trace_tracy"]
58+
5659
[dependencies]
5760
bevy = { workspace = true }
5861
bevy_mod_scripting_core = { workspace = true }
@@ -85,6 +88,7 @@ ladfile_builder = { path = "crates/ladfile_builder", version = "0.2.6" }
8588
script_integration_test_harness = { workspace = true }
8689
test_utils = { workspace = true }
8790
libtest-mimic = "0.8"
91+
tracing-tracy = "0.11"
8892

8993
[workspace]
9094
members = [

benches/benchmarks.rs

+34-7
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,18 @@
1-
use std::{path::PathBuf, time::Duration};
2-
3-
use bevy::utils::HashMap;
1+
use bevy::log::tracing_subscriber;
2+
use bevy::log::tracing_subscriber::layer::SubscriberExt;
3+
use bevy::utils::{tracing, HashMap};
44
use criterion::{criterion_main, measurement::Measurement, BenchmarkGroup, Criterion};
55
use script_integration_test_harness::{run_lua_benchmark, run_rhai_benchmark};
6+
use std::{path::PathBuf, sync::LazyLock, time::Duration};
67
use test_utils::{discover_all_tests, Test};
78

89
extern crate bevy_mod_scripting;
910
extern crate script_integration_test_harness;
1011
extern crate test_utils;
1112

13+
static ENABLE_PROFILING: LazyLock<bool> =
14+
LazyLock::new(|| std::env::var("ENABLE_PROFILING").is_ok());
15+
1216
pub trait BenchmarkExecutor {
1317
fn benchmark_group(&self) -> String;
1418
fn benchmark_name(&self) -> String;
@@ -89,10 +93,33 @@ fn script_benchmarks(criterion: &mut Criterion) {
8993
}
9094
}
9195

96+
fn maybe_with_profiler(f: impl Fn(bool)) {
97+
if *ENABLE_PROFILING {
98+
println!("profiling enabled, make sure to run tracy. If using it across windows/WSL you can use something like `tracy-capture.exe -o output.tracy -a localhost` on windows");
99+
// set global tracing subscriber so bevy doesn't set it itself first
100+
let subscriber = tracing_subscriber::Registry::default();
101+
let tracy_layer = tracing_tracy::TracyLayer::default();
102+
103+
let subscriber = subscriber.with(tracy_layer);
104+
105+
tracing::subscriber::set_global_default(subscriber).unwrap();
106+
107+
let _ = tracing_tracy::client::span!("test2");
108+
tracing::info_span!("test");
109+
110+
f(true);
111+
} else {
112+
f(false);
113+
}
114+
}
115+
92116
pub fn benches() {
93-
let mut criterion: criterion::Criterion<_> = (criterion::Criterion::default())
94-
.configure_from_args()
95-
.measurement_time(Duration::from_secs(10));
96-
script_benchmarks(&mut criterion);
117+
maybe_with_profiler(|_profiler| {
118+
let mut criterion: criterion::Criterion<_> = (criterion::Criterion::default())
119+
.configure_from_args()
120+
.measurement_time(Duration::from_secs(10));
121+
122+
script_benchmarks(&mut criterion);
123+
});
97124
}
98125
criterion_main!(benches);

crates/bevy_mod_scripting_core/src/bindings/function/script_function.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ impl DynamicScriptFunction {
103103
args: I,
104104
context: FunctionCallContext,
105105
) -> Result<ScriptValue, InteropError> {
106-
profiling::scope!("Dynamic Call ", self.name().to_string());
106+
profiling::scope!("Dynamic Call ", self.name().deref());
107107
let args = args.into_iter().collect::<VecDeque<_>>();
108108
// should we be inlining call errors into the return value?
109109
let return_val = (self.func)(context, args);
@@ -159,7 +159,7 @@ impl DynamicScriptFunctionMut {
159159
args: I,
160160
context: FunctionCallContext,
161161
) -> Result<ScriptValue, InteropError> {
162-
profiling::scope!("Dynamic Call Mut", self.name().to_string());
162+
profiling::scope!("Dynamic Call Mut", self.name().deref());
163163
let args = args.into_iter().collect::<VecDeque<_>>();
164164
// should we be inlining call errors into the return value?
165165
let mut write = self.func.write();

crates/testing_crates/script_integration_test_harness/src/lib.rs

+5
Original file line numberDiff line numberDiff line change
@@ -318,6 +318,7 @@ pub fn run_lua_benchmark<M: criterion::measurement::Measurement>(
318318
label: &str,
319319
criterion: &mut criterion::BenchmarkGroup<M>,
320320
) -> Result<(), String> {
321+
use bevy::utils::tracing;
321322
use bevy_mod_scripting_lua::mlua::Function;
322323

323324
let plugin = make_test_lua_plugin();
@@ -333,6 +334,7 @@ pub fn run_lua_benchmark<M: criterion::measurement::Measurement>(
333334
if let Some(pre_bencher) = &pre_bencher {
334335
pre_bencher.call::<()>(()).unwrap();
335336
}
337+
tracing::info_span!("profiling_iter", label);
336338
c.iter(|| {
337339
bencher.call::<()>(()).unwrap();
338340
})
@@ -348,6 +350,7 @@ pub fn run_rhai_benchmark<M: criterion::measurement::Measurement>(
348350
label: &str,
349351
criterion: &mut criterion::BenchmarkGroup<M>,
350352
) -> Result<(), String> {
353+
use bevy::utils::tracing;
351354
use bevy_mod_scripting_rhai::rhai::Dynamic;
352355

353356
let plugin = make_test_rhai_plugin();
@@ -367,6 +370,8 @@ pub fn run_rhai_benchmark<M: criterion::measurement::Measurement>(
367370
.call_fn::<Dynamic>(&mut ctxt.scope, &ctxt.ast, "pre_bench", ARGS)
368371
.unwrap();
369372
}
373+
tracing::info_span!("profiling_iter", label);
374+
370375
c.iter(|| {
371376
let _ = runtime
372377
.call_fn::<Dynamic>(&mut ctxt.scope, &ctxt.ast, "bench", ARGS)

crates/testing_crates/test_utils/src/test_data.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -346,7 +346,7 @@ pub fn setup_integration_test<F: FnOnce(&mut World, &mut TypeRegistry)>(init: F)
346346
HierarchyPlugin,
347347
DiagnosticsPlugin,
348348
LogPlugin {
349-
filter: "bevy_mod_scripting_core=debug".to_string(),
349+
filter: "bevy_mod_scripting_core=trace".to_string(),
350350
..Default::default()
351351
},
352352
));

crates/xtask/src/main.rs

+53-18
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,7 @@ enum Feature {
5151
// Rune,
5252

5353
// Profiling
54-
#[strum(serialize = "bevy/trace_tracy")]
55-
Tracy,
54+
ProfileWithTracy,
5655
}
5756

5857
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, strum::EnumIter)]
@@ -101,10 +100,10 @@ impl IntoFeatureGroup for Feature {
101100
Feature::MluaAsync
102101
| Feature::MluaMacros
103102
| Feature::MluaSerialize
104-
| Feature::UnsafeLuaModules
105-
| Feature::Tracy => FeatureGroup::ForExternalCrate,
106-
Feature::BevyBindings | Feature::CoreFunctions => FeatureGroup::BMSFeature,
107-
// don't use wildcard here, we want to be explicit
103+
| Feature::UnsafeLuaModules => FeatureGroup::ForExternalCrate,
104+
Feature::BevyBindings | Feature::CoreFunctions | Feature::ProfileWithTracy => {
105+
FeatureGroup::BMSFeature
106+
} // don't use wildcard here, we want to be explicit
108107
}
109108
}
110109
}
@@ -119,7 +118,6 @@ impl Default for Features {
119118
Feature::Lua54,
120119
Feature::CoreFunctions,
121120
Feature::BevyBindings,
122-
Feature::Tracy,
123121
])
124122
}
125123
}
@@ -356,8 +354,19 @@ impl App {
356354
cmd.arg("--publish");
357355
}
358356
}
359-
Xtasks::Bench {} => {
357+
Xtasks::Bench {
358+
name,
359+
enable_profiling: profile,
360+
} => {
360361
cmd.arg("bench");
362+
363+
if let Some(name) = name {
364+
cmd.arg("--name").arg(name);
365+
}
366+
367+
if profile {
368+
cmd.arg("--profile");
369+
}
361370
}
362371
}
363372

@@ -652,7 +661,14 @@ enum Xtasks {
652661
publish: bool,
653662
},
654663
/// Runs criterion benchmarks generates json required to be published by bencher and generates html performance report
655-
Bench {},
664+
Bench {
665+
/// Whether or not to enable tracy profiling
666+
#[clap(long, default_value = "false", help = "Enable tracy profiling")]
667+
enable_profiling: bool,
668+
/// The name argument passed to `cargo bench`, can be used in combination with profile to selectively profile benchmarks
669+
#[clap(long, help = "The name argument passed to `cargo bench`")]
670+
name: Option<String>,
671+
},
656672
}
657673

658674
#[derive(Serialize, Clone)]
@@ -731,7 +747,10 @@ impl Xtasks {
731747
} => Self::codegen(app_settings, output_dir, bevy_features),
732748
Xtasks::Install { binary } => Self::install(app_settings, binary),
733749
Xtasks::Bencher { publish } => Self::bencher(app_settings, publish),
734-
Xtasks::Bench {} => Self::bench(app_settings),
750+
Xtasks::Bench {
751+
name,
752+
enable_profiling,
753+
} => Self::bench(app_settings, enable_profiling, name),
735754
}?;
736755

737756
Ok("".into())
@@ -1231,18 +1250,34 @@ impl Xtasks {
12311250
Ok(())
12321251
}
12331252

1234-
fn bench(app_settings: GlobalArgs) -> Result<()> {
1253+
fn bench(app_settings: GlobalArgs, profile: bool, name: Option<String>) -> Result<()> {
1254+
log::info!("Profiling enabled: {profile}");
1255+
1256+
let mut features = vec![
1257+
Feature::Lua54,
1258+
Feature::Rhai,
1259+
Feature::CoreFunctions,
1260+
Feature::BevyBindings,
1261+
];
1262+
1263+
if profile {
1264+
std::env::set_var("ENABLE_PROFILING", "1");
1265+
// features.push(Feature::BevyTracy);
1266+
features.push(Feature::ProfileWithTracy);
1267+
}
1268+
1269+
let args = if let Some(name) = name {
1270+
vec!["--".to_owned(), name]
1271+
} else {
1272+
vec![]
1273+
};
1274+
12351275
Self::run_workspace_command(
12361276
// run with just lua54
1237-
&app_settings.with_features(Features::new(vec![
1238-
Feature::Lua54,
1239-
Feature::Rhai,
1240-
Feature::CoreFunctions,
1241-
Feature::BevyBindings,
1242-
])),
1277+
&app_settings.with_features(Features::new(features)),
12431278
"bench",
12441279
"Failed to run benchmarks",
1245-
Vec::<String>::default(),
1280+
args,
12461281
None,
12471282
)
12481283
.with_context(|| "when executing criterion benchmarks")?;

0 commit comments

Comments (0)