diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..89a825b1 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "wpt"] + path = wpt + url = git@github.com:orottier/wpt.git diff --git a/Cargo.toml b/Cargo.toml index 99231278..26dc0f34 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,9 +10,9 @@ version = "0.14.0" crate-type = ["cdylib"] [dependencies] -napi = {version="2.13", features=["napi6"]} +napi = {version="2.13", features=["napi6", "tokio_rt"]} napi-derive = "2.13" -web-audio-api = "0.38" +web-audio-api = "0.39" # web-audio-api = { path = "../web-audio-api-rs" } [target.'cfg(all(any(windows, unix), target_arch = "x86_64", not(target_env = "musl")))'.dependencies] diff --git a/README.md b/README.md index ea1aadd4..7e9f327c 100644 --- a/README.md +++ b/README.md @@ -129,6 +129,26 @@ The npm `postversion` script rely on [`cargo-bump`](https://crates.io/crates/car cargo install cargo-bump ``` +## Running the web-platform-test suite + +Follow the steps for 'Manual Build' first. Then checkout the web-platform-tests submodule with: + +``` +git submodule init +git submodule update +``` + +Then run: + +``` +npm run wpt # build in debug mode and run all wpt test +npm run wpt:only # run all wpt test without build +npm run wpt -- --list # list all wpt test files +npm run wpt -- --filter # apply filter on executed/listed wpt tests +``` + +Avai + ## License [BSD-3-Clause](./LICENSE) diff --git a/bin/wpt-harness.mjs b/bin/wpt-harness.mjs new file mode 100644 index 00000000..bc958f5c --- /dev/null +++ b/bin/wpt-harness.mjs @@ -0,0 +1,105 @@ +import path from 'path'; +import wptRunner from 'wpt-runner'; +import chalk from 'chalk'; +import { program } from 'commander'; + +import * as nodeWebAudioAPI from '../index.mjs'; + +program + .option('--list', 'List the name of the test files') + .option('--with_crashtests', 'Also run crashtests') + .option('--filter ', 'Filter executed OR listed test files', '.*'); + +program.parse(process.argv); + +const options = program.opts(); + +// ------------------------------------------------------- +// Some helpers +// ------------------------------------------------------- +const INDENT_SIZE = 2; + +function indent(string, times) { + const prefix = " ".repeat(times); + return string.split("\n").map(l => prefix + l).join("\n"); +} + +// ------------------------------------------------------- +// WPT Runner configuration options +// ------------------------------------------------------- +const testsPath = 'wpt/webaudio'; +const rootURL = 'webaudio'; + +// monkey patch `window` with our web audio API +const setup = window => { + Object.assign(window, nodeWebAudioAPI); + + // seems required (weirdly...), cf. 
`the-audiobuffer-interface/audiobuffer.html` + window.Float32Array = Float32Array; +} + +const filterRe = new RegExp(`${options.filter}`); + +const filter = (name) => { + if (!options.with_crashtests && name.includes('/crashtests/')) { + return false; + } + if (name.includes('/resources/')) { + return false; + } + if (filterRe.test(name)) { + if (options.list) { + console.log(name); + return false; + } else { + return true; + } + } else { + return false; + } +}; + +// reporter, adapted from default console reporter +// https://github.com/domenic/wpt-runner/blob/master/lib/console-reporter.js +let numPass = 0; +let numFail = 0; +let typeErrorFail = 0; + +const reporter = { + startSuite: name => { + console.log(`\n ${chalk.bold.underline(path.join(testsPath, name))}\n`); + }, + pass: message => { + numPass += 1; + console.log(chalk.dim(indent(chalk.green("√ ") + message, INDENT_SIZE))); + }, + fail: message => { + if (/threw "Error" instead of/.test(message)) { + typeErrorFail += 1; + console.log(chalk.bold.yellow(indent(`| ${message}`, INDENT_SIZE))); + } else { + numFail += 1; + console.log(chalk.bold.red(indent(`\u00D7 ${message}`, INDENT_SIZE))); + } + }, + reportStack: stack => { + // console.log(chalk.dim(indent(stack, INDENT_SIZE * 2))) + }, +}; + +// ------------------------------------------------------- +// Run test suite +// ------------------------------------------------------- +try { + const failures = await wptRunner(testsPath, { rootURL, setup, filter, reporter }); + + console.log(`\n ${chalk.bold.underline('RESULTS:')}`); + console.log(chalk.bold(` - # pass: ${numPass}`)); + console.log(chalk.bold(` - # fail: ${numFail}`)); + console.log(chalk.bold(` - # type error issues: ${typeErrorFail}`)); + + process.exit(failures); +} catch (e) { + console.error(e.stack); + process.exit(1); +} diff --git a/examples/offline.mjs b/examples/offline.mjs index 600566ee..ab5a1cb5 100644 --- a/examples/offline.mjs +++ b/examples/offline.mjs @@ -1,23 +1,55 @@ import { AudioContext, OfflineAudioContext } from '../index.mjs'; -const offline = new OfflineAudioContext(1, 44100, 44100); +const offline = new OfflineAudioContext(1, 48000, 48000); -const osc = offline.createOscillator(); -osc.connect(offline.destination); -osc.frequency.value = 220; -osc.start(0); -osc.stop(1); +offline.suspend(128 / 48000).then(() => { + console.log("suspend"); + + const osc = offline.createOscillator(); + osc.connect(offline.destination); + osc.frequency.value = 220; + osc.start(0); + + console.log("resume"); + offline.resume(); +}); const buffer = await offline.startRendering(); +console.log("buffer duration:", buffer.duration); + +// dirty check the audio buffer +const channelData = buffer.getChannelData(0); + +for (let i = 0; i < 48000; i++) { + // before suspend the graph is empty + if (i < 128) { + if (channelData[i] !== 0) { + throw new Error('should be zero') + } + // first sine sample is zero + } else if (i === 128) { + if (channelData[i] !== 0) { + throw new Error('should be zero') + } + } else { + // should ha ve a sine wave, hopefully without zero values :) + if (channelData[i] === 0) { + throw new Error(`should not be zero ${i}`); + console.log(channelData[i]) + } + } +} const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 
'playback' : 'interactive'; const online = new AudioContext({ latencyHint }); const src = online.createBufferSource(); +// src.loop = true; src.buffer = buffer; +src.loop = true; src.connect(online.destination); src.start(); -await new Promise(resolve => setTimeout(resolve, 1000)); +await new Promise(resolve => setTimeout(resolve, 2000)); await online.close(); diff --git a/generator/templates/audio_context.tmpl.rs b/generator/templates/audio_context.tmpl.rs index 9d8b50bd..9610f915 100644 --- a/generator/templates/audio_context.tmpl.rs +++ b/generator/templates/audio_context.tmpl.rs @@ -1,4 +1,5 @@ use std::io::Cursor; +use std::sync::Arc; use napi::*; use napi_derive::js_function; @@ -6,7 +7,7 @@ use web_audio_api::context::*; use crate::*; -pub(crate) struct ${d.napiName(d.node)}(${d.name(d.node)}); +pub(crate) struct ${d.napiName(d.node)}(Arc<${d.name(d.node)}>); impl ${d.napiName(d.node)} { pub fn create_js_class(env: &Env) -> Result { @@ -22,6 +23,8 @@ impl ${d.napiName(d.node)} { Property::new("createPeriodicWave")?.with_method(create_periodic_wave), Property::new("createBuffer")?.with_method(create_buffer), + Property::new("state")?.with_getter(get_state), + // ---------------------------------------------------- // Factory methods // ---------------------------------------------------- @@ -34,12 +37,6 @@ impl ${d.napiName(d.node)} { ${d.name(d.node) === 'AudioContext' ? ` - // @todo - expose in OfflineAudioContext as well - Property::new("state")?.with_getter(get_state), - Property::new("resume")?.with_method(resume), - Property::new("suspend")?.with_method(suspend), - Property::new("close")?.with_method(close), - // ---------------------------------------------------- // Methods and attributes specific to AudioContext // ---------------------------------------------------- @@ -47,12 +44,19 @@ impl ${d.napiName(d.node)} { Property::new("outputLatency")?.with_getter(get_output_latency), Property::new("setSinkId")?.with_method(set_sink_id), Property::new("createMediaStreamSource")?.with_method(create_media_stream_source), + // implementation specific to online audio context + Property::new("resume")?.with_method(resume), + Property::new("suspend")?.with_method(suspend), + Property::new("close")?.with_method(close), ` : ` // ---------------------------------------------------- // Methods and attributes specifc to OfflineAudioContext // ---------------------------------------------------- Property::new("length")?.with_getter(get_length), Property::new("startRendering")?.with_method(start_rendering), + // implementation specific to offline audio context + Property::new("suspend")?.with_method(suspend_offline), + Property::new("resume")?.with_method(resume_offline), ` } ], @@ -141,7 +145,7 @@ fn constructor(ctx: CallContext) -> Result { // ------------------------------------------------- // Wrap context // ------------------------------------------------- - let napi_audio_context = ${d.napiName(d.node)}(audio_context); + let napi_audio_context = ${d.napiName(d.node)}(Arc::new(audio_context)); ctx.env.wrap(&mut js_this, napi_audio_context)?; js_this.define_properties(&[ @@ -203,6 +207,22 @@ fn get_listener(ctx: CallContext) -> Result { } } +#[js_function] +fn get_state(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::<${d.napiName(d.node)}>(&js_this)?; + let obj = napi_obj.unwrap(); + + let state = obj.state(); + let state_str = match state { + AudioContextState::Suspended => "suspended", + AudioContextState::Running => "running", + 
AudioContextState::Closed => "closed", + }; + + ctx.env.create_string(state_str) +} + // ---------------------------------------------------- // METHODS // ---------------------------------------------------- @@ -339,31 +359,11 @@ fn ${d.slug(factoryName)}(ctx: CallContext) -> Result { `; }).join('')} - ${d.name(d.node) === 'AudioContext' ? ` // ---------------------------------------------------- // Methods and attributes specific to AudioContext // ---------------------------------------------------- - -// @todo - expose in OfflineAudioContext -// see https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state -#[js_function] -fn get_state(ctx: CallContext) -> Result { - let js_this = ctx.this_unchecked::(); - let napi_obj = ctx.env.unwrap::<${d.napiName(d.node)}>(&js_this)?; - let obj = napi_obj.unwrap(); - - let state = obj.state(); - let state_str = match state { - AudioContextState::Suspended => "suspended", - AudioContextState::Running => "running", - AudioContextState::Closed => "closed", - }; - - ctx.env.create_string(state_str) -} - ${['resume', 'suspend', 'close'].map(method => ` // @todo - async version #[js_function] @@ -451,22 +451,64 @@ fn get_length(ctx: CallContext) -> Result { fn start_rendering(ctx: CallContext) -> Result { let js_this = ctx.this_unchecked::(); let napi_obj = ctx.env.unwrap::<${d.napiName(d.node)}>(&js_this)?; + let clone = napi_obj.0.clone(); - let audio_buffer = napi_obj.0.start_rendering_sync(); + ctx.env.execute_tokio_future( + async move { + let audio_buffer = clone.start_rendering().await; + Ok(audio_buffer) + }, + |&mut env, audio_buffer| { + // create js audio buffer instance + let store_ref: &mut napi::Ref<()> = env.get_instance_data()?.unwrap(); + let store: JsObject = env.get_reference_value(store_ref)?; + let ctor: JsFunction = store.get_named_property("AudioBuffer")?; + // this should be cleaned + let mut options = env.create_object()?; + options.set("__internal_caller__", env.get_null())?; + // populate with audio buffer + let js_audio_buffer = ctor.new_instance(&[options])?; + let napi_audio_buffer = env.unwrap::(&js_audio_buffer)?; + napi_audio_buffer.populate(audio_buffer); - // create js audio buffer instance - let store_ref: &mut napi::Ref<()> = ctx.env.get_instance_data()?.unwrap(); - let store: JsObject = ctx.env.get_reference_value(store_ref)?; - let ctor: JsFunction = store.get_named_property("AudioBuffer")?; - let mut options = ctx.env.create_object()?; - options.set("__internal_caller__", ctx.env.get_null())?; + Ok(js_audio_buffer) + }, + ) +} - // populate with audio buffer - let js_audio_buffer = ctor.new_instance(&[options])?; - let napi_audio_buffer = ctx.env.unwrap::(&js_audio_buffer)?; - napi_audio_buffer.populate(audio_buffer); +#[js_function(1)] +fn suspend_offline(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let clone = napi_obj.0.clone(); - Ok(js_audio_buffer) + let when = match ctx.try_get::(0)? { + Either::A(value) => value.get_double()?, + Either::B(_) => 0. 
+ }; + + ctx.env.execute_tokio_future( + async move { + clone.suspend(when).await; + Ok(()) + }, + |&mut env, _val| env.get_undefined(), + ) +} + +#[js_function] +fn resume_offline(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let clone = napi_obj.0.clone(); + + ctx.env.execute_tokio_future( + async move { + clone.resume().await; + Ok(()) + }, + |&mut env, _val| env.get_undefined(), + ) } ` } diff --git a/generator/templates/audio_nodes.tmpl.rs b/generator/templates/audio_nodes.tmpl.rs index 75b4b6bd..20d01ec6 100644 --- a/generator/templates/audio_nodes.tmpl.rs +++ b/generator/templates/audio_nodes.tmpl.rs @@ -384,8 +384,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -418,8 +418,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), @@ -755,8 +755,8 @@ fn set_${d.slug(attr)}(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() {${idl.values.map(v => ` + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() {${idl.values.map(v => ` "${v.value}" => ${idl.name}::${d.camelcase(v.value)},`).join('')} _ => panic!("undefined value for ${idl.name}"), }; diff --git a/generator/templates/audio_param.tmpl.rs b/generator/templates/audio_param.tmpl.rs index af14cbe5..9db1b621 100644 --- a/generator/templates/audio_param.tmpl.rs +++ b/generator/templates/audio_param.tmpl.rs @@ -1,6 +1,6 @@ use napi::*; use napi_derive::js_function; -use web_audio_api::AudioParam; +use web_audio_api::{AudioParam, AutomationRate}; pub(crate) struct NapiAudioParam(AudioParam); @@ -16,10 +16,20 @@ impl NapiAudioParam { Property::new("Symbol.toStringTag")? .with_value(&env.create_string("AudioParam")?) .with_property_attributes(PropertyAttributes::Static), - + // Attributes + Property::new("automationRate")? + .with_getter(get_automation_rate) + .with_setter(set_automation_rate), + Property::new("defaultValue")? + .with_getter(get_default_value), + Property::new("maxValue")? + .with_getter(get_max_value), + Property::new("minValue")? + .with_getter(get_min_value), Property::new("value")? .with_getter(get_value) .with_setter(set_value), + // Methods Property::new("setValueAtTime")?.with_method(set_value_at_time), Property::new("linearRampToValueAtTime")?.with_method(linear_ramp_to_value_at_time), Property::new("exponentialRampToValueAtTime")? 
@@ -38,6 +48,70 @@ impl NapiAudioParam { } } +// Attributes +#[js_function] +fn get_automation_rate(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let value = obj.automation_rate(); + let value_str = match value { + AutomationRate::A => "a-rate", + AutomationRate::K => "k-rate", + }; + + ctx.env.create_string(value_str) +} + +#[js_function(1)] +fn set_automation_rate(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let js_str = ctx.get::(0)?; + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { + "a-rate" => AutomationRate::A, + "k-rate" => AutomationRate::K, + _ => panic!("The provided value '{:?}' is not a valid enum value of type AutomationRate.", utf8_str), + }; + obj.set_automation_rate(value); + + ctx.env.get_undefined() +} + +#[js_function] +fn get_default_value(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let value = obj.default_value(); + ctx.env.create_double(value as f64) +} + +#[js_function] +fn get_max_value(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let value = obj.max_value(); + ctx.env.create_double(value as f64) +} + +#[js_function] +fn get_min_value(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let value = obj.min_value(); + ctx.env.create_double(value as f64) +} + #[js_function] fn get_value(ctx: CallContext) -> Result { let js_this = ctx.this_unchecked::(); @@ -60,6 +134,7 @@ fn set_value(ctx: CallContext) -> Result { ctx.env.get_undefined() } +// Methods #[js_function(2)] fn set_value_at_time(ctx: CallContext) -> Result { let js_this = ctx.this_unchecked::(); diff --git a/monkey-patch.js b/monkey-patch.js index 6705c795..b47f28d4 100644 --- a/monkey-patch.js +++ b/monkey-patch.js @@ -132,9 +132,9 @@ function patchOfflineAudioContext(nativeBinding) { } // promisify sync APIs - startRendering() { + async startRendering() { try { - const audioBuffer = super.startRendering(); + const audioBuffer = await super.startRendering(); clearTimeout(this.__keepAwakeId); return Promise.resolve(audioBuffer); diff --git a/package.json b/package.json index d384da2f..5fc18496 100644 --- a/package.json +++ b/package.json @@ -41,7 +41,9 @@ "lint": "eslint monkey-patch.js index.cjs index.mjs && eslint examples/*.mjs", "preversion": "yarn install && npm run generate", "postversion": "cargo bump $npm_package_version && git commit -am \"v$npm_package_version\" && node bin/check-changelog.mjs", - "test": "mocha" + "test": "mocha", + "wpt": "npm run generate && napi build --platform && node ./bin/wpt-harness.mjs", + "wpt:only": "node ./bin/wpt-harness.mjs" }, "devDependencies": { "@ircam/eslint-config": "^1.3.0", @@ -49,8 +51,9 @@ "@sindresorhus/slugify": "^2.1.1", "camelcase": "^7.0.1", "chai": "^4.3.7", - "chalk": "^5.2.0", + "chalk": "^5.3.0", "cli-table": "^0.3.11", + "commander": "^11.1.0", "dotenv": "^16.0.3", "eslint": "^8.32.0", "mocha": "^10.2.0", @@ -59,7 +62,8 @@ "ping": "^0.4.2", "template-literal": "^1.0.4", "waves-masters": "^2.3.1", - "webidl2": "^24.2.0" + "webidl2": "^24.2.0", + "wpt-runner": "^5.0.0" }, "dependencies": { 
"@napi-rs/cli": "^2.14.3", diff --git a/src/analyser_node.rs b/src/analyser_node.rs index 53fb77de..ec5d70aa 100644 --- a/src/analyser_node.rs +++ b/src/analyser_node.rs @@ -282,8 +282,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -316,8 +316,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/audio_buffer_source_node.rs b/src/audio_buffer_source_node.rs index 086004a6..8b53b01e 100644 --- a/src/audio_buffer_source_node.rs +++ b/src/audio_buffer_source_node.rs @@ -257,8 +257,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -291,8 +291,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/audio_context.rs b/src/audio_context.rs index f2508622..983e6c78 100644 --- a/src/audio_context.rs +++ b/src/audio_context.rs @@ -18,6 +18,7 @@ // -------------------------------------------------------------------------- // use std::io::Cursor; +use std::sync::Arc; use napi::*; use napi_derive::js_function; @@ -25,7 +26,7 @@ use web_audio_api::context::*; use crate::*; -pub(crate) struct NapiAudioContext(AudioContext); +pub(crate) struct NapiAudioContext(Arc); impl NapiAudioContext { pub fn create_js_class(env: &Env) -> Result { @@ -39,6 +40,7 @@ impl NapiAudioContext { Property::new("decodeAudioData")?.with_method(decode_audio_data), Property::new("createPeriodicWave")?.with_method(create_periodic_wave), Property::new("createBuffer")?.with_method(create_buffer), + Property::new("state")?.with_getter(get_state), // ---------------------------------------------------- // Factory methods // ---------------------------------------------------- @@ -57,11 +59,6 @@ impl NapiAudioContext { Property::new("createPanner")?.with_method(create_panner), Property::new("createStereoPanner")?.with_method(create_stereo_panner), Property::new("createWaveShaper")?.with_method(create_wave_shaper), - // @todo - expose in OfflineAudioContext as well - Property::new("state")?.with_getter(get_state), - Property::new("resume")?.with_method(resume), - 
Property::new("suspend")?.with_method(suspend), - Property::new("close")?.with_method(close), // ---------------------------------------------------- // Methods and attributes specific to AudioContext // ---------------------------------------------------- @@ -69,6 +66,10 @@ impl NapiAudioContext { Property::new("outputLatency")?.with_getter(get_output_latency), Property::new("setSinkId")?.with_method(set_sink_id), Property::new("createMediaStreamSource")?.with_method(create_media_stream_source), + // implementation specific to online audio context + Property::new("resume")?.with_method(resume), + Property::new("suspend")?.with_method(suspend), + Property::new("close")?.with_method(close), ], ) } @@ -143,7 +144,7 @@ fn constructor(ctx: CallContext) -> Result { // ------------------------------------------------- // Wrap context // ------------------------------------------------- - let napi_audio_context = NapiAudioContext(audio_context); + let napi_audio_context = NapiAudioContext(Arc::new(audio_context)); ctx.env.wrap(&mut js_this, napi_audio_context)?; js_this.define_properties(&[ @@ -204,6 +205,22 @@ fn get_listener(ctx: CallContext) -> Result { } } +#[js_function] +fn get_state(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let state = obj.state(); + let state_str = match state { + AudioContextState::Suspended => "suspended", + AudioContextState::Running => "running", + AudioContextState::Closed => "closed", + }; + + ctx.env.create_string(state_str) +} + // ---------------------------------------------------- // METHODS // ---------------------------------------------------- @@ -495,24 +512,6 @@ fn create_wave_shaper(ctx: CallContext) -> Result { // Methods and attributes specific to AudioContext // ---------------------------------------------------- -// @todo - expose in OfflineAudioContext -// see https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state -#[js_function] -fn get_state(ctx: CallContext) -> Result { - let js_this = ctx.this_unchecked::(); - let napi_obj = ctx.env.unwrap::(&js_this)?; - let obj = napi_obj.unwrap(); - - let state = obj.state(); - let state_str = match state { - AudioContextState::Suspended => "suspended", - AudioContextState::Running => "running", - AudioContextState::Closed => "closed", - }; - - ctx.env.create_string(state_str) -} - // @todo - async version #[js_function] fn resume(ctx: CallContext) -> Result { diff --git a/src/audio_listener.rs b/src/audio_listener.rs index aaea696d..1e109583 100644 --- a/src/audio_listener.rs +++ b/src/audio_listener.rs @@ -8,14 +8,24 @@ pub(crate) struct NapiAudioListener(AudioListener); impl NapiAudioListener { pub fn create_js_class(env: &Env) -> Result { - env.define_class("AudioListener", constructor, &[]) + env.define_class( + "AudioListener", + constructor, + &[ + Property::new("setPosition")?.with_method(set_position), + Property::new("setOrientation")?.with_method(set_orientation), + ], + ) + } + + pub fn unwrap(&mut self) -> &mut AudioListener { + &mut self.0 } } // https://webaudio.github.io/web-audio-api/#AudioListener // // @note: should be a private constructor -// #todo: implement deprecateds methods: `setOrientation` and `setPosition` #[js_function(1)] fn constructor(ctx: CallContext) -> Result { let mut js_this = ctx.this_unchecked::(); @@ -112,3 +122,54 @@ fn constructor(ctx: CallContext) -> Result { ctx.env.get_undefined() } + +#[js_function(3)] +fn set_position(ctx: CallContext) -> 
Result { + // TODO https://webaudio.github.io/web-audio-api/#dom-audiolistener-setposition + // + // When any of the positionX, positionY, and positionZ AudioParams for this AudioListener have + // an automation curve set using setValueCurveAtTime() at the time this method is called, a + // NotSupportedError MUST be thrown. + let js_this = ctx.this_unchecked::(); + let napi_node = ctx.env.unwrap::(&js_this)?; + let node = napi_node.unwrap(); + + let x = ctx.get::(0)?.get_double()? as f32; + let y = ctx.get::(1)?.get_double()? as f32; + let z = ctx.get::(2)?.get_double()? as f32; + + node.position_x().set_value(x); + node.position_y().set_value(y); + node.position_z().set_value(z); + + ctx.env.get_undefined() +} + +#[js_function(6)] +fn set_orientation(ctx: CallContext) -> Result { + // TODO https://webaudio.github.io/web-audio-api/#dom-audiolistener-setorientation + // + // If any of the forwardX, forwardY, forwardZ, upX, upY and upZ AudioParams have an automation + // curve set using setValueCurveAtTime() at the time this method is called, a NotSupportedError + // MUST be thrown. + let js_this = ctx.this_unchecked::(); + let napi_node = ctx.env.unwrap::(&js_this)?; + let node = napi_node.unwrap(); + + let x_forward = ctx.get::(0)?.get_double()? as f32; + let y_forward = ctx.get::(1)?.get_double()? as f32; + let z_forward = ctx.get::(2)?.get_double()? as f32; + let x_up = ctx.get::(3)?.get_double()? as f32; + let y_up = ctx.get::(4)?.get_double()? as f32; + let z_up = ctx.get::(5)?.get_double()? as f32; + + node.forward_x().set_value(x_forward); + node.forward_y().set_value(y_forward); + node.forward_z().set_value(z_forward); + + node.up_x().set_value(x_up); + node.up_y().set_value(y_up); + node.up_z().set_value(z_up); + + ctx.env.get_undefined() +} diff --git a/src/audio_param.rs b/src/audio_param.rs index 163476ec..58a7dbe0 100644 --- a/src/audio_param.rs +++ b/src/audio_param.rs @@ -19,7 +19,7 @@ use napi::*; use napi_derive::js_function; -use web_audio_api::AudioParam; +use web_audio_api::{AudioParam, AutomationRate}; pub(crate) struct NapiAudioParam(AudioParam); @@ -35,9 +35,17 @@ impl NapiAudioParam { Property::new("Symbol.toStringTag")? .with_value(&env.create_string("AudioParam")?) .with_property_attributes(PropertyAttributes::Static), + // Attributes + Property::new("automationRate")? + .with_getter(get_automation_rate) + .with_setter(set_automation_rate), + Property::new("defaultValue")?.with_getter(get_default_value), + Property::new("maxValue")?.with_getter(get_max_value), + Property::new("minValue")?.with_getter(get_min_value), Property::new("value")? .with_getter(get_value) .with_setter(set_value), + // Methods Property::new("setValueAtTime")?.with_method(set_value_at_time), Property::new("linearRampToValueAtTime")?.with_method(linear_ramp_to_value_at_time), Property::new("exponentialRampToValueAtTime")? 
@@ -56,6 +64,73 @@ impl NapiAudioParam { } } +// Attributes +#[js_function] +fn get_automation_rate(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let value = obj.automation_rate(); + let value_str = match value { + AutomationRate::A => "a-rate", + AutomationRate::K => "k-rate", + }; + + ctx.env.create_string(value_str) +} + +#[js_function(1)] +fn set_automation_rate(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let js_str = ctx.get::(0)?; + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { + "a-rate" => AutomationRate::A, + "k-rate" => AutomationRate::K, + _ => panic!( + "The provided value '{:?}' is not a valid enum value of type AutomationRate.", + utf8_str + ), + }; + obj.set_automation_rate(value); + + ctx.env.get_undefined() +} + +#[js_function] +fn get_default_value(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let value = obj.default_value(); + ctx.env.create_double(value as f64) +} + +#[js_function] +fn get_max_value(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let value = obj.max_value(); + ctx.env.create_double(value as f64) +} + +#[js_function] +fn get_min_value(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = napi_obj.unwrap(); + + let value = obj.min_value(); + ctx.env.create_double(value as f64) +} + #[js_function] fn get_value(ctx: CallContext) -> Result { let js_this = ctx.this_unchecked::(); @@ -78,6 +153,7 @@ fn set_value(ctx: CallContext) -> Result { ctx.env.get_undefined() } +// Methods #[js_function(2)] fn set_value_at_time(ctx: CallContext) -> Result { let js_this = ctx.this_unchecked::(); diff --git a/src/biquad_filter_node.rs b/src/biquad_filter_node.rs index f099bd67..cb9bab05 100644 --- a/src/biquad_filter_node.rs +++ b/src/biquad_filter_node.rs @@ -304,8 +304,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -338,8 +338,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), @@ -395,8 +395,8 @@ fn set_type(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "lowpass" => BiquadFilterType::Lowpass, "highpass" => BiquadFilterType::Highpass, "bandpass" => 
BiquadFilterType::Bandpass, diff --git a/src/channel_merger_node.rs b/src/channel_merger_node.rs index d691a0ef..5cac4e3e 100644 --- a/src/channel_merger_node.rs +++ b/src/channel_merger_node.rs @@ -227,8 +227,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -261,8 +261,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/channel_splitter_node.rs b/src/channel_splitter_node.rs index b65a46dd..06e5c0e5 100644 --- a/src/channel_splitter_node.rs +++ b/src/channel_splitter_node.rs @@ -227,8 +227,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -261,8 +261,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/constant_source_node.rs b/src/constant_source_node.rs index b9552fdc..d2ad91ac 100644 --- a/src/constant_source_node.rs +++ b/src/constant_source_node.rs @@ -192,8 +192,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -226,8 +226,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/convolver_node.rs b/src/convolver_node.rs index 60ead53b..f7102fa3 100644 --- a/src/convolver_node.rs +++ b/src/convolver_node.rs @@ -245,8 +245,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = 
ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -279,8 +279,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/delay_node.rs b/src/delay_node.rs index 40f628ef..45d51e3d 100644 --- a/src/delay_node.rs +++ b/src/delay_node.rs @@ -241,8 +241,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -275,8 +275,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/dynamics_compressor_node.rs b/src/dynamics_compressor_node.rs index 60ccd547..7e233cd9 100644 --- a/src/dynamics_compressor_node.rs +++ b/src/dynamics_compressor_node.rs @@ -295,8 +295,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -329,8 +329,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/gain_node.rs b/src/gain_node.rs index 75698524..cd893d85 100644 --- a/src/gain_node.rs +++ b/src/gain_node.rs @@ -233,8 +233,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -267,8 +267,8 @@ fn set_channel_interpretation(ctx: 
CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/iir_filter_node.rs b/src/iir_filter_node.rs index 3ebb722b..06ded9f4 100644 --- a/src/iir_filter_node.rs +++ b/src/iir_filter_node.rs @@ -252,8 +252,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -286,8 +286,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/offline_audio_context.rs b/src/offline_audio_context.rs index ef02b97b..b95cc702 100644 --- a/src/offline_audio_context.rs +++ b/src/offline_audio_context.rs @@ -18,6 +18,7 @@ // -------------------------------------------------------------------------- // use std::io::Cursor; +use std::sync::Arc; use napi::*; use napi_derive::js_function; @@ -25,7 +26,7 @@ use web_audio_api::context::*; use crate::*; -pub(crate) struct NapiOfflineAudioContext(OfflineAudioContext); +pub(crate) struct NapiOfflineAudioContext(Arc); impl NapiOfflineAudioContext { pub fn create_js_class(env: &Env) -> Result { @@ -39,6 +40,7 @@ impl NapiOfflineAudioContext { Property::new("decodeAudioData")?.with_method(decode_audio_data), Property::new("createPeriodicWave")?.with_method(create_periodic_wave), Property::new("createBuffer")?.with_method(create_buffer), + Property::new("state")?.with_getter(get_state), // ---------------------------------------------------- // Factory methods // ---------------------------------------------------- @@ -62,6 +64,9 @@ impl NapiOfflineAudioContext { // ---------------------------------------------------- Property::new("length")?.with_getter(get_length), Property::new("startRendering")?.with_method(start_rendering), + // implementation specific to offline audio context + Property::new("suspend")?.with_method(suspend_offline), + Property::new("resume")?.with_method(resume_offline), ], ) } @@ -87,7 +92,7 @@ fn constructor(ctx: CallContext) -> Result { // ------------------------------------------------- // Wrap context // ------------------------------------------------- - let napi_audio_context = NapiOfflineAudioContext(audio_context); + let napi_audio_context = NapiOfflineAudioContext(Arc::new(audio_context)); ctx.env.wrap(&mut js_this, napi_audio_context)?; js_this.define_properties(&[ @@ -148,6 +153,22 @@ fn get_listener(ctx: CallContext) -> Result { } } +#[js_function] +fn get_state(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let obj = 
napi_obj.unwrap(); + + let state = obj.state(); + let state_str = match state { + AudioContextState::Suspended => "suspended", + AudioContextState::Running => "running", + AudioContextState::Closed => "closed", + }; + + ctx.env.create_string(state_str) +} + // ---------------------------------------------------- // METHODS // ---------------------------------------------------- @@ -453,20 +474,62 @@ fn get_length(ctx: CallContext) -> Result { fn start_rendering(ctx: CallContext) -> Result { let js_this = ctx.this_unchecked::(); let napi_obj = ctx.env.unwrap::(&js_this)?; + let clone = napi_obj.0.clone(); + + ctx.env.execute_tokio_future( + async move { + let audio_buffer = clone.start_rendering().await; + Ok(audio_buffer) + }, + |&mut env, audio_buffer| { + // create js audio buffer instance + let store_ref: &mut napi::Ref<()> = env.get_instance_data()?.unwrap(); + let store: JsObject = env.get_reference_value(store_ref)?; + let ctor: JsFunction = store.get_named_property("AudioBuffer")?; + // this should be cleaned + let mut options = env.create_object()?; + options.set("__internal_caller__", env.get_null())?; + // populate with audio buffer + let js_audio_buffer = ctor.new_instance(&[options])?; + let napi_audio_buffer = env.unwrap::(&js_audio_buffer)?; + napi_audio_buffer.populate(audio_buffer); - let audio_buffer = napi_obj.0.start_rendering_sync(); + Ok(js_audio_buffer) + }, + ) +} - // create js audio buffer instance - let store_ref: &mut napi::Ref<()> = ctx.env.get_instance_data()?.unwrap(); - let store: JsObject = ctx.env.get_reference_value(store_ref)?; - let ctor: JsFunction = store.get_named_property("AudioBuffer")?; - let mut options = ctx.env.create_object()?; - options.set("__internal_caller__", ctx.env.get_null())?; +#[js_function(1)] +fn suspend_offline(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let clone = napi_obj.0.clone(); + + let when = match ctx.try_get::(0)? 
{ + Either::A(value) => value.get_double()?, + Either::B(_) => 0., + }; - // populate with audio buffer - let js_audio_buffer = ctor.new_instance(&[options])?; - let napi_audio_buffer = ctx.env.unwrap::(&js_audio_buffer)?; - napi_audio_buffer.populate(audio_buffer); + ctx.env.execute_tokio_future( + async move { + clone.suspend(when).await; + Ok(()) + }, + |&mut env, _val| env.get_undefined(), + ) +} - Ok(js_audio_buffer) +#[js_function] +fn resume_offline(ctx: CallContext) -> Result { + let js_this = ctx.this_unchecked::(); + let napi_obj = ctx.env.unwrap::(&js_this)?; + let clone = napi_obj.0.clone(); + + ctx.env.execute_tokio_future( + async move { + clone.resume().await; + Ok(()) + }, + |&mut env, _val| env.get_undefined(), + ) } diff --git a/src/oscillator_node.rs b/src/oscillator_node.rs index 252112c9..992811d5 100644 --- a/src/oscillator_node.rs +++ b/src/oscillator_node.rs @@ -287,8 +287,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -321,8 +321,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), @@ -411,8 +411,8 @@ fn set_type(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "sine" => OscillatorType::Sine, "square" => OscillatorType::Square, "sawtooth" => OscillatorType::Sawtooth, diff --git a/src/panner_node.rs b/src/panner_node.rs index ac1c3697..a5a13bce 100644 --- a/src/panner_node.rs +++ b/src/panner_node.rs @@ -422,8 +422,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -456,8 +456,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), @@ -583,8 +583,8 @@ fn set_panning_model(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match 
utf8_str.as_str() { "equalpower" => PanningModelType::EqualPower, "HRTF" => PanningModelType::HRTF, _ => panic!("undefined value for PanningModelType"), @@ -602,8 +602,8 @@ fn set_distance_model(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "linear" => DistanceModelType::Linear, "inverse" => DistanceModelType::Inverse, "exponential" => DistanceModelType::Exponential, diff --git a/src/stereo_panner_node.rs b/src/stereo_panner_node.rs index fbd260b6..5e1ef5d2 100644 --- a/src/stereo_panner_node.rs +++ b/src/stereo_panner_node.rs @@ -234,8 +234,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -268,8 +268,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), diff --git a/src/wave_shaper_node.rs b/src/wave_shaper_node.rs index 8b56d43c..f400a6d3 100644 --- a/src/wave_shaper_node.rs +++ b/src/wave_shaper_node.rs @@ -251,8 +251,8 @@ fn set_channel_count_mode(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "max" => ChannelCountMode::Max, "clamped-max" => ChannelCountMode::ClampedMax, "explicit" => ChannelCountMode::Explicit, @@ -285,8 +285,8 @@ fn set_channel_interpretation(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "speakers" => ChannelInterpretation::Speakers, "discrete" => ChannelInterpretation::Discrete, _ => panic!("undefined value for ChannelInterpretation"), @@ -369,8 +369,8 @@ fn set_oversample(ctx: CallContext) -> Result { let node = napi_node.unwrap(); let js_str = ctx.get::(0)?; - let uf8_str = js_str.into_utf8()?.into_owned()?; - let value = match uf8_str.as_str() { + let utf8_str = js_str.into_utf8()?.into_owned()?; + let value = match utf8_str.as_str() { "none" => OverSampleType::None, "2x" => OverSampleType::X2, "4x" => OverSampleType::X4, diff --git a/tests/AudioParam.spec.mjs b/tests/AudioParam.spec.mjs new file mode 100644 index 00000000..f764ad4a --- /dev/null +++ b/tests/AudioParam.spec.mjs @@ -0,0 +1,28 @@ +import { assert } from 'chai'; +import { AudioContext } from '../index.mjs'; + +describe('# AudioBuffer', () => { + let audioContext; + + beforeEach(() => { + audioContext = new AudioContext(); + }); + + afterEach(() => { + audioContext.close(); 
+  });
+
+  describe('attributes', () => {
+    it(`should implement all attributes`, () => {
+      const gain = audioContext.createGain();
+
+      assert.equal(gain.gain.automationRate, 'a-rate');
+      assert.equal(gain.gain.defaultValue, 1);
+      // float32 boundaries; ideally these assertions would accept some delta
+      assert.equal(gain.gain.maxValue, 3.4028234663852886e+38);
+      assert.equal(gain.gain.minValue, -3.4028234663852886e+38);
+
+      assert.equal(gain.gain.value, 1);
+    });
+  });
+});
diff --git a/wpt b/wpt
new file mode 160000
index 00000000..83f318a1
--- /dev/null
+++ b/wpt
@@ -0,0 +1 @@
+Subproject commit 83f318a105a1a9c58a0c7bff65607d753d3ad9ad
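
For reviewers, here is a minimal usage sketch (not taken from the patch) of the API surface this change exposes: the now-async `OfflineAudioContext.startRendering()`, the new `suspend()`/`resume()` methods on offline contexts, and the read-only `AudioParam` attributes. The `../index.mjs` import path and the 48000 Hz figures are assumptions that mirror `examples/offline.mjs`.

```js
// usage sketch only; '../index.mjs' mirrors the examples/ folder layout
import { OfflineAudioContext } from '../index.mjs';

const sampleRate = 48000;
const offline = new OfflineAudioContext(1, sampleRate, sampleRate);

// newly exposed read-only AudioParam attributes
const osc = offline.createOscillator();
console.log(osc.frequency.automationRate); // 'a-rate'
console.log(osc.frequency.defaultValue);   // 440
console.log(osc.frequency.minValue, osc.frequency.maxValue);

// newly exposed OfflineAudioContext.suspend()/resume(): pause the renderer
// after the first render quantum, build the graph, then resume
offline.suspend(128 / sampleRate).then(() => {
  osc.connect(offline.destination);
  osc.start(0);
  offline.resume();
});

// startRendering() is now asynchronous and resolves with the rendered buffer
const buffer = await offline.startRendering();
console.log(buffer.duration, offline.state);
```

Under the hood these methods are bridged with napi's `execute_tokio_future`, which is why the `tokio_rt` feature flag is added to the `napi` dependency in `Cargo.toml`.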