diff --git a/Project.toml b/Project.toml index 8ae3938..f0fb8a7 100644 --- a/Project.toml +++ b/Project.toml @@ -7,6 +7,7 @@ version = "0.1.0" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" +PolyChaos = "8d666b04-775d-5f6e-b778-5ac7c70f65a3" RandomizedLinAlg = "0448d7d9-159c-5637-8537-fd72090fea46" Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" @@ -17,9 +18,11 @@ TSVD = "9449cd9e-2762-5aa3-a617-5413e99d722e" [compat] DocStringExtensions = "0.8, 0.9" ModelingToolkit = "8.21" +PolyChaos = "0.2" RandomizedLinAlg = "0.1" Setfield = "0.8, 1" SymbolicUtils = "0.19" Symbolics = "4.10" TSVD = "0.4" julia = "1.6" + diff --git a/examples/PCE/mean.png b/examples/PCE/mean.png new file mode 100644 index 0000000..b2337a3 Binary files /dev/null and b/examples/PCE/mean.png differ diff --git a/examples/PCE/traces.png b/examples/PCE/traces.png new file mode 100644 index 0000000..4bc5295 Binary files /dev/null and b/examples/PCE/traces.png differ diff --git a/examples/PCE/tutorial.jl b/examples/PCE/tutorial.jl new file mode 100644 index 0000000..93eff3a --- /dev/null +++ b/examples/PCE/tutorial.jl @@ -0,0 +1,123 @@ +using ModelOrderReduction, ModelingToolkit, DifferentialEquations, + PolyChaos, Plots, LinearAlgebra + +# Reaction system +# k[1](1+θ[1]/2)/k[2]: A + B ⇌ C +# k[3](1+θ[2]/2): C → D +# k[4]: B → D +# +# state: c[1:4] = [A, B, C, D] +# certain parameters: k[1:4] +# uncertain parameters: θ[1:2] ∼ U[-1,1] + +@parameters k[1:4], θ[1:2] +@variables t, c(t)[1:4] +D = Differential(t) +reactor = [D(c[1]) ~ -k[1] * (1 + 0.5 * θ[1]) * c[1] * c[2] + k[2] * c[3]; + D(c[2]) ~ -k[1] * (1 + 0.5 * θ[1]) * c[1] * c[2] - k[4] * c[2] + k[2] * c[3]; + D(c[3]) ~ k[1] * c[1] * c[2] - k[3] * c[3]; + D(c[4]) ~ k[3] * (1 + 0.5 * θ[2]) * c[3] + k[4] * c[2]]; + +@named reactor_model = ODESystem(reactor, t, c, vcat(k, θ)) + +# 1. choose/generate PCE of the system state +d_pce = 3 +pce = PCE(c, [θ[i] => Uniform_11OrthoPoly(d_pce) for i in eachindex(θ)]) + +# 2. generate moment equations +moment_eqs, pce_eval = moment_equations(reactor_model, pce) + +# 3. solve the moment equations +# 3a. find initial moments via Galerkin projection (note that c0 could depend on an uncertain parameter) +c0 = [1.0, 2.0, 0.0, 0.0] +z0 = reduce(vcat, pce_galerkin(c0, pce)) + +# 3b. solve the initial value problem for the moment equations +moment_prob = ODEProblem(moment_eqs, z0, (0.0, 10.0), + [k[1] => 1.0, k[2] => 0.2, k[3] => 12, k[4] => 0.1]) +moment_sol = solve(moment_prob, Tsit5()) + +# 3c. define a function approximating the parametric solution to the IVP. +pce_sol(t, θ) = pce_eval(moment_sol(t), θ) + +# 4. 
now let's compare to true solution +# take 100 uniform samples +n = 5 +θsamples = Iterators.product(range(-1, 1, length = n), range(-1, 1, length = n)) +ps = [k[1] => 1.0, k[2] => 0.2, k[3] => 12, k[4] => 0.1, θ[1] => 0.0, θ[2] => 0.0] +pvals = [p[2] for p in ps] +reactor_problem = ODEProblem(reactor_model, c0, (0.0, 10.0), ps) +t_range = range(0.0, 10.0, length = 100) +sols = [] +for θval in θsamples + pvals[end - 1] = θval[1] + pvals[end] = θval[2] + _prob = remake(reactor_problem, p = pvals) + push!(sols, Array(solve(_prob, Tsit5(), saveat = t_range))) +end + +species = ["A", "B", "C", "D"] +color = [:red, :green, :blue, :orange] +plots = [plot(title = "Species $(species[i])", xlabel = "time", ylabel = "concentration", + legend = false) for i in 1:4] +for sol in sols + for i in 1:4 + plot!(plots[i], t_range, sol[i, :], color = color[i]) + end +end +for θval in θsamples + pce_predictions = [pce_sol(t, [θval...]) for t in t_range] + for i in 1:4 + plot!(plots[i], t_range, [c[i] for c in pce_predictions], color = "black", + linestyle = :dash) + end +end + +fig = plot(plots...) +savefig(fig, string(@__DIR__, "/traces.png")) + +n = 100 +θsamples = Iterators.product(range(-1, 1, length = n), range(-1, 1, length = n)) +ps = [k[1] => 1.0, k[2] => 0.2, k[3] => 12, k[4] => 0.1, θ[1] => 0.0, θ[2] => 0.0] +pvals = [p[2] for p in ps] +reactor_problem = ODEProblem(reactor_model, c0, (0.0, 10.0), ps) +t_range = range(0.0, 10.0, length = 100) +sols = [] +for θval in θsamples + pvals[end - 1] = θval[1] + pvals[end] = θval[2] + _prob = remake(reactor_problem, p = pvals) + push!(sols, Array(solve(_prob, Tsit5(), saveat = t_range))) +end + +mean_solution = mean(sols) +var_solution = mean([sol .^ 2 for sol in sols]) - mean_solution .^ 2 + +L = length(pce.sym_basis) +mean_PCE = [[moment_sol(t)[((i - 1) * L + 1):(i * L)][1] for i in 1:4] for t in t_range] +var_weightings = computeSP2(pce.tensor_basis) +var_PCE = [[dot(moment_sol(t)[((i - 1) * L + 1):(i * L)] .^ 2, var_weightings) for i in 1:4] + for t in t_range] .- [m .^ 2 for m in mean_PCE] + +species = ["A", "B", "C", "D"] +color = [:red, :green, :blue, :orange] +plots = [plot(title = "Species $(species[i])", xlabel = "time", ylabel = "mean", + legend = false) for i in 1:4] + +for i in 1:4 + plot!(plots[i], t_range, [mean_solution[i, k] for k in axes(mean_solution, 2)], + color = color[i]) + plot!(plots[i], t_range, [s[i] for s in mean_PCE], color = "black", linestyle = :dash) +end +fig = plot(plots...) +savefig(fig, string(@__DIR__, "/mean.png")) + +plots = [plot(title = "Species $(species[i])", xlabel = "time", ylabel = "variance", + legend = false) for i in 1:4] +for i in 1:4 + plot!(plots[i], t_range, [var_solution[i, k] for k in axes(var_solution, 2)], + color = color[i]) + plot!(plots[i], t_range, [s[i] for s in var_PCE], color = "black", linestyle = :dash) +end +fig = plot(plots...) +savefig(fig, string(@__DIR__, "/var.png")) diff --git a/examples/PCE/tutorial.md b/examples/PCE/tutorial.md new file mode 100644 index 0000000..c0119ba --- /dev/null +++ b/examples/PCE/tutorial.md @@ -0,0 +1,109 @@ +# Polynomial Chaos Expansion for Random ODEs +## Sketch of the Theory +This section reviews the basic concept behind the use of Polynomial Chaos Expansion (PCE) for approximate solution to random ODEs as well as how `ModelOrderReduction` facilitates its use within the SciML ecosystem. 
+
+In essence, PCE is nothing more than a function expansion technique for square-integrable functions in which the basis functions are chosen to be orthogonal polynomials; for a detailed treatment of the mathematical underpinnings, the interested reader is referred to Gautschi (2004) [1], for example. For the sake of this tutorial we remain informal and discuss only the aspects that are critical for basic use of PCE in computational practice. To that end, we consider the problem of finding a parametric solution to the following *random* ODE:
+```math
+\begin{aligned}
+    \frac{dx}{dt}(p) = f(x,p,t), \quad x(0) = x_0(p)
+\end{aligned}
+```
+where the parameters $p$ are *a priori* only known to follow a given distribution $\mu$. Specifically, we seek an approximation to the solution $x(t,p)$ in the form of a PCE that matches the statistics of the random process $x(t,p)$ with $p \sim \mu$. To that end, we approximate
+```math
+\begin{aligned}
+    x(t,p) \approx \hat{x}(t,p) = \sum_{i=0}^L z_i(t) \zeta_i(p)
+\end{aligned}
+```
+where $\zeta_0(p), \dots, \zeta_L(p)$ are mutually orthogonal polynomials with respect to the distribution $\mu$, i.e.,
+```math
+\begin{aligned}
+    \left\langle \zeta_i, \zeta_j \right\rangle := \int \zeta_i(p) \zeta_j(p) \, d\mu(p) = \delta_{ij}
+\end{aligned}
+```
+where $\delta_{ij}$ is the Kronecker delta. These polynomials can be constructed efficiently for many distributions. In particular, they are known analytically for many standard distributions, including but not limited to Gaussian, uniform, and beta distributions; moreover, they can be approximated numerically for distributions with known smooth densities and even for empirically known distributions. For more details, please review the documentation of [PolyChaos.jl](github.com/SciML/PolyChaos.jl) or Gautschi (2004) [1], for instance.
+
+Inserting the PCE ansatz $\hat{x}$ into the random ODE of interest yields
+```math
+\begin{aligned}
+    \sum_{i=0}^L \frac{dz_i}{dt}(t) \zeta_i(p) = f\left(\sum_{i=0}^L z_i(t) \zeta_i(p), p, t \right), \quad \sum_{i=0}^L z_i(0) \zeta_i(p) = x_0(p).
+\end{aligned}
+```
+To derive evolution equations for the coefficients (or *moments*) $z_i$, we simply demand that the residual of the above equation be orthogonal to the space spanned by $\zeta_0, \dots, \zeta_L$. Orthogonality here is understood with respect to the inner product described above. This is equivalent to demanding minimal expected mismatch when evaluating the above equation for $p \sim \mu$. Mathematically, this condition translates into a set of deterministic ODEs for the moments $z_j$:
+```math
+\begin{aligned}
+    \left\langle \sum_{i=0}^L \frac{dz_i}{dt}(t) \zeta_i(p), \zeta_j(p) \right\rangle = \left\langle f\left(\sum_{i=0}^L z_i(t) \zeta_i(p), p, t \right), \zeta_j(p) \right\rangle
+\end{aligned}
+```
+and since $\left\langle \zeta_i, \zeta_j\right\rangle = \delta_{ij}$, it follows that
+```math
+\begin{aligned}
+    \frac{dz_0}{dt} &= \left\langle f\left(\sum_{i=0}^L z_i(t) \zeta_i(p), p, t \right), \zeta_0(p) \right\rangle\\
+    & \vdots\\
+    \frac{dz_L}{dt} &= \left\langle f\left(\sum_{i=0}^L z_i(t) \zeta_i(p), p, t \right), \zeta_L(p) \right\rangle
+\end{aligned}
+```
+with initial conditions
+```math
+\begin{aligned}
+    z_i(0) = \left\langle x_0, \zeta_i \right\rangle.
+\end{aligned} +``` +These equations are referred to as the moment equations and can in principle be solved with any suitable IVP solution technique. + +## Example +In this section, we will showcase how `ModelOrderReduction.jl` may be used to apply PCE for the approximate solution of random ODEs as described in the previous section. To that end, we will consider a simple nonlinear reactor in which the following reaction occurs +```math +\begin{aligned} + &A + B \xleftrightharpoons[k_1(1-\frac{\theta_1}{2})]{k_2} C\\ + &C \xrightarrow{k_3(1-\frac{\theta_2}{2})} D\\ + &B \xrightarrow{k_4} D +\end{aligned} +``` +where the rate parameters $\theta_1 \sim U[-1,1]$, $\theta_2 \sim U[-1,1]$ are uncertain. Using [ModelingToolkit.jl]{github.com/SciML/ModelingToolkit.jl} the following code creates the associated model. +``` +using ModelOrderReduction, ModelingToolkit, DifferentialEquations, PolyChaos, Plots + +@parameters k[1:4], θ[1:2] +@variables t, c(t)[1:4] +D = Differential(t) +reactor = [D(c[1]) ~ -k[1]*(1+0.5*θ[1])*c[1]*c[2] + k[2]*c[3]; + D(c[2]) ~ -k[1]*(1+0.5*θ[1])*c[1]*c[2] - k[4]*c[2] + k[2]*c[3]; + D(c[3]) ~ k[1]*c[1]*c[2] - k[3]*c[3]; + D(c[4]) ~ k[3]*(1+0.5*θ[2])*c[3] + k[4]*c[2]]; + +@named reactor_model = ODESystem(reactor, t, c, vcat(k, θ)) +``` + +Next, we are going to define the PCE Ansatz with the help of which we are going to approximate the parametric solution. To that end, we heavily rely on [PolyChaos.jl]{github.com/SciML/PolyChaos.jl}. In this example, we choose a PCE of degree 3. +``` +d_pce = 3 +pce = PCE(c, [θ[i] => Uniform_11OrthoPoly(d_pce) for i in eachindex(θ)]) +``` + +Next, we are constructing the corresponding moment equations via a simple function call of `moment_equations`. This function returns and `MTK.ODESystem` describing the deterministic evolution equation of the PCE moments and a function that maps the state of this model via the PCE Ansatz back to the original model state (in this example, the concentration of the different species). +``` +moment_eqs, pce_eval = moment_equations(reactor_model, pce) +``` + +Next, we are solving the evolution equation for the PCE moments. The initial condition for the moments is also obtained via Galerkin projection onto the space spanned by the basis functions in the PCE Ansatz. +``` +c0 = [1.0, 2.0, 0.0, 0.0] +z0 = reduce(vcat, pce_galerkin(c0, pce)) +moment_prob = ODEProblem(moment_eqs, z0, (0.0, 10.0), [k[1] => 1.0, k[2] => 0.2, k[3] => 12, k[4] => 0.1]) +moment_sol = solve(moment_prob, Tsit5()) +``` + +Lastly, we wish to demonstrate that this approximation can be remarkably accurate in practice. The figure below shows the PCE approximation (dashed black lines) compared to the full model for several realization of the uncertain paramters. + +![traces](traces.png) + +While PCE can even provide a good parametric surrogate model of the true solution of the random ODE, it is really designed to capture the statistics of the solution of the random ODE well. Moreover, any statistics that can be computed in terms of the moments of the states of the random ODE are remarkably cheap to compute from the PCE coefficients/moments alone. The figures below shows an example for the mean and variances of the different species. + + +![mean](mean.png) +![var](var.png) + + + +## References +Gautschi, Walter. Orthogonal polynomials: computation and approximation. OUP Oxford, 2004. 
\ No newline at end of file diff --git a/examples/PCE/var.png b/examples/PCE/var.png new file mode 100644 index 0000000..903a460 Binary files /dev/null and b/examples/PCE/var.png differ diff --git a/src/deim.jl b/src/DEIM/deim.jl similarity index 100% rename from src/deim.jl rename to src/DEIM/deim.jl diff --git a/src/utils.jl b/src/DEIM/utils.jl similarity index 100% rename from src/utils.jl rename to src/DEIM/utils.jl diff --git a/src/ModelOrderReduction.jl b/src/ModelOrderReduction.jl index fa2422d..eae492b 100644 --- a/src/ModelOrderReduction.jl +++ b/src/ModelOrderReduction.jl @@ -1,23 +1,26 @@ module ModelOrderReduction - +#========================Data Reduction=========================================# using DocStringExtensions - using Symbolics using ModelingToolkit using LinearAlgebra -using Setfield - -include("utils.jl") - include("Types.jl") include("ErrorHandle.jl") - include("DataReduction/POD.jl") + export SVD, TSVD, RSVD -export POD, reduce! +export POD, reduce!, matricize -include("deim.jl") +#========================Model Reduction========================================# +# Discrete Empirical Interpolation +using Setfield +include("DEIM/utils.jl") +include("DEIM/deim.jl") export deim +# Polynomial Chaos Expansion +include("PCE/PCE.jl") +include("PCE/pseudo_spectral.jl") + end diff --git a/src/PCE/PCE.jl b/src/PCE/PCE.jl new file mode 100644 index 0000000..fbe1010 --- /dev/null +++ b/src/PCE/PCE.jl @@ -0,0 +1,244 @@ +using PolyChaos, Symbolics, ModelingToolkit, LinearAlgebra + +export PCE, moment_equations, pce_galerkin, mean, var + +include("PCE_utils.jl") + +""" +$(SIGNATURES) + +`PCE` object for symbolic representation of a dense multivariate polynomial chaos expansion +of a given set of states ``x`` in terms of a given set of uncertain parameters ``p``: +```math + x = ∑ᵢ zᵢ ζᵢ(p). +``` +Here ``x`` denotes the states of the PCE, ``p`` the parameters, ``zᵢ`` refers to the ``i``th moments and ``ζᵢ`` to the +``i``th basis function. + +# Fields +- `states`: Vector of states (symbolic variables) representing the state of the PCE. +- `parameters`: `Vector` of parameters (symbolic variables) being expanded. +- `uni_basis`: `Vector` of `Pair`s mapping parameters to a `PolyChaos.AbstractOrthoPoly` representing + the basis in which the parametric dependence is expanded. +- `tensor_basis`: `TensorProductOrthoPoly` representing the tensorproduct-based multi-variate basis underpinning the PCE +- `sym_basis`: `Vector` of symbolic variables representing the basis functions: ``[ζ₁, …, ζₘ]``. +- `ansatz`: `Vector` of `Pair`s mapping state to corresponding PCE ansatz +- `moments`: `Vector` of `Vector`s carrying the moments for each state. +""" +struct PCE + states::Vector{<:Num} + parameters::Vector{<:Num} + uni_basis::Vector{Pair{<:Num, <:Union{AbstractOrthoPoly, AbstractCanonicalOrthoPoly}}} + tensor_basis::TensorProductOrthoPoly + sym_basis::Vector{<:Num} + ansatz::Vector{Pair{<:Num, <:Num}} + moments::Vector{Vector{<:Num}} +end + +""" +$(SIGNATURES) + +Create `PCE` object from a `Vector` of states (symbolic variables) and a `Vector` of `Pair`s mapping +the parameters to the corresponding `PolyChaos.AbstractOrthoPoly` basis used for expansion. 
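+
+# Example
+
+A minimal construction sketch, mirroring `examples/PCE/tutorial.jl` (the variable
+names are illustrative only):
+
+```julia
+using ModelOrderReduction, ModelingToolkit, PolyChaos
+@parameters θ[1:2]
+@variables t, c(t)[1:4]
+# expand the four states in a degree-3 basis orthogonal w.r.t. U[-1, 1]
+pce = PCE(c, [θ[i] => Uniform_11OrthoPoly(3) for i in eachindex(θ)])
+```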
+""" +function PCE(states, uni_basis::AbstractVector{<:Pair}) + # to deal with symbolic arrays + states = collect(states) + + parameters = [p for (p, op) in uni_basis] + ops = [op for (p, op) in uni_basis] + + @assert all(deg(op) > 0 for op in ops) "Every basis considered must at least include the linear function" + + tensor_basis = TensorProductOrthoPoly(ops) + n_basis = size(tensor_basis.ind, 1) + n_states = length(states) + + @variables ζ(parameters...)[1:n_basis] + sym_basis = collect(ζ) + + moments = Vector{Num}[] + for (i, state) in enumerate(collect(states)) + ind_vars = get_independent_vars(state) + create_var = isempty(ind_vars) ? name -> (@variables $(name))[1] : + name -> (@variables $(name)(ind_vars...))[1] + push!(moments, [create_var(moment_name(i, j)) for j in 1:n_basis]) + end + ansatz = [states[i] => sum(moments[i][j] * sym_basis[j] for j in 1:n_basis) + for i in 1:n_states] + return PCE(states, parameters, uni_basis, tensor_basis, sym_basis, ansatz, moments) +end +function (pce::PCE)(moment_vals, parameter_vals::AbstractVector) + basis = evaluate(parameter_vals, pce.tensor_basis) + return [dot(moments, basis) for moments in moment_vals] +end + +# 1. apply PCE ansatz +""" +$(TYPEDSIGNATURES) + +Generate linear PCEs for the uncertain parameters. +""" +function generate_parameter_pce(pce::PCE) + par_dim = length(pce.parameters) + par_pce = Vector{Pair{eltype(pce.parameters), eltype(pce.sym_basis)}}(undef, par_dim) + for (i, basis) in enumerate(pce.uni_basis) + p, op = basis + par_pce[i] = p => pce.sym_basis[i + 1] + op.α[1] + end + return par_pce +end + +""" +$(TYPEDSIGNATURES) + +Substitute parameters shared between a set of symbolic `eqs` and the PCE `pce` for the corresponding linear PCEs. +""" +function substitute_parameters(eqs::AbstractVector, pce::PCE) + par_pce = generate_parameter_pce(pce) + subs_eqs = [substitute(eq, par_pce) for eq in eqs] + return subs_eqs +end + +""" +$(TYPEDSIGNATURES) + +Substitute PCE Ansatz defined in `pce` into a set of symbolic equations `eqs`. +""" +function substitute_pce_ansatz(eqs::AbstractVector, pce::PCE) + subs_eqs = [expand(expand(substitute(eq, pce.ansatz))) for eq in eqs] + return subs_eqs +end + +""" +$(TYPEDSIGNATURES) + +Apply PCE ansatz defined in `pce` to a given set of symbolic equations `eqs`. +""" +function apply_ansatz(eqs::AbstractVector, pce::PCE) + return substitute_pce_ansatz(substitute_parameters(eqs, pce), pce) +end + +# 2. extract PCE expansion coeffs +""" +$(TYPEDSIGNATURES) + +Given a set of symbolic equations `eqs` involving the basis functions of `pce`, +extract monomials of the basis functions and the corresponding coeffiecients. + +# Returns +`Vector` of `Dict`s mapping monomial of basis functions to its coefficient in the individual equations. +""" +function extract_basismonomial_coeffs(eqs::AbstractVector, pce::PCE) + basismonomial_coeffs = [extract_coeffs(eq, pce.sym_basis) for eq in eqs] + basismonomial_indices = [] + for coeffs in basismonomial_coeffs + union!(basismonomial_indices, + [mono => get_basis_indices(mono) for mono in keys(coeffs)]) + end + return basismonomial_coeffs, basismonomial_indices +end + +# 3. compute inner products +""" +$(TYPEDSIGNATURES) + +Evaluate scalar products between all basis functions in `pce` and +basis monomials as characterized by `mono_indices`. 
+""" +function eval_scalar_products(mono_indices, pce::PCE) + uni_degs = [deg(op) for op in pce.tensor_basis.uni] + max_degs = uni_degs + for (mono, id) in mono_indices + max_degs = max.(max_degs, vec(sum(pce.tensor_basis.ind[id .+ 1, :], dims = 1))) + end + quad_deg = max.(ceil.(Int, 0.5 * (max_degs + uni_degs .+ 1))) + + integrators = map((uni, deg) -> bump_degree(uni, deg), pce.tensor_basis.uni, quad_deg) + scalar_products = Dict() + for k in 1:dim(pce.tensor_basis) + scalar_products[k] = Dict(mono => computeSP(vcat(id, k - 1), pce.tensor_basis, + integrators) + for (mono, id) in mono_indices) + end + return scalar_products +end + +# 4. Galerkin projection +""" +$(TYPEDSIGNATURES) + +perform Galerkin projection of polynomial expressions characterized by `Dict`s mapping +basis monomials to coefficients. +""" +function galerkin_projection(bm_coeffs::Vector{<:Dict}, scalar_products::Dict, + pce::PCE) + projected_eqs = [] + scaling_factors = computeSP2(pce.tensor_basis) + for i in eachindex(bm_coeffs) + eqs = [] + for k in 1:dim(pce.tensor_basis) + push!(eqs, + sum(bm_coeffs[i][mono] * scalar_products[k][mono] + for mono in keys(bm_coeffs[i]))) + end + push!(projected_eqs, eqs ./ scaling_factors) + end + return projected_eqs +end + +# 5. combine everything +""" +$(TYPEDSIGNATURES) + +perform Galerkin projection onto the `pce`. +""" +function pce_galerkin(eqs::AbstractVector, pce::PCE) + expanded_eqs = apply_ansatz(eqs, pce) + basismono_coeffs, basismono_idcs = extract_basismonomial_coeffs(expanded_eqs, pce) + scalar_products = eval_scalar_products(basismono_idcs, pce) + projected_eqs = galerkin_projection(basismono_coeffs, scalar_products, pce) + return projected_eqs +end + +# 6. high-level interface +# 6a. apply pce to explicit ODE +""" +$(TYPEDSIGNATURES) + +Generate moment equations of an `ODESystem` from a given `PCE`-Ansatz via Galerkin projection. +""" +function moment_equations(sys::ODESystem, pce::PCE) + eqs = [eq.rhs for eq in equations(sys)] + projected_eqs = pce_galerkin(eqs, pce) + moment_eqs = reduce(vcat, projected_eqs) + iv = independent_variable(sys) + params = setdiff(parameters(sys), pce.parameters) + D = Differential(iv) + moments = reduce(vcat, pce.moments) + name = Symbol(String(nameof(sys)) * "_pce") + pce_system = ODESystem([D(moments[i]) ~ moment_eqs[i] for i in eachindex(moments)], + iv, moments, params, name = name) + + n_moments = dim(pce.tensor_basis) + n_states = length(states(sys)) + pce_eval = function (moment_vals, parameter_values) + shape_state = [moment_vals[(i * n_moments + 1):((i + 1) * n_moments)] + for i in 0:(n_states - 1)] + return pce(shape_state, parameter_values) + end + return pce_system, pce_eval +end + +# 6b. apply pce to implicit & mass-matrix ODE/DAEs + +# 6c. apply pce to algebraic equations + +# 6d. apply pce to control problems + +# 6e. ? + +# ToDo: +# better support to evaluate the PCE +# in particular => make evaluation of means, variances, etc evaluable from the object itself upon specification of the moment values +# hinderance -> how do you provide the moments in a convenient format? 
diff --git a/src/PCE/PCE_utils.jl b/src/PCE/PCE_utils.jl new file mode 100644 index 0000000..0e0440a --- /dev/null +++ b/src/PCE/PCE_utils.jl @@ -0,0 +1,379 @@ +import PolyChaos: computeSP2, computeSP, dim, deg + +# moment names +function moment_name(i, j) + return Symbol("z" * Symbolics.map_subscripts(i) * + "₋" * Symbolics.map_subscripts(j)) +end + +# getting independent variables +function get_independent_vars(var) + return [] +end +function get_independent_vars(var::Symbolics.Term) where {T} + if operation(var) isa Symbolics.Sym + return arguments(var) + else + return reduce(vcat, get_independent_vars(arguments(var))) + end +end +function get_independent_vars(var::Num) + return get_independent_vars(Symbolics.unwrap(var)) +end +function get_independent_vars(vars::AbstractVector) + return [get_independent_vars(var) for var in vars] +end + +# utiltiites to extracting coefficients of a polynomial in monomial basis in variables `vars` +function split_term(term::Symbolics.Mul, vars) + coeff = term.coeff + mono = Num(1.0) + for var in keys(term.dict) + if var in vars + mono *= var^term.dict[var] + else + coeff *= var^term.dict[var] + end + end + return coeff, mono +end + +function split_term(term::Symbolics.Pow, vars) + if term.base in vars + return 1.0, term + else + return term, Val(1) + end +end + +function split_term(term::T, vars) where {T <: Union{Symbolics.Term, Symbolics.Sym}} + if term in vars + return 1.0, term + else + return term, Val(1) + end +end + +function extract_coeffs(expr::Symbolics.Add, vars::Set) + coeffs = Dict() + if !iszero(expr.coeff) + coeffs[Val(1)] = expr.coeff + end + for term in keys(expr.dict) + num_coeff = expr.dict[term] + var_coeff, mono = split_term(term, vars) + try + coeffs[mono] += num_coeff * var_coeff + catch + coeffs[mono] = num_coeff * var_coeff + end + end + return coeffs +end + +function extract_coeffs(expr::T, vars::Set) where {T <: Union{Symbolics.Sym, Symbolics.Mul}} + coeff, mono = split_term(expr, vars) + return Dict(mono => coeff) +end + +extract_coeffs(expr::Num, vars::Set) = extract_coeffs(Symbolics.unwrap.(expr), vars) +function extract_coeffs(expr::Num, vars::AbstractArray) + extract_coeffs(Symbolics.unwrap.(expr), vars) +end +function extract_coeffs(expr, vars::AbstractArray{<:Num}) + extract_coeffs(expr, Symbolics.unwrap.(vars)) +end +extract_coeffs(expr, vars::AbstractArray) = extract_coeffs(expr, Set(vars)) +extract_coeffs(expr::Number, vars::Set) = Dict(Val(1) => expr) + +# extracting the indices of the factors of as basismonomial +function get_basis_indices(mono::Symbolics.Mul) + basis_indices = Int[] + for (term, pow) in mono.dict + append!(basis_indices, (arguments(term)[end] - 1) * ones(Int, pow)) + end + return basis_indices +end +function get_basis_indices(mono::Symbolics.Term) + return [arguments(mono)[end] - 1] +end +function get_basis_indices(mono::Symbolics.Pow) + return (arguments(mono.base)[end] - 1) * ones(Int, mono.exp) +end +function get_basis_indices(mono::Num) + return get_basis_indices(Symbolics.unwrap(mono)) +end +function get_basis_indices(::Val{1}) + return [0] +end + +""" +`TensorProductOrthoPoly` objects represent bases formed as the tensor product of univariate `PolyChaos.AbstractOrthoPoly` bases. +By default the basis elements of the tensor product are restricted to polynomials with total degree up to the maximum degree among the +univariate bases. This maximum degree can be manually specified, however. 
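+
+A small construction sketch (the univariate bases are taken from the test suite):
+
+```julia
+using PolyChaos
+ops = [GaussOrthoPoly(4), Uniform01OrthoPoly(2)]
+tpop = TensorProductOrthoPoly(ops)      # total degree capped at max(4, 2) = 4
+tpop3 = TensorProductOrthoPoly(ops, 3)  # total degree capped manually at 3
+```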
+""" +struct TensorProductOrthoPoly{M, U} + ind::Matrix + measure::M + deg::Vector{Int} + uni::U +end +function TensorProductOrthoPoly(ops::AbstractVector{T}) where { + T <: + Union{AbstractOrthoPoly, + AbstractCanonicalOrthoPoly + }} + n = length(ops) + degrees = [deg(op) for op in ops] + ind = grevlex(n, 0:maximum(degrees), degrees) + measures = [op.measure for op in ops] + prod_measure = ProductMeasure(t -> prod(measure.w for measure in measures), + measures) + + return TensorProductOrthoPoly(ind, prod_measure, degrees, ops) +end +function TensorProductOrthoPoly(ops::AbstractVector{T}, + max_deg::Int) where { + T <: Union{AbstractOrthoPoly, + AbstractCanonicalOrthoPoly}} + n = length(ops) + degrees = [deg(op) for op in ops] + ind = grevlex(n, 0:max_deg, degrees) + measures = [op.measure for op in ops] + prod_measure = ProductMeasure(t -> prod(measure.w for measure in measures), + measures) + + return TensorProductOrthoPoly(ind, prod_measure, degrees, ops) +end + +""" +$(TYPEDSIGNATURES) + +computes inner product between basis functions of a `TensorProductOrthoPoly` via +`PolyChaos`'s infrastructure (exploiting the tensor product form). +""" +function computeSP(basis_fxns, tpop::TensorProductOrthoPoly, + integrators = tpop.uni) + multi_indices = tpop.ind[basis_fxns .+ 1, :] + # columns of multi_indices refer to inner products to be computed + sp = 1.0 + for k in axes(multi_indices, 2) + sp *= computeSP(multi_indices[:, k], integrators[k]) + end + return round(sp, digits = 12) +end + +function computeSP2(tpop::TensorProductOrthoPoly) + sp2 = [computeSP2(op) for op in tpop.uni] + return [prod(sp2[i][j + 1] for (i, j) in enumerate(tpop.ind[k, :])) + for k in axes(tpop.ind, 1)] +end + +function evaluate(x, tpop::TensorProductOrthoPoly) + uni_vals = [_evaluate_uni_op(x[i], tpop.uni[i]) for i in eachindex(x)] + tensor_vals = zeros(size(tpop.ind, 1)) + for idx in axes(tpop.ind, 1) + tensor_vals[idx] = prod(uni_vals[i][j + 1] + for (i, j) in enumerate(tpop.ind[idx, :])) + end + return tensor_vals +end + +function _evaluate_uni_op(x, + op::T) where { + T <: + Union{AbstractOrthoPoly, AbstractCanonicalOrthoPoly + }} + vals = zeros(deg(op) + 2) + vals[1], vals[2] = 0.0, 1.0 + for k in 2:(deg(op) + 1) + vals[k + 1] = (x - op.α[k - 1]) * vals[k] - vals[k - 1] * op.β[k - 1] + end + popfirst!(vals) + return vals +end +""" +$(TYPEDSIGNATURES) + +Compute the an ascending list of `n`-dimensional multi-indices with fixed `grade` (= sum of entries) +in graded reverse lexicographic order. Constraints on the degrees considered can be incorporated. +""" +function grevlex(n::Int, grade::Int) + if n == 1 + return reshape([grade], 1, 1) + end + + if grade == 0 + return zeros(Int, 1, n) + end + + sub_ind = grevlex(n - 1, grade) + ind = hcat(sub_ind, zeros(Int, size(sub_ind, 1))) + for k in 1:grade + sub_ind = grevlex(n - 1, grade - k) + ind = vcat(ind, hcat(sub_ind, k * ones(Int, size(sub_ind, 1)))) + end + return ind +end + +function grevlex(n::Int, grades::AbstractVector{Int}) + return reduce(vcat, [grevlex(n, grade) for grade in grades]) +end + +function grevlex(n::Int, grade::Int, max_degrees::Vector{Int}) + return grevlex(n, grade, [0:d for d in max_degrees]) +end + +function grevlex(n::Int, grades::AbstractVector{Int}, max_degrees::Vector{Int}) + return reduce(vcat, [grevlex(n, grade, max_degrees) for grade in grades]) +end + +function grevlex(n::Int, grade::Int, degree_constraints::Vector{<:AbstractVector}) + if n == 1 + return grade in degree_constraints[1] ? 
reshape([grade], 1, 1) : zeros(Int, 0, 1) + end + + if grade == 0 + return all(0 in degs for degs in degree_constraints) ? zeros(Int, 1, n) : + zeros(Int, 0, n) + end + + filtered_grades = filter(x -> x <= grade, degree_constraints[end]) + sub_ind = grevlex(n - 1, grade - filtered_grades[1], degree_constraints[1:(end - 1)]) + ind = hcat(sub_ind, filtered_grades[1] * ones(Int, size(sub_ind, 1))) + for k in filtered_grades[2:end] + sub_ind = grevlex(n - 1, grade - k, degree_constraints[1:(end - 1)]) + ind = vcat(ind, hcat(sub_ind, k * ones(Int, size(sub_ind, 1)))) + end + return ind +end + +function grevlex(n::Int, grades::AbstractVector{Int}, + degree_constraints::Vector{<:AbstractVector}) + return reduce(vcat, [grevlex(n, grade, degree_constraints) for grade in grades]) +end + +""" +$(TYPEDSIGNATURES) + +returns dimension of `TensorProductOrthoPoly` object, i.e., the number of basis functions encoded. +""" +dim(tpop::TensorProductOrthoPoly) = size(tpop.ind, 1) + +""" +$(TYPEDSIGNATURES) + +returns degrees of the bases forming a `TensorProductOrthoPoly` object. +""" +deg(tpop::TensorProductOrthoPoly) = tpop.deg + +""" +$(TYPEDSIGNATURES) + +returns maximum degree featured in a `TensorProductOrthoPoly` object. +""" +max_degree(tpop::TensorProductOrthoPoly) = sum(tpop.ind[end, :]) + +# bumping the degree of a PolyChaos OrthoPoly object up to ensure exact integration +# PR to PolyChaos -> remove unnecessarily restrictive constructors and allow construction from measures +# -> also expose number of points used for quadrature generation for general orthogonal polys +# + +measure_parameters(m::AbstractMeasure) = [] +measure_parameters(m::Measure) = m.pars +measure_parameters(m::JacobiMeasure) = [m.ashapeParameter, m.bshapeParameter] +measure_parameters(m::genLaguerreMeasure) = [m.shapeParameter] +measure_parameters(m::genHermiteMeasure) = [m.muParameter] +measure_parameters(m::MeixnerPollaczekMeasure) = [m.λParameter, m.ϕParameter] +measure_parameters(m::Beta01Measure) = [m.ashapeParameter, m.bshapeParameter] +measure_parameters(m::GammaMeasure) = [m.shapeParameter, m.rateParameter] + +function bump_degree(op::OrthoPoly, deg::Int) + return OrthoPoly(op.name, deg, op.measure) +end + +function bump_degree(op::JacobiOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return JacobiOrthoPoly(deg, ps...) +end + +function bump_degree(op::genLaguerreOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return genLaguerreOrthoPoly(deg, ps...) +end + +function bump_degree(op::MeixnerPollaczekOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return MeixnerPollaczekOrthoPoly(deg, ps...) +end + +function bump_degree(op::Beta01OrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return Beta01OrthoPoly(deg, ps...) +end + +function bump_degree(op::GammaOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return GammaOrthoPoly(deg, ps...) +end + +function bump_degree(op::genHermiteOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return genHermiteOrthoPoly(deg, ps...) +end + +function bump_degree(op::HermiteOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return HermiteOrthoPoly(deg, ps...) +end + +function bump_degree(op::LaguerreOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return LaguerreOrthoPoly(deg, ps...) +end + +function bump_degree(op::Uniform01OrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return Uniform01OrthoPoly(deg, ps...) 
+end + +function bump_degree(op::Uniform_11OrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return Uniform_11OrthoPoly(deg, ps...) +end + +function bump_degree(op::GaussOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return GaussOrthoPoly(deg, ps...) +end + +function bump_degree(op::LegendreOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return LegendreOrthoPoly(deg, ps...) +end + +function bump_degree(op::LogisticOrthoPoly, deg::Int) + ps = measure_parameters(op.measure) + return LogisticOrthoPoly(deg, ps...) +end + +function bump_degree(op::MultiOrthoPoly, deg::Int) + return MultiOrthoPoly(bump_degree.(op.uni, deg), deg) +end + +function bump_degree(op::TensorProductOrthoPoly, deg::Vector{Int}) + return TensorProductOrthoPoly(bump_degree.(op.uni, deg)) +end + +function bump_degree(op::TensorProductOrthoPoly, deg::Vector{Int}, max_deg::Int) + return TensorProductOrthoPoly(bump_degree.(op.uni, deg), max_deg) +end + +# extending computeSP2 for multivariate orthogonal polys +function computeSP2(pc::MultiOrthoPoly) + n = length(pc.uni) + m = dim(pc) + uni_SP2 = [computeSP2(op) for op in pc.uni] + multi_SP2 = [prod(uni_SP2[j][pc.ind[i, j] + 1] for j in 1:n) for i in 1:m] + return multi_SP2 +end diff --git a/src/PCE/pseudo_spectral.jl b/src/PCE/pseudo_spectral.jl new file mode 100644 index 0000000..fbf0237 --- /dev/null +++ b/src/PCE/pseudo_spectral.jl @@ -0,0 +1,5 @@ +""" +$(TYPEDSIGNATURES) +""" +function pseudo_spectral(sys, pce::PCE) +end diff --git a/test/DataReduction.jl b/test/DataReduction.jl index da9ea80..bed00d4 100644 --- a/test/DataReduction.jl +++ b/test/DataReduction.jl @@ -1,6 +1,6 @@ using Test, ModelOrderReduction using DifferentialEquations - +const MOR = ModelOrderReduction function lorenz_prob() function lorenz!(du, u, p, t) du[1] = p[1] * (u[2] - u[1]) @@ -29,7 +29,7 @@ end solution = Array(sol) order = 2 - solver = SVD() + solver = MOR.SVD() matrix_reducer = POD(solution, order) snapshot_reducer = POD(sol.u, order) reduce!(matrix_reducer, solver) @@ -48,7 +48,7 @@ end @test reducer.nmodes == 1 order = 2 - solver = TSVD() + solver = MOR.TSVD() reducer = POD(solution, order) reduce!(reducer, solver) @@ -57,7 +57,7 @@ end @test reducer.renergy > 0.7 order = 2 - solver = RSVD() + solver = MOR.RSVD() reducer = POD(solution, order) reduce!(reducer, solver) diff --git a/test/PCETestUtils.jl b/test/PCETestUtils.jl new file mode 100644 index 0000000..3d1a67b --- /dev/null +++ b/test/PCETestUtils.jl @@ -0,0 +1,25 @@ +function isapprox_sym(exprA, exprB, atol = 1e-6) + exprA = Symbolics.unwrap(exprA) + exprB = Symbolics.unwrap(exprB) + return isapprox_sym(exprA, exprB) +end + +function isapprox_sym(exprA::TA, exprB::TB, + atol = 1e-6) where {TA, TB <: Union{Symbolics.Mul, Symbolics.Add}} + approx_equal = true + approx_equal = isapprox(exprA.coeff, exprB.coeff, atol = atol) + approx_equal = keys(exprA.dict) == keys(exprB.dict) + if approx_equal + for subexpr in keys(exprA.dict) + approx_equal = isapprox(exprA.dict[subexpr], exprB.dict[subexpr], atol = atol) + if !approx_equal + break + end + end + end + return approx_equal +end + +function isapprox_sym(exprA::Symbolics.Term, exprB::Symbolics.Term, atol = 1e-6) + return isequal(exprA, exprB) +end diff --git a/test/PCETests.jl b/test/PCETests.jl new file mode 100644 index 0000000..d3bcf83 --- /dev/null +++ b/test/PCETests.jl @@ -0,0 +1,237 @@ +using Test, ModelOrderReduction +using Symbolics, PolyChaos, ModelingToolkit, LinearAlgebra +const MOR = ModelOrderReduction + +include("PCETestUtils.jl") + 
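+# `isapprox_sym` (from PCETestUtils.jl) compares two symbolic expressions term by term,
+# using a numeric tolerance for the coefficients.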
+# testing GRevLex capabilities +@testset "PCE: GRevLeX ordering" begin + @test MOR.grevlex(1, 5) == reshape([5], 1, 1) + + ind = [2 0 0; + 1 1 0; + 0 2 0; + 1 0 1; + 0 1 1; + 0 0 2] + @test ind == MOR.grevlex(3, 2) + + ind = [1 1 0; + 1 0 1; + 0 1 1; + 0 0 2] + @test ind == MOR.grevlex(3, 2, [1, 1, 2]) + + ind = [1 1 0; + 0 1 1] + @test ind == MOR.grevlex(3, 2, [0:1, [1], 0:2]) + + @test zeros(Int, 0, 3) == MOR.grevlex(3, 0, [0:1, [1], 0:2]) + + ind0 = [0 0 0] + ind1 = [1 0 0; + 0 1 0; + 0 0 1] + ind2 = [2 0 0; + 1 1 0; + 0 2 0; + 1 0 1; + 0 1 1; + 0 0 2] + @test vcat(ind0, ind1, ind2) == MOR.grevlex(3, 0:2) + @test vcat(ind1, ind2) == MOR.grevlex(3, 1:2) + @test vcat(ind0, ind2) == MOR.grevlex(3, [0, 2]) + @test vcat(ind2, ind0) == MOR.grevlex(3, [2, 0]) + + degree_constraints = [0:1, [1], 1:2] + ind0 = zeros(Int, 0, 3) + ind1 = zeros(Int, 0, 3) + ind2 = [0 1 1] + @test vcat(ind0, ind1, ind2) == MOR.grevlex(3, 0:2, degree_constraints) + @test vcat(ind1, ind2) == MOR.grevlex(3, 1:2, degree_constraints) + @test vcat(ind0, ind2) == MOR.grevlex(3, [0, 2], degree_constraints) + @test vcat(ind2, ind0) == MOR.grevlex(3, [2, 0], degree_constraints) +end + +# testing TensorProductOrthoPoly +@testset "PCE: TensorProductOrthoPoly test" begin + ops = [GaussOrthoPoly(4), Uniform01OrthoPoly(2), LaguerreOrthoPoly(3)] + tpop = MOR.TensorProductOrthoPoly(ops) + @test tpop.uni == ops + @test MOR.max_degree(tpop) == 4 + @test MOR.deg(tpop) == [4, 2, 3] + + tpop = MOR.TensorProductOrthoPoly(ops, 3) + @test MOR.max_degree(tpop) == 3 + + ops = [GaussOrthoPoly(4), Uniform01OrthoPoly(2)] + tpop = MOR.TensorProductOrthoPoly(ops) + ind = [0 0; + 1 0; + 0 1; + 2 0; + 1 1; + 0 2; + 3 0; + 2 1; + 1 2; + 4 0; + 3 1; + 2 2] # GRevLex-ordered multi-indices + @test tpop.ind == ind + + ops = [GaussOrthoPoly(3), Uniform01OrthoPoly(3), LaguerreOrthoPoly(3)] + mop = MultiOrthoPoly(ops, 3) + tpop = MOR.TensorProductOrthoPoly(ops) + sp2_mop = computeSP2(mop) + sp2_tpop = computeSP2(tpop) + mop_multi_indices = [mop.ind[i, :] for i in axes(mop.ind, 1)] + tpop_multi_indices = [tpop.ind[i, :] for i in axes(mop.ind, 1)] + idx_map = [findfirst(x -> x == mult_index, mop_multi_indices) + for mult_index in tpop_multi_indices] + @test sp2_mop[idx_map] == sp2_tpop + @test isapprox(computeSP(idx_map[[5, 5, 2, 2]], mop), computeSP([5, 5, 2, 2], tpop)) + + tpop = MOR.bump_degree(tpop, [2, 4, 1]) + @test deg(tpop) == [2, 4, 1] + + tpop = MOR.bump_degree(tpop, [2, 4, 1], 5) + @test tpop.ind[end, :] == [0, 4, 1] +end + +# testing extraction of independent variables +@testset "PCE: get_independent_vars test" begin + @variables t, z, u(t), v(t)[1:4], w(t, z), x(t, z)[1:4] + @test isequal(MOR.get_independent_vars(u), [t]) + @test isequal(MOR.get_independent_vars(v[1]), [t]) + @test isequal(MOR.get_independent_vars(v[2]), [t]) + @test isequal(MOR.get_independent_vars(w), [t, z]) + @test isequal(MOR.get_independent_vars(x[2]), [t, z]) + @test isequal(MOR.get_independent_vars(collect(v)), [[t] for i in 1:length(v)]) + @test isequal(MOR.get_independent_vars(collect(x)), [[t, z] for i in 1:length(v)]) +end + +# test PCE generation +@testset "PCE: constructor test" begin + @parameters a, b + @variables y + n = 5 + + test_basis = [a => GaussOrthoPoly(n), b => Uniform01OrthoPoly(n)] + pce = PCE([y], test_basis) + @test length(pce.moments[1]) == binomial(n + 2, 2) + @test length(pce.sym_basis) == binomial(n + 2, 2) + @test isequal(pce.parameters, [a, b]) +end + +# test equation for throughout: +@parameters a, b +@variables t, y(t) +D = Differential(t) 
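+# scalar test ODE; below, `a` is expanded in a Gauss basis while `b` stays an ordinary parameter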
+test_equation = [D(y) ~ a * y + 4 * b] + +# set up pce +n = 5 +bases = [a => GaussOrthoPoly(n)] +pce = PCE([y], bases) +eq = [eq.rhs for eq in test_equation] +pce_eq = MOR.apply_ansatz(eq, pce)[1] + +@testset "PCE: apply_ansatz test" begin + true_eq = expand(pce.sym_basis[2] * dot(pce.moments[1], pce.sym_basis) + 4 * b) + @test isequal(pce_eq, true_eq) +end + +# test extraction of monomial coefficients +coeffs = Dict{Any, Any}(pce.sym_basis[i] * pce.sym_basis[2] => pce.moments[1][i] + for i in 1:(n + 1)) +coeffs[Val(1)] = 4.0 * b +basis_indices = Dict{Any, Any}(pce.sym_basis[i] * pce.sym_basis[2] => ([i - 1, 1], + [1, i - 1]) + for i in 1:(n + 1)) +basis_indices[Val(1)] = [[0], [0]] + +@testset "PCE: basismonomial extraction test" begin + extracted_coeffs = MOR.extract_coeffs(pce_eq, pce.sym_basis) + @test all(isequal(coeffs[mono], extracted_coeffs[mono]) for mono in keys(coeffs)) + + extracted_coeffs, extracted_basis_indices = MOR.extract_basismonomial_coeffs([pce_eq], + pce) + extracted_basis_indices = Dict(extracted_basis_indices) + test1 = [isequal(basis_indices[mono][1], extracted_basis_indices[mono]) + for mono in keys(basis_indices)] + test2 = [isequal(basis_indices[mono][2], extracted_basis_indices[mono]) + for mono in keys(basis_indices)] + @test all(test1 + test2 .>= 1) +end + +# test bump_degree +@testset "PCE: bump_degree test" begin + n = 5 + n_bumped = 10 + shape_a = 0.1 + shape_b = 0.2 + shape = 0.3 + mu = 0.4 + λ = 0.5 + ϕ = 0.6 + rate = 1.0 + my_measure = Measure("my_measure", t -> 1 + t, (-1, 1), false, Dict()) + my_poly = OrthoPoly("my_poly", n, my_measure) + orthogonal_polynomials = [ + my_poly => Dict(), + GaussOrthoPoly(n) => [], + Uniform01OrthoPoly(n) => [], + Uniform_11OrthoPoly(n) => [], + GammaOrthoPoly(n, shape, rate) => [shape, rate], + HermiteOrthoPoly(n) => [], + JacobiOrthoPoly(n, shape_a, shape_b) => [shape_a, shape_b], + LaguerreOrthoPoly(n) => [], + LogisticOrthoPoly(n) => [], + MeixnerPollaczekOrthoPoly(n, λ, ϕ) => [λ, ϕ], + genHermiteOrthoPoly(n, mu) => [mu], + genLaguerreOrthoPoly(n, shape) => [shape], + LegendreOrthoPoly(n) => [], + Beta01OrthoPoly(n, shape_a, shape_b) => [shape_a, shape_b], + ] + + for (op, params) in orthogonal_polynomials + bumped_op = MOR.bump_degree(op, n_bumped) + @test deg(bumped_op) == n_bumped + @test MOR.measure_parameters(bumped_op.measure) == params + end +end + +# test Galerkin projection +@testset "PCE: galerkin projection test" begin + moment_eqs = MOR.pce_galerkin(eq, pce) + + integrator = map((uni, deg) -> MOR.bump_degree(uni, deg), pce.tensor_basis.uni, + [n + 1, n + 1]) + + true_moment_eqs = Num[] + scaling_factor = computeSP2(pce.tensor_basis) + for j in 0:n + mom_eq = 0.0 + for mono in keys(basis_indices) + ind = basis_indices[mono][2] + c = computeSP(vcat(ind, j), pce.tensor_basis, integrator) + mom_eq += c * coeffs[mono] + end + push!(true_moment_eqs, 1 / scaling_factor[j + 1] * mom_eq) + end + + @test all([isapprox_sym(moment_eqs[1][i], true_moment_eqs[i]) + for i in eachindex(true_moment_eqs)]) + + # check generation of moment equations + @named test_system = ODESystem(test_equation, t, [y], [a, b]) + moment_system, pce_eval = moment_equations(test_system, pce) + moment_eqs = equations(moment_system) + moment_eqs = [moment_eqs[i].rhs for i in eachindex(moment_eqs)] + @test isequal(parameters(moment_system), [b]) + @test nameof(moment_system) == :test_system_pce + @test isequal(states(moment_system), reduce(vcat, pce.moments)) + @test all([isapprox_sym(moment_eqs[i], true_moment_eqs[i]) + for i in 
eachindex(true_moment_eqs)]) +end diff --git a/test/Project.toml b/test/Project.toml index e8910b8..09cbf80 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -1,4 +1,6 @@ [deps] +LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +PolyChaos = "8d666b04-775d-5f6e-b778-5ac7c70f65a3" DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" MethodOfLines = "94925ecb-adb7-4558-8ed8-f975c56a0bf4" ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" diff --git a/test/runtests.jl b/test/runtests.jl index 8d64d7f..6bdcf97 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,5 +1,8 @@ -using SafeTestsets +using ModelOrderReduction +#---------- Model Reduction ----------------# +using SafeTestsets @safetestset "POD" begin include("DataReduction.jl") end @safetestset "utils" begin include("utils.jl") end @safetestset "DEIM" begin include("deim.jl") end +@safetestset "PCE" begin include("PCETests.jl") end