
Commit 99a3639

Merge pull request #841 from SciML/sb/format
chore: format the whole repo

2 parents 86ab97c + b0daed7


60 files changed: +1150 -1001 lines
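This commit applies the SciML code style to the whole repository: the hunks below reflow long calls, and many removed/added line pairs contain identical text because only the leading whitespace changed. A repo-wide pass like this is usually produced with JuliaFormatter.jl; a minimal sketch, assuming that tool and the SciML style (the actual invocation is not part of the commit):

```julia
# Sketch only: assumes JuliaFormatter.jl with the SciML style guide; the exact
# command used for this commit is not recorded in the diff.
using JuliaFormatter

# Reformat every Julia source file under the repository root; format_markdown
# also reflows the Julia code blocks embedded in the docs' Markdown pages.
format(".", SciMLStyle(); format_markdown = true)
```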

README.md

Lines changed: 2 additions & 2 deletions

@@ -93,9 +93,9 @@ xs, ys = [infimum(d.domain):(dx / 10):supremum(d.domain) for d in domains]
 analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2)
 
 u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
- (length(xs), length(ys)))
+ (length(xs), length(ys)))
 u_real = reshape([analytic_sol_func(x, y) for x in xs for y in ys],
- (length(xs), length(ys)))
+ (length(xs), length(ys)))
 diff_u = abs.(u_predict .- u_real)
 
 using Plots
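The removed and added lines above contain the same text and differ only in leading whitespace. A sketch of the likely before/after, assuming the continuation was previously aligned under the opening bracket and now uses the SciML style's fixed four-space indent:

```julia
# Before (assumed): continuation aligned under the opening bracket of reshape.
u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
                    (length(xs), length(ys)))

# After (assumed): SciML style indents wrapped arguments by four spaces.
u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
    (length(xs), length(ys)))
```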

docs/make.jl

Lines changed: 8 additions & 8 deletions

@@ -10,13 +10,13 @@ using Plots
 include("pages.jl")
 
 makedocs(sitename = "NeuralPDE.jl",
- authors = "#",
- modules = [NeuralPDE],
- clean = true, doctest = false, linkcheck = true,
- warnonly = [:missing_docs],
- format = Documenter.HTML(assets = ["assets/favicon.ico"],
- canonical = "https://docs.sciml.ai/NeuralPDE/stable/"),
- pages = pages)
+ authors = "#",
+ modules = [NeuralPDE],
+ clean = true, doctest = false, linkcheck = true,
+ warnonly = [:missing_docs],
+ format = Documenter.HTML(assets = ["assets/favicon.ico"],
+ canonical = "https://docs.sciml.ai/NeuralPDE/stable/"),
+ pages = pages)
 
 deploydocs(repo = "github.com/SciML/NeuralPDE.jl.git";
- push_preview = true)
+ push_preview = true)
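Assembled from the `+` lines above, the reformatted docs/make.jl calls read roughly as follows; the four-space indentation is assumed from the SciML style, and the snippet relies on the `using Documenter, NeuralPDE, Plots` and `include("pages.jl")` lines earlier in that file:

```julia
# Doc-build calls as reflowed by the formatter (indentation assumed).
makedocs(sitename = "NeuralPDE.jl",
    authors = "#",
    modules = [NeuralPDE],
    clean = true, doctest = false, linkcheck = true,
    warnonly = [:missing_docs],
    format = Documenter.HTML(assets = ["assets/favicon.ico"],
        canonical = "https://docs.sciml.ai/NeuralPDE/stable/"),
    pages = pages)

deploydocs(repo = "github.com/SciML/NeuralPDE.jl.git";
    push_preview = true)
```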

docs/pages.jl

Lines changed: 31 additions & 31 deletions

@@ -1,36 +1,36 @@
 pages = ["index.md",
 "ODE PINN Tutorials" => Any["Introduction to NeuralPDE for ODEs" => "tutorials/ode.md",
- "Bayesian PINNs for Coupled ODEs" => "tutorials/Lotka_Volterra_BPINNs.md",
- "PINNs DAEs" => "tutorials/dae.md",
- "Parameter Estimation with PINNs for ODEs" => "tutorials/ode_parameter_estimation.md",
- "Deep Galerkin Method" => "tutorials/dgm.md"
- #"examples/nnrode_example.md", # currently incorrect
- ],
- "PDE PINN Tutorials" => Any["Introduction to NeuralPDE for PDEs" => "tutorials/pdesystem.md",
- "Bayesian PINNs for PDEs" => "tutorials/low_level_2.md",
- "Using GPUs" => "tutorials/gpu.md",
- "Defining Systems of PDEs" => "tutorials/systems.md",
- "Imposing Constraints" => "tutorials/constraints.md",
- "The symbolic_discretize Interface" => "tutorials/low_level.md",
- "Optimising Parameters (Solving Inverse Problems)" => "tutorials/param_estim.md",
- "Solving Integro Differential Equations" => "tutorials/integro_diff.md",
- "Transfer Learning with Neural Adapter" => "tutorials/neural_adapter.md",
- "The Derivative Neural Network Approximation" => "tutorials/derivative_neural_network.md"],
+ "Bayesian PINNs for Coupled ODEs" => "tutorials/Lotka_Volterra_BPINNs.md",
+ "PINNs DAEs" => "tutorials/dae.md",
+ "Parameter Estimation with PINNs for ODEs" => "tutorials/ode_parameter_estimation.md",
+ "Deep Galerkin Method" => "tutorials/dgm.md" #"examples/nnrode_example.md", # currently incorrect
+ ],
+ "PDE PINN Tutorials" => Any[
+ "Introduction to NeuralPDE for PDEs" => "tutorials/pdesystem.md",
+ "Bayesian PINNs for PDEs" => "tutorials/low_level_2.md",
+ "Using GPUs" => "tutorials/gpu.md",
+ "Defining Systems of PDEs" => "tutorials/systems.md",
+ "Imposing Constraints" => "tutorials/constraints.md",
+ "The symbolic_discretize Interface" => "tutorials/low_level.md",
+ "Optimising Parameters (Solving Inverse Problems)" => "tutorials/param_estim.md",
+ "Solving Integro Differential Equations" => "tutorials/integro_diff.md",
+ "Transfer Learning with Neural Adapter" => "tutorials/neural_adapter.md",
+ "The Derivative Neural Network Approximation" => "tutorials/derivative_neural_network.md"],
 "Extended Examples" => Any["examples/wave.md",
- "examples/3rd.md",
- "examples/ks.md",
- "examples/heterogeneous.md",
- "examples/linear_parabolic.md",
- "examples/nonlinear_elliptic.md",
- "examples/nonlinear_hyperbolic.md",
- "examples/complex.md"],
+ "examples/3rd.md",
+ "examples/ks.md",
+ "examples/heterogeneous.md",
+ "examples/linear_parabolic.md",
+ "examples/nonlinear_elliptic.md",
+ "examples/nonlinear_hyperbolic.md",
+ "examples/complex.md"],
 "Manual" => Any["manual/ode.md",
- "manual/dae.md",
- "manual/pinns.md",
- "manual/bpinns.md",
- "manual/training_strategies.md",
- "manual/adaptive_losses.md",
- "manual/logging.md",
- "manual/neural_adapters.md"],
- "Developer Documentation" => Any["developer/debugging.md"],
+ "manual/dae.md",
+ "manual/pinns.md",
+ "manual/bpinns.md",
+ "manual/training_strategies.md",
+ "manual/adaptive_losses.md",
+ "manual/logging.md",
+ "manual/neural_adapters.md"],
+ "Developer Documentation" => Any["developer/debugging.md"]
 ]

docs/src/developer/debugging.md

Lines changed: 9 additions & 8 deletions

@@ -58,8 +58,8 @@ strategy = NeuralPDE.GridTraining(dx)
 integral = NeuralPDE.get_numeric_integral(strategy, indvars, multioutput, chain, derivative)
 
 _pde_loss_function = NeuralPDE.build_loss_function(eq, indvars, depvars, phi, derivative,
- integral, multioutput, init_params,
- strategy)
+ integral, multioutput, init_params,
+ strategy)
 ```
 
 ```

@@ -83,9 +83,9 @@ julia> bc_indvars = NeuralPDE.get_variables(bcs,indvars,depvars)
 
 ```julia
 _bc_loss_functions = [NeuralPDE.build_loss_function(bc, indvars, depvars,
- phi, derivative, integral, multioutput,
- init_params, strategy,
- bc_indvars = bc_indvar)
+ phi, derivative, integral, multioutput,
+ init_params, strategy,
+ bc_indvars = bc_indvar)
 for (bc, bc_indvar) in zip(bcs, bc_indvars)]
 ```
 

@@ -126,7 +126,7 @@ julia> expr_bc_loss_functions = [NeuralPDE.build_symbolic_loss_function(bc,indva
 
 ```julia
 train_sets = NeuralPDE.generate_training_sets(domains, dx, [eq], bcs, eltypeθ, indvars,
- depvars)
+ depvars)
 pde_train_set, bcs_train_set = train_sets
 ```
 

@@ -145,8 +145,9 @@ julia> bcs_train_set
 ```
 
 ```julia
- pde_bounds, bcs_bounds = NeuralPDE.get_bounds(domains, [eq], bcs, eltypeθ, indvars, depvars,
- NeuralPDE.StochasticTraining(100))
+ pde_bounds, bcs_bounds = NeuralPDE.get_bounds(
+ domains, [eq], bcs, eltypeθ, indvars, depvars,
+ NeuralPDE.StochasticTraining(100))
 ```
 
 ```

docs/src/examples/complex.md

Lines changed: 10 additions & 8 deletions

@@ -17,27 +17,29 @@ function bloch_equations(u, p, t)
 γ = Γ / 2
 ρ₁₁, ρ₂₂, ρ₁₂, ρ₂₁ = u
 d̢ρ = [im * Ω * (ρ₁₂ - ρ₂₁) + Γ * ρ₂₂;
- -im * Ω * (ρ₁₂ - ρ₂₁) - Γ * ρ₂₂;
- -(γ + im * Δ) * ρ₁₂ - im * Ω * (ρ₂₂ - ρ₁₁);
- conj(-(γ + im * Δ) * ρ₁₂ - im * Ω * (ρ₂₂ - ρ₁₁))]
+ -im * Ω * (ρ₁₂ - ρ₂₁) - Γ * ρ₂₂;
+ -(γ + im * Δ) * ρ₁₂ - im * Ω * (ρ₂₂ - ρ₁₁);
+ conj(-(γ + im * Δ) * ρ₁₂ - im * Ω * (ρ₂₂ - ρ₁₁))]
 return d̢ρ
 end
 
 u0 = zeros(ComplexF64, 4)
 u0[1] = 1.0
- time_span = (0.0, 2.0)
+ time_span = (0.0, 2.0)
 parameters = [2.0, 0.0, 1.0]
 
 problem = ODEProblem(bloch_equations, u0, time_span, parameters)
 
 chain = Lux.Chain(
- Lux.Dense(1, 16, tanh; init_weight = (rng, a...) -> Lux.kaiming_normal(rng, ComplexF64, a...)) ,
- Lux.Dense(16, 4; init_weight = (rng, a...) -> Lux.kaiming_normal(rng, ComplexF64, a...))
- )
+ Lux.Dense(1, 16, tanh;
+ init_weight = (rng, a...) -> Lux.kaiming_normal(rng, ComplexF64, a...)),
+ Lux.Dense(
+ 16, 4; init_weight = (rng, a...) -> Lux.kaiming_normal(rng, ComplexF64, a...))
+ )
 ps, st = Lux.setup(rng, chain)
 
 opt = OptimizationOptimisers.Adam(0.01)
- ground_truth = solve(problem, Tsit5(), saveat = 0.01)
+ ground_truth = solve(problem, Tsit5(), saveat = 0.01)
 alg = NNODE(chain, opt, ps; strategy = StochasticTraining(500))
 sol = solve(problem, alg, verbose = false, maxiters = 5000, saveat = 0.01)
 ```
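For readability, the reflowed complex-valued network definition, assembled from the `+` lines above (indentation assumed to follow the SciML style):

```julia
# Two-layer Lux network with complex-valued Kaiming initialization,
# as reflowed by the formatter in docs/src/examples/complex.md.
chain = Lux.Chain(
    Lux.Dense(1, 16, tanh;
        init_weight = (rng, a...) -> Lux.kaiming_normal(rng, ComplexF64, a...)),
    Lux.Dense(
        16, 4; init_weight = (rng, a...) -> Lux.kaiming_normal(rng, ComplexF64, a...))
)
```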

docs/src/examples/heterogeneous.md

Lines changed: 2 additions & 2 deletions

@@ -32,9 +32,9 @@ domains = [x ∈ Interval(0.0, 1.0),
 
 numhid = 3
 chains = [[Lux.Chain(Dense(1, numhid, Lux.σ), Dense(numhid, numhid, Lux.σ),
- Dense(numhid, 1)) for i in 1:2]
+ Dense(numhid, 1)) for i in 1:2]
 [Lux.Chain(Dense(2, numhid, Lux.σ), Dense(numhid, numhid, Lux.σ),
- Dense(numhid, 1)) for i in 1:2]]
+ Dense(numhid, 1)) for i in 1:2]]
 discretization = NeuralPDE.PhysicsInformedNN(chains, QuadratureTraining())
 
 @named pde_system = PDESystem(eq, bcs, domains, [x, y], [p(x), q(y), r(x, y), s(y, x)])

docs/src/examples/linear_parabolic.md

Lines changed: 2 additions & 1 deletion

@@ -24,7 +24,8 @@ w(t, 1) = \frac{e^{\lambda_1} cos(\frac{x}{a})-e^{\lambda_2}cos(\frac{x}{a})}{\l
 with a physics-informed neural network.
 
 ```@example linear_parabolic
- using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimisers, OptimizationOptimJL, LineSearches
+ using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimisers,
+ OptimizationOptimJL, LineSearches
 using Plots
 using ModelingToolkit: Interval, infimum, supremum
 

docs/src/examples/nonlinear_hyperbolic.md

Lines changed: 2 additions & 2 deletions

@@ -33,7 +33,8 @@ where k is a root of the algebraic (transcendental) equation f(k) = g(k), j0 and
 We solve this with Neural:
 
 ```@example nonlinear_hyperbolic
- using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, Roots, LineSearches
+ using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, Roots,
+ LineSearches
 using SpecialFunctions
 using Plots
 using ModelingToolkit: Interval, infimum, supremum

@@ -121,7 +122,6 @@ for i in 1:2
 end
 ```
 
-
 ```@example nonlinear_hyperbolic
 ps[1]
 ```

docs/src/examples/wave.md

Lines changed: 11 additions & 10 deletions

@@ -70,9 +70,9 @@ function analytic_sol_func(t, x)
 end
 
 u_predict = reshape([first(phi([t, x], res.u)) for t in ts for x in xs],
- (length(ts), length(xs)))
+ (length(ts), length(xs)))
 u_real = reshape([analytic_sol_func(t, x) for t in ts for x in xs],
- (length(ts), length(xs)))
+ (length(ts), length(xs)))
 
 diff_u = abs.(u_predict .- u_real)
 p1 = plot(ts, xs, u_real, linetype = :contourf, title = "analytic");

@@ -121,7 +121,7 @@ eq = Dx(Dxu(t, x)) ~ 1 / v^2 * Dt(Dtu(t, x)) + b * Dtu(t, x)
 bcs_ = [u(t, 0) ~ 0.0,# for all t > 0
 u(t, L) ~ 0.0,# for all t > 0
 u(0, x) ~ x * (1.0 - x), # for all 0 < x < 1
- Dtu(0, x) ~ 1 - 2x, # for all 0 < x < 1
+ Dtu(0, x) ~ 1 - 2x # for all 0 < x < 1
 ]
 
 ep = (cbrt(eps(eltype(Float64))))^2 / 6

@@ -139,16 +139,16 @@ domains = [t ∈ Interval(0.0, L),
 inn = 25
 innd = 4
 chain = [[Lux.Chain(Dense(2, inn, Lux.tanh),
- Dense(inn, inn, Lux.tanh),
- Dense(inn, inn, Lux.tanh),
- Dense(inn, 1)) for _ in 1:3]
+ Dense(inn, inn, Lux.tanh),
+ Dense(inn, inn, Lux.tanh),
+ Dense(inn, 1)) for _ in 1:3]
 [Lux.Chain(Dense(2, innd, Lux.tanh), Dense(innd, 1)) for _ in 1:2]]
 
 strategy = GridTraining(0.02)
 discretization = PhysicsInformedNN(chain, strategy;)
 
 @named pde_system = PDESystem(eq, bcs, domains, [t, x],
- [u(t, x), Dxu(t, x), Dtu(t, x), O1(t, x), O2(t, x)])
+ [u(t, x), Dxu(t, x), Dtu(t, x), O1(t, x), O2(t, x)])
 prob = discretize(pde_system, discretization)
 sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)
 

@@ -201,10 +201,11 @@ gif(anim, "1Dwave_damped_adaptive.gif", fps = 200)
 
 # Surface plot
 ts, xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
- u_predict = reshape([first(phi([t, x], res.u.depvar.u)) for
- t in ts for x in xs], (length(ts), length(xs)))
+ u_predict = reshape(
+ [first(phi([t, x], res.u.depvar.u)) for
+ t in ts for x in xs], (length(ts), length(xs)))
 u_real = reshape([analytic_sol_func(t, x) for t in ts for x in xs],
- (length(ts), length(xs)))
+ (length(ts), length(xs)))
 
 diff_u = abs.(u_predict .- u_real)
 p1 = plot(ts, xs, u_real, linetype = :contourf, title = "analytic");

docs/src/index.md

Lines changed: 2 additions & 2 deletions

@@ -1,7 +1,7 @@
 # NeuralPDE.jl: Automatic Physics-Informed Neural Networks (PINNs)
 
- [NeuralPDE.jl](https://github.com/SciML/NeuralPDE.jl) is a solver package which
- consists of neural network solvers for partial differential equations using
+ [NeuralPDE.jl](https://github.com/SciML/NeuralPDE.jl) is a solver package which
+ consists of neural network solvers for partial differential equations using
 physics-informed neural networks (PINNs) and the ability to generate neural
 networks which both approximate physical laws and real data simultaneously.
 
