From b2853ba2fa4bf934797f073afe7a7e83997a3b91 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 22 Mar 2023 15:12:54 +1100 Subject: [PATCH 01/34] Added BlockDiagonalSmoothers --- Project.toml | 1 + src/GridapSolvers.jl | 1 + src/LinearSolvers/BlockDiagonalSmoothers.jl | 103 ++++++++++++++++++++ src/LinearSolvers/LinearSolvers.jl | 4 + test/seq/BlockDiagonalSmoothersTests.jl | 56 +++++++++++ 5 files changed, 165 insertions(+) create mode 100644 src/LinearSolvers/BlockDiagonalSmoothers.jl create mode 100644 test/seq/BlockDiagonalSmoothersTests.jl diff --git a/Project.toml b/Project.toml index 94be9d40..8f28a24f 100644 --- a/Project.toml +++ b/Project.toml @@ -5,6 +5,7 @@ version = "0.1.0" [deps] ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63" +BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" Gridap = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" GridapDistributed = "f9701e48-63b3-45aa-9a63-9bc6c271f355" diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 988c541f..8e20f0eb 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -27,6 +27,7 @@ module GridapSolvers export JacobiLinearSolver export RichardsonSmoother export GMGLinearSolver + export BlockDiagonalSmoother # PatchBasedSmoothers export PatchDecomposition diff --git a/src/LinearSolvers/BlockDiagonalSmoothers.jl b/src/LinearSolvers/BlockDiagonalSmoothers.jl new file mode 100644 index 00000000..3c5aea83 --- /dev/null +++ b/src/LinearSolvers/BlockDiagonalSmoothers.jl @@ -0,0 +1,103 @@ +struct BlockDiagonalSmoother{A,B,C} <: Gridap.Algebra.LinearSolver + num_blocks :: Int32 + ranges :: A + blocks :: B + solvers :: C + + function BlockDiagonalSmoother(ranges,blocks,solvers) + num_blocks = length(ranges) + @check length(blocks) == num_blocks + @check length(solvers) == num_blocks + + A = typeof(ranges) + B = typeof(blocks) + C = typeof(solvers) + return new{A,B,C}(num_blocks,ranges,blocks,solvers) + end +end + +function BlockDiagonalSmoother(biforms :: AbstractArray{<:Function}, + trials :: AbstractArray{<:FESpace}, + tests :: AbstractArray{<:FESpace}, + solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}) + ranges = map(num_free_dofs,tests) + blocks = compute_block_matrices(biforms,trials,tests) + return BlockDiagonalSmoother(ranges,blocks,solvers) +end + +function BlockDiagonalSmoother(biforms :: AbstractArray{<:Function}, + U :: MultiFieldFESpace, + V :: MultiFieldFESpace, + solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}) + dof_ids = get_free_dof_ids(V) + ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) + blocks = compute_block_matrices(biforms,U.spaces,V.spaces) + return BlockDiagonalSmoother(ranges,blocks,solvers) +end + +function BlockDiagonalSmoother(A :: AbstractMatrix, + ranges :: AbstractArray{<:AbstractRange}, + solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}; + lazy_mode=false) + blocks = extract_diagonal_blocks(A,ranges;lazy_mode=lazy_mode) + return BlockDiagonalSmoother(ranges,blocks,solvers) +end + +function compute_block_matrices(biforms :: AbstractArray{<:Function}, + trials :: AbstractArray{<:FESpace}, + tests :: AbstractArray{<:FESpace}) + @check length(biforms) == length(tests) == length(trials) + @check all(U -> isa(U,TrialFESpace),trials) + + blocks = map(assemble_matrix,biforms,tests,trials) + return blocks +end + +function extract_diagonal_blocks(A::AbstractMatrix,ranges;lazy_mode=false) + blocks = map(ranges) do range + if lazy_mode + view(A,range,range) + else + A[range,range] + end + end + return blocks +end + 
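+# Minimal usage sketch (assumed setup, mirroring the accompanying tests): the block
+# ranges are recovered from a MultiFieldFESpace `X`, and each diagonal block gets its
+# own solver. Here `A`/`b` denote the assembled global system, and `BackslashSolver`
+# is just one possible per-block solver choice.
+#
+#   dof_ids = get_free_dof_ids(X)
+#   ranges  = map(i->dof_ids[Block(i)],1:blocklength(dof_ids))
+#   solvers = Fill(BackslashSolver(),2)
+#   BDS     = BlockDiagonalSmoother(A,ranges,solvers)
+#   BDSns   = numerical_setup(symbolic_setup(BDS,A),A)
+#   cg!(x,A,b;Pl=BDSns)   # apply the block-diagonal solve as a preconditioner
+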
+struct BlockDiagonalSmootherSS{A,B} <: Gridap.Algebra.SymbolicSetup + solver :: A + block_ss :: B +end + +function Gridap.Algebra.symbolic_setup(solver::BlockDiagonalSmoother,mat::AbstractMatrix) + block_ss = map(symbolic_setup,solver.solvers,solver.blocks) + return BlockDiagonalSmootherSS(solver,block_ss) +end + +struct BlockDiagonalSmootherNS{A,B} <: Gridap.Algebra.NumericalSetup + solver :: A + block_ns :: B +end + +function Gridap.Algebra.numerical_setup(ss::BlockDiagonalSmootherSS,mat::AbstractMatrix) + solver = ss.solver + block_ns = map(numerical_setup,ss.block_ss,solver.blocks) + return BlockDiagonalSmootherNS(solver,block_ns) +end + +# TODO: Should we consider overlapping block smoothers? +function Gridap.Algebra.solve!(x::AbstractVector,ns::BlockDiagonalSmootherNS,b::AbstractVector) + solver, block_ns = ns.solver, ns.block_ns + num_blocks, ranges = solver.num_blocks, solver.ranges + + for iB in 1:num_blocks + xi = view(x,ranges[iB]) + bi = view(b,ranges[iB]) + solve!(xi,block_ns[iB],bi) + end + return x +end + +function LinearAlgebra.ldiv!(x,ns::BlockDiagonalSmootherNS,b) + solve!(x,ns,b) +end \ No newline at end of file diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index 4fe84ae9..c7124874 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -2,7 +2,9 @@ module LinearSolvers using Printf using LinearAlgebra + using Gridap +using Gridap.Helpers using Gridap.Algebra using PartitionedArrays using GridapPETSc @@ -14,9 +16,11 @@ import LinearAlgebra: mul!, ldiv! export JacobiLinearSolver export RichardsonSmoother export GMGLinearSolver +export BlockDiagonalSmoother include("JacobiLinearSolvers.jl") include("RichardsonSmoothers.jl") include("GMGLinearSolvers.jl") +include("BlockDiagonalSmoothers.jl") end \ No newline at end of file diff --git a/test/seq/BlockDiagonalSmoothersTests.jl b/test/seq/BlockDiagonalSmoothersTests.jl new file mode 100644 index 00000000..0e632b52 --- /dev/null +++ b/test/seq/BlockDiagonalSmoothersTests.jl @@ -0,0 +1,56 @@ +module BlockDiagonalSmoothersTests + +using Gridap +using Gridap.MultiField +using BlockArrays +using LinearAlgebra +using FillArrays + +using GridapSolvers + +u(x) = VectorValue(x[1],x[2]) +f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) + +p(x) = x[1] + x[2] +g(x) = -Δ(p)(x) + +D = 2 +n = 10 +domain = Tuple(repeat([0,1],D)) +partition = (n,n) +model = CartesianDiscreteModel(domain,partition) + +order = 1 +reffeᵤ = ReferenceFE(lagrangian,VectorValue{D,Float64},order) +V = TestFESpace(model,reffeᵤ,conformity=:H1,dirichlet_tags=["boundary"]) + +reffeₚ = ReferenceFE(lagrangian,Float64,order) +Q = TestFESpace(model,reffeₚ,conformity=:H1,dirichlet_tags=["boundary"]) + +U = TrialFESpace(V,u) +P = TrialFESpace(Q,p) + +Y = MultiFieldFESpace([V, Q]) +X = MultiFieldFESpace([U, P]) + +degree = 2*order + 1 +Ωₕ = Triangulation(model) +dΩ = Measure(Ωₕ,degree) + +a((u,p),(v,q)) = ∫( v⊙u + q⋅p)dΩ + +A,b = AffineFEOperator(a,l,X,Y) + +dof_ids = get_free_dof_ids(X) +ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) +solvers = Fill(BackslashSolver(),2) + +P = BlockDiagonalPreconditioner(A,ranges,solvers) +Pss = symbolic_setup(P,A) +Pns = numerical_setup(Pss,A) + +x = get_free_dof_values(zero(X)) +ldiv!(x,Pns,b) + + +end \ No newline at end of file From 057ed6205b48137edd0b38ce5a9ff57748658eb1 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 22 Mar 2023 15:33:19 +1100 Subject: [PATCH 02/34] Added more tests --- src/LinearSolvers/LinearSolvers.jl | 1 + 
test/seq/BlockDiagonalSmoothersTests.jl | 43 +++++++++++++++++++------ 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index c7124874..47f4aafc 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -2,6 +2,7 @@ module LinearSolvers using Printf using LinearAlgebra +using BlockArrays using Gridap using Gridap.Helpers diff --git a/test/seq/BlockDiagonalSmoothersTests.jl b/test/seq/BlockDiagonalSmoothersTests.jl index 0e632b52..b76f02ab 100644 --- a/test/seq/BlockDiagonalSmoothersTests.jl +++ b/test/seq/BlockDiagonalSmoothersTests.jl @@ -5,6 +5,7 @@ using Gridap.MultiField using BlockArrays using LinearAlgebra using FillArrays +using IterativeSolvers using GridapSolvers @@ -20,7 +21,7 @@ domain = Tuple(repeat([0,1],D)) partition = (n,n) model = CartesianDiscreteModel(domain,partition) -order = 1 +order = 2 reffeᵤ = ReferenceFE(lagrangian,VectorValue{D,Float64},order) V = TestFESpace(model,reffeᵤ,conformity=:H1,dirichlet_tags=["boundary"]) @@ -33,24 +34,46 @@ P = TrialFESpace(Q,p) Y = MultiFieldFESpace([V, Q]) X = MultiFieldFESpace([U, P]) -degree = 2*order + 1 -Ωₕ = Triangulation(model) -dΩ = Measure(Ωₕ,degree) +degree = 2*(order + 1) +Ω = Triangulation(model) +dΩ = Measure(Ω,degree) -a((u,p),(v,q)) = ∫( v⊙u + q⋅p)dΩ -A,b = AffineFEOperator(a,l,X,Y) +# Global problem +a((u,p),(v,q)) = ∫( v⊙u + ∇(v)⊙∇(u) + q⋅p + ∇(q)⊙∇(p))dΩ +l((v,q)) = ∫( v⋅f + q⋅g)dΩ + +op = AffineFEOperator(a,l,X,Y) +A,b = get_matrix(op), get_vector(op) +xh_star = solve(op) +x_star = get_free_dof_values(xh_star) dof_ids = get_free_dof_ids(X) ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) solvers = Fill(BackslashSolver(),2) -P = BlockDiagonalPreconditioner(A,ranges,solvers) -Pss = symbolic_setup(P,A) -Pns = numerical_setup(Pss,A) +# Build using the global matrix +BDS = BlockDiagonalSmoother(A,ranges,solvers) +BDSss = symbolic_setup(BDS,A) +BDSns = numerical_setup(BDSss,A) + +x = get_free_dof_values(zero(X)) +x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) + +norm(x-x_star) + +# Build using local weakforms +a1(u,v) = ∫(v⊙u + ∇(v)⊙∇(u))dΩ +a2(p,q) = ∫(q⋅p + ∇(q)⊙∇(p))dΩ +biforms = [a1,a2] + +BDS = BlockDiagonalSmoother(biforms,X,Y,solvers) +BDSss = symbolic_setup(BDS,A) +BDSns = numerical_setup(BDSss,A) x = get_free_dof_values(zero(X)) -ldiv!(x,Pns,b) +x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) +norm(x-x_star) end \ No newline at end of file From 62d7549e1999eabbc5f65e21c301e7f4bf1292e1 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 22 Mar 2023 15:52:29 +1100 Subject: [PATCH 03/34] Added tests for BlockSmoothers with PETSc solvers --- test/seq/BlockDiagonalSmoothersPETScTests.jl | 87 ++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 test/seq/BlockDiagonalSmoothersPETScTests.jl diff --git a/test/seq/BlockDiagonalSmoothersPETScTests.jl b/test/seq/BlockDiagonalSmoothersPETScTests.jl new file mode 100644 index 00000000..e60c30c3 --- /dev/null +++ b/test/seq/BlockDiagonalSmoothersPETScTests.jl @@ -0,0 +1,87 @@ +module BlockDiagonalSmoothersPETScTests + +using Gridap +using Gridap.MultiField +using BlockArrays +using LinearAlgebra +using FillArrays +using IterativeSolvers + +using GridapPETSc + +using GridapSolvers + +function set_ksp_options(ksp) + pc = Ref{GridapPETSc.PETSC.PC}() + mumpsmat = Ref{GridapPETSc.PETSC.Mat}() + @check_error_code GridapPETSc.PETSC.KSPView(ksp[],C_NULL) + @check_error_code 
GridapPETSc.PETSC.KSPSetType(ksp[],GridapPETSc.PETSC.KSPPREONLY) + @check_error_code GridapPETSc.PETSC.KSPGetPC(ksp[],pc) + @check_error_code GridapPETSc.PETSC.PCSetType(pc[],GridapPETSc.PETSC.PCLU) + @check_error_code GridapPETSc.PETSC.PCFactorSetMatSolverType(pc[],GridapPETSc.PETSC.MATSOLVERMUMPS) + @check_error_code GridapPETSc.PETSC.PCFactorSetUpMatSolverType(pc[]) + @check_error_code GridapPETSc.PETSC.PCFactorGetMatrix(pc[],mumpsmat) + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 4, 1) + # percentage increase in the estimated working space + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 14, 1000) + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2) + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2) + @check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6) +end + +u(x) = VectorValue(x[1],x[2]) +f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) + +p(x) = x[1] + x[2] +g(x) = -Δ(p)(x) + +GridapPETSc.with() do + D = 2 + n = 10 + domain = Tuple(repeat([0,1],D)) + partition = (n,n) + model = CartesianDiscreteModel(domain,partition) + + order = 2 + reffeᵤ = ReferenceFE(lagrangian,VectorValue{D,Float64},order) + V = TestFESpace(model,reffeᵤ,conformity=:H1,dirichlet_tags=["boundary"]) + + reffeₚ = ReferenceFE(lagrangian,Float64,order) + Q = TestFESpace(model,reffeₚ,conformity=:H1,dirichlet_tags=["boundary"]) + + U = TrialFESpace(V,u) + P = TrialFESpace(Q,p) + + Y = MultiFieldFESpace([V, Q]) + X = MultiFieldFESpace([U, P]) + + degree = 2*(order + 1) + Ω = Triangulation(model) + dΩ = Measure(Ω,degree) + + + # Global problem + a((u,p),(v,q)) = ∫( v⊙u + ∇(v)⊙∇(u) + q⋅p + ∇(q)⊙∇(p))dΩ + l((v,q)) = ∫( v⋅f + q⋅g)dΩ + + op = AffineFEOperator(a,l,X,Y) + A,b = get_matrix(op), get_vector(op) + xh_star = solve(op) + x_star = get_free_dof_values(xh_star) + + dof_ids = get_free_dof_ids(X) + ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) + solvers = Fill(PETScLinearSolver(set_ksp_options),2) + + # Build using the global matrix + BDS = BlockDiagonalSmoother(A,ranges,solvers) + BDSss = symbolic_setup(BDS,A) + BDSns = numerical_setup(BDSss,A) + + x = get_free_dof_values(zero(X)) + x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) + + println("Error: ",norm(x-x_star)) +end + +end \ No newline at end of file From cc42c58017cc2018e24ec602b221bb768dd13633 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 23 Mar 2023 15:33:52 +1100 Subject: [PATCH 04/34] Added wrappers for IterativeSolvers.jl solvers --- src/GridapSolvers.jl | 4 + src/LinearSolvers/IterativeLinearSolvers.jl | 96 ++++++++++++++++++++ src/LinearSolvers/LinearSolvers.jl | 6 ++ test/seq/BlockDiagonalSmoothersPETScTests.jl | 5 +- test/seq/IterativeSolversTests.jl | 42 +++++++++ 5 files changed, 149 insertions(+), 4 deletions(-) create mode 100644 src/LinearSolvers/IterativeLinearSolvers.jl create mode 100644 test/seq/IterativeSolversTests.jl diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 8e20f0eb..639d6cef 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -29,6 +29,10 @@ module GridapSolvers export GMGLinearSolver export BlockDiagonalSmoother + export ConjugateGradientSolver + export GMRESSolver + export MINRESSolver + # PatchBasedSmoothers export PatchDecomposition export PatchFESpace diff --git a/src/LinearSolvers/IterativeLinearSolvers.jl b/src/LinearSolvers/IterativeLinearSolvers.jl new file mode 100644 index 00000000..ee451b44 --- /dev/null +++ 
b/src/LinearSolvers/IterativeLinearSolvers.jl @@ -0,0 +1,96 @@ + +abstract type IterativeLinearSolverType end +struct CGIterativeSolverType <: IterativeLinearSolverType end +struct GMRESIterativeSolverType <: IterativeLinearSolverType end +struct MINRESIterativeSolverType <: IterativeLinearSolverType end + +# Solvers + +""" + Wrappers for [IterativeSolvers.jl](https://github.com/JuliaLinearAlgebra/IterativeSolvers.jl) + krylov-like iterative solvers. +""" +struct IterativeLinearSolver{A} <: Gridap.Algebra.LinearSolver + kwargs + + function IterativeLinearSolver(type::IterativeLinearSolverType,kwargs) + A = typeof(type) + return new{A}(kwargs) + end +end + +SolverType(::IterativeLinearSolver{T}) where T = T() + +function ConjugateGradientSolver(;kwargs...) + options = [:statevars,:initially_zero,:Pl,:abstol,:reltol,:maxiter,:verbose,:log] + @check all(map(opt -> opt ∈ options,keys(kwargs))) + return IterativeLinearSolver(CGIterativeSolverType(),kwargs) +end + +function GMRESSolver(;kwargs...) + options = [:initially_zero,:abstol,:reltol,:restart,:maxiter,:Pl,:Pr,:log,:verbose,:orth_meth] + @check all(map(opt -> opt ∈ options,keys(kwargs))) + return IterativeLinearSolver(GMRESIterativeSolverType(),kwargs) +end + +function MINRESSolver(;kwargs...) + options = [:initially_zero,:skew_hermitian,:abstol,:reltol,:maxiter,:log,:verbose] + @check all(map(opt -> opt ∈ options,keys(kwargs))) + return IterativeLinearSolver(MINRESIterativeSolverType(),kwargs) +end + +# Symbolic setup + +struct IterativeLinearSolverSS <: Gridap.Algebra.SymbolicSetup + solver +end + +function Gridap.Algebra.symbolic_setup(solver::IterativeLinearSolver,A::AbstractMatrix) + IterativeLinearSolverSS(solver) +end + +# Numerical setup + +struct IterativeLinearSolverNS <: Gridap.Algebra.NumericalSetup + solver + A +end + +function Gridap.Algebra.numerical_setup(ss::IterativeLinearSolverSS,A::AbstractMatrix) + IterativeLinearSolverNS(ss.solver,A) +end + +function Gridap.Algebra.solve!(x::AbstractVector, + ns::IterativeLinearSolverNS, + y::AbstractVector) + solver_type = SolverType(ns.solver) + solve!(solver_type,x,ns,y) +end + +function(::IterativeLinearSolverType,::AbstractVector,::IterativeLinearSolverNS,::AbstractVector) + @abstractmethod +end + +function Gridap.Algebra.solve!(::CGIterativeSolverType, + x::AbstractVector, + ns::IterativeLinearSolverNS, + y::AbstractVector) + A, kwargs = ns.A, ns.solver.kwargs + return cg!(x,A,y;kwargs...) +end + +function Gridap.Algebra.solve!(::GMRESIterativeSolverType, + x::AbstractVector, + ns::IterativeLinearSolverNS, + y::AbstractVector) + A, kwargs = ns.A, ns.solver.kwargs + return gmres!(x,A,y;kwargs...) +end + +function Gridap.Algebra.solve!(::MINRESIterativeSolverType, + x::AbstractVector, + ns::IterativeLinearSolverNS, + y::AbstractVector) + A, kwargs = ns.A, ns.solver.kwargs + return minres!(x,A,y;kwargs...) 
+end \ No newline at end of file diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index 47f4aafc..d01ca0d6 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -3,6 +3,7 @@ module LinearSolvers using Printf using LinearAlgebra using BlockArrays +using IterativeSolvers using Gridap using Gridap.Helpers @@ -19,9 +20,14 @@ export RichardsonSmoother export GMGLinearSolver export BlockDiagonalSmoother +export ConjugateGradientSolver +export GMRESSolver +export MINRESSolver + include("JacobiLinearSolvers.jl") include("RichardsonSmoothers.jl") include("GMGLinearSolvers.jl") include("BlockDiagonalSmoothers.jl") +include("IterativeLinearSolvers.jl") end \ No newline at end of file diff --git a/test/seq/BlockDiagonalSmoothersPETScTests.jl b/test/seq/BlockDiagonalSmoothersPETScTests.jl index e60c30c3..44910010 100644 --- a/test/seq/BlockDiagonalSmoothersPETScTests.jl +++ b/test/seq/BlockDiagonalSmoothersPETScTests.jl @@ -59,8 +59,6 @@ GridapPETSc.with() do Ω = Triangulation(model) dΩ = Measure(Ω,degree) - - # Global problem a((u,p),(v,q)) = ∫( v⊙u + ∇(v)⊙∇(u) + q⋅p + ∇(q)⊙∇(p))dΩ l((v,q)) = ∫( v⋅f + q⋅g)dΩ @@ -73,8 +71,7 @@ GridapPETSc.with() do ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) solvers = Fill(PETScLinearSolver(set_ksp_options),2) - # Build using the global matrix - BDS = BlockDiagonalSmoother(A,ranges,solvers) + BDS = BlockDiagonalSmoother(A,ranges,solvers;lazy_mode=true) BDSss = symbolic_setup(BDS,A) BDSns = numerical_setup(BDSss,A) diff --git a/test/seq/IterativeSolversTests.jl b/test/seq/IterativeSolversTests.jl new file mode 100644 index 00000000..35442cf1 --- /dev/null +++ b/test/seq/IterativeSolversTests.jl @@ -0,0 +1,42 @@ +module IterativeSolversTests + +using Test +using Gridap +using IterativeSolvers +using LinearAlgebra + +using GridapSolvers + +A = Matrix(1.0I,3,3) + +# CG +solver = ConjugateGradientSolver(;maxiter=100,reltol=1.e-12) +ss = symbolic_setup(solver,A) +ns = numerical_setup(ss,A) + +x = zeros(3) +y = [1.0,2.0,3.0] +solve!(x,ns,y) +@test x ≈ y + +# GMRES +solver = GMRESSolver(;maxiter=100,reltol=1.e-12) +ss = symbolic_setup(solver,A) +ns = numerical_setup(ss,A) + +x = zeros(3) +y = [1.0,2.0,3.0] +solve!(x,ns,y) +@test x ≈ y + +# MINRES +solver = MINRESSolver(;maxiter=100,reltol=1.e-12) +ss = symbolic_setup(solver,A) +ns = numerical_setup(ss,A) + +x = zeros(3) +y = [1.0,2.0,3.0] +solve!(x,ns,y) +@test x ≈ y + +end \ No newline at end of file From d1812b1a87b604c6f8eee1902282ba38ff90cb1c Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 4 Apr 2023 08:26:41 +1000 Subject: [PATCH 05/34] Added block SSOR smoother --- Project.toml | 1 + src/GridapSolvers.jl | 1 + src/LinearSolvers/GMGLinearSolvers.jl | 10 +- src/LinearSolvers/Helpers.jl | 23 ++++ src/LinearSolvers/IterativeLinearSolvers.jl | 104 +++++++++++++++++-- src/LinearSolvers/LinearSolvers.jl | 3 + src/LinearSolvers/RichardsonSmoothers.jl | 25 ++--- test/seq/DistributedIterativeSolversTests.jl | 62 +++++++++++ test/seq/IterativeSolversTests.jl | 13 ++- 9 files changed, 210 insertions(+), 32 deletions(-) create mode 100644 src/LinearSolvers/Helpers.jl create mode 100644 test/seq/DistributedIterativeSolversTests.jl diff --git a/Project.toml b/Project.toml index 8f28a24f..623b26f5 100644 --- a/Project.toml +++ b/Project.toml @@ -16,6 +16,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" Printf = 
"de0858da-6303-5e67-8744-51eddeeeb8d7" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" [compat] ArgParse = "1" diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 639d6cef..e89bcdad 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -32,6 +32,7 @@ module GridapSolvers export ConjugateGradientSolver export GMRESSolver export MINRESSolver + export SSORSolver # PatchBasedSmoothers export PatchDecomposition diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index f9a20e0f..a2e37686 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -88,7 +88,7 @@ function setup_finest_level_cache(mh::ModelHierarchy,smatrices::Vector{<:Abstrac parts = get_level_parts(mh,1) if i_am_in(parts) Ah = smatrices[1] - rh = PVector(0.0, Ah.cols) + rh = allocate_col_vector(Ah) cache = rh end return cache @@ -156,14 +156,14 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLi end function allocate_level_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMatrix},lev::Integer) - dxh = PVector(0.0, smatrices[lev].cols) - Adxh = PVector(0.0, smatrices[lev].rows) + dxh = allocate_col_vector(smatrices[lev]) + Adxh = allocate_row_vector(smatrices[lev]) cparts = get_level_parts(mh,lev+1) if i_am_in(cparts) AH = smatrices[lev+1] - rH = PVector(0.0,AH.cols) - dxH = PVector(0.0,AH.cols) + rH = allocate_col_vector(AH) + dxH = allocate_col_vector(AH) else rH = nothing dxH = nothing diff --git a/src/LinearSolvers/Helpers.jl b/src/LinearSolvers/Helpers.jl new file mode 100644 index 00000000..95089ca9 --- /dev/null +++ b/src/LinearSolvers/Helpers.jl @@ -0,0 +1,23 @@ + +# Row/Col vectors + +function allocate_row_vector(A::AbstractMatrix{T}) where T + return zeros(T,size(A,1)) +end + +function allocate_col_vector(A::AbstractMatrix{T}) where T + return zeros(T,size(A,2)) +end + + +function allocate_row_vector(A::PSparseMatrix) + T = eltype(A) + return PVector(zero(T),A.rows) +end + +function allocate_col_vector(A::PSparseMatrix) + T = eltype(A) + return PVector(zero(T),A.cols) +end + + diff --git a/src/LinearSolvers/IterativeLinearSolvers.jl b/src/LinearSolvers/IterativeLinearSolvers.jl index ee451b44..9b601759 100644 --- a/src/LinearSolvers/IterativeLinearSolvers.jl +++ b/src/LinearSolvers/IterativeLinearSolvers.jl @@ -3,19 +3,26 @@ abstract type IterativeLinearSolverType end struct CGIterativeSolverType <: IterativeLinearSolverType end struct GMRESIterativeSolverType <: IterativeLinearSolverType end struct MINRESIterativeSolverType <: IterativeLinearSolverType end +struct SSORIterativeSolverType <: IterativeLinearSolverType end -# Solvers +# Constructors """ Wrappers for [IterativeSolvers.jl](https://github.com/JuliaLinearAlgebra/IterativeSolvers.jl) - krylov-like iterative solvers. + krylov-like iterative solvers. + + Currently supported: + - ConjugateGradientSolver + - GMRESSolver + - MINRESSolver """ struct IterativeLinearSolver{A} <: Gridap.Algebra.LinearSolver + args kwargs - function IterativeLinearSolver(type::IterativeLinearSolverType,kwargs) + function IterativeLinearSolver(type::IterativeLinearSolverType,args,kwargs) A = typeof(type) - return new{A}(kwargs) + return new{A}(args,kwargs) end end @@ -24,19 +31,26 @@ SolverType(::IterativeLinearSolver{T}) where T = T() function ConjugateGradientSolver(;kwargs...) 
options = [:statevars,:initially_zero,:Pl,:abstol,:reltol,:maxiter,:verbose,:log] @check all(map(opt -> opt ∈ options,keys(kwargs))) - return IterativeLinearSolver(CGIterativeSolverType(),kwargs) + return IterativeLinearSolver(CGIterativeSolverType(),nothing,kwargs) end function GMRESSolver(;kwargs...) options = [:initially_zero,:abstol,:reltol,:restart,:maxiter,:Pl,:Pr,:log,:verbose,:orth_meth] @check all(map(opt -> opt ∈ options,keys(kwargs))) - return IterativeLinearSolver(GMRESIterativeSolverType(),kwargs) + return IterativeLinearSolver(GMRESIterativeSolverType(),nothing,kwargs) end function MINRESSolver(;kwargs...) options = [:initially_zero,:skew_hermitian,:abstol,:reltol,:maxiter,:log,:verbose] @check all(map(opt -> opt ∈ options,keys(kwargs))) - return IterativeLinearSolver(MINRESIterativeSolverType(),kwargs) + return IterativeLinearSolver(MINRESIterativeSolverType(),nothing,kwargs) +end + +function SSORSolver(ω::Real;kwargs...) + options = [:maxiter] + @check all(map(opt -> opt ∈ options,keys(kwargs))) + args = Dict(:ω => ω) + return IterativeLinearSolver(SSORIterativeSolverType(),args,kwargs) end # Symbolic setup @@ -54,10 +68,49 @@ end struct IterativeLinearSolverNS <: Gridap.Algebra.NumericalSetup solver A + caches end function Gridap.Algebra.numerical_setup(ss::IterativeLinearSolverSS,A::AbstractMatrix) - IterativeLinearSolverNS(ss.solver,A) + solver_type = SolverType(ss.solver) + numerical_setup(solver_type,ss,A) +end + +function Gridap.Algebra.numerical_setup(::IterativeLinearSolverType, + ss::IterativeLinearSolverSS, + A::AbstractMatrix) + IterativeLinearSolverNS(ss.solver,A,nothing) +end + +function Gridap.Algebra.numerical_setup(::SSORIterativeSolverType, + ss::IterativeLinearSolverSS, + A::AbstractMatrix) + x = allocate_row_vector(A) + b = allocate_col_vector(A) + ω = ss.solver.args[:ω] + maxiter = ss.solver.kwargs[:maxiter] + caches = IterativeSolvers.ssor_iterable(x,A,b,ω;maxiter=maxiter) + return IterativeLinearSolverNS(ss.solver,A,caches) +end + +function IterativeSolvers.ssor_iterable(x::PVector, + A::PSparseMatrix, + b::PVector, + ω::Real; + maxiter::Int = 10) + iterables = map_parts(x.owned_values,A.owned_owned_values,b.owned_values) do _xi,_Aii,_bi + xi = Vector(_xi) + Aii = SparseMatrixCSC(_Aii) + bi = Vector(_bi) + return IterativeSolvers.ssor_iterable(xi,Aii,bi,ω;maxiter=maxiter) + end + return iterables +end + +# Solve + +function LinearAlgebra.ldiv!(x::AbstractVector,ns::IterativeLinearSolverNS,b::AbstractVector) + solve!(x,ns,b) end function Gridap.Algebra.solve!(x::AbstractVector, @@ -67,7 +120,10 @@ function Gridap.Algebra.solve!(x::AbstractVector, solve!(solver_type,x,ns,y) end -function(::IterativeLinearSolverType,::AbstractVector,::IterativeLinearSolverNS,::AbstractVector) +function Gridap.Algebra.solve!(::IterativeLinearSolverType, + ::AbstractVector, + ::IterativeLinearSolverNS, + ::AbstractVector) @abstractmethod end @@ -93,4 +149,32 @@ function Gridap.Algebra.solve!(::MINRESIterativeSolverType, y::AbstractVector) A, kwargs = ns.A, ns.solver.kwargs return minres!(x,A,y;kwargs...) 
-end \ No newline at end of file +end + +function Gridap.Algebra.solve!(::SSORIterativeSolverType, + x::AbstractVector, + ns::IterativeLinearSolverNS, + y::AbstractVector) + iterable = ns.caches + iterable.x = x + iterable.b = y + + for item = iterable end + return x +end + +function Gridap.Algebra.solve!(::SSORIterativeSolverType, + x::PVector, + ns::IterativeLinearSolverNS, + y::PVector) + iterables = ns.caches + map_parts(iterables,x.owned_values,y.owned_values) do iterable, xi, yi + iterable.x .= xi + iterable.b .= yi + for item = iterable end + xi .= iterable.x + yi .= iterable.b + end + #exchange!(x) + return x +end diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index d01ca0d6..552e2b5d 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -2,6 +2,7 @@ module LinearSolvers using Printf using LinearAlgebra +using SparseArrays using BlockArrays using IterativeSolvers @@ -23,7 +24,9 @@ export BlockDiagonalSmoother export ConjugateGradientSolver export GMRESSolver export MINRESSolver +export SSORSolver +include("Helpers.jl") include("JacobiLinearSolvers.jl") include("RichardsonSmoothers.jl") include("GMGLinearSolvers.jl") diff --git a/src/LinearSolvers/RichardsonSmoothers.jl b/src/LinearSolvers/RichardsonSmoothers.jl index c36d086c..498bc626 100644 --- a/src/LinearSolvers/RichardsonSmoothers.jl +++ b/src/LinearSolvers/RichardsonSmoothers.jl @@ -20,28 +20,21 @@ struct RichardsonSmootherSymbolicSetup{A} <: Gridap.Algebra.SymbolicSetup end function Gridap.Algebra.symbolic_setup(smoother::RichardsonSmoother,mat::AbstractMatrix) - Mss=symbolic_setup(smoother.M,mat) + Mss = symbolic_setup(smoother.M,mat) return RichardsonSmootherSymbolicSetup(smoother,Mss) end mutable struct RichardsonSmootherNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup - smoother :: RichardsonSmoother - A :: A - Adx :: B - dx :: C - Mns :: D -end - -function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A::AbstractMatrix{T}) where T - Adx = zeros(size(A,1)) - dx = zeros(size(A,2)) - Mns = numerical_setup(ss.Mss,A) - return RichardsonSmootherNumericalSetup(ss.smoother,A,Adx,dx,Mns) + smoother :: RichardsonSmoother + A :: A + Adx :: B + dx :: C + Mns :: D end -function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A::PSparseMatrix) - Adx = PVector(0.0,A.rows) - dx = PVector(0.0,A.cols) +function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A::AbstractMatrix) + Adx = allocate_row_vector(A) + dx = allocate_col_vector(A) Mns = numerical_setup(ss.Mss,A) return RichardsonSmootherNumericalSetup(ss.smoother,A,Adx,dx,Mns) end diff --git a/test/seq/DistributedIterativeSolversTests.jl b/test/seq/DistributedIterativeSolversTests.jl new file mode 100644 index 00000000..cbbf5652 --- /dev/null +++ b/test/seq/DistributedIterativeSolversTests.jl @@ -0,0 +1,62 @@ +module DistributedIterativeSolversTests + +using Test +using Gridap +using IterativeSolvers +using LinearAlgebra +using SparseArrays + +using PartitionedArrays +using GridapSolvers +using GridapSolvers.LinearSolvers + +function l2_error(uh,vh,dΩ) + eh = uh-vh + return sum(∫(eh⋅eh)dΩ) +end + +sol(x) = sum(x) + +backend = SequentialBackend() +ranks = (1,2) +parts = get_part_ids(backend,ranks) + +model = CartesianDiscreteModel(parts,(0,1,0,1),(4,8)) + +order = 1 +reffe = ReferenceFE(lagrangian,Float64,order) +Vh = TestFESpace(model,reffe;dirichlet_tags="boundary") +Uh = TrialFESpace(Vh,sol) +Ω = Triangulation(model) +dΩ = 
Measure(Ω,2*order+1) +a(u,v) = ∫(v⋅u)*dΩ +l(v) = ∫(1*v)*dΩ + +op = AffineFEOperator(a,l,Uh,Vh) +sol_h = solve(op) + +A = get_matrix(op) +b = get_vector(op) + +# CG +solver = ConjugateGradientSolver(;maxiter=100,reltol=1.e-12) +ss = symbolic_setup(solver,A) +ns = numerical_setup(ss,A) + +x = LinearSolvers.allocate_col_vector(A) +y = copy(b) +solve!(x,ns,y) +@test l2_error(FEFunction(Uh,x),sol_h,dΩ) < 1.e-10 + +# SSOR +solver = SSORSolver(2.0/3.0;maxiter=100) +ss = symbolic_setup(solver,A) +ns = numerical_setup(ss,A) + +x = LinearSolvers.allocate_col_vector(A) +y = copy(b) +cg!(x,A,y;verbose=true,Pl=ns) +@test l2_error(FEFunction(Uh,x),sol_h,dΩ) < 1.e-10 + + +end \ No newline at end of file diff --git a/test/seq/IterativeSolversTests.jl b/test/seq/IterativeSolversTests.jl index 35442cf1..27eef482 100644 --- a/test/seq/IterativeSolversTests.jl +++ b/test/seq/IterativeSolversTests.jl @@ -4,10 +4,11 @@ using Test using Gridap using IterativeSolvers using LinearAlgebra +using SparseArrays using GridapSolvers -A = Matrix(1.0I,3,3) +A = SparseMatrixCSC(Matrix(1.0I,3,3)) # CG solver = ConjugateGradientSolver(;maxiter=100,reltol=1.e-12) @@ -39,4 +40,14 @@ y = [1.0,2.0,3.0] solve!(x,ns,y) @test x ≈ y +# SSOR +solver = SSORSolver(2.0/3.0;maxiter=100) +ss = symbolic_setup(solver,A) +ns = numerical_setup(ss,A) + +x = zeros(3) +y = [1.0,2.0,3.0] +solve!(x,ns,y) +@test x ≈ y + end \ No newline at end of file From 45b5e54464060bc7993a294533ff91e5af2c81a0 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 14 Apr 2023 12:16:46 +1000 Subject: [PATCH 06/34] Small change for SequantialData compatibility --- src/LinearSolvers/GMGLinearSolvers.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index a2e37686..4ceab313 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -120,7 +120,7 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::LinearS ss = symbolic_setup(coarsest_solver, Ah) numerical_setup(ss, Ah) end - cache = cache.part + cache = get_part(cache) else # Parallel ss = symbolic_setup(coarsest_solver, mat) cache = numerical_setup(ss, mat) @@ -143,7 +143,7 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLi ns = numerical_setup(ss, Ah) return ns, xh, rh end - cache = cache.part + cache = get_part(cache) else # Parallel rh = convert(PETScVector,PVector(0.0,mat.cols)) xh = convert(PETScVector,PVector(0.0,mat.cols)) From b3fabc78e909c749f980e321ed48dc01c6260966 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 18 Apr 2023 10:03:17 +1000 Subject: [PATCH 07/34] Added block gauss seidel --- src/GridapSolvers.jl | 1 + src/LinearSolvers/IterativeLinearSolvers.jl | 2 +- src/LinearSolvers/LinearSolvers.jl | 2 + src/LinearSolvers/RichardsonSmoothers.jl | 1 - src/LinearSolvers/SymGaussSeidelSmoothers.jl | 107 +++++++++++++++++++ test/mpi/GMGLinearSolversPoissonTests.jl | 4 +- test/mpi/RichardsonSmoothersTests.jl | 83 +++++++------- test/mpi/SymGaussSeidelSmoothersTests.jl | 63 +++++++++++ test/seq/SymGaussSeidelSmoothersTests.jl | 50 +++++++++ 9 files changed, 267 insertions(+), 46 deletions(-) create mode 100644 src/LinearSolvers/SymGaussSeidelSmoothers.jl create mode 100644 test/mpi/SymGaussSeidelSmoothersTests.jl create mode 100644 test/seq/SymGaussSeidelSmoothersTests.jl diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index e89bcdad..e63f1da0 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl 
@@ -26,6 +26,7 @@ module GridapSolvers # LinearSolvers export JacobiLinearSolver export RichardsonSmoother + export SymGaussSeidelSmoother export GMGLinearSolver export BlockDiagonalSmoother diff --git a/src/LinearSolvers/IterativeLinearSolvers.jl b/src/LinearSolvers/IterativeLinearSolvers.jl index 9b601759..491d1b60 100644 --- a/src/LinearSolvers/IterativeLinearSolvers.jl +++ b/src/LinearSolvers/IterativeLinearSolvers.jl @@ -175,6 +175,6 @@ function Gridap.Algebra.solve!(::SSORIterativeSolverType, xi .= iterable.x yi .= iterable.b end - #exchange!(x) + exchange!(x) return x end diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index 552e2b5d..60c87002 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -18,6 +18,7 @@ import LinearAlgebra: mul!, ldiv! export JacobiLinearSolver export RichardsonSmoother +export SymGaussSeidelSmoother export GMGLinearSolver export BlockDiagonalSmoother @@ -29,6 +30,7 @@ export SSORSolver include("Helpers.jl") include("JacobiLinearSolvers.jl") include("RichardsonSmoothers.jl") +include("SymGaussSeidelSmoothers.jl") include("GMGLinearSolvers.jl") include("BlockDiagonalSmoothers.jl") include("IterativeLinearSolvers.jl") diff --git a/src/LinearSolvers/RichardsonSmoothers.jl b/src/LinearSolvers/RichardsonSmoothers.jl index 498bc626..86cfb176 100644 --- a/src/LinearSolvers/RichardsonSmoothers.jl +++ b/src/LinearSolvers/RichardsonSmoothers.jl @@ -63,4 +63,3 @@ function LinearAlgebra.ldiv!(x::AbstractVector,ns::RichardsonSmootherNumericalSe solve!(x,ns,aux) return x end - diff --git a/src/LinearSolvers/SymGaussSeidelSmoothers.jl b/src/LinearSolvers/SymGaussSeidelSmoothers.jl new file mode 100644 index 00000000..008d5a27 --- /dev/null +++ b/src/LinearSolvers/SymGaussSeidelSmoothers.jl @@ -0,0 +1,107 @@ + +struct SymGaussSeidelSmoother <: Gridap.Algebra.LinearSolver + num_iters::Int +end + +struct SymGaussSeidelSymbolicSetup <: Gridap.Algebra.SymbolicSetup + solver :: SymGaussSeidelSmoother +end + +function Gridap.Algebra.symbolic_setup(s::SymGaussSeidelSmoother,A::AbstractMatrix) + SymGaussSeidelSymbolicSetup(s) +end + +# Numerical setup + +struct SymGaussSeidelNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup + solver :: SymGaussSeidelSmoother + mat :: A + L :: B + U :: C + caches :: D +end + +function _gs_get_caches(A::AbstractMatrix) + dx = allocate_col_vector(A) + Adx = allocate_row_vector(A) + return dx, Adx +end + +function _gs_decompose_matrix(A::AbstractMatrix) + D = IterativeSolvers.DiagonalIndices(A) + L = IterativeSolvers.FastLowerTriangular(A, D) + U = IterativeSolvers.FastUpperTriangular(A, D) + return L,U +end + +function Gridap.Algebra.numerical_setup(ss::SymGaussSeidelSymbolicSetup,A::AbstractMatrix) + L, U = _gs_decompose_matrix(A) + caches = _gs_get_caches(A) + return SymGaussSeidelNumericalSetup(ss.solver,A,L,U,caches) +end + +function Gridap.Algebra.numerical_setup(ss::SymGaussSeidelSymbolicSetup,A::PSparseMatrix) + L,U = map_parts(A.owned_owned_values) do A + # TODO: Unfortunately, we need to convert to CSC because the type is hardcoded in IterativeSolvers + _gs_decompose_matrix(SparseMatrixCSC(A)) + end + caches = _gs_get_caches(A) + return SymGaussSeidelNumericalSetup(ss.solver,A,L,U,caches) +end + +# Forward/backward substitution + +function forward_sub!(L,dx::AbstractArray) + IterativeSolvers.forward_sub!(L, dx) +end + +function forward_sub!(L,dx::PVector) + map_parts(L,dx.owned_values) do L, dx + IterativeSolvers.forward_sub!(L, dx) + end +end + +function 
backward_sub!(U,dx::AbstractArray) + IterativeSolvers.backward_sub!(U, dx) +end + +function backward_sub!(U,dx::PVector) + map_parts(U,dx.owned_values) do U, dx + IterativeSolvers.backward_sub!(U, dx) + end +end + +# Solve + +function Gridap.Algebra.solve!(x::AbstractVector, ns::SymGaussSeidelNumericalSetup, r::AbstractVector) + A, L, U, caches = ns.mat, ns.L, ns.U, ns.caches + dx, Adx = caches + niter = ns.solver.num_iters + + iter = 1 + while iter <= niter + # Forward pass + copy!(dx,r) + forward_sub!(L, dx) + x .= x .+ dx + mul!(Adx, A, dx) + r .= r .- Adx + + # Backward pass + copy!(dx,r) + backward_sub!(U, dx) + x .= x .+ dx + mul!(Adx, A, dx) + r .= r .- Adx + + iter += 1 + end + + return x +end + +function LinearAlgebra.ldiv!(x::AbstractVector, ns::SymGaussSeidelNumericalSetup, b::AbstractVector) + fill!(x,0.0) + aux = copy(b) + solve!(x,ns,aux) +end diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index 72dfc714..57467c92 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -37,7 +37,9 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) # Preconditioner - smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + #smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,9.0/8.0),num_levels-1) + smoothers = Fill(RichardsonSmoother(SSORSolver(1.0;maxiter=1),10,2.0/3.0),num_levels-1) + #smoothers = Fill(SSORSolver(1.0;maxiter=1),num_levels-1) restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) gmg = GMGLinearSolver(mh, diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl index ca0d2835..f4775776 100644 --- a/test/mpi/RichardsonSmoothersTests.jl +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -5,56 +5,53 @@ using MPI using Gridap using GridapDistributed using PartitionedArrays -using GridapP4est using IterativeSolvers using GridapSolvers using GridapSolvers.LinearSolvers function main(parts,partition) - GridapP4est.with(parts) do - domain = (0,1,0,1) - model = CartesianDiscreteModel(parts,domain,partition) - - sol(x) = x[1] + x[2] - f(x) = -Δ(sol)(x) - - order = 1 - qorder = order*2 + 1 - reffe = ReferenceFE(lagrangian,Float64,order) - Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") - Uh = TrialFESpace(Vh,sol) - u = interpolate(sol,Uh) - - Ω = Triangulation(model) - dΩ = Measure(Ω,qorder) - a(u,v) = ∫(∇(v)⋅∇(u))*dΩ - l(v) = ∫(v⋅f)*dΩ - - op = AffineFEOperator(a,l,Uh,Vh) - A, b = get_matrix(op), get_vector(op) - - P = RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0) - ss = symbolic_setup(P,A) - ns = numerical_setup(ss,A) - - x = PVector(1.0,A.cols) - x, history = IterativeSolvers.cg!(x,A,b; - verbose=i_am_main(parts), - reltol=1.0e-8, - Pl=ns, - log=true) - - u = interpolate(sol,Uh) - uh = FEFunction(Uh,x) - eh = uh - u - E = sum(∫(eh*eh)*dΩ) - if i_am_main(parts) - println("L2 Error: ", E) - end - - @test E < 1.e-8 + domain = (0,1,0,1) + model = CartesianDiscreteModel(parts,domain,partition) + + sol(x) = x[1] + x[2] + f(x) = -Δ(sol)(x) + + order = 1 + qorder = order*2 + 1 + reffe = ReferenceFE(lagrangian,Float64,order) + Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") + Uh = TrialFESpace(Vh,sol) + u = interpolate(sol,Uh) + + Ω = Triangulation(model) + dΩ = Measure(Ω,qorder) + a(u,v) = ∫(∇(v)⋅∇(u))*dΩ + l(v) = ∫(v⋅f)*dΩ + + op = 
AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op) + + P = RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0) + ss = symbolic_setup(P,A) + ns = numerical_setup(ss,A) + + x = PVector(1.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-8, + Pl=ns, + log=true) + + u = interpolate(sol,Uh) + uh = FEFunction(Uh,x) + eh = uh - u + E = sum(∫(eh*eh)*dΩ) + if i_am_main(parts) + println("L2 Error: ", E) end + + @test E < 1.e-8 end partition = (32,32) diff --git a/test/mpi/SymGaussSeidelSmoothersTests.jl b/test/mpi/SymGaussSeidelSmoothersTests.jl new file mode 100644 index 00000000..2edc236d --- /dev/null +++ b/test/mpi/SymGaussSeidelSmoothersTests.jl @@ -0,0 +1,63 @@ +module RichardsonSmoothersTests + +using Test +using MPI +using Gridap +using GridapDistributed +using PartitionedArrays +using IterativeSolvers + +using GridapSolvers +using GridapSolvers.LinearSolvers + +function main(parts,partition) + domain = (0,1,0,1) + model = CartesianDiscreteModel(parts,domain,partition) + + sol(x) = x[1] + x[2] + f(x) = -Δ(sol)(x) + + order = 1 + qorder = order*2 + 1 + reffe = ReferenceFE(lagrangian,Float64,order) + Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") + Uh = TrialFESpace(Vh,sol) + u = interpolate(sol,Uh) + + Ω = Triangulation(model) + dΩ = Measure(Ω,qorder) + a(u,v) = ∫(∇(v)⋅∇(u))*dΩ + l(v) = ∫(v⋅f)*dΩ + + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op) + + P = SymGaussSeidelSmoother(10) + ss = symbolic_setup(P,A) + ns = numerical_setup(ss,A) + + x = PVector(1.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-8, + Pl=ns, + log=true) + + u = interpolate(sol,Uh) + uh = FEFunction(Uh,x) + eh = uh - u + E = sum(∫(eh*eh)*dΩ) + if i_am_main(parts) + println("L2 Error: ", E) + end + + @test E < 1.e-8 +end + +partition = (32,32) +ranks = (2,2) + +with_backend(main,MPIBackend(),ranks,partition) +MPI.Finalize() + +end \ No newline at end of file diff --git a/test/seq/SymGaussSeidelSmoothersTests.jl b/test/seq/SymGaussSeidelSmoothersTests.jl new file mode 100644 index 00000000..8175893f --- /dev/null +++ b/test/seq/SymGaussSeidelSmoothersTests.jl @@ -0,0 +1,50 @@ +using Test +using MPI +using Gridap +using GridapDistributed +using PartitionedArrays +using IterativeSolvers + +using GridapSolvers +using GridapSolvers.LinearSolvers + +partition = (8,8) +domain = (0,1,0,1) +model = CartesianDiscreteModel(domain,partition) + +sol(x) = x[1] + x[2] +f(x) = -Δ(sol)(x) + +order = 1 +qorder = order*2 + 1 +reffe = ReferenceFE(lagrangian,Float64,order) +Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") +Uh = TrialFESpace(Vh,sol) +u = interpolate(sol,Uh) + +Ω = Triangulation(model) +dΩ = Measure(Ω,qorder) +a(u,v) = ∫(∇(v)⋅∇(u))*dΩ +l(v) = ∫(v⋅f)*dΩ + +op = AffineFEOperator(a,l,Uh,Vh) +A, b = get_matrix(op), get_vector(op) + +P = SymGaussSeidelSmoother(10) +ss = symbolic_setup(P,A) +ns = numerical_setup(ss,A) + +x = LinearSolvers.allocate_row_vector(A) +x, history = IterativeSolvers.cg!(x,A,b; + verbose=true, + reltol=1.0e-8, + Pl=ns, + log=true) + +u = interpolate(sol,Uh) +uh = FEFunction(Uh,x) +eh = uh - u +E = sum(∫(eh*eh)*dΩ) +println("L2 Error: ", E) + +@test E < 1.e-8 \ No newline at end of file From 792988ece6220f924e8004a6e86c160ab1e239ee Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 24 Apr 2023 17:59:09 +1000 Subject: [PATCH 08/34] Implemented own version of SymGaussSeidel --- Project.toml | 1 + src/LinearSolvers/LinearSolvers.jl | 
1 + src/LinearSolvers/SymGaussSeidelSmoothers.jl | 150 ++++++++++++++----- test/mpi/GMGLinearSolversPoissonTests.jl | 3 +- test/seq/SymGaussSeidelSmoothersTests.jl | 84 ++++++----- 5 files changed, 168 insertions(+), 71 deletions(-) diff --git a/Project.toml b/Project.toml index 623b26f5..a336d072 100644 --- a/Project.toml +++ b/Project.toml @@ -17,6 +17,7 @@ MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1" [compat] ArgParse = "1" diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index 60c87002..3e8a9ef9 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -3,6 +3,7 @@ module LinearSolvers using Printf using LinearAlgebra using SparseArrays +using SparseMatricesCSR using BlockArrays using IterativeSolvers diff --git a/src/LinearSolvers/SymGaussSeidelSmoothers.jl b/src/LinearSolvers/SymGaussSeidelSmoothers.jl index 008d5a27..87194d1c 100644 --- a/src/LinearSolvers/SymGaussSeidelSmoothers.jl +++ b/src/LinearSolvers/SymGaussSeidelSmoothers.jl @@ -1,4 +1,102 @@ +# Extensions of IterativeSolvers.jl to support parallel matrices +struct DiagonalIndices{Tv,Ti,A,B} + mat :: A + diag :: B + last :: B + function DiagonalIndices(mat ::AbstractSparseMatrix{Tv,Ti}, + diag::AbstractVector{Ti}, + last::AbstractVector{Ti}) where {Tv,Ti} + A = typeof(mat) + B = typeof(diag) + @check typeof(last) == B + new{Tv,Ti,A,B}(mat,diag,last) + end +end + +function DiagonalIndices(A::SparseMatrixCSR{Tv,Ti},row_range) where {Tv,Ti} + @notimplemented +end + +function DiagonalIndices(A::SparseMatrixCSC{Tv,Ti},col_range) where {Tv,Ti} + n = length(col_range) + diag = Vector{Ti}(undef, n) + last = Vector{Ti}(undef, n) + + for col in col_range + # Diagonal index + r1 = Int(A.colptr[col]) + r2 = Int(A.colptr[col + 1] - 1) + r1 = searchsortedfirst(A.rowval, col, r1, r2, Base.Order.Forward) + if r1 > r2 || A.rowval[r1] != col || iszero(A.nzval[r1]) + throw(LinearAlgebra.SingularException(col)) + end + diag[col] = r1 + + # Last owned index + r1 = Int(A.colptr[col]) + r2 = Int(A.colptr[col + 1] - 1) + r1 = searchsortedfirst(A.rowval, n+1, r1, r2, Base.Order.Forward) - 1 + last[col] = r1 + end + return DiagonalIndices(A,diag,last) +end + +struct LowerTriangular{Tv,Ti,A,B} + mat :: A + diag :: DiagonalIndices{Tv,Ti,A,B} +end + +struct UpperTriangular{Tv,Ti,A,B} + mat :: A + diag :: DiagonalIndices{Tv,Ti,A,B} +end + +function forward_sub!(L::LowerTriangular{Tv,Ti,<:SparseMatrixCSC},x::AbstractVector) where {Tv,Ti} + A, diag, last = L.mat, L.diag.diag, L.diag.last + n = length(diag) + for col = 1 : n + # Solve for diagonal element + idx = diag[col] + x[col] /= A.nzval[idx] + + # Substitute next values involving x[col] + for i = idx + 1 : last[col] + x[A.rowval[i]] -= A.nzval[i] * x[col] + end + end + return x +end + +function forward_sub!(L::AbstractPData{<:LowerTriangular},x::PVector) + map_parts(L,x.owned_values) do L, x + forward_sub!(L, x) + end +end + +function backward_sub!(U::UpperTriangular{Tv,Ti,<:SparseMatrixCSC}, x::AbstractVector) where {Tv,Ti} + A, diag = U.mat, U.diag.diag + n = length(diag) + for col = n : -1 : 1 + # Solve for diagonal element + idx = diag[col] + x[col] = x[col] / A.nzval[idx] + + # Substitute next values involving x[col] + for i = A.colptr[col] : idx - 1 + x[A.rowval[i]] -= A.nzval[i] * x[col] + end + end + return 
x +end + +function backward_sub!(U::AbstractPData{<:UpperTriangular},x::PVector) + map_parts(U,x.owned_values) do U, x + backward_sub!(U, x) + end +end + +# Smoother struct SymGaussSeidelSmoother <: Gridap.Algebra.LinearSolver num_iters::Int end @@ -27,50 +125,34 @@ function _gs_get_caches(A::AbstractMatrix) return dx, Adx end +_get_partition(A::PSparseMatrix,::Type{<:SparseMatrixCSC}) = A.cols.partition +_get_partition(A::PSparseMatrix,::Type{<:SparseMatrixCSR}) = A.rows.partition + function _gs_decompose_matrix(A::AbstractMatrix) - D = IterativeSolvers.DiagonalIndices(A) - L = IterativeSolvers.FastLowerTriangular(A, D) - U = IterativeSolvers.FastUpperTriangular(A, D) + idx_range = 1:minimum(size(A)) + D = DiagonalIndices(A,idx_range) + L = LowerTriangular(A, D) + U = UpperTriangular(A, D) return L,U end -function Gridap.Algebra.numerical_setup(ss::SymGaussSeidelSymbolicSetup,A::AbstractMatrix) - L, U = _gs_decompose_matrix(A) - caches = _gs_get_caches(A) - return SymGaussSeidelNumericalSetup(ss.solver,A,L,U,caches) +function _gs_decompose_matrix(A::PSparseMatrix{T,<:AbstractPData{MatType}}) where {T, MatType} + partition = _get_partition(A,MatType) + L,U = map_parts(A.values,partition) do A, partition + D = DiagonalIndices(A,partition.oid_to_lid) + L = LowerTriangular(A,D) + U = UpperTriangular(A,D) + return L,U + end + return L,U end -function Gridap.Algebra.numerical_setup(ss::SymGaussSeidelSymbolicSetup,A::PSparseMatrix) - L,U = map_parts(A.owned_owned_values) do A - # TODO: Unfortunately, we need to convert to CSC because the type is hardcoded in IterativeSolvers - _gs_decompose_matrix(SparseMatrixCSC(A)) - end +function Gridap.Algebra.numerical_setup(ss::SymGaussSeidelSymbolicSetup,A::AbstractMatrix) + L, U = _gs_decompose_matrix(A) caches = _gs_get_caches(A) return SymGaussSeidelNumericalSetup(ss.solver,A,L,U,caches) end -# Forward/backward substitution - -function forward_sub!(L,dx::AbstractArray) - IterativeSolvers.forward_sub!(L, dx) -end - -function forward_sub!(L,dx::PVector) - map_parts(L,dx.owned_values) do L, dx - IterativeSolvers.forward_sub!(L, dx) - end -end - -function backward_sub!(U,dx::AbstractArray) - IterativeSolvers.backward_sub!(U, dx) -end - -function backward_sub!(U,dx::PVector) - map_parts(U,dx.owned_values) do U, dx - IterativeSolvers.backward_sub!(U, dx) - end -end - # Solve function Gridap.Algebra.solve!(x::AbstractVector, ns::SymGaussSeidelNumericalSetup, r::AbstractVector) diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index 57467c92..ef69f48d 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -38,8 +38,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, # Preconditioner #smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,9.0/8.0),num_levels-1) - smoothers = Fill(RichardsonSmoother(SSORSolver(1.0;maxiter=1),10,2.0/3.0),num_levels-1) - #smoothers = Fill(SSORSolver(1.0;maxiter=1),num_levels-1) + smoothers = Fill(SymGaussSeidelSmoother(5),num_levels-1) restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) gmg = GMGLinearSolver(mh, diff --git a/test/seq/SymGaussSeidelSmoothersTests.jl b/test/seq/SymGaussSeidelSmoothersTests.jl index 8175893f..e63aa740 100644 --- a/test/seq/SymGaussSeidelSmoothersTests.jl +++ b/test/seq/SymGaussSeidelSmoothersTests.jl @@ -1,3 +1,5 @@ +module SymGaussSeidelSmoothersTests + using Test using MPI using Gridap @@ -8,43 +10,55 @@ using IterativeSolvers using 
GridapSolvers using GridapSolvers.LinearSolvers +sol(x) = x[1] + x[2] +f(x) = -Δ(sol)(x) + +function main(model) + order = 1 + qorder = order*2 + 1 + reffe = ReferenceFE(lagrangian,Float64,order) + Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") + Uh = TrialFESpace(Vh,sol) + u = interpolate(sol,Uh) + + Ω = Triangulation(model) + dΩ = Measure(Ω,qorder) + a(u,v) = ∫(∇(v)⋅∇(u))*dΩ + l(v) = ∫(v⋅f)*dΩ + + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op); + + P = SymGaussSeidelSmoother(10) + ss = symbolic_setup(P,A) + ns = numerical_setup(ss,A) + + x = LinearSolvers.allocate_col_vector(A) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=true, + reltol=1.0e-8, + Pl=ns, + log=true); + + u = interpolate(sol,Uh) + uh = FEFunction(Uh,x) + eh = uh - u + E = sum(∫(eh*eh)*dΩ) + return E < 1.e-8 +end + +# Completely serial partition = (8,8) domain = (0,1,0,1) model = CartesianDiscreteModel(domain,partition) +@test main(model) -sol(x) = x[1] + x[2] -f(x) = -Δ(sol)(x) +# Sequential +backend = SequentialBackend() +ranks = (1,2) +parts = get_part_ids(backend,ranks) + +model = CartesianDiscreteModel(parts,domain,partition) +@test main(model) -order = 1 -qorder = order*2 + 1 -reffe = ReferenceFE(lagrangian,Float64,order) -Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") -Uh = TrialFESpace(Vh,sol) -u = interpolate(sol,Uh) - -Ω = Triangulation(model) -dΩ = Measure(Ω,qorder) -a(u,v) = ∫(∇(v)⋅∇(u))*dΩ -l(v) = ∫(v⋅f)*dΩ - -op = AffineFEOperator(a,l,Uh,Vh) -A, b = get_matrix(op), get_vector(op) - -P = SymGaussSeidelSmoother(10) -ss = symbolic_setup(P,A) -ns = numerical_setup(ss,A) - -x = LinearSolvers.allocate_row_vector(A) -x, history = IterativeSolvers.cg!(x,A,b; - verbose=true, - reltol=1.0e-8, - Pl=ns, - log=true) - -u = interpolate(sol,Uh) -uh = FEFunction(Uh,x) -eh = uh - u -E = sum(∫(eh*eh)*dΩ) -println("L2 Error: ", E) - -@test E < 1.e-8 \ No newline at end of file +end \ No newline at end of file From 9a02adbb2f2b2dc0baa93d10db6b6ff7b8e8b971 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 25 Apr 2023 18:49:09 +1000 Subject: [PATCH 09/34] Implemented GMRES --- src/GridapSolvers.jl | 6 +- src/LinearSolvers/GMRESSolvers.jl | 105 ++++++++++++++++ src/LinearSolvers/IterativeLinearSolvers.jl | 8 +- src/LinearSolvers/LinearSolvers.jl | 10 +- test/seq/DistributedIterativeSolversTests.jl | 62 --------- test/seq/GMRESSolversTests.jl | 56 +++++++++ test/seq/IterativeSolversTests.jl | 125 +++++++++++++------ 7 files changed, 260 insertions(+), 112 deletions(-) create mode 100644 src/LinearSolvers/GMRESSolvers.jl delete mode 100644 test/seq/DistributedIterativeSolversTests.jl create mode 100644 test/seq/GMRESSolversTests.jl diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index e63f1da0..87294562 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -31,9 +31,11 @@ module GridapSolvers export BlockDiagonalSmoother export ConjugateGradientSolver + export IS_GMRESSolver + export IS_MINRESSolver + export IS_SSORSolver + export GMRESSolver - export MINRESSolver - export SSORSolver # PatchBasedSmoothers export PatchDecomposition diff --git a/src/LinearSolvers/GMRESSolvers.jl b/src/LinearSolvers/GMRESSolvers.jl new file mode 100644 index 00000000..5c0d63ba --- /dev/null +++ b/src/LinearSolvers/GMRESSolvers.jl @@ -0,0 +1,105 @@ + +# Orthogonalization + + + + +# GMRES Solver +struct GMRESSolver <: Gridap.Algebra.LinearSolver + m ::Int + Pl + tol::Float64 +end + +struct GMRESSymbolicSetup <: Gridap.Algebra.SymbolicSetup + solver +end 
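+
+# Minimal usage sketch (assumed setup, mirroring test/seq/GMRESSolversTests.jl):
+# restarted GMRES with basis size `m`, left preconditioner `Pl` and absolute
+# residual tolerance `tol`. The Jacobi preconditioner is just one possible choice.
+#
+#   solver = GMRESSolver(20,JacobiLinearSolver(),1.e-8)
+#   ns     = numerical_setup(symbolic_setup(solver,A),A)
+#   solve!(x,ns,b)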
+
+function Gridap.Algebra.symbolic_setup(solver::GMRESSolver, A::AbstractMatrix)
+  return GMRESSymbolicSetup(solver)
+end
+
+struct GMRESNumericalSetup <: Gridap.Algebra.NumericalSetup
+  solver
+  A
+  Pl_ns
+  caches
+end
+
+function get_gmres_caches(m,A)
+  w = allocate_col_vector(A)
+  V = [allocate_col_vector(A) for i in 1:m+1]
+  Z = [allocate_col_vector(A) for i in 1:m]
+
+  H = zeros(m+1,m)  # Hessenberg matrix
+  g = zeros(m+1)    # Residual vector
+  c = zeros(m)      # Givens rotation cosines
+  s = zeros(m)      # Givens rotation sines
+  return (w,V,Z,H,g,c,s)
+end
+
+function Gridap.Algebra.numerical_setup(ss::GMRESSymbolicSetup, A::AbstractMatrix)
+  solver = ss.solver
+  Pl_ns  = numerical_setup(symbolic_setup(solver.Pl,A),A)
+  caches = get_gmres_caches(solver.m,A)
+  return GMRESNumericalSetup(solver,A,Pl_ns,caches)
+end
+
+function Gridap.Algebra.solve!(x::AbstractVector,ns::GMRESNumericalSetup,b::AbstractVector)
+  solver, A, Pl, caches = ns.solver, ns.A, ns.Pl_ns, ns.caches
+  m, tol = solver.m, solver.tol
+  w, V, Z, H, g, c, s = caches
+
+  # Initial residual
+  mul!(w,A,x); w .= b .- w
+
+  β    = norm(w)
+  iter = 0; println("Iteration ", iter, " - Residual: ", β)
+  while (β > tol)
+    fill!(H,0.0)
+
+    # Arnoldi process
+    fill!(g,0.0); g[1] = β
+    V[1] .= w ./ β
+    for j in 1:m
+      # Arnoldi orthogonalization by Modified Gram-Schmidt
+      solve!(Z[j],Pl,V[j])
+      mul!(w,A,Z[j])
+      for i in 1:j
+        H[i,j] = dot(w,V[i])
+        w .= w .- H[i,j] .* V[i]
+      end
+      H[j+1,j] = norm(w)
+      V[j+1] = w ./ H[j+1,j]
+
+      # Update QR
+      for i in 1:j-1
+        γ = c[i]*H[i,j] + s[i]*H[i+1,j]
+        H[i+1,j] = -s[i]*H[i,j] + c[i]*H[i+1,j]
+        H[i,j] = γ
+      end
+
+      # New Givens rotation, update QR and residual
+      c[j], s[j], _ = LinearAlgebra.givensAlgorithm(H[j,j],H[j+1,j])
+      H[j,j] = c[j]*H[j,j] + s[j]*H[j+1,j]; H[j+1,j] = 0.0
+      g[j+1] = -s[j]*g[j]; g[j] = c[j]*g[j]
+
+      β = abs(g[j+1])
+    end
+
+    # Solve least squares problem Hy = g by backward substitution
+    for i in m:-1:1
+      g[i] = (g[i] - dot(H[i,i+1:m],g[i+1:m])) / H[i,i]
+    end
+
+    # Update solution & residual
+    for i in 1:m
+      x .+= g[i] .* Z[i]
+    end
+    mul!(w,A,x); w .= b .- w
+
+    iter += 1; println("Iteration ", iter, " - Residual: ", β)
+  end
+
+  return x
+end
diff --git a/src/LinearSolvers/IterativeLinearSolvers.jl b/src/LinearSolvers/IterativeLinearSolvers.jl
index 491d1b60..552dfa9f 100644
--- a/src/LinearSolvers/IterativeLinearSolvers.jl
+++ b/src/LinearSolvers/IterativeLinearSolvers.jl
@@ -28,25 +28,25 @@ SolverType(::IterativeLinearSolver{T}) where T = T()
 
-function ConjugateGradientSolver(;kwargs...)
+function IS_ConjugateGradientSolver(;kwargs...)
   options = [:statevars,:initially_zero,:Pl,:abstol,:reltol,:maxiter,:verbose,:log]
   @check all(map(opt -> opt ∈ options,keys(kwargs)))
   return IterativeLinearSolver(CGIterativeSolverType(),nothing,kwargs)
 end
 
-function GMRESSolver(;kwargs...)
+function IS_GMRESSolver(;kwargs...)
   options = [:initially_zero,:abstol,:reltol,:restart,:maxiter,:Pl,:Pr,:log,:verbose,:orth_meth]
   @check all(map(opt -> opt ∈ options,keys(kwargs)))
   return IterativeLinearSolver(GMRESIterativeSolverType(),nothing,kwargs)
 end
 
-function MINRESSolver(;kwargs...)
+function IS_MINRESSolver(;kwargs...)
   options = [:initially_zero,:skew_hermitian,:abstol,:reltol,:maxiter,:log,:verbose]
   @check all(map(opt -> opt ∈ options,keys(kwargs)))
   return IterativeLinearSolver(MINRESIterativeSolverType(),nothing,kwargs)
 end
 
-function SSORSolver(ω::Real;kwargs...)
+function IS_SSORSolver(ω::Real;kwargs...)
options = [:maxiter] @check all(map(opt -> opt ∈ options,keys(kwargs))) args = Dict(:ω => ω) diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index 3e8a9ef9..42968313 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -23,10 +23,13 @@ export SymGaussSeidelSmoother export GMGLinearSolver export BlockDiagonalSmoother -export ConjugateGradientSolver +# Wrappers for IterativeSolvers.jl +export IS_ConjugateGradientSolver +export IS_GMRESSolver +export IS_MINRESSolver +export IS_SSORSolver + export GMRESSolver -export MINRESSolver -export SSORSolver include("Helpers.jl") include("JacobiLinearSolvers.jl") @@ -35,5 +38,6 @@ include("SymGaussSeidelSmoothers.jl") include("GMGLinearSolvers.jl") include("BlockDiagonalSmoothers.jl") include("IterativeLinearSolvers.jl") +include("GMRESSolvers.jl") end \ No newline at end of file diff --git a/test/seq/DistributedIterativeSolversTests.jl b/test/seq/DistributedIterativeSolversTests.jl deleted file mode 100644 index cbbf5652..00000000 --- a/test/seq/DistributedIterativeSolversTests.jl +++ /dev/null @@ -1,62 +0,0 @@ -module DistributedIterativeSolversTests - -using Test -using Gridap -using IterativeSolvers -using LinearAlgebra -using SparseArrays - -using PartitionedArrays -using GridapSolvers -using GridapSolvers.LinearSolvers - -function l2_error(uh,vh,dΩ) - eh = uh-vh - return sum(∫(eh⋅eh)dΩ) -end - -sol(x) = sum(x) - -backend = SequentialBackend() -ranks = (1,2) -parts = get_part_ids(backend,ranks) - -model = CartesianDiscreteModel(parts,(0,1,0,1),(4,8)) - -order = 1 -reffe = ReferenceFE(lagrangian,Float64,order) -Vh = TestFESpace(model,reffe;dirichlet_tags="boundary") -Uh = TrialFESpace(Vh,sol) -Ω = Triangulation(model) -dΩ = Measure(Ω,2*order+1) -a(u,v) = ∫(v⋅u)*dΩ -l(v) = ∫(1*v)*dΩ - -op = AffineFEOperator(a,l,Uh,Vh) -sol_h = solve(op) - -A = get_matrix(op) -b = get_vector(op) - -# CG -solver = ConjugateGradientSolver(;maxiter=100,reltol=1.e-12) -ss = symbolic_setup(solver,A) -ns = numerical_setup(ss,A) - -x = LinearSolvers.allocate_col_vector(A) -y = copy(b) -solve!(x,ns,y) -@test l2_error(FEFunction(Uh,x),sol_h,dΩ) < 1.e-10 - -# SSOR -solver = SSORSolver(2.0/3.0;maxiter=100) -ss = symbolic_setup(solver,A) -ns = numerical_setup(ss,A) - -x = LinearSolvers.allocate_col_vector(A) -y = copy(b) -cg!(x,A,y;verbose=true,Pl=ns) -@test l2_error(FEFunction(Uh,x),sol_h,dΩ) < 1.e-10 - - -end \ No newline at end of file diff --git a/test/seq/GMRESSolversTests.jl b/test/seq/GMRESSolversTests.jl new file mode 100644 index 00000000..182c5a66 --- /dev/null +++ b/test/seq/GMRESSolversTests.jl @@ -0,0 +1,56 @@ + +using Test +using Gridap +using GridapDistributed +using PartitionedArrays +using IterativeSolvers + +using GridapSolvers +using GridapSolvers.LinearSolvers + +sol(x) = x[1] + x[2] +f(x) = -Δ(sol)(x) + +function main(model) + order = 1 + qorder = order*2 + 1 + reffe = ReferenceFE(lagrangian,Float64,order) + Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") + Uh = TrialFESpace(Vh,sol) + u = interpolate(sol,Uh) + + Ω = Triangulation(model) + dΩ = Measure(Ω,qorder) + a(u,v) = ∫(∇(v)⋅∇(u))*dΩ + l(v) = ∫(v⋅f)*dΩ + + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op); + + Pl = JacobiLinearSolver() + solver = LinearSolvers.GMRESSolver(20,Pl,1.e-8) + ns = numerical_setup(symbolic_setup(solver,A),A) + + x = LinearSolvers.allocate_col_vector(A) + solve!(x,ns,b) + + u = interpolate(sol,Uh) + uh = FEFunction(Uh,x) + eh = uh - u + E = sum(∫(eh*eh)*dΩ) + 
return E < 1.e-8 +end + +# Completely serial +partition = (10,10) +domain = (0,1,0,1) +model = CartesianDiscreteModel(domain,partition) +@test main(model) + +# Sequential +backend = SequentialBackend() +ranks = (1,2) +parts = get_part_ids(backend,ranks) + +model = CartesianDiscreteModel(parts,domain,partition) +@test main(model) diff --git a/test/seq/IterativeSolversTests.jl b/test/seq/IterativeSolversTests.jl index 27eef482..46d94875 100644 --- a/test/seq/IterativeSolversTests.jl +++ b/test/seq/IterativeSolversTests.jl @@ -5,49 +5,92 @@ using Gridap using IterativeSolvers using LinearAlgebra using SparseArrays +using PartitionedArrays using GridapSolvers +using GridapSolvers.LinearSolvers -A = SparseMatrixCSC(Matrix(1.0I,3,3)) - -# CG -solver = ConjugateGradientSolver(;maxiter=100,reltol=1.e-12) -ss = symbolic_setup(solver,A) -ns = numerical_setup(ss,A) - -x = zeros(3) -y = [1.0,2.0,3.0] -solve!(x,ns,y) -@test x ≈ y - -# GMRES -solver = GMRESSolver(;maxiter=100,reltol=1.e-12) -ss = symbolic_setup(solver,A) -ns = numerical_setup(ss,A) - -x = zeros(3) -y = [1.0,2.0,3.0] -solve!(x,ns,y) -@test x ≈ y - -# MINRES -solver = MINRESSolver(;maxiter=100,reltol=1.e-12) -ss = symbolic_setup(solver,A) -ns = numerical_setup(ss,A) - -x = zeros(3) -y = [1.0,2.0,3.0] -solve!(x,ns,y) -@test x ≈ y - -# SSOR -solver = SSORSolver(2.0/3.0;maxiter=100) -ss = symbolic_setup(solver,A) -ns = numerical_setup(ss,A) - -x = zeros(3) -y = [1.0,2.0,3.0] -solve!(x,ns,y) -@test x ≈ y +sol(x) = x[1] + x[2] +f(x) = -Δ(sol)(x) + +function l2_error(x,Uh,dΩ) + u = interpolate(sol,Uh) + uh = FEFunction(Uh,x) + eh = uh - u + return sum(∫(eh*eh)*dΩ) +end + +function main(model,is_distributed) + order = 1 + qorder = order*2 + 1 + reffe = ReferenceFE(lagrangian,Float64,order) + Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") + Uh = TrialFESpace(Vh,sol) + u = interpolate(sol,Uh) + + Ω = Triangulation(model) + dΩ = Measure(Ω,qorder) + a(u,v) = ∫(∇(v)⋅∇(u))*dΩ + l(v) = ∫(v⋅f)*dΩ + + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op); + + # CG + solver = IS_ConjugateGradientSolver(;maxiter=100,reltol=1.e-12) + ss = symbolic_setup(solver,A) + ns = numerical_setup(ss,A) + + x = LinearSolvers.allocate_col_vector(A) + y = copy(b) + solve!(x,ns,y) + @test l2_error(x,Uh,dΩ) < 1.e-8 + + # SSOR + solver = IS_SSORSolver(2.0/3.0;maxiter=100) + ss = symbolic_setup(solver,A) + ns = numerical_setup(ss,A) + + x = LinearSolvers.allocate_row_vector(A) + y = copy(b) + solve!(x,ns,y) + !is_distributed && (@test l2_error(x,Uh,dΩ) < 1.e-8) + + if !is_distributed + # GMRES + solver = IS_GMRESSolver(;maxiter=100,reltol=1.e-12) + ss = symbolic_setup(solver,A) + ns = numerical_setup(ss,A) + + x = LinearSolvers.allocate_row_vector(A) + y = copy(b) + solve!(x,ns,y) + @test l2_error(x,Uh,dΩ) < 1.e-8 + + # MINRES + solver = IS_MINRESSolver(;maxiter=100,reltol=1.e-12) + ss = symbolic_setup(solver,A) + ns = numerical_setup(ss,A) + + x = LinearSolvers.allocate_row_vector(A) + y = copy(b) + solve!(x,ns,y) + @test l2_error(x,Uh,dΩ) < 1.e-8 + end +end + +# Completely serial +partition = (8,8) +domain = (0,1,0,1) +model = CartesianDiscreteModel(domain,partition) +main(model,false) + +# Sequential +backend = SequentialBackend() +ranks = (1,2) +parts = get_part_ids(backend,ranks) + +model = CartesianDiscreteModel(parts,domain,partition) +main(model,true) end \ No newline at end of file From 52f59da33eebd745965e48164c7970c7c3c05117 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 25 Apr 2023 19:02:59 +1000 Subject: [PATCH 
10/34] Added tests --- test/runtests.jl | 11 +++++++---- test/seq/GMRESSolversTests.jl | 3 +++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index 0f62f474..db2d6c38 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -46,11 +46,8 @@ function run_tests(testdir) elseif f in ["ModelHierarchiesTests.jl"] np = 6 extra_args = "" - elseif f in [""] - np = 1 - extra_args = "" else - np = nprocs + np = 4 # nprocs extra_args = "" end if ! image_file_exists @@ -69,4 +66,10 @@ end run_tests(joinpath(@__DIR__, "mpi")) # Sequential tests +@time @testset "BlockDiagonalSmoothersPETScTests" begin include("seq/BlockDiagonalSmoothersPETScTests.jl") end +@time @testset "BlockDiagonalSmoothersTests" begin include("seq/BlockDiagonalSmoothersTests.jl") end +@time @testset "DistributedPatchFESpacesTests" begin include("seq/DistributedPatchFESpacesTests.jl") end +@time @testset "GMRESSolversTests" begin include("seq/GMRESSolversTests.jl") end +@time @testset "IterativeSolversTests" begin include("seq/IterativeSolversTests.jl") end @time @testset "PatchLinearSolverTests" begin include("seq/PatchLinearSolverTests.jl") end +@time @testset "SymGaussSeidelSmoothersTests" begin include("seq/SymGaussSeidelSmoothersTests.jl") end diff --git a/test/seq/GMRESSolversTests.jl b/test/seq/GMRESSolversTests.jl index 182c5a66..fa3e1579 100644 --- a/test/seq/GMRESSolversTests.jl +++ b/test/seq/GMRESSolversTests.jl @@ -1,3 +1,4 @@ +module GMRESSolversTests using Test using Gridap @@ -54,3 +55,5 @@ parts = get_part_ids(backend,ranks) model = CartesianDiscreteModel(parts,domain,partition) @test main(model) + +end \ No newline at end of file From aabe6b14acdf68ec3c15ba0aa390f0a19487264b Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 28 Apr 2023 12:38:18 +0930 Subject: [PATCH 11/34] Added SchurComplementSolver --- src/LinearSolvers/GMRESSolvers.jl | 7 +- src/LinearSolvers/IdentityLinearSolvers.jl | 26 ++++ src/LinearSolvers/LinearSolvers.jl | 7 + src/LinearSolvers/SchurComplementSolvers.jl | 124 +++++++++++++++++ test/seq/SchurComplementSolversTests.jl | 145 ++++++++++++++++++++ 5 files changed, 307 insertions(+), 2 deletions(-) create mode 100644 src/LinearSolvers/IdentityLinearSolvers.jl create mode 100644 src/LinearSolvers/SchurComplementSolvers.jl create mode 100644 test/seq/SchurComplementSolversTests.jl diff --git a/src/LinearSolvers/GMRESSolvers.jl b/src/LinearSolvers/GMRESSolvers.jl index 5c0d63ba..7262b2da 100644 --- a/src/LinearSolvers/GMRESSolvers.jl +++ b/src/LinearSolvers/GMRESSolvers.jl @@ -49,13 +49,15 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::GMRESNumericalSetup,b::Abst solver, A, Pl, caches = ns.solver, ns.A, ns.Pl_ns, ns.caches m, tol = solver.m, solver.tol w, V, Z, H, g, c, s = caches + println(" > Starting GMRES solve: ") # Initial residual mul!(w,A,x); w .= b .- w β = norm(w) - iter = 0; println("Iteration ", iter, " - Residual: ", β) + iter = 0 while (β > tol) + println(" > Iteration ", iter," - Residual: ", β) fill!(H,0.0) # Arnoldi process @@ -98,8 +100,9 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::GMRESNumericalSetup,b::Abst end mul!(w,A,x); w .= b .- w - iter += 1; println("Iteration ", iter, " - Residual: ", β) + iter += 1 end + println(" > Iteration ", iter," - Residual: ", β) return x end diff --git a/src/LinearSolvers/IdentityLinearSolvers.jl b/src/LinearSolvers/IdentityLinearSolvers.jl new file mode 100644 index 00000000..93a24d64 --- /dev/null +++ b/src/LinearSolvers/IdentityLinearSolvers.jl @@ -0,0 +1,26 @@ + +# 
Identity solver, for testing purposes
+struct IdentitySolver <: Gridap.Algebra.LinearSolver
+end
+
+struct IdentitySymbolicSetup <: Gridap.Algebra.SymbolicSetup
+  solver
+end
+
+function Gridap.Algebra.symbolic_setup(s::IdentitySolver,A::AbstractMatrix)
+  IdentitySymbolicSetup(s)
+end
+
+struct IdentityNumericalSetup <: Gridap.Algebra.NumericalSetup
+  solver
+end
+
+function Gridap.Algebra.numerical_setup(ss::IdentitySymbolicSetup,mat::AbstractMatrix)
+  s = ss.solver
+  return IdentityNumericalSetup(s)
+end
+
+function Gridap.Algebra.solve!(x::AbstractVector,ns::IdentityNumericalSetup,y::AbstractVector)
+  copy!(x,y)
+  return x
+end
diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl
index 42968313..35009b2d 100644
--- a/src/LinearSolvers/LinearSolvers.jl
+++ b/src/LinearSolvers/LinearSolvers.jl
@@ -10,9 +10,13 @@ using IterativeSolvers
 using Gridap
 using Gridap.Helpers
 using Gridap.Algebra
+using Gridap.FESpaces
+using Gridap.MultiField

 using PartitionedArrays
 using GridapPETSc
+using GridapDistributed
+
 using GridapSolvers.MultilevelTools

 import LinearAlgebra: mul!, ldiv!
@@ -30,8 +34,10 @@ export IS_MINRESSolver
 export IS_SSORSolver

 export GMRESSolver
+export SchurComplementSolver

 include("Helpers.jl")
+include("IdentityLinearSolvers.jl")
 include("JacobiLinearSolvers.jl")
 include("RichardsonSmoothers.jl")
 include("SymGaussSeidelSmoothers.jl")
 include("GMGLinearSolvers.jl")
 include("BlockDiagonalSmoothers.jl")
 include("IterativeLinearSolvers.jl")
 include("GMRESSolvers.jl")
+include("SchurComplementSolvers.jl")

 end
\ No newline at end of file
diff --git a/src/LinearSolvers/SchurComplementSolvers.jl b/src/LinearSolvers/SchurComplementSolvers.jl
new file mode 100644
index 00000000..d97f07d5
--- /dev/null
+++ b/src/LinearSolvers/SchurComplementSolvers.jl
@@ -0,0 +1,124 @@
+
+"""
+  Schur complement solver
+
+  [A B] ^ -1    [Ip -A^-1 B]   [A^-1     ]   [ Ip       ]
+  [C D]       = [        Iq] ⋅ [      S^-1] ⋅ [-C A^-1 Iq]
+
+  where S = D - C A^-1 B
+"""
+struct SchurComplementSolver{T1,T2,T3,T4} <: Gridap.Algebra.LinearSolver
+  A :: T1
+  B :: T2
+  C :: T3
+  S :: T4
+  function SchurComplementSolver(A::Gridap.Algebra.NumericalSetup,
+                                 B::AbstractMatrix,
+                                 C::AbstractMatrix,
+                                 S::Gridap.Algebra.NumericalSetup)
+    T1 = typeof(A)
+    T2 = typeof(B)
+    T3 = typeof(C)
+    T4 = typeof(S)
+    return new{T1,T2,T3,T4}(A,B,C,S)
+  end
+end
+
+struct SchurComplementSymbolicSetup <: Gridap.Algebra.SymbolicSetup
+  solver
+end
+
+function Gridap.Algebra.symbolic_setup(s::SchurComplementSolver,A::AbstractMatrix)
+  SchurComplementSymbolicSetup(s)
+end
+
+struct SchurComplementNumericalSetup <: Gridap.Algebra.NumericalSetup
+  solver
+  mat
+  ranges
+  caches
+end
+
+function get_schur_complement_caches(B::AbstractMatrix,C::AbstractMatrix)
+  du1 = LinearSolvers.allocate_col_vector(C)
+  du2 = LinearSolvers.allocate_col_vector(C)
+  dp  = LinearSolvers.allocate_col_vector(B)
+
+  rv_u = LinearSolvers.allocate_row_vector(B)
+  rv_p = LinearSolvers.allocate_row_vector(C)
+  return (du1,du2,dp,rv_u,rv_p)
+end
+
+function get_block_ranges(B::AbstractMatrix,C::AbstractMatrix)
+  u_range = 1:size(C,2)
+  p_range = size(C,2) .+ (1:size(B,2))
+  return u_range, p_range
+end
+
+function get_block_ranges(B::PSparseMatrix,C::PSparseMatrix)
+  ranges = map_parts(B.owned_owned_values,C.owned_owned_values) do B,C
+    get_block_ranges(B,C)
+  end
+  return ranges
+end
+
+function Gridap.Algebra.numerical_setup(ss::SchurComplementSymbolicSetup,mat::AbstractMatrix)
+  s = ss.solver
+  B,C = s.B, s.C
+  ranges = get_block_ranges(B,C)
+  caches = get_schur_complement_caches(B,C)
+  return SchurComplementNumericalSetup(s,mat,ranges,caches)
+end
+
+function to_blocks!(x::AbstractVector,u,p,ranges)
+  u_range, p_range = ranges
+  u .= x[u_range]
+  p .= x[p_range]
+  return u,p
+end
+
+function to_blocks!(x::PVector,u,p,ranges)
+  map_parts(x.owned_values,u.owned_values,p.owned_values,ranges) do x,u,p,ranges
+    to_blocks!(x,u,p,ranges)
+  end
+  exchange!(u)
+  exchange!(p)
+  return u,p
+end
+
+function to_global!(x::AbstractVector,u,p,ranges)
+  u_range, p_range = ranges
+  x[u_range] .= u
+  x[p_range] .= p
+  return x
+end
+
+function to_global!(x::PVector,u,p,ranges)
+  map_parts(x.owned_values,u.owned_values,p.owned_values,ranges) do x,u,p,ranges
+    to_global!(x,u,p,ranges)
+  end
+  exchange!(x)
+  return x
+end
+
+function Gridap.Algebra.solve!(x::AbstractVector,ns::SchurComplementNumericalSetup,y::AbstractVector)
+  s = ns.solver
+  A,B,C,S = s.A,s.B,s.C,s.S
+  du1,du2,dp,rv_u,rv_p = ns.caches
+
+  # Split y into blocks
+  to_blocks!(y,rv_u,rv_p,ns.ranges)
+
+  # Solve Schur complement
+  solve!(du1,A,rv_u)         # du1 = A^-1 y_u
+  mul!(rv_p,C,du1,1.0,-1.0)  # b1  = C*du1 - y_p
+  solve!(dp,S,rv_p)          # dp  = S^-1 b1
+  mul!(rv_u,B,dp)            # b2  = B*dp
+  solve!(du2,A,rv_u)         # du2 = A^-1 b2
+  du1 .-= du2                # du  = du1 - du2
+
+  # Assemble into global
+  to_global!(x,du1,dp,ns.ranges)
+
+  return x
+end
diff --git a/test/seq/SchurComplementSolversTests.jl b/test/seq/SchurComplementSolversTests.jl
new file mode 100644
index 00000000..5fd15c98
--- /dev/null
+++ b/test/seq/SchurComplementSolversTests.jl
@@ -0,0 +1,145 @@
+using Gridap
+using Gridap.MultiField
+using Gridap.Algebra
+using Gridap.Geometry
+using Gridap.FESpaces
+using Gridap.ReferenceFEs
+
+using PartitionedArrays
+using GridapDistributed
+
+using GridapSolvers
+using GridapSolvers.LinearSolvers
+using GridapSolvers.MultilevelTools
+
+function l2_error(xh,sol,dΩ)
+  eh = xh - sol
+  e = sum(∫(eh⋅eh)dΩ)
+  return e
+end
+
+function l2_error(x,sol,X,dΩ)
+  xh = FEFunction(X,x)
+  return l2_error(xh,sol,dΩ)
+end
+
+backend = SequentialBackend()
+ranks = (2,2)
+parts = get_part_ids(backend,ranks)
+
+# Darcy solution
+u_ref(x) = VectorValue(x[1]+x[2],-x[2])
+p_ref(x) = 2.0*x[1]-1.0
+f_ref(x) = u_ref(x) + ∇(p_ref)(x)
+
+D = 2
+n = 20
+domain = Tuple(repeat([0,1],D))
+partition = (n,n)
+model = CartesianDiscreteModel(parts,domain,partition)
+
+labels = get_face_labeling(model)
+add_tag_from_tags!(labels,"dirichlet",[1,2,3,4,5,6,7])
+add_tag_from_tags!(labels,"newmann",[8,])
+
+order = 0
+reffeᵤ = ReferenceFE(raviart_thomas,Float64,order)
+V = TestFESpace(model,reffeᵤ,conformity=:HDiv,dirichlet_tags="dirichlet")
+U = TrialFESpace(V,u_ref)
+
+reffeₚ = ReferenceFE(lagrangian,Float64,order;space=:P)
+Q = TestFESpace(model,reffeₚ,conformity=:L2)
+P = TrialFESpace(Q,p_ref)
+
+Y = MultiFieldFESpace([V, Q])
+X = MultiFieldFESpace([U, P])
+
+qdegree = 4
+Ω = Triangulation(model)
+dΩ = Measure(Ω,qdegree)
+
+Γ_N = BoundaryTriangulation(model;tags="newmann")
+dΓ_N = Measure(Γ_N,qdegree)
+n_Γ_N = get_normal_vector(Γ_N)
+
+const β_U = 50.0
+const γ = 100.0
+
+a(u,v) = ∫(v⊙u)dΩ + ∫(γ*(∇⋅v)*(∇⋅u))dΩ
+b(p,v) = ∫(-(∇⋅v)*p)dΩ
+c(u,q) = ∫(- q*(∇⋅u))dΩ
+
+biform((u,p),(v,q)) = a(u,v) + b(p,v) + c(u,q)
+liform((v,q)) = ∫(f_ref⋅v)dΩ - ∫((v⋅n_Γ_N)⋅p_ref)dΓ_N
+
+op = AffineFEOperator(biform,liform,X,Y)
+sysmat, sysvec = get_matrix(op), get_vector(op);
+
+############################################################################################
+# Solve by global matrix factorization
+
+xh = solve(op)
+uh, ph = xh
+err_u1 = l2_error(uh,u_ref,dΩ)
+err_p1
= l2_error(ph,p_ref,dΩ) + +############################################################################################ +# Solve by exact Schur complement + +A = assemble_matrix(a,U,V) +B = assemble_matrix(b,P,V) +C = assemble_matrix(c,U,Q) + +#= Adense = Matrix(A) +Ainv = inv(Adense) +S = - B * Ainv * C + +A_solver = BackslashSolver() +A_ns = numerical_setup(symbolic_setup(A_solver,Adense),Adense) + +S_solver = BackslashSolver() +S_ns = numerical_setup(symbolic_setup(S_solver,S),S) + +sc_solver = SchurComplementSolver(X,A_ns,B,C,S_ns) +sc_ns = numerical_setup(symbolic_setup(sc_solver,sysmat),sysmat) + +x = zero_free_values(X) +solve!(x,sc_ns,sysvec) + +xh = FEFunction(X,x) +uh, ph = xhdu1 +err_u2 = l2_error(uh,u_ref,dΩ) +err_p2 = l2_error(ph,p_ref,dΩ) =# + +############################################################################################ +# Solve by GMRES preconditioned with inexact Schur complement + +s(p,q) = ∫(γ*p*q)dΩ +PS = assemble_matrix(s,P,Q) +PS_solver = BackslashSolver() +PS_ns = numerical_setup(symbolic_setup(PS_solver,PS),PS) + +A_solver = BackslashSolver() +A_ns = numerical_setup(symbolic_setup(A_solver,A),A) + +psc_solver = SchurComplementSolver(A_ns,B,C,PS_ns); +psc_ns = numerical_setup(symbolic_setup(psc_solver,sysmat),sysmat) + +x = LinearSolvers.allocate_col_vector(sysmat) +b0 = copy(sysvec) +solve!(x,psc_ns,b0) + + + +id_solver = LinearSolvers.IdentitySolver() + +gmres = GMRESSolver(20,psc_solver,1e-6) +gmres_ns = numerical_setup(symbolic_setup(gmres,sysmat),sysmat) + +x = LinearSolvers.allocate_col_vector(sysmat) +solve!(x,gmres_ns,sysvec) + +xh = FEFunction(X,x) +uh, ph = xh +err_u3 = l2_error(uh,u_ref,dΩ) +err_p3 = l2_error(ph,p_ref,dΩ) From 2826fb09ae8f11f5f2c9df57dce0dcb04726b286 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 28 Apr 2023 12:48:06 +0930 Subject: [PATCH 12/34] Added SchurComplementSolversTests --- test/runtests.jl | 1 + test/seq/SchurComplementSolversTests.jl | 165 +++++++++++------------- 2 files changed, 76 insertions(+), 90 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index db2d6c38..135129cb 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -73,3 +73,4 @@ run_tests(joinpath(@__DIR__, "mpi")) @time @testset "IterativeSolversTests" begin include("seq/IterativeSolversTests.jl") end @time @testset "PatchLinearSolverTests" begin include("seq/PatchLinearSolverTests.jl") end @time @testset "SymGaussSeidelSmoothersTests" begin include("seq/SymGaussSeidelSmoothersTests.jl") end +@time @testset "SchurComplementSolversTests" begin include("seq/SchurComplementSolversTests.jl") end diff --git a/test/seq/SchurComplementSolversTests.jl b/test/seq/SchurComplementSolversTests.jl index 5fd15c98..18e12f1a 100644 --- a/test/seq/SchurComplementSolversTests.jl +++ b/test/seq/SchurComplementSolversTests.jl @@ -1,3 +1,6 @@ +module SchurComplementSolversTests + +using Test using Gridap using Gridap.MultiField using Gridap.Algebra @@ -23,123 +26,105 @@ function l2_error(x,sol,X,dΩ) return l2_error(xh,sol,dΩ) end -backend = SequentialBackend() -ranks = (2,2) -parts = get_part_ids(backend,ranks) - # Darcy solution +const β_U = 50.0 +const γ = 100.0 + u_ref(x) = VectorValue(x[1]+x[2],-x[2]) p_ref(x) = 2.0*x[1]-1.0 f_ref(x) = u_ref(x) + ∇(p_ref)(x) -D = 2 -n = 20 -domain = Tuple(repeat([0,1],D)) -partition = (n,n) -model = CartesianDiscreteModel(parts,domain,partition) +function main(model) -labels = get_face_labeling(model) -add_tag_from_tags!(labels,"dirichlet",[1,2,3,4,5,6,7]) -add_tag_from_tags!(labels,"newmann",[8,]) + labels = 
get_face_labeling(model) + add_tag_from_tags!(labels,"dirichlet",[1,2,3,4,5,6,7]) + add_tag_from_tags!(labels,"newmann",[8,]) -order = 0 -reffeᵤ = ReferenceFE(raviart_thomas,Float64,order) -V = TestFESpace(model,reffeᵤ,conformity=:HDiv,dirichlet_tags="dirichlet") -U = TrialFESpace(V,u_ref) + order = 0 + reffeᵤ = ReferenceFE(raviart_thomas,Float64,order) + V = TestFESpace(model,reffeᵤ,conformity=:HDiv,dirichlet_tags="dirichlet") + U = TrialFESpace(V,u_ref) -reffeₚ = ReferenceFE(lagrangian,Float64,order;space=:P) -Q = TestFESpace(model,reffeₚ,conformity=:L2) -P = TrialFESpace(Q,p_ref) + reffeₚ = ReferenceFE(lagrangian,Float64,order;space=:P) + Q = TestFESpace(model,reffeₚ,conformity=:L2) + P = TrialFESpace(Q,p_ref) -Y = MultiFieldFESpace([V, Q]) -X = MultiFieldFESpace([U, P]) + Y = MultiFieldFESpace([V, Q]) + X = MultiFieldFESpace([U, P]) -qdegree = 4 -Ω = Triangulation(model) -dΩ = Measure(Ω,qdegree) + qdegree = 4 + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) -Γ_N = BoundaryTriangulation(model;tags="newmann") -dΓ_N = Measure(Γ_N,qdegree) -n_Γ_N = get_normal_vector(Γ_N) - -const β_U = 50.0 -const γ = 100.0 + Γ_N = BoundaryTriangulation(model;tags="newmann") + dΓ_N = Measure(Γ_N,qdegree) + n_Γ_N = get_normal_vector(Γ_N) -a(u,v) = ∫(v⊙u)dΩ + ∫(γ*(∇⋅v)*(∇⋅u))dΩ -b(p,v) = ∫(-(∇⋅v)*p)dΩ -c(u,q) = ∫(- q*(∇⋅u))dΩ + a(u,v) = ∫(v⊙u)dΩ + ∫(γ*(∇⋅v)*(∇⋅u))dΩ + b(p,v) = ∫(-(∇⋅v)*p)dΩ + c(u,q) = ∫(- q*(∇⋅u))dΩ -biform((u,p),(v,q)) = a(u,v) + b(p,v) + c(u,q) -liform((v,q)) = ∫(f_ref⋅v)dΩ - ∫((v⋅n_Γ_N)⋅p_ref)dΓ_N + biform((u,p),(v,q)) = a(u,v) + b(p,v) + c(u,q) + liform((v,q)) = ∫(f_ref⋅v)dΩ - ∫((v⋅n_Γ_N)⋅p_ref)dΓ_N -op = AffineFEOperator(biform,liform,X,Y) -sysmat, sysvec = get_matrix(op), get_vector(op); + op = AffineFEOperator(biform,liform,X,Y) + sysmat, sysvec = get_matrix(op), get_vector(op); -############################################################################################ -# Solve by global matrix factorization + A = assemble_matrix(a,U,V) + B = assemble_matrix(b,P,V) + C = assemble_matrix(c,U,Q) -xh = solve(op) -uh, ph = xh -err_u1 = l2_error(uh,u_ref,dΩ) -err_p1 = l2_error(ph,p_ref,dΩ) + ############################################################################################ + # Solve by global matrix factorization -############################################################################################ -# Solve by exact Schur complement + xh = solve(op) + uh, ph = xh + err_u1 = l2_error(uh,u_ref,dΩ) + err_p1 = l2_error(ph,p_ref,dΩ) -A = assemble_matrix(a,U,V) -B = assemble_matrix(b,P,V) -C = assemble_matrix(c,U,Q) + ############################################################################################ + # Solve by GMRES preconditioned with inexact Schur complement -#= Adense = Matrix(A) -Ainv = inv(Adense) -S = - B * Ainv * C + s(p,q) = ∫(γ*p*q)dΩ + PS = assemble_matrix(s,P,Q) + PS_solver = BackslashSolver() + PS_ns = numerical_setup(symbolic_setup(PS_solver,PS),PS) -A_solver = BackslashSolver() -A_ns = numerical_setup(symbolic_setup(A_solver,Adense),Adense) + A_solver = BackslashSolver() + A_ns = numerical_setup(symbolic_setup(A_solver,A),A) -S_solver = BackslashSolver() -S_ns = numerical_setup(symbolic_setup(S_solver,S),S) + psc_solver = SchurComplementSolver(A_ns,B,C,PS_ns); -sc_solver = SchurComplementSolver(X,A_ns,B,C,S_ns) -sc_ns = numerical_setup(symbolic_setup(sc_solver,sysmat),sysmat) + gmres = GMRESSolver(20,psc_solver,1e-6) + gmres_ns = numerical_setup(symbolic_setup(gmres,sysmat),sysmat) -x = zero_free_values(X) -solve!(x,sc_ns,sysvec) + x = 
LinearSolvers.allocate_col_vector(sysmat)
+  solve!(x,gmres_ns,sysvec)
+
+  xh = FEFunction(X,x)
+  uh, ph = xh
+  err_u3 = l2_error(uh,u_ref,dΩ)
+  err_p3 = l2_error(ph,p_ref,dΩ)
+
+  @test err_u3 ≈ err_u1
+  @test err_p3 ≈ err_p1
+end
+
+backend = SequentialBackend()
+ranks = (2,2)
+parts = get_part_ids(backend,ranks)
+
+D = 2
+n = 40
+domain = Tuple(repeat([0,1],D))
+partition = (n,n)
+
+# Serial
+model = CartesianDiscreteModel(domain,partition)
+main(model)
+
+# Distributed, sequential
+model = CartesianDiscreteModel(parts,domain,partition)
+main(model)
+
+end
\ No newline at end of file

From 4536e097a4ebf24432782c1f8699f6c332c0c268 Mon Sep 17 00:00:00 2001
From: JordiManyer
Date: Fri, 28 Apr 2023 14:33:13 +0930
Subject: [PATCH 13/34] Fixed tests

---
 test/seq/DistributedPatchFESpacesTests.jl | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl
index 18dc37c3..7a0a4ac0 100644
--- a/test/seq/DistributedPatchFESpacesTests.jl
+++ b/test/seq/DistributedPatchFESpacesTests.jl
@@ -21,10 +21,10 @@ domain = (0.0,1.0,0.0,1.0)
 partition = (2,4)
 model = CartesianDiscreteModel(parts,domain,partition)

-# order = 1
-# reffe = ReferenceFE(lagrangian,Float64,order)
-order = 0
-reffe = ReferenceFE(raviart_thomas,Float64,order)
+order = 1
+reffe = ReferenceFE(lagrangian,Float64,order)
+#order = 0
+#reffe = ReferenceFE(raviart_thomas,Float64,order)
 Vh = TestFESpace(model,reffe)
 PD = PBS.PatchDecomposition(model)
 Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh)

From 888bf78f5d63466f4b5d2f5c81258d8f96c97a55 Mon Sep 17 00:00:00 2001
From: JordiManyer
Date: Fri, 28 Apr 2023 22:45:33 +0930
Subject: [PATCH 14/34] Working concept for integration on patch skeletons

---
 test/seq/PatchBasedTesting.jl | 126 ++++++++++++++++++++++++++++++++++
 1 file changed, 126 insertions(+)
 create mode 100644 test/seq/PatchBasedTesting.jl

diff --git a/test/seq/PatchBasedTesting.jl b/test/seq/PatchBasedTesting.jl
new file mode 100644
index 00000000..7e8ca313
--- /dev/null
+++ b/test/seq/PatchBasedTesting.jl
@@ -0,0 +1,126 @@
+
+using LinearAlgebra
+using Test
+using PartitionedArrays
+using Gridap
+using Gridap.Arrays
+using Gridap.Helpers
+using Gridap.Geometry
+using Gridap.ReferenceFEs
+using GridapDistributed
+using FillArrays
+
+using GridapSolvers
+import GridapSolvers.PatchBasedSmoothers as PBS
+
+
+function Gridap.Geometry.SkeletonTriangulation(PD::PatchDecomposition)
+  model = PD.model
+  labeling = get_face_labeling(model)
+
topo = get_grid_topology(model) + + patch_cells = Gridap.Arrays.Table(PD.patch_cells) + + c2e_map = get_faces(topo,2,1) + patch_cells_edges = map(Reindex(c2e_map),patch_cells.data) + + is_boundary = get_face_mask(labeling,["boundary"],1) + interior_edges = zeros(Int64,length(is_boundary)) + count = 1 + for i in 1:length(is_boundary) + if !is_boundary[i] + interior_edges[i] = count + count += 1 + end + end + + edges_on_boundary = PD.patch_cells_faces_on_boundary[2] + _patch_edges = map((E,mask)->E[.!mask],patch_cells_edges,edges_on_boundary) + __patch_edges = map(E-> filter(e -> !is_boundary[e],E), _patch_edges) + patch_edges = Gridap.Arrays.Table(__patch_edges) + + patch_edges_data = lazy_map(Reindex(interior_edges),patch_edges.data) + + Λ = SkeletonTriangulation(model) + return view(Λ,patch_edges_data) +end + +backend = SequentialBackend() +ranks = (1,2) +parts = get_part_ids(backend,ranks) + +domain = (0.0,1.0,0.0,1.0) +partition = (2,4) +model = CartesianDiscreteModel(domain,partition) + +order = 1; reffe = ReferenceFE(lagrangian,Float64,order;space=:P); conformity = L2Conformity(); +#order = 1; reffe = ReferenceFE(lagrangian,Float64,order); conformity = H1Conformity(); +#order = 0; reffe = ReferenceFE(raviart_thomas,Float64,order); conformity = HDivConformity(); +Vh = TestFESpace(model,reffe,conformity=conformity) +PD = PBS.PatchDecomposition(model) +Ph = PBS.PatchFESpace(model,reffe,conformity,PD,Vh) + +# ---- Assemble systems ---- # + +Ω = Triangulation(model) +dΩ = Measure(Ω,2*order+1) +a(u,v) = ∫(v⋅u)*dΩ +l(v) = ∫(1*v)*dΩ + +assembler = SparseMatrixAssembler(Vh,Vh) +Ah = assemble_matrix(a,assembler,Vh,Vh) +fh = assemble_vector(l,assembler,Vh) + +sol_h = solve(LUSolver(),Ah,fh) + +Ωₚ = Triangulation(PD) +dΩₚ = Measure(Ωₚ,2*order+1) +ap(u,v) = ∫(v⋅u)*dΩₚ +lp(v) = ∫(1*v)*dΩₚ + +assembler_P = SparseMatrixAssembler(Ph,Ph) +Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) +fhp = assemble_vector(lp,assembler_P,Ph) + +# Skeleton Triangulation +labeling = get_face_labeling(model) +topo = get_grid_topology(model) + +patch_cells = Gridap.Arrays.Table(PD.patch_cells) + +c2e_map = get_faces(topo,2,1) +patch_cells_edges = map(Reindex(c2e_map),patch_cells.data) + +is_boundary = get_face_mask(labeling,["boundary"],1) +interior_edges = zeros(Int64,length(is_boundary)) +count = 1 +for i in 1:length(is_boundary) + if !is_boundary[i] + interior_edges[i] = count + count += 1 + end +end + +edges_on_boundary = PD.patch_cells_faces_on_boundary[2] +_patch_edges = map((E,mask)->E[.!mask],patch_cells_edges,edges_on_boundary) +__patch_edges = map(E-> filter(e -> !is_boundary[e],E), _patch_edges) +patch_edges = Gridap.Arrays.Table(__patch_edges) + +patch_edges_data = lazy_map(Reindex(interior_edges),patch_edges.data) + +Λ = SkeletonTriangulation(model) +Λₚ = view(Λ,patch_edges_data) +dΛₚ = Measure(Λₚ,3) + +β = 10 +aΓ(u,v) = ∫(β⋅jump(v)⋅jump(u))*dΛₚ + +v = get_fe_basis(Ph) +u = get_trial_fe_basis(Ph) +cf = (β⋅jump(v)⋅jump(u)) +contr = aΓ(u,v) + +matdata_edges = first(contr.dict)[2] + +patch_edges_overlapped = Gridap.Arrays.Table(collect(1:length(patch_edges.data)),patch_edges.ptrs) +matdata_cells = lazy_map(Geometry.CombineContributionsMap(matdata_edges),patch_edges_overlapped) From bffa924ee201f7cdbff63c259e312fa46134587d Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 9 May 2023 17:28:25 +1000 Subject: [PATCH 15/34] Added PatchTriangulations --- .../PatchBasedSmoothers.jl | 1 + .../seq/PatchDecompositions.jl | 198 +++++++++++++++++- src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 11 +- 
.../seq/PatchTriangulations.jl | 102 +++++++++ test/seq/PatchBasedTesting.jl | 113 +++++----- 5 files changed, 357 insertions(+), 68 deletions(-) create mode 100644 src/PatchBasedSmoothers/seq/PatchTriangulations.jl diff --git a/src/PatchBasedSmoothers/PatchBasedSmoothers.jl b/src/PatchBasedSmoothers/PatchBasedSmoothers.jl index e6a03299..d4673061 100644 --- a/src/PatchBasedSmoothers/PatchBasedSmoothers.jl +++ b/src/PatchBasedSmoothers/PatchBasedSmoothers.jl @@ -19,6 +19,7 @@ export PatchFESpace export PatchBasedLinearSolver include("seq/PatchDecompositions.jl") +include("seq/PatchTriangulations.jl") include("seq/PatchFESpaces.jl") include("seq/PatchBasedLinearSolvers.jl") diff --git a/src/PatchBasedSmoothers/seq/PatchDecompositions.jl b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl index 913f0c3d..c549e76e 100644 --- a/src/PatchBasedSmoothers/seq/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl @@ -2,6 +2,8 @@ abstract type PatchBoundaryStyle end ; struct PatchBoundaryExclude <: PatchBoundaryStyle end ; struct PatchBoundaryInclude <: PatchBoundaryStyle end ; +# TODO: Make patch_cells a Table + # Question? Might a patch decomposition involve patches # with roots of different topological dimension? # This is not currently supported. @@ -56,11 +58,6 @@ function PatchDecomposition( patch_cells_faces_on_boundary) end -function Gridap.Geometry.Triangulation(a::PatchDecomposition) - patch_cells = Gridap.Arrays.Table(a.patch_cells) - return view(Triangulation(a.model),patch_cells.data) -end - function setup_patch_cells_overlapped_mesh(patch_cells) num_patches = length(patch_cells) cache = array_cache(patch_cells) @@ -222,3 +219,194 @@ function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, end end end + +# Patch cell faces: +# patch_faces[pcell] = [face1, face2, ...] +# where face1, face2, ... 
are the faces of the overlapped cell `pcell` such that +# - they are NOT on the boundary of the patch +# - they are flagged `true` in faces_mask +function get_patch_cell_faces(PD::PatchDecomposition,Df::Integer) + model = PD.model + topo = get_grid_topology(model) + faces_mask = Fill(true,num_faces(topo,Df)) + return get_patch_cell_faces(PD,Df,faces_mask) +end + +function get_patch_cell_faces(PD::PatchDecomposition{Dc},Df::Integer,faces_mask) where Dc + model = PD.model + topo = get_grid_topology(model) + + c2e_map = Gridap.Geometry.get_faces(topo,Dc,Df) + patch_cells = Gridap.Arrays.Table(PD.patch_cells) + patch_cell_faces = map(Reindex(c2e_map),patch_cells.data) + faces_on_boundary = PD.patch_cells_faces_on_boundary[Df+1] + + patch_faces = _allocate_patch_cell_faces(patch_cell_faces,faces_on_boundary,faces_mask) + _generate_patch_cell_faces!(patch_faces,patch_cell_faces,faces_on_boundary,faces_mask) + + return patch_faces +end + +function _allocate_patch_cell_faces(patch_cell_faces,faces_on_boundary,faces_mask) + num_patch_cells = length(patch_cell_faces) + + num_patch_faces = 0 + patch_cells_faces_cache = array_cache(patch_cell_faces) + faces_on_boundary_cache = array_cache(faces_on_boundary) + for iC in 1:num_patch_cells + cell_faces = getindex!(patch_cells_faces_cache,patch_cell_faces,iC) + on_boundary = getindex!(faces_on_boundary_cache,faces_on_boundary,iC) + for (iF,face) in enumerate(cell_faces) + if (!on_boundary[iF] && faces_mask[face]) + num_patch_faces += 1 + end + end + end + + patch_faces_data = zeros(Int64,num_patch_faces) + patch_faces_ptrs = zeros(Int64,num_patch_cells+1) + return Gridap.Arrays.Table(patch_faces_data,patch_faces_ptrs) +end + +function _generate_patch_cell_faces!(patch_faces,patch_cell_faces,faces_on_boundary,faces_mask) + num_patch_cells = length(patch_cell_faces) + patch_faces_data, patch_faces_ptrs = patch_faces.data, patch_faces.ptrs + + pface = 1 + patch_faces_ptrs[1] = 1 + patch_cells_faces_cache = array_cache(patch_cell_faces) + faces_on_boundary_cache = array_cache(faces_on_boundary) + for iC in 1:num_patch_cells + cell_faces = getindex!(patch_cells_faces_cache,patch_cell_faces,iC) + on_boundary = getindex!(faces_on_boundary_cache,faces_on_boundary,iC) + patch_faces_ptrs[iC+1] = patch_faces_ptrs[iC] + for (iF,face) in enumerate(cell_faces) + if (!on_boundary[iF] && faces_mask[face]) + patch_faces_data[pface] = face + patch_faces_ptrs[iC+1] += 1 + pface += 1 + end + end + end + + return patch_faces +end + +# Patch faces: +# patch_faces[patch] = [face1, face2, ...] +# where face1, face2, ... 
are the faces of the patch such that +# - they are NOT on the boundary of the patch +# - they are flagged `true` in faces_mask +function get_patch_faces(PD::PatchDecomposition{Dc},Df::Integer,faces_mask) where Dc + model = PD.model + topo = get_grid_topology(model) + + c2e_map = Gridap.Geometry.get_faces(topo,Dc,Df) + patch_cells = Gridap.Arrays.Table(PD.patch_cells) + patch_cell_faces = map(Reindex(c2e_map),patch_cells.data) + faces_on_boundary = PD.patch_cells_faces_on_boundary[Df+1] + + patch_faces = _allocate_patch_faces(patch_cells,patch_cell_faces,faces_on_boundary,faces_mask) + _generate_patch_faces!(patch_faces,patch_cells,patch_cell_faces,faces_on_boundary,faces_mask) + + return patch_faces +end + +function _allocate_patch_faces(patch_cells,patch_cell_faces,faces_on_boundary,faces_mask) + num_patches = length(patch_cells) + + touched = Dict{Int,Bool}() + pcell = 1 + num_patch_faces = 0 + patch_cells_cache = array_cache(patch_cells) + patch_cells_faces_cache = array_cache(patch_cell_faces) + faces_on_boundary_cache = array_cache(faces_on_boundary) + for patch in 1:num_patches + current_patch_cells = getindex!(patch_cells_cache,patch_cells,patch) + for iC_local in 1:length(current_patch_cells) + cell_faces = getindex!(patch_cells_faces_cache,patch_cell_faces,pcell) + on_boundary = getindex!(faces_on_boundary_cache,faces_on_boundary,pcell) + for (iF,face) in enumerate(cell_faces) + if (!on_boundary[iF] && faces_mask[face] && !haskey(touched,face)) + num_patch_faces += 1 + touched[face] = true + end + end + pcell += 1 + end + empty!(touched) + end + + patch_faces_data = zeros(Int64,num_patch_faces) + patch_faces_ptrs = zeros(Int64,num_patches+1) + return Gridap.Arrays.Table(patch_faces_data,patch_faces_ptrs) +end + +function _generate_patch_faces!(patch_faces,patch_cells,patch_cell_faces,faces_on_boundary,faces_mask) + num_patches = length(patch_cells) + patch_faces_data, patch_faces_ptrs = patch_faces.data, patch_faces.ptrs + + touched = Dict{Int,Bool}() + pcell = 1 + pface = 1 + patch_faces_ptrs[1] = 1 + patch_cells_cache = array_cache(patch_cells) + patch_cells_faces_cache = array_cache(patch_cell_faces) + faces_on_boundary_cache = array_cache(faces_on_boundary) + for patch in 1:num_patches + current_patch_cells = getindex!(patch_cells_cache,patch_cells,patch) + patch_faces_ptrs[patch+1] = patch_faces_ptrs[patch] + for _ in 1:length(current_patch_cells) + cell_faces = getindex!(patch_cells_faces_cache,patch_cell_faces,pcell) + on_boundary = getindex!(faces_on_boundary_cache,faces_on_boundary,pcell) + for (iF,face) in enumerate(cell_faces) + if (!on_boundary[iF] && faces_mask[face] && !haskey(touched,face)) + patch_faces_data[pface] = face + patch_faces_ptrs[patch+1] += 1 + touched[face] = true + pface += 1 + end + end + pcell += 1 + end + empty!(touched) + end + + return patch_faces +end + +# Face connectivity for the patches +# pfaces_to_pcells[pface] = [pcell1, pcell2, ...] +# This would be the Gridap equivalent to `get_faces(patch_topology,Df,Dc)`. +# The argument `patch_faces` allows to select only some pfaces (i.e boundary/skeleton/etc...). 
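+#
+# Illustrative example (hypothetical indices): for a patch whose two overlapped
+# cells pc1,pc2 share a single interior face f, one would expect
+#
+#   patch_faces      = Table([[f],...])         # one entry per patch
+#   pfaces_to_pcells = Table([[pc1,pc2],...])   # overlapped cells around each pface
+#
+# where pc1,pc2 index into the overlapped mesh (PD.patch_cells_overlapped_mesh).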
+function get_pfaces_to_pcells(PD::PatchDecomposition{Dc},Df::Integer,patch_faces) where Dc + model = PD.model + topo = get_grid_topology(model) + + faces_to_cells = Gridap.Geometry.get_faces(topo,Df,Dc) + pfaces_to_cells = lazy_map(Reindex(faces_to_cells),patch_faces.data) + patch_cells = Gridap.Arrays.Table(PD.patch_cells) + patch_cells_overlapped = PD.patch_cells_overlapped_mesh + + num_patches = length(patch_cells) + pf2pc_ptrs = Gridap.Adaptivity.counts_to_ptrs(map(length,pfaces_to_cells)) + pf2pc_data = zeros(Int64,pf2pc_ptrs[end]-1) + + patch_cells_cache = array_cache(patch_cells) + patch_cells_overlapped_cache = array_cache(patch_cells_overlapped) + pfaces_to_cells_cache = array_cache(pfaces_to_cells) + for patch in 1:num_patches + cells = getindex!(patch_cells_cache,patch_cells,patch) + cells_overlapped = getindex!(patch_cells_overlapped_cache,patch_cells_overlapped,patch) + for pface in patch_faces.ptrs[patch]:patch_faces.ptrs[patch+1]-1 + pface_to_cells = getindex!(pfaces_to_cells_cache,pfaces_to_cells,pface) + for (lid,cell) in enumerate(pface_to_cells) + lid_patch = findfirst(c->c==cell,cells) + pf2pc_data[pf2pc_ptrs[pface]+lid-1] = cells_overlapped[lid_patch] + end + end + end + + pfaces_to_pcells = Gridap.Arrays.Table(pf2pc_data,pf2pc_ptrs) + return pfaces_to_pcells +end diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index 9c6358f6..ebaa21eb 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -77,13 +77,20 @@ end Gridap.FESpaces.get_dof_value_type(a::PatchFESpace) = Gridap.FESpaces.get_dof_value_type(a.Vh) Gridap.FESpaces.get_free_dof_ids(a::PatchFESpace) = Base.OneTo(a.num_dofs) -Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace) = a.patch_cell_dofs_ids -Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::Triangulation) = a.patch_cell_dofs_ids Gridap.FESpaces.get_fe_basis(a::PatchFESpace) = get_fe_basis(a.Vh) Gridap.FESpaces.ConstraintStyle(::PatchFESpace) = Gridap.FESpaces.UnConstrained() Gridap.FESpaces.get_vector_type(a::PatchFESpace) = get_vector_type(a.Vh) Gridap.FESpaces.get_fe_dof_basis(a::PatchFESpace) = get_fe_dof_basis(a.Vh) +Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace) = a.patch_cell_dofs_ids +Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::Triangulation) = @notimplemented +Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::PatchTriangulation) = a.patch_cell_dofs_ids + +function Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,trian::Gridap.Geometry.TriangulationView) + cell_dof_ids = get_cell_dof_ids(a,trian.parent) + return lazy_map(Reindex(cell_dof_ids),trian.cell_to_parent_cell) +end + function Gridap.FESpaces.scatter_free_and_dirichlet_values(f::PatchFESpace,free_values,dirichlet_values) cell_vals = Gridap.Fields.PosNegReindex(free_values,dirichlet_values) return lazy_map(Broadcasting(cell_vals),f.patch_cell_dofs_ids) diff --git a/src/PatchBasedSmoothers/seq/PatchTriangulations.jl b/src/PatchBasedSmoothers/seq/PatchTriangulations.jl new file mode 100644 index 00000000..1c2e90f7 --- /dev/null +++ b/src/PatchBasedSmoothers/seq/PatchTriangulations.jl @@ -0,0 +1,102 @@ + +struct PatchTriangulation{Dc,Dp,A,B,C} <: Gridap.Geometry.Triangulation{Dc,Dp} + trian :: A + PD :: B + patch_faces :: C + + function PatchTriangulation(trian::Triangulation{Dc,Dp},PD::PatchDecomposition,patch_faces) where {Dc,Dp} + A = typeof(trian) + B = typeof(PD) + C = typeof(patch_faces) + new{Dc,Dp,A,B,C}(trian,PD,patch_faces) + end +end + +# Triangulation API + 
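+# A PatchTriangulation answers the standard Triangulation queries by delegating
+# to the triangulation it wraps, so Measures and CellFields can be built on it
+# as usual.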
+function Geometry.get_background_model(t::PatchTriangulation) + get_background_model(t.trian) +end + +function Geometry.get_grid(t::PatchTriangulation) + get_grid(t.trian) +end + +function Geometry.get_glue(t::PatchTriangulation,::Val{d}) where d + get_glue(t.trian,Val(d)) +end + +function Geometry.get_facet_normal(trian::PatchTriangulation) + get_facet_normal(trian.trian) +end + +# Constructors + +function Gridap.Geometry.Triangulation(PD::PatchDecomposition) + patch_cells = Gridap.Arrays.Table(PD.patch_cells) + trian = view(Triangulation(PD.model),patch_cells.data) + return PatchTriangulation(trian,PD,patch_cells) +end + +function Gridap.Geometry.BoundaryTriangulation(PD::PatchDecomposition{Dc}) where Dc + Df = Dc -1 + model = PD.model + labeling = get_face_labeling(model) + + is_boundary = get_face_mask(labeling,["boundary"],Df) + patch_edges = get_patch_cell_faces(PD,1,is_boundary) + + Γ = BoundaryTriangulation(model) + glue = get_glue(Γ,Val(Df)) + mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) + patch_edges_data = lazy_map(Reindex(mface_to_tface),patch_edges.data) + + trian = view(Γ,patch_edges_data) + return PatchTriangulation(trian,PD,patch_edges) +end + +function Gridap.Geometry.SkeletonTriangulation(PD::PatchDecomposition{Dc}) where Dc + Df = Dc -1 + model = PD.model + labeling = get_face_labeling(model) + + is_interior = get_face_mask(labeling,["interior"],Df) + patch_edges = get_patch_cell_faces(PD,Df,is_interior) + + Λ = SkeletonTriangulation(model) + glue = get_glue(Λ,Val(Df)) + mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) + patch_edges_data = lazy_map(Reindex(mface_to_tface),patch_edges.data) + + trian = view(Λ,patch_edges_data) + return PatchTriangulation(trian,PD,patch_edges) +end + +# Integration + +function Gridap.Geometry.move_contributions(scell_to_val::AbstractArray,strian::PatchTriangulation) + return move_contributions(scell_to_val,strian,strian.PD) +end + +function Gridap.Geometry.move_contributions( + scell_to_val::AbstractArray, + strian::PatchTriangulation{Df}, + PD::PatchDecomposition{Dc}) where {Dc,Df} + + # If cell-wise triangulation, + if Df == Dc + return scell_to_val, strian + end + + # If not cell-wise, combine contributions in overlapped cells + patch_faces = strian.patch_faces + patch_faces_overlapped = Gridap.Arrays.Table(collect(1:length(patch_faces.data)),patch_faces.ptrs) + _scell_to_val = lazy_map(Geometry.CombineContributionsMap(scell_to_val),patch_faces_overlapped) + + touched_cells = findall(map(i->patch_faces.ptrs[i] != patch_faces.ptrs[i+1],1:length(patch_faces))) + touched_cell_to_val = lazy_map(Reindex(_scell_to_val),touched_cells) + cell_trian = Triangulation(PD) + touched_cell_trian = view(cell_trian,touched_cells) + + return touched_cell_to_val, touched_cell_trian +end diff --git a/test/seq/PatchBasedTesting.jl b/test/seq/PatchBasedTesting.jl index 7e8ca313..2da512f7 100644 --- a/test/seq/PatchBasedTesting.jl +++ b/test/seq/PatchBasedTesting.jl @@ -7,44 +7,13 @@ using Gridap.Arrays using Gridap.Helpers using Gridap.Geometry using Gridap.ReferenceFEs +using Gridap.FESpaces using GridapDistributed using FillArrays using GridapSolvers import GridapSolvers.PatchBasedSmoothers as PBS - -function Gridap.Geometry.SkeletonTriangulation(PD::PatchDecomposition) - model = PD.model - labeling = get_face_labeling(model) - topo = get_grid_topology(model) - - patch_cells = Gridap.Arrays.Table(PD.patch_cells) - - c2e_map = get_faces(topo,2,1) - 
patch_cells_edges = map(Reindex(c2e_map),patch_cells.data) - - is_boundary = get_face_mask(labeling,["boundary"],1) - interior_edges = zeros(Int64,length(is_boundary)) - count = 1 - for i in 1:length(is_boundary) - if !is_boundary[i] - interior_edges[i] = count - count += 1 - end - end - - edges_on_boundary = PD.patch_cells_faces_on_boundary[2] - _patch_edges = map((E,mask)->E[.!mask],patch_cells_edges,edges_on_boundary) - __patch_edges = map(E-> filter(e -> !is_boundary[e],E), _patch_edges) - patch_edges = Gridap.Arrays.Table(__patch_edges) - - patch_edges_data = lazy_map(Reindex(interior_edges),patch_edges.data) - - Λ = SkeletonTriangulation(model) - return view(Λ,patch_edges_data) -end - backend = SequentialBackend() ranks = (1,2) parts = get_part_ids(backend,ranks) @@ -64,8 +33,16 @@ Ph = PBS.PatchFESpace(model,reffe,conformity,PD,Vh) Ω = Triangulation(model) dΩ = Measure(Ω,2*order+1) -a(u,v) = ∫(v⋅u)*dΩ -l(v) = ∫(1*v)*dΩ +Λ = Skeleton(model) +dΛ = Measure(Λ,3) +Γ = Boundary(model) +dΓ = Measure(Γ,3) + +aΩ(u,v) = ∫(v⋅u)*dΩ +aΛ(u,v) = ∫(jump(v)⋅jump(u))*dΛ +aΓ(u,v) = ∫(v⋅u)*dΓ +a(u,v) = aΩ(u,v) + aΛ(u,v) + aΓ(u,v) +l(v) = ∫(1*v)*dΩ assembler = SparseMatrixAssembler(Vh,Vh) Ah = assemble_matrix(a,assembler,Vh,Vh) @@ -82,45 +59,59 @@ assembler_P = SparseMatrixAssembler(Ph,Ph) Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) fhp = assemble_vector(lp,assembler_P,Ph) -# Skeleton Triangulation +############################################################################################ +# Integration + +Dc = 2 +Df = Dc -1 +model = PD.model labeling = get_face_labeling(model) -topo = get_grid_topology(model) -patch_cells = Gridap.Arrays.Table(PD.patch_cells) +u = get_trial_fe_basis(Vh) +v = get_fe_basis(Vh) -c2e_map = get_faces(topo,2,1) -patch_cells_edges = map(Reindex(c2e_map),patch_cells.data) +patch_cells = PD.patch_cells + +# Boundary +is_boundary = get_face_mask(labeling,["boundary"],Df) +patch_faces = PBS.get_patch_faces(PD,1,is_boundary) +pfaces_to_pcells = PBS.get_pfaces_to_pcells(PD,Df,patch_faces) + +glue = get_glue(Γ,Val(Df)) +mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) +patch_faces_data = lazy_map(Reindex(mface_to_tface),patch_faces.data) + +contr = aΓ(u,v) +vecdata = first(contr.dict)[2] +patch_vecdata = lazy_map(Reindex(vecdata),patch_faces_data) -is_boundary = get_face_mask(labeling,["boundary"],1) -interior_edges = zeros(Int64,length(is_boundary)) -count = 1 -for i in 1:length(is_boundary) - if !is_boundary[i] - interior_edges[i] = count - count += 1 - end -end +cell_dof_ids = get_cell_dof_ids(Ph) +face_dof_ids = lazy_map(Reindex(cell_dof_ids),lazy_map(x->x[1],pfaces_to_pcells)) -edges_on_boundary = PD.patch_cells_faces_on_boundary[2] -_patch_edges = map((E,mask)->E[.!mask],patch_cells_edges,edges_on_boundary) -__patch_edges = map(E-> filter(e -> !is_boundary[e],E), _patch_edges) -patch_edges = Gridap.Arrays.Table(__patch_edges) +res = ([patch_vecdata],[face_dof_ids],[face_dof_ids]) +assemble_matrix(assembler_P,res) -patch_edges_data = lazy_map(Reindex(interior_edges),patch_edges.data) +# Interior +is_interior = get_face_mask(labeling,["interior"],Df) +patch_faces = PBS.get_patch_faces(PD,Df,is_interior) +pfaces_to_pcells = PBS.get_pfaces_to_pcells(PD,Df,patch_faces) -Λ = SkeletonTriangulation(model) -Λₚ = view(Λ,patch_edges_data) -dΛₚ = Measure(Λₚ,3) + +############################################################################################ β = 10 +aΩ(u,v) = ∫(v⋅u)*dΩₚ aΓ(u,v) = ∫(β⋅jump(v)⋅jump(u))*dΛₚ +ap(u,v) = aΩ(u,v) + 
aΓ(u,v) + +assembler_P = SparseMatrixAssembler(Ph,Ph) + v = get_fe_basis(Ph) u = get_trial_fe_basis(Ph) -cf = (β⋅jump(v)⋅jump(u)) -contr = aΓ(u,v) +contr = ap(u,v) + +cellmat,rows,cols = collect_cell_matrix(Ph,Ph,contr) -matdata_edges = first(contr.dict)[2] -patch_edges_overlapped = Gridap.Arrays.Table(collect(1:length(patch_edges.data)),patch_edges.ptrs) -matdata_cells = lazy_map(Geometry.CombineContributionsMap(matdata_edges),patch_edges_overlapped) +Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) From d495004af35813a615199960de52540bbdff44a1 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 10 May 2023 11:17:52 +1000 Subject: [PATCH 16/34] Saved progress on PatchTriangulations --- src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 31 +++++++- .../seq/PatchTriangulations.jl | 75 +++++++++---------- test/seq/PatchBasedTesting.jl | 15 ++++ 3 files changed, 78 insertions(+), 43 deletions(-) diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index ebaa21eb..2605a8ab 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -82,20 +82,43 @@ Gridap.FESpaces.ConstraintStyle(::PatchFESpace) = Gridap.FESpaces.UnConstrai Gridap.FESpaces.get_vector_type(a::PatchFESpace) = get_vector_type(a.Vh) Gridap.FESpaces.get_fe_dof_basis(a::PatchFESpace) = get_fe_dof_basis(a.Vh) +# get_cell_dof_ids + Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace) = a.patch_cell_dofs_ids Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::Triangulation) = @notimplemented -Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::PatchTriangulation) = a.patch_cell_dofs_ids -function Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,trian::Gridap.Geometry.TriangulationView) - cell_dof_ids = get_cell_dof_ids(a,trian.parent) - return lazy_map(Reindex(cell_dof_ids),trian.cell_to_parent_cell) +function Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,trian::PatchTriangulation) + return get_cell_dof_ids(trian.trian,a,trian) +end + +function Gridap.FESpaces.get_cell_dof_ids(::Triangulation,a::PatchFESpace,trian::PatchTriangulation) + return a.patch_cell_dofs_ids +end + +function Gridap.FESpaces.get_cell_dof_ids(::BoundaryTriangulation,a::PatchFESpace,trian::PatchTriangulation) + cell_dof_ids = get_cell_dof_ids(a) + pfaces_to_pcells = trian.pfaces_to_pcells + return lazy_map(Reindex(cell_dof_ids),lazy_map(x->x[1],pfaces_to_pcells)) end +function Gridap.FESpaces.get_cell_dof_ids(::SkeletonTriangulation,a::PatchFESpace,trian::PatchTriangulation) + cell_dof_ids = get_cell_dof_ids(a) + pfaces_to_pcells = trian.pfaces_to_pcells + + plus = lazy_map(Reindex(cell_dof_ids),lazy_map(x->x[1],pfaces_to_pcells)) + minus = lazy_map(Reindex(cell_dof_ids),lazy_map(x->x[2],pfaces_to_pcells)) + return lazy_map(Gridap.Fields.BlockMap(2,[1,2]),plus,minus) +end + +# scatter dof values + function Gridap.FESpaces.scatter_free_and_dirichlet_values(f::PatchFESpace,free_values,dirichlet_values) cell_vals = Gridap.Fields.PosNegReindex(free_values,dirichlet_values) return lazy_map(Broadcasting(cell_vals),f.patch_cell_dofs_ids) end +# Construction of the patch cell dofs ids + function setup_cell_reffe(model::DiscreteModel,reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any}; kwargs...) basis, reffe_args,reffe_kwargs = reffe cell_reffe = ReferenceFE(model,basis,reffe_args...;reffe_kwargs...) 
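# Usage sketch (illustrative; names as in test/seq/PatchBasedTesting.jl): with the
# dispatches above, patch-wise skeleton terms assemble through the standard
# Gridap API, e.g.
#
#   Λₚ  = SkeletonTriangulation(PD)
#   dΛₚ = Measure(Λₚ,3)
#   aΛp(u,v) = ∫(jump(v)⋅jump(u))*dΛₚ
#   Ahp = assemble_matrix(aΛp,SparseMatrixAssembler(Ph,Ph),Ph,Ph)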
diff --git a/src/PatchBasedSmoothers/seq/PatchTriangulations.jl b/src/PatchBasedSmoothers/seq/PatchTriangulations.jl index 1c2e90f7..e73de1a0 100644 --- a/src/PatchBasedSmoothers/seq/PatchTriangulations.jl +++ b/src/PatchBasedSmoothers/seq/PatchTriangulations.jl @@ -1,14 +1,20 @@ -struct PatchTriangulation{Dc,Dp,A,B,C} <: Gridap.Geometry.Triangulation{Dc,Dp} - trian :: A - PD :: B - patch_faces :: C - - function PatchTriangulation(trian::Triangulation{Dc,Dp},PD::PatchDecomposition,patch_faces) where {Dc,Dp} +struct PatchTriangulation{Dc,Dp,A,B,C,D,E} <: Gridap.Geometry.Triangulation{Dc,Dp} + trian :: A + PD :: B + patch_faces :: C + pfaces_to_pcells :: D + mface_to_tface :: E + + function PatchTriangulation(trian::Triangulation{Dc,Dp}, + PD::PatchDecomposition, + patch_faces,pfaces_to_pcells,mface_to_tface) where {Dc,Dp} A = typeof(trian) B = typeof(PD) C = typeof(patch_faces) - new{Dc,Dp,A,B,C}(trian,PD,patch_faces) + D = typeof(pfaces_to_pcells) + E = typeof(mface_to_tface) + new{Dc,Dp,A,B,C,D,E}(trian,PD,patch_faces,pfaces_to_pcells,mface_to_tface) end end @@ -34,8 +40,8 @@ end function Gridap.Geometry.Triangulation(PD::PatchDecomposition) patch_cells = Gridap.Arrays.Table(PD.patch_cells) - trian = view(Triangulation(PD.model),patch_cells.data) - return PatchTriangulation(trian,PD,patch_cells) + trian = Triangulation(PD.model) + return PatchTriangulation(trian,PD,patch_cells,nothing,nothing) end function Gridap.Geometry.BoundaryTriangulation(PD::PatchDecomposition{Dc}) where Dc @@ -44,15 +50,14 @@ function Gridap.Geometry.BoundaryTriangulation(PD::PatchDecomposition{Dc}) where labeling = get_face_labeling(model) is_boundary = get_face_mask(labeling,["boundary"],Df) - patch_edges = get_patch_cell_faces(PD,1,is_boundary) + patch_faces = get_patch_faces(PD,Df,is_boundary) + pfaces_to_pcells = get_pfaces_to_pcells(PD,Df,patch_faces) Γ = BoundaryTriangulation(model) glue = get_glue(Γ,Val(Df)) mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) - patch_edges_data = lazy_map(Reindex(mface_to_tface),patch_edges.data) - trian = view(Γ,patch_edges_data) - return PatchTriangulation(trian,PD,patch_edges) + return PatchTriangulation(trian,PD,patch_faces,pfaces_to_pcells,mface_to_tface) end function Gridap.Geometry.SkeletonTriangulation(PD::PatchDecomposition{Dc}) where Dc @@ -61,42 +66,34 @@ function Gridap.Geometry.SkeletonTriangulation(PD::PatchDecomposition{Dc}) where labeling = get_face_labeling(model) is_interior = get_face_mask(labeling,["interior"],Df) - patch_edges = get_patch_cell_faces(PD,Df,is_interior) + patch_faces = get_patch_cell_faces(PD,Df,is_interior) + pfaces_to_pcells = get_pfaces_to_pcells(PD,Df,patch_faces) Λ = SkeletonTriangulation(model) glue = get_glue(Λ,Val(Df)) mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) - patch_edges_data = lazy_map(Reindex(mface_to_tface),patch_edges.data) - trian = view(Λ,patch_edges_data) - return PatchTriangulation(trian,PD,patch_edges) + return PatchTriangulation(trian,PD,patch_faces,pfaces_to_pcells,mface_to_tface) end -# Integration +# Move contributions function Gridap.Geometry.move_contributions(scell_to_val::AbstractArray,strian::PatchTriangulation) - return move_contributions(scell_to_val,strian,strian.PD) + return move_contributions(strian.trian,scell_to_val,strian) end -function Gridap.Geometry.move_contributions( - scell_to_val::AbstractArray, - strian::PatchTriangulation{Df}, - PD::PatchDecomposition{Dc}) where {Dc,Df} - - # If cell-wise 
triangulation, - if Df == Dc - return scell_to_val, strian - end - - # If not cell-wise, combine contributions in overlapped cells - patch_faces = strian.patch_faces - patch_faces_overlapped = Gridap.Arrays.Table(collect(1:length(patch_faces.data)),patch_faces.ptrs) - _scell_to_val = lazy_map(Geometry.CombineContributionsMap(scell_to_val),patch_faces_overlapped) - - touched_cells = findall(map(i->patch_faces.ptrs[i] != patch_faces.ptrs[i+1],1:length(patch_faces))) - touched_cell_to_val = lazy_map(Reindex(_scell_to_val),touched_cells) - cell_trian = Triangulation(PD) - touched_cell_trian = view(cell_trian,touched_cells) +function Gridap.Geometry.move_contributions(::Triangulation, + scell_to_val::AbstractArray, + strian::PatchTriangulation) + patch_cells = strian.patch_faces + return lazy_map(Reindex(scell_to_val),patch_cells.data) +end - return touched_cell_to_val, touched_cell_trian +function Gridap.Geometry.move_contributions(::Union{<:BoundaryTriangulation,<:SkeletonTriangulation}, + scell_to_val::AbstractArray, + strian::PatchTriangulation) + patch_faces = strian.patch_faces + mface_to_tface = strian.mface_to_tface + patch_faces_data = lazy_map(Reindex(mface_to_tface),patch_faces.data) + return lazy_map(Reindex(scell_to_val),patch_faces_data) end diff --git a/test/seq/PatchBasedTesting.jl b/test/seq/PatchBasedTesting.jl index 2da512f7..dfec80d0 100644 --- a/test/seq/PatchBasedTesting.jl +++ b/test/seq/PatchBasedTesting.jl @@ -96,6 +96,21 @@ is_interior = get_face_mask(labeling,["interior"],Df) patch_faces = PBS.get_patch_faces(PD,Df,is_interior) pfaces_to_pcells = PBS.get_pfaces_to_pcells(PD,Df,patch_faces) +glue = get_glue(Λ,Val(Df)) +mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) +patch_faces_data = lazy_map(Reindex(mface_to_tface),patch_faces.data) + +contr = aΛ(u,v) +vecdata = first(contr.dict)[2] +patch_vecdata = lazy_map(Reindex(vecdata),patch_faces_data) + +cell_dof_ids = get_cell_dof_ids(Ph) +plus = lazy_map(Reindex(cell_dof_ids),lazy_map(x->x[1],pfaces_to_pcells)) +minus = lazy_map(Reindex(cell_dof_ids),lazy_map(x->x[2],pfaces_to_pcells)) +face_dof_ids = lazy_map(Gridap.Fields.BlockMap(2,[1,2]),plus,minus) + +res = ([patch_vecdata],[face_dof_ids],[face_dof_ids]) +assemble_matrix(assembler_P,res) ############################################################################################ From b55f2d057276ad0cff76a360e2b00b85e19869ab Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 10 May 2023 11:52:33 +1000 Subject: [PATCH 17/34] Working version of PatchTriangulation --- .../seq/PatchTriangulations.jl | 16 ++-- test/seq/PatchBasedTesting.jl | 83 +++---------------- 2 files changed, 18 insertions(+), 81 deletions(-) diff --git a/src/PatchBasedSmoothers/seq/PatchTriangulations.jl b/src/PatchBasedSmoothers/seq/PatchTriangulations.jl index e73de1a0..b16abfd3 100644 --- a/src/PatchBasedSmoothers/seq/PatchTriangulations.jl +++ b/src/PatchBasedSmoothers/seq/PatchTriangulations.jl @@ -53,8 +53,8 @@ function Gridap.Geometry.BoundaryTriangulation(PD::PatchDecomposition{Dc}) where patch_faces = get_patch_faces(PD,Df,is_boundary) pfaces_to_pcells = get_pfaces_to_pcells(PD,Df,patch_faces) - Γ = BoundaryTriangulation(model) - glue = get_glue(Γ,Val(Df)) + trian = BoundaryTriangulation(model) + glue = get_glue(trian,Val(Df)) mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) return PatchTriangulation(trian,PD,patch_faces,pfaces_to_pcells,mface_to_tface) @@ -66,12 +66,12 @@ function 
Gridap.Geometry.SkeletonTriangulation(PD::PatchDecomposition{Dc}) where labeling = get_face_labeling(model) is_interior = get_face_mask(labeling,["interior"],Df) - patch_faces = get_patch_cell_faces(PD,Df,is_interior) + patch_faces = get_patch_faces(PD,Df,is_interior) pfaces_to_pcells = get_pfaces_to_pcells(PD,Df,patch_faces) - Λ = SkeletonTriangulation(model) - glue = get_glue(Λ,Val(Df)) - mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) + trian = SkeletonTriangulation(model) + glue = get_glue(trian,Val(Df)) + mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) return PatchTriangulation(trian,PD,patch_faces,pfaces_to_pcells,mface_to_tface) end @@ -86,7 +86,7 @@ function Gridap.Geometry.move_contributions(::Triangulation, scell_to_val::AbstractArray, strian::PatchTriangulation) patch_cells = strian.patch_faces - return lazy_map(Reindex(scell_to_val),patch_cells.data) + return lazy_map(Reindex(scell_to_val),patch_cells.data), strian end function Gridap.Geometry.move_contributions(::Union{<:BoundaryTriangulation,<:SkeletonTriangulation}, @@ -95,5 +95,5 @@ function Gridap.Geometry.move_contributions(::Union{<:BoundaryTriangulation,<:Sk patch_faces = strian.patch_faces mface_to_tface = strian.mface_to_tface patch_faces_data = lazy_map(Reindex(mface_to_tface),patch_faces.data) - return lazy_map(Reindex(scell_to_val),patch_faces_data) + return lazy_map(Reindex(scell_to_val),patch_faces_data), strian end diff --git a/test/seq/PatchBasedTesting.jl b/test/seq/PatchBasedTesting.jl index dfec80d0..042ce713 100644 --- a/test/seq/PatchBasedTesting.jl +++ b/test/seq/PatchBasedTesting.jl @@ -52,81 +52,18 @@ sol_h = solve(LUSolver(),Ah,fh) Ωₚ = Triangulation(PD) dΩₚ = Measure(Ωₚ,2*order+1) -ap(u,v) = ∫(v⋅u)*dΩₚ -lp(v) = ∫(1*v)*dΩₚ +Λₚ = SkeletonTriangulation(PD) +dΛₚ = Measure(Λₚ,3) +Γₚ = BoundaryTriangulation(PD) +dΓₚ = Measure(Γₚ,3) + +aΩp(u,v) = ∫(v⋅u)*dΩₚ +aΛp(u,v) = ∫(jump(v)⋅jump(u))*dΛₚ +aΓp(u,v) = ∫(v⋅u)*dΓₚ +ap(u,v) = aΩp(u,v) + aΛp(u,v) + aΓp(u,v) +lp(v) = ∫(1*v)*dΩₚ assembler_P = SparseMatrixAssembler(Ph,Ph) Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) fhp = assemble_vector(lp,assembler_P,Ph) -############################################################################################ -# Integration - -Dc = 2 -Df = Dc -1 -model = PD.model -labeling = get_face_labeling(model) - -u = get_trial_fe_basis(Vh) -v = get_fe_basis(Vh) - -patch_cells = PD.patch_cells - -# Boundary -is_boundary = get_face_mask(labeling,["boundary"],Df) -patch_faces = PBS.get_patch_faces(PD,1,is_boundary) -pfaces_to_pcells = PBS.get_pfaces_to_pcells(PD,Df,patch_faces) - -glue = get_glue(Γ,Val(Df)) -mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) -patch_faces_data = lazy_map(Reindex(mface_to_tface),patch_faces.data) - -contr = aΓ(u,v) -vecdata = first(contr.dict)[2] -patch_vecdata = lazy_map(Reindex(vecdata),patch_faces_data) - -cell_dof_ids = get_cell_dof_ids(Ph) -face_dof_ids = lazy_map(Reindex(cell_dof_ids),lazy_map(x->x[1],pfaces_to_pcells)) - -res = ([patch_vecdata],[face_dof_ids],[face_dof_ids]) -assemble_matrix(assembler_P,res) - -# Interior -is_interior = get_face_mask(labeling,["interior"],Df) -patch_faces = PBS.get_patch_faces(PD,Df,is_interior) -pfaces_to_pcells = PBS.get_pfaces_to_pcells(PD,Df,patch_faces) - -glue = get_glue(Λ,Val(Df)) -mface_to_tface = Gridap.Arrays.find_inverse_index_map(glue.tface_to_mface,num_faces(model,Df)) -patch_faces_data = 
lazy_map(Reindex(mface_to_tface),patch_faces.data) - -contr = aΛ(u,v) -vecdata = first(contr.dict)[2] -patch_vecdata = lazy_map(Reindex(vecdata),patch_faces_data) - -cell_dof_ids = get_cell_dof_ids(Ph) -plus = lazy_map(Reindex(cell_dof_ids),lazy_map(x->x[1],pfaces_to_pcells)) -minus = lazy_map(Reindex(cell_dof_ids),lazy_map(x->x[2],pfaces_to_pcells)) -face_dof_ids = lazy_map(Gridap.Fields.BlockMap(2,[1,2]),plus,minus) - -res = ([patch_vecdata],[face_dof_ids],[face_dof_ids]) -assemble_matrix(assembler_P,res) - -############################################################################################ - -β = 10 -aΩ(u,v) = ∫(v⋅u)*dΩₚ -aΓ(u,v) = ∫(β⋅jump(v)⋅jump(u))*dΛₚ - -ap(u,v) = aΩ(u,v) + aΓ(u,v) - -assembler_P = SparseMatrixAssembler(Ph,Ph) - -v = get_fe_basis(Ph) -u = get_trial_fe_basis(Ph) -contr = ap(u,v) - -cellmat,rows,cols = collect_cell_matrix(Ph,Ph,contr) - - -Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) From 009854fa4a71d36dacf6f54d375c1002a85f6bd4 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 10 May 2023 14:39:49 +1000 Subject: [PATCH 18/34] Fix tests --- test/seq/DistributedPatchFESpacesTests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index 7a0a4ac0..38564e84 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -27,7 +27,7 @@ reffe = ReferenceFE(lagrangian,Float64,order) #reffe = ReferenceFE(raviart_thomas,Float64,order) Vh = TestFESpace(model,reffe) PD = PBS.PatchDecomposition(model) -Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) +Ph = PBS.PatchFESpace(model,reffe,H1Conformity(),PD,Vh) # ---- Testing Prolongation and Injection ---- # From 91a7b6d027e0071ef5704b883586144242ecdbe0 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 10 May 2023 15:43:05 +1000 Subject: [PATCH 19/34] GMRES solver now stops at inner iterations if residual is low enough --- src/LinearSolvers/GMRESSolvers.jl | 24 ++++++++++++------------ test/seq/GMRESSolversTests.jl | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/LinearSolvers/GMRESSolvers.jl b/src/LinearSolvers/GMRESSolvers.jl index 7262b2da..de369920 100644 --- a/src/LinearSolvers/GMRESSolvers.jl +++ b/src/LinearSolvers/GMRESSolvers.jl @@ -1,9 +1,4 @@ -# Orthogonalization - - - - # GMRES Solver struct GMRESSolver <: Gridap.Algebra.LinearSolver m ::Int @@ -49,7 +44,7 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::GMRESNumericalSetup,b::Abst solver, A, Pl, caches = ns.solver, ns.A, ns.Pl_ns, ns.caches m, tol = solver.m, solver.tol w, V, Z, H, g, c, s = caches - println(" > Starting GMRES solve: ") + println(" > Starting GMRES solver: ") # Initial residual mul!(w,A,x); w .= b .- w @@ -63,7 +58,9 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::GMRESNumericalSetup,b::Abst # Arnoldi process fill!(g,0.0); g[1] = β V[1] .= w ./ β - for j in 1:m + j = 1 + while ( j < m+1 && β > tol ) + println(" > Inner iteration ", j," - Residual: ", β) # Arnoldi orthogonalization by Modified Gram-Schmidt solve!(Z[j],Pl,V[j]) mul!(w,A,Z[j]) @@ -86,23 +83,26 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::GMRESNumericalSetup,b::Abst H[j,j] = c[j]*H[j,j] + s[j]*H[j+1,j]; H[j+1,j] = 0.0 g[j+1] = -s[j]*g[j]; g[j] = c[j]*g[j] - β = abs(g[j+1]) + β = abs(g[j+1]) + j += 1 end + j = j-1 # Solve least squares problem Hy = g by backward substitution - for i in m:-1:1 - g[i] = (g[i] - dot(H[i,i+1:m],g[i+1:m])) / H[i,i] + for i in j:-1:1 + g[i] = 
(g[i] - dot(H[i,i+1:j],g[i+1:j])) / H[i,i] end # Update solution & residual - for i in 1:m + for i in 1:j x .+= g[i] .* Z[i] end mul!(w,A,x); w .= b .- w iter += 1 end - println(" > Iteration ", iter," - Residual: ", β) + println(" Exiting GMRES solver.") + println(" > Num Iter: ", iter-1," - Final residual: ", β) return x end diff --git a/test/seq/GMRESSolversTests.jl b/test/seq/GMRESSolversTests.jl index fa3e1579..23c06a54 100644 --- a/test/seq/GMRESSolversTests.jl +++ b/test/seq/GMRESSolversTests.jl @@ -29,7 +29,7 @@ function main(model) A, b = get_matrix(op), get_vector(op); Pl = JacobiLinearSolver() - solver = LinearSolvers.GMRESSolver(20,Pl,1.e-8) + solver = LinearSolvers.GMRESSolver(40,Pl,1.e-8) ns = numerical_setup(symbolic_setup(solver,A),A) x = LinearSolvers.allocate_col_vector(A) From e596a098e5badc4a309d6089718f12fcbf592820 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 11 May 2023 14:18:29 +1000 Subject: [PATCH 20/34] Fixed tests --- test/seq/SchurComplementSolversTests.jl | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/seq/SchurComplementSolversTests.jl b/test/seq/SchurComplementSolversTests.jl index 18e12f1a..a7e424a0 100644 --- a/test/seq/SchurComplementSolversTests.jl +++ b/test/seq/SchurComplementSolversTests.jl @@ -95,7 +95,7 @@ function main(model) psc_solver = SchurComplementSolver(A_ns,B,C,PS_ns); - gmres = GMRESSolver(20,psc_solver,1e-6) + gmres = GMRESSolver(20,psc_solver,1e-10) gmres_ns = numerical_setup(symbolic_setup(gmres,sysmat),sysmat) x = LinearSolvers.allocate_col_vector(sysmat) @@ -106,8 +106,10 @@ function main(model) err_u3 = l2_error(uh,u_ref,dΩ) err_p3 = l2_error(ph,p_ref,dΩ) - @test err_u3 ≈ err_u1 - @test err_p3 ≈ err_p1 + @test err_u1 < 1.e-4 + @test err_u3 < 1.e-4 + @test err_p1 < 1.e-4 + @test err_p3 < 1.e-4 end backend = SequentialBackend() @@ -115,7 +117,7 @@ ranks = (2,2) parts = get_part_ids(backend,ranks) D = 2 -n = 40 +n = 60 domain = Tuple(repeat([0,1],D)) partition = (n,n) From a44e722ec9108e91a12b5ca981f862c99cbfa31c Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 12 May 2023 13:16:31 +1000 Subject: [PATCH 21/34] Implemented BlockDiagonalSmoothers in parallel --- src/LinearSolvers/BlockDiagonalSmoothers.jl | 115 ++++++++++++++++--- src/LinearSolvers/SchurComplementSolvers.jl | 2 +- test/seq/BlockDiagonalSmoothersPETScTests.jl | 1 + test/seq/BlockDiagonalSmoothersTests.jl | 105 ++++++++++------- 4 files changed, 162 insertions(+), 61 deletions(-) diff --git a/src/LinearSolvers/BlockDiagonalSmoothers.jl b/src/LinearSolvers/BlockDiagonalSmoothers.jl index 3c5aea83..8f2b046e 100644 --- a/src/LinearSolvers/BlockDiagonalSmoothers.jl +++ b/src/LinearSolvers/BlockDiagonalSmoothers.jl @@ -5,9 +5,7 @@ struct BlockDiagonalSmoother{A,B,C} <: Gridap.Algebra.LinearSolver solvers :: C function BlockDiagonalSmoother(ranges,blocks,solvers) - num_blocks = length(ranges) - @check length(blocks) == num_blocks - @check length(solvers) == num_blocks + num_blocks = length(blocks) A = typeof(ranges) B = typeof(blocks) @@ -16,23 +14,27 @@ struct BlockDiagonalSmoother{A,B,C} <: Gridap.Algebra.LinearSolver end end +# Constructors + +function BlockDiagonalSmoother(blocks :: AbstractArray{<:AbstractMatrix}, + solvers:: AbstractArray{<:Gridap.Algebra.LinearSolver}) + ranges = compute_block_ranges(blocks...) 
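+  # ranges[i] holds the consecutive global ids of block i, deduced from the
+  # block sizes by compute_block_ranges below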
+ return BlockDiagonalSmoother(ranges,blocks,solvers) +end + function BlockDiagonalSmoother(biforms :: AbstractArray{<:Function}, trials :: AbstractArray{<:FESpace}, tests :: AbstractArray{<:FESpace}, solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}) - ranges = map(num_free_dofs,tests) blocks = compute_block_matrices(biforms,trials,tests) - return BlockDiagonalSmoother(ranges,blocks,solvers) + return BlockDiagonalSmoother(blocks,solvers) end function BlockDiagonalSmoother(biforms :: AbstractArray{<:Function}, - U :: MultiFieldFESpace, - V :: MultiFieldFESpace, + U :: Union{MultiFieldFESpace,GridapDistributed.DistributedMultiFieldFESpace}, + V :: Union{MultiFieldFESpace,GridapDistributed.DistributedMultiFieldFESpace}, solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}) - dof_ids = get_free_dof_ids(V) - ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) - blocks = compute_block_matrices(biforms,U.spaces,V.spaces) - return BlockDiagonalSmoother(ranges,blocks,solvers) + return BlockDiagonalSmoother(biforms,[U...],[V...],solvers) end function BlockDiagonalSmoother(A :: AbstractMatrix, @@ -43,11 +45,30 @@ function BlockDiagonalSmoother(A :: AbstractMatrix, return BlockDiagonalSmoother(ranges,blocks,solvers) end +# Computing blocks and ranges + +function compute_block_ranges(blocks::AbstractMatrix...) + num_blocks = length(blocks) + ranges = Vector{AbstractRange}(undef,num_blocks) + ranges[1] = 1:size(blocks[1],2) + for i in 2:num_blocks + ranges[i] = size(blocks[i-1],2) .+ (1:size(blocks[i],2)) + end + return ranges +end + +function compute_block_ranges(blocks::PSparseMatrix...) + _blocks = map(b -> b.owned_owned_values,blocks) + ranges = map_parts(_blocks...) do blocks... + compute_block_ranges(blocks...) + end + return ranges +end + function compute_block_matrices(biforms :: AbstractArray{<:Function}, trials :: AbstractArray{<:FESpace}, tests :: AbstractArray{<:FESpace}) @check length(biforms) == length(tests) == length(trials) - @check all(U -> isa(U,TrialFESpace),trials) blocks = map(assemble_matrix,biforms,tests,trials) return blocks @@ -64,6 +85,7 @@ function extract_diagonal_blocks(A::AbstractMatrix,ranges;lazy_mode=false) return blocks end +# Symbolic and numerical setup struct BlockDiagonalSmootherSS{A,B} <: Gridap.Algebra.SymbolicSetup solver :: A block_ss :: B @@ -74,22 +96,67 @@ function Gridap.Algebra.symbolic_setup(solver::BlockDiagonalSmoother,mat::Abstra return BlockDiagonalSmootherSS(solver,block_ss) end -struct BlockDiagonalSmootherNS{A,B} <: Gridap.Algebra.NumericalSetup +struct BlockDiagonalSmootherNS{A,B,C} <: Gridap.Algebra.NumericalSetup solver :: A block_ns :: B + caches :: C end function Gridap.Algebra.numerical_setup(ss::BlockDiagonalSmootherSS,mat::AbstractMatrix) solver = ss.solver block_ns = map(numerical_setup,ss.block_ss,solver.blocks) - return BlockDiagonalSmootherNS(solver,block_ns) + caches = _get_block_diagonal_smoothers_caches(solver.blocks) + return BlockDiagonalSmootherNS(solver,block_ns,caches) +end + +function _get_block_diagonal_smoothers_caches(blocks) + return nothing +end + +function _get_block_diagonal_smoothers_caches(blocks::AbstractArray{<:PSparseMatrix}) + x_blocks = map(bi->allocate_col_vector(bi),blocks) + b_blocks = map(bi->allocate_col_vector(bi),blocks) + return x_blocks,b_blocks +end + +# Solve + +function to_blocks!(x::AbstractVector,x_blocks,ranges) + map(ranges,x_blocks) do range,x_block + x_block .= x[range] + end + return x_blocks +end + +# TODO: The exchange could be optimized for sure by swapping the loop order... 
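+# to_blocks! scatters a monolithic vector into the per-block vectors, while
+# to_global! (further down) gathers them back using the same ranges.
+# Illustrative use, with the names from the tests:
+#   BDS   = BlockDiagonalSmoother(A,ranges,solvers)
+#   BDSns = numerical_setup(symbolic_setup(BDS,A),A)
+#   ldiv!(x,BDSns,b)   # equivalent to solve!(x,BDSns,b)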
+function to_blocks!(x::PVector,x_blocks,ranges) + x_blocks_owned = map(xi->xi.owned_values,x_blocks) + map_parts(x.owned_values,ranges,x_blocks_owned...) do x,ranges,x_blocks... + to_blocks!(x,x_blocks,ranges) + end + map(exchange!,x_blocks) + return x_blocks +end + +function to_global!(x::AbstractVector,x_blocks,ranges) + map(ranges,x_blocks) do range,x_block + x[range] .= x_block + end + return x +end + +function to_global!(x::PVector,x_blocks,ranges) + x_blocks_owned = map(xi->xi.owned_values,x_blocks) + map_parts(x.owned_values,ranges,x_blocks_owned...) do x,ranges,x_blocks... + to_global!(x,x_blocks,ranges) + end + exchange!(x) + return x end -# TODO: Should we consider overlapping block smoothers? function Gridap.Algebra.solve!(x::AbstractVector,ns::BlockDiagonalSmootherNS,b::AbstractVector) solver, block_ns = ns.solver, ns.block_ns num_blocks, ranges = solver.num_blocks, solver.ranges - for iB in 1:num_blocks xi = view(x,ranges[iB]) bi = view(b,ranges[iB]) @@ -98,6 +165,22 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::BlockDiagonalSmootherNS,b:: return x end +function Gridap.Algebra.solve!(x::PVector,ns::BlockDiagonalSmootherNS,b::PVector) + solver, block_ns, caches = ns.solver, ns.block_ns, ns.caches + num_blocks, ranges = solver.num_blocks, solver.ranges + x_blocks, b_blocks = caches + + to_blocks!(x,x_blocks,ranges) + to_blocks!(b,b_blocks,ranges) + for iB in 1:num_blocks + xi = x_blocks[iB] + bi = b_blocks[iB] + solve!(xi,block_ns[iB],bi) + end + to_global!(x,x_blocks,ranges) + return x +end + function LinearAlgebra.ldiv!(x,ns::BlockDiagonalSmootherNS,b) solve!(x,ns,b) end \ No newline at end of file diff --git a/src/LinearSolvers/SchurComplementSolvers.jl b/src/LinearSolvers/SchurComplementSolvers.jl index d97f07d5..be9b63d6 100644 --- a/src/LinearSolvers/SchurComplementSolvers.jl +++ b/src/LinearSolvers/SchurComplementSolvers.jl @@ -65,7 +65,7 @@ end function Gridap.Algebra.numerical_setup(ss::SchurComplementSymbolicSetup,mat::AbstractMatrix) s = ss.solver B,C = s.B, s.C - ranges = get_block_ranges(B,C) + ranges = compute_block_ranges(C,B) caches = get_shur_complement_caches(B,C) return SchurComplementNumericalSetup(s,mat,ranges,caches) end diff --git a/test/seq/BlockDiagonalSmoothersPETScTests.jl b/test/seq/BlockDiagonalSmoothersPETScTests.jl index 44910010..8190672a 100644 --- a/test/seq/BlockDiagonalSmoothersPETScTests.jl +++ b/test/seq/BlockDiagonalSmoothersPETScTests.jl @@ -79,6 +79,7 @@ GridapPETSc.with() do x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) println("Error: ",norm(x-x_star)) + @test norm(x-x_star) < 1.0e-10 end end \ No newline at end of file diff --git a/test/seq/BlockDiagonalSmoothersTests.jl b/test/seq/BlockDiagonalSmoothersTests.jl index b76f02ab..d7dfd30f 100644 --- a/test/seq/BlockDiagonalSmoothersTests.jl +++ b/test/seq/BlockDiagonalSmoothersTests.jl @@ -1,11 +1,13 @@ module BlockDiagonalSmoothersTests +using Test using Gridap using Gridap.MultiField using BlockArrays using LinearAlgebra using FillArrays using IterativeSolvers +using PartitionedArrays using GridapSolvers @@ -15,65 +17,80 @@ f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) p(x) = x[1] + x[2] g(x) = -Δ(p)(x) -D = 2 -n = 10 -domain = Tuple(repeat([0,1],D)) -partition = (n,n) -model = CartesianDiscreteModel(domain,partition) +function main(model,single_proc::Bool) + order = 2 + reffeᵤ = ReferenceFE(lagrangian,VectorValue{D,Float64},order) + V = TestFESpace(model,reffeᵤ,conformity=:H1,dirichlet_tags=["boundary"]) + + reffeₚ = 
ReferenceFE(lagrangian,Float64,order) + Q = TestFESpace(model,reffeₚ,conformity=:H1,dirichlet_tags=["boundary"]) + + U = TrialFESpace(V,u) + P = TrialFESpace(Q,p) -order = 2 -reffeᵤ = ReferenceFE(lagrangian,VectorValue{D,Float64},order) -V = TestFESpace(model,reffeᵤ,conformity=:H1,dirichlet_tags=["boundary"]) + Y = MultiFieldFESpace([V, Q]) + X = MultiFieldFESpace([U, P]) -reffeₚ = ReferenceFE(lagrangian,Float64,order) -Q = TestFESpace(model,reffeₚ,conformity=:H1,dirichlet_tags=["boundary"]) + degree = 2*(order + 1) + Ω = Triangulation(model) + dΩ = Measure(Ω,degree) -U = TrialFESpace(V,u) -P = TrialFESpace(Q,p) -Y = MultiFieldFESpace([V, Q]) -X = MultiFieldFESpace([U, P]) + # Global problem + a((u,p),(v,q)) = ∫( v⊙u + ∇(v)⊙∇(u) + q⋅p + ∇(q)⊙∇(p))dΩ + l((v,q)) = ∫( v⋅f + q⋅g)dΩ -degree = 2*(order + 1) -Ω = Triangulation(model) -dΩ = Measure(Ω,degree) + op = AffineFEOperator(a,l,X,Y) + A,b = get_matrix(op), get_vector(op); + xh_star = solve(op) + x_star = get_free_dof_values(xh_star) + dof_ids = get_free_dof_ids(X) + ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) + solvers = Fill(BackslashSolver(),2) -# Global problem -a((u,p),(v,q)) = ∫( v⊙u + ∇(v)⊙∇(u) + q⋅p + ∇(q)⊙∇(p))dΩ -l((v,q)) = ∫( v⋅f + q⋅g)dΩ + # Build using the global matrix + if single_proc + BDS = BlockDiagonalSmoother(A,ranges,solvers) + BDSss = symbolic_setup(BDS,A) + BDSns = numerical_setup(BDSss,A) -op = AffineFEOperator(a,l,X,Y) -A,b = get_matrix(op), get_vector(op) -xh_star = solve(op) -x_star = get_free_dof_values(xh_star) + x = get_free_dof_values(zero(X)) + x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) -dof_ids = get_free_dof_ids(X) -ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) -solvers = Fill(BackslashSolver(),2) + @test norm(x-x_star) < 1.e-8 + end -# Build using the global matrix -BDS = BlockDiagonalSmoother(A,ranges,solvers) -BDSss = symbolic_setup(BDS,A) -BDSns = numerical_setup(BDSss,A) + # Build using local weakforms + a1(u,v) = ∫(v⊙u + ∇(v)⊙∇(u))dΩ + a2(p,q) = ∫(q⋅p + ∇(q)⊙∇(p))dΩ + biforms = [a1,a2] -x = get_free_dof_values(zero(X)) -x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) + BDS = BlockDiagonalSmoother(biforms,X,Y,solvers) + BDSss = symbolic_setup(BDS,A) + BDSns = numerical_setup(BDSss,A) -norm(x-x_star) + x = GridapSolvers.LinearSolvers.allocate_col_vector(A) + x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) -# Build using local weakforms -a1(u,v) = ∫(v⊙u + ∇(v)⊙∇(u))dΩ -a2(p,q) = ∫(q⋅p + ∇(q)⊙∇(p))dΩ -biforms = [a1,a2] + @test norm(x-x_star) < 1.e-8 +end -BDS = BlockDiagonalSmoother(biforms,X,Y,solvers) -BDSss = symbolic_setup(BDS,A) -BDSns = numerical_setup(BDSss,A) +backend = SequentialBackend() +ranks = (2,2) +parts = get_part_ids(backend,ranks) + +D = 2 +n = 10 +domain = Tuple(repeat([0,1],D)) +partition = (n,n) -x = get_free_dof_values(zero(X)) -x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) +# Serial +model = CartesianDiscreteModel(domain,partition) +main(model,true) -norm(x-x_star) +# Distributed, sequential +model = CartesianDiscreteModel(parts,domain,partition) +main(model,false) end \ No newline at end of file From 86d46ff6ae7c0ceeb7112a077cf6751fcd3189f5 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 12 May 2023 16:09:57 +1000 Subject: [PATCH 22/34] Fixed tests --- test/seq/BlockDiagonalSmoothersPETScTests.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/seq/BlockDiagonalSmoothersPETScTests.jl b/test/seq/BlockDiagonalSmoothersPETScTests.jl index 8190672a..bd25a70c 100644 --- a/test/seq/BlockDiagonalSmoothersPETScTests.jl +++ 
b/test/seq/BlockDiagonalSmoothersPETScTests.jl @@ -1,5 +1,6 @@ module BlockDiagonalSmoothersPETScTests +using Test using Gridap using Gridap.MultiField using BlockArrays From e55c1848c4f65cf0e6e4b77cac29a92ed2c486b8 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 22 Jun 2023 12:16:34 +1000 Subject: [PATCH 23/34] Added support for BlockVectors of PVectors --- src/LinearSolvers/Helpers.jl | 116 ++++++++++++++++++++++++++++++++++- 1 file changed, 113 insertions(+), 3 deletions(-) diff --git a/src/LinearSolvers/Helpers.jl b/src/LinearSolvers/Helpers.jl index 95089ca9..804d4f94 100644 --- a/src/LinearSolvers/Helpers.jl +++ b/src/LinearSolvers/Helpers.jl @@ -1,6 +1,5 @@ -# Row/Col vectors - +# Row/Col vector allocations for serial function allocate_row_vector(A::AbstractMatrix{T}) where T return zeros(T,size(A,1)) end @@ -9,7 +8,7 @@ function allocate_col_vector(A::AbstractMatrix{T}) where T return zeros(T,size(A,2)) end - +# Row/Col vector allocations for parallel function allocate_row_vector(A::PSparseMatrix) T = eltype(A) return PVector(zero(T),A.rows) @@ -20,4 +19,115 @@ function allocate_col_vector(A::PSparseMatrix) return PVector(zero(T),A.cols) end +# Block Row/Col vector allocations for serial +function allocate_row_vector(A::BlockMatrix{T}) where T + bsizes = blocksizes(A) + return mortar(map(s->zeros(T,s),bsizes[1])) +end + +function allocate_col_vector(A::BlockMatrix{T}) where T + bsizes = blocksizes(A) + return mortar(map(s->zeros(T,s),bsizes[2])) +end + +# BlockArrays of PVectors/PSparseMatrices + +const BlockPVector{T} = BlockVector{T,<:Vector{<:PVector{T}}} +const BlockPSparseMatrix{T,V} = BlockMatrix{T,<:Matrix{<:PSparseMatrix{V}}} + +# Block Row/Col vector allocations for parallel +function allocate_row_vector(A::BlockPSparseMatrix) + return mortar(map(Aii->PVector(0.0,Aii.rows),A.blocks[:,1])) +end + +function allocate_col_vector(A::BlockPSparseMatrix) + return mortar(map(Aii->PVector(1.0,Aii.cols),A.blocks[1,:])) +end + +# BlockVector algebra +function LinearAlgebra.mul!(y::BlockVector,A::BlockMatrix,x::BlockVector) + o = one(eltype(A)) + for i in blockaxes(A,2) + fill!(y[i],0.0) + for j in blockaxes(A,2) + mul!(y[i],A[i,j],x[j],o,o) + end + end +end + +function LinearAlgebra.dot(x::BlockPVector,y::BlockPVector) + return sum(map(dot,blocks(x),blocks(y))) +end + +function Base.zero(v::BlockPVector) + return mortar(map(zero,blocks(v))) +end + +function Base.similar(v::BlockPVector) + return mortar(map(similar,blocks(v))) +end + +function LinearAlgebra.norm(v::BlockPVector) + block_norms = map(norm,blocks(v)) + return sqrt(sum(block_norms.^2)) +end + +function Base.copyto!(y::BlockPVector,x::BlockPVector) + @check blocklength(x) == blocklength(y) + for i in blockaxes(x,1) + copyto!(y[i],x[i]) + end +end + +# BlockVector Broadcasting for PVectors + +struct BlockPBroadcasted{A,B} + blocks :: A + axes :: B +end + +BlockArrays.blocks(b::BlockPBroadcasted) = b.blocks +BlockArrays.blockaxes(b::BlockPBroadcasted) = b.axes + +function Base.broadcasted(f, args::Union{BlockPVector,BlockPBroadcasted}...) + a1 = first(args) + @boundscheck @assert all(ai -> blockaxes(ai) == blockaxes(a1),args) + + blocks_in = map(blocks,args) + blocks_out = map((largs...)->Base.broadcasted(f,largs...),blocks_in...) 
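+  # each entry of blocks_out is a lazy Broadcasted object; evaluation is
+  # deferred until materialize/materialize! (below) fuses it block by block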
+ + return BlockPBroadcasted(blocks_out,blockaxes(a1)) +end + +function Base.broadcasted(f, a::Number, b::Union{BlockPVector,BlockPBroadcasted}) + blocks_out = map(b->Base.broadcasted(f,a,b),blocks(b)) + return BlockPBroadcasted(blocks_out,blockaxes(b)) +end + +function Base.broadcasted(f, a::Union{BlockPVector,BlockPBroadcasted}, b::Number) + blocks_out = map(a->Base.broadcasted(f,a,b),blocks(a)) + return BlockPBroadcasted(blocks_out,blockaxes(a)) +end + +function Base.broadcasted(f, + a::Union{BlockPVector,BlockPBroadcasted}, + b::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}}) + Base.broadcasted(f,a,Base.materialize(b)) +end +function Base.broadcasted( + f, + a::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}}, + b::Union{BlockPVector,BlockPBroadcasted}) + Base.broadcasted(f,Base.materialize(a),b) +end + +function Base.materialize(b::BlockPBroadcasted) + blocks_out = map(Base.materialize,blocks(b)) + return mortar(blocks_out) +end + +function Base.materialize!(a::BlockPVector,b::BlockPBroadcasted) + map(Base.materialize!,blocks(a),blocks(b)) + return a +end From 1d9c50b6988016ad9910651d61d5c3c6fe8f4a44 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 22 Jun 2023 12:17:07 +1000 Subject: [PATCH 24/34] BlockDiagonalSmoothers now work with BlockVectors --- src/LinearSolvers/BlockDiagonalSmoothers.jl | 38 ++++++++++++++++++--- test/seq/BlockDiagonalSmoothersTests.jl | 17 +++++++++ 2 files changed, 50 insertions(+), 5 deletions(-) diff --git a/src/LinearSolvers/BlockDiagonalSmoothers.jl b/src/LinearSolvers/BlockDiagonalSmoothers.jl index 8f2b046e..e088e69e 100644 --- a/src/LinearSolvers/BlockDiagonalSmoothers.jl +++ b/src/LinearSolvers/BlockDiagonalSmoothers.jl @@ -16,8 +16,15 @@ end # Constructors -function BlockDiagonalSmoother(blocks :: AbstractArray{<:AbstractMatrix}, - solvers:: AbstractArray{<:Gridap.Algebra.LinearSolver}) +function BlockDiagonalSmoother(blocks :: AbstractArray{<:AbstractMatrix}, + solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}) + ranges = compute_block_ranges(blocks...) + return BlockDiagonalSmoother(ranges,blocks,solvers) +end + +function BlockDiagonalSmoother(block_mat :: BlockMatrix, + solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}) + blocks = [block_mat[Block(i,i)] for i in 1:length(solvers)] ranges = compute_block_ranges(blocks...) 
return BlockDiagonalSmoother(ranges,blocks,solvers) end @@ -105,20 +112,24 @@ end function Gridap.Algebra.numerical_setup(ss::BlockDiagonalSmootherSS,mat::AbstractMatrix) solver = ss.solver block_ns = map(numerical_setup,ss.block_ss,solver.blocks) - caches = _get_block_diagonal_smoothers_caches(solver.blocks) + caches = _get_block_diagonal_smoothers_caches(solver.blocks,mat) return BlockDiagonalSmootherNS(solver,block_ns,caches) end -function _get_block_diagonal_smoothers_caches(blocks) +function _get_block_diagonal_smoothers_caches(blocks,mat) return nothing end -function _get_block_diagonal_smoothers_caches(blocks::AbstractArray{<:PSparseMatrix}) +function _get_block_diagonal_smoothers_caches(blocks::AbstractArray{<:PSparseMatrix},mat::PSparseMatrix) x_blocks = map(bi->allocate_col_vector(bi),blocks) b_blocks = map(bi->allocate_col_vector(bi),blocks) return x_blocks,b_blocks end +function _get_block_diagonal_smoothers_caches(blocks::AbstractArray{<:PSparseMatrix},mat::BlockMatrix) + return nothing +end + # Solve function to_blocks!(x::AbstractVector,x_blocks,ranges) @@ -154,6 +165,7 @@ function to_global!(x::PVector,x_blocks,ranges) return x end +# Solve for serial vectors function Gridap.Algebra.solve!(x::AbstractVector,ns::BlockDiagonalSmootherNS,b::AbstractVector) solver, block_ns = ns.solver, ns.block_ns num_blocks, ranges = solver.num_blocks, solver.ranges @@ -165,6 +177,7 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::BlockDiagonalSmootherNS,b:: return x end +# Solve for PVectors (parallel) function Gridap.Algebra.solve!(x::PVector,ns::BlockDiagonalSmootherNS,b::PVector) solver, block_ns, caches = ns.solver, ns.block_ns, ns.caches num_blocks, ranges = solver.num_blocks, solver.ranges @@ -181,6 +194,21 @@ function Gridap.Algebra.solve!(x::PVector,ns::BlockDiagonalSmootherNS,b::PVector return x end +# Solve for BlockVectors (serial & parallel) +function Gridap.Algebra.solve!(x::BlockVector,ns::BlockDiagonalSmootherNS,b::BlockVector) + solver, block_ns = ns.solver, ns.block_ns + num_blocks = solver.num_blocks + + @check blocklength(x) == blocklength(b) == num_blocks + for iB in 1:num_blocks + xi = x[Block(iB)] + bi = b[Block(iB)] + solve!(xi,block_ns[iB],bi) + end + + return x +end + function LinearAlgebra.ldiv!(x,ns::BlockDiagonalSmootherNS,b) solve!(x,ns,b) end \ No newline at end of file diff --git a/test/seq/BlockDiagonalSmoothersTests.jl b/test/seq/BlockDiagonalSmoothersTests.jl index d7dfd30f..5be45313 100644 --- a/test/seq/BlockDiagonalSmoothersTests.jl +++ b/test/seq/BlockDiagonalSmoothersTests.jl @@ -74,6 +74,23 @@ function main(model,single_proc::Bool) x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) @test norm(x-x_star) < 1.e-8 + + # Build using BlockMatrixAssemblers + mfs = BlockMultiFieldStyle() + Yb = MultiFieldFESpace([V,Q];style=mfs) + Xb = MultiFieldFESpace([U,P];style=mfs) + + op_blocks = AffineFEOperator(a,l,Xb,Yb) + Ab,bb = get_matrix(op_blocks), get_vector(op_blocks); + + BDS = BlockDiagonalSmoother(Ab,solvers) + BDSss = symbolic_setup(BDS,A) + BDSns = numerical_setup(BDSss,A) + + xb = GridapSolvers.LinearSolvers.allocate_col_vector(Ab) + xb = cg!(xb,Ab,bb;verbose=true,Pl=BDSns,reltol=1.0e-12) + + @test norm(x-x_star) < 1.e-8 end backend = SequentialBackend() From eb640e9c116208326c13efbe093b9c5b15168f66 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 23 Jun 2023 16:33:50 +1000 Subject: [PATCH 25/34] GMRESSolver can now be updated --- src/LinearSolvers/GMRESSolvers.jl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git 
a/src/LinearSolvers/GMRESSolvers.jl b/src/LinearSolvers/GMRESSolvers.jl index de369920..c26080ce 100644 --- a/src/LinearSolvers/GMRESSolvers.jl +++ b/src/LinearSolvers/GMRESSolvers.jl @@ -14,7 +14,7 @@ function Gridap.Algebra.symbolic_setup(solver::GMRESSolver, A::AbstractMatrix) return GMRESSymbolicSetup(solver) end -struct GMRESNumericalSetup <: Gridap.Algebra.NumericalSetup +mutable struct GMRESNumericalSetup <: Gridap.Algebra.NumericalSetup solver A Pl_ns @@ -40,6 +40,11 @@ function Gridap.Algebra.numerical_setup(ss::GMRESSymbolicSetup, A::AbstractMatri return GMRESNumericalSetup(solver,A,Pl_ns,caches) end +function Gridap.Algebra.numerical_setup!(ns::GMRESNumericalSetup, A::AbstractMatrix) + numerical_setup!(ns.Pl_ns,A) + ns.A = A +end + function Gridap.Algebra.solve!(x::AbstractVector,ns::GMRESNumericalSetup,b::AbstractVector) solver, A, Pl, caches = ns.solver, ns.A, ns.Pl_ns, ns.caches m, tol = solver.m, solver.tol From 5a88a128031874d634da3caeef746577d4efb815 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 5 Jul 2023 19:49:05 +1000 Subject: [PATCH 26/34] Small modification of BlockPreconditioners tests --- test/seq/BlockDiagonalSmoothersTests.jl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test/seq/BlockDiagonalSmoothersTests.jl b/test/seq/BlockDiagonalSmoothersTests.jl index 5be45313..b77131bb 100644 --- a/test/seq/BlockDiagonalSmoothersTests.jl +++ b/test/seq/BlockDiagonalSmoothersTests.jl @@ -9,6 +9,7 @@ using FillArrays using IterativeSolvers using PartitionedArrays +using GridapDistributed using GridapSolvers u(x) = VectorValue(x[1],x[2]) @@ -80,7 +81,12 @@ function main(model,single_proc::Bool) Yb = MultiFieldFESpace([V,Q];style=mfs) Xb = MultiFieldFESpace([U,P];style=mfs) - op_blocks = AffineFEOperator(a,l,Xb,Yb) + if single_proc + assem = SparseMatrixAssembler(Xb,Yb) + else + assem = SparseMatrixAssembler(Xb,Yb,FullyAssembledRows()) + end + op_blocks = AffineFEOperator(a,l,Xb,Yb,assem) Ab,bb = get_matrix(op_blocks), get_vector(op_blocks); BDS = BlockDiagonalSmoother(Ab,solvers) From 7bc8de6f3cf4a239f50f30a65bbc39fa4e838358 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 22 Aug 2023 10:44:52 +1000 Subject: [PATCH 27/34] Minor fix --- test/mpi/RichardsonSmoothersTests.jl | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl index 2fd20b44..d6eaf1d2 100644 --- a/test/mpi/RichardsonSmoothersTests.jl +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -10,16 +10,10 @@ using IterativeSolvers using GridapSolvers using GridapSolvers.LinearSolvers -<<<<<<< HEAD -function main(parts,partition) - domain = (0,1,0,1) - model = CartesianDiscreteModel(parts,domain,partition) -======= function main(parts,nranks,domain_partition) GridapP4est.with(parts) do domain = (0,1,0,1) model = CartesianDiscreteModel(parts,nranks,domain,domain_partition) ->>>>>>> acea3e200d6892357ac46b28c59e6f63ecc572da sol(x) = x[1] + x[2] f(x) = -Δ(sol)(x) From cfff7f4a2f536899e42d330ba70b8dfd555d5f9f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 22 Aug 2023 11:01:26 +1000 Subject: [PATCH 28/34] First round of test fixing --- src/LinearSolvers/JacobiLinearSolvers.jl | 2 +- src/LinearSolvers/SchurComplementSolvers.jl | 16 ++++++++-------- src/LinearSolvers/SymGaussSeidelSmoothers.jl | 18 +++++++++--------- test/mpi/SymGaussSeidelSmoothersTests.jl | 15 +++++++++------ test/seq/BlockDiagonalSmoothersTests.jl | 13 +++++++------ test/seq/GMRESSolversTests.jl | 13 +++++++------ 
test/seq/IterativeSolversTests.jl | 13 +++++++------ test/seq/SchurComplementSolversTests.jl | 13 +++++++------ test/seq/SymGaussSeidelSmoothersTests.jl | 13 +++++++------ 9 files changed, 62 insertions(+), 54 deletions(-) diff --git a/src/LinearSolvers/JacobiLinearSolvers.jl b/src/LinearSolvers/JacobiLinearSolvers.jl index 29319cce..5f412d99 100644 --- a/src/LinearSolvers/JacobiLinearSolvers.jl +++ b/src/LinearSolvers/JacobiLinearSolvers.jl @@ -22,7 +22,7 @@ function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::AbstractMa end function Gridap.Algebra.numerical_setup(ss::JacobiSymbolicSetup,A::PSparseMatrix) - inv_diag=map(own_values(A)) do a + inv_diag = map(own_values(A)) do a 1.0 ./ diag(a) end return JacobiNumericalSetup(inv_diag) diff --git a/src/LinearSolvers/SchurComplementSolvers.jl b/src/LinearSolvers/SchurComplementSolvers.jl index be9b63d6..195f354d 100644 --- a/src/LinearSolvers/SchurComplementSolvers.jl +++ b/src/LinearSolvers/SchurComplementSolvers.jl @@ -40,12 +40,12 @@ struct SchurComplementNumericalSetup <: Gridap.Algebra.NumericalSetup end function get_shur_complement_caches(B::AbstractMatrix,C::AbstractMatrix) - du1 = LinearSolvers.allocate_col_vector(C) - du2 = LinearSolvers.allocate_col_vector(C) - dp = LinearSolvers.allocate_col_vector(B) + du1 = allocate_col_vector(C) + du2 = allocate_col_vector(C) + dp = allocate_col_vector(B) - rv_u = LinearSolvers.allocate_row_vector(B) - rv_p = LinearSolvers.allocate_row_vector(C) + rv_u = allocate_row_vector(B) + rv_p = allocate_row_vector(C) return (du1,du2,dp,rv_u,rv_p) end @@ -81,8 +81,8 @@ function to_blocks!(x::PVector,u,p,ranges) map_parts(x.owned_values,u.owned_values,p.owned_values,ranges) do x,u,p,ranges to_blocks!(x,u,p,ranges) end - exchange!(u) - exchange!(p) + consistent!(u) |> fetch + consistent!(p) |> fetch return u,p end @@ -97,7 +97,7 @@ function to_global!(x::PVector,u,p,ranges) map_parts(x.owned_values,u.owned_values,p.owned_values,ranges) do x,u,p,ranges to_global!(x,u,p,ranges) end - exchange!(x) + consistent!(x) |> fetch return x end diff --git a/src/LinearSolvers/SymGaussSeidelSmoothers.jl b/src/LinearSolvers/SymGaussSeidelSmoothers.jl index 87194d1c..130d6c15 100644 --- a/src/LinearSolvers/SymGaussSeidelSmoothers.jl +++ b/src/LinearSolvers/SymGaussSeidelSmoothers.jl @@ -62,7 +62,7 @@ function forward_sub!(L::LowerTriangular{Tv,Ti,<:SparseMatrixCSC},x::AbstractVec # Substitute next values involving x[col] for i = idx + 1 : last[col] - x[A.rowval[i]] -= A.nzval[i] * x[col] + x[A.rowval[i]] -= A.nzval[i] * x[col] end end return x @@ -78,14 +78,14 @@ function backward_sub!(U::UpperTriangular{Tv,Ti,<:SparseMatrixCSC}, x::AbstractV A, diag = U.mat, U.diag.diag n = length(diag) for col = n : -1 : 1 - # Solve for diagonal element - idx = diag[col] - x[col] = x[col] / A.nzval[idx] - - # Substitute next values involving x[col] - for i = A.colptr[col] : idx - 1 - x[A.rowval[i]] -= A.nzval[i] * x[col] - end + # Solve for diagonal element + idx = diag[col] + x[col] = x[col] / A.nzval[idx] + + # Substitute next values involving x[col] + for i = A.colptr[col] : idx - 1 + x[A.rowval[i]] -= A.nzval[i] * x[col] + end end return x end diff --git a/test/mpi/SymGaussSeidelSmoothersTests.jl b/test/mpi/SymGaussSeidelSmoothersTests.jl index 2edc236d..86e10d1f 100644 --- a/test/mpi/SymGaussSeidelSmoothersTests.jl +++ b/test/mpi/SymGaussSeidelSmoothersTests.jl @@ -10,9 +10,9 @@ using IterativeSolvers using GridapSolvers using GridapSolvers.LinearSolvers -function main(parts,partition) +function 
main(parts,num_ranks,mesh_partition) domain = (0,1,0,1) - model = CartesianDiscreteModel(parts,domain,partition) + model = CartesianDiscreteModel(parts,num_ranks,domain,mesh_partition) sol(x) = x[1] + x[2] f(x) = -Δ(sol)(x) @@ -36,7 +36,7 @@ function main(parts,partition) ss = symbolic_setup(P,A) ns = numerical_setup(ss,A) - x = PVector(1.0,A.cols) + x = pfill(1.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-8, @@ -54,10 +54,13 @@ function main(parts,partition) @test E < 1.e-8 end -partition = (32,32) -ranks = (2,2) +mesh_partition = (32,32) +num_ranks = (2,2) +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end -with_backend(main,MPIBackend(),ranks,partition) +main(parts,num_ranks,mesh_partition) MPI.Finalize() end \ No newline at end of file diff --git a/test/seq/BlockDiagonalSmoothersTests.jl b/test/seq/BlockDiagonalSmoothersTests.jl index b77131bb..1b0638c6 100644 --- a/test/seq/BlockDiagonalSmoothersTests.jl +++ b/test/seq/BlockDiagonalSmoothersTests.jl @@ -99,21 +99,22 @@ function main(model,single_proc::Bool) @test norm(x-x_star) < 1.e-8 end -backend = SequentialBackend() -ranks = (2,2) -parts = get_part_ids(backend,ranks) +num_ranks = (2,2) +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end D = 2 n = 10 domain = Tuple(repeat([0,1],D)) -partition = (n,n) +mesh_partition = (n,n) # Serial -model = CartesianDiscreteModel(domain,partition) +model = CartesianDiscreteModel(domain,mesh_partition) main(model,true) # Distributed, sequential -model = CartesianDiscreteModel(parts,domain,partition) +model = CartesianDiscreteModel(parts,num_ranks,domain,mesh_partition) main(model,false) end \ No newline at end of file diff --git a/test/seq/GMRESSolversTests.jl b/test/seq/GMRESSolversTests.jl index 23c06a54..f8eee18b 100644 --- a/test/seq/GMRESSolversTests.jl +++ b/test/seq/GMRESSolversTests.jl @@ -43,17 +43,18 @@ function main(model) end # Completely serial -partition = (10,10) +mesh_partition = (10,10) domain = (0,1,0,1) -model = CartesianDiscreteModel(domain,partition) +model = CartesianDiscreteModel(domain,mesh_partition) @test main(model) # Sequential -backend = SequentialBackend() -ranks = (1,2) -parts = get_part_ids(backend,ranks) +num_ranks = (1,2) +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end -model = CartesianDiscreteModel(parts,domain,partition) +model = CartesianDiscreteModel(parts,num_ranks,domain,mesh_partition) @test main(model) end \ No newline at end of file diff --git a/test/seq/IterativeSolversTests.jl b/test/seq/IterativeSolversTests.jl index 46d94875..bdaf61a0 100644 --- a/test/seq/IterativeSolversTests.jl +++ b/test/seq/IterativeSolversTests.jl @@ -80,17 +80,18 @@ function main(model,is_distributed) end # Completely serial -partition = (8,8) +mesh_partition = (8,8) domain = (0,1,0,1) -model = CartesianDiscreteModel(domain,partition) +model = CartesianDiscreteModel(domain,mesh_partition) main(model,false) # Sequential -backend = SequentialBackend() -ranks = (1,2) -parts = get_part_ids(backend,ranks) +num_ranks = (1,2) +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end -model = CartesianDiscreteModel(parts,domain,partition) +model = CartesianDiscreteModel(parts,num_ranks,domain,mesh_partition) main(model,true) end \ No newline at end of file diff --git a/test/seq/SchurComplementSolversTests.jl b/test/seq/SchurComplementSolversTests.jl index a7e424a0..93c4763e 100644 --- 
a/test/seq/SchurComplementSolversTests.jl +++ b/test/seq/SchurComplementSolversTests.jl @@ -112,21 +112,22 @@ function main(model) @test err_p3 < 1.e-4 end -backend = SequentialBackend() -ranks = (2,2) -parts = get_part_ids(backend,ranks) +num_ranks = (2,2) +parts = with_debug() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end D = 2 n = 60 domain = Tuple(repeat([0,1],D)) -partition = (n,n) +mesh_partition = (n,n) # Serial -model = CartesianDiscreteModel(domain,partition) +model = CartesianDiscreteModel(domain,mesh_partition) main(model) # Distributed, sequential -model = CartesianDiscreteModel(parts,domain,partition) +model = CartesianDiscreteModel(parts,num_ranks,domain,mesh_partition) main(model) end \ No newline at end of file diff --git a/test/seq/SymGaussSeidelSmoothersTests.jl b/test/seq/SymGaussSeidelSmoothersTests.jl index e63aa740..203c2a03 100644 --- a/test/seq/SymGaussSeidelSmoothersTests.jl +++ b/test/seq/SymGaussSeidelSmoothersTests.jl @@ -48,17 +48,18 @@ function main(model) end # Completely serial -partition = (8,8) +mesh_partition = (8,8) domain = (0,1,0,1) -model = CartesianDiscreteModel(domain,partition) +model = CartesianDiscreteModel(domain,mesh_partition) @test main(model) # Sequential -backend = SequentialBackend() -ranks = (1,2) -parts = get_part_ids(backend,ranks) +num_ranks = (1,2) +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end -model = CartesianDiscreteModel(parts,domain,partition) +model = CartesianDiscreteModel(parts,num_ranks,domain,mesh_partition) @test main(model) end \ No newline at end of file From 7a91c5a2ceedc3a0e38010ff7e90524607bd1a24 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 22 Aug 2023 16:41:22 +1000 Subject: [PATCH 29/34] More fixes --- src/LinearSolvers/BlockDiagonalSmoothers.jl | 16 +++++++++------- src/LinearSolvers/SchurComplementSolvers.jl | 2 +- src/LinearSolvers/SymGaussSeidelSmoothers.jl | 16 +++++++--------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/LinearSolvers/BlockDiagonalSmoothers.jl b/src/LinearSolvers/BlockDiagonalSmoothers.jl index e088e69e..ace64723 100644 --- a/src/LinearSolvers/BlockDiagonalSmoothers.jl +++ b/src/LinearSolvers/BlockDiagonalSmoothers.jl @@ -65,7 +65,7 @@ function compute_block_ranges(blocks::AbstractMatrix...) end function compute_block_ranges(blocks::PSparseMatrix...) - _blocks = map(b -> b.owned_owned_values,blocks) + _blocks = map(b -> own_values(b),blocks) ranges = map_parts(_blocks...) do blocks... compute_block_ranges(blocks...) end @@ -141,11 +141,13 @@ end # TODO: The exchange could be optimized for sure by swapping the loop order... function to_blocks!(x::PVector,x_blocks,ranges) - x_blocks_owned = map(xi->xi.owned_values,x_blocks) - map_parts(x.owned_values,ranges,x_blocks_owned...) do x,ranges,x_blocks... + x_blocks_owned = map(xi->own_values(xi),x_blocks) + map_parts(own_values(x),ranges,x_blocks_owned...) do x,ranges,x_blocks... to_blocks!(x,x_blocks,ranges) end - map(exchange!,x_blocks) + map(x_blocks) do + consistent!(x) |> fetch + end return x_blocks end @@ -157,11 +159,11 @@ function to_global!(x::AbstractVector,x_blocks,ranges) end function to_global!(x::PVector,x_blocks,ranges) - x_blocks_owned = map(xi->xi.owned_values,x_blocks) - map_parts(x.owned_values,ranges,x_blocks_owned...) do x,ranges,x_blocks... + x_blocks_owned = map(xi->own_values(xi),x_blocks) + map_parts(own_values(x),ranges,x_blocks_owned...) do x,ranges,x_blocks... 
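+    # only the locally-owned entries are written here; ghost values are
+    # synchronized right afterwards via consistent!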
to_global!(x,x_blocks,ranges) end - exchange!(x) + consistent!(x) |> fetch return x end diff --git a/src/LinearSolvers/SchurComplementSolvers.jl b/src/LinearSolvers/SchurComplementSolvers.jl index 195f354d..f3ff1d74 100644 --- a/src/LinearSolvers/SchurComplementSolvers.jl +++ b/src/LinearSolvers/SchurComplementSolvers.jl @@ -78,7 +78,7 @@ function to_blocks!(x::AbstractVector,u,p,ranges) end function to_blocks!(x::PVector,u,p,ranges) - map_parts(x.owned_values,u.owned_values,p.owned_values,ranges) do x,u,p,ranges + map_parts(own_values(x),own_values(u),own_values(p),ranges) do x,u,p,ranges to_blocks!(x,u,p,ranges) end consistent!(u) |> fetch diff --git a/src/LinearSolvers/SymGaussSeidelSmoothers.jl b/src/LinearSolvers/SymGaussSeidelSmoothers.jl index 130d6c15..acfa98ed 100644 --- a/src/LinearSolvers/SymGaussSeidelSmoothers.jl +++ b/src/LinearSolvers/SymGaussSeidelSmoothers.jl @@ -68,7 +68,7 @@ function forward_sub!(L::LowerTriangular{Tv,Ti,<:SparseMatrixCSC},x::AbstractVec return x end -function forward_sub!(L::AbstractPData{<:LowerTriangular},x::PVector) +function forward_sub!(L::AbstractArray{<:LowerTriangular},x::PVector) map_parts(L,x.owned_values) do L, x forward_sub!(L, x) end @@ -90,7 +90,7 @@ function backward_sub!(U::UpperTriangular{Tv,Ti,<:SparseMatrixCSC}, x::AbstractV return x end -function backward_sub!(U::AbstractPData{<:UpperTriangular},x::PVector) +function backward_sub!(U::AbstractArray{<:UpperTriangular},x::PVector) map_parts(U,x.owned_values) do U, x backward_sub!(U, x) end @@ -125,9 +125,6 @@ function _gs_get_caches(A::AbstractMatrix) return dx, Adx end -_get_partition(A::PSparseMatrix,::Type{<:SparseMatrixCSC}) = A.cols.partition -_get_partition(A::PSparseMatrix,::Type{<:SparseMatrixCSR}) = A.rows.partition - function _gs_decompose_matrix(A::AbstractMatrix) idx_range = 1:minimum(size(A)) D = DiagonalIndices(A,idx_range) @@ -136,10 +133,11 @@ function _gs_decompose_matrix(A::AbstractMatrix) return L,U end -function _gs_decompose_matrix(A::PSparseMatrix{T,<:AbstractPData{MatType}}) where {T, MatType} - partition = _get_partition(A,MatType) - L,U = map_parts(A.values,partition) do A, partition - D = DiagonalIndices(A,partition.oid_to_lid) +function _gs_decompose_matrix(A::PSparseMatrix{T,<:AbstractArray{MatType}}) where {T, MatType} + values = partition(A) + indices = isa(PartitionedArrays.getany(values),SparseMatrixCSC) ? partition(axes(A,2)) : partition(axes(A,1)) + L,U = map_parts(values,indices) do A, indices + D = DiagonalIndices(A,own_to_local(indices)) L = LowerTriangular(A,D) U = UpperTriangular(A,D) return L,U From bb1fc7953f6e96c1ed236bed782a2103f610c0bb Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 22 Aug 2023 17:03:10 +1000 Subject: [PATCH 30/34] More fixes --- src/LinearSolvers/BlockDiagonalSmoothers.jl | 6 +++--- src/LinearSolvers/IterativeLinearSolvers.jl | 6 +++--- src/LinearSolvers/SchurComplementSolvers.jl | 6 +++--- src/LinearSolvers/SymGaussSeidelSmoothers.jl | 8 ++++---- test/seq/DistributedPatchFESpacesDebuggingTests.jl | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/LinearSolvers/BlockDiagonalSmoothers.jl b/src/LinearSolvers/BlockDiagonalSmoothers.jl index ace64723..55a96e2f 100644 --- a/src/LinearSolvers/BlockDiagonalSmoothers.jl +++ b/src/LinearSolvers/BlockDiagonalSmoothers.jl @@ -66,7 +66,7 @@ end function compute_block_ranges(blocks::PSparseMatrix...) _blocks = map(b -> own_values(b),blocks) - ranges = map_parts(_blocks...) do blocks... + ranges = map(_blocks...) do blocks... compute_block_ranges(blocks...) 
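+    # one ranges vector per part: indices are local to each part's owned dofs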
end return ranges @@ -142,7 +142,7 @@ end # TODO: The exchange could be optimized for sure by swapping the loop order... function to_blocks!(x::PVector,x_blocks,ranges) x_blocks_owned = map(xi->own_values(xi),x_blocks) - map_parts(own_values(x),ranges,x_blocks_owned...) do x,ranges,x_blocks... + map(own_values(x),ranges,x_blocks_owned...) do x,ranges,x_blocks... to_blocks!(x,x_blocks,ranges) end map(x_blocks) do @@ -160,7 +160,7 @@ end function to_global!(x::PVector,x_blocks,ranges) x_blocks_owned = map(xi->own_values(xi),x_blocks) - map_parts(own_values(x),ranges,x_blocks_owned...) do x,ranges,x_blocks... + map(own_values(x),ranges,x_blocks_owned...) do x,ranges,x_blocks... to_global!(x,x_blocks,ranges) end consistent!(x) |> fetch diff --git a/src/LinearSolvers/IterativeLinearSolvers.jl b/src/LinearSolvers/IterativeLinearSolvers.jl index 552dfa9f..49d5ff22 100644 --- a/src/LinearSolvers/IterativeLinearSolvers.jl +++ b/src/LinearSolvers/IterativeLinearSolvers.jl @@ -98,7 +98,7 @@ function IterativeSolvers.ssor_iterable(x::PVector, b::PVector, ω::Real; maxiter::Int = 10) - iterables = map_parts(x.owned_values,A.owned_owned_values,b.owned_values) do _xi,_Aii,_bi + iterables = map(own_values(x),own_values(A),own_values(b)) do _xi,_Aii,_bi xi = Vector(_xi) Aii = SparseMatrixCSC(_Aii) bi = Vector(_bi) @@ -168,13 +168,13 @@ function Gridap.Algebra.solve!(::SSORIterativeSolverType, ns::IterativeLinearSolverNS, y::PVector) iterables = ns.caches - map_parts(iterables,x.owned_values,y.owned_values) do iterable, xi, yi + map(iterables,own_values(x),own_values(y)) do iterable, xi, yi iterable.x .= xi iterable.b .= yi for item = iterable end xi .= iterable.x yi .= iterable.b end - exchange!(x) + consistent!(x) |> fetch return x end diff --git a/src/LinearSolvers/SchurComplementSolvers.jl b/src/LinearSolvers/SchurComplementSolvers.jl index f3ff1d74..67170d9c 100644 --- a/src/LinearSolvers/SchurComplementSolvers.jl +++ b/src/LinearSolvers/SchurComplementSolvers.jl @@ -56,7 +56,7 @@ function get_block_ranges(B::AbstractMatrix,C::AbstractMatrix) end function get_block_ranges(B::PSparseMatrix,C::PSparseMatrix) - ranges = map_parts(B.owned_owned_values,C.owned_owned_values) do B,C + ranges = map(own_values(B),own_values(C)) do B,C get_block_ranges(B,C) end return ranges @@ -78,7 +78,7 @@ function to_blocks!(x::AbstractVector,u,p,ranges) end function to_blocks!(x::PVector,u,p,ranges) - map_parts(own_values(x),own_values(u),own_values(p),ranges) do x,u,p,ranges + map(own_values(x),own_values(u),own_values(p),ranges) do x,u,p,ranges to_blocks!(x,u,p,ranges) end consistent!(u) |> fetch @@ -94,7 +94,7 @@ function to_global!(x::AbstractVector,u,p,ranges) end function to_global!(x::PVector,u,p,ranges) - map_parts(x.owned_values,u.owned_values,p.owned_values,ranges) do x,u,p,ranges + map(owned_values(x),owned_values(u),owned_values(p),ranges) do x,u,p,ranges to_global!(x,u,p,ranges) end consistent!(x) |> fetch diff --git a/src/LinearSolvers/SymGaussSeidelSmoothers.jl b/src/LinearSolvers/SymGaussSeidelSmoothers.jl index acfa98ed..841eac96 100644 --- a/src/LinearSolvers/SymGaussSeidelSmoothers.jl +++ b/src/LinearSolvers/SymGaussSeidelSmoothers.jl @@ -69,7 +69,7 @@ function forward_sub!(L::LowerTriangular{Tv,Ti,<:SparseMatrixCSC},x::AbstractVec end function forward_sub!(L::AbstractArray{<:LowerTriangular},x::PVector) - map_parts(L,x.owned_values) do L, x + map(L,own_values(x)) do L, x forward_sub!(L, x) end end @@ -91,7 +91,7 @@ function backward_sub!(U::UpperTriangular{Tv,Ti,<:SparseMatrixCSC}, x::AbstractV end 
function backward_sub!(U::AbstractArray{<:UpperTriangular},x::PVector) - map_parts(U,x.owned_values) do U, x + map(U,own_values(x)) do U, x backward_sub!(U, x) end end @@ -136,12 +136,12 @@ end function _gs_decompose_matrix(A::PSparseMatrix{T,<:AbstractArray{MatType}}) where {T, MatType} values = partition(A) indices = isa(PartitionedArrays.getany(values),SparseMatrixCSC) ? partition(axes(A,2)) : partition(axes(A,1)) - L,U = map_parts(values,indices) do A, indices + L,U = map(values,indices) do A, indices D = DiagonalIndices(A,own_to_local(indices)) L = LowerTriangular(A,D) U = UpperTriangular(A,D) return L,U - end + end |> tuple_of_arrays return L,U end diff --git a/test/seq/DistributedPatchFESpacesDebuggingTests.jl b/test/seq/DistributedPatchFESpacesDebuggingTests.jl index bea13993..a88f390b 100644 --- a/test/seq/DistributedPatchFESpacesDebuggingTests.jl +++ b/test/seq/DistributedPatchFESpacesDebuggingTests.jl @@ -207,7 +207,7 @@ for key in keys(res_single) val_m = res_multi[key] println(">>> ", key) - map(val_s.values) do v + map(partition(val_s)) do v println(transpose(v)) end; map(own_values(val_m)) do v From 2dd5815cd756e7aa736341cfb0f22f311f46fd80 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 22 Aug 2023 17:46:11 +1000 Subject: [PATCH 31/34] All MPI tests run --- test/mpi/RichardsonSmoothersTests.jl | 52 +++++++++++++++------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl index d6eaf1d2..16074250 100644 --- a/test/mpi/RichardsonSmoothersTests.jl +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -6,6 +6,7 @@ using Gridap using GridapDistributed using PartitionedArrays using IterativeSolvers +using GridapP4est using GridapSolvers using GridapSolvers.LinearSolvers @@ -15,27 +16,27 @@ function main(parts,nranks,domain_partition) domain = (0,1,0,1) model = CartesianDiscreteModel(parts,nranks,domain,domain_partition) - sol(x) = x[1] + x[2] - f(x) = -Δ(sol)(x) + sol(x) = x[1] + x[2] + f(x) = -Δ(sol)(x) - order = 1 - qorder = order*2 + 1 - reffe = ReferenceFE(lagrangian,Float64,order) - Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") - Uh = TrialFESpace(Vh,sol) - u = interpolate(sol,Uh) + order = 1 + qorder = order*2 + 1 + reffe = ReferenceFE(lagrangian,Float64,order) + Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") + Uh = TrialFESpace(Vh,sol) + u = interpolate(sol,Uh) - Ω = Triangulation(model) - dΩ = Measure(Ω,qorder) - a(u,v) = ∫(∇(v)⋅∇(u))*dΩ - l(v) = ∫(v⋅f)*dΩ + Ω = Triangulation(model) + dΩ = Measure(Ω,qorder) + a(u,v) = ∫(∇(v)⋅∇(u))*dΩ + l(v) = ∫(v⋅f)*dΩ - op = AffineFEOperator(a,l,Uh,Vh) - A, b = get_matrix(op), get_vector(op) + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op) - P = RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0) - ss = symbolic_setup(P,A) - ns = numerical_setup(ss,A) + P = RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0) + ss = symbolic_setup(P,A) + ns = numerical_setup(ss,A) x = pfill(1.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; @@ -44,15 +45,16 @@ function main(parts,nranks,domain_partition) Pl=ns, log=true) - u = interpolate(sol,Uh) - uh = FEFunction(Uh,x) - eh = uh - u - E = sum(∫(eh*eh)*dΩ) - if i_am_main(parts) - println("L2 Error: ", E) + u = interpolate(sol,Uh) + uh = FEFunction(Uh,x) + eh = uh - u + E = sum(∫(eh*eh)*dΩ) + if i_am_main(parts) + println("L2 Error: ", E) + end + + @test E < 1.e-8 end - - @test E < 1.e-8 end domain_partition = (32,32) From 
d1d3664b110d09934dbc93dd8416c615de607da4 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 22 Aug 2023 18:01:02 +1000 Subject: [PATCH 32/34] Sequential tests run --- src/LinearSolvers/BlockDiagonalSmoothers.jl | 2 +- src/LinearSolvers/SchurComplementSolvers.jl | 2 +- test/seq/BlockDiagonalSmoothersTests.jl | 2 +- test/seq/GMRESSolversTests.jl | 2 +- test/seq/IterativeSolversTests.jl | 2 +- test/seq/PatchBasedTesting.jl | 11 ++++++----- test/seq/SymGaussSeidelSmoothersTests.jl | 2 +- 7 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/LinearSolvers/BlockDiagonalSmoothers.jl b/src/LinearSolvers/BlockDiagonalSmoothers.jl index 55a96e2f..e9125bd5 100644 --- a/src/LinearSolvers/BlockDiagonalSmoothers.jl +++ b/src/LinearSolvers/BlockDiagonalSmoothers.jl @@ -145,7 +145,7 @@ function to_blocks!(x::PVector,x_blocks,ranges) map(own_values(x),ranges,x_blocks_owned...) do x,ranges,x_blocks... to_blocks!(x,x_blocks,ranges) end - map(x_blocks) do + map(x_blocks) do x consistent!(x) |> fetch end return x_blocks diff --git a/src/LinearSolvers/SchurComplementSolvers.jl b/src/LinearSolvers/SchurComplementSolvers.jl index 67170d9c..ebfde134 100644 --- a/src/LinearSolvers/SchurComplementSolvers.jl +++ b/src/LinearSolvers/SchurComplementSolvers.jl @@ -94,7 +94,7 @@ function to_global!(x::AbstractVector,u,p,ranges) end function to_global!(x::PVector,u,p,ranges) - map(owned_values(x),owned_values(u),owned_values(p),ranges) do x,u,p,ranges + map(own_values(x),own_values(u),own_values(p),ranges) do x,u,p,ranges to_global!(x,u,p,ranges) end consistent!(x) |> fetch diff --git a/test/seq/BlockDiagonalSmoothersTests.jl b/test/seq/BlockDiagonalSmoothersTests.jl index 1b0638c6..6b8fd511 100644 --- a/test/seq/BlockDiagonalSmoothersTests.jl +++ b/test/seq/BlockDiagonalSmoothersTests.jl @@ -100,7 +100,7 @@ function main(model,single_proc::Bool) end num_ranks = (2,2) -parts = with_mpi() do distribute +parts = with_debug() do distribute distribute(LinearIndices((prod(num_ranks),))) end diff --git a/test/seq/GMRESSolversTests.jl b/test/seq/GMRESSolversTests.jl index f8eee18b..146d5e17 100644 --- a/test/seq/GMRESSolversTests.jl +++ b/test/seq/GMRESSolversTests.jl @@ -50,7 +50,7 @@ model = CartesianDiscreteModel(domain,mesh_partition) # Sequential num_ranks = (1,2) -parts = with_mpi() do distribute +parts = with_debug() do distribute distribute(LinearIndices((prod(num_ranks),))) end diff --git a/test/seq/IterativeSolversTests.jl b/test/seq/IterativeSolversTests.jl index bdaf61a0..6177429f 100644 --- a/test/seq/IterativeSolversTests.jl +++ b/test/seq/IterativeSolversTests.jl @@ -87,7 +87,7 @@ main(model,false) # Sequential num_ranks = (1,2) -parts = with_mpi() do distribute +parts = with_debug() do distribute distribute(LinearIndices((prod(num_ranks),))) end diff --git a/test/seq/PatchBasedTesting.jl b/test/seq/PatchBasedTesting.jl index 042ce713..d4653a3a 100644 --- a/test/seq/PatchBasedTesting.jl +++ b/test/seq/PatchBasedTesting.jl @@ -14,13 +14,14 @@ using FillArrays using GridapSolvers import GridapSolvers.PatchBasedSmoothers as PBS -backend = SequentialBackend() -ranks = (1,2) -parts = get_part_ids(backend,ranks) +num_ranks = (1,2) +parts = with_debug() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end domain = (0.0,1.0,0.0,1.0) -partition = (2,4) -model = CartesianDiscreteModel(domain,partition) +mesh_partition = (2,4) +model = CartesianDiscreteModel(domain,mesh_partition) order = 1; reffe = ReferenceFE(lagrangian,Float64,order;space=:P); conformity = L2Conformity(); #order = 1; reffe = 
ReferenceFE(lagrangian,Float64,order); conformity = H1Conformity(); diff --git a/test/seq/SymGaussSeidelSmoothersTests.jl b/test/seq/SymGaussSeidelSmoothersTests.jl index 203c2a03..e29a7864 100644 --- a/test/seq/SymGaussSeidelSmoothersTests.jl +++ b/test/seq/SymGaussSeidelSmoothersTests.jl @@ -55,7 +55,7 @@ model = CartesianDiscreteModel(domain,mesh_partition) # Sequential num_ranks = (1,2) -parts = with_mpi() do distribute +parts = with_debug() do distribute distribute(LinearIndices((prod(num_ranks),))) end From 21296d34ab6cea9ef0dffa48703596269fcba448 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 30 Aug 2023 10:35:15 +1000 Subject: [PATCH 33/34] BlockDiagonalSmoothers now requires block assembly --- src/LinearSolvers/BlockDiagonalSmoothers.jl | 180 +++---------------- src/LinearSolvers/Helpers.jl | 97 +--------- src/LinearSolvers/SchurComplementSolvers.jl | 93 +++------- test/seq/BlockDiagonalSmoothersPETScTests.jl | 86 --------- test/seq/BlockDiagonalSmoothersTests.jl | 113 +++++++----- test/seq/SchurComplementSolversTests.jl | 29 +-- 6 files changed, 124 insertions(+), 474 deletions(-) delete mode 100644 test/seq/BlockDiagonalSmoothersPETScTests.jl diff --git a/src/LinearSolvers/BlockDiagonalSmoothers.jl b/src/LinearSolvers/BlockDiagonalSmoothers.jl index e9125bd5..8e34511e 100644 --- a/src/LinearSolvers/BlockDiagonalSmoothers.jl +++ b/src/LinearSolvers/BlockDiagonalSmoothers.jl @@ -1,40 +1,29 @@ -struct BlockDiagonalSmoother{A,B,C} <: Gridap.Algebra.LinearSolver - num_blocks :: Int32 - ranges :: A - blocks :: B - solvers :: C - - function BlockDiagonalSmoother(ranges,blocks,solvers) - num_blocks = length(blocks) - - A = typeof(ranges) - B = typeof(blocks) - C = typeof(solvers) - return new{A,B,C}(num_blocks,ranges,blocks,solvers) +struct BlockDiagonalSmoother{A,B} <: Gridap.Algebra.LinearSolver + blocks :: A + solvers :: B + function BlockDiagonalSmoother(blocks ::AbstractArray{<:AbstractMatrix}, + solvers::AbstractArray{<:Gridap.Algebra.LinearSolver}) + @check length(blocks) == length(solvers) + A = typeof(blocks) + B = typeof(solvers) + return new{A,B}(blocks,solvers) end end # Constructors -function BlockDiagonalSmoother(blocks :: AbstractArray{<:AbstractMatrix}, - solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}) - ranges = compute_block_ranges(blocks...) - return BlockDiagonalSmoother(ranges,blocks,solvers) -end - -function BlockDiagonalSmoother(block_mat :: BlockMatrix, +function BlockDiagonalSmoother(block_mat :: AbstractBlockMatrix, solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}) - blocks = [block_mat[Block(i,i)] for i in 1:length(solvers)] - ranges = compute_block_ranges(blocks...) 
- return BlockDiagonalSmoother(ranges,blocks,solvers) + mat_blocks = diag(blocks(block_mat)) + return BlockDiagonalSmoother(mat_blocks,solvers) end function BlockDiagonalSmoother(biforms :: AbstractArray{<:Function}, trials :: AbstractArray{<:FESpace}, tests :: AbstractArray{<:FESpace}, solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}) - blocks = compute_block_matrices(biforms,trials,tests) - return BlockDiagonalSmoother(blocks,solvers) + mat_blocks = compute_block_matrices(biforms,trials,tests) + return BlockDiagonalSmoother(mat_blocks,solvers) end function BlockDiagonalSmoother(biforms :: AbstractArray{<:Function}, @@ -44,52 +33,12 @@ function BlockDiagonalSmoother(biforms :: AbstractArray{<:Function}, return BlockDiagonalSmoother(biforms,[U...],[V...],solvers) end -function BlockDiagonalSmoother(A :: AbstractMatrix, - ranges :: AbstractArray{<:AbstractRange}, - solvers :: AbstractArray{<:Gridap.Algebra.LinearSolver}; - lazy_mode=false) - blocks = extract_diagonal_blocks(A,ranges;lazy_mode=lazy_mode) - return BlockDiagonalSmoother(ranges,blocks,solvers) -end - -# Computing blocks and ranges - -function compute_block_ranges(blocks::AbstractMatrix...) - num_blocks = length(blocks) - ranges = Vector{AbstractRange}(undef,num_blocks) - ranges[1] = 1:size(blocks[1],2) - for i in 2:num_blocks - ranges[i] = size(blocks[i-1],2) .+ (1:size(blocks[i],2)) - end - return ranges -end - -function compute_block_ranges(blocks::PSparseMatrix...) - _blocks = map(b -> own_values(b),blocks) - ranges = map(_blocks...) do blocks... - compute_block_ranges(blocks...) - end - return ranges -end - function compute_block_matrices(biforms :: AbstractArray{<:Function}, trials :: AbstractArray{<:FESpace}, tests :: AbstractArray{<:FESpace}) @check length(biforms) == length(tests) == length(trials) - - blocks = map(assemble_matrix,biforms,tests,trials) - return blocks -end - -function extract_diagonal_blocks(A::AbstractMatrix,ranges;lazy_mode=false) - blocks = map(ranges) do range - if lazy_mode - view(A,range,range) - else - A[range,range] - end - end - return blocks + mat_blocks = map(assemble_matrix,biforms,tests,trials) + return mat_blocks end # Symbolic and numerical setup @@ -103,111 +52,26 @@ function Gridap.Algebra.symbolic_setup(solver::BlockDiagonalSmoother,mat::Abstra return BlockDiagonalSmootherSS(solver,block_ss) end -struct BlockDiagonalSmootherNS{A,B,C} <: Gridap.Algebra.NumericalSetup +struct BlockDiagonalSmootherNS{A,B} <: Gridap.Algebra.NumericalSetup solver :: A block_ns :: B - caches :: C end function Gridap.Algebra.numerical_setup(ss::BlockDiagonalSmootherSS,mat::AbstractMatrix) solver = ss.solver block_ns = map(numerical_setup,ss.block_ss,solver.blocks) - caches = _get_block_diagonal_smoothers_caches(solver.blocks,mat) - return BlockDiagonalSmootherNS(solver,block_ns,caches) -end - -function _get_block_diagonal_smoothers_caches(blocks,mat) - return nothing -end - -function _get_block_diagonal_smoothers_caches(blocks::AbstractArray{<:PSparseMatrix},mat::PSparseMatrix) - x_blocks = map(bi->allocate_col_vector(bi),blocks) - b_blocks = map(bi->allocate_col_vector(bi),blocks) - return x_blocks,b_blocks -end - -function _get_block_diagonal_smoothers_caches(blocks::AbstractArray{<:PSparseMatrix},mat::BlockMatrix) - return nothing + return BlockDiagonalSmootherNS(solver,block_ns) end # Solve -function to_blocks!(x::AbstractVector,x_blocks,ranges) - map(ranges,x_blocks) do range,x_block - x_block .= x[range] - end - return x_blocks -end - -# TODO: The exchange could be optimized for sure by swapping 
the loop order... -function to_blocks!(x::PVector,x_blocks,ranges) - x_blocks_owned = map(xi->own_values(xi),x_blocks) - map(own_values(x),ranges,x_blocks_owned...) do x,ranges,x_blocks... - to_blocks!(x,x_blocks,ranges) - end - map(x_blocks) do x - consistent!(x) |> fetch - end - return x_blocks -end - -function to_global!(x::AbstractVector,x_blocks,ranges) - map(ranges,x_blocks) do range,x_block - x[range] .= x_block - end - return x -end - -function to_global!(x::PVector,x_blocks,ranges) - x_blocks_owned = map(xi->own_values(xi),x_blocks) - map(own_values(x),ranges,x_blocks_owned...) do x,ranges,x_blocks... - to_global!(x,x_blocks,ranges) - end - consistent!(x) |> fetch - return x -end - -# Solve for serial vectors -function Gridap.Algebra.solve!(x::AbstractVector,ns::BlockDiagonalSmootherNS,b::AbstractVector) - solver, block_ns = ns.solver, ns.block_ns - num_blocks, ranges = solver.num_blocks, solver.ranges - for iB in 1:num_blocks - xi = view(x,ranges[iB]) - bi = view(b,ranges[iB]) - solve!(xi,block_ns[iB],bi) - end - return x -end - -# Solve for PVectors (parallel) -function Gridap.Algebra.solve!(x::PVector,ns::BlockDiagonalSmootherNS,b::PVector) - solver, block_ns, caches = ns.solver, ns.block_ns, ns.caches - num_blocks, ranges = solver.num_blocks, solver.ranges - x_blocks, b_blocks = caches - - to_blocks!(x,x_blocks,ranges) - to_blocks!(b,b_blocks,ranges) - for iB in 1:num_blocks - xi = x_blocks[iB] - bi = b_blocks[iB] - solve!(xi,block_ns[iB],bi) - end - to_global!(x,x_blocks,ranges) - return x -end - -# Solve for BlockVectors (serial & parallel) -function Gridap.Algebra.solve!(x::BlockVector,ns::BlockDiagonalSmootherNS,b::BlockVector) - solver, block_ns = ns.solver, ns.block_ns - num_blocks = solver.num_blocks - - @check blocklength(x) == blocklength(b) == num_blocks - for iB in 1:num_blocks +function Gridap.Algebra.solve!(x::AbstractBlockVector,ns::BlockDiagonalSmootherNS,b::AbstractBlockVector) + @check blocklength(x) == blocklength(b) == length(ns.block_ns) + for (iB,bns) in enumerate(ns.block_ns) xi = x[Block(iB)] bi = b[Block(iB)] - solve!(xi,block_ns[iB],bi) + solve!(xi,bns,bi) end - return x end diff --git a/src/LinearSolvers/Helpers.jl b/src/LinearSolvers/Helpers.jl index 7b48005e..013ee4f9 100644 --- a/src/LinearSolvers/Helpers.jl +++ b/src/LinearSolvers/Helpers.jl @@ -20,103 +20,10 @@ function allocate_col_vector(A::PSparseMatrix) end # Row/Col vector allocations for blocks -function allocate_row_vector(A::BlockMatrix) +function allocate_row_vector(A::AbstractBlockMatrix) return mortar(map(Aii->allocate_row_vector(Aii),blocks(A)[:,1])) end -function allocate_col_vector(A::BlockMatrix) +function allocate_col_vector(A::AbstractBlockMatrix) return mortar(map(Aii->allocate_col_vector(Aii),blocks(A)[1,:])) end - -# BlockArrays of PVectors/PSparseMatrices - -const BlockPVector{T} = BlockVector{T,<:Vector{<:PVector{T}}} -const BlockPSparseMatrix{T,V} = BlockMatrix{T,<:Matrix{<:PSparseMatrix{V}}} - -# BlockVector algebra -function LinearAlgebra.mul!(y::BlockVector,A::BlockMatrix,x::BlockVector) - o = one(eltype(A)) - for i in blockaxes(A,2) - fill!(y[i],0.0) - for j in blockaxes(A,2) - mul!(y[i],A[i,j],x[j],o,o) - end - end -end - -function LinearAlgebra.dot(x::BlockPVector,y::BlockPVector) - return sum(map(dot,blocks(x),blocks(y))) -end - -function Base.zero(v::BlockPVector) - return mortar(map(zero,blocks(v))) -end - -function Base.similar(v::BlockPVector) - return mortar(map(similar,blocks(v))) -end - -function LinearAlgebra.norm(v::BlockPVector) - block_norms = 
map(norm,blocks(v)) - return sqrt(sum(block_norms.^2)) -end - -function Base.copyto!(y::BlockPVector,x::BlockPVector) - @check blocklength(x) == blocklength(y) - for i in blockaxes(x,1) - copyto!(y[i],x[i]) - end -end - -# BlockVector Broadcasting for PVectors - -struct BlockPBroadcasted{A,B} - blocks :: A - axes :: B -end - -BlockArrays.blocks(b::BlockPBroadcasted) = b.blocks -BlockArrays.blockaxes(b::BlockPBroadcasted) = b.axes - -function Base.broadcasted(f, args::Union{BlockPVector,BlockPBroadcasted}...) - a1 = first(args) - @boundscheck @assert all(ai -> blockaxes(ai) == blockaxes(a1),args) - - blocks_in = map(blocks,args) - blocks_out = map((largs...)->Base.broadcasted(f,largs...),blocks_in...) - - return BlockPBroadcasted(blocks_out,blockaxes(a1)) -end - -function Base.broadcasted(f, a::Number, b::Union{BlockPVector,BlockPBroadcasted}) - blocks_out = map(b->Base.broadcasted(f,a,b),blocks(b)) - return BlockPBroadcasted(blocks_out,blockaxes(b)) -end - -function Base.broadcasted(f, a::Union{BlockPVector,BlockPBroadcasted}, b::Number) - blocks_out = map(a->Base.broadcasted(f,a,b),blocks(a)) - return BlockPBroadcasted(blocks_out,blockaxes(a)) -end - -function Base.broadcasted(f, - a::Union{BlockPVector,BlockPBroadcasted}, - b::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}}) - Base.broadcasted(f,a,Base.materialize(b)) -end - -function Base.broadcasted( - f, - a::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}}, - b::Union{BlockPVector,BlockPBroadcasted}) - Base.broadcasted(f,Base.materialize(a),b) -end - -function Base.materialize(b::BlockPBroadcasted) - blocks_out = map(Base.materialize,blocks(b)) - return mortar(blocks_out) -end - -function Base.materialize!(a::BlockPVector,b::BlockPBroadcasted) - map(Base.materialize!,blocks(a),blocks(b)) - return a -end diff --git a/src/LinearSolvers/SchurComplementSolvers.jl b/src/LinearSolvers/SchurComplementSolvers.jl index ebfde134..d6ae9e5f 100644 --- a/src/LinearSolvers/SchurComplementSolvers.jl +++ b/src/LinearSolvers/SchurComplementSolvers.jl @@ -32,93 +32,42 @@ function Gridap.Algebra.symbolic_setup(s::SchurComplementSolver,A::AbstractMatri SchurComplementSymbolicSetup(s) end -struct SchurComplementNumericalSetup <: Gridap.Algebra.NumericalSetup - solver - mat - ranges - caches +struct SchurComplementNumericalSetup{A,B,C} <: Gridap.Algebra.NumericalSetup + solver::A + mat ::B + caches::C end function get_shur_complement_caches(B::AbstractMatrix,C::AbstractMatrix) - du1 = allocate_col_vector(C) - du2 = allocate_col_vector(C) - dp = allocate_col_vector(B) - - rv_u = allocate_row_vector(B) - rv_p = allocate_row_vector(C) - return (du1,du2,dp,rv_u,rv_p) -end - -function get_block_ranges(B::AbstractMatrix,C::AbstractMatrix) - u_range = 1:size(C,2) - p_range = size(C,2) .+ (1:size(B,2)) - return u_range, p_range -end - -function get_block_ranges(B::PSparseMatrix,C::PSparseMatrix) - ranges = map(own_values(B),own_values(C)) do B,C - get_block_ranges(B,C) - end - return ranges + du = allocate_col_vector(C) + bu = allocate_col_vector(C) + bp = allocate_col_vector(B) + return du,bu,bp end function Gridap.Algebra.numerical_setup(ss::SchurComplementSymbolicSetup,mat::AbstractMatrix) s = ss.solver - B,C = s.B, s.C - ranges = compute_block_ranges(C,B) - caches = get_shur_complement_caches(B,C) - return SchurComplementNumericalSetup(s,mat,ranges,caches) -end - -function to_blocks!(x::AbstractVector,u,p,ranges) - u_range, p_range = ranges - u .= x[u_range] - p .= x[p_range] - return u,p -end - -function 
to_blocks!(x::PVector,u,p,ranges)
-  map(own_values(x),own_values(u),own_values(p),ranges) do x,u,p,ranges
-    to_blocks!(x,u,p,ranges)
-  end
-  consistent!(u) |> fetch
-  consistent!(p) |> fetch
-  return u,p
-end
-
-function to_global!(x::AbstractVector,u,p,ranges)
-  u_range, p_range = ranges
-  x[u_range] .= u
-  x[p_range] .= p
-  return x
-end
-
-function to_global!(x::PVector,u,p,ranges)
-  map(own_values(x),own_values(u),own_values(p),ranges) do x,u,p,ranges
-    to_global!(x,u,p,ranges)
-  end
-  consistent!(x) |> fetch
-  return x
+function Gridap.Algebra.numerical_setup(ss::SchurComplementSymbolicSetup,mat::AbstractMatrix)
+  s = ss.solver
+  caches = get_shur_complement_caches(s.B,s.C)
+  return SchurComplementNumericalSetup(s,mat,caches)
 end
 
-function Gridap.Algebra.solve!(x::AbstractVector,ns::SchurComplementNumericalSetup,y::AbstractVector)
+function Gridap.Algebra.solve!(x::AbstractBlockVector,ns::SchurComplementNumericalSetup,y::AbstractBlockVector)
   s = ns.solver
   A,B,C,S = s.A,s.B,s.C,s.S
-  du1,du2,dp,rv_u,rv_p = ns.caches
+  du,bu,bp = ns.caches
 
-  # Split y into blocks
-  to_blocks!(y,rv_u,rv_p,ns.ranges)
+  @check blocklength(x) == blocklength(y) == 2
+  y_u = y[Block(1)]; y_p = y[Block(2)]
+  x_u = x[Block(1)]; x_p = x[Block(2)]
 
   # Solve Schur complement
-  solve!(du1,A,rv_u)        # du1 = A^-1 y_u
-  mul!(rv_p,C,du1,1.0,-1.0) # b1 = C*du1 - y_p
-  solve!(dp,S,rv_p)         # dp = S^-1 b1
-  mul!(rv_u,B,dp)           # b2 = B*dp
-  solve!(du2,A,rv_u)        # du2 = A^-1 b2
-  du1 .-= du2               # du = du1 - du2
+  solve!(x_u,A,y_u)                        # x_u = A^-1 y_u
+  copy!(bp,y_p); mul!(bp,C,x_u,1.0,-1.0)   # bp = C*(A^-1 y_u) - y_p
+  solve!(x_p,S,bp)                         # x_p = S^-1 bp
 
-  # Assemble into global
-  to_global!(x,du1,dp,ns.ranges)
+  mul!(bu,B,x_p)  # bu = B*x_p
+  solve!(du,A,bu) # du = A^-1 bu
+  x_u .-= du      # x_u = x_u - du
 
   return x
 end
diff --git a/test/seq/BlockDiagonalSmoothersPETScTests.jl b/test/seq/BlockDiagonalSmoothersPETScTests.jl
deleted file mode 100644
index bd25a70c..00000000
--- a/test/seq/BlockDiagonalSmoothersPETScTests.jl
+++ /dev/null
@@ -1,86 +0,0 @@
-module BlockDiagonalSmoothersPETScTests
-
-using Test
-using Gridap
-using Gridap.MultiField
-using BlockArrays
-using LinearAlgebra
-using FillArrays
-using IterativeSolvers
-
-using GridapPETSc
-
-using GridapSolvers
-
-function set_ksp_options(ksp)
-  pc = Ref{GridapPETSc.PETSC.PC}()
-  mumpsmat = Ref{GridapPETSc.PETSC.Mat}()
-  @check_error_code GridapPETSc.PETSC.KSPView(ksp[],C_NULL)
-  @check_error_code GridapPETSc.PETSC.KSPSetType(ksp[],GridapPETSc.PETSC.KSPPREONLY)
-  @check_error_code GridapPETSc.PETSC.KSPGetPC(ksp[],pc)
-  @check_error_code GridapPETSc.PETSC.PCSetType(pc[],GridapPETSc.PETSC.PCLU)
-  @check_error_code GridapPETSc.PETSC.PCFactorSetMatSolverType(pc[],GridapPETSc.PETSC.MATSOLVERMUMPS)
-  @check_error_code GridapPETSc.PETSC.PCFactorSetUpMatSolverType(pc[])
-  @check_error_code GridapPETSc.PETSC.PCFactorGetMatrix(pc[],mumpsmat)
-  @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 4, 1)
-  # percentage increase in the estimated working space
-  @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 14, 1000)
-  @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2)
-  @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2)
-  @check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6)
-end
-
-u(x) = VectorValue(x[1],x[2])
-f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2]))
-
-p(x) = x[1] + x[2]
-g(x) = -Δ(p)(x)
-
-GridapPETSc.with() do
-  D = 2
-  n = 10
-  domain = Tuple(repeat([0,1],D))
-  partition = (n,n)
-  model = CartesianDiscreteModel(domain,partition)
-
-  order = 2
-  reffeᵤ = 
ReferenceFE(lagrangian,VectorValue{D,Float64},order) - V = TestFESpace(model,reffeᵤ,conformity=:H1,dirichlet_tags=["boundary"]) - - reffeₚ = ReferenceFE(lagrangian,Float64,order) - Q = TestFESpace(model,reffeₚ,conformity=:H1,dirichlet_tags=["boundary"]) - - U = TrialFESpace(V,u) - P = TrialFESpace(Q,p) - - Y = MultiFieldFESpace([V, Q]) - X = MultiFieldFESpace([U, P]) - - degree = 2*(order + 1) - Ω = Triangulation(model) - dΩ = Measure(Ω,degree) - - a((u,p),(v,q)) = ∫( v⊙u + ∇(v)⊙∇(u) + q⋅p + ∇(q)⊙∇(p))dΩ - l((v,q)) = ∫( v⋅f + q⋅g)dΩ - - op = AffineFEOperator(a,l,X,Y) - A,b = get_matrix(op), get_vector(op) - xh_star = solve(op) - x_star = get_free_dof_values(xh_star) - - dof_ids = get_free_dof_ids(X) - ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) - solvers = Fill(PETScLinearSolver(set_ksp_options),2) - - BDS = BlockDiagonalSmoother(A,ranges,solvers;lazy_mode=true) - BDSss = symbolic_setup(BDS,A) - BDSns = numerical_setup(BDSss,A) - - x = get_free_dof_values(zero(X)) - x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) - - println("Error: ",norm(x-x_star)) - @test norm(x-x_star) < 1.0e-10 -end - -end \ No newline at end of file diff --git a/test/seq/BlockDiagonalSmoothersTests.jl b/test/seq/BlockDiagonalSmoothersTests.jl index 6b8fd511..a8a98869 100644 --- a/test/seq/BlockDiagonalSmoothersTests.jl +++ b/test/seq/BlockDiagonalSmoothersTests.jl @@ -11,6 +11,9 @@ using PartitionedArrays using GridapDistributed using GridapSolvers +using GridapPETSc + +using GridapDistributed: BlockPVector, BlockPMatrix u(x) = VectorValue(x[1],x[2]) f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) @@ -18,7 +21,57 @@ f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) p(x) = x[1] + x[2] g(x) = -Δ(p)(x) -function main(model,single_proc::Bool) +function set_ksp_options(ksp) + pc = Ref{GridapPETSc.PETSC.PC}() + mumpsmat = Ref{GridapPETSc.PETSC.Mat}() + @check_error_code GridapPETSc.PETSC.KSPView(ksp[],C_NULL) + @check_error_code GridapPETSc.PETSC.KSPSetType(ksp[],GridapPETSc.PETSC.KSPPREONLY) + @check_error_code GridapPETSc.PETSC.KSPGetPC(ksp[],pc) + @check_error_code GridapPETSc.PETSC.PCSetType(pc[],GridapPETSc.PETSC.PCLU) + @check_error_code GridapPETSc.PETSC.PCFactorSetMatSolverType(pc[],GridapPETSc.PETSC.MATSOLVERMUMPS) + @check_error_code GridapPETSc.PETSC.PCFactorSetUpMatSolverType(pc[]) + @check_error_code GridapPETSc.PETSC.PCFactorGetMatrix(pc[],mumpsmat) + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 4, 1) + # percentage increase in the estimated working space + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 14, 1000) + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2) + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2) + @check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6) +end + +function _is_same_vector(x1,x2,X1,X2) + res = true + for i in 1:length(X1) + x1i = restrict_to_field(X1,x1,i) + x2i = restrict_to_field(X2,x2,i) + res = res & (norm(x1i-x2i) < 1.e-5) + end + return res +end + +function is_same_vector(x1::BlockVector,x2,X1,X2) + _is_same_vector(x1,x2,X1,X2) +end + +function is_same_vector(x1::BlockPVector,x2,X1,X2) + _x1 = GridapDistributed.change_ghost(x1,X1.gids;make_consistent=true) + _x2 = GridapDistributed.change_ghost(x2,X2.gids;make_consistent=true) + _is_same_vector(_x1,_x2,X1,X2) +end + +function main(model,use_petsc::Bool) + if use_petsc + GridapPETSc.with() do + solvers = Fill(PETScLinearSolver(set_ksp_options),2) + main(model,solvers) 
+ end + else + solvers = Fill(BackslashSolver(),2) + main(model,solvers) + end +end + +function main(model,solvers) order = 2 reffeᵤ = ReferenceFE(lagrangian,VectorValue{D,Float64},order) V = TestFESpace(model,reffeᵤ,conformity=:H1,dirichlet_tags=["boundary"]) @@ -29,74 +82,48 @@ function main(model,single_proc::Bool) U = TrialFESpace(V,u) P = TrialFESpace(Q,p) - Y = MultiFieldFESpace([V, Q]) - X = MultiFieldFESpace([U, P]) + Y = MultiFieldFESpace([V,Q]) + X = MultiFieldFESpace([U,P]) + + mfs = BlockMultiFieldStyle() + Yb = MultiFieldFESpace([V,Q];style=mfs) + Xb = MultiFieldFESpace([U,P];style=mfs) degree = 2*(order + 1) Ω = Triangulation(model) dΩ = Measure(Ω,degree) - # Global problem a((u,p),(v,q)) = ∫( v⊙u + ∇(v)⊙∇(u) + q⋅p + ∇(q)⊙∇(p))dΩ l((v,q)) = ∫( v⋅f + q⋅g)dΩ op = AffineFEOperator(a,l,X,Y) - A,b = get_matrix(op), get_vector(op); - xh_star = solve(op) - x_star = get_free_dof_values(xh_star) - - dof_ids = get_free_dof_ids(X) - ranges = map(i->dof_ids[Block(i)],1:blocklength(dof_ids)) - solvers = Fill(BackslashSolver(),2) - - # Build using the global matrix - if single_proc - BDS = BlockDiagonalSmoother(A,ranges,solvers) - BDSss = symbolic_setup(BDS,A) - BDSns = numerical_setup(BDSss,A) + x_star = get_free_dof_values(solve(op)) - x = get_free_dof_values(zero(X)) - x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) - - @test norm(x-x_star) < 1.e-8 - end + opb = AffineFEOperator(a,l,Xb,Yb) + A,b = get_matrix(opb), get_vector(opb); # Build using local weakforms a1(u,v) = ∫(v⊙u + ∇(v)⊙∇(u))dΩ a2(p,q) = ∫(q⋅p + ∇(q)⊙∇(p))dΩ biforms = [a1,a2] - BDS = BlockDiagonalSmoother(biforms,X,Y,solvers) + BDS = BlockDiagonalSmoother(biforms,Xb,Yb,solvers) BDSss = symbolic_setup(BDS,A) BDSns = numerical_setup(BDSss,A) x = GridapSolvers.LinearSolvers.allocate_col_vector(A) x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) - - @test norm(x-x_star) < 1.e-8 + @test is_same_vector(x,x_star,Xb,X) # Build using BlockMatrixAssemblers - mfs = BlockMultiFieldStyle() - Yb = MultiFieldFESpace([V,Q];style=mfs) - Xb = MultiFieldFESpace([U,P];style=mfs) - - if single_proc - assem = SparseMatrixAssembler(Xb,Yb) - else - assem = SparseMatrixAssembler(Xb,Yb,FullyAssembledRows()) - end - op_blocks = AffineFEOperator(a,l,Xb,Yb,assem) - Ab,bb = get_matrix(op_blocks), get_vector(op_blocks); - - BDS = BlockDiagonalSmoother(Ab,solvers) + BDS = BlockDiagonalSmoother(A,solvers) BDSss = symbolic_setup(BDS,A) BDSns = numerical_setup(BDSss,A) - xb = GridapSolvers.LinearSolvers.allocate_col_vector(Ab) - xb = cg!(xb,Ab,bb;verbose=true,Pl=BDSns,reltol=1.0e-12) - - @test norm(x-x_star) < 1.e-8 + x = GridapSolvers.LinearSolvers.allocate_col_vector(A) + x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12) + @test is_same_vector(x,x_star,Xb,X) end num_ranks = (2,2) @@ -111,10 +138,12 @@ mesh_partition = (n,n) # Serial model = CartesianDiscreteModel(domain,mesh_partition) +main(model,false) main(model,true) # Distributed, sequential model = CartesianDiscreteModel(parts,num_ranks,domain,mesh_partition) main(model,false) +main(model,true) end \ No newline at end of file diff --git a/test/seq/SchurComplementSolversTests.jl b/test/seq/SchurComplementSolversTests.jl index 93c4763e..123ad7bf 100644 --- a/test/seq/SchurComplementSolversTests.jl +++ b/test/seq/SchurComplementSolversTests.jl @@ -1,6 +1,7 @@ module SchurComplementSolversTests using Test +using BlockArrays using Gridap using Gridap.MultiField using Gridap.Algebra @@ -49,8 +50,9 @@ function main(model) Q = TestFESpace(model,reffeₚ,conformity=:L2) P = TrialFESpace(Q,p_ref) - Y = 
MultiFieldFESpace([V, Q]) - X = MultiFieldFESpace([U, P]) + mfs = BlockMultiFieldStyle() + Y = MultiFieldFESpace([V, Q];style=mfs) + X = MultiFieldFESpace([U, P];style=mfs) qdegree = 4 Ω = Triangulation(model) @@ -70,18 +72,6 @@ function main(model) op = AffineFEOperator(biform,liform,X,Y) sysmat, sysvec = get_matrix(op), get_vector(op); - A = assemble_matrix(a,U,V) - B = assemble_matrix(b,P,V) - C = assemble_matrix(c,U,Q) - - ############################################################################################ - # Solve by global matrix factorization - - xh = solve(op) - uh, ph = xh - err_u1 = l2_error(uh,u_ref,dΩ) - err_p1 = l2_error(ph,p_ref,dΩ) - ############################################################################################ # Solve by GMRES preconditioned with inexact Schur complement @@ -90,9 +80,11 @@ function main(model) PS_solver = BackslashSolver() PS_ns = numerical_setup(symbolic_setup(PS_solver,PS),PS) + A = sysmat[Block(1,1)] A_solver = BackslashSolver() A_ns = numerical_setup(symbolic_setup(A_solver,A),A) + B = sysmat[Block(1,2)]; C = sysmat[Block(2,1)] psc_solver = SchurComplementSolver(A_ns,B,C,PS_ns); gmres = GMRESSolver(20,psc_solver,1e-10) @@ -103,13 +95,8 @@ function main(model) xh = FEFunction(X,x) uh, ph = xh - err_u3 = l2_error(uh,u_ref,dΩ) - err_p3 = l2_error(ph,p_ref,dΩ) - - @test err_u1 < 1.e-4 - @test err_u3 < 1.e-4 - @test err_p1 < 1.e-4 - @test err_p3 < 1.e-4 + @test l2_error(uh,u_ref,dΩ) < 1.e-4 + @test l2_error(ph,p_ref,dΩ) < 1.e-4 end num_ranks = (2,2) From e2a23a84fa6a44c423cea06074a2c2939defac54 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Sep 2023 10:33:25 +1000 Subject: [PATCH 34/34] Minor --- src/LinearSolvers/GMRESSolvers.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/LinearSolvers/GMRESSolvers.jl b/src/LinearSolvers/GMRESSolvers.jl index c26080ce..79729f23 100644 --- a/src/LinearSolvers/GMRESSolvers.jl +++ b/src/LinearSolvers/GMRESSolvers.jl @@ -2,7 +2,7 @@ # GMRES Solver struct GMRESSolver <: Gridap.Algebra.LinearSolver m ::Int - Pl + Pl ::Gridap.Algebra.LinearSolver tol::Float64 end @@ -107,7 +107,7 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::GMRESNumericalSetup,b::Abst iter += 1 end println(" Exiting GMRES solver.") - println(" > Num Iter: ", iter-1," - Final residual: ", β) + println(" > Num Iter: ", iter," - Final residual: ", β) return x end
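Appendix (illustrative only, not part of the patch series): a minimal serial
sketch of the API reached at the end of this series, where BlockDiagonalSmoother
requires a block-assembled system. Problem data, spaces, and tolerances follow
test/seq/BlockDiagonalSmoothersTests.jl above; BackslashSolver comes from Gridap.

using Gridap, Gridap.MultiField
using BlockArrays, FillArrays, IterativeSolvers
using GridapSolvers, GridapSolvers.LinearSolvers

u(x) = VectorValue(x[1],x[2])
f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2]))
p(x) = x[1] + x[2]
g(x) = -Δ(p)(x)

model  = CartesianDiscreteModel((0,1,0,1),(10,10))
order  = 2
reffeᵤ = ReferenceFE(lagrangian,VectorValue{2,Float64},order)
reffeₚ = ReferenceFE(lagrangian,Float64,order)
V = TestFESpace(model,reffeᵤ,conformity=:H1,dirichlet_tags=["boundary"])
Q = TestFESpace(model,reffeₚ,conformity=:H1,dirichlet_tags=["boundary"])
U = TrialFESpace(V,u)
P = TrialFESpace(Q,p)

# Block assembly: with BlockMultiFieldStyle, get_matrix/get_vector return one
# block per field, so the smoother can address A[Block(i,i)] directly.
mfs = BlockMultiFieldStyle()
Xb  = MultiFieldFESpace([U,P];style=mfs)
Yb  = MultiFieldFESpace([V,Q];style=mfs)

Ω  = Triangulation(model)
dΩ = Measure(Ω,2*(order+1))
a((u,p),(v,q)) = ∫( v⊙u + ∇(v)⊙∇(u) + q⋅p + ∇(q)⊙∇(p) )dΩ
l((v,q)) = ∫( v⋅f + q⋅g )dΩ

op  = AffineFEOperator(a,l,Xb,Yb)
A,b = get_matrix(op), get_vector(op)

# One solver per diagonal block; each block is solved independently.
BDS   = BlockDiagonalSmoother(A,Fill(BackslashSolver(),2))
BDSns = numerical_setup(symbolic_setup(BDS,A),A)

x = GridapSolvers.LinearSolvers.allocate_col_vector(A)
cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12)

Compared with the pre-[PATCH 33/34] code, no global-to-block copies
(to_blocks!/to_global!) are needed: x[Block(i)] addresses each field's dofs
directly in the block-assembled vectors.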