Bugfix: solve PETSc binary compatibility issues #96

Draft · wants to merge 15 commits into master
25 changes: 8 additions & 17 deletions .github/workflows/ci.yml
@@ -8,41 +8,32 @@ jobs:
fail-fast: false
matrix:
version:
- '1.9'
- '1.10'
os:
- ubuntu-latest
arch:
- x64
steps:
- uses: actions/checkout@v2
- uses: julia-actions/setup-julia@v1
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- uses: actions/cache@v1
env:
cache-name: cache-artifacts
with:
path: ~/.julia/artifacts
key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
restore-keys: |
${{ runner.os }}-test-${{ env.cache-name }}-
${{ runner.os }}-test-
${{ runner.os }}-
- uses: julia-actions/cache@v2
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v1
- uses: codecov/codecov-action@v4
with:
file: lcov.info
docs:
name: Documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: julia-actions/setup-julia@v1
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: '1.9'
version: '1.10'
- run: |
julia --project=docs -e '
using Pkg
18 changes: 10 additions & 8 deletions .github/workflows/ci_extra.yml
@@ -2,22 +2,24 @@ name: CI_EXTRA
on: [push, pull_request]
jobs:
test:
name: Tests ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
name: Tests ${{ matrix.version }} - petsc ${{ matrix.petsc_version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
runs-on: ${{ matrix.os }}
env:
JULIA_MPI_BINARY: "system"
JULIA_PETSC_LIBRARY: "/opt/petsc/3.15.4/lib/libpetsc"
JULIA_PETSC_LIBRARY: "/opt/petsc/${{ matrix.petsc_version }}/lib/libpetsc"
strategy:
fail-fast: false
matrix:
version:
- '1.9'
- '1.10'
os:
- ubuntu-latest
arch:
- x64
petsc_version:
- '3.15.4'
- '3.22.0'
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Cache petsc
id: cache-petsc
uses: actions/cache@v2
@@ -28,7 +30,7 @@ jobs:
${{ runner.os }}-build-${{ env.JULIA_PETSC_LIBRARY }}-
${{ runner.os }}-build-
${{ runner.os }}-
- uses: julia-actions/setup-julia@v1
- uses: julia-actions/setup-julia@v2
with:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
@@ -41,11 +43,11 @@
run: |
CURR_DIR=$(pwd)
PACKAGE=petsc
VERSION=3.15.4
VERSION=${{ matrix.petsc_version }}
INSTALL_ROOT=/opt
PETSC_INSTALL=$INSTALL_ROOT/$PACKAGE/$VERSION
TAR_FILE=$PACKAGE-$VERSION.tar.gz
URL="https://ftp.mcs.anl.gov/pub/petsc/release-snapshots/"
URL="https://web.cels.anl.gov/projects/petsc/download/release-snapshots/"
ROOT_DIR=/tmp
SOURCES_DIR=$ROOT_DIR/$PACKAGE-$VERSION
BUILD_DIR=$SOURCES_DIR/build
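A note on the `JULIA_PETSC_LIBRARY` variable set above: GridapPETSc picks up a system PETSc through this environment variable instead of the PETSc_jll artifact. A minimal sketch of that selection logic, assuming a jll fallback; this is illustrative, not the package's verbatim build code:

```julia
using PETSc_jll  # provides the jll-built fallback binary

# Hypothetical sketch of the build-time selection (not the actual build script):
# prefer the system PETSc configured in the CI matrix when the variable is set,
# otherwise fall back to the artifact shipped with PETSc_jll.
const libpetsc = get(ENV, "JULIA_PETSC_LIBRARY", PETSc_jll.libpetsc)
```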
2 changes: 1 addition & 1 deletion Project.toml
@@ -19,7 +19,7 @@ SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
Gridap = "0.17, 0.18"
GridapDistributed = "0.3, 0.4"
MPI = "0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20"
PETSc_jll = "=3.13.4, =3.15.2"
PETSc_jll = "3.13.4"
PartitionedArrays = "0.3"
SparseMatricesCSR = "0.6.6"
julia = "1.3"
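This compat change is the heart of the fix: `=3.13.4, =3.15.2` pinned two exact PETSc_jll versions, while the bare `3.13.4` uses Julia's default caret semantics and admits every 3.x release from 3.13.4 upwards. A quick way to check the two specifiers, assuming `semver_spec` lives under `Pkg.Versions` as in recent Pkg releases:

```julia
using Pkg

spec = Pkg.Versions.semver_spec("3.13.4")   # caret semantics: [3.13.4, 4.0.0)
@assert v"3.15.2" in spec && v"3.22.0" in spec
@assert !(v"4.0.0" in spec)

pinned = Pkg.Versions.semver_spec("=3.13.4, =3.15.2")  # equality pins only
@assert !(v"3.22.0" in pinned)
```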
4 changes: 2 additions & 2 deletions src/Environment.jl
@@ -1,7 +1,7 @@

function Init(;args=String[],file="",help="",finalize_atexit=true)
if !MPI.Initialized()
MPI.Init()
end
if finalize_atexit
atexit(Finalize)
@@ -23,7 +23,7 @@ const _NREFS = Ref(0)

function Finalize()
if Initialized()
GC.gc() # Finalize all object out of scope at this point
gridap_petsc_gc() # Finalize all object out of scope at this point
if _NREFS[] != 0
@warn "$(_NREFS[]) objects still not finalized before calling GridapPETSc.Finalize()"
end
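Replacing the plain `GC.gc()` with `gridap_petsc_gc()` matters in parallel: PETSc destroy routines are collective over the MPI communicator, while Julia finalizers fire at rank-local, nondeterministic times. A rough sketch of the pattern, assumed here for illustration; `_pending_destroy` and `destroy_petsc_object` are hypothetical names, not the package's actual internals:

```julia
using MPI

const _pending_destroy = Ptr{Cvoid}[]  # handles enqueued by wrapper finalizers

function gridap_petsc_gc()
  GC.gc()                      # run finalizers; they only enqueue handles
  MPI.Barrier(MPI.COMM_WORLD)  # every rank enters the destroy phase together
  for h in _pending_destroy
    destroy_petsc_object(h)    # hypothetical collective PetscObjectDestroy-style call
  end
  empty!(_pending_destroy)
end
```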
4 changes: 2 additions & 2 deletions src/PETScLinearSolvers.jl
@@ -88,8 +88,8 @@ function Algebra.solve!(x::AbstractVector,ns::PETScLinearSolverNS,b::AbstractVec
end

function Algebra.solve!(x::PVector,ns::PETScLinearSolverNS,b::PVector)
X = similar(b,(axes(ns.A)[2],))
B = similar(b,(axes(ns.A)[2],))
X = similar(b,(axes(ns.A,2),))
B = similar(b,(axes(ns.A,1),))
copy!(X,x)
copy!(B,b)
Y = convert(PETScVector,X)
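The substantive fix here is the second line: for a rectangular operator the right-hand side must live on the row axis, `axes(ns.A,1)`, while the solution lives on the column axis; the old code built both from axis 2 and only worked when the matrix was square. A plain-array illustration of the convention:

```julia
using LinearAlgebra

A = rand(3, 2)                 # 3 rows, 2 columns
b = rand(3)
X = similar(b, (axes(A, 2),))  # solution vector: one entry per column of A
B = similar(b, (axes(A, 1),))  # right-hand side: one entry per row of A
copy!(B, b)
@assert length(X) == size(A, 2) && length(B) == size(A, 1)
```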
34 changes: 20 additions & 14 deletions test/PLaplacianTests.jl
@@ -8,13 +8,15 @@ using Test
using SparseMatricesCSR


function snes_convergence_test(snes::SNES,
it::PetscInt,
xnorm::PetscReal,
gnorm::PetscReal,
f::PetscReal,
reason::Ptr{PETSC.SNESConvergedReason},
user::Ptr{Cvoid})::PetscInt
function snes_convergence_test(
snes::SNES,
it::PetscInt,
xnorm::PetscReal,
gnorm::PetscReal,
f::PetscReal,
reason::Ptr{PETSC.SNESConvergedReason},
user::Ptr{Cvoid}
)::PetscInt
PETSC.SNESConvergedDefault(snes, it, xnorm, gnorm, f, reason, user)
end
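PETSc expects a C-callable function pointer for convergence tests, so a Julia method with this exact signature has to go through `@cfunction` before being handed to PETSc. The wiring below is illustrative only, assuming registration through something like a `SNESSetConvergenceTest` wrapper; it is not the test's verbatim registration code:

```julia
# Illustrative only: convert the Julia callback into a C-callable pointer.
conv_c = @cfunction(snes_convergence_test, PetscInt,
  (SNES, PetscInt, PetscReal, PetscReal, PetscReal,
   Ptr{PETSC.SNESConvergedReason}, Ptr{Cvoid}))
# conv_c can then be passed to PETSc when the SNES object is configured.
```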

@@ -39,9 +41,12 @@ function mysnessetup(snes)
@check_error_code GridapPETSc.PETSC.PCFactorSetUpMatSolverType(pc[])
@check_error_code GridapPETSc.PETSC.PCFactorGetMatrix(pc[],mumpsmat)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 4, 2)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2)
@check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6)
# Jordi: I've had to change to sequential ordering, since the jll binaries do not have pSCOTCH
# @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2) # 1-sequential,2-parallel
# @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2) # 1-parmetis, 2-pscotch
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 1)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 7, 0)
# @check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6)
end
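A note on the MUMPS parameters toggled above (the same switch appears in `mykspsetup` in PoissonTests.jl), paraphrasing the MUMPS user guide: ICNTL(28) selects sequential (1) or parallel (2) analysis, ICNTL(29) picks the parallel ordering tool (ParMETIS or PT-SCOTCH) and is only consulted when ICNTL(28)=2, and ICNTL(7)=0 requests the AMD ordering for the sequential analysis phase. Since the jll binaries reportedly lack the parallel ordering packages, the sequential path is the safe default. The same settings can also be supplied through PETSc's option database instead of `MatMumpsSetIcntl`, e.g.:

```julia
# Option-database form of the settings above (sketch): these are picked up
# by KSPSetFromOptions / the SNES equivalent when the solver is configured.
options = "-mat_mumps_icntl_28 1 -mat_mumps_icntl_7 0 -mat_mumps_icntl_4 2"
GridapPETSc.with(args=split(options)) do
  # solvers created here inherit the MUMPS options from the database
end
```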

function main(distribute,nparts)
@@ -60,8 +65,8 @@ function main(distribute,nparts,solver)
error()
end
GridapPETSc.with(args=split(options)) do
main(distribute,nparts,solver,FullyAssembledRows())
main(distribute,nparts,solver,SubAssembledRows())
end
end

@@ -87,8 +92,8 @@ function main(distribute,nparts,solver,strategy)
V = TestFESpace(model,reffe,dirichlet_tags="boundary")
U = TrialFESpace(u,V)

assem = SparseMatrixAssembler(SparseMatrixCSR{0,PetscScalar,PetscInt},
Vector{PetscScalar},U,V,strategy)
assem = SparseMatrixAssembler(
SparseMatrixCSR{0,PetscScalar,PetscInt},Vector{PetscScalar},U,V,strategy
)

op = FEOperator(r,j,U,V,assem)

34 changes: 8 additions & 26 deletions test/PartitionedArraysTests.jl
@@ -8,24 +8,6 @@ using LinearAlgebra

using PartitionedArrays: assemble_coo!

function set_mumps_options(ksp)
pc = Ref{GridapPETSc.PETSC.PC}()
mumpsmat = Ref{GridapPETSc.PETSC.Mat}()
@check_error_code GridapPETSc.PETSC.KSPView(ksp[],C_NULL)
@check_error_code GridapPETSc.PETSC.KSPSetType(ksp[],GridapPETSc.PETSC.KSPPREONLY)
@check_error_code GridapPETSc.PETSC.KSPGetPC(ksp[],pc)
@check_error_code GridapPETSc.PETSC.PCSetType(pc[],GridapPETSc.PETSC.PCLU)
@check_error_code GridapPETSc.PETSC.PCFactorSetMatSolverType(pc[],GridapPETSc.PETSC.MATSOLVERMUMPS)
@check_error_code GridapPETSc.PETSC.PCFactorSetUpMatSolverType(pc[])
@check_error_code GridapPETSc.PETSC.PCFactorGetMatrix(pc[],mumpsmat)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 4, 1)
# percentage increase in the estimated working space
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 14, 1000)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2)
@check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6)
end

function partitioned_tests(distribute,nparts)
parts = distribute(LinearIndices((prod(nparts),)))

@@ -147,25 +129,25 @@

function solve_system_and_check_solution(A::PSparseMatrix,B::PETScMatrix,v)
solver = PETScLinearSolver()
ss = symbolic_setup(solver,A)
ns = numerical_setup(ss,A)

ns = numerical_setup(symbolic_setup(solver,A),A)
consistent!(v) |> fetch
y = pfill(0.0,partition(ids))
z = pfill(0.0,partition(ids))
y = pfill(0.0,partition(axes(A,1)))
z = pfill(0.0,partition(axes(A,2)))
mul!(y,A,v)
consistent!(y) |> fetch
z = solve!(z,ns,y)
consistent!(z) |> fetch

nspetsc = numerical_setup(symbolic_setup(PETScLinearSolver(),B),B)
ypetsc = convert(PETScVector,y)
zpetsc = PETScVector(0.0,ids)
zpetsc = PETScVector(0.0,axes(A,2))
zpetsc = solve!(zpetsc,nspetsc,ypetsc)

test_vectors(y,ypetsc,ids)
test_vectors(z,zpetsc,ids)
test_vectors(y,ypetsc,axes(A,1))
test_vectors(z,zpetsc,axes(A,2))

map(parts,partition(z),partition(v)) do p,z,v
map(partition(z),partition(v)) do z,v
@test maximum(abs.(z-v)) < 1e-5
end
GridapPETSc.Finalize(ypetsc)
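The updated test derives the vector layouts from the matrix axes instead of a shared `ids` object. A minimal stand-alone sketch of that pattern with plain sequential ranks, assuming the PartitionedArrays 0.3 API (`row_partition` is a name local to this example):

```julia
using PartitionedArrays

np = 4
ranks = LinearIndices((np,))                  # sequential stand-in for MPI ranks
row_partition = uniform_partition(ranks, 12)  # 12 global rows over 4 parts
y = pfill(0.0, row_partition)                 # PVector laid out like axes(A,1)
consistent!(y) |> fetch                       # sync ghost entries after writes
```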
115 changes: 59 additions & 56 deletions test/PoissonTests.jl
@@ -21,9 +21,12 @@ function mykspsetup(ksp)
@check_error_code GridapPETSc.PETSC.PCFactorSetUpMatSolverType(pc[])
@check_error_code GridapPETSc.PETSC.PCFactorGetMatrix(pc[],mumpsmat)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 4, 2)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2)
@check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6)
# Jordi: I've had to change to sequential ordering, since the jll binaries do not have pSCOTCH
# @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2) # 1-sequential,2-parallel
# @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2) # 1-parmetis, 2-pscotch
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 1)
@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 7, 0)
# @check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6)
@check_error_code GridapPETSc.PETSC.KSPSetFromOptions(ksp[])
end

@@ -45,67 +48,67 @@ function main(distribute,nparts,solver)
error()
end
GridapPETSc.with(args=split(options)) do
    domain = (0,4,0,4)
    cells = (4,4)
    model = CartesianDiscreteModel(parts,nparts,domain,cells)

    labels = get_face_labeling(model)
    add_tag_from_tags!(labels,"dirichlet",[1,2,3,5,7])
    add_tag_from_tags!(labels,"neumann",[4,6,8])

    Ω = Triangulation(model)
    Γn = Boundary(model,tags="neumann")
    n_Γn = get_normal_vector(Γn)

    k = 2
    u((x,y)) = (x+y)^k
    f(x) = -Δ(u,x)
    g = n_Γn⋅∇(u)

    reffe = ReferenceFE(lagrangian,Float64,k)
    V = TestFESpace(model,reffe,dirichlet_tags="dirichlet")
    U = TrialFESpace(u,V)

    dΩ = Measure(Ω,2*k)
    dΓn = Measure(Γn,2*k)

    a(u,v) = ∫( ∇(v)⋅∇(u) )dΩ
    l(v) = ∫( v*f )dΩ + ∫( v*g )dΓn

    Tm = SparseMatrixCSR{0,PetscScalar,PetscInt}
    Tv = Vector{PetscScalar}
    assem = SparseMatrixAssembler(Tm,Tv,U,V)
    op = AffineFEOperator(a,l,U,V,assem)

    v_julia = get_vector(op)
    v_petsc = convert(PETScVector,v_julia)
    copy!(v_julia,v_petsc)
    copy!(v_petsc,v_julia)

    # Checking that convert performs deep copies and does not modify A
    A = get_matrix(op)
    vals_copy = map(partition(A)) do A
      @test typeof(A)==SparseMatrixCSR{0,PetscScalar,PetscInt}
      i = copy(A.rowptr)
      j = copy(A.colval)
      a = copy(A.nzval)
      i,j,a
    end
    Apetsc = convert(PETScMatrix,A)
    map(partition(A),vals_copy) do A, (i,j,a)
      @test all(i .== A.rowptr)
      @test all(j .== A.colval)
      @test all(a .== A.nzval)
    end

    if solver == :mumps
      ls = PETScLinearSolver(mykspsetup)
    else
      ls = PETScLinearSolver()
    end
    fels = LinearFESolver(ls)
    uh = solve(fels,op)
    eh = u - uh
    @test sqrt(sum( ∫(abs2(eh))dΩ )) < 1.0e-9
end
end
end
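For reference, the final assertion in this test (and the analogous one in PLaplacianTests.jl) bounds the L2 norm of the discretization error:

$$\|u-u_h\|_{L^2(\Omega)} = \Big(\int_\Omega |u-u_h|^2 \,\mathrm{d}\Omega\Big)^{1/2} < 10^{-9}$$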