diff --git a/src/Algebra.jl b/src/Algebra.jl index 58f3b63f..7255e8d5 100644 --- a/src/Algebra.jl +++ b/src/Algebra.jl @@ -5,17 +5,36 @@ end # This might go to Gridap in the future. We keep it here for the moment. function change_axes(a::Algebra.CounterCOO{T,A}, axes::A) where {T,A} - b=Algebra.CounterCOO{T}(axes) + b = Algebra.CounterCOO{T}(axes) b.nnz = a.nnz b end # This might go to Gridap in the future. We keep it here for the moment. function change_axes(a::Algebra.AllocationCOO{T,A}, axes::A) where {T,A} - counter=change_axes(a.counter,axes) + counter = change_axes(a.counter,axes) Algebra.AllocationCOO(counter,a.I,a.J,a.V) end +# Array of PArrays -> PArray of Arrays +function to_parray_of_arrays(a::AbstractArray{<:MPIArray}) + indices = linear_indices(first(a)) + map(indices) do i + map(a) do aj + PartitionedArrays.getany(aj) + end + end +end + +function to_parray_of_arrays(a::AbstractArray{<:DebugArray}) + indices = linear_indices(first(a)) + map(indices) do i + map(a) do aj + aj.items[i] + end + end +end + # This type is required because MPIArray from PArrays # cannot be instantiated with a NULL communicator struct MPIVoidVector{T} <: AbstractVector{T} @@ -112,8 +131,8 @@ function local_views(a) @abstractmethod end -function get_parts(x) - return linear_indices(local_views(x)) +function get_parts(a) + return linear_indices(local_views(a)) end function local_views(a::AbstractVector,rows) @@ -124,10 +143,6 @@ function local_views(a::AbstractMatrix,rows,cols) @notimplemented end -function consistent_local_views(a,ids,isconsistent) - @abstractmethod -end - function local_views(a::AbstractArray) a end @@ -144,6 +159,40 @@ function local_views(a::PSparseMatrix) partition(a) end +# change_ghost + +function change_ghost(a::PVector{T},ids::PRange;is_consistent=false,make_consistent=false) where T + same_partition = (a.index_partition === partition(ids)) + a_new = same_partition ? a : change_ghost(T,a,ids) + if make_consistent && (!same_partition || !is_consistent) + consistent!(a_new) |> wait + end + return a_new +end + +function change_ghost(::Type{<:AbstractVector},a::PVector,ids::PRange) + a_new = similar(a,eltype(a),(ids,)) + # Equivalent to copy!(a_new,a) but does not check that owned indices match + map(copy!,own_values(a_new),own_values(a)) + return a_new +end + +function change_ghost(::Type{<:OwnAndGhostVectors},a::PVector,ids::PRange) + values = map(own_values(a),partition(ids)) do own_vals,ids + ghost_vals = fill(zero(eltype(a)),ghost_length(ids)) + perm = PartitionedArrays.local_permutation(ids) + OwnAndGhostVectors(own_vals,ghost_vals,perm) + end + return PVector(values,partition(ids)) +end + +function change_ghost(a::BlockPVector,ids::BlockPRange;is_consistent=false,make_consistent=false) + vals = map(blocks(a),blocks(ids)) do a, ids + change_ghost(a,ids;is_consistent=is_consistent,make_consistent=make_consistent) + end + return BlockPVector(vals,ids) +end + # This function computes a mapping among the local identifiers of a and b # for which the corresponding global identifiers are both in a and b. 
# Note that the haskey check is necessary because in the general case @@ -214,57 +263,35 @@ end function local_views(row_col_partitioned_matrix::PSparseMatrix, test_dofs_partition::PRange, trial_dofs_partition::PRange) - if (row_col_partitioned_matrix.row_partition === partition(test_dofs_partition) || - row_col_partitioned_matrix.col_partition === partition(trial_dofs_partition) ) - @assert false - else - map( - partition(row_col_partitioned_matrix), - partition(test_dofs_partition), - partition(trial_dofs_partition), - row_col_partitioned_matrix.row_partition, - row_col_partitioned_matrix.col_partition) do matrix_partition, - test_dof_partition, - trial_dof_partition, - row_partition, - col_partition - rl2lmap = find_local_to_local_map(test_dof_partition,row_partition) - cl2lmap = find_local_to_local_map(trial_dof_partition,col_partition) - LocalView(matrix_partition,(rl2lmap,cl2lmap)) - end + if (row_col_partitioned_matrix.row_partition === partition(test_dofs_partition) || + row_col_partitioned_matrix.col_partition === partition(trial_dofs_partition) ) + @assert false + else + map( + partition(row_col_partitioned_matrix), + partition(test_dofs_partition), + partition(trial_dofs_partition), + row_col_partitioned_matrix.row_partition, + row_col_partitioned_matrix.col_partition) do matrix_partition, + test_dof_partition, + trial_dof_partition, + row_partition, + col_partition + rl2lmap = find_local_to_local_map(test_dof_partition,row_partition) + cl2lmap = find_local_to_local_map(trial_dof_partition,col_partition) + LocalView(matrix_partition,(rl2lmap,cl2lmap)) end -end - -function change_ghost(a::PVector,ids_fespace::PRange) - if a.index_partition === partition(ids_fespace) - a_fespace = a - else - a_fespace = similar(a,eltype(a),(ids_fespace,)) - a_fespace .= a end - a_fespace end -function consistent_local_views(a::PVector, - ids_fespace::PRange, - isconsistent) - a_fespace = change_ghost(a,ids_fespace) - if ! 
isconsistent
-    fetch_vector_ghost_values!(partition(a_fespace),
-                               map(reverse,a_fespace.cache)) |> wait
-  end
-  partition(a_fespace)
+function Algebra.allocate_vector(::Type{<:PVector{V}},ids::PRange) where {V}
+  PVector{V}(undef,partition(ids))
 end
 
-function Algebra.allocate_vector(::Type{<:PVector{V,A}},ids::PRange) where {V,A}
-  values = map(partition(ids)) do ids
-    Tv = eltype(A)
-    Tv(undef,length(local_to_owner(ids)))
-  end
-  PVector(values,partition(ids))
+function Algebra.allocate_vector(::Type{<:BlockPVector{V}},ids::BlockPRange) where {V}
+  BlockPVector{V}(undef,ids)
 end
 
-
 # PSparseMatrix assembly
 
 struct FullyAssembledRows end
@@ -720,7 +747,7 @@ function Arrays.nz_allocation(a::PVectorCounter{<:SubAssembledRows})
   touched = map(values) do values
     fill!(Vector{Bool}(undef,length(values)),false)
   end
-  allocations=map(values,touched) do values,touched
+  allocations = map(values,touched) do values,touched
     ArrayAllocationTrackTouchedAndValues(touched,values)
   end
   return PVectorAllocationTrackTouchedAndValues(allocations,values,dofs)
@@ -993,8 +1020,6 @@ function assemble_coo_with_column_owner!(I,J,V,row_partition,Jown)
   end
 end
 
-Base.wait(t::Matrix) = map(wait,t)
-
 # dofs_gids_prange can be either test_dofs_gids_prange or trial_dofs_gids_prange
 # In the former case, gids is a vector of global test dof identifiers, while in the
 # latter, a vector of global trial dof identifiers
diff --git a/src/BlockPartitionedArrays.jl b/src/BlockPartitionedArrays.jl
new file mode 100644
index 00000000..cc30304a
--- /dev/null
+++ b/src/BlockPartitionedArrays.jl
@@ -0,0 +1,360 @@
+
+"""
+    struct BlockPRange{A} <: AbstractUnitRange{Int}
+
+Block-wise index range, given by a vector of `PRange`s (one per block).
+It plays, for block-partitioned arrays, the role that `PRange` plays for
+monolithic partitioned arrays.
+"""
+struct BlockPRange{A} <: AbstractUnitRange{Int}
+  ranges::Vector{PRange{A}}
+  function BlockPRange(ranges::Vector{<:PRange{A}}) where A
+    new{A}(ranges)
+  end
+end
+
+Base.first(a::BlockPRange) = 1
+Base.last(a::BlockPRange) = sum(map(last,a.ranges))
+
+BlockArrays.blocklength(a::BlockPRange) = length(a.ranges)
+BlockArrays.blocksize(a::BlockPRange) = (blocklength(a),)
+BlockArrays.blockaxes(a::BlockPRange) = (Block.(Base.OneTo(blocklength(a))),)
+BlockArrays.blocks(a::BlockPRange) = a.ranges
+
+function PartitionedArrays.partition(a::BlockPRange)
+  return map(partition,blocks(a)) |> to_parray_of_arrays
+end
+
+"""
+    struct BlockPArray{V,T,N,A,B} <: AbstractBlockArray{T,N}
+
+Block array whose blocks are distributed arrays, i.e. `PVector`/`PSparseMatrix`
+blocks with local array type `V`, element type `T` and axes of type
+`BlockPRange`.
+"""
+struct BlockPArray{V,T,N,A,B} <: BlockArrays.AbstractBlockArray{T,N}
+  blocks::Array{A,N}
+  axes::NTuple{N,B}
+
+  function BlockPArray(blocks::Array{<:AbstractArray{T,N},N},
+                       axes::NTuple{N,<:BlockPRange}) where {T,N}
+    @check all(map(d->size(blocks,d)==blocklength(axes[d]),1:N))
+    local_type(::Type{<:PVector{V}}) where V = V
+    local_type(::Type{<:PSparseMatrix{V}}) where V = V
+    A = eltype(blocks)
+    B = typeof(first(axes))
+    V = local_type(A)
+    new{V,T,N,A,B}(blocks,axes)
+  end
+end
+
+const BlockPVector{V,T,A,B} = BlockPArray{V,T,1,A,B}
+const BlockPMatrix{V,T,A,B} = BlockPArray{V,T,2,A,B}
+
+@inline function BlockPVector(blocks::Vector{<:PVector},rows::BlockPRange)
+  BlockPArray(blocks,(rows,))
+end
+
+@inline function BlockPVector(blocks::Vector{<:PVector},rows::Vector{<:PRange})
+  BlockPVector(blocks,BlockPRange(rows))
+end
+
+@inline function BlockPMatrix(blocks::Matrix{<:PSparseMatrix},rows::BlockPRange,cols::BlockPRange)
+  BlockPArray(blocks,(rows,cols))
+end
+
+@inline function BlockPMatrix(blocks::Matrix{<:PSparseMatrix},rows::Vector{<:PRange},cols::Vector{<:PRange})
+  BlockPMatrix(blocks,BlockPRange(rows),BlockPRange(cols))
+end
+
+function BlockPVector{V}(::UndefInitializer,rows::BlockPRange) where {V}
+  vals = map(blocks(rows)) do r
+    PVector{V}(undef,partition(r))
+  end
+  return BlockPVector(vals,rows)
+end
+
+function BlockPMatrix{V}(::UndefInitializer,rows::BlockPRange,cols::BlockPRange) where {V}
+  block_ids  = CartesianIndices((blocklength(rows),blocklength(cols)))
+  block_rows = blocks(rows)
+  block_cols = blocks(cols)
+  vals = map(block_ids) do I
+    r = block_rows[I[1]]
+    c = block_cols[I[2]]
+    PSparseMatrix{V}(undef,partition(r),partition(c))
+  end
+  return BlockPMatrix(vals,rows,cols)
+end
+
+# AbstractArray API
+
+Base.axes(a::BlockPArray) = a.axes
+Base.size(a::BlockPArray) = Tuple(map(length,a.axes))
+
+Base.IndexStyle(::Type{<:BlockPVector}) = IndexLinear()
+Base.IndexStyle(::Type{<:BlockPMatrix}) = IndexCartesian()
+
+function Base.similar(a::BlockPVector,::Type{T},inds::Tuple{<:BlockPRange}) where T
+  vals = map(blocks(a),blocks(inds[1])) do ai,i
+    similar(ai,T,i)
+  end
+  return BlockPArray(vals,inds)
+end
+
+function Base.similar(::Type{<:BlockPVector{V,T,A}},inds::Tuple{<:BlockPRange}) where {V,T,A}
+  rows = blocks(inds[1])
+  values = map(rows) do r
+    return similar(A,(r,))
+  end
+  return BlockPArray(values,inds)
+end
+
+function Base.similar(a::BlockPMatrix,::Type{T},inds::Tuple{<:BlockPRange,<:BlockPRange}) where T
+  vals = map(CartesianIndices(blocksize(a))) do I
+    rows = inds[1].ranges[I[1]]
+    cols = inds[2].ranges[I[2]]
+    similar(a.blocks[I],T,(rows,cols))
+  end
+  return BlockPArray(vals,inds)
+end
+
+function Base.similar(::Type{<:BlockPMatrix{V,T,A}},inds::Tuple{<:BlockPRange,<:BlockPRange}) where {V,T,A}
+  rows = blocks(inds[1])
+  cols = blocks(inds[2])
+  values = map(CartesianIndices((length(rows),length(cols)))) do I
+    i,j = I[1],I[2]
+    return similar(A,(rows[i],cols[j]))
+  end
+  return BlockPArray(values,inds)
+end
+
+function Base.getindex(a::BlockPArray{V,T,N},inds::Vararg{Int,N}) where {V,T,N}
+  @error "Scalar indexing not supported"
+end
+function Base.setindex!(a::BlockPArray{V,T,N},v,inds::Vararg{Int,N}) where {V,T,N}
+  @error "Scalar indexing not supported"
+end
+
+function Base.show(io::IO,k::MIME"text/plain",data::BlockPArray{V,T,N}) where {V,T,N}
+  v = first(blocks(data))
+  s = prod(map(si->"$(si)x",blocksize(data)))[1:end-1]
+  map_main(partition(v)) do values
+    println(io,"$s-block BlockPArray{$T,$N}")
+  end
+end
+
+function Base.zero(v::BlockPArray)
+  return mortar(map(zero,blocks(v)))
+end
+
+function Base.copyto!(y::BlockPVector,x::BlockPVector)
+  @check blocklength(x) == blocklength(y)
+  for i in blockaxes(x,1)
+    copyto!(y[i],x[i])
+  end
+  return y
+end
+
+function Base.copyto!(y::BlockPMatrix,x::BlockPMatrix)
+  @check blocksize(x) == blocksize(y)
+  for i in blockaxes(x,1)
+    for j in blockaxes(x,2)
+      copyto!(y[i,j],x[i,j])
+    end
+  end
+  return y
+end
+
+function Base.fill!(a::BlockPVector,v)
+  map(blocks(a)) do a
+    fill!(a,v)
+  end
+  return a
+end
+
+function Base.sum(a::BlockPArray)
+  return sum(map(sum,blocks(a)))
+end
+
+Base.maximum(x::BlockPArray) = maximum(identity,x)
+function Base.maximum(f::Function,x::BlockPArray)
+  maximum(map(xi->maximum(f,xi),blocks(x)))
+end
+
+Base.minimum(x::BlockPArray) = minimum(identity,x)
+function Base.minimum(f::Function,x::BlockPArray)
+  minimum(map(xi->minimum(f,xi),blocks(x)))
+end
+
+function Base.:(==)(a::BlockPVector,b::BlockPVector)
+  A = length(a) == length(b)
+  B = all(map((ai,bi)->ai==bi,blocks(a),blocks(b)))
+  return A && B
+end
+
+function Base.any(f::Function,x::BlockPVector)
+  any(map(xi->any(f,xi),blocks(x)))
+end
+
+function Base.all(f::Function,x::BlockPVector)
+  all(map(xi->all(f,xi),blocks(x)))
+end
+
+function LinearAlgebra.rmul!(a::BlockPVector,v::Number)
+  map(ai->rmul!(ai,v),blocks(a))
+  return a
+end
+
+# AbstractBlockArray API
+
+BlockArrays.blocks(a::BlockPArray) = a.blocks
+
+function Base.getindex(a::BlockPArray,inds::Block{1})
+  a.blocks[inds.n...]
+end
+function Base.getindex(a::BlockPArray{V,T,N},inds::Block{N}) where {V,T,N}
+  a.blocks[inds.n...]
+end
+function Base.getindex(a::BlockPArray{V,T,N},inds::Vararg{Block{1},N}) where {V,T,N}
+  a.blocks[map(i->i.n[1],inds)...]
+end
+
+function BlockArrays.mortar(blocks::Vector{<:PVector})
+  rows = map(b->axes(b,1),blocks)
+  BlockPVector(blocks,rows)
+end
+
+function BlockArrays.mortar(blocks::Matrix{<:PSparseMatrix})
+  rows = map(b->axes(b,1),blocks[:,1])
+  cols = map(b->axes(b,2),blocks[1,:])
+
+  function check_axes(a,r,c)
+    A = PartitionedArrays.matching_local_indices(axes(a,1),r)
+    B = PartitionedArrays.matching_local_indices(axes(a,2),c)
+    return A & B
+  end
+  @check all(map(I -> check_axes(blocks[I],rows[I[1]],cols[I[2]]),CartesianIndices(size(blocks))))
+
+  return BlockPMatrix(blocks,rows,cols)
+end
+
+# PartitionedArrays API
+
+Base.wait(t::Array) = map(wait,t)
+Base.fetch(t::Array) = map(fetch,t)
+
+function PartitionedArrays.assemble!(a::BlockPArray)
+  map(assemble!,blocks(a))
+end
+
+function PartitionedArrays.consistent!(a::BlockPArray)
+  map(consistent!,blocks(a))
+end
+
+function PartitionedArrays.partition(a::BlockPArray)
+  vals = map(partition,blocks(a)) |> to_parray_of_arrays
+  return map(mortar,vals)
+end
+
+function PartitionedArrays.to_trivial_partition(a::BlockPArray)
+  vals = map(PartitionedArrays.to_trivial_partition,blocks(a))
+  return mortar(vals)
+end
+
+function PartitionedArrays.local_values(a::BlockPArray)
+  vals = map(local_values,blocks(a)) |> to_parray_of_arrays
+  return map(mortar,vals)
+end
+
+function PartitionedArrays.own_values(a::BlockPArray)
+  vals = map(own_values,blocks(a)) |> to_parray_of_arrays
+  return map(mortar,vals)
+end
+
+function PartitionedArrays.ghost_values(a::BlockPArray)
+  vals = map(ghost_values,blocks(a)) |> to_parray_of_arrays
+  return map(mortar,vals)
+end
+
+function PartitionedArrays.own_ghost_values(a::BlockPMatrix)
+  vals = map(own_ghost_values,blocks(a)) |> to_parray_of_arrays
+  return map(mortar,vals)
+end
+
+function PartitionedArrays.ghost_own_values(a::BlockPMatrix)
+  vals = map(ghost_own_values,blocks(a)) |> to_parray_of_arrays
+  return map(mortar,vals)
+end
+
+# LinearAlgebra API
+
+function LinearAlgebra.mul!(y::BlockPVector,A::BlockPMatrix,x::BlockPVector)
+  z = zero(eltype(y))
+  o = one(eltype(A))
+  for i in blockaxes(A,1)
+    fill!(y[i],z)
+    for j in blockaxes(A,2)
+      mul!(y[i],A[i,j],x[j],o,o)
+    end
+  end
+end
+
+function LinearAlgebra.dot(x::BlockPVector,y::BlockPVector)
+  return sum(map(dot,blocks(x),blocks(y)))
+end
+
+function LinearAlgebra.norm(v::BlockPVector,p::Real=2)
+  block_norms = map(vi->norm(vi,p),blocks(v))
+  return sum(block_norms.^p)^(1/p)
+end
+
+function LinearAlgebra.fillstored!(a::BlockPMatrix,v)
+  map(blocks(a)) do a
+    LinearAlgebra.fillstored!(a,v)
+  end
+  return a
+end
+
+# Broadcasting
+
+struct BlockPBroadcasted{A,B}
+  blocks :: A
+  axes   :: B
+end
+
+BlockArrays.blocks(b::BlockPBroadcasted) = b.blocks
+BlockArrays.blockaxes(b::BlockPBroadcasted) = b.axes
+
+function Base.broadcasted(f, args::Union{BlockPVector,BlockPBroadcasted}...)
+  a1 = first(args)
+  @boundscheck @assert all(ai -> blockaxes(ai) == blockaxes(a1),args)
+
+  blocks_in  = map(blocks,args)
+  blocks_out = map((largs...)->Base.broadcasted(f,largs...),blocks_in...)
+ + return BlockPBroadcasted(blocks_out,blockaxes(a1)) +end + +function Base.broadcasted(f, a::Number, b::Union{BlockPVector,BlockPBroadcasted}) + blocks_out = map(b->Base.broadcasted(f,a,b),blocks(b)) + return BlockPBroadcasted(blocks_out,blockaxes(b)) +end + +function Base.broadcasted(f, a::Union{BlockPVector,BlockPBroadcasted}, b::Number) + blocks_out = map(a->Base.broadcasted(f,a,b),blocks(a)) + return BlockPBroadcasted(blocks_out,blockaxes(a)) +end + +function Base.broadcasted(f, + a::Union{BlockPVector,BlockPBroadcasted}, + b::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}}) + Base.broadcasted(f,a,Base.materialize(b)) +end + +function Base.broadcasted( + f, + a::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}}, + b::Union{BlockPVector,BlockPBroadcasted}) + Base.broadcasted(f,Base.materialize(a),b) +end + +function Base.materialize(b::BlockPBroadcasted) + blocks_out = map(Base.materialize,blocks(b)) + return mortar(blocks_out) +end + +function Base.materialize!(a::BlockPVector,b::BlockPBroadcasted) + map(Base.materialize!,blocks(a),blocks(b)) + return a +end + diff --git a/src/FESpaces.jl b/src/FESpaces.jl index 9f52a492..21aedf42 100644 --- a/src/FESpaces.jl +++ b/src/FESpaces.jl @@ -119,11 +119,11 @@ end function fetch_vector_ghost_values_cache(vector_partition,partition) cache = PArrays.p_vector_cache(vector_partition,partition) map(reverse,cache) -end +end function fetch_vector_ghost_values!(vector_partition,cache) assemble!((a,b)->b, vector_partition, cache) -end +end function generate_gids( cell_range::PRange, @@ -173,7 +173,6 @@ function generate_gids( cell_to_ldofs, cell_range) - # Find the global range of owned dofs first_gdof = scan(+,nodofs,type=:exclusive,init=one(eltype(nodofs))) @@ -325,18 +324,18 @@ function FESpaces.EvaluationFunction( end function _EvaluationFunction(func, - f::DistributedSingleFieldFESpace,free_values::AbstractVector,isconsistent=false) - local_vals = consistent_local_views(free_values,f.gids,isconsistent) - fields = map(func,f.spaces,local_vals) + f::DistributedSingleFieldFESpace,x::AbstractVector,isconsistent=false) + free_values = change_ghost(x,f.gids,is_consistent=isconsistent,make_consistent=true) + fields = map(func,f.spaces,partition(free_values)) metadata = DistributedFEFunctionData(free_values) DistributedCellField(fields,metadata) end function _EvaluationFunction(func, - f::DistributedSingleFieldFESpace,free_values::AbstractVector, + f::DistributedSingleFieldFESpace,x::AbstractVector, dirichlet_values::AbstractArray{<:AbstractVector},isconsistent=false) - local_vals = consistent_local_views(free_values,f.gids,isconsistent) - fields = map(func,f.spaces,local_vals,dirichlet_values) + free_values = change_ghost(x,f.gids,is_consistent=isconsistent,make_consistent=true) + fields = map(func,f.spaces,partition(free_values),dirichlet_values) metadata = DistributedFEFunctionData(free_values) DistributedCellField(fields,metadata) end @@ -463,16 +462,16 @@ end # Factories -function FESpaces.FESpace(model::DistributedDiscreteModel,reffe;kwargs...) +function FESpaces.FESpace(model::DistributedDiscreteModel,reffe;split_own_and_ghost=false,kwargs...) spaces = map(local_views(model)) do m FESpace(m,reffe;kwargs...) end gids = generate_gids(model,spaces) - vector_type = _find_vector_type(spaces,gids) + vector_type = _find_vector_type(spaces,gids;split_own_and_ghost=split_own_and_ghost) DistributedSingleFieldFESpace(spaces,gids,vector_type) end -function FESpaces.FESpace(_trian::DistributedTriangulation,reffe;kwargs...) 
+function FESpaces.FESpace(_trian::DistributedTriangulation,reffe;split_own_and_ghost=false,kwargs...) trian = add_ghost_cells(_trian) trian_gids = generate_cell_gids(trian) spaces = map(trian.trians) do t @@ -481,28 +480,22 @@ function FESpaces.FESpace(_trian::DistributedTriangulation,reffe;kwargs...) cell_to_ldofs = map(get_cell_dof_ids,spaces) nldofs = map(num_free_dofs,spaces) gids = generate_gids(trian_gids,cell_to_ldofs,nldofs) - vector_type = _find_vector_type(spaces,gids) + vector_type = _find_vector_type(spaces,gids;split_own_and_ghost=split_own_and_ghost) DistributedSingleFieldFESpace(spaces,gids,vector_type) end -function _find_vector_type(spaces,gids) - # TODO Now the user can select the local vector type but not the global one - # new kw-arg global_vector_type ? - # we use PVector for the moment +function _find_vector_type(spaces,gids;split_own_and_ghost=false) local_vector_type = get_vector_type(PartitionedArrays.getany(spaces)) - - if local_vector_type <: BlockVector - T = eltype(local_vector_type) - A = typeof(map(i->Vector{T}(undef,0),partition(gids))) - B = typeof(gids) - vector_type = PVector{T,A,B} - else - T = eltype(local_vector_type) - A = typeof(map(i->local_vector_type(undef,0),partition(gids))) - B = typeof(gids) - vector_type = PVector{T,A,B} + Tv = eltype(local_vector_type) + T = Vector{Tv} + if split_own_and_ghost + T = OwnAndGhostVectors{T} + end + if isa(gids,PRange) + vector_type = typeof(PVector{T}(undef,partition(gids))) + else # isa(gids,BlockPRange) + vector_type = typeof(BlockPVector{T}(undef,gids)) end - return vector_type end @@ -512,17 +505,12 @@ function FESpaces.collect_cell_matrix( trial::DistributedFESpace, test::DistributedFESpace, a::DistributedDomainContribution) - map( - collect_cell_matrix, - local_views(trial), - local_views(test), - local_views(a)) + map(collect_cell_matrix,local_views(trial),local_views(test),local_views(a)) end function FESpaces.collect_cell_vector( test::DistributedFESpace, a::DistributedDomainContribution) - map( - collect_cell_vector,local_views(test),local_views(a)) + map(collect_cell_vector,local_views(test),local_views(a)) end function FESpaces.collect_cell_matrix_and_vector( @@ -563,8 +551,7 @@ function FESpaces.collect_cell_matrix_and_vector( test::DistributedFESpace, mat::DistributedDomainContribution, l::Number) - map( - local_views(trial),local_views(test),local_views(mat)) do u,v,m + map(local_views(trial),local_views(test),local_views(mat)) do u,v,m collect_cell_matrix_and_vector(u,v,m,l) end end @@ -575,8 +562,7 @@ function FESpaces.collect_cell_matrix_and_vector( mat::DistributedDomainContribution, l::Number, uhd) - map( - local_views(trial),local_views(test),local_views(mat),local_views(uhd)) do u,v,m,f + map(local_views(trial),local_views(test),local_views(mat),local_views(uhd)) do u,v,m,f collect_cell_matrix_and_vector(u,v,m,l,f) end end @@ -641,16 +627,37 @@ end # When using this one, make sure that you also loop over ghost cells. # This is at your own risk. 
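# A minimal sketch (illustrative data, not part of the patch) of the row filter
# that the FullyAssembledRows strategy below builds: a local row is kept only
# when it is owned, i.e. when its `local_to_ghost` entry is zero.
rows_local_to_ghost = [0,0,1,0,2]                # local ids 3 and 5 are ghosts
keep_row = row -> rows_local_to_ghost[row] == 0  # same shape as the strategy's row predicate
@assert keep_row.(1:5) == [true,true,false,true,false]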
-function local_assembly_strategy(::FullyAssembledRows,test_space_indices,trial_space_indices) - test_space_local_to_ghost = local_to_ghost(test_space_indices) +function local_assembly_strategy(::FullyAssembledRows,rows,cols) + rows_local_to_ghost = local_to_ghost(rows) GenericAssemblyStrategy( identity, identity, - row->test_space_local_to_ghost[row]==0, + row->rows_local_to_ghost[row]==0, col->true) end # Assembler high level constructors +function FESpaces.SparseMatrixAssembler( + local_mat_type, + local_vec_type, + rows::PRange, + cols::PRange, + par_strategy=SubAssembledRows()) + + assems = map(partition(rows),partition(cols)) do rows,cols + local_strategy = local_assembly_strategy(par_strategy,rows,cols) + FESpaces.GenericSparseMatrixAssembler(SparseMatrixBuilder(local_mat_type), + ArrayBuilder(local_vec_type), + Base.OneTo(length(rows)), + Base.OneTo(length(cols)), + local_strategy) + end + + mat_builder = PSparseMatrixBuilderCOO(local_mat_type,par_strategy) + vec_builder = PVectorBuilder(local_vec_type,par_strategy) + return DistributedSparseMatrixAssembler(par_strategy,assems,mat_builder,vec_builder,rows,cols) +end + function FESpaces.SparseMatrixAssembler( local_mat_type, local_vec_type, @@ -658,25 +665,9 @@ function FESpaces.SparseMatrixAssembler( test::DistributedFESpace, par_strategy=SubAssembledRows()) - Tv = local_vec_type - T = eltype(Tv) - Tm = local_mat_type - trial_dofs_gids_partition = partition(trial.gids) - test_dofs_gids_partition = partition(test.gids) - assems = map(local_views(test),local_views(trial),test_dofs_gids_partition,trial_dofs_gids_partition) do v,u,trial_gids_partition,test_gids_partition - local_strategy = local_assembly_strategy(par_strategy,trial_gids_partition,test_gids_partition) - SparseMatrixAssembler(Tm,Tv,u,v,local_strategy) - end - matrix_builder = PSparseMatrixBuilderCOO(Tm,par_strategy) - vector_builder = PVectorBuilder(Tv,par_strategy) - test_dofs_gids_prange = get_free_dof_ids(test) - trial_dofs_gids_prange = get_free_dof_ids(trial) - DistributedSparseMatrixAssembler(par_strategy, - assems, - matrix_builder, - vector_builder, - test_dofs_gids_prange, - trial_dofs_gids_prange) + rows = get_free_dof_ids(test) + cols = get_free_dof_ids(trial) + SparseMatrixAssembler(local_mat_type,local_vec_type,rows,cols,par_strategy) end function FESpaces.SparseMatrixAssembler( @@ -684,11 +675,8 @@ function FESpaces.SparseMatrixAssembler( test::DistributedFESpace, par_strategy=SubAssembledRows()) - Tv = typeof(Int) - map(local_views(trial)) do trial - Tv = get_vector_type(trial) - end - T = eltype(Tv) + Tv = PartitionedArrays.getany(map(get_vector_type,local_views(trial))) + T = eltype(Tv) Tm = SparseMatrixCSC{T,Int} SparseMatrixAssembler(Tm,Tv,trial,test,par_strategy) end diff --git a/src/GridapDistributed.jl b/src/GridapDistributed.jl index 6df3dd25..9e985bc8 100644 --- a/src/GridapDistributed.jl +++ b/src/GridapDistributed.jl @@ -23,6 +23,7 @@ using SparseArrays using WriteVTK using FillArrays using BlockArrays +using LinearAlgebra import Gridap.TensorValues: inner, outer, double_contraction, symmetric_part import LinearAlgebra: det, tr, cross, dot, ⋅, diag @@ -39,6 +40,8 @@ export get_face_gids export local_views, get_parts export with_ghost, no_ghost +include("BlockPartitionedArrays.jl") + include("Algebra.jl") include("Geometry.jl") diff --git a/src/MultiField.jl b/src/MultiField.jl index a4ae661e..ed33e659 100644 --- a/src/MultiField.jl +++ b/src/MultiField.jl @@ -38,7 +38,7 @@ struct DistributedMultiFieldFESpace{MS,A,B,C,D} <: DistributedFESpace 
function DistributedMultiFieldFESpace( field_fe_space::AbstractVector{<:DistributedSingleFieldFESpace}, part_fe_space::AbstractArray{<:MultiFieldFESpace{MS}}, - gids::PRange, + gids::Union{<:PRange,<:BlockPRange}, vector_type::Type{D}) where {D,MS} A = typeof(field_fe_space) B = typeof(part_fe_space) @@ -63,7 +63,7 @@ function FESpaces.get_free_dof_ids(fs::DistributedMultiFieldFESpace) end function MultiField.restrict_to_field( - f::DistributedMultiFieldFESpace,free_values::PVector,field::Integer) + f::DistributedMultiFieldFESpace,free_values::AbstractVector,field::Integer) values = map(f.part_fe_space,partition(free_values)) do u,x restrict_to_field(u,x,field) end @@ -71,30 +71,10 @@ function MultiField.restrict_to_field( PVector(values,partition(gids)) end -function MultiField.restrict_to_field( - f::DistributedMultiFieldFESpace{<:BlockMultiFieldStyle},free_values::BlockVector,field::Integer) - - # BlockVector{PVector} -> PVector{BlockVector} - fv1 = map(partition,blocks(free_values)) |> to_parray_of_arrays - fv2 = map(mortar,fv1) - - values = map(f.part_fe_space,fv2) do u,x - restrict_to_field(u,x,field) - end - gids = f.field_fe_space[field].gids - PVector(values,partition(gids)) -end - -#function FESpaces.zero_free_values(f::DistributedMultiFieldFESpace{<:BlockMultiFieldStyle}) -# return mortar(map(zero_free_values,f.field_fe_space)) -#end - function FESpaces.FEFunction( f::DistributedMultiFieldFESpace,x::AbstractVector,isconsistent=false) - free_values = change_ghost(x,f.gids) - # This will cause also the single-field components to be consistent - local_vals = consistent_local_views(free_values,f.gids,isconsistent) - part_fe_fun = map(FEFunction,f.part_fe_space,local_vals) + free_values = change_ghost(x,f.gids;is_consistent=isconsistent,make_consistent=true) + part_fe_fun = map(FEFunction,f.part_fe_space,partition(free_values)) field_fe_fun = DistributedSingleFieldFEFunction[] for i in 1:num_fields(f) free_values_i = restrict_to_field(f,free_values,i) @@ -107,10 +87,8 @@ end function FESpaces.EvaluationFunction( f::DistributedMultiFieldFESpace,x::AbstractVector,isconsistent=false) - free_values = change_ghost(x,f.gids) - # This will cause also the single-field components to be consistent - local_vals = consistent_local_views(free_values,f.gids,false) - part_fe_fun = map(EvaluationFunction,f.part_fe_space,local_vals) + free_values = change_ghost(x,f.gids;is_consistent=isconsistent,make_consistent=true) + part_fe_fun = map(EvaluationFunction,f.part_fe_space,partition(free_values)) field_fe_fun = DistributedSingleFieldFEFunction[] for i in 1:num_fields(f) free_values_i = restrict_to_field(f,free_values,i) @@ -127,8 +105,7 @@ function FESpaces.interpolate(objects,fe::DistributedMultiFieldFESpace) end function FESpaces.interpolate!(objects,free_values::AbstractVector,fe::DistributedMultiFieldFESpace) - local_vals = consistent_local_views(free_values,fe.gids,true) - part_fe_fun = map(local_vals,local_views(fe)) do x,f + part_fe_fun = map(partition(free_values),local_views(fe)) do x,f interpolate!(objects,x,f) end field_fe_fun = DistributedSingleFieldFEFunction[] @@ -143,8 +120,7 @@ end function FESpaces.interpolate_everywhere(objects,fe::DistributedMultiFieldFESpace) free_values = zero_free_values(fe) - local_vals = consistent_local_views(free_values,fe.gids,true) - part_fe_fun = map(local_vals,local_views(fe)) do x,f + part_fe_fun = map(partition(free_values),local_views(fe)) do x,f interpolate!(objects,x,f) end field_fe_fun = DistributedSingleFieldFEFunction[] @@ -162,8 +138,10 @@ 
function FESpaces.interpolate_everywhere!( objects,free_values::AbstractVector, dirichlet_values::Vector{AbstractArray{<:AbstractVector}}, fe::DistributedMultiFieldFESpace) - local_vals = consistent_local_views(free_values,fe.gids,true) - part_fe_fun = map(local_vals,local_views(fe)) do x,f + msg = "free_values and fe have incompatible index partitions." + @check partition(axes(free_values,1)) === partition(fe.gids) msg + + part_fe_fun = map(partition(free_values),local_views(fe)) do x,f interpolate!(objects,x,f) end field_fe_fun = DistributedSingleFieldFEFunction[] @@ -179,7 +157,7 @@ end function FESpaces.interpolate_everywhere( objects::Vector{<:DistributedCellField},fe::DistributedMultiFieldFESpace) - local_objects = local_views(objects) + local_objects = map(local_views,objects) local_spaces = local_views(fe) part_fe_fun = map(local_spaces,local_objects...) do f,o... interpolate_everywhere(o,f) @@ -243,13 +221,13 @@ function FESpaces.TrialFESpace(objects,a::DistributedMultiFieldFESpace) TrialFESpace(a,objects) end -function FESpaces.TrialFESpace(a::DistributedMultiFieldFESpace,objects) +function FESpaces.TrialFESpace(a::DistributedMultiFieldFESpace{MS},objects) where MS f_dspace_test = a.field_fe_space f_dspace = map( arg -> TrialFESpace(arg[1],arg[2]), zip(f_dspace_test,objects) ) f_p_space = map(local_views,f_dspace) v(x...) = collect(x) p_f_space = map(v,f_p_space...) - p_mspace = map(MultiFieldFESpace,p_f_space) + p_mspace = map(s->MultiFieldFESpace(s;style=MS()),p_f_space) gids = a.gids vector_type = a.vector_type DistributedMultiFieldFESpace(f_dspace,p_mspace,gids,vector_type) @@ -258,18 +236,20 @@ end # Factory function MultiField.MultiFieldFESpace( - f_dspace::Vector{<:DistributedSingleFieldFESpace};kwargs...) + f_dspace::Vector{<:DistributedSingleFieldFESpace};split_own_and_ghost=false, kwargs...) f_p_space = map(local_views,f_dspace) v(x...) = collect(x) p_f_space = map(v,f_p_space...) p_mspace = map(f->MultiFieldFESpace(f;kwargs...),p_f_space) - gids = generate_multi_field_gids(f_dspace,p_mspace) - vector_type = _find_vector_type(p_mspace,gids) + style = PartitionedArrays.getany(map(MultiFieldStyle,p_mspace)) + gids = generate_multi_field_gids(style,f_dspace,p_mspace) + vector_type = _find_vector_type(p_mspace,gids;split_own_and_ghost=split_own_and_ghost) DistributedMultiFieldFESpace(f_dspace,p_mspace,gids,vector_type) end function generate_multi_field_gids( + ::MultiFieldStyle, f_dspace::Vector{<:DistributedSingleFieldFESpace}, p_mspace::AbstractArray{<:MultiFieldFESpace}) @@ -288,6 +268,19 @@ function generate_multi_field_gids( gids = generate_multi_field_gids(f_p_flid_lid,f_frange) end +function generate_multi_field_gids( + ::BlockMultiFieldStyle{NB,SB,P}, + f_dspace::Vector{<:DistributedSingleFieldFESpace}, + p_mspace::AbstractArray{<:MultiFieldFESpace}) where {NB,SB,P} + + block_ranges = MultiField.get_block_ranges(NB,SB,P) + block_gids = map(block_ranges) do range + space = (length(range) == 1) ? 
f_dspace[range[1]] : MultiFieldFESpace(f_dspace[range]) + get_free_dof_ids(space) + end + return BlockPRange(block_gids) +end + function generate_multi_field_gids( f_p_flid_lid::AbstractVector{<:AbstractArray{<:AbstractVector}}, f_frange::AbstractVector{<:PRange}) @@ -402,7 +395,6 @@ function propagate_to_ghost_multifield!( end end - # BlockSparseMatrixAssemblers const DistributedBlockSparseMatrixAssembler{NB,NV,SB,P} = @@ -415,53 +407,18 @@ function FESpaces.SparseMatrixAssembler( test::DistributedMultiFieldFESpace{<:BlockMultiFieldStyle{NB,SB,P}}, par_strategy=SubAssembledRows()) where {NB,SB,P} - # Build block spaces - function get_block_fespace(spaces,range) - (length(range) == 1) ? spaces[range[1]] : MultiFieldFESpace(spaces[range]) - end - block_ranges = MultiField.get_block_ranges(NB,SB,P) - block_tests = map(range -> get_block_fespace(test.field_fe_space,range),block_ranges) - block_trials = map(range -> get_block_fespace(trial.field_fe_space,range),block_ranges) - - block_idx = CartesianIndices((NB,NB)) + block_idx = CartesianIndices((NB,NB)) + block_rows = blocks(test.gids) + block_cols = blocks(trial.gids) block_assemblers = map(block_idx) do idx - Yi = block_tests[idx[1]]; Xj = block_trials[idx[2]] - return SparseMatrixAssembler(local_mat_type,local_vec_type,Xj,Yi,par_strategy) + rows = block_rows[idx[1]]; cols = block_cols[idx[2]] + return SparseMatrixAssembler(local_mat_type,local_vec_type,rows,cols,par_strategy) end - NV = length(trial.field_fe_space) + NV = length(P) return MultiField.BlockSparseMatrixAssembler{NB,NV,SB,P}(block_assemblers) end -function FESpaces.SparseMatrixAssembler( - trial::DistributedMultiFieldFESpace{<:BlockMultiFieldStyle}, - test ::DistributedMultiFieldFESpace{<:BlockMultiFieldStyle}, - par_strategy=SubAssembledRows()) - Tv = get_vector_type(PartitionedArrays.getany(local_views(first(trial)))) - T = eltype(Tv) - Tm = SparseMatrixCSC{T,Int} - SparseMatrixAssembler(Tm,Tv,trial,test,par_strategy) -end - -# Array of PArrays -> PArray of Arrays -function to_parray_of_arrays(a::AbstractArray{<:MPIArray}) - indices = linear_indices(first(a)) - map(indices) do i - map(a) do aj - PartitionedArrays.getany(aj) - end - end -end - -function to_parray_of_arrays(a::AbstractArray{<:DebugArray}) - indices = linear_indices(first(a)) - map(indices) do i - map(a) do aj - aj.items[i] - end - end -end - function local_views(a::MultiField.BlockSparseMatrixAssembler{NB,NV,SB,P}) where {NB,NV,SB,P} assems = a.block_assemblers array = to_parray_of_arrays(map(local_views,assems)) diff --git a/test/BlockPartitionedArraysTests.jl b/test/BlockPartitionedArraysTests.jl new file mode 100644 index 00000000..542e1fe1 --- /dev/null +++ b/test/BlockPartitionedArraysTests.jl @@ -0,0 +1,109 @@ + +using Test +using Gridap +using PartitionedArrays +using GridapDistributed +using BlockArrays +using SparseArrays +using LinearAlgebra + +using GridapDistributed: BlockPArray, BlockPVector, BlockPMatrix, BlockPRange + + +ranks = with_debug() do distribute + distribute(LinearIndices((2,))) +end + +indices = map(ranks) do r + if r == 1 + own_gids = [1,2,3,4,5] + ghost_gids = [6,7] + ghost_owners = [2,2] + else + own_gids = [6,7,8,9,10] + ghost_gids = [5] + ghost_owners = [1] + end + own_idx = OwnIndices(10,r,own_gids) + ghost_idx = GhostIndices(10,ghost_gids,ghost_owners) + OwnAndGhostIndices(own_idx,ghost_idx) +end + +block_range = BlockPRange([PRange(indices),PRange(indices)]) + +_v = PVector{OwnAndGhostVectors{Vector{Float64}}}(undef,indices) +v = BlockPArray([_v,_v],(block_range,)) + +_m = 
map(CartesianIndices((2,2))) do I + i,j = I[1],I[2] + local_mats = map(ranks,indices) do r, ind + n = local_length(ind) + if i==j && r == 1 + SparseMatrixCSC(n,n,Int[1,3,5,7,9,10,11,13],Int[1,2,2,3,3,4,4,5,5,6,6,7],fill(1.0,12)) + elseif i==j && r == 2 + SparseMatrixCSC(n,n,Int[1,2,4,6,8,10,11],Int[1,1,2,2,3,3,4,4,5,6],fill(1.0,10)) + else + SparseMatrixCSC(n,n,fill(Int(1),n+1),Int[],Float64.([])) + end + end + PSparseMatrix(local_mats,indices,indices) +end +m = BlockPArray(_m,(block_range,block_range)) + +x = similar(_v) +mul!(x,_m[1,1],_v) + +# BlockPRange + +@test blocklength(block_range) == 2 +@test blocksize(block_range) == (2,) + +# AbstractArray API + +__v = similar(v,block_range) +__m = similar(m,(block_range,block_range)) +fill!(__v,1.0) + +__v = __v .+ 1.0 +__v = __v .- 1.0 +__v = __v .* 1.0 +__v = __v ./ 1.0 + +__m = __m .+ 1.0 +__m = __m .- 1.0 +__m = __m .* 1.0 +__m = __m ./ 1.0 + +# PartitionedArrays API + +consistent!(__v) |> wait +t = assemble!(__v) +assemble!(__m) |> wait +fetch(t); + +PartitionedArrays.to_trivial_partition(m) + +local_values(v) +own_values(v) +ghost_values(v) +own_ghost_values(m) +ghost_own_values(m) + +# LinearAlgebra API + +x = similar(v) +mul!(x,m,v) +consistent!(x) |> fetch +partition(x) + +dot(v,x) +norm(v) +copy!(x,v) + +LinearAlgebra.fillstored!(__m,1.0) + +__v = BlockPVector{Float64,PVector{Vector{Float64}}}(undef,block_range) + +maximum(abs,v) +minimum(abs,v) + diff --git a/test/BlockSparseMatrixAssemblersTests.jl b/test/BlockSparseMatrixAssemblersTests.jl index 400b0183..9b92f185 100644 --- a/test/BlockSparseMatrixAssemblersTests.jl +++ b/test/BlockSparseMatrixAssemblersTests.jl @@ -7,31 +7,11 @@ using Gridap.FESpaces, Gridap.ReferenceFEs, Gridap.MultiField using GridapDistributed using PartitionedArrays +using GridapDistributed: BlockPVector, BlockPMatrix -function LinearAlgebra.mul!(y::BlockVector,A::BlockMatrix,x::BlockVector) - o = one(eltype(A)) - for i in blockaxes(A,2) - fill!(y[i],0.0) - for j in blockaxes(A,2) - mul!(y[i],A[i,j],x[j],o,o) - end - end -end - -function GridapDistributed.change_ghost( - x::BlockVector, - X::GridapDistributed.DistributedMultiFieldFESpace{<:BlockMultiFieldStyle{NB,SB,P}}) where {NB,SB,P} - block_ranges = MultiField.get_block_ranges(NB,SB,P) - array = map(block_ranges,blocks(x)) do range, xi - Xi = (length(range) == 1) ? 
X.field_fe_space[range[1]] : MultiFieldFESpace(X.field_fe_space[range]) - GridapDistributed.change_ghost(xi,Xi.gids) - end - return mortar(array) -end - -function is_same_vector(x::BlockVector,y::PVector,Ub,U) +function is_same_vector(x::BlockPVector,y::PVector,Ub,U) y_fespace = GridapDistributed.change_ghost(y,U.gids) - x_fespace = GridapDistributed.change_ghost(x,Ub) + x_fespace = GridapDistributed.change_ghost(x,Ub.gids) res = map(1:num_fields(Ub)) do i xi = restrict_to_field(Ub,x_fespace,i) @@ -41,13 +21,13 @@ function is_same_vector(x::BlockVector,y::PVector,Ub,U) return all(res) end -function is_same_matrix(Ab::BlockMatrix,A::PSparseMatrix,Xb,X) +function is_same_matrix(Ab::BlockPMatrix,A::PSparseMatrix,Xb,X) yb = mortar(map(Aii->pfill(0.0,partition(axes(Aii,1))),diag(blocks(Ab)))); xb = mortar(map(Aii->pfill(1.0,partition(axes(Aii,2))),diag(blocks(Ab)))); mul!(yb,Ab,xb) - y = pfill(0.0,partition(axes(A)[1])) - x = pfill(1.0,partition(axes(A)[2])) + y = pfill(0.0,partition(axes(A,1))) + x = pfill(1.0,partition(axes(A,2))) mul!(y,A,x) return is_same_vector(yb,y,Xb,X) @@ -89,9 +69,18 @@ function _main(n_spaces,mfs,weakform,Ω,dΩ,U,V) @test is_same_vector(b1_blocks,b1,Yb,Y) @test is_same_matrix(A1_blocks,A1,Xb,X) + assemble_matrix!(A1_blocks,assem_blocks,bmatdata); + assemble_vector!(b1_blocks,assem_blocks,bvecdata); + @test is_same_vector(b1_blocks,b1,Yb,Y) + @test is_same_matrix(A1_blocks,A1,Xb,X) + A2_blocks, b2_blocks = assemble_matrix_and_vector(assem_blocks,bdata) @test is_same_vector(b2_blocks,b2,Yb,Y) @test is_same_matrix(A2_blocks,A2,Xb,X) + + assemble_matrix_and_vector!(A2_blocks,b2_blocks,assem_blocks,bdata) + @test is_same_vector(b2_blocks,b2,Yb,Y) + @test is_same_matrix(A2_blocks,A2,Xb,X) op = AffineFEOperator(biform,liform,X,Y) block_op = AffineFEOperator(biform,liform,Xb,Yb) diff --git a/test/FESpacesTests.jl b/test/FESpacesTests.jl index 7b0860ab..bf1e78e6 100644 --- a/test/FESpacesTests.jl +++ b/test/FESpacesTests.jl @@ -91,11 +91,11 @@ function main(distribute,parts,das) V = TestFESpace(model,reffe,dirichlet_tags="boundary") U = TrialFESpace(u,V) V2 = FESpace(Ω,reffe) - @test get_vector_type(V) <: PVector - @test get_vector_type(U) <: PVector - @test get_vector_type(V2) <: PVector + @test get_vector_type(V) <: PVector{<:Vector} + @test get_vector_type(U) <: PVector{<:Vector} + @test get_vector_type(V2) <: PVector{<:Vector} - free_values_partition=map(partition(V.gids)) do indices + free_values_partition = map(partition(V.gids)) do indices ones(Float64,local_length(indices)) end @@ -163,6 +163,23 @@ function main(distribute,parts,das) cont = ∫( abs2(u0h) )dΩ @test sqrt(sum(cont)) < 1.0e-14 + # OwnAndGhostVector partitions + V3 = FESpace(model,reffe,dirichlet_tags="boundary",split_own_and_ghost=true) + U3 = TrialFESpace(u,V3) + @test get_vector_type(V3) <: PVector{<:OwnAndGhostVectors} + + free_values = zero_free_values(U3) + dirichlet_values = get_dirichlet_dof_values(U3) + uh = interpolate_everywhere(u,U3) + _uh = interpolate_everywhere(uh,U3) + __uh = interpolate_everywhere!(_uh,free_values,dirichlet_values,U3) + + uh = interpolate(u,U3) + dofs = get_fe_dof_basis(U3) + cell_vals = dofs(uh) + gather_free_values!(free_values,U3,cell_vals) + gather_free_and_dirichlet_values!(free_values,dirichlet_values,U3,cell_vals) + uh = FEFunction(U3,free_values,dirichlet_values) # I need to use the square [0,2]² in the sequel so that # when integrating over the interior facets, the entries diff --git a/test/MultiFieldTests.jl b/test/MultiFieldTests.jl index b5a4f534..cb53e322 100644 
--- a/test/MultiFieldTests.jl +++ b/test/MultiFieldTests.jl @@ -2,11 +2,17 @@ module MultiFieldTests using Gridap using Gridap.FESpaces +using Gridap.MultiField using GridapDistributed using PartitionedArrays using Test -function main(distribute, parts) +function l2_error(u1,u2,dΩ) + eu = u1 - u2 + sqrt(sum(∫( eu⋅eu )dΩ)) +end + +function main(distribute, parts, mfs) ranks = distribute(LinearIndices((prod(parts),))) output = mkpath(joinpath(@__DIR__,"output")) @@ -16,6 +22,7 @@ function main(distribute, parts) Ω = Triangulation(model) k = 2 + dΩ = Measure(Ω,2*k) reffe_u = ReferenceFE(lagrangian,VectorValue{2,Float64},k) reffe_p = ReferenceFE(lagrangian,Float64,k-1,space=:P) @@ -29,34 +36,43 @@ function main(distribute, parts) U = TrialFESpace(V,u) P = TrialFESpace(Q,p) - VxQ = MultiFieldFESpace([V,Q]) - UxP = MultiFieldFESpace([U,P]) # This generates again the global numbering + VxQ = MultiFieldFESpace([V,Q];style=mfs) + UxP = MultiFieldFESpace([U,P];style=mfs) # This generates again the global numbering UxP = TrialFESpace([u,p],VxQ) # This reuses the one computed @test length(UxP) == 2 uh, ph = interpolate([u,p],UxP) - eu = u - uh - ep = p - ph - - dΩ = Measure(Ω,2*k) - @test sqrt(sum(∫( eu⋅eu )dΩ)) < 1.0e-9 - @test sqrt(sum(∫( eu⋅eu )dΩ)) < 1.0e-9 + @test l2_error(u,uh,dΩ) < 1.0e-9 + @test l2_error(p,ph,dΩ) < 1.0e-9 a((u,p),(v,q)) = ∫( ∇(v)⊙∇(u) - q*(∇⋅u) - (∇⋅v)*p )*dΩ l((v,q)) = ∫( v⋅f - q*g )*dΩ op = AffineFEOperator(a,l,UxP,VxQ) - solver = LinearFESolver(BackslashSolver()) - uh, ph = solve(solver,op) - - eu = u - uh - ep = p - ph - - writevtk(Ω,"Ω",nsubcells=10,cellfields=["uh"=>uh,"ph"=>ph]) - - @test sqrt(sum(∫( eu⋅eu )dΩ)) < 1.0e-9 - @test sqrt(sum(∫( eu⋅eu )dΩ)) < 1.0e-9 + if !isa(mfs,BlockMultiFieldStyle) # BlockMultiFieldStyle does not support BackslashSolver + solver = LinearFESolver(BackslashSolver()) + uh, ph = solve(solver,op) + @test l2_error(u,uh,dΩ) < 1.0e-9 + @test l2_error(p,ph,dΩ) < 1.0e-9 + + writevtk(Ω,"Ω",nsubcells=10,cellfields=["uh"=>uh,"ph"=>ph]) + end + + A = get_matrix(op) + xh = interpolate([u,p],UxP) + x = GridapDistributed.change_ghost(get_free_dof_values(xh),axes(A,2)) + uh1, ph1 = FESpaces.EvaluationFunction(UxP,x) + uh2, ph2 = FEFunction(UxP,x) + + @test l2_error(u,uh1,dΩ) < 1.0e-9 + @test l2_error(p,ph1,dΩ) < 1.0e-9 + @test l2_error(u,uh2,dΩ) < 1.0e-9 + @test l2_error(p,ph2,dΩ) < 1.0e-9 +end +function main(distribute, parts) + main(distribute, parts, ConsecutiveMultiFieldStyle()) + main(distribute, parts, BlockMultiFieldStyle()) end end # module diff --git a/test/StokesHdivDGTests.jl b/test/StokesHdivDGTests.jl index f3f19c78..f9d77494 100644 --- a/test/StokesHdivDGTests.jl +++ b/test/StokesHdivDGTests.jl @@ -34,84 +34,84 @@ function stokes_solution_2D(μ::Real) end function main(distribute,parts) - ranks = distribute(LinearIndices((prod(parts),))) - - μ = 1.0 - sol = stokes_solution_2D(μ) - u_ref = sol.u - f_ref = sol.f - σ_ref = sol.σ - - D = 2 - n = 4 - domain = Tuple(repeat([0,1],D)) - partition = (n,n) - model = CartesianDiscreteModel(ranks,parts,domain,partition) - - labels = get_face_labeling(model) - add_tag_from_tags!(labels,"dirichlet",[5,6,7]) - add_tag_from_tags!(labels,"neumann",[8,]) - - ############################################################################################ - order = 1 - reffeᵤ = ReferenceFE(raviart_thomas,Float64,order) - V = TestFESpace(model,reffeᵤ,conformity=:HDiv,dirichlet_tags="dirichlet") - U = TrialFESpace(V,u_ref) - - reffeₚ = ReferenceFE(lagrangian,Float64,order;space=:P) - Q = 
TestFESpace(model,reffeₚ,conformity=:L2) - P = TrialFESpace(Q) - - Y = MultiFieldFESpace([V, Q]) - X = MultiFieldFESpace([U, P]) - - qdegree = 2*order+1 - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - - Γ = BoundaryTriangulation(model) - dΓ = Measure(Γ,qdegree) - n_Γ = get_normal_vector(Γ) - - Γ_D = BoundaryTriangulation(model;tags=["dirichlet"]) - dΓ_D = Measure(Γ_D,qdegree) - n_Γ_D = get_normal_vector(Γ_D) - - Γ_N = BoundaryTriangulation(model;tags="neumann") - dΓ_N = Measure(Γ_N,qdegree) - n_Γ_N = get_normal_vector(Γ_N) - - Λ = SkeletonTriangulation(model) - dΛ = Measure(Λ,qdegree) - n_Λ = get_normal_vector(Λ) - - h_e = CellField(map(get_array,local_views(∫(1)dΩ)),Ω) - h_e_Λ = CellField(map(get_array,local_views(∫(1)dΛ)),Λ) - h_e_Γ_D = CellField(map(get_array,local_views(∫(1)dΓ_D)),Γ_D) - - β_U = 50.0 - Δ_dg(u,v) = ∫(∇(v)⊙∇(u))dΩ - - ∫(jump(v⊗n_Λ)⊙(mean(∇(u))))dΛ - - ∫(mean(∇(v))⊙jump(u⊗n_Λ))dΛ - - ∫(v⋅(∇(u)⋅n_Γ_D))dΓ_D - - ∫((∇(v)⋅n_Γ_D)⋅u)dΓ_D - rhs((v,q)) = ∫((f_ref⋅v))*dΩ - ∫((∇(v)⋅n_Γ_D)⋅u_ref)dΓ_D + ∫((n_Γ_N⋅σ_ref)⋅v)*dΓ_N - - penalty(u,v) = ∫(jump(v⊗n_Λ)⊙((β_U/h_e_Λ*jump(u⊗n_Λ))))dΛ + ∫(v⋅(β_U/h_e_Γ_D*u))dΓ_D - penalty_rhs((v,q)) = ∫(v⋅(β_U/h_e_Γ_D*u_ref))dΓ_D - - a((u,p),(v,q)) = Δ_dg(u,v) + ∫(-(∇⋅v)*p - q*(∇⋅u))dΩ + penalty(u,v) - l((v,q)) = rhs((v,q)) - ∫(q*(∇⋅u_ref))dΩ + penalty_rhs((v,q)) - - op = AffineFEOperator(a,l,X,Y) - xh = solve(op) - - uh, ph = xh - err_u = l2_error(Ω,uh,sol.u) - err_p = l2_error(Ω,ph,sol.p) - tol = 1.0e-12 - @test err_u < tol - @test err_p < tol + ranks = distribute(LinearIndices((prod(parts),))) + + μ = 1.0 + sol = stokes_solution_2D(μ) + u_ref = sol.u + f_ref = sol.f + σ_ref = sol.σ + + D = 2 + n = 4 + domain = Tuple(repeat([0,1],D)) + partition = (n,n) + model = CartesianDiscreteModel(ranks,parts,domain,partition) + + labels = get_face_labeling(model) + add_tag_from_tags!(labels,"dirichlet",[5,6,7]) + add_tag_from_tags!(labels,"neumann",[8,]) + + ############################################################################################ + order = 1 + reffeᵤ = ReferenceFE(raviart_thomas,Float64,order) + V = TestFESpace(model,reffeᵤ,conformity=:HDiv,dirichlet_tags="dirichlet") + U = TrialFESpace(V,u_ref) + + reffeₚ = ReferenceFE(lagrangian,Float64,order;space=:P) + Q = TestFESpace(model,reffeₚ,conformity=:L2) + P = TrialFESpace(Q) + + Y = MultiFieldFESpace([V, Q]) + X = MultiFieldFESpace([U, P]) + + qdegree = 2*order+1 + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + + Γ = BoundaryTriangulation(model) + dΓ = Measure(Γ,qdegree) + n_Γ = get_normal_vector(Γ) + + Γ_D = BoundaryTriangulation(model;tags=["dirichlet"]) + dΓ_D = Measure(Γ_D,qdegree) + n_Γ_D = get_normal_vector(Γ_D) + + Γ_N = BoundaryTriangulation(model;tags="neumann") + dΓ_N = Measure(Γ_N,qdegree) + n_Γ_N = get_normal_vector(Γ_N) + + Λ = SkeletonTriangulation(model) + dΛ = Measure(Λ,qdegree) + n_Λ = get_normal_vector(Λ) + + h_e = CellField(map(get_array,local_views(∫(1)dΩ)),Ω) + h_e_Λ = CellField(map(get_array,local_views(∫(1)dΛ)),Λ) + h_e_Γ_D = CellField(map(get_array,local_views(∫(1)dΓ_D)),Γ_D) + + β_U = 50.0 + Δ_dg(u,v) = ∫(∇(v)⊙∇(u))dΩ - + ∫(jump(v⊗n_Λ)⊙(mean(∇(u))))dΛ - + ∫(mean(∇(v))⊙jump(u⊗n_Λ))dΛ - + ∫(v⋅(∇(u)⋅n_Γ_D))dΓ_D - + ∫((∇(v)⋅n_Γ_D)⋅u)dΓ_D + rhs((v,q)) = ∫((f_ref⋅v))*dΩ - ∫((∇(v)⋅n_Γ_D)⋅u_ref)dΓ_D + ∫((n_Γ_N⋅σ_ref)⋅v)*dΓ_N + + penalty(u,v) = ∫(jump(v⊗n_Λ)⊙((β_U/h_e_Λ*jump(u⊗n_Λ))))dΛ + ∫(v⋅(β_U/h_e_Γ_D*u))dΓ_D + penalty_rhs((v,q)) = ∫(v⋅(β_U/h_e_Γ_D*u_ref))dΓ_D + + a((u,p),(v,q)) = Δ_dg(u,v) + ∫(-(∇⋅v)*p - q*(∇⋅u))dΩ + penalty(u,v) + l((v,q)) = rhs((v,q)) - 
∫(q*(∇⋅u_ref))dΩ + penalty_rhs((v,q)) + + op = AffineFEOperator(a,l,X,Y) + xh = solve(op) + + uh, ph = xh + err_u = l2_error(Ω,uh,sol.u) + err_p = l2_error(Ω,ph,sol.p) + tol = 1.0e-12 + @test err_u < tol + @test err_p < tol end end # module
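
A minimal usage sketch of the block types exercised above, distilled from
test/BlockPartitionedArraysTests.jl (illustrative, not part of the patch; it
assumes only the API introduced in src/BlockPartitionedArrays.jl, and the
asserted values follow from the 2-part, 10-gid layout):

using PartitionedArrays, BlockArrays, LinearAlgebra
using GridapDistributed: BlockPVector, BlockPRange

# Two debug-mode parts over 10 global ids, with one layer of ghost ids.
ranks = with_debug() do distribute
  distribute(LinearIndices((2,)))
end
indices = map(ranks) do r
  if r == 1
    OwnAndGhostIndices(OwnIndices(10,r,[1,2,3,4,5]),GhostIndices(10,[6,7],[2,2]))
  else
    OwnAndGhostIndices(OwnIndices(10,r,[6,7,8,9,10]),GhostIndices(10,[5],[1]))
  end
end

rows = BlockPRange([PRange(indices),PRange(indices)]) # 2 blocks, same layout
v = BlockPVector{Vector{Float64}}(undef,rows)         # block-wise allocation
fill!(v,1.0)
w = 2.0 .* v                                          # block-aware broadcasting

@assert dot(v,w) ≈ 40.0      # 2 blocks × 10 owned entries × (1.0*2.0)
@assert norm(w) ≈ sqrt(80.0) # 2 blocks × 10 owned entries of 2.0²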