
Commit

Merge pull request #141 from gridap/ode-refactor
ODE refactor
JordiManyer authored Apr 14, 2024
2 parents 86b3d3d + 6a5a51f commit 81bcf23
Showing 21 changed files with 662 additions and 578 deletions.
12 changes: 10 additions & 2 deletions NEWS.md
@@ -5,13 +5,21 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.3.6] 2024-01-28
## [0.4.0] 2024-04-12

### Changed

- `DistributedCellField` now inherits from `CellField`. To accommodate the necessary API, we now save a pointer to the `DistributedTriangulation` where it is defined. This also requires `DistributedSingleFieldFESpace` to save the triangulation. Since PR [#141](https://github.com/gridap/GridapDistributed.jl/pull/141).
- All the distributed `MultiField` cell field types are now represented by a single `DistributedMultiFieldCellField`. The `DistributedMultiFieldFEFunction` and `DistributedMultiFieldFEBasis` structs have been removed and replaced with constant aliases, making the multi-field types consistent with their single-field counterparts. Since PR [#141](https://github.com/gridap/GridapDistributed.jl/pull/141).
- Major refactor of the ODE module. The implementation has been significantly simplified while increasing the capabilities of the API. All `TransientDistributedObjects` structs have been removed and replaced by `DistributedTransientObjects = DistributedObjects{TransientObject}`. Full support for explicit (EX), implicit (IM), and implicit-explicit (IMEX) methods. See Gridap's release notes for details. Since PR [#141](https://github.com/gridap/GridapDistributed.jl/pull/141).

## [0.3.6] 2024-01-28

### Added

- Added redistribution for MultiFieldFESpaces. Since PR [#140](https://github.com/gridap/GridapDistributed.jl/pull/140).

### Fixed

- Fixed issue [#142](https://github.com/gridap/GridapDistributed.jl/issues/142). Since PR [#142](https://github.com/gridap/GridapDistributed.jl/pull/142).

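Several of the entries above replace dedicated structs with constant aliases of an existing parametric type (the ODE entry mentions `DistributedTransientObjects = DistributedObjects{TransientObject}`, and the multi-field entry replaces `DistributedMultiFieldFEFunction` and `DistributedMultiFieldFEBasis` with aliases of `DistributedMultiFieldCellField`). A minimal, self-contained sketch of that idiom in plain Julia; the names and fields below are hypothetical stand-ins, not the package's actual definitions:

    # Hypothetical stand-ins for the real types.
    abstract type TransientObject end
    struct MyTransientObject <: TransientObject end

    # One parametric container instead of a family of dedicated structs.
    struct DistributedObjects{T}
      items::Vector{T}
    end

    # The old dedicated struct becomes a constant alias that only constrains the
    # type parameter; every method defined on `DistributedObjects` keeps working,
    # and dispatch can still target the alias.
    const DistributedTransientObjects = DistributedObjects{<:TransientObject}

    is_transient(::DistributedTransientObjects) = true
    is_transient(::DistributedObjects) = false

    @assert is_transient(DistributedObjects([MyTransientObject()]))
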
4 changes: 2 additions & 2 deletions Project.toml
@@ -1,7 +1,7 @@
name = "GridapDistributed"
uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355"
authors = ["S. Badia <[email protected]>", "A. F. Martin <[email protected]>", "F. Verdugo <[email protected]>"]
version = "0.3.6"
version = "0.4.0"

[deps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
@@ -17,7 +17,7 @@ WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192"
[compat]
BlockArrays = "0.16.38"
FillArrays = "0.8.4,1"
Gridap = "0.17.23"
Gridap = "0.18"
LinearAlgebra = "1.3"
MPI = "0.16, 0.17, 0.18, 0.19, 0.20"
PartitionedArrays = "0.3.3"
6 changes: 3 additions & 3 deletions src/Adaptivity.jl
@@ -3,9 +3,9 @@

const DistributedAdaptedDiscreteModel{Dc,Dp} = GenericDistributedDiscreteModel{Dc,Dp,<:AbstractArray{<:AdaptedDiscreteModel{Dc,Dp}}}

function DistributedAdaptedDiscreteModel(model ::DistributedDiscreteModel,
parent ::DistributedDiscreteModel,
glue ::AbstractArray{<:AdaptivityGlue})
function DistributedAdaptedDiscreteModel(model :: DistributedDiscreteModel,
parent :: DistributedDiscreteModel,
glue :: AbstractArray{<:AdaptivityGlue})
models = map(local_views(model),local_views(parent),glue) do model, parent, glue
AdaptedDiscreteModel(model,parent,glue)
end
94 changes: 76 additions & 18 deletions src/Algebra.jl
@@ -29,6 +29,57 @@ function Algebra.allocate_in_domain(matrix::BlockPMatrix)
allocate_in_domain(BlockPVector{V},matrix)
end

# PSparseMatrix copy

function Base.copy(a::PSparseMatrix)
mats = map(copy,partition(a))
cache = map(PartitionedArrays.copy_cache,a.cache)
return PSparseMatrix(mats,partition(axes(a,1)),partition(axes(a,2)),cache)
end

# PartitionedArrays extras

function LinearAlgebra.axpy!(α,x::PVector,y::PVector)
@check partition(axes(x,1)) === partition(axes(y,1))
map(partition(x),partition(y)) do x,y
LinearAlgebra.axpy!(α,x,y)
end
consistent!(y) |> wait
return y
end

function LinearAlgebra.axpy!(α,x::BlockPVector,y::BlockPVector)
map(blocks(x),blocks(y)) do x,y
LinearAlgebra.axpy!(α,x,y)
end
return y
end
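
For reference, the two methods above follow the standard `LinearAlgebra.axpy!(α, x, y)` contract, `y ← α*x + y`, applied partition-wise (for `PVector`, followed by a `consistent!` exchange so ghost values stay in sync) or block-wise (for `BlockPVector`). A small serial illustration of the contract with plain `Vector`s:

    using LinearAlgebra

    x = [1.0, 2.0, 3.0]
    y = [10.0, 10.0, 10.0]

    # axpy!(α, x, y) updates y in place: y ← α*x + y.
    axpy!(2.0, x, y)
    @assert y == [12.0, 14.0, 16.0]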

function Algebra.axpy_entries!(
α::Number, A::PSparseMatrix, B::PSparseMatrix;
check::Bool=true
)
# We should definitely check here that the index partitions are the same.
# However, because the different matrices are assembled separately, the objects are not the
# same (i.e. we cannot use ===). Checking the index partitions would then be costly...
@assert reduce(&,map(PartitionedArrays.matching_local_indices,partition(axes(A,1)),partition(axes(B,1))))
@assert reduce(&,map(PartitionedArrays.matching_local_indices,partition(axes(A,2)),partition(axes(B,2))))
map(partition(A),partition(B)) do A, B
Algebra.axpy_entries!(α,A,B;check)
end
return B
end

function Algebra.axpy_entries!(
α::Number, A::BlockPMatrix, B::BlockPMatrix;
check::Bool=true
)
map(blocks(A),blocks(B)) do A, B
Algebra.axpy_entries!(α,A,B;check)
end
return B
end
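
As the name suggests, `axpy_entries!(α, A, B)` adds `α` times the stored entries of `A` into `B` in place; the two methods above simply forward the call to each local matrix or block (the `PSparseMatrix` method additionally asserts that the row and column index partitions match). A serial sketch of the underlying operation for `SparseMatrixCSC`, assuming matching sparsity patterns; the helper below is illustrative, not Gridap's implementation:

    using SparseArrays

    # Illustrative helper: B ← B + α*A over the stored entries, assuming A and B
    # share the same sparsity pattern (analogous to the matching index partitions
    # assumed by the distributed methods above).
    function axpy_entries_sketch!(α, A::SparseMatrixCSC, B::SparseMatrixCSC)
      @assert size(A) == size(B) && rowvals(A) == rowvals(B)  # partial pattern check
      nonzeros(B) .+= α .* nonzeros(A)
      return B
    end

    A = sparse([1, 2], [1, 2], [1.0, 2.0])
    B = sparse([1, 2], [1, 2], [10.0, 10.0])
    axpy_entries_sketch!(3.0, A, B)
    @assert B == sparse([1, 2], [1, 2], [13.0, 16.0])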

# This might go to Gridap in the future. We keep it here for the moment.
function change_axes(a::Algebra.ArrayCounter,axes)
@notimplemented
Expand Down Expand Up @@ -95,6 +146,9 @@ function num_parts(comm::MPI.Comm)
end
nparts
end
@inline num_parts(comm::MPIArray) = num_parts(comm.comm)
@inline num_parts(comm::DebugArray) = length(comm.items)
@inline num_parts(comm::MPIVoidVector) = num_parts(comm.comm)

function get_part_id(comm::MPI.Comm)
if comm != MPI.COMM_NULL
@@ -104,23 +158,28 @@ function get_part_id(comm::MPI.Comm)
end
id
end
@inline get_part_id(comm::MPIArray) = get_part_id(comm.comm)
@inline get_part_id(comm::MPIVoidVector) = get_part_id(comm.comm)

"""
i_am_in(comm::MPIArray)
i_am_in(comm::DebugArray)
Returns `true` if the processor is part of the subcommunicator `comm`.
"""
function i_am_in(comm::MPI.Comm)
get_part_id(comm) >=0
end

function i_am_in(comm::MPIArray)
i_am_in(comm.comm)
end

function i_am_in(comm::MPIVoidVector)
i_am_in(comm.comm)
end
@inline i_am_in(comm::MPIArray) = i_am_in(comm.comm)
@inline i_am_in(comm::MPIVoidVector) = i_am_in(comm.comm)
@inline i_am_in(comm::DebugArray) = true

function change_parts(x::Union{MPIArray,DebugArray,Nothing,MPIVoidVector}, new_parts; default=nothing)
x_new = map(new_parts) do _p
if isa(x,MPIArray) || isa(x,DebugArray)
x_new = map(new_parts) do p
if isa(x,MPIArray)
PartitionedArrays.getany(x)
elseif isa(x,DebugArray) && (p <= length(x.items))
x.items[p]
else
default
end
@@ -314,7 +373,7 @@ end

function local_views(a::BlockPMatrix,new_rows::BlockPRange,new_cols::BlockPRange)
vals = map(CartesianIndices(blocksize(a))) do I
local_views(a[Block(I)],new_rows[Block(I[1])],new_cols[Block(I[2])])
local_views(blocks(a)[I],blocks(new_rows)[I],blocks(new_cols)[I])
end |> to_parray_of_arrays
return map(mortar,vals)
end
@@ -795,12 +854,10 @@ function local_views(a::PVectorAllocationTrackTouchedAndValues)
end

function _setup_prange_from_pvector_allocation(a::PVectorAllocationTrackTouchedAndValues)
test_dofs_prange = a.test_dofs_gids_prange # dof ids of the test space
ngrdofs = length(test_dofs_prange)


# Find the ghost rows
allocations = local_views(a.allocations)
indices = partition(test_dofs_prange)
indices = partition(a.test_dofs_gids_prange)
I_ghost_lids_to_dofs_ghost_lids = map(allocations, indices) do allocation, indices
dofs_lids_touched = findall(allocation.touched)
loc_to_gho = local_to_ghost(indices)
@@ -817,10 +874,11 @@ function _setup_prange_from_pvector_allocation(a::PVectorAllocationTrackTouchedA
I_ghost_lids
end

gids_ghost_to_global, gids_ghost_to_owner = map(
ghost_to_global, ghost_to_owner = map(
find_gid_and_owner,I_ghost_lids_to_dofs_ghost_lids,indices) |> tuple_of_arrays

_setup_prange_impl_(ngrdofs,indices,gids_ghost_to_global,gids_ghost_to_owner)
ngids = length(a.test_dofs_gids_prange)
_setup_prange_impl_(ngids,indices,ghost_to_global,ghost_to_owner)
end

function Algebra.create_from_nz(a::PVectorAllocationTrackTouchedAndValues)
@@ -848,7 +906,7 @@ function find_gid_and_owner(ighost_to_jghost,jindices)
jghost_to_local = ghost_to_local(jindices)
jlocal_to_global = local_to_global(jindices)
jlocal_to_owner = local_to_owner(jindices)
ighost_to_jlocal = view(jghost_to_local,ighost_to_jghost)
ighost_to_jlocal = sort(view(jghost_to_local,ighost_to_jghost))

ighost_to_global = jlocal_to_global[ighost_to_jlocal]
ighost_to_owner = jlocal_to_owner[ighost_to_jlocal]
65 changes: 52 additions & 13 deletions src/BlockPartitionedArrays.jl
@@ -145,17 +145,19 @@

function Base.copyto!(y::BlockPVector,x::BlockPVector)
@check blocklength(x) == blocklength(y)
for i in blockaxes(x,1)
copyto!(y[i],x[i])
yb, xb = blocks(y), blocks(x)
for i in 1:blocksize(x,1)
copyto!(yb[i],xb[i])
end
return y
end

function Base.copyto!(y::BlockPMatrix,x::BlockPMatrix)
@check blocksize(x) == blocksize(y)
for i in blockaxes(x,1)
for j in blockaxes(x,2)
copyto!(y[i,j],x[i,j])
yb, xb = blocks(y), blocks(x)
for i in 1:blocksize(x,1)
for j in 1:blocksize(x,2)
copyto!(yb[i,j],xb[i,j])
end
end
return y
@@ -169,6 +171,8 @@ function Base.fill!(a::BlockPVector,v)
end

function Base.sum(a::BlockPArray)
# TODO: This could use a single communication instead of one for each block.
# TODO: We could implement a generic reduction that we apply to sum, all, any, etc.
return sum(map(sum,blocks(a)))
end

@@ -284,15 +288,47 @@

# LinearAlgebra API

function Base.:*(a::Number,b::BlockPArray)
mortar(map(bi -> a*bi,blocks(b)))
end
Base.:*(b::BlockPMatrix,a::Number) = a*b
Base.:/(b::BlockPVector,a::Number) = (1/a)*b

function Base.:*(a::BlockPMatrix,b::BlockPVector)
c = similar(b)
mul!(c,a,b)
return c
end

for op in (:+,:-)
@eval begin
function Base.$op(a::BlockPArray)
mortar(map($op,blocks(a)))
end
function Base.$op(a::BlockPArray,b::BlockPArray)
@assert blocksize(a) == blocksize(b)
mortar(map($op,blocks(a),blocks(b)))
end
end
end
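
The `for`/`@eval` loop above is the standard Julia idiom for defining the same method body for several operators at once. A tiny self-contained version of the pattern, using a hypothetical `Wrapper` type rather than `BlockPArray`:

    struct Wrapper
      data::Vector{Float64}
    end

    # Generate Base.:+ and Base.:- for Wrapper in a single loop, as done above for
    # BlockPArray (there the block-wise results are reassembled with `mortar`).
    for op in (:+, :-)
      @eval Base.$op(a::Wrapper, b::Wrapper) = Wrapper($op.(a.data, b.data))
    end

    @assert (Wrapper([1.0, 2.0]) - Wrapper([0.5, 0.5])).data == [0.5, 1.5]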

function LinearAlgebra.mul!(y::BlockPVector,A::BlockPMatrix,x::BlockPVector)
o = one(eltype(A))
mul!(y,A,x,o,o)
end

function LinearAlgebra.mul!(y::BlockPVector,A::BlockPMatrix,x::BlockPVector,α::Number,β::Number)
yb, Ab, xb = blocks(y), blocks(A), blocks(x)
z = zero(eltype(y))
o = one(eltype(A))
for i in blockaxes(A,2)
fill!(y[i],z)
for j in blockaxes(A,2)
mul!(y[i],A[i,j],x[j],o,o)
for i in 1:blocksize(A,1)
fill!(yb[i],z)
for j in 1:blocksize(A,2)
mul!(yb[i],Ab[i,j],xb[j],α,o)
end
rmul!(yb[i],β)
end
return y
end
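
For reference, LinearAlgebra's five-argument `mul!(y, A, x, α, β)` contract is `y ← α*A*x + β*y`; the block methods above assemble the product from per-block `mul!` calls. A small serial check of the contract with dense arrays:

    using LinearAlgebra

    A = [1.0 0.0; 0.0 2.0]
    x = [1.0, 1.0]
    y = [5.0, 5.0]

    # mul!(y, A, x, α, β) updates y in place as α*A*x + β*y.
    mul!(y, A, x, 2.0, 0.5)
    @assert y == [4.5, 6.5]    # 2 .* [1.0, 2.0] .+ 0.5 .* [5.0, 5.0]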

function LinearAlgebra.dot(x::BlockPVector,y::BlockPVector)
@@ -341,16 +377,19 @@ function Base.broadcasted(f, a::Union{BlockPArray,BlockPBroadcasted}, b::Number)
return BlockPBroadcasted(blocks_out,blockaxes(a))
end

function Base.broadcasted(f,
a::Union{BlockPArray,BlockPBroadcasted},
b::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}})
function Base.broadcasted(
f,
a::Union{BlockPArray,BlockPBroadcasted},
b::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}}
)
Base.broadcasted(f,a,Base.materialize(b))
end

function Base.broadcasted(
f,
a::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}},
b::Union{BlockPArray,BlockPBroadcasted})
b::Union{BlockPArray,BlockPBroadcasted}
)
Base.broadcasted(f,Base.materialize(a),b)
end


2 comments on commit 81bcf23

@JordiManyer
Member Author


@JuliaRegistrator register()

@JuliaRegistrator


Registration pull request created: JuliaRegistries/General/104900

Tip: Release Notes

Did you know you can add release notes too? Just add Markdown-formatted text underneath the comment after the text
"Release notes:" and it will be added to the registry PR. If TagBot is installed, it will also be added to the
release that TagBot creates. For example:

@JuliaRegistrator register

Release notes:

## Breaking changes

- blah

To add them here, just re-invoke `@JuliaRegistrator register` and the PR will be updated.

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the GitHub interface, or via:

git tag -a v0.4.0 -m "<description of version>" 81bcf231c20b67d504a0f91f050ab68a3d020621
git push origin v0.4.0
