diff --git a/Manifest.toml b/Manifest.toml index e5c90b75..12672e0e 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -148,7 +148,7 @@ version = "0.10.20" [[Gridap]] deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "c58a6aade499e243bddc152f6acdc17d45886a94" +git-tree-sha1 = "192cb4f55c2eeefcd7e39dd208a58e46fc9840a5" repo-rev = "gridap_distributed" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" @@ -340,8 +340,8 @@ uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" version = "2.0.5" [[PartitionedArrays]] -deps = ["IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] -git-tree-sha1 = "38a986dc7298d3d2a8e993c7db7944132c757415" +deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] +git-tree-sha1 = "763146e768b343fec44c521f5c7822116adc24ad" repo-rev = "gridap_distributed" repo-url = "https://github.com/fverdugo/PartitionedArrays.jl.git" uuid = "5a9dfac6-5c52-46f7-8278-5e2210713be9" diff --git a/src/Algebra.jl b/src/Algebra.jl index 8d1a3537..4e4a2b06 100644 --- a/src/Algebra.jl +++ b/src/Algebra.jl @@ -1,8 +1,28 @@ -function local_views(a::AbstractVector) +function Algebra.fill_entries!(a::PVector,v::Number) + fill!(a,v) + a +end + +function Algebra.fill_entries!(a::PSparseMatrix,v::Number) + map_parts(a.values) do values + fill_entries!(values,v) + end + a +end + +function local_views(a) @abstractmethod end +function local_views(a::AbstractVector,rows) + @notimplemented +end + +function local_views(a::AbstractMatrix,rows,cols) + @notimplemented +end + function consistent_local_views(a,ids,isconsistent) @abstractmethod end @@ -11,12 +31,24 @@ 
function local_views(a::AbstractPData) a end +function local_views(a::PRange) + a.partition +end + function local_views(a::PVector) a.values end -function local_views(a::PRange) - a.partition +function local_views(a::PVector,rows::PRange) + PArrays.local_view(a,rows) +end + +function local_views(a::PSparseMatrix) + a.values +end + +function local_views(a::PSparseMatrix,rows::PRange,cols::PRange) + PArrays.local_view(a,rows,cols) end function change_ghost(a::PVector,ids_fespace::PRange) @@ -45,10 +77,6 @@ function Algebra.allocate_vector(::Type{<:PVector{T,A}},ids::PRange) where {T,A} PVector(values,ids) end -function local_views(a::PSparseMatrix) - a.values -end - struct FullyAssembledRows end struct SubAssembledRows end @@ -94,6 +122,12 @@ function local_views(a::DistributedCounterCOO) a.counters end +function local_views(a::DistributedCounterCOO,rows,cols) + @check rows === a.rows + @check cols === a.cols + a.counters +end + function Algebra.nz_allocation(a::DistributedCounterCOO) allocs = map_parts(nz_allocation,a.counters) DistributedAllocationCOO(a.par_strategy,allocs,a.rows,a.cols) @@ -121,6 +155,12 @@ function local_views(a::DistributedAllocationCOO) a.allocs end +function local_views(a::DistributedAllocationCOO,rows,cols) + @check rows === a.rows + @check cols === a.cols + a.allocs +end + function first_gdof_from_ids(ids) num_oids(ids)>0 ? Int(ids.lid_to_gid[ids.oid_to_lid[1]]) : 1 end @@ -132,6 +172,13 @@ function find_gid_and_part(hid_to_hdof,dofs) hid_to_gid, hid_to_part end +function Algebra.create_from_nz(a::PSparseMatrix) + # For FullyAssembledRows the underlying Exchanger should + # not have ghost layer making assemble! 
do nothing (TODO check) + assemble!(a) + a +end + +function Algebra.create_from_nz(a::DistributedAllocationCOO{<:FullyAssembledRows}) f(x) = nothing A, = _fa_create_from_nz_with_callback(f,a) A end @@ -292,17 +339,11 @@ function _sa_create_from_nz_with_callback(callback,async_callback,a) map_parts(wait,t2) end - # Build the matrix exchanger - # TODO for the moment, we build an empty exchanger - # (fine for problem that do not need to update the matrix) - exchanger = empty_exchanger(parts) - - # TODO building a non-empty exchanger - # can be costly and not always needed - # add a more lazy initzialization of this inside PSparseMatrix - # Finally build the matrix - A = PSparseMatrix(values,rows,cols,exchanger) + # A matrix exchanger will be created under the hood. + # Building an exchanger can be costly and not always needed. + # TODO add a more lazy initialization of this inside PSparseMatrix + A = PSparseMatrix(values,rows,cols) A, callback_output end @@ -334,6 +375,11 @@ function local_views(a::PVectorCounter) a.counters end +function local_views(a::PVectorCounter,rows) + @check rows === a.rows + a.counters +end + function Arrays.nz_allocation(a::PVectorCounter) dofs = a.rows values = map_parts(nz_allocation,a.counters) @@ -350,11 +396,16 @@ function local_views(a::PVectorAllocation) a.values end -function Algebra.create_from_nz(a::PVector{<:FullyAssembledRows}) +function local_views(a::PVectorAllocation,rows) + @check rows === a.rows + a.values +end + +function Algebra.create_from_nz(a::PVectorAllocation{<:FullyAssembledRows}) @notimplemented end -function Algebra.create_from_nz(a::PVector{<:SubAssembledRows}) +function Algebra.create_from_nz(a::PVectorAllocation{<:SubAssembledRows}) @notimplemented end @@ -389,6 +440,13 @@ function _rhs_callback(c_fespace,rows) return b end +function Algebra.create_from_nz(a::PVector) + # For FullyAssembledRows the underlying Exchanger should + # not have ghost layer making assemble! 
do nothing (TODO check) + assemble!(a) + a +end + function Algebra.create_from_nz( a::DistributedAllocationCOO{<:FullyAssembledRows}, c_fespace::PVectorAllocation{<:FullyAssembledRows}) diff --git a/src/CellData.jl b/src/CellData.jl index a1b4794f..09d93469 100644 --- a/src/CellData.jl +++ b/src/CellData.jl @@ -117,12 +117,12 @@ end # Composition Base.:(∘)(f::Function,g::DistributedCellField) = Operation(f)(g) -Base.:(∘)(f::Function,g::DistributedCellField,b::DistributedCellField) = Operation(f)(g,b) -Base.:(∘)(f::Function,g::DistributedCellField,b::Number) = Operation(f)(g,b) -Base.:(∘)(f::Function,g::Number,b::DistributedCellField) = Operation(f)(g,b) -Base.:(∘)(f::Function,g::DistributedCellField,b::Function) = Operation(f)(g,b) -Base.:(∘)(f::Function,g::Function,b::DistributedCellField) = Operation(f)(g,b) -Base.:(∘)(f::Function,g::DistributedCellField...) = Operation(f)(g...) +Base.:(∘)(f::Function,g::Tuple{DistributedCellField,DistributedCellField}) = Operation(f)(g[1],g[2]) +Base.:(∘)(f::Function,g::Tuple{DistributedCellField,Number}) = Operation(f)(g[1],g[2]) +Base.:(∘)(f::Function,g::Tuple{Number,DistributedCellField}) = Operation(f)(g[1],g[2]) +Base.:(∘)(f::Function,g::Tuple{DistributedCellField,Function}) = Operation(f)(g[1],g[2]) +Base.:(∘)(f::Function,g::Tuple{Function,DistributedCellField}) = Operation(f)(g[1],g[2]) +Base.:(∘)(f::Function,g::Tuple{Vararg{DistributedCellField}}) = Operation(f)(g...) 
# Define some of the well known arithmetic ops diff --git a/src/FESpaces.jl b/src/FESpaces.jl index 2750544e..d62f72a4 100644 --- a/src/FESpaces.jl +++ b/src/FESpaces.jl @@ -382,27 +382,27 @@ FESpaces.get_vector_builder(a::DistributedSparseMatrixAssembler) = a.vector_buil FESpaces.get_assembly_strategy(a::DistributedSparseMatrixAssembler) = a.strategy function FESpaces.symbolic_loop_matrix!(A,a::DistributedSparseMatrixAssembler,matdata) - map_parts(symbolic_loop_matrix!,local_views(A),a.assems,matdata) + map_parts(symbolic_loop_matrix!,local_views(A,a.rows,a.cols),a.assems,matdata) end function FESpaces.numeric_loop_matrix!(A,a::DistributedSparseMatrixAssembler,matdata) - map_parts(numeric_loop_matrix!,local_views(A),a.assems,matdata) + map_parts(numeric_loop_matrix!,local_views(A,a.rows,a.cols),a.assems,matdata) end function FESpaces.symbolic_loop_vector!(b,a::DistributedSparseMatrixAssembler,vecdata) - map_parts(symbolic_loop_vector!,local_views(b),a.assems,vecdata) + map_parts(symbolic_loop_vector!,local_views(b,a.rows),a.assems,vecdata) end function FESpaces.numeric_loop_vector!(b,a::DistributedSparseMatrixAssembler,vecdata) - map_parts(numeric_loop_vector!,local_views(b),a.assems,vecdata) + map_parts(numeric_loop_vector!,local_views(b,a.rows),a.assems,vecdata) end function FESpaces.symbolic_loop_matrix_and_vector!(A,b,a::DistributedSparseMatrixAssembler,data) - map_parts(symbolic_loop_matrix_and_vector!,local_views(A),local_views(b),a.assems,data) + map_parts(symbolic_loop_matrix_and_vector!,local_views(A,a.rows,a.cols),local_views(b,a.rows),a.assems,data) end function FESpaces.numeric_loop_matrix_and_vector!(A,b,a::DistributedSparseMatrixAssembler,data) - map_parts(numeric_loop_matrix_and_vector!,local_views(A),local_views(b),a.assems,data) + map_parts(numeric_loop_matrix_and_vector!,local_views(A,a.rows,a.cols),local_views(b,a.rows),a.assems,data) end # Parallel Assembly strategies diff --git a/src/Geometry.jl b/src/Geometry.jl index 5f560bc2..a645475d 
100644 --- a/src/Geometry.jl +++ b/src/Geometry.jl @@ -222,6 +222,22 @@ function filter_cells_when_needed( remove_ghost_cells(trian,cell_gids) end +function filter_cells_when_needed( + portion::FullyAssembledRows, + cell_gids::AbstractIndexSet, + trian::Triangulation) + + trian +end + +function filter_cells_when_needed( + portion::SubAssembledRows, + cell_gids::AbstractIndexSet, + trian::Triangulation) + + remove_ghost_cells(trian,cell_gids) +end + # For the moment remove_ghost_cells # refers to the triangulation faces # pointing into the ghost cells diff --git a/test/PLaplacianTests.jl b/test/PLaplacianTests.jl new file mode 100644 index 00000000..b53d3094 --- /dev/null +++ b/test/PLaplacianTests.jl @@ -0,0 +1,60 @@ +module PLaplacianTests + +using Gridap +using Gridap.Algebra +using GridapDistributed +using PartitionedArrays +using Test + +function main(parts) + main(parts,FullyAssembledRows()) + main(parts,SubAssembledRows()) +end + +function main(parts,strategy) + + output = mkpath(joinpath(@__DIR__,"output")) + + domain = (0,4,0,4) + cells = (4,4) + model = CartesianDiscreteModel(parts,domain,cells) + + k = 1 + u((x,y)) = (x+y)^k + σ(∇u) =(1.0+∇u⋅∇u)*∇u + dσ(∇du,∇u) = 2*∇u⋅∇du + (1.0+∇u⋅∇u)*∇du + f(x) = -divergence(y->σ(∇(u,y)),x) + + Ω = Triangulation(strategy,model) + dΩ = Measure(Ω,2*k) + r(u,v) = ∫( ∇(v)⋅(σ∘∇(u)) - v*f )dΩ + j(u,du,v) = ∫( ∇(v)⋅(dσ∘(∇(du),∇(u))) )dΩ + + reffe = ReferenceFE(lagrangian,Float64,k) + V = TestFESpace(model,reffe,dirichlet_tags="boundary") + U = TrialFESpace(u,V) + + op = FEOperator(r,j,U,V,strategy) + + uh = zero(U) + b,A = residual_and_jacobian(op,uh) + _A = copy(A) + _b = copy(b) + residual_and_jacobian!(_b,_A,op,uh) + @test (norm(_b-b)+1) ≈ 1 + x = similar(b,Float64,axes(A,2)) + fill!(x,1) + @test (norm(A*x-_A*x)+1) ≈ 1 + + nls = NLSolver(show_trace=i_am_main(parts), method=:newton) + solver = FESolver(nls) + uh = solve(solver,op) + + Ωo = Triangulation(model) + dΩo = Measure(Ωo,2*k) + eh = u - uh + @test sqrt(sum(∫( 
abs2(eh) )dΩo)) < 1.0e-9 + +end + +end # module diff --git a/test/sequential/PLaplacianTests.jl b/test/sequential/PLaplacianTests.jl new file mode 100644 index 00000000..ced09577 --- /dev/null +++ b/test/sequential/PLaplacianTests.jl @@ -0,0 +1,10 @@ +module PLaplacianTestsSeq + +using PartitionedArrays + +include("../PLaplacianTests.jl") + +parts = get_part_ids(sequential,(2,2)) +PLaplacianTests.main(parts) + +end # module diff --git a/test/sequential/runtests.jl b/test/sequential/runtests.jl index 884430d7..3ab25b6c 100644 --- a/test/sequential/runtests.jl +++ b/test/sequential/runtests.jl @@ -12,4 +12,6 @@ using Test @time @testset "Poisson" begin include("PoissonTests.jl") end +@time @testset "PLaplacian" begin include("PLaplacianTests.jl") end + end # module