From aa10c7e3bb47a9c25c78707fc2635e79993e6653 Mon Sep 17 00:00:00 2001 From: amartin Date: Wed, 27 Oct 2021 10:29:46 +1100 Subject: [PATCH 1/3] Removed old sources remaining from release 0.1 --- compile/README.md | 64 -- compile/create_gridap_image.jl | 159 ---- src/OLD/CartesianDiscreteModels.jl | 141 --- src/OLD/Communicators.jl | 38 - src/OLD/DistributedAssemblers.jl | 579 ------------ src/OLD/DistributedData.jl | 75 -- src/OLD/DistributedFEOperators.jl | 33 - src/OLD/DistributedFESpaceFactories.jl | 17 - src/OLD/DistributedFESpaces.jl | 404 -------- src/OLD/DistributedIndexSets.jl | 36 - src/OLD/DistributedTriangulations.jl | 147 --- src/OLD/DistributedVectors.jl | 31 - src/OLD/GridapDistributed.jl | 116 --- src/OLD/GridapHighLevelAPI.jl | 222 ----- src/OLD/MPIPETScAlgebraInterfaces.jl | 35 - src/OLD/MPIPETScCommunicators.jl | 80 -- ...MPIPETScDistributedAssemblersInterfaces.jl | 458 --------- src/OLD/MPIPETScDistributedData.jl | 43 - src/OLD/MPIPETScDistributedIndexSets.jl | 124 --- src/OLD/MPIPETScDistributedVectors.jl | 363 ------- src/OLD/MPIPETScLinearSolvers.jl | 46 - src/OLD/MPITimers.jl | 77 -- src/OLD/MultiFieldDistributedFESpaces.jl | 220 ----- src/OLD/SequentialCommunicators.jl | 42 - ...quentialDistributedAssemblersInterfaces.jl | 110 --- src/OLD/SequentialDistributedData.jl | 31 - src/OLD/SequentialDistributedIndexSets.jl | 19 - src/OLD/SequentialDistributedVectors.jl | 92 -- ...mlyRefinedForestOfOctreesDiscreteModels.jl | 888 ------------------ src/OLD/ZeroMeanDistributedFESpaces.jl | 176 ---- test/OLD/CartesianDiscreteModelsTests.jl | 79 -- test/OLD/DistributedAssemblersTests.jl | 51 - test/OLD/DistributedAssemblersTestsHelpers.jl | 75 -- test/OLD/DistributedDataTests.jl | 35 - test/OLD/DistributedFESpacesTests.jl | 25 - test/OLD/DistributedIndexSetsTests.jl | 25 - test/OLD/DistributedPLaplacianTests.jl | 70 -- test/OLD/DistributedPoissonDGTests.jl | 103 -- test/OLD/DistributedPoissonTests.jl | 56 -- test/OLD/DistributedStokesTests.jl | 97 -- test/OLD/DistributedVectorsTests.jl | 41 - .../MPIPETScCommunicatorsTests.jl | 8 - .../MPIPETScDistributedAssemblersTests.jl | 45 - .../MPIPETScDistributedIndexSetsTests.jl | 32 - .../MPIPETScDistributedPLaplacianTests.jl | 157 ---- .../MPIPETScDistributedPoissonDGTests.jl | 114 --- .../MPIPETScDistributedPoissonTests.jl | 94 -- .../MPIPETScDistributedStokesTests.jl | 108 --- .../MPIPETScDistributedVectorsTests.jl | 82 -- ...finedForestOfOctreesDiscreteModelsTests.jl | 93 -- test/OLD/MPIPETScTests/MPITimersTests.jl | 15 - test/OLD/MPIPETScTests/compile/compile.jl | 16 - test/OLD/MPIPETScTests/compile/compile.sh | 6 - test/OLD/MPIPETScTests/runtests.jl | 55 -- test/OLD/SequentialCommunicatorsTests.jl | 0 test/OLD/ZeroMeanDistributedFESpacesTests.jl | 38 - test/OLD/runtests.jl | 29 - 57 files changed, 6415 deletions(-) delete mode 100644 compile/README.md delete mode 100644 compile/create_gridap_image.jl delete mode 100644 src/OLD/CartesianDiscreteModels.jl delete mode 100644 src/OLD/Communicators.jl delete mode 100644 src/OLD/DistributedAssemblers.jl delete mode 100644 src/OLD/DistributedData.jl delete mode 100644 src/OLD/DistributedFEOperators.jl delete mode 100644 src/OLD/DistributedFESpaceFactories.jl delete mode 100644 src/OLD/DistributedFESpaces.jl delete mode 100644 src/OLD/DistributedIndexSets.jl delete mode 100644 src/OLD/DistributedTriangulations.jl delete mode 100644 src/OLD/DistributedVectors.jl delete mode 100644 src/OLD/GridapDistributed.jl delete mode 100644 src/OLD/GridapHighLevelAPI.jl delete mode 100644 
src/OLD/MPIPETScAlgebraInterfaces.jl delete mode 100644 src/OLD/MPIPETScCommunicators.jl delete mode 100644 src/OLD/MPIPETScDistributedAssemblersInterfaces.jl delete mode 100644 src/OLD/MPIPETScDistributedData.jl delete mode 100644 src/OLD/MPIPETScDistributedIndexSets.jl delete mode 100644 src/OLD/MPIPETScDistributedVectors.jl delete mode 100644 src/OLD/MPIPETScLinearSolvers.jl delete mode 100644 src/OLD/MPITimers.jl delete mode 100644 src/OLD/MultiFieldDistributedFESpaces.jl delete mode 100644 src/OLD/SequentialCommunicators.jl delete mode 100644 src/OLD/SequentialDistributedAssemblersInterfaces.jl delete mode 100644 src/OLD/SequentialDistributedData.jl delete mode 100644 src/OLD/SequentialDistributedIndexSets.jl delete mode 100644 src/OLD/SequentialDistributedVectors.jl delete mode 100644 src/OLD/UniformlyRefinedForestOfOctreesDiscreteModels.jl delete mode 100644 src/OLD/ZeroMeanDistributedFESpaces.jl delete mode 100644 test/OLD/CartesianDiscreteModelsTests.jl delete mode 100644 test/OLD/DistributedAssemblersTests.jl delete mode 100644 test/OLD/DistributedAssemblersTestsHelpers.jl delete mode 100644 test/OLD/DistributedDataTests.jl delete mode 100644 test/OLD/DistributedFESpacesTests.jl delete mode 100644 test/OLD/DistributedIndexSetsTests.jl delete mode 100644 test/OLD/DistributedPLaplacianTests.jl delete mode 100644 test/OLD/DistributedPoissonDGTests.jl delete mode 100644 test/OLD/DistributedPoissonTests.jl delete mode 100644 test/OLD/DistributedStokesTests.jl delete mode 100644 test/OLD/DistributedVectorsTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPIPETScCommunicatorsTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPIPETScDistributedAssemblersTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPIPETScDistributedIndexSetsTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPIPETScDistributedPLaplacianTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPIPETScDistributedPoissonDGTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPIPETScDistributedPoissonTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPIPETScDistributedStokesTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPIPETScDistributedVectorsTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPIPETScUniformlyRefinedForestOfOctreesDiscreteModelsTests.jl delete mode 100644 test/OLD/MPIPETScTests/MPITimersTests.jl delete mode 100644 test/OLD/MPIPETScTests/compile/compile.jl delete mode 100755 test/OLD/MPIPETScTests/compile/compile.sh delete mode 100644 test/OLD/MPIPETScTests/runtests.jl delete mode 100644 test/OLD/SequentialCommunicatorsTests.jl delete mode 100644 test/OLD/ZeroMeanDistributedFESpacesTests.jl delete mode 100644 test/OLD/runtests.jl diff --git a/compile/README.md b/compile/README.md deleted file mode 100644 index 666d5cda..00000000 --- a/compile/README.md +++ /dev/null @@ -1,64 +0,0 @@ -The Julia script available in this folder (`create_gridap_image.jl`) lets one to create a custom sysimage of `Gridap.jl` using the so-called [`PackageCompiler.jl`](https://github.com/JuliaLang/PackageCompiler.jl) Julia package. See [documentation page](https://julialang.github.io/PackageCompiler.jl/dev/sysimages/) for more details. - -The Julia script at hand provides a set of command-line-arguments (CLAs) in order to customize the creation process of the sysimage. 
You can see the list of options available using the following command: - -```bash -$ julia create_gridap_image.jl -h -``` - -In order to create the sysimage, the script clones the Git repository of `Gridap.jl` and that of its [Tutorials](https://github.com/gridap/Tutorials), and (currently) runs the `validation.jl` tutorial. By default, the script will generate the sysimage from the code corresponding to the last commit pushed to the `master` branch of both Git repositories. However, via appropriate CLAs (see help message), one may select the particular version of both projects to be cloned by specifying the corresponding Git release tags. For example, if one wants to generate a sysimage of Gridap `v0.10.4` from Tutorials `v0.10.0`, one has to execute the following command: - - -```bash -$ julia create_gridap_image.jl -g v0.10.4 -t v0.10.0 -n Gridapv0.10.4.so -``` - -Obviously, one has to select a version of the Tutorials package compatible with the version selected for Gridap, **otherwise the image creation process will fail**. Note that in the above command, we are also specifying the name of the image to be generated, which by default, is created in the directory from which the script is executed -(the path where the image is generated can be customized as well via the corresponding CLA). - -Once the image is generated, one may use it to execute a Julia script that uses Gridap as follows: - -```bash -$ julia -J Gridapv0.10.4.so gridap_script.jl -``` - -or run an interactive Julia session as: - -```bash -$ julia -J Gridapv0.10.4.so -``` - -### Known issues, TO-DO, misc learn lessons, etc. - -1. **The sysimage works and is useful when `Gridap.jl` is being used but not being modified**, e.g., while developing `GridapDistributed.jl` or `GridapODEs.jl`. If you want to develop `Gridap.jl`, use [`Revise.jl`](https://github.com/gridap/Gridap.jl/wiki/REPL-based-workflow#editing-small-parts-of-the-project). - -2. Also note the following from the [documentation page](https://julialang.github.io/PackageCompiler.jl/dev/sysimages/) of `PackageCompiler.jl`: *It should be clearly stated that there are some drawbacks to using a custom sysimage, thereby sidestepping the standard Julia package precompilation system. The biggest drawback is that packages that are compiled into a sysimage (including their dependencies!) are "locked" to the version they where at when the sysimage was created. This means that no matter what package version you have installed in your current project, the one in the sysimage will take precedence. This can lead to bugs where you start with a project that needs a specific version of a package, but you have another one compiled into the sysimage.* - -3. We have to decide which code to execute in order to create the image. At present, `validation.jl` is chosen just for testing purposes. Ideally it should force the compilation of a large part of the project, but the problem that it solves should take small amount of time to execute once the code is compiled. - -4. We have observed that the resulting image does not contain all the code whose compilation is triggered from `validation.jl`. 
The code that is compiled during execution time can be spotted with - ```bash - $ julia --trace-compile=stderr -q -J Gridapv0.10.4.so validation.jl - ``` - At present, we have the following: - ``` - $ julia --trace-compile=stderr ../../Tutorials/src/validation.jl 2>&1 | wc -l - 2920 - $ julia --trace-compile=stderr -J./Gridap.so ../../Tutorials/src/validation.jl 2>&1 | wc -l - 825 - ``` - An hypothesis (not confirmed): the list of packages provided to `PackageCompiler.create_sysimage` should not only contain direct dependencies of `Gridap.jl`. This is related to the following comment in the script: - ``` - # TODO: reorg project structure (Project.toml) to use - # Pkg.dependencies() with PackageCompiler - #if VERSION >= v"1.4" - # append!(pkgs, [Symbol(v.name) for v in values(Pkg.dependencies())]) - #end - ``` - `Pkg.dependencies()` is a brand new method in Julia `v1.4`. From the resulting data structure that it provides one may obtain all the nodes of the dependency graph of the currently loaded project. - -5. See also issue [#276](https://github.com/gridap/Gridap.jl/issues/276). - - - - diff --git a/compile/create_gridap_image.jl b/compile/create_gridap_image.jl deleted file mode 100644 index 6342f3f6..00000000 --- a/compile/create_gridap_image.jl +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env julia -# -# Examples: -# julia create_gridap_image.jl -h -# julia create_gridap_image.jl -g v0.10.4 - -using Pkg -Pkg.add("ArgParse") -Pkg.add("PackageCompiler") -using ArgParse -using LibGit2 -using PackageCompiler -const do_not_clone_gridap_flag="--do-not-clone-gridap" -const do_not_clone_tutorials_flag="--do-not-clone-tutorials" - -function parse_commandline() - s = ArgParseSettings() - do_not_clone_gridap_flag="--do-not-clone-gridap" - do_not_clone_tutorials_flag="--do-not-clone-tutorials" - @add_arg_table! s begin - "--image-name", "-n" - help = "The name of the Gridap.jl custom system image that will be created" - arg_type = String - default = "Gridap.so" - "--image-path", "-p" - help = "The relative or absolute PATH where the Gridap.jl custom system image will be created" - arg_type = String - default = "./" - "--gridap-tag", "-g" - help = """Gridap.jl Git repo Tag string with the source code from which the Gridap.jl custom system - image will be created. This option is ignored if $(do_not_clone_gridap_flag) flag IS PASSED. - """ - arg_type = String - default = "master" - "--tutorials-tag", "-t" - help = "Tutorials Git repo Tag string with the base code line that will be executed in order to generate - the Gridap.jl custom system image. This option is ignored if $(do_not_clone_tutorials_flag) flag IS PASSED" - arg_type = String - default = "master" - "--gridap-path" - help = """If $(do_not_clone_gridap_flag) flag IS NOT PASSED, the relative or absolute PATH where to - clone the Gridap.jl Git repo (Warning: Removed if it exists!). - If $(do_not_clone_gridap_flag) flag IS PASSED, the relative or absolute PATH where an existing - Gridap.jl source directory tree can be found. - """ - arg_type = String - default = "/tmp/Gridap.jl/" - "--tutorials-path" - arg_type = String - default = "/tmp/Tutorials/" - help = """If $(do_not_clone_tutorials_flag) flag IS NOT PASSED, the relative or absolute PATH where to - clone the Tutorials Git repo (Warning: Removed if it exists!). - If $(do_not_clone_tutorials_flag) flag IS PASSED, the relative or absolute PATH where an existing - Tutorials source directory tree can be found. 
- """ - "--do-not-clone-gridap" - help = "Do not clone the Gridap.jl Git repo, but instead use an existing source directory." - action = :store_true - "--do-not-clone-tutorials" - help = "Do not clone the Tutorials Git repo, but instead use an existing source directory." - action = :store_true - "--tutorial-name" - help = "The name of the Julia script (including the .jl extension) with the tutorial from which the sys image is created." - default = "validation.jl" - arg_type = String - "--user-provided-script-path" - arg_type = String - help = "The Julia script from which the image is created is not extracted from the Tutorials.jl Git repo, but the one located at the path provided to this CLA is used instead" - default = nothing - end - return parse_args(s) -end - -function clone_and_checkout_tag( - repo_url::AbstractString, - repo_path::AbstractString, - git_tag::AbstractString, -) - rm(repo_path; force = true, recursive = true) - repo = LibGit2.clone(repo_url, repo_path) - if (lowercase(git_tag) != "master") - @assert git_tag in LibGit2.tag_list(repo) "Wrong Git Tag $(git_tag). Available choices for $(repo_url) are $(LibGit2.tag_list(repo))" - gt=LibGit2.GitTag(repo, git_tag) - commit_hash=LibGit2.target(gt) - LibGit2.checkout!(repo, string(commit_hash)) - end -end - - -function main() - parsed_args = parse_commandline() - image_name = parsed_args["image-name"] - image_path = parsed_args["image-path"] - gridap_tag = parsed_args["gridap-tag"] - tutorials_tag = parsed_args["tutorials-tag"] - gridap_path = parsed_args["gridap-path"] - tutorials_path = parsed_args["tutorials-path"] - tutorial_name = parsed_args["tutorial-name"] - user_provided_script_path = parsed_args["user-provided-script-path"] - - if (! parsed_args["do-not-clone-gridap"]) - clone_and_checkout_tag( - "https://github.com/gridap/Gridap.jl", - gridap_path, - gridap_tag, - ) - end - - if (user_provided_script_path == nothing) - if (! parsed_args["do-not-clone-tutorials"]) - clone_and_checkout_tag( - "https://github.com/gridap/Tutorials", - tutorials_path, - tutorials_tag, - ) - end - end - - @info "Creating system image for Gridap.jl#$(gridap_tag) object file at: '$(image_path)'" - @info "Building Gridap.jl#$(gridap_tag) into system image: $(joinpath(image_path,image_name))" - - start_time = time() - - Pkg.activate(gridap_path) - Pkg.instantiate(verbose = true) - - pkgs = Symbol[] - push!(pkgs, :Gridap) - - - if VERSION >= v"1.4" - append!(pkgs, [Symbol(v.name) for v in values(Pkg.dependencies()) if v.is_direct_dep],) - else - append!(pkgs, [Symbol(name) for name in keys(Pkg.installed())]) - end - - if ( user_provided_script_path == nothing ) - tutorial_script_path=joinpath(tutorials_path,"src",tutorial_name) - else - tutorial_script_path=user_provided_script_path - end - - if ! isfile(tutorial_script_path) - @error "$(tutorial_script_path) not found or not a file" - end - - # use package compiler - PackageCompiler.create_sysimage( - pkgs, - sysimage_path = joinpath(image_path,image_name), - precompile_execution_file = tutorial_script_path, - ) - - tot_secs = Int(floor(time() - start_time)) - @info "Created system image object file at: $(joinpath(image_path,image_name))" - @info "System image build time: $tot_secs sec" -end - -main() diff --git a/src/OLD/CartesianDiscreteModels.jl b/src/OLD/CartesianDiscreteModels.jl deleted file mode 100644 index ee9972ef..00000000 --- a/src/OLD/CartesianDiscreteModels.jl +++ /dev/null @@ -1,141 +0,0 @@ -function Gridap.CartesianDiscreteModel(comm::Communicator,subdomains::Tuple,args...) 
- desc = CartesianDescriptor(args...) - CartesianDiscreteModel(comm,subdomains,desc) -end - -function Gridap.CartesianDiscreteModel( - comm::Communicator,subdomains::Tuple,gdesc::CartesianDescriptor) - - nsubdoms = prod(subdomains) - ngcells = prod(Tuple(gdesc.partition)) - models = DistributedData(comm) do isubdom - cmin, cmax = compute_cmin_cmax(gdesc, subdomains, isubdom) - CartesianDiscreteModel(gdesc, cmin, cmax) - end - - gids = DistributedIndexSet(comm,ngcells) do isubdom - lid_to_gid, lid_to_owner = local_cartesian_gids(gdesc,subdomains,isubdom) - IndexSet(ngcells,lid_to_gid,lid_to_owner) - end - - DistributedDiscreteModel(models,gids) -end - -function compute_cmin_cmax( - gdesc::CartesianDescriptor{D,T}, - nsubdoms::Tuple, - isubdom::Integer, -) where {D,T} - cis = CartesianIndices(nsubdoms) - ci = cis[isubdom] - cmin = Vector{Int}(undef, D) - cmax = Vector{Int}(undef, D) - for d = 1:D - orange = uniform_partition_1d(gdesc.partition[d], nsubdoms[d], ci[d]) - lrange = extend_range_with_ghost_cells(orange, nsubdoms[d], ci[d]) - cmin[d] = first(lrange) - cmax[d] = last(lrange) - end - return CartesianIndex(Tuple(cmin)), CartesianIndex(Tuple(cmax)) -end - -function local_cartesian_gids_1d( - gdesc::CartesianDescriptor{1}, - nsubdoms::Integer, - isubdom::Integer, -) - gcells, = gdesc.partition - orange = uniform_partition_1d(gcells, nsubdoms, isubdom) - lrange = extend_range_with_ghost_cells(orange, nsubdoms, isubdom) - lcells = length(lrange) - lid_to_gid = collect(Int, lrange) - lid_to_owner = fill(isubdom, lcells) - if nsubdoms == 1 - nothing - elseif isubdom == 1 - lid_to_owner[end] = 2 - elseif isubdom != nsubdoms - lid_to_owner[1] = isubdom - 1 - lid_to_owner[end] = isubdom + 1 - else - lid_to_owner[1] = isubdom - 1 - end - lid_to_gid, lid_to_owner -end - -function extend_range_with_ghost_cells(orange, nsubdoms, isubdom) - if nsubdoms == 1 - lrange = orange - elseif isubdom == 1 - lrange = orange.start:(orange.stop+1) - elseif isubdom != nsubdoms - lrange = (orange.start-1):(orange.stop+1) - else - lrange = (orange.start-1):orange.stop - end - return lrange -end - -function local_cartesian_gids( - gdesc::CartesianDescriptor{D},nsubdoms::Tuple,isubdom::Integer) where D - cis = CartesianIndices(nsubdoms) - ci = cis[isubdom] - local_cartesian_gids(gdesc,nsubdoms,ci) -end - -function local_cartesian_gids( - gdesc::CartesianDescriptor{D},nsubdoms::Tuple,isubdom::CartesianIndex) where D - - d_to_lid_to_gid = Vector{Int}[] - d_to_lid_to_owner = Vector{Int}[] - for d in 1:D - origin_d = Point(gdesc.origin[d]) - sizes_d = (gdesc.sizes[d],) - partition_d = (gdesc.partition[d],) - gdesc_d = CartesianDescriptor(origin_d,sizes_d,partition_d) - lid_to_gid_d, lid_to_owner_d = local_cartesian_gids_1d(gdesc_d,nsubdoms[d],isubdom[d]) - push!(d_to_lid_to_gid,lid_to_gid_d) - push!(d_to_lid_to_owner,lid_to_owner_d) - end - - d_to_llength = Tuple(map(length,d_to_lid_to_gid)) - d_to_glength = Tuple(gdesc.partition) - - lcis = CartesianIndices(d_to_llength) - gcis = CartesianIndices(d_to_glength) - scis = CartesianIndices(nsubdoms) - llis = LinearIndices(lcis) - glis = LinearIndices(gcis) - slis = LinearIndices(scis) - - lid_to_gid = zeros(Int,length(lcis)) - lid_to_owner = zeros(Int,length(lcis)) - gci = zeros(Int,D) - sci = zeros(Int,D) - - for lci in lcis - for d in 1:D - gci[d] = d_to_lid_to_gid[d][lci[d]] - sci[d] = d_to_lid_to_owner[d][lci[d]] - end - lid = llis[lci] - lid_to_gid[lid] = glis[CartesianIndex(Tuple(gci))] - lid_to_owner[lid] = slis[CartesianIndex(Tuple(sci))] - end - - lid_to_gid, 
lid_to_owner -end - -function uniform_partition_1d(glength,np,pid) - _olength = glength ÷ np - _offset = _olength * (pid-1) - _rem = glength % np - if _rem < (np-pid+1) - olength = _olength - offset = _offset - else - olength = _olength + 1 - offset = _offset + pid - (np-_rem) - 1 - end - (1+offset):(olength+offset) -end diff --git a/src/OLD/Communicators.jl b/src/OLD/Communicators.jl deleted file mode 100644 index 54061851..00000000 --- a/src/OLD/Communicators.jl +++ /dev/null @@ -1,38 +0,0 @@ -abstract type Communicator end - -function num_parts(::Communicator) - @abstractmethod -end - -function num_workers(::Communicator) - @abstractmethod -end - -function do_on_parts(task::Function,::Communicator,args...) - @abstractmethod -end - -function do_on_parts(task::Function,args...) - comm = get_comm(get_distributed_data(first(args))) - do_on_parts(task,comm,args...) -end - -function i_am_master(::Communicator) - @abstractmethod -end - -# We need to compare communicators to perform some checks -function Base.:(==)(a::Communicator,b::Communicator) - @abstractmethod -end - -# All communicators that are to be executed in the master to workers -# model inherit from these one -abstract type OrchestratedCommunicator <: Communicator end - -function i_am_master(::OrchestratedCommunicator) - true -end - -# This is for the communicators to be executed in MPI mode -abstract type CollaborativeCommunicator <: Communicator end diff --git a/src/OLD/DistributedAssemblers.jl b/src/OLD/DistributedAssemblers.jl deleted file mode 100644 index 309729ca..00000000 --- a/src/OLD/DistributedAssemblers.jl +++ /dev/null @@ -1,579 +0,0 @@ -function Gridap.FESpaces.collect_cell_matrix( - u::DistributedFESpace,v::DistributedFESpace,terms) - DistributedData(u,v,terms) do part, (u,_), (v,_), terms - collect_cell_matrix(u,v,terms) - end -end - -function Gridap.FESpaces.collect_cell_vector(v::DistributedFESpace,terms) - DistributedData(v,terms) do part, (v,_), terms - collect_cell_vector(v,terms) - end -end - -function Gridap.FESpaces.collect_cell_matrix_and_vector( - u::DistributedFESpace,v::DistributedFESpace,mterms,vterms) - DistributedData(u,v,mterms,vterms) do part, (u,_), (v,_), mterms, vterms - collect_cell_matrix_and_vector(u,v,mterms,vterms) - end -end - -function Gridap.FESpaces.collect_cell_matrix_and_vector( - u::DistributedFESpace,v::DistributedFESpace,mterms,vterms,uhd::FEFunction) - DistributedData(u,v,mterms,vterms,uhd) do part, (u,_), (v,_), mterms, vterms, uhd - collect_cell_matrix_and_vector(u,v,mterms,vterms,uhd) - end -end - - -struct DistributedAssemblyStrategy{T<:AssemblyStrategy} - strategies::DistributedData{T} -end - -function get_distributed_data(dstrategy::DistributedAssemblyStrategy) - dstrategy.strategies -end - -struct DistributedAssembler{M,V,AS} <: Assembler - matrix_type :: Type{M} - vector_type :: Type{V} - trial::DistributedFESpace - test::DistributedFESpace - assems::DistributedData{<:Assembler} - strategy::DistributedAssemblyStrategy{AS} -end - -struct ArtificeSparseMatrixToLeverageDefaults{Tv,Ti} <: AbstractSparseMatrix{Tv,Ti} end - -function Gridap.Algebra.is_entry_stored(::Type{<:ArtificeSparseMatrixToLeverageDefaults},i,j) - true -end - -""" - allocate_local_vector(::DistributedAssemblyStrategy, ::Type{V}, indices) where {V} - -Allocate the local vector required in order to assemble a global vector -of type V accordingly to the assembly algorithm underlying the assembly strategy. The global vector is indexable using indices. 
-""" -function allocate_local_vector(::DistributedAssemblyStrategy, ::Type{V}, indices::DistributedIndexSet) where {V} - @abstractmethod -end - -""" - get_local_matrix_type(::Type{M}) where M - -Given a global matrix type M, returns the local matrix type -whose assembler-related methods are appropiate for the global -assembly process of M. - -""" -function get_local_matrix_type(::Type{M}) where M - @abstractmethod -end - -function get_local_matrix_type(::Type{M}) where M <: Union{<:SparseMatrixCSR,<:SparseMatrixCSC} - M -end - -""" - get_local_vector_type(::Type{V}) where V - -Given a global vector type V, returns the local vector type -whose assembler-related methods are appropiate for the global -assembly process of V. - -""" -function get_local_vector_type(::Type{V}) where V - @abstractmethod -end - -function get_local_vector_type(::Type{V}) where V <: Vector - V -end - -function assemble_global_matrix(::DistributedAssemblyStrategy, ::Type{M}, - ::DistributedData, - ::DistributedIndexSet, - ::DistributedIndexSet) where {M} - @abstractmethod -end - -function assemble_global_vector(::DistributedAssemblyStrategy, ::Type{V}, - ::DistributedData, - ::DistributedIndexSet) where {V} - @abstractmethod -end - -function get_distributed_data(dassem::DistributedAssembler) - dassem.assems -end - -function Gridap.FESpaces.get_assembly_strategy(a::DistributedAssembler) - a.strategy -end - -function Gridap.FESpaces.allocate_matrix(dassem::DistributedAssembler,dmatdata) - dm1 = DistributedData(dassem,dmatdata) do part, assem, matdata - builder=SparseMatrixBuilder(ArtificeSparseMatrixToLeverageDefaults{Float64,Int64},MinMemory()) - m1=nz_counter(builder,(get_rows(assem),get_cols(assem))) - symbolic_loop_matrix!(m1,assem,matdata) - end - - dm2 = DistributedData(dm1,dassem,dmatdata) do part, m1, assem, matdata - m2=nz_allocation(m1) - symbolic_loop_matrix!(m2,assem,matdata) - m2 - end - - dIJV = DistributedData(dm2) do part, m2 - m2.I,m2.J,m2.V - end - - finalize_coo!(dassem.matrix_type,dIJV,dassem.test.gids,dassem.trial.gids) - A = assemble_global_matrix(dassem.strategy, - dassem.matrix_type, - dIJV, - dassem.test.gids, - dassem.trial.gids) -end - -function Gridap.FESpaces.allocate_vector(a::DistributedAssembler,dvecdata) - gids = a.test.gids - allocate_vector(a.vector_type,gids) -end - -function Gridap.FESpaces.allocate_matrix_and_vector(dassem::DistributedAssembler,ddata) - dm1 = DistributedData(dassem,ddata) do part, assem, data - mbuilder=SparseMatrixBuilder(ArtificeSparseMatrixToLeverageDefaults{Float64,Int64},MinMemory()) - m1=nz_counter(mbuilder,(get_rows(assem),get_cols(assem))) - vbuilder=ArrayBuilder(get_local_vector_type(dassem.vector_type)) - v1=nz_counter(vbuilder,(get_rows(assem),)) - symbolic_loop_matrix_and_vector!(m1,v1,assem,data) - m1 - end - - gids = dassem.test.gids - db = allocate_local_vector(dassem.strategy,dassem.vector_type,gids) - - dm2 = DistributedData(dm1,db,dassem,ddata) do part, m1, b, assem, data - m2=nz_allocation(m1) - symbolic_loop_matrix_and_vector!(m2,b,assem,data) - m2 - end - - dIJV = DistributedData(dm2) do part, m2 - m2.I,m2.J,m2.V - end - - finalize_coo!(dassem.matrix_type,dIJV,dassem.test.gids,dassem.trial.gids) - A = assemble_global_matrix(dassem.strategy, - dassem.matrix_type, - dIJV, - dassem.test.gids, - dassem.trial.gids) - - b = assemble_global_vector(dassem.strategy, - dassem.vector_type, - db, - dassem.test.gids) - A,b -end - -function Gridap.FESpaces.assemble_matrix!(dmat,dassem::DistributedAssembler, dmatdata) - fill_entries!(dmat,zero(eltype(dmat))) - 
assemble_matrix_add!(dmat,dassem,dmatdata) -end - -function is_seq_and_map_dofs_type_proc_local(dassem::DistributedAssembler{V,M,AS}) where {V,M,AS} - result=isa(dassem.assems,SequentialDistributedData) - mapdofstype=get_map_dofs_type(AS) - result=result && mapdofstype==MapDoFsTypeProcLocal -end - -function Gridap.FESpaces.assemble_matrix_add!(dmat, - dassem::DistributedAssembler{V,M,AS}, - dmatdata) where {V,M,AS} - @notimplementedif is_seq_and_map_dofs_type_proc_local(dassem) - do_on_parts(dassem,dassem.test.gids,dmatdata,dmat) do part, assem, gids, matdata, mat - assemble_matrix_add!(mat,assem,matdata) - end -end - -function Gridap.FESpaces.assemble_vector!(dvec,dassem::DistributedAssembler, dvecdata) - fill_entries!(dvec,zero(eltype(dvec))) - assemble_vector_add!(dvec,dassem,dvecdata) -end - -function Gridap.FESpaces.assemble_vector_add!(dvec,dassem::DistributedAssembler, dvecdata) - @notimplementedif is_seq_and_map_dofs_type_proc_local(dassem) - do_on_parts(dassem,dvecdata,dvec) do part, assem, vecdata, vec - assemble_vector_add!(vec,assem,vecdata) - end -end - -function Gridap.FESpaces.assemble_matrix_and_vector!(dmat,dvec,dassem::DistributedAssembler, ddata) - fill_entries!(dmat,zero(eltype(dmat))) - fill_entries!(dvec,zero(eltype(dvec))) - assemble_matrix_and_vector_add!(dmat,dvec,dassem,ddata) -end - -function Gridap.FESpaces.assemble_matrix_and_vector_add!(dmat,dvec,dassem::DistributedAssembler, ddata) - @notimplementedif is_seq_and_map_dofs_type_proc_local(dassem) - do_on_parts(dassem,ddata,dmat,dvec) do part, assem, data, mat, vec - assemble_matrix_and_vector_add!(mat,vec,assem,data) - end -end - -function Gridap.FESpaces.assemble_matrix(dassem::DistributedAssembler, dmatdata) - dm1 = DistributedData(dassem,dmatdata) do part, assem, matdata - builder=SparseMatrixBuilder(ArtificeSparseMatrixToLeverageDefaults{Float64,Int64},MinMemory()) - m1=nz_counter(builder,(get_rows(assem),get_cols(assem))) - symbolic_loop_matrix!(m1,assem,matdata) - end - - dm2 = DistributedData(dm1,dassem,dmatdata) do part, m1, assem, matdata - m2=nz_allocation(m1) - numeric_loop_matrix!(m2,assem,matdata) - m2 - end - - dIJV = DistributedData(dm2) do part, m2 - m2.I,m2.J,m2.V - end - - finalize_coo!(dassem.matrix_type,dIJV,dassem.test.gids,dassem.trial.gids) - A = assemble_global_matrix(dassem.strategy, - dassem.matrix_type, - dIJV, - dassem.test.gids, - dassem.trial.gids) -end - -function Gridap.FESpaces.assemble_vector(dassem::DistributedAssembler, dvecdata) - gids = dassem.test.gids - db = allocate_local_vector(dassem.strategy,dassem.vector_type,gids) - do_on_parts(dassem,dvecdata,db) do part, assem, vecdata, b - numeric_loop_vector!(b,assem,vecdata) - end - b = assemble_global_vector(dassem.strategy, - dassem.vector_type, - db, - dassem.test.gids) -end - -function Gridap.FESpaces.assemble_matrix_and_vector(dassem::DistributedAssembler,ddata) - dm1 = DistributedData(dassem,ddata) do part, assem, data - mbuilder=SparseMatrixBuilder(ArtificeSparseMatrixToLeverageDefaults{Float64,Int64},MinMemory()) - m1=nz_counter(mbuilder,(get_rows(assem),get_cols(assem))) - vbuilder=ArrayBuilder(get_local_vector_type(dassem.vector_type)) - v1=nz_counter(vbuilder,(get_rows(assem),)) - symbolic_loop_matrix_and_vector!(m1,v1,assem,data) - m1 - end - - gids = dassem.test.gids - db = allocate_local_vector(dassem.strategy,dassem.vector_type,gids) - - dm2 = DistributedData(dm1,db,dassem,ddata) do part, m1, b, assem, data - m2=nz_allocation(m1) - numeric_loop_matrix_and_vector!(m2,b,assem,data) - m2 - end - - dIJV = 
DistributedData(dm2) do part, m2 - m2.I,m2.J,m2.V - end - - finalize_coo!(dassem.matrix_type,dIJV,dassem.test.gids,dassem.trial.gids) - A = assemble_global_matrix(dassem.strategy, - dassem.matrix_type, - dIJV, - dassem.test.gids, - dassem.trial.gids) - - b = assemble_global_vector(dassem.strategy, - dassem.vector_type, - db, - dassem.test.gids) - A,b -end - -# Type trait hierarchy for -# GridapDistributed AssemblyStrategy implementations -abstract type MapDoFsType end; -struct MapDoFsTypeProcLocal <: MapDoFsType end; -struct MapDoFsTypeGlobal <: MapDoFsType end; -const DefaultMapDoFsType = MapDoFsTypeGlobal - - -# -# Specializations - -# This is one of the usual assembly strategies in parallel FE computations -# Each proc owns a set of matrix / vector rows (and all cols in these rows) -# Each proc computes locally all values in the owned rows -# This typically requires to loop also over ghost cells -struct OwnedAndGhostCellsAssemblyStrategy{MapDoFsType} <: AssemblyStrategy - part::Int - gids::IndexSet -end - -function Gridap.FESpaces.row_map(a::OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeGlobal},row) - a.gids.lid_to_gid[row] -end - -function Gridap.FESpaces.col_map(a::OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeGlobal},col) - a.gids.lid_to_gid[col] -end - -function Gridap.FESpaces.row_map(a::OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeProcLocal},row) - row -end - -function Gridap.FESpaces.col_map(a::OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeProcLocal},col) - col -end - -function Gridap.FESpaces.row_mask(a::OwnedAndGhostCellsAssemblyStrategy,row) - a.part == a.gids.lid_to_owner[row] -end - -function Gridap.FESpaces.col_mask(a::OwnedAndGhostCellsAssemblyStrategy,col) - true -end - -function OwnedAndGhostCellsAssemblyStrategy( - map_dofs_type::Type{<:MapDoFsType},V::DistributedFESpace) - dgids = V.gids - strategies = DistributedData(dgids) do part, gids - OwnedAndGhostCellsAssemblyStrategy{map_dofs_type}(part,gids) - end - DistributedAssemblyStrategy(strategies) -end - -function OwnedAndGhostCellsAssemblyStrategy(V::DistributedFESpace, - map_dofs_type::MapDoFsType=DefaultMapDoFsType()) - OwnedAndGhostCellsAssemblyStrategy(typeof(map_dofs_type),V) -end - -struct OwnedCellsAssemblyStrategy{MapDoFsType} <: AssemblyStrategy - part::Int - dof_gids::IndexSet - cell_gids::IndexSet -end - -function Gridap.FESpaces.row_map(a::OwnedCellsAssemblyStrategy{MapDoFsTypeGlobal},row) - a.dof_gids.lid_to_gid[row] -end - -function Gridap.FESpaces.col_map(a::OwnedCellsAssemblyStrategy{MapDoFsTypeGlobal},col) - a.dof_gids.lid_to_gid[col] -end - -function Gridap.FESpaces.row_map(a::OwnedCellsAssemblyStrategy{MapDoFsTypeProcLocal},row) - row -end - -function Gridap.FESpaces.col_map(a::OwnedCellsAssemblyStrategy{MapDoFsTypeProcLocal},col) - col -end - -function Gridap.FESpaces.row_mask(a::OwnedCellsAssemblyStrategy,row) - true -end - -function Gridap.FESpaces.col_mask(a::OwnedCellsAssemblyStrategy,col) - true -end - -function OwnedCellsAssemblyStrategy(V::DistributedFESpace, - map_dofs_type::MapDoFsType=DefaultMapDoFsType()) - OwnedCellsAssemblyStrategy(typeof(map_dofs_type),V) -end - -function OwnedCellsAssemblyStrategy(map_dofs_type::Type{<:MapDoFsType}, V::DistributedFESpace) - dcell_gids = V.model.gids - ddof_gids = V.gids - strategies = - DistributedData(ddof_gids,dcell_gids) do part, dof_gids, cell_gids - OwnedCellsAssemblyStrategy{map_dofs_type}(part,dof_gids,cell_gids) - end - DistributedAssemblyStrategy(strategies) -end - -function default_assembly_strategy_type(::Communicator) - @abstractmethod 
-end - -function default_map_dofs_type(::Communicator) - @abstractmethod -end - -function default_global_vector_type(::Communicator) - @abstractmethod -end - -function default_global_matrix_type(::Communicator) - @abstractmethod -end - - - -proc_local_triangulation_portion_type(a::AssemblyStrategy)=proc_local_triangulation_portion_type(typeof(a)) - -proc_local_triangulation_portion_type(a::Type{<:AssemblyStrategy})=@abstractmethod - -proc_local_triangulation_portion_type(a::Type{<:OwnedCellsAssemblyStrategy})=OwnedCells - -proc_local_triangulation_portion_type(a::Type{<:OwnedAndGhostCellsAssemblyStrategy})=OwnedAndGhostCells - -function default_distributed_assembly_strategy(V::DistributedFESpace) - das=default_assembly_strategy_type(get_comm(V)) - mdt=default_map_dofs_type(get_comm(V)) - das(V,mdt()) -end - -get_map_dofs_type(::AssemblyStrategy)=@abstractmethod -get_map_dofs_type(::OwnedAndGhostCellsAssemblyStrategy{T}) where {T}=T -get_map_dofs_type(::OwnedCellsAssemblyStrategy{T}) where {T}=T -get_map_dofs_type(::Type{OwnedAndGhostCellsAssemblyStrategy{T}}) where {T}=T -get_map_dofs_type(::Type{OwnedCellsAssemblyStrategy{T}}) where {T}=T - - -function Gridap.FESpaces.SparseMatrixAssembler( - dtrial::DistributedFESpace, - dtest::DistributedFESpace, - dstrategy::DistributedAssemblyStrategy=default_distributed_assembly_strategy(dtest)) - - matrix_type=default_global_matrix_type(get_comm(dtrial)) - vector_type=default_global_vector_type(get_comm(dtrial)) - assems = DistributedData( - dtrial.spaces,dtest.spaces,dstrategy) do part, U, V, strategy - SparseMatrixAssembler(get_local_matrix_type(matrix_type), - get_local_vector_type(vector_type), - U, - V, - strategy) - end - DistributedAssembler(matrix_type, - vector_type, - dtrial, - dtest, - assems, - dstrategy) -end - -function Gridap.FESpaces.SparseMatrixAssembler( - matrix_type::Type, - vector_type::Type, - dtrial::DistributedFESpace, - dtest::DistributedFESpace, - dstrategy::DistributedAssemblyStrategy=default_distributed_assembly_strategy(dtest)) - - assems = DistributedData( - dtrial.spaces,dtest.spaces,dstrategy) do part, U, V, strategy - SparseMatrixAssembler(get_local_matrix_type(matrix_type), - get_local_vector_type(vector_type), - U, - V, - strategy) - end - - DistributedAssembler(matrix_type, - vector_type, - dtrial, - dtest, - assems, - dstrategy) -end - -function Gridap.Geometry.Triangulation(model::DistributedDiscreteModel, - args_triangulation...)#;kwargs_triangulation...) - AS=default_assembly_strategy_type(get_comm(model)) - Triangulation(AS, model, args_triangulation...)#;kwargs_triangulation...) -end - -function Gridap.Geometry.Triangulation(strategy::Type{<:AssemblyStrategy}, - model::DistributedDiscreteModel,args_triangulation...)#;kwargs_triangulation...) - DistributedData(model) do part, (model,gids) - Triangulation(strategy,part,gids,model,args_triangulation...)#;kwargs_triangulation) - end -end - -function Gridap.Geometry.Triangulation(strategy::AssemblyStrategy, - part,gids,model::DiscreteModel,args_triangulation...)#;kwargs_triangulation...) - Triangulation(typeof(strategy),part,gids,model,args_triangulation...)#;kwargs_triangulation) -end - -function Gridap.Geometry.Triangulation(strategy::Type{<:AssemblyStrategy}, - part,gids,model::DiscreteModel,args_triangulation...)#;kwargs_triangulation...) 
- portion=proc_local_triangulation_portion_type(strategy) - Triangulation(portion,part,gids,model,args_triangulation...)#;kwargs_triangulation) -end - -function Gridap.Geometry.Triangulation(strategy::DistributedAssemblyStrategy, - model::DistributedDiscreteModel,args_triangulation...)#;kwargs_triangulation...) - DistributedData(model,strategy) do part, (model,gids), strategy - Triangulation(strategy,part,gids,model,args_triangulation...)#;kwargs_triangulation) - end -end - -function Gridap.Geometry.BoundaryTriangulation(model::DistributedDiscreteModel;kwargs_triangulation...) - AS=default_assembly_strategy_type(get_comm(model)) - BoundaryTriangulation(AS, model; kwargs_triangulation...) -end - -function Gridap.Geometry.BoundaryTriangulation(strategy::Type{<:AssemblyStrategy}, - model::DistributedDiscreteModel;kwargs_triangulation...) - DistributedData(model) do part, (model,gids) - BoundaryTriangulation(strategy,part,gids,model;kwargs_triangulation...) - end -end - -function Gridap.Geometry.BoundaryTriangulation(strategy::AssemblyStrategy, - part,gids,model::DiscreteModel;kwargs_triangulation...) - BoundaryTriangulation(typeof(strategy),part,gids,model;kwargs_triangulation...) -end - -function Gridap.Geometry.BoundaryTriangulation(strategy::Type{<:AssemblyStrategy}, - part,gids,model::DiscreteModel;kwargs_triangulation...) - portion=proc_local_triangulation_portion_type(strategy) - BoundaryTriangulation(portion,part,gids,model;kwargs_triangulation...) -end - -function Gridap.Geometry.BoundaryTriangulation(strategy::DistributedAssemblyStrategy, - model::DistributedDiscreteModel;kwargs_triangulation...) - DistributedData(model,strategy) do part, (model,gids), strategy - BoundaryTriangulation(strategy,part,gids,model;kwargs_triangulation...) - end -end - -function Gridap.Geometry.SkeletonTriangulation(model::DistributedDiscreteModel;kwargs_triangulation...) - AS=default_assembly_strategy_type(get_comm(model)) - SkeletonTriangulation(AS, model; kwargs_triangulation...) -end - -function Gridap.Geometry.SkeletonTriangulation(strategy::Type{<:AssemblyStrategy}, - model::DistributedDiscreteModel;kwargs_triangulation...) - DistributedData(model) do part, (model,gids) - SkeletonTriangulation(strategy,part,gids,model;kwargs_triangulation...) - end -end - -function Gridap.Geometry.SkeletonTriangulation(strategy::AssemblyStrategy, - part,gids,model::DiscreteModel;kwargs_triangulation...) - SkeletonTriangulation(typeof(strategy),part,gids,model;kwargs_triangulation...) -end - -function Gridap.Geometry.SkeletonTriangulation(strategy::Type{<:AssemblyStrategy}, - part,gids,model::DiscreteModel;kwargs_triangulation...) - portion=proc_local_triangulation_portion_type(strategy) - SkeletonTriangulation(portion,part,gids,model;kwargs_triangulation...) -end - -function Gridap.Geometry.SkeletonTriangulation(strategy::DistributedAssemblyStrategy, - model::DistributedDiscreteModel;kwargs_triangulation...) - DistributedData(model,strategy) do part, (model,gids), strategy - SkeletonTriangulation(strategy,part,gids,model;kwargs_triangulation...) 
- end -end diff --git a/src/OLD/DistributedData.jl b/src/OLD/DistributedData.jl deleted file mode 100644 index 4270e659..00000000 --- a/src/OLD/DistributedData.jl +++ /dev/null @@ -1,75 +0,0 @@ -# Data distributed in parts of type T in a communicator -# Formerly, ScatteredVector -abstract type DistributedData{T} end - -function get_comm(a::DistributedData) - @abstractmethod -end - -function num_parts(a) - num_parts(get_comm(a)) -end - -# Construct a DistributedData object in a communicator -function DistributedData{T}(initializer::Function,::Communicator,args...) where T - @abstractmethod -end - -function DistributedData(initializer::Function,::Communicator,args...) - @abstractmethod -end - -# The comm argument can be omitted if it can be determined from the first -# data argument. -function DistributedData{T}(initializer::Function,args...) where T - comm = get_comm(get_distributed_data(first(args))) - DistributedData{T}(initializer,comm,args...) -end - -function DistributedData(initializer::Function,args...) - comm = get_comm(get_distributed_data(first(args))) - DistributedData(initializer,comm,args...) -end - -get_part_type(::Type{<:DistributedData{T}}) where T = T - -get_part_type(::DistributedData{T}) where T = T - -function gather!(a::AbstractVector,b::DistributedData) - @abstractmethod -end - -function gather(b::DistributedData{T}) where T - if i_am_master(get_comm(b)) - a = Vector{T}(undef,num_parts(b)) - else - a = Vector{T}(undef,0) - end - gather!(a,b) - a -end - -function scatter(comm::Communicator,b::AbstractVector) - @abstractmethod -end - -function scatter_value(comm::Communicator,v) - if i_am_master(comm) - part_to_v = fill(v,num_parts(comm)) - else - T = eltype(v) - part_to_v = T[] - end - scatter(comm,part_to_v) -end - -# return an object for which -# its restriction to the parts of a communicator is defined. -# The returned object is not necessarily an instance -# of DistributedData -# Do nothing by default. -function get_distributed_data(object) - object -end - -get_comm(a) = get_comm(get_distributed_data(a)) diff --git a/src/OLD/DistributedFEOperators.jl b/src/OLD/DistributedFEOperators.jl deleted file mode 100644 index d78ab463..00000000 --- a/src/OLD/DistributedFEOperators.jl +++ /dev/null @@ -1,33 +0,0 @@ - -# """ -# """ -# function Gridap.FESpaces.AffineFEOperator(trial::DistributedFESpace, -# test::DistributedFESpace, -# matrix::AbstractMatrix, -# vector::AbstractVector) -# @assert false # TO-DO -# end - -# """ -# """ -# function Gridap.FESpaces.AffineFEOperator(weakform::Function, -# trial::DistributedFESpace, -# test::DistributedFESpace, -# assem::DistributedAssembler) -# u = get_trial_fe_basis(trial) -# v = get_fe_basis(test) -# uhd = zero(trial) -# matcontribs, veccontribs = weakform(u,v) -# data = collect_cell_matrix_and_vector(trial,test,matcontribs,veccontribs,uhd) -# A,b = assemble_matrix_and_vector(assem,data) -# op = AffineOperator(A,b) -# AffineFEOperator(trial,test,op) -# end - -""" -""" -function Gridap.FESpaces.FEOperator(assem::DistributedAssembler,terms::DistributedData) - trial = assem.trial - test = assem.test - FEOperatorFromTerms(trial,test,assem,terms) -end diff --git a/src/OLD/DistributedFESpaceFactories.jl b/src/OLD/DistributedFESpaceFactories.jl deleted file mode 100644 index c655fdcc..00000000 --- a/src/OLD/DistributedFESpaceFactories.jl +++ /dev/null @@ -1,17 +0,0 @@ - -function Gridap.FESpace(::Type{V}; - model::DistributedDiscreteModel, reffe, constraint=nothing, kwargs...) 
where V - if constraint == nothing - DistributedFESpaceFromLocalFESpaces(V;model=model, reffe=reffe, kwargs...) - elseif constraint == :zeromean - ZeroMeanDistributedFESpace(V; model=model, reffe=reffe, kwargs...) - else - @unreachable "Unknown constraint value $constraint" - end -end - -function Gridap.FESpace(;model::DistributedDiscreteModel, - reffe, constraint=nothing, kwargs...) - V=default_global_vector_type(get_comm(model)) - FESpace(V;model=model,reffe=reffe,constraint=constraint,kwargs...) -end diff --git a/src/OLD/DistributedFESpaces.jl b/src/OLD/DistributedFESpaces.jl deleted file mode 100644 index 2de86f8b..00000000 --- a/src/OLD/DistributedFESpaces.jl +++ /dev/null @@ -1,404 +0,0 @@ - -abstract type DistributedFESpace <: FESpace end - -function get_distributed_data(dspace::DistributedFESpace) - spaces = dspace.spaces - gids = dspace.gids - - DistributedData(spaces, gids) do part, space, lgids - space, lgids - end -end - -function Gridap.FESpaces.FEFunction(dV::DistributedFESpace, x) - @abstractmethod -end - -function Gridap.FESpaces.EvaluationFunction(dV::DistributedFESpace, x) - @abstractmethod -end - -""" -""" -function get_vector_type(a::DistributedFESpace) - @abstractmethod -end - -# Minimal FE interface -function Gridap.FESpaces.num_free_dofs(f::DistributedFESpace) - f.gids.ngids -end - -function Gridap.FESpaces.zero_free_values(f::DistributedFESpace) - fv = Gridap.Algebra.allocate_vector(get_vector_type(f), f.gids) - fill_entries!(fv, zero(eltype(fv))) - fv -end - -function Gridap.FESpaces.get_trial_fe_basis(f::DistributedFESpace) - bases = DistributedData(f.spaces) do part, space - get_trial_fe_basis(space) - end -end - -function Gridap.FESpaces.get_fe_basis(f::DistributedFESpace) - bases = DistributedData(f.spaces) do part, space - get_fe_basis(space) - end -end - - -# TO-DO: Better name? -struct DistributedFESpaceFromLocalFESpaces{V} <: DistributedFESpace - vector_type::Type{V} - model::DistributedDiscreteModel - spaces::DistributedData{<:FESpace} - gids::DistributedIndexSet -end - -function get_vector_type(a::DistributedFESpaceFromLocalFESpaces) - a.vector_type -end - -function Gridap.FESpaces.FEFunction(dV::DistributedFESpaceFromLocalFESpaces, x) - dfree_vals = x[dV.gids] - funs = DistributedData(dV.spaces, dfree_vals) do part, V, free_vals - FEFunction(V, free_vals) - end - DistributedFEFunction(funs, x, dV) -end - -function Gridap.FESpaces.EvaluationFunction(dV::DistributedFESpaceFromLocalFESpaces, x) - dfree_vals = x[dV.gids] - funs = DistributedData(dV.spaces, dfree_vals) do part, V, free_vals - Gridap.FESpaces.EvaluationFunction(V, free_vals) - end - DistributedFEFunction(funs, x, dV) -end - - -# Constructors -function Gridap.TrialFESpace(V::DistributedFESpaceFromLocalFESpaces, args...) - spaces = DistributedData(V.spaces) do part, space - TrialFESpace(space, args...) - end - DistributedFESpaceFromLocalFESpaces(get_vector_type(V), V.model, spaces, V.gids) -end - -function DistributedFESpaceFromLocalFESpaces(::Type{V}; - model::DistributedDiscreteModel, - reffe, - kwargs...) where V - function init_local_spaces(part, model) - lspace = FESpace(model,reffe;kwargs...) 
- end - comm = get_comm(model) - spaces = DistributedData(init_local_spaces, comm, model.models) - DistributedFESpaceFromLocalFESpaces(V, model, spaces) -end - -function DistributedFESpaceFromLocalFESpaces(::Type{V}, - model::DistributedDiscreteModel, - spaces::DistributedData{<:FESpace}) where {V} - gids = _compute_distributed_index_set(model, spaces) - DistributedFESpaceFromLocalFESpaces(V, model, spaces, gids) -end - -function _compute_distributed_index_set( - model::DistributedDiscreteModel, - spaces::DistributedData{<:FESpace}) - - comm = get_comm(model) - - function init_lid_to_owner(part, lspace, cell_gids) - nlids = num_free_dofs(lspace) - lid_to_owner = zeros(Int, nlids) - cell_to_part = cell_gids.lid_to_owner - cell_to_lids = Table(get_cell_dof_ids(lspace)) - # if (part==1) - # println(cell_to_lids) - # end - _fill_max_part_around!(lid_to_owner, cell_to_part, cell_to_lids) - lid_to_owner - end - - part_to_lid_to_owner = DistributedData{Vector{Int}}(init_lid_to_owner, - comm, - spaces, - model.gids) - - offsets, ngids = _compute_offsets_and_ngids(part_to_lid_to_owner) - - num_dofs_x_cell = compute_num_dofs_x_cell(comm,spaces) - - part_to_cell_to_owners = DistributedVector(init_cell_to_owners, - model.gids, num_dofs_x_cell, - spaces, - part_to_lid_to_owner) - - exchange!(part_to_cell_to_owners) - - - do_on_parts(update_lid_to_owner, - part_to_lid_to_owner, - spaces, - part_to_cell_to_owners) - - part_to_lid_to_gid = _compute_part_to_lid_to_gid(model, - spaces, - num_dofs_x_cell, - part_to_lid_to_owner, - offsets) - - function init_free_gids(part, lid_to_gid, lid_to_owner, ngids) - # if (part==1) - # println("$(ngids)") - # println("$(lid_to_gid)") - # println("$(lid_to_owner)") - # end - IndexSet(ngids, lid_to_gid, lid_to_owner) - end - - gids = DistributedIndexSet(init_free_gids, - comm, - ngids, - part_to_lid_to_gid, - part_to_lid_to_owner, - ngids) -end - -function _compute_offsets_and_ngids(part_to_lid_to_owner) - comm = get_comm(part_to_lid_to_owner) - function count_owned_lids(part, lid_to_owner) - count(owner -> owner == part, lid_to_owner) - end - a = DistributedData{Int}(count_owned_lids, comm, part_to_lid_to_owner) - part_to_num_oids = gather(a) - if i_am_master(comm) - ngids = sum(part_to_num_oids) - _fill_offsets!(part_to_num_oids) - else - ngids = -1 - end - offsets = scatter(comm, part_to_num_oids) - part_to_ngids = scatter_value(comm, ngids) - do_on_parts(comm, part_to_ngids) do part, lngids - ngids = lngids - end - (offsets, ngids) -end - -function compute_num_dofs_x_cell(comm, spaces) - DistributedData(comm, spaces) do part, lspace - cell_dofs = get_cell_dof_ids(lspace) - [_count_dofs_x_cell(cell_dofs[i]) for i = 1:length(cell_dofs)] - end -end - -function _count_dofs_x_cell(cell_dof_ids::Gridap.Fields.VectorBlock) - n=0 - for i=1:length(cell_dof_ids) - n += length(cell_dof_ids.array[i]) - end - n -end - -function _count_dofs_x_cell(cell_dof_ids::Vector) - length(cell_dof_ids) -end - -function _compute_part_to_lid_to_gid(model, - spaces, - num_dofs_x_cell, - part_to_lid_to_owner, - offsets) - - comm = get_comm(part_to_lid_to_owner) - - part_to_lid_to_gid = DistributedData{Vector{Int}}( - init_lid_to_gids,comm,part_to_lid_to_owner,offsets) - - part_to_cell_to_gids = DistributedVector(init_cell_to_owners, - model.gids, - num_dofs_x_cell, - spaces, - part_to_lid_to_gid) - - exchange!(part_to_cell_to_gids) - - do_on_parts( - update_lid_to_gid, - part_to_lid_to_gid, - part_to_lid_to_owner, - spaces, - part_to_cell_to_gids, - model.gids) - - 
exchange!(part_to_cell_to_gids) - - do_on_parts( - update_lid_to_owner, - part_to_lid_to_gid, - spaces, - part_to_cell_to_gids) - - part_to_lid_to_gid -end - -function init_lid_to_gids(part, lid_to_owner, offset) - lid_to_gid = zeros(Int, length(lid_to_owner)) - _fill_owned_gids!(lid_to_gid, lid_to_owner, part, offset) - lid_to_gid -end - -function init_cell_to_owners(part, - num_dofs_x_cell, - lspace, - lid_to_owner) - ptrs = Vector{eltype(num_dofs_x_cell)}(undef, - length(num_dofs_x_cell) + 1) - ptrs[2:end] = num_dofs_x_cell[1:end] - length_to_ptrs!(ptrs) - data = Vector{eltype(lid_to_owner)}(undef, ptrs[end] - 1) - - cell_to_lids = get_cell_dof_ids(lspace) - dlid_to_zero = zeros(eltype(lid_to_owner), num_dirichlet_dofs(lspace)) - cell_to_owners_from = - lazy_map(Broadcasting(PosNegReindex(lid_to_owner,dlid_to_zero)),cell_to_lids) - k = 1 - for i = 1:length(cell_to_owners_from) - for j = 1:length(cell_to_owners_from[i]) - k=_fill_data!(data,cell_to_owners_from[i][j],k) - end - end - return Table(data, ptrs) -end - -function _fill_data!(data,entry::Integer,k) - data[k]=entry - k=k+1 -end - -function _fill_data!(data,entry::Vector{<:Integer},k) - for i=1:length(entry) - data[k]=entry[i] - k=k+1 - end - k -end - - - -function update_lid_to_gid(part, lid_to_gid, lid_to_owner, lspace, cell_to_gids, cell_gids) - cell_to_lids = Table(get_cell_dof_ids(lspace)) - cell_to_owner = cell_gids.lid_to_owner - _update_lid_to_gid!( - lid_to_gid,cell_to_lids,cell_to_gids,cell_to_owner,lid_to_owner) -end - -function Gridap.Arrays.Table(x::AbstractVector{<:Gridap.Fields.VectorBlock}) - c=array_cache(x) - n=length(x) - ptrs=Vector{Int32}(undef,n+1) - ptrs[1]=1 - for i in 1:length(x) - xi=getindex!(c,x,i) - ptrs[i+1]=ptrs[i]+_count_dofs_x_cell(xi) - end - data=Vector{Int32}(undef,ptrs[n+1]-1) - k=1 - for i in 1:length(x) - xi=getindex!(c,x,i) - for xij in xi.array - k=_fill_data!(data,xij,k) - end - end - Table(data,ptrs) -end - -function _update_lid_to_gid!(lid_to_gid, cell_to_lids, cell_to_gids, cell_to_owner, lid_to_owner) - for cell in 1:length(cell_to_lids) - i_to_gid = cell_to_gids[cell] - pini = cell_to_lids.ptrs[cell] - pend = cell_to_lids.ptrs[cell + 1] - 1 - cellowner = cell_to_owner[cell] - for (i, p) in enumerate(pini:pend) - lid = cell_to_lids.data[p] - if lid > 0 - owner = lid_to_owner[lid] - if owner == cellowner - gid = i_to_gid[i] - lid_to_gid[lid] = gid - end - end - end - end -end - -function update_lid_to_owner(part, lid_to_owner, lspace, cell_to_owners) - cell_to_lids = Table(get_cell_dof_ids(lspace)) - _update_lid_to_owner!(lid_to_owner, cell_to_lids, cell_to_owners) - -end - -function _update_lid_to_owner!(lid_to_owner, cell_to_lids, cell_to_owners) - for cell in 1:length(cell_to_lids) - i_to_owner = cell_to_owners[cell] - pini = cell_to_lids.ptrs[cell] - pend = cell_to_lids.ptrs[cell + 1] - 1 - for (i, p) in enumerate(pini:pend) - lid = cell_to_lids.data[p] - if lid > 0 && i_to_owner[i] > 0 - owner = i_to_owner[i] - lid_to_owner[lid] = owner - end - end - end -end - -function _fill_owned_gids!(lid_to_gid, lid_to_owner, part, offset) - o = offset - for (lid, owner) in enumerate(lid_to_owner) - if owner == part - o += 1 - lid_to_gid[lid] = o - end - end -end - -function _fill_offsets!(part_to_num_oids) - o = 0 - for part in 1:length(part_to_num_oids) - a = part_to_num_oids[part] - part_to_num_oids[part] = o - o += a - end -end - -function _fill_max_part_around!(lid_to_owner, cell_to_owner, cell_to_lids) - for cell in 1:length(cell_to_lids) - cellowner = cell_to_owner[cell] - pini = 
cell_to_lids.ptrs[cell] - pend = cell_to_lids.ptrs[cell + 1] - 1 - for p in pini:pend - lid = cell_to_lids.data[p] - if lid > 0 - owner = lid_to_owner[lid] - lid_to_owner[lid] = max(owner, cellowner) - end - end - end -end - -# FE Function -struct DistributedFEFunction <: FEFunction - funs::DistributedData - vals::AbstractVector - space::DistributedFESpace -end - -get_distributed_data(u::DistributedFEFunction) = u.funs - -Gridap.FESpaces.get_free_dof_values(a::DistributedFEFunction) = a.vals - -Gridap.FESpaces.get_fe_space(a::DistributedFEFunction) = a.space diff --git a/src/OLD/DistributedIndexSets.jl b/src/OLD/DistributedIndexSets.jl deleted file mode 100644 index a872a6f7..00000000 --- a/src/OLD/DistributedIndexSets.jl +++ /dev/null @@ -1,36 +0,0 @@ -# A, B, C should be the type of some indexable collection, e.g. ranges or vectors or dicts -struct IndexSet{A,B,C} - ngids::Int - lid_to_gid::A - lid_to_owner::B - gid_to_lid::C -end - -function IndexSet(ngids,lid_to_gid,lid_to_owner) - gid_to_lid = Dict{Int,Int32}() - for (lid,gid) in enumerate(lid_to_gid) - gid_to_lid[gid] = lid - end - IndexSet(ngids,lid_to_gid,lid_to_owner,gid_to_lid) -end - -abstract type DistributedIndexSet end - -function num_gids(a::DistributedIndexSet) - @abstractmethod -end - -function get_comm(a::DistributedIndexSet) - @abstractmethod -end - -function DistributedIndexSet(initializer::Function,::Communicator,ngids::Integer,args...) - @abstractmethod -end - -# The comm argument can be omitted if it can be determined from the first -# data argument. -function DistributedIndexSet(initializer::Function,ngids::Integer,args...) where T - comm = get_comm(get_distributed_data(first(args))) - DistributedIndexSet(initializer,comm,args...) -end diff --git a/src/OLD/DistributedTriangulations.jl b/src/OLD/DistributedTriangulations.jl deleted file mode 100644 index 65800d31..00000000 --- a/src/OLD/DistributedTriangulations.jl +++ /dev/null @@ -1,147 +0,0 @@ -abstract type ProcLocalTriangulationPortion end; -struct OwnedCells <: ProcLocalTriangulationPortion end; -struct OwnedAndGhostCells <: ProcLocalTriangulationPortion end; - -function filter_cells_when_needed(portion::ProcLocalTriangulationPortion, - trian::Triangulation, - args...) - filter_cells_when_needed(typeof(portion), trian, args...) -end - -function filter_cells_when_needed(portion::Type{<:ProcLocalTriangulationPortion}, - trian::Triangulation, - args...) - @abstractmethod -end - -function filter_cells_when_needed(portion::Type{OwnedAndGhostCells}, - trian::Triangulation, - args...) - trian -end - -function filter_cells_when_needed(portion::Type{OwnedCells}, - trian::Triangulation, - part, - cell_gids) - remove_ghost_cells(trian,part,cell_gids) -end - -function Gridap.Geometry.Triangulation(portion::Type{<:ProcLocalTriangulationPortion}, - part,gids,model::DiscreteModel,args_triangulation...)#;kwargs_triangulation...) - @abstractmethod -end - -function Gridap.Geometry.Triangulation(portion::Type{<:ProcLocalTriangulationPortion}, - model::DistributedDiscreteModel,args_triangulation...)#;kwargs_triangulation...) - DistributedData(model) do part, (model,gids) - Triangulation(portion,part,gids,model,args_triangulation...) - end -end - -function Gridap.Geometry.Triangulation(portion::Type{OwnedCells}, - part,gids,model::DiscreteModel,args_triangulation...)#;kwargs_triangulation...) - trian=Triangulation(model,args_triangulation...)#;kwargs_triangulation...) 
- filter_cells_when_needed(portion,trian,part,gids) -end - -function Gridap.Geometry.Triangulation(portion::Type{OwnedAndGhostCells}, - part,gids,model::DiscreteModel,args_triangulation...)#;kwargs_triangulation...) - trian=Triangulation(model,args_triangulation...)#;kwargs_triangulation...) - filter_cells_when_needed(portion,trian) -end - -#TO-DO: what does it mean OwnedCells and OwnedAndGhostCells for a BoundaryTriangulation? -# Perhaps we should use a different type, with a more intention revealing name! -function Gridap.Geometry.BoundaryTriangulation(portion::Type{<:ProcLocalTriangulationPortion}, - part,gids,model::DiscreteModel;kwargs_triangulation...) - @abstractmethod -end - -function Gridap.Geometry.BoundaryTriangulation(portion::Type{<:ProcLocalTriangulationPortion}, - model::DistributedDiscreteModel;kwargs_triangulation...) - DistributedData(model) do part, (model,gids) - BoundaryTriangulation(portion,part,gids,model;kwargs_triangulation...) - end -end - -function Gridap.Geometry.BoundaryTriangulation(portion::Type{OwnedCells}, - part,gids,model::DiscreteModel;kwargs_triangulation...) - trian=BoundaryTriangulation(model;kwargs_triangulation...) - filter_cells_when_needed(portion,trian,part,gids) -end - -function Gridap.Geometry.BoundaryTriangulation(portion::Type{OwnedAndGhostCells}, - part,gids,model::DiscreteModel;kwargs_triangulation...) - trian=BoundaryTriangulation(model;kwargs_triangulation...) - filter_cells_when_needed(portion,trian) -end - -#TO-DO: what does it mean OwnedCells and OwnedAndGhostCells for a SkeletonTriangulation? -# Perhaps we should use a different type, with a more intention revealing name! -function Gridap.Geometry.SkeletonTriangulation(portion::Type{<:ProcLocalTriangulationPortion}, - part,gids,model::DiscreteModel;kwargs_triangulation...) - @abstractmethod -end - -function Gridap.Geometry.SkeletonTriangulation(portion::Type{<:ProcLocalTriangulationPortion}, - model::DistributedDiscreteModel;kwargs_triangulation...) - DistributedData(model) do part, (model,gids) - SkeletonTriangulation(portion,part,gids,model;kwargs_triangulation...) - end -end - -function Gridap.Geometry.SkeletonTriangulation(portion::Type{OwnedCells}, - part,gids,model::DiscreteModel;kwargs_triangulation...) - trian=SkeletonTriangulation(model;kwargs_triangulation...) - filter_cells_when_needed(portion,trian,part,gids) -end - -function Gridap.Geometry.SkeletonTriangulation(portion::Type{OwnedAndGhostCells}, - part,gids,model::DiscreteModel;kwargs_triangulation...) - trian=SkeletonTriangulation(model;kwargs_triangulation...) 
- filter_cells_when_needed(portion,trian) -end - -function remove_ghost_cells(trian::Triangulation, part::Integer, gids::IndexSet) - tcell_to_mcell = get_cell_to_bgcell(trian) - ocell_to_tcell = - findall((x) -> (gids.lid_to_owner[x] == part), tcell_to_mcell) - RestrictedTriangulation(trian, ocell_to_tcell) -end - -function remove_ghost_cells( - trian::SkeletonTriangulation, - part::Integer, - gids::IndexSet, -) - cell_id_plus = get_cell_to_bgcell(trian.plus) - cell_id_minus = get_cell_to_bgcell(trian.minus) - @assert length(cell_id_plus) == length(cell_id_minus) - facets_to_old_facets = - _compute_facets_to_old_facets(cell_id_plus, cell_id_minus, part, gids) - RestrictedTriangulation(trian, facets_to_old_facets) -end - -function _compute_facets_to_old_facets(cell_id_plus, cell_id_minus, part, gids) - facets_to_old_facets = eltype(cell_id_minus)[] - for i = 1:length(cell_id_plus) - part_plus = gids.lid_to_owner[cell_id_plus[i]] - part_minus = gids.lid_to_owner[cell_id_minus[i]] - max_part_id = max(part_plus, part_minus) - if (max_part_id == part) - push!(facets_to_old_facets, i) - end - end - facets_to_old_facets -end - -function include_ghost_cells(trian::RestrictedTriangulation) - trian.oldtrian -end - -function Gridap.CellData.get_normal_vector(trian::DistributedData{<:Triangulation}) - DistributedData(trian) do part, trian - Gridap.CellData.get_normal_vector(trian) - end -end diff --git a/src/OLD/DistributedVectors.jl b/src/OLD/DistributedVectors.jl deleted file mode 100644 index 2fcc9c58..00000000 --- a/src/OLD/DistributedVectors.jl +++ /dev/null @@ -1,31 +0,0 @@ - -abstract type DistributedVector{T} end -# A distributed vector is a vector that can be restricted to a part of a communicator. -# Do not use the abstract type in function arguments since we want vectors from other packages -# to be used as distributed vectors. Use duck typing. - - -# a distributed vector should be indexable by a distributed index set -# The restriction of the resulting object to a part in a communicator -# should be a vector indexable by the local indices in this part. -# Note: some implementations will need to change the state of a -# to perform this operation and the result can take ownership of some -# part o the state of a. Be aware of this. -function Base.getindex(a,indices::DistributedIndexSet) - @abstractmethod -end - -Base.eltype(::Type{<:DistributedVector{T}}) where T=T -Base.eltype(a::DistributedVector)=Base.eltype(typeof(a)) - - -# Make the state of the vector globally consistent -function exchange!(a) - @abstractmethod -end - -# Build a Distributed vector from an index set -# the resulting object is assumed to be locally indexable when restricted to a part -function DistributedVector(initializer::Function,indices::DistributedIndexSet,args...) 
- @abstractmethod -end diff --git a/src/OLD/GridapDistributed.jl b/src/OLD/GridapDistributed.jl deleted file mode 100644 index 7e9f1021..00000000 --- a/src/OLD/GridapDistributed.jl +++ /dev/null @@ -1,116 +0,0 @@ -module GridapDistributed - -using Gridap -using Gridap.Helpers -using Gridap.Geometry -using Gridap.FESpaces -using Gridap.Arrays -using Gridap.Algebra -using Gridap.MultiField - -using SparseArrays -using MPI -using GridapDistributedPETScWrappers -using P4est_wrapper -using FillArrays -using SparseMatricesCSR - -export Communicator -export num_parts -export num_workers -export get_part -export do_on_parts -export i_am_master -export SequentialCommunicator -export MPIPETScCommunicator - -export DistributedData -export get_comm -export get_part_type -export gather -export gather! -export scatter -export scatter_value - -export DistributedIndexSet -export IndexSet -export num_gids - -export DistributedVector -export exchange! - -export OwnedAndGhostCellsAssemblyStrategy -export OwnedCellsAssemblyStrategy -export MapDoFsTypeGlobal -export MapDoFsTypeProcLocal -export OwnedCells -export OwnedAndGhostCells - -export remove_ghost_cells -export include_ghost_cells - -export PETScLinearSolver -export UniformlyRefinedForestOfOctreesDiscreteModel - - -include("Communicators.jl") - -include("SequentialCommunicators.jl") - -include("MPIPETScCommunicators.jl") - -include("MPITimers.jl") - -include("DistributedData.jl") - -include("SequentialDistributedData.jl") - -include("MPIPETScDistributedData.jl") - -include("DistributedIndexSets.jl") - -include("SequentialDistributedIndexSets.jl") - -include("MPIPETScDistributedIndexSets.jl") - -include("DistributedVectors.jl") - -include("SequentialDistributedVectors.jl") - -include("MPIPETScDistributedVectors.jl") - -include("DistributedDiscreteModels.jl") - -include("CartesianDiscreteModels.jl") - -include("UniformlyRefinedForestOfOctreesDiscreteModels.jl") - -include("DistributedFESpaces.jl") - -include("ZeroMeanDistributedFESpaces.jl") - -include("MultiFieldDistributedFESpaces.jl") - -include("DistributedFESpaceFactories.jl") - -include("DistributedTriangulations.jl") - -include("DistributedAssemblers.jl") - -include("SequentialDistributedAssemblersInterfaces.jl") - -include("MPIPETScAlgebraInterfaces.jl") - -include("MPIPETScDistributedAssemblersInterfaces.jl") - -include("DistributedFEOperators.jl") - -include("MPIPETScLinearSolvers.jl") - -import Gridap.TensorValues: inner, outer, double_contraction, symmetric_part -import LinearAlgebra: det, tr, cross, dot, ⋅ -import Base: inv, abs, abs2, *, +, -, /, adjoint, transpose, real, imag, conj -include("GridapHighLevelAPI.jl") - - -end # module diff --git a/src/OLD/GridapHighLevelAPI.jl b/src/OLD/GridapHighLevelAPI.jl deleted file mode 100644 index 6647fea9..00000000 --- a/src/OLD/GridapHighLevelAPI.jl +++ /dev/null @@ -1,222 +0,0 @@ - -function Gridap.Geometry.Triangulation(model::DistributedDiscreteModel) - das=default_assembly_strategy_type(get_comm(model)) - Triangulation(das,model) -end - -function Gridap.CellData.CellQuadrature(trian::DistributedData{<:Triangulation},degree::Integer) - DistributedData(trian) do part, trian - cell_quad = Gridap.CellData.Quadrature(trian,degree) - CellQuadrature(trian,cell_quad) - end -end - -function Gridap.CellData.Measure(trian::DistributedData{<:Triangulation},degree::Integer) - cell_quad=Gridap.CellData.CellQuadrature(trian,degree) - DistributedData(cell_quad) do part, cell_quad - Measure(cell_quad) - end -end - -function 
(*)(a::Gridap.CellData.Integrand,b::DistributedData{<:Measure}) - integrate(a.object,b) -end - -(*)(b::DistributedData{<:Measure},a::Gridap.CellData.Integrand) = a*b - -function Gridap.CellData.integrate(f::DistributedData,b::DistributedData{<:Measure}) - DistributedData(f,b) do part,f,b - integrate(f,b) - end -end - -function Base.sum(a::DistributedData{<:Gridap.CellData.DomainContribution}) - g=DistributedData(a) do part, a - sum(a) - end - sum(gather(g)) -end - -# Composition (this replaces the @law macro) -for T in (DistributedData{<:CellField},DistributedFEFunction) - @eval begin - function Base.:(∘)(f::Function,g::$T) - DistributedData(g) do part, g - Operation(f)(g) - end - end - function Base.:(∘)(f::Function,g::Tuple{$T,$T}) - DistributedData(g...) do part, g... - Operation(f)(g...) - end - end - # function Base.:(∘)(f::Function,g::Tuple{Vararg{Union{AbstractArray{<:Number},$T}}}) - # Operation(f)(g...) - # end - # function Base.:(∘)(f::Function,g::Tuple{Vararg{Union{Function,$T}}}) - # Operation(f)(g...) - # end - end -end - -# Define some of the well known arithmetic ops - -#TO-DO: get the list of operators, e.g., from a Gridap constant -for T in (DistributedData{<:CellField},DistributedFEFunction) - for op in (:symmetric_part,:inv,:det,:abs,:abs2,:+,:-,:tr,:transpose,:adjoint,:grad2curl,:real,:imag,:conj) - @eval begin - function ($op)(a::$T) - DistributedData(a) do part, a - Operation($op)(a) - end - end - end - end - end - - # Binary ops - for T in (DistributedData{<:CellField},DistributedFEFunction) - for op in (:inner,:outer,:double_contraction,:+,:-,:*,:cross,:dot,:/) - @eval begin - function ($op)(a::$T,b::$T) - DistributedData(a,b) do part, a, b - Operation($op)(a,b) - end - end - function ($op)(a::$T,b::Number) - DistributedData(a) do part, a - Operation($op)(a,b) - end - end - function ($op)(a::Number,b::$T) - DistributedData(b) do part, b - Operation($op)(a,b) - end - end - function ($op)(a::$T,b::Function) - DistributedData(a) do part, a - Operation($op)(a,b) - end - end - function ($op)(a::Function,b::$T) - DistributedData(b) do part, b - Operation($op)(a,b) - end - end - function ($op)(a::$T,b::AbstractArray{<:Number}) - DistributedData(a) do part, a - Operation($op)(a,b) - end - end - function ($op)(a::AbstractArray{<:Number},b::$T) - DistributedData(b) do part, b - Operation($op)(a,b) - end - end - end - end - end - -function (+)(a::DistributedData{<:Gridap.CellData.DomainContribution}, - b::DistributedData{<:Gridap.CellData.DomainContribution}) - DistributedData(a,b) do part, a, b - a+b - end -end - -function (-)(a::DistributedData{<:Gridap.CellData.DomainContribution}, - b::DistributedData{<:Gridap.CellData.DomainContribution}) - DistributedData(a,b) do part, a, b - a-b - end -end - -function (*)(a::Number,b::DistributedData{<:Gridap.CellData.DomainContribution}) - DistributedData(b) do part, b - a*b - end -end - -(*)(a::DistributedData{<:Gridap.CellData.DomainContribution},b::Number) = b*a - - function (a::typeof(gradient))(x::DistributedData{<:CellField}) - DistributedData(x) do part, x - a(x) - end - end - - function (a::typeof(gradient))(x::DistributedFEFunction) - DistributedData(x) do part, x - a(x) - end -end - -function dot(::typeof(∇),f::DistributedData{<:CellField}) - DistributedData(f) do part, f - divergence(f) - end -end - - -function Base.iterate(a::DistributedData{<:MultiFieldCellField}) - if _num_fields(a)==0 - return nothing - end - sf=_get_field(a,1) - (sf,2) -end - -function Base.iterate(a::DistributedData{<:MultiFieldCellField},state) - if 
state > _num_fields(a) - return nothing - end - sf=_get_field(a,state) - (sf,state+1) -end - -function _num_fields(a::DistributedData{<:MultiFieldCellField}) - num_fields=0 - do_on_parts(a) do part, a - num_fields=length(a.single_fields) - end - num_fields -end - -function _get_field(a::DistributedData{<:MultiFieldCellField},field_id) - DistributedData(a) do part, a - a.single_fields[field_id] - end -end - -function Gridap.CellData.jump(a::DistributedData{<:CellField}) - DistributedData(a) do part, a - jump(a) - end -end - -function Gridap.CellData.jump(a::DistributedData{<:SkeletonPair{<:CellField}}) - DistributedData(a) do part, a - jump(a) - end -end - -function Gridap.CellData.mean(a::DistributedData{<:CellField}) - DistributedData(a) do part, a - mean(a) - end -end - -for op in (:outer,:*,:dot) - @eval begin - function ($op)(a::DistributedData{<:CellField},b::DistributedData{<:SkeletonPair{<:CellField}}) - DistributedData(a,b) do part, a, b - Operation($op)(a,b) - end - end - function ($op)(a::DistributedData{<:SkeletonPair{<:CellField}},b::DistributedData{<:CellField}) - DistributedData(a,b) do part, a, b - Operation($op)(a,b) - end - end - end -end diff --git a/src/OLD/MPIPETScAlgebraInterfaces.jl b/src/OLD/MPIPETScAlgebraInterfaces.jl deleted file mode 100644 index 65c08e4a..00000000 --- a/src/OLD/MPIPETScAlgebraInterfaces.jl +++ /dev/null @@ -1,35 +0,0 @@ -""" - add_entry!(A,v,i,j,combine=+) - -Add an entry given its position and the operation to perform. -This method implementation (at present) assumes the following: - (1) i,j are local identifiers, and A has been set up a LocalToGlobalMapping IS. - (2) The insertion mode of A is GridapDistributedPETScWrappers.ADD_VALUES -""" -function Gridap.Algebra.add_entry!(::typeof(+), - A::GridapDistributedPETScWrappers.Mat{Float64}, - v::Float64, - i::Integer, - j::Integer) - GridapDistributedPETScWrappers.set_values_local!(A, - GridapDistributedPETScWrappers.PetscInt[i-1], - GridapDistributedPETScWrappers.PetscInt[j-1], - Float64[v]) -end - -""" - add_entry!(A,v,i,combine=+) - -Add an entry given its position and the operation to perform. -This method implementation (at present) assumes the following: - (1) i,j are local identifiers, and A has been set up a LocalToGlobalMapping IS. 
- (2) The insertion mode of A is GridapDistributedPETScWrappers.ADD_VALUES -""" -function Gridap.Algebra.add_entry!(::typeof(+), - A::GridapDistributedPETScWrappers.Vec{Float64}, - v::Float64, - i::Integer) - GridapDistributedPETScWrappers.set_values_local!(A, - GridapDistributedPETScWrappers.PetscInt[i-1], - Float64[v]) -end diff --git a/src/OLD/MPIPETScCommunicators.jl b/src/OLD/MPIPETScCommunicators.jl deleted file mode 100644 index 637ee523..00000000 --- a/src/OLD/MPIPETScCommunicators.jl +++ /dev/null @@ -1,80 +0,0 @@ - -mutable struct MPIPETScCommunicator <: CollaborativeCommunicator - comm::MPI.Comm - master_rank::Int - function MPIPETScCommunicator(comm::MPI.Comm, master_rank::Int = 0) - comm = new(comm, master_rank) - finalizer(MPIPETScCommunicatorDestroy,comm) - comm - end -end - -function MPIPETScCommunicator(user_driver_function) - comm=_MPIPETScCommunicator() - try - user_driver_function(comm) - catch err - showerror(stdout,err,stacktrace(catch_backtrace())) - MPI.Abort(comm.comm,1) - end -end - -function _MPIPETScCommunicator() - petsc_comm = Ref{MPI.Comm}(MPI.Comm()) - first_tag = Ref{GridapDistributedPETScWrappers.C.PetscMPIInt}() - GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.PetscCommDuplicate(Float64,MPI.COMM_WORLD,petsc_comm,first_tag)) - MPIPETScCommunicator(petsc_comm[]) -end - -function MPIPETScCommunicatorDestroy(comm::MPIPETScCommunicator) - petsc_comm = Ref{MPI.Comm}(comm.comm) - GridapDistributedPETScWrappers.PetscFinalized(Float64) || GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.PetscCommDestroy(Float64,petsc_comm)) -end - - - -function get_part(comm::MPIPETScCommunicator) - MPI.Comm_rank(comm.comm)+1 -end - -# All objects to be used with this communicator need to implement this -# function -function get_part(comm::MPIPETScCommunicator,object,part::Integer) - @abstractmethod -end - -function get_part(comm::MPIPETScCommunicator,object::Number,part::Integer) - object -end - -function get_part(comm::MPIPETScCommunicator,object::GridapDistributedPETScWrappers.Mat{Float64},part::Integer) - object -end - -function i_am_master(comm::MPIPETScCommunicator) - MPI.Comm_rank(comm.comm) == comm.master_rank -end - -function do_on_parts(task::Function, comm::MPIPETScCommunicator, args...) - part = MPI.Comm_rank(comm.comm) + 1 - #largs = map(a->get_part(comm,get_distributed_data(a),part), args) - largs = [] - for arg in args - current = get_part(comm,get_distributed_data(arg),part) - push!(largs,current) - end - task(part, largs...) 
-end - -function num_parts(comm::MPIPETScCommunicator) - MPI.Comm_size(comm.comm) -end - -function num_workers(comm::MPIPETScCommunicator) - MPI.Comm_size(comm.comm) -end - -# We need to compare communicators to perform some checks -function Base.:(==)(a::MPIPETScCommunicator,b::MPIPETScCommunicator) - @notimplemented -end diff --git a/src/OLD/MPIPETScDistributedAssemblersInterfaces.jl b/src/OLD/MPIPETScDistributedAssemblersInterfaces.jl deleted file mode 100644 index 3dcac425..00000000 --- a/src/OLD/MPIPETScDistributedAssemblersInterfaces.jl +++ /dev/null @@ -1,458 +0,0 @@ - -function default_assembly_strategy_type(::MPIPETScCommunicator) - OwnedAndGhostCellsAssemblyStrategy -end - -function default_map_dofs_type(::MPIPETScCommunicator) - MapDoFsTypeProcLocal -end - -function default_global_vector_type(::MPIPETScCommunicator) - T = Float64 - GridapDistributedPETScWrappers.Vec{T} -end - -function default_global_matrix_type(::MPIPETScCommunicator) - T = Float64 - GridapDistributedPETScWrappers.Mat{T} -end - -""" - allocate_vector(::Type{V},indices) where V - -Allocate a vector of type `V` indexable at the indices `indices` -""" -function Gridap.Algebra.allocate_vector( - ::Type{GridapDistributedPETScWrappers.Vec{Float64}}, - indices::MPIPETScDistributedIndexSet, -) - ng = num_gids(indices) - nl = num_owned_entries(indices) - vec=GridapDistributedPETScWrappers.Vec(Float64, ng; mlocal = nl, comm = get_comm(indices).comm) - vec.insertmode = GridapDistributedPETScWrappers.C.ADD_VALUES - GridapDistributedPETScWrappers.set_local_to_global_mapping(vec,indices.IS) - vec -end - -function Gridap.Algebra.fill_entries!(a::GridapDistributedPETScWrappers.Mat{Float64},v::Number) - fill!(a,v) - a -end - -function is_mpipetsc_and_map_dofs_type_global(dassem::DistributedAssembler{V,M,AS}) where {V,M,AS} - result=isa(dassem.assems,MPIPETScDistributedData) - mapdofstype=get_map_dofs_type(AS) - result=result && mapdofstype==MapDoFsTypeGlobal -end - - -function Gridap.FESpaces.assemble_matrix_and_vector_add!(dmat::GridapDistributedPETScWrappers.Mat{Float64},dvec::GridapDistributedPETScWrappers.Vec{Float64},dassem::DistributedAssembler, ddata) - @notimplementedif is_mpipetsc_and_map_dofs_type_global(dassem) - do_on_parts(dassem,ddata,dmat,dvec) do part, assem, data, mat, vec - assemble_matrix_and_vector_add!(mat,vec,assem,data) - end - GridapDistributedPETScWrappers.assemble(dmat) - GridapDistributedPETScWrappers.assemble(dvec) -end - -function Gridap.FESpaces.assemble_matrix_add!(dmat::GridapDistributedPETScWrappers.Mat{Float64},dassem::DistributedAssembler, ddata) - @notimplementedif is_mpipetsc_and_map_dofs_type_global(dassem) - do_on_parts(dassem,ddata,dmat) do part, assem, data, mat - assemble_matrix_add!(mat,assem,data) - end - GridapDistributedPETScWrappers.assemble(dmat) -end - -function Gridap.FESpaces.assemble_vector_add!(dvec::GridapDistributedPETScWrappers.Vec{Float64},dassem::DistributedAssembler, ddata) - @notimplementedif is_mpipetsc_and_map_dofs_type_global(dassem) - do_on_parts(dassem,ddata,dvec) do part, assem, data, vec - assemble_vector_add!(vec,assem,data) - end - GridapDistributedPETScWrappers.assemble(dvec) -end - - - -function get_local_vector_type(::Type{GridapDistributedPETScWrappers.Vec{Float64}}) - Vector{Float64} -end - -function get_local_matrix_type(::Type{GridapDistributedPETScWrappers.Mat{Float64}}) - SparseMatrixCSR{1,Float64,Int64} -end - -function allocate_local_vector( - 
strat::Union{DistributedAssemblyStrategy{OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeProcLocal}},DistributedAssemblyStrategy{OwnedCellsAssemblyStrategy{MapDoFsTypeProcLocal}}}, - ::Type{GridapDistributedPETScWrappers.Vec{Float64}}, - indices::MPIPETScDistributedIndexSet, -) - DistributedData(indices) do part,index - T = get_local_vector_type(GridapDistributedPETScWrappers.Vec{Float64}) - lvec=T(undef,length(index.lid_to_gid)) - fill!(lvec,zero(eltype(T))) - lvec - end -end - -function Gridap.Algebra.finalize_coo!( - ::Type{GridapDistributedPETScWrappers.Mat{Float64}},IJV::MPIPETScDistributedData,m::MPIPETScDistributedIndexSet,n::MPIPETScDistributedIndexSet) -end - -function _convert_buf_to_petscint(buf) - if isempty(buf) - Ptr{GridapDistributedPETScWrappers.C.PetscInt}(0) - else - isa(buf,Vector{GridapDistributedPETScWrappers.C.PetscInt}) ? buf : GridapDistributedPETScWrappers.C.PetscInt[ i for i in buf ] - end -end - -function assemble_global_matrix( - strat::DistributedAssemblyStrategy{OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeProcLocal}}, - ::Type{GridapDistributedPETScWrappers.Mat{Float64}}, - IJV::MPIPETScDistributedData, - m::MPIPETScDistributedIndexSet, - n::MPIPETScDistributedIndexSet, -) - I, J, V = IJV.part - for i = 1:length(I) - I[i] = m.app_to_petsc_locidx[I[i]] - end - # Build fully assembled local portions - Alocal = - build_fully_assembled_local_portion(m,I,J,V,m.lid_to_gid_petsc) - - # Build global matrix from fully assembled local portions - build_petsc_matrix_from_local_portion(m,n,Alocal) -end - -function compute_subdomain_graph_dIS_and_lst_snd(gids, dI) - # List parts I have to send data to - function compute_lst_snd(part, gids, I) - lst_snd = Set{Int}() - for i = 1:length(I) - owner = gids.lid_to_owner[I[i]] - if (owner != part) - if (!(owner in lst_snd)) - push!(lst_snd, owner) - end - end - end - collect(lst_snd) - end - - part_to_lst_snd = DistributedData(compute_lst_snd, gids, dI) - part_to_num_snd = DistributedData(part_to_lst_snd) do part, lst_snd - length(lst_snd) - end - - offsets = gather(part_to_num_snd) - num_edges = sum(offsets) - GridapDistributed._fill_offsets!(offsets) - part_to_offsets = scatter(get_comm(part_to_num_snd), offsets) - - part_to_owned_subdomain_graph_edge_gids = - DistributedData(part_to_num_snd, part_to_offsets) do part, num_snd, offset - owned_edge_gids = zeros(Int, num_snd) - o = offset - for i = 1:num_snd - o += 1 - owned_edge_gids[i] = o - end - owned_edge_gids - end - - num_snd = GridapDistributedPETScWrappers.C.PetscMPIInt(part_to_num_snd.part) - lst_snd = convert(Vector{GridapDistributedPETScWrappers.C.PetscMPIInt}, part_to_lst_snd.part) .- GridapDistributedPETScWrappers.C.PetscMPIInt(1) - snd_buf = part_to_owned_subdomain_graph_edge_gids.part - - num_rcv = Ref{GridapDistributedPETScWrappers.C.PetscMPIInt}() - lst_rcv = Ref{Ptr{GridapDistributedPETScWrappers.C.PetscMPIInt}}() - rcv_buf = Ref{Ptr{Int64}}() - - #GridapDistributedPETScWrappers.C.PetscCommBuildTwoSidedSetType(Float64, get_comm(gids).comm, GridapDistributedPETScWrappers.C.PETSC_BUILDTWOSIDED_ALLREDUCE) - GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.PetscCommBuildTwoSided(Float64, - get_comm(gids).comm, - GridapDistributedPETScWrappers.C.PetscMPIInt(1), - MPI.Datatype(Int64).val, - num_snd, - pointer(lst_snd), - Ptr{Cvoid}(pointer(snd_buf)), - num_rcv, - lst_rcv, - Base.unsafe_convert(Ptr{Cvoid},rcv_buf))) - - - #TO-DO: Call to PetscFree for lst_rcv and rcv_buf - #All attempts so far resulted in a SEGfault, so I gave up - #see 
https://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Sys/PetscCommBuildTwoSided.html - #for more details - - num_rcv=num_rcv[] - lst_rcv_vec=unsafe_wrap(Vector{GridapDistributedPETScWrappers.C.PetscMPIInt},lst_rcv[],num_rcv) - rcv_buf_vec=unsafe_wrap(Vector{Int64},rcv_buf[],num_rcv) - - function compute_subdomain_graph_index_set(part,num_edges,owned_edge_entries) - lid_to_gid=Vector{Int}(undef,num_snd+num_rcv) - lid_to_owner=Vector{Int}(undef,num_snd+num_rcv) - for i=1:num_snd - lid_to_gid[i]=owned_edge_entries[i] - lid_to_owner[i]=part - end - for i=num_snd+1:num_snd+num_rcv - lid_to_gid[i]=rcv_buf_vec[i-num_snd] - lid_to_owner[i]=lst_rcv_vec[i-num_snd]+1 - end - IndexSet(num_edges, lid_to_gid, lid_to_owner) - end - - (DistributedIndexSet(compute_subdomain_graph_index_set, - get_comm(gids), - num_edges, - num_edges, - part_to_owned_subdomain_graph_edge_gids), - part_to_lst_snd) -end - -function pack_and_comm_entries(dIS, dIJV, m, n, part_to_lst_snd) - length_entries = - DistributedVector(dIS, dIS, m, dIJV, part_to_lst_snd) do part, dIS, gid, IJV, lst_snd - I,_,_ = IJV - local_vector = fill(zero(Int),length(dIS.lid_to_owner)) - for i = 1:length(I) - owner = gid.lid_to_owner[I[i]] - if (owner != part) - edge_lid = findfirst((i) -> (i == owner), lst_snd) - local_vector[edge_lid]+=3 - end - end - local_vector - end - exchange!(length_entries) - dd_length_entries = DistributedData(length_entries) do part, length_entries - collect(length_entries) - end - - # Pack data to be sent - exchange_entries_vector = - DistributedVector(dIS, dIS, dd_length_entries, - m, dIJV, part_to_lst_snd) do part, IS, length_entries, test_gids, IJV, lst_snd - - ptrs = Vector{Int64}(undef, length(length_entries)+1) - ptrs[1] = 1 - for i=1:length(ptrs)-1 - ptrs[i+1]=ptrs[i]+length_entries[i] - end - data = Vector{Float64}(undef, ptrs[end]-1) - - current_pack_position = fill(zero(Int32), length(IS.lid_to_owner)) - I,J,V = IJV - for i = 1:length(I) - owner = test_gids.lid_to_owner[I[i]] - if (owner != part) - edge_lid = findfirst((i) -> (i == owner), lst_snd) - row=m.lid_to_gid_petsc[I[i]] - col=n.lid_to_gid_petsc[J[i]] - current_pos=current_pack_position[edge_lid] - data[ptrs[edge_lid]+current_pos] = row - data[ptrs[edge_lid]+current_pos+1]= col - data[ptrs[edge_lid]+current_pos+2]= V[i] - current_pack_position[edge_lid] += 3 - end - end - Table(data,ptrs) - end - exchange!(exchange_entries_vector) - exchange_entries_vector -end - -function combine_local_and_remote_entries(dIS, dIJV, m, n, exchange_entries_vector) - # 3. 
Combine local + remote entries - part = MPI.Comm_rank(get_comm(m).comm)+1 - test_lid_to_owner = m.parts.part.lid_to_owner - test_lid_to_gid = m.lid_to_gid_petsc - test_gid_to_lid = Dict{Int,Int32}() - for (lid,gid) in enumerate(test_lid_to_gid) - test_gid_to_lid[gid] = lid - end - - trial_lid_to_gid = n.lid_to_gid_petsc - trial_gid_to_lid = Dict{Int,Int32}() - for (lid,gid) in enumerate(trial_lid_to_gid) - trial_gid_to_lid[gid] = lid - end - - #TODO: check with fverdugo if there is an already coded way of - # doing this vector pattern operation - lid_to_owned_lid = fill(-1, length(test_lid_to_owner)) - current = 1 - for i = 1:length(test_lid_to_owner) - if (test_lid_to_owner[i] == part) - lid_to_owned_lid[i] = current - current += 1 - end - end - - trial_lid_to_gid_extended = copy(trial_lid_to_gid) - trial_gid_to_lid_extended = copy(trial_gid_to_lid) - - I,J,V = dIJV.part - IS = dIS.parts.part - remote_entries = exchange_entries_vector.part - - length_GI_GJ_GV = count(a->(test_lid_to_owner[a]==part), I) - for i=1:length(IS.lid_to_owner) - if (IS.lid_to_owner[i] != part) - length_GI_GJ_GV += length(remote_entries[i])÷3 - end - end - - GI = Vector{Int64}(undef,length_GI_GJ_GV) - GJ = Vector{Int64}(undef,length_GI_GJ_GV) - GV = Vector{Float64}(undef,length_GI_GJ_GV) - - # Add local entries - current = 1 - for i = 1:length(I) - owner = test_lid_to_owner[I[i]] - if (owner == part) - GI[current]=lid_to_owned_lid[I[i]] - GJ[current]=J[i] - GV[current]=V[i] - current=current+1 - end - end - - data = remote_entries.data - ptrs = remote_entries.ptrs - - # Add remote entries - for edge_lid = 1:length(IS.lid_to_gid) - if (IS.lid_to_owner[edge_lid] != part) - - for i = 1:3:length(remote_entries[edge_lid]) - - - row=Int64(data[ptrs[edge_lid]+i-1]) - GI[current]=lid_to_owned_lid[test_gid_to_lid[row]] - col=Int64(data[ptrs[edge_lid]+i]) - if (!(haskey(trial_gid_to_lid_extended,col))) - trial_gid_to_lid_extended[col]=length(trial_lid_to_gid_extended)+1 - push!(trial_lid_to_gid_extended, col) - end - GJ[current] = trial_gid_to_lid_extended[col] - - GV[current] = data[ptrs[edge_lid]+i+1] - - current = current + 1 - end - end - end - (GI,GJ,GV,trial_lid_to_gid_extended) -end - -function build_fully_assembled_local_portion(m,GI,GJ,GV,trial_lid_to_gid_extended) - n_owned_dofs = num_owned_entries(m) - Alocal = sparse_from_coo( - Gridap.Algebra.SparseMatrixCSR{0,Float64,Int64}, - GI, - GJ, - GV, - n_owned_dofs, - length(trial_lid_to_gid_extended)) - for i = 1:length(Alocal.colval) - Alocal.colval[i] = trial_lid_to_gid_extended[Alocal.colval[i]+1] - 1 - end - Alocal -end - -function build_petsc_matrix_from_local_portion(m,n,Alocal) - # Build PETSc Matrix - ngrows = num_gids(m) - ngcols = num_gids(n) - n_owned_dofs = num_owned_entries(m) - p = Ref{GridapDistributedPETScWrappers.C.Mat{Float64}}() - rowptr = _convert_buf_to_petscint(Alocal.rowptr) - colval = _convert_buf_to_petscint(Alocal.colval) - GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.MatCreateMPIAIJWithArrays( - get_comm(m).comm, - GridapDistributedPETScWrappers.C.PetscInt(n_owned_dofs), - GridapDistributedPETScWrappers.C.PetscInt(n_owned_dofs), - GridapDistributedPETScWrappers.C.PetscInt(ngrows), - GridapDistributedPETScWrappers.C.PetscInt(ngcols), - rowptr, - colval, - Alocal.nzval, - p,)) - # NOTE: the triple (rowptr,colval,Alocal.nzval) is passed to the - # constructor of Mat() in order to avoid these Julia arrays - # from being garbage collected. 
- A=GridapDistributedPETScWrappers.Mat{Float64}(p[],(rowptr,colval,Alocal.nzval)) - GridapDistributedPETScWrappers.set_local_to_global_mapping(A,m.IS,n.IS) - GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.MatSetOption(p[], - GridapDistributedPETScWrappers.C.MAT_NEW_NONZERO_LOCATIONS, - GridapDistributedPETScWrappers.C.PETSC_FALSE)) - A.insertmode = GridapDistributedPETScWrappers.C.ADD_VALUES - A -end - -function assemble_global_matrix( - strat::DistributedAssemblyStrategy{OwnedCellsAssemblyStrategy{MapDoFsTypeProcLocal}}, - ::Type{GridapDistributedPETScWrappers.Mat{Float64}}, - IJV::MPIPETScDistributedData, - m::MPIPETScDistributedIndexSet, - n::MPIPETScDistributedIndexSet, -) - dI = DistributedData(get_comm(m),IJV) do part, IJV - I,_,_ = IJV - I - end - - # 1. Determine communication pattern - dIS, part_to_lst_snd = - compute_subdomain_graph_dIS_and_lst_snd(m, dI) - - # 2. Communicate entries - exchange_entries_vector = - pack_and_comm_entries(dIS, IJV, m, n, part_to_lst_snd) - - # 3. Combine local and remote entries - GI, GJ, GV, trial_lid_to_gid_extended = - combine_local_and_remote_entries(dIS, IJV, m, n, exchange_entries_vector) - - # 4. Build fully assembled local portions - Alocal = - build_fully_assembled_local_portion(m,GI,GJ,GV,trial_lid_to_gid_extended) - - # 5. Build global matrix from fully assembled local portions - build_petsc_matrix_from_local_portion(m,n,Alocal) -end - -function assemble_global_vector( - strat::DistributedAssemblyStrategy{OwnedCellsAssemblyStrategy{MapDoFsTypeProcLocal}}, - ::Type{GridapDistributedPETScWrappers.Vec{Float64}}, - db::MPIPETScDistributedData, - indices::MPIPETScDistributedIndexSet) - vec = allocate_vector(GridapDistributedPETScWrappers.Vec{Float64},indices) - GridapDistributedPETScWrappers.setindex0!(vec, db.part, indices.lid_to_gid_petsc .- GridapDistributedPETScWrappers.C.PetscInt(1)) - GridapDistributedPETScWrappers.AssemblyBegin(vec) - GridapDistributedPETScWrappers.AssemblyEnd(vec) - vec -end - -function assemble_global_vector( - strat::DistributedAssemblyStrategy{OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeProcLocal}}, - ::Type{GridapDistributedPETScWrappers.Vec{Float64}}, - db::MPIPETScDistributedData, - indices::MPIPETScDistributedIndexSet) - vec = allocate_vector(GridapDistributedPETScWrappers.Vec{Float64},indices) - - part = MPI.Comm_rank(get_comm(indices).comm)+1 - owned_pos = (indices.parts.part.lid_to_owner .== part) - bowned = db.part[owned_pos] - l2g_petsc = indices.lid_to_gid_petsc[owned_pos] .- GridapDistributedPETScWrappers.C.PetscInt(1) - - GridapDistributedPETScWrappers.setindex0!(vec, bowned, l2g_petsc) - GridapDistributedPETScWrappers.AssemblyBegin(vec) - GridapDistributedPETScWrappers.AssemblyEnd(vec) - vec -end diff --git a/src/OLD/MPIPETScDistributedData.jl b/src/OLD/MPIPETScDistributedData.jl deleted file mode 100644 index c65c2db9..00000000 --- a/src/OLD/MPIPETScDistributedData.jl +++ /dev/null @@ -1,43 +0,0 @@ -struct MPIPETScDistributedData{T} <: DistributedData{T} - part::T - comm::MPIPETScCommunicator -end - -get_comm(a::MPIPETScDistributedData) = a.comm - -num_parts(a::MPIPETScDistributedData) = num_parts(a.comm) - -get_part(comm::MPIPETScCommunicator,a::MPIPETScDistributedData,part::Integer) = a.part - -function DistributedData{T}(initializer::Function,comm::MPIPETScCommunicator,args...) where T - i = MPI.Comm_rank(comm.comm)+1 - largs = map(a->get_part(comm,get_distributed_data(a),i),args) - part = initializer(i,largs...) 
- MPIPETScDistributedData{T}(part,comm) -end - -function DistributedData(initializer::Function,comm::MPIPETScCommunicator,args...) - i = MPI.Comm_rank(comm.comm)+1 - largs = map(a->get_part(comm,get_distributed_data(a),i),args) - part = initializer(i,largs...) - MPIPETScDistributedData(part,comm) -end - -function gather!(a::AbstractVector,b::MPIPETScDistributedData) - comm=get_comm(b) - if (i_am_master(comm)) - @assert length(a) == num_parts(b) - a[comm.master_rank+1]=b.part - MPI.Gather!(nothing , a, 1, comm.master_rank, comm.comm) - else - MPI.Gather!(Ref(b.part), nothing, 1, comm.master_rank, comm.comm) - end -end - -function scatter(comm::MPIPETScCommunicator,b::AbstractVector) - if (i_am_master(comm)) @assert length(b) == num_parts(comm) end - v=MPI.Scatter(b,1,comm.master_rank,comm.comm) - DistributedData(comm) do part - v[1] - end -end diff --git a/src/OLD/MPIPETScDistributedIndexSets.jl b/src/OLD/MPIPETScDistributedIndexSets.jl deleted file mode 100644 index a97abdf5..00000000 --- a/src/OLD/MPIPETScDistributedIndexSets.jl +++ /dev/null @@ -1,124 +0,0 @@ -# Specializations -struct MPIPETScDistributedIndexSet{A,B,C} <: DistributedIndexSet - ngids :: Int - # TO-THINK: Do we really need to have a DistributedData for parts? - # Why dont we store part directly? - parts :: MPIPETScDistributedData{IndexSet{A,B,C}} - # TO-THINK: Should we store these as DistributedData? - lid_to_gid_petsc :: Vector{GridapDistributedPETScWrappers.C.PetscInt} - IS :: GridapDistributedPETScWrappers.ISLocalToGlobalMapping{Float64} - petsc_to_app_locidx :: Vector{Int32} - app_to_petsc_locidx :: Vector{Int32} -end - -num_gids(a::MPIPETScDistributedIndexSet) = a.ngids - -get_part( - comm::MPIPETScCommunicator, - a::MPIPETScDistributedIndexSet, - part::Integer) = a.parts.part - -function num_owned_entries(indices::MPIPETScDistributedIndexSet) - comm = get_comm(indices) - comm_rank = MPI.Comm_rank(comm.comm) + 1 - lid_to_owner = indices.parts.part.lid_to_owner - count((a)->(a == comm_rank), lid_to_owner) - end - -function create_ghost_vector(entries::Vector{Float64}, - indices::MPIPETScDistributedIndexSet) - ghost_idx = _generate_ghost_idx(indices) - num_owned = num_owned_entries(indices) - comm = get_comm(indices) - vref = Ref{GridapDistributedPETScWrappers.C.Vec{Float64}}() - GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.VecCreateGhostWithArray(comm.comm, - GridapDistributedPETScWrappers.C.PetscInt(num_owned), - GridapDistributedPETScWrappers.C.PetscInt(GridapDistributedPETScWrappers.C.PETSC_DECIDE), - GridapDistributedPETScWrappers.C.PetscInt(length(ghost_idx)), - ghost_idx, - entries, - vref)) - GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.VecSetType(vref[], GridapDistributedPETScWrappers.C.VECMPI)) - return GridapDistributedPETScWrappers.Vec{Float64}(vref[],entries) -end - -function _generate_ghost_idx(indices::MPIPETScDistributedIndexSet) - comm = get_comm(indices) - comm_rank = MPI.Comm_rank(comm.comm) - ghost_idx = GridapDistributedPETScWrappers.C.PetscInt[] - lid_to_owner = indices.parts.part.lid_to_owner - lid_to_gid_petsc = indices.lid_to_gid_petsc - num_local_entries = length(lid_to_owner) - for i = 1:num_local_entries - if (lid_to_owner[i] !== comm_rank + 1) - push!(ghost_idx, lid_to_gid_petsc[i]-1) - end - end - ghost_idx -end - - - -get_comm(a::MPIPETScDistributedIndexSet) = a.parts.comm - -function DistributedIndexSet(initializer::Function,comm::MPIPETScCommunicator,ngids::Integer,args...) - parts = DistributedData(initializer,comm,args...) 
- lid_to_gid_petsc, petsc_to_app_locidx, app_to_petsc_locidx = _compute_internal_members(comm,parts.part) - IS=GridapDistributedPETScWrappers.ISLocalToGlobalMapping(Float64, lid_to_gid_petsc, comm=comm.comm) - MPIPETScDistributedIndexSet(ngids,parts,lid_to_gid_petsc, IS,petsc_to_app_locidx,app_to_petsc_locidx) -end - -#TODO: think about type stability of this auxiliary function -function _compute_internal_members(comm::MPIPETScCommunicator, is::IndexSet) - comm_rank = MPI.Comm_rank(comm.comm) - num_owned_entries = count( (a)->(a==comm_rank+1), is.lid_to_owner ) - num_local_entries = length(is.lid_to_owner) - - sndbuf = Ref{Int64}(num_owned_entries) - rcvbuf = Ref{Int64}() - MPI.Exscan!(sndbuf, rcvbuf, 1, +, comm.comm) - - - app_idx = Array{GridapDistributedPETScWrappers.C.PetscInt}(undef, num_owned_entries) - petsc_idx = Array{GridapDistributedPETScWrappers.C.PetscInt}(undef, num_owned_entries) - - (comm_rank == 0) ? (offset = GridapDistributedPETScWrappers.C.PetscInt(1)) : (offset = GridapDistributedPETScWrappers.C.PetscInt(rcvbuf[] + 1)) - current = 1 - for i = 1:num_local_entries - if (is.lid_to_owner[i] == comm_rank + 1) - app_idx[current] = is.lid_to_gid[i] - petsc_idx[current] = offset - offset = offset + 1 - current = current + 1 - end - end - - # build application ordering in order to get lid_to_gid - # accordingly to PETSc global numbering - petsc_ao = AO(Float64, app_idx, petsc_idx) - lid_to_gid_petsc = convert(Vector{GridapDistributedPETScWrappers.C.PetscInt}, collect(is.lid_to_gid)) - map_app_to_petsc!(petsc_ao, lid_to_gid_petsc) - - ghost_idx = Int[] - - app_to_petsc_locidx = Vector{Int32}(undef, num_local_entries) - current = 1 - for i = 1:num_local_entries - if (is.lid_to_owner[i] == (comm_rank + 1)) - app_to_petsc_locidx[i] = current - current = current + 1 - end - end - for i = 1:num_local_entries - if (is.lid_to_owner[i] !== (comm_rank + 1)) - app_to_petsc_locidx[i] = current - current = current + 1 - end - end - - petsc_to_app_locidx = Vector{Int32}(undef, num_local_entries) - for i = 1:num_local_entries - petsc_to_app_locidx[app_to_petsc_locidx[i]] = i - end - return lid_to_gid_petsc, petsc_to_app_locidx, app_to_petsc_locidx -end diff --git a/src/OLD/MPIPETScDistributedVectors.jl b/src/OLD/MPIPETScDistributedVectors.jl deleted file mode 100644 index a0c042f8..00000000 --- a/src/OLD/MPIPETScDistributedVectors.jl +++ /dev/null @@ -1,363 +0,0 @@ -# Retrieved from Gridap 0.14.3 -struct Reindexed{T,N,A,B} <: AbstractArray{T,N} - i_to_v::A - j_to_i::B - function Reindexed(i_to_v::AbstractArray, j_to_i::AbstractArray) - T = eltype(i_to_v) - N = ndims(j_to_i) - A = typeof(i_to_v) - B = typeof(j_to_i) - new{T,N,A,B}(i_to_v,j_to_i) - end -end - -Base.size(a::Reindexed) = size(a.j_to_i) - -Base.IndexStyle(::Type{Reindexed{T,N,A,B}}) where {T,N,A,B} = IndexStyle(B) - -function getindex(a::Reindexed,j::Integer) - i = a.j_to_i[j] - a.i_to_v[i] -end - -function getindex(a::Reindexed{T,N}, j::Vararg{Int,N}) where {T,N} - i = a.j_to_i[j...] - a.i_to_v[i] -end - -function Base.setindex!(a::Reindexed,v,j::Integer) - i = a.j_to_i[j] - a.i_to_v[i]=v -end - -function testitem(a::Reindexed) - if length(a.j_to_i) == 0 - testitem(a.i_to_v) - else - a.i_to_v[first(a.j_to_i)] - end -end - -Gridap.Arrays.array_cache(a::Reindexed) = array_cache(a.i_to_v) - -function Gridap.Arrays.getindex!(cache,a::Reindexed,j::Integer...) - i = a.j_to_i[j...] - Gridap.Arrays.getindex!(cache,a.i_to_v,i) -end - -function Base.getindex(a::Reindexed,j::Integer...) - c=array_cache(a) - Gridap.Arrays.getindex!(c,a,j...) 
-end - - -""" - reindex(i_to_v::AbstractArray, j_to_i::AbstractArray) -""" -function reindex(i_to_v::AbstractArray, j_to_i::AbstractArray) - Reindexed(i_to_v,j_to_i) -end - - -# Specializations -struct MPIPETScDistributedVector{T <: Union{Number,AbstractVector{<:Number}},V <: AbstractVector{T},A,B,C} <: DistributedVector{T} - part::V - indices::MPIPETScDistributedIndexSet{A,B,C} - vecghost::GridapDistributedPETScWrappers.Vec{Float64} -end - -get_comm(a::MPIPETScDistributedVector) = get_comm(a.indices) - -get_part( - comm::MPIPETScCommunicator, - a::MPIPETScDistributedVector, - part::Integer) = a.part - -get_part( - comm::MPIPETScCommunicator, - a::GridapDistributedPETScWrappers.Vec{Float64}, - part::Integer) = a - - -function DistributedVector( - initializer::Function, indices::MPIPETScDistributedIndexSet,args...) - comm = get_comm(indices) - data = DistributedData(initializer, comm, args...) - part = data.part - V = typeof(part) - if (V <: Table) - sizes = [part.ptrs[i + 1] - part.ptrs[i] - for i = 1:length(part.ptrs)-1] - - max_sizes = length(sizes)!=0 ? maximum(sizes) : 0 - min_sizes = length(sizes)!=0 ? minimum(sizes) : 0 - if (max_sizes == min_sizes) - scalar_indices = - _create_fixed_length_indices(max_sizes, indices) - else - scalar_indices = - _create_variable_length_indices(sizes, indices) - end - T=get_data_eltype(V) - @assert sizeof(T) == sizeof(Float64) - petsc_ghost_array = Vector{Float64}(undef, length(part.data)) - new_data = reinterpret(T, petsc_ghost_array) - - current=1 - p_reindex_data=similar(part.data,Int32) - for i=1:length(indices.petsc_to_app_locidx) - j=indices.petsc_to_app_locidx[i] - size=part.ptrs[j+1]-part.ptrs[j] - new_data[current:current+size-1]= - part.data[part.ptrs[j]:part.ptrs[j+1]-1] - p_reindex_data[part.ptrs[j]:part.ptrs[j+1]-1] .= - current:current+size-1 - current=current+size - end - new_data_reindexed=reindex(new_data,p_reindex_data) - new_part=Table(new_data_reindexed,part.ptrs) - vecghost = create_ghost_vector(petsc_ghost_array, scalar_indices) - MPIPETScDistributedVector(new_part, scalar_indices, vecghost) - elseif (V <: Vector{<:Number}) - T = eltype(V) - @assert sizeof(T) == sizeof(Float64) - petsc_ghost_array = Vector{Float64}(undef, length(part)) - new_part = reindex(reinterpret(T, petsc_ghost_array), - indices.app_to_petsc_locidx) - copy_entries!(new_part, part) - vecghost = create_ghost_vector(petsc_ghost_array, indices) - MPIPETScDistributedVector(new_part, indices, vecghost) - else - @error "Initializer function returns unsupported local vector type" - end - -end - -function _create_fixed_length_indices( - length_entry::Int, - indices::MPIPETScDistributedIndexSet) - l = length_entry - n = l * indices.ngids - indices = DistributedIndexSet(get_comm(indices), n, indices, l, n) do part, indices, l, n - lid_to_gid = Vector{Int}(undef, l * length(indices.lid_to_owner)) - lid_to_owner = Vector{Int}(undef, l * length(indices.lid_to_owner)) - k = 1 - for i = 1:length(indices.lid_to_owner) - offset = (indices.lid_to_gid[i] - 1) * l - for j = 1:l - lid_to_gid[k] = offset + j - lid_to_owner[k] = indices.lid_to_owner[i] - k = k + 1 - end - end - IndexSet(n, lid_to_gid, lid_to_owner) - end - indices -end - -function _create_variable_length_indices( - length_entries::AbstractVector{<:Integer}, - indices::MPIPETScDistributedIndexSet, -) - -# println(T) -# println(eltype(T)) -# @assert sizeof(eltype(T)) == sizeof(Float64) - comm = get_comm(indices) - part = MPI.Comm_rank(comm.comm) + 1 - num_owned_entries = 0 - num_local_entries = 0 - lid_to_owner = 
indices.parts.part.lid_to_owner - for i = 1:length(lid_to_owner) - if (lid_to_owner[i] == part) - num_owned_entries = num_owned_entries + length_entries[i] - end - num_local_entries = num_local_entries + length_entries[i] - end - - sndbuf = Ref{Int64}(num_owned_entries) - rcvbuf = Ref{Int64}() - - MPI.Exscan!(sndbuf, rcvbuf, 1, +, comm.comm) - (part == 1) ? (offset = 1) : (offset = rcvbuf[] + 1) - - sendrecvbuf = Ref{Int64}(num_owned_entries) - MPI.Allreduce!(sendrecvbuf, +, comm.comm) - n = sendrecvbuf[] - - offsets = DistributedVector(indices, indices) do part, indices - local_part = Vector{Int}(undef,length(indices.lid_to_owner)) - for i = 1:length(indices.lid_to_owner) - if (indices.lid_to_owner[i] == part) - local_part[i] = offset - for j = 1:length_entries[i] - offset = offset + 1 - end - end - end - local_part - end - exchange!(offsets) - - indices = DistributedIndexSet(get_comm(indices), n, indices, offsets) do part, indices, offsets - lid_to_gid = Vector{Int}(undef, num_local_entries) - lid_to_owner = Vector{Int}(undef, num_local_entries) - k = 1 - for i = 1:length(indices.lid_to_owner) - offset = offsets[i] - for j = 1:length_entries[i] - lid_to_gid[k] = offset - lid_to_owner[k] = indices.lid_to_owner[i] - k = k + 1 - offset = offset + 1 - end - end - IndexSet(n, lid_to_gid, lid_to_owner) - end - indices -end - -function unpack_all_entries!(a::MPIPETScDistributedVector{T}) where T - num_local = length(a.indices.app_to_petsc_locidx) - lvec = GridapDistributedPETScWrappers.LocalVector(a.vecghost, num_local) - _unpack_all_entries!(T, a.part, a.indices.app_to_petsc_locidx, lvec) - GridapDistributedPETScWrappers.restore(lvec) -end - -function Base.getindex(a::GridapDistributedPETScWrappers.Vec{Float64}, indices::MPIPETScDistributedIndexSet) - result = DistributedVector(indices,indices) do part, indices - Vector{Float64}(undef,length(indices.lid_to_owner)) - end - copy!(result.vecghost, a) - exchange!(result.vecghost) - result -end - -function exchange!(a::GridapDistributedPETScWrappers.Vec{T}) where T - # Send data - GridapDistributedPETScWrappers.scatter!(a) -end - -function exchange!(a::MPIPETScDistributedVector{T}) where T - indices = a.indices - local_part = a.part - lid_to_owner = indices.parts.part.lid_to_owner - petsc_to_app_locidx = indices.petsc_to_app_locidx - app_to_petsc_locidx = indices.app_to_petsc_locidx - comm = get_comm(indices) - comm_rank = MPI.Comm_rank(comm.comm) - - # Pack data - # _pack_local_entries!(a.vecghost, local_part, lid_to_owner, comm_rank) - - exchange!(a.vecghost) - - # Unpack data - # num_local = length(lid_to_owner) - # lvec = GridapDistributedPETScWrappers.LocalVector(a.vecghost,num_local) - # _unpack_ghost_entries!(eltype(local_part), local_part, lid_to_owner, comm_rank, app_to_petsc_locidx, lvec) - # GridapDistributedPETScWrappers.restore(lvec) -end - -function _pack_local_entries!(vecghost, local_part, lid_to_owner, comm_rank) - num_owned = GridapDistributedPETScWrappers.lengthlocal(vecghost) - idxs = Vector{Int}(undef, num_owned) - vals = Vector{Float64}(undef, num_owned) - # Pack data - k = 1 - current = 1 - for i = 1:length(local_part) - for j = 1:length(local_part[i]) - if ( lid_to_owner[k] == comm_rank + 1 ) - idxs[current] = current - 1 - vals[current] = reinterpret(Float64, local_part[i][j]) - current = current + 1 - end - k = k + 1 - end - end - set_values_local!(vecghost, idxs, vals, GridapDistributedPETScWrappers.C.INSERT_VALUES) - AssemblyBegin(vecghost, GridapDistributedPETScWrappers.C.MAT_FINAL_ASSEMBLY) - AssemblyEnd(vecghost, 
GridapDistributedPETScWrappers.C.MAT_FINAL_ASSEMBLY) -end - -function _pack_all_entries!(vecghost, local_part) - num_local = prod(size(local_part)) - idxs = Vector{Int}(undef, num_local) - vals = Vector{Float64}(undef, num_local) - - # Pack data - k = 1 - current = 1 - for i = 1:length(local_part) - for j = 1:length(local_part[i]) - idxs[current] = current - 1 - vals[current] = reinterpret(Float64, local_part[i][j]) - current = current + 1 - k = k + 1 - end - end - set_values_local!(vecghost, idxs, vals, GridapDistributedPETScWrappers.C.INSERT_VALUES) - AssemblyBegin(vecghost, GridapDistributedPETScWrappers.C.MAT_FINAL_ASSEMBLY) - AssemblyEnd(vecghost, GridapDistributedPETScWrappers.C.MAT_FINAL_ASSEMBLY) -end - -function _unpack_ghost_entries!( - T::Type{<:Number}, - local_part, - lid_to_owner, - comm_rank, - app_to_petsc_locidx, - lvec, -) - for i = 1:length(local_part) - if (lid_to_owner[i] != comm_rank + 1) - local_part[i] = reinterpret(T, lvec.a[app_to_petsc_locidx[i]]) - end - end -end - -function _unpack_ghost_entries!( - T::Type{<:AbstractVector{K}}, - local_part, - lid_to_owner, - comm_rank, - app_to_petsc_locidx, - lvec, -) where K <: Number - k = 1 - for i = 1:length(local_part) - for j = 1:length(local_part[i]) - if (lid_to_owner[k] != comm_rank + 1) - local_part[i][j] = reinterpret(K, lvec.a[app_to_petsc_locidx[k]]) - end - k = k + 1 - end - end -end - -function _unpack_all_entries!( - T::Type{<:Number}, - local_part, - app_to_petsc_locidx, - lvec, -) - for i = 1:length(local_part) - local_part[i] = reinterpret(T, lvec.a[app_to_petsc_locidx[i]]) - end -end - -function _unpack_all_entries!( - T::Type{<:AbstractVector{K}}, - local_part, - app_to_petsc_locidx, - lvec, -) where {K <: Number} - k = 1 - for i = 1:length(local_part) - for j = 1:length(local_part[i]) - local_part[i][j] = reinterpret(K, lvec.a[app_to_petsc_locidx[k]]) - k = k + 1 - end - end -end diff --git a/src/OLD/MPIPETScLinearSolvers.jl b/src/OLD/MPIPETScLinearSolvers.jl deleted file mode 100644 index a366717a..00000000 --- a/src/OLD/MPIPETScLinearSolvers.jl +++ /dev/null @@ -1,46 +0,0 @@ -mutable struct PETScLinearSolver{T} <: LinearSolver - ksp :: GridapDistributedPETScWrappers.KSP{T} - kws - function PETScLinearSolver{T}(::Type{T}; kws...) where {T} - ksp=KSP(GridapDistributedPETScWrappers.C.KSP{T}(C_NULL)) - new{T}(ksp,kws) - end -end - -PETScLinearSolver(::Type{T}; kws...) where {T}=PETScLinearSolver{T}(T;kws...) - -struct PETScSymbolicSetup{T} <: SymbolicSetup - solver :: PETScLinearSolver{T} -end - -struct PETScNumericalSetup{T} <: NumericalSetup - solver :: PETScLinearSolver{T} -end - -function Gridap.Algebra.symbolic_setup( - ps::PETScLinearSolver{T}, - mat::GridapDistributedPETScWrappers.PetscMat{T}) where {T} - return PETScSymbolicSetup{T}(ps) -end - -function Gridap.Algebra.numerical_setup( - pss::PETScSymbolicSetup{T}, - mat::GridapDistributedPETScWrappers.PetscMat{T}) where {T} - pss.solver.ksp = KSP(mat; pss.solver.kws...) 
- GridapDistributedPETScWrappers.KSPSetUp!(pss.solver.ksp) - return PETScNumericalSetup{T}(pss.solver) -end - -function Gridap.Algebra.numerical_setup!( - pns::PETScNumericalSetup{T}, - mat::GridapDistributedPETScWrappers.PetscMat{T}) where {T} - GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.KSPSetOperators(pns.solver.ksp.p,mat.p,mat.p)) - GridapDistributedPETScWrappers.KSPSetUp!(pns.solver.ksp) -end - -function Gridap.Algebra.solve!( - x::GridapDistributedPETScWrappers.Vec{T}, - ns::PETScNumericalSetup{T}, - b::GridapDistributedPETScWrappers.Vec{T}) where {T} - GridapDistributedPETScWrappers.ldiv!(ns.solver.ksp, b, x) -end diff --git a/src/OLD/MPITimers.jl b/src/OLD/MPITimers.jl deleted file mode 100644 index bacfb433..00000000 --- a/src/OLD/MPITimers.jl +++ /dev/null @@ -1,77 +0,0 @@ -using Printf - -abstract type MPITimerMode end -struct MPITimerModeSum <: MPITimerMode end -struct MPITimerModeMin <: MPITimerMode end -struct MPITimerModeLast <: MPITimerMode end - -mutable struct MPITimer{M<:MPITimerMode} - comm :: MPI.Comm - message :: String # Concept being measured (e.g., assembly) - t_start :: Float64 # last call to start - t_stop :: Float64 # last call to stop - t_accum :: Float64 # result of processing all stop-start - function MPITimer{M}(comm::MPI.Comm, message) where M<:Union{MPITimerModeSum,MPITimerModeLast} - new{M}(comm, message, 0.0, 0.0, 0.0) - end - function MPITimer{M}(comm::MPI.Comm, message) where M<:MPITimerModeMin - new{M}(comm, message, 0.0, 0.0, 1.79769e+308) - end -end - -function timer_init(t::MPITimer{M}) where M - t.t_start=0.0 - t.t_stop =0.0 - if M <: MPITimerModeMin - t.t_accum = 1.79769e+308_rp - end -end - -function timer_start(t::MPITimer) - MPI.Barrier(t.comm) - t.t_start = MPI.Wtime() -end - -function timer_stop(t::MPITimer{M}) where M - t.t_stop = MPI.Wtime() - if ( t.t_stop - t.t_start >= 0.0) - cur_time = t.t_stop - t.t_start - else - cur_time = 0.0 - end - - if (M <: MPITimerModeMin) - if (t.t_accum > cur_time) t.t_accum = cur_time end - elseif (M <: MPITimerModeSum) - t.t_accum = t.t_accum + cur_time - elseif (M <: MPITimerModeLast) - t.t_accum = cur_time - end - t.t_start = 0.0 - t.t_stop = 0.0 -end - -function timer_report(t::MPITimer, show_header::Bool=true) - rank = MPI.Comm_rank(t.comm) - if (show_header) - if (rank==0) - @printf "%25s %15s %15s %15s\n" "" "Min (secs.)" "Max (secs.)" "Avg (secs.)" - end - end - tmin,tmax,tavg = timer_min_max_avg(t) - if (rank==0) - @printf "%25s %15.9e %15.9e %15.9e\n" t.message tmin tmax tavg - end -end - -function timer_min_max_avg(t::MPITimer) - accum_max = t.t_accum - accum_min = t.t_accum - accum_sum = t.t_accum - rank = MPI.Comm_rank(t.comm) - size = MPI.Comm_size(t.comm) - accum_max_reduced = MPI.Allreduce([accum_max], MPI.MAX, t.comm) - accum_min_reduced = MPI.Allreduce([accum_min], MPI.MIN, t.comm) - accum_sum_reduced = MPI.Allreduce([accum_sum], +, t.comm) - (accum_min_reduced[1],accum_max_reduced[1],accum_sum_reduced[1]/size) -end diff --git a/src/OLD/MultiFieldDistributedFESpaces.jl b/src/OLD/MultiFieldDistributedFESpaces.jl deleted file mode 100644 index a023a7dc..00000000 --- a/src/OLD/MultiFieldDistributedFESpaces.jl +++ /dev/null @@ -1,220 +0,0 @@ -struct MultiFieldDistributedFESpace{V} <: DistributedFESpace - vector_type::Type{V} - distributed_spaces::Vector{<:DistributedFESpace} - spaces::DistributedData{<:MultiFieldFESpace} - gids::DistributedIndexSet -end - -function get_vector_type(a::MultiFieldDistributedFESpace) - a.vector_type -end - -function 
Gridap.MultiFieldFESpace(test_space::MultiFieldDistributedFESpace{V}, - trial_spaces::Vector{<:DistributedFESpace}) where V - - spaces = DistributedData(test_space.spaces, trial_spaces...) do part, lspace, spaces_and_gids... - MultiFieldFESpace([s[1] for s in spaces_and_gids]) - end - MultiFieldDistributedFESpace(V, trial_spaces, spaces, test_space.gids) -end - - -function Gridap.FESpaces.FEFunction(dV::MultiFieldDistributedFESpace{T}, x) where {T} - _gen_multifield_distributed_fe_function(dV, x, FEFunction) -end - - -function _gen_multifield_distributed_fe_function(dV::MultiFieldDistributedFESpace{T}, x, f) where {T} - single_fe_functions = DistributedFEFunction[] - for (field, U) in enumerate(dV.distributed_spaces) - free_values_i = restrict_to_field(dV, x, field) - uhi = f(U, free_values_i) - push!(single_fe_functions, uhi) - end - - funs = DistributedData(get_comm(dV.distributed_spaces[1]), - dV.spaces, single_fe_functions...,) do part, V, fe_functions... - mfv = zero_free_values(V) - mf_lids = 1:length(mfv) - current = 1 - for (field_id,fun) in enumerate(fe_functions) - fv = get_free_dof_values(fun) - sf_lids=Gridap.MultiField.restrict_to_field(V,mf_lids,field_id) - for i = 1:length(sf_lids) - mfv[sf_lids[i]] = fv[i] - current = current + 1 - end - end - f(V, mfv) - end - multifield_fe_function = DistributedFEFunction(funs, x, dV) - MultiFieldDistributedFEFunction(single_fe_functions, - multifield_fe_function, - dV) -end - - -function restrict_to_field(dV::MultiFieldDistributedFESpace, x::Vector, field) - @assert isa(dV.gids, SequentialDistributedIndexSet) - - xi = Gridap.Algebra.allocate_vector(Vector{eltype(x)}, - dV.distributed_spaces[field].gids) - - do_on_parts(dV.spaces, dV.gids, xi, x, dV.distributed_spaces...) do part, mfspace, mfgids, xi, x, fspaces_and_gids... - fspace = fspaces_and_gids[field][1] - fgids = fspaces_and_gids[field][2] - mf_lids = i=1:num_free_dofs(mfspace) - sf_lids = Gridap.MultiField.restrict_to_field(mfspace,mf_lids,field) - for i = 1:num_free_dofs(fspace) - if fgids.lid_to_owner[i] == part - xi[fgids.lid_to_gid[i]] = x[mfgids.lid_to_gid[sf_lids[i]]] - end - end - end - xi -end - -function restrict_to_field(dV::MultiFieldDistributedFESpace, x::GridapDistributedPETScWrappers.Vec{Float64}, field) - fgids = dV.distributed_spaces[field].gids - mfgids = dV.gids - @assert isa(fgids, MPIPETScDistributedIndexSet) - - xi = Gridap.Algebra.allocate_vector(GridapDistributedPETScWrappers.Vec{Float64}, - dV.distributed_spaces[field].gids) - - comm = get_comm(fgids) - part = get_part(comm) - - fis_gids = [ GridapDistributedPETScWrappers.PetscInt(fgids.lid_to_gid_petsc[i] - 1) - for i = 1:length(fgids.lid_to_gid_petsc) - if fgids.parts.part.lid_to_owner[i] == part ] - - mfis_gids = Vector{GridapDistributedPETScWrappers.PetscInt}(undef,length(fis_gids)) - - do_on_parts(dV.spaces, dV.gids, dV.distributed_spaces...) do part, mfspace, lmfgids, fspaces_and_gids... 
- mf_lids = 1:num_free_dofs(mfspace) - sf_lids = Gridap.MultiField.restrict_to_field(mfspace,mf_lids,field) - fspace = fspaces_and_gids[field][1] - current=1 - for i = 1:num_free_dofs(fspace) - if lmfgids.lid_to_owner[sf_lids[i]] == part - mfis_gids[current]=mfgids.lid_to_gid_petsc[sf_lids[i]]-1 - current=current+1 - end - end - end - - fis = GridapDistributedPETScWrappers.IS_(Float64, fis_gids; comm=comm.comm) - mfis = GridapDistributedPETScWrappers.IS_(Float64, mfis_gids; comm=comm.comm) - - vscatter = GridapDistributedPETScWrappers.VecScatter(x, mfis, xi, fis) - scatter!(vscatter,x,xi) - xi -end - - - -function Gridap.FESpaces.EvaluationFunction(dV::MultiFieldDistributedFESpace, x) - _gen_multifield_distributed_fe_function(dV, x, EvaluationFEFunction) -end - - -function Gridap.MultiFieldFESpace(model::DistributedDiscreteModel, - distributed_spaces::Vector{<:DistributedFESpace}) where V - - spaces = DistributedData(distributed_spaces...) do part, spaces_and_gids... - MultiFieldFESpace([s[1] for s in spaces_and_gids]) - end - - gids=_generate_multifield_gids(model,spaces,distributed_spaces) - - MultiFieldDistributedFESpace(get_vector_type(distributed_spaces[1]), - distributed_spaces, - spaces, - gids) -end - - -function Gridap.MultiFieldFESpace(model::DistributedDiscreteModel, - distributed_spaces::Vector{<:DistributedFESpace}, - multifield_style::MultiFieldStyle) where V - - spaces = DistributedData(distributed_spaces...) do part, spaces_and_gids... - MultiFieldFESpace([s[1] for s in spaces_and_gids],multifield_style) - end - - gids=_generate_multifield_gids(model,spaces,distributed_spaces) - - MultiFieldDistributedFESpace(get_vector_type(distributed_spaces[1]), - distributed_spaces, - spaces, - gids) -end - - - - -function _generate_multifield_gids(model,spaces,distributed_spaces) - function init_lid_to_owner(part, lspace, spaces_and_gids...) - nlids = num_free_dofs(lspace) - lid_to_owner = zeros(Int, nlids) - mf_lids = 1:nlids - for (field_id,current_field_space_gids) in enumerate(spaces_and_gids) - gids = current_field_space_gids[2] - sf_lids=Gridap.MultiField.restrict_to_field(lspace,mf_lids,field_id) - for i=1:length(sf_lids) - lid_to_owner[sf_lids[i]]=gids.lid_to_owner[i] - end - end - lid_to_owner - end - - comm = get_comm(model) - - part_to_lid_to_owner = DistributedData{Vector{Int}}(init_lid_to_owner, - comm, - spaces, - distributed_spaces...) 
- - offsets, ngids = _compute_offsets_and_ngids(part_to_lid_to_owner) - - num_dofs_x_cell = compute_num_dofs_x_cell(comm, spaces) - - part_to_lid_to_gid = _compute_part_to_lid_to_gid(model, - spaces, - num_dofs_x_cell, - part_to_lid_to_owner, - offsets) - - function init_free_gids(part, lid_to_gid, lid_to_owner, ngids) - IndexSet(ngids, lid_to_gid, lid_to_owner) - end - - gids = DistributedIndexSet(init_free_gids, - comm, - ngids, - part_to_lid_to_gid, - part_to_lid_to_owner, - ngids) -end - - - -# FE Function -struct MultiFieldDistributedFEFunction <: FEFunction - single_fe_functions::Vector{DistributedFEFunction} - multifield_fe_function::DistributedFEFunction - space::MultiFieldDistributedFESpace -end - -get_distributed_data(u::MultiFieldDistributedFEFunction) = - get_distributed_data(u.multifield_fe_function) - -Gridap.FESpaces.get_free_dof_values(a::MultiFieldDistributedFEFunction) = - get_free_dof_values(a.multifield_fe_function) - -Gridap.FESpaces.get_fe_space(a::MultiFieldDistributedFEFunction) = a.space - -Base.iterate(m::MultiFieldDistributedFEFunction)=iterate(m.single_fe_functions) -Base.iterate(m::MultiFieldDistributedFEFunction,state) = iterate(m.single_fe_functions,state) -Base.getindex(m::MultiFieldDistributedFEFunction,field_id::Integer) = m.single_fe_functions[field_id] diff --git a/src/OLD/SequentialCommunicators.jl b/src/OLD/SequentialCommunicators.jl deleted file mode 100644 index c3a9ee57..00000000 --- a/src/OLD/SequentialCommunicators.jl +++ /dev/null @@ -1,42 +0,0 @@ -# Specializations -struct SequentialCommunicator <: OrchestratedCommunicator - nparts::Int -end - -function SequentialCommunicator(user_driver_function,nparts) - comm=SequentialCommunicator(nparts) - user_driver_function(comm) -end - -function SequentialCommunicator(nparts::Tuple) - SequentialCommunicator(prod(nparts)) -end - -# All objects to be used with this communicator need to implement this -# function -function get_part(comm::SequentialCommunicator,object,part::Integer) - @abstractmethod -end - -function get_part(comm::SequentialCommunicator,object::Number,part::Integer) - object -end - -function num_parts(a::SequentialCommunicator) - a.nparts -end - -function num_workers(a::SequentialCommunicator) - 1 -end - -function Base.:(==)(a::SequentialCommunicator,b::SequentialCommunicator) - a.nparts == b.nparts -end - -function do_on_parts(task::Function,comm::SequentialCommunicator,args...) - for part in 1:num_parts(comm) - largs = map(a->get_part(comm,get_distributed_data(a),part),args) - task(part,largs...) 
- end -end diff --git a/src/OLD/SequentialDistributedAssemblersInterfaces.jl b/src/OLD/SequentialDistributedAssemblersInterfaces.jl deleted file mode 100644 index e76e7e20..00000000 --- a/src/OLD/SequentialDistributedAssemblersInterfaces.jl +++ /dev/null @@ -1,110 +0,0 @@ -# Assembly related - -function default_assembly_strategy_type(::SequentialCommunicator) - OwnedAndGhostCellsAssemblyStrategy -end - -function default_map_dofs_type(::SequentialCommunicator) - MapDoFsTypeGlobal -end - -function default_global_vector_type(::SequentialCommunicator) - T = Float64 - Vector{Float64} - -end - -function default_global_matrix_type(::SequentialCommunicator) - T = Float64 - SparseMatrixCSC{T} -end - - -function Gridap.Algebra.allocate_vector(::Type{V},gids::DistributedIndexSet) where V <: AbstractVector - ngids = num_gids(gids) - allocate_vector(V,ngids) -end - -function allocate_local_vector( - strat::Union{DistributedAssemblyStrategy{OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeProcLocal}},DistributedAssemblyStrategy{OwnedCellsAssemblyStrategy{MapDoFsTypeProcLocal}}}, - ::Type{V}, - indices::SequentialDistributedIndexSet, -) where V<:Vector - DistributedData(indices) do part,index - T = get_local_vector_type(V) - lvec=T(undef,length(index.lid_to_gid)) - fill!(lvec,zero(eltype(T))) - lvec - end -end - -function allocate_local_vector( - strat::Union{DistributedAssemblyStrategy{OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeGlobal}},DistributedAssemblyStrategy{OwnedCellsAssemblyStrategy{MapDoFsTypeGlobal}}}, - ::Type{V}, - indices::SequentialDistributedIndexSet, -) where V<:Vector - T = get_local_vector_type(V) - vec=T(undef,num_gids(indices)) - fill!(vec,zero(eltype(T))) - vec -end - - -function assemble_global_matrix(strat::Union{DistributedAssemblyStrategy{OwnedAndGhostCellsAssemblyStrategy{T}},DistributedAssemblyStrategy{OwnedCellsAssemblyStrategy{T}}}, - ::Type{M}, - dIJV::SequentialDistributedData, - m::DistributedIndexSet, - n::DistributedIndexSet) where {T,M} - if (T==MapDoFsTypeProcLocal) - do_on_parts(dIJV,m,n) do part, IJV, mindexset, nindexset - I,J,V = IJV - for i=1:length(I) - I[i]=mindexset.lid_to_gid[I[i]] - J[i]=nindexset.lid_to_gid[J[i]] - end - end - end - if (length(dIJV.parts)==1) - I,J,V=dIJV.parts[1] - else - I=lazy_append(dIJV.parts[1][1],dIJV.parts[2][1]) - J=lazy_append(dIJV.parts[1][2],dIJV.parts[2][2]) - V=lazy_append(dIJV.parts[1][3],dIJV.parts[2][3]) - for part=3:length(dIJV.parts) - I=lazy_append(I,dIJV.parts[part][1]) - J=lazy_append(J,dIJV.parts[part][2]) - V=lazy_append(V,dIJV.parts[part][3]) - end - end - A=sparse_from_coo(M,I,J,V,num_gids(m),num_gids(n)) -end - -function assemble_global_vector(strat::Union{DistributedAssemblyStrategy{OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeProcLocal}},DistributedAssemblyStrategy{OwnedCellsAssemblyStrategy{MapDoFsTypeProcLocal}}}, - ::Type{M}, - db::DistributedData, - m::DistributedIndexSet) where M <: Vector - b=allocate_vector(M, num_gids(m)) - do_on_parts(m, db, b) do part, mindexset, blocal, b - for i=1:length(blocal) - b[mindexset.lid_to_gid[i]] += blocal[i] - end - end - b -end - -function assemble_global_vector(strat::Union{DistributedAssemblyStrategy{OwnedAndGhostCellsAssemblyStrategy{MapDoFsTypeGlobal}},DistributedAssemblyStrategy{OwnedCellsAssemblyStrategy{MapDoFsTypeGlobal}}}, - ::Type{M}, - b::M, - m::DistributedIndexSet) where M <: Vector - b -end - -function Gridap.Algebra.finalize_coo!( - ::Type{M}, - IJV::SequentialDistributedData, - m::DistributedIndexSet, - n::DistributedIndexSet) where M <: AbstractMatrix - 
for part in IJV.parts - finalize_coo!(M,part[1],part[2],part[3],num_gids(m),num_gids(n)) - end -end diff --git a/src/OLD/SequentialDistributedData.jl b/src/OLD/SequentialDistributedData.jl deleted file mode 100644 index e9254213..00000000 --- a/src/OLD/SequentialDistributedData.jl +++ /dev/null @@ -1,31 +0,0 @@ -# Specializations -struct SequentialDistributedData{T} <: DistributedData{T} - comm::SequentialCommunicator - parts::Vector{T} -end - -get_part(comm::SequentialCommunicator,a::SequentialDistributedData,part::Integer) = a.parts[part] - -get_comm(a::SequentialDistributedData) = a.comm - -function DistributedData{T}(initializer::Function,comm::SequentialCommunicator,args...) where T - nparts = num_parts(comm) - parts = [initializer(i,map(a->get_part(comm,get_distributed_data(a),i),args)...) for i in 1:nparts] - SequentialDistributedData{T}(comm,parts) -end - -function DistributedData(initializer::Function,comm::SequentialCommunicator,args...) - nparts = num_parts(comm) - parts = [initializer(i,map(a->get_part(comm,get_distributed_data(a),i),args)...) for i in 1:nparts] - SequentialDistributedData(comm,parts) -end - -function gather!(a::AbstractVector,b::SequentialDistributedData) - @assert length(a) == num_parts(b) - copyto!(a,b.parts) -end - -function scatter(comm::SequentialCommunicator,b::AbstractVector) - @assert length(b) == num_parts(comm) - SequentialDistributedData(comm,b) -end diff --git a/src/OLD/SequentialDistributedIndexSets.jl b/src/OLD/SequentialDistributedIndexSets.jl deleted file mode 100644 index 0e40a4e7..00000000 --- a/src/OLD/SequentialDistributedIndexSets.jl +++ /dev/null @@ -1,19 +0,0 @@ -# Specializations -struct SequentialDistributedIndexSet{A,B,C} <: DistributedIndexSet - ngids::Int - parts::SequentialDistributedData{IndexSet{A,B,C}} -end - -num_gids(a::SequentialDistributedIndexSet) = a.ngids - -get_part( - comm::SequentialCommunicator, - a::SequentialDistributedIndexSet, - part::Integer) = a.parts.parts[part] - -get_comm(a::SequentialDistributedIndexSet) = a.parts.comm - -function DistributedIndexSet(initializer::Function,comm::SequentialCommunicator,ngids::Integer,args...) - parts = DistributedData(initializer,comm,args...) - SequentialDistributedIndexSet(ngids,parts) -end diff --git a/src/OLD/SequentialDistributedVectors.jl b/src/OLD/SequentialDistributedVectors.jl deleted file mode 100644 index cb3d7ea7..00000000 --- a/src/OLD/SequentialDistributedVectors.jl +++ /dev/null @@ -1,92 +0,0 @@ -# Specializations -struct SequentialDistributedVector{T,V<:AbstractVector{T},A,B,C} - parts::Vector{V} - indices::SequentialDistributedIndexSet{A,B,C} -end - -get_comm(a::SequentialDistributedVector) = get_comm(a.indices) - -get_part( - comm::SequentialCommunicator, - a::SequentialDistributedVector, - part::Integer) = a.parts[part] - - -function DistributedVector( - initializer::Function, indices::SequentialDistributedIndexSet,args...) - comm = get_comm(indices) - data = DistributedData(initializer,comm,args...) 
- parts = data.parts - SequentialDistributedVector(parts,indices) -end - -function Base.getindex(a::SequentialDistributedVector,indices::SequentialDistributedIndexSet) - @notimplementedif a.indices !== indices - exchange!(a) - a -end - -function exchange!(a::SequentialDistributedVector) - indices = a.indices - for part in 1:num_parts(indices) - lid_to_gid = indices.parts.parts[part].lid_to_gid - lid_to_item = a.parts[part] - lid_to_owner = indices.parts.parts[part].lid_to_owner - for lid in 1:length(lid_to_item) - gid = lid_to_gid[lid] - owner = lid_to_owner[lid] - if owner != part - lid_owner = indices.parts.parts[owner].gid_to_lid[gid] - item = a.parts[owner][lid_owner] - lid_to_item[lid] = item - end - end - end - a -end - -# Note: this type dispatch is needed because setindex! is -# not available for Table -function exchange!(a::SequentialDistributedVector{T,<:Table}) where {T} - indices = a.indices - for part in 1:num_parts(indices) - lid_to_gid = indices.parts.parts[part].lid_to_gid - lid_to_item = a.parts[part] - lid_to_owner = indices.parts.parts[part].lid_to_owner - for lid in 1:length(lid_to_item) - gid = lid_to_gid[lid] - owner = lid_to_owner[lid] - if owner != part - lid_owner = indices.parts.parts[owner].gid_to_lid[gid] - k=a.parts[owner].ptrs[lid_owner] - for j=lid_to_item.ptrs[lid]:lid_to_item.ptrs[lid+1]-1 - lid_to_item.data[j]=a.parts[owner].data[k] - k=k+1 - end - end - end - end - a -end - - -# Julia vectors -function Base.getindex(a::Vector,indices::SequentialDistributedIndexSet) - dv = DistributedVector(indices,indices,a) do part, indices, a - v=Vector{eltype(a)}(undef,length(indices.lid_to_gid)) - for i=1:length(v) - v[i]=a[indices.lid_to_gid[i]] - end - v - end - dv -end - -#function exchange!(a::Vector) -# a -#end - -# By default in the SequentialCommunicator arrays are globally indexable when restricted to a part -function get_part(comm::SequentialCommunicator,a::AbstractArray,part::Integer) - a -end diff --git a/src/OLD/UniformlyRefinedForestOfOctreesDiscreteModels.jl b/src/OLD/UniformlyRefinedForestOfOctreesDiscreteModels.jl deleted file mode 100644 index 995b7733..00000000 --- a/src/OLD/UniformlyRefinedForestOfOctreesDiscreteModels.jl +++ /dev/null @@ -1,888 +0,0 @@ -const P4EST_2_GRIDAP_FACET_2D = [ 3, 4, 1, 2 ] -const GRIDAP_2_P4EST_FACET_2D = [ 3, 4, 1, 2 ] - - -const P4EST_2_GRIDAP_FACET_3D = [ 5, 6, 3, 4, 1, 2 ] -const GRIDAP_2_P4EST_FACET_3D = [ 5, 6, 3, 4, 1, 2 ] - -P4est_wrapper.quadrant_data(x::Clong) = reinterpret(P4est_wrapper.quadrant_data, x) - -function p4est_get_quadrant_vertex_coordinates(connectivity::Ptr{p4est_connectivity_t}, - treeid::p4est_topidx_t, - x::p4est_qcoord_t, - y::p4est_qcoord_t, - level::Int8, - corner::Cint, - vxy::Ptr{Cdouble}) - - myself=Ref{p4est_quadrant_t}( - p4est_quadrant_t(x,y,level,Int8(0),Int16(0), - P4est_wrapper.quadrant_data(Clong(0)))) - neighbour=Ref{p4est_quadrant_t}(myself[]) - if corner == 1 - p4est_quadrant_face_neighbor(myself,corner,neighbour) - elseif corner == 2 - p4est_quadrant_face_neighbor(myself,corner+1,neighbour) - elseif corner == 3 - p4est_quadrant_corner_neighbor(myself,corner,neighbour) - end - # Extract numerical coordinates of lower_left - # corner of my corner neighbour - p4est_qcoord_to_vertex(connectivity, - treeid, - neighbour[].x, - neighbour[].y, - vxy) -end - - -function p8est_get_quadrant_vertex_coordinates(connectivity::Ptr{p8est_connectivity_t}, - treeid::p4est_topidx_t, - x::p4est_qcoord_t, - y::p4est_qcoord_t, - z::p4est_qcoord_t, - level::Int8, - corner::Cint, - vxyz::Ptr{Cdouble}) 
- - myself=Ref{p8est_quadrant_t}( - p8est_quadrant_t(x,y,z,level,Int8(0),Int16(0), - P4est_wrapper.quadrant_data(Clong(0)))) - neighbour=Ref{p8est_quadrant_t}(myself[]) - - if ( corner == 1 ) - p8est_quadrant_face_neighbor(myself,Cint(1),neighbour) - elseif ( corner == 2 ) - p8est_quadrant_face_neighbor(myself,Cint(3),neighbour) - elseif ( corner == 3 ) - p8est_quadrant_edge_neighbor(myself,Cint(11),neighbour) - elseif ( corner == 4 ) - p8est_quadrant_face_neighbor(myself,Cint(5),neighbour) - elseif ( corner == 5 ) - p8est_quadrant_edge_neighbor(myself,Cint(7),neighbour) - elseif ( corner == 6 ) - p8est_quadrant_edge_neighbor(myself,Cint(3),neighbour) - elseif ( corner == 7 ) - p8est_quadrant_corner_neighbor(myself,Cint(7),neighbour) - end - - # Extract numerical coordinates of lower_left corner of my corner neighbour - p8est_qcoord_to_vertex(connectivity, - treeid, - neighbour[].x, - neighbour[].y, - neighbour[].z, - vxyz) -end - - -function setup_pXest_connectivity( - coarse_discrete_model::DiscreteModel{Dc,Dp}) where {Dc,Dp} - - trian=Triangulation(coarse_discrete_model) - node_coordinates=Gridap.Geometry.get_node_coordinates(trian) - cell_nodes_ids=Gridap.Geometry.get_cell_node_ids(trian) - - if (Dc==2) - pconn=p4est_connectivity_new( - p4est_topidx_t(length(node_coordinates)), # num_vertices - p4est_topidx_t(num_cells(coarse_discrete_model)), # num_trees - p4est_topidx_t(0), - p4est_topidx_t(0)) - else - @assert Dc==3 - pconn=p8est_connectivity_new( - p4est_topidx_t(length(node_coordinates)), # num_vertices - p4est_topidx_t(num_cells(coarse_discrete_model)), # num_trees - p4est_topidx_t(0), - p4est_topidx_t(0), - p4est_topidx_t(0), - p4est_topidx_t(0)) - end - - conn=pconn[] - vertices=unsafe_wrap(Array, conn.vertices, length(node_coordinates)*3) - current=1 - for i=1:length(node_coordinates) - p=node_coordinates[i] - for j=1:Dp - vertices[current]=Cdouble(p[j]) - current=current+1 - end - if (Dp==2) - vertices[current]=Cdouble(0.0) # Z coordinate always to 0.0 in 2D - current=current+1 - end - end - #print("XXX", vertices, "\n") - - tree_to_vertex=unsafe_wrap(Array, conn.tree_to_vertex, length(cell_nodes_ids)*(2^Dc)) - c=Gridap.Arrays.array_cache(cell_nodes_ids) - current=1 - for j=1:length(cell_nodes_ids) - ids=Gridap.Arrays.getindex!(c,cell_nodes_ids,j) - for id in ids - tree_to_vertex[current]=p4est_topidx_t(id-1) - current=current+1 - end - end - - - # /* - # * Fill tree_to_tree and tree_to_face to make sure we have a valid - # * connectivity. 
- # */ - PXEST_FACES=2*Dc - tree_to_tree=unsafe_wrap(Array, conn.tree_to_tree, conn.num_trees*PXEST_FACES ) - tree_to_face=unsafe_wrap(Array, conn.tree_to_face, conn.num_trees*PXEST_FACES ) - for tree=1:conn.num_trees - for face=1:PXEST_FACES - tree_to_tree[PXEST_FACES * (tree-1) + face] = tree-1 - tree_to_face[PXEST_FACES * (tree-1) + face] = face-1 - end - end - - if (Dc==2) - p4est_connectivity_complete(pconn) - @assert Bool(p4est_connectivity_is_valid(pconn)) - else - p8est_connectivity_complete(pconn) - @assert Bool(p8est_connectivity_is_valid(pconn)) - end - pconn -end - -function setup_pXest(::Type{Val{Dc}}, comm, connectivity, num_uniform_refinements) where Dc - if (Dc==2) - p4est_new_ext(comm.comm, - connectivity, - Cint(0), Cint(num_uniform_refinements), Cint(1), Cint(0), - C_NULL, C_NULL) - else - p8est_new_ext(comm.comm, - connectivity, - Cint(0), Cint(num_uniform_refinements), Cint(1), Cint(0), - C_NULL, C_NULL) - end -end - -function setup_pXest_ghost(::Type{Val{Dc}}, ptr_pXest) where Dc - if (Dc==2) - p4est_ghost_new(ptr_pXest,P4est_wrapper.P4EST_CONNECT_FULL) - else - p8est_ghost_new(ptr_pXest,P4est_wrapper.P8EST_CONNECT_FULL) - end -end - -function setup_cell_indexset(::Type{Val{Dc}}, comm, ptr_pXest, ptr_pXest_ghost) where Dc - pXest_ghost = ptr_pXest_ghost[] - pXest = ptr_pXest[] - - # Obtain ghost quadrants - if (Dc==2) - ptr_ghost_quadrants = Ptr{p4est_quadrant_t}(pXest_ghost.ghosts.array) - else - ptr_ghost_quadrants = Ptr{p8est_quadrant_t}(pXest_ghost.ghosts.array) - end - proc_offsets = unsafe_wrap(Array, pXest_ghost.proc_offsets, pXest_ghost.mpisize+1) - - global_first_quadrant = unsafe_wrap(Array, - pXest.global_first_quadrant, - pXest.mpisize+1) - - n = pXest.global_num_quadrants - cellindices = DistributedIndexSet(comm,n) do part - lid_to_gid = Vector{Int}(undef, pXest.local_num_quadrants + pXest_ghost.ghosts.elem_count) - lid_to_part = Vector{Int}(undef, pXest.local_num_quadrants + pXest_ghost.ghosts.elem_count) - - for i=1:pXest.local_num_quadrants - lid_to_gid[i] = global_first_quadrant[MPI.Comm_rank(comm.comm)+1]+i - lid_to_part[i] = MPI.Comm_rank(comm.comm)+1 - end - - k=pXest.local_num_quadrants+1 - for i=1:pXest_ghost.mpisize - for j=proc_offsets[i]:proc_offsets[i+1]-1 - quadrant = ptr_ghost_quadrants[j+1] - piggy3 = quadrant.p.piggy3 - lid_to_part[k] = i - lid_to_gid[k] = global_first_quadrant[i]+piggy3.local_num+1 - k=k+1 - end - end - # if (part==2) - # print(n,"\n") - # print(lid_to_gid,"\n") - # print(lid_to_part,"\n") - # end - IndexSet(n,lid_to_gid,lid_to_part) - end -end - -function setup_pXest_lnodes(::Type{Val{Dc}}, ptr_pXest, ptr_pXest_ghost) where Dc - if (Dc==2) - p4est_lnodes_new(ptr_pXest, ptr_pXest_ghost, Cint(1)) - else - p8est_lnodes_new(ptr_pXest, ptr_pXest_ghost, Cint(1)) - end -end - -function generate_cell_vertex_gids(ptr_pXest_lnodes, cellindices) - pXest_lnodes=ptr_pXest_lnodes[] - - nvertices = pXest_lnodes.vnodes - element_nodes = unsafe_wrap(Array, - pXest_lnodes.element_nodes, - pXest_lnodes.num_local_elements*nvertices) - - nonlocal_nodes = unsafe_wrap(Array, - pXest_lnodes.nonlocal_nodes, - pXest_lnodes.num_local_nodes-pXest_lnodes.owned_count) - - k = 1 - cell_vertex_gids=DistributedVector(cellindices,cellindices) do part, indices - n = length(indices.lid_to_owner) - ptrs = Vector{Int}(undef,n+1) - ptrs[1]=1 - for i=1:n - ptrs[i+1]=ptrs[i]+nvertices - end - k=1 - current=1 - data = Vector{Int}(undef, ptrs[n+1]-1) - for i=1:pXest_lnodes.num_local_elements - for j=1:nvertices - l=element_nodes[k+j-1] - if (l < 
pXest_lnodes.owned_count) - data[current]=pXest_lnodes.global_offset+l+1 - else - data[current]=nonlocal_nodes[l-pXest_lnodes.owned_count+1]+1 - end - current=current+1 - end - k=k+nvertices - end - Gridap.Arrays.Table(data,ptrs) - end - exchange!(cell_vertex_gids) - cell_vertex_gids -end - - -function generate_cell_vertex_lids_nlvertices(cell_vertex_gids) - DistributedData(cell_vertex_gids) do part, cell_vertex_gids - g2l=Dict{Int,Int}() - current=1 - data=Vector{Int}(undef,length(cell_vertex_gids.data)) - for (i,gid) in enumerate(cell_vertex_gids.data) - if haskey(g2l,gid) - data[i]=g2l[gid] - else - data[i]=current - g2l[gid]=current - current=current+1 - end - end - (Gridap.Arrays.Table(data,cell_vertex_gids.ptrs), current-1) - end -end - -function generate_node_coordinates(::Type{Val{Dc}}, - cell_vertex_lids_nlvertices, - ptr_pXest_connectivity, - ptr_pXest, - ptr_pXest_ghost) where Dc - - - PXEST_CORNERS=2^Dc - pXest_ghost = ptr_pXest_ghost[] - pXest = ptr_pXest[] - - # Obtain ghost quadrants - if (Dc==2) - ptr_ghost_quadrants = Ptr{p4est_quadrant_t}(pXest_ghost.ghosts.array) - else - ptr_ghost_quadrants = Ptr{p8est_quadrant_t}(pXest_ghost.ghosts.array) - end - - tree_offsets = unsafe_wrap(Array, pXest_ghost.tree_offsets, pXest_ghost.num_trees+1) - dnode_coordinates=DistributedData(cell_vertex_lids_nlvertices) do part, (cell_vertex_lids, nl) - node_coordinates=Vector{Point{Dc,Float64}}(undef,nl) - current=1 - vxy=Vector{Cdouble}(undef,Dc) - pvxy=pointer(vxy,1) - cell_lids=cell_vertex_lids.data - for itree=1:pXest_ghost.num_trees - if (Dc==2) - tree = p4est_tree_array_index(pXest.trees, itree-1)[] - else - tree = p8est_tree_array_index(pXest.trees, itree-1)[] - end - for cell=1:tree.quadrants.elem_count - if (Dc==2) - quadrant=p4est_quadrant_array_index(tree.quadrants, cell-1)[] - else - quadrant=p8est_quadrant_array_index(tree.quadrants, cell-1)[] - end - for vertex=1:PXEST_CORNERS - if (Dc==2) - p4est_get_quadrant_vertex_coordinates(ptr_pXest_connectivity, - p4est_topidx_t(itree-1), - quadrant.x, - quadrant.y, - quadrant.level, - Cint(vertex-1), - pvxy) - else - p8est_get_quadrant_vertex_coordinates(ptr_pXest_connectivity, - p4est_topidx_t(itree-1), - quadrant.x, - quadrant.y, - quadrant.z, - quadrant.level, - Cint(vertex-1), - pvxy) - end - - node_coordinates[cell_lids[current]]=Point{Dc,Float64}(vxy...) - current=current+1 - end - end - end - - # Go over ghost cells - for i=1:pXest_ghost.num_trees - for j=tree_offsets[i]:tree_offsets[i+1]-1 - quadrant = ptr_ghost_quadrants[j+1] - for vertex=1:PXEST_CORNERS - if (Dc==2) - p4est_get_quadrant_vertex_coordinates(ptr_pXest_connectivity, - p4est_topidx_t(i-1), - quadrant.x, - quadrant.y, - quadrant.level, - Cint(vertex-1), - pvxy) - else - p8est_get_quadrant_vertex_coordinates(ptr_pXest_connectivity, - p4est_topidx_t(i-1), - quadrant.x, - quadrant.y, - quadrant.z, - quadrant.level, - Cint(vertex-1), - pvxy) - - end - # if (MPI.Comm_rank(comm.comm)==0) - # println(vxy) - # end - node_coordinates[cell_lids[current]]=Point{Dc,Float64}(vxy...) - current=current+1 - end - end - end - node_coordinates - end -end - -function generate_grid_and_topology(::Type{Val{Dc}}, - cell_vertex_lids_nlvertices, - node_coordinates) where {Dc} - DistributedData(cell_vertex_lids_nlvertices,node_coordinates) do part, (cell_vertex_lids, nl), node_coordinates - polytope= Dc==2 ? 
QUAD : HEX - scalar_reffe=Gridap.ReferenceFEs.ReferenceFE(polytope,Gridap.ReferenceFEs.lagrangian,Float64,1) - cell_types=collect(Fill(1,length(cell_vertex_lids))) - cell_reffes=[scalar_reffe] - grid = Gridap.Geometry.UnstructuredGrid(node_coordinates, - cell_vertex_lids, - cell_reffes, - cell_types, - Gridap.Geometry.NonOriented()) - - topology = Gridap.Geometry.UnstructuredGridTopology(node_coordinates, - cell_vertex_lids, - cell_types, - map(Gridap.ReferenceFEs.get_polytope, cell_reffes), - Gridap.Geometry.NonOriented()) - grid,topology - end -end - -function generate_face_labeling(comm, - cellindices, - coarse_discrete_model::DiscreteModel{Dc,Dp}, - grid_and_topology, - ptr_pXest, - ptr_pXest_ghost) where {Dc,Dp} - - pXest = ptr_pXest[] - pXest_ghost = ptr_pXest_ghost[] - - coarse_grid_topology = Gridap.Geometry.get_grid_topology(coarse_discrete_model) - coarse_grid_labeling = Gridap.Geometry.get_face_labeling(coarse_discrete_model) - - coarse_cell_vertices = Gridap.Geometry.get_faces(coarse_grid_topology,Dc,0) - if (Dc==3) - coarse_cell_edgets = Gridap.Geometry.get_faces(coarse_grid_topology,Dc,1) - end - coarse_cell_facets = Gridap.Geometry.get_faces(coarse_grid_topology,Dc,Dc-1) - - owned_trees_offset=Vector{Int}(undef,pXest_ghost.num_trees+1) - owned_trees_offset[1]=0 - for itree=1:pXest_ghost.num_trees - if Dc==2 - tree = p4est_tree_array_index(pXest.trees, itree-1)[] - else - tree = p8est_tree_array_index(pXest.trees, itree-1)[] - end - owned_trees_offset[itree+1]=owned_trees_offset[itree]+tree.quadrants.elem_count - end - - dfaces_to_entity=DistributedData(grid_and_topology) do part, (grid,topology) - # Iterate over corners - num_vertices=Gridap.Geometry.num_faces(topology,0) - vertex_to_entity=zeros(Int,num_vertices) - cell_vertices=Gridap.Geometry.get_faces(topology,Dc,0) - # if part==1 - # println(cell_vertices) - # end - - # Corner iterator callback - function jcorner_callback(pinfo :: Ptr{p8est_iter_corner_info_t}, - user_data :: Ptr{Cvoid}) - info=pinfo[] - if (Dc==2) - sides=Ptr{p4est_iter_corner_side_t}(info.sides.array) - else - sides=Ptr{p8est_iter_corner_side_t}(info.sides.array) - end - nsides=info.sides.elem_count - tree=sides[1].treeid+1 - # We are on the interior of a tree - data=sides[1] - if data.is_ghost==1 - ref_cell=pXest.local_num_quadrants+data.quadid+1 - else - ref_cell=owned_trees_offset[tree]+data.quadid+1 - end - corner=sides[1].corner+1 - ref_cornergid=cell_vertices[ref_cell][corner] - # if (MPI.Comm_rank(comm.comm)==0) - # println("XXX ", ref_cell, " ", ref_cornergid, " ", info.tree_boundary, " ", nsides, " ", corner) - # end - if (info.tree_boundary!=0 && nsides==1) - # The current corner is also a corner of the coarse mesh - coarse_cornergid=coarse_cell_vertices[tree][corner] - vertex_to_entity[ref_cornergid]= - coarse_grid_labeling.d_to_dface_to_entity[1][coarse_cornergid] - else - if vertex_to_entity[ref_cornergid]==0 - # We are on the interior of a tree (if we did not touch it yet) - vertex_to_entity[ref_cornergid]=coarse_grid_labeling.d_to_dface_to_entity[Dc+1][tree] - end - end - # if (MPI.Comm_rank(comm.comm)==0) - # println("YYY ", cell_vertices) - # end - nothing - end - - # C-callable face callback - ccorner_callback = @cfunction($jcorner_callback, - Cvoid, - (Ptr{p8est_iter_corner_info_t},Ptr{Cvoid})) - - cell_edgets=Gridap.Geometry.get_faces(topology,Dc,1) - num_edgets=Gridap.Geometry.num_faces(topology,1) - edget_to_entity=zeros(Int,num_edgets) - if (Dc==3) - # Edge iterator callback - function jedge_callback(pinfo :: 
Ptr{p8est_iter_edge_info_t}, - user_data :: Ptr{Cvoid}) - info=pinfo[] - sides=Ptr{p8est_iter_edge_side_t}(info.sides.array) - - nsides=info.sides.elem_count - tree=sides[1].treeid+1 - # We are on the interior of a tree - data=sides[1].is.full - if data.is_ghost==1 - ref_cell=pXest.local_num_quadrants+data.quadid+1 - else - ref_cell=owned_trees_offset[tree]+data.quadid+1 - end - edge=sides[1].edge+1 - - polytope=HEX - poly_faces=Gridap.ReferenceFEs.get_faces(polytope) - poly_edget_range=Gridap.ReferenceFEs.get_dimrange(polytope,1) - poly_first_edget=first(poly_edget_range) - poly_facet=poly_first_edget+edge-1 - - # if (MPI.Comm_rank(comm.comm)==0) - # coarse_edgetgid=coarse_cell_edgets[tree][edge] - # coarse_edgetgid_entity=coarse_grid_labeling.d_to_dface_to_entity[2][coarse_edgetgid] - # println("PPP ", ref_cell, " ", edge, " TB ", info.tree_boundary, " ", nsides, " ",coarse_edgetgid, " ", coarse_edgetgid_entity) - # end - if (info.tree_boundary!=0 && nsides==1) - coarse_edgetgid=coarse_cell_edgets[tree][edge] - coarse_edgetgid_entity=coarse_grid_labeling.d_to_dface_to_entity[2][coarse_edgetgid] - # We are on the boundary of coarse mesh or inter-octree boundary - for poly_incident_face in poly_faces[poly_facet] - if poly_incident_face == poly_facet - ref_edgetgid=cell_edgets[ref_cell][edge] - edget_to_entity[ref_edgetgid]=coarse_edgetgid_entity - else - ref_cornergid=cell_vertices[ref_cell][poly_incident_face] - # if (MPI.Comm_rank(comm.comm)==0) - # println("CCC ", ref_cell, " ", ref_cornergid, " ", info.tree_boundary, " ", nsides) - # end - vertex_to_entity[ref_cornergid]=coarse_edgetgid_entity - end - end - else - # We are on the interior of the domain if we did not touch the edge yet - ref_edgetgid=cell_edgets[ref_cell][edge] - if (edget_to_entity[ref_edgetgid]==0) - edget_to_entity[ref_edgetgid]=coarse_grid_labeling.d_to_dface_to_entity[Dc+1][tree] - end - end - nothing - end - - # C-callable edge callback - cedge_callback = @cfunction($jedge_callback, - Cvoid, - (Ptr{p8est_iter_edge_info_t},Ptr{Cvoid})) - end - - # Iterate over faces - num_faces=Gridap.Geometry.num_faces(topology,Dc-1) - facet_to_entity=zeros(Int,num_faces) - cell_facets=Gridap.Geometry.get_faces(topology,Dc,Dc-1) - - - # Face iterator callback - function jface_callback(pinfo :: Ptr{p8est_iter_face_info_t}, - user_data :: Ptr{Cvoid}) - info=pinfo[] - if Dc==2 - sides=Ptr{p4est_iter_face_side_t}(info.sides.array) - else - sides=Ptr{p8est_iter_face_side_t}(info.sides.array) - end - - nsides=info.sides.elem_count - tree=sides[1].treeid+1 - # We are on the interior of a tree - data=sides[1].is.full - if data.is_ghost==1 - ref_cell=pXest.local_num_quadrants+data.quadid+1 - else - ref_cell=owned_trees_offset[tree]+data.quadid+1 - end - face=sides[1].face+1 - if Dc==2 - gridap_facet=P4EST_2_GRIDAP_FACET_2D[face] - else - gridap_facet=P4EST_2_GRIDAP_FACET_3D[face] - end - - polytope= Dc==2 ? 
QUAD : HEX - poly_faces=Gridap.ReferenceFEs.get_faces(polytope) - poly_facet_range=Gridap.ReferenceFEs.get_dimrange(polytope,Dc-1) - poly_first_facet=first(poly_facet_range) - poly_facet=poly_first_facet+gridap_facet-1 - - # if (MPI.Comm_rank(comm.comm)==0) - # coarse_facetgid=coarse_cell_facets[tree][gridap_facet] - # coarse_facetgid_entity=coarse_grid_labeling.d_to_dface_to_entity[Dc][coarse_facetgid] - # println("PPP ", ref_cell, " ", gridap_facet, " ", info.tree_boundary, " ", nsides, " ",coarse_facetgid, " ", coarse_facetgid_entity) - # end - if (info.tree_boundary!=0 && nsides==1) - coarse_facetgid=coarse_cell_facets[tree][gridap_facet] - coarse_facetgid_entity=coarse_grid_labeling.d_to_dface_to_entity[Dc][coarse_facetgid] - # We are on the boundary of coarse mesh or inter-octree boundary - for poly_incident_face in poly_faces[poly_facet] - if poly_incident_face == poly_facet - ref_facetgid=cell_facets[ref_cell][gridap_facet] - facet_to_entity[ref_facetgid]=coarse_facetgid_entity - elseif (Dc==3 && poly_incident_face in Gridap.ReferenceFEs.get_dimrange(polytope,1)) - poly_first_edget=first(Gridap.ReferenceFEs.get_dimrange(polytope,1)) - edget=poly_incident_face-poly_first_edget+1 - ref_edgetgid=cell_edgets[ref_cell][edget] - edget_to_entity[ref_edgetgid]=coarse_facetgid_entity - else - ref_cornergid=cell_vertices[ref_cell][poly_incident_face] - # if (MPI.Comm_rank(comm.comm)==0) - # println("CCC ", ref_cell, " ", ref_cornergid, " ", info.tree_boundary, " ", nsides) - # end - vertex_to_entity[ref_cornergid]=coarse_facetgid_entity - end - end - else - # We are on the interior of the domain - ref_facegid=cell_facets[ref_cell][gridap_facet] - facet_to_entity[ref_facegid]=coarse_grid_labeling.d_to_dface_to_entity[Dc+1][tree] - end - nothing - end - - # C-callable face callback - cface_callback = @cfunction($jface_callback, - Cvoid, - (Ptr{p8est_iter_face_info_t},Ptr{Cvoid})) - - - # Iterate over cells - num_cells=Gridap.Geometry.num_faces(topology,Dc) - cell_to_entity=zeros(Int,num_cells) - - # Face iterator callback - function jcell_callback(pinfo :: Ptr{p8est_iter_volume_info_t}, - user_data :: Ptr{Cvoid}) - info=pinfo[] - tree=info.treeid+1 - cell=owned_trees_offset[tree]+info.quadid+1 - #println("XXX $(tree) $(cell)") - cell_to_entity[cell]=coarse_grid_labeling.d_to_dface_to_entity[Dc+1][tree] - nothing - end - ccell_callback = @cfunction($jcell_callback, - Cvoid, - (Ptr{p8est_iter_volume_info_t},Ptr{Cvoid})) - - if (Dc==2) - p4est_iterate(ptr_pXest,ptr_pXest_ghost,C_NULL,C_NULL,cface_callback,C_NULL) - p4est_iterate(ptr_pXest,ptr_pXest_ghost,C_NULL,ccell_callback,C_NULL,ccorner_callback) - else - p8est_iterate(ptr_pXest,ptr_pXest_ghost,C_NULL,C_NULL,cface_callback,C_NULL,C_NULL) - p8est_iterate(ptr_pXest,ptr_pXest_ghost,C_NULL,C_NULL,C_NULL,cedge_callback,C_NULL) - p8est_iterate(ptr_pXest,ptr_pXest_ghost,C_NULL,ccell_callback,C_NULL,C_NULL,ccorner_callback) - end - if (Dc==2) - vertex_to_entity, facet_to_entity, cell_to_entity - else - vertex_to_entity, edget_to_entity, facet_to_entity, cell_to_entity - end - end - - dvertex_to_entity = DistributedData(comm,dfaces_to_entity) do part, faces - faces[1] - end - if Dc==3 - dedget_to_entity = DistributedData(comm,dfaces_to_entity) do part, faces - faces[2] - end - end - dfacet_to_entity = DistributedData(comm,dfaces_to_entity) do part, faces - faces[Dc] - end - dcell_to_entity = DistributedData(comm,dfaces_to_entity) do part, faces - faces[Dc+1] - end - - function dcell_to_faces(grid_and_topology,cell_dim,face_dim) - 
DistributedData(get_comm(grid_and_topology),grid_and_topology) do part, (grid,topology) - Gridap.Geometry.get_faces(topology,cell_dim,face_dim) - end - end - - polytope = Dc==2 ? QUAD : HEX - - update_face_to_entity_with_ghost_data!(dvertex_to_entity, - cellindices, - num_faces(polytope,0), - dcell_to_faces(grid_and_topology,Dc,0)) - - if Dc==3 - update_face_to_entity_with_ghost_data!(dedget_to_entity, - cellindices, - num_faces(polytope,1), - dcell_to_faces(grid_and_topology,Dc,1)) - end - - update_face_to_entity_with_ghost_data!(dfacet_to_entity, - cellindices, - num_faces(polytope,Dc-1), - dcell_to_faces(grid_and_topology,Dc,Dc-1)) - - update_face_to_entity_with_ghost_data!(dcell_to_entity, - cellindices, - num_faces(polytope,Dc), - dcell_to_faces(grid_and_topology,Dc,Dc)) - - - if (Dc==2) - dfaces_to_entity=[dvertex_to_entity,dfacet_to_entity,dcell_to_entity] - else - dfaces_to_entity=[dvertex_to_entity,dedget_to_entity,dfacet_to_entity,dcell_to_entity] - end - - - dface_labeling = - DistributedData(comm,dfaces_to_entity...) do part, faces_to_entity... - - # if (part == 1) - # println("XXX", faces_to_entity[1]) - # println("XXX", faces_to_entity[2]) - # println("XXX", faces_to_entity[3]) - # #println("XXX", faces_to_entity[4]) - # end - - d_to_dface_to_entity = Vector{Vector{Int}}(undef,Dc+1) - d_to_dface_to_entity[1] = faces_to_entity[1] - if (Dc==3) - d_to_dface_to_entity[2] = faces_to_entity[2] - end - d_to_dface_to_entity[Dc] = faces_to_entity[Dc] - d_to_dface_to_entity[Dc+1] = faces_to_entity[Dc+1] - Gridap.Geometry.FaceLabeling(d_to_dface_to_entity, - coarse_grid_labeling.tag_to_entities, - coarse_grid_labeling.tag_to_name) - end - dface_labeling -end - - - -function p4est_connectivity_print(pconn::Ptr{p4est_connectivity_t}) - # struct p4est_connectivity - # num_vertices::p4est_topidx_t - # num_trees::p4est_topidx_t - # num_corners::p4est_topidx_t - # vertices::Ptr{Cdouble} - # tree_to_vertex::Ptr{p4est_topidx_t} - # tree_attr_bytes::Csize_t - # tree_to_attr::Cstring - # tree_to_tree::Ptr{p4est_topidx_t} - # tree_to_face::Ptr{Int8} - # tree_to_corner::Ptr{p4est_topidx_t} - # ctt_offset::Ptr{p4est_topidx_t} - # corner_to_tree::Ptr{p4est_topidx_t} - # corner_to_corner::Ptr{Int8} - # end - conn = pconn[] - println("num_vertices=$(conn.num_vertices)") - println("num_trees=$(conn.num_trees)") - println("num_corners=$(conn.num_corners)") - vertices=unsafe_wrap(Array, conn.vertices, conn.num_vertices*3) - println("vertices=$(vertices)") -end - -function init_cell_to_face_entity(part, - num_faces_x_cell, - cell_to_faces, - face_to_entity) - ptrs = Vector{eltype(num_faces_x_cell)}(undef,length(cell_to_faces) + 1) - ptrs[2:end] .= num_faces_x_cell - Gridap.Arrays.length_to_ptrs!(ptrs) - data = Vector{eltype(face_to_entity)}(undef, ptrs[end] - 1) - cell_to_face_entity = lazy_map(Broadcasting(Reindex(face_to_entity)),cell_to_faces) - k = 1 - for i = 1:length(cell_to_face_entity) - for j = 1:length(cell_to_face_entity[i]) - k=_fill_data!(data,cell_to_face_entity[i][j],k) - end - end - return Gridap.Arrays.Table(data, ptrs) -end - -function update_face_to_entity!(part,face_to_entity, cell_to_faces, cell_to_face_entity) - for cell in 1:length(cell_to_faces) - i_to_entity = cell_to_face_entity[cell] - pini = cell_to_faces.ptrs[cell] - pend = cell_to_faces.ptrs[cell + 1] - 1 - for (i, p) in enumerate(pini:pend) - lid = cell_to_faces.data[p] - face_to_entity[lid] = i_to_entity[i] - end - end -end - -function update_face_to_entity_with_ghost_data!( - 
face_to_entity,cell_gids,num_faces_x_cell,cell_to_faces) - - part_to_cell_to_entity = DistributedVector(init_cell_to_face_entity, - cell_gids, - num_faces_x_cell, - cell_to_faces, - face_to_entity) - exchange!(part_to_cell_to_entity) - do_on_parts(update_face_to_entity!, - get_comm(cell_gids), - face_to_entity, - cell_to_faces, - part_to_cell_to_entity) -end - - -function UniformlyRefinedForestOfOctreesDiscreteModel(comm::Communicator, - coarse_discrete_model::DiscreteModel{Dc,Dp}, - num_uniform_refinements::Int) where {Dc,Dp} - - ptr_pXest_connectivity=setup_pXest_connectivity(coarse_discrete_model) - - # Create a new forest - ptr_pXest = setup_pXest(Val{Dc},comm,ptr_pXest_connectivity,num_uniform_refinements) - - # Build the ghost layer - ptr_pXest_ghost=setup_pXest_ghost(Val{Dc},ptr_pXest) - - cellindices = setup_cell_indexset(Val{Dc},comm,ptr_pXest,ptr_pXest_ghost) - - ptr_pXest_lnodes=setup_pXest_lnodes(Val{Dc}, ptr_pXest, ptr_pXest_ghost) - - cell_vertex_gids=generate_cell_vertex_gids(ptr_pXest_lnodes,cellindices) - - cell_vertex_lids_nlvertices=generate_cell_vertex_lids_nlvertices(cell_vertex_gids) - - dnode_coordinates=generate_node_coordinates(Val{Dc}, - cell_vertex_lids_nlvertices, - ptr_pXest_connectivity, - ptr_pXest, - ptr_pXest_ghost) - - dgrid_and_topology=generate_grid_and_topology(Val{Dc}, - cell_vertex_lids_nlvertices,dnode_coordinates) - - - dface_labeling=generate_face_labeling(comm, - cellindices, - coarse_discrete_model, - dgrid_and_topology, - ptr_pXest, - ptr_pXest_ghost) - - ddiscretemodel= - DistributedData(comm,dgrid_and_topology,dface_labeling) do part, (grid,topology), face_labeling - Gridap.Geometry.UnstructuredDiscreteModel(grid,topology,face_labeling) - end - - # Write forest to VTK file - #p4est_vtk_write_file(unitsquare_forest, C_NULL, "my_step") - - if (Dc==2) - # Destroy lnodes - p4est_lnodes_destroy(ptr_pXest_lnodes) - # Destroy ghost - p4est_ghost_destroy(ptr_pXest_ghost) - # Destroy the forest - p4est_destroy(ptr_pXest) - # Destroy the connectivity - p4est_connectivity_destroy(ptr_pXest_connectivity) - else - # Destroy lnodes - p8est_lnodes_destroy(ptr_pXest_lnodes) - # Destroy ghost - p8est_ghost_destroy(ptr_pXest_ghost) - # Destroy the forest - p8est_destroy(ptr_pXest) - # Destroy the connectivity - p8est_connectivity_destroy(ptr_pXest_connectivity) - end - - DistributedDiscreteModel(ddiscretemodel,cellindices) - -end diff --git a/src/OLD/ZeroMeanDistributedFESpaces.jl b/src/OLD/ZeroMeanDistributedFESpaces.jl deleted file mode 100644 index ac317d86..00000000 --- a/src/OLD/ZeroMeanDistributedFESpaces.jl +++ /dev/null @@ -1,176 +0,0 @@ -struct ZeroMeanDistributedFESpace{V} <: DistributedFESpace - vector_type :: Type{V} - model :: DistributedDiscreteModel - spaces :: DistributedData{<:FESpace} - gids :: DistributedIndexSet - vol_i :: DistributedData{Vector{Float64}} - vol :: DistributedData{Float64} - end - -function get_vector_type(a::ZeroMeanDistributedFESpace) - a.vector_type -end - - # Constructors -function Gridap.TrialFESpace(V::ZeroMeanDistributedFESpace,args...) - spaces = DistributedData(V.spaces) do part, space - TrialFESpace(space,args...) 
- end - ZeroMeanDistributedFESpace(get_vector_type(V),V.model,spaces,V.gids,V.vol_i,V.vol) -end - -# U = TrialFESpace(f.space) -# ZeroMeanFESpace(U,f.vol_i,f.vol,f.constraint_style) - -function Gridap.FESpaces.FEFunction(dV::ZeroMeanDistributedFESpace,x) - dfree_vals = x[dV.gids] - funs = DistributedData(dV.spaces,dfree_vals) do part, V, free_vals - FEFunction(V,free_vals) - end - dfuns=_generate_zero_mean_funs(dV,funs) - DistributedFEFunction(dfuns,x,dV) -end - -function Gridap.FESpaces.EvaluationFunction(dV::ZeroMeanDistributedFESpace,x) - dfree_vals = x[dV.gids] - funs = DistributedData(dV.spaces,dfree_vals) do part, V, free_vals - Gridap.FESpaces.EvaluationFunction(V,free_vals) - end - dfuns=_generate_zero_mean_funs(dV,funs) - DistributedFEFunction(dfuns,x,dV) -end - -function _generate_zero_mean_funs(dV::ZeroMeanDistributedFESpace, funs) - dpartial_sums_fixed_val= - DistributedData(dV.spaces, funs, dV.vol_i, dV.vol) do part, V, fun, vol_i, vol - if (constant_fixed(V)) - fv=get_free_dof_values(fun) - dv=get_dirichlet_dof_values(get_fe_space(fun)) - c=Gridap.FESpaces._compute_new_fixedval(fv, - dv, - vol_i, - vol, - V.dof_to_fix) - else - fv=get_free_dof_values(fun) - c=-dot(fv,vol_i)/vol - end - c - end - - partial_sums_fixed_val=gather(dpartial_sums_fixed_val) - fixed_val=sum(partial_sums_fixed_val) - comm=get_comm(dV) - dfixed_val=scatter_value(comm,fixed_val) - - dfuns = DistributedData(dV.spaces, funs, dfixed_val) do part, V, fun, fixed_val - free_values=get_free_dof_values(fun) - fv=lazy_map(Broadcasting(+),free_values, Fill(fixed_val,length(free_values))) - if (constant_fixed(V)) - dirichlet_values=get_dirichlet_dof_values(get_fe_space(fun)) - dv = dirichlet_values .+ fixed_val - return FEFunction(V,fv,dv) - else - return FEFunction(V,fv) - end - end -end - -function constant_fixed(V::FESpaceWithConstantFixed{Gridap.FESpaces.FixConstant}) - true -end - -function constant_fixed(V::FESpaceWithConstantFixed) - false -end - -function ZeroMeanDistributedFESpace(::Type{V}; - model::DistributedDiscreteModel, - reffe, - degree, - kwargs...) where V - - function init_local_spaces(part,model) - lspace = FESpace(model,reffe;kwargs...) 
- end - - comm = get_comm(model) - spaces = DistributedData(init_local_spaces,comm,model.models) - - dof_lid_to_fix = _compute_dof_lid_to_fix(model,spaces) - - function init_local_spaces_with_constant_fixed(part,lspace,dof_lid_to_fix) - Gridap.FESpaces.FESpaceWithConstantFixed( - lspace, dof_lid_to_fix != -1, dof_lid_to_fix) - end - - spaces_dof_removed = DistributedData(init_local_spaces_with_constant_fixed, - comm, - spaces, - dof_lid_to_fix) - - dvol_i, dvol = _setup_vols(model,spaces,degree) - gids=_compute_distributed_index_set(model, spaces_dof_removed) - ZeroMeanDistributedFESpace(V,model,spaces_dof_removed,gids,dvol_i,dvol) -end - - -function _setup_vols(model,spaces,degree) - comm = get_comm(model) - dvol_i_and_vol = DistributedData(model,spaces) do part, (model,gids), lspace - trian = Triangulation(model) - owned_trian = remove_ghost_cells(trian, part, gids) - dΩ=Measure(owned_trian,degree) - vol_i = assemble_vector(v->∫(v)*dΩ,lspace) - vol = sum(vol_i) - (vol_i,vol) - end - dvol_i=DistributedData(dvol_i_and_vol) do part, vol_i_and_vol - vol_i_and_vol[1] - end - dvol_partial_sums=DistributedData(dvol_i_and_vol) do part, vol_i_and_vol - vol_i_and_vol[2] - end - partial_sums=gather(dvol_partial_sums) - if (i_am_master(comm)) - vol=sum(partial_sums) - end - dvol=scatter_value(comm,vol) - (dvol_i,dvol) -end - -function _compute_dof_lid_to_fix(model,spaces) - dof_lids_candidates=DistributedData(model.gids,spaces) do part, cell_gids, lspace - n_free_dofs = num_free_dofs(lspace) - lid_to_n_local_minus_ghost=zeros(Int32,n_free_dofs) - cell_dofs=get_cell_dof_ids(lspace) - cell_dofs_cache = array_cache(cell_dofs) - for cell in 1:length(cell_dofs) - current_cell_dofs = getindex!(cell_dofs_cache,cell_dofs,cell) - is_local = (cell_gids.lid_to_owner[cell] == part) - for lid in current_cell_dofs - if (lid>0) - if (is_local) - lid_to_n_local_minus_ghost[lid] += 1 - else - lid_to_n_local_minus_ghost[lid] -= 1 - end - end - end - end - min_lid_only_local_cells= - findfirst(x->(x>0),lid_to_n_local_minus_ghost) - min_lid_only_local_cells==nothing ? 
-1 : min_lid_only_local_cells - end - - comm = get_comm(model) - part_dof_lids_candidates = gather(dof_lids_candidates) - if i_am_master(comm) - first_proc = findfirst(x->(x!=-1), part_dof_lids_candidates) - for proc=first_proc+1:length(part_dof_lids_candidates) - part_dof_lids_candidates[proc]=-1 - end - end - - dof_lid_to_fix = scatter(comm,part_dof_lids_candidates) -end diff --git a/test/OLD/CartesianDiscreteModelsTests.jl b/test/OLD/CartesianDiscreteModelsTests.jl deleted file mode 100644 index 279f0413..00000000 --- a/test/OLD/CartesianDiscreteModelsTests.jl +++ /dev/null @@ -1,79 +0,0 @@ -module CartesianDiscreteModelsTests - -using Gridap -using GridapDistributed -using Test - -function test_face_labelings_consistency(comm,subdomains,D::Int) - domain = Int[] - for i = 1:D - push!(domain, 0) - push!(domain, 1) - end - domain = Tuple(domain) - cells = Gridap.Geometry.tfill(4, Val{D}()) - dmodel = CartesianDiscreteModel(comm, subdomains, domain, cells) - gmodel = CartesianDiscreteModel(domain, cells) - - result = DistributedData{Bool}( - comm, - dmodel.models, - dmodel.gids, - ) do part, model, gid - test_local_part_face_labelings_consistency(model, gid, gmodel) - end - all(gather(result)) -end - -function test_local_part_face_labelings_consistency(model::CartesianDiscreteModel{D},gid,gmodel) where {D} - local_topology = model.grid_topology - global_topology = gmodel.grid_topology - local_labelings = model.face_labeling - global_labelings = gmodel.face_labeling - l_d_to_dface_to_entity = local_labelings.d_to_dface_to_entity - g_d_to_dface_to_entity = global_labelings.d_to_dface_to_entity - #traverse local cells - for cell_lid=1:num_cells(model) - cell_gid=gid.lid_to_gid[cell_lid] - for d=0:D-1 - local_cell_to_faces = local_topology.n_m_to_nface_to_mfaces[D+1,d+1] - global_cell_to_faces = global_topology.n_m_to_nface_to_mfaces[D+1,d+1] - la = local_cell_to_faces.ptrs[cell_lid] - lb = local_cell_to_faces.ptrs[cell_lid+1] - ga = global_cell_to_faces.ptrs[cell_gid] - gb = global_cell_to_faces.ptrs[cell_gid+1] - @assert (lb-la)==(gb-ga) - for i=0:lb-la-1 - face_lid = local_cell_to_faces.data[la+i] - face_gid = global_cell_to_faces.data[ga+i] - local_entity = l_d_to_dface_to_entity[d+1][face_lid] - global_entity = g_d_to_dface_to_entity[d+1][face_gid] - if (local_entity != global_entity) - return false - end - end - end - end - return true -end - -function generate_subdomains(D::Int) - subdomains = Gridap.Geometry.tfill(2, Val{D}()) -end - -subdomains = (2,3) -SequentialCommunicator(subdomains) do comm - domain = (0,1,0,1) - cells = (10,10) - model = CartesianDiscreteModel(comm,subdomains,domain,cells) - writevtk(model,"model") -end - -for D in (1,2,3) - subdomains = generate_subdomains(D) - SequentialCommunicator(subdomains) do comm - @test test_face_labelings_consistency(comm,subdomains,D) - end -end - -end # module diff --git a/test/OLD/DistributedAssemblersTests.jl b/test/OLD/DistributedAssemblersTests.jl deleted file mode 100644 index aa72de9c..00000000 --- a/test/OLD/DistributedAssemblersTests.jl +++ /dev/null @@ -1,51 +0,0 @@ -module DistributedAssemblersTests - -using Gridap -using Gridap.FESpaces -using GridapDistributed -using Test -using SparseArrays - -const T = Float64 -const vector_type = Vector{T} -const matrix_type = SparseMatrixCSC{T,Int} - -function setup_model(comm) - domain = (0,1,0,1) - cells = (4,4) - model = CartesianDiscreteModel(comm,subdomains,domain,cells) -end - -function setup_fe_spaces(model) - reffe = ReferenceFE(lagrangian,Float64,1) - V = 
FESpace(vector_type,model=model,reffe=reffe) - U = TrialFESpace(V) - U,V -end - - -include("./DistributedAssemblersTestsHelpers.jl") - - -subdomains = (2,2) -SequentialCommunicator(subdomains) do comm - model=setup_model(comm) - U,V=setup_fe_spaces(model) - - das=OwnedAndGhostCellsAssemblyStrategy(V,MapDoFsTypeGlobal()) - test_assemble(comm,model,U,V,das) - test_allocate_assemble_add(comm,model,U,V,das) - - das=OwnedCellsAssemblyStrategy(V,MapDoFsTypeGlobal()) - test_assemble(comm,model,U,V,das) - test_allocate_assemble_add(comm,model,U,V,das) - - das=OwnedAndGhostCellsAssemblyStrategy(V,MapDoFsTypeProcLocal()) - test_assemble(comm,model,U,V,das) - - das=OwnedCellsAssemblyStrategy(V,MapDoFsTypeProcLocal()) - test_assemble(comm,model,U,V,das) -end - - -end # module diff --git a/test/OLD/DistributedAssemblersTestsHelpers.jl b/test/OLD/DistributedAssemblersTestsHelpers.jl deleted file mode 100644 index 502fcb0c..00000000 --- a/test/OLD/DistributedAssemblersTestsHelpers.jl +++ /dev/null @@ -1,75 +0,0 @@ -function test_assemble(comm,model,U,V,das) - assem = SparseMatrixAssembler(matrix_type, vector_type, U, V, das) - trian=Triangulation(das,model) - degree=2 - dΩ=Measure(trian,degree) - v=get_fe_basis(V) - u=get_trial_fe_basis(U) - veccont = ∫(1*v)dΩ - matcont = ∫(v*u)dΩ - tol=1.0e-14 - matdata=collect_cell_matrix(U,V,matcont) - A1 = assemble_matrix(assem,matdata) - vecdata=collect_cell_vector(V,veccont) - b1 = assemble_vector(assem,vecdata) - @test sum(b1)-1 < tol - if isa(comm,SequentialCommunicator) - @test ones(1,size(A1,1))*A1*ones(size(A1,2)) ≈ [1] - end - data = collect_cell_matrix_and_vector(U,V,matcont,veccont) - A2,b2 = assemble_matrix_and_vector(assem,data) - @test sum(b2)-1 < tol - if isa(comm,SequentialCommunicator) - @test ones(1,size(A2,1))*A2*ones(size(A2,2)) ≈ [1] - @test norm(A1-A2) ≈ 0.0 - end - @test norm(b1-b2) < tol -end - -function test_allocate_assemble_add(comm,model,U,V,das) - assem = SparseMatrixAssembler(matrix_type, vector_type, U, V, das) - trian=Triangulation(das,model) - degree=2 - dΩ=Measure(trian,degree) - v=get_fe_basis(V) - u=get_trial_fe_basis(U) - veccont = ∫(1*v)dΩ - matcont = ∫(v*u)dΩ - - tol=1.0e-14 - matdata=collect_cell_matrix(U,V,matcont) - A1 = assemble_matrix(assem,matdata) - vecdata=collect_cell_vector(V,veccont) - b1 = assemble_vector(assem,vecdata) - @test sum(b1) ≈ 1 - if isa(comm,SequentialCommunicator) - @test ones(1,size(A1,1))*A1*ones(size(A1,2)) ≈ [1] - end - A3 = allocate_matrix(assem,matdata) - if isa(comm,SequentialCommunicator) - @test findnz(A1)[1] == findnz(A3)[1] - @test findnz(A1)[2] == findnz(A3)[2] - end - assemble_matrix!(A3,assem,matdata) - if isa(comm,SequentialCommunicator) - @test norm(A1-A3) ≈ 0.0 - end - assemble_matrix_add!(A3,assem,matdata) - if isa(comm,SequentialCommunicator) - @test norm(2*A1-A3) ≈ 0.0 - end - b3 = allocate_vector(assem,vecdata) - assemble_vector!(b3,assem,vecdata) - @test norm(b3-b1) < tol - assemble_vector_add!(b3,assem,vecdata) - @test norm(b3-2*b1) < tol - data = collect_cell_matrix_and_vector(U,V,matcont,veccont) - A4, b4 = allocate_matrix_and_vector(assem,data) - assemble_matrix_and_vector!(A4,b4,assem,data) - if isa(comm,SequentialCommunicator) - @test norm(A1-A4) ≈ 0.0 - end - @test norm(b1-b4) < tol - assemble_matrix_and_vector_add!(A4,b4,assem,data) - @test norm(b4-2*b1) < tol -end diff --git a/test/OLD/DistributedDataTests.jl b/test/OLD/DistributedDataTests.jl deleted file mode 100644 index 10001036..00000000 --- a/test/OLD/DistributedDataTests.jl +++ /dev/null @@ -1,35 +0,0 @@ -module 
DistributedDataTests - -using GridapDistributed -using Test - -nparts = 4 -SequentialCommunicator(nparts) do comm - @test num_parts(comm) == nparts - @test num_workers(comm) == 1 - @test comm == comm - @test i_am_master(comm) - - a = DistributedData{Int}(comm) do part - 10*part - end - - b = DistributedData(a) do part, a - 20*part + a - end - - @test a.parts == 10*collect(1:nparts) - @test b.parts == 30*collect(1:nparts) - - do_on_parts(a,b) do part, a, b - @test a == 10*part - @test b == 30*part - end - - @test gather(b) == 30*collect(1:nparts) - - c = scatter_value(comm,2) - @test c.parts == fill(2,nparts) -end - -end # module diff --git a/test/OLD/DistributedFESpacesTests.jl b/test/OLD/DistributedFESpacesTests.jl deleted file mode 100644 index d55d466f..00000000 --- a/test/OLD/DistributedFESpacesTests.jl +++ /dev/null @@ -1,25 +0,0 @@ -module DistributedFESpacesTests - -using Gridap -using GridapDistributed -using Gridap.FESpaces -using Test - -subdomains = (2,2) -SequentialCommunicator(subdomains) do comm - domain = (0,1,0,1) - cells = (4,4) - model = CartesianDiscreteModel(comm,subdomains,domain,cells) - nsubdoms = prod(subdomains) - vector_type = Vector{Float64} - reffe = ReferenceFE(lagrangian,Float64,1) - V = FESpace(vector_type,model=model,reffe=reffe) - do_on_parts(V, model) do part,(space,gids), (model,_) - uh_gids = FEFunction(space,convert(Vector{Float64},gids.lid_to_gid)) - uh_owner = FEFunction(space,convert(Vector{Float64},gids.lid_to_owner)) - trian = Triangulation(model) - writevtk(trian,"results_$(part)",cellfields=["gid"=>uh_gids,"owner"=>uh_owner]) - end -end - -end # module diff --git a/test/OLD/DistributedIndexSetsTests.jl b/test/OLD/DistributedIndexSetsTests.jl deleted file mode 100644 index f015a6de..00000000 --- a/test/OLD/DistributedIndexSetsTests.jl +++ /dev/null @@ -1,25 +0,0 @@ -module DistributedIndexSets - -using GridapDistributed -using Test - -nparts = 2 -SequentialCommunicator(nparts) do comm - n = 10 - indices = DistributedIndexSet(comm,n) do part - if part == 1 - IndexSet(n,1:5,fill(part,5)) - else - IndexSet(n,6:10,fill(part,5)) - end - end - do_on_parts(indices) do part, indices - @test indices.lid_to_owner == fill(part,5) - @test indices.ngids == n - end - @test get_comm(indices) === comm - @test num_parts(indices) == nparts - @test num_gids(indices) == n -end - -end # module diff --git a/test/OLD/DistributedPLaplacianTests.jl b/test/OLD/DistributedPLaplacianTests.jl deleted file mode 100644 index bd2f0084..00000000 --- a/test/OLD/DistributedPLaplacianTests.jl +++ /dev/null @@ -1,70 +0,0 @@ -module DistributedPLaplacianTests - -using Test -using Gridap -using Gridap.FESpaces -using GridapDistributed -using SparseArrays -using LinearAlgebra: norm - -function run(comm,subdomains) - # Manufactured solution - u(x) = x[1] + x[2] + 1 - f(x) = - Δ(u)(x) - - p = 3 - flux(∇u) = norm(∇u)^(p-2) * ∇u - dflux(∇du,∇u) = (p-2)*norm(∇u)^(p-4)*(∇u⋅∇du)*∇u + norm(∇u)^(p-2)*∇du - - # Discretization - - domain = (0,1,0,1) - cells = (4,4) - model = CartesianDiscreteModel(comm,subdomains,domain,cells) - - # FE Spaces - order=3 - degree=2*order - reffe = ReferenceFE(lagrangian,Float64,order) - V = FESpace(model=model, - reffe=reffe, - conformity=:H1, - dirichlet_tags="boundary") - U = TrialFESpace(V,u) - - trian=Triangulation(model) - dΩ=Measure(trian,degree) - - function res(u,v) - ∫(∇(v)⋅(flux∘∇(u)))*dΩ - end - function jac(u,du,v) - ∫( ∇(v)⋅(dflux∘(∇(du),∇(u))) )*dΩ - end - - # Non linear solver - nls = NLSolver(show_trace=true, method=:newton) - solver = FESolver(nls) 
- - # FE solution - op = FEOperator(res,jac,U,V) - x = rand(num_free_dofs(U)) - uh0 = FEFunction(U,x) - uh, = solve!(uh0,solver,op) - - # Error norms and print solution - trian=Triangulation(OwnedCells,model) - dΩ=Measure(trian,2*order) - e = u-uh - e_l2 = sum(∫(e*e)dΩ) - tol = 1.0e-9 - println("$(e_l2) < $(tol)") - @test e_l2 < tol -end - -subdomains = (2,2) -SequentialCommunicator(subdomains) do comm - run(comm,subdomains) -end - -end # module diff --git a/test/OLD/DistributedPoissonDGTests.jl b/test/OLD/DistributedPoissonDGTests.jl deleted file mode 100644 index 278449bb..00000000 --- a/test/OLD/DistributedPoissonDGTests.jl +++ /dev/null @@ -1,103 +0,0 @@ -module DistributedPoissonDGTests - -using Test -using Gridap -using Gridap.FESpaces -using GridapDistributed -using SparseArrays - -# Select matrix and vector types for discrete problem -# Note that here we use serial vectors and matrices -# but the assembly is distributed -const T = Float64 -const vector_type = Vector{T} -const matrix_type = SparseMatrixCSC{T,Int} - -# Manufactured solution -function u(x) - x[1] * (x[1] - 1) * x[2] * (x[2] - 1) -end -f(x) = -Δ(u)(x) -ud(x) = zero(x[1]) - -# Model -const domain = (0, 1, 0, 1) -const cells = (4, 4) -const h = (domain[2] - domain[1]) / cells[1] - -# FE Spaces -const order = 2 -const γ = 10 -const degree = 2*order - -function setup_model(comm) - # Discretization - subdomains = (2, 2) - model = CartesianDiscreteModel(comm, subdomains, domain, cells) -end - -function setup_fe_spaces(model) - reffe = ReferenceFE(lagrangian,Float64,order) - V = FESpace(vector_type, - model=model, - reffe=reffe, - conformity=:L2) - U = TrialFESpace(V) - U,V -end - -function run(comm,model,U,V,strategy) - trian=Triangulation(strategy,model) - dΩ=Measure(trian,degree) - - btrian=BoundaryTriangulation(strategy,model) - dΓ = Measure(btrian,degree) - - strian=SkeletonTriangulation(strategy,model) - dΛ = Measure(strian,degree) - - n_Γ = get_normal_vector(btrian) - n_Λ = get_normal_vector(strian) - - function a(u,v) - ∫( ∇(v)⋅∇(u) )*dΩ + - ∫( (γ/h)*v*u - v*(n_Γ⋅∇(u)) - (n_Γ⋅∇(v))*u )*dΓ + - ∫( (γ/h)*jump(v*n_Λ)⋅jump(u*n_Λ) - jump(v*n_Λ)⋅mean(∇(u)) - mean(∇(v))⋅jump(u*n_Λ) )*dΛ - end - function l(v) - ∫( v*f )*dΩ + - ∫( (γ/h)*v*ud - (n_Γ⋅∇(v))*ud )*dΓ - end - - # Assembly - assem = SparseMatrixAssembler(matrix_type, vector_type, U, V, strategy) - op = AffineFEOperator(a,l,U,V,assem) - - # FE solution - uh = solve(op) - - trian = Triangulation(OwnedCells,model) - dΩ = Measure(trian,degree) - e = u - uh - l2(u) = ∫( u⊙u )*dΩ - h1(u) = ∫( u⊙u + ∇(u)⊙∇(u) )*dΩ - e_l2 = sum(l2(e)) - e_h1 = sum(h1(e)) - tol = 1.0e-9 - println("$(e_l2) < $(tol)") - println("$(e_h1) < $(tol)") - @test e_l2 < tol - @test e_h1 < tol -end - -subdomains = (2,2) -SequentialCommunicator(subdomains) do comm - model=setup_model(comm) - U,V=setup_fe_spaces(model) - strategy = OwnedAndGhostCellsAssemblyStrategy(V,MapDoFsTypeGlobal()) - run(comm,model,U,V,strategy) - strategy = OwnedCellsAssemblyStrategy(V,MapDoFsTypeGlobal()) - run(comm,model,U,V,strategy) -end - -end # module# diff --git a/test/OLD/DistributedPoissonTests.jl b/test/OLD/DistributedPoissonTests.jl deleted file mode 100644 index 539564ec..00000000 --- a/test/OLD/DistributedPoissonTests.jl +++ /dev/null @@ -1,56 +0,0 @@ -module DistributedPoissonTests - -using Test -using Gridap -using Gridap.FESpaces -using GridapDistributed -using SparseArrays - -function run(comm,subdomains) - # Manufactured solution - u(x) = x[1] + x[2] - f(x) = -Δ(u)(x) - - # Discretization - domain = (0, 1, 0, 1) - 
cells = (4, 4) - model = CartesianDiscreteModel(comm, subdomains, domain, cells) - - # FE Spaces - order=1 - reffe = ReferenceFE(lagrangian,Float64,order) - V = FESpace(model=model, - reffe=reffe, - conformity=:H1, - dirichlet_tags="boundary") - U = TrialFESpace(V, u) - - trian=Triangulation(model) - dΩ=Measure(trian,2*(order+1)) - - function a(u,v) - ∫(∇(v)⋅∇(u))dΩ - end - function l(v) - ∫(v*f)dΩ - end - - # FE solution - op = AffineFEOperator(a,l,U,V) - uh = solve(op) - - trian=Triangulation(OwnedCells,model) - dΩ=Measure(trian,2*order) - e = u-uh - e_l2 = sum(∫(e*e)dΩ) - tol = 1.0e-9 - println("$(e_l2) < $(tol)") - @test e_l2 < tol -end - -subdomains = (2,2) -SequentialCommunicator(subdomains) do comm - run(comm,subdomains) -end - -end # module diff --git a/test/OLD/DistributedStokesTests.jl b/test/OLD/DistributedStokesTests.jl deleted file mode 100644 index a5d7a422..00000000 --- a/test/OLD/DistributedStokesTests.jl +++ /dev/null @@ -1,97 +0,0 @@ -module DistributedStokesTests - -using Test -using Gridap -using Gridap.FESpaces -using GridapDistributed -using SparseArrays - -function Gridap.FESpaces.num_dirichlet_dofs(f::Gridap.MultiField.MultiFieldFESpace) - result=0 - for space in f.spaces - result=result+num_dirichlet_dofs(space) - end - result -end - -function run(comm,subdomains) - # Manufactured solution - ux(x)=2*x[1]*x[2] - uy(x)=-x[2]^2 - u(x)=VectorValue(ux(x),uy(x)) - f(x)=VectorValue(1.0,3.0) - p(x)=x[1]+x[2] - sx(x)=-2.0*x[1] - sy(x)=x[1]+3*x[2] - s(x)=VectorValue(sx(x),sy(x)) - - # Discretization - domain = (0, 1, 0, 1) - cells = (4, 4) - model = CartesianDiscreteModel(comm, subdomains, domain, cells) - - labels = get_face_labeling(model) - add_tag_from_tags!(labels,"diri0",[1,2,3,4,6,7,8]) - add_tag_from_tags!(labels,"diri1",[1,2,3,4,6,7,8]) - add_tag_from_tags!(labels,"neumann",[5]) - - # FE Spaces - order = 2 - reffeᵤ = ReferenceFE(lagrangian,VectorValue{2,Float64},order) - reffeₚ = Gridap.ReferenceFEs.LagrangianRefFE(Float64,QUAD,order-1;space=:P) - V = FESpace( - model=model, - reffe=reffeᵤ, - conformity=:H1, - dirichlet_tags=["diri0","diri1"], - ) - - Q = FESpace( - model=model, - reffe=reffeₚ, - conformity=:L2) - #constraint=:zeromean) - - Y=MultiFieldFESpace(model,[V,Q]) - - U=TrialFESpace(V,[u,u]) - P=TrialFESpace(Q) - X=MultiFieldFESpace(Y,[U,P]) - - trian=Triangulation(model) - degree = 2*(order+1) - dΩ = Measure(trian,degree) - - btrian=BoundaryTriangulation(model;tags="neumann") - dΓ = Measure(btrian,degree) - - function a((u,p),(v,q)) - ∫( ∇(v)⊙∇(u) - (∇⋅v)*p + q*(∇⋅u) )dΩ - end - - function l((v,q)) - ∫( v⋅f )dΩ + ∫( v⋅s )dΓ - end - - # # # FE solution - op = AffineFEOperator(a,l,X,Y) - xh = solve(op) - uh,_ = xh - - trian=Triangulation(OwnedCells,model) - dΩ=Measure(trian,2*order) - e = u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 - println("$(e_l2) < $(tol)") - @test e_l2 < tol -end - -subdomains = (2,2) -function f(comm) - run(comm,subdomains) -end - -SequentialCommunicator(f,subdomains) - -end # module diff --git a/test/OLD/DistributedVectorsTests.jl b/test/OLD/DistributedVectorsTests.jl deleted file mode 100644 index 421ba75d..00000000 --- a/test/OLD/DistributedVectorsTests.jl +++ /dev/null @@ -1,41 +0,0 @@ -module DistributedVectorsTests - -using GridapDistributed -using Test - -nparts = 2 -SequentialCommunicator(nparts) do comm - n = 10 - - indices = DistributedIndexSet(comm,n) do part - lid_to_owner = fill(part,6) - if part == 1 - lid_to_gid = 1:6 - lid_to_owner[end] = 2 - else - lid_to_gid = 5:10 - lid_to_owner[1] = 1 - end - 
IndexSet(n,lid_to_gid,lid_to_owner) - end - - a = DistributedVector(indices) do part - fill(10*part,6) - end - exchange!(a) - @test a.parts == [[10, 10, 10, 10, 10, 20], [10, 20, 20, 20, 20, 20]] - - b = DistributedVector(indices,a) do part, a - a .+ part - end - c = b[indices] - @test c.parts == [[11, 11, 11, 11, 11, 22], [11, 22, 22, 22, 22, 22]] - - v = rand(n) - w = v[indices] - do_on_parts(w,indices) do part, w, indices - @test w == v[indices.lid_to_gid] - end -end - -end # module diff --git a/test/OLD/MPIPETScTests/MPIPETScCommunicatorsTests.jl b/test/OLD/MPIPETScTests/MPIPETScCommunicatorsTests.jl deleted file mode 100644 index b5202e38..00000000 --- a/test/OLD/MPIPETScTests/MPIPETScCommunicatorsTests.jl +++ /dev/null @@ -1,8 +0,0 @@ -using MPI -using GridapDistributed - -MPIPETScCommunicator() do comm - println("$(MPI.Comm_rank(MPI.COMM_WORLD))") - println("I am processor $(get_part(comm)) out of $(num_parts(comm))") - println("Am I ($(get_part(comm))) master? -> $(i_am_master(comm))") -end \ No newline at end of file diff --git a/test/OLD/MPIPETScTests/MPIPETScDistributedAssemblersTests.jl b/test/OLD/MPIPETScTests/MPIPETScDistributedAssemblersTests.jl deleted file mode 100644 index 729c21fd..00000000 --- a/test/OLD/MPIPETScTests/MPIPETScDistributedAssemblersTests.jl +++ /dev/null @@ -1,45 +0,0 @@ -module DistributedAssemblersTests - -using Gridap -using Gridap.FESpaces -using GridapDistributed -using GridapDistributedPETScWrappers -using Test -using SparseArrays - -const T = Float64 -const vector_type = GridapDistributedPETScWrappers.Vec{T} -const matrix_type = GridapDistributedPETScWrappers.Mat{T} - -function setup_model(comm) - domain = (0,1,0,1) - cells = (4,4) - subdomains = (2,2) - model = CartesianDiscreteModel(comm,subdomains,domain,cells) -end - -function setup_fe_spaces(model) - reffe = ReferenceFE(lagrangian,Float64,1) - V = FESpace(vector_type,model=model,reffe=reffe) - U = TrialFESpace(V) - U,V -end - -include("../DistributedAssemblersTestsHelpers.jl") - -MPIPETScCommunicator() do comm - model=setup_model(comm) - U,V=setup_fe_spaces(model) - - das=OwnedAndGhostCellsAssemblyStrategy(V,MapDoFsTypeProcLocal()) - test_assemble(comm,model,U,V,das) - test_allocate_assemble_add(comm,model,U,V,das) - - das=OwnedCellsAssemblyStrategy(V,MapDoFsTypeProcLocal()) - test_assemble(comm,model,U,V,das) - test_allocate_assemble_add(comm,model,U,V,das) - -end - - -end # module diff --git a/test/OLD/MPIPETScTests/MPIPETScDistributedIndexSetsTests.jl b/test/OLD/MPIPETScTests/MPIPETScDistributedIndexSetsTests.jl deleted file mode 100644 index 32100a74..00000000 --- a/test/OLD/MPIPETScTests/MPIPETScDistributedIndexSetsTests.jl +++ /dev/null @@ -1,32 +0,0 @@ -module MPIPETScDistributedIndexSets - -using GridapDistributed -using Test -using MPI - -MPIPETScCommunicator() do comm - @test num_parts(comm) == 2 - - n = 10 - indices = DistributedIndexSet(comm,n) do part - if part == 1 - IndexSet(n,1:5,[2,1,1,1,2]) - else - IndexSet(n,[2,6,5,7,8,9,10,1],[1,2,2,2,2,2,2,2]) - end - end - - #do_on_parts(indices) do part, indices - # @test indices.lid_to_owner == fill(part,5) - # @test indices.ngids == n - #end - - println("$(MPI.Comm_rank(comm.comm)): lid_to_gid_petsc=$(indices.lid_to_gid_petsc) - petsc_to_app_locidx=$(indices.lid_to_gid_petsc) - app_to_petsc_locidx=$(indices.lid_to_gid_petsc)") - #@test get_comm(indices) === comm - #@test num_parts(indices) == nparts - #@test num_gids(indices) == n -end - -end # module diff --git a/test/OLD/MPIPETScTests/MPIPETScDistributedPLaplacianTests.jl 
b/test/OLD/MPIPETScTests/MPIPETScDistributedPLaplacianTests.jl deleted file mode 100644 index 63c0061a..00000000 --- a/test/OLD/MPIPETScTests/MPIPETScDistributedPLaplacianTests.jl +++ /dev/null @@ -1,157 +0,0 @@ -module MPIPETScDistributedPLaplacianTests - -using Test -using Gridap -using Gridap.FESpaces -using GridapDistributed -using SparseArrays -using LinearAlgebra: norm -using LinearAlgebra -using GridapDistributedPETScWrappers -using NLSolversBase -using NLsolve -using Distances - - -function NLSolversBase.x_of_nans(x::GridapDistributedPETScWrappers.Vec{Float64}, Tf=eltype(x)) - similar(x) -end - -function Base.copyto!(dest::GridapDistributedPETScWrappers.Vec{Float64}, src::GridapDistributedPETScWrappers.Vec{Float64}) - copy!(dest,src) -end - -function LinearAlgebra.mul!(Y::GridapDistributedPETScWrappers.Vec{Float64},A::GridapDistributedPETScWrappers.Mat{Float64},B::GridapDistributedPETScWrappers.Vec{Float64}) - GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.MatMult(A.p,B.p,Y.p)) -end - -function LinearAlgebra.rmul!(Y::GridapDistributedPETScWrappers.Vec{Float64},A::Number) - GridapDistributedPETScWrappers.scale!(Y,A) -end - -struct PETScVecStyle <: Broadcast.BroadcastStyle end -Base.BroadcastStyle(::Type{GridapDistributedPETScWrappers.Vec{Float64}}) = PETScVecStyle() - -"`v = find_petscvec(bc)` returns the first GridapDistributedPETScWrappers.Vec among the arguments." -find_petscvec(bc::Base.Broadcast.Broadcasted{PETScVecStyle}) = find_petscvec(bc.args) -find_petscvec(args::Tuple) = find_petscvec(find_petscvec(args[1]), Base.tail(args)) -find_petscvec(x) = x -find_petscvec(::Tuple{}) = nothing -find_petscvec(a::GridapDistributedPETScWrappers.Vec{Float64}, rest) = a -find_petscvec(::Any, rest) = find_petscvec(rest) - - -function Base.similar(bc::Base.Broadcast.Broadcasted{PETScVecStyle}, ::Type{ElType}) where {ElType} - v=find_petscvec(bc.args) - similar(v) -end - -function Base.copyto!(v::GridapDistributedPETScWrappers.Vec{Float64}, bc::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}}) - @assert bc.f == identity - @assert length(bc.args)==1 - @assert bc.args[1] isa Number - fill!(v,Float64(bc.args[1])) -end - -function Base.copyto!(v::GridapDistributedPETScWrappers.Vec{Float64}, bc::Base.Broadcast.Broadcasted{PETScVecStyle}) - if bc.f == identity - @assert length(bc.args)==1 - @assert bc.args[1] isa GridapDistributedPETScWrappers.Vec{Float64} - copy!(v,bc.args[1]) - elseif bc.f == + - @assert bc.f == + - @assert bc.args[1] isa GridapDistributedPETScWrappers.Vec{Float64} - @assert bc.args[2] isa GridapDistributedPETScWrappers.Vec{Float64} - GridapDistributedPETScWrappers.C.chk(GridapDistributedPETScWrappers.C.VecWAXPY(v.p, 1.0, bc.args[1].p, bc.args[2].p)) - else - Gridap.Helpers.@notimplemented - end -end - -function Base.maximum(::typeof(Base.abs), x::GridapDistributedPETScWrappers.Vec{Float64}) - norm(x,Inf) -end - -function Base.any(::typeof(Base.isnan),x::GridapDistributedPETScWrappers.Vec{Float64}) - false -end - -function NLsolve.check_isfinite(x::GridapDistributedPETScWrappers.Vec{Float64}) - true -end - -function (dist::Distances.SqEuclidean)(a::GridapDistributedPETScWrappers.Vec{Float64}, b::GridapDistributedPETScWrappers.Vec{Float64}) - norm(a-b,2)^2 -end - -function run(comm) - # Manufactured solution - u(x) = x[1] + x[2] + 1 - f(x) = - Δ(u)(x) - - p = 3 - flux(∇u) = norm(∇u)^(p-2) * ∇u - dflux(∇du,∇u) = (p-2)*norm(∇u)^(p-4)*(∇u⋅∇du)*∇u + norm(∇u)^(p-2)*∇du - - # Discretization - subdomains = (2,2) - domain = (0,1,0,1) - cells = (4,4) 
- model = CartesianDiscreteModel(comm,subdomains,domain,cells) - - # FE Spaces - order=3 - degree=2*order - reffe = ReferenceFE(lagrangian,Float64,order) - V = FESpace(model=model, - reffe=reffe, - conformity=:H1, - dirichlet_tags="boundary") - U = TrialFESpace(V,u) - - trian=Triangulation(model) - dΩ=Measure(trian,degree) - - function res(u,v) - ∫(∇(v)⋅(flux∘∇(u)))*dΩ - end - - function jac(u,du,v) - ∫( ∇(v)⋅(dflux∘(∇(du),∇(u))) )*dΩ - end - - # Non linear solver - ls = PETScLinearSolver( - Float64; - ksp_type = "cg", - ksp_rtol = 1.0e-12, - ksp_atol = 0.0, - #ksp_monitor = "", - pc_type = "jacobi") - nls = NLSolver(ls; show_trace=true, method=:newton) - solver = FESolver(nls) - - # FE solution - op = FEOperator(res,jac,U,V) - x = zero_free_values(U) - x .= 1.0 - uh0 = FEFunction(U,x) - uh, = Gridap.solve!(uh0,solver,op) - - # Error norms and print solution - trian=Triangulation(OwnedCells,model) - dΩ=Measure(trian,2*order) - e = u-uh - e_l2 = sum(∫(e*e)dΩ) - tol = 1.0e-9 - @test e_l2 < tol - if (i_am_master(comm)) - println("$(e_l2) < $(tol)") - end -end - -MPIPETScCommunicator() do comm - run(comm) -end - -end # module diff --git a/test/OLD/MPIPETScTests/MPIPETScDistributedPoissonDGTests.jl b/test/OLD/MPIPETScTests/MPIPETScDistributedPoissonDGTests.jl deleted file mode 100644 index f74960e0..00000000 --- a/test/OLD/MPIPETScTests/MPIPETScDistributedPoissonDGTests.jl +++ /dev/null @@ -1,114 +0,0 @@ -module MPIPETScDistributedPoissonDGTests - -using Test -using Gridap -using Gridap.FESpaces -using GridapDistributed -using SparseArrays -using GridapDistributedPETScWrappers - -# Select matrix and vector types for discrete problem -# Note that here we use serial vectors and matrices -# but the assembly is distributed -const T = Float64 -const vector_type = GridapDistributedPETScWrappers.Vec{T} -const matrix_type = GridapDistributedPETScWrappers.Mat{T} - -# Manufactured solution -function u(x) - x[1] * (x[1] - 1) * x[2] * (x[2] - 1) -end -f(x) = -Δ(u)(x) -ud(x) = zero(x[1]) - -# Model -const domain = (0, 1, 0, 1) -const cells = (4, 4) -const h = (domain[2] - domain[1]) / cells[1] - -# FE Spaces -const order = 2 -const γ = 10 -const degree = 2*order - -function setup_model(comm) - # Discretization - subdomains = (2, 2) - model = CartesianDiscreteModel(comm, subdomains, domain, cells) -end - -function setup_fe_spaces(model) - reffe = ReferenceFE(lagrangian,Float64,order) - V = FESpace(vector_type, - model=model, - reffe=reffe, - conformity=:L2) - U = TrialFESpace(V) - U,V -end - -function run(comm,model,U,V,strategy) - trian=Triangulation(strategy,model) - dΩ=Measure(trian,degree) - - btrian=BoundaryTriangulation(strategy,model) - dΓ = Measure(btrian,degree) - - strian=SkeletonTriangulation(strategy,model) - dΛ = Measure(strian,degree) - - n_Γ = get_normal_vector(btrian) - n_Λ = get_normal_vector(strian) - - function a(u,v) - ∫( ∇(v)⋅∇(u) )*dΩ + - ∫( (γ/h)*v*u - v*(n_Γ⋅∇(u)) - (n_Γ⋅∇(v))*u )*dΓ + - ∫( (γ/h)*jump(v*n_Λ)⋅jump(u*n_Λ) - jump(v*n_Λ)⋅mean(∇(u)) - mean(∇(v))⋅jump(u*n_Λ) )*dΛ - end - function l(v) - ∫( v*f )*dΩ + - ∫( (γ/h)*v*ud - (n_Γ⋅∇(v))*ud )*dΓ - end - - # Assembly - assem = SparseMatrixAssembler(matrix_type, vector_type, U, V, strategy) - op = AffineFEOperator(a,l,U,V,assem) - - # FE solution - ls = PETScLinearSolver( - Float64; - ksp_type = "cg", - ksp_rtol = 1.0e-06, - ksp_atol = 0.0, - ksp_monitor = "", - pc_type = "jacobi", - ) - fels = LinearFESolver(ls) - uh = solve(fels, op) - - trian = Triangulation(OwnedCells,model) - dΩ = Measure(trian,degree) - e = u - uh - l2(u) = ∫( 
u⊙u )*dΩ - h1(u) = ∫( u⊙u + ∇(u)⊙∇(u) )*dΩ - e_l2 = sum(l2(e)) - e_h1 = sum(h1(e)) - tol = 1.0e-9 - @test e_l2 < tol - @test e_h1 < tol - if (i_am_master(comm)) - println("$(e_l2) < $(tol)") - println("$(e_h1) < $(tol)") - end -end - -MPIPETScCommunicator() do comm - model=setup_model(comm) - U,V=setup_fe_spaces(model) - strategy = OwnedAndGhostCellsAssemblyStrategy(V,MapDoFsTypeProcLocal()) - run(comm,model,U,V,strategy) - strategy = OwnedCellsAssemblyStrategy(V,MapDoFsTypeProcLocal()) - run(comm,model,U,V,strategy) -end - -end # module# diff --git a/test/OLD/MPIPETScTests/MPIPETScDistributedPoissonTests.jl b/test/OLD/MPIPETScTests/MPIPETScDistributedPoissonTests.jl deleted file mode 100644 index 4b026496..00000000 --- a/test/OLD/MPIPETScTests/MPIPETScDistributedPoissonTests.jl +++ /dev/null @@ -1,94 +0,0 @@ -module MPIPETScDistributedPoissonTests - -using Test -using Gridap -using Gridap.FESpaces -using GridapDistributed -using SparseArrays - -using ArgParse - -function parse_commandline() - s = ArgParseSettings() - @add_arg_table! s begin - "--subdomains", "-s" - help = "Tuple with the # of subdomains per Cartesian direction" - arg_type = Int64 - default=[1,1] - nargs='+' - "--partition", "-p" - help = "Tuple with the # of cells per Cartesian direction" - arg_type = Int64 - default=[4,4] - nargs='+' - end - return parse_args(s) -end - - -function run(comm, - subdomains=(2, 2), - cells=(4, 4), - domain = (0, 1, 0, 1)) - - # Manufactured solution - u(x) = x[1] + x[2] - f(x) = -Δ(u)(x) - - # Discretization - domain = (0, 1, 0, 1) - cells = (4, 4) - model = CartesianDiscreteModel(comm, subdomains, domain, cells) - - # FE Spaces - order=1 - reffe = ReferenceFE(lagrangian,Float64,order) - V = FESpace(model=model, - reffe=reffe, - conformity=:H1, - dirichlet_tags="boundary") - U = TrialFESpace(V, u) - - trian=Triangulation(model) - dΩ=Measure(trian,2*(order+1)) - - function a(u,v) - ∫(∇(v)⋅∇(u))dΩ - end - function l(v) - ∫(v*f)dΩ - end - - # FE solution - op = AffineFEOperator(a,l,U,V) - ls = PETScLinearSolver( - Float64; - ksp_type = "cg", - ksp_rtol = 1.0e-06, - ksp_atol = 0.0, - ksp_monitor = "", - pc_type = "jacobi", - ) - fels = LinearFESolver(ls) - uh = solve(fels, op) - - # Error norms and print solution - trian=Triangulation(OwnedCells,model) - dΩ=Measure(trian,2*order) - e = u-uh - e_l2 = sum(∫(e*e)dΩ) - tol = 1.0e-9 - @test e_l2 < tol - if (i_am_master(comm)) println("$(e_l2) < $(tol)\n") end -end - - -parsed_args = parse_commandline() -subdomains = Tuple(parsed_args["subdomains"]) -partition = Tuple(parsed_args["partition"]) - -MPIPETScCommunicator() do comm - run(comm, subdomains,partition) -end - -end # module diff --git a/test/OLD/MPIPETScTests/MPIPETScDistributedStokesTests.jl b/test/OLD/MPIPETScTests/MPIPETScDistributedStokesTests.jl deleted file mode 100644 index 28ff1697..00000000 --- a/test/OLD/MPIPETScTests/MPIPETScDistributedStokesTests.jl +++ /dev/null @@ -1,108 +0,0 @@ -module MPIPETScDistributedStokesTests - -using Test -using Gridap -using Gridap.FESpaces -using GridapDistributed -using SparseArrays - - -function Gridap.FESpaces.num_dirichlet_dofs(f::Gridap.MultiField.MultiFieldFESpace) - result=0 - for space in f.spaces - result=result+num_dirichlet_dofs(space) - end - result -end - -function run(comm) - - # Manufactured solution - ux(x)=2*x[1]*x[2] - uy(x)=-x[2]^2 - u(x)=VectorValue(ux(x),uy(x)) - f(x)=VectorValue(1.0,3.0) - p(x)=x[1]+x[2] - sx(x)=-2.0*x[1] - sy(x)=x[1]+3*x[2] - s(x)=VectorValue(sx(x),sy(x)) - - # Discretization - domain = (0, 1, 0, 1) - cells = (4, 
4) - subdomains = (2,2) - model = CartesianDiscreteModel(comm, subdomains, domain, cells) - - labels = get_face_labeling(model) - add_tag_from_tags!(labels,"diri0",[1,2,3,4,6,7,8]) - add_tag_from_tags!(labels,"diri1",[1,2,3,4,6,7,8]) - add_tag_from_tags!(labels,"neumann",[5]) - - # FE Spaces - order = 2 - reffeᵤ = ReferenceFE(lagrangian,VectorValue{2,Float64},order) - reffeₚ = Gridap.ReferenceFEs.LagrangianRefFE(Float64,QUAD,order-1;space=:P) - V = FESpace( - model=model, - reffe=reffeᵤ, - conformity=:H1, - dirichlet_tags=["diri0","diri1"], - ) - - Q = FESpace( - model=model, - reffe=reffeₚ, - conformity=:L2) - #constraint=:zeromean) - - Y=MultiFieldFESpace(model,[V,Q]) - - U=TrialFESpace(V,[u,u]) - P=TrialFESpace(Q) - X=MultiFieldFESpace(Y,[U,P]) - - trian=Triangulation(model) - degree = 2*(order+1) - dΩ = Measure(trian,degree) - - btrian=BoundaryTriangulation(model;tags="neumann") - dΓ = Measure(btrian,degree) - - function a((u,p),(v,q)) - ∫( ∇(v)⊙∇(u) - (∇⋅v)*p + q*(∇⋅u) )dΩ - end - - function l((v,q)) - ∫( v⋅f )dΩ + ∫( v⋅s )dΓ - end - - # FE solution - op = AffineFEOperator(a,l,X,Y) - ls = PETScLinearSolver( - Float64; - ksp_type = "gmres", - ksp_rtol = 1.0e-06, - ksp_atol = 0.0, - ksp_monitor = "", - pc_type = "none", - ) - fels = LinearFESolver(ls) - xh = solve(fels, op) - - trian=Triangulation(OwnedCells,model) - dΩ=Measure(trian,2*order) - uh,_ = xh - e = u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 - if (i_am_master(comm)) - println("$(e_l2) < $(tol)") - end - @test e_l2 < tol -end - -MPIPETScCommunicator() do comm - run(comm) -end - -end # module diff --git a/test/OLD/MPIPETScTests/MPIPETScDistributedVectorsTests.jl b/test/OLD/MPIPETScTests/MPIPETScDistributedVectorsTests.jl deleted file mode 100644 index bba6a5fd..00000000 --- a/test/OLD/MPIPETScTests/MPIPETScDistributedVectorsTests.jl +++ /dev/null @@ -1,82 +0,0 @@ -module MPIPETScDistributedVectors -using Gridap -using GridapDistributed -using Test -using MPI -using GridapDistributedPETScWrappers - -MPIPETScCommunicator() do comm - @test num_parts(comm) == 2 - n = 10 - indices = DistributedIndexSet(comm,n) do part - if part == 1 - IndexSet(n,1:5,[2,1,1,1,2]) - else - IndexSet(n,[2,6,5,7,8,9,10,1],[1,2,2,2,2,2,2,2]) - end - end - vec = DistributedVector(indices,indices) do part, indices - local_part=Vector{Int}(undef,length(indices.lid_to_owner)) - for i=1:length(local_part) - if (indices.lid_to_owner[i] == part) - local_part[i]=part - end - end - local_part - end - exchange!(vec) - @test vec.part == indices.parts.part.lid_to_owner - - function init_vec() - sizes=DistributedVector(indices,indices) do part, indices - n=length(indices.lid_to_owner) - lsizes=Vector{Int}(undef,n) - for i=1:n - if (indices.lid_to_owner[i] == part) - lsizes[i]=rand(1:4) - end - end - lsizes - end - exchange!(sizes) - DistributedVector(indices,indices,sizes) do part, indices, lsizes - n = length(indices.lid_to_owner) - ptrs = Vector{Int}(undef,n+1) - ptrs[1]=1 - for i=1:n - ptrs[i+1]=ptrs[i]+lsizes[i] - end - current=1 - data = Vector{Int}(undef, ptrs[n+1]-1) - for i=1:n - if (indices.lid_to_owner[i] == part) - for j=1:(ptrs[i+1]-ptrs[i]) - data[current]=part - current=current+1 - end - else - for j=1:(ptrs[i+1]-ptrs[i]) - data[current]=0 - current=current+1 - end - end - end - Gridap.Arrays.Table(data,ptrs) - end - end - vec = init_vec() - exchange!(vec) - function test_result() - result = true - for i = 1:length(vec.part) - for j = 1:length(vec.part[i]) - result = - result && (vec.part[i][j] == indices.parts.part.lid_to_owner[i]) - end - end - @test 
result - end - test_result() -end - -end diff --git a/test/OLD/MPIPETScTests/MPIPETScUniformlyRefinedForestOfOctreesDiscreteModelsTests.jl b/test/OLD/MPIPETScTests/MPIPETScUniformlyRefinedForestOfOctreesDiscreteModelsTests.jl deleted file mode 100644 index 247422ce..00000000 --- a/test/OLD/MPIPETScTests/MPIPETScUniformlyRefinedForestOfOctreesDiscreteModelsTests.jl +++ /dev/null @@ -1,93 +0,0 @@ -module MPIPETScUniformlyRefinedForestOfOctreesDiscreteModelsTests - using Gridap - using GridapDistributed - using Test - using ArgParse - - - - function parse_commandline() - s = ArgParseSettings() - @add_arg_table! s begin - "--subdomains", "-s" - help = "Tuple with the # of coarse subdomains per Cartesian direction" - arg_type = Int64 - default=[1,1] - nargs='+' - "--num-uniform-refinements", "-r" - help = "# of uniform refinements" - arg_type = Int64 - default=1 - end - return parse_args(s) - end - - - function run(comm,subdomains,num_uniform_refinements) - # Manufactured solution - u(x) = x[1] + x[2] - f(x) = -Δ(u)(x) - - if length(subdomains)==2 - domain=(0,1,0,1) - else - @assert length(subdomains)==3 - domain=(0,1,0,1,0,1) - end - - coarse_discrete_model=CartesianDiscreteModel(domain,subdomains) - model=UniformlyRefinedForestOfOctreesDiscreteModel(comm, - coarse_discrete_model, - num_uniform_refinements) - - # FE Spaces - order=1 - reffe = ReferenceFE(lagrangian,Float64,order) - V = FESpace(model=model, - reffe=reffe, - conformity=:H1, - dirichlet_tags="boundary") - U = TrialFESpace(V, u) - - trian=Triangulation(model) - dΩ=Measure(trian,2*(order+1)) - - function a(u,v) - ∫(∇(v)⋅∇(u))dΩ - end - function l(v) - ∫(v*f)dΩ - end - - # FE solution - op = AffineFEOperator(a,l,U,V) - ls = PETScLinearSolver( - Float64; - ksp_type = "cg", - ksp_rtol = 1.0e-06, - ksp_atol = 0.0, - ksp_monitor = "", - pc_type = "jacobi", - ) - fels = LinearFESolver(ls) - uh = solve(fels, op) - - # Error norms and print solution - trian=Triangulation(OwnedCells,model) - dΩ=Measure(trian,2*order) - e = u-uh - e_l2 = sum(∫(e*e)dΩ) - tol = 1.0e-9 - @test e_l2 < tol - if (i_am_master(comm)) println("$(e_l2) < $(tol)\n") end - end - - parsed_args = parse_commandline() - subdomains = Tuple(parsed_args["subdomains"]) - num_uniform_refinements = Tuple(parsed_args["num-uniform-refinements"]) - - MPIPETScCommunicator() do comm - run(comm,subdomains,num_uniform_refinements[1]) - end - -end # module diff --git a/test/OLD/MPIPETScTests/MPITimersTests.jl b/test/OLD/MPIPETScTests/MPITimersTests.jl deleted file mode 100644 index 186bb51b..00000000 --- a/test/OLD/MPIPETScTests/MPITimersTests.jl +++ /dev/null @@ -1,15 +0,0 @@ -module MPITimersTests - using MPI - using GridapDistributed - if (!MPI.Initialized()) - MPI.Init() - end - timer=GridapDistributed.MPITimer{GridapDistributed.MPITimerModeMin}(MPI.COMM_WORLD,"sleep(rand())") - GridapDistributed.timer_start(timer) - sleep(rand()) - GridapDistributed.timer_stop(timer) - GridapDistributed.timer_start(timer) - sleep(rand()) - GridapDistributed.timer_stop(timer) - GridapDistributed.timer_report(timer) -end diff --git a/test/OLD/MPIPETScTests/compile/compile.jl b/test/OLD/MPIPETScTests/compile/compile.jl deleted file mode 100644 index 967bdecc..00000000 --- a/test/OLD/MPIPETScTests/compile/compile.jl +++ /dev/null @@ -1,16 +0,0 @@ -using Pkg -Pkg.add("PackageCompiler") -using PackageCompiler - -pkgs = Symbol[] -push!(pkgs, :GridapDistributed) - -if VERSION >= v"1.4" - append!(pkgs, [Symbol(v.name) for v in values(Pkg.dependencies()) if v.is_direct_dep],) -else - append!(pkgs, 
[Symbol(name) for name in keys(Pkg.installed())]) -end - -create_sysimage(pkgs, - sysimage_path=joinpath(@__DIR__,"GridapDistributed.so"), - precompile_execution_file=joinpath(@__DIR__,"..","MPIPETScDistributedPoissonTests.jl")) diff --git a/test/OLD/MPIPETScTests/compile/compile.sh b/test/OLD/MPIPETScTests/compile/compile.sh deleted file mode 100755 index 70304f72..00000000 --- a/test/OLD/MPIPETScTests/compile/compile.sh +++ /dev/null @@ -1,6 +0,0 @@ -# This script is to be executed from this folder (compile/) -julia --project=../../.. --color=yes -e 'using Pkg; Pkg.instantiate()' -# See https://juliaparallel.github.io/MPI.jl/latest/knownissues/#Julia-module-precompilation-1 -# for a justification of this line -julia --project=../../.. --color=yes -e 'using Pkg; pkg"precompile"' -julia --project=../../.. -O3 --check-bounds=no --color=yes compile.jl diff --git a/test/OLD/MPIPETScTests/runtests.jl b/test/OLD/MPIPETScTests/runtests.jl deleted file mode 100644 index 826cd1cf..00000000 --- a/test/OLD/MPIPETScTests/runtests.jl +++ /dev/null @@ -1,55 +0,0 @@ -module MPIPETScTests - -using Test -using MPI -using ArgParse - -function parse_commandline() - s = ArgParseSettings() - @add_arg_table! s begin - "--image-file", "-i" - help = "Path to the image file that one can use in order to accelerate MPIPETSc tests" - arg_type = String - default="GridapDistributed.so" - end - return parse_args(s) -end - -parsed_args = parse_commandline() -image_file_path=parsed_args["image-file"] -image_file_exists=isfile(image_file_path) - -nprocs_str = get(ENV, "JULIA_GRIDAPDISTRIBUTED_TEST_NPROCS","") -nprocs = nprocs_str == "" ? clamp(Sys.CPU_THREADS, 2, 4) : parse(Int, nprocs_str) -#mpiexec_args = Base.shell_split("--allow-run-as-root --tag-output") #Base.shell_split(get(ENV, "JULIA_MPIEXEC_TEST_ARGS", "")) -testdir = @__DIR__ -istest(f) = endswith(f, ".jl") && startswith(f, "MPI") -testfiles = sort(filter(istest, readdir(testdir))) -@time @testset "$f" for f in testfiles - MPI.mpiexec() do cmd - #cmd = `$cmd $mpiexec_args` - np = nprocs - extra_args = "" - if f in ["MPIPETScDistributedVectorsTests.jl","MPIPETScDistributedIndexSetsTests.jl"] - np = 2 - elseif f in ["MPIPETScDistributedPoissonTests.jl"] - np = 4 - extra_args = "-s 2 2 -p 4 4" - elseif f in ["MPIPETScUniformlyRefinedForestOfOctreesDiscreteModelsTests.jl"] - np = 4 - extra_args = "-s 2 2 2 -r 2" - else - np = 4 - end - if ! image_file_exists - cmd = `$cmd -n $(np) --allow-run-as-root --oversubscribe $(Base.julia_cmd()) --project=. $(joinpath(testdir, f)) $(split(extra_args))` - else - cmd = `$cmd -n $(np) --allow-run-as-root --oversubscribe $(Base.julia_cmd()) -J$(image_file_path) --project=. 
$(joinpath(testdir, f)) $(split(extra_args))` - end - @show cmd - run(cmd) - @test true - end -end - -end # module diff --git a/test/OLD/SequentialCommunicatorsTests.jl b/test/OLD/SequentialCommunicatorsTests.jl deleted file mode 100644 index e69de29b..00000000 diff --git a/test/OLD/ZeroMeanDistributedFESpacesTests.jl b/test/OLD/ZeroMeanDistributedFESpacesTests.jl deleted file mode 100644 index e66ccd6f..00000000 --- a/test/OLD/ZeroMeanDistributedFESpacesTests.jl +++ /dev/null @@ -1,38 +0,0 @@ -module ZeroMeanDistributedFESpacesTests - -using Gridap -using GridapDistributed -using Gridap.FESpaces -using Test - -subdomains = (2,2) -SequentialCommunicator(subdomains) do comm - domain = (0,1,0,1) - cells = (4,4) - model = CartesianDiscreteModel(comm,subdomains,domain,cells) - nsubdoms = prod(subdomains) - vector_type = Vector{Float64} - reffe = ReferenceFE(lagrangian,Float64,1) - V = FESpace(vector_type, - model=model, - reffe=reffe, - degree=1, - constraint=:zeromean) - fv=zero_free_values(V) - fv .= rand(length(fv)) - vh=FEFunction(V,fv) - - # Error norms and print solution - sums = DistributedData(model, vh) do part, (model, gids), vh - trian = Triangulation(model) - owned_trian = remove_ghost_cells(trian, part, gids) - dΩ = Measure(owned_trian, 1) - sum(∫(vh)dΩ) - end - mean = sum(gather(sums)) - tol = 1.0e-10 - if (i_am_master(comm)) println("$(abs(mean)) < $(tol)\n") end - @test abs(mean) < tol -end - -end # module diff --git a/test/OLD/runtests.jl b/test/OLD/runtests.jl deleted file mode 100644 index 57c06513..00000000 --- a/test/OLD/runtests.jl +++ /dev/null @@ -1,29 +0,0 @@ -module GridapDistributedTests - -using Test -using MPI -using GridapDistributedPETScWrappers - -@time @testset "DistributedData" begin include("DistributedDataTests.jl") end - -@time @testset "DistributedIndexSets" begin include("DistributedIndexSetsTests.jl") end - -@time @testset "DistributedVectors" begin include("DistributedVectorsTests.jl") end - -@time @testset "CartesianDiscreteModels" begin include("CartesianDiscreteModelsTests.jl") end - -@time @testset "DistributedFESpaces" begin include("DistributedFESpacesTests.jl") end - -@time @testset "ZeroMeanDistributedFESpacesTests" begin include("ZeroMeanDistributedFESpacesTests.jl") end - -@time @testset "DistributedAssemblers" begin include("DistributedAssemblersTests.jl") end - -@time @testset "DistributedPoisson" begin include("DistributedPoissonTests.jl") end - -@time @testset "DistributedPoissonDG" begin include("DistributedPoissonDGTests.jl") end - -@time @testset "DistributedPLaplacian" begin include("DistributedPLaplacianTests.jl") end - -@time @testset "DistributedStokes" begin include("DistributedStokesTests.jl") end - -end # module From aa92cbe555506289e8ac711d5a4fc99d3804fa3b Mon Sep 17 00:00:00 2001 From: amartin Date: Wed, 27 Oct 2021 10:43:15 +1100 Subject: [PATCH 2/3] Added NEWS.md file --- NEWS.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 NEWS.md diff --git a/NEWS.md b/NEWS.md new file mode 100644 index 00000000..39f6b4eb --- /dev/null +++ b/NEWS.md @@ -0,0 +1,19 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Unreleased + +A changelog is not maintained for this version. 
+ +This version introduces fully-parallel distributed memory data structures for all the steps required in a finite element simulation (geometry handling, fe space setup, linear system assembly) except for the linear solver kernel, which is just a sparse LU solver applied to the global system gathered on a master task (and thus obviously not scalable). These distributed data structures mirror their counterparts in the Gridap.jl software architecture and implement most of their abstract interfaces. This version of GridapDistributed.jl relies on PartitionedArrays.jl (https://github.com/fverdugo/PartitionedArrays.jl) as distributed linear algebra backend (global distributed sparse matrices and vectors). + +See also + +## [0.1.0] - 2021-09-29 + +A changelog is not maintained for this version. + +This version although functional, is fully deprecated. From 30df203abe7cbb61ce7a5cdc52557f8c6389ea80 Mon Sep 17 00:00:00 2001 From: amartin Date: Wed, 27 Oct 2021 12:10:40 +1100 Subject: [PATCH 3/3] Adding README.md --- NEWS.md | 2 +- README.md | 24 ++++++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/NEWS.md b/NEWS.md index 39f6b4eb..2cdc4a93 100644 --- a/NEWS.md +++ b/NEWS.md @@ -10,7 +10,7 @@ A changelog is not maintained for this version. This version introduces fully-parallel distributed memory data structures for all the steps required in a finite element simulation (geometry handling, fe space setup, linear system assembly) except for the linear solver kernel, which is just a sparse LU solver applied to the global system gathered on a master task (and thus obviously not scalable). These distributed data structures mirror their counterparts in the Gridap.jl software architecture and implement most of their abstract interfaces. This version of GridapDistributed.jl relies on PartitionedArrays.jl (https://github.com/fverdugo/PartitionedArrays.jl) as distributed linear algebra backend (global distributed sparse matrices and vectors). -See also +More details can also be found in https://github.com/gridap/GridapDistributed.jl/issues/39 ## [0.1.0] - 2021-09-29 diff --git a/README.md b/README.md index bd621551..5e463998 100644 --- a/README.md +++ b/README.md @@ -12,12 +12,23 @@ Parallel distributed-memory version of `Gridap.jl`. 🚧 work in progress 🚧 ## Purpose -This package is currently **experimental, under development**. The final purpose is to provide programming paradigm-neutral, parallel finite element data structures for distributed computing environments. This feature implies that communication among tasks are not tailored for a particular programming model, and thus can be leveraged with, e.g., MPI or the master-worker programming model built-in in Julia. Whenever one sticks to MPI as the underlying communication layer, `GridapDistributed.jl` leverages the suite of tools available in the [PETSc software](https://petsc.org/release/) package for the assembly and solution of distributed discrete systems of equations. Also for MPI, the user may also leverage the [`p4est` software library](https://p4est.github.io/) for generating a uniformly refined forest of quadtrees/octrees of the computational domain (click [here](https://github.com/gridap/GridapDistributed.jl/blob/master/test/MPIPETScTests/MPIPETScUniformlyRefinedForestOfOctreesDiscreteModelsTests.jl) for an example). +This package is currently **experimental, under development**. 
+
+`GridapDistributed.jl` provides fully-parallel distributed-memory data structures for the Finite Element (FE) numerical solution of Partial Differential Equations (PDEs) on parallel computers, from multi-core CPU desktop computers to HPC clusters and supercomputers. These distributed data structures are designed to mirror as far as possible their counterparts in the [`Gridap.jl`](https://github.com/gridap/Gridap.jl) software package, while implementing/leveraging most of their abstract interfaces. As a result, sequential Julia scripts written in the high-level API of `Gridap.jl` can be used almost verbatim, up to minor adjustments, in a parallel context using `GridapDistributed.jl`. This is indeed one of the main advantages of `GridapDistributed.jl` and a major design goal that we pursue.
+
+At present, `GridapDistributed.jl` provides scalable parallel data structures for grid handling, finite element space setup, and distributed linear system assembly. For the latter part, i.e., global distributed sparse matrices and vectors, `GridapDistributed.jl` relies on [`PartitionedArrays.jl`](https://github.com/fverdugo/PartitionedArrays.jl) as its distributed linear algebra backend. This implies, among other things, that all `GridapDistributed.jl` driver programs can be run either in sequential execution mode, which is very useful for developing and debugging parallel programs (see the `test/sequential/` folder for examples), or in message-passing (MPI) execution mode, which is the mode to use when deploying the code on an actual parallel computer to perform a fast simulation (see the `test/mpi/` folder for examples).
+
+## Caveats
+
+At present, we have the following caveats:
+1. Grid handling currently available within `GridapDistributed.jl` is restricted to Cartesian-like meshes of arbitrary-dimensional, topologically n-cube domains.
+2. The linear solver kernel within `GridapDistributed.jl`, leveraged, e.g., via the backslash operator `\`, is just a sparse LU solver applied to the global system gathered on a master task (and thus obviously not scalable). Providing highly scalable linear and nonlinear solvers tailored for the FE discretization of PDEs is part of our future plans.
+
+Complementarily, one may leverage two satellite packages of `GridapDistributed.jl` to address caveats 1 and 2 above, namely [`GridapP4est.jl`](https://github.com/gridap/GridapP4est.jl), for peta-scale handling of meshes that can be decomposed as forests of quadtrees/octrees of the computational domain, and [`GridapPETSc.jl`](https://github.com/gridap/GridapPETSc.jl), which offers access to the full set of scalable linear and nonlinear solvers in the [PETSc](https://petsc.org/release/) numerical software package. We refer to the READMEs of these two packages for further details.

## Build

-Before using `GridapDistributed.jl` package, we have to build `MPI.jl`, `GridapDistributedPETScWrappers.jl`, and
-`P4est_wrapper.jl`. We refer to the main `README.md` of the latter two packages (available [here](https://github.com/gridap/GridapDistributedPETScWrappers.jl) and [here](https://github.com/gridap/p4est_wrapper.jl), resp.) for configuration instructions.
+Before using the `GridapDistributed.jl` package, one needs to build the [`MPI.jl`](https://github.com/JuliaParallel/MPI.jl) package. We refer to the main documentation of this package for configuration instructions.

## MPI-parallel Julia script execution instructions

Alternatively, for convenience, one can assign the path of `mpirun` to an environment variable, i.e.,:

```
$ export MPIRUN=$(julia --project=. -e "using MPI;println(MPI.mpiexec_path)")
```
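The `driver.jl` script referred to below is only a placeholder name. For illustration, a minimal sketch of what such an MPI-parallel driver might contain is shown next. It is not an excerpt from this repository: the FE calls reuse the high-level `Gridap.jl` API, and the `prun`/`mpi` entry point is assumed to be the one provided by `PartitionedArrays.jl` (the exact entry point may differ across versions).

```julia
# driver.jl -- hypothetical minimal sketch, not an excerpt from this repository.
# Assumes the `prun`/`mpi` entry point of PartitionedArrays.jl for launching the
# driver on a 2x2 partition of MPI tasks; replacing `mpi` by PartitionedArrays'
# sequential backend (assumption) would run the same driver in sequential mode.
using Gridap
using GridapDistributed
using PartitionedArrays

partition = (2, 2)  # number of parts per Cartesian direction

prun(mpi, partition) do parts
  # Manufactured solution and forcing term
  u(x) = x[1] + x[2]
  f(x) = -Δ(u)(x)

  # Distributed Cartesian mesh of the unit square
  domain = (0, 1, 0, 1)
  cells = (8, 8)
  model = CartesianDiscreteModel(parts, domain, cells)

  # Standard Gridap.jl FE machinery, reused almost verbatim
  order = 1
  reffe = ReferenceFE(lagrangian, Float64, order)
  V = TestFESpace(model, reffe; dirichlet_tags="boundary")
  U = TrialFESpace(V, u)
  Ω = Triangulation(model)
  dΩ = Measure(Ω, 2*order)
  a(u, v) = ∫( ∇(v)⋅∇(u) )dΩ
  l(v) = ∫( v*f )dΩ

  # Distributed assembly; the solve gathers the system on a master task (LU)
  op = AffineFEOperator(a, l, U, V)
  uh = solve(op)
end
```

The launch commands discussed below apply to such a script unchanged.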

-As an example, the MPI-parallel `GridapDistributed.jl` driver `MPIPETScCommunicatorsTests.jl`, located in the `test` directory, can be executed as:
-
```
-$MPIRUN -np 2 julia --project=. -J ../Gridap.jl/compile/Gridapv0.14.1.so test/MPIPETScTests/MPIPETScCommunicatorsTests.jl
```
-
-where `-J ../Gridap.jl/compile/Gridapv0.14.1.so` is optional, but highly recommended in order to reduce JIT compilation times. More details about how to generate this file can be found [here](https://github.com/gridap/GridapDistributed.jl/blob/master/compile/README.md).
+As an example, assuming that we are located in the root directory of `GridapDistributed.jl`,
+a hypothetical MPI-parallel `GridapDistributed.jl` driver named `driver.jl` can be executed on 4 MPI tasks as:

```
$MPIRUN -np 4 julia --project=. -J sys-image.so driver.jl
```

where `-J sys-image.so` is optional, but highly recommended in order to reduce JIT compilation times. Here, `sys-image.so` is assumed to be a Julia system image pre-generated for the driver at hand using the [`PackageCompiler.jl`](https://julialang.github.io/PackageCompiler.jl/dev/index.html) package. See the `test/TestApp/compile` folder for example system-image generation scripts, along with a test application whose source is available at `test/TestApp/`. These scripts are triggered from the `.github/workflows/ci.yml` file as part of the GitHub Actions CI. Two big warnings when executing MPI-parallel drivers: