Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 54 additions & 3 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,60 @@

## Bug Fixes

# 0.18.0

Please see https://github.com/rapidsai/cugraph/releases/tag/branch-0.18-latest for the latest changes to this development branch.
# cuGraph 0.18.0 (24 Feb 2021)

## Bug Fixes 🐛

- Fixed TSP returned routes (#1412) @hlinsen
- Updated CI scripts to use a different error handling convention, updated LD_LIBRARY_PATH for project flash runs (#1386) @rlratzel
- Bug fixes for MNMG coarsen_graph, renumber_edgelist, relabel (#1364) @seunghwak
- Set a specific known working commit hash for gunrock instead of "dev" (#1336) @rlratzel
- Updated git utils used by copyright.py for compatibility with current CI env (#1325) @rlratzel
- Fix MNMG Louvain tests on Pascal architecture (#1322) @ChuckHastings
- FIX Set bash trap after PATH is updated (#1321) @dillon-cullinan
- Fix graph nodes function and renumbering from series (#1319) @Iroy30
- Fix Branch 0.18 merge 0.17 (#1314) @BradReesWork
- Fix EXPERIMENTAL_LOUVAIN_TEST on Pascal (#1312) @ChuckHastings
- Updated cuxfilter to 0.18, removed datashader indirect dependency in conda dev .yml files (#1311) @rlratzel
- Update SG PageRank C++ tests (#1307) @seunghwak

## Documentation 📖

- Enabled MultiGraph class and tests, updated SOURCEBUILD.md to include the latest build.sh options (#1351) @rlratzel

## New Features 🚀

- New EgoNet extractor (#1365) @afender
- Implement induced subgraph extraction primitive (SG C++) (#1354) @seunghwak

## Improvements 🛠️

- Update stale GHA with exemptions & new labels (#1413) @mike-wendt
- Add GHA to mark issues/prs as stale/rotten (#1408) @Ethyling
- update subgraph tests and remove legacy pagerank (#1378) @Iroy30
- Update the conda environments and README file (#1369) @BradReesWork
- Prepare Changelog for Automation (#1368) @ajschmidt8
- Update CMakeLists.txt files for consistency with RAPIDS and to support cugraph as an external project and other tech debt removal (#1367) @rlratzel
- Use new coarsen_graph primitive in Louvain (#1362) @ChuckHastings
- Added initial infrastructure for MG C++ testing and a Pagerank MG test using it (#1361) @rlratzel
- Add SG TSP (#1360) @hlinsen
- Build a Dendrogram class, adapt Louvain/Leiden/ECG to use it (#1359) @ChuckHastings
- Auto-label PRs based on their content (#1358) @jolorunyomi
- Implement MNMG Renumber (#1355) @aschaffer
- Enabling pytest code coverage output by default (#1352) @jnke2016
- Added configuration for new cugraph-doc-codeowners review group (#1344) @rlratzel
- API update to match RAFT PR #120 (#1343) @drobison00
- Pin gunrock to v1.2 for version 0.18 (#1342) @ChuckHastings
- Fix #1340 - Use generic from_edgelist() methods (#1341) @miguelusque
- Using RAPIDS_DATASET_ROOT_DIR env var in place of absolute path to datasets in tests (#1337) @jnke2016
- Expose dense implementation of Hungarian algorithm (#1333) @ChuckHastings
- SG Pagerank transition (#1332) @Iroy30
- improving error checking and docs (#1327) @BradReesWork
- Fix MNMG cleanup exceptions (#1326) @Iroy30
- Create labeler.yml (#1318) @jolorunyomi
- Updates to support nightly MG test automation (#1308) @rlratzel
- Add C++ graph functions (coarsen_grpah, renumber_edgelist, relabel) and primitvies (transform_reduce_by_adj_matrix_row_key, transform_reduce_by_adj_matrix_col_key, copy_v_transform_reduce_key_aggregated_out_nbr) (#1257) @seunghwak

# cuGraph 0.17.0 (10 Dec 2020)
## New Features
Expand Down
2 changes: 1 addition & 1 deletion cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -298,7 +298,7 @@ else(DEFINED ENV{RAFT_PATH})
FetchContent_Declare(
raft
GIT_REPOSITORY https://github.com/rapidsai/raft.git
GIT_TAG 4a79adcb0c0e87964dcdc9b9122f242b5235b702
GIT_TAG a3461b201ea1c9f61571f1927274f739e775d2d2
SOURCE_SUBDIR raft
)

Expand Down
22 changes: 19 additions & 3 deletions cpp/include/experimental/detail/graph_utils.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#include <rmm/device_uvector.hpp>

#include <thrust/sort.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
#include <cuco/detail/hash_functions.cuh>

Expand All @@ -39,7 +40,7 @@ namespace detail {
// compute the numbers of nonzeros in rows (of the graph adjacency matrix, if store_transposed =
// false) or columns (of the graph adjacency matrix, if store_transposed = true)
template <typename vertex_t, typename edge_t>
rmm::device_uvector<edge_t> compute_major_degree(
rmm::device_uvector<edge_t> compute_major_degrees(
raft::handle_t const &handle,
std::vector<edge_t const *> const &adj_matrix_partition_offsets,
partition_t<vertex_t> const &partition)
Expand Down Expand Up @@ -120,7 +121,7 @@ rmm::device_uvector<edge_t> compute_major_degree(
// compute the numbers of nonzeros in rows (of the graph adjacency matrix, if store_transposed =
// false) or columns (of the graph adjacency matrix, if store_transposed = true)
template <typename vertex_t, typename edge_t>
rmm::device_uvector<edge_t> compute_major_degree(
rmm::device_uvector<edge_t> compute_major_degrees(
raft::handle_t const &handle,
std::vector<rmm::device_uvector<edge_t>> const &adj_matrix_partition_offsets,
partition_t<vertex_t> const &partition)
Expand All @@ -131,7 +132,22 @@ rmm::device_uvector<edge_t> compute_major_degree(
adj_matrix_partition_offsets.end(),
tmp_offsets.begin(),
[](auto const &offsets) { return offsets.data(); });
return compute_major_degree(handle, tmp_offsets, partition);
return compute_major_degrees(handle, tmp_offsets, partition);
}

// compute the numbers of nonzeros in rows (of the graph adjacency matrix, if store_transposed =
// false) or columns (of the graph adjacency matrix, if store_transposed = true)
//
// Single-GPU overload: takes a raw CSR/CSC offsets array directly instead of per-partition
// offset vectors (compare the partition-based overloads above).
//
// handle             : RAFT handle; supplies the CUDA stream used for allocation and execution.
// offsets            : device pointer to the compressed offsets array; assumed to have
//                      number_of_vertices + 1 entries — TODO confirm with callers.
// number_of_vertices : number of majors (rows if !store_transposed, columns otherwise).
//
// Returns a device vector of length number_of_vertices where element i is
// offsets[i + 1] - offsets[i], i.e. the degree (nonzero count) of major i.
template <typename vertex_t, typename edge_t>
rmm::device_uvector<edge_t> compute_major_degrees(raft::handle_t const &handle,
edge_t const *offsets,
vertex_t number_of_vertices)
{
// Allocate the result on the handle's stream; tabulate fills degrees[i] on device by
// differencing adjacent offsets (each thread reads two consecutive entries).
rmm::device_uvector<edge_t> degrees(number_of_vertices, handle.get_stream());
thrust::tabulate(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
degrees.begin(),
degrees.end(),
[offsets] __device__(auto i) { return offsets[i + 1] - offsets[i]; });
return degrees;
}

template <typename vertex_t, typename edge_t>
Expand Down
12 changes: 12 additions & 0 deletions cpp/include/experimental/graph_view.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -494,6 +494,12 @@ class graph_view_t<vertex_t,
: static_cast<weight_t const*>(nullptr);
}

rmm::device_uvector<edge_t> compute_in_degrees(raft::handle_t const& handle) const;
rmm::device_uvector<edge_t> compute_out_degrees(raft::handle_t const& handle) const;

rmm::device_uvector<weight_t> compute_in_weight_sums(raft::handle_t const& handle) const;
rmm::device_uvector<weight_t> compute_out_weight_sums(raft::handle_t const& handle) const;

private:
std::vector<edge_t const*> adj_matrix_partition_offsets_{};
std::vector<vertex_t const*> adj_matrix_partition_indices_{};
Expand Down Expand Up @@ -638,6 +644,12 @@ class graph_view_t<vertex_t,
// private.
weight_t const* weights() const { return weights_; }

rmm::device_uvector<edge_t> compute_in_degrees(raft::handle_t const& handle) const;
rmm::device_uvector<edge_t> compute_out_degrees(raft::handle_t const& handle) const;

rmm::device_uvector<weight_t> compute_in_weight_sums(raft::handle_t const& handle) const;
rmm::device_uvector<weight_t> compute_out_weight_sums(raft::handle_t const& handle) const;

private:
edge_t const* offsets_{nullptr};
vertex_t const* indices_{nullptr};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,10 @@
#include <experimental/detail/graph_utils.cuh>
#include <experimental/graph.hpp>
#include <experimental/graph_view.hpp>
#include <matrix_partition_device.cuh>
#include <utilities/dataframe_buffer.cuh>
#include <utilities/error.hpp>
#include <utilities/host_scalar_comm.cuh>
#include <utilities/shuffle_comm.cuh>
#include <vertex_partition_device.cuh>

Expand Down Expand Up @@ -100,10 +102,10 @@ __global__ void for_all_major_for_all_nbr_low_degree(
}
thrust::fill(thrust::seq,
major_vertices + local_offset,
major_vertices + local_offset + key_idx,
major_vertices + local_offset + key_idx + 1,
matrix_partition.get_major_from_major_offset_nocheck(major_offset));
thrust::fill(thrust::seq,
major_vertices + local_offset + key_idx,
major_vertices + local_offset + key_idx + 1,
major_vertices + local_offset + local_degree,
invalid_vertex);
}
Expand Down Expand Up @@ -159,8 +161,7 @@ __global__ void for_all_major_for_all_nbr_low_degree(
* pairs provided by @p map_key_first, @p map_key_last, and @p map_value_first (aggregated over the
* entire set of processes in multi-GPU).
* @param reduce_op Binary operator takes two input arguments and reduce the two variables to one.
* @param init Initial value to be added to the reduced @p key_aggregated_e_op return values for
* each vertex.
* @param init Initial value to be added to the reduced @p reduce_op return values for each vertex.
* @param vertex_value_output_first Iterator pointing to the vertex property variables for the
* first (inclusive) vertex (assigned to this process in multi-GPU). `vertex_value_output_last`
* (exclusive) is deduced as @p vertex_value_output_first + @p
Expand Down Expand Up @@ -191,6 +192,7 @@ void copy_v_transform_reduce_key_aggregated_out_nbr(
"GraphViewType should support the push model.");
static_assert(std::is_same<typename std::iterator_traits<VertexIterator>::value_type,
typename GraphViewType::vertex_type>::value);
static_assert(is_arithmetic_or_thrust_tuple_of_arithmetic<T>::value);

using vertex_t = typename GraphViewType::vertex_type;
using edge_t = typename GraphViewType::edge_type;
Expand Down Expand Up @@ -393,7 +395,7 @@ void copy_v_transform_reduce_key_aggregated_out_nbr(
tmp_major_vertices.begin(), tmp_minor_keys.begin(), tmp_key_aggregated_edge_weights.begin()));
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
triplet_first,
triplet_first + major_vertices.size(),
triplet_first + tmp_major_vertices.size(),
tmp_e_op_result_buffer_first,
[adj_matrix_row_value_input_first,
key_aggregated_e_op,
Expand All @@ -408,7 +410,7 @@ void copy_v_transform_reduce_key_aggregated_out_nbr(
w,
*(adj_matrix_row_value_input_first +
matrix_partition.get_major_offset_from_major_nocheck(major)),
kv_map.find(key)->second);
kv_map.find(key)->second.load(cuda::std::memory_order_relaxed));
});
tmp_minor_keys.resize(0, handle.get_stream());
tmp_key_aggregated_edge_weights.resize(0, handle.get_stream());
Expand Down Expand Up @@ -488,11 +490,12 @@ void copy_v_transform_reduce_key_aggregated_out_nbr(
auto major_vertex_first = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_t{0}),
[major_vertices = major_vertices.data()] __device__(auto i) {
return ((i == 0) || (major_vertices[i] == major_vertices[i - 1]))
return ((i == 0) || (major_vertices[i] != major_vertices[i - 1]))
? major_vertices[i]
: invalid_vertex_id<vertex_t>::value;
});
thrust::copy_if(
rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
major_vertex_first,
major_vertex_first + major_vertices.size(),
unique_major_vertices.begin(),
Expand All @@ -506,9 +509,10 @@ void copy_v_transform_reduce_key_aggregated_out_nbr(
thrust::make_permutation_iterator(
vertex_value_output_first,
thrust::make_transform_iterator(
major_vertices.begin(),
unique_major_vertices.begin(),
[vertex_partition = vertex_partition_device_t<GraphViewType>(graph_view)] __device__(
auto v) { return vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v); })),
thrust::equal_to<vertex_t>{},
reduce_op);

thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
Expand Down
Loading