diff --git a/examples/notebooks/example_heat_clustering.ipynb b/examples/notebooks/example_heat_clustering.ipynb new file mode 100644 index 0000000..c93c66c --- /dev/null +++ b/examples/notebooks/example_heat_clustering.ipynb @@ -0,0 +1,157 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import simkit as sk\n", + "import igl\n", + "from simkit.filesystem import get_data_directory" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# -- 2D Meshes -- #\n", + "filename = '2d/beam/beam.obj'\n", + "filename = '2d/bingby/bingby.obj'\n", + "filename = '2d/cthulu/cthulu.obj'\n", + "\n", + "[V, _, _, T, _, _]= igl.readOBJ(get_data_directory() + filename)\n", + "V = V[:, :2] # Keep only the first two dimensions for 2D meshes\n", + "print('Vertices:', V.shape)\n", + "print('Triangles:', T.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "k= 5\n", + "mode = 'elem'\n", + "mu = np.ones((T.shape[0],1))\n", + "data = sk.heat_clustering_precomp(V, T, mu=mu, k=k, mode=mode)\n", + "parts, C, dist, clusters, centroids = sk.heat_clustering_solve(data, max_iters=100)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import polyscope as ps\n", + "import polyscope.imgui as psim\n", + "\n", + "if T.shape[1] == 3:\n", + " elem_type = \"faces\"\n", + "else:\n", + " elem_type = \"cells\"\n", + "if mode == 'elem':\n", + " part_type = elem_type\n", + "else:\n", + " part_type = \"vertices\"\n", + "\n", + "def visualize_heat_clustering(V, T, C, clusters, centroids, dist, parts, mu=None):\n", + " ps.init()\n", + "\n", + " # Register mesh\n", + " if T.shape[1] == 3:\n", + " msh = ps.register_surface_mesh(\"mesh\", V, T)\n", + " else:\n", + " msh = ps.register_volume_mesh(\"mesh\", V, T)\n", + "\n", 
+ " # Register centroids and initial cluster labeling\n", + " pnts = ps.register_point_cloud(\"centroids\", C)\n", + " msh.add_scalar_quantity(\"clusters\", parts, enabled=True, cmap=\"rainbow\", defined_on=part_type)\n", + "\n", + " # Optional stiffness visualization\n", + " if mu is not None:\n", + " msh.add_scalar_quantity(\"mu\", mu.flatten(), defined_on=elem_type, cmap=\"rainbow\")\n", + "\n", + " # Set up interactive sliders for cluster index and iteration\n", + " current_k = 0\n", + " current_iter = 0\n", + "\n", + " def callback_func():\n", + " nonlocal current_k, current_iter\n", + "\n", + " # Distance field for cluster k\n", + " changed, k = psim.SliderInt(\"k\", current_k, v_min=0, v_max=dist.shape[1]-1)\n", + " if changed:\n", + " current_k = k\n", + " msh.add_scalar_quantity(\"d\", dist[:, k], defined_on=part_type, enabled=True, cmap=\"rainbow\")\n", + "\n", + " # Cluster labeling at iteration\n", + " changed, iter = psim.SliderInt(\"iter\", current_iter, v_min=0, v_max=len(clusters)-1)\n", + " if changed:\n", + " current_iter = iter\n", + " pnts = ps.register_point_cloud(\"centroids\", centroids[iter])\n", + " pnts.update_point_positions(centroids[iter])\n", + " msh.add_scalar_quantity(\"clusters\", clusters[iter], enabled=True, cmap=\"rainbow\", defined_on=part_type)\n", + "\n", + " ps.set_user_callback(callback_func)\n", + " ps.show()\n", + " ps.remove_all_structures()\n", + "\n", + "visualize_heat_clustering(V, T, C, clusters, centroids, dist, parts, mu=mu)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Next version try modifying the stiffness so that there that the top part is stiffer.\n", + "BC = igl.barycenter(V, T)\n", + "\n", + "min_y = np.min(BC[:, 1])\n", + "max_y = np.max(BC[:, 1])\n", + "min_x = np.min(BC[:, 0])\n", + "max_x = np.max(BC[:, 0])\n", + "y_range = max_y - min_y\n", + "x_range = max_x - min_x\n", + "\n", + "stiffness = np.ones((T.shape[0], 1))\n", + 
"top_mask = (BC[:, 1] > 0.5 * y_range + min_y)\n", + "middle_mask = (BC[:, 0] > 0.4 * x_range + min_x) & (BC[:, 0] < 0.6 * x_range + min_x)\n", + "mask = top_mask & middle_mask\n", + "stiffness[mask] = 100.0\n", + "k = 8\n", + "data = sk.heat_clustering_precomp(V, T, mu=stiffness, k=k, mode=mode)\n", + "parts, C, dist, clusters, centroids = sk.heat_clustering_solve(data)\n", + "visualize_heat_clustering(V, T, C, clusters, centroids, dist, parts, mu=stiffness)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "simkit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.16" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/run_elastics_reduced.py b/examples/run_elastics_reduced.py index 6beb743..3253bf9 100644 --- a/examples/run_elastics_reduced.py +++ b/examples/run_elastics_reduced.py @@ -2,6 +2,7 @@ import igl import polyscope as ps import scipy as sp +# import gpytoolbox as gpy from simkit import ( deformation_jacobian, @@ -20,10 +21,13 @@ view_cubature, view_displacement_modes, view_scalar_modes) + from simkit.filesystem import get_data_directory from simkit.sims.elastic import ElasticROMMFEMSim, ElasticROMMFEMSimParams, ElasticROMFEMSim, ElasticROMFEMSimParams +from simkit.spectral_moment_fitting_cubature import spectral_moment_fitting_cubature [X, _, _, T, _, _] = igl.readOBJ(get_data_directory() + "2d/cthulu/cthulu.obj") +# [X, T] = gpy.regular_square_mesh(30, 30) X = X[:, 0:2] X = X / max(X.max(axis=0) - X.min(axis=0)) @@ -31,17 +35,24 @@ dim = X.shape[1] m = 10 -k = 100 [W, E, B] = skinning_eigenmodes(X, T, m) + +k = 100 [cI, cW, labels] = spectral_cubature(X, T, W, k, return_labels=True) +# p = 1 will give you m * k total cubature points (roughly) +# p = 2 will give you about 
m choose 2 per cluster. +# For m=k=10, you end up with up to 650 samples. +k = 10 +p = 2 +[cI, cW, labels] = spectral_moment_fitting_cubature(X, T, W, k, return_labels=True, p=p) # B = np.identity(X.shape[0]*dim) # cI = np.arange(T.shape[0]) # cW = volume(X, T) # view_scalar_modes(X, T, W) # view_displacement_modes(X, T, B, a=1) -# view_cubature(X, T, cI, cW, labels) +view_cubature(X, T, cI, cW, labels) A = np.zeros(((W.shape[1], dim, dim+1))) A[:, :dim, :dim] = np.identity(dim) diff --git a/simkit/__init__.py b/simkit/__init__.py index 1b3a65f..826c170 100644 --- a/simkit/__init__.py +++ b/simkit/__init__.py @@ -12,12 +12,14 @@ from .massmatrix import massmatrix from .dirichlet_penalty import dirichlet_penalty +from .dirichlet_laplacian import dirichlet_laplacian from .grad import grad from .volume import volume from .project_into_subspace import project_into_subspace from .pairwise_displacement import pairwise_displacement +from .pairwise_distance import pairwise_distance from .volume import volume from .deformation_jacobian import deformation_jacobian @@ -25,5 +27,9 @@ from .rotation_gradient import rotation_gradient_F from .skinning_eigenmodes import skinning_eigenmodes from .spectral_cubature import spectral_cubature +from .spectral_clustering import spectral_clustering from .gravity_force import gravity_force -from .cluster_grouping_matrices import cluster_grouping_matrices \ No newline at end of file +from .cluster_grouping_matrices import cluster_grouping_matrices +from .average_onto_simplex import average_onto_simplex +from .heat_distance import heat_distance_solve, heat_distance_precompute +from .heat_clustering import heat_clustering_precomp, heat_clustering_solve, HeatClusteringData \ No newline at end of file diff --git a/simkit/heat_clustering.py b/simkit/heat_clustering.py new file mode 100644 index 0000000..5eb35f6 --- /dev/null +++ b/simkit/heat_clustering.py @@ -0,0 +1,257 @@ +from scipy.spatial import cKDTree +import numpy as np +import igl +from 
functools import partial +from concurrent.futures import ThreadPoolExecutor +import simkit as sk + +def distance_all_pairs(A, B): + # Compute all pairwise distances between two sets of points + tree = cKDTree(A) + [D, I] = tree.query(B) + return D, I + +def dist_func_elem(I, data, T): + """Distance function for elements.""" + return np.mean(sk.heat_distance_solve(data, T[I])[T], axis=1) + +def dist_func_nodal(I, data): + """Distance function for nodes.""" + return sk.heat_distance_solve(data, I) + +def centroid(X, WX, labels, cluster_volumes, exclusion): + """ + Compute the closest representative (approximate centroid) within each cluster. + + Parameters: + - X: All point positions (n x d) + - WX: Weighted positions (element-wise volume or mass times X) + - labels: Cluster assignment for each point (length n) + - cluster_volumes: Total volume per cluster (length k) + - exclusion: Indices to exclude from centroid selection + + Returns: + - XI: Indices of representative centroids (length k) + """ + k = len(cluster_volumes) + dim = X.shape[1] + CX = np.zeros((k, dim)) + for d in range(dim): + CX[:, d] = np.bincount(labels, WX[:, d], minlength=k) + CX /= cluster_volumes[:, None] + + # Compute distance from centroid to closest point within the cluster + XI = np.zeros(k, dtype=int) + for i in range(k): + cluster_indices = np.where(labels == i)[0] + cluster_indices = np.setdiff1d(cluster_indices, exclusion) + _, idx = distance_all_pairs(X[cluster_indices], CX[i]) + XI[i] = cluster_indices[idx] + return XI + +def approximate_mediod(labels, cluster_volumes, dist_func, exclusion, num_samples=100): + """ + Approximate the medoid for each cluster using a subset of candidates. 
+ + Parameters: + - labels: Cluster assignment for each element (length n) + - cluster_volumes: Total volume of each cluster + - dist_func: Function that computes distance from a source index to all other points + - exclusion: Indices to exclude + - num_samples: Number of candidates to sample per cluster + + Returns: + - XI: Indices of approximate medoids (length k) + """ + k = len(cluster_volumes) + XI = np.zeros(k, dtype=int) + + for i in range(k): + cluster_id = i + cluster_indices = np.where(labels == cluster_id)[0] # elements within the cluster + cluster_indices = np.setdiff1d(cluster_indices, exclusion) + + num_samples = min(num_samples, len(cluster_indices)) + sample_indices = np.random.choice(cluster_indices, num_samples, replace=False) + + min_sum = np.inf + current_idx = -1 + for idx in sample_indices: + dist = dist_func(idx)[cluster_indices] + sum_dist = np.sum(dist) + if sum_dist < min_sum: + min_sum = sum_dist + current_idx = idx + XI[i] = current_idx + return XI + +class HeatClusteringData: + """ + Container for heat clustering precomputation. + + Attributes: + - X: Points to cluster (either vertices or barycenters) + - XI: Indices of initial cluster representatives (length k) + - C: Current cluster centroid positions + - vol: Volume or weight associated with each point + - distFunc: Distance function used to compute heat distances + - centroidFunc: Function to recompute cluster representatives + """ + def __init__(self, X, XI, C, vol, distFunc, centroidFunc): + self.X = X + self.XI = XI + self.C = C + self.vol = vol + self.distFunc = distFunc + self.centroidFunc = centroidFunc + +def heat_clustering_precomp(V, T, k=30, mu=None, mode='elem', exclusion=None, centroid_type="centroid"): + """ + Precompute clustering data using heat geodesic distances. 
+ + Parameters: + - V: Vertex positions (n x 3) + - T: Connectivity (triangle or tetrahedron indices) + - k: Number of clusters + - mu: Stiffness or weight per element (defaults to uniform) + - mode: 'elem' for element-wise clustering, 'nodal' for vertex-wise + - exclusion: Indices to exclude from seed/centroid selection + - centroid_type: Either 'centroid' (weighted mean) or 'mediod' (minimum sum distance) + + Returns: + - HeatClusteringData instance + """ + if mu is None: + mu = np.ones(len(T)) + + # mass-lumping onto the diagonal + if mode == 'elem': + if T.shape[1] == 3: + vol = 0.5 * igl.doublearea(V, T) + else: + vol = igl.volume(V, T) + X = igl.barycenter(V, T) + elif mode == 'nodal': + vol = igl.massmatrix(V, T) + vol = vol.sum(1).reshape(-1, 1) + vol = np.array(vol).flatten() + X = V + else: + raise ValueError('Unknown mode: {}'.format(mode)) + + dim = X.shape[1] + n = len(X) + XI = np.zeros(k, dtype=int) + C = np.zeros((k, dim)) + rng = np.random.default_rng(seed=0) + + # Exclusion point handling + probabilities = vol.copy() + if exclusion is not None: + probabilities[exclusion] = 0 + + probabilities /= np.sum(probabilities) + + # Initialize first seed + idx = rng.choice(n, 1, p=probabilities.flatten()) + XI[0] = idx + C[0, :] = X[idx] + + # Precompute heat data + t = igl.avg_edge_length(V, T)**2 / np.min(mu) + L = sk.dirichlet_laplacian(V, T, mu=mu) + heat_data = sk.heat_distance_precompute(V, T, L=-L, t=t) + + # Define distance function + if mode == 'elem': + distFunc = partial(dist_func_elem, data=heat_data, T=T) + else: + distFunc = partial(dist_func_nodal, data=heat_data) + + # Initialize minimum distances + minDist = np.full((n,1), np.inf) + + # kmeans++ initialization + for ii in range(1, k): + minDist = np.minimum(minDist, distFunc(XI[ii - 1])) + sampleProbability = np.maximum(minDist, 1e-12) + sampleProbability[exclusion] = 1e-12  # NOTE(review): when exclusion is None (the default), indexing with None acts as np.newaxis and this broadcast assignment overwrites the ENTIRE array with 1e-12, degenerating kmeans++ to uniform sampling — guard with 'if exclusion is not None:' as done for 'probabilities' above. TODO confirm and fix in the source repo. + sampleProbability /= np.sum(sampleProbability) + + if not np.isfinite(sampleProbability).all(): + raise 
ValueError('Sample probability is not finite') + + XI[ii] = rng.choice(n, 1, p=sampleProbability.flatten()) + C[ii, :] = X[XI[ii]] + + # Choose centroid function + if centroid_type == "centroid": + WX = vol[:, None] * X # weighted X for centroids + centroidFunc = lambda labels, cluster_volumes : centroid(X, WX, labels, cluster_volumes, exclusion) + elif centroid_type == "mediod": + centroidFunc = lambda labels, cluster_volumes : approximate_mediod( + labels, cluster_volumes, distFunc, exclusion, num_samples=50) + else: + raise ValueError(f'Unknown centroid type: {centroid_type}') + + return HeatClusteringData(X, XI, C, vol, distFunc, centroidFunc) + +def heat_clustering_solve(data, max_iters=50, save_intermediate=True, tolerance=1e-5): + + """ + Perform Lloyd relaxation clustering using heat geodesic distances. + + Parameters: + - data: HeatClusteringData object + - max_iters: Maximum number of k-means-like refinement steps + - save_intermediate: Whether to store and return history of assignments + - tolerance: Convergence threshold (relative centroid movement) + + Returns: + - labels: Final cluster assignments + - C: Final centroid positions + - dist: Distance matrix (|X| x k) + - clusters: List of cluster assignments at each iteration + - centroids: List of centroids at each iteration + """ + X, XI, C, vol, distFunc = data.X, data.XI, data.C, data.vol, data.distFunc + k = len(C) + dist = np.zeros((len(X), k)) + L = np.linalg.norm(np.max(X, axis=0) - np.min(X, axis=0)) + + clusters=[] + centroids=[] + + # compute initial partitions and distances + # This is usually just 2x faster using threading + with ThreadPoolExecutor() as executor: + dist[:, :] = np.array(list(executor.map(lambda j: distFunc(XI[j]), range(k)))).T + labels = np.argmin(dist, axis=1) + + if save_intermediate: + clusters.append(labels) + centroids.append(C) + + for i in range(max_iters): + C0 = C.copy() + + cluster_volumes = np.bincount(labels, vol, minlength=k) + XI = data.centroidFunc(labels, 
cluster_volumes) + C = X[XI] + + conv = np.linalg.norm(C - C0) / (L * k) + minvol = np.min(cluster_volumes) / np.sum(cluster_volumes) + maxvol = np.max(cluster_volumes) / np.sum(cluster_volumes) + + with ThreadPoolExecutor() as executor: + dist[:, :] = np.array(list(executor.map(lambda j: distFunc(XI[j]), range(k)))).T + labels = np.argmin(dist, axis=1) + if save_intermediate: + clusters.append(labels) + centroids.append(C) + + print(f'Iteration {i}, Conv: {conv:.4g}, Min/Max vol ratio: {minvol:.4g}, {maxvol:.4g}') + + if conv < tolerance: + break + return labels, C, dist, clusters, centroids \ No newline at end of file diff --git a/simkit/heat_distance.py b/simkit/heat_distance.py new file mode 100644 index 0000000..b832c4d --- /dev/null +++ b/simkit/heat_distance.py @@ -0,0 +1,141 @@ +import numpy as np +import scipy.sparse as sp +import igl + +""" +This module extends the heat-based geodesics from +https://github.com/libigl/libigl/blob/main/include/igl/heat_geodesics.h +to support input Laplacians, allowing for stiffness-weighted Laplacians (i.e. compliance distances) +""" +class HeatGeodesicsData: + """ + Container class for precomputed data used in the heat method. + + Attributes: + - Grad: Discrete gradient operator + - M: Mass matrix + - Div: Discrete divergence operator + - Neumann / Dirichlet / Poisson: Data for solving corresponding linear systems + - b: List of boundary vertex indices (for Dirichlet BCs) + - ng: Number of gradient components per simplex (2 in 2D, 3 in 3D) + """ + def __init__(self): + self.Grad = None + self.M = None + self.Div = None + self.Neumann = igl.pyigl_core.min_quad_with_fixed_data() + self.Dirichlet = igl.pyigl_core.min_quad_with_fixed_data() + self.Poisson = igl.pyigl_core.min_quad_with_fixed_data() + self.b = None + self.ng = 0 + +def heat_distance_precompute(V, F, L=None, t=None, data=None): + """ + Precompute the necessary matrices for heat-based geodesic distance computation. 
+ + Parameters: + - V: Vertex positions (n x 3) + - F: Faces or elements (m x 3 or m x 4) + - L: (Optional) custom Laplacian matrix. Defaults to cotangent Laplacian + - t: (Optional) time step for the heat method. Defaults to avg_edge_length^2 + - data: (Optional) existing HeatGeodesicsData object to reuse + + Returns: + - data: Filled-in HeatGeodesicsData object + """ + if data is None: + data = HeatGeodesicsData() + if t is None: + h = igl.avg_edge_length(V, F) + t = h * h + if L is None: + L = igl.cotmatrix(V, F) + + M = igl.massmatrix(V, F) + if F.shape[1] == 3: + dblA = igl.doublearea(V, F) * 0.5 + data.Grad = igl.grad(V, F) * 0.5 # triangle grad is doubled, so half it + else: + dblA = igl.volume(V, F) + data.Grad = igl.grad(V, F) + data.M = M + + data.ng = data.Grad.shape[0] // F.shape[0] + + diag = sp.diags(np.tile(dblA, (1, data.ng)).flatten()) + data.Div = -data.Grad.T @ diag + + Q = M - t * L + O,_,_ = igl.boundary_facets(F) + data.b = np.unique(O.flatten()) + + # create empty sparse matrix + Aeq = sp.csc_matrix((0, Q.shape[1])) + igl.min_quad_with_fixed_precompute(Q, np.array([], dtype=np.int64), Aeq, True, data.Neumann) + if data.b.size > 0: + igl.min_quad_with_fixed_precompute(Q, data.b, Aeq, True, data.Dirichlet) + + L *= -0.5 + Aeq = sp.csc_matrix(M.diagonal()) + igl.min_quad_with_fixed_precompute(L, np.array([], dtype=np.int64), Aeq, True, data.Poisson) + return data + +def heat_distance_solve(data, gamma, mode='average'): + """ + Solve for geodesic distances from a set of source vertices using the heat method. 
+ + Parameters: + - data: Precomputed HeatGeodesicsData object + - gamma: List or array of source vertex indices + - mode: Solve mode; one of ['neumann', 'dirichlet', 'average'] + + Returns: + - D: Approximate geodesic distance vector (n x 1) + """ + n = data.Grad.shape[1] + u0 = np.zeros((n,1)) + u0[gamma] = 1 + + # Create empty matrices 0x0 + Beq = np.zeros((0,0)) + Yn = np.zeros((0, 1)) + Yp = np.ones((0, 1)) + + if mode == 'neumann': + u = igl.min_quad_with_fixed_solve(data.Neumann, data.M @ u0, Yn, Beq) + elif mode == 'dirichlet': + u = igl.min_quad_with_fixed_solve(data.Dirichlet, data.M @ u0, np.zeros((len(data.b), 1)), Beq) + elif mode == 'average': + u = igl.min_quad_with_fixed_solve(data.Neumann, data.M @ u0, Yn, Beq) + uD = igl.min_quad_with_fixed_solve(data.Dirichlet, data.M @ u0, np.zeros((len(data.b), 1)), Beq) + u = 0.5 * (u + uD) + else: + raise ValueError("Invalid mode. Choose from 'neumann', 'dirichlet', or 'average'.") + grad_u = data.Grad @ u + + # Stable normalization + m = data.Grad.shape[0] // data.ng + grad_u = grad_u.reshape(data.ng, m) + + ma = np.max(np.abs(grad_u), axis=0) + ma_safe = np.where(ma == 0, 1.0, ma) + scaled = grad_u / ma_safe + norm = np.linalg.norm(scaled, axis=0) * ma + + # Handle cases where ma or norm is zero or norm is NaN + mask = (ma == 0) | (norm == 0) | np.isnan(norm) + grad_u[:, mask] = 0 + + # Normalize the gradients for non-masked columns + grad_u[:, ~mask] /= norm[~mask] + + # Reshape back to the original shape + grad_u = grad_u.reshape(-1, 1) + + div_X = -data.Div @ grad_u + Beq = np.zeros((1, 1)) # constrain constant to be 0 + D = igl.min_quad_with_fixed_solve(data.Poisson, -div_X, Yp, Beq) + D -= np.mean(D[gamma]) + if np.mean(D) < 0: + D = -D + return D diff --git a/simkit/polyscope/view_cubature.py b/simkit/polyscope/view_cubature.py index 86c8a38..9eddd66 100644 --- a/simkit/polyscope/view_cubature.py +++ b/simkit/polyscope/view_cubature.py @@ -18,7 +18,7 @@ def view_cubature(X, T, cI, cW=None, 
labels=None): mesh.add_scalar_quantity("labels", labels, defined_on='faces', cmap='rainbow', enabled=True) if cW is not None: - points.add_scalar_quantity("cW", cW) + points.add_scalar_quantity("cW", cW.flatten()) # Set the quantity as the point size points.set_point_radius_quantity("cW") diff --git a/simkit/spectral_moment_fitting_cubature.py b/simkit/spectral_moment_fitting_cubature.py new file mode 100644 index 0000000..f98bdba --- /dev/null +++ b/simkit/spectral_moment_fitting_cubature.py @@ -0,0 +1,211 @@ +import igl +import scipy as sp +import numpy as np +from simkit import spectral_clustering, volume, average_onto_simplex, spectral_clustering, pairwise_distance + +from itertools import combinations_with_replacement + +def compute_basis_integrals(volumes, B, p): + """ + Compute the exact integrals of the polynomial basis over elements. + + Parameters: + - volumes: Volumes of the elements (n x 1) + - B: basis matrix (n x m) + - p: Polynomial degree + Returns: + - basis_integrals: Precomputed basis integrals + - A: Matrix of integrals for each basis function in each element + """ + np.random.seed(0) + + # Generate polynomial combinations for the given degree p + polynomial_combinations = [] + for i in range(p+1): # Include combinations of degree 0 to p + polynomial_combinations.extend(combinations_with_replacement(range(B.shape[1]), i)) + + # Compute products for all combinations + products = np.ones((B.shape[0], len(polynomial_combinations))) + for k, idx in enumerate(polynomial_combinations): + products[:, k] = np.prod(B[:, idx], axis=1) + + # Integrate over each element + A = products.T * volumes + + # Sum over all elements to get the basis integrals + basis_integrals = np.sum(A, axis=1) + return basis_integrals, A + + +def cubature_sampling(A, basis_integrals, num_to_sample, tolerance=1e-8, only_positive=True): + """ + Perform cubature sampling to select points based on least-squares error minimization. 
+ + Parameters: + - A: Matrix of integrals for each basis function in each element (N x m) + - basis_integrals: Precomputed integrals for each basis function (N,) + - num_to_sample: Number of points to sample per iteration + - tolerance: Error tolerance for stopping criterion (default 1e-8) + - only_positive: If True, only return positive weights and corresponding indices (default True) + + Returns: + - q_idx: Indices of the cubature points selected + - w: Weights computed from the least-squares solution + - errors: List of errors over the iterations + """ + # Total number of elements (from the second dimension of A) + m = A.shape[1] + + # Initialize the indices of cubature points + q_idx = np.zeros(m + 1, dtype=int) # Can hold up to m points + + i = 0 + errors = [] + + # Per-element errors for sampling probabilities + element_errors = np.ones(m) + + while i < m: + # Set error of already-sampled elements to zero + element_errors[q_idx[:i]] = 0 + + # Normalize errors for sampling + element_errors /= np.sum(element_errors) + + # Sample new cubature points proportional to error + # clamp so we never request more than the m - i - 1 elements still unselected + num_to_sample = min(num_to_sample, m - i - 1) + if num_to_sample == 0: + break + + samples = np.random.choice(m, num_to_sample, replace=False, p=element_errors) + q_idx[i + 1:i + 1 + num_to_sample] = samples + i += num_to_sample + + # Build the linear system df (rows = basis functions at sample points) + df = np.zeros((basis_integrals.shape[0], i + 1)) + for j in range(i + 1): + idx = q_idx[j] + df[:, j] = A[:, idx] + + # Solve least-squares system for weights (non-negative) + wn = sp.optimize.nnls(df, basis_integrals, maxiter=100000)[0] + + # Compute moment fitting error + fitted_moments = df @ wn + error = basis_integrals - fitted_moments + + element_errors = (A.T @ error)**2 + + lsq_error = np.linalg.norm(error) + relative_error = lsq_error / np.linalg.norm(basis_integrals) + # print(f"# points: {i + 1}, # constraints: {len(basis_integrals)}, 
Absolute error: {lsq_error}, Relative error: {relative_error}") + errors.append(relative_error) + + # Check if the error is below the tolerance + if relative_error < tolerance: + break + + # Filter positive weights and corresponding indices if `only_positive` is True + if only_positive: + pos_ids = np.where(wn > 1e-10)[0] + wn_pos = wn[pos_ids] + q_pos = q_idx[pos_ids] + return q_pos, wn_pos, errors + + # Return all weights and indices if `only_positive` is False + return q_idx, wn, errors + +def compute_cluster_cubature(labels, V, T, W, p=2, num_to_sample=500, tolerance=1e-8): + """ + Perform cubature sampling over a set of labeled clusters. + + For each cluster, a reduced cubature rule is computed to approximate integrals + over that region using a subset of elements and associated weights. + + Parameters: + - labels: Integer array of length |T| assigning each tetrahedron to a cluster (values in [0, k-1]) + - V: Vertex positions of the full mesh (|V| x 3) + - T: Tetrahedral element connectivity (|T| x 4) + - W: Per-element mode evaluations (|T| x num_modes) + - p: Degree of polynomial combinations to integrate against (default 2) + - num_to_sample: Maximum number of elements to sample per cluster (default 500) + - tolerance: Error tolerance for cubature fitting (default 1e-8) + + Returns: + - cubature_indices: Concatenated list of selected element indices across all clusters + - cubature_weights: Corresponding cubature weights (volume-scaled) for the selected indices + """ + num_labels = np.max(labels) + 1 + print(f"Number of clusters: {num_labels}") + + cubature_indices = [] + cubature_weights = [] + + volumes = volume(V, T).flatten() + + for cluster_id in range(num_labels): + # Get indices of the current cluster + indices = np.where(labels == cluster_id)[0] + if len(indices) == 0: + print(f"Cluster {cluster_id} has no elements, skipping.") + continue + + print(f"Processing cluster {cluster_id} with {len(indices)} elements") + + # Restrict modes to submesh + 
local_modes = W[indices, :] + sub_volume = volumes[indices] + + # Select modes that are non-zero in the submesh + is_nonzero = np.any(local_modes != 0, axis=0) + local_modes = local_modes[:, is_nonzero] + print (f"Local modes shape after filtering: {local_modes.shape}") + + # Compute basis integrals and sampling matrix + basis_integrals, A = compute_basis_integrals(sub_volume, local_modes, p) + + if len(basis_integrals) > len(indices): + print("Warning: More constraints than elements. Using all elements.") + q_idx = np.arange(len(indices)) + weights = np.ones(len(q_idx)) + else: + num_samples = min(num_to_sample, len(indices) - 1) + q_idx, weights, errors = cubature_sampling(A, basis_integrals, num_samples, + tolerance=tolerance, only_positive=True) + + # Remap to global indices + cubature_indices.extend(indices[q_idx]) + cubature_weights.extend(weights * sub_volume[q_idx]) + + return np.array(cubature_indices), np.array(cubature_weights) + +def spectral_moment_fitting_cubature(X, T, W, k, p=2, return_labels=False): + """ + Generate cubature points using spectral clustering and moment fitting. + + This function averages basis weights onto tetrahedra, clusters them using + spectral clustering, and then computes cubature within each cluster. + + Parameters: + - X: Vertex positions (|V| x 3) + - T: Tetrahedral elements (|T| x 4) + - W: Per-vertex basis weight matrix (|V| x num_modes) + - k: Number of clusters to form + - return_labels: If True, also return the computed cluster labels (default False) + + Returns: + - cubature_indices: Selected element indices used in cubature + - cubature_weights: Corresponding cubature weights + - labels (optional): Cluster label for each tetrahedron + """ + Wt = average_onto_simplex(W, T) + + [labels, _] = spectral_clustering(Wt, k) + ret = compute_cluster_cubature(labels, X, T, Wt, p=p) + + if return_labels: + ret = ret + (labels,) + + return ret +