From 8cef71471d70d8fb763c0fa12fcdd040298b4d9b Mon Sep 17 00:00:00 2001 From: Lukas Devos Date: Tue, 9 Sep 2025 15:58:20 -0400 Subject: [PATCH 1/4] Add method for embedding tensors --- src/TensorKit.jl | 2 +- src/spaces/homspace.jl | 7 +++++++ src/tensors/linalg.jl | 30 ++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/src/TensorKit.jl b/src/TensorKit.jl index 29b553d49..dc0dd6ef7 100644 --- a/src/TensorKit.jl +++ b/src/TensorKit.jl @@ -76,7 +76,7 @@ export leftorth, rightorth, leftnull, rightnull, isposdef, isposdef!, ishermitian, sylvester, rank, cond export braid, braid!, permute, permute!, transpose, transpose!, twist, twist!, repartition, repartition! -export catdomain, catcodomain +export catdomain, catcodomain, embed! export OrthogonalFactorizationAlgorithm, QR, QRpos, QL, QLpos, LQ, LQpos, RQ, RQpos, SVD, SDD, Polar diff --git a/src/spaces/homspace.jl b/src/spaces/homspace.jl index d6a06cedd..92188b9ef 100644 --- a/src/spaces/homspace.jl +++ b/src/spaces/homspace.jl @@ -125,6 +125,13 @@ function dim(W::HomSpace) return d end +""" + fusiontrees(W::HomSpace) + +Return the fusiontrees corresponding to all valid fusion channels of a given `HomSpace`. +""" +fusiontrees(W::HomSpace) = fusionblockstructure(W).fusiontreelist + # Operations on HomSpaces # ----------------------- """ diff --git a/src/tensors/linalg.jl b/src/tensors/linalg.jl index f29bdf809..659a42779 100644 --- a/src/tensors/linalg.jl +++ b/src/tensors/linalg.jl @@ -512,6 +512,36 @@ function catcodomain(t1::TT, t2::TT) where {S,N₂,TT<:AbstractTensorMap{<:Any,S return t end +""" + embed!(tdst::AbstactTensorMap, tsrc::AbstractTensorMap) + +Embed the contents of `tsrc` into `tdst`, which may have different sizes of data. +This is equivalent to the following operation on dense arrays, but also works for symmetric +tensors. 
Note also that this only overwrites the regions that are shared, and will do +nothing on the ones that are not, so it is up to the user to properly initialize the +destination. + +```julia +sub_axes = map((x, y) -> 1:min(x, y), size(tdst), size(tsrc)) +tdst[sub_axes...] .= tsrc[sub_axes...] +``` +""" +function embed!(tdst::AbstractTensorMap, tsrc::AbstractTensorMap) + numin(tdst) == numin(tsrc) && numout(tdst) == numout(tsrc) || + throw(DimensionError("Incompatible number of indices for source and destination")) + S = spacetype(tdst) + S == spacetype(tsrc) || throw(SpaceMismatch("incompatible spacetypes")) + dom = mapreduce(infimum, ⊗, domain(tdst), domain(tsrc); init=one(S)) + cod = mapreduce(infimum, ⊗, codomain(tdst), codomain(tsrc); init=one(S)) + for (f1, f2) in fusiontrees(cod ← dom) + @inbounds data_dst = tdst[f1, f2] + @inbounds data_src = tsrc[f1, f2] + sub_axes = map(Base.OneTo ∘ min, size(data_dst), size(data_src)) + data_dst[sub_axes...] .= data_src[sub_axes...] + end + return tdst +end + # tensor product of tensors """ ⊗(t1::AbstractTensorMap, t2::AbstractTensorMap, ...) 
-> TensorMap From f96b2906f402ae89686cebca098a17c53e2e29a7 Mon Sep 17 00:00:00 2001 From: Lukas Devos Date: Tue, 9 Sep 2025 16:15:40 -0400 Subject: [PATCH 2/4] Add small amount of tests --- test/tensors.jl | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/tensors.jl b/test/tensors.jl index 25bc157b9..4d1940d24 100644 --- a/test/tensors.jl +++ b/test/tensors.jl @@ -739,6 +739,18 @@ for V in spacelist @test t ≈ t′ end end + @timedtestset "Tensor embedding" begin + t1 = rand(V1 ⊕ V1, V2 ⊗ V3) + t2 = rand(V1, V2 ⊗ V3) + + # embedding small into large + t3 = @constinferred embed!(zerovector(t1), t2) + @test norm(t3) ≈ norm(t2) + + # embedding large into small + t4 = @constinferred embed!(zerovector(t2), t1) + @test norm(t4) < norm(t1) + end end TensorKit.empty_globalcaches!() end From ba7310934b4bf4110e9864287ae91dcf13fa281e Mon Sep 17 00:00:00 2001 From: Lukas Devos Date: Tue, 9 Sep 2025 16:23:45 -0400 Subject: [PATCH 3/4] Fix issue with kwarg interpreted as iterator --- src/spaces/gradedspace.jl | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/src/spaces/gradedspace.jl b/src/spaces/gradedspace.jl index 7aa746076..63903a1ac 100644 --- a/src/spaces/gradedspace.jl +++ b/src/spaces/gradedspace.jl @@ -168,23 +168,19 @@ function fuse(V₁::GradedSpace{I}, V₂::GradedSpace{I}) where {I<:Sector} end function infimum(V₁::GradedSpace{I}, V₂::GradedSpace{I}) where {I<:Sector} - if V₁.dual == V₂.dual - typeof(V₁)(c => min(dim(V₁, c), dim(V₂, c)) - for c in - union(sectors(V₁), sectors(V₂)), dual in V₁.dual) - else + Visdual = isdual(V₁) + Visdual == isdual(V₂) || throw(SpaceMismatch("Infimum of space and dual space does not exist")) - end + return typeof(V₁)((Visdual ? 
dual(c) : c) => min(dim(V₁, c), dim(V₂, c)) + for c in intersect(sectors(V₁), sectors(V₂)); dual=Visdual) end function supremum(V₁::GradedSpace{I}, V₂::GradedSpace{I}) where {I<:Sector} - if V₁.dual == V₂.dual - typeof(V₁)(c => max(dim(V₁, c), dim(V₂, c)) - for c in - union(sectors(V₁), sectors(V₂)), dual in V₁.dual) - else + Visdual = isdual(V₁) + Visdual == isdual(V₂) || throw(SpaceMismatch("Supremum of space and dual space does not exist")) - end + return typeof(V₁)((Visdual ? dual(c) : c) => max(dim(V₁, c), dim(V₂, c)) + for c in union(sectors(V₁), sectors(V₂)); dual=Visdual) end function Base.show(io::IO, V::GradedSpace{I}) where {I<:Sector} From 2e3265ac37545840ec1bc4786e315b8c12035ce4 Mon Sep 17 00:00:00 2001 From: Lukas Devos Date: Mon, 15 Sep 2025 16:05:45 +0200 Subject: [PATCH 4/4] rename `absorb` and add out-of-place version --- src/TensorKit.jl | 2 +- src/tensors/linalg.jl | 8 +++++--- test/tensors.jl | 25 +++++++++++++++++-------- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/TensorKit.jl b/src/TensorKit.jl index dc0dd6ef7..fc1a5119f 100644 --- a/src/TensorKit.jl +++ b/src/TensorKit.jl @@ -76,7 +76,7 @@ export leftorth, rightorth, leftnull, rightnull, isposdef, isposdef!, ishermitian, sylvester, rank, cond export braid, braid!, permute, permute!, transpose, transpose!, twist, twist!, repartition, repartition! -export catdomain, catcodomain, embed! +export catdomain, catcodomain, absorb, absorb! 
export OrthogonalFactorizationAlgorithm, QR, QRpos, QL, QLpos, LQ, LQpos, RQ, RQpos, SVD, SDD, Polar diff --git a/src/tensors/linalg.jl b/src/tensors/linalg.jl index 659a42779..5eba8414c 100644 --- a/src/tensors/linalg.jl +++ b/src/tensors/linalg.jl @@ -513,9 +513,10 @@ function catcodomain(t1::TT, t2::TT) where {S,N₂,TT<:AbstractTensorMap{<:Any,S end """ -    embed!(tdst::AbstactTensorMap, tsrc::AbstractTensorMap) +    absorb(tdst::AbstractTensorMap, tsrc::AbstractTensorMap) +    absorb!(tdst::AbstractTensorMap, tsrc::AbstractTensorMap) -Embed the contents of `tsrc` into `tdst`, which may have different sizes of data. +Absorb the contents of `tsrc` into `tdst`, which may have different sizes of data. This is equivalent to the following operation on dense arrays, but also works for symmetric tensors. Note also that this only overwrites the regions that are shared, and will do nothing on the ones that are not, so it is up to the user to properly initialize the destination. ```julia sub_axes = map((x, y) -> 1:min(x, y), size(tdst), size(tsrc)) tdst[sub_axes...] .= tsrc[sub_axes...] 
``` """ -function embed!(tdst::AbstractTensorMap, tsrc::AbstractTensorMap) +absorb(tdst::AbstractTensorMap, tsrc::AbstractTensorMap) = absorb!(copy(tdst), tsrc) +function absorb!(tdst::AbstractTensorMap, tsrc::AbstractTensorMap) numin(tdst) == numin(tsrc) && numout(tdst) == numout(tsrc) || throw(DimensionError("Incompatible number of indices for source and destination")) S = spacetype(tdst) diff --git a/test/tensors.jl b/test/tensors.jl index 4d1940d24..30526f2c1 100644 --- a/test/tensors.jl +++ b/test/tensors.jl @@ -739,17 +739,26 @@ for V in spacelist @test t ≈ t′ end end - @timedtestset "Tensor embedding" begin - t1 = rand(V1 ⊕ V1, V2 ⊗ V3) + @timedtestset "Tensor absorption" begin + # absorbing small into large + t1 = zeros(V1 ⊕ V1, V2 ⊗ V3) t2 = rand(V1, V2 ⊗ V3) - - # embedding small into large - t3 = @constinferred embed!(zerovector(t1), t2) + t3 = @constinferred absorb(t1, t2) @test norm(t3) ≈ norm(t2) + @test norm(t1) == 0 + t4 = @constinferred absorb!(t1, t2) + @test t1 === t4 + @test t3 ≈ t4 - # embedding large into small - t4 = @constinferred embed!(zerovector(t2), t1) - @test norm(t4) < norm(t1) + # absorbing large into small + t1 = rand(V1 ⊕ V1, V2 ⊗ V3) + t2 = zeros(V1, V2 ⊗ V3) + t3 = @constinferred absorb(t2, t1) + @test norm(t3) < norm(t1) + @test norm(t2) == 0 + t4 = @constinferred absorb!(t2, t1) + @test t2 === t4 + @test t3 ≈ t4 end end TensorKit.empty_globalcaches!()