Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/TensorKit.jl
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ export leftorth, rightorth, leftnull, rightnull,
isposdef, isposdef!, ishermitian, sylvester, rank, cond
export braid, braid!, permute, permute!, transpose, transpose!, twist, twist!, repartition,
repartition!
export catdomain, catcodomain
export catdomain, catcodomain, absorb, absorb!

export OrthogonalFactorizationAlgorithm, QR, QRpos, QL, QLpos, LQ, LQpos, RQ, RQpos,
SVD, SDD, Polar
Expand Down
20 changes: 8 additions & 12 deletions src/spaces/gradedspace.jl
Original file line number Diff line number Diff line change
Expand Up @@ -168,23 +168,19 @@ function fuse(V₁::GradedSpace{I}, V₂::GradedSpace{I}) where {I<:Sector}
end

function infimum(V₁::GradedSpace{I}, V₂::GradedSpace{I}) where {I<:Sector}
if V₁.dual == V₂.dual
typeof(V₁)(c => min(dim(V₁, c), dim(V₂, c))
for c in
union(sectors(V₁), sectors(V₂)), dual in V₁.dual)
else
Visdual = isdual(V₁)
Visdual == isdual(V₂) ||
throw(SpaceMismatch("Infimum of space and dual space does not exist"))
end
return typeof(V₁)((Visdual ? dual(c) : c) => min(dim(V₁, c), dim(V₂, c))
for c in intersect(sectors(V₁), sectors(V₂)); dual=Visdual)
end

function supremum(V₁::GradedSpace{I}, V₂::GradedSpace{I}) where {I<:Sector}
if V₁.dual == V₂.dual
typeof(V₁)(c => max(dim(V₁, c), dim(V₂, c))
for c in
union(sectors(V₁), sectors(V₂)), dual in V₁.dual)
else
Visdual = isdual(V₁)
Visdual == isdual(V₂) ||
throw(SpaceMismatch("Supremum of space and dual space does not exist"))
end
return typeof(V₁)((Visdual ? dual(c) : c) => max(dim(V₁, c), dim(V₂, c))
for c in union(sectors(V₁), sectors(V₂)); dual=Visdual)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Out of curiosity, are both implementations functionally equivalent and is it just nicer to have the error checks at the top. Or is there something technically superior to the new implementation?

Copy link
Member Author

@lkdvos lkdvos Sep 15, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

They would be, but there is a weird error that got introduced at some point which I actually traced to a formatting change where there is a very subtle difference between:

julia> function my_function(args; kwargs...)
           @info "my args are" args
           @info "my kwargs are" kwargs
       end
my_function (generic function with 1 method)

julia> my_function(x for x in 1:4, y = 3)
┌ Info: my args are
└   args = Base.Generator{Base.Iterators.ProductIterator{Tuple{UnitRange{Int64}, Int64}}, var"#20#21"}(var"#20#21"(), Base.Iterators.ProductIterator{Tuple{UnitRange{Int64}, Int64}}((1:4, 3)))
┌ Info: my kwargs are
└   kwargs = Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}()

julia> my_function(x for x in 1:4; y = 3)
┌ Info: my args are
└   args = Base.Generator{UnitRange{Int64}, typeof(identity)}(identity, 1:4)
┌ Info: my kwargs are
│   kwargs = pairs(::NamedTuple) with 1 entry: :y => 3

julia> my_function(x for x in 1:4, y in 3)
┌ Info: my args are
└   args = Base.Generator{Base.Iterators.ProductIterator{Tuple{UnitRange{Int64}, Int64}}, var"#22#23"}(var"#22#23"(), Base.Iterators.ProductIterator{Tuple{UnitRange{Int64}, Int64}}((1:4, 3)))
┌ Info: my kwargs are
└   kwargs = Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}()

julia> my_function(3, y=3)
┌ Info: my args are
└   args = 3
┌ Info: my kwargs are
│   kwargs = pairs(::NamedTuple) with 1 entry: :y => 3

Unfortunately, both iterators and keyword arguments can be specified using `in` and `=`. Because we previously weren't explicit about using `;` to separate positional arguments from keyword arguments, the `dual` keyword argument was misparsed as part of a product iterator, losing the dual flag altogether.
To avoid this confusion, I felt that instead of just changing the `,` to a `;`, it would be more obvious to perform the error check upfront.

end

function Base.show(io::IO, V::GradedSpace{I}) where {I<:Sector}
Expand Down
7 changes: 7 additions & 0 deletions src/spaces/homspace.jl
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,13 @@ function dim(W::HomSpace)
return d
end

"""
    fusiontrees(W::HomSpace)

Return the list of fusion trees for every valid fusion channel of the given `HomSpace`,
as stored in its precomputed fusion block structure.
"""
function fusiontrees(W::HomSpace)
    return fusionblockstructure(W).fusiontreelist
end

# Operations on HomSpaces
# -----------------------
"""
Expand Down
32 changes: 32 additions & 0 deletions src/tensors/linalg.jl
Original file line number Diff line number Diff line change
Expand Up @@ -512,6 +512,38 @@ function catcodomain(t1::TT, t2::TT) where {S,N₂,TT<:AbstractTensorMap{<:Any,S
return t
end

"""
    absorb(tdst::AbstractTensorMap, tsrc::AbstractTensorMap)
    absorb!(tdst::AbstractTensorMap, tsrc::AbstractTensorMap)

Absorb the contents of `tsrc` into `tdst`, which may have different sizes of data.
This is equivalent to the following operation on dense arrays, but also works for symmetric
tensors. Note also that this only overwrites the regions that are shared, and will do
nothing on the ones that are not, so it is up to the user to properly initialize the
destination.

```julia
sub_axes = map((x, y) -> 1:min(x, y), size(tdst), size(tsrc))
tdst[sub_axes...] .= tsrc[sub_axes...]
```
"""
absorb(tdst::AbstractTensorMap, tsrc::AbstractTensorMap) = absorb!(copy(tdst), tsrc)
function absorb!(tdst::AbstractTensorMap, tsrc::AbstractTensorMap)
    # Both tensors must have the same index structure for the shared region to be
    # well-defined. `DimensionMismatch` is the standard Base exception for this.
    numin(tdst) == numin(tsrc) && numout(tdst) == numout(tsrc) ||
        throw(DimensionMismatch("Incompatible number of indices for source and destination"))
    S = spacetype(tdst)
    S == spacetype(tsrc) || throw(SpaceMismatch("incompatible spacetypes"))
    # The shared region corresponds to the elementwise infimum of the domains and
    # codomains; iterate over all fusion channels of that common HomSpace.
    dom = mapreduce(infimum, ⊗, domain(tdst), domain(tsrc); init=one(S))
    cod = mapreduce(infimum, ⊗, codomain(tdst), codomain(tsrc); init=one(S))
    for (f1, f2) in fusiontrees(cod ← dom)
        @inbounds data_dst = tdst[f1, f2]
        @inbounds data_src = tsrc[f1, f2]
        # Restrict the copy to the overlapping sub-block in every dimension.
        sub_axes = map(Base.OneTo ∘ min, size(data_dst), size(data_src))
        data_dst[sub_axes...] .= data_src[sub_axes...]
    end
    return tdst
end

# tensor product of tensors
"""
⊗(t1::AbstractTensorMap, t2::AbstractTensorMap, ...) -> TensorMap
Expand Down
21 changes: 21 additions & 0 deletions test/tensors.jl
Original file line number Diff line number Diff line change
Expand Up @@ -739,6 +739,27 @@ for V in spacelist
@test t ≈ t′
end
end
@timedtestset "Tensor absorption" begin
    # absorbing small into large: all of the source fits, so the norm is preserved
    t1 = zeros(V1 ⊕ V1, V2 ⊗ V3)
    t2 = rand(V1, V2 ⊗ V3)
    t3 = @constinferred absorb(t1, t2)
    @test norm(t3) ≈ norm(t2)
    @test norm(t1) == 0  # non-mutating version leaves the destination untouched
    t4 = @constinferred absorb!(t1, t2)
    @test t1 === t4  # in-place version returns the (mutated) destination
    @test t3 ≈ t4

    # absorbing large into small: only the shared sub-block is copied
    t1 = rand(V1 ⊕ V1, V2 ⊗ V3)
    t2 = zeros(V1, V2 ⊗ V3)
    t3 = @constinferred absorb(t2, t1)
    @test norm(t3) < norm(t1)
    @test norm(t2) == 0
    t4 = @constinferred absorb!(t2, t1)
    @test t2 === t4
    @test t3 ≈ t4
end
end
TensorKit.empty_globalcaches!()
end
Expand Down
Loading