diff --git a/.github/workflows/PR_comment.yml b/.github/workflows/PR_comment.yml
new file mode 100644
index 000000000..863acd174
--- /dev/null
+++ b/.github/workflows/PR_comment.yml
@@ -0,0 +1,17 @@
+name: Docs preview comment
+on:
+ pull_request:
+ types: [labeled]
+
+permissions:
+ pull-requests: write
+jobs:
+ pr_comment:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Create PR comment
+ if: github.event_name == 'pull_request' && github.repository == github.event.pull_request.head.repo.full_name && github.event.label.name == 'documentation'
+ uses: thollander/actions-comment-pull-request@v3
+ with:
+ message: 'After the build completes, the updated documentation will be available [here](https://quantumkithub.github.io/TensorKit.jl/previews/PR${{ github.event.number }}/)'
+ comment-tag: 'preview-doc'
diff --git a/CITATION.cff b/CITATION.cff
index 628a9c587..91887c722 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -8,10 +8,10 @@ authors:
given-names: "Jutho"
orcid: "https://orcid.org/0000-0002-0858-291X"
title: "TensorKit.jl"
-version: "0.14.9"
+version: "0.14.10"
doi: "10.5281/zenodo.8421339"
date-released: "2025-07-18"
-url: "https://github.com/Jutho/TensorKit.jl"
+url: "https://github.com/QuantumKitHub/TensorKit.jl"
preferred-citation:
type: article
authors:
diff --git a/Project.toml b/Project.toml
index 20be8e7eb..212e965e2 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "TensorKit"
uuid = "07d1fe3e-3e46-537d-9eac-e9e13d0d4cec"
authors = ["Jutho Haegeman"]
-version = "0.14.9"
+version = "0.14.10"
[deps]
LRUCache = "8ac3fa9e-de4c-5943-b1dc-09c6b5f20637"
diff --git a/README.md b/README.md
index 6437f9d9f..82d0f79bb 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ A Julia package for large-scale tensor computations, with a hint of category the
[ci-img]: https://github.com/QuantumKitHub/TensorKit.jl/actions/workflows/CI.yml/badge.svg
[ci-url]: https://github.com/QuantumKitHub/TensorKit.jl/actions/workflows/CI.yml
-[codecov-img]: https://codecov.io/gh/QuantumKitHub/TensorKit.jl/branch/master/graph/badge.svg
+[codecov-img]: https://codecov.io/gh/QuantumKitHub/TensorKit.jl/branch/main/graph/badge.svg
[codecov-url]: https://codecov.io/gh/QuantumKitHub/TensorKit.jl
[aqua-img]: https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg
diff --git a/benchmark/README.md b/benchmark/README.md
index 2ce857f86..c3415d26e 100644
--- a/benchmark/README.md
+++ b/benchmark/README.md
@@ -12,11 +12,11 @@ To do this, you can use the `--modules` flag to specify which modules to run.
Alternatively, you can use the `TensorKitBenchmarks` module directly, which is designed after `BaseBenchmarks` to allow for conditional loading of the benchmarks.
For a more streamlined CLI experience, you can use [`AirspeedVelocity.jl`](https://github.com/MilesCranmer/AirspeedVelocity.jl) to run the benchmarks.
-The following command will run the benchmarks and compare with the current master branch:
+The following command will run the benchmarks and compare with the current main branch:
```bash
benchpkg TensorKit \
- --rev=dirty,master \
+ --rev=dirty,main \
-o benchmark/results/ \
-exeflags="--threads=4"
```
@@ -25,7 +25,7 @@ To compare with previous results, the following command can be used:
```bash
benchpkgtable TensorKit \
- --rev=dirty,master \
+ --rev=dirty,main \
-i benchmark/results/ \
-o benchmark/results/ \
```
diff --git a/docs/Project.toml b/docs/Project.toml
index 695415bc4..bb471e442 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -1,6 +1,7 @@
[deps]
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+TensorKit = "07d1fe3e-3e46-537d-9eac-e9e13d0d4cec"
TensorKitSectors = "13a9c161-d5da-41f0-bcbd-e1a08ae0647f"
[compat]
diff --git a/docs/make.jl b/docs/make.jl
index 0c2e0a35a..2dab6cbdd 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -17,4 +17,4 @@ makedocs(; modules=[TensorKit, TensorKitSectors],
pages=pages,
pagesonly=true)
-deploydocs(; repo="github.com/Jutho/TensorKit.jl.git", push_preview=true)
+deploydocs(; repo="github.com/QuantumKitHub/TensorKit.jl.git", push_preview=true)
diff --git a/docs/src/index.md b/docs/src/index.md
index e232eb7aa..bf88806fe 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -14,7 +14,7 @@ actually a specific case of the type [`TensorMap`](@ref)) and defines for these
number of vector space operations (scalar multiplication, addition, norms and inner
products), index operations (permutations) and linear algebra operations (multiplication,
factorizations). Finally, tensor contractions can be performed using the `@tensor` macro
-from [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl).
+from [TensorOperations.jl](https://github.com/QuantumKitHub/TensorOperations.jl).
Currently, most effort is oriented towards tensors as they appear in the context of quantum
many body physics and in particular the field of tensor networks. Such tensors often have
diff --git a/docs/src/man/sectors.md b/docs/src/man/sectors.md
index 30590ef00..db7913f1f 100644
--- a/docs/src/man/sectors.md
+++ b/docs/src/man/sectors.md
@@ -30,10 +30,10 @@ completely specified by the values of ``n_a``.
The gain in efficiency (both in memory occupation and computation time) obtained from using
(technically: equivariant) tensor maps is that, by Schur's lemma, they are block diagonal in
the basis of coupled sectors. To exploit this block diagonal form, it is however essential
-that we know the basis transform from the individual (uncoupled) sectors appearing in the
-tensor product form of the domain and codomain, to the totally coupled sectors that label
-the different blocks. We refer to the latter as block sectors. The transformation from the
-uncoupled sectors in the domain (or codomain) of the tensor map to the block sector is
+that we know the basis transformation from the individual (uncoupled) sectors appearing in
+the tensor product form of the domain and codomain, to the totally coupled sectors that
+label the different blocks. We refer to the latter as block sectors. The transformation from
+the uncoupled sectors in the domain (or codomain) of the tensor map to the block sector is
encoded in a fusion tree (or splitting tree). Essentially, it is a sequential application of
pairwise fusion as described by the group's
[Clebsch–Gordan (CG) coefficients](https://en.wikipedia.org/wiki/Clebsch–Gordan_coefficients).
@@ -143,7 +143,7 @@ both instances and in the type domain, and its use will be illustrated further o
The minimal data to completely specify a type of sector are
* the fusion rules, i.e. `` a ⊗ b = ⨁ N^{ab}_{c} c ``; this is implemented by a function
- [`Nsymbol(a,b,c)`](@ref)
+ [`Nsymbol(a, b, c)`](@ref)
* the list of fusion outputs from ``a ⊗ b``; while this information is contained in
``N^{ab}_c``, it might be costly or impossible to iterate over all possible values of
`c` and test `Nsymbol(a,b,c)`; instead we implement for `a ⊗ b` to return an iterable
@@ -155,8 +155,8 @@ The minimal data to completely specify a type of sector are
``N^{a\bar{a}}_{u} = 1``; this is implemented by `conj(a)` from Julia Base;
`dual(a)` also works as alias, but `conj(a)` is the method that should be defined
* the F-symbol or recoupling coefficients ``[F^{abc}_{d}]^f_e``, implemented as the
- function [`Fsymbol(a,b,c,d,e,f)`](@ref)
-* the R-symbol ``R^{ab}_c``, implemented as the function [`Rsymbol(a,b,c)`](@ref)
+ function [`Fsymbol(a, b, c, d, e, f)`](@ref)
+* the R-symbol ``R^{ab}_c``, implemented as the function [`Rsymbol(a, b, c)`](@ref)
For practical reasons, we also require some additional methods to be defined:
* `isreal(::Type{<:Sector})` returns whether the topological data of this type of sector
@@ -166,14 +166,14 @@ For practical reasons, we also require some additional methods to be defined:
R-symbol evaluated with all sectors equal to the identity sector have real `eltype`.
* `hash(a, h)` creates a hash of sectors, because sectors and objects created from them
are used as keys in lookup tables (i.e. dictionaries)
-* `isless(a,b)` associates a canonical order to sectors (of the same type), in order to
+* `isless(a, b)` associates a canonical order to sectors (of the same type), in order to
unambiguously represent representation spaces ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}``.
Further information, such as the quantum dimensions ``d_a`` and Frobenius-Schur indicator
``χ_a`` (only if ``a == \overline{a}``) are encoded in the F-symbol. They are obtained as
[`dim(a)`](@ref) and [`frobeniusschur(a)`](@ref). These functions have default definitions
-which extract the requested data from `Fsymbol(a,conj(a),a,a,one(a),one(a))`, but they can
-be overloaded in case the value can be computed more efficiently.
+which extract the requested data from `Fsymbol(a, conj(a), a, a, one(a), one(a))`, but they
+can be overloaded in case the value can be computed more efficiently.
We also define a parametric type to represent an indexable iterator over the different
values of a sector as
@@ -187,7 +187,7 @@ Note that an instance of the singleton type `SectorValues{I}` is obtained as `va
A new sector `I<:Sector` should define
```julia
Base.iterate(::SectorValues{I}[, state]) = ...
-Base.IteratorSize(::Type{SectorValues{I}}) = # HasLenght() or IsInfinite()
+Base.IteratorSize(::Type{SectorValues{I}}) = # HasLength() or IsInfinite()
# if previous function returns HasLength():
Base.length(::SectorValues{I}) = ...
Base.getindex(::SectorValues{I}, i::Int) = ...
@@ -256,6 +256,7 @@ the cases which have already been implemented. Currently, they all correspond to
of groups.
### [Existing group representations](@id sss_groups)
+
The first sector type is called `Trivial`, and corresponds to the case where there is
actually no symmetry, or thus, the symmetry is the trivial group with only an identity
operation and a trivial representation. Its representation theory is particularly simple:
@@ -314,7 +315,7 @@ Base.isreal(::Type{<:AbelianIrrep}) = true
Nsymbol(a::I, b::I, c::I) where {I<:AbelianIrrep} = c == first(a ⊗ b)
Fsymbol(a::I, b::I, c::I, d::I, e::I, f::I) where {I<:AbelianIrrep} =
- Int(Nsymbol(a, b, e)*Nsymbol(e, c, d)*Nsymbol(b, c, f)*Nsymbol(a, f, d))
+ Int(Nsymbol(a, b, e) * Nsymbol(e, c, d) * Nsymbol(b, c, f) * Nsymbol(a, f, d))
frobeniusschur(a::AbelianIrrep) = 1
Bsymbol(a::I, b::I, c::I) where {I<:AbelianIrrep} = Int(Nsymbol(a, b, c))
Rsymbol(a::I, b::I, c::I) where {I<:AbelianIrrep} = Int(Nsymbol(a, b, c))
@@ -508,13 +509,16 @@ FusionStyle(::Type{CU1Irrep}) = SimpleFusion()
The rest of the implementation can be read in the source code, but is rather long due to all
the different cases for the arguments of `Fsymbol`.
-So far, no sectors have been implemented with `FusionStyle(G) == GenericFusion()`,
-though an example would be the representation theory of ``\mathsf{SU}_N``, i.e. represented
-by the group `SU{N}`, for `N>2`. Such sectors are not yet fully supported; certain
-operations remain to be implemented. Furthermore, the topological data of the
-representation theory of such groups is not readily available and needs to be computed.
+By default, no sectors are included with `FusionStyle(G) == GenericFusion()`, though an
+example would be the representation theory of ``\mathsf{SU}_N``, i.e. represented by the
+group `SU{N}`, for `N>2`. Such sectors are supported through
+[SUNRepresentations.jl](https://github.com/QuantumKitHub/SUNRepresentations.jl), which
+provides numerical routines to compute the topological data of the representation theory of
+these groups, as no general analytic formula is available.
+
### [Combining different sectors](@id sss_productsectors)
+
It is also possible to define two or more different types of symmetries, e.g. when the total
symmetry group is a direct product of individual simple groups. Such sectors are obtained
using the binary operator `⊠`, which can be entered as `\boxtimes`+TAB. First some examples
@@ -546,7 +550,7 @@ We refer to the source file of [`ProductSector`](@ref) for implementation detail
The symbol `⊠` refers to the
[Deligne tensor product](https://ncatlab.org/nlab/show/Deligne+tensor+product+of+abelian+categories)
within the literature on category theory. Indeed, the category of representation of a
-product group `G₁ × G₂` corresponds the Deligne tensor product of the categories of
+product group `G₁ × G₂` corresponds to the Deligne tensor product of the categories of
representations of the two groups separately. But this definition also extends to 𝕜-linear
categories which are not the representation category of a group. Note that `⊠` also works
in the type domain, i.e. `Irrep[ℤ₃] ⊠ Irrep[CU₁]` can be used to create
@@ -561,7 +565,7 @@ Some more examples:
a = Z3Irrep(1) ⊠ Irrep[CU₁](1.5)
a isa Irrep[ℤ₃] ⊠ CU1Irrep
a isa Irrep[ℤ₃ × CU₁]
-a isa Irrep{ℤ₃ × CU₁}
+a isa AbstractIrrep{ℤ₃ × CU₁}
a == Irrep[ℤ₃ × CU₁](1, 1.5)
```
@@ -605,18 +609,7 @@ for each of these three functions that just relies on `Fsymbol`, and alternative
definitions need to be given only if a more efficient version is available.
If `FusionStyle(I) == GenericFusion()`, then the multiple outputs `c` in the tensor
-product of `a` and `b` will be labeled as `i=1`, `2`, …, `Nsymbol(a,b,c)`. Optionally, a
-different label can be provided by defining
-```julia
-TensorKit.vertex_ind2label(i::Int, a::I, b::I, c::I) = ...
-# some label, e.g. a `Char` or `Symbol`
-```
-The following function will then automatically determine the corresponding label type (which
-should not vary, i.e. `vertex_ind2label` should be type stable)
-```julia
-vertex_labeltype(I::Type{<:Sector}) =
- typeof(vertex_ind2label(1, one(I), one(I), one(I)))
-```
+product of `a` and `b` will be labeled as `i=1`, `2`, …, `Nsymbol(a, b, c)`.
The following type, which already appeared in the implementation of `SU2Irrep` above, can be
useful for providing the return type of `a ⊗ b`
@@ -638,26 +631,28 @@ the elements of `set`; if `f` is not provided it is just taken as the function `
### [Generalizations](@id sss_generalsectors)
-As mentioned before, the framework for sectors outlined above depends is in one-to-one
+As mentioned before, the framework for sectors outlined above is in one-to-one
correspondence to the topological data for specifying a unitary (spherical and braided, and
hence ribbon) [fusion category](https://en.wikipedia.org/wiki/Fusion_category), which was
reviewed at the end of the introduction to [category theory](@ref s_categories). For such
categories, the objects are not necessarily vector spaces and the fusion and splitting
tensors ``X^{ab}_{c,μ}`` do not necessarily exist as actual tensors. However, the morphism
spaces ``c → a ⊗ b`` still behave as vector spaces, and the ``X^{ab}_{c,μ}`` act as generic
-basis for that space. As TensorKit.jl does not rely on the ``X^{ab}_{c,μ}`` themselves
-(even when they do exist) it can also deal with such general fusion categories. Note,
-though, that when ``X^{ab}_{c,μ}`` does exist, it is available as `fusiontensor(a,b,c[,μ])`
-(even though it is actually the splitting tensor) and can be useful for checking purposes,
-as illustrated below.
+basis for that space. As TensorKit.jl does not rely on the ``X^{ab}_{c,μ}`` themselves (even
+when they do exist) it can also deal with such general fusion categories. Note, though, that
+when ``X^{ab}_{c,μ}`` does exist, it is available as `fusiontensor(a, b, c, [μ])` (even
+though it is actually the splitting tensor) and can be useful for checking purposes, as
+illustrated below. By default TensorKit includes the Fibonacci category and the Ising
+category, but a list of additional fusion categories is provided in
+[CategoryData.jl](https://github.com/lkdvos/CategoryData.jl).
## [Graded spaces](@id ss_rep)
+
We have introduced `Sector` subtypes as a way to label the irreps or sectors in the
decomposition ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}``. To actually represent such spaces, we now also
-introduce a corresponding type `GradedSpace`, which is a subtype of
-`ElementarySpace{ℂ}`, i.e.
+introduce a corresponding type `GradedSpace`, which is a subtype of `ElementarySpace`, i.e.
```julia
-struct GradedSpace{I<:Sector, D} <: ElementarySpace{ℂ}
+struct GradedSpace{I<:Sector, D} <: ElementarySpace
dims::D
dual::Bool
end
@@ -676,6 +671,7 @@ fusion category (or strictly speaking, a pre-fusion category, as we allow for an
infinite number of simple objects, e.g. the irreps of a continuous group).
### Implementation details
+
As mentioned, the way in which the degeneracy dimensions ``n_a`` are stored depends on the
specific sector type `I`, more specifically on the `IteratorSize` of `values(I)`. If
`IteratorSize(values(I)) isa Union{IsInfinite, SizeUnknown}`, the different sectors ``a``
@@ -695,9 +691,12 @@ If `IteratorSize(values(I)) isa Union{HasLength, HasShape}`, the degeneracy dime
specifically a `NTuple{N, Int}` with `N = length(values(I))`. The methods
`getindex(values(I), i)` and `findindex(values(I), a)` are used to map between a sector
`a ∈ values(I)` and a corresponding index `i ∈ 1:N`. As `N` is a compile time constant,
-these types can be created in a type stable manner.
+these types can be created in a type stable manner. Note however that this implies that for
+large values of `N`, it can be beneficial to define
+`IteratorSize(values(I)) = SizeUnknown()` to not overly burden the compiler.
### Constructing instances
+
As mentioned, the convenience method `Vect[I]` will return the concrete type
`GradedSpace{I,D}` with the matching value of `D`, so that should never be a user's
concern. In fact, for consistency, `Vect[Trivial]` will just return `ComplexSpace`,
@@ -713,7 +712,7 @@ Rep[ℤ₂ × SU₂]
Vect[Irrep[ℤ₂ × SU₂]]
```
Note that we also have the specific alias `U₁Space`. In fact, for all the common groups we
-have a number of alias, both in ASCII and using Unicode:
+have a number of aliases, both in ASCII and using Unicode:
```julia
# ASCII type aliases
const ZNSpace{N} = GradedSpace{ZNIrrep{N}, NTuple{N,Int}}
@@ -753,6 +752,7 @@ Rep[ℤ₂ × SU₂]((0,0) => 3, (1,1/2) => 2, (0,1) => 1) ==
```
### Methods
+
There are a number of methods to work with instances `V` of `GradedSpace`. The
function [`sectortype`](@ref) returns the type of the sector labels. It also works on other
vector spaces, in which case it returns [`Trivial`](@ref). The function [`sectors`](@ref)
@@ -760,19 +760,19 @@ returns an iterator over the different sectors `a` with non-zero `n_a`, for othe
`ElementarySpace` types it returns `(Trivial,)`. The degeneracy dimensions `n_a` can be
extracted as `dim(V, a)`, it properly returns `0` if sector `a` is not present in the
decomposition of `V`. With [`hassector(V, a)`](@ref) one can check if `V` contains a sector
-`a` with `dim(V,a)>0`. Finally, `dim(V)` returns the total dimension of the space `V`, i.e.
-``∑_a n_a d_a`` or thus `dim(V) = sum(dim(V,a) * dim(a) for a in sectors(V))`. Note that a
+`a` with `dim(V, a) > 0`. Finally, `dim(V)` returns the total dimension of the space `V`, i.e.
+``∑_a n_a d_a`` or thus `dim(V) = sum(dim(V, a) * dim(a) for a in sectors(V))`. Note that a
representation space `V` has certain sectors `a` with dimensions `n_a`, then its dual `V'`
will report to have sectors `dual(a)`, and `dim(V', dual(a)) == n_a`. There is a subtelty
regarding the difference between the dual of a representation space ``R_a^*``, on which the
-conjugate representation acts, and the representation space of the irrep `dual(a)==conj(a)`
-that is isomorphic to the conjugate representation, i.e. ``R_{\overline{a}} ≂ R_a^*`` but
-they are not equal. We return to this in the section on [fusion trees](@ref ss_fusiontrees).
-This is true also in more general fusion categories beyond the representation categories of
-groups.
+conjugate representation acts, and the representation space of the irrep
+`dual(a) == conj(a)` that is isomorphic to the conjugate representation, i.e.
+``R_{\overline{a}} ≂ R_a^*`` but they are not equal. We return to this in the section on
+[fusion trees](@ref ss_fusiontrees). This is true also in more general fusion categories
+beyond the representation categories of groups.
Other methods for `ElementarySpace`, such as [`dual`](@ref), [`fuse`](@ref) and
-[`flip`](@ref) also work. In fact, `GradedSpace` is the reason `flip` exists, cause
+[`flip`](@ref) also work. In fact, `GradedSpace` is the reason `flip` exists, because
in this case it is different than `dual`. The existence of flip originates from the
non-trivial isomorphism between ``R_{\overline{a}}`` and ``R_{a}^*``, i.e. the
representation space of the dual ``\overline{a}`` of sector ``a`` and the dual of the
@@ -782,16 +782,16 @@ such that, if `V = GradedSpace(a=>n_a,...)` then
Furthermore, for two spaces `V1 = GradedSpace(a=>n1_a, ...)` and
`V2 = GradedSpace(a=>n2_a, ...)`, we have
-`infimum(V1,V2) = GradedSpace(a=>min(n1_a,n2_a), ....)` and similarly for
+`infimum(V1, V2) = GradedSpace(a=>min(n1_a, n2_a), ....)` and similarly for
`supremum`, i.e. they act on the degeneracy dimensions of every sector separately.
-Therefore, it can be that the return value of `infimum(V1,V2)` or `supremum(V1,V2)` is
+Therefore, it can be that the return value of `infimum(V1, V2)` or `supremum(V1, V2)` is
neither equal to `V1` or `V2`.
For `W` a `ProductSpace{Vect[I], N}`, [`sectors(W)`](@ref) returns an
iterator that generates all possible combinations of sectors `as` represented as
`NTuple{I,N}`. The function [`dims(W, as)`](@ref) returns the corresponding tuple with
degeneracy dimensions, while [`dim(W, as)`](@ref) returns the product of these dimensions.
-[`hassector(W, as)`](@ref) is equivalent to `dim(W, as)>0`. Finally, there is the function
+[`hassector(W, as)`](@ref) is equivalent to `dim(W, as) > 0`. Finally, there is the function
[`blocksectors(W)`](@ref) which returns a list (of type `Vector`) with all possible "block
sectors" or total/coupled sectors that can result from fusing the individual uncoupled
sectors in `W`. Correspondingly, [`blockdim(W, a)`](@ref) counts the total degeneracy
@@ -800,6 +800,7 @@ of the next section on [Fusion trees](@ref ss_fusiontrees), but first, it's time
examples.
### Examples
+
Let's start with an example involving ``\mathsf{U}_1``:
```@repl sectors
V1 = Rep[U₁](0=>3, 1=>2, -1=>1)
@@ -855,16 +856,17 @@ blockdim(W, SU2Irrep(0))
```
## [Fusion trees](@id ss_fusiontrees)
+
The gain in efficiency (both in memory occupation and computation time) obtained from using
symmetric (equivariant) tensor maps is that, by Schur's lemma, they are block diagonal in
the basis of coupled sectors, i.e. they exhibit block sparsity. To exploit this block
-diagonal form, it is however essential that we know the basis transform from the individual
-(uncoupled) sectors appearing in the tensor product form of the domain and codomain, to the
-totally coupled sectors that label the different blocks. We refer to the latter as block
-sectors, as we already encountered in the previous section [`blocksectors`](@ref) and
-[`blockdim`](@ref) defined on the type [`ProductSpace`](@ref).
+diagonal form, it is however essential that we know the basis transformation from the
+individual (uncoupled) sectors appearing in the tensor product form of the domain and
+codomain, to the totally coupled sectors that label the different blocks. We refer to the
+latter as block sectors, as we already encountered in the previous section
+[`blocksectors`](@ref) and [`blockdim`](@ref) defined on the type [`ProductSpace`](@ref).
-This basis transform consists of a basis of inclusion and projection maps, denoted as
+This basis transformation consists of a basis of inclusion and projection maps, denoted as
``X^{a_1a_2…a_N}_{c,α}: R_c → R_{a_1} ⊗ R_{a_2} ⊗ … ⊗ R_{a_N}`` and their adjoints
``(X^{a_1a_2…a_N}_{c,α})^†``, such that
@@ -887,27 +889,28 @@ To couple or fuse the different sectors together into a single block sector, we
sequentially fuse together two sectors into a single coupled sector, which is then fused
with the next uncoupled sector, using the splitting tensors ``X_{a,b}^{c,μ} : R_c → R_a ⊗
R_b`` and their adjoints. This amounts to the canonical choice of our tensor product, and
-for a given tensor mapping from ``(((W_1 ⊗ W_2) ⊗ W_3) ⊗ … )⊗ W_{N_2})`` to ``(((V_1 ⊗ V_2)
-⊗ V_3) ⊗ … )⊗ V_{N_1})``, the corresponding fusion and splitting trees take the form
+for a given tensor mapping from ``(((W_1 ⊗ W_2) ⊗ W_3) ⊗ … )⊗ W_{N_2})`` to
+``(((V_1 ⊗ V_2) ⊗ V_3) ⊗ … )⊗ V_{N_1})``, the corresponding fusion and splitting trees take
+the form
```@raw html
```
-for the specific case ``N_1=4`` and ``N_2=3``. We can separate this tree into the fusing
-part ``(b_1⊗b_2)⊗b_3 → c`` and the splitting part ``c→(((a_1⊗a_2)⊗a_3)⊗a_4)``. Given that
-the fusion tree can be considered to be the adjoint of a corresponding splitting tree
-``c→(b_1⊗b_2)⊗b_3``, we now first consider splitting trees in isolation. A splitting tree
-which goes from one coupled sector ``c`` to ``N`` uncoupled sectors ``a_1``, ``a_2``, …,
-``a_N`` needs ``N-2`` additional internal sector labels ``e_1``, …, ``e_{N-2}``, and, if
-`FusionStyle(I) isa GenericFusion`, ``N-1`` additional multiplicity labels ``μ_1``,
+for the specific case ``N_1 = 4`` and ``N_2 = 3``. We can separate this tree into the fusing
+part ``(b_1 ⊗ b_2) ⊗ b_3 → c`` and the splitting part ``c→(((a_1 ⊗ a_2) ⊗ a_3) ⊗ a_4)``.
+Given that the fusion tree can be considered to be the adjoint of a corresponding splitting
+tree ``c → (b_1 ⊗ b_2) ⊗ b_3``, we now first consider splitting trees in isolation. A
+splitting tree which goes from one coupled sector ``c`` to ``N`` uncoupled sectors ``a_1``,
+``a_2``, …, ``a_N`` needs ``N-2`` additional internal sector labels ``e_1``, …, ``e_{N-2}``,
+and, if `FusionStyle(I) isa GenericFusion`, ``N-1`` additional multiplicity labels ``μ_1``,
…, ``μ_{N-1}``. We henceforth refer to them as vertex labels, as they are associated with
the vertices of the splitting tree. In the case of `FusionStyle(I) isa UniqueFusion`, the
internal sectors ``e_1``, …, ``e_{N-2}`` are completely fixed, for
`FusionStyle(I) isa MultipleFusion` they can also take different values. In our abstract
-notation of the splitting basis ``X^{a_1a_2…a_N}_{c,α}`` used above, ``α`` can be consided
-a collective label, i.e. ``α = (e_1, …, e_{N-2}; μ₁, … ,μ_{N-1})``. Indeed, we can check
-the orthogonality condition
+notation of the splitting basis ``X^{a_1a_2…a_N}_{c,α}`` used above, ``α`` can be considered a
+collective label, i.e. ``α = (e_1, …, e_{N-2}; μ₁, … ,μ_{N-1})``. Indeed, we can check the
+orthogonality condition
``(X^{a_1a_2…a_N}_{c,α})^† ∘ X^{a_1a_2…a_N}_{c′,α′} = δ_{c,c′} δ_{α,α′} \mathrm{id}_c``,
which now forces all internal lines ``e_k`` and vertex labels ``μ_l`` to be the same.
@@ -918,12 +921,12 @@ and graphically depicted as an upgoing arrow ``a``) and ``R_{\bar{a}}`` (simply
which the conjugated irrep acts, or the irrep ``\bar{a}`` to which the complex conjugate of
irrep ``a`` is isomorphic. This distinction is however important, when certain uncoupled
sectors in the fusion tree actually originate from a dual space. We use the isomorphisms
-``Z_a:R_a^* → R_{\bar{a}}`` and its adjoint ``Z_a^†:R_{\bar{a}}→R_a^*``, as introduced in
-the section on [topological data of a fusion category](@ref ss_topologicalfusion), to build
-fusion and splitting trees that take the distinction between irreps and their conjugates
-into account. Hence, in the previous example, if e.g. the first and third space in the
-codomain and the second space in the domain of the tensor were dual spaces, the actual pair
-of splitting and fusion tree would look as
+``Z_a : R_a^* → R_{\bar{a}}`` and its adjoint ``Z_a^† : R_{\bar{a}} → R_a^*``, as introduced
+in the section on [topological data of a fusion category](@ref ss_topologicalfusion), to
+build fusion and splitting trees that take the distinction between irreps and their
+conjugates into account. Hence, in the previous example, if e.g. the first and third space
+in the codomain and the second space in the domain of the tensor were dual spaces, the
+actual pair of splitting and fusion tree would look as
```@raw html
@@ -940,12 +943,12 @@ We represent splitting trees and their adjoints using a specific immutable type
`FusionTree` (which actually represents a splitting tree, but fusion tree is a more common
term), defined as
```julia
-struct FusionTree{I<:Sector,N,M,L,T}
+struct FusionTree{I<:Sector,N,M,L}
uncoupled::NTuple{N,I}
coupled::I
isdual::NTuple{N,Bool}
innerlines::NTuple{M,I} # fixed to M = N-2
- vertices::NTuple{L,T} # fixed to L = N-1
+ vertices::NTuple{L,Int} # fixed to L = N-1
end
```
Here, the fields are probably self-explanotary. The `isdual` field indicates whether an
@@ -956,8 +959,7 @@ isomorphism is present (if the corresponding value is `true`) or not. Note that
capabilities, such as checking for equality with `==` and support for
`hash(f::FusionTree, h::UInt)`, as splitting and fusion trees are used as keys in look-up
tables (i.e. `AbstractDictionary` instances) to look up certain parts of the data of a
-tensor. The type of `L` of the vertex labels can be `Nothing` when they are not needed
-(i.e. if `FusionStyle(I) isa MultiplicityFreeFusion`).
+tensor.
`FusionTree` instances are not checked for consistency (i.e. valid fusion rules etc) upon
creation, hence, they are assumed to be created correctly. The most natural way to create
@@ -972,15 +974,15 @@ illustrated with some examples
```@repl sectors
s = Irrep[SU₂](1/2)
-collect(fusiontrees((s,s,s,s)))
-collect(fusiontrees((s,s,s,s,s), s, (true, false, false, true, false)))
-iter = fusiontrees(ntuple(n->s, 16))
-sum(n->1, iter)
+collect(fusiontrees((s, s, s, s)))
+collect(fusiontrees((s, s, s, s, s), s, (true, false, false, true, false)))
+iter = fusiontrees(ntuple(n -> s, 16))
+sum(n -> 1, iter)
length(iter)
-@elapsed sum(n->1, iter)
+@elapsed sum(n -> 1, iter)
@elapsed length(iter)
s2 = s ⊠ s
-collect(fusiontrees((s2,s2,s2,s2)))
+collect(fusiontrees((s2, s2, s2, s2)))
```
Note that `FusionTree` instances are shown (printed) in a way that is valid code to
reproduce them, a property which also holds for both instances of `Sector` and instances of
@@ -1046,7 +1048,8 @@ sector is braided over or under its right neighbor. This interface does not allo
specify the most general braid, and in particular will never wind one line around another,
but can be used as a more general building block for arbitrary braids than the elementary
Artin generators. A graphical example makes this probably more clear, i.e for
-`levels=(1,2,3,4,5)` and `permutation=(5,3,1,4,2)`, the corresponding braid is given by
+`levels = (1, 2, 3, 4, 5)` and `permutation = (5, 3, 1, 4, 2)`, the corresponding braid is
+given by
```@raw html
@@ -1066,39 +1069,40 @@ the interface
Other manipulations which are sometimes needed are
-* [insertat(f1::FusionTree{I,N₁}, i::Int, f2::FusionTree{I,N₂})](@ref TensorKit.insertat) :
+* [`insertat(f1::FusionTree{I,N₁}, i::Int, f2::FusionTree{I,N₂})`](@ref TensorKit.insertat) :
inserts a fusion tree `f2` at the `i`th uncoupled sector of fusion tree `f1` (this
requires that the coupled sector `f2` matches with the `i`th uncoupled sector of `f1`,
and that `!f1.isdual[i]`, i.e. that there is no ``Z``-isomorphism on the `i`th line of
`f1`), and recouple this into a linear combination of trees in canonical order, with
- `N₁+N₂-1` uncoupled sectors, i.e. diagrammatically for `i=3`
+ `N₁ + N₂ - 1` uncoupled sectors, i.e. diagrammatically for `i = 3`
- ```@raw html
-
- ```
+```@raw html
+
+```
-* [split(f::FusionTree{I,N}, M::Int)](@ref TensorKit.split) :
+* [`split(f::FusionTree{I,N}, M::Int)`](@ref TensorKit.split) :
splits a fusion tree `f` into two trees `f1` and `f2`, such that `f1` has the first `M`
- uncoupled sectors of `f`, and `f2` the remaining `N-M`. This function is type stable if `M` is a compile time constant.
+ uncoupled sectors of `f`, and `f2` the remaining `N - M`. This function is type stable
+ if `M` is a compile time constant.
`split(f, M)` is the inverse of `insertat` in the sence that `insertat(f2, 1, f1)`
should return a dictionary with a single key-value pair `f=>1`. Diagrammatically, for
- `M=4`, the function `split` returns
+ `M = 4`, the function `split` returns
- ```@raw html
-
- ```
+```@raw html
+
+```
-* [merge(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, c::I, μ=nothing)](@ref TensorKit.merge) :
+* [`merge(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, c::I, [μ=1])`](@ref TensorKit.merge) :
merges two fusion trees `f1` and `f2` by fusing the coupled sectors of `f1` and `f2`
into a sector `c` (with vertex label `μ` if `FusionStyle(I) == GenericFusion()`),
- and reexpressing the result as a linear combination of fusion trees with `N₁+N₂`
+ and reexpressing the result as a linear combination of fusion trees with `N₁ + N₂`
uncoupled sectors in canonical order. This is a simple application of `insertat`.
Diagrammatically, this operation is represented as:
- ```@raw html
-
- ```
+```@raw html
+
+```
### Manipulations on a splitting - fusion tree pair
@@ -1160,8 +1164,8 @@ The `FusionTree` interface to duality and line bending is given by
which takes a splitting tree `f1` with `N₁` outgoing sectors, a fusion tree `f2` with `N₂`
incoming sectors, and applies line bending such that the resulting splitting and fusion
trees have `N` outgoing sectors, corresponding to the first `N` sectors out of the list
-``(a_1, a_2, …, a_{N_1}, b_{N_2}^*, …, b_{1}^*)`` and `N₁+N₂-N` incoming sectors,
-corresponding to the dual of the last `N₁+N₂-N` sectors from the previous list, in reverse.
+``(a_1, a_2, …, a_{N_1}, b_{N_2}^*, …, b_{1}^*)`` and `N₁ + N₂ - N` incoming sectors,
+corresponding to the dual of the last `N₁ + N₂ - N` sectors from the previous list, in reverse.
This return values are correctly inferred if `N` is a compile time constant.
Graphically, for `N₁ = 4`, `N₂ = 3`, `N = 2` and some particular choice of `isdual` in both
@@ -1185,21 +1189,22 @@ interface provided for this is given by
where we now have splitting tree `f1` with `N₁` outgoing sectors, a fusion tree `f2` with
`N₂` incoming sectors, `levels1` and `levels2` assign a level or depth to the corresponding
uncoupled sectors in `f1` and `f2`, and we represent the new configuration as a pair `p1`
-and `p2`. Together, `(p1..., p2...)` represents a permutation of length `N₁+N₂ = N₁′+N₂′`,
-where `p1` indicates which of the original sectors should appear as outgoing sectors in the
-new splitting tree and `p2` indicates which appear as incoming sectors in the new fusion
-tree. Hereto, we label the uncoupled sectors of `f1` from `1` to `N₁`, followed by the
-uncoupled sectors of `f2` from `N₁+1` to `N₁+N₂`. Note that simply repartitioning the
-splitting and fusion tree such that e.g. all sectors appear in the new splitting tree (i.e.
-are outgoing), amounts to chosing `p1 = (1,..., N₁, N₁+N₂, N₁+N₂-1, ... , N₁+1)` and
-`p2=()`, because the duality isomorphism reverses the order of the tensor product.
+and `p2`. Together, `(p1..., p2...)` represents a permutation of length
+`N₁ + N₂ = N₁′ + N₂′`, where `p1` indicates which of the original sectors should appear as
+outgoing sectors in the new splitting tree and `p2` indicates which appear as incoming
+sectors in the new fusion tree. Hereto, we label the uncoupled sectors of `f1` from `1` to
+`N₁`, followed by the uncoupled sectors of `f2` from `N₁ + 1` to `N₁ + N₂`. Note that simply
+repartitioning the splitting and fusion tree such that e.g. all sectors appear in the new
+splitting tree (i.e. are outgoing), amounts to choosing
+`p1 = (1, ..., N₁, N₁ + N₂, N₁ + N₂ - 1, ..., N₁ + 1)` and `p2 = ()`, because the duality
+isomorphism reverses the order of the tensor product.
This routine is implemented by indeed first making all sectors outgoing using the
`repartition` function discussed above, such that only splitting trees remain, then
braiding those using the routine from the previous subsection such that the new outgoing
sectors appear first, followed by the new incoming sectors (in reverse order), and then
again invoking the `repartition` routine to bring everything in final form. The result is
-again returned as a dictionary where the keys are `(f1′,f2′)` and the values the
+again returned as a dictionary where the keys are `(f1′, f2′)` and the values the
corresponding coefficients.
As before, there is a simplified interface for the case where
@@ -1231,6 +1236,7 @@ tree manipulations (which we nonetheless try to avoid) will not seriously affect
performance of tensor manipulations.
### Inspecting fusion trees as tensors
+
For those cases where the fusion and splitting tensors have an explicit representation as
a tensor, i.e. a morphism in the category `Vect` (this essentially coincides with the case
of group representations), this explicit representation can be created, which can be useful
@@ -1250,11 +1256,11 @@ iter = fusiontrees((s, s, s, s), SU2Irrep(1))
f = first(iter)
convert(Array, f)
-I ≈ convert(Array, FusionTree((SU₂(1/2),), SU₂(1/2), (false,), ()))
+LinearAlgebra.I ≈ convert(Array, FusionTree((SU2Irrep(1/2),), SU2Irrep(1/2), (false,), ()))
Z = adjoint(convert(Array, FusionTree((SU2Irrep(1/2),), SU2Irrep(1/2), (true,), ())))
transpose(Z) ≈ frobeniusschur(SU2Irrep(1/2)) * Z
-I ≈ convert(Array, FusionTree((Irrep[SU₂](1),), Irrep[SU₂](1), (false,), ()))
+LinearAlgebra.I ≈ convert(Array, FusionTree((Irrep[SU₂](1),), Irrep[SU₂](1), (false,), ()))
Z = adjoint(convert(Array, FusionTree((Irrep[SU₂](1),), Irrep[SU₂](1), (true,), ())))
transpose(Z) ≈ frobeniusschur(Irrep[SU₂](1)) * Z
diff --git a/docs/src/man/spaces.md b/docs/src/man/spaces.md
index 9091aaa0e..503730bfe 100644
--- a/docs/src/man/spaces.md
+++ b/docs/src/man/spaces.md
@@ -57,7 +57,7 @@ to introduce some syntactic sugar without committing type piracy. In particular,
```@repl tensorkit
3 ∈ ℝ
5.0 ∈ ℂ
-5.0+1.0*im ∈ ℝ
+5.0 + 1.0 * im ∈ ℝ
Float64 ⊆ ℝ
ComplexF64 ⊆ ℂ
ℝ ⊆ ℂ
@@ -81,17 +81,17 @@ objects of the same concrete type (i.e. with the same type parameters in case of
parametric type). In particular, every `ElementarySpace` should implement the following
methods
-* `dim(::ElementarySpace) -> ::Int` returns the dimension of the space as an `Int`
+* `dim(::ElementarySpace)` returns the dimension of the space
* `dual(::S) where {S<:ElementarySpace} -> ::S` returns the
[dual space](http://en.wikipedia.org/wiki/Dual_space) `dual(V)`, using an instance of
the same concrete type (i.e. not via type parameters); this should satisfy
- `dual(dual(V))==V`
+ `dual(dual(V)) == V`
* `conj(::S) where {S<:ElementarySpace} -> ::S` returns the
[complex conjugate space](http://en.wikipedia.org/wiki/Complex_conjugate_vector_space)
`conj(V)`, using an instance of the same concrete type (i.e. not via type parameters);
- this should satisfy `conj(conj(V))==V` and we automatically have
+ this should satisfy `conj(conj(V)) == V` and we automatically have
`conj(V::ElementarySpace{ℝ}) = V`.
For convenience, the dual of a space `V` can also be obtained as `V'`.
@@ -186,7 +186,7 @@ struct ProductSpace{S<:ElementarySpace, N} <: CompositeSpace{S}
end
```
Given some `V1::S`, `V2::S`, `V3::S` of the same type `S<:ElementarySpace`, we can easily
-construct `ProductSpace{S,3}((V1,V2,V3))` as `ProductSpace(V1,V2,V3)` or using
+construct `ProductSpace{S,3}((V1, V2, V3))` as `ProductSpace(V1, V2, V3)` or using
`V1 ⊗ V2 ⊗ V3`, where `⊗` is simply obtained by typing `\otimes`+TAB. In fact, for
convenience, also the regular multiplication operator `*` acts as tensor product between
vector spaces, and as a consequence so does raising a vector space to a positive integer
@@ -194,7 +194,7 @@ power, i.e.
```@repl tensorkit
V1 = ℂ^2
V2 = ℂ^3
-V1 ⊗ V2 ⊗ V1' == V1 * V2 * V1' == ProductSpace(V1,V2,V1') == ProductSpace(V1,V2) ⊗ V1'
+V1 ⊗ V2 ⊗ V1' == V1 * V2 * V1' == ProductSpace(V1, V2, V1') == ProductSpace(V1, V2) ⊗ V1'
V1^3
dim(V1 ⊗ V2)
dims(V1 ⊗ V2)
diff --git a/docs/src/man/tensors.md b/docs/src/man/tensors.md
index d80ce4ba9..cf3d468de 100644
--- a/docs/src/man/tensors.md
+++ b/docs/src/man/tensors.md
@@ -24,32 +24,33 @@ normal matrix is also denoted as having size `m × n` or is constructed in Julia
`Array(..., (m, n))`, where the first integer `m` refers to the codomain being `m`-
dimensional, and the seond integer `n` to the domain being `n`-dimensional. This also
explains why we have consistently used the symbol ``W`` for spaces in the domain and ``V``
-for spaces in the codomain. A tensor map ``t:(W₁ ⊗ … ⊗ W_{N₂}) → (V₁ ⊗ … ⊗ V_{N₁})`` will
-be created in Julia as `TensorMap(..., V1 ⊗ ... ⊗ VN₁, W1 ⊗ ... ⊗ WN2)`.
+for spaces in the codomain. A tensor map ``t:(W_1 ⊗ … ⊗ W_{N_2}) → (V_1 ⊗ … ⊗ V_{N_1})`` will
+be created in Julia as `TensorMap(..., V1 ⊗ ... ⊗ VN₁, W1 ⊗ ... ⊗ WN₂)`.
Furthermore, the abstract type `AbstractTensor{S,N}` is just a synonym for
`AbstractTensorMap{S,N,0}`, i.e. for tensor maps with an empty domain, which is equivalent
to the unit of the monoidal category, or thus, the field of scalars ``𝕜``.
-Currently, `AbstractTensorMap` has two subtypes. `TensorMap` provides the actual
+Currently, `AbstractTensorMap` has three subtypes. `TensorMap` provides the actual
implementation, where the data of the tensor is stored in a `DenseArray` (more specifically
a `DenseMatrix` as will be explained below). `AdjointTensorMap` is a simple wrapper type to
-denote the adjoint of an existing `TensorMap` object. In the future, additional types could
+denote the adjoint of an existing `TensorMap` object. `DiagonalTensorMap` provides an
+efficient representation of diagonal tensor maps. In the future, additional types could
be defined, to deal with sparse data, static data, diagonal data, etc...
## [Storage of tensor data](@id ss_tensor_storage)
-Before discussion how to construct and initalize a `TensorMap{S}`, let us discuss what is
+Before discussing how to construct and initialize a `TensorMap`, let us discuss what is
meant by 'tensor data' and how it can efficiently and compactly be stored. Let us first
discuss the case `sectortype(S) == Trivial` sector, i.e. the case of no symmetries. In that
-case the data of a tensor `t = TensorMap(..., V1 ⊗ ... ⊗ VN₁, W1 ⊗ ... ⊗ WN2)` can just be
+case the data of a tensor `t = TensorMap(..., V1 ⊗ ... ⊗ VN₁, W1 ⊗ ... ⊗ WN₂)` can just be
represented as a multidimensional array of size
`(dim(V1), dim(V2), …, dim(VN₁), dim(W1), …, dim(WN₂))`
which can also be reshaped into matrix of size
-`(dim(V1)*dim(V2)*…*dim(VN₁), dim(W1)*dim(W2)*…*dim(WN₂))`
+`(dim(V1) * dim(V2) * … * dim(VN₁), dim(W1) * dim(W2) * … * dim(WN₂))`
and is really the matrix representation of the linear map that the tensor represents. In
particular, given a second tensor `t2` whose domain matches with the codomain of `t`,
@@ -74,15 +75,15 @@ sectors `(a1, a2, …, aN₁) ∈ sectors(V1 ⊗ V2 ⊗ … ⊗ VN₁)` and
`(c = first(⊗(a1, …, aN₁))) == first(⊗(b1, …, bN₂))`. The data associated with this takes
the form of a multidimensional array with size
`(dim(V1, a1), …, dim(VN₁, aN₁), dim(W1, b1), …, dim(WN₂, bN₂))`, or equivalently, a matrix
-of with row size `dim(V1, a1)*…*dim(VN₁, aN₁) == dim(codomain, (a1, …, aN₁))` and column
-size `dim(W1, b1)*…*dim(WN₂, aN₂) == dim(domain, (b1, …, bN₂))`.
+with row size `dim(V1, a1) * … * dim(VN₁, aN₁) == dim(codomain, (a1, …, aN₁))` and column
+size `dim(W1, b1) * … * dim(WN₂, bN₂) == dim(domain, (b1, …, bN₂))`.
However, there are multiple combinations of `(a1, …, aN₁)` giving rise to the same `c`, and
so there is data associated with all of these, as well as all possible combinations of
-`(b1, …, bN₂)`. Stacking all matrices for different `(a1,…)` and a fixed value of `(b1,…)`
-underneath each other, and for fixed value of `(a1,…)` and different values of `(b1,…)` next
-to each other, gives rise to a larger block matrix of all data associated with the central
-sector `c`. The size of this matrix is exactly
+`(b1, …, bN₂)`. Stacking all matrices for different `(a1, …)` and a fixed value of `(b1, …)`
+underneath each other, and for fixed value of `(a1, …)` and different values of `(b1, …)`
+next to each other, gives rise to a larger block matrix of all data associated with the
+central sector `c`. The size of this matrix is exactly
`(blockdim(codomain, c), blockdim(domain, c))` and these matrices are exactly the diagonal
blocks whose existence is guaranteed by Schur's lemma, and which are labeled by the coupled
sector `c`. Indeed, if we would represent the tensor map `t` as a matrix without explicitly
@@ -102,7 +103,7 @@ diagonal blocks, the existence of which is provided by Schur's lemma and which a
by the coupled sectors `c`. We directly store these blocks as `DenseMatrix` and gather them
as values in a dictionary, together with the corresponding coupled sector `c` as key. For a
given tensor `t`, we can access a specific block as `block(t, c)`, whereas `blocks(t)`
-yields an iterator over pairs `c=>block(t,c)`.
+yields an iterator over pairs `c => block(t, c)`.
The subblocks corresponding to a particular combination of sectors then correspond to a
particular view for some range of the rows and some range of the colums, i.e.
@@ -121,14 +122,14 @@ which they fuse to `c`. These different possibilities are enumerated by the iter
there is tensor data that takes the form of a multidimensional array, or, after reshaping, a
matrix of size `(dim(codomain, (a1, …, aN₁)), dim(domain, (b1, …, bN₂))))`. Again, we can
stack all such matrices with the same value of `f₁ ∈ fusiontrees((a1, …, aN₁), c)`
-horizontally (as they all have the same number of rows), and with the same value of `f₂ ∈
-fusiontrees((b1, …, bN₂), c)` vertically (as they have the same number of columns). What
-emerges is a large matrix of size `(blockdim(codomain, c), blockdim(domain, c))` containing
-all the tensor data associated with the coupled sector `c`, where `blockdim(P, c) =
-sum(dim(P, s)*length(fusiontrees(s, c)) for s in sectors(P))` for some instance `P` of
-`ProductSpace`. The tensor implementation does not distinguish between abelian or
-non-abelian sectors and still stores these matrices as a `DenseMatrix`, accessible via
-`block(t, c)`.
+horizontally (as they all have the same number of rows), and with the same value of
+`f₂ ∈ fusiontrees((b1, …, bN₂), c)` vertically (as they have the same number of columns).
+What emerges is a large matrix of size `(blockdim(codomain, c), blockdim(domain, c))`
+containing all the tensor data associated with the coupled sector `c`, where
+`blockdim(P, c) = sum(dim(P, s)*length(fusiontrees(s, c)) for s in sectors(P))` for some
+instance `P` of `ProductSpace`. The tensor implementation does not distinguish between
+abelian or non-abelian sectors and still stores these matrices as a `DenseMatrix`,
+accessible via `block(t, c)`.
At first sight, it might now be less clear what the relevance of this block is in relation
to the full matrix representation of the tensor map, where the symmetry is not exploited.
@@ -142,8 +143,8 @@ according to the irrep ``c``. In the abelian case, `dim(c) == 1`, i.e. all irred
representations are one-dimensional and Schur's lemma only dictates that all off-diagonal
blocks are zero. However, in this case the basis transform to the block diagonal
representation is not simply a permutation matrix, but a more general unitary matrix
-composed of the different fusion trees. Indeed, let us denote the fusion trees `f₁ ∈
-fusiontrees((a1, …, aN₁), c)` as ``X^{a_1, …, a_{N₁}}_{c,α}`` where
+composed of the different fusion trees. Indeed, let us denote the fusion trees
+`f₁ ∈ fusiontrees((a1, …, aN₁), c)` as ``X^{a_1, …, a_{N₁}}_{c,α}`` where
``α = (e_1, …, e_{N_1-2}; μ₁, …, μ_{N_1-1})`` is a collective label for the internal sectors
`e` and the vertex degeneracy labels `μ` of a generic fusion tree, as discussed in the
[corresponding section](@ref ss_fusiontrees). The tensor is then represented as
@@ -168,7 +169,7 @@ different fusion (actually splitting) trees, either because of different sectors
vertical lines define the border between regions of different fusion trees from the domain
to `c`, either because of different sectors ``(b_1 … b_{N₂})`` or a different label ``β``.
-To understand this better, we need to understand the basis transform, e.g. on the left
+To understand this better, we need to understand the basis transformation, e.g. on the left
(codomain) side. In more detail, it is given by
```@raw html
@@ -196,7 +197,7 @@ has a fusion tree to ``c'``, labeld by ``c', α''``. Finally then, because the f
do not act on the spaces ``ℂ^{n_{a_1} × … n_{a_{N_1}}}``, the dotted lines which represent
the different ``n_{(a…)} = n_{a_1} × … n_{a_{N_1}}`` dimensions are also drawn vertically.
In particular, for a given sector ``(a…)`` and a specific fusion tree
-``X^{(a…)}_{c,α}: R_{(a…)}→R_c``, the action is ``X^{(a…)}_{c,α} ⊗ 𝟙_{n_{(a…)}}``, which
+``X^{(a…)}_{c,α} : R_{(a…)}→R_c``, the action is ``X^{(a…)}_{c,α} ⊗ 𝟙_{n_{(a…)}}``, which
corresponds to the diagonal green blocks in this drawing where the same matrix
``X^{(a…)}_{c,α}`` (the fusion tree) is repeated along the diagonal. Note that the fusion
tree is not a vector or single column, but a matrix with number of rows equal to
@@ -206,8 +207,8 @@ right, by taking its adjoint. In this particular example, it has two different c
of sectors ``(b…)`` and ``(b…)'``, where both have a single fusion tree to ``c`` as well as
to ``c'``, and ``n_{(b…)}=2``, ``n_{(b…)'}=3``.
-Note that we never explicitly store or act with the basis transforms on the left and the
-right. For composing tensor maps (i.e. multiplying them), these basis transforms just
+Note that we never explicitly store or act with the basis transformations on the left and
+the right. For composing tensor maps (i.e. multiplying them), these basis transforms just
cancel, whereas for tensor factorizations they just go through trivially. They transform
non-trivially when reshuffling the tensor indices, both within or in between the domain and
codomain. For this, however, we can completely rely on the manipulations of fusion trees to
@@ -222,7 +223,7 @@ Hence, as before, we only store the diagonal blocks ``B_c`` of size
``X^{(a_1 … a_{N₁})}_{c,α}`` and ``X^{(b_1 … b_{N₂})}_{c,β}``, henceforth again denoted as
`f₁` and `f₂`, with `f₁.coupled == f₂.coupled == c`. The ranges where this subblock is
living are managed within the tensor implementation, and these subblocks can be accessed
-via `t[f₁,f₂]`, and is returned as a `StridedArray` of size
+via `t[f₁, f₂]`, and is returned as a `StridedArray` of size
``n_{a_1} × n_{a_2} × … × n_{a_{N_1}} × n_{b_1} × … n_{b_{N₂}}``, or in code,
`(dim(V1, a1), dim(V2, a2), …, dim(VN₁, aN₁), dim(W1, b1), …, dim(WN₂, bN₂))`. While the
implementation does not distinguish between `FusionStyle isa UniqueFusion` or
@@ -240,16 +241,15 @@ tensors and tensor maps. From hereon, we focus purely on the interface rather th
implementation.
### Random and uninitialized tensor maps
+
The most convenient set of constructors are those that construct tensors or tensor maps
with random or uninitialized data. They take the form
```julia
f(codomain, domain = one(codomain))
f(eltype::Type{<:Number}, codomain, domain = one(codomain))
-TensorMap(undef, codomain, domain = one(codomain))
-TensorMap(undef, eltype::Type{<:Number}, codomain, domain = one(codomain))
-Tensor(undef, codomain)
-Tensor(undef, eltype::Type{<:Number}, codomain)
+TensorMap{eltype::Type{<:Number}}(undef, codomain, domain = one(codomain))
+Tensor{eltype::Type{<:Number}}(undef, codomain)
```
Here, `f` is any of the typical functions from Base that normally create arrays, namely
`zeros`, `ones`, `rand`, `randn` and `Random.randexp`. Remember that `one(codomain)` is the
@@ -257,15 +257,15 @@ empty `ProductSpace{S,0}()`. The third and fourth calling syntax use the `UndefI
from Julia Base and generates a `TensorMap` with unitialized data, which can thus contain
`NaN`s.
-In all of these constructors, the last two arguments can be replaced by `domain→codomain`
-or `codomain←domain`, where the arrows are obtained as `\rightarrow+TAB` and
+In all of these constructors, the last two arguments can be replaced by `domain → codomain`
+or `codomain ← domain`, where the arrows are obtained as `\rightarrow+TAB` and
`\leftarrow+TAB` and create a `HomSpace` as explained in the section on
[Spaces of morphisms](@ref ss_homspaces). Some examples are perhaps in order
```@repl tensors
t1 = randn(ℂ^2 ⊗ ℂ^3, ℂ^2)
t2 = zeros(Float32, ℂ^2 ⊗ ℂ^3 ← ℂ^2)
-t3 = TensorMap(undef, ℂ^2 → ℂ^2 ⊗ ℂ^3)
+t3 = TensorMap{Float64}(undef, ℂ^2 → ℂ^2 ⊗ ℂ^3)
domain(t1) == domain(t2) == domain(t3)
codomain(t1) == codomain(t2) == codomain(t3)
disp(x) = show(IOContext(Core.stdout, :compact=>false), "text/plain", trunc.(x; digits = 3));
@@ -290,8 +290,6 @@ standard library.
Support for static or sparse data is currently unavailable, and if it would be implemented,
it would lead to new subtypes of `AbstractTensorMap` which are distinct from `TensorMap`.
Future implementations of e.g. `SparseTensorMap` or `StaticTensorMap` could be useful.
-Furthermore, there could be specific implementations for tensors whose blocks are
-`Diagonal`.
### Tensor maps from existing data
@@ -299,14 +297,14 @@ To create a `TensorMap` with existing data, one can use the aforementioned form
the function `f` replaced with the actual data, i.e. `TensorMap(data, codomain, domain)` or
any of its equivalents.
-Here, `data` can be of two types. It can be a dictionary (any `Associative` subtype) which
+Here, `data` can be of two types. It can be a dictionary (any `AbstractDict` subtype) which
has blocksectors `c` of type `sectortype(codomain)` as keys, and the corresponding matrix
-blocks as value, i.e. `data[c]` is some `DenseMatrix` of size `(blockdim(codomain, c),
-blockdim(domain, c))`. This is the form of how the data is stored within the `TensorMap`
-objects.
+blocks as value, i.e. `data[c]` is some `DenseMatrix` of size
+`(blockdim(codomain, c), blockdim(domain, c))`. This is the form of how the data is stored
+within the `TensorMap` objects.
For those space types for which a `TensorMap` can be converted to a plain multidimensional
-array, the `data` can also be a general `DenseArray`, either of rank `N₁+N₂` and with
+array, the `data` can also be a general `DenseArray`, either of rank `N₁ + N₂` and with
matching size `(dims(codomain)..., dims(domain)...)`, or just as a `DenseMatrix` with size
`(dim(codomain), dim(domain))`. This is true in particular if the sector type is `Trivial`,
e.g. for `CartesianSpace` or `ComplexSpace`. Then the `data` array is just reshaped into
@@ -357,9 +355,9 @@ sector is two-dimensional, and has an eigenvalue ``+1`` and an eigenvalue ``-1``
To construct the proper `data` in more complicated cases, one has to know where to find
each sector in the range `1:dim(V)` of every index `i` with associated space `V`, as well
as the internal structure of the representation space when the corresponding sector `c` has
-`dim(c)>1`, i.e. in the case of `FusionStyle(c) isa MultipleFusion`. Currently, the only non-
-abelian sectors are `Irrep[SU₂]` and `Irrep[CU₁]`, for which the internal structure is the
-natural one.
+`dim(c) > 1`, i.e. in the case of `FusionStyle(c) isa MultipleFusion`. Currently, the only
+non-abelian sectors are `Irrep[SU₂]` and `Irrep[CU₁]`, for which the internal structure is
+the natural one.
There are some tools available to facilitate finding the proper range of sector `c` in space
`V`, namely `axes(V, c)`. This also works on a `ProductSpace`, with a tuple of sectors. An
@@ -457,17 +455,18 @@ the same domain and codomain); `zero(t)` is the additive identity, i.e. a `Tenso
instance where all entries are zero. For a `t::TensorMap` with `domain(t) == codomain(t)`,
i.e. an endomorphism, `one(t)` creates the identity tensor, i.e. the identity under
composition. As discussed in the section on [linear algebra operations](@ref
-ss_tensor_linalg), we denote composition of tensor maps with the mutliplication operator
+ss_tensor_linalg), we denote composition of tensor maps with the multiplication operator
`*`, such that `one(t)` is the multiplicative identity. Similarly, it can be created as
`id(V)` with `V` the relevant vector space, e.g. `one(t) == id(domain(t))`. The identity
-tensor is currently represented with dense data, and one can use `id(A::Type{<:DenseMatrix},
-V)` to specify the type of `DenseMatrix` (and its `eltype`), e.g. `A = Matrix{Float64}`.
-Finally, it often occurs that we want to construct a specific isomorphism between two spaces
-that are isomorphic but not equal, and for which there is no canonical choice. Hereto, one
-can use the method `u = isomorphism([A::Type{<:DenseMatrix}, ] codomain, domain)`, which
-will explicitly check that the domain and codomain are isomorphic, and return an error
-otherwise. Again, an optional first argument can be given to specify the specific type of
-`DenseMatrix` that is currently used to store the rather trivial data of this tensor. If
+tensor is currently represented with dense data, and one can use
+`id(A::Type{<:DenseMatrix}, V)` to specify the type of `DenseMatrix` (and its `eltype`),
+e.g. `A = Matrix{Float64}`. Finally, it often occurs that we want to construct a specific
+isomorphism between two spaces that are isomorphic but not equal, and for which there is no
+canonical choice. Hereto, one can use the method
+`u = isomorphism([A::Type{<:DenseMatrix}, ] codomain, domain)`, which will explicitly check
+that the domain and codomain are isomorphic, and return an error otherwise. Again, an
+optional first argument can be given to specify the specific type of `DenseMatrix` that is
+currently used to store the rather trivial data of this tensor. If
`InnerProductStyle(u) <: EuclideanProduct`, the same result can be obtained with the method
`u = unitary([A::Type{<:DenseMatrix}, ] codomain, domain)`. Note that reversing the domain
and codomain yields the inverse morphism, which in the case of `EuclideanProduct` coincides
@@ -476,10 +475,10 @@ where `inv` and `adjoint` will be further discussed [below](@ref ss_tensor_linal
Finally, if two spaces `V1` and `V2` are such that `V2` can be embedded in `V1`, i.e. there
exists an inclusion with a left inverse, and furthermore they represent tensor products of
some `ElementarySpace` with `EuclideanProduct`, the function
-`w = isometry([A::Type{<:DenseMatrix}, ], V1, V2)` creates one specific isometric
-embedding, such that `adjoint(w) * w == id(V2)` and `w * adjoint(w)` is some hermitian
-idempotent (a.k.a. orthogonal projector) acting on `V1`. An error will be thrown if such a
-map cannot be constructed for the given domain and codomain.
+`w = isometry([A::Type{<:DenseMatrix}, ], V1, V2)` creates one specific isometric embedding,
+such that `adjoint(w) * w == id(V2)` and `w * adjoint(w)` is some hermitian idempotent
+(a.k.a. orthogonal projector) acting on `V1`. An error will be thrown if such a map cannot
+be constructed for the given domain and codomain.
Let's conclude this section with some examples with `GradedSpace`.
```@repl tensors
@@ -499,8 +498,8 @@ d2 = dim(domain(t))
(matrix = reshape(array, d1, d2)) |> disp
(u = reshape(convert(Array, unitary(codomain(t), fuse(codomain(t)))), d1, d1)) |> disp
(v = reshape(convert(Array, unitary(domain(t), fuse(domain(t)))), d2, d2)) |> disp
-u'*u ≈ I ≈ v'*v
-(u'*matrix*v) |> disp
+u' * u ≈ I ≈ v' * v
+(u' * matrix * v) |> disp
# compare with:
block(t, Z2Irrep(0)) |> disp
block(t, Z2Irrep(1)) |> disp
@@ -540,8 +539,8 @@ d2 = dim(domain(t))
(matrix = reshape(array, d1, d2)) |> disp
(u = reshape(convert(Array, unitary(codomain(t), fuse(codomain(t)))), d1, d1)) |> disp
(v = reshape(convert(Array, unitary(domain(t), fuse(domain(t)))), d2, d2)) |> disp
-u'*u ≈ I ≈ v'*v
-(u'*matrix*v) |> disp
+u' * u ≈ I ≈ v' * v
+(u' * matrix * v) |> disp
# compare with:
block(t, SU2Irrep(0)) |> disp
block(t, SU2Irrep(1)) |> disp
@@ -563,7 +562,7 @@ However, for a general `AbstractTensorMap` this has no meaning. However, we can
to `codomain(t, i) = codomain(t)[i]`. For `j = i-N₁ ∈ (1:N₂)`, this corresponds to
`dual(domain(t, j)) = dual(domain(t)[j])`.
-The total number of indices, i.e. `N₁+N₂`, is given by `numind(t)`, with `N₁ == numout(t)`
+The total number of indices, i.e. `N₁ + N₂`, is given by `numind(t)`, with `N₁ == numout(t)`
and `N₂ == numin(t)`, the number of outgoing and incoming indices. There are also the
unexported methods `TensorKit.codomainind(t)` and `TensorKit.domainind(t)` which return the
tuples `(1, 2, …, N₁)` and `(N₁+1, …, N₁+N₂)`, and are useful for internal purposes. The
@@ -571,9 +570,9 @@ type parameter `S<:ElementarySpace` can be obtained as `spacetype(t)`; the corre
sector can directly obtained as `sectortype(t)` and is `Trivial` when
`S != GradedSpace`. The underlying field scalars of `S` can also directly be obtained as
`field(t)`. This is different from `eltype(t)`, which returns the type of `Number` in the
-tensor data, i.e. the type parameter `T` in the (subtype of) `DenseMatrix{T}` in which the
+tensor data, i.e. the type parameter `T` in the (subtype of) `DenseVector{T}` in which the
matrix blocks are stored. Note that during construction, a (one-time) warning is printed if
-`!(T ⊂ field(S))`. The specific `DenseMatrix{T}` subtype in which the tensor data is stored
+`!(T ⊂ field(S))`. The specific `DenseVector{T}` subtype in which the tensor data is stored
is obtained as `storagetype(t)`. Each of the methods `numind`, `numout`, `numin`,
`TensorKit.codomainind`, `TensorKit.domainind`, `spacetype`, `sectortype`, `field`, `eltype`
and `storagetype` work in the type domain as well, i.e. they are encoded in `typeof(t)`.
@@ -585,9 +584,9 @@ obtained from fusing the uncoupled sectors available in the codomain (i.e. it is
intersection of both `blocksectors(codomain(t))` and `blocksectors(domain(t))`). For a
specific sector `c ∈ blocksectors(t)`, `block(t, c)` returns the corresponding data. Both
are obtained together with `blocks(t)`, which returns an iterator over the pairs
-`c=>block(t, c)`. Furthermore, there is `fusiontrees(t)` which returns an iterator over
-splitting-fusion tree pairs `(f₁,f₂)`, for which the corresponding data is given by
-`t[f₁,f₂]` (i.e. using Base.getindex).
+`c => block(t, c)`. Furthermore, there is `fusiontrees(t)` which returns an iterator over
+splitting-fusion tree pairs `(f₁, f₂)`, for which the corresponding data is given by
+`t[f₁, f₂]` (i.e. using Base.getindex).
Let's again illustrate these methods with an example, continuing with the tensor `t` from
the previous example
@@ -666,14 +665,14 @@ operations, TensorKit.jl reexports a number of efficient in-place methods from
`lmul!` and `rmul!` (for `y ← α*y` and `y ← y*α`, which is typically the same) and `mul!`,
which can also be used for out-of-place scalar multiplication `y ← α*x`.
-For `t::AbstractTensorMap{S}` where `InnerProductStyle(S) <: EuclideanProduct`, we can
-compute `norm(t)`, and for two such instances, the inner product `dot(t1, t2)`, provided
-`t1` and `t2` have the same domain and codomain. Furthermore, there is `normalize(t)` and
+For `S = spacetype(t)` where `InnerProductStyle(S) <: EuclideanProduct`, we can compute
+`norm(t)`, and for two such instances, the inner product `dot(t1, t2)`, provided `t1` and
+`t2` have the same domain and codomain. Furthermore, there is `normalize(t)` and
`normalize!(t)` to return a scaled version of `t` with unit norm. These operations should
also exist for `InnerProductStyle(S) <: HasInnerProduct`, but require an interface for
defining a custom inner product in these spaces. Currently, there is no concrete subtype of
`HasInnerProduct` that is not an `EuclideanProduct`. In particular, `CartesianSpace`,
-`ComplexSpace` and `GradedSpace` all have `InnerProductStyle(V) <: EuclideanProduct`.
+`ComplexSpace` and `GradedSpace` all have `InnerProductStyle(S) <: EuclideanProduct`.
With tensors that have `InnerProductStyle(t) <: EuclideanProduct` there is associated an
adjoint operation, given by `adjoint(t)` or simply `t'`, such that
@@ -694,44 +693,45 @@ When tensor map instances are endomorphisms, i.e. they have the same domain and
there is a multiplicative identity which can be obtained as `one(t)` or `one!(t)`, where the
latter overwrites the contents of `t`. The multiplicative identity on a space `V` can also
be obtained using `id(A, V)` as discussed [above](@ref ss_tensor_construction), such that
-for a general homomorphism `t′`, we have `t′ == id(codomain(t′))*t′ == t′*id(domain(t′))`.
-Returning to the case of endomorphisms `t`, we can compute the trace via `tr(t)` and
-exponentiate them using `exp(t)`, or if the contents of `t` can be destroyed in the
-process, `exp!(t)`. Furthermore, there are a number of tensor factorizations for both
-endomorphisms and general homomorphism that we discuss below.
+for a general homomorphism `t′`, we have
+`t′ == id(codomain(t′)) * t′ == t′ * id(domain(t′))`. Returning to the case of
+endomorphisms `t`, we can compute the trace via `tr(t)` and exponentiate them using
+`exp(t)`, or if the contents of `t` can be destroyed in the process, `exp!(t)`. Furthermore,
+there are a number of tensor factorizations for both endomorphisms and general homomorphism
+that we discuss below.
Finally, there are a number of operations that also belong in this paragraph because of
their analogy to common matrix operations. The tensor product of two `TensorMap` instances
`t1` and `t2` is obtained as `t1 ⊗ t2` and results in a new `TensorMap` with
-`codomain(t1⊗t2) = codomain(t1) ⊗ codomain(t2)` and
-`domain(t1⊗t2) = domain(t1) ⊗ domain(t2)`. If we have two `TensorMap{S,N,1}` instances `t1`
-and `t2` with the same codomain, we can combine them in a way that is analoguous to `hcat`,
-i.e. we stack them such that the new tensor `catdomain(t1, t2)` has also the same codomain,
-but has a domain which is `domain(t1) ⊕ domain(t2)`. Similarly, if `t1` and `t2` are of
-type `TensorMap{S,1,N}` and have the same domain, the operation `catcodomain(t1, t2)`
-results in a new tensor with the same domain and a codomain given by
+`codomain(t1 ⊗ t2) = codomain(t1) ⊗ codomain(t2)` and
+`domain(t1 ⊗ t2) = domain(t1) ⊗ domain(t2)`. If we have two `TensorMap{T,S,N,1}` instances
+`t1` and `t2` with the same codomain, we can combine them in a way that is analogous to
+`hcat`, i.e. we stack them such that the new tensor `catdomain(t1, t2)` has also the same
+codomain, but has a domain which is `domain(t1) ⊕ domain(t2)`. Similarly, if `t1` and `t2`
+are of type `TensorMap{T,S,1,N}` and have the same domain, the operation
+`catcodomain(t1, t2)` results in a new tensor with the same domain and a codomain given by
`codomain(t1) ⊕ codomain(t2)`, which is the analogy of `vcat`. Note that direct sum only
-makes sense between `ElementarySpace` objects, i.e. there is no way to give a tensor
-product meaning to a direct sum of tensor product spaces.
+makes sense between `ElementarySpace` objects, i.e. there is no way to give a tensor product
+meaning to a direct sum of tensor product spaces.
Time for some more examples:
```@repl tensors
-t == t + zero(t) == t*id(domain(t)) == id(codomain(t))*t
+t == t + zero(t) == t * id(domain(t)) == id(codomain(t)) * t
t2 = randn(ComplexF64, codomain(t), domain(t));
dot(t2, t)
-tr(t2'*t)
+tr(t2' * t)
dot(t2, t) ≈ dot(t', t2')
dot(t2, t2)
norm(t2)^2
t3 = copy!(similar(t, ComplexF64), t);
t3 == t
rmul!(t3, 0.8);
-t3 ≈ 0.8*t
+t3 ≈ 0.8 * t
axpby!(0.5, t2, 1.3im, t3);
t3 ≈ 0.5 * t2 + 0.8 * 1.3im * t
t4 = randn(fuse(codomain(t)), codomain(t));
-t5 = TensorMap(undef, fuse(codomain(t)), domain(t));
-mul!(t5, t4, t) == t4*t
+t5 = TensorMap{Float64}(undef, fuse(codomain(t)), domain(t));
+mul!(t5, t4, t) == t4 * t
inv(t4) * t4 ≈ id(codomain(t))
t4 * inv(t4) ≈ id(fuse(codomain(t)))
t4 \ (t4 * t) ≈ t
@@ -758,21 +758,19 @@ For this, we use an interface that is closely related to that for manipulating s
fusion tree pairs, namely [`braid`](@ref) and [`permute`](@ref), with the interface
```julia
-braid(t::AbstractTensorMap{S,N₁,N₂}, levels::NTuple{N₁+N₂,Int},
- p1::NTuple{N₁′,Int}, p2::NTuple{N₂′,Int})
+braid(t::AbstractTensorMap{T,S,N₁,N₂}, (p1, p2)::Index2Tuple{N₁′,N₂′},
+      levels::IndexTuple{N₁+N₂})
```
and
```julia
-permute(t::AbstractTensorMap{S,N₁,N₂},
- p1::NTuple{N₁′,Int}, p2::NTuple{N₂′,Int}; copy = false)
+permute(t::AbstractTensorMap{T,S,N₁,N₂}, (p1, p2)::Index2Tuple{N₁′,N₂′}; copy = false)
```
-both of which return an instance of `AbstractTensorMap{S,N₁′,N₂′}`.
+both of which return an instance of `AbstractTensorMap{T,S,N₁′,N₂′}`.
In these methods, `p1` and `p2` specify which of the original tensor indices ranging from
-`1` to `N₁+N₂` make up the new codomain (with `N₁′` spaces) and new domain (with `N₂′`
+`1` to `N₁ + N₂` make up the new codomain (with `N₁′` spaces) and new domain (with `N₂′`
spaces). Hence, `(p1..., p2...)` should be a valid permutation of `1:(N₁+N₂)`. Note that,
throughout TensorKit.jl, permutations are always specified using tuples of `Int`s, for
reasons of type stability. For `braid`, we also need to specify `levels` or depths for each
@@ -789,10 +787,10 @@ data with the input tensor `t`, though this is only possible in specific cases (
`sectortype(S) == Trivial` and `(p1..., p2...) = (1:(N₁+N₂)...)`).
Both `braid` and `permute` come in a version where the result is stored in an already
-existing tensor, i.e. [`braid!(tdst, tsrc, levels, p1, p2)`](@ref) and
-[`permute!(tdst, tsrc, p1, p2)`](@ref).
+existing tensor, i.e. [`braid!(tdst, tsrc, (p1, p2), levels)`](@ref) and
+[`permute!(tdst, tsrc, (p1, p2))`](@ref).
-Another operation that belongs und index manipulations is taking the `transpose` of a
+Another operation that belongs under index manipulations is taking the `transpose` of a
tensor, i.e. `LinearAlgebra.transpose(t)` and `LinearAlgebra.transpose!(tdst, tsrc)`, both
of which are reexported by TensorKit.jl. Note that `transpose(t)` is not simply equal to
reshuffling domain and codomain with
@@ -841,8 +839,8 @@ twist(t, 3) ≈ t
# as twist acts trivially for
BraidingStyle(sectortype(t))
```
-Note that `transpose` acts like one would expect on a `TensorMap{S,1,1}`. On a
-`TensorMap{S,N₁,N₂}`, because `transpose` replaces the codomain with the dual of the
+Note that `transpose` acts like one would expect on a `TensorMap{T,S,1,1}`. On a
+`TensorMap{T,S,N₁,N₂}`, because `transpose` replaces the codomain with the dual of the
domain, which has its tensor product operation reversed, this in the end amounts in a
complete reversal of all tensor indices when representing it as a plain mutli-dimensional
`Array`. Also, note that we have not defined the conjugation of `TensorMap` instances. One
@@ -865,8 +863,8 @@ V2 = GradedSpace{FibonacciAnyon}(:I=>2,:τ=>1)
m = TensorMap(randn, Float32, V1, V2)
transpose(m)
twist(braid(m, (1,2), (2,), (1,)), 1)
-t1 = randn(V1*V2', V2*V1);
-t2 = randn(ComplexF64, V1*V2', V2*V1);
+t1 = randn(V1 * V2', V2 * V1);
+t2 = randn(ComplexF64, V1 * V2', V2 * V1);
dot(t1, t2) ≈ dot(transpose(t1), transpose(t2))
transpose(transpose(t1)) ≈ t1
```
@@ -875,7 +873,7 @@ A final operation that one might expect in this section is to fuse or join indic
inverse, to split a given index into two or more indices. For a plain tensor (i.e. with
`sectortype(t) == Trivial`) amount to the equivalent of `reshape` on the multidimensional
data. However, this represents only one possibility, as there is no canonically unique way
-to embed the tensor product of two spaces `V₁ ⊗ V₂` in a new space `V = fuse(V₁⊗V₂)`. Such a
+to embed the tensor product of two spaces `V1 ⊗ V2` in a new space `V = fuse(V1 ⊗ V2)`. Such a
mapping can always be accompagnied by a basis transform. However, one particular choice is
created by the function `isomorphism`, or for `EuclideanProduct` spaces, `unitary`.
Hence, we can join or fuse two indices of a tensor by first constructing
@@ -889,37 +887,35 @@ next section.
## [Tensor factorizations](@id ss_tensor_factorization)
### Eigenvalue decomposition
+
As tensors are linear maps, they have various kinds of factorizations. Endomorphism, i.e.
tensor maps `t` with `codomain(t) == domain(t)`, have an eigenvalue decomposition. For
this, we overload both `LinearAlgebra.eigen(t; kwargs...)` and
`LinearAlgebra.eigen!(t; kwargs...)`, where the latter destroys `t` in the process. The
keyword arguments are the same that are accepted by `LinearAlgebra.eigen(!)` for matrices.
-The result is returned as `D, V = eigen(t)`, such that `t*V ≈ V*D`. For given
-`t::TensorMap{S,N,N}`, `V` is a `TensorMap{S,N,1}`, whose codomain corresponds to that of
+The result is returned as `D, V = eigen(t)`, such that `t * V ≈ V * D`. For given
+`t::TensorMap{T,S,N,N}`, `V` is a `TensorMap{T,S,N,1}`, whose codomain corresponds to that of
`t`, but whose domain is a single space `S` (or more correctly a `ProductSpace{S,1}`), that
corresponds to `fuse(codomain(t))`. The eigenvalues are encoded in `D`, a
`TensorMap{S,1,1}`, whose domain and codomain correspond to the domain of `V`. Indeed, we
cannot reasonably associate a tensor product structure with the different eigenvalues. Note
-that `D` stores the eigenvalues on the diagonal of a (collection of) `DenseMatrix`
-instance(s), as there is currently no dedicated `DiagonalTensorMap` or diagonal storage
-support.
-
-We also define `LinearAlgebra.ishermitian(t)`, which can only return true for instances of
-`AbstractEuclideanTensorMap`. In all other cases, as the inner product is not defined, there
-is no notion of hermiticity (i.e. we are not working in a `†`-category). For instances of
-`EuclideanTensorMap`, we also define and export the routines `eigh` and `eigh!`, which
-compute the eigenvalue decomposition under the guarantee (not checked) that the map is
-hermitian. Hence, eigenvalues will be real and `V` will be unitary with
-`eltype(V) == eltype(t)`. We also define and export `eig` and `eig!`, which similarly assume
-that the `TensorMap` is not hermitian (hence this does not require `EuclideanTensorMap`),
-and always returns complex values eigenvalues and eigenvectors. Like for matrices,
-`LinearAlgebra.eigen` is type unstable and checks hermiticity at run-time, then falling back
-to either `eig` or `eigh`.
+that `D` stores the eigenvalues in a dedicated `DiagonalTensorMap` type.
+
+We also define `LinearAlgebra.ishermitian(t)`, which can only return true for spacetypes
+that have a Euclidean inner product. In all other cases, as the inner product is not
+defined, there is no notion of hermiticity (i.e. we are not working in a `†`-category). We
+also define and export the routines `eigh` and `eigh!`, which compute the eigenvalue
+decomposition under the guarantee (not checked) that the map is hermitian. Hence,
+eigenvalues will be real and `V` will be unitary with `eltype(V) == eltype(t)`. We also
+define and export `eig` and `eig!`, which similarly assume that the `TensorMap` is not
+hermitian (hence this does not require a Euclidean inner product), and always returns
+complex-valued eigenvalues and eigenvectors. Like for matrices, `LinearAlgebra.eigen` is type
+unstable and checks hermiticity at run-time, then falling back to either `eig` or `eigh`.
### Orthogonal factorizations
Other factorizations that are provided by TensorKit.jl are orthogonal or unitary in nature,
-and thus always require a `AbstractEuclideanTensorMap`. However, they don't require equal
+and thus always require a Euclidean inner product. However, they don't require equal
domain and codomain. Let us first discuss the *singular value decomposition*, for which we
define and export the methods [`tsvd`](@ref) and `tsvd!` (where as always, the latter
destroys the input).
@@ -930,15 +926,15 @@ U, Σ, Vʰ, ϵ = tsvd(t; trunc = notrunc(), p::Real = 2,
```
This computes a (possibly truncated) singular value decomposition of
-`t::TensorMap{S,N₁,N₂}` (with `InnerProductStyle(t)<:EuclideanProduct`), such that
-`norm(t - U*Σ*Vʰ) ≈ ϵ`, where `U::TensorMap{S,N₁,1}`, `S::TensorMap{S,1,1}`,
-`Vʰ::TensorMap{S,1,N₂}` and `ϵ::Real`. `U` is an isometry, i.e. `U'*U` approximates the
-identity, whereas `U*U'` is an idempotent (squares to itself). The same holds for
-`adjoint(Vʰ)`. The domain of `U` equals the domain and codomain of `Σ` and the codomain of
-`Vʰ`. In the case of `trunc = notrunc()` (default value, see below), this space is
-given by `min(fuse(codomain(t)), fuse(domain(t)))`. The singular values are contained in `Σ`
-and are stored on the diagonal of a (collection of) `DenseMatrix` instance(s), similar to
-the eigenvalues before.
+`t::TensorMap{T,S,N₁,N₂}` (with `InnerProductStyle(t)<:EuclideanProduct`), such that
+`norm(t - U * Σ * Vʰ) ≈ ϵ`, where `U::TensorMap{T,S,N₁,1}`,
+`Σ::DiagonalTensorMap{real(T),S}`, `Vʰ::TensorMap{T,S,1,N₂}` and `ϵ::Real`. `U` is an
+isometry, i.e. `U' * U` approximates the identity, whereas `U * U'` is an idempotent
+(squares to itself). The same holds for `adjoint(Vʰ)`. The domain of `U` equals the domain
+and codomain of `Σ` and the codomain of `Vʰ`. In the case of `trunc = notrunc()` (default
+value, see below), this space is given by `min(fuse(codomain(t)), fuse(domain(t)))`. The
+singular values are contained in `Σ` and are stored in a specialized `DiagonalTensorMap`,
+similar to the eigenvalues before.
The keyword argument `trunc` provides a way to control the truncation, and is connected to
the keyword argument `p`. The default value `notrunc()` implies no truncation, and thus
@@ -977,10 +973,10 @@ when one is not interested in truncating a tensor or knowing the singular values
in its image or coimage.
* `Q, R = leftorth(t; alg::OrthogonalFactorizationAlgorithm = QRpos(), kwargs...)`:
- this produces an isometry `Q::TensorMap{S,N₁,1}` (i.e. `Q'*Q` approximates the identity,
- `Q*Q'` is an idempotent, i.e. squares to itself) and a general tensor map
- `R::TensorMap{1,N₂}`, such that `t ≈ Q*R`. Here, the domain of `Q` and thus codomain of
- `R` is a single vector space of type `S` that is typically given by
+ this produces an isometry `Q::TensorMap{T,S,N₁,1}` (i.e. `Q' * Q` approximates the
+ identity, `Q * Q'` is an idempotent, i.e. squares to itself) and a general tensor map
+ `R::TensorMap{T,S,1,N₂}`, such that `t ≈ Q * R`. Here, the domain of `Q` and thus codomain
+ of `R` is a single vector space of type `S` that is typically given by
`min(fuse(codomain(t)), fuse(domain(t)))`.
The underlying algorithm used to compute this decomposition can be chosen among `QR()`,
@@ -994,20 +990,20 @@ in its image or coimage.
`c ∈ blocksectors(t)`.
One can also use `alg = SVD()` or `alg = SDD()`, with extra keywords to control the
- absolute (`atol`) or relative (`rtol`) tolerance. We then set `Q=U` and `R=Σ*Vʰ` from
+ absolute (`atol`) or relative (`rtol`) tolerance. We then set `Q = U` and `R = Σ * Vʰ` from
the corresponding singular value decomposition, where only these singular values
- `σ >= max(atol, norm(t)*rtol)` (and corresponding singular vectors in `U`) are kept.
+ `σ >= max(atol, norm(t) * rtol)` (and corresponding singular vectors in `U`) are kept.
More finegrained control on the chosen singular values can be obtained with `tsvd` and
its `trunc` keyword.
- Finally, `Polar()` sets `Q=U*Vʰ` and `R = (Vʰ)'*Σ*Vʰ`, such that `R` is positive
+ Finally, `Polar()` sets `Q = U * Vʰ` and `R = (Vʰ)' * Σ * Vʰ`, such that `R` is positive
definite; in this case `SDD()` is used to actually compute the singular value
decomposition and no `atol` or `rtol` can be provided.
* `L, Q = rightorth(t; alg::OrthogonalFactorizationAlgorithm = QRpos())`:
- this produces a general tensor map `L::TensorMap{S,N₁,1}` and the adjoint of an isometry
- `Q::TensorMap{S,1,N₂}`, such that `t ≈ L*Q`. Here, the domain of `L` and thus codomain
- of `Q` is a single vector space of type `S` that is typically given by
+ this produces a general tensor map `L::TensorMap{T,S,N₁,1}` and the adjoint of an
+ isometry `Q::TensorMap{T,S,1,N₂}`, such that `t ≈ L * Q`. Here, the domain of `L` and
+ thus codomain of `Q` is a single vector space of type `S` that is typically given by
`min(fuse(codomain(t)), fuse(domain(t)))`.
The underlying algorithm used to compute this decomposition can be chosen among `LQ()`,
@@ -1020,13 +1016,13 @@ in its image or coimage.
`blockdim(codomain(t), c) <= blockdim(domain(t), c)` for all `c ∈ blocksectors(t)`.
One can also use `alg = SVD()` or `alg = SDD()`, with extra keywords to control the
- absolute (`atol`) or relative (`rtol`) tolerance. We then set `L=U*Σ` and `Q=Vʰ` from
+ absolute (`atol`) or relative (`rtol`) tolerance. We then set `L = U * Σ` and `Q = Vʰ` from
the corresponding singular value decomposition, where only these singular values
- `σ >= max(atol, norm(t)*rtol)` (and corresponding singular vectors in `Vʰ`) are kept.
+ `σ >= max(atol, norm(t) * rtol)` (and corresponding singular vectors in `Vʰ`) are kept.
More finegrained control on the chosen singular values can be obtained with `tsvd` and
its `trunc` keyword.
- Finally, `Polar()` sets `L = U*Σ*U'` and `Q=U*Vʰ`, such that `L` is positive definite;
+ Finally, `Polar()` sets `L = U * Σ * U'` and `Q = U * Vʰ`, such that `L` is positive definite;
in this case `SDD()` is used to actually compute the singular value decomposition and no
`atol` or `rtol` can be provided.
@@ -1034,8 +1030,8 @@ Furthermore, we can compute an orthonormal basis for the orthogonal complement o
and of the co-image (i.e. the kernel) with the following methods:
* `N = leftnull(t; alg::OrthogonalFactorizationAlgorithm = QR(), kwargs...)`:
- returns an isometric `TensorMap{S,N₁,1}` (i.e. `N'*N` approximates the identity) such
- that `N'*t` is approximately zero.
+ returns an isometric `TensorMap{T,S,N₁,1}` (i.e. `N' * N` approximates the identity) such
+ that `N' * t` is approximately zero.
Here, `alg` can be `QR()` (`QRpos()` acts identically in this case), which assumes that
`t` is full rank in all of its blocks and only returns an orthonormal basis for the
@@ -1044,11 +1040,11 @@ and of the co-image (i.e. the kernel) with the following methods:
If this is not the case, one can also use `alg = SVD()` or `alg = SDD()`, with extra
keywords to control the absolute (`atol`) or relative (`rtol`) tolerance. We then
construct `N` from the left singular vectors corresponding to singular values
- `σ < max(atol, norm(t)*rtol)`.
+ `σ < max(atol, norm(t) * rtol)`.
* `N = rightnull(t; alg::OrthogonalFactorizationAlgorithm = QR(), kwargs...)`:
- returns a `TensorMap{S,1,N₂}` with isometric adjoint (i.e. `N*N'` approximates the
- identity) such that `t*N'` is approximately zero.
+ returns a `TensorMap{T,S,1,N₂}` with isometric adjoint (i.e. `N * N'` approximates the
+ identity) such that `t * N'` is approximately zero.
Here, `alg` can be `LQ()` (`LQpos()` acts identically in this case), which assumes that
`t` is full rank in all of its blocks and only returns an orthonormal basis for the
@@ -1057,7 +1053,7 @@ and of the co-image (i.e. the kernel) with the following methods:
If this is not the case, one can also use `alg = SVD()` or `alg = SDD()`, with extra
keywords to control the absolute (`atol`) or relative (`rtol`) tolerance. We then
construct `N` from the right singular vectors corresponding to singular values
- `σ < max(atol, norm(t)*rtol)`.
+ `σ < max(atol, norm(t) * rtol)`.
Note that the methods `leftorth`, `rightorth`, `leftnull` and `rightnull` also come in a
form with exclamation mark, i.e. `leftorth!`, `rightorth!`, `leftnull!` and `rightnull!`,
@@ -1073,7 +1069,7 @@ SymmetricBraiding`, we can immediately specify an alternative bipartition of the
`t` in all of these methods, in the form
```julia
-factorize(t::AbstracTensorMap, pleft::NTuple{N₁′,Int}, pright::NTuple{N₂′,Int}; kwargs...)
+factorize(t::AbstractTensorMap, (pleft, pright)::Index2Tuple{N₁′,N₂′}; kwargs...)
```
where `pleft` will be the indices in the codomain of the new tensor map, and `pright` the
@@ -1082,7 +1078,7 @@ indices of the domain. Here, `factorize` is any of the methods `LinearAlgebra.ei
This signature does not allow for the exclamation mark, because it amounts to
```julia
-factorize!(permute(t, pleft, pright; copy = true); kwargs...)
+factorize!(permute(t, (pleft, pright); copy = true); kwargs...)
```
where [`permute`](@ref) was introduced and discussed in the previous section. When the
@@ -1091,33 +1087,33 @@ tensor map in proper form before performing the factorization.
Some examples to conclude this section
```@repl tensors
-V1 = SU₂Space(0=>2,1/2=>1)
-V2 = SU₂Space(0=>1,1/2=>1,1=>1)
+V1 = SU₂Space(0=>2, 1/2=>1)
+V2 = SU₂Space(0=>1, 1/2=>1, 1=>1)
t = randn(V1 ⊗ V1, V2);
U, S, W = tsvd(t);
t ≈ U * S * W
-D, V = eigh(t'*t);
-D ≈ S*S
-U'*U ≈ id(domain(U))
+D, V = eigh(t' * t);
+D ≈ S * S
+U' * U ≈ id(domain(U))
S
Q, R = leftorth(t; alg = Polar());
isposdef(R)
-Q ≈ U*W
-R ≈ W'*S*W
+Q ≈ U * W
+R ≈ W' * S * W
U2, S2, W2, ε = tsvd(t; trunc = truncspace(V1));
-W2*W2' ≈ id(codomain(W2))
+W2 * W2' ≈ id(codomain(W2))
S2
-ε ≈ norm(block(S, Irrep[SU₂](1)))*sqrt(dim(Irrep[SU₂](1)))
+ε ≈ norm(block(S, Irrep[SU₂](1))) * sqrt(dim(Irrep[SU₂](1)))
L, Q = rightorth(t, (1,), (2,3));
codomain(L), domain(L), domain(Q)
-Q*Q'
-P = Q'*Q;
-P ≈ P*P
-t′ = permute(t, (1,), (2,3));
+Q * Q'
+P = Q' * Q;
+P ≈ P * P
+t′ = permute(t, ((1,), (2, 3)));
t′ ≈ t′ * P
```
@@ -1169,15 +1165,15 @@ and specifies which indices are connected by the evaluation map using Einstein's
conventation. Indeed, for `BraidingStyle(I) == Bosonic()`, such a tensor contraction can
take the same format as if all tensors were just multi-dimensional arrays. For this, we
rely on the interface provided by the package
-[TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl).
+[TensorOperations.jl](https://github.com/QuantumKitHub/TensorOperations.jl).
The above picture would be encoded as
```julia
-@tensor E[a,b,c,d,e] := A[v,w,d,x]*B[y,z,c,x]*C[v,e,y,b]*D[a,w,z]
+@tensor E[a,b,c,d,e] := A[v,w,d,x] * B[y,z,c,x] * C[v,e,y,b] * D[a,w,z]
```
or
```julia
-@tensor E[:] := A[1,2,-4,3]*B[4,5,-3,3]*C[1,-5,4,-2]*D[-1,2,5]
+@tensor E[:] := A[1,2,-4,3] * B[4,5,-3,3] * C[1,-5,4,-2] * D[-1,2,5]
```
where the latter syntax is known as NCON-style, and labels the unconnected or outgoing
indices with negative integers, and the contracted indices with positive integers.
@@ -1185,9 +1181,9 @@ indices with negative integers, and the contracted indices with positive integer
A number of remarks are in order. TensorOperations.jl accepts both integers and any valid
variable name as dummy label for indices, and everything in between `[ ]` is not resolved in
the current context but interpreted as a dummy label. Here, we label the indices of a
-`TensorMap`, like `A::TensorMap{S,N₁,N₂}`, in a linear fashion, where the first position
+`TensorMap`, like `A::TensorMap{T,S,N₁,N₂}`, in a linear fashion, where the first position
corresponds to the first space in `codomain(A)`, and so forth, up to position `N₁`. Index
-`N₁+1`then corresponds to the first space in `domain(A)`. However, because we have applied
+`N₁ + 1` then corresponds to the first space in `domain(A)`. However, because we have applied
the coevaluation ``η``, it actually corresponds to the corresponding dual space, in
accordance with the interface of [`space(A, i)`](@ref) that we introduced
[above](@ref ss_tensor_properties), and as indiated by the dotted box around ``A`` in the
@@ -1201,11 +1197,11 @@ With the current syntax, we create a new object `E` because we use the definitio
trivial domain, and correspond to the dotted box in the picture above, rather than the
actual morphism `E`. We can also directly define `E` with the correct codomain and domain by rather using
```julia
-@tensor E[a b c;d e] := A[v,w,d,x]*B[y,z,c,x]*C[v,e,y,b]*D[a,w,z]
+@tensor E[a b c;d e] := A[v,w,d,x] * B[y,z,c,x] * C[v,e,y,b] * D[a,w,z]
```
or
```julia
-@tensor E[(a,b,c);(d,e)] := A[v,w,d,x]*B[y,z,c,x]*C[v,e,y,b]*D[a,w,z]
+@tensor E[(a,b,c);(d,e)] := A[v,w,d,x] * B[y,z,c,x] * C[v,e,y,b] * D[a,w,z]
```
where the latter syntax can also be used when the codomain is empty. When using the
assignment operator `=`, the `TensorMap` `E` is assumed to exist and the contents will be
@@ -1215,9 +1211,9 @@ seperately using the above syntax, has no effect, as the bipartition of indices
fixed by the existing object. Hence, if `E` has been created by the previous line of code,
all of the following lines are now equivalent
```julia
-@tensor E[(a,b,c);(d,e)] = A[v,w,d,x]*B[y,z,c,x]*C[v,e,y,b]*D[a,w,z]
-@tensor E[a,b,c,d,e] = A[v w d;x]*B[(y,z,c);(x,)]*C[v e y; b]*D[a,w,z]
-@tensor E[a b; c d e] = A[v; w d x]*B[y,z,c,x]*C[v,e,y,b]*D[a w;z]
+@tensor E[(a,b,c);(d,e)] = A[v,w,d,x] * B[y,z,c,x] * C[v,e,y,b] * D[a,w,z]
+@tensor E[a,b,c,d,e] = A[v w d;x] * B[(y,z,c);(x,)] * C[v e y; b] * D[a,w,z]
+@tensor E[a b; c d e] = A[v; w d x] * B[y,z,c,x] * C[v,e,y,b] * D[a w;z]
```
and none of those will or can change the partition of the indices of `E` into its codomain
and its domain.
@@ -1226,7 +1222,8 @@ Two final remarks are in order. Firstly, the order of the tensors appearing on t
hand side is irrelevant, as we can reorder them by using the allowed moves of the Penrose
graphical calculus, which yields some crossings and a twist. As the latter is trivial, it
can be omitted, and we just use the same rules to evaluate the newly ordered tensor
-network. For the particular case of matrix matrix multiplication, which also captures more general settings by appropriotely combining spaces into a single line, we indeed find
+network. For the particular case of matrix-matrix multiplication, which also captures more
+general settings by appropriately combining spaces into a single line, we indeed find
```@raw html
@@ -1234,34 +1231,34 @@ network. For the particular case of matrix matrix multiplication, which also cap
or thus, the following to lines of code yield the same result
```julia
-@tensor C[i,j] := B[i,k]*A[k,j]
-@tensor C[i,j] := A[k,j]*B[i,k]
+@tensor C[i,j] := B[i,k] * A[k,j]
+@tensor C[i,j] := A[k,j] * B[i,k]
```
Reordering of tensors can be used internally by the `@tensor` macro to evaluate the
contraction in a more efficient manner. In particular, the NCON-style of specifying the
contraction gives the user control over the order, and there are other macros, such as
`@tensoropt`, that try to automate this process. There is also an `@ncon` macro and `ncon`
function, an we recommend reading the
-[manual of TensorOperations.jl](https://jutho.github.io/TensorOperations.jl/stable/) to
+[manual of TensorOperations.jl](https://quantumkithub.github.io/TensorOperations.jl/stable/) to
learn more about the possibilities and how they work.
A final remark involves the use of adjoints of tensors. The current framework is such that
the user should not be to worried about the actual bipartition into codomain and domain of
a given `TensorMap` instance. Indeed, for factorizations one just specifies the requested
-bipartition via the `factorize(t, pleft, pright)` interface, and for tensor contractions
+bipartition via the `factorize(t, (pleft, pright))` interface, and for tensor contractions
the `@contract` macro figures out the correct manipulations automatically. However, when
-wanting to use the `adjoint` of an instance `t::TensorMap{S,N₁,N₂}`, the resulting
-`adjoint(t)` is a `AbstractTensorMap{S,N₂,N₁}` and one need to know the values of `N₁` and
+wanting to use the `adjoint` of an instance `t::TensorMap{T,S,N₁,N₂}`, the resulting
+`adjoint(t)` is an `AbstractTensorMap{T,S,N₂,N₁}` and one needs to know the values of `N₁` and
`N₂` to know exactly where the `i`th index of `t` will end up in `adjoint(t)`, and hence to
know and understand the index order of `t'`. Within the `@tensor` macro, one can instead use
`conj()` on the whole index expression so as to be able to use the original index ordering
-of `t`. Indeed, for matrices of thus, `TensorMap{S,1,1}` instances, this yields exactly the
+of `t`. Indeed, for matrices, or thus `TensorMap{T,S,1,1}` instances, this yields exactly the
equivalence one expects, namely equivalence between the following to expressions.
```julia
-@tensor C[i,j] := B'[i,k]*A[k,j]
-@tensor C[i,j] := conj(B[k,i])*A[k,j]
+@tensor C[i,j] := B'[i,k] * A[k,j]
+@tensor C[i,j] := conj(B[k,i]) * A[k,j]
```
-For e.g. an instance `A::TensorMap{S,3,2}`, the following two syntaxes have the same effect
+For e.g. an instance `A::TensorMap{T,S,3,2}`, the following two syntaxes have the same effect
within an `@tensor` expression: `conj(A[a,b,c,d,e])` and `A'[d,e,a,b,c]`.
Some examples:
diff --git a/docs/src/man/tutorial.md b/docs/src/man/tutorial.md
index 2facf5356..2c8085915 100644
--- a/docs/src/man/tutorial.md
+++ b/docs/src/man/tutorial.md
@@ -75,7 +75,7 @@ allowed
```@repl tutorial
B′ = randn(ℝ^4 * ℝ^2 * ℝ^3);
space(B′) == space(A)
-C′ = 0.5*A + 2.5*B′
+C′ = 0.5 * A + 2.5 * B′
scalarBA′ = dot(B′,A)
```
@@ -84,16 +84,16 @@ using the routine `permute` (we deliberately choose not to overload `permutedims
Julia Base, for reasons that become clear below):
```@repl tutorial
-space(permute(B′,(3,2,1))) == space(A)
+space(permute(B′, (3, 2, 1))) == space(A)
```
We can contract two tensors using Einstein summation convention, which takes the interface
-from [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl). TensorKit.jl
+from [TensorOperations.jl](https://github.com/QuantumKitHub/TensorOperations.jl). TensorKit.jl
reexports the `@tensor` macro
```@repl tutorial
-@tensor D[a,b,c,d] := A[a,b,e]*B[d,c,e]
-@tensor d = A[a,b,c]*A[a,b,c]
+@tensor D[a,b,c,d] := A[a,b,e] * B[d,c,e]
+@tensor d = A[a,b,c] * A[a,b,c]
d ≈ scalarAA ≈ normA²
```
@@ -108,8 +108,8 @@ cast the array into a matrix before applying e.g. the singular value decompositi
TensorKit.jl, one just specifies which indices go to the left (rows) and right (columns)
```@repl tutorial
-U, S, Vd = tsvd(A, (1,3), (2,));
-@tensor A′[a,b,c] := U[a,c,d]*S[d,e]*Vd[e,b];
+U, S, Vd = tsvd(A, ((1,3), (2,)));
+@tensor A′[a,b,c] := U[a,c,d] * S[d,e] * Vd[e,b];
A ≈ A′
U
```
@@ -148,8 +148,8 @@ represent a vector `v` and matrix `m` as
v = randn(ℝ^3)
M₁ = randn(ℝ^4, ℝ^3)
M₂ = randn(ℝ^4 → ℝ^2) # alternative syntax for randn(ℝ^2, ℝ^4)
-w = M₁ * v # matrix vector product
-M₃ = M₂ * M₁ # matrix matrix product
+w = M₁ * v # matrix-vector product
+M₃ = M₂ * M₁ # matrix-matrix product
space(M₃)
```
@@ -161,7 +161,7 @@ that we also support this more mathemical notation, as illustrated in the constr
support the syntax `codomain ← domain` and actually use this as the default way of printing
`HomSpace` instances.
-The 'matrix vector' or 'matrix matrix' product can be computed between any two `TensorMap`
+The 'matrix-vector' or 'matrix-matrix' product can be computed between any two `TensorMap`
instances for which the domain of the first matches with the codomain of the second, e.g.
```@repl tutorial
@@ -178,29 +178,28 @@ unitary, or at least a (left) isometric tensor
codomain(U)
domain(U)
space(U)
-U'*U # should be the identity on the corresponding domain = codomain
-U'*U ≈ one(U'*U)
-P = U*U' # should be a projector
-P*P ≈ P
+U' * U # should be the identity on the corresponding domain = codomain
+U' * U ≈ one(U' * U)
+P = U * U' # should be a projector
+P * P ≈ P
```
Here, the adjoint of a `TensorMap` results in a new tensor map (actually a simple wrapper
of type `AdjointTensorMap <: AbstractTensorMap`) with domain and codomain interchanged.
Our original tensor `A` living in `ℝ^4 * ℝ^2 * ℝ^3` is isomorphic to e.g. a linear map
-`ℝ^3 → ℝ^4 * ℝ^2`. This is where the full power of `permute` emerges. It allows to
-specify a permutation where some indices go to the codomain, and others go to the domain,
-as in
+`ℝ^3 → ℝ^4 * ℝ^2`. This is where the full power of `permute` emerges. It allows to specify a
+permutation where some indices go to the codomain, and others go to the domain, as in
```@repl tutorial
-A2 = permute(A,(1,2),(3,))
+A2 = permute(A, ((1, 2), (3,)))
codomain(A2)
domain(A2)
```
-In fact, `tsvd(A, (1,3),(2,))` is a shorthand for `tsvd(permute(A,(1,3),(2,)))`, where
-`tsvd(A::TensorMap)` will just compute the singular value decomposition according to the
-given codomain and domain of `A`.
+In fact, `tsvd(A, ((1, 3), (2,)))` is a shorthand for `tsvd(permute(A, ((1, 3), (2,))))`,
+where `tsvd(A::TensorMap)` will just compute the singular value decomposition according to
+the given codomain and domain of `A`.
Note, finally, that the `@tensor` macro treats all indices at the same footing and thus
does not distinguish between codomain and domain. The linear numbering is first all indices
@@ -209,13 +208,13 @@ new tensor (i.e. when using `:=`), the default syntax always creates a `Tensor`,
all indices in the codomain.
```@repl tutorial
-@tensor A′[a,b,c] := U[a,c,d]*S[d,e]*Vd[e,b];
+@tensor A′[a,b,c] := U[a,c,d] * S[d,e] * Vd[e,b];
codomain(A′)
domain(A′)
-@tensor A2′[(a,b);(c,)] := U[a,c,d]*S[d,e]*Vd[e,b];
+@tensor A2′[(a,b); (c,)] := U[a,c,d] * S[d,e] * Vd[e,b];
codomain(A2′)
domain(A2′)
-@tensor A2′′[a b; c] := U[a,c,d]*S[d,e]*Vd[e,b];
+@tensor A2′′[a b; c] := U[a,c,d] * S[d,e] * Vd[e,b];
A2 ≈ A2′ == A2′′
```
@@ -244,32 +243,32 @@ where `ℂ` is obtained as `\bbC+TAB` and we also have the non-Unicode alternati
```@repl tutorial
B = randn(ℂ^3 * ℂ^2 * ℂ^4);
-C = im*A + (2.5-0.8im)*B
-scalarBA = dot(B,A)
-scalarAA = dot(A,A)
+C = im * A + (2.5 - 0.8im) * B
+scalarBA = dot(B, A)
+scalarAA = dot(A, A)
normA² = norm(A)^2
-U,S,Vd = tsvd(A,(1,3),(2,));
-@tensor A′[a,b,c] := U[a,c,d]*S[d,e]*Vd[e,b];
+U, S, Vd = tsvd(A, ((1, 3), (2,)));
+@tensor A′[a,b,c] := U[a,c,d] * S[d,e] * Vd[e,b];
A′ ≈ A
-permute(A,(1,3),(2,)) ≈ U*S*Vd
+permute(A, ((1, 3), (2,))) ≈ U * S * Vd
```
However, trying the following
```@repl tutorial
-@tensor D[a,b,c,d] := A[a,b,e]*B[d,c,e]
-@tensor d = A[a,b,c]*A[a,b,c]
+@tensor D[a,b,c,d] := A[a,b,e] * B[d,c,e]
+@tensor d = A[a,b,c] * A[a,b,c]
```
we obtain `SpaceMismatch` errors. The reason for this is that, with `ComplexSpace`, an
index in a space `ℂ^n` can only be contracted with an index in the dual space
`dual(ℂ^n) == (ℂ^n)'`. Because of the complex Euclidean inner product, the dual space is
-equivalent to the complex conjugate space, but not the the space itself.
+equivalent to the complex conjugate space, but not the space itself.
```@repl tutorial
dual(ℂ^3) == conj(ℂ^3) == (ℂ^3)'
(ℂ^3)' == ℂ^3
-@tensor d = conj(A[a,b,c])*A[a,b,c]
+@tensor d = conj(A[a,b,c]) * A[a,b,c]
d ≈ normA²
```
@@ -290,7 +289,7 @@ space(m, 2)
Hence, spaces become their corresponding dual space if they are 'permuted' from the domain
to the codomain or vice versa. Also, spaces in the domain are reported as their dual when
-probing them with `space(A, i)`. Generalizing matrix vector and matrix matrix multiplication
+probing them with `space(A, i)`. Generalizing matrix-vector and matrix-matrix multiplication
to arbitrary tensor contractions require that the two indices to be contracted have spaces
which are each others dual. Knowing this, all the other functionality of tensors with
`CartesianSpace` indices remains the same for tensors with `ComplexSpace` indices.
@@ -312,11 +311,11 @@ some block sparsity. Let's clarify all of this with some examples.
We start with a simple ``ℤ₂`` symmetry:
```@repl tutorial
-V1 = ℤ₂Space(0=>3,1=>2)
+V1 = ℤ₂Space(0=>3, 1=>2)
dim(V1)
-V2 = ℤ₂Space(0=>1,1=>1)
+V2 = ℤ₂Space(0=>1, 1=>1)
dim(V2)
-A = randn(V1*V1*V2')
+A = randn(V1 * V1 * V2')
convert(Array, A)
```
@@ -332,27 +331,27 @@ From there on, the resulting tensors support all of the same operations as the o
encountered in the previous examples.
```@repl tutorial
-B = randn(V1'*V1*V2);
-@tensor C[a,b] := A[a,c,d]*B[c,b,d]
-U,S,V = tsvd(A,(1,3),(2,));
-U'*U # should be the identity on the corresponding domain = codomain
-U'*U ≈ one(U'*U)
-P = U*U' # should be a projector
-P*P ≈ P
+B = randn(V1' * V1 * V2);
+@tensor C[a,b] := A[a,c,d] * B[c,b,d]
+U, S, V = tsvd(A, ((1, 3), (2,)));
+U' * U # should be the identity on the corresponding domain = codomain
+U' * U ≈ one(U' * U)
+P = U * U' # should be a projector
+P * P ≈ P
```
We also support other abelian symmetries, e.g.
```@repl tutorial
-V = U₁Space(0=>2,1=>1,-1=>1)
+V = U₁Space(0=>2, 1=>1, -1=>1)
dim(V)
-A = randn(V*V, V)
+A = randn(V * V, V)
dim(A)
convert(Array, A)
V = Rep[U₁×ℤ₂]((0, 0) => 2, (1, 1) => 1, (-1, 0) => 1)
dim(V)
-A = randn(V*V, V)
+A = randn(V * V, V)
dim(A)
convert(Array, A)
```
@@ -367,12 +366,12 @@ more general sectortypes `I` it can be constructed as `Vect[I]`. Furthermore, `
synonyms, e.g.
```@repl tutorial
-Rep[U₁](0=>3,1=>2,-1=>1) == U1Space(-1=>1,1=>2,0=>3)
-V = U₁Space(1=>2,0=>3,-1=>1)
+Rep[U₁](0=>3, 1=>2, -1=>1) == U1Space(-1=>1, 1=>2, 0=>3)
+V = U₁Space(1=>2, 0=>3, -1=>1)
for s in sectors(V)
@show s, dim(V, s)
end
-U₁Space(-1=>1,0=>3,1=>2) == GradedSpace(Irrep[U₁](1)=>2, Irrep[U₁](0)=>3, Irrep[U₁](-1)=>1)
+U₁Space(-1=>1, 0=>3, 1=>2) == GradedSpace(Irrep[U₁](1)=>2, Irrep[U₁](0)=>3, Irrep[U₁](-1)=>1)
supertype(GradedSpace)
```
@@ -386,9 +385,8 @@ type, in this case `Irrep[U₁] == U1Irrep`. However, the `Vect[I]` constructor
converts the keys in the list of `Pair`s it receives to the correct type. Alternatively, we
can directly create the sectors of the correct type and use the generic `GradedSpace`
constructor. We can probe the subspace dimension of a certain sector `s` in a space `V` with
-`dim(V, s)`. Finally, note that `GradedSpace` is also a subtype of `EuclideanSpace`, which
-implies that it still has the standard Euclidean inner product and we assume all
-representations to be unitary.
+`dim(V, s)`. Finally, note that `GradedSpace` still has the standard Euclidean inner product
+and we assume all representations to be unitary.
TensorKit.jl also allows for non-abelian symmetries such as `SU₂`. In this case, the vector
space is characterized via the spin quantum number (i.e. the irrep label of `SU₂`) for each
@@ -396,18 +394,18 @@ of its subspaces, and is created using `SU₂Space` (or `SU2Space` or `Rep[SU₂
`Vect[Irrep[SU₂]]`)
```@repl tutorial
-V = SU₂Space(0=>2,1/2=>1,1=>1)
+V = SU₂Space(0=>2, 1/2=>1, 1=>1)
dim(V)
V == Vect[Irrep[SU₂]](0=>2, 1=>1, 1//2=>1)
```
Note that now `V` has a two-dimensional subspace with spin zero, and two one-dimensional
subspaces with spin 1/2 and spin 1. However, a subspace with spin `j` has an additional
-`2j+1` dimensional degeneracy on which the irreducible representation acts. This brings the
-total dimension to `2*1 + 1*2 + 1*3`. Creating a tensor with `SU₂` symmetry yields
+`2j + 1` dimensional degeneracy on which the irreducible representation acts. This brings
+the total dimension to `2*1 + 1*2 + 1*3`. Creating a tensor with `SU₂` symmetry yields
```@repl tutorial
-A = randn(V*V, V)
+A = randn(V * V, V)
dim(A)
convert(Array, A)
norm(A) ≈ norm(convert(Array, A))
@@ -419,7 +417,7 @@ in the original tensor data do not match with those in the `Array`. The reason i
that the original tensor data now needs to be transformed with a construction known as
fusion trees, which are made up out of the Clebsch-Gordan coefficients of the group.
Indeed, note that the non-zero blocks are also no longer labeled by a list of sectors, but
-by pair of fusion trees. This will be explained further in the manual. However, the
+by pairs of fusion trees. This will be explained further in the manual. However, the
Clebsch-Gordan coefficients of the group are only needed to actually convert a tensor to an
`Array`. For working with tensors with `SU₂Space` indices, e.g. contracting or factorizing
them, the Clebsch-Gordan coefficients are never needed explicitly. Instead, recoupling
diff --git a/src/TensorKit.jl b/src/TensorKit.jl
index 6f7467e37..29b553d49 100644
--- a/src/TensorKit.jl
+++ b/src/TensorKit.jl
@@ -156,24 +156,24 @@ struct SectorMismatch{S<:Union{Nothing,AbstractString}} <: TensorException
message::S
end
SectorMismatch() = SectorMismatch{Nothing}(nothing)
-Base.show(io::IO, ::SectorMismatch{Nothing}) = print(io, "SectorMismatch()")
-Base.show(io::IO, e::SectorMismatch) = print(io, "SectorMismatch(\"", e.message, "\")")
+Base.showerror(io::IO, ::SectorMismatch{Nothing}) = print(io, "SectorMismatch()")
+Base.showerror(io::IO, e::SectorMismatch) = print(io, "SectorMismatch(\"", e.message, "\")")
# Exception type for all errors related to vector space mismatch
struct SpaceMismatch{S<:Union{Nothing,AbstractString}} <: TensorException
message::S
end
SpaceMismatch() = SpaceMismatch{Nothing}(nothing)
-Base.show(io::IO, ::SpaceMismatch{Nothing}) = print(io, "SpaceMismatch()")
-Base.show(io::IO, e::SpaceMismatch) = print(io, "SpaceMismatch(\"", e.message, "\")")
+Base.showerror(io::IO, ::SpaceMismatch{Nothing}) = print(io, "SpaceMismatch()")
+Base.showerror(io::IO, e::SpaceMismatch) = print(io, "SpaceMismatch(\"", e.message, "\")")
# Exception type for all errors related to invalid tensor index specification.
struct IndexError{S<:Union{Nothing,AbstractString}} <: TensorException
message::S
end
IndexError() = IndexError{Nothing}(nothing)
-Base.show(io::IO, ::IndexError{Nothing}) = print(io, "IndexError()")
-Base.show(io::IO, e::IndexError) = print(io, "IndexError(", e.message, ")")
+Base.showerror(io::IO, ::IndexError{Nothing}) = print(io, "IndexError()")
+Base.showerror(io::IO, e::IndexError) = print(io, "IndexError(", e.message, ")")
# Constructing and manipulating fusion trees and iterators thereof
#------------------------------------------------------------------
diff --git a/src/spaces/homspace.jl b/src/spaces/homspace.jl
index b2ac5f894..d6a06cedd 100644
--- a/src/spaces/homspace.jl
+++ b/src/spaces/homspace.jl
@@ -12,6 +12,18 @@ struct HomSpace{S<:ElementarySpace,P1<:CompositeSpace{S},P2<:CompositeSpace{S}}
codomain::P1
domain::P2
end
+
+function HomSpace(codomain::S, domain::CompositeSpace{S}) where {S<:ElementarySpace}
+ return HomSpace(⊗(codomain), domain)
+end
+function HomSpace(codomain::CompositeSpace{S}, domain::S) where {S<:ElementarySpace}
+ return HomSpace(codomain, ⊗(domain))
+end
+function HomSpace(codomain::S, domain::S) where {S<:ElementarySpace}
+ return HomSpace(⊗(codomain), ⊗(domain))
+end
+HomSpace(codomain::VectorSpace) = HomSpace(codomain, zero(codomain))
+
codomain(W::HomSpace) = W.codomain
domain(W::HomSpace) = W.domain
diff --git a/test/bugfixes.jl b/test/bugfixes.jl
index faa348c0d..fb2f0f45c 100644
--- a/test/bugfixes.jl
+++ b/test/bugfixes.jl
@@ -23,14 +23,14 @@
@test scalartype(w) == Float64
end
- # https://github.com/Jutho/TensorKit.jl/issues/178
+ # https://github.com/QuantumKitHub/TensorKit.jl/issues/178
@testset "Issue #178" begin
t = rand(U1Space(1 => 1) ← U1Space(1 => 1)')
a = convert(Array, t)
@test a == zeros(size(a))
end
- # https://github.com/Jutho/TensorKit.jl/issues/194
+ # https://github.com/QuantumKitHub/TensorKit.jl/issues/194
@testset "Issue #194" begin
t1 = rand(ℂ^4 ← ℂ^4)
t2 = tensoralloc(typeof(t1), space(t1), Val(true),
@@ -44,7 +44,7 @@
tensorfree!(t2)
end
- # https://github.com/Jutho/TensorKit.jl/issues/201
+ # https://github.com/QuantumKitHub/TensorKit.jl/issues/201
@testset "Issue #201" begin
function f(A::AbstractTensorMap)
U, S, V, = tsvd(A)
@@ -73,7 +73,7 @@
@test convert(Array, grad3) ≈ grad4
end
- # https://github.com/Jutho/TensorKit.jl/issues/209
+ # https://github.com/QuantumKitHub/TensorKit.jl/issues/209
@testset "Issue #209" begin
function f(T, D)
@tensor T[1, 4, 1, 3] * D[3, 4]