diff --git a/docs/LocalPreferences.toml b/docs/LocalPreferences.toml
new file mode 100644
index 000000000..ed82aeff8
--- /dev/null
+++ b/docs/LocalPreferences.toml
@@ -0,0 +1,2 @@
+[SUNRepresentations]
+display_mode = "dimension"
diff --git a/docs/make.jl b/docs/make.jl
index 3fffae9eb..52f556195 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -7,14 +7,18 @@ using DocumenterInterLinks
links = InterLinks(
"MatrixAlgebraKit" => "https://quantumkithub.github.io/MatrixAlgebraKit.jl/stable/",
- "TensorOperations" => "https://quantumkithub.github.io/TensorOperations.jl/stable/"
+ "TensorOperations" => "https://quantumkithub.github.io/TensorOperations.jl/stable/",
+ "TensorKitSectors" => "https://quantumkithub.github.io/TensorKitSectors.jl/dev/"
)
pages = [
"Home" => "index.md",
"Manual" => [
"man/intro.md", "man/tutorial.md",
- "man/spaces.md", "man/sectors.md", "man/tensors.md",
+ "man/spaces.md", "man/symmetries.md",
+ "man/sectors.md", "man/gradedspaces.md",
+ "man/fusiontrees.md", "man/tensors.md",
+ "man/tensormanipulations.md",
],
"Library" => [
"lib/sectors.md", "lib/fusiontrees.md",
diff --git a/docs/src/appendix/categories.md b/docs/src/appendix/categories.md
index 849f03603..5a111b4fe 100644
--- a/docs/src/appendix/categories.md
+++ b/docs/src/appendix/categories.md
@@ -1,264 +1,153 @@
-
# [Optional introduction to category theory](@id s_categories)
-The purpose of this page (which can safely be skipped), is to explain how certain
-concepts and terminology from the theory of monoidal categories apply in the context of
-tensors. In particular, we are interested in the category ``\mathbf{Vect}``, but our
-concept of tensors can be extended to morphisms of any category that shares similar
-properties. These properties are reviewed below.
+The purpose of this page (which can safely be skipped), is to explain how certain concepts and terminology from the theory of monoidal categories apply in the context of tensors.
+In particular, we are interested in the category ``\mathbf{Vect}``, but our concept of tensors can be extended to morphisms of any category that shares similar properties.
+These properties are reviewed below.
-In particular, we will as example also study the more general case of ``\mathbf{SVect}``,
-i.e. the category of super vector spaces, which contains ``\mathbf{Vect}`` as a subcategory
-and which are useful to describe fermions.
+In particular, we will, as an example, also study the more general case of ``\mathbf{SVect}``, i.e. the category of super vector spaces, which contains ``\mathbf{Vect}`` as a subcategory and which is useful to describe fermions.
-In the end, the goal of identifying tensor manipulations in TensorKit.jl with concepts from
-category theory is to put the diagrammatic formulation of tensor networks in the most
-general context on a firmer footing. The following exposition is mostly based on [^turaev],
-combined with input from [^selinger], [^kassel], [^kitaev], and
-[``n``Lab](https://ncatlab.org/), to which we refer for further information. Furthermore,
-we recommend the nice introduction of [^beer].
+In the end, the goal of identifying tensor manipulations in TensorKit.jl with concepts from category theory is to put the diagrammatic formulation of tensor networks in the most general context on a firmer footing.
+The following exposition is mostly based on [^Turaev], combined with input from [^Selinger], [^Kassel], [^Kitaev], and [``n``Lab](https://ncatlab.org/), to which we refer for further information.
+Furthermore, we recommend the nice introduction of [^Beer].
## [Categories, functors and natural transformations](@id ss_categoryfunctor)
To start, a **category** ``C`` consists of
* a class ``\mathrm{Ob}(C)`` of objects ``V``, ``W``, …
-* for each pair of objects ``V`` and ``W``, a set ``\mathrm{Hom}_C(W,V)`` of morphisms
- ``f:W→V``; for a given map ``f``, ``W`` is called the *domain* or *source*, and ``V``
- the *codomain* or *target*.
-* composition of morphisms ``f:W→V`` and ``g:X→W`` into ``(f ∘ g):X→V`` that is
- associative, such that for ``h:Y→X`` we have ``f ∘ (g ∘ h) = (f ∘ g) ∘ h``
-* for each object ``V``, an identity morphism ``\mathrm{id}_V:V→V`` such that
- ``f ∘ \mathrm{id}_W = f = \mathrm{id}_V ∘ f``.
-
-The morphisms in ``\mathrm{Hom}_C(V,V)`` are known as endomorphism and this set is also
-denoted as ``End_C(V)``. When the category ``C`` is clear, we can drop the subscript in
-``\mathrm{Hom}(W,V)``. A morphism ``f:W→V`` is an isomorphism if there exists a morphism
-``f^{-1}:V→W`` called its inverse, such that ``f^{-1} ∘ f = \mathrm{id}_W`` and ``f ∘ f^{-1}
-= \mathrm{id}_V``.
-
-Throughout this manual, we associate a graphical representation to morphisms and
-compositions thereof, which is sometimes referred to as the Penrose graphical calculus. To
-morphisms, we associate boxes with an incoming and outgoing line denoting the object in its
-source and target. The flow from source to target, and thus the direction of morphism
-composition ``f ∘ g`` (sometimes known as the flow of time) can be chosen left to right
-(like the arrow in ``f:W→V``), right to left (like the composition order ``f ∘ g``, or the
-matrix product), bottom to top (quantum field theory convention) or top to bottom (quantum
-circuit convention). Throughout this manual, we stick to this latter convention (which is
-not very common in manuscripts on category theory):
+* for each pair of objects ``V`` and ``W``, a set ``\mathrm{Hom}_C(W,V)`` of morphisms ``f:W→V``; for a given map ``f``, ``W`` is called the *domain* or *source*, and ``V`` the *codomain* or *target*.
+* composition of morphisms ``f:W→V`` and ``g:X→W`` into ``(f ∘ g):X→V`` that is associative, such that for ``h:Y→X`` we have ``f ∘ (g ∘ h) = (f ∘ g) ∘ h``
+* for each object ``V``, an identity morphism ``\mathrm{id}_V:V→V`` such that ``f ∘ \mathrm{id}_W = f = \mathrm{id}_V ∘ f``.
+
+The morphisms in ``\mathrm{Hom}_C(V,V)`` are known as endomorphisms and this set is also denoted as ``End_C(V)``.
+When the category ``C`` is clear, we can drop the subscript in ``\mathrm{Hom}(W,V)``.
+A morphism ``f:W→V`` is an isomorphism if there exists a morphism ``f^{-1}:V→W`` called its inverse, such that ``f^{-1} ∘ f = \mathrm{id}_W`` and ``f ∘ f^{-1} = \mathrm{id}_V``.
+
+Throughout this manual, we associate a graphical representation to morphisms and compositions thereof, which is sometimes referred to as the Penrose graphical calculus.
+To morphisms, we associate boxes with an incoming and outgoing line denoting the object in its source and target.
+The flow from source to target, and thus the direction of morphism composition ``f ∘ g`` (sometimes known as the flow of time) can be chosen left to right (like the arrow in ``f:W→V``), right to left (like the composition order ``f ∘ g``, or the matrix product), bottom to top (quantum field theory convention) or top to bottom (quantum circuit convention).
+Throughout this manual, we stick to this latter convention (which is not very common in manuscripts on category theory):
```@raw html
```
-The direction of the arrows, which become important once we introduce duals, are also
-subject to convention, and are here chosen to follow the arrow in ``f:W→V``, i.e. the
-source comes in and the target goes out. Strangely enough, this is opposite to the most
-common convention.
-
-In the case of interest, i.e. the category ``\mathbf{(Fin)Vect}_{𝕜}`` (or some subcategory
-thereof), the objects are (finite-dimensional) vector spaces over a field ``𝕜``, and the
-morphisms are linear maps between these vector spaces with "matrix multiplication" as
-composition. More importantly, the morphism spaces ``\mathrm{Hom}(W,V)`` are themselves
-vector spaces. More general categories where the morphism spaces are vector spaces over a
-field ``𝕜`` (or modules over a ring ``𝕜``) and the composition of morphisms is a bilinear
-operation are called ``𝕜``-linear categories (or ``𝕜``-algebroids, or
-``\mathbf{Vect}_{𝕜}``-enriched categories). In that case, the endomorphisms
-``\mathrm{End}(V)`` are a ``𝕜``-algebra with ``\mathrm{id}_V`` as the identity.
-
-We also introduce some definitions which will be useful further on. A **functor** ``F``
-between two categories ``C`` and ``D`` is, colloquially speaking, a mapping between
-categories that preserves morphism composition and identities. More specifically, ``F:C→D``
-assigns to every object ``V ∈ \mathrm{Ob}(C)`` an object ``F(V) ∈ \mathrm{Ob}(D)``, and to
-each morphism ``f ∈ \mathrm{Hom}_C(W,V)`` a morphism ``F(f) ∈ \mathrm{Hom}_D(F(W), F(V))``
-such that ``F(f) ∘_D F(g) = F(f ∘_C g)`` and ``F(\mathrm{id}_V) = \mathrm{id}_{F(V)}``
-(where we denoted the possibly different composition laws in ``C`` and ``D`` explicitly with
-a subscript). In particular, every category ``C`` has an identity functor ``1_C`` that acts
-trivially on objects and morphisms. Functors can also be composed. A ``𝕜``-linear functor
-between two ``𝕜``-linear categories has a linear action on morphisms.
-
-Given two categories ``C`` and ``D``, and two functors ``F`` and ``G`` that map from ``C``
-to ``D``, a **natural transformation** ``φ:F⟶G`` is a family of morphisms
-``φ_V ∈ \mathrm{Hom}_D(F(V),G(V))`` in ``D``, labeled by the objects ``V`` of ``C``, such
-that ``φ_V ∘ F(f) = G(f) ∘ φ_W`` for all morphisms ``f ∈ \mathrm{Hom}_C(W,V)``. If all
-morphisms ``φ_V`` are isomorphisms, ``φ`` is called a natural isomorphism and the two
-functors ``F`` and ``G`` are said to be *isomorphic*.
-
-The *product* of two categories ``C`` and ``C′``, denoted ``C × C′``, is the category with
-objects ``\mathrm{Ob}(C×C′) = \mathrm{Ob}(C) × \mathrm{Ob}(C′)``, whose elements are denoted
-as tuples ``(V,V′)``, and morphisms
-``\mathrm{Hom}_{C×C′}((W,W′), (V,V′)) = \mathrm{Hom}_{C}(W,V) × \mathrm{Hom}_{C′}(W′,V′)``.
-Composition acts as ``(f,f′) ∘ (g,g′) = (f∘f′, g∘g′)`` and the identity is given by
-``\mathrm{id}_{V,V′} = (\mathrm{id}_V, \mathrm{id}_{V′})``. In a similar fashion, we can
-define the *product of functors* ``F:C→D`` and ``F′:C′→D′`` as a functor
-``F×F′: (C×C′)→(D×D′)`` mapping objects ``(V,V′)`` to ``(F(V), F′(V′))`` and morphisms
-``(f,f′)`` to ``(F(f), F′(f′))``.
+The direction of the arrows, which becomes important once we introduce duals, is also subject to convention, and is here chosen to follow the arrow in ``f:W→V``, i.e. the source comes in and the target goes out.
+Strangely enough, this is opposite to the most common convention.
+
+In the case of interest, i.e. the category ``\mathbf{(Fin)Vect}_{𝕜}`` (or some subcategory thereof), the objects are (finite-dimensional) vector spaces over a field ``𝕜``, and the morphisms are linear maps between these vector spaces with "matrix multiplication" as composition.
+More importantly, the morphism spaces ``\mathrm{Hom}(W,V)`` are themselves vector spaces.
+More general categories where the morphism spaces are vector spaces over a field ``𝕜`` (or modules over a ring ``𝕜``) and the composition of morphisms is a bilinear operation are called ``𝕜``-linear categories (or ``𝕜``-algebroids, or ``\mathbf{Vect}_{𝕜}``-enriched categories).
+In that case, the endomorphisms ``\mathrm{End}(V)`` are a ``𝕜``-algebra with ``\mathrm{id}_V`` as the identity.
+
+We also introduce some definitions which will be useful further on.
+A **functor** ``F`` between two categories ``C`` and ``D`` is, colloquially speaking, a mapping between categories that preserves morphism composition and identities.
+More specifically, ``F:C→D`` assigns to every object ``V ∈ \mathrm{Ob}(C)`` an object ``F(V) ∈ \mathrm{Ob}(D)``, and to each morphism ``f ∈ \mathrm{Hom}_C(W,V)`` a morphism ``F(f) ∈ \mathrm{Hom}_D(F(W), F(V))`` such that ``F(f) ∘_D F(g) = F(f ∘_C g)`` and ``F(\mathrm{id}_V) = \mathrm{id}_{F(V)}`` (where we denoted the possibly different composition laws in ``C`` and ``D`` explicitly with a subscript).
+In particular, every category ``C`` has an identity functor ``1_C`` that acts trivially on objects and morphisms.
+Functors can also be composed. A ``𝕜``-linear functor between two ``𝕜``-linear categories has a linear action on morphisms.
+
+Given two categories ``C`` and ``D``, and two functors ``F`` and ``G`` that map from ``C`` to ``D``, a **natural transformation** ``φ:F⟶G`` is a family of morphisms ``φ_V ∈ \mathrm{Hom}_D(F(V),G(V))`` in ``D``, labeled by the objects ``V`` of ``C``, such that ``φ_V ∘ F(f) = G(f) ∘ φ_W`` for all morphisms ``f ∈ \mathrm{Hom}_C(W,V)``.
+If all morphisms ``φ_V`` are isomorphisms, ``φ`` is called a natural isomorphism and the two functors ``F`` and ``G`` are said to be *isomorphic*.
+
+The *product* of two categories ``C`` and ``C′``, denoted ``C × C′``, is the category with objects ``\mathrm{Ob}(C×C′) = \mathrm{Ob}(C) × \mathrm{Ob}(C′)``, whose elements are denoted as tuples ``(V, V′)``, and morphisms ``\mathrm{Hom}_{C×C′}((W,W′), (V,V′)) = \mathrm{Hom}_{C}(W,V) × \mathrm{Hom}_{C′}(W′,V′)``.
+Composition acts as ``(f,f′) ∘ (g,g′) = (f∘f′, g∘g′)`` and the identity is given by ``\mathrm{id}_{V,V′} = (\mathrm{id}_V, \mathrm{id}_{V′})``.
+In a similar fashion, we can define the *product of functors* ``F:C→D`` and ``F′:C′→D′`` as a functor ``F×F′: (C×C′)→(D×D′)`` mapping objects ``(V,V′)`` to ``(F(V), F′(V′))`` and morphisms ``(f,f′)`` to ``(F(f), F′(f′))``.
## [Monoidal categories](@id ss_monoidalcategory)
-The next property of the category ``\mathbf{Vect}`` that we want to highlight and
-generalize is that which allows to take tensor products. Indeed, a category ``C`` is said
-to be a **tensor category** (a.k.a. a *monoidal category*), if it has
+
+The next property of the category ``\mathbf{Vect}`` that we want to highlight and generalize is that which allows to take tensor products.
+Indeed, a category ``C`` is said to be a **tensor category** (a.k.a. a *monoidal category*), if it has
* a binary operation on objects ``⊗: \mathrm{Ob}(C) × \mathrm{Ob}(C) → \mathrm{Ob}(C)``
-* a binary operation on morphisms, also denoted as ``⊗``, such that
- ``⊗: \mathrm{Hom}_C(W_1,V_1) × \mathrm{Hom}_C(W_2,V_2) → \mathrm{Hom}_C(W_1 ⊗ W_2, V_1 ⊗ V_2)``
+* a binary operation on morphisms, also denoted as ``⊗``, such that ``⊗: \mathrm{Hom}_C(W_1,V_1) × \mathrm{Hom}_C(W_2,V_2) → \mathrm{Hom}_C(W_1 ⊗ W_2, V_1 ⊗ V_2)``
* an identity or unit object ``I``
* three families of natural isomorphisms:
- * ``∀ V ∈ \mathrm{Ob}(C)``, a left unitor (a.k.a. left unitality constraint)
- ``λ_V: I ⊗ V → V``
- * ``∀ V ∈ \mathrm{Ob}(C)``, a right unitor (a.k.a. right unitality constraint)
- ``ρ_V: V ⊗ I → V``
- * ``∀ V_1, V_2, V_3 ∈ \mathrm{Ob}(C)``, an associator (a.k.a. associativity
- constraint) ``α_{V_1,V_2,V_3}:(V_1 ⊗ V_2) ⊗ V_3 → V_1 ⊗ (V_2 ⊗ V_3)``
- that satisfy certain consistency conditions (coherence axioms), which are known as the
- *pentagon equation* (stating that the two possible mappings from
- ``(((V_1 ⊗ V_2) ⊗ V_3) ⊗ V_4)`` to ``(V_1 ⊗ (V_2 ⊗ (V_3 ⊗ V_4)))`` are compatible) and
- the *triangle equation* (expressing compatibility between the two possible ways to map
- ``((V_1 ⊗ I) ⊗ V_2)`` to ``(V_1 ⊗ (I ⊗ V_2))``).
-
-In terms of functors and natural transformations, ``⊗`` is a functor from the product
-category ``C × C`` to ``C``. Furthermore, the left (or right) unitor ``λ`` (or ``ρ``) is a
-natural isomorphism between a nameless functor ``C→C`` that maps objects ``V → I ⊗ V`` (or
-``V→V ⊗ I``) and the identity functor ``1_C``. Similarly, the associator ``α`` is a natural
-isomorphism between the two functors ``⊗(⊗ × 1_C)`` and ``⊗(1_C × ⊗)`` from ``C × C × C``
-to ``C``. In a ``k``-linear category, the tensor product of morphisms is also a bilinear
-operation. A monoidal category is said to be *strict* if ``I ⊗ V = V = V ⊗ I`` and
-``(V_1⊗V_2)⊗V_3 = V_1⊗(V_2⊗V_3)``, and the left and right unitor and associator are just the
-identity morphisms for these objects.
-
-For the category ``\mathbf{Vect}``, the identity object ``I`` is just the scalar field
-``𝕜`` over which the vector spaces are defined, and which can be identified with a one-
-dimensional vector space. This is not automatically a strict category, especially if one
-considers how to represent tensor maps on a computer. The distinction between ``V``,
-``I ⊗ V`` and ``V ⊗ I`` amounts to adding or removing an extra factor ``I`` to the tensor
-product structure of the domain or codomain, and so the left and right unitor are analogous
-to removing extra dimensions of size 1 from a multidimensional array. The fact that arrays
-with and without additional dimensions 1 are not automatically identical and an actual
-operation is required to insert or remove them, has led to some discussion in several
-programming languages that provide native support for multidimensional arrays.
-
-For what concerns the associator, the distinction between ``(V_1 ⊗ V_2) ⊗ V_3`` and
-``V_1 ⊗ (V_2 ⊗ V_3)`` is typically absent for simple tensors or multidimensional arrays.
-However, this grouping can be taken to indicate how to build the fusion tree for coupling
-irreps to a joint irrep in the case of symmetric tensors. As such, going from one to the
-other requires a recoupling (F-move) which has a non-trivial action on the reduced blocks.
+ * ``∀ V ∈ \mathrm{Ob}(C)``, a left unitor (a.k.a. left unitality constraint) ``λ_V: I ⊗ V → V``
+ * ``∀ V ∈ \mathrm{Ob}(C)``, a right unitor (a.k.a. right unitality constraint) ``ρ_V: V ⊗ I → V``
+ * ``∀ V_1, V_2, V_3 ∈ \mathrm{Ob}(C)``, an associator (a.k.a. associativity constraint) ``α_{V_1,V_2,V_3}:(V_1 ⊗ V_2) ⊗ V_3 → V_1 ⊗ (V_2 ⊗ V_3)`` that satisfy certain consistency conditions (coherence axioms), which are known as the *pentagon equation* (stating that the two possible mappings from ``(((V_1 ⊗ V_2) ⊗ V_3) ⊗ V_4)`` to ``(V_1 ⊗ (V_2 ⊗ (V_3 ⊗ V_4)))`` are compatible) and the *triangle equation* (expressing compatibility between the two possible ways to map ``((V_1 ⊗ I) ⊗ V_2)`` to ``(V_1 ⊗ (I ⊗ V_2))``).
+
+In terms of functors and natural transformations, ``⊗`` is a functor from the product category ``C × C`` to ``C``.
+Furthermore, the left (or right) unitor ``λ`` (or ``ρ``) is a natural isomorphism between a nameless functor ``C→C`` that maps objects ``V → I ⊗ V`` (or ``V→V ⊗ I``) and the identity functor ``1_C``.
+Similarly, the associator ``α`` is a natural isomorphism between the two functors ``⊗(⊗ × 1_C)`` and ``⊗(1_C × ⊗)`` from ``C × C × C`` to ``C``.
+In a ``𝕜``-linear category, the tensor product of morphisms is also a bilinear operation.
+A monoidal category is said to be *strict* if ``I ⊗ V = V = V ⊗ I`` and ``(V_1⊗V_2)⊗V_3 = V_1⊗(V_2⊗V_3)``, and the left and right unitor and associator are just the identity morphisms for these objects.
+
+For the category ``\mathbf{Vect}``, the identity object ``I`` is just the scalar field ``𝕜`` over which the vector spaces are defined, and which can be identified with a one-dimensional vector space.
+This is not automatically a strict category, especially if one considers how to represent tensor maps on a computer.
+The distinction between ``V``, ``I ⊗ V`` and ``V ⊗ I`` amounts to adding or removing an extra factor ``I`` to the tensor product structure of the domain or codomain, and so the left and right unitor are analogous to removing extra dimensions of size 1 from a multidimensional array.
+The fact that arrays with and without additional dimensions 1 are not automatically identical and an actual operation is required to insert or remove them, has led to some discussion in several programming languages that provide native support for multidimensional arrays.
+
+For what concerns the associator, the distinction between ``(V_1 ⊗ V_2) ⊗ V_3`` and ``V_1 ⊗ (V_2 ⊗ V_3)`` is typically absent for simple tensors or multidimensional arrays.
+However, this grouping can be taken to indicate how to build the fusion tree for coupling irreps to a joint irrep in the case of symmetric tensors.
+As such, going from one to the other requires a recoupling (F-move) which has a non-trivial action on the reduced blocks.
We elaborate on this in the context of [Fusion categories](@ref ss_topologicalfusion) below.
-However, we can already note that we will always represent tensor products using a
-canonical order ``(…((V_1 ⊗ V_2) ⊗ V_3) … ⊗ V_N)``. A similar approach can be followed to
-turn any tensor category into a strict tensor category (see Section XI.5 of [^kassel]).
-
-The different natural isomorphisms involving the unit object have various relations, such
-as ``λ_{V⊗W} ∘ α_{I,V,W} = λ_V ⊗ \mathrm{id}_W`` and ``λ_I = ρ_I : I ⊗ I → I``. The last
-relation defines an isomorphism between ``I ⊗ I`` and ``I``, which can also be used to
-state that for ``f, g ∈ End_C(I)``, ``f ∘ g = ρ_I ∘ (f ⊗ g) ∘ λ_I^{-1} = g ∘ f``. Hence, the
-tensor product of morphisms in ``End_C(I)`` can be related to morphism composition in
-``End_C(I)``, and furthermore, the monoid of endomorphisms ``End_C(I)`` is commutative
-(abelian). In the case of a ``𝕜``-linear category, it is an abelian ``𝕜``-algebra. In the
-case of ``\mathbf{Vect}``, ``\mathrm{End}(I)`` is indeed isomorphic to the field of scalars
-``𝕜``. We return to the general case where ``End_C(I)`` is isomorphic to ``𝕜`` itself in
-the section on [pre-fusion categories](@ref ss_fusion).
-
-Furthermore, *Mac Lane's coherence theorem* states that the triangle and pentagon
-condition are sufficient to ensure that any consistent diagram made of associators and
-left and right unitors (involving all possible objects in ``C``) commutes. For what
-concerns the graphical notation, the natural isomorphisms will not be represented and we
-make no distinction between ``(V_1 ⊗ V_2) ⊗ V_3`` and ``V_1 ⊗ (V_2 ⊗ V_3)``. Similarly, the
-identity object ``I`` can be added or removed at will, and when drawn, is often represented
-by a dotted or dashed line. Note that any consistent way of inserting the associator or
-left or right unitor to convert a graphical representation to a diagram of compositions and
-tensor products of morphisms gives rise to the same result, by virtue of Mac Lane's
-coherence theorem. Using the horizontal direction (left to right) to stack tensor products,
-this gives rise to the following graphical notation for the tensor product of two
-morphisms, and for a general morphism ``t`` between a tensor product of objects in source
-and target:
+However, we can already note that we will always represent tensor products using a canonical order ``(…((V_1 ⊗ V_2) ⊗ V_3) … ⊗ V_N)``.
+A similar approach can be followed to turn any tensor category into a strict tensor category (see Section XI.5 of [^Kassel]).
+
+The different natural isomorphisms involving the unit object have various relations, such as ``λ_{V⊗W} ∘ α_{I,V,W} = λ_V ⊗ \mathrm{id}_W`` and ``λ_I = ρ_I : I ⊗ I → I``.
+The last relation defines an isomorphism between ``I ⊗ I`` and ``I``, which can also be used to state that for ``f, g ∈ End_C(I)``, ``f ∘ g = ρ_I ∘ (f ⊗ g) ∘ λ_I^{-1} = g ∘ f``.
+Hence, the tensor product of morphisms in ``End_C(I)`` can be related to morphism composition in ``End_C(I)``, and furthermore, the monoid of endomorphisms ``End_C(I)`` is commutative (abelian).
+In the case of a ``𝕜``-linear category, it is an abelian ``𝕜``-algebra.
+In the case of ``\mathbf{Vect}``, ``\mathrm{End}(I)`` is indeed isomorphic to the field of scalars ``𝕜``.
+We return to the general case where ``End_C(I)`` is isomorphic to ``𝕜`` itself in the section on [pre-fusion categories](@ref ss_fusion).
+
+Furthermore, *Mac Lane's coherence theorem* states that the triangle and pentagon condition are sufficient to ensure that any consistent diagram made of associators and left and right unitors (involving all possible objects in ``C``) commutes.
+For what concerns the graphical notation, the natural isomorphisms will not be represented and we make no distinction between ``(V_1 ⊗ V_2) ⊗ V_3`` and ``V_1 ⊗ (V_2 ⊗ V_3)``.
+Similarly, the identity object ``I`` can be added or removed at will, and when drawn, is often represented by a dotted or dashed line.
+Note that any consistent way of inserting the associator or left or right unitor to convert a graphical representation to a diagram of compositions and tensor products of morphisms gives rise to the same result, by virtue of Mac Lane's coherence theorem.
+Using the horizontal direction (left to right) to stack tensor products, this gives rise to the following graphical notation for the tensor product of two morphisms, and for a general morphism ``t`` between a tensor product of objects in source and target:
```@raw html
```
-Another relevant example is the category ``\mathbf{SVect}_𝕜``, which has as objects *super
-vector spaces* over ``𝕜``, which are vector spaces with a ``ℤ₂`` grading, i.e.
-they are decomposed as a direct sum ``V = V_0 ⊕ V_1``. Furthermore, the morphisms between
-two super vector spaces are restricted to be grading preserving, i.e.
-``f∈ \mathrm{Hom}_{\mathbf{SVect}}(W,V)`` has ``f(W_0) ⊂ V_0`` and ``f(W_1) ⊂ V_1``. The graded
-tensor product between two super vector spaces is defined as
-``(V⊗_\mathrm{g}W) = (V ⊗_\mathrm{g} W)_0 ⊕ (V ⊗_\mathrm{g} W)_1`` with
-``(V ⊗_\mathrm{g} W)_0 = (V_0 ⊗ W_0) ⊕ (V_1 ⊗ W_1)`` and
-``(V ⊗_\mathrm{g} W)_1 = (V_0 ⊗ W_1) ⊕ (V_1 ⊗ W_0)``. The unit object ``I`` is again
-isomorphic to ``𝕜``, i.e. ``I_0 = 𝕜`` and ``I_1 = 0``, a zero-dimensional vector space. In
-particular, the category ``\mathbf{SVect}_𝕜`` contains ``\mathbf{Vect}_𝕜`` as a
-(monoidal) subcategory, by only selecting those objects ``V`` for which ``V_1 = 0``. We
-will return to the example of ``\mathbf{SVect}`` throughout the remainder of this page.
-
-Finally, we generalize the notion of a functor between monoidal categories. A *monoidal
-functor* between two tensor categories ``(C, ⊗_C, I_C, α_C, λ_C, ρ_C)`` and
-``(D, ⊗_D, I_D, α_D, λ_D, ρ_D)`` is a functor ``F:C→D`` together with two monoidal
-constraints, namely
+Another relevant example is the category ``\mathbf{SVect}_𝕜``, which has as objects *super vector spaces* over ``𝕜``, which are vector spaces with a ``ℤ₂`` grading, i.e. they are decomposed as a direct sum ``V = V_0 ⊕ V_1``.
+Furthermore, the morphisms between two super vector spaces are restricted to be grading preserving, i.e. ``f∈ \mathrm{Hom}_{\mathbf{SVect}}(W,V)`` has ``f(W_0) ⊂ V_0`` and ``f(W_1) ⊂ V_1``.
+The graded tensor product between two super vector spaces is defined as ``(V⊗_\mathrm{g}W) = (V ⊗_\mathrm{g} W)_0 ⊕ (V ⊗_\mathrm{g} W)_1`` with ``(V ⊗_\mathrm{g} W)_0 = (V_0 ⊗ W_0) ⊕ (V_1 ⊗ W_1)`` and ``(V ⊗_\mathrm{g} W)_1 = (V_0 ⊗ W_1) ⊕ (V_1 ⊗ W_0)``.
+The unit object ``I`` is again isomorphic to ``𝕜``, i.e. ``I_0 = 𝕜`` and ``I_1 = 0``, a zero-dimensional vector space.
+In particular, the category ``\mathbf{SVect}_𝕜`` contains ``\mathbf{Vect}_𝕜`` as a (monoidal) subcategory, by only selecting those objects ``V`` for which ``V_1 = 0``.
+We will return to the example of ``\mathbf{SVect}`` throughout the remainder of this page.
+
+Finally, we generalize the notion of a functor between monoidal categories.
+A *monoidal functor* between two tensor categories ``(C, ⊗_C, I_C, α_C, λ_C, ρ_C)`` and ``(D, ⊗_D, I_D, α_D, λ_D, ρ_D)`` is a functor ``F:C→D`` together with two monoidal constraints, namely
* a morphism ``F₀:I_D → F(I_C)``;
-* a natural transformation
- ``F_2={F_2(X,Y): F(X) ⊗_D F(Y) → F(X ⊗_C Y), ∀ X,Y∈ \mathrm{Ob}(C)}``
- between the functors ``⊗_D(F×F)`` and ``F ⊗_C`` from ``C×C`` to ``D``.
-A *monoidal natural transformation* ``φ`` between two monoidal functors ``F:C→D`` and
-``G:C→D``is a natural transformation ``φ:F⟶G`` that furthermore satisfies
-* ``φ_{I_C} F_0 = G_0``;
-* ``∀ X,Y ∈ \mathrm{Ob}(C)``: ``φ_{X ⊗ Y} F_2(X,Y) = G_2(X,Y)(φ_X ⊗ φ_Y)``.
-
-For further reference, we also define the following categories which can be associated with
-the category ``\mathcal{C} = (C, ⊗, I, α, λ, ρ)``
-* ``\mathcal{C}^{\mathrm{op}} = (C^{\mathrm{op}}, ⊗, I, α^{\mathrm{op}}, λ^{\mathrm{op}}, ρ^{\mathrm{op}})``
- where the opposite category ``C^{\mathrm{op}}`` has the same objects as ``C`` but has
- ``\mathrm{Hom}_{C^{\mathrm{op}}}(X,Y) = \mathrm{Hom}_C(Y,X)`` and a composition law
- ``g ∘^{\mathrm{op}} f = f ∘ g``, with ``∘`` the composition law of ``C``. Furthermore,
- we have ``α^{\mathrm{op}}_{X,Y,Z} = (α_{X,Y,Z})^{-1}``,
- ``λ^{\mathrm{op}}_X = (λ_X)^{-1}`` and ``ρ^{\mathrm{op}}_X = (ρ_X)^{-1}``;
-* ``\mathcal{C}^{⊗\mathrm{op}} = (C, ⊗^{\mathrm{op}}, I, α^{⊗\mathrm{op}}, λ^{⊗\mathrm{op}}, ρ^{⊗\mathrm{op}})``
- where the functor ``⊗^{\mathrm{op}}:C×C → C`` is the opposite monoidal product, which
- acts as ``X ⊗^{\mathrm{op}} Y = Y ⊗ X`` on objects and similar on morphisms.
- Furthermore, ``α^{⊗\mathrm{op}}_{X,Y,Z} = (α_{Z,Y,X})^{-1}``,
- ``λ^{⊗\mathrm{op}}_X = ρ_X`` and ``ρ^{⊗\mathrm{op}}_X = λ_X``;
-* The two previous transformations (which commute) composed:
- ``\mathcal{C}^{\mathrm{rev}} = (C^{\mathrm{op}}, ⊗^{\mathrm{op}}, I, α^{\mathrm{rev}}, λ^{\mathrm{rev}}, ρ^{\mathrm{rev}})``
- with ``α^{\mathrm{rev}}_{X,Y,Z} = α_{Z,Y,X}``, ``λ^{\mathrm{rev}}_X = (ρ_X)^{-1}``,
- ``ρ^{\mathrm{rev}}_X = (λ_X)^{-1}``.
+* a natural transformation ``F_2={F_2(X,Y): F(X) ⊗_D F(Y) → F(X ⊗_C Y), ∀ X,Y∈ \mathrm{Ob}(C)}`` between the functors ``⊗_D(F×F)`` and ``F ⊗_C`` from ``C×C`` to ``D``.
 A *monoidal natural transformation* ``φ`` between two monoidal functors ``F:C→D`` and ``G:C→D`` is a natural transformation ``φ:F⟶G`` that furthermore satisfies
+ * ``φ_{I_C} F_0 = G_0``;
+ * ``∀ X,Y ∈ \mathrm{Ob}(C)``: ``φ_{X ⊗ Y} F_2(X,Y) = G_2(X,Y)(φ_X ⊗ φ_Y)``.
+
+For further reference, we also define the following categories which can be associated with the category ``\mathcal{C} = (C, ⊗, I, α, λ, ρ)``
+* ``\mathcal{C}^{\mathrm{op}} = (C^{\mathrm{op}}, ⊗, I, α^{\mathrm{op}}, λ^{\mathrm{op}}, ρ^{\mathrm{op}})`` where the opposite category ``C^{\mathrm{op}}`` has the same objects as ``C`` but has ``\mathrm{Hom}_{C^{\mathrm{op}}}(X,Y) = \mathrm{Hom}_C(Y,X)`` and a composition law ``g ∘^{\mathrm{op}} f = f ∘ g``, with ``∘`` the composition law of ``C``.
+ Furthermore, we have ``α^{\mathrm{op}}_{X,Y,Z} = (α_{X,Y,Z})^{-1}``, ``λ^{\mathrm{op}}_X = (λ_X)^{-1}`` and ``ρ^{\mathrm{op}}_X = (ρ_X)^{-1}``;
+* ``\mathcal{C}^{⊗\mathrm{op}} = (C, ⊗^{\mathrm{op}}, I, α^{⊗\mathrm{op}}, λ^{⊗\mathrm{op}}, ρ^{⊗\mathrm{op}})`` where the functor ``⊗^{\mathrm{op}}:C×C → C`` is the opposite monoidal product, which acts as ``X ⊗^{\mathrm{op}} Y = Y ⊗ X`` on objects and similar on morphisms.
+ Furthermore, ``α^{⊗\mathrm{op}}_{X,Y,Z} = (α_{Z,Y,X})^{-1}``, ``λ^{⊗\mathrm{op}}_X = ρ_X`` and ``ρ^{⊗\mathrm{op}}_X = λ_X``;
+* The two previous transformations (which commute) composed: ``\mathcal{C}^{\mathrm{rev}} = (C^{\mathrm{op}}, ⊗^{\mathrm{op}}, I, α^{\mathrm{rev}}, λ^{\mathrm{rev}}, ρ^{\mathrm{rev}})`` with ``α^{\mathrm{rev}}_{X,Y,Z} = α_{Z,Y,X}``, ``λ^{\mathrm{rev}}_X = (ρ_X)^{-1}``, ``ρ^{\mathrm{rev}}_X = (λ_X)^{-1}``.
## [Duality: rigid, pivotal and spherical categories](@id ss_dual)
-Another property of the category ``\mathbf{Vect}`` that we want to generalize is the notion
-of duals. For a vector space ``V``, i.e. an object of ``\mathbf{Vect}``, the dual ``V^*``
-is itself a vector space. Evaluating the action of dual vector on a vector can, because of
-linearity, be interpreted as a morphism from ``V^* ⊗ V`` to ``I``. Note that elements of a
-vector space ``V`` have no categorical counterpart in themselves, but can be interpreted as
-morphism from ``I`` to ``V``. To map morphisms from ``\mathrm{Hom}(W,V)`` to elements of
-``V ⊗ W^*``, i.e. morphisms in ``\mathrm{Hom}(I, V ⊗ W^*)``, we use another morphism
-``\mathrm{Hom}(I, W ⊗ W^*)`` which can be considered as the inverse of the evaluation map.
-
-Hence, duality in a monoidal category is defined via an *exact pairing*, i.e. two families
-of non-degenerate morphisms, the evaluation (or co-unit) ``ϵ_V: {}^{∨}V ⊗ V → I`` and the
-coevaluation (or unit) ``η_V: I → V ⊗ {}^{∨}V`` which satisfy the "snake rules":
-
-``ρ_V ∘ (\mathrm{id}_V ⊗ ϵ_V) ∘ (η_V ⊗ \mathrm{id}_V) ∘ λ_V^{-1} = \mathrm{id}_V``
-
-``λ_{^{∨}V}^{-1} ∘ (ϵ_V ⊗ \mathrm{id}_{^{∨}V}) ∘ (\mathrm{id}_{^{∨}V} ⊗ η_V) ∘ ρ_{^{∨}V}^{-1} = \mathrm{id}_{^{∨}V}``
-
-and can be used to define an isomorphism between ``\mathrm{Hom}(W ⊗ V, U)`` and
-``\mathrm{Hom}(W, U ⊗ {}^{∨}V)`` for any triple of objects ``U, V, W ∈ \mathrm{Ob}(C)``.
-Note that if there are different duals (with corresponding exact pairings) associated
-to an object ``V``, a mixed snake composition using the evaluation of one and coevaluation
-of the other duality can be used to construct an isomorphism between the two associated
-dual objects. Hence, duality is unique up to isomorphisms.
-
-For (real or complex) vector spaces, we denote the dual as ``V^*``, a notation that we
-preserve for pivotal categories (see below). Using a bra-ket notation and a generic basis
-``{|n⟩}`` for ``V`` and dual basis ``{⟨m|}`` for ``V^*`` (such that ``⟨m|n⟩ = δ_{m,n}``),
-the evaluation is given by ``ϵ_V:{}^{∨}V ⊗ V → ℂ: ⟨m| ⊗ |n⟩ ↦ δ_{m,n}`` and the
-coevaluation or unit is ``η_V:ℂ→ V ⊗ {}^{∨}V:α ↦ α ∑_n |n⟩ ⊗ ⟨n|``. Note that this does not
-require an inner product, i.e. no relation or mapping from ``|n⟩`` to ``⟨n|`` was defined.
-For a general tensor map ``t:W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2} → V_1 ⊗ V_2 ⊗ … ⊗ V_{N_1}``, by
-successively applying ``η_{W_{N_2}}``, ``η_{W_{N_2-1}}``, …, ``η_{W_{1}}`` (in combination
-with the left or right unitor), we obtain a tensor in
-``V_1 ⊗ V_2 ⊗ … ⊗ V_{N_1} ⊗ W_{N_2}^* ⊗ … ⊗ W_{1}^*``. Hence, we can define or identify
-``(W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2})^* = W_{N_2}^* ⊗ … ⊗ W_{1}^*``. Indeed, it can be shown that for
-any category which has duals for objects ``V`` and ``W``, an exact pairing between
-``V ⊗ W`` and ``{}^{∨}W ⊗ {}^{∨}V`` can be constructed out of the evaluation and
-coevaluation of ``V`` and ``W``, such that ``{}^{∨}W ⊗ {}^{∨}V`` is at least isomorphic to
-``{}^{∨}(V ⊗ W)``.
+
+Another property of the category ``\mathbf{Vect}`` that we want to generalize is the notion of duals.
+For a vector space ``V``, i.e. an object of ``\mathbf{Vect}``, the dual ``V^*`` is itself a vector space.
+Evaluating the action of dual vector on a vector can, because of linearity, be interpreted as a morphism from ``V^* ⊗ V`` to ``I``.
+Note that elements of a vector space ``V`` have no categorical counterpart in themselves, but can be interpreted as morphism from ``I`` to ``V``.
+To map morphisms from ``\mathrm{Hom}(W,V)`` to elements of ``V ⊗ W^*``, i.e. morphisms in ``\mathrm{Hom}(I, V ⊗ W^*)``, we use another morphism ``\mathrm{Hom}(I, W ⊗ W^*)`` which can be considered as the inverse of the evaluation map.
+
+Hence, duality in a monoidal category is defined via an *exact pairing*, i.e. two families of non-degenerate morphisms, the evaluation (or co-unit) ``ϵ_V: {}^{∨}V ⊗ V → I`` and the coevaluation (or unit) ``η_V: I → V ⊗ {}^{∨}V`` which satisfy the "snake rules":
+
+```math
+\begin{align*}
+ρ_V ∘ (\mathrm{id}_V ⊗ ϵ_V) ∘ (η_V ⊗ \mathrm{id}_V) ∘ λ_V^{-1} = \mathrm{id}_V\\
+λ_{^{∨}V}^{-1} ∘ (ϵ_V ⊗ \mathrm{id}_{^{∨}V}) ∘ (\mathrm{id}_{^{∨}V} ⊗ η_V) ∘ ρ_{^{∨}V}^{-1} = \mathrm{id}_{^{∨}V}
+\end{align*}
+```
+
+and can be used to define an isomorphism between ``\mathrm{Hom}(W ⊗ V, U)`` and ``\mathrm{Hom}(W, U ⊗ {}^{∨}V)`` for any triple of objects ``U, V, W ∈ \mathrm{Ob}(C)``.
+Note that if there are different duals (with corresponding exact pairings) associated to an object ``V``, a mixed snake composition using the evaluation of one and coevaluation of the other duality can be used to construct an isomorphism between the two associated dual objects.
+Hence, duality is unique up to isomorphisms.
+
+For (real or complex) vector spaces, we denote the dual as ``V^*``, a notation that we preserve for pivotal categories (see below).
+Using a bra-ket notation and a generic basis ``{|n⟩}`` for ``V`` and dual basis ``{⟨m|}`` for ``V^*`` (such that ``⟨m|n⟩ = δ_{m,n}``), the evaluation is given by ``ϵ_V:{}^{∨}V ⊗ V → ℂ: ⟨m| ⊗ |n⟩ ↦ δ_{m,n}`` and the coevaluation or unit is ``η_V:ℂ→ V ⊗ {}^{∨}V:α ↦ α ∑_n |n⟩ ⊗ ⟨n|``.
+Note that this does not require an inner product, i.e. no relation or mapping from ``|n⟩`` to ``⟨n|`` was defined.
+For a general tensor map ``t:W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2} → V_1 ⊗ V_2 ⊗ … ⊗ V_{N_1}``, by successively applying ``η_{W_{N_2}}``, ``η_{W_{N_2-1}}``, …, ``η_{W_{1}}`` (in combination with the left or right unitor), we obtain a tensor in ``V_1 ⊗ V_2 ⊗ … ⊗ V_{N_1} ⊗ W_{N_2}^* ⊗ … ⊗ W_{1}^*``.
+Hence, we can define or identify ``(W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2})^* = W_{N_2}^* ⊗ … ⊗ W_{1}^*``.
+Indeed, it can be shown that for any category which has duals for objects ``V`` and ``W``, an exact pairing between ``V ⊗ W`` and ``{}^{∨}W ⊗ {}^{∨}V`` can be constructed out of the evaluation and coevaluation of ``V`` and ``W``, such that ``{}^{∨}W ⊗ {}^{∨}V`` is at least isomorphic to ``{}^{∨}(V ⊗ W)``.
Graphically, we represent the exact pairing and snake rules as
@@ -266,86 +155,59 @@ Graphically, we represent the exact pairing and snake rules as
```
-Note that we denote the dual objects ``{}^{∨}V`` as a line ``V`` with arrows pointing in the
-opposite (i.e. upward) direction. This notation is related to quantum field theory, where
-anti-particles are (to some extent) interpreted as particles running backwards in time.
+Note that we denote the dual objects ``{}^{∨}V`` as a line ``V`` with arrows pointing in the opposite (i.e. upward) direction.
+This notation is related to quantum field theory, where anti-particles are (to some extent) interpreted as particles running backwards in time.
-These exact pairings are known as the left evaluation and coevaluation, and ``{}^{∨}V`` is
-the left dual of ``V``. Likewise, we can also define a right dual ``V^{∨}`` of ``V`` and
-associated pairings, the right evaluation ``\tilde{ϵ}_V: V ⊗ V^{∨} → I`` and coevaluation
-``\tilde{η}_V: I → V^{∨} ⊗ V``, satisfying
+These exact pairings are known as the left evaluation and coevaluation, and ``{}^{∨}V`` is the left dual of ``V``.
+Likewise, we can also define a right dual ``V^{∨}`` of ``V`` and associated pairings, the right evaluation ``\tilde{ϵ}_V: V ⊗ V^{∨} → I`` and coevaluation ``\tilde{η}_V: I → V^{∨} ⊗ V``, satisfying
```@raw html
```
-In particular, one could choose ``\tilde{ϵ}_{{}^{∨}V} = ϵ_V`` and thus define ``V`` as the
-right dual of ``{}^{∨}V``. While there might be other choices, this choice must at least be
-isomorphic, such that ``({}^{∨}V)^{∨} ≂ V``.
+In particular, one could choose ``\tilde{ϵ}_{{}^{∨}V} = ϵ_V`` and thus define ``V`` as the right dual of ``{}^{∨}V``.
+While there might be other choices, this choice must at least be isomorphic, such that ``({}^{∨}V)^{∨} ≂ V``.
-If objects ``V`` and ``W`` have left (respectively right) duals, than for a morphism ``f ∈ \mathrm{Hom}(W,V)``, we furthermore define the left (respectively right)
-*transpose* ``{}^{∨}f ∈ \mathrm{Hom}({}^{∨}V, {}^{∨}W)`` (respectively ``f^{∨} ∈ \mathrm{Hom}(V^{∨}, W^{∨})``) as
+If objects ``V`` and ``W`` have left (respectively right) duals, then for a morphism ``f ∈ \mathrm{Hom}(W,V)``, we furthermore define the left (respectively right) *transpose* ``{}^{∨}f ∈ \mathrm{Hom}({}^{∨}V, {}^{∨}W)`` (respectively ``f^{∨} ∈ \mathrm{Hom}(V^{∨}, W^{∨})``) as
```@raw html
```
-where on the right we also illustrate the mapping from
-``t ∈ \mathrm{Hom}(W_1 ⊗ W_2 ⊗ W_3, V_1 ⊗ V_2)`` to a morphism in
-``\mathrm{Hom}(I, V_1 ⊗ V_2 ⊗ {}^{∨} W_3 ⊗ {}^{∨} W_2 ⊗ {}^{∨} W_1)``.
-
-Note that the graphical notation, at least the lines with opposite arrows, do not allow to
-distinguish between the right dual ``V^{∨}`` and the left dual ``{}^{∨}V``. We come back to
-this point below.
-
-A left (or right) duality in a (monoidal) category is now defined as an association of a
-left (or right) dual with every object of the category, with corresponding exact pairings,
-and a category admitting such a duality is a left (or right) **rigid category** (or left or
-right autonomous category). Given that left (or right) morphism transposition satisfies
-``{}^{∨}(f ∘ g)= {}^{∨}g ∘ {}^{∨}f= {}^{∨}f ∘^{\mathrm{op}} {}^{∨}g`` and recalling
-``{}^{∨}(V ⊗ W) = {}^{∨}W ⊗ {}^{∨}V`` (and similar for right duality), we can define duality
-in a functorial way. A (left or right) rigid category ``\mathcal{C}`` is a category which
-admits a (left or right) duality functor, i.e. a functor from ``\mathcal{C}`` to
-``\mathcal{C}^{\mathrm{rev}}`` that maps objects to its (left or right) dual, and morphisms
-to its (left or right) transpose. In particular, the snake rules can now be read as the
-functioral requirement that ``{}^{∨}(\mathrm{id}_V) = \mathrm{id}_{{}^{∨}V}``.
-
-In all of this, left and right duality can be completely distinct. Equivalently, the left
-dual of the left dual of an object ``V``, i.e. ``{}^{∨∨}V`` is not necessarily ``V`` itself,
-nor do the exact pairings enable us to construct an isomorphism between ``{}^{∨∨}V`` and ``V``.
-For finite-dimensional vector spaces, however, ``{}^{∨∨}V`` and ``V``, or thus ``{}^{∨}V``
-and ``V^{∨}`` are known to be isomorphic. The categorical generalization is that of a
-**pivotal category** (or sovereign category), i.e. a monoidal category with two-sided duals
-``X^* = {}^{∨}X = X^{∨} = X^*`` such that the left and right duality functor coincide, and
-thus also the left and right transpose of morphisms, i.e.
-``f^* = {}^{∨}f = f^{∨} ∈ \mathrm{Hom}(V^*,W^*)`` for any ``f∈\mathrm{Hom}(W,V)``. Given that
-``\tilde{ϵ}_{X}`` and ``\tilde{η}_{X}`` can be interpreted as an exact pairing ``ϵ_{X^*}``
-and ``η_{X^*}``, this can be used to recognize ``X`` as a left dual of ``X^*``, which is
-then not necessarily equal but at least isomorphic to ``X^{**}`` with the isomorphism given
-by the mixed snake composition alluded to in the beginning of this section, i.e. ``δ_X: X →
-X^{**}`` given by ``δ_X = (\tilde{ϵ}_X ⊗ \mathrm{id}_{X^*}) ∘ (\mathrm{id}_X ⊗ η_{X^*})``. A
-more formal statement is that ``δ`` is a natural isomorphism between the double dual functor
-and the identity functor of a category ``C``. In a similar manner, such a ``δ`` can be used
-to define a natural isomorphism between left and right dual functor (which is a slight
-generalization of the above definition of a pivotal category), and as such it is often
-called the *pivotal structure*.
-
-Hence, in a pivotal category, left and right duals are the same or isomorphic, and so are
-objects and their double duals. As such, we will not distinguish between them in the
-graphical representation and suppress the natural isomorphism ``δ``. Note, as already
-suggested by the graphical notation above, that we can interpret transposing a morphism as
-rotating its graphical notation by 180 degrees (either way).
-
-Furthermore, in a pivotal category, we can define a map from ``\mathrm{End}(V)``, the
-endomorphisms of an object ``V`` to endomorphisms of the identity object ``I``, i.e. the
-field of scalars in the case of the category ``\mathbf{Vect}``, known as the trace of
-``f``. In fact, we can define both a left trace as
-
-``\mathrm{tr}_{\mathrm{l}}(f) = ϵ_V ∘ (\mathrm{id}_{V^*} ⊗ f) ∘ \tilde{η}_V``
+where on the right we also illustrate the mapping from ``t ∈ \mathrm{Hom}(W_1 ⊗ W_2 ⊗ W_3, V_1 ⊗ V_2)`` to a morphism in ``\mathrm{Hom}(I, V_1 ⊗ V_2 ⊗ {}^{∨} W_3 ⊗ {}^{∨} W_2 ⊗ {}^{∨} W_1)``.
+
+Note that the graphical notation, at least the lines with opposite arrows, does not allow us to distinguish between the right dual ``V^{∨}`` and the left dual ``{}^{∨}V``.
+We come back to this point below.
+
+A left (or right) duality in a (monoidal) category is now defined as an association of a left (or right) dual with every object of the category, with corresponding exact pairings, and a category admitting such a duality is a left (or right) **rigid category** (or left or right autonomous category).
+Given that left (or right) morphism transposition satisfies ``{}^{∨}(f ∘ g)= {}^{∨}g ∘ {}^{∨}f= {}^{∨}f ∘^{\mathrm{op}} {}^{∨}g`` and recalling ``{}^{∨}(V ⊗ W) = {}^{∨}W ⊗ {}^{∨}V`` (and similar for right duality), we can define duality in a functorial way.
+A (left or right) rigid category ``\mathcal{C}`` is a category which admits a (left or right) duality functor, i.e. a functor from ``\mathcal{C}`` to ``\mathcal{C}^{\mathrm{rev}}`` that maps objects to its (left or right) dual, and morphisms to its (left or right) transpose.
+In particular, the snake rules can now be read as the functorial requirement that ``{}^{∨}(\mathrm{id}_V) = \mathrm{id}_{{}^{∨}V}``.
+
+In all of this, left and right duality can be completely distinct.
+Equivalently, the left dual of the left dual of an object ``V``, i.e. ``{}^{∨∨}V`` is not necessarily ``V`` itself, nor do the exact pairings enable us to construct an isomorphism between ``{}^{∨∨}V`` and ``V``.
+For finite-dimensional vector spaces, however, ``{}^{∨∨}V`` and ``V``, or thus ``{}^{∨}V`` and ``V^{∨}`` are known to be isomorphic.
+The categorical generalization is that of a **pivotal category** (or sovereign category), i.e. a monoidal category with two-sided duals ``X^* = {}^{∨}X = X^{∨}`` such that the left and right duality functor coincide, and thus also the left and right transpose of morphisms, i.e. ``f^* = {}^{∨}f = f^{∨} ∈ \mathrm{Hom}(V^*,W^*)`` for any ``f∈\mathrm{Hom}(W,V)``.
+Given that ``\tilde{ϵ}_{X}`` and ``\tilde{η}_{X}`` can be interpreted as an exact pairing ``ϵ_{X^*}`` and ``η_{X^*}``, this can be used to recognize ``X`` as a left dual of ``X^*``, which is then not necessarily equal but at least isomorphic to ``X^{**}`` with the isomorphism given by the mixed snake composition alluded to in the beginning of this section, i.e. ``δ_X: X → X^{**}`` given by ``δ_X = (\tilde{ϵ}_X ⊗ \mathrm{id}_{X^*}) ∘ (\mathrm{id}_X ⊗ η_{X^*})``.
+A more formal statement is that ``δ`` is a natural isomorphism between the double dual functor and the identity functor of a category ``C``.
+In a similar manner, such a ``δ`` can be used to define a natural isomorphism between left and right dual functor (which is a slight generalization of the above definition of a pivotal category), and as such it is often called the *pivotal structure*.
+
+Hence, in a pivotal category, left and right duals are the same or isomorphic, and so are objects and their double duals.
+As such, we will not distinguish between them in the graphical representation and suppress the natural isomorphism ``δ``.
+Note, as already suggested by the graphical notation above, that we can interpret transposing a morphism as rotating its graphical notation by 180 degrees (either way).
+
+Furthermore, in a pivotal category, we can define a map from ``\mathrm{End}(V)``, the endomorphisms of an object ``V`` to endomorphisms of the identity object ``I``, i.e. the field of scalars in the case of the category ``\mathbf{Vect}``, known as the trace of ``f``.
+In fact, we can define both a left trace as
+
+```math
+\mathrm{tr}_{\mathrm{l}}(f) = ϵ_V ∘ (\mathrm{id}_{V^*} ⊗ f) ∘ \tilde{η}_V
+```
and a right trace as
-``\mathrm{tr}_{\mathrm{r}}(f) = \tilde{ϵ}_V ∘ (f ⊗ \mathrm{id}_{V^*}) ∘ η_V``
+```math
+\mathrm{tr}_{\mathrm{r}}(f) = \tilde{ϵ}_V ∘ (f ⊗ \mathrm{id}_{V^*}) ∘ η_V
+```
They are graphically represented as
@@ -353,55 +215,31 @@ They are graphically represented as
```
-and they do not need to coincide. Note that
-``\mathrm{tr}_{\mathrm{l}}(f) = \mathrm{tr}_{\mathrm{r}}(f*)`` and that
-``\mathrm{tr}_{\mathrm{l}/\mathrm{r}}(f∘g) = \mathrm{tr}_{\mathrm{l}/\mathrm{r}}(g∘f)``.
-The (left or right) trace of the identity morphism ``\mathrm{id}_V`` defines the
-corresponding (left or right) dimension of the object ``V``, i.e.
-``\mathrm{dim}_{\mathrm{l}/\mathrm{r}}(V) = tr_{\mathrm{l}/\mathrm{r}}(\mathrm{id}_V)``. In
-a **spherical** category, both definitions of the trace coincide for all ``V`` and we simply
-refer to the trace ``\mathrm{tr}(f)`` of an endomorphism. The particular value
-``\mathrm{dim}(V) = \mathrm{tr}(\mathrm{id}_V)`` is known as the (quantum) dimension of the
-object ``V``, referred to as `dim(V)` in TensorKit.jl.
-
-For further information and a more detailed treatment of rigid and pivotal categories, we
-refer to [^turaev] and [^selinger]. We conclude this section by studying the example of
-``\mathbf{SVect}``. Let us, in every super vector space ``V``, define a basis ``|n⟩``
-that is compatible with the grading, such ``|n|=0,1`` indicates that ``|n⟩ ∈ V_{|n|}``.
-We again define a dual basis ``{⟨m|}`` for ``V^*`` (such that ``⟨m|n⟩ = δ_{m,n}``), and
-then define the left evaluation by
-``ϵ_V:V^* ⊗ V → ℂ: ⟨m| ⊗_\mathrm{g} |n⟩ → ⟨m|n⟩ = δ_{m,n}`` and the left coevaluation by
-``η_V:ℂ→ V ⊗ V^*:α → α ∑_n |n⟩ ⊗_\mathrm{g} ⟨n|``. Note that this does not require an inner
-product and satisfies the snake rules. For the right evaluation and coevaluation, there are
-two natural choices, namely
-``\tilde{ϵ}_V:V ⊗ V^* → ℂ: |n⟩ ⊗_\mathrm{g} ⟨m| → (±1)^{|n|} δ_{m,n}`` and
-``\tilde{η}_V:ℂ → V^* ⊗ V: α → ∑_n (±1)^{|n|} ⟨n| ⊗_\mathrm{g} |n⟩``. The resulting trace
-of an endomorphism ``f ∈ \mathrm{End}(V)`` is given by
-``\mathrm{tr}^{\mathrm{l}}(f) = \mathrm{tr}^{\mathrm{r}}(f) = \mathrm{tr}(f) = ∑_n (± 1)^{|n|} ⟨n|f|n⟩``
-and is known as either the regular trace (in the case of ``+1``) or the *supertrace* (in the
-case of ``-1``). In particular, ``\mathrm{dim}(V) = \mathrm{dim}(V_0) ± \mathrm{dim}(V_1)``,
-and can be negative in the case of the supertrace. Both are valid choices to make
-``\mathbf{SVect}`` into a spherical category.
+and they do not need to coincide.
+Note that ``\mathrm{tr}_{\mathrm{l}}(f) = \mathrm{tr}_{\mathrm{r}}(f^*)`` and that ``\mathrm{tr}_{\mathrm{l}/\mathrm{r}}(f∘g) = \mathrm{tr}_{\mathrm{l}/\mathrm{r}}(g∘f)``.
+The (left or right) trace of the identity morphism ``\mathrm{id}_V`` defines the corresponding (left or right) dimension of the object ``V``, i.e. ``\mathrm{dim}_{\mathrm{l}/\mathrm{r}}(V) = \mathrm{tr}_{\mathrm{l}/\mathrm{r}}(\mathrm{id}_V)``.
+In a **spherical** category, both definitions of the trace coincide for all ``V`` and we simply refer to the trace ``\mathrm{tr}(f)`` of an endomorphism.
+The particular value ``\mathrm{dim}(V) = \mathrm{tr}(\mathrm{id}_V)`` is known as the (quantum) dimension of the object ``V``, referred to as `dim(V)` in TensorKit.jl.
+
+For further information and a more detailed treatment of rigid and pivotal categories, we refer to [^Turaev] and [^Selinger].
+We conclude this section by studying the example of ``\mathbf{SVect}``.
+Let us, in every super vector space ``V``, define a basis ``|n⟩`` that is compatible with the grading.
+The value ``|n|=0,1`` indicates that ``|n⟩ ∈ V_{|n|}``.
+We again define a dual basis ``{⟨m|}`` for ``V^*`` (such that ``⟨m|n⟩ = δ_{m,n}``), and then define the left evaluation by ``ϵ_V:V^* ⊗ V → ℂ: ⟨m| ⊗_\mathrm{g} |n⟩ → ⟨m|n⟩ = δ_{m,n}`` and the left coevaluation by ``η_V:ℂ→ V ⊗ V^*:α → α ∑_n |n⟩ ⊗_\mathrm{g} ⟨n|``.
+Note that this does not require an inner product and satisfies the snake rules.
+For the right evaluation and coevaluation, there are two natural choices, namely ``\tilde{ϵ}_V:V ⊗ V^* → ℂ: |n⟩ ⊗_\mathrm{g} ⟨m| → (±1)^{|n|} δ_{m,n}`` and ``\tilde{η}_V:ℂ → V^* ⊗ V: α → ∑_n (±1)^{|n|} ⟨n| ⊗_\mathrm{g} |n⟩``.
+The resulting trace of an endomorphism ``f ∈ \mathrm{End}(V)`` is given by ``\mathrm{tr}_{\mathrm{l}}(f) = \mathrm{tr}_{\mathrm{r}}(f) = \mathrm{tr}(f) = ∑_n (± 1)^{|n|} ⟨n|f|n⟩`` and is known as either the regular trace (in the case of ``+1``) or the *supertrace* (in the case of ``-1``).
+In particular, ``\mathrm{dim}(V) = \mathrm{dim}(V_0) ± \mathrm{dim}(V_1)``, and can be negative in the case of the supertrace.
+Both are valid choices to make ``\mathbf{SVect}`` into a spherical category.
## [Braidings, twists and ribbons](@id ss_braiding)
-While duality and the pivotal structure allow to move vector spaces back and forth between
-the domain (source) and codomain (target) of a tensor map, reordering vector spaces within
-the domain or codomain of a tensor map , i.e. within a tensor product
-``V_1 ⊗ V_2 ⊗ … V_N`` requires additional structure. In particular, we need at the very
-least a **braided tensor category** ``C``, which is endowed with a *braiding* ``τ``, i.e. a
-natural isomorphism ``{τ_{V,W}:V⊗W → W⊗V}_{V,W ∈ \mathrm{Ob}(C)}`` between the functors ``⊗`` and
-``⊗^{\mathrm{op}}`` such that ``τ_{V,V′}∘(f ⊗ g) = (g ⊗ f)∘τ_{W,W′}`` for any morphisms
-``f ∈ \mathrm{Hom}(W,V)`` and ``g ∈ \mathrm{Hom}(W′,V′)``. A valid braiding needs to satisfy a coherence
-condition with the associator ``α`` known as the *hexagon equation*, which expresses that
-the braiding is ``⊗``-multiplicative, i.e.
-``τ_{U,V⊗W} = (\mathrm{id}_V ⊗ τ_{U,W})(τ_{U,V}⊗\mathrm{id}_W)`` and
-``τ_{U⊗V,W} = (τ_{U,W}⊗\mathrm{id}_VW)(\mathrm{id}_U ⊗ τ_{V,W})`` (where the associator
-has been omitted). We also have ``λ_V ∘ τ_{V,I} = ρ_{V,I}``, ``ρ_V ∘ τ_{I,V} = λ_{V}`` and
-``τ_{V,I} = τ_{I,V}^{-1}`` for any ``V ∈ \mathrm{Ob}(C)``.
-
-The braiding isomorphism ``τ_{V,W}`` and its inverse are graphically represented as the
-lines ``V`` and ``W`` crossing over and under each other:
+While duality and the pivotal structure allow us to move vector spaces back and forth between the domain (source) and codomain (target) of a tensor map, reordering vector spaces within the domain or codomain of a tensor map, i.e. within a tensor product ``V_1 ⊗ V_2 ⊗ … ⊗ V_N`` requires additional structure.
+In particular, we need at the very least a **braided tensor category** ``C``, which is endowed with a *braiding* ``τ``, i.e. a natural isomorphism ``{τ_{V,W}:V⊗W → W⊗V}_{V,W ∈ \mathrm{Ob}(C)}`` between the functors ``⊗`` and ``⊗^{\mathrm{op}}`` such that ``τ_{V,V′}∘(f ⊗ g) = (g ⊗ f)∘τ_{W,W′}`` for any morphisms ``f ∈ \mathrm{Hom}(W,V)`` and ``g ∈ \mathrm{Hom}(W′,V′)``.
+A valid braiding needs to satisfy a coherence condition with the associator ``α`` known as the *hexagon equation*, which expresses that the braiding is ``⊗``-multiplicative, i.e. ``τ_{U,V⊗W} = (\mathrm{id}_V ⊗ τ_{U,W})(τ_{U,V}⊗\mathrm{id}_W)`` and ``τ_{U⊗V,W} = (τ_{U,W}⊗\mathrm{id}_V)(\mathrm{id}_U ⊗ τ_{V,W})`` (where the associator has been omitted).
+We also have ``λ_V ∘ τ_{V,I} = ρ_V``, ``ρ_V ∘ τ_{I,V} = λ_{V}`` and ``τ_{V,I} = τ_{I,V}^{-1}`` for any ``V ∈ \mathrm{Ob}(C)``.
+
+The braiding isomorphism ``τ_{V,W}`` and its inverse are graphically represented as the lines ``V`` and ``W`` crossing over and under each other:
```@raw html
@@ -413,75 +251,61 @@ such that we have
```
-where the expression on the right hand side, ``τ_{W,V}∘τ_{V,W}`` can generically not be
-simplified. Hence, for general braidings, there is no unique choice to identify a tensor in
-``V⊗W`` and ``W⊗V``, as the isomorphisms ``τ_{V,W}``, ``τ_{W,V}^{-1}``,
-``τ_{V,W} ∘ τ_{W,V} ∘ τ_{V,W}``, … mapping from ``V⊗W`` to ``W⊗V`` can all be different. In
-order for there to be a unique map from ``V_1 ⊗ V_2 ⊗ … V_N`` to any permutation of the
-objects in this tensor product, the braiding needs to be *symmetric*, i.e.
-``τ_{V,W} = τ_{W,V}^{-1}`` or, equivalently ``τ_{W,V} ∘ τ_{V,W} = \mathrm{id}_{V⊗W}``. The
-resulting category is then referred to as a **symmetric tensor category**. In a graphical
-representation, it means that there is no distinction between over- and under- crossings
-and, as such, lines can just cross, where the crossing represents the action of
-``τ_{V,W} = τ_{W,V}^{-1}``.
-
-In the case of the category ``\mathbf{Vect}`` a valid braiding consists of just flipping
-the the objects/morphisms involved, e.g. for a simple cartesian tensor, permuting the
-tensor indices is equivalent to applying Julia's function `permutedims` on the underlying
-data. Less trivial braiding implementations arise in the context of tensors with symmetries
-(where the fusion tree needs to be reordered, as discussed in
-[Sectors, representation spaces and fusion trees](@ref s_sectorsrepfusion)) or in the case
-of ``\mathbf{SVect}``, which will again be studied in detail at the end of this section.
-
-The braiding of a space and a dual space also follows naturally, it is given by
-``τ_{V^*,W} = λ_{W ⊗ V^*} ∘ (ϵ_V ⊗ \mathrm{id}_{W ⊗ V^*}) ∘ (\mathrm{id}_{V^*} ⊗ τ_{V,W}^{-1} ⊗ \mathrm{id}_{V^*}) ∘ (\mathrm{id}_{V^*⊗ W} ⊗ η_V) ∘ ρ_{V^* ⊗ W}^{-1}``, i.e.
+where the expression on the right hand side, ``τ_{W,V}∘τ_{V,W}`` can generically not be simplified.
+Hence, for general braidings, there is no unique choice to identify a tensor in ``V⊗W`` and ``W⊗V``, as the isomorphisms ``τ_{V,W}``, ``τ_{W,V}^{-1}``, ``τ_{V,W} ∘ τ_{W,V} ∘ τ_{V,W}``, … mapping from ``V⊗W`` to ``W⊗V`` can all be different.
+In order for there to be a unique map from ``V_1 ⊗ V_2 ⊗ … ⊗ V_N`` to any permutation of the objects in this tensor product, the braiding needs to be *symmetric*, i.e. ``τ_{V,W} = τ_{W,V}^{-1}`` or, equivalently ``τ_{W,V} ∘ τ_{V,W} = \mathrm{id}_{V⊗W}``.
+The resulting category is then referred to as a **symmetric tensor category**.
+In a graphical representation, it means that there is no distinction between over- and under- crossings and, as such, lines can just cross, where the crossing represents the action of ``τ_{V,W} = τ_{W,V}^{-1}``.
+
+In the case of the category ``\mathbf{Vect}`` a valid braiding consists of just flipping the objects/morphisms involved, e.g. for a simple cartesian tensor, permuting the tensor indices is equivalent to applying Julia's function `permutedims` on the underlying data.
+Less trivial braiding implementations arise in the context of tensors with symmetries (where the fusion tree needs to be reordered, as discussed in [Sectors, representation spaces and fusion trees](@ref s_sectorsrepfusion)) or in the case of ``\mathbf{SVect}``, which will again be studied in detail at the end of this section.
+
+The braiding of a space and a dual space also follows naturally, it is given by ``τ_{V^*,W} = λ_{W ⊗ V^*} ∘ (ϵ_V ⊗ \mathrm{id}_{W ⊗ V^*}) ∘ (\mathrm{id}_{V^*} ⊗ τ_{V,W}^{-1} ⊗ \mathrm{id}_{V^*}) ∘ (\mathrm{id}_{V^*⊗ W} ⊗ η_V) ∘ ρ_{V^* ⊗ W}^{-1}``, i.e.
```@raw html
```
-**Balanced categories** ``C`` are braided categories that come with a **twist** ``θ``, a
-natural transformation from the identity functor ``1_C`` to itself, such that
-``θ_V ∘ f = f ∘ θ_W`` for all morphisms ``f ∈ \mathrm{Hom}(W,V)``, and for which main requirement is
-that
+**Balanced categories** ``C`` are braided categories that come with a **twist** ``θ``, a natural transformation from the identity functor ``1_C`` to itself, such that ``θ_V ∘ f = f ∘ θ_W`` for all morphisms ``f ∈ \mathrm{Hom}(W,V)``, and for which the main requirement is that
-``θ_{V⊗W} = τ_{W,V} ∘ (θ_W ⊗ θ_V) ∘ τ_{V,W} = (θ_V ⊗ θ_W) ∘ τ_{W,V} ∘ τ_{V,W}.``
+```math
+θ_{V⊗W} = τ_{W,V} ∘ (θ_W ⊗ θ_V) ∘ τ_{V,W} = (θ_V ⊗ θ_W) ∘ τ_{W,V} ∘ τ_{V,W}.
+```
-In particular, a braided pivotal category is balanced, as we can even define two such
-twists, namely a left and right twist given by
+In particular, a braided pivotal category is balanced, as we can even define two such twists, namely a left and right twist given by
-``θ^{\mathrm{l}}_V = (ϵ_V ⊗ \mathrm{id}_V)(\mathrm{id}_{V*} ⊗ τ_{V,V}) (\tilde{η}_V ⊗ \mathrm{id}_V)``
+```math
+θ^{\mathrm{l}}_V = (ϵ_V ⊗ \mathrm{id}_V)(\mathrm{id}_{V^*} ⊗ τ_{V,V}) (\tilde{η}_V ⊗ \mathrm{id}_V)
+```
and
-``θ^{\mathrm{r}}_V = (\mathrm{id}_V ⊗ \tilde{ϵ}_V)(τ_{V,V} ⊗ \mathrm{id}_{V*})(\mathrm{id}_V ⊗ ϵ_V)``
+```math
+θ^{\mathrm{r}}_V = (\mathrm{id}_V ⊗ \tilde{ϵ}_V)(τ_{V,V} ⊗ \mathrm{id}_{V^*})(\mathrm{id}_V ⊗ η_V)
+```
-where we omitted the necessary left and right unitors and associators. Graphically, the
-twists and their inverse (for which we refer to [^turaev]) are then represented as
+where we omitted the necessary left and right unitors and associators.
+Graphically, the twists and their inverse (for which we refer to [^Turaev]) are then represented as
```@raw html
```
-The graphical representation also makes it straightforward to verify that
-``(θ^{\mathrm{l}}_V)^* = θ^{\mathrm{r}}_{V^*}``,
-``(θ^{\mathrm{r}}_V)^* = θ^{\mathrm{l}}_{V^*}`` and
-``\mathrm{tr}_{\mathrm{l}}( θ^{\mathrm{r}}_V ) = \mathrm{tr}_{\mathrm{r}}( θ^{\mathrm{l}}_V )``.
+The graphical representation also makes it straightforward to verify that ``(θ^{\mathrm{l}}_V)^* = θ^{\mathrm{r}}_{V^*}``, ``(θ^{\mathrm{r}}_V)^* = θ^{\mathrm{l}}_{V^*}`` and ``\mathrm{tr}_{\mathrm{l}}( θ^{\mathrm{r}}_V ) = \mathrm{tr}_{\mathrm{r}}( θ^{\mathrm{l}}_V )``.
-When ``θ^{\mathrm{l}} = θ^{\mathrm{r}}``, or thus, equivalently, ``θ_V^* = θ_{V^*}`` for
-either ``θ^{\mathrm{l}}`` or ``θ^{\mathrm{r}}``, the category is said to be **tortile** or
-also a **ribbon category**, because its graphical representation is compatible with the
-isotopy of a ribbon, i.e. where the lines representing objects are depicted as ribbons. For
-convenience, we continue to denote them as lines. Ribbon categories are necessarily
-spherical, i.e. one can prove the equivalence of the left and right trace.
+When ``θ^{\mathrm{l}} = θ^{\mathrm{r}}``, or thus, equivalently, ``θ_V^* = θ_{V^*}`` for either ``θ^{\mathrm{l}}`` or ``θ^{\mathrm{r}}``, the category is said to be **tortile** or also a **ribbon category**, because its graphical representation is compatible with the isotopy of a ribbon, i.e. where the lines representing objects are depicted as ribbons.
+For convenience, we continue to denote them as lines.
+Ribbon categories are necessarily spherical, i.e. one can prove the equivalence of the left and right trace.
-Alternatively, one can start from a balanced and rigid category (e.g. with a left duality),
-and use the twist ``θ``, which should satisfy ``θ_V^* = θ_{V^*}``, to define a pivotal
-structure, or, to define the exact pairing for the right dual functor as
+Alternatively, one can start from a balanced and rigid category (e.g. with a left duality), and use the twist ``θ``, which should satisfy ``θ_V^* = θ_{V^*}``, to define a pivotal structure, or, to define the exact pairing for the right dual functor as
-``\tilde{η}_V = τ_{V,V^*} ∘ (θ_V ⊗ \mathrm{id}_{V^*}) ∘ η_V = (\mathrm{id}_{V^*} ⊗ θ_V) ∘ τ_{V,V^*} ∘ η_V``
+```math
+\tilde{η}_V = τ_{V,V^*} ∘ (θ_V ⊗ \mathrm{id}_{V^*}) ∘ η_V = (\mathrm{id}_{V^*} ⊗ θ_V) ∘ τ_{V,V^*} ∘ η_V
+```
-``\tilde{ϵ}_V = ϵ_V ∘ (\mathrm{id}_{V^*} ⊗ θ_V) ∘ τ_{V,V^*} = ϵ_V ∘ τ_{V,V^*} ∘ (θ_V ⊗ \mathrm{id}_{V^*})``
+```math
+\tilde{ϵ}_V = ϵ_V ∘ (\mathrm{id}_{V^*} ⊗ θ_V) ∘ τ_{V,V^*} = ϵ_V ∘ τ_{V,V^*} ∘ (θ_V ⊗ \mathrm{id}_{V^*})
+```
or graphically
@@ -489,242 +313,164 @@ or graphically
```
-where we have drawn ``θ`` as ``θ^{\mathrm{l}}`` on the left and as ``θ^{\mathrm{r}}`` on
-the right, but in this case the starting assumption was that they are one and the same, and
-we defined the pivotal structure so as to make it compatible with the graphical
-representation. This construction of the pivotal structure can than be used to define the
-trace, which is spherical, i.e.
-
-``\mathrm{tr}(f) = ϵ_V ∘ τ_{V,V^*} ∘ (( θ_V ∘ f) ⊗ \mathrm{id}_{V^*}) ∘ η_V = ϵ_V ∘ (\mathrm{id}_{V^*} ⊗ (f ∘ θ_V)) ∘ τ_{V,V^*} ∘ η_V``
-
-Note finally, that a ribbon category where the braiding is symmetric, is known as a
-**compact closed category**. For a symmetric braiding, the trivial twist
-``θ_V = \mathrm{id}_V`` is always a valid choice, but it might not be the choice that one
-necessarily want to use. Let us study the case of ``\mathbf{SVect}`` again. Reinvoking our
-basis ``|m⟩ ∈ V`` and ``|n⟩ ∈ W``, the braiding ``τ_{V,W}`` is given by the Koszul sign
-rule, i.e. ``τ_{V,W}:|m⟩ ⊗_\mathrm{g} |n⟩ ↦ (-1)^{|m| |n|} |n⟩ ⊗_\mathrm{g} |m⟩``. Hence,
-braiding amounts to flipping the two spaces, but picks up an additional minus sign if both
-``|m⟩ ∈ V_1`` and ``|n⟩ ∈ W_1``. This braiding is symmetric, i.e.
-``τ_{W,V} ∘ τ_{V,W} = \mathrm{id}_{V⊗W}``. Between spaces and dual spaces, we similarly
-obtain the braiding rule ``⟨m| ⊗_\mathrm{g} |n⟩ ↦ (-1)^{|m| |n|} |n⟩ ⊗_\mathrm{g} ⟨m|``.
-Combining the braiding and the pivotal structure gives rise to a ribbon category, and thus,
-a compact closed category, where the resulting twist is given by
-``θ_V : |n⟩ ↦ (∓1)^{|n|} |n⟩`` for
-``\tilde{ϵ}_V:V ⊗ V^* → ℂ: |n⟩ ⊗_\mathrm{g} ⟨m| ↦ (±1)^{|n|} δ_{m,n}`` and corresponding
-``\tilde{η}_V``. Hence, if the right (co)evaluation contains a minus sign, the twist is
-``θ_V = \mathrm{id}_V``, which, as mentioned above, is always a valid twist for a symmetric
-category. However, if the right (co)evaluation contains no minus sign, the twist acts as
-the parity endomorphism, i.e. as ``+1`` on ``V_0`` and as ``-1`` on ``V_1``, which, as we
-will see in the next section, corresponds to a choice bearing additional structure.
+where we have drawn ``θ`` as ``θ^{\mathrm{l}}`` on the left and as ``θ^{\mathrm{r}}`` on the right, but in this case the starting assumption was that they are one and the same, and we defined the pivotal structure so as to make it compatible with the graphical representation.
+This construction of the pivotal structure can then be used to define the trace, which is spherical, i.e.
+
+```math
+\mathrm{tr}(f) = ϵ_V ∘ τ_{V,V^*} ∘ (( θ_V ∘ f) ⊗ \mathrm{id}_{V^*}) ∘ η_V = ϵ_V ∘ (\mathrm{id}_{V^*} ⊗ (f ∘ θ_V)) ∘ τ_{V,V^*} ∘ η_V
+```
+
+Note finally, that a ribbon category where the braiding is symmetric, is known as a **compact closed category**.
+For a symmetric braiding, the trivial twist ``θ_V = \mathrm{id}_V`` is always a valid choice, but it might not be the choice that one necessarily wants to use.
+Let us study the case of ``\mathbf{SVect}`` again.
+Reinvoking our basis ``|m⟩ ∈ V`` and ``|n⟩ ∈ W``, the braiding ``τ_{V,W}`` is given by the Koszul sign rule, i.e. ``τ_{V,W}:|m⟩ ⊗_\mathrm{g} |n⟩ ↦ (-1)^{|m| |n|} |n⟩ ⊗_\mathrm{g} |m⟩``.
+Hence, braiding amounts to flipping the two spaces, but picks up an additional minus sign if both ``|m⟩ ∈ V_1`` and ``|n⟩ ∈ W_1``.
+This braiding is symmetric, i.e. ``τ_{W,V} ∘ τ_{V,W} = \mathrm{id}_{V⊗W}``.
+Between spaces and dual spaces, we similarly obtain the braiding rule ``⟨m| ⊗_\mathrm{g} |n⟩ ↦ (-1)^{|m| |n|} |n⟩ ⊗_\mathrm{g} ⟨m|``.
+Combining the braiding and the pivotal structure gives rise to a ribbon category, and thus, a compact closed category, where the resulting twist is given by ``θ_V : |n⟩ ↦ (∓1)^{|n|} |n⟩`` for ``\tilde{ϵ}_V:V ⊗ V^* → ℂ: |n⟩ ⊗_\mathrm{g} ⟨m| ↦ (±1)^{|n|} δ_{m,n}`` and corresponding ``\tilde{η}_V``.
+Hence, if the right (co)evaluation contains a minus sign, the twist is ``θ_V = \mathrm{id}_V``, which, as mentioned above, is always a valid twist for a symmetric category.
+However, if the right (co)evaluation contains no minus sign, the twist acts as the parity endomorphism, i.e. as ``+1`` on ``V_0`` and as ``-1`` on ``V_1``, which, as we will see in the next section, corresponds to a choice bearing additional structure.
## [Adjoints and dagger categories](@id ss_adjoints)
-A final aspect of categories as they are relevant to physics, and in particular quantum
-physics, is the notion of an adjoint or dagger. A **dagger category** ``C`` is a category
-together with an involutive functor ``†:C→C^{\mathrm{op}}``, i.e. it acts as the identity
-on objects, whereas on morphisms ``f:W→V`` it defines a morphism ``f^†:V→W`` such that
-* ``\mathrm{id}_V^† = \mathrm{id}_V``
-* ``(f ∘ g)^† = f^† ∘^{\mathrm{op}} g^† = g^† ∘ f^†``
-* ``(f^†)^† = f``
-Sometimes also the symbol ``*`` is used instead of ``†``, however we have already used
-``*`` to denote dual objects and transposed morphisms in the case of a pivotal category.
-
-If the category is ``ℂ``-linear, the dagger functor is often assumed to be antilinear, i.e.
-``(λ f)^† = \bar{λ} f^†`` for ``λ ∈ ℂ`` and ``f ∈ \mathrm{Hom}(V,W)``. In a dagger
-category, a morphism ``f:W→V`` is said to be *unitary* if it is an isomorphism and
-``f^{-1} = f^†``. Furthermore, an endomorphism ``f:V→V`` is *hermitian* or self-adjoint if
-``f^† = f``. Finally, we will also use the term *isometry* for a morphism ``f:W→V`` which has
-a left inverse ``f^†``, i.e. such that ``f^† ∘ f = \mathrm{id}_W``, but for which
-``f ∘ f^†`` is not necessarily the identity (but rather some orthogonal projector, i.e. a
-hermitian idempotent in ``\mathrm{End}(V)``).
-
-In the graphical representation, the dagger of a morphism can be represented by mirroring
-the morphism around a horizontal axis, and then reversing all arrows (bringing them back to
-their original orientation before the mirror operation):
+A final aspect of categories as they are relevant to physics, and in particular quantum physics, is the notion of an adjoint or dagger.
+A **dagger category** ``C`` is a category together with an involutive functor ``†:C→C^{\mathrm{op}}``, i.e. it acts as the identity on objects, whereas on morphisms ``f:W→V`` it defines a morphism ``f^†:V→W`` such that
+
+* ``\mathrm{id}_V^† = \mathrm{id}_V``
+* ``(f ∘ g)^† = f^† ∘^{\mathrm{op}} g^† = g^† ∘ f^†``
+* ``(f^†)^† = f``.
+
+Sometimes also the symbol ``*`` is used instead of ``†``.
+However, we have already used ``*`` to denote dual objects and transposed morphisms in the case of a pivotal category.
+
+If the category is ``ℂ``-linear, the dagger functor is often assumed to be antilinear, i.e. ``(λ f)^† = \bar{λ} f^†`` for ``λ ∈ ℂ`` and ``f ∈ \mathrm{Hom}(V,W)``.
+In a dagger category, a morphism ``f:W→V`` is said to be *unitary* if it is an isomorphism and ``f^{-1} = f^†``.
+Furthermore, an endomorphism ``f:V→V`` is *hermitian* or self-adjoint if ``f^† = f``.
+Finally, we will also use the term *isometry* for a morphism ``f:W→V`` which has a left inverse ``f^†``, i.e. such that ``f^† ∘ f = \mathrm{id}_W``, but for which ``f ∘ f^†`` is not necessarily the identity (but rather some orthogonal projector, i.e. a hermitian idempotent in ``\mathrm{End}(V)``).
+
+In the graphical representation, the dagger of a morphism can be represented by mirroring the morphism around a horizontal axis, and then reversing all arrows (bringing them back to their original orientation before the mirror operation):
```@raw html
```
-where for completeness we have also depicted the graphical representation of the transpose,
-which is a very different operation. In particular, the dagger does not reverse the order
-of the tensor product. Note that, for readibility, we have not mirrored or rotated the
-label in the box, but this implies that we need to use a type of box for which the action
-of mirroring or rotating can be observed.
+where for completeness we have also depicted the graphical representation of the transpose, which is a very different operation.
+In particular, the dagger does not reverse the order of the tensor product.
+Note that, for readability, we have not mirrored or rotated the label in the box, but this implies that we need to use a type of box for which the action of mirroring or rotating can be observed.
-A dagger monoidal category is one in which the associator and left and right unitor are
-unitary morphisms. Similarly, a dagger braided category also has a unitary braiding, and a
-dagger balanced category in addition has a unitary twist.
+A dagger monoidal category is one in which the associator and left and right unitor are unitary morphisms.
+Similarly, a dagger braided category also has a unitary braiding, and a dagger balanced category in addition has a unitary twist.
-There is more to be said about the interplay between the dagger and duals. Given a left
-evaluation ``ϵ_V: V^* ⊗ V → I`` and coevaluation ``η_V: I → V ⊗ V^*``, we can define a
-right evaluation ``\tilde{ϵ}_V = (η_V)^†`` and coevaluation ``\tilde{η}_V = (ϵ_V)^†``.
+There is more to be said about the interplay between the dagger and duals.
+Given a left evaluation ``ϵ_V: V^* ⊗ V → I`` and coevaluation ``η_V: I → V ⊗ V^*``, we can define a right evaluation ``\tilde{ϵ}_V = (η_V)^†`` and coevaluation ``\tilde{η}_V = (ϵ_V)^†``.
Hence, left rigid dagger categories are automatically pivotal dagger categories.
The (right) twist defined via the pivotal structure now becomes
-``θ_V = (\mathrm{id}_V ⊗ (η_V)^†) ∘ (τ_{V,V} ⊗ \mathrm{id}_{V^*}) ∘ (\mathrm{id}_V ⊗ η_V)``
-
-and is itself unitary. Even for a symmetric category, the twist defined as such must not be
-the identity, as we discuss for the ``\mathbf{SVect}`` example below.
-
-Finally, the dagger allows to define two Hermitian forms on the morphisms, namely
-``⟨ f, g ⟩_{\mathrm{l}/\mathrm{r}} = \mathrm{tr}_{\mathrm{l}/\mathrm{r}}(f^† g)``, which
-coincide for a spherical category. For a *unitary ``𝕜``-linear category*, these Hermitian
-forms should be positive definite and thus define an inner product on each of the
-homomorphism spaces ``\mathrm{Hom}(W,V)``. In particular then, dimensions of objects are
-positive, as they satisfy
-``\mathrm{dim}_{\mathrm{l}/\mathrm{r}}(V) = ⟨ \mathrm{id}_V, \mathrm{id}_V ⟩_{\mathrm{l}/\mathrm{r}}``.
-
-This concludes the most important categorical definitions and structures that we want to
-discuss for the category ``\mathbf{Vect}``, but which can also be realized in other
-categories. In particular, the interface of TensorKit.jl could *in principle* represent
-morphisms from any ``𝕜``-linear monoidal category, but assumes categories with duals to be
-pivotal and in fact spherical, and categories with a braiding to be ribbon categories. A
-dagger ribbon category where the braiding is symmetric, i.e. a dagger category which is
-also a compact closed category and where the right (co)evaluation is given via the dagger
-of the left (co)evaluation is called a **dagger compact** category. This is the playground
-of quantum mechanics of bosonic and fermionic systems. However, we also allow for non-
-symmetric braiding in TensorKit.jl, though this functionality is currently much more
-limited.
-
-Again studying the category ``\mathbf{SVect}_{ℂ}`` (now explicitly over the complex
-numbers) and using the conventional adjoint or the complex Euclidean inner product to
-define the dagger functor, the right (co)evaluation that is obtained from applying the
-dagger to the left (co)evaluation is the definition we gave above with the ``+1`` sign. This
-choice gives rise to a regular trace (versus the supertrace) of endomorphisms, to positive
-dimensions, and a non-trivial twist that acts as the parity endomorphism. The resulting
-category is then a **dagger compact** category, that can be used for the quantum mechanical
-description of fermionic systems. The bosonic version is obtained by restricting to the
-subcategory ``\mathbf{Vect}``.
+```math
+θ_V = (\mathrm{id}_V ⊗ (η_V)^†) ∘ (τ_{V,V} ⊗ \mathrm{id}_{V^*}) ∘ (\mathrm{id}_V ⊗ η_V)
+```
+
+and is itself unitary.
+Even for a symmetric category, the twist defined as such need not be the identity, as we discuss for the ``\mathbf{SVect}`` example below.
+
+Finally, the dagger allows one to define two Hermitian forms on the morphisms, namely ``⟨ f, g ⟩_{\mathrm{l}/\mathrm{r}} = \mathrm{tr}_{\mathrm{l}/\mathrm{r}}(f^† g)``, which coincide for a spherical category.
+For a *unitary ``𝕜``-linear category*, these Hermitian forms should be positive definite and thus define an inner product on each of the homomorphism spaces ``\mathrm{Hom}(W,V)``.
+In particular then, dimensions of objects are positive, as they satisfy ``\mathrm{dim}_{\mathrm{l}/\mathrm{r}}(V) = ⟨ \mathrm{id}_V, \mathrm{id}_V ⟩_{\mathrm{l}/\mathrm{r}}``.
+
+This concludes the most important categorical definitions and structures that we want to discuss for the category ``\mathbf{Vect}``, but which can also be realized in other categories.
+In particular, the interface of TensorKit.jl could *in principle* represent morphisms from any ``𝕜``-linear monoidal category, but assumes categories with duals to be pivotal and in fact spherical, and categories with a braiding to be ribbon categories.
+A dagger ribbon category where the braiding is symmetric, i.e. a dagger category which is also a compact closed category and where the right (co)evaluation is given via the dagger of the left (co)evaluation, is called a **dagger compact** category.
+This is the playground of quantum mechanics of bosonic and fermionic systems.
+However, we also allow for non-symmetric braiding in TensorKit.jl, though this functionality typically requires more careful considerations.
+
+Again studying the category ``\mathbf{SVect}_{ℂ}`` (now explicitly over the complex numbers) and using the conventional adjoint or the complex Euclidean inner product to define the dagger functor, the right (co)evaluation that is obtained from applying the dagger to the left (co)evaluation is the definition we gave above with the ``+1`` sign.
+This choice gives rise to a regular trace (versus the supertrace) of endomorphisms, to positive dimensions, and a non-trivial twist that acts as the parity endomorphism.
+The resulting category is then a **dagger compact** category, that can be used for the quantum mechanical description of fermionic systems.
+The bosonic version is obtained by restricting to the subcategory ``\mathbf{Vect}``.
## [Direct sums, simple objects and fusion categories](@id ss_fusion)
-These last two section on fusion categories is also applicable, in a straightforward
-manner, to ``\mathbf{Vect}`` and ``\mathbf{SVect}``, but is rather meant to provide the
-background of working with symmetries. We first need two new concepts:
-
-* An object ``W ∈ \mathrm{Ob}(C)`` is a **direct sum** of objects
- ``V_1, V_2, …, V_k ∈ \mathrm{Ob}(C)`` if there exists a family morphisms
- ``x_α ∈ \mathrm{Hom}(V_α,W)`` and ``y^α ∈ \mathrm{Hom}(W,V_α)`` such that
- ``\mathrm{id}_W = ∑_{α=1}^{k} x_α ∘ y^α`` and ``y^α ∘ x_β = δ^α_β \mathrm{id}_{V_α}``.
- The morphisms ``x_α`` and ``y^α`` are known as *inclusions* and *projections*
- respectively, and in the context of dagger categories it is natural to assume
- ``y^α = x_α^†`` in order to obtain an orthogonal direct sum decomposition.
-
-* A **simple object** ``V ∈ \mathrm{Ob}(C)`` of a ``𝕜``-linear category ``C`` is an
- object for which ``End_C(V) ≂ 𝕜``, i.e. the algebra of endomorphisms on ``V`` is
- isomorphic to the field (or ring) ``𝕜``. As ``End_C(V)`` always contains the identity
- morphism ``\mathrm{id}_V``, and this must be the only linearly independent endomorphism
- if ``V`` is a simple object, the isomorphism between ``\mathrm{End}_C(V)`` and ``𝕜``
- is typically of the form ``k ∈ 𝕜 ↔ k \mathrm{id}_V ∈ End_C(V)``. In particular, for
- ``\mathbf{SVect}`` and its subcategory ``\mathbf{Vect}``, the unit object ``I`` is a
- simple object.
-
-In particular, for a pivotal ``𝕜``-linear category where ``I`` is simple, it holds that
-the left and right dimensions of any simple object ``V`` are invertible in ``𝕜``, and that
-any endomorphism ``f ∈ \mathrm{End}(V)`` can be written as
-
-``f = (\mathrm{dim}_\mathrm{l}(V))^{-1} \mathrm{tr}_{\mathrm{l}}(f) \mathrm{id}_V =
-(\mathrm{dim}_\mathrm{r}(V))^{-1} \mathrm{tr}_{\mathrm{r}}(f) \mathrm{id}_V``
-
-Strictly speaking, this holds only if the category is non-degenerate, which means that
-``I`` is simple and that any non-degenerate pairing ``e:V ⊗ W → I`` induces a non-
-degenerate pairing ``\mathrm{Hom}(I,V) ⊗ \mathrm{Hom}(I,W) → \mathrm{End}(I)``. This
-property is always satisfied for a **pre-fusion category** ``C``, i.e. a monoidal ``𝕜``-
-linear category having a set ``\mathcal{S} ⊂ \mathrm{Ob}(C)`` of simple objects
-``\mathcal{S}=\{I, V_1, V_2, \ldots\}`` such that
+These last two sections on fusion categories are also applicable, in a straightforward manner, to ``\mathbf{Vect}`` and ``\mathbf{SVect}``, but are rather meant to provide the background of working with symmetries.
+We first need two new concepts:
+
+* An object ``W ∈ \mathrm{Ob}(C)`` is a **direct sum** of objects ``V_1, V_2, …, V_k ∈ \mathrm{Ob}(C)`` if there exists a family of morphisms ``x_α ∈ \mathrm{Hom}(V_α,W)`` and ``y^α ∈ \mathrm{Hom}(W,V_α)`` such that ``\mathrm{id}_W = ∑_{α=1}^{k} x_α ∘ y^α`` and ``y^α ∘ x_β = δ^α_β \mathrm{id}_{V_α}``.
+ The morphisms ``x_α`` and ``y^α`` are known as *inclusions* and *projections* respectively, and in the context of dagger categories it is natural to assume ``y^α = x_α^†`` in order to obtain an orthogonal direct sum decomposition.
+
+* A **simple object** ``V ∈ \mathrm{Ob}(C)`` of a ``𝕜``-linear category ``C`` is an object for which ``End_C(V) ≂ 𝕜``, i.e. the algebra of endomorphisms on ``V`` is isomorphic to the field (or ring) ``𝕜``.
+ As ``End_C(V)`` always contains the identity morphism ``\mathrm{id}_V``, and this must be the only linearly independent endomorphism if ``V`` is a simple object, the isomorphism between ``\mathrm{End}_C(V)`` and ``𝕜`` is typically of the form ``k ∈ 𝕜 ↔ k \mathrm{id}_V ∈ End_C(V)``.
+ In particular, for ``\mathbf{SVect}`` and its subcategory ``\mathbf{Vect}``, the unit object ``I`` is a simple object.
+
+In particular, for a pivotal ``𝕜``-linear category where ``I`` is simple, it holds that the left and right dimensions of any simple object ``V`` are invertible in ``𝕜``, and that any endomorphism ``f ∈ \mathrm{End}(V)`` can be written as
+
+```math
+f = (\mathrm{dim}_\mathrm{l}(V))^{-1} \mathrm{tr}_{\mathrm{l}}(f) \mathrm{id}_V =
+(\mathrm{dim}_\mathrm{r}(V))^{-1} \mathrm{tr}_{\mathrm{r}}(f) \mathrm{id}_V
+```
+
+Strictly speaking, this holds only if the category is non-degenerate, which means that ``I`` is simple and that any non-degenerate pairing ``e:V ⊗ W → I`` induces a non-degenerate pairing ``\mathrm{Hom}(I,V) ⊗ \mathrm{Hom}(I,W) → \mathrm{End}(I)``.
+This property is always satisfied for a **pre-fusion category** ``C``, i.e. a monoidal ``𝕜``-linear category having a set ``\mathcal{S} ⊂ \mathrm{Ob}(C)`` of simple objects ``\mathcal{S}=\{I, V_1, V_2, \ldots\}`` such that
* the monoidal unit ``I_C ∈ \mathcal{S}``;
-* ``\mathrm{Hom}_C(V_i,V_j) = 0`` (the singleton set containing only the zero
- homomorphism) for any distinct ``V_i, V_j ∈ \mathcal{S}``;
-* every object ``V ∈ \mathrm{Ob}(C)`` can be written as a direct sum of a finite family of
- elements from ``\mathcal{S}``.
-
-Note that in the direct sum decomposition of an object ``V``, a particular simple object
-``V_i`` might appear multiple times. This number is known as the multiplicity index
-``N^V_i``, and equal to the rank of ``\mathrm{Hom}(V,V_i)`` or, equivalently, of ``\mathrm{Hom}(V_i,V)``.
-Hence, we can choose inclusion and projection maps ``x_{i,μ}:V_i→V`` and ``y^{i,μ}:V→V_i``
-for ``μ = 1,\ldots, N^V_i``, such that
-``\mathrm{id}_V = \sum_{i}\sum_{μ=1}^{N_V^i} x_{i,μ} ∘ y^{i,μ}`` and
-``y^{i,μ} ∘ x_{j,ν} = δ^i_j δ^μ_ν``. In particular, for a simple object ``V``, it either
-appears in ``\mathcal{S}`` or is isomorphic to an object ``S``. We thus have ``N^V_i = 1``
-for one particular object ``V_i`` and ``N^V_j= 0`` for all other ``j``, with ``x_{i}`` and
-``y^i = (x_i)^{-1}`` representing the isomorphism between ``V`` and ``V_i``.
-
-The homomorphisms between two general objects ``W`` and ``V`` in a pre-fusion category can
-be decomposed as
-
-``\mathrm{Hom}(W,V) ≂ ⨁_{V_i ∈ \mathcal{S}} \mathrm{Hom}(W,V_i) ⊗ \mathrm{Hom}(V_i,V)``
+* ``\mathrm{Hom}_C(V_i,V_j) = 0`` (the singleton set containing only the zero homomorphism) for any distinct ``V_i, V_j ∈ \mathcal{S}``;
+* every object ``V ∈ \mathrm{Ob}(C)`` can be written as a direct sum of a finite family of elements from ``\mathcal{S}``.
+
+Note that in the direct sum decomposition of an object ``V``, a particular simple object ``V_i`` might appear multiple times.
+This number is known as the multiplicity index ``N^V_i``, and equal to the rank of ``\mathrm{Hom}(V,V_i)`` or, equivalently, of ``\mathrm{Hom}(V_i,V)``.
+Hence, we can choose inclusion and projection maps ``x_{i,μ}:V_i→V`` and ``y^{i,μ}:V→V_i`` for ``μ = 1,\ldots, N^V_i``, such that ``\mathrm{id}_V = \sum_{i}\sum_{μ=1}^{N_V^i} x_{i,μ} ∘ y^{i,μ}`` and ``y^{i,μ} ∘ x_{j,ν} = δ^i_j δ^μ_ν``.
+In particular, for a simple object ``V``, it either appears in ``\mathcal{S}`` or is isomorphic to an object in ``\mathcal{S}``.
+We thus have ``N^V_i = 1`` for one particular object ``V_i`` and ``N^V_j= 0`` for all other ``j``, with ``x_{i}`` and ``y^i = (x_i)^{-1}`` representing the isomorphism between ``V`` and ``V_i``.
+
+The homomorphisms between two general objects ``W`` and ``V`` in a pre-fusion category can be decomposed as
+
+```math
+\mathrm{Hom}(W,V) ≂ ⨁_{V_i ∈ \mathcal{S}} \mathrm{Hom}(W,V_i) ⊗ \mathrm{Hom}(V_i,V)
+```
and thus that the rank of ``\mathrm{Hom}(W,V)`` is given by ``∑_i N^W_i N^V_i``.
-A **fusion category** is a pre-fusion category that has (left or right) duals, i.e. that is
-rigid, and that only has a finite number of isomorphism classes of simple objects. Note
-that the duality functor maps ``\mathrm{End}(V)`` to ``\mathrm{End}(V^*)``, such that, if
-``V`` is a simple object, so must be ``V^*``. Henceforth, we will be sloppy about the
-distinction between a pre-fusion or fusion category, only use the latter term, even when it
-is not fully justified.
-
-Before continuing, let us use some examples to sketch the relevance of the concept of
-fusion categories. As mentioned, the categories ``\mathbf{Vect}_𝕜`` and
-``\mathbf{SVect}_𝕜`` have ``I ≂ 𝕜`` as simple object. For ``\mathbf{Vect}``, this is the
-only simple object, i.e. any other vector space ``V`` over ``𝕜``, can be thought of as a
-direct sum over ``N^V_I = \mathrm{dim}(V)`` multiple copies of ``𝕜``. In
-``\mathbf{SVect}``, the object ``J = 0 ⊕ 𝕜`` with ``J_0=0`` the zero dimensional space and
-``J_1 ≂ 𝕜`` is another simple object. Clearly, there are no non-zero grading preserving
-morphisms between ``I`` and ``J``, i.e. ``\mathrm{Hom}(I,J) = 0``, whereas
-``\mathrm{Hom}(J,J) ≂ 𝕜``. Any other super vector space ``V=V_0 ⊕ V_1`` can be written as
-a direct sum over ``N^V_I = \mathrm{dim}(V_0)`` copies of ``I`` and
-``N^V_J = \mathrm{dim}(V_1)`` copies of ``J``.
-
-A more representative example is that of the category ``C = \mathbf{Rep}_{\mathsf{G}}``,
-the category of representations of a group ``\mathsf{G}``. Colloquially, this could be
-thought of as a subcategory of ``\mathbf{Vect}`` containing as objects vector spaces ``V``
-on which a representation of ``\mathsf{G}`` is defined, denoted as ``u_V(g)`` for
-``g ∈ \mathsf{G}``, and as morphisms the equivariant transformations, i.e. intertwiners
-between the representations on the source and target:
-
-``\mathrm{Hom}_C(W,V) = \{f ∈ \mathrm{Hom}_{\mathbf{Vect}}(W,V)| u_V(g) ∘ f = f ∘ u_W(g), ∀ g ∈ G\}.``
-
-Note that the ``u_V(g)`` is itself generally not an element from ``End_C(V)``. Simple
-objects ``V_a`` are those corresponding irreducible representations (irreps) ``a`` of the
-group ``\mathsf{G}``, for which Schur's lemma implies ``End_C(V_a) ≂ 𝕜`` and
-``\mathrm{Hom}_C(V_a, V_b) = 0`` if ``a`` and ``b`` are not equivalent irreps. On the dual
-space ``V^*``, the group acts with the contragradient representation, i.e.
-``u_{V^*}(g) = ((u_V(g))^{-1})^* = u_V(g^{-1})^*``, where one should remind that ``^*``
-denotes the transpose. For a finite group or compact Lie group, we can introduce a dagger
-and restrict to unitary representations, such that ``u_V(g)^{-1} = u_V(g)^†`` and the
-contragradient representation becomes the complex conjugated representation, denoted as
-``u_{V^*}(g) = \bar{u}_V(g)``. The resulting category can then be given the structure of a
-unitary ribbon (pre-)fusion category. (Note that the number of isomorphism classes of simple
-objects, i.e. the number of non-equivalent irreps, is finite only in the case of a finite
-group). This example is very relevant to working with symmetries in TensorKit.jl, and will
-be expanded upon in more detail below.
-
-Fusion categories have a number of simplifying properties. A pivotal fusion category is
-spherical as soon as ``\mathrm{dim}_{\mathrm{l}}(V_i) = \mathrm{dim}_{\mathrm{r}}(V_i)``
-(i.e. the trace of the identity morphism) for all (isomorphism classes of) simple objects
-(note that all isomorphic simple objects have the same dimension). A braided pivotal fusion
-category is spherical if and only if it is a ribbon category.
+A **fusion category** is a pre-fusion category that has (left or right) duals, i.e. that is rigid, and that only has a finite number of isomorphism classes of simple objects.
+Note that the duality functor maps ``\mathrm{End}(V)`` to ``\mathrm{End}(V^*)``, such that, if ``V`` is a simple object, so must be ``V^*``.
+Henceforth, we will be sloppy about the distinction between a pre-fusion or fusion category, only use the latter term, even when it is not fully justified.
+
+Before continuing, let us use some examples to sketch the relevance of the concept of fusion categories.
+As mentioned, the categories ``\mathbf{Vect}_𝕜`` and ``\mathbf{SVect}_𝕜`` have ``I ≂ 𝕜`` as simple object.
+For ``\mathbf{Vect}``, this is the only simple object, i.e. any other vector space ``V`` over ``𝕜`` can be thought of as a direct sum over ``N^V_I = \mathrm{dim}(V)`` multiple copies of ``𝕜``.
+In ``\mathbf{SVect}``, the object ``J = 0 ⊕ 𝕜`` with ``J_0=0`` the zero dimensional space and ``J_1 ≂ 𝕜`` is another simple object.
+Clearly, there are no non-zero grading preserving morphisms between ``I`` and ``J``, i.e. ``\mathrm{Hom}(I,J) = 0``, whereas ``\mathrm{Hom}(J,J) ≂ 𝕜``.
+Any other super vector space ``V=V_0 ⊕ V_1`` can be written as a direct sum over ``N^V_I = \mathrm{dim}(V_0)`` copies of ``I`` and ``N^V_J = \mathrm{dim}(V_1)`` copies of ``J``.
+
+A more representative example is that of the category ``C = \mathbf{Rep}_{\mathsf{G}}``, the category of representations of a group ``\mathsf{G}``.
+Colloquially, this could be thought of as a subcategory of ``\mathbf{Vect}`` containing as objects vector spaces ``V`` on which a representation of ``\mathsf{G}`` is defined, denoted as ``u_V(g)`` for ``g ∈ \mathsf{G}``, and as morphisms the equivariant transformations, i.e. intertwiners between the representations on the source and target:
+
+```math
+\mathrm{Hom}_C(W,V) = \{f ∈ \mathrm{Hom}_{\mathbf{Vect}}(W,V)| u_V(g) ∘ f = f ∘ u_W(g), ∀ g ∈ G\}.
+```
+
+Note that the ``u_V(g)`` is itself generally not an element from ``End_C(V)``.
+Simple objects ``V_a`` are those corresponding to irreducible representations (irreps) ``a`` of the group ``\mathsf{G}``, for which Schur's lemma implies ``End_C(V_a) ≂ 𝕜`` and ``\mathrm{Hom}_C(V_a, V_b) = 0`` if ``a`` and ``b`` are not equivalent irreps.
+On the dual space ``V^*``, the group acts with the contragradient representation, i.e. ``u_{V^*}(g) = ((u_V(g))^{-1})^* = u_V(g^{-1})^*``, where one should recall that ``^*`` denotes the transpose.
+For a finite group or compact Lie group, we can introduce a dagger and restrict to unitary representations, such that ``u_V(g)^{-1} = u_V(g)^†`` and the contragradient representation becomes the complex conjugated representation, denoted as ``u_{V^*}(g) = \bar{u}_V(g)``.
+The resulting category can then be given the structure of a unitary ribbon (pre-)fusion category.
+(Note that the number of isomorphism classes of simple objects, i.e. the number of non-equivalent irreps, is finite only in the case of a finite group).
+This example is very relevant to working with symmetries in TensorKit.jl, and will be expanded upon in more detail below.
+
+Fusion categories have a number of simplifying properties.
+A pivotal fusion category is spherical as soon as ``\mathrm{dim}_{\mathrm{l}}(V_i) = \mathrm{dim}_{\mathrm{r}}(V_i)`` (i.e. the trace of the identity morphism) for all (isomorphism classes of) simple objects (note that all isomorphic simple objects have the same dimension).
+A braided pivotal fusion category is spherical if and only if it is a ribbon category.
## [Topological data of a unitary pivotal fusion category](@id ss_topologicalfusion)
-More explicitly, the different structures (monoidal structure, duals and pivotal structure,
-braiding and twists) in a fusion category can be characterized in terms of the simple
-objects, which we will henceforth denoted with just ``a`` instead of ``V_a``. This gives
-rise to what is known as the *topological data* of a unitary pivotal fusion category, most
-importantly the ``N``, ``F`` and ``R`` symbols, which are introduced in this final section.
+More explicitly, the different structures (monoidal structure, duals and pivotal structure, braiding and twists) in a fusion category can be characterized in terms of the simple objects, which we will henceforth denote with just ``a`` instead of ``V_a``.
+This gives rise to what is known as the *topological data* of a unitary pivotal fusion category, most importantly the ``N``, ``F`` and ``R`` symbols, which are introduced in this final section.
### Monoidal structure
-Starting with the monoidal or tensor product, we start by characterizing how the object
-``a ⊗ b`` can be decomposed as a direct sum over simple objects ``c``, which gives rise to
-the multiplicity indices ``N_c^{ab}``, as well as the inclusion maps, which we henceforth
-denote as ``X_{c,μ}^{ab}:c→a⊗b`` for ``μ=1,…,N^{c}_{ab}``. In the context of a unitary
-fusion category, on which we now focus, the corresponding projection maps are
-``Y^{c,μ}_{a,b} = (X_{c,μ}^{ab})^†:a⊗b→c`` such that
+Starting with the monoidal or tensor product, we start by characterizing how the object ``a ⊗ b`` can be decomposed as a direct sum over simple objects ``c``, which gives rise to the multiplicity indices ``N_c^{ab}``, as well as the inclusion maps, which we henceforth denote as ``X_{c,μ}^{ab}:c→a⊗b`` for ``μ=1,…,N_c^{ab}``.
+In the context of a unitary fusion category, on which we now focus, the corresponding projection maps are ``Y^{c,μ}_{a,b} = (X_{c,μ}^{ab})^†:a⊗b→c`` such that
-``(X_{c,μ}^{ab})^† ∘ X_{c′,μ′}^{ab} = δ_{c,c′} δ_{μ,μ′} \mathrm{id}_c.``
+```math
+(X_{c,μ}^{ab})^† ∘ X_{c′,μ′}^{ab} = δ_{c,c′} δ_{μ,μ′} \mathrm{id}_c.
+```
Graphically, we represent these relations as
@@ -732,202 +478,165 @@ Graphically, we represent these relations as
```
-and also refer to the inclusion and projection maps as splitting and fusion tensor,
-respectively.
+and also refer to the inclusion and projection maps as splitting and fusion tensor, respectively.
-For both ``(a⊗b)⊗c`` and ``a⊗(b⊗c)``, which are isomorphic via the associator
-``α_{a,b,c}``, we must thus obtain a direct sum decomposition with the same multiplicity
-indices, leading to the associativity constraint
+For both ``(a⊗b)⊗c`` and ``a⊗(b⊗c)``, which are isomorphic via the associator ``α_{a,b,c}``, we must thus obtain a direct sum decomposition with the same multiplicity indices, leading to the associativity constraint
-``N_{d}^{abc}= ∑_e N_{e}^{ab} N_d^{ec} = ∑_f N_{f}^{bc} N_{d}^{af}.``
+```math
+N_{d}^{abc}= ∑_e N_{e}^{ab} N_d^{ec} = ∑_f N_{f}^{bc} N_{d}^{af}.
+```
The corresponding inclusion maps can be chosen as
-``X_{d,(eμν)}^{abc} = (X_{e,μ}^{ab} ⊗ \mathrm{id}_c) ∘ X_{dν}^{e,c} : d→(a⊗b)⊗c.``
+```math
+X_{d,(eμν)}^{abc} = (X_{e,μ}^{ab} ⊗ \mathrm{id}_c) ∘ X_{d,ν}^{ec} : d→(a⊗b)⊗c.
+```
and
-``\tilde{X}_{d,(fκλ)}^{abc} = (\mathrm{id}_a ⊗ X_{f,κ}^{bc}) ∘ X_{d,λ}^{af} : d→a⊗(b⊗c)``
+```math
+\tilde{X}_{d,(fκλ)}^{abc} = (\mathrm{id}_a ⊗ X_{f,κ}^{bc}) ∘ X_{d,λ}^{af} : d→a⊗(b⊗c)
+```
and satisfy
-``(X_{d,(eμν)}^{abc})^† ∘ X_{d′,(e′μ′ν′)}^{abc} = δ_{e,e′} δ_{μ,μ′} δ_{ν,ν′} δ_{d,d′} \mathrm{id}_d,``
+```math
+(X_{d,(eμν)}^{abc})^† ∘ X_{d′,(e′μ′ν′)}^{abc} = δ_{e,e′} δ_{μ,μ′} δ_{ν,ν′} δ_{d,d′} \mathrm{id}_d,
+```
-``∑_{d,eμν} X_{d,(eμν)}^{abc} ∘ (X_{d,(eμν)}^{abc})^† = \mathrm{id}_{(a⊗b)⊗c},``
+```math
+∑_{d,eμν} X_{d,(eμν)}^{abc} ∘ (X_{d,(eμν)}^{abc})^† = \mathrm{id}_{(a⊗b)⊗c},
+```
-and similar for ``\tilde{X}_{d,(fκλ)}^{a,b,c}``. Applying the associator leads to a relation
+and similar for ``\tilde{X}_{d,(fκλ)}^{a,b,c}``.
+Applying the associator leads to a relation
-``α_{a,b,c} ∘ X_{d,(eμν)}^{abc} = ∑_{f,κ,λ} [F^{abc}_{d}]_{(eμν)}^{(fκλ)} \tilde{X}_{d,(fκλ)}^{abc}.``
+```math
+α_{a,b,c} ∘ X_{d,(eμν)}^{abc} = ∑_{f,κ,λ} [F^{abc}_{d}]_{(eμν)}^{(fκλ)} \tilde{X}_{d,(fκλ)}^{abc}.
+```
which defines the *F-symbol*, i.e. the matrix elements of the associator
-``(\tilde{X}_{d,(fκλ)}^{abc})^† ∘ α_{a,b,c} ∘ X_{d′,(eμν)}^{abc} = δ_{d,d′} [F^{abc}_{d}]_{(eμν)}^{(fκλ)} \mathrm{id}_d.``
+```math
+(\tilde{X}_{d,(fκλ)}^{abc})^† ∘ α_{a,b,c} ∘ X_{d′,(eμν)}^{abc} = δ_{d,d′} [F^{abc}_{d}]_{(eμν)}^{(fκλ)} \mathrm{id}_d.
+```
-Note that the left hand side represents a map in ``\mathrm{Hom}(d′,d)``, which must be zero if
-``d′`` is different from ``d``, hence the ``δ_{d,d′}`` on the right hand side. In a strict
-category, or in the graphical notation, the associator ``α`` is omitted and these relations
-thus represent a unitary basis transform between the basis of inclusion maps
-``X_{d,(eμν)}^{abc}`` and ``\tilde{X}_{d,(fκλ)}^{abc}``, which is also called an F-move,
-i.e. graphically:
+Note that the left hand side represents a map in ``\mathrm{Hom}(d′,d)``, which must be zero if ``d′`` is different from ``d``, hence the ``δ_{d,d′}`` on the right hand side.
+In a strict category, or in the graphical notation, the associator ``α`` is omitted and these relations thus represent a unitary basis transform between the basis of inclusion maps ``X_{d,(eμν)}^{abc}`` and ``\tilde{X}_{d,(fκλ)}^{abc}``, which is also called an F-move, i.e. graphically:
```@raw html
```
-The matrix ``F^{abc}_d`` is thus a unitary matrix. The pentagon coherence equation can also
-be rewritten in terms of these matrix elements, and as such yields the celebrated pentagon
-equation for the F-symbols. In a similar fashion, the unitors result in
-``N^{a1}_{b} = N^{1a}_b = δ^{a}_b`` (where we have now written ``1`` instead of ``I`` for
-the unit object) and the triangle equation leads to additional relations between the F-
-symbols involving the unit object. In particular, if we identify
-``X^{1a}_{a,1}:a→(1⊗a)`` with ``λ_a^†`` and ``X^{a1}_{a,1}:a→(a⊗1)`` with ``ρ_a^†``, the
-triangle equation and its collaries imply that
-``[F^{1ab}_{c}]_{(11μ)}^{(cν1)} = δ^{ν}_{μ}``, and similar relations for ``F^{a1b}_c`` and
-``F^{ab1}_c``, which are graphically represented as
+The matrix ``F^{abc}_d`` is thus a unitary matrix.
+The pentagon coherence equation can also be rewritten in terms of these matrix elements, and as such yields the celebrated pentagon equation for the F-symbols.
+In a similar fashion, the unitors result in ``N^{a1}_{b} = N^{1a}_b = δ^{a}_b`` (where we have now written ``1`` instead of ``I`` for the unit object) and the triangle equation leads to additional relations between the F-symbols involving the unit object.
+In particular, if we identify ``X^{1a}_{a,1}:a→(1⊗a)`` with ``λ_a^†`` and ``X^{a1}_{a,1}:a→(a⊗1)`` with ``ρ_a^†``, the triangle equation and its corollaries imply that ``[F^{1ab}_{c}]_{(11μ)}^{(cν1)} = δ^{ν}_{μ}``, and similar relations for ``F^{a1b}_c`` and ``F^{ab1}_c``, which are graphically represented as
```@raw html
```
-In the case of group representations, i.e. the category ``\mathbf{Rep}_{\mathsf{G}}``, the
-splitting and fusion tensors are known as the Clebsch-Gordan coefficients, especially in
-the case of ``\mathsf{SU}_2``. An F-move amounts to a recoupling and the F-symbols can thus
-be identified with the *6j-symbols* (strictly speaking, Racah's W-symbol for
-``\mathsf{SU}_2``).
+In the case of group representations, i.e. the category ``\mathbf{Rep}_{\mathsf{G}}``, the splitting and fusion tensors are known as the Clebsch-Gordan coefficients, especially in the case of ``\mathsf{SU}_2``.
+An F-move amounts to a recoupling and the F-symbols can thus be identified with the *6j-symbols* (strictly speaking, Racah's W-symbol for ``\mathsf{SU}_2``).
### Duality and pivotal structure
-Next up is duality. Since we are assuming a dagger category, it can be assumed pivotal,
-where the left dual objects are identical to the right dual objects, and the left and right
-(co)evaluation are related via the dagger. We have already pointed out above that the dual
-object ``a^*`` of a simple object ``a`` is simple, and thus, it must be isomorphic to one
-of the representives ``\bar{a}`` of the different isomorphism classes of simple objects
-that we have chosen. Note that it can happen that ``\bar{a}=a``. Duality implies an
-isomorphism between ``\mathrm{Hom}(W,V)`` and ``\mathrm{Hom}(I,V⊗W^*)``, and thus, for a
-simple object ``a``, ``\mathrm{End}(a) ≂ 𝕜`` is isomorphic to ``\mathrm{Hom}(1,a⊗a^*)``,
-such that the latter is also isomorphic to ``𝕜``, or thus ``N^{a\bar{a}}_1 = 1``. Also,
-all possible duals of ``a`` must be isomorphic, and thus there is a single representive
-``\bar{a}``, meaning that ``N^{ab}_1 = δ^{b,\bar{a}}``, i.e. for all other ``b ≠ \bar{a}``,
-``\mathrm{Hom}(1,a⊗b) ≂ \mathrm{Hom}(b^*,a) = 0``. Note that also ``\bar{\bar{a}}=a``.
-
-Let us now be somewhat careful with respect to the isomorphism between ``a^*`` and
-``\bar{a}``. If ``\bar{a} ≠ a``, we can basically choose the representative of that
-isomorphism class as ``\bar{a} = a^*``. However, this choice might not be valid if
-``\bar{a}=a``, as in that case the choice is already fixed, and might be different from
-``a``. To give a concrete example, the ``j=1/2`` representation of ``\mathsf{SU}_2`` has a
-dual (contragradient, but because of unitarity, complex conjugated) representation which is
-isomorphic to itself, but not equal. In the context of tensors in quantum physics, we would
-like to be able to represent this representation and its conjugate, so we need to take the
-distinction and the isomorphism between them into account. This means that
-``\mathrm{Hom}(a^*,\bar{a})`` is isomorphic to ``𝕜`` and contains a single linearly independent
-element, ``Z_a``, which is a unitary isomorphism such that
-``Z_a^\dagger ∘ Z_a = \mathrm{id}_{a^*}`` and
-``Z_a ∘ Z_a^\dagger = \mathrm{id}_{\bar{a}}``. Using the transpose, we obtain
-``Z_a^* ∈ \mathrm{Hom}(\bar{a}^*,a)``, and thus it is proportional to ``Z_{\bar{a}}``, i.e.
-``Z_a^* = χ_a Z_{\bar{a}}`` with ``χ_a`` a complex phase (assuming ``𝕜 = ℂ``). Another
-transpose results in ``Z_{\bar{a}}^* = χ_{\bar{a}} Z_a`` with
-``χ_{\bar{a}} = \overline{χ_{a}}``, where bar of a scalar quantity denotes its complex
-conjugate to avoid confusion with the transpose functor. If ``a``and ``\bar{a}`` are
-distinct, we can essentially choose ``Z_{\bar{a}}`` such that ``χ_a`` is ``1``. However,
-for ``a=\bar{a}``, the value of ``χ_a`` cannot be changed, but must satisfy ``χ_a^2 = 1``,
-or thus ``χ_a = ±1``. This value is a topological invariant known as the
-*Frobenius-Schur indicator*. Graphically, we represent this isomorphism and its relations as
+Next up is duality.
+Since we are assuming a dagger category, it can be assumed pivotal, where the left dual objects are identical to the right dual objects, and the left and right (co)evaluation are related via the dagger.
+We have already pointed out above that the dual object ``a^*`` of a simple object ``a`` is simple, and thus, it must be isomorphic to one of the representatives ``\bar{a}`` of the different isomorphism classes of simple objects that we have chosen.
+Note that it can happen that ``\bar{a}=a``. Duality implies an isomorphism between ``\mathrm{Hom}(W,V)`` and ``\mathrm{Hom}(I,V⊗W^*)``, and thus, for a simple object ``a``, ``\mathrm{End}(a) ≂ 𝕜`` is isomorphic to ``\mathrm{Hom}(1,a⊗a^*)``, such that the latter is also isomorphic to ``𝕜``, or thus ``N^{a\bar{a}}_1 = 1``.
+Also, all possible duals of ``a`` must be isomorphic, and thus there is a single representative ``\bar{a}`` such that ``N^{ab}_1 = δ^{b,\bar{a}}``, i.e. for all other ``b ≠ \bar{a}``, ``\mathrm{Hom}(1,a⊗b) ≂ \mathrm{Hom}(b^*,a) = 0``.
+Note that also ``\bar{\bar{a}}=a``.
+
+Let us now be somewhat careful with respect to the isomorphism between ``a^*`` and ``\bar{a}``. If ``\bar{a} ≠ a``, we can basically choose the representative of that isomorphism class as ``\bar{a} = a^*``.
+However, this choice might not be valid if ``\bar{a}=a``, as in that case the choice is already fixed, and might be different from ``a``.
+To give a concrete example, the ``j=1/2`` representation of ``\mathsf{SU}_2`` has a dual (contragredient, but because of unitarity, complex conjugated) representation which is isomorphic to itself, but not equal.
+In the context of tensors in quantum physics, we would like to be able to represent this representation and its conjugate, so we need to take the distinction and the isomorphism between them into account.
+This means that ``\mathrm{Hom}(a^*,\bar{a})`` is isomorphic to ``𝕜`` and contains a single linearly independent element, ``Z_a``, which is a unitary isomorphism such that ``Z_a^\dagger ∘ Z_a = \mathrm{id}_{a^*}`` and ``Z_a ∘ Z_a^\dagger = \mathrm{id}_{\bar{a}}``.
+Using the transpose, we obtain ``Z_a^* ∈ \mathrm{Hom}(\bar{a}^*,a)``, and thus it is proportional to ``Z_{\bar{a}}``, i.e. ``Z_a^* = χ_a Z_{\bar{a}}`` with ``χ_a`` a complex phase (assuming ``𝕜 = ℂ``).
+Another transpose results in ``Z_{\bar{a}}^* = χ_{\bar{a}} Z_a`` with ``χ_{\bar{a}} = \overline{χ_{a}}``, where bar of a scalar quantity denotes its complex conjugate to avoid confusion with the transpose functor.
+If ``a`` and ``\bar{a}`` are distinct, we can essentially choose ``Z_{\bar{a}}`` such that ``χ_a`` is ``1``.
+However, for ``a=\bar{a}``, the value of ``χ_a`` cannot be changed, but must satisfy ``χ_a^2 = 1``, or thus ``χ_a = ±1``.
+This value is a topological invariant known as the *Frobenius-Schur indicator*. Graphically, we represent this isomorphism and its relations as
```@raw html
```
-We can now discuss the relation between the exact pairing and the fusion and splitting
-tensors. Given that the (left) coevaluation ``η_a ∈ \mathrm{Hom}(1, a⊗a^*)``, we can define the
-splitting tensor as
+We can now discuss the relation between the exact pairing and the fusion and splitting tensors.
+Given that the (left) coevaluation ``η_a ∈ \mathrm{Hom}(1, a⊗a^*)``, we can define the splitting tensor as
-``X^{a\bar{a}}_{1} = \frac{1}{\sqrt{d_a}}(\mathrm{id}_a ⊗ Z_a) ∘ η_a = \frac{1}{\sqrt{d_a}}(Z_a^* ⊗ \mathrm{id}_{\bar{a}}) ∘ \tilde{η}_{\bar{a}} ∈ \mathrm{Hom}(1, a⊗\bar{a}).``
+```math
+X^{a\bar{a}}_{1} = \frac{1}{\sqrt{d_a}}(\mathrm{id}_a ⊗ Z_a) ∘ η_a = \frac{1}{\sqrt{d_a}}(Z_a^* ⊗ \mathrm{id}_{\bar{a}}) ∘ \tilde{η}_{\bar{a}} ∈ \mathrm{Hom}(1, a⊗\bar{a}).
+```
-The prefactor takes care of normalization, i.e. with ``η_a^† = \tilde{ϵ}_a``, we find
-``η_a^† ∘ η_a = \tilde{ϵ}_a ∘ η_a = \mathrm{tr}(\mathrm{id}_a) = d_a \mathrm{id}_1``, and
-thus ``(X^{a\bar{a}}_{1})^† ∘ X^{a\bar{a}}_{1} = \mathrm{id}_1``. Here, we have denoted
-``d_a = \mathrm{dim}(a) = \mathrm{tr}(\mathrm{id}_a)`` for the quantum dimension of the
-simple objects ``a``. With this information, we can then compute ``[F^{a\bar{a}a}_a]``,
-which has a single element (it's a ``1 × 1`` matrix), and find
-``[F^{a\bar{a}a}_a] = \frac{χ_a}{d_a}``, where we've used ``\tilde{η}_a = ϵ_a^†`` and the
-snake rules. Hence, both the quantum dimensions and the Frobenius-Schur indicator are
-encoded in the F-symbol. Hence, they do not represent new independent data. Again, the
-graphical representation is more enlightning:
+The prefactor takes care of normalization, i.e. with ``η_a^† = \tilde{ϵ}_a``, we find ``η_a^† ∘ η_a = \tilde{ϵ}_a ∘ η_a = \mathrm{tr}(\mathrm{id}_a) = d_a \mathrm{id}_1``, and thus ``(X^{a\bar{a}}_{1})^† ∘ X^{a\bar{a}}_{1} = \mathrm{id}_1``.
+Here, we have denoted ``d_a = \mathrm{dim}(a) = \mathrm{tr}(\mathrm{id}_a)`` for the quantum dimension of the simple object ``a``.
+With this information, we can then compute ``[F^{a\bar{a}a}_a]``, which has a single element (it's a ``1 × 1`` matrix), and find ``[F^{a\bar{a}a}_a] = \frac{χ_a}{d_a}``, where we've used ``\tilde{η}_a = ϵ_a^†`` and the snake rules.
+Hence, both the quantum dimensions and the Frobenius-Schur indicator are encoded in the F-symbol.
+Therefore, they do not represent new independent data.
+Again, the graphical representation is more enlightening:
```@raw html
```
-With these definitions, we can now also evaluate the action of the evaluation map on the
-splitting tensors, namely
+With these definitions, we can now also evaluate the action of the evaluation map on the splitting tensors, namely
```@raw html
```
-where again bar denotes complex conjugation in the second line, and we introduced two new
-families of matrices ``A^{ab}_c`` and ``B^{ab}_c``, whose entries are composed out of
-entries of the F-symbol, namely
+where again bar denotes complex conjugation in the second line, and we introduced two new families of matrices ``A^{ab}_c`` and ``B^{ab}_c``, whose entries are composed out of entries of the F-symbol, namely
-``[A^{ab}_c]^\nu_\mu = \sqrt{\frac{d_a d_b}{d_c}} χ_{\bar{a}} \ \overline{[F^{\bar{a}ab}_b]_{(111)}^{(cμν)}}``
+```math
+[A^{ab}_c]^\nu_\mu = \sqrt{\frac{d_a d_b}{d_c}} χ_{\bar{a}} \ \overline{[F^{\bar{a}ab}_b]_{(111)}^{(cμν)}}
+```
and
-``[B^{ab}_c]^\nu_\mu = \sqrt{\frac{d_a d_b}{d_c}} [F^{ab\bar{b}}_a]^{(111)}_{(cμν)}.``
+```math
+[B^{ab}_c]^\nu_\mu = \sqrt{\frac{d_a d_b}{d_c}} [F^{ab\bar{b}}_a]^{(111)}_{(cμν)}.
+```
-Composing the left hand side of first graphical equation with its dagger, and noting that
-the resulting element ``f ∈ \mathrm{End}(a)`` must satisfy
-``f = d_a^{-1} \mathrm{tr}(f) \mathrm{id}_a``, i.e.
+Composing the left hand side of the first graphical equation with its dagger, and noting that the resulting element ``f ∈ \mathrm{End}(a)`` must satisfy ``f = d_a^{-1} \mathrm{tr}(f) \mathrm{id}_a``, i.e.
```@raw html
```
-allows to conclude that
-``∑_ν [B^{ab}_c]^{ν}_{μ} \overline{[B^{ab}_c]^{ν}_{μ′}} = \delta_{μ,μ′}``, i.e. ``B^{ab}_c``
-is a unitary matrix. The same result follows for ``A^{ab}_c`` in analogue fashion.
+allows one to conclude that ``∑_ν [B^{ab}_c]^{ν}_{μ} \overline{[B^{ab}_c]^{ν}_{μ′}} = \delta_{μ,μ′}``, i.e. ``B^{ab}_c`` is a unitary matrix.
+The same result follows for ``A^{ab}_c`` in an analogous fashion.
!!! note
- In the context of fusion categories, one often resorts to the so-called *isotopic*
- normalization convention, where splitting tensors are normalized as
- ``(X^{ab}_{c,μ})^† ∘ X^{ab}_{c′,\mu′} = \sqrt{\frac{d_a d_b}{d_c}} δ_{c,c′} δ_{μ,μ′} \mathrm{id}_c``.
- This kills some of the quantum dimensions in formulas like the ones above and
- essentially allows to rotate the graphical notation of splitting and fusion tensors (up
- to a unitary transformation). Nonetheless, for our implementation of tensors and
- manipulations thereof (in particular orthonormal factorizations such as the singular
- value decomposition), we find it more convenient to work with the original normalization
- convention.
-
-Let us again study in more detail the example ``\mathbf{Rep}_{\mathsf{G}}``. The quantum
-dimension ``d_a`` of an irrep ``a`` is just the normal vector space dimension (over ``𝕜``)
-of the space on which the irrep acts. The dual of an irrep ``a`` is its contragradient
-representation, which in the case of unitary representations amounts to the complex
-conjugate representation. This representation can be isomorphic to an already defined irrep
-``\bar{a}``, for example ``a`` itself. If that happens, it does not automatically imply that
-the irrep ``a`` is real-valued. For example, all irreps of ``\mathsf{SU}_2`` are self- dual,
-with the isomorphism given by a ``π`` rotation over the ``y``-axis (in the standard basis).
-The resulting Frobenius-Schur indicator is ``+1`` for integer spin irreps, and ``-1`` for
-half-integer spin irreps. The value ``χ_a=+1`` indicates that the representation can be made
-real, e.g. the integer spin representations can be written as tensor representations of
-``\mathsf{SO}_3`` by a change of basis. The value ``χ_a=-1`` indicates that the
-representation is quaternionic and cannot be made real.
-
-The (co)evaluation expresses that the standard contraction of a vector with a dual vector
-yields a scalar, i.e. a representation and its dual (the contragradient) yields the trivial
-representation when correctly contracted. The coevaluation together with the isomorphism
-between the conjugate of irrep ``a`` and some irrep ``\bar{a}`` yields a way to define the
-Clebsch-Gordan coefficients (i.e. the splitting and fusion tensor) for fusing
-``a ⊗ \bar{a}`` to the trivial irrep, i.e. to what is called a singlet in the case of
-``\mathsf{SU}_2``.
+ In the context of fusion categories, one often resorts to the so-called *isotopic* normalization convention, where splitting tensors are normalized as ``(X^{ab}_{c,μ})^† ∘ X^{ab}_{c′,\mu′} = \sqrt{\frac{d_a d_b}{d_c}} δ_{c,c′} δ_{μ,μ′} \mathrm{id}_c``.
+ This kills some of the quantum dimensions in formulas like the ones above and essentially allows to rotate the graphical notation of splitting and fusion tensors (up to a unitary transformation).
+ Nonetheless, for our implementation of tensors and manipulations thereof (in particular orthonormal factorizations such as the singular value decomposition), we find it more convenient to work with the original normalization convention.
+
+Let us again study in more detail the example ``\mathbf{Rep}_{\mathsf{G}}``.
+The quantum dimension ``d_a`` of an irrep ``a`` is just the normal vector space dimension (over ``𝕜``) of the space on which the irrep acts.
+The dual of an irrep ``a`` is its contragredient representation, which in the case of unitary representations amounts to the complex conjugate representation.
+This representation can be isomorphic to an already defined irrep ``\bar{a}``, for example ``a`` itself.
+If that happens, it does not automatically imply that the irrep ``a`` is real-valued.
+For example, all irreps of ``\mathsf{SU}_2`` are self-dual, with the isomorphism given by a ``π`` rotation over the ``y``-axis (in the standard basis).
+The resulting Frobenius-Schur indicator is ``+1`` for integer spin irreps, and ``-1`` for half-integer spin irreps.
+The value ``χ_a=+1`` indicates that the representation can be made real, e.g. the integer spin representations can be written as tensor representations of ``\mathsf{SO}_3`` by a change of basis.
+The value ``χ_a=-1`` indicates that the representation is quaternionic and cannot be made real.
+
+The (co)evaluation expresses that the standard contraction of a vector with a dual vector yields a scalar, i.e. a representation and its dual (the contragredient) yields the trivial representation when correctly contracted.
+The coevaluation together with the isomorphism between the conjugate of irrep ``a`` and some irrep ``\bar{a}`` yields a way to define the Clebsch-Gordan coefficients (i.e. the splitting and fusion tensor) for fusing ``a ⊗ \bar{a}`` to the trivial irrep, i.e. to what is called a singlet in the case of ``\mathsf{SU}_2``.
### Braidings and twists
-Finally, we can study the braiding structure of a pivotal fusion category. Not all fusion
-categories have a braiding structure. The existence of a braiding isomorphism
-``τ_{V,W}:V⊗W→W⊗V`` requires at the very least that ``N^{ab}_c = N^{ba}_c`` at the level of
-the simple objects. We can then express ``τ_{a,b}`` in terms of its matrix elements as
+Finally, we can study the braiding structure of a pivotal fusion category.
+Not all fusion categories have a braiding structure.
+The existence of a braiding isomorphism ``τ_{V,W}:V⊗W→W⊗V`` requires at the very least that ``N^{ab}_c = N^{ba}_c`` at the level of the simple objects.
+We can then express ``τ_{a,b}`` in terms of its matrix elements as
-``τ_{a,b} ∘ X^{ab}_{c,μ} = ∑_ν [R^{ab}_c]^ν_μ X^{ba}_{c,ν}``
+```math
+τ_{a,b} ∘ X^{ab}_{c,μ} = ∑_ν [R^{ab}_c]^ν_μ X^{ba}_{c,ν}
+```
or graphically
@@ -935,13 +644,13 @@ or graphically
```
-The hexagon coherence axiom for the braiding and the associator can then be reexpressed in
-terms of the F-symbols and R-symbols.
+The hexagon coherence axiom for the braiding and the associator can then be reexpressed in terms of the F-symbols and R-symbols.
-We can now compute the twist, which for simple objects needs to be scalars (or in fact
-complex phases because of unitarity) multiplying the identity morphism, i.e.
+We can now compute the twist, which for simple objects needs to be scalars (or in fact complex phases because of unitarity) multiplying the identity morphism, i.e.
-``θ_a = \mathrm{id}_a \sum_{b,μ} \frac{d_b}{d_a} [R^{aa}_b]^{μ}_{μ}``
+```math
+θ_a = \mathrm{id}_a \sum_{b,μ} \frac{d_b}{d_a} [R^{aa}_b]^{μ}_{μ}
+```
or graphically
@@ -949,63 +658,47 @@ or graphically
```
-Henceforth, we reserve ``θ_a`` for the scalar value itself. Note that ``θ_a = θ_{\bar{a}}``
-as our category is spherical and thus a ribbon category, and that the defining relation of
-a twist implies
+Henceforth, we reserve ``θ_a`` for the scalar value itself. Note that ``θ_a = θ_{\bar{a}}`` as our category is spherical and thus a ribbon category, and that the defining relation of a twist implies
-``[R^{ba}_c]^κ_μ [R^{ab}_c]^μ_ν = \frac{\theta_c}{θ_a θ_b} δ^κ_ν``
+```math
+[R^{ba}_c]^κ_μ [R^{ab}_c]^μ_ν = \frac{\theta_c}{θ_a θ_b} δ^κ_ν
+```
-If ``a = \bar{a}``, we can furthermore relate the twist, the braiding and the Frobenius-
-Schur indicator via ``θ_a χ_a R^{aa}_1 =1``, because of
+If ``a = \bar{a}``, we can furthermore relate the twist, the braiding and the Frobenius-Schur indicator via ``θ_a χ_a R^{aa}_1 = 1``, because of
```@raw html
```
-For the recurring example of ``\mathbf{Rep}_{\mathsf{G}}``, the braiding acts simply as the
-swap of the two vector spaces on which the representations are acting and is thus symmetric,
-i.e. ``τ_{b,a} ∘ τ_{a,b} = \mathrm{id}_{a⊗b}``. All the twists are simply ``θ_a = 1``. For
-an irrep that is self-dual, i.e. ``\bar{a}=a``, the final expression simplifies to
-``R^{aa}_1 = χ_a`` and thus states that the fusion from ``a ⊗ a`` to the trivial sector is
-either symmetric under swaps if ``χ_a=1`` or antisymmetric if ``χ_a=-1``. For the case of
-``\mathsf{SU}_2``, the coupling of two spin ``j`` states to a singlet it symmetric for
-integer ``j`` and odd for half-integer ``j``.
-
-With this, we conclude our exposition of unitary fusion categories. There are many fusion
-categories that do not originate from the representation theory of groups, but are related
-to quantum groups and the representation theory of quasi-triangular Hopf algebras. They
-have non-integer quantum dimensions and generically admit for braidings which are not
-symmetric. A particular class of interesting fusion categories are *modular fusion
-categories*, which provide the mathematical structure for the theory of anyons and
-topological sectors in topological quantum states of matter. Thereto, one defines the
-modular S matrix, defined as
-
-``S_{a,b} = \frac{1}{D} \mathrm{tr}(τ_{a,b} ∘ τ_{b,a}) = \frac{1}{D} ∑_c N^{ab}_c d_c \frac{θ_c}{θ_a θ_b}.``
-
-The normalization constant is given by ``D = \sqrt{\sum_a d_a^2}``, and thus truly requires
-a fusion category with a finite number of (isomorphism classes of) simple objects. For a
-modular fusion category, the symmetric matrix ``S`` is non-degenerate, and in fact (for a
-unitary fusion category) unitary. Note, however, that for a symmetric braiding ``S_{a,b} =
-\frac{d_a d_b}{D}`` and thus ``S`` is a rank 1 matrix. In particular,
-``\mathbf{Rep}_{\mathsf{G}}`` is never a modular category and the properties associated with
-this are not of (direct) importance for TensorKit.jl. We refer to the references for further
-information about modular categories.
+For the recurring example of ``\mathbf{Rep}_{\mathsf{G}}``, the braiding acts simply as the swap of the two vector spaces on which the representations are acting and is thus symmetric, i.e. ``τ_{b,a} ∘ τ_{a,b} = \mathrm{id}_{a⊗b}``.
+All the twists are simply ``θ_a = 1``.
+For an irrep that is self-dual, i.e. ``\bar{a}=a``, the final expression simplifies to ``R^{aa}_1 = χ_a`` and thus states that the fusion from ``a ⊗ a`` to the trivial sector is either symmetric under swaps if ``χ_a=1`` or antisymmetric if ``χ_a=-1``.
+For the case of ``\mathsf{SU}_2``, the coupling of two spin ``j`` states to a singlet is symmetric for integer ``j`` and antisymmetric for half-integer ``j``.
+
+With this, we conclude our exposition of unitary fusion categories.
+There are many fusion categories that do not originate from the representation theory of groups, but are related to quantum groups and the representation theory of quasi-triangular Hopf algebras.
+They have non-integer quantum dimensions and generically admit braidings which are not symmetric.
+A particular class of interesting fusion categories are *modular fusion categories*, which provide the mathematical structure for the theory of anyons and topological sectors in topological quantum states of matter.
+To this end, one defines the modular S matrix as
+
+```math
+S_{a,b} = \frac{1}{D} \mathrm{tr}(τ_{a,b} ∘ τ_{b,a}) = \frac{1}{D} ∑_c N^{ab}_c d_c \frac{θ_c}{θ_a θ_b}.
+```
+
+The normalization constant is given by ``D = \sqrt{\sum_a d_a^2}``, and thus truly requires a fusion category with a finite number of (isomorphism classes of) simple objects.
+For a modular fusion category, the symmetric matrix ``S`` is non-degenerate, and in fact (for a unitary fusion category) unitary.
+Note, however, that for a symmetric braiding ``S_{a,b} = \frac{d_a d_b}{D}`` and thus ``S`` is a rank 1 matrix.
+In particular, ``\mathbf{Rep}_{\mathsf{G}}`` is never a modular category and the properties associated with this are not of (direct) importance for TensorKit.jl.
+We refer to the references for further information about modular categories.
## Bibliography
-[^turaev]: Turaev, V. G., & Virelizier, A. (2017). Monoidal categories and topological field theory (Vol. 322).
- Birkhäuser.
+[^Turaev]: Turaev, V. G., & Virelizier, A. (2017). Monoidal categories and topological field theory (Vol. 322). Birkhäuser.
-[^selinger]: Selinger, P. (2010). A survey of graphical languages for monoidal categories.
- In New structures for physics (pp. 289-355). Springer, Berlin, Heidelberg.
- [https://arxiv.org/abs/0908.3347](https://arxiv.org/abs/0908.3347)
+[^Selinger]: Selinger, P. (2010). A survey of graphical languages for monoidal categories. In New structures for physics (pp. 289-355). Springer, Berlin, Heidelberg. [https://arxiv.org/abs/0908.3347](https://arxiv.org/abs/0908.3347)
-[^kassel]: Kassel, C. (2012). Quantum groups (Vol. 155).
- Springer Science & Business Media.
+[^Kassel]: Kassel, C. (2012). Quantum groups (Vol. 155). Springer Science & Business Media.
-[^kitaev]: Kitaev, A. (2006). Anyons in an exactly solved model and beyond.
- Annals of Physics, 321(1), 2-111.
+[^Kitaev]: Kitaev, A. (2006). Anyons in an exactly solved model and beyond. Annals of Physics, 321(1), 2-111.
-[^beer]: From categories to anyons: a travelogue
- Kerstin Beer, Dmytro Bondarenko, Alexander Hahn, Maria Kalabakov, Nicole Knust, Laura Niermann, Tobias J. Osborne, Christin Schridde, Stefan Seckmeyer, Deniz E. Stiegemann, and Ramona Wolf
- [https://arxiv.org/abs/1811.06670](https://arxiv.org/abs/1811.06670)
+[^Beer]: From categories to anyons: a travelogue. Kerstin Beer, Dmytro Bondarenko, Alexander Hahn, Maria Kalabakov, Nicole Knust, Laura Niermann, Tobias J. Osborne, Christin Schridde, Stefan Seckmeyer, Deniz E. Stiegemann, and Ramona Wolf. [https://arxiv.org/abs/1811.06670](https://arxiv.org/abs/1811.06670)
diff --git a/docs/src/appendix/symmetric_tutorial.md b/docs/src/appendix/symmetric_tutorial.md
index 88c9483ff..7eda0a35a 100644
--- a/docs/src/appendix/symmetric_tutorial.md
+++ b/docs/src/appendix/symmetric_tutorial.md
@@ -1,23 +1,13 @@
# [A symmetric tensor deep dive: constructing your first tensor map](@id s_symmetric_tutorial)
-In this tutorial, we will demonstrate how to construct specific [`TensorMap`](@ref)s which
-are relevant to some common physical systems, with an increasing degree of complexity. We
-will assume the reader is somewhat familiar with [the notion of a 'tensor map'](@ref
-ss_whatistensor) and has a rough idea of [what it means for a tensor map to be
-'symmetric'](@ref ss_symmetries). In going through these examples we aim to provide a
-relatively gentle introduction to the meaning of [symmetry sectors](@ref ss_sectors) and
-[vector spaces](@ref ss_rep) within the context of TensorKit.jl, [how to initialize a
-`TensorMap` over a given vector space](@ref ss_tensor_construction) and finally how to
-manually set the data of a [symmetric `TensorMap`](@ref ss_tutorial_symmetries). We will
-keep our discussion as intuitive and simple as possible, only adding as many technical
-details as strictly necessary to understand each example. When considering a different
-physical system of interest, you should then be able to adapt these recipes and the
-intuition behind them to your specific problem at hand.
+In this tutorial, we will demonstrate how to construct specific [`TensorMap`](@ref)s which are relevant to some common physical systems, with an increasing degree of complexity.
+We will assume the reader is somewhat familiar with [the notion of a *tensor map*](@ref ss_whatistensor) and has a rough idea of [what it means for a tensor map to be *symmetric*](@ref ss_symmetries).
+In going through these examples we aim to provide a relatively gentle introduction to the meaning of [symmetry sectors](@ref ss_sectors) and [vector spaces](@ref ss_rep) within the context of TensorKit.jl, [how to initialize a `TensorMap` over a given vector space](@ref ss_tensor_construction) and finally how to manually set the data of a [symmetric `TensorMap`](@ref ss_tutorial_symmetries).
+We will keep our discussion as intuitive and simple as possible, only adding as many technical details as strictly necessary to understand each example.
+When considering a different physical system of interest, you should then be able to adapt these recipes and the intuition behind them to your specific problem at hand.
!!! note
- Many of these examples are readily implemented in the
- [TensorKitTensors.jl package](https://github.com/QuantumKitHub/TensorKitTensors.jl), in
- which case we basically provide a narrated walk-through of the corresponding code.
+ Many of these examples are readily implemented in the [TensorKitTensors.jl package](https://github.com/QuantumKitHub/TensorKitTensors.jl), in which case we basically provide a narrated walk-through of the corresponding code.
#### Contents of the tutorial
@@ -38,9 +28,7 @@ using Test # for showcase testing
## Level 0: The transverse-field Ising model
-As the most basic example, we consider the
-[1-dimensional transverse-field Ising model](https://en.wikipedia.org/wiki/Transverse-field_Ising_model),
-whose Hamiltonian is given by
+As the most basic example, we consider the [1-dimensional transverse-field Ising model](https://en.wikipedia.org/wiki/Transverse-field_Ising_model), whose Hamiltonian is given by
```math
\begin{equation}
@@ -49,11 +37,8 @@ H = -J \left (\sum_{\langle i, j \rangle} Z_i Z_j + g \sum_{i} X_i\right).
\end{equation}
```
-Here, $X_i$ and $Z_i$ are the
-[Pauli operators](https://en.wikipedia.org/wiki/Pauli_matrices) acting on site $i$, and the
-first sum runs over pairs of nearest neighbors $\langle i, j \rangle$. This model has a
-global $\mathbb{Z}_2$ symmetry, as it is invariant under the transformation $U H U^\dagger =
-H$ where the symmetry transformation $U$ is given by a global spin flip,
+Here, ``X_i`` and ``Z_i`` are the [Pauli operators](https://en.wikipedia.org/wiki/Pauli_matrices) acting on site ``i``, and the first sum runs over pairs of nearest neighbors ``\langle i, j \rangle``.
+This model has a global ``\mathbb{Z}_2`` symmetry, as it is invariant under the transformation ``U H U^\dagger = H`` where the symmetry transformation ``U`` is given by a global spin flip,
```math
\begin{equation}
@@ -64,19 +49,13 @@ U = \prod_i X_i.
We will circle back to the implications of this symmetry later.
-As a warmup, we implement the Hamiltonian \eqref{eq:isingham} in the standard way by
-encoding the matrix elements of the single-site operators $X$ and $Z$ into an array of
-complex numbers, and then combine them in a suitable way to get the Hamiltonian terms.
-Instead of using plain Julia arrays, we use a representation in terms of `TensorMap`s
-over complex vector spaces. These are essentially just wrappers around base arrays at
-this point, but their construction requires some consideration of the notion of *spaces*,
-which generalize the notion of `size` for arrays. Each of the operators $X$ and $Z$ acts on
-a local 2-dimensional complex vector space. In the context of TensorKit.jl, such a space can
-be represented as `ComplexSpace(2)`, or using the convenient shorthand `ℂ^2`. A single-site
-Pauli operator maps from a domain physical space to a codomain physical space, and can
-therefore be represented as instances of a `TensorMap(..., ℂ^2 ← ℂ^2)`. The corresponding
-data can then be filled in by hand according to the familiar Pauli matrices in the following
-way:
+As a warmup, we implement the Hamiltonian \eqref{eq:isingham} in the standard way by encoding the matrix elements of the single-site operators ``X`` and ``Z`` into an array of complex numbers, and then combine them in a suitable way to get the Hamiltonian terms.
+Instead of using plain Julia arrays, we use a representation in terms of `TensorMap`s over complex vector spaces.
+These are essentially just wrappers around base arrays at this point, but their construction requires some consideration of the notion of *spaces*, which generalize the notion of `size` for arrays.
+Each of the operators ``X`` and ``Z`` acts on a local two-dimensional complex vector space.
+In the context of TensorKit.jl, such a space can be represented as `ComplexSpace(2)`, or using the convenient shorthand `ℂ^2`.
+A single-site Pauli operator maps from a domain physical space to a codomain physical space, and can therefore be represented as instances of a `TensorMap(..., ℂ^2 ← ℂ^2)`.
+The corresponding data can then be filled in by hand according to the familiar Pauli matrices in the following way:
```@example symmetric_tutorial
# initialize numerical data for Pauli matrices
@@ -94,9 +73,8 @@ Z = TensorMap(z_mat, V ← V)
ZZ = Z ⊗ Z
```
-We can easily verify that our operators have the desired form by checking their data in the
-computational basis. We can print this data by calling the [`blocks`](@ref) method (we'll
-explain exactly what these 'blocks' are further down):
+We can easily verify that our operators have the desired form by checking their data in the computational basis.
+We can print this data by calling the [`blocks`](@ref) method (we'll explain exactly what these *blocks* are further down):
```@example symmetric_tutorial
blocks(ZZ)
@@ -106,38 +84,26 @@ blocks(ZZ)
blocks(X)
```
-## Level 1: The $\mathbb{Z}_2$-symmetric Ising model
+## Level 1: The ``\mathbb{Z}_2``-symmetric Ising model
### The irrep basis and block sparsity
-Let us now return to the global $\mathbb{Z}_2$ invariance of the Hamiltonian
-\eqref{eq:isingham}, and consider what this implies for its local terms $ZZ$ and $X$.
-Representing these operators as `TensorMap`s, the invariance of $H$ under a global
-$\mathbb{Z}_2$ transformation implies the following identities for the local tensors:
+Let us now return to the global ``\mathbb{Z}_2`` invariance of the Hamiltonian \eqref{eq:isingham}, and consider what this implies for its local terms ``ZZ`` and ``X``.
+Representing these operators as `TensorMap`s, the invariance of ``H`` under a global ``\mathbb{Z}_2`` transformation implies the following identities for the local tensors:
```@raw html
```
-These identitities precisely mean that these local tensors transform trivially under a
-tensor product representation of $\mathbb{Z}_2$. This implies that, recalling [the
-introduction on symmetries](@ref ss_symmetries), in an appropriate basis for the local
-physical vector space, our local tensors would become block-diagonal where each so-called
-*matrix block* is labeled by a $\mathbb{Z}_2$ irrep. The appropriate local basis
-transformation is precisely the one that brings the local representation $X$ into
-block-diagonal form. Clearly, this transformation is nothing more than the Hadamard
-transformation which maps the computational basis of $Z$ eigenstates $\{\ket{\uparrow}, \ket{\downarrow}\}$ to that of the $X$ eigenstates $\{\ket{+}, \ket{-}\}$ defined as
-$\ket{+} = \frac{\ket{\uparrow} + \ket{\downarrow}}{\sqrt{2}}$ and
-$\ket{-} = \frac{\ket{\uparrow} - \ket{\downarrow}}{\sqrt{2}}$. In the current context,
-this basis is referred to as the *irrep basis* of $\mathbb{Z}_2$, since each basis state
-corresponds to a one-dimensional irreducible representation of $\mathbb{Z}_2$. Indeed, the
-local symmetry transformation $X$ acts trivially on the state $\ket{+}$, corresponding to
-the *trivial irrep*, and yields a minus sign when acting on $\ket{-}$, corresponding to the
-*sign irrep*.
-
-Next, let's make the statement that "the matrix blocks of the local tensors are labeled by
-$\mathbb{Z}_2$ irreps" more concrete. To this end, consider the action of $ZZ$ in the irrep
-basis, which is given by the four nonzero matrix elements
+These identities precisely mean that these local tensors transform trivially under a tensor product representation of ``\mathbb{Z}_2``.
+This implies that, recalling [the introduction on symmetries](@ref ss_symmetries), in an appropriate basis for the local physical vector space, our local tensors would become block-diagonal where each so-called *matrix block* is labeled by a ``\mathbb{Z}_2`` irrep.
+The appropriate local basis transformation is precisely the one that brings the local representation ``X`` into block-diagonal form.
+Clearly, this transformation is nothing more than the Hadamard transformation which maps the computational basis of ``Z`` eigenstates ``\{\ket{\uparrow}, \ket{\downarrow}\}`` to that of the ``X`` eigenstates ``\{\ket{+}, \ket{-}\}`` defined as ``\ket{+} = \frac{\ket{\uparrow} + \ket{\downarrow}}{\sqrt{2}}`` and ``\ket{-} = \frac{\ket{\uparrow} - \ket{\downarrow}}{\sqrt{2}}``.
+In the current context, this basis is referred to as the *irrep basis* of ``\mathbb{Z}_2``, since each basis state corresponds to a one-dimensional irreducible representation of ``\mathbb{Z}_2``.
+Indeed, the local symmetry transformation ``X`` acts trivially on the state ``\ket{+}``, corresponding to the *trivial irrep*, and yields a minus sign when acting on ``\ket{-}``, corresponding to the *sign irrep*.
+
+Next, let's make the statement that "the matrix blocks of the local tensors are labeled by ``\mathbb{Z}_2`` irreps" more concrete.
+To this end, consider the action of ``ZZ`` in the irrep basis, which is given by the four nonzero matrix elements
```math
\begin{align}
@@ -150,94 +116,59 @@ ZZ : \mathbb C^2 \otimes \mathbb C^2 &\to \mathbb C^2 \otimes \mathbb C^2 : \\
\end{align}
```
-We will denote the trivial $\mathbb{Z}_2$ irrep by $'0'$, corresponding to a local $\ket{+}$
-state, and the sign irrep by $'1'$, corresponding to a local $\ket{-}$ state. Given this
-identification, we can naturally associate the tensor product of basis vectors in the irrep
-basis to the tensor product of the corresponding $\mathbb{Z}_2$ irreps. One of the key
-questions of the [representation theory of groups](representation_theory) is how the tensor
-product of two irreps can be decomposed into a direct sum of irreps. This decomposition is
-encoded in what are often called the
-[*fusion rules*](https://en.wikipedia.org/wiki/Fusion_rules),
+We will denote the trivial ``\mathbb{Z}_2`` irrep by ``'0'``, corresponding to a local ``\ket{+}`` state, and the sign irrep by ``'1'``, corresponding to a local ``\ket{-}`` state.
+Given this identification, we can naturally associate the tensor product of basis vectors in the irrep basis to the tensor product of the corresponding ``\mathbb{Z}_2`` irreps.
+One of the key questions of the [representation theory of groups](@ref representation_theory) is how the tensor product of two irreps can be decomposed into a direct sum of irreps.
+This decomposition is encoded in what are often called the [*fusion rules*](https://en.wikipedia.org/wiki/Fusion_rules),
```math
a \otimes b \cong \bigoplus_c N_c^{ab} c,
```
-where $N_{ab}^c$ encodes the number of times the irrep $c$ occurs in the tensor product of
-irreps $a$ and $b$. These fusion rules are called *Abelian* if the tensor product of any two
-irreps corresponds to exactly one irrep. We will return to the implications of irreps with
-*non-Abelian* fusion rules [later](@ref ss_non_abelian).
+where ``N_c^{ab}`` encodes the number of times the irrep ``c`` occurs in the tensor product of irreps ``a`` and ``b``.
+These fusion rules are called *Abelian* if the tensor product of any two irreps corresponds to exactly one irrep.
+We will return to the implications of irreps with *non-Abelian* fusion rules [later](@ref ss_non_abelian).
!!! note
- Within TensorKit.jl, the nature of the fusion rules for charges of a given symmetry are
- represented by the [`FusionStyle`](@ref) of the corresponding `Sector` subtype. What we
- refer to as "Abelian" fusion rules in this tutorial corresponds to
- `UniqueFusion <: FusionStyle`. We will also consider [examples](@ref ss_non_abelian) of
- two different kinds of non-Abelian" fusion rules, corresponding to
- `MultipleFusion <: FusionStyle` styles.
-
-For the case of the $\mathbb{Z}_2$ irreps, the fusion rules are Abelian, and are given by
-addition modulo 2,
+ Within TensorKit.jl, the nature of the fusion rules for charges of a given symmetry are represented by the [`FusionStyle`](@ref) of the corresponding `Sector` subtype.
+ What we refer to as "Abelian" fusion rules in this tutorial corresponds to `UniqueFusion <: FusionStyle`.
+ We will also consider [examples](@ref ss_non_abelian) of two different kinds of "non-Abelian" fusion rules, corresponding to `MultipleFusion <: FusionStyle` styles.
+
+For the case of the ``\mathbb{Z}_2`` irreps, the fusion rules are Abelian, and are given by addition modulo 2,
```math
0 \otimes 0 \cong 0, \quad 0 \otimes 1 \cong 1, \quad 1 \otimes 0 \cong 1, \quad 1 \otimes 1 \cong 0.
```
-To see how these fusion rules arise, we can consider the action of the symmetry
-transformation $XX$ on the possible two-site basis states, each of which corresponds to a
-tensor product of representations. We can see that $XX$ acts trivially on both $\ket{+}
-\otimes \ket{+}$ and $\ket{-} \otimes \ket{-}$, meaning these transform under the trivial
-representation, which gives the first and last entries of the fusion rules. Similarly, $XX$
-acts with a minus sign on both $\ket{+} \otimes \ket{-}$ and $\ket{-} \otimes \ket{+}$,
-meaning these transform under the sign representation, which gives the second and third
-entries of the fusion rules. Having introduced this notion of 'fusing' irreps, we can now
-associate a well-defined *coupled irrep* to each of the four two-site basis
-states, which is given by the tensor product of the two *uncoupled irreps* associated to
-each individual site. From the matrix elements of $ZZ$ given above, we clearly see that this
-operator only maps between states in the domain and codomain that have the same coupled
-irrep. This means that we can associate each of these matrix elements to a so-called *fusion
-tree* of $\mathbb{Z}_2$ irreps with a corresponding coefficient of 1,
+To see how these fusion rules arise, we can consider the action of the symmetry transformation ``XX`` on the possible two-site basis states, each of which corresponds to a tensor product of representations.
+We can see that ``XX`` acts trivially on both ``\ket{+} \otimes \ket{+}`` and ``\ket{-} \otimes \ket{-}``, meaning these transform under the trivial representation, which gives the first and last entries of the fusion rules.
+Similarly, ``XX`` acts with a minus sign on both ``\ket{+} \otimes \ket{-}`` and ``\ket{-} \otimes \ket{+}``, meaning these transform under the sign representation, which gives the second and third entries of the fusion rules.
+Having introduced this notion of 'fusing' irreps, we can now associate a well-defined *coupled irrep* to each of the four two-site basis states, which is given by the tensor product of the two *uncoupled irreps* associated to each individual site.
+From the matrix elements of ``ZZ`` given above, we clearly see that this operator only maps between states in the domain and codomain that have the same coupled irrep.
+This means that we can associate each of these matrix elements to a so-called *fusion tree* of ``\mathbb{Z}_2`` irreps with a corresponding coefficient of 1,
```@raw html
```
-This diagram should be read from top to bottom, where it represents the fusion of the two
-uncoupled irreps in the domain to the coupled irrep on the middle line, and the splitting of
-this coupled irrep to the uncoupled irreps in the codomain. From this our previous statement
-becomes very clear: the $ZZ$ operator indeed consists of two distinct two-dimensional
-matrix blocks, each of which are labeled by the value of the *coupled irrep* on the middle
-line of each fusion tree. The first block corresponds to the even coupled irrep '0', and
-acts within the two-dimensional subspace spanned by $\{\ket{+,+}, \ket{-,-}\}$, while the
-second block corresponds to the odd coupled irrep '1', and acts within the two-dimensional
-subspace spanned by $\{\ket{+,-}, \ket{-,+}\}$. In TensorKit.jl, this block-diagonal
-structure of a symmetric tensor is explicitly encoded into its representation as a
-`TensorMap`, where only the matrix blocks corresponding to each coupled irrep are stored.
-These matrix blocks associated to each coupled irrep are precisely what is accessed by the
-[`blocks`](@ref) method we have already used above.
-
-For our current purposes however, *we never really need to explicitly consider these matrix
-blocks*. Indeed, when constructing a `TensorMap` it is sufficient to set its data by
-manually assigning a matrix element to each [fusion tree of the form above](Z2_fusiontrees)
-labeled by a given tensor product of irreps. This matrix element is then automatically
-inserted into the appropriate matrix block. So, for the purpose of this tutorial **we will
-interpret a symmetric `TensorMap` simply as a list of fusion trees, to each of which
-corresponds a certain reduced tensor element**. In TensorKit.jl, these reduced tensor
-elements corresponding to the fusion trees of a `TensorMap` can be accessed through the
-[`subblocks`](@ref) method.
+This diagram should be read from top to bottom, where it represents the fusion of the two uncoupled irreps in the domain to the coupled irrep on the middle line, and the splitting of this coupled irrep to the uncoupled irreps in the codomain.
+From this our previous statement becomes very clear: the ``ZZ`` operator indeed consists of two distinct two-dimensional matrix blocks, each of which are labeled by the value of the *coupled irrep* on the middle line of each fusion tree.
+The first block corresponds to the even coupled irrep '0', and acts within the two-dimensional subspace spanned by ``\{\ket{+,+}, \ket{-,-}\}``, while the second block corresponds to the odd coupled irrep '1', and acts within the two-dimensional subspace spanned by ``\{\ket{+,-}, \ket{-,+}\}``.
+In TensorKit.jl, this block-diagonal structure of a symmetric tensor is explicitly encoded into its representation as a `TensorMap`, where only the matrix blocks corresponding to each coupled irrep are stored.
+These matrix blocks associated to each coupled irrep are precisely what is accessed by the [`blocks`](@ref) method we have already used above.
+
+For our current purposes however, *we never really need to explicitly consider these matrix blocks*.
+Indeed, when constructing a `TensorMap` it is sufficient to set its data by manually assigning a matrix element to each [fusion tree of the form above](@ref Z2_fusiontrees) labeled by a given tensor product of irreps.
+This matrix element is then automatically inserted into the appropriate matrix block.
+So, for the purpose of this tutorial **we will interpret a symmetric `TensorMap` simply as a list of fusion trees, to each of which corresponds a certain reduced tensor element**.
+In TensorKit.jl, these reduced tensor elements corresponding to the fusion trees of a `TensorMap` can be accessed through the [`subblocks`](@ref) method.
!!! note
- In general, such a reduced tensor element is not necessarily a scalar, but rather an array
- whose size is determined by the degeneracy of the irreps in the codomain and domain of the
- fusion tree. For this reason, a reduced tensor element associated to a given fusion tree is
- also referred to as a *subblock*. In the following we will always use terms 'reduced tensor
- element' or 'subblock' for the reduced tensor elements, to make it clear that these are
- distinct from the matrix blocks in the block-diagonal decomposition of the tensor.
+ In general, such a reduced tensor element is not necessarily a scalar, but rather an array whose size is determined by the degeneracy of the irreps in the codomain and domain of the fusion tree.
+ For this reason, a reduced tensor element associated to a given fusion tree is also referred to as a *subblock*.
+ In the following we will always use terms *reduced tensor element* or *subblock* for the reduced tensor elements, to make it clear that these are distinct from the matrix blocks in the block-diagonal decomposition of the tensor.
### [Fusion trees and how to use them](@id sss_fusion_trees)
-This view of the underlying symmetry structure in terms of fusion trees of irreps and
-corresponding reduced tensor elements is a very convenient way of working with the
-`TensorMap` type. In fact, this symmetry structure is inherently ingrained in a `TensorMap`,
-and goes beyond the group-loke symmetries we have considered until now. In this more general
-setting, we will refer to the labels that appear on this fusion trees as *charges* or
-*sectors*. These can be thought of as generalization of group irreps, and appear in the
-context of TensorKit.jl as instances of the [`Sector`](@ref) type.
+This view of the underlying symmetry structure in terms of fusion trees of irreps and corresponding reduced tensor elements is a very convenient way of working with the `TensorMap` type.
+In fact, this symmetry structure is inherently ingrained in a `TensorMap`, and goes beyond the group-like symmetries we have considered until now.
+In this more general setting, we will refer to the labels that appear on these fusion trees as *charges* or *sectors*.
+These can be thought of as generalizations of group irreps, and appear in the context of TensorKit.jl as instances of the [`Sector`](@ref) type.
Consider a generic fusion tree of the form
@@ -246,14 +177,11 @@ Consider a generic fusion tree of the form
```
which can be used to label a subblock of a `TensorMap` corresponding to a two-site operator.
-This object should actually be seen as a *pair of fusion trees*. The first member of the
-pair, related to the codomain of the `TensorMap`, is referred to as the *splitting tree* and
-encodes how the *coupled charge* $c$ splits into the *uncoupled charges* $s_1$ and $s_2$.
-The second member of the pair, related to the domain of the `TensorMap`, is referred to as
-the *fusion tree* and encodes how the uncoupled charges $f_1$ and $f_2$ fuse to the coupled
-charge $c$. Both the splitting and fusion tree can be represented as a [`FusionTree`](@ref)
-instance. You will find such a `FusionTree` has the following properties encoded into its
-fields:
+This object should actually be seen as a *pair of fusion trees*.
+The first member of the pair, related to the codomain of the `TensorMap`, is referred to as the *splitting tree* and encodes how the *coupled charge* ``c`` splits into the *uncoupled charges* ``s_1`` and ``s_2``.
+The second member of the pair, related to the domain of the `TensorMap`, is referred to as the *fusion tree* and encodes how the uncoupled charges ``f_1`` and ``f_2`` fuse to the coupled charge ``c``.
+Both the splitting and fusion tree can be represented as a [`FusionTree`](@ref) instance.
+You will find such a `FusionTree` has the following properties encoded into its fields:
- `uncoupled::NTuple{N,I}`: a list of `N` uncoupled charges of type `I<:Sector`
- `coupled::I`: a single coupled charge of type `I<:Sector`
@@ -261,47 +189,37 @@ fields:
- `innerlines::NTuple{M,I}`: a list of inner lines of type `I<:Sector` of length `M = N - 2`
- `vertices::NTuple{L,T}`: list of fusion vertex labels of type `T` and length `L = N - 1`
-For our current application only `uncoupled` and `coupled` are relevant, since
-$\mathbb{Z}_2$ irreps are self-dual and have Abelian fusion rules, so that irreps on the
-inner lines of a fusion tree are completely determined by the uncoupled irreps. We will come
-back to these other properties when discussion more involved applications. Given some
-`TensorMap`, the method [`fusiontrees`](@ref) returns an iterator over all pairs of
-splitting and fusion trees that label the subblocks of `t`.
+For our current application only `uncoupled` and `coupled` are relevant, since ``\mathbb{Z}_2`` irreps are self-dual and have Abelian fusion rules, so that irreps on the inner lines of a fusion tree are completely determined by the uncoupled irreps.
+We will come back to these other properties when discussing more involved applications.
+Given some `TensorMap` `t`, the method [`fusiontrees`](@ref) returns an iterator over all pairs of splitting and fusion trees that label the subblocks of `t`.
-### Constructing a $\mathbb{Z}_2$-symmetric `TensorMap`
+### Constructing a ``\mathbb{Z}_2``-symmetric `TensorMap`
-We can now put this into practice by directly constructing the $ZZ$ operator in the irrep
-basis as a $\mathbb{Z}_2$-symmetric `TensorMap`. We will do this in three steps:
+We can now put this into practice by directly constructing the ``ZZ`` operator in the irrep basis as a ``\mathbb{Z}_2``-symmetric `TensorMap`.
+We will do this in three steps:
-- First we construct the physical space at each site as a $\mathbb{Z}_2$-graded vector space.
-- Then we initialize an empty `TensorMap` with the correct domain and codomain vector spaces
- built from the previously constructed physical space.
-- And finally we iterate over all splitting and fusion tree pairs and manually fill in the
- corresponding nonzero subblocks of the operator.
+1. First we construct the physical space at each site as a ``\mathbb{Z}_2``-graded vector space.
+2. Then we initialize an empty `TensorMap` with the correct domain and codomain vector spaces built from the previously constructed physical space.
+3. And finally we iterate over all splitting and fusion tree pairs and manually fill in the corresponding nonzero subblocks of the operator.
-In TensorKit.jl, the representations of $\mathbb{Z}_2$ are represented as instances of the
-[`Z2Irrep <: Sector`](@ref ZNIrrep) type. There are two such instances, corresponding to the
-trivial irrep `Z2Irrep(0)` and the sign irrep `Z2Irrep(1)`. We can fuse irreps with the `⊗`
-(`\otimes`) operator, which can for example be used to check their fusion rules,
+In TensorKit.jl, the representations of ``\mathbb{Z}_2`` are represented as instances of the [`Z2Irrep <: Sector`](@ref ZNIrrep) type.
+There are two such instances, corresponding to the trivial irrep `Z2Irrep(0)` and the sign irrep `Z2Irrep(1)`.
+We can fuse irreps with the `⊗` (`\otimes`) operator, which can for example be used to check their fusion rules,
```@example symmetric_tutorial
for a in values(Z2Irrep), b in values(Z2Irrep)
println("$a ⊗ $b = $(a ⊗ b)")
end
```
-After the basis transform to the irrep basis, we can view the two-dimensional complex
-physical vector space we started with as being spanned by the trivial and sign irrep of
-$\mathbb{Z}_2$. In the language of TensorKit.jl, this can be implemented as a `Z2Space`, an
-alias for a [graded vector space](@ref GradedSpace) `Vect[Z2Irrep]`. Such a graded vector
-space $V$ is a direct sum of irreducible representation spaces $V^{(a)}$ labeled by the
-irreps $a$ of the group,
+After the basis transform to the irrep basis, we can view the two-dimensional complex physical vector space we started with as being spanned by the trivial and sign irrep of ``\mathbb{Z}_2``.
+In the language of TensorKit.jl, this can be implemented as a `Z2Space`, an alias for a [graded vector space](@ref GradedSpace) `Vect[Z2Irrep]`.
+Such a graded vector space ``V`` is a direct sum of irreducible representation spaces ``V^{(a)}`` labeled by the irreps ``a`` of the group,
```math
V = \bigotimes_a N_a \cdot V^{(a)}.
```
-The number of times $N_a$ each irrep $a$ appears in the direct sum is called the
-*degeneracy* of the irrep. To construct such a graded space, we therefore have to specify
-which irreps it contains, and indicate the degeneracy of each irrep. Here, our physical
-vector space contains the trivial irrep `Z2Irrep(0)` with degeneracy 1 and the sign irrep
-`Z2Irrep(1)` with degeneracy 1. This means this particular graded space has the form
+The number of times ``N_a`` that each irrep ``a`` appears in the direct sum is called the *degeneracy* of the irrep.
+To construct such a graded space, we therefore have to specify which irreps it contains, and indicate the degeneracy of each irrep.
+Here, our physical vector space contains the trivial irrep `Z2Irrep(0)` with degeneracy 1 and the sign irrep `Z2Irrep(1)` with degeneracy 1.
+This means this particular graded space has the form
```math
V = 1 \cdot V^{(0)} \oplus 1 \cdot V^{(1)},
```
@@ -309,8 +227,7 @@ which can be constructed in the following way,
```@example symmetric_tutorial
V = Z2Space(0 => 1, 1 => 1)
```
-As a consistency check, we can inspect its dimension as well as the degeneracies of the
-individual irreps:
+As a consistency check, we can inspect its dimension as well as the degeneracies of the individual irreps:
```@example symmetric_tutorial
dim(V)
```
@@ -321,25 +238,18 @@ dim(V, Z2Irrep(0))
dim(V, Z2Irrep(1))
```
-Given this physical space, we can initialize the $ZZ$ operator as an empty `TensorMap` with
-the appropriate structure.
+Given this physical space, we can initialize the ``ZZ`` operator as an empty `TensorMap` with the appropriate structure.
```@example symmetric_tutorial
ZZ = zeros(ComplexF64, V ⊗ V ← V ⊗ V)
```
-To assess the underlying structure of a symmetric tensor, it is often useful to inspect its
-[`subblocks`](@ref subblocks),
+To assess the underlying structure of a symmetric tensor, it is often useful to inspect its [`subblocks`](@ref subblocks),
```@example symmetric_tutorial
subblocks(ZZ)
```
-While all entries are zero, we see that all eight valid fusion trees with two incoming
-irreps and two outgoing irreps [of the type above](fusiontree) are listed with their
-corresponding subblock data. Each of these subblocks is an array of shape $(1, 1, 1, 1)$
-since each irrep occuring in the space $V$ has degeneracy 1. Using the [`fusiontrees`](@ref)
-method and the fact that we can index a `TensorMap` using a splitting/fusion tree pair, we
-can now fill in the nonzero subblocks of the operator by observing that the $ZZ$ operator
-flips the irreps of the uncoupled charges in the domain with respect to the codomain, as
-shown in the diagrams above. Flipping a given `Z2Irrep` in the codomain can be implemented
-by fusing them with the sign irrep `Z2Irrep(1)`, giving:
+While all entries are zero, we see that all eight valid fusion trees with two incoming irreps and two outgoing irreps [of the type above](@ref fusiontree) are listed with their corresponding subblock data.
+Each of these subblocks is an array of shape ``(1, 1, 1, 1)`` since each irrep occurring in the space ``V`` has degeneracy 1.
+Using the [`fusiontrees`](@ref) method and the fact that we can index a `TensorMap` using a splitting/fusion tree pair, we can now fill in the nonzero subblocks of the operator by observing that the ``ZZ`` operator flips the irreps of the uncoupled charges in the domain with respect to the codomain, as shown in the diagrams above.
+Flipping a given `Z2Irrep` in the codomain can be implemented by fusing them with the sign irrep `Z2Irrep(1)`, giving:
```@example symmetric_tutorial
flip_charge(charge::Z2Irrep) = only(charge ⊗ Z2Irrep(1))
@@ -351,11 +261,9 @@ end
subblocks(ZZ)
```
-Indeed, the resulting `TensorMap` exactly encodes the matrix elements of the $ZZ$ operator
-shown in [the diagrams above](Z2_fusiontrees). The $X$ operator can be constructed in a
-similar way. Since it is by definition diagonal in the irrep basis with matrix blocks
-directly corresponding to the trivial and sign irrep, its construction is particularly
-simple:
+Indeed, the resulting `TensorMap` exactly encodes the matrix elements of the ``ZZ`` operator shown in [the diagrams above](@ref Z2_fusiontrees).
+The ``X`` operator can be constructed in a similar way.
+Since it is by definition diagonal in the irrep basis with matrix blocks directly corresponding to the trivial and sign irrep, its construction is particularly simple:
```@example symmetric_tutorial
X = zeros(ComplexF64, V ← V)
@@ -369,35 +277,28 @@ end
subblocks(X)
```
-Given these local operators, we can use them to construct the full manifestly
-$\mathbb{Z}_2$-symmetric Hamiltonian.
+Given these local operators, we can use them to construct the full manifestly ``\mathbb{Z}_2``-symmetric Hamiltonian.
!!! note
- An important observation is that, when explicitly imposing the $\mathbb{Z}_2$ symmetry, we
- directly constructed the full $ZZ$ operator as a single symmetric tensor. This in contrast
- to the case without symmetries, where we constructed a single-site $Z$ operator and then
- combined them into a two-site operator. Clearly this can no longer be done when imposing
- $\mathbb{Z}_2$, since a single $Z$ is not invariant under conjugation with the symmetry
- operator $X$. One might wonder whether it is still possible to construct a two-site
- Hamiltonian term by combining local objects. This is possible if one introduces an auxiliary
- index on the local tensors that carries a non-trivial charge. The intuition behind this will
- become more clear in the next example.
-
-
-## Level 2: The $\mathrm{U}(1)$ Bose-Hubbard model
-
-For our next example, we consider the
-[Bose-Hubbard model](https://en.wikipedia.org/wiki/Bose%E2%80%93Hubbard_model), which
-describes interacting bosons on a lattice. The Hamiltonian of this model is given by
+ An important observation is that, when explicitly imposing the ``\mathbb{Z}_2`` symmetry, we directly constructed the full ``ZZ`` operator as a single symmetric tensor.
+ This in contrast to the case without symmetries, where we constructed a single-site ``Z`` operator and then combined them into a two-site operator.
+ Clearly this can no longer be done when imposing ``\mathbb{Z}_2``, since a single ``Z`` is not invariant under conjugation with the symmetry operator ``X``.
+ One might wonder whether it is still possible to construct a two-site Hamiltonian term by combining local objects.
+ This is possible if one introduces an auxiliary index on the local tensors that carries a non-trivial charge.
+ The intuition behind this will become more clear in the next example.
+
+
+## Level 2: The ``\mathsf{U}_1`` Bose-Hubbard model
+
+For our next example, we consider the [Bose-Hubbard model](https://en.wikipedia.org/wiki/Bose%E2%80%93Hubbard_model), which describes interacting bosons on a lattice.
+The Hamiltonian of this model is given by
```math
\begin{equation}
\label{eq:bhh}
H = -t \sum_{\langle i,j \rangle} \left( a_{i}^+ a_{j}^- + a_{i}^- a_{j}^+ \right) - \mu \sum_i N_i + \frac{U}{2} \sum_i N_i(N_i - 1).
\end{equation}
```
-This Hamiltonian is defined on the [Fock space](https://en.wikipedia.org/wiki/Fock_space) associated to a chain of bosons,
-where the action bosonic creation, annihilation and number operators $a^+$, $a^-$ and $N =
-a^+ a^-$ in the local occupation number basis is given by
+This Hamiltonian is defined on the [Fock space](https://en.wikipedia.org/wiki/Fock_space) associated to a chain of bosons, where the action of the bosonic creation, annihilation and number operators ``a^+``, ``a^-`` and ``N = a^+ a^-`` in the local occupation number basis is given by
```math
\begin{align}
\label{eq:bosonopmatel}
@@ -416,33 +317,24 @@ Their bosonic nature can be summarized by the familiar the commutation relations
\end{align*}
```
-This Hamiltonian is invariant under conjugation by the global particle number operator, $U H
-U^\dagger = H$, where
+This Hamiltonian is invariant under conjugation by the global particle number operator, ``U H U^\dagger = H``, where
```math
U = \sum_i N_i
```
-This invariance corresponds to a $\mathrm{U}(1)$ particle number symmetry, which can again
-be manifestly imposed when constructing the Hamiltonian terms as `TensorMap`s. From the
-representation theory of $\mathrm{U}(1)$, we know that its irreps are all one-dimensional
-and can be labeled by integers $n$ where the tensor product of two irreps is corresponds to
-addition of these labels, giving the Abelian fusion rules
+This invariance corresponds to a ``\mathsf{U}_1`` particle number symmetry, which can again be manifestly imposed when constructing the Hamiltonian terms as `TensorMap`s.
+From the representation theory of ``\mathsf{U}_1``, we know that its irreps are all one-dimensional and can be labeled by integers ``n``, where the tensor product of two irreps corresponds to addition of these labels, giving the Abelian fusion rules
+
```math
n_1 \otimes n_2 \cong (n_1 + n_2).
```
-
### Directly constructing the Hamiltonian terms
-We recall from our discussion on the $\mathbb{Z}_2$ symmetric Ising model that, in order to
-construct the Hamiltonian terms as symmetric tensors, we should work in the irrep basis
-where the symmetry transformation is block diagonal. In the current case, the symmetry
-operation is the particle number operator, which is already diagonal in the occupation
-number basis. Therefore, we don't need an additional local basis transformation this time,
-and can just observe that each local basis state can be identified with the $\mathrm{U}(1)$
-irrep associated to the corresponding occupation number.
+We recall from our discussion on the ``\mathbb{Z}_2`` symmetric Ising model that, in order to construct the Hamiltonian terms as symmetric tensors, we should work in the irrep basis where the symmetry transformation is block diagonal.
+In the current case, the symmetry operation is the particle number operator, which is already diagonal in the occupation number basis.
+Therefore, we don't need an additional local basis transformation this time, and can just observe that each local basis state can be identified with the ``\mathsf{U}_1`` irrep associated to the corresponding occupation number.
-Following the same approach as before, we first write down the action of the Hamiltonian
-terms in the irrep basis:
+Following the same approach as before, we first write down the action of the Hamiltonian terms in the irrep basis:
```math
\begin{align*}
@@ -452,31 +344,24 @@ N \ket{n} &= n \ket{n}
\end{align*}
```
-It is then a simple observation that these matrix elements are exactly captured by the
-following $\mathrm{U}(1)$ fusion trees with corresponding subblock values:
+It is then a simple observation that these matrix elements are exactly captured by the following ``\mathsf{U}_1`` fusion trees with corresponding subblock values:
```@raw html
```
-This gives us all the information necessary to construct the corresponding `TensorMap`s. We
-follow the same steps as outlined in the previous example, starting with the construction of
-the physical space. This will now be a $\mathrm{U}(1)$ graded vector space `U1Space`, where
-each basis state $\ket{n}$ in the occupation number basis is represented by the
-corresponding $\mathrm{U}(1)$ irrep `U1Irrep(n)` with degeneracy 1. While this physical
-space is in principle infinite dimensional, we will impose a cutoff in occupation number at
-a maximum of 5 bosons per site, giving a 6-dimensional vector space:
+This gives us all the information necessary to construct the corresponding `TensorMap`s.
+We follow the same steps as outlined in the previous example, starting with the construction of the physical space.
+This will now be a ``\mathsf{U}_1`` graded vector space `U1Space`, where each basis state ``\ket{n}`` in the occupation number basis is represented by the corresponding ``\mathsf{U}_1`` irrep `U1Irrep(n)` with degeneracy 1.
+While this physical space is in principle infinite dimensional, we will impose a cutoff in occupation number at a maximum of 5 bosons per site, giving a 6-dimensional vector space:
```@example symmetric_tutorial
cutoff = 5
V = U1Space(n => 1 for n in 0:cutoff)
```
-We can now initialize the $a^+ a^-$, $a^- a^+$ and $N$ operators as empty `TensorMap`s with
-the correct domain and codomain vector spaces, and fill in the nonzero subblocks associated
-to [the fusion trees shown above](U1_fusiontrees). To do this we need access to the integer
-label of the $\mathrm{U}(1)$ irreps in the fusion and splitting trees, which can be accessed
-through the `charge` field of the `U1Irrep` type.
+We can now initialize the ``a^+ a^-``, ``a^- a^+`` and ``N`` operators as empty `TensorMap`s with the correct domain and codomain vector spaces, and fill in the nonzero subblocks associated to [the fusion trees shown above](U1_fusiontrees).
+To do this we need access to the integer label of the ``\mathsf{U}_1`` irreps in the fusion and splitting trees, which can be accessed through the `charge` field of the `U1Irrep` type.
```@example symmetric_tutorial
a⁺a⁻ = zeros(ComplexF64, V ⊗ V ← V ⊗ V)
@@ -506,26 +391,18 @@ end
N
```
-By inspecting the `subblocks` of each of these tensors you can directly verify that they
-each have the correct reduced tensor elements.
+By inspecting the `subblocks` of each of these tensors you can directly verify that they each have the correct reduced tensor elements.
### Creation and annihilation operators as symmetric tensors
-Just as in the $\mathbb{Z}_2$ case, it is obvious that we cannot directly construct the
-creation and annihilation operators as instances of a `TensorMap(..., V ← V)` since they are
-not invariant under conjugation by the symmetry operator. However, it is possible to
-construct them as `TensorMap`s using an *auxiliary vector space*, based on the following
-intuition. The creation operator $a^+$ violates particle number conservation by mapping the
-occupation number $n$ to $n + 1$. From the point of view of representation theory, this
-process can be thought of as the *fusion* of an `U1Irrep(n)` with an `U1Irrep(1)`, naturally
-giving the fusion product `U1Irrep(n + 1)`. This means we can represent $a^+$ as a
-`TensorMap(..., V ← V ⊗ A)`, where the auxiliary vector space `A` contains the $+1$ irrep
-with degeneracy 1, `A = U1Space(1 => 1)`. Similarly, the decrease in occupation number when
-acting with $a^-$ can be thought of as the *splitting* of an `U1Irrep(n)` into an
-`U1Irrep(n - 1)` and an `U1Irrep(1)`, leading to a representation in terms of a
-`TensorMap(..., A ⊗ V ← V)`. Based on these observations, we can represent the matrix
-elements \eqref{eq:bosonopmatel} as subblocks labeled by the $\mathrm{U}(1)$ fusion trees
+Just as in the ``\mathbb{Z}_2`` case, it is obvious that we cannot directly construct the creation and annihilation operators as instances of a `TensorMap(..., V ← V)` since they are not invariant under conjugation by the symmetry operator.
+However, it is possible to construct them as `TensorMap`s using an *auxiliary vector space*, based on the following intuition.
+The creation operator ``a^+`` violates particle number conservation by mapping the occupation number ``n`` to ``n + 1``.
+From the point of view of representation theory, this process can be thought of as the *fusion* of an `U1Irrep(n)` with an `U1Irrep(1)`, naturally giving the fusion product `U1Irrep(n + 1)`.
+This means we can represent ``a^+`` as a `TensorMap(..., V ← V ⊗ A)`, where the auxiliary vector space `A` contains the ``+1`` irrep with degeneracy 1, `A = U1Space(1 => 1)`.
+Similarly, the decrease in occupation number when acting with ``a^-`` can be thought of as the *splitting* of an `U1Irrep(n)` into an `U1Irrep(n - 1)` and an `U1Irrep(1)`, leading to a representation in terms of a `TensorMap(..., A ⊗ V ← V)`.
+Based on these observations, we can represent the matrix elements \eqref{eq:bosonopmatel} as subblocks labeled by the ``\mathsf{U}_1`` fusion trees
```@raw html
@@ -538,16 +415,11 @@ We can then combine these operators to get the appropriate Hamiltonian terms,
```
!!! note
- Although we have made a suggestive distinction between the 'left' and 'right' versions of
- the operators $a_L^\pm$ and $a_R^\pm$, one can actually be obtained from the other by
- permuting the physical and auxiliary indices of the corresponding `TensorMap`s. This
- permutation has no effect on the actual subblocks of the tensors due to the Abelian
- [`FusionStyle`](@ref) and bosonic [`BraidingStyle`](@ref) of $\mathrm{U}(1)$ irreps, so
- the left and right operators can in essence be seen as the 'same' tensors. This is no
- longer the case when considering non-Abelian symmetries, or symmetries associated with fermions or anyons. For these cases, permuting
- indices can in fact change the subblocks, as we will see next. As a consequence, it is
- much less clear how to construct two-site symmetric operators in terms of local
- symmetric objects.
+ Although we have made a suggestive distinction between the 'left' and 'right' versions of the operators ``a_L^\pm`` and ``a_R^\pm``, one can actually be obtained from the other by permuting the physical and auxiliary indices of the corresponding `TensorMap`s.
+ This permutation has no effect on the actual subblocks of the tensors due to the Abelian [`FusionStyle`](@ref) and bosonic [`BraidingStyle`](@ref) of ``\mathsf{U}_1`` irreps, so the left and right operators can in essence be seen as the 'same' tensors.
+ This is no longer the case when considering non-Abelian symmetries, or symmetries associated with fermions or anyons.
+ For these cases, permuting indices can in fact change the subblocks, as we will see next.
+ As a consequence, it is much less clear how to construct two-site symmetric operators in terms of local symmetric objects.
The explicit construction then looks something like
@@ -583,87 +455,55 @@ It is then simple to check that this is indeed what we expect.
```
!!! note
- From the construction of the Hamiltonian operators
- [in terms of creation and annihilation operators](bosonham) we clearly see that they are
- invariant under a transformation $a^\pm \to e^{\pm i\theta} a^\pm$. More generally, for
- a two-site operator that is defined as the contraction of two one-site operators across
- an auxiliary space, modifying the one-site operators by applying transformations $Q$ and
- $Q^{-1}$ on their respective auxiliary spaces for any invertible $Q$ leaves the
- resulting contraction unchanged. This ambiguity in the definition clearly shows that one
- should really always think in terms of the fully symmetric procucts of $a^+$ and $a^-$
- rather than in terms of these operators themselves. In particular, one can always
- decompose such a symmetric product into the [form above](bosonham) by means of an SVD.
+ From the construction of the Hamiltonian operators [in terms of creation and annihilation operators](bosonham) we clearly see that they are invariant under a transformation ``a^\pm \to e^{\pm i\theta} a^\pm``.
+ More generally, for a two-site operator that is defined as the contraction of two one-site operators across an auxiliary space, modifying the one-site operators by applying transformations ``Q`` and ``Q^{-1}`` on their respective auxiliary spaces for any invertible ``Q`` leaves the resulting contraction unchanged.
+    This ambiguity in the definition clearly shows that one should really always think in terms of the fully symmetric products of ``a^+`` and ``a^-`` rather than in terms of these operators themselves.
+ In particular, one can always decompose such a symmetric product into the [form above](bosonham) by means of an SVD.
## Level 3: Fermions and the Kitaev model
-While we have already covered quite a lot of ground towards understanding symmetric tensors
-in terms of fusion trees and corresponding subblocks, the symmetries considered so far have
-been quite 'simple' in the sense that sectors corresponding to irreps of $\mathbb{Z}_2$ and
-$\mathrm{U}(1)$ have [*Abelian fusion rules*](@ref FusionStyle) and
-[*bosonic exchange statistics*](@ref BraidingStyle).
-This means that the fusion of two irreps always gives a unique irrep as the fusion product,
-and that exchanging two irreps in a tensor product is trivial. In practice, this implies
-that for tensors with these symmetries the fusion trees are completely fixed by the
-uncoupled charges, which uniquely define both the inner lines and the coupled charge, and
-that tensor indices can be permuted freely without any 'strange' side effects.
+While we have already covered quite a lot of ground towards understanding symmetric tensors in terms of fusion trees and corresponding subblocks, the symmetries considered so far have been quite 'simple' in the sense that sectors corresponding to irreps of ``\mathbb{Z}_2`` and ``\mathsf{U}_1`` have [*Abelian fusion rules*](@ref FusionStyle) and [*bosonic exchange statistics*](@ref BraidingStyle).
+This means that the fusion of two irreps always gives a unique irrep as the fusion product, and that exchanging two irreps in a tensor product is trivial.
+In practice, this implies that for tensors with these symmetries the fusion trees are completely fixed by the uncoupled charges, which uniquely define both the inner lines and the coupled charge, and that tensor indices can be permuted freely without any strange side effects.
-In the following we will consider examples with fermionic and even anyonic exchange
-statistics, and non-Abelian fusion rules. In going through these examples it will become
-clear that the fusion trees labeling the subblocks of a symmetric tensor imply more information
-than just a labeling.
+In the following we will consider examples with fermionic and even anyonic exchange statistics, and non-Abelian fusion rules.
+In going through these examples it will become clear that the fusion trees labeling the subblocks of a symmetric tensor imply more information than just a labeling.
### Fermion parity symmetry
-As a simple example we will consider the Kitaev chain, which describes a chain of
-interacting spinless fermions with nearest-neighbor hopping and pairing terms. The
-Hamiltonian of this model is given by
+As a simple example we will consider the Kitaev chain, which describes a chain of interacting spinless fermions with nearest-neighbor hopping and pairing terms.
+The Hamiltonian of this model is given by
```math
\begin{equation}
\label{eq:kitaev}
H = \sum_{\langle i,j \rangle} \left(-\frac{t}{2}(c_i^+ c_j^- - c_i^- c_j^+) + \frac{\Delta}{2}(c_i^+ c_j^+ - c_i^- c_j^-) \right) - \mu \sum_{i} N_i
\end{equation}
```
-where $N_i = c_i^+ c_i^-$ is the local particle number operator. As opposed to the previous
-case, the fermionic creation and annihilation operators now satisfy the anticommutation
-relations
+where ``N_i = c_i^+ c_i^-`` is the local particle number operator.
+As opposed to the previous case, the fermionic creation and annihilation operators now satisfy the anticommutation relations
```math
\begin{align*}
\left\{c_i^-, c_j^-\right\} &= \left\{c_i^+, c_j^+\right\} = 0 \\
\left\{c_i^-, c_j^+\right\} &= \delta_{ij} .\\
\end{align*}
```
-These relations justify the choice of the relative minus sign in the hopping and pairing
-terms. Indeed, since fermionic operators on different sites always anticommute, these
-relative minus signs are needed to ensure that the Hamiltonian is Hermitian, since $\left(
-c_i^+ c_j^- \right)^\dagger = c_j^+ c_i^- = - c_i^- c_j^+$ and $\left( c_i^+ c_j^+
-\right)^\dagger = c_j^- c_i^- = - c_i^- c_j^-$. The anticommutation relations also naturally
-restrict the local occupation number to be 0 or 1, leading to a well-defined notion of
-*fermion-parity*. The local fermion-parity operator is related to the fermion number
-operator as $Q_i = (-1)^{N_i}$, and is diagonal in the occupation number basis. The
-Hamiltonian \eqref{eq:kitaev} is invariant under conjugation by the global fermion-parity
-operator, $Q H Q^\dagger = H$, where
+These relations justify the choice of the relative minus sign in the hopping and pairing terms.
+Indeed, since fermionic operators on different sites always anticommute, these relative minus signs are needed to ensure that the Hamiltonian is Hermitian, since ``\left( c_i^+ c_j^- \right)^\dagger = c_j^+ c_i^- = - c_i^- c_j^+`` and ``\left( c_i^+ c_j^+ \right)^\dagger = c_j^- c_i^- = - c_i^- c_j^-``.
+The anticommutation relations also naturally restrict the local occupation number to be 0 or 1, leading to a well-defined notion of *fermion-parity*.
+The local fermion-parity operator is related to the fermion number operator as ``Q_i = (-1)^{N_i}``, and is diagonal in the occupation number basis.
+The Hamiltonian \eqref{eq:kitaev} is invariant under conjugation by the global fermion-parity operator, ``Q H Q^\dagger = H``, where
```math
Q = \exp \left( i \pi \sum_i N_i \right) = (-1)^{\sum_i N_i}.
```
-This fermion parity symmetry, which we will denote as $f\mathbb{Z}_2$, is a
-$\mathbb{Z}_2$-like symmetry in the sense that it has a trivial representation, which we
-call *even* and again denote by '0', and a sign representation which we call *odd* and
-denote by '1'. The fusion rules of these irreps are the same as for $\mathbb{Z}_2$. Similar
-to the previous case, the local symmetry operator $Q_i$ is already diagonal, so the
-occupation number basis coincides with the irrep basis and we don't need an additional basis
-transform. The important difference with a regular $\mathbb{Z}_2$ symmetry is that the
-irreps of $f\mathbb{Z}_2$ have fermionic braiding statistics, in the sense that exchanging
-two odd irreps gives rise to a minus sign.
-
-In TensorKit.jl, an $f\mathbb{Z}_2$-graded vector spaces is represented as a
-`Vect[FermionParity]` space, where a given $f\mathbb{Z}_2$ irrep can be represented as a
-[`FermionParity`](@ref FermionParity)
-sector instance. Using the simplest instance of a vector space containing a single even and
-odd irrep, we can already demonstrate the corresponding fermionic braiding behavior by
-[performing a permutation](@ref TensorKit.permute)
-on a simple `TensorMap`.
+This fermion parity symmetry, which we will denote as ``f\mathbb{Z}_2``, is a ``\mathbb{Z}_2``-like symmetry in the sense that it has a trivial representation, which we call *even* and again denote by '0', and a sign representation which we call *odd* and denote by '1'.
+The fusion rules of these irreps are the same as for ``\mathbb{Z}_2``.
+Similar to the previous case, the local symmetry operator ``Q_i`` is already diagonal, so the occupation number basis coincides with the irrep basis and we don't need an additional basis transform.
+The important difference with a regular ``\mathbb{Z}_2`` symmetry is that the irreps of ``f\mathbb{Z}_2`` have fermionic braiding statistics, in the sense that exchanging two odd irreps gives rise to a minus sign.
+
+In TensorKit.jl, an ``f\mathbb{Z}_2``-graded vector space is represented as a `Vect[FermionParity]` space, where a given ``f\mathbb{Z}_2`` irrep can be represented as a [`FermionParity`](@ref FermionParity) sector instance.
+Using the simplest instance of a vector space containing a single even and odd irrep, we can already demonstrate the corresponding fermionic braiding behavior by [performing a permutation](@ref TensorKit.permute) on a simple `TensorMap`.
```@example symmetric_tutorial
V = Vect[FermionParity](0 => 1, 1 => 1)
@@ -675,20 +515,15 @@ subblocks(t)
tp = permute(t, ((1,), (3, 2)))
subblocks(tp)
```
-In other words, when exchanging the two domain vector spaces, the reduced tensor elements of
-the `TensorMap` for which both uncoupled irreps in the domain of the corresponding fusion
-tree are odd picks up a minus sign, exactly as we would expect for fermionic charges.
+In other words, when exchanging the two domain vector spaces, the reduced tensor elements of the `TensorMap` for which both uncoupled irreps in the domain of the corresponding fusion tree are odd pick up a minus sign, exactly as we would expect for fermionic charges.
### Constructing the Hamiltonian
-We can directly construct the Hamiltonian terms as symmetric `TensorMap`s using the same
-procedure as before starting from their matrix elements in the occupation number basis.
-However, in this case we should be a bit more careful about the precise definition of the
-basis states in composite systems. Indeed, the tensor product structure of fermionic systems
-is inherently tricky to deal with, and should ideally be treated in the context of
-[*super vector spaces*](https://en.wikipedia.org/wiki/Super_vector_space). For two sites, we
-can define the following basis states on top of the fermionic vacuuum $\ket{00}$:
+We can directly construct the Hamiltonian terms as symmetric `TensorMap`s using the same procedure as before starting from their matrix elements in the occupation number basis.
+However, in this case we should be a bit more careful about the precise definition of the basis states in composite systems.
+Indeed, the tensor product structure of fermionic systems is inherently tricky to deal with, and should ideally be treated in the context of [*super vector spaces*](https://en.wikipedia.org/wiki/Super_vector_space).
+For two sites, we can define the following basis states on top of the fermionic vacuum ``\ket{00}``:
```math
\begin{align*}
\ket{01} &= c_2^+ \ket{00}, \\
@@ -696,8 +531,7 @@ can define the following basis states on top of the fermionic vacuuum $\ket{00}$
\ket{11} &= c_1^+ c_2^+ \ket{00}. \\
\end{align*}
```
-This definition in combination with the anticommutation relations above give rise to the
-nonzero matrix elements
+This definition in combination with the anticommutation relations above give rise to the nonzero matrix elements
```math
\begin{align*}
c_1^+ c_2^- \ket{0, 1} &= \ket{1, 0}, \\
@@ -707,25 +541,21 @@ c_1^- c_2^- \ket{1, 1} &= - \ket{0, 0}, \\
N \ket{n} &= n \ket{n}.
\end{align*}
```
-While the signs in these expressions may seem a little unintuitive at first sight, they are
-essential to the fermionic nature of the system. Indeed, if we for example work out the
-matrix element of $c_1^- c_2^+$ we find
+While the signs in these expressions may seem a little unintuitive at first sight, they are essential to the fermionic nature of the system.
+Indeed, if we for example work out the matrix element of ``c_1^- c_2^+`` we find
```math
\begin{align*}
c_1^- c_2^+ \ket{1, 0} = c_1^- c_2^+ c_1^+ \ket{0, 0} = - c_2^+ c_1^- c_1^+ \ket{0, 0} = - c_2^+ (\mathbb{1} - c_1^+ c_1^-) \ket{0, 0} = - c_2^+ \ket{0, 0} = - \ket{0, 1}. \\
\end{align*}
```
-Once we have these matrix elements the hard part is done, and we can straightforwardly
-associate these to the following $f\mathbb{Z}_2$ fusion trees with corresponding reduced
-tensor elements,
+Once we have these matrix elements the hard part is done, and we can straightforwardly associate these to the following ``f\mathbb{Z}_2`` fusion trees with corresponding reduced tensor elements,
```@raw html
```
-Given this information, we can go through the same procedure again to construct $c^+ c^-$,
-$c^- c^+$ and $N$ operators as `TensorMap`s over $f\mathbb{Z}_2$-graded vector spaces.
+Given this information, we can go through the same procedure again to construct ``c^+ c^-``, ``c^- c^+`` and ``N`` operators as `TensorMap`s over ``f\mathbb{Z}_2``-graded vector spaces.
```@example symmetric_tutorial
V = Vect[FermionParity](0 => 1, 1 => 1)
@@ -784,150 +614,90 @@ subblocks(N)
We can easily verify that all the reduced tensor elements are indeed correct.
!!! note
- Working with fermionic systems is inherently tricky, as can already be seen from something
- as simple as computing matrix elements of fermionic operators. Similarly, while constructing
- symmetric tensors that correspond to the symmetric Hamiltonian terms was still quite
- straightforward, it is far less clear in this case how to construct these terms as
- contractions of local symmetric tensors representing individual creation and annihilation
- operators. While such a decomposition can always be in principle obtained using
- a (now explicitly fermionic) SVD, manually constructing such tensors as we did in the
- bosonic case is far from trivial. Trying this would be a good exercise in working with
- fermionic symmetries, but it is not something we will do here.
+ Working with fermionic systems is inherently tricky, as can already be seen from something as simple as computing matrix elements of fermionic operators.
+ Similarly, while constructing symmetric tensors that correspond to the symmetric Hamiltonian terms was still quite straightforward, it is far less clear in this case how to construct these terms as contractions of local symmetric tensors representing individual creation and annihilation operators.
+ While such a decomposition can always be in principle obtained using a (now explicitly fermionic) SVD, manually constructing such tensors as we did in the bosonic case is far from trivial.
+ Trying this would be a good exercise in working with fermionic symmetries, but it is not something we will do here.
## [Level 4: Non-Abelian symmetries and the quantum Heisenberg model](@id ss_non_abelian)
-We will now move on to systems which have more complicated *non-Abelian* symmetries. For a
-non-Abelian symmetry group $G$, the fact that its elements do not all commute has a profound
-impact on its representation theory. In particular, the irreps of such a group can be higher
-dimensional, and the fusion of two irreps can give rise to multiple different irreps. On the
-one hand, this means that fusion trees of these irreps are no longer completely determined by
-the uncoupled charges. Indeed, in this case some of the [internal structure of the
-`FusionTree` type](@ref sss_fusion_trees) we have ignored before will become relevant (of
-which we will give an [example below](@ref sss_sun_heisenberg)). On the other hand, it
-follows that fusion trees of irreps now not only label reduced tensor elements, but also
-encode a certain *nontrivial symmetry structure*. We will make this statement more precise
-in the following, but the fact that this is necessary is quite intuitive. If we recall our
-original statement that symmetric tensors consist of subblocks associated to fusion trees which
-carry irrep labels, then for higher-dimensional irreps the corresponding fusion trees must
-encode some additional information that implicitly takes into account the internal structure
-of the representation spaces. In particular, this means that the conversion of an operator,
-given its matrix elements in the irrep basis, to the subblocks of the corresponding symmetric
-`TensorMap` is less straightforward since it requires an understanding of exactly what this
-implied internal structure is. Therefore, we require some more discussion before we can
-actually move on to an example.
-
-We'll start by discussing the general structure of a `TensorMap` which is symmetric under a
-non-Abelian group symmetry. We then given an example based on $\mathrm{SU}(2)$, where we
-construct the Heisenberg Hamiltonian using two different approaches. Finally, we show how
-the more intuitive approach can be used to obtain an elegant generalization to the
-$\mathrm{SU}(N)$-symmetric case.
+We will now move on to systems which have more complicated *non-Abelian* symmetries.
+For a non-Abelian symmetry group ``G``, the fact that its elements do not all commute has a profound impact on its representation theory.
+In particular, the irreps of such a group can be higher dimensional, and the fusion of two irreps can give rise to multiple different irreps.
+On the one hand, this means that fusion trees of these irreps are no longer completely determined by the uncoupled charges.
+Indeed, in this case some of the [internal structure of the `FusionTree` type](@ref sss_fusion_trees) we have ignored before will become relevant (of which we will give an [example below](@ref sss_sun_heisenberg)).
+On the other hand, it follows that fusion trees of irreps now not only label reduced tensor elements, but also encode a certain *nontrivial symmetry structure*.
+We will make this statement more precise in the following, but the fact that this is necessary is quite intuitive.
+If we recall our original statement that symmetric tensors consist of subblocks associated to fusion trees which carry irrep labels, then for higher-dimensional irreps the corresponding fusion trees must encode some additional information that implicitly takes into account the internal structure of the representation spaces.
+In particular, this means that the conversion of an operator, given its matrix elements in the irrep basis, to the subblocks of the corresponding symmetric `TensorMap` is less straightforward since it requires an understanding of exactly what this implied internal structure is.
+Therefore, we require some more discussion before we can actually move on to an example.
+
+We'll start by discussing the general structure of a `TensorMap` which is symmetric under a non-Abelian group symmetry.
+We then give an example based on ``\mathsf{SU}_2``, where we construct the Heisenberg Hamiltonian using two different approaches.
+Finally, we show how the more intuitive approach can be used to obtain an elegant generalization to the ``\mathsf{SU}_N``-symmetric case.
### Block sparsity revisited: the Wigner-Eckart theorem
-Let us recall some basics of representation theory first. Consider a group $G$ and a
-corresponding representation space $V$, such that every element $g \in G$ can be realized as
-a unitary operator $U_g : V \to V$. Let $h$ be a `TensorMap` whose domain and codomain are
-given by the tensor product of two of these representation spaces. By definition, the
-statement that '$h$ is symmetric under $G$' means that
+Let us recall some basics of representation theory first. Consider a group ``G`` and a corresponding representation space ``V``, such that every element ``g \in G`` can be realized as a unitary operator ``U_g : V \to V``.
+Let ``h`` be a `TensorMap` whose domain and codomain are given by the tensor product of two of these representation spaces.
+By definition, the statement that '``h`` is symmetric under ``G``' means that
```@raw html
```
-for every $g \in G$. If we label the irreducible representations of $G$ by $l$, then any
-representation space can be decomposed into a direct sum of irreducible representations, $V
-= \bigoplus_l V^{(l)}$, in such a way that $U_g$ is block-diagonal where each matrix block
-is labeled by a particular irrep $l$. For each irrep space $V^{(l)}$ we can define an
-orthonormal basis labeled as $\ket{l, m}$, where the auxiliary label $m$ can take
-$\text{dim}\left( V^{(l)} \right)$ different values. Since we know that tensors are
-multilinear maps over tensor product spaces, it is natural to consider the tensor product of
-representation spaces in more detail.
-
-[From the representation theory of groups](https://en.wikipedia.org/wiki/Tensor_product_of_representations#Clebsch%E2%80%93Gordan_theory),
-it is known that the product of two irreps can in turn be decomposed into a direct sum of
-irreps, $V^{(l_1)} \otimes V^{(l_2)} \cong \bigoplus_{k} V^{(k)}$. The precise nature of
-this decomposition, also refered to as the *Clebsch-Gordan problem*, is given by the
-so-called *Clebsch-Gordan coefficients*, which we will denote as $C^{k}_{l_1,l_2}$. This set
-of coefficients, which can be interpreted as a $\text{dim}\left( V^{(l_1)} \right) \times
-\text{dim}\left( V^{(l_2)} \right) \times \text{dim}\left( V^{(k)} \right)$ array,
-encodes how a basis state $\ket{k,n} \in V^{(k)}$ corresponding to some term in the direct
-sum can be decomposed into a linear combination of basis vectors $\ket{l_1,m_1} \otimes
-\ket{l_2,m_2}$ of the tensor product space:
+for every ``g \in G``.
+If we label the irreducible representations of ``G`` by ``l``, then any representation space can be decomposed into a direct sum of irreducible representations, ``V = \bigoplus_l V^{(l)}``, in such a way that ``U_g`` is block-diagonal where each matrix block is labeled by a particular irrep ``l``.
+For each irrep space ``V^{(l)}`` we can define an orthonormal basis labeled as ``\ket{l, m}``, where the auxiliary label ``m`` can take ``\text{dim}\left( V^{(l)} \right)`` different values.
+Since we know that tensors are multilinear maps over tensor product spaces, it is natural to consider the tensor product of representation spaces in more detail.
+
+[From the representation theory of groups](https://en.wikipedia.org/wiki/Tensor_product_of_representations#Clebsch%E2%80%93Gordan_theory), it is known that the product of two irreps can in turn be decomposed into a direct sum of irreps, ``V^{(l_1)} \otimes V^{(l_2)} \cong \bigoplus_{k} V^{(k)}``.
+The precise nature of this decomposition, also referred to as the *Clebsch-Gordan problem*, is given by the so-called *Clebsch-Gordan coefficients*, which we will denote as ``C^{k}_{l_1,l_2}``.
+This set of coefficients, which can be interpreted as a ``\text{dim}\left( V^{(l_1)} \right) \times \text{dim}\left( V^{(l_2)} \right) \times \text{dim}\left( V^{(k)} \right)`` array, encodes how a basis state ``\ket{k,n} \in V^{(k)}`` corresponding to some term in the direct sum can be decomposed into a linear combination of basis vectors ``\ket{l_1,m_1} \otimes \ket{l_2,m_2}`` of the tensor product space:
```math
\begin{equation}
\label{eq:cg_decomposition}
\ket{k,n} = \sum_{m_1, m_2} \left( C^{k}_{l_1,l_2} \right)^{n}_{m_1, m_2} \ket{l_1,m_1} \otimes \ket{l_2,m_2}.
\end{equation}
```
-These recoupling coefficients turn out to be essential to the structure of symmetric
-tensors, which can be best understood in the context of the
-[Wigner-Eckart theorem](https://en.wikipedia.org/wiki/Wigner%E2%80%93Eckart_theorem). This
-theorem implies that for any
-[`TensorMap` $h$ that is symmetric under $G$](@ref ss_symmetries), its matrix elements in the
-tensor product irrep basis are given by the product of Clebsch-Gordan coefficients which
-characterize the coupling of the basis states in the domain and codomain, and a so-called
-*reduced tensor element* which only depends on the irrep labels. Concretely, the matrix
-element $\bra{l_1,m_1} \otimes \bra{l_2,m_2} h \ket{l_3,m_3} \otimes \ket{l_4,m_4}$ is given
-by
+These recoupling coefficients turn out to be essential to the structure of symmetric tensors, which can be best understood in the context of the [Wigner-Eckart theorem](https://en.wikipedia.org/wiki/Wigner%E2%80%93Eckart_theorem).
+This theorem implies that for any [`TensorMap` ``h`` that is symmetric under ``G``](@ref ss_symmetries), its matrix elements in the tensor product irrep basis are given by the product of Clebsch-Gordan coefficients which characterize the coupling of the basis states in the domain and codomain, and a so-called *reduced tensor element* which only depends on the irrep labels.
+Concretely, the matrix element ``\bra{l_1,m_1} \otimes \bra{l_2,m_2} h \ket{l_3,m_3} \otimes \ket{l_4,m_4}`` is given by
```@raw html
```
-Here, the sum runs over all possible irreps $k$ in the fusion product $l_3 \otimes l_4$ and
-over all basis states $\ket{k,n}$ of $V^{(k)}$. The reduced tensor elements $h_{\text{red}}$
-are independent of the basis state labels and only depend on the irrep labels themselves.
+Here, the sum runs over all possible irreps ``k`` in the fusion product ``l_3 \otimes l_4`` and over all basis states ``\ket{k,n}`` of ``V^{(k)}``.
+The reduced tensor elements ``h_{\text{red}}`` are independent of the basis state labels and only depend on the irrep labels themselves.
Each reduced tensor element should be interpreted as being labeled by an irrep fusion tree,
```@raw html
```
-The fusion tree itself in turn implies the Clebsch-Gordan coefficients $C^{k}_{l_1,l_2}$ and
-conjugate coefficients ${C^{\dagger}}_{k}^{l_1,l_2}$ encode the splitting (decomposition) of
-the coupled basis state $\ket{k,n}$ to the codomain basis states $\ket{l_1,m_1} \otimes
-\ket{l_2,m_2}$ and the coupling of the domain basis states $\ket{l_3,m_3} \otimes
-\ket{l_4,m_4}$ to the coupled basis state $\ket{k,n}$ respectively.
+The fusion tree itself in turn implies the Clebsch-Gordan coefficients ``C^{k}_{l_1,l_2}`` and conjugate coefficients ``{C^{\dagger}}_{k}^{l_1,l_2}``, which encode the splitting (decomposition) of the coupled basis state ``\ket{k,n}`` to the codomain basis states ``\ket{l_1,m_1} \otimes \ket{l_2,m_2}`` and the coupling of the domain basis states ``\ket{l_3,m_3} \otimes \ket{l_4,m_4}`` to the coupled basis state ``\ket{k,n}`` respectively.
-The Wigner-Eckart theorem dictates that this structure in terms of Clebsch-Gordan
-coefficients is necessary to ensure that the corresponding tensor is symmetric. It is
-precisely this structure that is inherently encoded into the fusion tree part of a symmetric
-`TensorMap`. In particular, **the subblock value associated to each fusion tree in a
-symmetric tensor is precisely the reduced tensor element in the Clebsch-Gordan
-decomposition**.
+The Wigner-Eckart theorem dictates that this structure in terms of Clebsch-Gordan coefficients is necessary to ensure that the corresponding tensor is symmetric.
+It is precisely this structure that is inherently encoded into the fusion tree part of a symmetric `TensorMap`.
+In particular, **the subblock value associated to each fusion tree in a symmetric tensor is precisely the reduced tensor element in the Clebsch-Gordan decomposition**.
!!! note
- In the Clebsch-Gordan decomposition given above, our notation has actually silently
- assumed that each irrep $k$ only occurs once in the fusion product of the uncoupled
- irreps $l_1$ and $l_2$. However, there exist symmetries which have **fusion multiplicities**,
- where two irreps can fuse to a given coupled irrep in multiple *distinct* ways. In
- TensorKit.jl, these correspond to `Sector` types with a `GenericFusion <: FusionStyle`
- fusion style. In the presence of fusion multiplicities, the Clebsch-Gordan coefficients
- actually have an additional index which labels the particular fusion channel according
- to which $l_1$ and $l_2$ fuse to $k$. Since the fusion of $\mathrm{SU}(2)$ irreps is
- multiplicity-free, we could safely ignore this nuance here. We will encounter the
- implication of fusion multiplicities shortly, and will consider an example of a symmetry
- which has these multiplicities below.
-
-As a small demonstration of this fact, we can make a simple $\mathrm{SU}(2)$-symmetric
-tensor with trivial subblock values and verify that its implied symmetry structure exactly
-corresponds to the expected Clebsch-Gordan coefficient. First, we [recall](su2_irreps) that
-the irreps of $\mathrm{SU}(2)$ can be labeled by a halfinteger *spin* that takes values $l =
-0, \frac{1}{2}, 1, \frac{3}{2}, ...$, and where the dimension of the spin-$l$ representation
-is equal to $2l + 1$. The fusion rules of $\mathrm{SU}(2)$ are given by
-```math
+ In the Clebsch-Gordan decomposition given above, our notation has actually silently assumed that each irrep ``k`` only occurs once in the fusion product of the uncoupled irreps ``l_1`` and ``l_2``.
+ However, there exist symmetries which have **fusion multiplicities**, where two irreps can fuse to a given coupled irrep in multiple *distinct* ways.
+ In TensorKit.jl, these correspond to `Sector` types with a `GenericFusion <: FusionStyle` fusion style.
+ In the presence of fusion multiplicities, the Clebsch-Gordan coefficients actually have an additional index which labels the particular fusion channel according to which ``l_1`` and ``l_2`` fuse to ``k``.
+ Since the fusion of ``\mathsf{SU}_2`` irreps is multiplicity-free, we could safely ignore this nuance here.
+ We will encounter the implication of fusion multiplicities shortly, and will consider an example of a symmetry which has these multiplicities below.
+
+As a small demonstration of this fact, we can make a simple ``\mathsf{SU}_2``-symmetric tensor with trivial subblock values and verify that its implied symmetry structure exactly corresponds to the expected Clebsch-Gordan coefficient.
+First, we [recall](su2_irreps) that the irreps of ``\mathsf{SU}_2`` can be labeled by a half-integer *spin* that takes values ``l = 0, \frac{1}{2}, 1, \frac{3}{2}, ...``, and where the dimension of the spin-``l`` representation is equal to ``2l + 1``.
+The fusion rules of ``\mathsf{SU}_2`` are given by
+```math
\begin{equation}
\label{eq:su2_fusion_rules}
l_1 \otimes l_2 \cong \bigoplus_{k=|l_1-l_2|}^{l_1+l_2}k.
\end{equation}
```
-These are clearly non-Abelian since multiple terms appear on the right hand side, for
-example $\frac{1}{2} \otimes \frac{1}{2} \cong 0 \oplus 1$. In TensorKit.jl, a
-$\mathrm{SU}(2)$-graded vector space is represented as an
-[`SU2Space`](@ref),
-where a given $\mathrm{SU}(2)$ irrep can be represented as an
-[`SU2Irrep`](@ref)
-instance of integer or halfinteger spin as encoded in its `j` field. If we construct a
-`TensorMap` whose symmetry structure corresponds to the coupling of two spin-$\frac{1}{2}$
-irreps to a spin-$1$ irrep in the sense of \eqref{eq:cg_decomposition}, we can then convert
-it to a plain array and compare it to the $\mathrm{SU}(2)$ Clebsch-Gordan coefficients
-implemented in the [WignerSymbols.jl package](https://github.com/Jutho/WignerSymbols.jl).
+These are clearly non-Abelian since multiple terms appear on the right hand side, for example ``\frac{1}{2} \otimes \frac{1}{2} \cong 0 \oplus 1``.
+In TensorKit.jl, a ``\mathsf{SU}_2``-graded vector space is represented as an [`SU2Space`](@ref), where a given ``\mathsf{SU}_2`` irrep can be represented as an [`SU2Irrep`](@ref) instance of integer or halfinteger spin as encoded in its `j` field.
+If we construct a `TensorMap` whose symmetry structure corresponds to the coupling of two spin-``\frac{1}{2}`` irreps to a spin-``1`` irrep in the sense of \eqref{eq:cg_decomposition}, we can then convert it to a plain array and compare it to the ``\mathsf{SU}_2`` Clebsch-Gordan coefficients implemented in the [WignerSymbols.jl package](https://github.com/Jutho/WignerSymbols.jl).
```@example symmetric_tutorial
V1 = SU2Space(1//2 => 1)
V2 = SU2Space(1 => 1)
@@ -937,13 +707,9 @@ t = ones(ComplexF64, V1 ⊗ V1 ← V2)
```@example symmetric_tutorial
ta = convert(Array, t)
```
-The conversion gives us a $2 \times 2 \times 3$ array, which exactly corresponds to the size
-of the $C^{1}_{\frac{1}{2},\frac{1}{2}}$ Clebsch-Gordan array. In order to explicitly
-compare whether the entries match we need to know the ordering of basis states assumed by
-TensorKit.jl when converting the tensor to its matrix elements in the irrep basis. For
-$\mathrm{SU}(2)$ the irrep basis is ordered in ascending magnetic quantum number $m$, which
-gives us a map $m = i - (l+1)$ for mapping an array index to a corresponding magnetic
-quantum number for the spin-$l$ irrep.
+The conversion gives us a ``2 \times 2 \times 3`` array, which exactly corresponds to the size of the ``C^{1}_{\frac{1}{2},\frac{1}{2}}`` Clebsch-Gordan array.
+In order to explicitly compare whether the entries match we need to know the ordering of basis states assumed by TensorKit.jl when converting the tensor to its matrix elements in the irrep basis.
+For ``\mathsf{SU}_2`` the irrep basis is ordered in ascending magnetic quantum number ``m``, which gives us a map ``m = i - (l+1)`` for mapping an array index to a corresponding magnetic quantum number for the spin-``l`` irrep.
```@example symmetric_tutorial
checks = map(Iterators.product(1:dim(V1), 1:dim(V1), 1:dim(V2))) do (i1, i2, i3)
# map basis state index to magnetic quantum number
@@ -956,70 +722,46 @@ end
@test all(checks)
```
-Based on this discussion, we can quantify the aforementioned 'difficulties' in the inverse
-operation of what we just demonstrated, namely converting a given operator to a symmetric
-`TensorMap` given only its matrix elements in the irrep basis. Indeed, it is now clear that
-this precisely requires isolating the reduced tensor elements introduced above. Given the
-matrix elements of the operator in the irrep basis, this can in general be done by solving
-the system of equations implied by the [Clebsch-Gordan decomposition](wignereckart). A
-simpler way to achieve the same thing is to make use of the fact that the
-[Clebsch-Gordan tensors form a complete orthonormal basis](https://en.wikipedia.org/wiki/Clebsch%E2%80%93Gordan_coefficients#Orthogonality_relations)
-on the coupled space. Indeed, by projecting out the appropriate Clebsch-Gordan coefficients
-and using their orthogonality relations, we can construct a diagonal operator on each
-coupled irrep space $V^{(k)}$. Each of these diagonal operators is proportional to the
-identity, where the proportionality factor is precisely the reduced tensor element
-associated to the corresponding irrep fusion tree.
+Based on this discussion, we can quantify the aforementioned 'difficulties' in the inverse operation of what we just demonstrated, namely converting a given operator to a symmetric `TensorMap` given only its matrix elements in the irrep basis.
+Indeed, it is now clear that this precisely requires isolating the reduced tensor elements introduced above.
+Given the matrix elements of the operator in the irrep basis, this can in general be done by solving the system of equations implied by the [Clebsch-Gordan decomposition](wignereckart).
+A simpler way to achieve the same thing is to make use of the fact that the [Clebsch-Gordan tensors form a complete orthonormal basis](https://en.wikipedia.org/wiki/Clebsch%E2%80%93Gordan_coefficients#Orthogonality_relations) on the coupled space.
+Indeed, by projecting out the appropriate Clebsch-Gordan coefficients and using their orthogonality relations, we can construct a diagonal operator on each coupled irrep space ``V^{(k)}``.
+Each of these diagonal operators is proportional to the identity, where the proportionality factor is precisely the reduced tensor element associated to the corresponding irrep fusion tree.
```@raw html
```
-This procedure works for any group symmetry, and all we need are matrix elements of the
-operator in the irrep basis and the Clebsch-Gordan coefficients. In the following, we
-demonstrate this explicit procedure for the particular example of $G = \mathrm{SU}(2)$.
-However, it should be noted that, for other non-Abelian groups, the Clebsch-Gordan coefficients may not
-be as easy to compute (generically, no closed formulas exist). In addition, the procedure for
-manually projecting out the reduced tensor elements requires being particularly careful
-about the correspondence between the basis states used to define the original matrix
-elements and those implied by the Clebsch-Gordan coefficients. Finally, for some symmetries
-supported in TensorKit.jl, there are simply no Clebsch-Gordan coefficients. Therefore, it is
-often easier and sometimes simply necessary to directly construct the symmetric tensor and
-then fill in its reduced tensor elements based on some representation theory. We will cover
-some examples of this below.
-
-Having introduced and demonstrated the Clebsch-Gordan decomposition, the corresponding
-coefficients and their role in symmetric tensors for the example of $\mathrm{SU}(2)$ using
-the WignerSymbols.jl package, we now continue our discussion using only TensorKit.jl
-internals. Within TensorKit.jl, the
-$\text{dim}\left( V^{(l_1)} \right) \times \text{dim}\left( V^{(l_2)} \right) \times \text{dim}\left( V^{(k)} \right)$
-array of coefficients that encodes the splitting of the irrep space $V^{(k)}$ to the tensor
-product of irrep spaces $V^{(l_1)} \otimes V^{(l_2)}$ according to the Clebsch-Gordan
-decomposition \eqref{eq:cg_decomposition} above can be explicitly constructed by calling the
-[`TensorKitSectors.fusiontensor`](@ref) method on the corresponding `Sector` instances,
-`fusiontensor(l₁, l₂, k)`. This `fusiontensor` is defined for any sector type corresponding
-to a symmetry which admits Clebsch-Gordan coefficients. For our example above,
-we can build the corresponding fusion tensor as
+This procedure works for any group symmetry, and all we need are matrix elements of the operator in the irrep basis and the Clebsch-Gordan coefficients.
+In the following, we demonstrate this explicit procedure for the particular example of ``G = \mathsf{SU}_2``.
+However, it should be noted that, for other non-Abelian groups, the Clebsch-Gordan coefficients may not be as easy to compute (generically, no closed formulas exist).
+In addition, the procedure for manually projecting out the reduced tensor elements requires being particularly careful about the correspondence between the basis states used to define the original matrix elements and those implied by the Clebsch-Gordan coefficients.
+Finally, for some symmetries supported in TensorKit.jl, there are simply no Clebsch-Gordan coefficients.
+Therefore, it is often easier and sometimes simply necessary to directly construct the symmetric tensor and then fill in its reduced tensor elements based on some representation theory.
+We will cover some examples of this below.
+
+Having introduced and demonstrated the Clebsch-Gordan decomposition, the corresponding coefficients and their role in symmetric tensors for the example of ``\mathsf{SU}_2`` using the WignerSymbols.jl package, we now continue our discussion using only TensorKit.jl internals.
+Within TensorKit.jl, the ``\text{dim}\left( V^{(l_1)} \right) \times \text{dim}\left( V^{(l_2)} \right) \times \text{dim}\left( V^{(k)} \right)`` array of coefficients that encodes the splitting of the irrep space ``V^{(k)}`` to the tensor product of irrep spaces ``V^{(l_1)} \otimes V^{(l_2)}`` according to the Clebsch-Gordan decomposition \eqref{eq:cg_decomposition} above can be explicitly constructed by calling the [`TensorKitSectors.fusiontensor`](@ref) method on the corresponding `Sector` instances, `fusiontensor(l₁, l₂, k)`.
+This `fusiontensor` is defined for any sector type corresponding to a symmetry which admits Clebsch-Gordan coefficients.
+For our example above, we can build the corresponding fusion tensor as
```@example symmetric_tutorial
using TensorKit: fusiontensor
f = fusiontensor(SU2Irrep(1//2), SU2Irrep(1//2), SU2Irrep(1))
```
-We see that this fusion tensor has a size `2×2×3×1`, which contains an additional trailing
-`1` to what we might expect. In the general case, `fusiontensor` returns a 4-dimensional
-array, where the size of the first three dimensions corresponds to the dimensions of the
-irrep spaces under consideration, and the last index lables the different fusion channels,
-where its dimension corresponds to the number of distinct ways the irreps $l_1$ and $l_2$
-can fuse to irrep $k$. This is precicely the extra label of the Clebsch-Gordan coefficients
-that is required in the the presence of fusion multiplicities. Since $\mathrm{SU}(2)$ is
-multiplicity-free, we can just discard this last index here.
+We see that this fusion tensor has a size `2×2×3×1`, which contains an additional trailing `1` to what we might expect.
+In the general case, `fusiontensor` returns a four-dimensional array, where the size of the first three dimensions corresponds to the dimensions of the irrep spaces under consideration, and the last index labels the different fusion channels, where its dimension corresponds to the number of distinct ways the irreps ``l_1`` and ``l_2`` can fuse to irrep ``k``.
+This is precisely the extra label of the Clebsch-Gordan coefficients that is required in the presence of fusion multiplicities.
+Since ``\mathsf{SU}_2`` is multiplicity-free, we can just discard this last index here.
We can now explicitly verify that this `fusiontensor` indeed does what we expect it to do:
```@example symmetric_tutorial
@test ta ≈ f[:, :, :, 1]
```
-Of course, in this case `fusiontensor` just calls `Wignersymbols.clebschgordan` under the
-hood. However, `TensorKitSectors.fusiontensor` works for general symmetries, and makes it
-so that we never have to manually assemble the coefficients into an array.
+Of course, in this case `fusiontensor` just calls `WignerSymbols.clebschgordan` under the hood.
+However, `TensorKitSectors.fusiontensor` works for general symmetries, and makes it so that we never have to manually assemble the coefficients into an array.
### The 'generic' approach to the spin-1 Heisenberg model: Wigner-Eckart in action
@@ -1028,17 +770,13 @@ Consider the spin-1 Heisenberg model with Hamiltonian
```math
H = J \sum_{\langle i,j \rangle} \vec{S}_i \cdot \vec{S}_j
```
-where $\vec{S} = (S^x, S^y, S^z)$ are the spin operators. The physical Hilbert space at each
-site is the three-dimensional spin-1 irrep of $\mathrm{SU}(2)$. Each two-site exchange
-operator $\vec{S}_i \cdot \vec{S}_j$ in the sum commutes with a global transformation $g \in
-\mathrm{SU}(2)$, so that it satisfies the [above symmetry condition](symmetric_tensor).
-Therefore, we can represent it as an $\mathrm{SU}(2)$-symmetric `TensorMap`, as long as we
-can isolate its reduced tensor elements.
-
-In order to apply the above procedure, we first require the matrix elements in the irrep
-basis. These can be constructed as a $3 \times 3 \times 3 \times 3$ array `SS` using the
-[familiar representation of the $\mathrm{SU}(2)$ generators in the spin-1 representation](https://en.wikipedia.org/wiki/Spin_(physics)#Higher_spins),
-with respect to the $\{\ket{1,-1}, \ket{1,0}, \ket{1,1}\}$ basis.
+where ``\vec{S} = (S^x, S^y, S^z)`` are the spin operators.
+The physical Hilbert space at each site is the three-dimensional spin-``1`` irrep of ``\mathsf{SU}_2``.
+Each two-site exchange operator ``\vec{S}_i \cdot \vec{S}_j`` in the sum commutes with a global transformation ``g \in \mathsf{SU}_2``, so that it satisfies the [above symmetry condition](symmetric_tensor).
+Therefore, we can represent it as an ``\mathsf{SU}_2``-symmetric `TensorMap`, as long as we can isolate its reduced tensor elements.
+
+In order to apply the above procedure, we first require the matrix elements in the irrep basis.
+These can be constructed as a ``3 \times 3 \times 3 \times 3`` array `SS` using the [familiar representation of the ``\mathsf{SU}_2`` generators in the spin-1 representation](https://en.wikipedia.org/wiki/Spin_(physics)#Higher_spins), with respect to the ``\{\ket{1,-1}, \ket{1,0}, \ket{1,1}\}`` basis.
```@example symmetric_tutorial
Sx = 1 / sqrt(2) * ComplexF64[0 1 0; 1 0 1; 0 1 0]
Sy = 1 / sqrt(2) * ComplexF64[0 1im 0; -1im 0 1im; 0 -1im 0]
@@ -1048,11 +786,9 @@ Sz = ComplexF64[-1 0 0; 0 0 0; 0 0 1]
nothing #hide
```
-The next step is to project out the reduced tensor elements by taking the overlap with the
-appropriate Clebsch-Gordan coefficients. In our current case of a spin-1 physical space, we
-have $l_1 = l_2 = l_3 = l_4 = 1$, and the coupled irrep $k$ can therefore take the values
-$0, 1, 2$. The reduced tensor element for a given $k$ can be implemented in the
-following way:
+The next step is to project out the reduced tensor elements by taking the overlap with the appropriate Clebsch-Gordan coefficients.
+In our current case of a spin-``1`` physical space, we have ``l_1 = l_2 = l_3 = l_4 = 1``, and the coupled irrep ``k`` can therefore take the values ``0, 1, 2``.
+The reduced tensor element for a given ``k`` can be implemented in the following way:
```@example symmetric_tutorial
function get_reduced_element(k::SU2Irrep)
# construct Clebsch-Gordan coefficients for coupling 1 ⊗ 1 to k
@@ -1065,7 +801,7 @@ function get_reduced_element(k::SU2Irrep)
return reduced_matrix[1, 1]
end
```
-If we use this to compute the reduced tensor elements for $k = 0, 1, 2$,
+If we use this to compute the reduced tensor elements for ``k = 0, 1, 2``,
```@example symmetric_tutorial
get_reduced_element(SU2Irrep(0))
```
@@ -1097,8 +833,7 @@ we can read off the entries
1,1
\end{smallmatrix} = 1, \quad
```
-These can then be used to construct the symmetric `TensorMap` representing the exchange
-interaction:
+These can then be used to construct the symmetric `TensorMap` representing the exchange interaction:
```@example symmetric_tutorial
V = SU2Space(1 => 1)
SS = zeros(ComplexF64, V ⊗ V ← V ⊗ V)
@@ -1109,69 +844,48 @@ end
subblocks(SS)
```
-We demonstrated this entire procedure of extracting the reduced tensor elements of a
-symmetric tensor map for each fusion tree by projecting out the corresponding fusion tensors
-as an explicit illustration of how symmetric tensor maps work under the hood. In practice
-however, there is no need to perform this procedure explicitly. Given a dense array
-representing the matrix elements of a tensor map in the irrep basis, we can convert this to
-the corresponding symmetric tensor map by passing the data array to the `TensorMap`
-constructor along with the corresponding spaces,
+We demonstrated this entire procedure of extracting the reduced tensor elements of a symmetric tensor map for each fusion tree by projecting out the corresponding fusion tensors as an explicit illustration of how symmetric tensor maps work under the hood.
+In practice however, there is no need to perform this procedure explicitly.
+Given a dense array representing the matrix elements of a tensor map in the irrep basis, we can convert this to the corresponding symmetric tensor map by passing the data array to the `TensorMap` constructor along with the corresponding spaces,
```@example symmetric_tutorial
SS_auto = TensorMap(SS_arr, V ⊗ V ← V ⊗ V)
@test SS_auto ≈ SS
```
!!! warning
- While the example demonstrated here seems fairly straightforward, there's some inherent
- challenges to directly initializing a symmetric tensor map from a full dense array. A first
- important point to reiterate here is that in order for this procedure to work, we had to
- initialize `SS_arr` by assuming an internal basis convention for the $\mathrm{SU}(2)$
- representation space $V^{(1)}$ that is consistent with the convention used by
- `fusiontensor`. While that choice here, corresponding to an ascending magnetic quantum
- number $m = -1, 0, 1$, seems quite natural, for many symmetries there is no transparent
- natural choice. In those cases, the only way to use this approach is to explicitly check the
- basis convention used by [`TensorKitSectors.fusiontensor`](@ref) for that specific symmetry.
- On top of this, there are some additional complications when considering graded spaces which
- contain multiple sectors with non-trivial degeneracies. In that case, to even initialize the
- dense data array in the first place, you would need to know the order in which the sectors
- appear in each space internally. This information can be obtained by calling `axes(V, c)`,
- where `V` and `c` are either an [`ElementarySpace`](@ref) and a [`Sector`](@ref), or a
- [`ProductSpace`](@ref) and a `Tuple` of `Sector`s respectively.
+    While the example demonstrated here seems fairly straightforward, there are some inherent challenges to directly initializing a symmetric tensor map from a full dense array.
+ A first important point to reiterate here is that in order for this procedure to work, we had to initialize `SS_arr` by assuming an internal basis convention for the ``\mathsf{SU}_2`` representation space ``V^{(1)}`` that is consistent with the convention used by `fusiontensor`.
+ While that choice here, corresponding to an ascending magnetic quantum number ``m = -1, 0, 1``, seems quite natural, for many symmetries there is no transparent natural choice.
+ In those cases, the only way to use this approach is to explicitly check the basis convention used by [`TensorKitSectors.fusiontensor`](@ref) for that specific symmetry.
+ On top of this, there are some additional complications when considering graded spaces which contain multiple sectors with non-trivial degeneracies.
+ In that case, to even initialize the dense data array in the first place, you would need to know the order in which the sectors appear in each space internally.
+ This information can be obtained by calling `axes(V, c)`, where `V` and `c` are either an [`ElementarySpace`](@ref) and a [`Sector`](@ref), or a [`ProductSpace`](@ref) and a `Tuple` of `Sector`s respectively.
### An 'elegant' approach to the Heisenberg model
-As noted above, the explicit procedure of projecting out the reduced tensor elements from
-the action of an operator in the irrep basis can be a bit cumbersome for more complicated
-groups. However, using some basic representation theory we can bypass this step altogether
-for the Heisenberg model. First, we rewrite the exchange interaction in the following way:
+As noted above, the explicit procedure of projecting out the reduced tensor elements from the action of an operator in the irrep basis can be a bit cumbersome for more complicated groups.
+However, using some basic representation theory we can bypass this step altogether for the Heisenberg model.
+First, we rewrite the exchange interaction in the following way:
```math
\begin{equation}
\label{eq:casimir_decomp}
\vec{S}_i \cdot \vec{S}_j = \frac{1}{2} \left( \left( \vec{S}_i + \vec{S}_j \right)^2 - \vec{S}_i^2 - \vec{S}_j^2 \right)
\end{equation}
```
-Here, $\vec{S}_i$ and $\vec{S}_j$ are spin operators on the physical irrep, while total spin
-operator $\vec{S}_i + \vec{S}_j$ can be decomposed onto the different coupled irreps $k$. It
-is a well known fact that the quadratic sum of the generators of $\mathrm{SU}(2)$, often
-refered to as the
-[*quadratic Casimir*](https://en.wikipedia.org/wiki/Representation_theory_of_SU(2)#The_Casimir_element),
-commutes with all generators. By
-[Schur's lemma](https://en.wikipedia.org/wiki/Schur%27s_lemma), it must then act
-proportionally to the identity on every irrep, where the corresponding eigenvalue is
-determined by the spin irrep label. In particular, we have for each irrep $l$
+Here, ``\vec{S}_i`` and ``\vec{S}_j`` are spin operators on the physical irrep, while the total spin operator ``\vec{S}_i + \vec{S}_j`` can be decomposed onto the different coupled irreps ``k``.
+It is a well-known fact that the quadratic sum of the generators of ``\mathsf{SU}_2``, often referred to as the [*quadratic Casimir*](https://en.wikipedia.org/wiki/Representation_theory_of_SU(2)#The_Casimir_element), commutes with all generators.
+By [Schur's lemma](https://en.wikipedia.org/wiki/Schur%27s_lemma), it must then act proportionally to the identity on every irrep, where the corresponding eigenvalue is determined by the spin irrep label.
+In particular, we have for each irrep ``l``
```math
\vec{S}^2 \ket{l,m} = l(l+1) \ket{l,m}.
```
-It then follows from Eq. \eqref{eq:casimir_decomp} that the reduced tensor elements of the
-exchange interaction are completely determined by the eigenvalue of the quadratic Casimir on
-the uncoupled and coupled irreps. Indeed, to each fusion tree we can associate a
-well-defined value
+It then follows from Eq. \eqref{eq:casimir_decomp} that the reduced tensor elements of the exchange interaction are completely determined by the eigenvalue of the quadratic Casimir on the uncoupled and coupled irreps.
+Indeed, to each fusion tree we can associate a well-defined value
```@raw html
```
-This gives us all we need to directly construct the exchange interaction as a symmetric
-`TensorMap`,
+This gives us all we need to directly construct the exchange interaction as a symmetric `TensorMap`,
```@example symmetric_tutorial
V = SU2Space(1 => 1)
SS = zeros(ComplexF64, V ⊗ V ← V ⊗ V)
@@ -1186,82 +900,62 @@ subblocks(SS)
which gives exactly the same result as the previous approach.
!!! note
- This last construction for the exchange interaction immediatly generalizes to any value of
- the physical spin. All we need is to fill in the appropriate values for the uncoupled irreps
- $l_1$, $l_2$, $l_3$ and $l_4$.
-
-
-### [$\mathrm{SU}(N)$ generalization](@id sss_sun_heisenberg)
-
-We end this subsection with some comments on the generalization of the above discussion to
-$\mathrm{SU}(N)$. As foreshadowed above, the irreps of $\mathrm{SU}(N)$ in general have an
-even more complicated structure. In particular, they can admit so-called *fusion
-multiplicities*, where the fusion of two irreps can have not only multiple distinct
-outcomes, but they can even fuse to a given irrep in multiple inequivalent ways. We can
-demonstrate this behavior for the adjoint representation of $\mathrm{SU}(3)$. For this we
-can use the the
-[SUNRepresentations.jl](https://github.com/QuantumKitHub/SUNRepresentations.jl)
-package which provides an interface for working with irreps of $\mathrm{SU}(N)$ and their
-Clebsch-Gordan coefficients. A particular representation is represented by an `SUNIrrep{N}`
-which can be used with TensorKit.jl. The eight-dimensional adjoint representation of
-$\mathrm{SU}(3)$ is given by
+ This last construction for the exchange interaction immediately generalizes to any value of the physical spin.
+ All we need is to fill in the appropriate values for the uncoupled irreps ``l_1``, ``l_2``, ``l_3`` and ``l_4``.
+
+### [``\mathsf{SU}_N`` generalization](@id sss_sun_heisenberg)
+
+We end this subsection with some comments on the generalization of the above discussion to ``\mathsf{SU}_N``.
+As foreshadowed above, the irreps of ``\mathsf{SU}_N`` in general have an even more complicated structure.
+In particular, they can admit so-called *fusion multiplicities*, where the fusion of two irreps can have not only multiple distinct outcomes, but they can even fuse to a given irrep in multiple inequivalent ways.
+We can demonstrate this behavior for the adjoint representation of ``\mathsf{SU}_3``.
+For this we can use the [SUNRepresentations.jl](https://github.com/QuantumKitHub/SUNRepresentations.jl) package which provides an interface for working with irreps of ``\mathsf{SU}_N`` and their Clebsch-Gordan coefficients.
+A particular representation is represented by an `SUNIrrep{N}` which can be used with TensorKit.jl.
+The eight-dimensional adjoint representation of ``\mathsf{SU}_3`` is given by
```@setup symmetric_tutorial
SUNRepresentations.display_mode("dimension")
```
```@example symmetric_tutorial
l = SU3Irrep("8")
```
-If we look at the possible outcomes of fusing two adjoint irreps, we find the by now
-familiar non-Abelian fusion behavior,
+If we look at the possible outcomes of fusing two adjoint irreps, we find the by now familiar non-Abelian fusion behavior,
```@example symmetric_tutorial
collect(l ⊗ l)
```
-However, this particular fusion has multiplicities, since the adjoint irrep can actually
-fuse to itself in two distinct ways. The full decomposition of this fusion product is given
-by
+However, this particular fusion has multiplicities, since the adjoint irrep can actually fuse to itself in two distinct ways.
+The full decomposition of this fusion product is given by
```math
\mathbf{8} \otimes \mathbf{8} = \mathbf{1} \oplus \mathbf{3} \oplus 2 \cdot \mathbf{8} \oplus \mathbf{10} \oplus \mathbf{\overline{10}} \oplus \mathbf{27}
```
-This fusion multiplicity can be detected by using
-[`Nsymbol`](@ref)
-method from TensorKit.jl to inspect the number of times `l` appears in the fusion product
-`l ⊗ l`,
+This fusion multiplicity can be detected by using the [`Nsymbol`](@ref) method from TensorKit.jl to inspect the number of times `l` appears in the fusion product `l ⊗ l`,
```@example symmetric_tutorial
Nsymbol(l, l, l)
```
-When working with irreps with fusion multiplicities, each `FusionTree` carries additional
-`vertices` labels which label which of the distinct fusion vertices is being referred to. We
-will return to this at the end of this section.
+When working with irreps with fusion multiplicities, each `FusionTree` carries additional `vertices` labels that specify which of the distinct fusion vertices is being referred to.
+We will return to this at the end of this section.
-Given the generators $T^k$ of $\mathrm{SU}(N)$, we can define a generalized Heisenberg model
-using a similar exchange interaction, giving the Hamiltonian
+Given the generators ``T^k`` of ``\mathsf{SU}_N``, we can define a generalized Heisenberg model using a similar exchange interaction, giving the Hamiltonian
```math
H = J \sum_{\langle i,j \rangle} \vec{T}_i \cdot \vec{T}_j
```
-For a particular choice of physical irrep, the exchange interaction can again be constructed
-as a symmetric `TensorMap` by first rewriting it as
+For a particular choice of physical irrep, the exchange interaction can again be constructed as a symmetric `TensorMap` by first rewriting it as
```math
\vec{T}_i \cdot \vec{T}_j = \frac{1}{2} \left( \left( \vec{T}_i + \vec{T}_j \right)^2 - \vec{T}_i^2 - \vec{T}_j^2 \right).
```
-For any $N$, the [quadratic Casimir](https://en.wikipedia.org/wiki/Casimir_element#Quadratic_Casimir_element)
+For any ``N``, the [quadratic Casimir](https://en.wikipedia.org/wiki/Casimir_element#Quadratic_Casimir_element)
```math
\Omega = \sum_k T^k T^k
```
-commutes with all $\mathrm{SU}(N)$ generators, meaning it has a well defined eigenvalue in
-each irrep. This observation then immediately given the reduced tensor elements of the
-exchange interaction as
+commutes with all ``\mathsf{SU}_N`` generators, meaning it has a well defined eigenvalue in each irrep.
+This observation then immediately gives the reduced tensor elements of the exchange interaction as
```@raw html
```
-Using these to directly construct the corresponding symmetric `TensorMap` is much simpler
-than going through the explicit projection procedure using Clebsch-Gordan coefficients.
+Using these to directly construct the corresponding symmetric `TensorMap` is much simpler than going through the explicit projection procedure using Clebsch-Gordan coefficients.
-For the particular example of $\mathrm{SU}(3)$, the generators are given by $T^k =
-\frac{1}{2} \lambda^k$ , where $\lambda^k$ are the
-[Gell-Mann matrices](https://en.wikipedia.org/wiki/Clebsch%E2%80%93Gordan_coefficients_for_SU(3)#Generators_of_the_Lie_algebra).
-Each irrep can be labeled as $l = D(p,q)$ where $p$ and $q$ are refered to as the *Dynkin
-labels*. The eigenvalue of the quadratic Casimir for a given irrep is given by
-[Freudenthal's formula](https://en.wikipedia.org/wiki/Weyl_character_formula#Freudenthal's_formula),
+For the particular example of ``\mathsf{SU}_3``, the generators are given by ``T^k = \frac{1}{2} \lambda^k``, where ``\lambda^k`` are the [Gell-Mann matrices](https://en.wikipedia.org/wiki/Clebsch%E2%80%93Gordan_coefficients_for_SU(3)#Generators_of_the_Lie_algebra).
+Each irrep can be labeled as ``l = D(p,q)`` where ``p`` and ``q`` are referred to as the *Dynkin labels*.
+The eigenvalue of the quadratic Casimir for a given irrep is given by [Freudenthal's formula](https://en.wikipedia.org/wiki/Weyl_character_formula#Freudenthal's_formula),
```math
\Omega(D(p,q)) = \frac{1}{3} (p^2 + q^2 + 3p + 3q + pq).
```
@@ -1272,8 +966,7 @@ function casimir(l::SU3Irrep)
return (p^2 + q^2 + 3 * p + 3 * q + p * q) / 3
end
```
-If we use the adjoint representation of $\mathrm{SU}(3)$ as physical space, the Heisenberg
-exchange interaction can then be constructed as
+If we use the adjoint representation of ``\mathsf{SU}_3`` as physical space, the Heisenberg exchange interaction can then be constructed as
```@example symmetric_tutorial
V = Vect[SUNIrrep{3}](SU3Irrep("8") => 1)
TT = zeros(ComplexF64, V ⊗ V ← V ⊗ V)
@@ -1285,8 +978,7 @@ for (s, f) in fusiontrees(TT)
end
subblocks(TT)
```
-Circling back to our earlier remark, we clearly see that the fusion trees of this tensor
-indeed have non-trivial vertex labels.
+Circling back to our earlier remark, we clearly see that the fusion trees of this tensor indeed have non-trivial vertex labels.
```@example symmetric_tutorial
f = collect(fusiontrees(TT))[4][2]
```
@@ -1295,45 +987,33 @@ f.vertices
```
!!! note
- While we have given an explicit example using $\mathrm{SU}(3)$ with the adoint irrep on the
- physical level, the same construction holds for the general $\mathrm{SU}(N)$ with arbitrary
- physical irreps. All we require is the expression for the eigenvalues of the quadratic
- Casimir in each irrep.
+    While we have given an explicit example using ``\mathsf{SU}_3`` with the adjoint irrep on the physical level, the same construction holds for the general ``\mathsf{SU}_N`` with arbitrary physical irreps.
+ All we require is the expression for the eigenvalues of the quadratic Casimir in each irrep.
## Level 5: Anyonic Symmetries and the Golden Chain
-While we have focussed exclusively on group-like symmetries in our discussion so far, the
-framework of symmetric tensors actually extends beyond groups to so-called
-[*categorical symmetries*](@ref ss_representationtheory).
-These are quite exotic symmetries characterized in terms of
-[the topological data of a unitary fusion category](@ref ss_topologicalfusion).
-While the precise details of all the terms in these statements fall beyond the scope of this
-tutorial, we can give a simple example of a Hamiltonian model with a categorical symmetry
-called [the golden chain](https://arxiv.org/abs/cond-mat/0612341).
-
-This is a one-dimensional system defined as a spin chain, where each physical 'spin'
-corresponds to a so-called [Fibonacci anyon](https://arxiv.org/abs/0902.3275). There are two
-such Fibonacci anyons, which we will denote as $1$ and $\tau$. They obey the fusion rules
+While we have focussed exclusively on group-like symmetries in our discussion so far, the framework of symmetric tensors actually extends beyond groups to so-called [*categorical symmetries*](@ref ss_representationtheory).
+These are quite exotic symmetries characterized in terms of [the topological data of a unitary fusion category](@ref ss_topologicalfusion).
+While the precise details of all the terms in these statements fall beyond the scope of this tutorial, we can give a simple example of a Hamiltonian model with a categorical symmetry called [the golden chain](https://arxiv.org/abs/cond-mat/0612341).
+
+This is a one-dimensional system defined as a spin chain, where each physical 'spin' corresponds to a so-called [Fibonacci anyon](https://arxiv.org/abs/0902.3275).
+There are two such Fibonacci anyons, which we will denote as ``1`` and ``\tau``.
+They obey the fusion rules
```math
1 \otimes 1 = 1, \quad 1 \otimes \tau = \tau, \quad \tau \otimes \tau = 1 \oplus \tau.
```
-The Hilbert space of a chain of Fibonacci anyons is not a regular tensor product space, but
-rather a *constrained Hilbert space* where the only allowed basis states are labeled by
-valid Fibonacci fusion configurations. In the golden chain model, we define a
-nearest-neighbor Hamiltonian on this Hilbert space by imposing an energy penalty when two
-neighboring anyons fuse to a $\tau$ anyon.
-
-Even just writing down an explicit expression for this interaction on such a constrained
-Hilbert space is not entirely straightforward. However, using the framework of symmetric
-tensors it can actually be explicitly constructed in a very straightforward way. Indeed,
-TensorKit.jl supports a dedicated [`FibonacciAnyon`](@ref) sector type which can be used to
-construct precisely such a constrained Fibonacci-graded vector space. A Hamiltonian
+The Hilbert space of a chain of Fibonacci anyons is not a regular tensor product space, but rather a *constrained Hilbert space* where the only allowed basis states are labeled by valid Fibonacci fusion configurations.
+In the golden chain model, we define a nearest-neighbor Hamiltonian on this Hilbert space by imposing an energy penalty when two neighboring anyons fuse to a ``\tau`` anyon.
+
+Even just writing down an explicit expression for this interaction on such a constrained Hilbert space is not entirely straightforward.
+However, using the framework of symmetric tensors it can actually be explicitly constructed in a very straightforward way.
+Indeed, TensorKit.jl supports a dedicated [`FibonacciAnyon`](@ref) sector type which can be used to construct precisely such a constrained Fibonacci-graded vector space.
+A Hamiltonian
```math
H = \sum_{\langle i,j \rangle} h_{ij}
```
-which favors neighboring anyons fusing to the vacuum can be constructed as a `TensorMap` on
-the product space of two Fibonacci-graded physical spaces
+which favors neighboring anyons fusing to the vacuum can be constructed as a `TensorMap` on the product space of two Fibonacci-graded physical spaces
```@example symmetric_tutorial
V = Vect[FibonacciAnyon](:τ => 1)
```
@@ -1341,8 +1021,7 @@ and assigning the following nonzero subblock value to the two-site fusion trees
```@raw html
```
-This allows us to define this, at first sight, exotic and complicated Hamiltonian in a few
-simple lines of code,
+This allows us to define this, at first sight, exotic and complicated Hamiltonian in a few simple lines of code,
```@example symmetric_tutorial
h = ones(V ⊗ V ← V ⊗ V)
for (s, f) in fusiontrees(h)
@@ -1352,11 +1031,6 @@ subblocks(h)
```
!!! note
- In the previous section we have stressed the role of Clebsch-Gordan coefficients in
- the structure of symmetric tensors, and how they can be used to map between the
- representation of an operator in the irrep basis and its symmetric tensor representation.
- However, for categorical symmetries such as the Fibonacci anyons, there are no
- Clebsch-Gordan coefficients. Therefore, the 'matrix elements of the operator in the irrep
- basis' are not well-defined, meaning that a Fibonacci-symmetric tensor cannot actually be
- converted to a plain array in a straightforward way.
-
+ In the previous section we have stressed the role of Clebsch-Gordan coefficients in the structure of symmetric tensors, and how they can be used to map between the representation of an operator in the irrep basis and its symmetric tensor representation.
+ However, for categorical symmetries such as the Fibonacci anyons, there are no Clebsch-Gordan coefficients.
+ Therefore, the 'matrix elements of the operator in the irrep basis' are not well-defined, meaning that a Fibonacci-symmetric tensor cannot actually be converted to a plain array in a straightforward way.
diff --git a/docs/src/index.md b/docs/src/index.md
index bdd93f541..4ff58a98f 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -8,34 +8,23 @@ CurrentModule = TensorKit
## Package summary
-TensorKit.jl aims to be a generic package for working with tensors as they appear throughout
-the physical sciences. TensorKit implements a parametric type [`Tensor`](@ref) (which is
-actually a specific case of the type [`TensorMap`](@ref)) and defines for these types a
-number of vector space operations (scalar multiplication, addition, norms and inner
-products), index operations (permutations) and linear algebra operations (multiplication,
-factorizations). Finally, tensor contractions can be performed using the `@tensor` macro
-from [TensorOperations.jl](https://github.com/QuantumKitHub/TensorOperations.jl).
-
-Currently, most effort is oriented towards tensors as they appear in the context of quantum
-many-body physics and in particular the field of tensor networks. Such tensors often have
-large dimensions and take on a specific structure when symmetries are present. By employing
-concepts from category theory, we can represent and manipulate tensors with a large
-variety of symmetries, including abelian and non-abelian symmetries, fermionic statistics,
-as well as generalized (a.k.a. non-invertible or anyonic) symmetries.
-
-At the same time, TensorKit.jl focusses on computational efficiency and performance. The
-underlying storage of a tensor's data can be any `DenseArray`. When the data is stored
-in main memory (corresponding to `Array`), multiple CPUs can be leveraged as many
-operations come with multithreaded implementations, either by distributing the different
-blocks in case of a structured tensor (i.e. with symmetries) or by using multithreading
-provided by the package [Strided.jl](https://github.com/Jutho/Strided.jl). Support for
-storing and manipulating tensors on NVidia and AMD GPUs is currently being developed,
-whereas support for distributed arrays is planned for the future.
+TensorKit.jl aims to be a generic package for working with tensors as they appear throughout the physical sciences.
+TensorKit implements a parametric type [`Tensor`](@ref) (which is actually a specific case of the type [`TensorMap`](@ref)) and defines for these types a number of vector space operations (scalar multiplication, addition, norms and inner products), index operations (permutations) and linear algebra operations (multiplication, factorizations).
+Finally, tensor contractions can be performed using the `@tensor` macro from [TensorOperations.jl](https://github.com/QuantumKitHub/TensorOperations.jl).
+
+Currently, most effort is oriented towards tensors as they appear in the context of quantum many-body physics and in particular the field of tensor networks.
+Such tensors often have large dimensions and take on a specific structure when symmetries are present.
+By employing concepts from category theory, we can represent and manipulate tensors with a large variety of symmetries, including abelian and non-abelian symmetries, fermionic statistics, as well as generalized (a.k.a. non-invertible or anyonic) symmetries.
+
+At the same time, TensorKit.jl focusses on computational efficiency and performance.
+The underlying storage of a tensor's data can be any `DenseArray`.
+When the data is stored in main memory (corresponding to `Array`), multiple CPUs can be leveraged as many operations come with multithreaded implementations, either by distributing the different blocks in case of a structured tensor (i.e. with symmetries) or by using multithreading provided by the package [Strided.jl](https://github.com/Jutho/Strided.jl).
+Support for storing and manipulating tensors on Nvidia and AMD GPUs is currently being developed, whereas support for distributed arrays is planned for the future.
## Contents of the manual
```@contents
-Pages = ["man/intro.md", "man/spaces.md", "man/sectors.md", "man/tensors.md"]
+Pages = ["man/intro.md", "man/spaces.md", "man/symmetries.md", "man/sectors.md", "man/gradedspaces.md", "man/fusiontrees.md", "man/tensors.md", "man/tensormanipulations.md"]
Depth = 2
```
diff --git a/docs/src/lib/fusiontrees.md b/docs/src/lib/fusiontrees.md
index b3cdb041e..e8eb84d68 100644
--- a/docs/src/lib/fusiontrees.md
+++ b/docs/src/lib/fusiontrees.md
@@ -31,8 +31,7 @@ braid(f::FusionTree{I,N}, levels::NTuple{N,Int}, p::NTuple{N,Int}) where {I<:Sec
permute(f::FusionTree{I,N}, p::NTuple{N,Int}) where {I<:Sector,N}
```
-These can be composed to implement elementary manipulations of fusion-splitting tree pairs,
-according to the following methods
+These can be composed to implement elementary manipulations of fusion-splitting tree pairs, according to the following methods
```julia
# TODO: add documentation for the following methods
@@ -44,9 +43,8 @@ TensorKit.cycleclockwise
TensorKit.cycleanticlockwise
```
-Finally, these are used to define large manipulations of fusion-splitting tree pairs, which
-are then used in the index manipulation of `AbstractTensorMap` objects. The following methods
-defined on fusion splitting tree pairs have an associated definition for tensors.
+Finally, these are used to define large manipulations of fusion-splitting tree pairs, which are then used in the index manipulation of `AbstractTensorMap` objects.
+The following methods defined on fusion splitting tree pairs have an associated definition for tensors.
```@docs
repartition(::FusionTree{I,N₁}, ::FusionTree{I,N₂}, ::Int) where {I<:Sector,N₁,N₂}
transpose(::FusionTree{I}, ::FusionTree{I}, ::IndexTuple{N₁}, ::IndexTuple{N₂}) where {I<:Sector,N₁,N₂}
diff --git a/docs/src/lib/sectors.md b/docs/src/lib/sectors.md
index 9d919c72c..d01ffe09c 100644
--- a/docs/src/lib/sectors.md
+++ b/docs/src/lib/sectors.md
@@ -35,10 +35,7 @@ TimeReversed
ProductSector
```
-Several more concrete sector types can be found in other packages such as
-[SUNRepresentations.jl](https://github.com/QuantumKitHub/SUNRepresentations.jl),
-[CategoryData.jl](https://github.com/QuantumKitHub/CategoryData.jl),
-[QWignerSymbols.jl](https://github.com/lkdvos/QWignerSymbols.jl), ...:
+Several more concrete sector types can be found in other packages such as [SUNRepresentations.jl](https://github.com/QuantumKitHub/SUNRepresentations.jl), [CategoryData.jl](https://github.com/QuantumKitHub/CategoryData.jl), [QWignerSymbols.jl](https://github.com/lkdvos/QWignerSymbols.jl), ...:
Some of these types are parameterized by a type parameter that represents a group.
We therefore also provide a number of types to represent groups:
@@ -54,8 +51,7 @@ TensorKitSectors.Dihedral
TensorKitSectors.ProductGroup
```
-The following types are used to characterise different properties of the different types
-of sectors:
+The following types are used to characterize different properties of the different types of sectors:
```@docs
FusionStyle
@@ -63,8 +59,7 @@ BraidingStyle
UnitStyle
```
-Finally, the following auxiliary types are defined to facilitate the implementation
-of some of the methods on sectors:
+Finally, the following auxiliary types are defined to facilitate the implementation of some of the methods on sectors:
```@docs
TensorKitSectors.SectorValues
@@ -73,8 +68,7 @@ TensorKitSectors.SectorProductIterator
## Useful constants
-The following constants are defined to facilitate obtaining the type associated
-with the group elements or the irreducible representations of a given group:
+The following constants are defined to facilitate obtaining the type associated with the group elements or the irreducible representations of a given group:
```@docs
Irrep
@@ -83,8 +77,7 @@ GroupElement
## Methods for characterizing and manipulating `Sector` objects
-The following methods can be used to obtain properties such as topological data
-of sector objects, or to manipulate them or create related sectors:
+The following methods can be used to obtain properties such as topological data of sector objects, or to manipulate them or create related sectors:
```@docs
unit
@@ -107,8 +100,7 @@ TensorKitSectors.sectorscalartype
deligneproduct(::Sector, ::Sector)
```
-We have also the following methods that are specific to certain types of sectors
-and serve as accessors to their fields:
+We have also the following methods that are specific to certain types of sectors and serve as accessors to their fields:
```@docs
charge
@@ -121,8 +113,14 @@ Furthermore, we also have one specific method acting on groups, represented as t
×
```
-Because we sometimes want to customize the string representation of our sector types,
-we also have the following method:
+Mapping between sectors and linear indices is only used for sectors `I` for which `Base.IteratorSize(values(I)) == HasLength()`.
+In that case, we map an index `i` to a sector `c` via `c = getindex(values(I), i)`, and provide an inverse mapping
+
+```@docs
+TensorKitSectors.findindex
+```
+
+Because we sometimes want to customize the string representation of our sector types, we also have the following method:
```@docs
TensorKitSectors.type_repr
@@ -133,4 +131,3 @@ Finally, we provide functionality to compile all revelant methods for a sector:
```@docs
TensorKitSectors.precompile_sector
```
-
diff --git a/docs/src/lib/spaces.md b/docs/src/lib/spaces.md
index 5aa7e8fd9..e5705fe3e 100644
--- a/docs/src/lib/spaces.md
+++ b/docs/src/lib/spaces.md
@@ -21,8 +21,7 @@ ProductSpace
HomSpace
```
-together with the following specific type for encoding the inner product structure of
-a space:
+together with the following specific type for encoding the inner product structure of a space:
```@docs
InnerProductStyle
@@ -30,16 +29,14 @@ InnerProductStyle
## Useful constants
-The following constants are defined to easily create the concrete type of `GradedSpace`
-associated with a given type of sector.
+The following constants are defined to easily create the concrete type of `GradedSpace` associated with a given type of sector.
```@docs
Vect
Rep
```
-In this respect, there are also a number of type aliases for the `GradedSpace` types
-associated with the most common sectors, namely
+In this respect, there are also a number of type aliases for the `GradedSpace` types associated with the most common sectors, namely
```julia
const ZNSpace{N} = Vect[ZNIrrep{N}]
@@ -110,16 +107,15 @@ isepimorphic
isisomorphic
```
-Inserting trivial space factors or removing such factors for `ProductSpace` instances
-can be done with the following methods.
+Inserting trivial space factors or removing such factors for `ProductSpace` instances can be done with the following methods.
+
```@docs
insertleftunit(::ProductSpace, ::Val{i}) where {i}
insertrightunit(::ProductSpace, ::Val{i}) where {i}
removeunit(::ProductSpace, ::Val{i}) where {i}
```
-There are also specific methods for `HomSpace` instances, that are used in determining
-the resuling `HomSpace` after applying certain tensor operations.
+There are also specific methods for `HomSpace` instances, that are used in determining the resulting `HomSpace` after applying certain tensor operations.
```@docs
flip(W::HomSpace{S}, I) where {S}
diff --git a/docs/src/lib/tensors.md b/docs/src/lib/tensors.md
index 6bb19dee7..ea491a843 100644
--- a/docs/src/lib/tensors.md
+++ b/docs/src/lib/tensors.md
@@ -19,8 +19,7 @@ AdjointTensorMap
BraidingTensor
```
-Of those, `TensorMap` provides the generic instantiation of our tensor concept. It supports
-various constructors, which are discussed in the next subsection.
+Of those, `TensorMap` provides the generic instantiation of our tensor concept. It supports various constructors, which are discussed in the next subsection.
Furthermore, some aliases are provided for convenience:
```@docs
@@ -37,13 +36,9 @@ A `TensorMap` with undefined data can be constructed by specifying its domain an
TensorMap{T}(::UndefInitializer, V::TensorMapSpace)
```
-The resulting object can then be filled with data using the `setindex!` method as discussed
-below, using functions such as `VectorInterface.zerovector!`, `rand!` or `fill!`, or it can
-be used as an output argument in one of the many methods that accept output arguments, or
-in an `@tensor output[...] = ...` expression.
+The resulting object can then be filled with data using the `setindex!` method as discussed below, using functions such as `VectorInterface.zerovector!`, `rand!` or `fill!`, or it can be used as an output argument in one of the many methods that accept output arguments, or in an `@tensor output[...] = ...` expression.
-Alternatively, a `TensorMap` can be constructed by specifying its data, codmain and domain
-in one of the following ways:
+Alternatively, a `TensorMap` can be constructed by specifying its data, codomain and domain in one of the following ways:
```@docs
TensorMap(data::AbstractDict{<:Sector,<:AbstractMatrix}, V::TensorMapSpace)
TensorMap(data::AbstractArray, V::TensorMapSpace; tol)
@@ -98,8 +93,7 @@ domainind
allind
```
-In `TensorMap` instances, all data is gathered in a single `AbstractVector`, which has an internal structure into blocks associated to total coupled charge, within which live subblocks
-associated with the different possible fusion-splitting tree pairs.
+In `TensorMap` instances, all data is gathered in a single `AbstractVector`, which has an internal structure into blocks associated to total coupled charge, within which live subblocks associated with the different possible fusion-splitting tree pairs.
To obtain information about the structure of the data, you can use:
```@docs
@@ -110,7 +104,8 @@ hasblock(::AbstractTensorMap, ::Sector)
fusiontrees(t::AbstractTensorMap)
```
-Data can be accessed (and modified) in a number of ways. To access the full matrix block associated with the coupled charges, you can use:
+Data can be accessed (and modified) in a number of ways.
+To access the full matrix block associated with the coupled charges, you can use:
```@docs
block
blocks
@@ -128,15 +123,12 @@ Base.getindex(::AbstractTensorMap, ::FusionTree, ::FusionTree)
Base.setindex!(::AbstractTensorMap, ::Any, ::FusionTree, ::FusionTree)
```
-For a tensor `t` with `FusionType(sectortype(t)) isa UniqueFusion`, fusion trees are
-completely determined by the outcoming sectors, and the data can be accessed in a more
-straightforward way:
+For a tensor `t` with `FusionStyle(sectortype(t)) isa UniqueFusion`, fusion trees are completely determined by the outcoming sectors, and the data can be accessed in a more straightforward way:
```@docs
Base.getindex(::AbstractTensorMap, ::Tuple{I,Vararg{I}}) where {I<:Sector}
```
-For tensor `t` with `sectortype(t) == Trivial`, the data can be accessed and manipulated
-directly as multidimensional arrays:
+For tensor `t` with `sectortype(t) == Trivial`, the data can be accessed and manipulated directly as multidimensional arrays:
```@docs
Base.getindex(::AbstractTensorMap)
Base.getindex(::AbstractTensorMap, ::Vararg{SliceIndex})
@@ -152,36 +144,25 @@ Random.randexp!
## `AbstractTensorMap` operations
-The operations that can be performed on an `AbstractTensorMap` can be organized into the
-following categories:
-
-* *vector operations*: these do not change the `space` or index strucure of a tensor and
- can be straightforwardly implemented on on the full data. All the methods described in
- [VectorInterface.jl](https://github.com/Jutho/VectorInterface.jl) are supported. For
- compatibility reasons, we also provide implementations for equivalent methods from
- LinearAlgebra.jl, such as `axpy!`, `axpby!`.
-
-* *index manipulations*: these change (permute) the index structure of a tensor, which
- affects the data in a way that is fully determined by the categorical data of the
- `sectortype` of the tensor.
-
-* *(planar) contractions* and *(planar) traces* (i.e., contractions with identity tensors).
- Tensor contractions correspond to a combination of some index manipulations followed by
- a composition or multiplication of the tensors in their role as linear maps.
- Tensor contractions are however of such important and frequency that they require a
- dedicated implementation.
-
-* *tensor factorisations*, which relies on their identification of tensors with linear maps
- between tensor spaces. The factorisations are applied as ordinary matrix factorisations
- to the matrix blocks associated with the coupled charges.
+The operations that can be performed on an `AbstractTensorMap` can be organized into the following categories:
+
+* *vector operations*: these do not change the `space` or index structure of a tensor and can be straightforwardly implemented on the full data.
+ All the methods described in [VectorInterface.jl](https://github.com/Jutho/VectorInterface.jl) are supported.
+ For compatibility reasons, we also provide implementations for equivalent methods from LinearAlgebra.jl, such as `axpy!`, `axpby!`.
+
+* *index manipulations*: these change (permute) the index structure of a tensor, which affects the data in a way that is fully determined by the categorical data of the `sectortype` of the tensor.
+
+* *(planar) contractions* and *(planar) traces* (i.e., contractions with identity tensors).
+ Tensor contractions correspond to a combination of some index manipulations followed by a composition or multiplication of the tensors in their role as linear maps.
+ Tensor contractions are however of such importance and frequency that they require a dedicated implementation.
+
+* *tensor factorizations*, which rely on the identification of tensors with linear maps between tensor spaces.
+ The factorizations are applied as ordinary matrix factorizations to the matrix blocks associated with the coupled charges.
### Index manipulations
-A general index manipulation of a `TensorMap` object can be built up by considering some
-transformation of the fusion trees, along with a permutation of the stored data. They come
-in three flavours, which are either of the type `transform(!)` which are exported, or of the
-type `add_transform!`, for additional expert-mode options that allows for addition and
-scaling, as well as the selection of a custom backend.
+A general index manipulation of a `TensorMap` object can be built up by considering some transformation of the fusion trees, along with a permutation of the stored data.
+They come in three flavours, which are either of the type `transform(!)` which are exported, or of the type `add_transform!`, for additional expert-mode options that allow for addition and scaling, as well as the selection of a custom backend.
```@docs
permute(::AbstractTensorMap, ::Index2Tuple)
@@ -220,20 +201,15 @@ contract!
## `TensorMap` factorizations
-The factorisation methods are powered by [MatrixAlgebraKit.jl](https://github.com/QuantumKitHub/MatrixAlgebraKit.jl)
-and all follow the same strategy. The idea is that the `TensorMap` is interpreted as a linear
-map based on the current partition of indices between `domain` and `codomain`, and then the
-entire range of MatrixAlgebraKit functions can be called.
-Factorizing a tensor according to a different partition of the indices is possible
-by prepending the factorization step with an explicit call to [`permute`](@ref) or [`transpose`](@ref).
+The factorization methods are powered by [MatrixAlgebraKit.jl](https://github.com/QuantumKitHub/MatrixAlgebraKit.jl) and all follow the same strategy.
+The idea is that the `TensorMap` is interpreted as a linear map based on the current partition of indices between `domain` and `codomain`, and then the entire range of MatrixAlgebraKit functions can be called.
+Factorizing a tensor according to a different partition of the indices is possible by prepending the factorization step with an explicit call to [`permute`](@ref) or [`transpose`](@ref).
For the full list of factorizations, see [Decompositions](@extref MatrixAlgebraKit).
-Additionally, it is possible to obtain truncated versions of some of these factorizations
-through the [`MatrixAlgebraKit.TruncationStrategy`](@ref) objects.
+Additionally, it is possible to obtain truncated versions of some of these factorizations through the [`MatrixAlgebraKit.TruncationStrategy`](@ref) objects.
-The exact truncation strategy can be controlled through the strategies defined in [Truncations](@extref MatrixAlgebraKit),
-but for `TensorMap`s there is also the special-purpose scheme:
+The exact truncation strategy can be controlled through the strategies defined in [Truncations](@extref MatrixAlgebraKit), but for `TensorMap`s there is also the special-purpose scheme:
```@docs
truncspace
diff --git a/docs/src/man/fusiontrees.md b/docs/src/man/fusiontrees.md
new file mode 100644
index 000000000..4af387c13
--- /dev/null
+++ b/docs/src/man/fusiontrees.md
@@ -0,0 +1,289 @@
+# [Fusion trees](@id s_fusiontrees)
+
+```@setup fusiontrees
+using TensorKit
+```
+
+The gain in efficiency (both in memory occupation and computation time) obtained from using symmetric (equivariant) tensor maps is that, by Schur's lemma, they are block diagonal in the basis of coupled sectors, i.e. they exhibit block sparsity.
+To exploit this block diagonal form, it is however essential that we know the basis transformation from the individual (uncoupled) sectors appearing in the tensor product form of the domain and codomain, to the totally coupled sectors that label the different blocks.
+We refer to the latter as block sectors, as we already encountered in the previous section [`blocksectors`](@ref) and [`blockdim`](@ref) defined on the type [`ProductSpace`](@ref).
+
+This basis transformation consists of a basis of inclusion and projection maps, denoted as ``X^{a_1a_2…a_N}_{c,α}: R_c → R_{a_1} ⊗ R_{a_2} ⊗ … ⊗ R_{a_N}`` and their adjoints ``(X^{a_1a_2…a_N}_{c,α})^†``, such that
+
+```math
+(X^{a_1a_2…a_N}_{c,α})^† ∘ X^{a_1a_2…a_N}_{c′,α′} = δ_{c,c′} δ_{α,α′} \mathrm{id}_c
+```
+
+and
+
+```math
+∑_{c,α} X^{a_1a_2…a_N}_{c,α} ∘ (X^{a_1a_2…a_N}_{c,α})^† = \mathrm{id}_{a_1 ⊗ a_2 ⊗ … ⊗ a_N} = \mathrm{id}_{a_1} ⊗ \mathrm{id}_{a_2} ⊗ … ⊗ \mathrm{id}_{a_N}
+```
+
+Fusion trees provide a particular way to construct such a basis.
+It is useful to know about the existence of fusion trees and how they are represented, as discussed in the first subsection.
+The next two subsections discuss possible manipulations that can be performed with fusion trees.
+These are used under the hood when manipulating the indices of tensors, but a typical user would not need to use these manipulations on fusion trees directly.
+Hence, these last two sections can safely be skipped.
+
+## Canonical representation
+
+To couple or fuse the different sectors together into a single block sector, we can sequentially fuse together two sectors into a single coupled sector, which is then fused with the next uncoupled sector, using the splitting tensors ``X_{a,b}^{c,μ} : R_c → R_a ⊗ R_b`` and their adjoints.
+This amounts to the canonical choice of our tensor product, and for a given tensor mapping from ``(((W_1 ⊗ W_2) ⊗ W_3) ⊗ … )⊗ W_{N_2})`` to ``(((V_1 ⊗ V_2) ⊗ V_3) ⊗ … )⊗ V_{N_1})``, the corresponding fusion and splitting trees take the form
+
+```@raw html
+
+```
+
+for the specific case ``N_1 = 4`` and ``N_2 = 3``.
+We can separate this tree into the fusing part ``(b_1 ⊗ b_2) ⊗ b_3 → c`` and the splitting part ``c→(((a_1 ⊗ a_2) ⊗ a_3) ⊗ a_4)``.
+Given that the fusion tree can be considered to be the adjoint of a corresponding splitting tree ``c → (b_1 ⊗ b_2) ⊗ b_3``, we now first consider splitting trees in isolation.
+A splitting tree which goes from one coupled sector ``c`` to ``N`` uncoupled sectors ``a_1``, ``a_2``, …, ``a_N`` needs ``N-2`` additional internal sector labels ``e_1``, …, ``e_{N-2}``, and, if `FusionStyle(I) isa GenericFusion`, ``N-1`` additional multiplicity labels ``μ_1``, …, ``μ_{N-1}``.
+We henceforth refer to them as vertex labels, as they are associated with the vertices of the splitting tree.
+In the case of `FusionStyle(I) isa UniqueFusion`, the internal sectors ``e_1``, …, ``e_{N-2}`` are completely fixed, for `FusionStyle(I) isa MultipleFusion` they can also take different values.
+In our abstract notation of the splitting basis ``X^{a_1a_2…a_N}_{c,α}`` used above, ``α`` can be considered a collective label, i.e. ``α = (e_1, …, e_{N-2}; μ₁, … ,μ_{N-1})``.
+Indeed, we can check the orthogonality condition ``(X^{a_1a_2…a_N}_{c,α})^† ∘ X^{a_1a_2…a_N}_{c′,α′} = δ_{c,c′} δ_{α,α′} \mathrm{id}_c``, which now forces all internal lines ``e_k`` and vertex labels ``μ_l`` to be the same.
+
+There is one subtle remark that we have so far ignored.
+Within the specific subtypes of `Sector`, we do not explicitly distinguish between ``R_a^*`` (simply denoted as ``a^*`` and graphically depicted as an upgoing arrow ``a``) and ``R_{\bar{a}}`` (simply denoted as ``\bar{a}`` and depicted with a downgoing arrow), i.e. between the dual space of ``R_a`` on which the conjugated irrep acts, or the irrep ``\bar{a}`` to which the complex conjugate of irrep ``a`` is isomorphic.
+This distinction is however important, when certain uncoupled sectors in the fusion tree actually originate from a dual space.
+We use the isomorphisms ``Z_a : R_a^* → R_{\bar{a}}`` and its adjoint ``Z_a^† : R_{\bar{a}} → R_a^*``, as introduced in the section on [topological data of a fusion category](@ref ss_topologicalfusion), to build fusion and splitting trees that take the distinction between irreps and their conjugates into account.
+Hence, in the previous example, if e.g. the first and third space in the codomain and the second space in the domain of the tensor were dual spaces, the actual pair of splitting and fusion tree would look as
+
+```@raw html
+
+```
+
+The presence of these isomorphisms will be important when we start to bend lines, to move uncoupled sectors from the incoming to the outgoing part of the fusion-splitting tree.
+Note that we can still represent the fusion tree as the adjoint of a corresponding splitting tree, because we also use the adjoint of the ``Z`` isomorphisms in the splitting part, and the ``Z`` isomorphism in the fusion part.
+Furthermore, the presence of the ``Z`` isomorphisms does not affect the orthonormality.
+
+We represent splitting trees and their adjoints using a specific immutable type called `FusionTree` (which actually represents a splitting tree, but fusion tree is a more common term), defined as
+```julia
+struct FusionTree{I<:Sector,N,M,L}
+ uncoupled::NTuple{N,I}
+ coupled::I
+ isdual::NTuple{N,Bool}
+ innerlines::NTuple{M,I} # fixed to M = N-2
+ vertices::NTuple{L,Int} # fixed to L = N-1
+end
+```
+Here, the fields are probably self-explanatory.
+The `isdual` field indicates whether an isomorphism is present (if the corresponding value is `true`) or not.
+Note that the field `uncoupled` contains the sectors coming out of the splitting trees, before the possible ``Z`` isomorphism, i.e. the splitting tree in the above example would have `sectors = (a₁, a₂, a₃, a₄)`.
+The `FusionTree` type has a number of basic properties and capabilities, such as checking for equality with `==` and support for `hash(f::FusionTree, h::UInt)`, as splitting and fusion trees are used as keys in look-up tables (i.e. `AbstractDictionary` instances) to look up certain parts of the data of a tensor.
+
+`FusionTree` instances are not checked for consistency (i.e. valid fusion rules etc) upon creation, hence, they are assumed to be created correctly.
+The most natural way to create them is by using the `fusiontrees(uncoupled::NTuple{N, I}, coupled::I = unit(I))` method, which returns an iterator over all possible fusion trees from a set of `N` uncoupled sectors to a given coupled sector, which by default is assumed to be the trivial sector of that group or fusion category (i.e. the identity object in categorical nomenclature).
+The return type of `fusiontrees` is a custom type `FusionTreeIterator` which conforms to the complete interface of an iterator, and has a custom `length` function that computes the number of possible fusion trees without iterating over all of them explicitly. This is best illustrated with some examples
+
+```@repl fusiontrees
+s = Irrep[SU₂](1/2)
+collect(fusiontrees((s, s, s, s)))
+collect(fusiontrees((s, s, s, s, s), s, (true, false, false, true, false)))
+iter = fusiontrees(ntuple(n -> s, 16))
+sum(n -> 1, iter)
+length(iter)
+@elapsed sum(n -> 1, iter)
+@elapsed length(iter)
+s2 = s ⊠ s
+collect(fusiontrees((s2, s2, s2, s2)))
+```
+Note that `FusionTree` instances are shown (printed) in a way that is valid code to reproduce them, a property which also holds for both instances of `Sector` and instances of `VectorSpace`.
+All of those should be displayed in a way that can be copy pasted as valid code.
+Furthermore, we use context to determine how to print e.g. a sector.
+In isolation, `s2` is printed as `(Irrep[SU₂](1/2) ⊠ Irrep[SU₂](1/2))`, however, within the fusion tree, it is simply printed as `(1/2, 1/2)`, because it will be converted back into a `ProductSector`, namely `Irrep[SU₂] ⊠ Irrep[SU₂]` by the constructor of `FusionTree{Irrep[SU₂] ⊠ Irrep[SU₂]}`.
+
+## Manipulations on a fusion tree
+
+We now discuss elementary manipulations that we want to perform on or between fusion trees (where we actually mean splitting trees), which will form the building block for more general manipulations on a pair of a fusion and splitting tree discussed in the next subsection, and then for casting a general index manipulation of a tensor map as a linear operation in the basis of canonically ordered splitting and fusion trees.
+In this section, we will ignore the ``Z`` isomorphisms, as they are just trivially reshuffled under the different operations that we describe.
+These manipulations are used as low-level methods by the `TensorMap` methods discussed on the next page.
+As such, they are not exported by TensorKit.jl, nor do they overload similarly named methods from Julia Base (see `split` and `merge` below).
+
+The first operation we discuss is an elementary braid of two neighbouring sectors (indices), i.e. a so-called Artin braid or Artin generator of the braid group.
+Because these two sectors do not appear on the same fusion vertex, some recoupling is necessary.
+The following represents two different ways to compute the result of such a braid as a linear combination of new fusion trees in canonical order:
+
+```@raw html
+
+```
+
+While the upper path is the most intuitive, it requires two recouplings or F-moves (one forward and one reverse).
+On the other hand, the lower path requires only one (reverse) F-move, and two R-moves.
+The latter are less expensive to compute, and so the lower path is computationally more efficient.
+However, the end result should be the same, provided the pentagon and hexagon equations are satisfied.
+We always assume that these are satisfied for any new subtype of `Sector`, and it is up to the user to verify that they are when implementing new custom `Sector` types.
+This result is implemented in the function [`artin_braid(f::FusionTree, i; inv = false)`](@ref TensorKit.artin_braid) where `i` denotes the position of the first sector (i.e. labeled `b` in the above graph) which is then braided with the sector at position `i+1` in the fusion tree `f`.
+The keyword argument `inv` allows to select the inverse braiding operation, which amounts to replacing the R-matrix with its inverse (or thus, adjoint) in the above steps.
+The result is returned as a dictionary with possible output fusion trees as keys and corresponding coefficients as value.
+In the case of `FusionStyle(I) isa UniqueFusion`, there is only one resulting fusion tree, with corresponding coefficient a complex phase (which is one for the bosonic representation theory of an Abelian group), and the result is a special `SingletonDict<:AbstractDict`, a `struct` type defined in TensorKit.jl to hold a single key-value pair.
+
+With the elementary `artin_braid`, we can then compute a more general braid.
+For this, we provide an interface
+
+[`braid(f::FusionTree{I, N}, levels::NTuple{N, Int}, permutation::NTuple{N, Int})`](@ref braid(f::FusionTree{I, N}, levels::NTuple{N, Int}, p::NTuple{N, Int}) where {I <: Sector, N})
+
+where the braid is specified as a permutation, such that the new sector at position `i` was originally at position `permutation[i]`, and where every uncoupled sector is also assigned a level or depth.
+The permutation is decomposed into swaps between neighbouring sectors, and when two sectors are swapped, their respective level will determine whether the left sector is braided over or under its right neighbor.
+This interface does not allow to specify the most general braid, and in particular will never wind one line around another, but can be used as a more general building block for arbitrary braids than the elementary Artin generators.
+A graphical example makes this probably more clear, i.e. for `levels = (1, 2, 3, 4, 5)` and `permutation = (5, 3, 1, 4, 2)`, the corresponding braid is given by
+
+```@raw html
+
+```
+
+that is, the first sector or space goes to position 3, and crosses over all other lines, because it has the lowest level (i.e. think of level as depth in the third dimension), and so forth.
+We sketch this operation both as a general braid on the left hand side, and as a particular composition of Artin braids on the right hand side.
+
+When `BraidingStyle(I) == SymmetricBraiding()`, there is no distinction between applying the braiding or its inverse (i.e. lines crossing over or under each other in the graphical notation) and the whole operation simplifies down to a permutation.
+We then also support the interface
+
+[`permute(f::FusionTree{I, N}, permutation::NTuple{N, Int})`](@ref permute(f::FusionTree{I, N}, p::NTuple{N, Int}) where {I <: Sector, N})
+
+Other manipulations which are sometimes needed are
+
+* [`insertat(f1::FusionTree{I,N₁}, i::Int, f2::FusionTree{I,N₂})`](@ref TensorKit.insertat) : inserts a fusion tree `f2` at the `i`th uncoupled sector of fusion tree `f1` (this requires that the coupled sector `f2` matches with the `i`th uncoupled sector of `f1`, and that `!f1.isdual[i]`, i.e. that there is no ``Z``-isomorphism on the `i`th line of `f1`), and recouple this into a linear combination of trees in canonical order, with `N₁ + N₂ - 1` uncoupled sectors, i.e. diagrammatically for `i = 3`
+
+```@raw html
+
+```
+
+* [`split(f::FusionTree{I,N}, M::Int)`](@ref TensorKit.split) : splits a fusion tree `f` into two trees `f1` and `f2`, such that `f1` has the first `M` uncoupled sectors of `f`, and `f2` the remaining `N - M`.
+ This function is type stable if `M` is a compile time constant.
+
+ `split(f, M)` is the inverse of `insertat` in the sense that `insertat(f2, 1, f1)` should return a dictionary with a single key-value pair `f=>1`.
+ Diagrammatically, for `M = 4`, the function `split` returns
+
+```@raw html
+
+```
+
+* [`merge(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, c::I, [μ=1])`](@ref TensorKit.merge) : merges two fusion trees `f1` and `f2` by fusing the coupled sectors of `f1` and `f2` into a sector `c` (with vertex label `μ` if `FusionStyle(I) == GenericFusion()`), and reexpressing the result as a linear combination of fusion trees with `N₁ + N₂` uncoupled sectors in canonical order.
+ This is a simple application of `insertat`.
+ Diagrammatically, this operation is represented as:
+
+```@raw html
+
+```
+
+## Manipulations on a splitting - fusion tree pair
+
+In this subsection we discuss manipulations that act on a splitting and fusion tree pair, which we will always represent as two separate trees `f1, f2`, where `f1` is the splitting tree and `f2` represents the fusion tree, and they should have `f1.coupled == f2.coupled`.
+
+The most important manipulation on such a pair is to move sectors from one to the other.
+Given the canonical order of these trees, we exclusively use the *left duality* (see the section on [categories](@ref s_categories)), for which the evaluation and coevaluation maps establish isomorphisms between
+
+```math
+\begin{aligned}
+&\mathrm{Hom}((((b_1 ⊗ b_2) ⊗ …) ⊗ b_{N_2}), (((a_1 ⊗ a_2) ⊗ …) ⊗ a_{N_1}))\\
+&≂\mathrm{Hom}((((b_1 ⊗ b_2) ⊗ ...) ⊗ b_{N_2-1}), ((((a_1 ⊗ a_2) ⊗ ...) ⊗ a_{N_1}) ⊗ b_{N_2}^*))\\
+&≂\mathrm{Hom}(1, (((((((a_1 ⊗ a_2) ⊗ ...) ⊗ a_{N_1}) ⊗ b_{N_2}^*) ⊗ …) ⊗ b_2^*) ⊗ b_1^*) )
+\end{aligned}
+```
+
+where the last morphism space is then labeled by the basis of only splitting trees.
+We can then use the manipulations from the previous section, and then again use the left duality to bring this back to a pair of splitting and fusion tree with `N₂′` incoming and `N₁′` outgoing sectors (with `N₁′ + N₂′ == N₁ + N₂`).
+
+We now discuss how to actually bend lines, and thus, move sectors from the incoming part (fusion tree) to the outgoing part (splitting tree).
+Hereby, we exploit the relations between the (co)evaluation (exact pairing) and the fusion tensors, discussed in [topological data of a fusion category](@ref ss_topologicalfusion).
+The main ingredient that we need is summarized in
+
+```@raw html
+
+```
+
+We will only need the B-symbol and not the A-symbol.
+Applying the left evaluation on the second sector of a splitting tensor thus yields a linear combination of fusion tensors (when `FusionStyle(I) == GenericFusion()`, or just a scalar times the corresponding fusion tensor otherwise), with corresponding ``Z`` isomorphism.
+Taking the adjoint of this relation yields the required relation to transform a fusion tensor into a splitting tensor with an added ``Z^†`` isomorphism.
+
+However, we have to be careful if we bend a line on which a ``Z`` isomorphism (or its adjoint) is already present.
+Indeed, it is exactly for this operation that we explicitly need to take the presence of these isomorphisms into account.
+Indeed, we obtain the relation
+
+```@raw html
+
+```
+
+Hence, bending an `isdual` sector from the splitting tree to the fusion tree yields an additional Frobenius-Schur factor, and of course leads to a normal sector (which is no longer `isdual` and does thus not come with a ``Z``-isomorphism) on the fusion side.
+We again use the adjoint of this relation to bend an `isdual` sector from the fusion tree to the splitting tree.
+
+The `FusionTree` interface to duality and line bending is given by
+
+[`repartition(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, N::Int)`](@ref repartition)
+
+which takes a splitting tree `f1` with `N₁` outgoing sectors, a fusion tree `f2` with `N₂` incoming sectors, and applies line bending such that the resulting splitting and fusion trees have `N` outgoing sectors, corresponding to the first `N` sectors out of the list ``(a_1, a_2, …, a_{N_1}, b_{N_2}^*, …, b_{1}^*)`` and `N₁ + N₂ - N` incoming sectors, corresponding to the dual of the last `N₁ + N₂ - N` sectors from the previous list, in reverse.
+The return values are correctly inferred if `N` is a compile time constant.
+
+Graphically, for `N₁ = 4`, `N₂ = 3`, `N = 2` and some particular choice of `isdual` in both the fusion and splitting tree:
+
+```@raw html
+
+```
+
+The result is returned as a dictionary with keys `(f1′, f2′)` and the corresponding `coeff` as value.
+Note that the summation is only over the ``κ_j`` labels, such that, in the case of `FusionStyle(I) isa MultiplicityFreeFusion`, the linear combination simplifies to a single term with a scalar coefficient.
+
+With this basic function, we can now perform arbitrary combinations of braids or permutations with line bendings, to completely reshuffle where sectors appear.
+The interface provided for this is given by
+
+[`braid(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, levels1::NTuple{N₁,Int}, levels2::NTuple{N₂,Int}, p1::NTuple{N₁′,Int}, p2::NTuple{N₂′,Int})`](@ref braid(::FusionTree{I}, ::FusionTree{I}, ::IndexTuple, ::IndexTuple, ::IndexTuple{N₁}, ::IndexTuple{N₂}) where {I<:Sector,N₁,N₂})
+
+where we now have splitting tree `f1` with `N₁` outgoing sectors, a fusion tree `f2` with `N₂` incoming sectors, `levels1` and `levels2` assign a level or depth to the corresponding uncoupled sectors in `f1` and `f2`, and we represent the new configuration as a pair `p1` and `p2`.
+Together, `(p1..., p2...)` represents a permutation of length `N₁ + N₂ = N₁′ + N₂′`, where `p1` indicates which of the original sectors should appear as outgoing sectors in the new splitting tree and `p2` indicates which appear as incoming sectors in the new fusion tree.
+Hereto, we label the uncoupled sectors of `f1` from `1` to `N₁`, followed by the uncoupled sectors of `f2` from `N₁ + 1` to `N₁ + N₂`.
+Note that simply repartitioning the splitting and fusion tree such that e.g. all sectors appear in the new splitting tree (i.e. are outgoing), amounts to choosing `p1 = (1,..., N₁, N₁ + N₂, N₁ + N₂ - 1, ... , N₁ + 1)` and `p2 = ()`, because the duality isomorphism reverses the order of the tensor product.
+
+This routine is implemented by indeed first making all sectors outgoing using the `repartition` function discussed above, such that only splitting trees remain, then braiding those using the routine from the previous subsection such that the new outgoing sectors appear first, followed by the new incoming sectors (in reverse order), and then again invoking the `repartition` routine to bring everything in final form.
+The result is again returned as a dictionary where the keys are `(f1′, f2′)` and the values the corresponding coefficients.
+
+As before, there is a simplified interface for the case where `BraidingStyle(I) isa SymmetricBraiding` and the levels are not needed.
+This is simply given by
+
+[`permute(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, p1::NTuple{N₁′,Int}, p2::NTuple{N₂′,Int})`](@ref permute(::FusionTree{I}, ::FusionTree{I}, ::IndexTuple{N₁}, ::IndexTuple{N₂}) where {I<:Sector,N₁,N₂})
+
+The `braid` and `permute` routines for double fusion trees will be the main access point for corresponding manipulations on tensors.
+As a consequence, results from this routine are memoized, i.e. they are stored in some package wide 'least-recently used' cache (from [LRUCache.jl](https://github.com/JuliaCollections/LRUCache.jl)) that can be accessed as `TensorKit.braidcache`.
+By default, this cache stores up to `10^5` different `braid` or `permute` results, where one result corresponds to one particular combination of `(f1, f2, p1, p2, levels1, levels2)`.
+This should be sufficient for most algorithms.
+While there are currently no (official) access methods to change the default settings of this cache (one can always resort to `resize!(TensorKit.permutecache)` and other methods from LRUCache.jl), this might change in the future.
+The use of this cache is however controlled by two constants of type `RefValue{Bool}`, namely `usebraidcache_abelian` and `usebraidcache_nonabelian`.
+The default values are given by `TensorKit.usebraidcache_abelian[] = false` and `TensorKit.usebraidcache_nonabelian[] = true`, and respectively reflect that the cache is likely not going to help (or even slow down) fusion trees with `FusionStyle(f) isa UniqueFusion`, but is probably useful for fusion trees with `FusionStyle(f) isa MultipleFusion`.
+One can change these values and test the effect on their application.
+
+The existence of `braidcache` also implies that potential inefficiencies in the fusion tree manipulations (which we nonetheless try to avoid) will not seriously affect performance of tensor manipulations.
+
+## Inspecting fusion trees as tensors
+
+For those cases where the fusion and splitting tensors have an explicit representation as a tensor, i.e. a morphism in the category `Vect` (this essentially coincides with the case of group representations), this explicit representation can be created, which can be useful for checking purposes.
+Hereto, it is necessary that the *splitting tensor* ``X^{ab}_{c,μ}``, i.e. the Clebsch-Gordan coefficients of the group, are encoded via the routine `fusiontensor(a, b, c, μ = nothing)`, where the last argument is only necessary in the case of `FusionStyle(I) == GenericFusion()`.
+We can then convert a `FusionTree{I, N}` into an `Array`, which will yield a rank `N + 1` array where the first `N` dimensions correspond to the uncoupled sectors, and the last dimension to the coupled sector.
+Note that this is mostly useful for the case of `FusionStyle(I) isa MultipleFusion` groups, as in the case of abelian groups, all irreps are one-dimensional.
+
+Some examples:
+```@repl fusiontrees
+using LinearAlgebra # hide
+s = Irrep[SU₂](1/2)
+iter = fusiontrees((s, s, s, s), SU2Irrep(1))
+f = first(iter)
+convert(Array, f)
+
+LinearAlgebra.I ≈ convert(Array, FusionTree((SU2Irrep(1/2),), SU2Irrep(1/2), (false,), ()))
+Z = adjoint(convert(Array, FusionTree((SU2Irrep(1/2),), SU2Irrep(1/2), (true,), ())))
+transpose(Z) ≈ frobenius_schur_phase(SU2Irrep(1/2)) * Z
+
+LinearAlgebra.I ≈ convert(Array, FusionTree((Irrep[SU₂](1),), Irrep[SU₂](1), (false,), ()))
+Z = adjoint(convert(Array, FusionTree((Irrep[SU₂](1),), Irrep[SU₂](1), (true,), ())))
+transpose(Z) ≈ frobenius_schur_phase(Irrep[SU₂](1)) * Z
+
+#check orthogonality
+for f1 in iter
+ for f2 in iter
+ dotproduct = dot(convert(Array, f1), convert(Array, f2))
+ println("<$f1, $f2> = $dotproduct")
+ end
+end
+```
+
+Note that we take the adjoint when computing `Z`, because `convert(Array, f)` assumes `f` to be splitting tree, which is built using ``Z^†``.
+Further note that the normalization (squared) of a fusion tree is given by the dimension of the coupled sector, as we are also tracing over the ``\mathrm{id}_c`` when checking the orthogonality by computing `dot` of the corresponding tensors.
diff --git a/docs/src/man/gradedspaces.md b/docs/src/man/gradedspaces.md
new file mode 100644
index 000000000..3ec64b4d1
--- /dev/null
+++ b/docs/src/man/gradedspaces.md
@@ -0,0 +1,177 @@
+```@meta
+CollapsedDocStrings = true
+```
+
+# [Graded spaces](@id s_gradedspaces)
+
+```@setup gradedspaces
+using TensorKit
+```
+
+We have introduced `Sector` subtypes as a way to label the irreps or sectors in the decomposition ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}``.
+To actually represent such spaces, we now also introduce a corresponding type `GradedSpace`, which is a subtype of `ElementarySpace`:
+
+```@docs; canonical=false
+GradedSpace
+```
+
+Here, `D` is a type parameter to denote the data structure used to store the degeneracy or multiplicity dimensions ``n_a`` of the different sectors.
+For convenience, `Vect[I]` will return the fully concrete type with `D` specified.
+
+Note that, conventionally, a graded vector space is a space that has a natural direct sum decomposition over some set of labels, i.e. ``V = ⨁_{a ∈ I} V_a`` where the label set ``I`` has the structure of a semigroup ``a ⊗ b = c ∈ I``.
+Here, we generalize this notation by using for ``I`` the fusion ring of a fusion category, ``a ⊗ b = ⨁_{c ∈ I} ⨁_{μ = 1}^{N_{a,b}^c} c``.
+However, this is mostly to lower the barrier, as really the instances of `GradedSpace` represent just general objects in a fusion category (or strictly speaking, a pre-fusion category, as we allow for an infinite number of simple objects, e.g. the irreps of a continuous group).
+
+## Implementation details
+
+As mentioned, the way in which the degeneracy dimensions ``n_a`` are stored depends on the specific sector type `I`, more specifically on the `IteratorSize` of `values(I)`.
+If `IteratorSize(values(I)) isa Union{IsInfinite, SizeUnknown}`, the different sectors ``a`` and their corresponding degeneracy ``n_a`` are stored as key value pairs in an `Associative` array, i.e. a dictionary `dims::SectorDict`.
+As the total number of sectors in `values(I)` can be infinite, only the sectors ``a`` for which ``n_a`` is non-zero are stored.
+Here, `SectorDict` is a constant type alias for a specific dictionary implementation, which currently resorts to `SortedVectorDict` implemented in TensorKit.jl.
+Hence, the sectors and their corresponding dimensions are stored as two matching lists (`Vector` instances), which are ordered based on the property `isless(a::I, b::I)`.
+This ensures that the space ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}`` has some unique canonical order in the direct sum decomposition, such that two different but equal instances created independently always match.
+
+If `IteratorSize(values(I)) isa Union{HasLength, HasShape}`, the degeneracy dimensions `n_a` are stored for all sectors `a ∈ values(I)` (also if `n_a == 0`) in a tuple, more specifically a `NTuple{N, Int}` with `N = length(values(I))`.
+The methods `getindex(values(I), i)` and `findindex(values(I), a)` are used to map between a sector `a ∈ values(I)` and a corresponding index `i ∈ 1:N`.
+As `N` is a compile time constant, these types can be created in a type stable manner.
+Note however that this implies that for large values of `N`, it can be beneficial to define `IteratorSize(values(I)) = SizeUnknown()` to not overly burden the compiler.
+
+## Constructing instances
+
+As mentioned, the convenience method `Vect[I]` will return the concrete type `GradedSpace{I, D}` with the matching value of `D`, so that should never be a user's concern.
+In fact, for consistency, `Vect[Trivial]` will just return `ComplexSpace`, which is not even a specific type of `GradedSpace`.
+For the specific case of group irreps as sectors, one can use `Rep[G]` with `G` the group, as inspired by the categorical name ``\mathbf{Rep}_{\mathsf{G}}``.
+Some illustrations:
+
+```@repl gradedspaces
+Vect[Trivial]
+Vect[U1Irrep]
+Vect[Irrep[U₁]]
+Rep[U₁]
+Rep[ℤ₂ × SU₂]
+Vect[Irrep[ℤ₂ × SU₂]]
+```
+
+Note that we also have the specific alias `U₁Space`.
+In fact, for all the common groups we have a number of aliases, both in ASCII and using Unicode:
+
+```julia
+# ASCII type aliases
+const ZNSpace{N} = GradedSpace{ZNIrrep{N}, NTuple{N,Int}}
+const Z2Space = ZNSpace{2}
+const Z3Space = ZNSpace{3}
+const Z4Space = ZNSpace{4}
+const U1Space = Rep[U₁]
+const CU1Space = Rep[CU₁]
+const SU2Space = Rep[SU₂]
+
+# Unicode alternatives
+const ℤ₂Space = Z2Space
+const ℤ₃Space = Z3Space
+const ℤ₄Space = Z4Space
+const U₁Space = U1Space
+const CU₁Space = CU1Space
+const SU₂Space = SU2Space
+```
+
+To create specific instances of those types, one can e.g. just use `V = GradedSpace(a => n_a, b => n_b, c => n_c)` or `V = GradedSpace(iterator)` where `iterator` is any iterator (e.g. a dictionary or a generator) that yields `Pair{I, Int}` instances.
+With those constructions, `I` is inferred from the type of sectors.
+However, it is often more convenient to specify the sector type explicitly (using one of the many aliases provided), since then the sectors are automatically converted to the correct type.
+Thereto, one can use `Vect[I]`, or when `I` corresponds to the irreducible representations of a group, `Rep[G]`.
+Some examples:
+
+```@repl gradedspaces
+Vect[Irrep[U₁]](0 => 3, 1 => 2, -1 => 1) ==
+ GradedSpace(U1Irrep(0) => 3, U1Irrep(1) => 2, U1Irrep(-1) => 1) ==
+ U1Space(0 => 3, 1 => 2, -1 => 1)
+```
+The fact that `Rep[G]` also works with product groups makes it easy to specify e.g.
+```@repl gradedspaces
+Rep[ℤ₂ × SU₂]((0, 0) => 3, (1, 1/2) => 2, (0, 1) => 1) ==
+ GradedSpace((Z2Irrep(0) ⊠ SU2Irrep(0)) => 3, (Z2Irrep(1) ⊠ SU2Irrep(1/2)) => 2, (Z2Irrep(0) ⊠ SU2Irrep(1)) => 1)
+```
+
+## Methods
+
+There are a number of methods to work with instances `V` of `GradedSpace`.
+The function [`sectortype`](@ref) returns the type of the sector labels.
+It also works on other vector spaces, in which case it returns [`Trivial`](@ref).
+The function [`sectors`](@ref) returns an iterator over the different sectors `a` with non-zero `n_a`, for other `ElementarySpace` types it returns `(Trivial(),)`.
+The degeneracy dimensions `n_a` can be extracted as `dim(V, a)`, it properly returns `0` if sector `a` is not present in the decomposition of `V`.
+With [`hassector(V, a)`](@ref) one can check if `V` contains a sector `a` with `dim(V, a) > 0`.
+Finally, `dim(V)` returns the total dimension of the space `V`, i.e. ``∑_a n_a d_a`` or thus `dim(V) = sum(dim(V, a) * dim(a) for a in sectors(V))`.
+Note that a representation space `V` has certain sectors `a` with dimensions `n_a`, then its dual `V'` will report to have sectors `dual(a)`, and `dim(V', dual(a)) == n_a`.
+There is a subtlety regarding the difference between the dual of a representation space ``R_a^*``, on which the conjugate representation acts, and the representation space of the irrep `dual(a) == conj(a)` that is isomorphic to the conjugate representation, i.e. ``R_{\overline{a}} ≂ R_a^*`` but they are not equal.
+We return to this in the section on [fusion trees](@ref ss_fusiontrees).
+This is true also in more general fusion categories beyond the representation categories of groups.
+
+Other methods for `ElementarySpace`, such as [`dual`](@ref), [`fuse`](@ref) and [`flip`](@ref) also work.
+In fact, `GradedSpace` is the reason `flip` exists, because in this case it is different than `dual`.
+The existence of flip originates from the non-trivial isomorphism between ``R_{\overline{a}}`` and ``R_{a}^*``, i.e. the representation space of the dual ``\overline{a}`` of sector ``a`` and the dual of the representation space of sector ``a``.
+In order for `flip(V)` to be isomorphic to `V`, it is such that, if `V = GradedSpace(a=>n_a,...)` then `flip(V) = dual(GradedSpace(dual(a)=>n_a,....))`.
+
+Furthermore, for two spaces `V1 = GradedSpace(a => n1_a, ...)` and `V2 = GradedSpace(a => n2_a, ...)`, we have `infimum(V1, V2) = GradedSpace(a => min(n1_a, n2_a), ....)` and similarly for `supremum`, i.e. they act on the degeneracy dimensions of every sector separately.
+Therefore, it can be that the return value of `infimum(V1, V2)` or `supremum(V1, V2)` is equal to neither `V1` nor `V2`.
+
+For `W` a `ProductSpace{Vect[I], N}`, [`sectors(W)`](@ref) returns an iterator that generates all possible combinations of sectors `as` represented as `NTuple{N, I}`.
+The function [`dims(W, as)`](@ref) returns the corresponding tuple with degeneracy dimensions, while [`dim(W, as)`](@ref) returns the product of these dimensions.
+[`hassector(W, as)`](@ref) is equivalent to `dim(W, as) > 0`.
+Finally, there is the function [`blocksectors(W)`](@ref) which returns a list (of type `Vector`) with all possible "block sectors" or total/coupled sectors that can result from fusing the individual uncoupled sectors in `W`.
+Correspondingly, [`blockdim(W, a)`](@ref) counts the total degeneracy dimension of the coupled sector `a` in `W`.
+The machinery for computing this is the topic of the next section on [Fusion trees](@ref ss_fusiontrees), but first, it's time for some examples.
+
+## Examples
+
+Let's start with an example involving ``\mathsf{U}_1``:
+```@repl gradedspaces
+V1 = Rep[U₁](0=>3, 1=>2, -1=>1)
+V1 == U1Space(0=>3, 1=>2, -1=>1) == U₁Space(-1=>1, 1=>2,0=>3) # order doesn't matter
+(sectors(V1)...,)
+dim(V1, U1Irrep(1))
+dim(V1', Irrep[U₁](1)) == dim(V1, conj(U1Irrep(1))) == dim(V1, U1Irrep(-1))
+hassector(V1, Irrep[U₁](1))
+hassector(V1, Irrep[U₁](2))
+dual(V1)
+flip(V1)
+dual(V1) ≅ V1
+flip(V1) ≅ V1
+V2 = U1Space(0=>2, 1=>1, -1=>1, 2=>1, -2=>1)
+infimum(V1, V2)
+supremum(V1, V2)
+⊕(V1,V2)
+W = ⊗(V1,V2)
+collect(sectors(W))
+dims(W, (Irrep[U₁](0), Irrep[U₁](0)))
+dim(W, (Irrep[U₁](0), Irrep[U₁](0)))
+hassector(W, (Irrep[U₁](0), Irrep[U₁](0)))
+hassector(W, (Irrep[U₁](2), Irrep[U₁](0)))
+fuse(W)
+(blocksectors(W)...,)
+blockdim(W, Irrep[U₁](0))
+```
+and then with ``\mathsf{SU}_2``:
+```@repl gradedspaces
+V1 = Vect[Irrep[SU₂]](0=>3, 1//2=>2, 1=>1)
+V1 == SU2Space(0=>3, 1/2=>2, 1=>1) == SU₂Space(0=>3, 0.5=>2, 1=>1)
+(sectors(V1)...,)
+dim(V1, SU2Irrep(1))
+dim(V1', SU2Irrep(1)) == dim(V1, conj(SU2Irrep(1))) == dim(V1, Irrep[SU₂](1))
+dim(V1)
+hassector(V1, Irrep[SU₂](1))
+hassector(V1, Irrep[SU₂](2))
+dual(V1)
+flip(V1)
+V2 = SU2Space(0=>2, 1//2=>1, 1=>1, 3//2=>1, 2=>1)
+infimum(V1, V2)
+supremum(V1, V2)
+⊕(V1,V2)
+W = ⊗(V1,V2)
+collect(sectors(W))
+dims(W, (Irrep[SU₂](0), Irrep[SU₂](0)))
+dim(W, (Irrep[SU₂](0), Irrep[SU₂](0)))
+hassector(W, (SU2Irrep(0), SU2Irrep(0)))
+hassector(W, (SU2Irrep(2), SU2Irrep(0)))
+fuse(W)
+(blocksectors(W)...,)
+blockdim(W, SU2Irrep(0))
+```
diff --git a/docs/src/man/intro.md b/docs/src/man/intro.md
index 00232fb3c..40f31fa8e 100644
--- a/docs/src/man/intro.md
+++ b/docs/src/man/intro.md
@@ -1,152 +1,99 @@
# [Introduction](@id s_intro)
-Before providing a typical "user guide" and discussing the implementation of TensorKit.jl
-on the next pages, let us discuss some of the rationale behind this package.
+Before providing a typical *user guide* and discussing the implementation of TensorKit.jl on the next pages, let us discuss some of the rationale behind this package.
## [What is a tensor?](@id ss_whatistensor)
-At the very start we should ponder about the most suitable and sufficiently general
-definition of a tensor. A good starting point is the following:
-
-* A tensor ``t`` is an element from the
- [tensor product](https://en.wikipedia.org/wiki/Tensor_product) of ``N`` vector spaces
- ``V_1 , V_2, …, V_N``, where ``N`` is referred to as the *rank* or *order* of the
- tensor, i.e.
-
- ``t ∈ V_1 ⊗ V_2 ⊗ … ⊗ V_N.``
-
-If you think of a tensor as an object with indices, a rank ``N`` tensor has ``N`` indices
-where every index is associated with the corresponding vector space in that it labels a
-particular basis in that space. We will return to index notation at the very end of this
-manual.
-
-As the tensor product of vector spaces is itself a vector space, this implies that a tensor
-behaves as a vector, i.e. tensors from the same tensor product space can be added and
-multiplied by scalars. The tensor product is only defined for vector spaces over the same
-field of scalars, e.g. there is no meaning in ``ℝ^5 ⊗ ℂ^3``. When all the vector spaces in
-the tensor product have an inner product, this also implies an inner product for the tensor
-product space. It is hence clear that the different vector spaces in the tensor product
-should have some form of homogeneity in their structure, yet they do not need to be all
-equal and can e.g. have different dimensions. It goes without saying that defining the
-vector spaces and their properties will be an important part of the definition of a tensor.
-As a consequence, this also constitutes a significant part of the implementation, and is
-discussed in the section on [Vector spaces](@ref s_spaces).
-
-Aside from the interpretation of a tensor as a vector, we also want to interpret it as a
-matrix (or more correctly, a linear map) in order to decompose tensors using linear algebra
-factorisations (e.g. eigenvalue or singular value decomposition). Henceforth, we use the
-term "tensor map" as follows:
-
-* A tensor map ``t`` is a linear map from a source or *domain*
- ``W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2}`` to a target or *codomain* ``V_1 ⊗ V_2 ⊗ … ⊗ V_{N_1}``, i.e.
-
- ``t:W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2} → V_1 ⊗ V_2 ⊗ … ⊗ V_{N_1}.``
-
-A *tensor* of rank ``N`` is then just a special case of a tensor map with ``N_1 = N`` and
-``N_2 = 0``. A contraction between two tensors is just a composition of linear maps (i.e.
-matrix multiplication), where the contracted indices correspond to the domain of the first
-tensor and the codomain of the second tensor.
-
-In order to allow for arbitrary tensor contractions or decompositions, we need to be able to
-reorganise which vector spaces appear in the domain and the codomain of the tensor map, and
-in which order. This amounts to defining canonical isomorphisms between the different ways
-to order and partition the tensor indices (i.e. the vector spaces). For example, a linear
-map ``W → V`` is often denoted as a rank 2 tensor in ``V ⊗ W^*``, where ``W^*`` corresponds
-to the dual space of ``W``. This simple example introduces two new concepts.
-
-1. Typical vector spaces can appear in the domain and codomain in different related forms,
- e.g. as normal space or dual space. In fact, the most generic case is that every vector
- space ``V`` has associated with it
- a [dual space](https://en.wikipedia.org/wiki/Dual_space) ``V^*``,
- a [conjugate space](https://en.wikipedia.org/wiki/Complex_conjugate_vector_space)
- ``\overline{V}`` and a conjugate dual space ``\overline{V}^*``. The four different
- vector spaces ``V``, ``V^*``, ``\overline{V}`` and ``\overline{V}^*`` correspond to the
- representation spaces of respectively the fundamental, dual or contragredient, complex
- conjugate and dual complex conjugate representation of the general linear group
- ``\mathsf{GL}(V)``. In index notation these spaces are denoted with respectively
- contravariant (upper), covariant (lower), dotted contravariant and dotted covariant
- indices.
-
- For real vector spaces, the conjugate (dual) space is identical to the normal (dual)
- space and we only have upper and lower indices, i.e. this is the setting of e.g.
- general relativity. For (complex) vector spaces with a sesquilinear inner product
- ``\overline{V} ⊗ V → ℂ``, the inner product allows to define an isomorphism from the
- conjugate space to the dual space (known as
- [Riesz representation theorem](https://en.wikipedia.org/wiki/Riesz_representation_theorem)
- in the more general context of Hilbert spaces).
-
- In particular, in spaces with a Euclidean inner product (the setting of e.g. quantum
- mechanics), the conjugate and dual space are naturally isomorphic (because the dual and
- conjugate representation of the unitary group are the same). Again we only need upper
- and lower indices (or kets and bras).
-
- Finally, in ``ℝ^d`` with a Euclidean inner product, these four different spaces are all
- equivalent and we only need one type of index. The space is completely characterized by
- its dimension ``d``. This is the setting of much of classical mechanics and we refer to
- such tensors as cartesian tensors and the corresponding space as cartesian space. These
- are the tensors that can equally well be represented as multidimensional arrays (i.e.
- using some `AbstractArray{<:Real,N}` in Julia) without loss of structure.
+At the very start we should ponder about the most suitable and sufficiently general definition of a tensor.
+A good starting point is the following:
+
+* A tensor ``t`` is an element from the [tensor product](https://en.wikipedia.org/wiki/Tensor_product) of ``N`` vector spaces ``V_1 , V_2, …, V_N``, where ``N`` is referred to as the *rank* or *order* of the tensor, i.e.
+
+```math
+t \in V_1 ⊗ V_2 ⊗ … ⊗ V_N.
+```
+
+If you think of a tensor as an object with indices, a rank ``N`` tensor has ``N`` indices where every index is associated with the corresponding vector space in that it labels a particular basis in that space.
+We will return to index notation at the very end of this manual.
+
+As the tensor product of vector spaces is itself a vector space, this implies that a tensor behaves as a vector, i.e. tensors from the same tensor product space can be added and multiplied by scalars.
+The tensor product is only defined for vector spaces over the same field of scalars, e.g. there is no meaning in ``ℝ^5 ⊗ ℂ^3``.
+When all the vector spaces in the tensor product have an inner product, this also implies an inner product for the tensor product space.
+It is hence clear that the different vector spaces in the tensor product should have some form of homogeneity in their structure, yet they do not need to be all equal and can e.g. have different dimensions.
+It goes without saying that defining the vector spaces and their properties will be an important part of the definition of a tensor.
+As a consequence, this also constitutes a significant part of the implementation, and is discussed in the section on [Vector spaces](@ref s_spaces).
+
+Aside from the interpretation of a tensor as a vector, we also want to interpret it as a matrix (or more correctly, a linear map) in order to decompose tensors using linear algebra factorisations (e.g. eigenvalue or singular value decomposition).
+Henceforth, we use the term "tensor map" as follows:
+
+* A tensor map ``t`` is a linear map from a source or *domain* ``W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2}`` to a target or *codomain* ``V_1 ⊗ V_2 ⊗ … ⊗ V_{N_1}``, i.e.
+```math
+t : W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2} → V_1 ⊗ V_2 ⊗ … ⊗ V_{N_1}.
+```
+
+A *tensor* of rank ``N`` is then just a special case of a tensor map with ``N_1 = N`` and ``N_2 = 0``.
+A contraction between two tensors is just a composition of linear maps (i.e. matrix multiplication), where the contracted indices correspond to the domain of the first tensor and the codomain of the second tensor.
+
+In order to allow for arbitrary tensor contractions or decompositions, we need to be able to reorganise which vector spaces appear in the domain and the codomain of the tensor map, and in which order.
+This amounts to defining canonical isomorphisms between the different ways to order and partition the tensor indices (i.e. the vector spaces).
+For example, a linear map ``W → V`` is often denoted as a rank two tensor in ``V ⊗ W^*``, where ``W^*`` corresponds to the dual space of ``W``.
+This simple example introduces two new concepts.
+
+1. Typical vector spaces can appear in the domain and codomain in different related forms, e.g. as normal spaces or dual spaces.
+ In fact, the most generic case is that every vector space ``V`` has associated with it a [dual space](https://en.wikipedia.org/wiki/Dual_space) ``V^*``, a [conjugate space](https://en.wikipedia.org/wiki/Complex_conjugate_vector_space) ``\overline{V}`` and a conjugate dual space ``\overline{V}^*``.
+ The four different vector spaces ``V``, ``V^*``, ``\overline{V}`` and ``\overline{V}^*`` correspond to the representation spaces of respectively the fundamental, dual or contragredient, complex conjugate and dual complex conjugate representation of the general linear group ``\mathsf{GL}(V)``.
+ In index notation these spaces are denoted with respectively contravariant (upper), covariant (lower), dotted contravariant and dotted covariant indices.
+
+ For real vector spaces, the conjugate (dual) space is identical to the normal (dual) space and we only have upper and lower indices, i.e. this is the setting of e.g. general relativity.
+ For (complex) vector spaces with a sesquilinear inner product ``\overline{V} ⊗ V → ℂ``, the inner product allows to define an isomorphism from the conjugate space to the dual space (known as [Riesz representation theorem](https://en.wikipedia.org/wiki/Riesz_representation_theorem) in the more general context of Hilbert spaces).
+
+ In particular, in spaces with a Euclidean inner product (the setting of e.g. quantum mechanics), the conjugate and dual space are naturally isomorphic (because the dual and conjugate representation of the unitary group are the same).
+ Again we only need upper and lower indices (or kets and bras).
+
+ Finally, in ``ℝ^d`` with a Euclidean inner product, these four different spaces are all equivalent and we only need one type of index.
+ The space is completely characterized by its dimension ``d``.
+ This is the setting of much of classical mechanics and we refer to such tensors as cartesian tensors and the corresponding space as cartesian space.
+ These are the tensors that can equally well be represented as multidimensional arrays (i.e. using some `AbstractArray{<:Real, N}` in Julia) without loss of structure.
The implementation of all of this is discussed in [Vector spaces](@ref s_spaces).
-2. In the generic case, the identification between maps ``W → V`` and tensors in
- ``V ⊗ W^*`` is not an equivalence but an isomorphism, which needs to be defined.
- Similarly, there is an isomorphism between between ``V ⊗ W`` and ``W ⊗ V`` that can be
- non-trivial (e.g. in the case of fermions / super vector spaces). The correct formalism
- here is provided by theory of monoidal categories, the details of which are explained
- in the appendix. Nonetheless, we try to hide these canonical isomorphisms from the user
- wherever possible, and one does not need to know category theory to be able to use this
- package.
+2. In the generic case, the identification between maps ``W → V`` and tensors in ``V ⊗ W^*`` is not an equivalence but an isomorphism, which needs to be defined.
+   Similarly, there is an isomorphism between ``V ⊗ W`` and ``W ⊗ V`` that can be non-trivial (e.g. in the case of fermions / super vector spaces).
+ The correct formalism here is provided by theory of monoidal categories, the details of which are explained in the appendix.
+ Nonetheless, we try to hide these canonical isomorphisms from the user wherever possible, and one does not need to know category theory to be able to use this package.
This brings us to our final (yet formal) definition
-* A tensor (map) is a homomorphism between two objects from the category ``\mathbf{Vect}``
- (or some subcategory thereof). In practice, this will be ``\mathbf{FinVect}``, the
- category of finite dimensional vector spaces. More generally even, our concept of a
- tensor makes sense, in principle, for any linear (a.k.a. ``\mathbf{Vect}``-enriched)
- monoidal category. For more details, we refer the curious reader to the appendix on
- "[Monoidal categories and their properties](@ref s_categories)".
+* A tensor (map) is a homomorphism between two objects from the category ``\mathbf{Vect}`` (or some subcategory thereof).
+ In practice, this will be ``\mathbf{FinVect}``, the category of finite dimensional vector spaces.
+ More generally even, our concept of a tensor makes sense, in principle, for any linear (a.k.a. ``\mathbf{Vect}``-enriched) monoidal category.
+ For more details, we refer the curious reader to the appendix on [Monoidal categories and their properties](@ref s_categories).
## [Symmetries and block sparsity](@id ss_symmetries)
-Physical problems often have some symmetry, i.e. the setup is invariant under the action of
-a group ``\mathsf{G}`` which acts on the vector spaces ``V`` in the problem according to a
-certain representation. Having quantum mechanics in mind, TensorKit.jl is so far restricted
-to unitary representations. A general representation space ``V`` can be specified as the
-number of times every irreducible representation (irrep) ``a`` of ``\mathsf{G}`` appears,
-i.e.
-
-``V = \bigoplus_{a} ℂ^{n_a} ⊗ R_a``
-
-with ``R_a`` the space associated with irrep ``a`` of ``\mathsf{G}``, which itself has
-dimension ``d_a`` (often called the quantum dimension), and ``n_a`` the number of times
-this irrep appears in ``V``. If the unitary irrep ``a`` for ``g ∈ \mathsf{G}`` is given by
-``u_a(g)``, then there exists a specific basis for ``V`` such that the group action of
-``\mathsf{G}`` on ``V`` is given by the unitary representation
-
-``u(g) = \bigoplus_{a} 𝟙_{n_a} ⊗ u_a(g)``
-
-with ``𝟙_{n_a}`` the ``n_a × n_a`` identity matrix. The total dimension of ``V`` is given
-by ``∑_a n_a d_a``.
-
-The reason for implementing symmetries is to exploit the computation and memory gains
-resulting from restricting to tensor maps ``t:W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2} → V_1 ⊗ V_2 ⊗ … ⊗
-V_{N_1}`` that are equivariant under the symmetry, i.e. that act as
-[intertwiners](https://en.wikipedia.org/wiki/Equivariant_map#Representation_theory)
-between the symmetry action on the domain and the codomain. Indeed, such tensors should be
-block diagonal because of [Schur's lemma](https://en.wikipedia.org/wiki/Schur%27s_lemma),
-but only after we couple the individual irreps in the spaces ``W_i`` to a joint irrep,
-which is then again split into the individual irreps of the spaces ``V_i``. The basis
-change from the tensor product of irreps in the (co)domain to the joint irrep is implemented
-by a sequence of Clebsch–Gordan coefficients, also known as a fusion (or splitting) tree.
-We implement the necessary machinery to manipulate these fusion trees under index
-permutations and repartitions for arbitrary groups ``\mathsf{G}``. In particular, this fits
-with the formalism of monoidal categories, and more specifically fusion categories,
-and only requires the *topological* data of the group, i.e. the fusion rules of the irreps,
-their quantum dimensions and the F-symbol (6j-symbol or more precisely Racah's W-symbol in
-the case of ``\mathsf{SU}_2``). In particular, we don't actually need the Clebsch–Gordan
-coefficients themselves (but they can be useful for checking purposes).
-
-Hence, a second major part of TensorKit.jl is the interface and implementation for
-specifying symmetries, and further details are provided in
-[Sectors, representation spaces and fusion trees](@ref s_sectorsrepfusion).
+Physical problems often have some symmetry, i.e. the setup is invariant under the action of a group ``\mathsf{G}`` which acts on the vector spaces ``V`` in the problem according to a certain representation.
+Having quantum mechanics in mind, TensorKit.jl is so far restricted to unitary representations.
+A general representation space ``V`` can be specified as the number of times every irreducible representation (irrep) ``a`` of ``\mathsf{G}`` appears, i.e.
+
+```math
+V = \bigoplus_{a} ℂ^{n_a} ⊗ R_a
+```
+
+with ``R_a`` the space associated with irrep ``a`` of ``\mathsf{G}``, which itself has dimension ``d_a`` (often called the quantum dimension), and ``n_a`` the number of times this irrep appears in ``V``.
+If the unitary irrep ``a`` for ``g ∈ \mathsf{G}`` is given by ``u_a(g)``, then there exists a specific basis for ``V`` such that the group action of ``\mathsf{G}`` on ``V`` is given by the unitary representation
+
+```math
+u(g) = \bigoplus_{a} 𝟙_{n_a} ⊗ u_a(g)
+```
+
+with ``𝟙_{n_a}`` the ``n_a × n_a`` identity matrix.
+The total dimension of ``V`` is given by ``∑_a n_a d_a``.
+
+The reason for implementing symmetries is to exploit the computation and memory gains resulting from restricting to tensor maps ``t:W_1 ⊗ W_2 ⊗ … ⊗ W_{N_2} → V_1 ⊗ V_2 ⊗ … ⊗ V_{N_1}`` that are equivariant under the symmetry, i.e. that act as [intertwiners](https://en.wikipedia.org/wiki/Equivariant_map#Representation_theory) between the symmetry action on the domain and the codomain.
+Indeed, such tensors should be block diagonal because of [Schur's lemma](https://en.wikipedia.org/wiki/Schur%27s_lemma), but only after we couple the individual irreps in the spaces ``W_i`` to a joint irrep, which is then again split into the individual irreps of the spaces ``V_i``.
+The basis change from the tensor product of irreps in the (co)domain to the joint irrep is implemented by a sequence of Clebsch–Gordan coefficients, also known as a fusion (or splitting) tree.
+We implement the necessary machinery to manipulate these fusion trees under index permutations and repartitions for arbitrary groups ``\mathsf{G}``.
+In particular, this fits with the formalism of monoidal categories, and more specifically fusion categories, and only requires the *topological* data of the group, i.e. the fusion rules of the irreps, their quantum dimensions and the F-symbol (6j-symbol or more precisely Racah's W-symbol in the case of ``\mathsf{SU}_2``).
+In particular, we don't actually need the Clebsch–Gordan coefficients themselves (but they can be useful for checking purposes).
+
+Hence, a second major part of TensorKit.jl is the interface and implementation for specifying symmetries, and further details are provided in [Sectors, representation spaces and fusion trees](@ref s_sectorsrepfusion).
diff --git a/docs/src/man/sectors.md b/docs/src/man/sectors.md
index 73c1630a0..7d561a8a4 100644
--- a/docs/src/man/sectors.md
+++ b/docs/src/man/sectors.md
@@ -1,352 +1,254 @@
-# [Sectors, graded spaces and fusion trees](@id s_sectorsrepfusion)
+```@meta
+CollapsedDocStrings = true
+```
+
+# [Sectors](@id ss_sectors)
```@setup sectors
using TensorKit
-import LinearAlgebra
+using TensorKit.TensorKitSectors
```
-Symmetries in a physical system often result in tensors which are invariant under the action
-of the symmetry group, where this group acts as a tensor product of group actions on every
-tensor index separately. The group action on a single index, or thus, on the corresponding
-vector space, can be decomposed into irreducible representations (irreps). Here, we
-restrict to unitary representations, such that the corresponding vector spaces also have a
-natural Euclidean inner product. In particular, the Euclidean inner product between two
-vectors is invariant under the group action and thus transforms according to the trivial
-representation of the group.
-
-The corresponding vector spaces will be canonically represented as
-``V = ⨁_a ℂ^{n_a} ⊗ R_{a}``, where ``a`` labels the different irreps, ``n_a`` is the number
-of times irrep ``a`` appears and ``R_a`` is the vector space associated with irrep ``a``.
-Irreps are also known as spin sectors (in the case of ``\mathsf{SU}_2``) or charge sectors
-(in the case of ``\mathsf{U}_1``), and we henceforth refer to ``a`` as a sector. As
-discussed in the section on [categories](@ref s_categories), and briefly summarized below,
-the approach we follow does in fact go beyond the case of irreps of groups, and sectors
-would more generally correspond to simple objects in a unitary ribbon fusion category.
-Nonetheless, every step can be appreciated by using the representation theory of
-``\mathsf{SU}_2`` or ``\mathsf{SU}_3`` as example. For practical reasons, we assume that
-there is a canonical order of the sectors, so that the vector space ``V`` is
-completely specified by the values of ``n_a``.
-
-The gain in efficiency (both in memory occupation and computation time) obtained from using
-(technically: equivariant) tensor maps is that, by Schur's lemma, they are block diagonal in
-the basis of coupled sectors. To exploit this block diagonal form, it is however essential
-that we know the basis transformation from the individual (uncoupled) sectors appearing in
-the tensor product form of the domain and codomain, to the totally coupled sectors that
-label the different blocks. We refer to the latter as block sectors. The transformation from
-the uncoupled sectors in the domain (or codomain) of the tensor map to the block sector is
-encoded in a fusion tree (or splitting tree). Essentially, it is a sequential application of
-pairwise fusion as described by the group's
-[Clebsch–Gordan (CG) coefficients](https://en.wikipedia.org/wiki/Clebsch–Gordan_coefficients).
-However, it turns out that we do not need the actual CG coefficients, but only how they
-transform under transformations such as interchanging the order of the incoming irreps or
-interchanging incoming and outgoing irreps. This information is known as the topological
-data of the group, i.e. mainly the F-symbols, which are also known as recoupling
-coefficients or [6j-symbols](https://en.wikipedia.org/wiki/6-j_symbol) (more accurately, the
-F-symbol is actually
-[Racah's W-coefficients](https://en.wikipedia.org/wiki/Racah_W-coefficient) in the case of
-``\mathsf{SU}_2``).
-
-Below, we describe how to specify a certain type of sector and what information about them
-needs to be implemented. Then, we describe how to build a space ``V`` composed of a direct
-sum of different sectors. In the third section, we explain the details of fusion trees, i.e.
-their construction and manipulation. Finally, we elaborate on the case of general fusion
-categories and the possibility of having fermionic or anyonic twists. But first, we provide
-a quick theoretical overview of the required data of the representation theory of a group.
-We refer to the section on [categories](@ref s_categories), and in particular the
-subsection on [topological data of a unitary fusion category](@ref ss_topologicalfusion),
-for further details.
-
-## [Representation theory and unitary fusion categories](@id ss_representationtheory)
-
-Let the different irreps or sectors be labeled as ``a``, ``b``, ``c``, … First and foremost,
-we need to specify the *fusion rules* ``a ⊗ b = ⨁ N^{ab}_{c} c`` with ``N^{ab}_{c}`` some
-non-negative integers. There should always exists a unique trivial sector ``u`` (called the
-identity object ``I`` or ``1`` in the language of categories) such that
-``a ⊗ u = a = u ⊗ a``. Furthermore, there should exist a unique sector ``\bar{a}``
-such that ``N^{a\bar{a}}_{u} = 1``, whereas for all ``b \neq \bar{a}``,
-``N^{ab}_{u} = 0``. For unitary irreps of groups, ``\bar{a}`` corresponds to the
-complex conjugate of the representation ``a``, or a representation isomorphic to it. For
-example, for the representations of ``\mathsf{SU}_2``, the trivial sector corresponds to
-spin zero and all irreps are self-dual (i.e. ``a = \bar{a}``), meaning that the
-conjugate representation is isomorphic to the non-conjugated one (they are however not
-equal but related by a similarity transform).
-
-The meaning of the fusion rules is that the space of transformations ``R_a ⊗ R_b → R_c``
-(or vice versa) has dimension ``N^{ab}_c``. In particular, we assume the existence of a
-basis consisting of unitary tensor maps ``X^{ab}_{c,μ} : R_c → R_a ⊗ R_b`` with
-``μ = 1, …, N^{ab}_c`` such that
-
-``(X^{ab}_{c,μ})^† X^{ab}_{c,ν} = δ_{μ,ν} \mathrm{id}_{R_c}``
-
-and
-
-``\sum_{c} \sum_{μ = 1}^{N^{ab}_c} X^{ab}_{c,μ} (X^{ab}_{c,μ})^\dagger = \mathrm{id}_{R_a ⊗ R_b}``
-
-The tensors ``X^{ab}_{c,μ}`` are the splitting tensors, their hermitian conjugate are the
-fusion tensors. They are only determined up to a unitary basis transform within the space,
-i.e. acting on the multiplicity label ``μ = 1, …, N^{ab}_c``. For ``\mathsf{SU}_2``, where
-``N^{ab}_c`` is zero or one and the multiplicity labels are absent, the entries of
-``X^{ab}_{c,μ}`` are precisely given by the CG coefficients. The point is that we do not
-need to know the tensors ``X^{ab}_{c,μ}`` explicitly, but only the topological data of
-(the representation category of) the group, which describes the following transformation:
-
-* F-move or recoupling: the transformation between ``(a ⊗ b) ⊗ c`` to ``a ⊗ (b ⊗ c)``:
-
- ``(X^{ab}_{e,μ} ⊗ \mathrm{id}_c) ∘ X^{ec}_{d,ν} = ∑_{f,κ,λ} [F^{abc}_{d}]_{e,μν}^{f,κλ} (\mathrm{id}_a ⊗ X^{bc}_{f,κ}) ∘ X^{af}_{d,λ}``
-
-* [Braiding](@ref ss_braiding) or permuting as defined by
- ``τ_{a,b}: R_a ⊗ R_b → R_b ⊗ R_a``:
- ``τ_{R_a,R_b} ∘ X^{ab}_{c,μ} = ∑_{ν} [R^{ab}_c]^ν_μ X^{ba}_{c,ν}``
-
-The dimensions of the spaces ``R_a`` on which representation ``a`` acts are denoted as
-``d_a`` and referred to as quantum dimensions. In particular ``d_u = 1`` and
-``d_a = d_{\bar{a}}``. This information is also encoded in the F-symbol as
-``d_a = | [F^{a \bar{a} a}_a]^u_u |^{-1}``. Note that there are no multiplicity labels
-in that particular F-symbol as ``N^{a\bar{a}}_u = 1``.
-
-There is a graphical representation associated with the fusion tensors and their
-manipulations, which we summarize here:
-
-```@raw html
-
+The first ingredient in order to define and construct symmetric tensors, is a framework to define symmetry sectors and their associated fusion rules and topological data.
+[TensorKitSectors.jl](https://github.com/QuantumKitHub/TensorKitSectors.jl) defines an abstract supertype `Sector` that all sectors will be subtypes of
+
+```@docs; canonical=false
+Sector
```
-As always, we refer to the subsection on
-[topological data of a unitary fusion category](@ref ss_topologicalfusion) for further
-details.
+Any concrete subtype of `Sector` should be such that its instances represent a consistent set of sectors, corresponding to the irreps of some group, or, more generally, the simple objects of a (unitary) fusion category.
+Throughout TensorKit.jl, the method [`sectortype`](@ref) can be used to query the subtype of `Sector` associated with a particular object, i.e. a vector space, fusion tree, tensor map, or a sector.
+It works on both instances and in the type domain, and its use will be illustrated further on.
+
+## [Minimal sector interface](@id ss_sectorinterface)
-Finally, for the implementation, it will be useful to distinguish between a number of
-different possibilities regarding the fusion rules. If, for every ``a`` and ``b``, there is
-a unique ``c`` such that ``a ⊗ b = c`` (i.e. ``N^{ab}_{c} = 1`` and ``N^{ab}_{c′} = 0`` for
-all other ``c′``), the category is abelian. Indeed, the representations of a group have this
-property if and only if the group multiplication law is commutative. In that case, all
-spaces ``R_{a}`` associated with the representation are one-dimensional and thus trivial. In
-all other cases, the category is non-abelian. We find it useful to further distinguish
-between categories which have all ``N^{ab}_c`` equal to zero or one (such that no
-multiplicity labels are needed), e.g. the representations of ``\mathsf{SU}_2``, and those
-where some ``N^{ab}_c`` are larger than one, e.g. the representations of ``\mathsf{SU}_3``.
+The minimal data to completely specify a type of sector closely matches the [topological data](@ref ss_topologicalfusion) of a [fusion category](@ref ss_fusion) as reviewed in the appendix on [category theory](@ref s_categories), and is given by:
-## [Sectors](@id ss_sectors)
+* The fusion rules, i.e. `` a ⊗ b = ⨁ N^{ab}_{c} c ``, implemented as the function [`Nsymbol(a, b, c)`](@ref).
+* The list of fusion outputs from ``a ⊗ b``; while this information is contained in ``N^{ab}_c``, it might be costly or impossible to iterate over all possible values of `c` and test `Nsymbol(a,b,c)`; instead we require for [`a ⊗ b`](@ref), or equivalently, `otimes(a, b)`, to return an iterable object (e.g. tuple or array, but see [below](@ref ss_sectoradditionaltools) for a dedicated iterator struct) that generates all *unique* `c` for which ``N^{ab}_c ≠ 0`` (so only once for all ``c`` with ``N^{ab}_c ≥ 1``).
+* The identity object `u`, such that ``a ⊗ u = a = u ⊗ a``, implemented as the function [`unit(a)`](@ref) (and also in type domain), but `one(a)` from Julia Base also works as an alias to `unit(a)`.
+* The dual or conjugate object ``\overline{a}`` for which ``N^{a\bar{a}}_{u} = 1``, implemented as the function [`dual(a)`](@ref).
+ Because we restrict to unitary categories, `conj(a)` from the Julia `Base` library is also defined as an alias to `dual(a)`.
+* The F-symbol or recoupling coefficients ``[F^{abc}_{d}]^f_e``; implemented as the function [`Fsymbol(a, b, c, d, e, f)`](@ref).
+* If the category is braided (see below), the R-symbol ``R^{ab}_c``; implemented as the function [`Rsymbol(a, b, c)`](@ref).
+
+Furthermore, sectors should provide information about the structure of their fusion rules.
+For irreps of Abelian groups, we have that for every ``a`` and ``b``, there exists a unique ``c`` such that ``a ⊗ b = c``, i.e. there is only a single fusion channel.
+This follows simply from the fact that all irreps are one-dimensional.
+In all other cases, there is at least one pair (``a``, ``b``) such that ``a ⊗ b`` has multiple fusion outputs.
+This is often referred to as non-abelian fusion, and is the case for the irreps of a non-abelian group or some more general fusion category.
+We however still distinguish between the case where all entries of ``N^{ab}_c ≦ 1``, i.e. they are zero or one.
+In that case, ``[F^{abc}_{d}]^f_e`` and ``R^{ab}_c`` are scalars.
+If some ``N^{ab}_c > 1``, it means that the same sector ``c`` can appear more than once in the fusion product of ``a`` and ``b``, and we need to introduce some multiplicity label ``μ`` for the different copies, and ``[F^{abc}_{d}]^f_e`` and ``R^{ab}_c`` are respectively four- and two-dimensional arrays labelled by these multiplicity indices.
+To encode these different possibilities, we define a Holy-trait called [`FusionStyle`](@ref), i.e. a type hierarchy
-We introduce a new abstract type to represent different possible sectors
```julia
-abstract type Sector end
+abstract type FusionStyle end
+struct UniqueFusion <: FusionStyle end # unique fusion output when fusing two sectors
+abstract type MultipleFusion <: FusionStyle end
+struct SimpleFusion <: MultipleFusion end # multiple fusion but multiplicity free
+struct GenericFusion <: MultipleFusion end # multiple fusion with multiplicities
+const MultiplicityFreeFusion = Union{UniqueFusion, SimpleFusion}
```
-Any concrete subtype of `Sector` should be such that its instances represent a consistent
-set of sectors, corresponding to the irreps of some group, or, more generally, the simple
-objects of a (unitary) fusion category, as reviewed in the subsections on
-[fusion categories](@ref ss_fusion) and their [topological data](@ref ss_topologicalfusion)
-within the introduction to [category theory](@ref s_categories). Throughout TensorKit.jl,
-the method `sectortype` can be used to query the subtype of `Sector` associated with a
-particular object, i.e. a vector space, fusion tree, tensor map, or a sector. It works on
-both instances and in the type domain, and its use will be illustrated further on.
-
-The minimal data to completely specify a type of sector are
-* the fusion rules, i.e. `` a ⊗ b = ⨁ N^{ab}_{c} c ``; this is implemented by a function
- [`Nsymbol(a, b, c)`](@ref)
-* the list of fusion outputs from ``a ⊗ b``; while this information is contained in
- ``N^{ab}_c``, it might be costly or impossible to iterate over all possible values of
- `c` and test `Nsymbol(a,b,c)`; instead we implement for `a ⊗ b` to return an iterable
- object (e.g. tuple, array or a custom Julia type that listens to `Base.iterate`) and
- which generates all `c` for which ``N^{ab}_c ≠ 0`` (just once even if ``N^{ab}_c>1``)
-* the identity object `u`, such that ``a ⊗ u = a = u ⊗ a``; this is implemented by the
- function `one(a)` (and also in type domain) from Julia Base
-* the dual or conjugate representation ``\overline{a}`` for which
- ``N^{a\bar{a}}_{u} = 1``; this is implemented by `conj(a)` from Julia Base;
- `dual(a)` also works as alias, but `conj(a)` is the method that should be defined
-* the F-symbol or recoupling coefficients ``[F^{abc}_{d}]^f_e``, implemented as the
- function [`Fsymbol(a, b, c, d, e, f)`](@ref)
-* the R-symbol ``R^{ab}_c``, implemented as the function [`Rsymbol(a, b, c)`](@ref)
-For practical reasons, we also require some additional methods to be defined:
-* `isreal(::Type{<:Sector})` returns whether the topological data of this type of sector
- is real-valued or not (in which case it is complex-valued). Note that this does not
- necessarily require that the representation itself, or the Clebsch-Gordan coefficients,
- are real. There is a fallback implementation that checks whether the F-symbol and
- R-symbol evaluated with all sectors equal to the identity sector have real `eltype`.
-* `hash(a, h)` creates a hash of sectors, because sectors and objects created from them
- are used as keys in lookup tables (i.e. dictionaries)
-* `isless(a, b)` associates a canonical order to sectors (of the same type), in order to
- unambiguously represent representation spaces ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}``.
-
-Further information, such as the quantum dimensions ``d_a`` and Frobenius-Schur indicator
-``χ_a`` (only if ``a == \overline{a}``) are encoded in the F-symbol. They are obtained as
-[`dim(a)`](@ref) and [`frobenius_schur_phase(a)`](@ref). These functions have default definitions
-which extract the requested data from `Fsymbol(a, conj(a), a, a, one(a), one(a))`, but they
-can be overloaded in case the value can be computed more efficiently.
-
-We also define a parametric type to represent an indexable iterator over the different
-values of a sector as
+New sector types `I <: Sector` should then indicate which fusion style they have by defining `FusionStyle(::Type{I})`.
+
+In a similar manner, it is useful to distinguish between the structure and the different styles of the braiding of a sector type.
+Remember that for group representations, braiding acts as swapping or permuting the vector spaces involved.
+By definition, applying this operation twice leads us back to the original situation.
+If that is the case, the braiding is said to be symmetric.
+For more general fusion categories, associated with the physics of anyonic particles, this is generally not the case.
+Some categories do not even support a braiding rule, as this requires at least that ``a ⊗ b`` and ``b ⊗ a`` have the same fusion outputs for every ``a`` and ``b``.
+When braiding is possible, it might not be symmetric, and as a result, permutations of tensor indices are not unambiguously defined.
+The correct description is in terms of the braid group.
+This will be discussed in more detail below.
+Fermions are somewhat in between, as their braiding is symmetric, but they have a non-trivial *twist*.
+We thereto define a new trait [`BraidingStyle`](@ref) with associated the type hierarchy
+
```julia
-struct SectorValues{I<:Sector} end
-Base.IteratorEltype(::Type{<:SectorValues}) = HasEltype()
-Base.eltype(::Type{SectorValues{I}}) where {I<:Sector} = I
-Base.values(::Type{I}) where {I<:Sector} = SectorValues{I}()
+abstract type HasBraiding <: BraidingStyle end
+struct NoBraiding <: BraidingStyle end
+abstract type SymmetricBraiding <: HasBraiding end # symmetric braiding => actions of permutation group are well defined
+struct Bosonic <: SymmetricBraiding end # all twists are one
+struct Fermionic <: SymmetricBraiding end # twists one and minus one
+struct Anyonic <: HasBraiding end
```
-Note that an instance of the singleton type `SectorValues{I}` is obtained as `values(I)`.
-A new sector `I<:Sector` should define
+
+New sector types `I <: Sector` should then indicate which braiding style they have by defining `BraidingStyle(::Type{I})`.
+
+Note that `Bosonic()` braiding does not mean that all permutations are trivial and ``R^{ab}_c = 1``, but that ``R^{ab}_c R^{ba}_c = 1``.
+For example, for the irreps of ``\mathsf{SU}_2``, the R-symbol associated with the fusion of two spin-1/2 particles to spin zero is ``-1``, i.e. the singlet of two spin-1/2 particles is antisymmetric under swapping the two constituents.
+For a `Bosonic()` braiding style, all twists are simply ``+1``. The case of fermions and anyons are discussed below.
+
+For practical reasons, we also require some additional methods to be defined:
+* `hash(a, h)` creates a hash of sectors, because sectors and objects created from them are used as keys in lookup tables (i.e. dictionaries).
+ Julia provides a default implementation of `hash` for every new type, but it can be useful to overload it for efficiency, or to ensure that the same hash is obtained for different instances that represent the same sector (e.g. when the sector type is not a bitstype).
+* `isless(a, b)` associates a canonical order to sectors (of the same type), in order to unambiguously represent representation spaces ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}``.
+
+Lastly, we sometimes need to iterate over different values of a sector type `I <: Sector`, or at least have some basic information about the number of possible values of `I`.
+Hereto, TensorKitSectors.jl defines `Base.values(I::Type{<:Sector})` to return the singleton instance of the parametric type [`SectorValues{I}`](@ref), which should behave as an iterator over all possible values of the sector type `I`.
+This means the following methods should be implemented for a new sector type `I <: Sector`:
+
+* `Base.iterate(::SectorValues{I} [, state])` should implement the iterator interface so as to enable iterating over all values of the sector `I` according to the canonical order defined by `isless`.
+* `Base.IteratorSize(::Type{SectorValues{I}})` should return `HasLength()` if the number of different values of sector `I` is finite and rather small, and `SizeUnknown()` or `IsInfinite()` otherwise.
+ This is used to encode the degeneracies of the different sectors in a `GradedSpace` object efficiently, as discussed in the next section on [Graded spaces](@ref ss_rep).
+* If `IteratorSize(::Type{SectorValues{I}}) == HasLength()`, then `Base.length(::SectorValues{I})` should return the number of different values of sector `I`.
+
+Furthermore, the standard definitions `Base.IteratorEltype(::Type{SectorValues{I}}) = HasEltype()` and `Base.eltype(::Type{SectorValues{I}}) = I` are provided by default in TensorKitSectors.jl.
+
+!!! note
+ A recent update in TensorKitSectors.jl has extended the minimal interface to also support multi-fusion categories, for which in particular the unit object is non-simple.
+ We do not discuss this extension here, but refer to the documentation of [`UnitStyle`](@ref), [`leftunit`](@ref), [`rightunit`](@ref) and [`allunits`](@ref) for more details.
+
+## [Additional methods](@id ss_sectoradditional)
+
+The sector interface contains a number of additional methods, that are useful, but whose return value can be computed from the minimal interface defined in the previous subsection.
+However, new sector types can override these default fallbacks with more efficient implementations.
+
+Firstly, the canonical order of sectors allows to enumerate the different values, and thus to associate each value with an integer.
+Hereto, the following methods are defined:
+
+* `Base.getindex(::SectorValues{I}, i::Int)`: returns the sector instance of type `I` that is associated with integer `i`.
+ The fallback implementation simply iterates through `values(I)` up to the `i`th value.
+* `findindex(::SectorValues{I}, c::I)`: reverse mapping that associates an index `i::Integer ∈ 1:length(values(I))` to a given sector `c::I`.
+ The fallback implementation simply searches linearly through the `values(I)` iterator.
+
+Note that `findindex` acts similar to `Base.indexin`, but with the order of the arguments reversed (so that it is more similar to `getindex`), and returns an `Int` rather than an `Array{Union{Int, Nothing}, 0}`.
+
+Secondly, it is often useful to know the scalar type in which the topological data in the F- and R-symbols are expressed.
+For this, the method [`sectorscalartype(I::Type{<:Sector})`](@ref) is provided, which has a default implementation that uses type inference on the return values of `Fsymbol` and `Rsymbol`.
+This function is also used to define `Base.isreal(I::Type{<:Sector})`, which indicates whether all topological data are real numbers.
+This is important because, if complex numbers appear in the topological data, it means tensor data will necessarily become complex after simple manipulations such as permuting indices, and should therefore probably be stored as complex numbers from the start.
+
+Finally, additional topological data can be extracted from the minimal interface.
+In particular, the quantum dimension ``d_a``, the Frobenius-Schur phase ``χ_a`` and the Frobenius-Schur indicator (the latter only defined if ``a == \overline{a}``) are encoded in the F-symbol.
+They are obtained as [`dim(a)`](@ref), [`frobenius_schur_phase(a)`](@ref) and [`frobenius_schur_indicator(a)`](@ref).
+These functions have default definitions which compute the requested data from `Fsymbol(a, conj(a), a, a, unit(a), unit(a))`, but they can be overloaded in case the value can be computed more efficiently.
+The same holds for related fusion manipulations such as the B-symbol, which is obtained as [`Bsymbol(a, b, c)`](@ref).
+Finally, the twist associated with a sector `a` is obtained as [`twist(a)`](@ref), which also has a default implementation in terms of the R-symbol.
+In addition, the function `isunit` is provided to facilitate checking whether a sector is a unit sector, in particular for the non-trivial case of multi-fusion categories, which we do not discuss here.
+
+## [Additional tools](@id ss_sectoradditionaltools)
+
+The fusion product `a ⊗ b` of two sectors `a` and `b` is required to return an iterable object that generates all unique fusion outputs `c` for which ``N^{ab}_c > 0``.
+When this list can easily be computed or constructed, it can be returned as a tuple or an array.
+However, when taking type stability and (memory) efficiency into account, it is often preferable to return a lazy iterator object that generates the different fusion outputs on the fly.
+Indeed, a tuple result is only type stable when the number of fusion outputs is constant for all possible inputs `a` and `b`, whereas a `Vector` result requires heap allocation.
+
+By default, [TensorKitSectors.jl](https://github.com/QuantumKitHub/TensorKitSectors.jl) defines
```julia
-Base.iterate(::SectorValues{I}[, state]) = ...
-Base.IteratorSize(::Type{SectorValues{I}}) = # HasLength() or IsInfinite()
-# if previous function returns HasLength():
-Base.length(::SectorValues{I}) = ...
-Base.getindex(::SectorValues{I}, i::Int) = ...
-findindex(::SectorValues{I}, c::I) = ...
+⊗(a::I, b::I) where {I <: Sector} = SectorProductIterator(a, b)
```
-If the number of values in a sector `I` is finite (i.e.
-`IteratorSize(values(I)) == HasLength()`), the methods `getindex` and `findindex` provide a
-way to map the different sector values from and to the standard range 1, 2, …,
-`length(values(I))`. This is used to efficiently represent `GradedSpace`
-objects for this type of sector, as discussed in the next section on
-[Graded spaces](@ref ss_rep). Note that `findindex` acts similar to `Base.indexin`,
-but with the order of the arguments reversed (so that is more similar to `getindex`), and
-returns an `Int` rather than an `Array{0,Union{Int,Nothing}}`.
-
-It is useful to distinguish between three cases with respect to the fusion rules. For irreps
-of Abelian groups, we have that for every ``a`` and ``b``, there exists a unique ``c`` such
-that ``a ⊗ b = c``, i.e. there is only a single fusion channel. This follows simply from the
-fact that all irreps are one-dimensional. All other cases are referred to as non-abelian,
-i.e. the irreps of a non-abelian group or some more general fusion category. We still
-distinguish between the case where all entries of ``N^{ab}_c ≦ 1``, i.e. they are zero or
-one. In that case, ``[F^{abc}_{d}]^f_e`` and ``R^{ab}_c`` are scalars. If some
-``N^{ab}_c > 1``, it means that the same sector ``c`` can appear more than once in the
-fusion product of ``a`` and ``b``, and we need to introduce some multiplicity label ``μ``
-for the different copies. We implement a "trait" (similar to `IndexStyle` for
-`AbstractArray`s in Julia Base), i.e. a type hierarchy
-```julia
-abstract type FusionStyle end
-struct UniqueFusion <: FusionStyle # unique fusion output when fusion two sectors
-end
-abstract type MultipleFusion <: FusionStyle end
-struct SimpleFusion <: MultipleFusion # multiple fusion but multiplicity free
-end
-struct GenericFusion <: MultipleFusion # multiple fusion with multiplicities
-end
-const MultiplicityFreeFusion = Union{UniqueFusion, SimpleFusion}
+
+where [`TensorKitSectors.SectorProductIterator`](@ref) is defined as
+
+```@docs; canonical=false
+TensorKitSectors.SectorProductIterator
```
-New sector types `I<:Sector` should then indicate which fusion style they have by defining
-`FusionStyle(::Type{I})`.
-
-In a similar manner, it is useful to distinguish between different styles of braiding.
-Remember that for group representations, braiding acts as swapping or permuting the vector
-spaces involved. By definition, applying this operation twice leads us back to the original
-situation. If that is the case, the braiding is said to be symmetric. For more general
-fusion categories, associated with the physics of anyonic particles, this is generally not
-the case and, as a result, permutations of tensor indices are not unambiguously defined.
-The correct description is in terms of the braid group. This will be discussed in more
-detail below. Fermions are somewhat in between, as their braiding is symmetric, but they
-have a non-trivial *twist*. We thereto define a new type hierarchy
+
+and can serve as a general iterator type.
+For defining the fusion rules of a sector `I`, instead of implementing `⊗(::I, ::I)` directly, it is thus possible to instead implement the iterator interface for `SectorProductIterator{I}`, i.e. provide definitions for
+
+* `Base.iterate(::SectorProductIterator{I}[, state])`
+* `Base.IteratorSize(::Type{SectorProductIterator{I}})`
+* `Base.length(::SectorProductIterator{I})` (if applicable)
+
+[TensorKitSectors.jl](https://github.com/QuantumKitHub/TensorKitSectors.jl) already defines
```julia
-abstract type BraidingStyle end # generic braiding
-abstract type SymmetricBraiding <: BraidingStyle end
-struct Bosonic <: SymmetricBraiding end
-struct Fermionic <: SymmetricBraiding end
-struct Anyonic <: BraidingStyle end
+Base.eltype(::Type{SectorProductIterator{I}}) where {I} = I
```
-New sector types `I<:Sector` should then indicate which fusion style they have by defining
-`BraidingStyle(::Type{I})`. Note that `Bosonic()` braiding does not mean that all
-permutations are trivial and ``R^{ab}_c = 1``, but that ``R^{ab}_c R^{ba}_c = 1``. For
-example, for the irreps of ``\mathsf{SU}_2``, the R-symbol associated with the fusion of
-two spin-1/2 particles to spin zero is ``-1``, i.e. the singlet of two spin-1/2 particles
-is antisymmetric. For a `Bosonic()` braiding style, all twists are simply ``+1``. The case
-of fermions and anyons are discussed below.
-
-Before discussing in more detail how a new sector type should be implemented, let us study
-the cases which have already been implemented. Currently, they all correspond to the irreps
-of groups.
-
-### [Existing group representations](@id sss_groups)
-
-The first sector type is called `Trivial`, and corresponds to the case where there is
-actually no symmetry, or thus, the symmetry is the trivial group with only an identity
-operation and a trivial representation. Its representation theory is particularly simple:
+and sets `Base.IteratorEltype(::Type{SectorProductIterator{I}})` accordingly.
+Furthermore, it provides custom pretty printing, so that `SectorProductIterator{I}(a, b)` is displayed as `a ⊗ b`.
+
+## [Group representations](@id ss_groups)
+
+In this subsection, we give an overview of some existing sector types provided by [TensorKitSectors.jl](https://github.com/QuantumKitHub/TensorKitSectors.jl).
+We also discuss the implementation of some of them in more detail, in order to illustrate the interface defined above.
+
+The first sector type is called `Trivial`, and corresponds to the case where there is actually no symmetry, or thus, the symmetry is the trivial group with only an identity operation and a trivial representation.
+Its representation theory is particularly simple:
```julia
-struct Trivial <: Sector
-end
-Base.one(a::Sector) = one(typeof(a))
-Base.one(::Type{Trivial}) = Trivial()
-Base.conj(::Trivial) = Trivial()
+struct Trivial <: Sector end
+
+# basic properties
+unit(::Type{Trivial}) = Trivial()
+dual(::Trivial) = Trivial()
+Base.isless(::Trivial, ::Trivial) = false
+
+# fusion rules
⊗(::Trivial, ::Trivial) = (Trivial(),)
Nsymbol(::Trivial, ::Trivial, ::Trivial) = true
+FusionStyle(::Type{Trivial}) = UniqueFusion()
Fsymbol(::Trivial, ::Trivial, ::Trivial, ::Trivial, ::Trivial, ::Trivial) = 1
+
+# braiding rules
Rsymbol(::Trivial, ::Trivial, ::Trivial) = 1
-Base.isreal(::Type{Trivial}) = true
-FusionStyle(::Type{Trivial}) = UniqueFusion()
BraidingStyle(::Type{Trivial}) = Bosonic()
-```
-The `Trivial` sector type is special cased in the construction of tensors, so that most of
-these definitions are not actually used.
-The most important class of sectors are irreducible representations of groups, for which we
-have an abstract supertype `Irrep{G}` that is parameterized on the type of group `G`. While
-the specific implementations of `Irrep{G}` depend on `G`, one can easily obtain the
-concrete type without knowing its name as `Irrep[G]`.
+# values iterator
+Base.IteratorSize(::Type{SectorValues{Trivial}}) = HasLength()
+Base.length(::SectorValues{Trivial}) = 1
+Base.iterate(::SectorValues{Trivial}, i = false) = return i ? nothing : (Trivial(), true)
+function Base.getindex(::SectorValues{Trivial}, i::Int)
+ return i == 1 ? Trivial() : throw(BoundsError(values(Trivial), i))
+end
+findindex(::SectorValues{Trivial}, c::Trivial) = 1
+```
+The `Trivial` sector type is special cased in the construction of tensors, so that most of these definitions are not actually used.
-A number of groups have been defined, namely
+The most important class of sectors are irreducible representations of groups.
+As we often use the group itself as a type parameter, an associated type hierarchy for groups has been defined, namely
```julia
abstract type Group end
abstract type AbelianGroup <: Group end
-abstract type ℤ{N} <: AbelianGroup end
+abstract type Cyclic{N} <: AbelianGroup end
+abstract type Dihedral{N} <: Group end
abstract type U₁ <: AbelianGroup end
-abstract type SU{N} <: Group end
abstract type CU₁ <: Group end
+const ℤ{N} = Cyclic{N}
const ℤ₂ = ℤ{2}
const ℤ₃ = ℤ{3}
const ℤ₄ = ℤ{4}
+const D₃ = Dihedral{3}
+const D₄ = Dihedral{4}
const SU₂ = SU{2}
```
-Groups themselves are abstract types without any functionality (at least for now). We also
-provide a number of convenient Unicode aliases. These group names are probably self-
-explanatory, except for `CU₁` which is explained below.
+Groups themselves are abstract types without any functionality (at least for now).
+However, as will become clear instantly, it is useful to identify abelian groups, because their representation theory is particularly simple.
+We also provide a number of convenient Unicode aliases.
+These group names are probably self-explanatory, except for `CU₁` which is explained below.
-For all group irreps, the braiding style is bosonic
+Irreps of groups will then be defined as subtypes of the abstract type
```julia
abstract type AbstractIrrep{G<:Group} <: Sector end # irreps have integer quantum dimensions
BraidingStyle(::Type{<:AbstractIrrep}) = Bosonic()
```
-while we gather some more common functionality for irreps of abelian groups (which exhaust
-all possibilities of fusion categories with abelian fusion)
+
+We will need different data structures to represent irreps of different groups, but it would be convenient to easily obtain the relevant structure for a given group `G` in a uniform manner.
+Hereto, we define a singleton type `IrrepTable` with an associated exported constant `Irrep = IrrepTable()` as the only instance.
+When a concrete type for representing the irreps of a certain group `G` is implemented, this type can then be "discovered" or obtained as `Irrep[G]`, provided it was registered by defining `Base.getindex(::IrrepTable, ::Type{G})` to return the concrete type.
+
+Furthermore, we combine the more common functionality for irreps of abelian groups
```julia
-const AbelianIrrep{G} = AbstractIrrep{G} where {G<:AbelianGroup}
+const AbelianIrrep{G} = AbstractIrrep{G} where {G <: AbelianGroup}
FusionStyle(::Type{<:AbelianIrrep}) = UniqueFusion()
-Base.isreal(::Type{<:AbelianIrrep}) = true
-
-Nsymbol(a::I, b::I, c::I) where {I<:AbelianIrrep} = c == first(a ⊗ b)
-Fsymbol(a::I, b::I, c::I, d::I, e::I, f::I) where {I<:AbelianIrrep} =
- Int(Nsymbol(a, b, e) * Nsymbol(e, c, d) * Nsymbol(b, c, f) * Nsymbol(a, f, d))
-frobeniusschur(a::AbelianIrrep) = 1
-Bsymbol(a::I, b::I, c::I) where {I<:AbelianIrrep} = Int(Nsymbol(a, b, c))
-Rsymbol(a::I, b::I, c::I) where {I<:AbelianIrrep} = Int(Nsymbol(a, b, c))
-```
+Base.sectorscalartype(::Type{<:AbelianIrrep}) = Int
-With these common definition, we implement the representation theory of the two most common
-Abelian groups, namely ``ℤ_N``
-```julia
-struct ZNIrrep{N} <: AbstractIrrep{ℤ{N}}
- n::Int8
- function ZNIrrep{N}(n::Integer) where {N}
- @assert N < 64
- new{N}(mod(n, N))
- end
+Nsymbol(a::I, b::I, c::I) where {I <: AbelianIrrep} = c == first(a ⊗ b)
+function Fsymbol(a::I, b::I, c::I, d::I, e::I, f::I) where {I <: AbelianIrrep}
+ return Int(Nsymbol(a, b, e) * Nsymbol(e, c, d) * Nsymbol(b, c, f) * Nsymbol(a, f, d))
end
-Base.getindex(::IrrepTable, ::Type{ℤ{N}}) where N = ZNIrrep{N}
-Base.convert(Z::Type{<:ZNIrrep}, n::Real) = Z(n)
-
-Base.one(::Type{ZNIrrep{N}}) where {N} =ZNIrrep{N}(0)
-Base.conj(c::ZNIrrep{N}) where {N} = ZNIrrep{N}(-c.n)
-⊗(c1::ZNIrrep{N}, c2::ZNIrrep{N}) where {N} = (ZNIrrep{N}(c1.n+c2.n),)
-
-Base.IteratorSize(::Type{SectorValues{ZNIrrep{N}}}) where N = HasLength()
-Base.length(::SectorValues{ZNIrrep{N}}) where N = N
-Base.iterate(::SectorValues{ZNIrrep{N}}, i = 0) where N =
- return i == N ? nothing : (ZNIrrep{N}(i), i+1)
-Base.getindex(::SectorValues{ZNIrrep{N}}, i::Int) where N =
- 1 <= i <= N ? ZNIrrep{N}(i-1) : throw(BoundsError(values(ZNIrrep{N}), i))
-findindex(::SectorValues{ZNIrrep{N}}, c::ZNIrrep{N}) where N = c.n + 1
+frobenius_schur_phase(a::AbelianIrrep) = 1
+Asymbol(a::I, b::I, c::I) where {I <: AbelianIrrep} = Int(Nsymbol(a, b, c))
+Bsymbol(a::I, b::I, c::I) where {I <: AbelianIrrep} = Int(Nsymbol(a, b, c))
+Rsymbol(a::I, b::I, c::I) where {I <: AbelianIrrep} = Int(Nsymbol(a, b, c))
```
-and ``\mathsf{U}_1``
+
+With these common definitions in place, we implement the representation theory of the most common Abelian groups, starting with ``\mathsf{U}_1``, the full implementation of which is given by
+
```julia
struct U1Irrep <: AbstractIrrep{U₁}
charge::HalfInt
@@ -354,114 +256,170 @@ end
Base.getindex(::IrrepTable, ::Type{U₁}) = U1Irrep
Base.convert(::Type{U1Irrep}, c::Real) = U1Irrep(c)
-Base.one(::Type{U1Irrep}) = U1Irrep(0)
-Base.conj(c::U1Irrep) = U1Irrep(-c.charge)
-⊗(c1::U1Irrep, c2::U1Irrep) = (U1Irrep(c1.charge+c2.charge),)
+# basic properties
+charge(c::U1Irrep) = c.charge
+unit(::Type{U1Irrep}) = U1Irrep(0)
+dual(c::U1Irrep) = U1Irrep(-charge(c))
+@inline function Base.isless(c1::U1Irrep, c2::U1Irrep)
+ return isless(abs(charge(c1)), abs(charge(c2))) || zero(HalfInt) < charge(c1) == -charge(c2)
+end
+
+# fusion rules
+⊗(c1::U1Irrep, c2::U1Irrep) = (U1Irrep(charge(c1) + charge(c2)),)
+# values iterator
Base.IteratorSize(::Type{SectorValues{U1Irrep}}) = IsInfinite()
-Base.iterate(::SectorValues{U1Irrep}, i = 0) =
+function Base.iterate(::SectorValues{U1Irrep}, i::Int = 0)
return i <= 0 ? (U1Irrep(half(i)), (-i + 1)) : (U1Irrep(half(i)), -i)
-# the following are not used and thus not really necessary
+end
function Base.getindex(::SectorValues{U1Irrep}, i::Int)
i < 1 && throw(BoundsError(values(U1Irrep), i))
- return U1Irrep(iseven(i) ? half(i>>1) : -half(i>>1))
+ return U1Irrep(iseven(i) ? half(i >> 1) : -half(i >> 1))
end
-findindex(::SectorValues{U1Irrep}, c::U1Irrep) = (n = twice(c.charge); 2*abs(n)+(n<=0))
+function findindex(::SectorValues{U1Irrep}, c::U1Irrep)
+ return (n = twice(charge(c)); 2 * abs(n) + (n <= 0))
+end
+
+# hashing
+Base.hash(c::U1Irrep, h::UInt) = hash(c.charge, h)
```
-The `getindex` definition just below the type definition provides the mechanism to get the
-concrete type as `Irrep[G]` for a given group `G`. Here, `IrrepTable` is the singleton type
-of which the constant `Irrep` is the only instance. The `Base.convert` definition allows to
-convert real numbers to the corresponding type of sector, and thus to omit the type
-information of the sector whenever this is clear from the context.
-
-In the definition of `U1Irrep`, `HalfInt<:Number` is a Julia type defined in
-[HalfIntegers.jl](https://github.com/sostock/HalfIntegers.jl), which is also used for
-`SU2Irrep` below, that stores integer or half integer numbers using twice their value.
-Strictly speaking, the linear representations of `U₁` can only have integer charges, and
-fractional charges lead to a projective representation. It can be useful to allow half
-integers in order to describe spin 1/2 systems with an axis rotation symmetry. As a user,
-you should not worry about the details of `HalfInt`, and additional methods for
-automatic conversion and pretty printing are provided, as illustrated by the following
-example
+
+A few comments are in order: The `getindex` definition just below the type definition provides the mechanism to obtain `U1Irrep` as `Irrep[U₁]`, as discussed above.
+The `Base.convert` definition, while not required by the minimal sector interface, allows to convert real numbers to the corresponding type of sector, and thus to omit the type information of the sector whenever this is clear from the context.
+The `charge` function is again not part of the minimal sector interface, and is specific to `U1Irrep` (and `ZNIrrep` discussed next), as a mere convenience function to access the charge value.
+Finally, in the definition of `U1Irrep`, `HalfInt <: Number` is a Julia type defined in [HalfIntegers.jl](https://github.com/sostock/HalfIntegers.jl), which is also used for `SU2Irrep` below, that stores integer or half integer numbers using twice their value.
+Strictly speaking, the linear representations of `U₁` can only have integer charges, and fractional charges lead to a projective representation.
+It can be useful to allow half integers in order to describe spin 1/2 systems with an axis rotation symmetry.
+As a user, you should not worry about the details of `HalfInt` and additional methods for automatic conversion and pretty printing are provided, as illustrated by the following example
+
```@repl sectors
Irrep[U₁](0.5)
U1Irrep(0.4)
U1Irrep(1) ⊗ Irrep[U₁](1//2)
u = first(U1Irrep(1) ⊗ Irrep[U₁](1//2))
-Nsymbol(u, conj(u), one(u))
+Nsymbol(u, dual(u), unit(u))
```
-For `ZNIrrep{N}`, we use an `Int8` for compact storage, assuming that this type will not be
-used with `N>64` (we need `2*(N-1) <= 127` in order for `a ⊗ b` to work correctly). We also
-define some aliases for the first (and most commonly used `ℤ{N}` irreps)
+
+We similarly implement the irreps of the finite cyclic groups ``\mathbb{Z}_N``, where we distinguish between small and large values of `N` to optimize storage.
+The implementation is given by
+
+```julia
+const SMALL_ZN_CUTOFF = (typemax(UInt8) + 1) ÷ 2
+struct ZNIrrep{N} <: AbstractIrrep{ℤ{N}}
+ n::UInt8
+ function ZNIrrep{N}(n::Integer) where {N}
+ N ≤ SMALL_ZN_CUTOFF || throw(DomainError(N, "N exceeds the maximal value, use `LargeZNIrrep` instead"))
+ return new{N}(UInt8(mod(n, N)))
+ end
+end
+struct LargeZNIrrep{N} <: AbstractIrrep{ℤ{N}}
+ n::UInt
+ function LargeZNIrrep{N}(n::Integer) where {N}
+ N ≤ (typemax(UInt) ÷ 2) || throw(DomainError(N, "N exceeds the maximal value"))
+ return new{N}(UInt(mod(n, N)))
+ end
+
+end
+Base.getindex(::IrrepTable, ::Type{ℤ{N}}) where {N} = N ≤ SMALL_ZN_CUTOFF ? ZNIrrep{N} : LargeZNIrrep{N}
+...
+```
+and continues along similar lines to the `U1Irrep` implementation above, by replacing the arithmetic with modulo `N` arithmetic.
+
+The storage benefits for small `N` are not only due to a smaller integer type in the sector itself, but emerge as a result of the following distinction in the iterator size:
+```julia
+Base.IteratorSize(::Type{SectorValues{<:ZNIrrep}}) = HasLength()
+Base.IteratorSize(::Type{SectorValues{<:LargeZNIrrep}}) = SizeUnknown()
+```
+As a result, the `GradedSpace` implementation (see next section on [Graded spaces](@ref ss_rep)) to store general direct sum objects ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}`` will use a very different internal representation for those two cases.
+
+We furthermore define some aliases for the first (and most commonly used) `ℤ{N}` irreps
```julia
const Z2Irrep = ZNIrrep{2}
const Z3Irrep = ZNIrrep{3}
const Z4Irrep = ZNIrrep{4}
```
-so that we can do
+which we can illustrate via
```@repl sectors
z = Z3Irrep(1)
ZNIrrep{3}(1) ⊗ Irrep[ℤ₃](1)
-conj(z)
-one(z)
+dual(z)
+unit(z)
```
-As a further remark, even in the abelian case where `a ⊗ b` is equivalent to a single new
-label `c`, we return it as an iterable container, in this case a one-element tuple `(c,)`.
+As a final remark on the irreps of abelian groups, note that even though `a ⊗ b` is equivalent to a single new label `c`, we return this result as an iterable container, in this case a one-element tuple `(c,)`.
-As mentioned above, we also provide the following definitions
+The first example of irreps of a non-abelian group is that of ``\mathsf{SU}_2``, the implementation of which is summarized by
```julia
-Base.hash(c::ZNIrrep{N}, h::UInt) where {N} = hash(c.n, h)
-Base.isless(c1::ZNIrrep{N}, c2::ZNIrrep{N}) where {N} = isless(c1.n, c2.n)
-Base.hash(c::U1Irrep, h::UInt) = hash(c.charge, h)
-Base.isless(c1::U1Irrep, c2::U1Irrep) where {N} =
- isless(abs(c1.charge), abs(c2.charge)) || zero(HalfInt) < c1.charge == -c2.charge
-```
-Since sectors or objects made out of tuples of sectors (see the section on
-[Fusion Trees](@ref ss_fusiontrees) below) are often used as keys in look-up tables (i.e.
-subtypes of `AbstractDictionary` in Julia), it is important that they can be hashed
-efficiently. We just hash the sectors above based on their numerical value. Note that
-hashes will only be used to compare sectors of the same type. The `isless` function
-provides a canonical order for sectors of a given type `G<:Sector`, which is useful to
-uniquely and unambiguously specify a representation space ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}``, as
-described in the section on [Graded spaces](@ref ss_rep) below.
-
-The first example of a non-abelian representation category is that of ``\mathsf{SU}_2``, the
-implementation of which is summarized by
-```julia
-struct SU2Irrep <: AbstractIrrep{SU{2}}
+struct SU2Irrep <: AbstractIrrep{SU₂}
j::HalfInt
+ function SU2Irrep(j)
+ j >= zero(j) || error("Not a valid SU₂ irrep")
+ return new(j)
+ end
+end
+Base.getindex(::IrrepTable, ::Type{SU₂}) = SU2Irrep
+Base.convert(::Type{SU2Irrep}, j::Real) = SU2Irrep(j)
+
+# basic properties
+const _su2one = SU2Irrep(zero(HalfInt))
+unit(::Type{SU2Irrep}) = _su2one
+dual(s::SU2Irrep) = s
+dim(s::SU2Irrep) = twice(s.j) + 1
+Base.isless(s1::SU2Irrep, s2::SU2Irrep) = isless(s1.j, s2.j)
+
+# fusion product iterator
+const SU2IrrepProdIterator = SectorProductIterator{SU2Irrep}
+Base.IteratorSize(::Type{SU2IrrepProdIterator}) = Base.HasLength()
+Base.length(it::SU2IrrepProdIterator) = length(abs(it.a.j - it.b.j):(it.a.j + it.b.j))
+function Base.iterate(it::SU2IrrepProdIterator, state = abs(it.a.j - it.b.j))
+ return state > (it.a.j + it.b.j) ? nothing : (SU2Irrep(state), state + 1)
end
-Base.one(::Type{SU2Irrep}) = SU2Irrep(zero(HalfInt))
-Base.conj(s::SU2Irrep) = s
-⊗(s1::SU2Irrep, s2::SU2Irrep) = SectorSet{SU2Irrep}(abs(s1.j-s2.j):(s1.j+s2.j))
-dim(s::SU2Irrep) = twice(s.j)+1
+# fusion and braidingdata
FusionStyle(::Type{SU2Irrep}) = SimpleFusion()
-Base.isreal(::Type{SU2Irrep}) = true
+sectorscalartype(::Type{SU2Irrep}) = Float64
+
Nsymbol(sa::SU2Irrep, sb::SU2Irrep, sc::SU2Irrep) = WignerSymbols.δ(sa.j, sb.j, sc.j)
-Fsymbol(s1::SU2Irrep, s2::SU2Irrep, s3::SU2Irrep,
- s4::SU2Irrep, s5::SU2Irrep, s6::SU2Irrep) =
- WignerSymbols.racahW(s1.j, s2.j, s4.j, s3.j, s5.j, s6.j)*sqrt(dim(s5)*dim(s6))
+function Fsymbol(
+ s1::SU2Irrep, s2::SU2Irrep, s3::SU2Irrep,
+ s4::SU2Irrep, s5::SU2Irrep, s6::SU2Irrep
+ )
+ if all(==(_su2one), (s1, s2, s3, s4, s5, s6))
+ return 1.0
+ else
+ return sqrtdim(s5) * sqrtdim(s6) *
+ WignerSymbols.racahW(
+ sectorscalartype(SU2Irrep), s1.j, s2.j, s4.j, s3.j,
+ s5.j, s6.j
+ )
+ end
+end
function Rsymbol(sa::SU2Irrep, sb::SU2Irrep, sc::SU2Irrep)
- Nsymbol(sa, sb, sc) || return 0.
- iseven(convert(Int, sa.j+sb.j-sc.j)) ? 1.0 : -1.0
+ Nsymbol(sa, sb, sc) || return zero(sectorscalartype(SU2Irrep))
+ return iseven(convert(Int, sa.j + sb.j - sc.j)) ? one(sectorscalartype(SU2Irrep)) :
+ -one(sectorscalartype(SU2Irrep))
end
+# values iterator
Base.IteratorSize(::Type{SectorValues{SU2Irrep}}) = IsInfinite()
-Base.iterate(::SectorValues{SU2Irrep}, i = 0) = (SU2Irrep(half(i)), i+1)
-# unused and not really necessary:
-Base.getindex(::SectorValues{SU2Irrep}, i::Int) =
- 1 <= i ? SU2Irrep(half(i-1)) : throw(BoundsError(values(SU2Irrep), i))
-findindex(::SectorValues{SU2Irrep}, s::SU2Irrep) = twice(s.j)+1
+Base.iterate(::SectorValues{SU2Irrep}, i::Int = 0) = (SU2Irrep(half(i)), i + 1)
+function Base.getindex(::SectorValues{SU2Irrep}, i::Int)
+ return 1 <= i ? SU2Irrep(half(i - 1)) : throw(BoundsError(values(SU2Irrep), i))
+end
+findindex(::SectorValues{SU2Irrep}, s::SU2Irrep) = twice(s.j) + 1
+
+# hashing
+Base.hash(s::SU2Irrep, h::UInt) = hash(s.j, h)
```
-and some methods for pretty printing and converting from real numbers to irrep labels. As
-one can notice, the topological data (i.e. `Nsymbol` and `Fsymbol`) are provided by the
-package [WignerSymbols.jl](https://github.com/Jutho/WignerSymbols.jl). The iterable `a ⊗ b`
-is a custom type, that the user does not need to care about. Some examples
+and some methods for pretty printing and converting from real numbers to irrep labels.
+Here, the fusion rules are implemented lazily using the `SectorProductIterator` defined above.
+Furthermore, the topological data (i.e. `Nsymbol` and `Fsymbol`) are provided by the package [WignerSymbols.jl](https://github.com/Jutho/WignerSymbols.jl).
+Note that, while WignerSymbols.jl is able to generate the required data in arbitrary precision, we have explicitly restricted the scalar type of `SU2Irrep` to `Float64` for efficiency.
+
+The following example illustrates the usage of `SU2Irrep`
```@repl sectors
s = SU2Irrep(3//2)
-conj(s)
+dual(s)
dim(s)
collect(s ⊗ s)
for s2 in s ⊗ s
@@ -471,18 +429,13 @@ for s2 in s ⊗ s
end
```
-A final non-abelian representation theory is that of the semidirect product
-``\mathsf{U}₁ ⋉ ℤ_2``, where in the context of quantum systems, this occurs in the case of
-systems with particle hole symmetry and the non-trivial element of ``ℤ_2`` acts as charge
-conjugation ``C``. It has the effect of interchaning ``\mathsf{U}_1`` irreps ``n`` and
-``-n``, and turns them together in a joint 2-dimensional index, except for the case
-``n=0``. Irreps are therefore labeled by integers ``n ≧ 0``, however for ``n=0`` the ``ℤ₂``
-symmetry can be realized trivially or non-trivially, resulting in an even and odd one-
-dimensional irrep with ``\mathsf{U})_1`` charge ``0``. Given
-``\mathsf{U}_1 ≂ \mathsf{SO}_2``, this group is also simply known as ``\mathsf{O}_2``, and
-the two representations with `` n = 0`` are the scalar and pseudo-scalar, respectively.
-However, because we also allow for half integer representations, we refer to it as
-`Irrep[CU₁]` or `CU1Irrep` in full.
+Other non-abelian groups for which the irreps are implemented are the dihedral groups ``\mathsf{D}_N``, the alternating group of order four ``\mathsf{A}_4`` and the semidirect product ``\mathsf{U}₁ ⋉ ℤ_2``.
+In the context of quantum systems, the latter occurs in the case of systems with particle hole symmetry and the non-trivial element of ``ℤ_2`` acts as charge conjugation ``C``.
+It has the effect of interchanging ``\mathsf{U}_1`` irreps ``n`` and ``-n``, and turns them together in a joint two-dimensional index, except for the case ``n=0``.
+Irreps are therefore labeled by integers ``n ≧ 0``, however for ``n=0`` the ``ℤ₂`` symmetry can be realized trivially or non-trivially, resulting in an even and odd one-dimensional irrep with ``\mathsf{U}_1`` charge ``0``.
+Given ``\mathsf{U}_1 ≂ \mathsf{SO}_2``, this group is also simply known as ``\mathsf{O}_2``, and the two representations with ``n = 0`` are the scalar and pseudo-scalar, respectively.
+However, because we also allow for half integer representations, we refer to it as `Irrep[CU₁]` or `CU1Irrep` in full.
+
```julia
struct CU1Irrep <: AbstractIrrep{CU₁}
j::HalfInt # value of the U1 charge
@@ -499,67 +452,61 @@ struct CU1Irrep <: AbstractIrrep{CU₁}
end
end
-Base.one(::Type{CU1Irrep}) = CU1Irrep(zero(HalfInt), 0)
-Base.conj(c::CU1Irrep) = c
+unit(::Type{CU1Irrep}) = CU1Irrep(zero(HalfInt), 0)
+dual(c::CU1Irrep) = c
dim(c::CU1Irrep) = ifelse(c.j == zero(HalfInt), 1, 2)
FusionStyle(::Type{CU1Irrep}) = SimpleFusion()
...
```
-The rest of the implementation can be read in the source code, but is rather long due to all
-the different cases for the arguments of `Fsymbol`.
+The rest of the implementation can be read in the source code, but is rather long due to all the different cases for the arguments of `Fsymbol`.
+For the dihedral groups ``\mathsf{D}_N``, which can be interpreted as the semidirect product ``\mathbb{Z}_N ⋉ ℤ_2``, the representation theory is obtained quite similarly, and is implemented as the type [`DNIrrep{N}`](@ref).
-By default, no sectors are included with `FusionStyle(G) == GenericFusion()`, though an
-example would be the representation theory of ``\mathsf{SU}_N``, i.e. represented by the
-group `SU{N}`, for `N>2`. Such sectors are supported through
-[SUNRepresentations.jl](https://github.com/QuantumKitHub/SUNRepresentations.jl), which
-provides numerical routines to compute the topological data of the representation theory of
-these groups, as no general analytic formula is available.
+Of the aforementioned groups, only ``\mathsf{A}_4`` has a representation theory for which `FusionStyle(I) == GenericFusion()`, i.e. where fusion multiplicities are required.
+Another example where this does appear is for the irreps of `SU{N}` for ``N > 2``.
+Such sectors are supported through [SUNRepresentations.jl](https://github.com/QuantumKitHub/SUNRepresentations.jl), which implements numerical routines to compute the topological data of the representation theory of these groups, as no general analytic formula is available.
+## [Combining different sectors](@id ss_productsectors)
-### [Combining different sectors](@id sss_productsectors)
+It is also possible to combine two or more different types of symmetry sectors, e.g. when the total symmetry group is a direct product of individual simple groups.
+Such combined sectors are obtained using the binary operator `⊠`, which can be entered as `\boxtimes`+TAB.
+The resulting type is called [`ProductSector`](@ref), which simply wraps the individual sectors, but knows how to combine their fusion and braiding data correctly.
+First some examples
-It is also possible to define two or more different types of symmetries, e.g. when the total
-symmetry group is a direct product of individual simple groups. Such sectors are obtained
-using the binary operator `⊠`, which can be entered as `\boxtimes`+TAB. First some examples
```@repl sectors
a = Z3Irrep(1) ⊠ Irrep[U₁](1)
typeof(a)
-conj(a)
-one(a)
+dual(a)
+unit(a)
dim(a)
collect(a ⊗ a)
FusionStyle(a)
b = Irrep[ℤ₃](1) ⊠ Irrep[SU₂](3//2)
typeof(b)
-conj(b)
-one(b)
+dual(b)
+unit(b)
dim(b)
collect(b ⊗ b)
FusionStyle(b)
c = Irrep[SU₂](1) ⊠ SU2Irrep(3//2)
typeof(c)
-conj(c)
-one(c)
+dual(c)
+unit(c)
dim(c)
collect(c ⊗ c)
FusionStyle(c)
```
We refer to the source file of [`ProductSector`](@ref) for implementation details.
-The symbol `⊠` refers to the
-[Deligne tensor product](https://ncatlab.org/nlab/show/Deligne+tensor+product+of+abelian+categories)
-within the literature on category theory. Indeed, the category of representation of a
-product group `G₁ × G₂` corresponds to the Deligne tensor product of the categories of
-representations of the two groups separately. But this definition also extends to 𝕜-linear
-categories which are not the representation category of a group. Note that `⊠` also works
-in the type domain, i.e. `Irrep[ℤ₃] ⊠ Irrep[CU₁]` can be used to create
-`ProductSector{Tuple{Irrep[ℤ₃], Irrep[CU₁]}}`. Instances of this type can be constructed by
-giving a number of arguments, where the first argument is used to construct the first
-sector, and so forth. Furthermore, for representations of groups, we also enabled the
-notation `Irrep[ℤ₃ × CU₁]`, with `×` obtained using `\times+TAB`. However, this is merely
-for convience; as `Irrep[ℤ₃] ⊠ Irrep[CU₁]` is not a subtype of the abstract type
-`AbstractIrrep{ℤ₃ × CU₁}`. That behavior cannot be obtained with the Julia's type system.
+The symbol `⊠` refers to the [Deligne tensor product](https://ncatlab.org/nlab/show/Deligne+tensor+product+of+abelian+categories) within the literature on category theory.
+Indeed, the category of representation of a product group `G₁ × G₂` corresponds to the Deligne tensor product of the categories of representations of the two groups separately.
+But this definition also extends to other categories which are not associated with the representation theory of a group, as discussed below.
+Note that `⊠` also works in the type domain, i.e. `Irrep[ℤ₃] ⊠ Irrep[CU₁]` can be used to create `ProductSector{Tuple{Irrep[ℤ₃], Irrep[CU₁]}}`.
+Instances of this type can be constructed by giving a number of arguments, where the first argument is used to construct the first sector, and so forth.
+Furthermore, for representations of groups, we also enabled the notation `Irrep[ℤ₃ × CU₁]`, with `×` obtained using `\times+TAB`.
+However, this is merely for convenience, as `Irrep[ℤ₃] ⊠ Irrep[CU₁]` is not a subtype of the abstract type `AbstractIrrep{ℤ₃ × CU₁}`.
+As is often the case with the Julia type system, the purpose of subtyping `AbstractIrrep` was to share common functionality and thereby simplify the implementation of irreps of the different groups discussed above, but not to express a mathematical hierarchy.
+
Some more examples:
```@repl sectors
a = Z3Irrep(1) ⊠ Irrep[CU₁](1.5)
@@ -569,735 +516,148 @@ a isa AbstractIrrep{ℤ₃ × CU₁}
a == Irrep[ℤ₃ × CU₁](1, 1.5)
```
-### [Defining a new type of sector](@id sss_newsectors)
+## [Defining a new type of sector](@id ss_newsectors)
+
+By now, it should be clear how to implement a new `Sector` subtype.
+Ideally, a new `I <: Sector` type is a `struct I ... end` (immutable) that has `isbitstype(I) == true` (see Julia's manual), and implements the following minimal set of methods
-By now, it should be clear how to implement a new `Sector` subtype. Ideally, a new
-`I<:Sector` type is a `struct I ... end` (immutable) that has `isbitstype(I) == true` (see
-Julia's manual), and implements the following minimal set of methods
```julia
-Base.one(::Type{I}) = I(...)
-Base.conj(a::I) = I(...)
-Base.isreal(::Type{I}) = ... # true or false
-TensorKit.FusionStyle(::Type{I}) = ... # UniqueFusion(), SimpleFusion(), GenericFusion()
-TensorKit.BraidingStyle(::Type{I}) = ... # Bosonic(), Fermionic(), Anyonic()
-TensorKit.Nsymbol(a::I, b::I, c::I) = ...
- # Bool or Integer if FusionStyle(I) == GenericFusion()
-Base.:⊗(a::I, b::I) = ... # some iterable object that generates all possible fusion outputs
-TensorKit.Fsymbol(a::I, b::I, c::I, d::I, e::I, f::I)
-TensorKit.Rsymbol(a::I, b::I, c::I)
-Base.hash(a::I, h::UInt)
+TensorKit.unit(::Type{I}) = I(...)
+TensorKit.dual(a::I) = I(...)
Base.isless(a::I, b::I)
+
+TensorKit.FusionStyle(::Type{I}) = ... # UniqueFusion(), SimpleFusion(), GenericFusion()
+TensorKit.Nsymbol(a::I, b::I, c::I) = ... # Bool or Integer if FusionStyle(I) == GenericFusion()
+
+TensorKit.:⊗(a::I, b::I) = ... # some iterable object that generates all possible fusion outputs
+# or
+Base.iterate(::SectorProductIterator{I}[, state]) = ...
+Base.IteratorSize(::Type{SectorProductIterator{I}}) = ... # HasLength() or IsInfinite()
+Base.length(::SectorProductIterator{I}) = ... # if previous function returns HasLength()
+
+TensorKit.Fsymbol(a::I, b::I, c::I, d::I, e::I, f::I) = ...
+
+TensorKit.BraidingStyle(::Type{I}) = ... # NoBraiding(), Bosonic(), Fermionic(), Anyonic()
+TensorKit.Rsymbol(a::I, b::I, c::I) = ... # only if BraidingStyle(I) != NoBraiding()
+
Base.iterate(::TensorKit.SectorValues{I}[, state]) = ...
Base.IteratorSize(::Type{TensorKit.SectorValues{I}}) = ... # HasLenght() or IsInfinite()
# if previous function returns HasLength():
Base.length(::TensorKit.SectorValues{I}) = ...
+# optional, but recommended if IteratorSize returns HasLength():
Base.getindex(::TensorKit.SectorValues{I}, i::Int) = ...
TensorKit.findindex(::TensorKit.SectorValues{I}, c::I) = ...
+
+Base.hash(a::I, h::UInt)
```
Additionally, suitable definitions can be given for
```julia
+TensorKit.sectorscalartype(::Type{I}) = ... # Int, Float64, ComplexF64, ...
TensorKit.dim(a::I) = ...
-TensorKit.frobeniusschur(a::I) = ...
+TensorKit.frobenius_schur_phase(a::I) = ...
TensorKit.Bsymbol(a::I, b::I, c::I) = ...
```
-Out of these, we have not yet encountered the Frobenius-Schur indicator and the B-symbol.
-They were both defined in the section on
-[topological data of fusion categories](@ref ss_topologicalfusion) and are fully determined
-by the F-symbol, just like the quantum dimensions. Hence, there is a default implementation
-for each of these three functions that just relies on `Fsymbol`, and alternative
-definitions need to be given only if a more efficient version is available.
-
-If `FusionStyle(I) == GenericFusion()`, then the multiple outputs `c` in the tensor
-product of `a` and `b` will be labeled as `i=1`, `2`, …, `Nsymbol(a, b, c)`.
-
-The following type, which already appeared in the implementation of `SU2Irrep` above, can be
-useful for providing the return type of `a ⊗ b`
-```julia
-struct SectorSet{I<:Sector,F,S}
- f::F
- set::S
-end
-...
-function Base.iterate(s::SectorSet{I}, args...) where {I<:Sector}
- next = iterate(s.set, args...)
- next === nothing && return nothing
- val, state = next
- return convert(I, s.f(val)), state
-end
-```
-That is, `SectorSet(f, set)` behaves as an iterator that applies `x->convert(I, f(x))` on
-the elements of `set`; if `f` is not provided it is just taken as the function `identity`.
-
-### [Generalizations](@id sss_generalsectors)
-
-As mentioned before, the framework for sectors outlined above is in one-to-one
-correspondence to the topological data for specifying a unitary (spherical and braided, and
-hence ribbon) [fusion category](https://en.wikipedia.org/wiki/Fusion_category), which was
-reviewed at the end of the introduction to [category theory](@ref s_categories). For such
-categories, the objects are not necessarily vector spaces and the fusion and splitting
-tensors ``X^{ab}_{c,μ}`` do not necessarily exist as actual tensors. However, the morphism
-spaces ``c → a ⊗ b`` still behave as vector spaces, and the ``X^{ab}_{c,μ}`` act as generic
-basis for that space. As TensorKit.jl does not rely on the ``X^{ab}_{c,μ}`` themselves (even
-when they do exist) it can also deal with such general fusion categories. Note, though, that
-when ``X^{ab}_{c,μ}`` does exist, it is available as `fusiontensor(a, b, c, [μ])` (even
-though it is actually the splitting tensor) and can be useful for checking purposes, as
-illustrated below. By default TensorKit includes the Fibonacci category and the Ising
-category, but a list of additional fusion categories is provided in
-[CategoryData.jl](https://github.com/lkdvos/CategoryData.jl).
-
-## [Graded spaces](@id ss_rep)
-
-We have introduced `Sector` subtypes as a way to label the irreps or sectors in the
-decomposition ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}``. To actually represent such spaces, we now also
-introduce a corresponding type `GradedSpace`, which is a subtype of `ElementarySpace`, i.e.
-```julia
-struct GradedSpace{I<:Sector, D} <: ElementarySpace
- dims::D
- dual::Bool
-end
-```
-Here, `D` is a type parameter to denote the data structure used to store the degeneracy or
-multiplicity dimensions ``n_a`` of the different sectors. For conviency, `Vect[I]`
-will return the fully concrete type with `D` specified.
-
-Note that, conventionally, a graded vector space is a space that has a natural direct sum
-decomposition over some set of labels, i.e. ``V = ⨁_{a ∈ I} V_a`` where the label set ``I``
-has the structure of a semigroup ``a ⊗ b = c ∈ I``. Here, we generalize this notation by
-using for ``I`` the fusion ring of a fusion category,
-``a ⊗ b = ⨁_{c ∈ I} ⨁_{μ = 1}^{N_{a,b}^c} c``. However, this is mostly to lower the
-barrier, as really the instances of `GradedSpace` represent just general objects in a
-fusion category (or strictly speaking, a pre-fusion category, as we allow for an
-infinite number of simple objects, e.g. the irreps of a continuous group).
-
-### Implementation details
-
-As mentioned, the way in which the degeneracy dimensions ``n_a`` are stored depends on the
-specific sector type `I`, more specifically on the `IteratorSize` of `values(I)`. If
-`IteratorSize(values(I)) isa Union{IsInfinite, SizeUnknown}`, the different sectors ``a``
-and their corresponding degeneracy ``n_a`` are stored as key value pairs in an
-`Associative` array, i.e. a dictionary `dims::SectorDict`. As the total number of sectors
-in `values(I)` can be infinite, only sectors ``a`` for which ``n_a`` are stored. Here,
-`SectorDict` is a constant type alias for a specific dictionary implementation, which
-currently resorts to `SortedVectorDict` implemented in TensorKit.jl. Hence, the sectors and
-their corresponding dimensions are stored as two matching lists (`Vector` instances), which
-are ordered based on the property `isless(a::I, b::I)`. This ensures that the space
-``V = ⨁_a ℂ^{n_a} ⊗ R_{a}`` has some unique canonical order in the direct sum
-decomposition, such that two different but equal instances created independently always
-match.
-
-If `IteratorSize(values(I)) isa Union{HasLength, HasShape}`, the degeneracy dimensions
-`n_a` are stored for all sectors `a ∈ values(I)` (also if `n_a == 0`) in a tuple, more
-specifically a `NTuple{N, Int}` with `N = length(values(I))`. The methods
-`getindex(values(I), i)` and `findindex(values(I), a)` are used to map between a sector
-`a ∈ values(I)` and a corresponding index `i ∈ 1:N`. As `N` is a compile time constant,
-these types can be created in a type stable manner. Note however that this implies that for
-large values of `N`, it can be beneficial to define
-`IteratorSize(values(a)) = SizeUnknown()` to not overly burden the compiler.
-
-### Constructing instances
-
-As mentioned, the convenience method `Vect[I]` will return the concrete type
-`GradedSpace{I,D}` with the matching value of `D`, so that should never be a user's
-concern. In fact, for consistency, `Vect[Trivial]` will just return `ComplexSpace`,
-which is not even a specific type of `GradedSpace`. For the specific case of group irreps as
-sectors, one can use `Rep[G]` with `G` the group, as inspired by the categorical name
-``\mathbf{Rep}_{\mathsf{G}}``. Some illustrations:
-```@repl sectors
-Vect[Trivial]
-Vect[U1Irrep]
-Vect[Irrep[U₁]]
-Rep[U₁]
-Rep[ℤ₂ × SU₂]
-Vect[Irrep[ℤ₂ × SU₂]]
-```
-Note that we also have the specific alias `U₁Space`. In fact, for all the common groups we
-have a number of aliases, both in ASCII and using Unicode:
-```julia
-# ASCII type aliases
-const ZNSpace{N} = GradedSpace{ZNIrrep{N}, NTuple{N,Int}}
-const Z2Space = ZNSpace{2}
-const Z3Space = ZNSpace{3}
-const Z4Space = ZNSpace{4}
-const U1Space = Rep[U₁]
-const CU1Space = Rep[CU₁]
-const SU2Space = Rep[SU₂]
-
-# Unicode alternatives
-const ℤ₂Space = Z2Space
-const ℤ₃Space = Z3Space
-const ℤ₄Space = Z4Space
-const U₁Space = U1Space
-const CU₁Space = CU1Space
-const SU₂Space = SU2Space
-```
-
-To create specific instances of those types, one can e.g. just use
-`V = GradedSpace(a=>n_a, b=>n_b, c=>n_c)` or `V = GradedSpace(iterator)` where `iterator`
-is any iterator (e.g. a dictionary or a generator) that yields `Pair{I,Int}` instances.
-With those constructions, `I` is inferred from the type of sectors. However, it is often
-more convenient to specify the sector type explicitly (using one of the many alias
-provided), since then the sectors are automatically converted to the correct type. Thereto,
-one can use `Vect[I]`, or when `I` corresponds to the irreducible representations of a group,
-`Rep[G]`. Some examples:
-```@repl sectors
-Vect[Irrep[U₁]](0=>3, 1=>2, -1=>1) ==
- GradedSpace(U1Irrep(0)=>3, U1Irrep(1)=>2, U1Irrep(-1)=>1) ==
- U1Space(0=>3, 1=>2, -1=>1)
-```
-The fact that `Rep[G]` also works with product groups makes it easy to specify e.g.
-```@repl sectors
-Rep[ℤ₂ × SU₂]((0,0) => 3, (1,1/2) => 2, (0,1) => 1) ==
- GradedSpace((Z2Irrep(0) ⊠ SU2Irrep(0)) => 3, (Z2Irrep(1) ⊠ SU2Irrep(1/2)) => 2, (Z2Irrep(0) ⊠ SU2Irrep(1)) => 1)
-```
-
-### Methods
-
-There are a number of methods to work with instances `V` of `GradedSpace`. The
-function [`sectortype`](@ref) returns the type of the sector labels. It also works on other
-vector spaces, in which case it returns [`Trivial`](@ref). The function [`sectors`](@ref)
-returns an iterator over the different sectors `a` with non-zero `n_a`, for other
-`ElementarySpace` types it returns `(Trivial,)`. The degeneracy dimensions `n_a` can be
-extracted as `dim(V, a)`, it properly returns `0` if sector `a` is not present in the
-decomposition of `V`. With [`hassector(V, a)`](@ref) one can check if `V` contains a sector
-`a` with `dim(V, a) > 0`. Finally, `dim(V)` returns the total dimension of the space `V`, i.e.
-``∑_a n_a d_a`` or thus `dim(V) = sum(dim(V, a) * dim(a) for a in sectors(V))`. Note that a
-representation space `V` has certain sectors `a` with dimensions `n_a`, then its dual `V'`
-will report to have sectors `dual(a)`, and `dim(V', dual(a)) == n_a`. There is a subtelty
-regarding the difference between the dual of a representation space ``R_a^*``, on which the
-conjugate representation acts, and the representation space of the irrep
-`dual(a) == conj(a)` that is isomorphic to the conjugate representation, i.e.
-``R_{\overline{a}} ≂ R_a^*`` but they are not equal. We return to this in the section on
-[fusion trees](@ref ss_fusiontrees). This is true also in more general fusion categories
-beyond the representation categories of groups.
-
-Other methods for `ElementarySpace`, such as [`dual`](@ref), [`fuse`](@ref) and
-[`flip`](@ref) also work. In fact, `GradedSpace` is the reason `flip` exists, because
-in this case it is different than `dual`. The existence of flip originates from the
-non-trivial isomorphism between ``R_{\overline{a}}`` and ``R_{a}^*``, i.e. the
-representation space of the dual ``\overline{a}`` of sector ``a`` and the dual of the
-representation space of sector ``a``. In order for `flip(V)` to be isomorphic to `V`, it is
-such that, if `V = GradedSpace(a=>n_a,...)` then
-`flip(V) = dual(GradedSpace(dual(a)=>n_a,....))`.
-
-Furthermore, for two spaces `V1 = GradedSpace(a=>n1_a, ...)` and
-`V2 = GradedSpace(a=>n2_a, ...)`, we have
-`infimum(V1, V2) = GradedSpace(a=>min(n1_a, n2_a), ....)` and similarly for
-`supremum`, i.e. they act on the degeneracy dimensions of every sector separately.
-Therefore, it can be that the return value of `infimum(V1, V2)` or `supremum(V1, V2)` is
-neither equal to `V1` or `V2`.
-
-For `W` a `ProductSpace{Vect[I], N}`, [`sectors(W)`](@ref) returns an
-iterator that generates all possible combinations of sectors `as` represented as
-`NTuple{I,N}`. The function [`dims(W, as)`](@ref) returns the corresponding tuple with
-degeneracy dimensions, while [`dim(W, as)`](@ref) returns the product of these dimensions.
-[`hassector(W, as)`](@ref) is equivalent to `dim(W, as) > 0`. Finally, there is the function
-[`blocksectors(W)`](@ref) which returns a list (of type `Vector`) with all possible "block
-sectors" or total/coupled sectors that can result from fusing the individual uncoupled
-sectors in `W`. Correspondingly, [`blockdim(W, a)`](@ref) counts the total degeneracy
-dimension of the coupled sector `a` in `W`. The machinery for computing this is the topic
-of the next section on [Fusion trees](@ref ss_fusiontrees), but first, it's time for some
-examples.
-
-### Examples
-
-Let's start with an example involving ``\mathsf{U}_1``:
-```@repl sectors
-V1 = Rep[U₁](0=>3, 1=>2, -1=>1)
-V1 == U1Space(0=>3, 1=>2, -1=>1) == U₁Space(-1=>1, 1=>2,0=>3) # order doesn't matter
-(sectors(V1)...,)
-dim(V1, U1Irrep(1))
-dim(V1', Irrep[U₁](1)) == dim(V1, conj(U1Irrep(1))) == dim(V1, U1Irrep(-1))
-hassector(V1, Irrep[U₁](1))
-hassector(V1, Irrep[U₁](2))
-dual(V1)
-flip(V1)
-dual(V1) ≅ V1
-flip(V1) ≅ V1
-V2 = U1Space(0=>2, 1=>1, -1=>1, 2=>1, -2=>1)
-infimum(V1, V2)
-supremum(V1, V2)
-⊕(V1,V2)
-W = ⊗(V1,V2)
-collect(sectors(W))
-dims(W, (Irrep[U₁](0), Irrep[U₁](0)))
-dim(W, (Irrep[U₁](0), Irrep[U₁](0)))
-hassector(W, (Irrep[U₁](0), Irrep[U₁](0)))
-hassector(W, (Irrep[U₁](2), Irrep[U₁](0)))
-fuse(W)
-(blocksectors(W)...,)
-blockdim(W, Irrep[U₁](0))
-```
-and then with ``\mathsf{SU}_2``:
-```@repl sectors
-V1 = Vect[Irrep[SU₂]](0=>3, 1//2=>2, 1=>1)
-V1 == SU2Space(0=>3, 1/2=>2, 1=>1) == SU₂Space(0=>3, 0.5=>2, 1=>1)
-(sectors(V1)...,)
-dim(V1, SU2Irrep(1))
-dim(V1', SU2Irrep(1)) == dim(V1, conj(SU2Irrep(1))) == dim(V1, Irrep[SU₂](1))
-dim(V1)
-hassector(V1, Irrep[SU₂](1))
-hassector(V1, Irrep[SU₂](2))
-dual(V1)
-flip(V1)
-V2 = SU2Space(0=>2, 1//2=>1, 1=>1, 3//2=>1, 2=>1)
-infimum(V1, V2)
-supremum(V1, V2)
-⊕(V1,V2)
-W = ⊗(V1,V2)
-collect(sectors(W))
-dims(W, (Irrep[SU₂](0), Irrep[SU₂](0)))
-dim(W, (Irrep[SU₂](0), Irrep[SU₂](0)))
-hassector(W, (SU2Irrep(0), SU2Irrep(0)))
-hassector(W, (SU2Irrep(2), SU2Irrep(0)))
-fuse(W)
-(blocksectors(W)...,)
-blockdim(W, SU2Irrep(0))
-```
-
-## [Fusion trees](@id ss_fusiontrees)
-
-The gain in efficiency (both in memory occupation and computation time) obtained from using
-symmetric (equivariant) tensor maps is that, by Schur's lemma, they are block diagonal in
-the basis of coupled sectors, i.e. they exhibit block sparsity. To exploit this block
-diagonal form, it is however essential that we know the basis transformation from the
-individual (uncoupled) sectors appearing in the tensor product form of the domain and
-codomain, to the totally coupled sectors that label the different blocks. We refer to the
-latter as block sectors, as we already encountered in the previous section
-[`blocksectors`](@ref) and [`blockdim`](@ref) defined on the type [`ProductSpace`](@ref).
-
-This basis transformation consists of a basis of inclusion and projection maps, denoted as
-``X^{a_1a_2…a_N}_{c,α}: R_c → R_{a_1} ⊗ R_{a_2} ⊗ … ⊗ R_{a_N}`` and their adjoints
-``(X^{a_1a_2…a_N}_{c,α})^†``, such that
-
-``(X^{a_1a_2…a_N}_{c,α})^† ∘ X^{a_1a_2…a_N}_{c′,α′} = δ_{c,c′} δ_{α,α′} \mathrm{id}_c``
-and
+## [Fermionic sectors](@id ss_fermions)
-``∑_{c,α} X^{a_1a_2…a_N}_{c,α} ∘ (X^{a_1a_2…a_N}_{c,α})^† = \mathrm{id}_{a_1 ⊗ a_2 ⊗ … ⊗ a_N} = \mathrm{id}_{a_1} ⊗ \mathrm{id}_{a_2} ⊗ … ⊗ \mathrm{id}_{a_N} ``
-
-Fusion trees provide a particular way to construct such a basis. It is useful to know about
-the existence of fusion trees and how they are represented, as discussed in the first
-subsection. The next two subsections discuss possible manipulations that can be performed
-with fusion trees. These are used under the hood when manipulating the indices of tensors,
-but a typical user would not need to use these manipulations on fusion trees directly.
-Hence, these last two sections can safely be skipped.
-
-### Canonical representation
-
-To couple or fuse the different sectors together into a single block sector, we can
-sequentially fuse together two sectors into a single coupled sector, which is then fused
-with the next uncoupled sector, using the splitting tensors ``X_{a,b}^{c,μ} : R_c → R_a ⊗
-R_b`` and their adjoints. This amounts to the canonical choice of our tensor product, and
-for a given tensor mapping from ``(((W_1 ⊗ W_2) ⊗ W_3) ⊗ … )⊗ W_{N_2})`` to
-``(((V_1 ⊗ V_2) ⊗ V_3) ⊗ … )⊗ V_{N_1})``, the corresponding fusion and splitting trees take
-the form
-
-```@raw html
-
-```
-
-for the specific case ``N_1 = 4`` and ``N_2 = 3``. We can separate this tree into the fusing
-part ``(b_1 ⊗ b_2) ⊗ b_3 → c`` and the splitting part ``c→(((a_1 ⊗ a_2) ⊗ a_3) ⊗ a_4)``.
-Given that the fusion tree can be considered to be the adjoint of a corresponding splitting
-tree ``c → (b_1 ⊗ b_2) ⊗ b_3``, we now first consider splitting trees in isolation. A
-splitting tree which goes from one coupled sector ``c`` to ``N`` uncoupled sectors ``a_1``,
-``a_2``, …, ``a_N`` needs ``N-2`` additional internal sector labels ``e_1``, …, ``e_{N-2}``,
-and, if `FusionStyle(I) isa GenericFusion`, ``N-1`` additional multiplicity labels ``μ_1``,
-…, ``μ_{N-1}``. We henceforth refer to them as vertex labels, as they are associated with
-the vertices of the splitting tree. In the case of `FusionStyle(I) isa UniqueFusion`, the
-internal sectors ``e_1``, …, ``e_{N-2}`` are completely fixed, for
-`FusionStyle(I) isa MultipleFusion` they can also take different values. In our abstract
-notation of the splitting basis ``X^{a_1a_2…a_N}_{c,α}`` used above, ``α`` can be consided a
-collective label, i.e. ``α = (e_1, …, e_{N-2}; μ₁, … ,μ_{N-1})``. Indeed, we can check the
-orthogonality condition
-``(X^{a_1a_2…a_N}_{c,α})^† ∘ X^{a_1a_2…a_N}_{c′,α′} = δ_{c,c′} δ_{α,α′} \mathrm{id}_c``,
-which now forces all internal lines ``e_k`` and vertex labels ``μ_l`` to be the same.
-
-There is one subtle remark that we have so far ignored. Within the specific subtypes of
-`Sector`, we do not explicitly distinguish between ``R_a^*`` (simply denoted as ``a^*``
-and graphically depicted as an upgoing arrow ``a``) and ``R_{\bar{a}}`` (simply denoted as
-``\bar{a}`` and depicted with a downgoing arrow), i.e. between the dual space of ``R_a`` on
-which the conjugated irrep acts, or the irrep ``\bar{a}`` to which the complex conjugate of
-irrep ``a`` is isomorphic. This distinction is however important, when certain uncoupled
-sectors in the fusion tree actually originate from a dual space. We use the isomorphisms
-``Z_a : R_a^* → R_{\bar{a}}`` and its adjoint ``Z_a^† : R_{\bar{a}} → R_a^*``, as introduced
-in the section on [topological data of a fusion category](@ref ss_topologicalfusion), to
-build fusion and splitting trees that take the distinction between irreps and their
-conjugates into account. Hence, in the previous example, if e.g. the first and third space
-in the codomain and the second space in the domain of the tensor were dual spaces, the
-actual pair of splitting and fusion tree would look as
-
-```@raw html
-
-```
-
-The presence of these isomorphisms will be important when we start to bend lines, to move
-uncoupled sectors from the incoming to the outgoing part of the fusion-splitting tree. Note
-that we can still represent the fusion tree as the adjoint of a corresponding splitting
-tree, because we also use the adjoint of the ``Z`` isomorphisms in the splitting part, and
-the ``Z`` isomorphism in the fusion part. Furthermore, the presence of the ``Z``
-isomorphisms does not affect the orthonormality.
-
-We represent splitting trees and their adjoints using a specific immutable type called
-`FusionTree` (which actually represents a splitting tree, but fusion tree is a more common
-term), defined as
+All of the sectors discussed in [Group representations](@ref ss_groups) have a bosonic braiding style.
+This does not mean that `Rsymbol` is always trivial, as for example for `SU2Irrep` the definition is given by
```julia
-struct FusionTree{I<:Sector,N,M,L}
- uncoupled::NTuple{N,I}
- coupled::I
- isdual::NTuple{N,Bool}
- innerlines::NTuple{M,I} # fixed to M = N-2
- vertices::NTuple{L,Int} # fixed to L = N-1
+function Rsymbol(sa::SU2Irrep, sb::SU2Irrep, sc::SU2Irrep)
+ Nsymbol(sa, sb, sc) || return zero(sectorscalartype(SU2Irrep))
+ return iseven(convert(Int, sa.j + sb.j - sc.j)) ? one(sectorscalartype(SU2Irrep)) :
+ -one(sectorscalartype(SU2Irrep))
end
```
-Here, the fields are probably self-explanotary. The `isdual` field indicates whether an
-isomorphism is present (if the corresponding value is `true`) or not. Note that the field
-`uncoupled` contains the sectors coming out of the splitting trees, before the possible
-``Z`` isomorphism, i.e. the splitting tree in the above example would have
-`sectors = (a₁, a₂, a₃, a₄)`. The `FusionTree` type has a number of basic properties and
-capabilities, such as checking for equality with `==` and support for
-`hash(f::FusionTree, h::UInt)`, as splitting and fusion trees are used as keys in look-up
-tables (i.e. `AbstractDictionary` instances) to look up certain parts of the data of a
-tensor.
-
-`FusionTree` instances are not checked for consistency (i.e. valid fusion rules etc) upon
-creation, hence, they are assumed to be created correctly. The most natural way to create
-them is by using the `fusiontrees(uncoupled::NTuple{N,I}, coupled::I = one(I))` method,
-which returns an iterator over all possible fusion trees from a set of `N` uncoupled
-sectors to a given coupled sector, which by default is assumed to be the trivial sector of
-that group or fusion category (i.e. the identity object in categorical nomenclature). The
-return type of `fusiontrees` is a custom type `FusionTreeIterator` which conforms to the
-complete interface of an iterator, and has a custom `length` function that computes the
-number of possible fusion trees without iterating over all of them explicitly. This is best
-illustrated with some examples
+It does however mean that all twists ``θ_a`` are trivial (equal to ``1``).
+We refer to the appendix on [Category theory](@ref s_categories) for more details on the meaning of the twist.
+In summary, triviality of the twists implies that self-crossings of lines in tensor diagrams can be ignored, i.e. they can be removed without changing the value of the diagram.
-```@repl sectors
-s = Irrep[SU₂](1/2)
-collect(fusiontrees((s, s, s, s)))
-collect(fusiontrees((s, s, s, s, s), s, (true, false, false, true, false)))
-iter = fusiontrees(ntuple(n -> s, 16))
-sum(n -> 1, iter)
-length(iter)
-@elapsed sum(n -> 1, iter)
-@elapsed length(iter)
-s2 = s ⊠ s
-collect(fusiontrees((s2, s2, s2, s2)))
-```
-Note that `FusionTree` instances are shown (printed) in a way that is valid code to
-reproduce them, a property which also holds for both instances of `Sector` and instances of
-`VectorSpace`. All of those should be displayed in a way that can be copy pasted as valid
-code. Furthermore, we use context to determine how to print e.g. a sector. In isolation,
-`s2` is printed as `(Irrep[SU₂](1/2) ⊠ Irrep[SU₂](1/2))`, however, within the fusion tree,
-it is simply printed as `(1/2, 1/2)`, because it will be converted back into a
-`ProductSector`, namely `Irrep[SU₂] ⊠ Irrep[SU₂]` by the constructor of
-`FusionTree{Irrep[SU₂] ⊠ Irrep[SU₂]}`.
-
-### Manipulations on a fusion tree
-
-We now discuss elementary manipulations that we want to perform on or between fusion trees
-(where we actually mean splitting trees), which will form the building block for more
-general manipulations on a pair of a fusion and splitting tree discussed in the next
-subsection, and then for casting a general index manipulation of a tensor map as a linear
-operation in the basis of canonically ordered splitting and fusion trees. In this section,
-we will ignore the ``Z`` isomorphisms, as they are just trivially reshuffled under the
-different operations that we describe. These manipulations are used as low-level methods by
-the `TensorMap` methods discussed on the next page. As such, they are not exported by
-TensorKit.jl, nor do they overload similarly named methods from Julia Base (see `split` and
-`merge` below).
-
-The first operation we discuss is an elementary braid of two neighbouring sectors
-(indices), i.e. a so-called Artin braid or Artin generator of the braid group. Because
-these two sectors do not appear on the same fusion vertex, some recoupling is necessary.
-The following represents two different ways to compute the result of such a braid as a
-linear combination of new fusion trees in canonical order:
-
-```@raw html
-
-```
-
-While the upper path is the most intuitive, it requires two recouplings or F-moves (one
-forward and one reverse). On the other hand, the lower path requires only one (reverse) F-
-move, and two R-moves. The latter are less expensive to compute, and so the lower path is
-computationally more efficient. However, the end result should be the same, provided the
-pentagon and hexagon equations are satisfied. We always assume that these are satisfied for
-any new subtype of `Sector`, and it is up to the user to verify that they are when
-implementing new custom `Sector` types. This result is implemented in the function
-[`artin_braid(f::FusionTree, i; inv = false)`](@ref TensorKit.artin_braid) where `i`
-denotes the position of the first sector (i.e. labeled `b` in the above graph) which is then
-braided with the sector at position `i+1` in the fusion tree `f`. The keyword argument `inv`
-allows to select the inverse braiding operation, which amounts to replacing the R-matrix
-with its inverse (or thus, adjoint) in the above steps. The result is returned as a
-dictionary with possible output fusion trees as keys and corresponding coefficients as
-value. In the case of `FusionStyle(I) isa UniqueFusion`, their is only one resulting fusion
-tree, with corresponding coefficient a complex phase (which is one for the bosonic
-representation theory of an Abelian group), and the result is a special
-`SingletonDict<:AbstractDict`, a `struct` type defined in TensorKit.jl to hold a single key
-value pair.
-
-With the elementary `artin_braid`, we can then compute a more general braid. For this, we
-provide an interface
-
-[`braid(f::FusionTree{I,N}, levels::NTuple{N,Int}, permutation::NTuple{N,Int})`](@ref braid(f::FusionTree{I,N}, levels::NTuple{N,Int}, p::NTuple{N,Int}) where {I<:Sector,N})
-
-where the braid is specified as a permutation, such that the new sector at position `i` was
-originally at position `permutation[i]`, and where every uncoupled sector is also assigned
-a level or depth. The permutation is decomposed into swaps between neighbouring sectors,
-and when two sectors are swapped, their respective level will determine whether the left
-sector is braided over or under its right neighbor. This interface does not allow to
-specify the most general braid, and in particular will never wind one line around another,
-but can be used as a more general building block for arbitrary braids than the elementary
-Artin generators. A graphical example makes this probably more clear, i.e for
-`levels = (1, 2, 3, 4, 5)` and `permutation = (5, 3, 1, 4, 2)`, the corresponding braid is
-given by
-
-```@raw html
-
-```
-
-that is, the first sector or space goes to position 3, and crosses over all other lines,
-because it has the lowest level (i.e. think of level as depth in the third dimension), and
-so forth. We sketch this operation both as a general braid on the left hand side, and as a
-particular composition of Artin braids on the right hand side.
-
-When `BraidingStyle(I) == SymmetricBraiding()`, there is no distinction between applying
-the braiding or its inverse (i.e. lines crossing over or under each other in the graphical
-notation) and the whole operation simplifies down to a permutation. We then also support
-the interface
-
-[`permute(f::FusionTree{I,N}, permutation::NTuple{N,Int})`](@ref permute(f::FusionTree{I,N}, p::NTuple{N,Int}) where {I<:Sector,N})
-
-Other manipulations which are sometimes needed are
-* [`insertat(f1::FusionTree{I,N₁}, i::Int, f2::FusionTree{I,N₂})`](@ref TensorKit.insertat) :
- inserts a fusion tree `f2` at the `i`th uncoupled sector of fusion tree `f1` (this
- requires that the coupled sector `f2` matches with the `i`th uncoupled sector of `f1`,
- and that `!f1.isdual[i]`, i.e. that there is no ``Z``-isomorphism on the `i`th line of
- `f1`), and recouple this into a linear combination of trees in canonical order, with
- `N₁ + N₂ - 1` uncoupled sectors, i.e. diagrammatically for `i = 3`
-
-```@raw html
-
-```
-
-* [`split(f::FusionTree{I,N}, M::Int)`](@ref TensorKit.split) :
- splits a fusion tree `f` into two trees `f1` and `f2`, such that `f1` has the first `M`
- uncoupled sectors of `f`, and `f2` the remaining `N - M`. This function is type stable
- if `M` is a compile time constant.
-
- `split(f, M)` is the inverse of `insertat` in the sence that `insertat(f2, 1, f1)`
- should return a dictionary with a single key-value pair `f=>1`. Diagrammatically, for
- `M = 4`, the function `split` returns
-
-```@raw html
-
-```
+As is well known, this becomes more subtle when fermionic degrees of freedom are involved.
+Technically, fermions are described using super vector spaces, which are ``ℤ₂``-graded vector spaces ``V = V_0 ⊕ V_1``, i.e. the vector space is decomposed as an (orthogonal) direct sum into an even and odd subspace, corresponding to states with even and odd fermion parity, respectively.
+The tensor product of two super vector spaces ``V`` and ``W`` is again graded as ``(V ⊗ W)_0 = (V_0 ⊗ W_0) ⊕ (V_1 ⊗ W_1)`` and ``(V ⊗ W)_1 = (V_0 ⊗ W_1) ⊕ (V_1 ⊗ W_0)``.
+However, when exchanging two super vector spaces in such a tensor product, the natural isomorphism ``V ⊗ W → W ⊗ V`` takes into account the fermionic nature by acting with a minus sign in the subspace ``V_1 ⊗ W_1``.
+This is known as the Koszul sign rule.
-* [`merge(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, c::I, [μ=1])`](@ref TensorKit.merge) :
- merges two fusion trees `f1` and `f2` by fusing the coupled sectors of `f1` and `f2`
- into a sector `c` (with vertex label `μ` if `FusionStyle(I) == GenericFusion()`),
- and reexpressing the result as a linear combination of fusion trees with `N₁ + N₂`
- uncoupled sectors in canonical order. This is a simple application of `insertat`.
- Diagrammatically, this operation is represented as:
-
-```@raw html
-
+The super vector space structure fits naturally in the framework of TensorKit.jl.
+Indeed, the grading naturally corresponds to a ``ℤ₂``-valued sector structure, which we implement as [`FermionParity`](@ref):
+```julia
+struct FermionParity <: Sector
+ isodd::Bool
+end
+const fℤ₂ = FermionParity
+fermionparity(f::FermionParity) = f.isodd
```
-
-### Manipulations on a splitting - fusion tree pair
-
-In this subsection we discuss manipulations that act on a splitting and fusion tree pair,
-which we will always as two separate trees `f1, f2`, where `f1` is the splitting tree and
-`f2` represents the fusion tree, and they should have `f1.coupled == f2.coupled`.
-
-The most important manipulation on such a pair is to move sectors from one to the other.
-Given the canonical order of these trees, we exclusively use the *left duality* (see the
-section on [categories](@ref s_categories)), for which the evaluation and coevaluation maps
-establish isomorphisms between
-
-``\mathrm{Hom}((((b_1 ⊗ b_2) ⊗ …) ⊗ b_{N_2}), (((a_1 ⊗ a_2) ⊗ …) ⊗ a_{N_1}))``
-
-`` ≂ \mathrm{Hom}((((b_1 ⊗ b_2) ⊗ ...) ⊗ b_{N_2-1}), ((((a_1 ⊗ a_2) ⊗ ...) ⊗ a_{N_1}) ⊗ b_{N_2}^*))``
-
-`` ≂ \mathrm{Hom}(1, (((((((a_1 ⊗ a_2) ⊗ ...) ⊗ a_{N_1}) ⊗ b_{N_2}^*) ⊗ …) ⊗ b_2^*) ⊗ b_1^*) )``
-
-where the last morphism space is then labeled by the basis of only splitting trees. We can
-then use the manipulations from the previous section, and then again use the left duality
-to bring this back to a pair of splitting and fusion tree with `N₂′` incoming and `N₁′`
-incoming sectors (with `N₁′ + N₂′ == N₁ + N₂`).
-
-We now discuss how to actually bend lines, and thus, move sectors from the incoming part
-(fusion tree) to the outgoing part (splitting tree). Hereby, we exploit the relations
-between the (co)evaluation (exact pairing) and the fusion tensors, discussed in
-[topological data of a fusion category](@ref ss_topologicalfusion). The main ingredient
-that we need is summarized in
-
-```@raw html
-
+with straightforward fusion rules and associators
+```julia
+⊗(a::FermionParity, b::FermionParity) = (FermionParity(a.isodd ⊻ b.isodd),)
+function Nsymbol(a::FermionParity, b::FermionParity, c::FermionParity)
+ return (a.isodd ⊻ b.isodd) == c.isodd
+end
+function Fsymbol(a::I, b::I, c::I, d::I, e::I, f::I) where {I <: FermionParity}
+ return Int(Nsymbol(a, b, e) * Nsymbol(e, c, d) * Nsymbol(b, c, f) * Nsymbol(a, f, d))
+end
```
-
-We will only need the B-symbol and not the A-symbol. Applying the left evaluation on the
-second sector of a splitting tensor thus yields a linear combination of fusion tensors
-(when `FusionStyle(I) == GenericFusion()`, or just a scalar times the corresponding
-fusion tensor otherwise), with corresponding ``Z`` ismorphism. Taking the adjoint of this
-relation yields the required relation to transform a fusion tensor into a splitting tensor
-with an added ``Z^†`` isomorphism.
-
-However, we have to be careful if we bend a line on which a ``Z`` isomorphism (or its
-adjoint) is already present. Indeed, it is exactly for this operation that we explicitly
-need to take the presence of these isomorphisms into account. Indeed, we obtain the relation
-
-```@raw html
-
+but with non-trivial braiding and twist
+```julia
+function Rsymbol(a::I, b::I, c::I) where {I <: FermionParity}
+ return a.isodd && b.isodd ? -Int(Nsymbol(a, b, c)) : Int(Nsymbol(a, b, c))
+end
+twist(a::FermionParity) = a.isodd ? -1 : +1
```
-Hence, bending an `isdual` sector from the splitting tree to the fusion tree yields an
-additional Frobenius-Schur factor, and of course leads to a normal sector (which is no
-longer `isdual` and does thus not come with a ``Z``-isomorphism) on the fusion side. We
-again use the adjoint of this relation to bend an `isdual` sector from the fusion tree to
-the splitting tree.
-
-The `FusionTree` interface to duality and line bending is given by
+The super vector space structure can also be combined with other sector types using the `⊠` operator discussed [above](@ref ss_productsectors).
+In some cases, there is a richer symmetry than ``ℤ₂`` associated with the fermionic degrees of freedom, and there is a natural fermion parity associated with the sectors of that symmetry.
+An example would be a ``\mathsf{U}_1`` symmetry associated with fermion number conservation, where odd ``\mathsf{U}_1`` charges correspond to odd fermion parity.
+However, it is then always possible to separate out the fermion parity structure as a separate sector and treat the original sectors as bosonic, by restricting to those combinations of sectors that are consistent with the natural fermion parity assignment.
-[`repartition(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, N::Int)`](@ref repartition)
-
-which takes a splitting tree `f1` with `N₁` outgoing sectors, a fusion tree `f2` with `N₂`
-incoming sectors, and applies line bending such that the resulting splitting and fusion
-trees have `N` outgoing sectors, corresponding to the first `N` sectors out of the list
-``(a_1, a_2, …, a_{N_1}, b_{N_2}^*, …, b_{1}^*)`` and `N₁ + N₂ - N` incoming sectors,
-corresponding to the dual of the last `N₁ + N₂ - N` sectors from the previous list, in reverse.
-This return values are correctly inferred if `N` is a compile time constant.
-
-Graphically, for `N₁ = 4`, `N₂ = 3`, `N = 2` and some particular choice of `isdual` in both
-the fusion and splitting tree:
+For convenience (and partially due to legacy reasons), TensorKitSectors.jl does provide [`FermionNumber`](@ref) and [`FermionSpin`](@ref) constructors, which are defined as
+```julia
+const FermionNumber = U1Irrep ⊠ FermionParity
+const fU₁ = FermionNumber
+FermionNumber(a::Int) = U1Irrep(a) ⊠ FermionParity(isodd(a))
-```@raw html
-
+const FermionSpin = SU2Irrep ⊠ FermionParity
+const fSU₂ = FermionSpin
+FermionSpin(j::Real) = (s = SU2Irrep(j); s ⊠ FermionParity(isodd(twice(s.j))))
```
-The result is returned as a dictionary with keys `(f1′, f2′)` and the corresponding `coeff`
-as value. Note that the summation is only over the ``κ_j`` labels, such that, in the case
-of `FusionStyle(I) isa MultiplicityFreeFusion`, the linear combination simplifies to
-a single term with a scalar coefficient.
-
-With this basic function, we can now perform arbitrary combinations of braids or
-permutations with line bendings, to completely reshuffle where sectors appear. The
-interface provided for this is given by
-
-[`braid(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, levels1::NTuple{N₁,Int}, levels2::NTuple{N₂,Int}, p1::NTuple{N₁′,Int}, p2::NTuple{N₂′,Int})`](@ref braid(::FusionTree{I}, ::FusionTree{I}, ::IndexTuple, ::IndexTuple, ::IndexTuple{N₁}, ::IndexTuple{N₂}) where {I<:Sector,N₁,N₂})
-
-where we now have splitting tree `f1` with `N₁` outgoing sectors, a fusion tree `f2` with
-`N₂` incoming sectors, `levels1` and `levels2` assign a level or depth to the corresponding
-uncoupled sectors in `f1` and `f2`, and we represent the new configuration as a pair `p1`
-and `p2`. Together, `(p1..., p2...)` represents a permutation of length
-`N₁ + N₂ = N₁′ + N₂′`, where `p1` indicates which of the original sectors should appear as
-outgoing sectors in the new splitting tree and `p2` indicates which appear as incoming
-sectors in the new fusion tree. Hereto, we label the uncoupled sectors of `f1` from `1` to
-`N₁`, followed by the uncoupled sectors of `f2` from `N₁ + 1` to `N₁ + N₂`. Note that simply
-repartitioning the splitting and fusion tree such that e.g. all sectors appear in the new
-splitting tree (i.e. are outgoing), amounts to chosing
-`p1 = (1,..., N₁, N₁ + N₂, N₁ + N₂ - 1, ... , N₁ + 1)` and `p2 = ()`, because the duality
-isomorphism reverses the order of the tensor product.
-
-This routine is implemented by indeed first making all sectors outgoing using the
-`repartition` function discussed above, such that only splitting trees remain, then
-braiding those using the routine from the previous subsection such that the new outgoing
-sectors appear first, followed by the new incoming sectors (in reverse order), and then
-again invoking the `repartition` routine to bring everything in final form. The result is
-again returned as a dictionary where the keys are `(f1′, f2′)` and the values the
-corresponding coefficients.
-
-As before, there is a simplified interface for the case where
-`BraidingStyle(I) isa SymmetricBraiding` and the levels are not needed. This is simply
-given by
-
-[`permute(f1::FusionTree{I,N₁}, f2::FusionTree{I,N₂}, p1::NTuple{N₁′,Int}, p2::NTuple{N₂′,Int})`](@ref permute(::FusionTree{I}, ::FusionTree{I}, ::IndexTuple{N₁}, ::IndexTuple{N₂}) where {I<:Sector,N₁,N₂})
-
-The `braid` and `permute` routines for double fusion trees will be the main access point for
-corresponding manipulations on tensors. As a consequence, results from this routine are
-memoized, i.e. they are stored in some package wide 'least-recently used' cache (from
-[LRUCache.jl](https://github.com/JuliaCollections/LRUCache.jl)) that can be accessed as
-`TensorKit.braidcache`. By default, this cache stores up to `10^5` different `braid` or
-`permute` resuls, where one result corresponds to one particular combination of `(f1, f2,
-p1, p2, levels1, levels2)`. This should be sufficient for most algorithms. While there are
-currently no (official) access methods to change the default settings of this cache (one can
-always resort to `resize!(TensorKit.permutecache)` and other methods from LRUCache.jl), this
-might change in the future. The use of this cache is however controlled by two constants of
-type `RefValue{Bool}`, namely `usebraidcache_abelian` and `usebraidcache_nonabelian`. The
-default values are given by `TensorKit.usebraidcache_abelian[] = false` and
-`TensorKit.usebraidcache_nonabelian[] = true`, and respectively reflect that the cache is
-likely not going to help (or even slow down) fusion trees with
-`FusionStyle(f) isa UniqueFusion`, but is probably useful for fusion trees with
-`FusionStyle(f) isa MultipleFusion`. One can change these values and test the effect on
-their application.
-
-The existence of `braidcache` also implies that potential inefficiencies in the fusion
-tree manipulations (which we nonetheless try to avoid) will not seriously affect
-performance of tensor manipulations.
-
-### Inspecting fusion trees as tensors
-
-For those cases where the fusion and splitting tensors have an explicit representation as
-a tensor, i.e. a morphism in the category `Vect` (this essentially coincides with the case
-of group representations), this explicit representation can be created, which can be useful
-for checking purposes. Hereto, it is necessary that the *splitting tensor*
-``X^{ab}_{c,μ}``, i.e. the Clebsch-Gordan coefficients of the group, are encoded via the
-routine `fusiontensor(a,b,c [,μ = nothing])`, where the last argument is only necessary in
-the case of `FusionStyle(I) == GenericFusion()`. We can then convert a
-`FusionTree{I,N}` into an `Array`, which will yield a rank `N+1` array where the first `N`
-dimensions correspond to the uncoupled sectors, and the last dimension to the coupled
-sector. Note that this is mostly useful for the case of `FusionStyle(I) isa MultipleFusion`
-groups, as in the case of abelian groups, all irreps are one-dimensional.
-
-Some examples:
+We conclude this subsection with some examples.
```@repl sectors
-s = Irrep[SU₂](1/2)
-iter = fusiontrees((s, s, s, s), SU2Irrep(1))
-f = first(iter)
-convert(Array, f)
-
-LinearAlgebra.I ≈ convert(Array, FusionTree((SU2Irrep(1/2),), SU2Irrep(1/2), (false,), ()))
-Z = adjoint(convert(Array, FusionTree((SU2Irrep(1/2),), SU2Irrep(1/2), (true,), ())))
-transpose(Z) ≈ frobeniusschur(SU2Irrep(1/2)) * Z
-
-LinearAlgebra.I ≈ convert(Array, FusionTree((Irrep[SU₂](1),), Irrep[SU₂](1), (false,), ()))
-Z = adjoint(convert(Array, FusionTree((Irrep[SU₂](1),), Irrep[SU₂](1), (true,), ())))
-transpose(Z) ≈ frobeniusschur(Irrep[SU₂](1)) * Z
-
-#check orthogonality
-for f1 in iter
- for f2 in iter
- dotproduct = dot(convert(Array, f1), convert(Array, f2))
- println("< $f1, $f2> = $dotproduct")
- end
+p = FermionParity(true)
+p ⊗ p
+twist(p)
+FusionStyle(p)
+BraidingStyle(p)
+
+s = FermionSpin(3//2)
+dim(s)
+twist(s)
+typeof(s)
+FusionStyle(s)
+BraidingStyle(s)
+collect(s ⊗ s)
+for s2 in s ⊗ s
+ @show s2
+ @show Rsymbol(s, s, s2)
end
```
-Note that we take the adjoint when computing `Z`, because `convert(Array, f)` assumes `f`
-to be splitting tree, which is built using ``Z^†``. Further note that the normalization
-(squared) of a fusion tree is given by the dimension of the coupled sector, as we are also
-tracing over the ``\mathrm{id}_c`` when checking the orthogonality by computing `dot` of
-the corresponding tensors.
-
-## Fermions
-
-TODO: Update the documentation for this section.
-
-Fermionic sectors are represented by the type [`FermionParity`](@ref), which effectively
-behaves like a ℤ₂ sector, but with two modifications. Firstly, the exchange of two sectors
-with odd fermion parity should yield a minus sign, which is taken care of by virtue of the
-R-symbol. This ensures that permuting tensors behave as expected. Secondly, diagrams with
-self-crossing lines (aka twists) give rise to a minus sign for odd fermion parity. This is
-in essence equivalent to having supertraces, which is what ensures that `@tensor` has a
-result that is invariant under permutation of its input tensors. This does however lead to
-unwanted minus signs for certain types of diagrams. To avoid this, the `@planar` macro does
-not include a supertrace, but requires a manual resolution of all crossings in the diagram.
+Note in particular how the `Rsymbol` values have opposite signs to the bosonic case, where the fusion of two equal half-integer spins to the trivial sector is antisymmetric and would thus have `Rsymbol` value `-1`.
## Anyons
-There is currently one example of a `Sector` subtype that has anyonic braiding style,
-namely that of the Fibonacci fusion category. It has to (isomorphism classes of) simple
-objects, namely the identity `𝟙` and a non-trivial object known as `τ`, with fusion rules
-`τ ⊗ τ = 𝟙 ⊕ τ`. Let's summarize the topological data
+Both `Bosonic` and `Fermionic` braiding styles are `SymmetricBraiding` styles, which means that exchanging two sectors twice is equivalent to the identity operation.
+In tensor network diagrams, this implies that lines that cross twice are equivalent to them not crossing at all, or also, that there is no distinction between a line crossing "above" or "below" another line.
+More technically, the relevant group describing the exchange processes is the permutation group, whereas in more general cases it would be the braid group.
+
+This more general case is denoted as the `Anyonic` braiding style in TensorKit.jl, because examples of this behaviour appear in the context of anyons in topological phases of matter.
+
+There are currently two well-known sector types with `Anyonic` braiding style implemented in TensorKitSectors.jl, namely [`FibonacciAnyon`](@ref) and [`IsingAnyon`](@ref). Their values represent the (equivalence classes of) simple objects of the well-known Fibonacci and Ising fusion categories.
+As an example, we illustrate below the Fibonacci anyons, which have only two distinct sectors, namely the unit sector `𝟙` and one non-trivial sector denoted as `τ`.
+The fusion rules are given by `τ ⊗ τ = 𝟙 ⊕ τ`, and the topological data is summarized by the following code
```@repl sectors
𝟙 = FibonacciAnyon(:I)
@@ -1315,3 +675,23 @@ Rsymbol(τ,τ,𝟙) |> polar
Rsymbol(τ,τ,τ) |> polar
twist(τ) |> polar
```
+
+## [Further generalizations](@id ss_generalsectors)
+
+The `Anyonic` braiding style is one generalization beyond the bosonic and fermionic representation theory of groups, i.e. the action of groups on vector spaces and super vector spaces.
+It is also possible to consider fusion categories without braiding structure, represented as `NoBraiding` in TensorKitSectors.jl.
+Indeed, the framework for sectors outlined above is in one-to-one correspondence to the topological data for specifying a unitary (spherical and braided, and hence ribbon) [fusion category](https://en.wikipedia.org/wiki/Fusion_category), which is reviewed in the appendix on [category theory](@ref s_categories).
+For such categories, the objects are not necessarily vector spaces and the fusion and splitting tensors ``X^{ab}_{c,μ}`` do not necessarily exist as actual tensors.
+However, the morphism spaces ``c → a ⊗ b`` still behave as vector spaces, and the ``X^{ab}_{c,μ}`` act as generic basis for that space.
+As TensorKit.jl does not rely on the ``X^{ab}_{c,μ}`` themselves (even when they do exist), it can also deal with such general fusion categories.
+An extensive list of (the topological data of) such fusion categories, with and without braiding, is provided in [CategoryData.jl](https://github.com/lkdvos/CategoryData.jl).
+
+Within TensorKit.jl, the only sector with `NoBraiding` is the [`PlanarTrivial`](@ref) sector, which is actually equivalent to the `Trivial` sector, but where the braiding has been "disabled" for testing purposes.
+
+Finally, as mentioned above, a recent extension prepares TensorKitSectors.jl to deal with multi-fusion categories, where the sectors (simple objects) are organized in a matrix-like structure and thus have an additional row and column index.
+Fusion between sectors is only possible when the row and column indices match appropriately; otherwise the fusion product is empty.
+In this structure, the different *diagonal* sectors define separate fusion categories, whereas the *off-diagonal* sectors define bimodule categories between these fusion categories.
+Every diagonal set of sectors has its own unit sector, which also acts as the left / right unit for other sectors in the same column / row.
+The global unit object is not simple, but rather given by the direct sum of all diagonal unit sectors.
+We do not document or illustrate this structure here, but refer to the relevant functions [`leftunit`](@ref), [`rightunit`](@ref), [`allunits`](@ref) and [`UnitStyle`](@ref) for more information.
+Furthermore, we refer to [MultiTensorKit.jl](https://github.com/QuantumKitHub/MultiTensorKit.jl) for examples and ongoing development work on using multi-fusion categories.
diff --git a/docs/src/man/spaces.md b/docs/src/man/spaces.md
index 56ed1fe27..f994677e0 100644
--- a/docs/src/man/spaces.md
+++ b/docs/src/man/spaces.md
@@ -1,45 +1,46 @@
+```@meta
+CollapsedDocStrings = true
+```
+
# [Vector spaces](@id s_spaces)
```@setup tensorkit
using TensorKit
```
-From the [Introduction](@ref s_intro), it should be clear that an important aspect in the
-definition of a tensor (map) is specifying the vector spaces and their structure in the
-domain and codomain of the map. The starting point is an abstract type [`VectorSpace`](@ref)
+From the [Introduction](@ref s_intro), it should be clear that an important aspect in the definition of a tensor (map) is specifying the vector spaces and their structure in the domain and codomain of the map.
+The starting point is an abstract type [`VectorSpace`](@ref)
+
```julia
abstract type VectorSpace end
```
-Technically speaking, this name does not capture the full generality that TensorKit.jl supports,
-as instances of subtypes of `VectorSpace` can encode general objects in linear monoidal categories,
-which are not necessarily vector spaces. However, in order not to make the remaining discussion
-to abstract or complicated, we will simply use the nomenclature of vector spaces. In particular,
-we define two abstract subtypes
+Technically speaking, this name does not capture the full generality that TensorKit.jl supports, as instances of subtypes of `VectorSpace` can encode general objects in linear monoidal categories, which are not necessarily vector spaces.
+However, in order not to make the remaining discussion too abstract or complicated, we will simply use the nomenclature of vector spaces.
+In particular, we define two abstract subtypes
+
```julia
abstract type ElementarySpace <: VectorSpace end
const IndexSpace = ElementarySpace
abstract type CompositeSpace{S<:ElementarySpace} <: VectorSpace end
```
-Here, [`ElementarySpace`](@ref) is a super type for all vector spaces (objects) that can be
-associated with the individual indices of a tensor, as hinted to by its alias `IndexSpace`.
-On the other hand, subtypes of [`CompositeSpace{S}`](@ref) where `S<:ElementarySpace` are composed
-of a number of elementary spaces of type `S`. So far, there is a single concrete type
-[`ProductSpace{S,N}`](@ref) that represents the tensor product of `N` vector spaces of a homogeneous
-type `S`. Its properties are discussed in the section on [Composite spaces](@ref ss_compositespaces),
-together with possible extensions for the future.
+Here, [`ElementarySpace`](@ref) is a super type for all vector spaces (objects) that can be associated with the individual indices of a tensor, as hinted to by its alias `IndexSpace`.
+
+On the other hand, subtypes of [`CompositeSpace{S}`](@ref) where `S <: ElementarySpace` are composed of a number of elementary spaces of type `S`.
+So far, there is a single concrete type [`ProductSpace{S, N}`](@ref) that represents the tensor product of `N` vector spaces of a homogeneous type `S`.
+Its properties are discussed in the section on [Composite spaces](@ref ss_compositespaces), together with possible extensions for the future.
-Throughout TensorKit.jl, the function [`spacetype`](@ref) returns the type of `ElementarySpace`
-associated with e.g. a composite space or a tensor. It works both on instances and in the
-type domain. Its use will be illustrated below.
+Throughout TensorKit.jl, the function [`spacetype`](@ref) returns the type of `ElementarySpace` associated with e.g. a composite space or a tensor.
+It works both on instances and in the type domain.
+Its use will be illustrated below.
## [Fields](@id ss_fields)
-Vector spaces (and linear categories more generally) are defined over a field of scalars
-``𝔽``. We define a type hierarchy to specify the scalar field, but so far only support
-real and complex numbers, via
+Vector spaces (and linear categories more generally) are defined over a field of scalars ``𝔽``.
+We define a type hierarchy to specify the scalar field, but so far only support real and complex numbers, via
+
```julia
abstract type Field end
@@ -49,9 +50,11 @@ struct ComplexNumbers <: Field end
const ℝ = RealNumbers()
const ℂ = ComplexNumbers()
```
-Note that `ℝ` and `ℂ` can be typed as `\bbR`+TAB and `\bbC`+TAB. One reason for defining
-this new type hierarchy instead of recycling the types from Julia's `Number` hierarchy is
-to introduce some syntactic sugar without committing type piracy. In particular, we now have
+
+Note that `ℝ` and `ℂ` can be typed as `\bbR`+TAB and `\bbC`+TAB.
+One reason for defining this new type hierarchy instead of recycling the types from Julia's `Number` hierarchy is to introduce some syntactic sugar without committing type piracy.
+In particular, we now have
+
```@repl tensorkit
3 ∈ ℝ
5.0 ∈ ℂ
@@ -61,82 +64,58 @@ ComplexF64 ⊆ ℂ
ℝ ⊆ ℂ
ℂ ⊆ ℝ
```
-and furthermore —probably more usefully— `ℝ^n` and `ℂ^n` create specific elementary vector
-spaces as described in the next section. The underlying field of a vector space or tensor
-`a` can be obtained with [`field(a)`](@ref):
-```@docs; canonical=false
-field
-```
+and furthermore —probably more usefully— `ℝ^n` and `ℂ^n` create specific elementary vector spaces as described in the next section.
+The underlying field of a vector space or tensor `a` can be obtained with [`field(a)`](@ref):
## [Elementary spaces](@id ss_elementaryspaces)
-As mentioned at the beginning of this section, vector spaces that are associated with the
-individual indices of a tensor should be implemented as subtypes of `ElementarySpace`. As
-the domain and codomain of a tensor map will be the tensor product of such objects which all
-have the same type, it is important that associated vector spaces, such as the dual space,
-are objects of the same concrete type (i.e. with the same type parameters in case of a
-parametric type). In particular, every `ElementarySpace` should implement the following
-methods
+As mentioned at the beginning of this section, vector spaces that are associated with the individual indices of a tensor should be implemented as subtypes of `ElementarySpace`.
+As the domain and codomain of a tensor map will be the tensor product of such objects which all have the same type, it is important that associated vector spaces, such as the dual space, are objects of the same concrete type (i.e. with the same type parameters in case of a parametric type).
+In particular, every `ElementarySpace` should implement the following methods
```@docs; canonical=false
dim(::ElementarySpace)
field(::ElementarySpace)
-dual(::S) where {S<:ElementarySpace}
-conj(::S) where {S<:ElementarySpace}
+dual(::S) where {S <: ElementarySpace}
+conj(::S) where {S <: ElementarySpace}
```
-For convenience, the dual of a space `V` can also be obtained as `V'`. Furthermore, it is
-sometimes necessary to test whether a space is a dual or conjugate space, for which the
-methods [`isdual(::ElementarySpace)`](@ref) and [`isconj(::ElementarySpace)`](@ref) should
-be implemented.
+For convenience, the dual of a space `V` can also be obtained as `V'`.
+Furthermore, it is sometimes necessary to test whether a space is a dual or conjugate space, for which the methods [`isdual(::ElementarySpace)`](@ref) and [`isconj(::ElementarySpace)`](@ref) should be implemented.
We furthermore define a trait type
+
```@docs; canonical=false
InnerProductStyle
```
-to denote for a vector space `V` whether it has an inner product and thus a canonical
-mapping from `dual(V)` to `V` (for real fields `𝔽 ⊆ ℝ`) or from `dual(V)` to `conj(V)`
-(for complex fields). This mapping is provided by the metric, but no further support for
-working with vector spaces with general metrics is currently implemented.
-
-A number of concrete elementary spaces are implemented in TensorKit.jl. There is concrete
-type `GeneralSpace` which is completely characterized by its field `𝔽`, its dimension and
-whether its the dual and/or complex conjugate of $𝔽^d$.
-```julia
-struct GeneralSpace{𝔽} <: ElementarySpace
- d::Int
- dual::Bool
- conj::Bool
-end
+
+to denote for a vector space `V` whether it has an inner product and thus a canonical mapping from `dual(V)` to `V` (for real fields `𝔽 ⊆ ℝ`) or from `dual(V)` to `conj(V)` (for complex fields).
+This mapping is provided by the metric, but no further support for working with vector spaces with general metrics is currently implemented.
+
+A number of concrete elementary spaces are implemented in TensorKit.jl.
+There is a concrete type `GeneralSpace` which is completely characterized by its field `𝔽`, its dimension and whether it is the dual and/or complex conjugate of $𝔽^d$.
+
+```@docs; canonical=false
+GeneralSpace
```
-However, as the `InnerProductStyle` of `GeneralSpace` is currently set to `NoInnerProduct()`,
-this type of vector space is currently quite limited, though it supports constructing
-tensors and contracting them. However, most tensor factorizations will depend on the presence
-of an Euclidean inner product.
+However, as the `InnerProductStyle` of `GeneralSpace` is currently set to `NoInnerProduct()`, this type of vector space is currently quite limited, though it supports constructing tensors and contracting them.
+Moreover, most tensor factorizations will depend on the presence of a Euclidean inner product.
-Spaces with the `EuclideanInnerProduct()` style, i.e. with a standard Euclidean metric,
-have the natural isomorphisms `dual(V) == V` (for `𝔽 == ℝ`) or `dual(V) == conj(V)`
-(for `𝔽 == ℂ`). In the language of the appendix on [categories](@ref s_categories),
-this trait represents [dagger or unitary categories](@ref ss_adjoints), and these vector
-spaces support an `adjoint` operation.
+Spaces with the `EuclideanInnerProduct()` style, i.e. with a standard Euclidean metric, have the natural isomorphisms `dual(V) == V` (for `𝔽 == ℝ`) or `dual(V) == conj(V)` (for `𝔽 == ℂ`).
+In the language of the appendix on [categories](@ref s_categories), this trait represents [dagger or unitary categories](@ref ss_adjoints), and these vector spaces support an `adjoint` operation.
In particular, two concrete types are provided:
-```julia
-struct CartesianSpace <: ElementarySpace
- d::Int
-end
-struct ComplexSpace <: ElementarySpace
- d::Int
- dual::Bool
-end
+
+```@docs; canonical=false
+CartesianSpace
+ComplexSpace
```
-They represent the Euclidean spaces $ℝ^d$ or $ℂ^d$ without further inner structure.
-They can be created using the syntax `CartesianSpace(d) == ℝ^d` and `ComplexSpace(d) == ℂ^d`,
-or `ComplexSpace(d, true) == ComplexSpace(d; dual = true) == (ℂ^d)'` for the dual
-space of the latter. Note that the brackets are required because of the precedence rules,
-since `d' == d` for `d::Integer`.
+
+They represent the Euclidean spaces ``ℝ^d`` or ``ℂ^d`` without further inner structure.
+They can be created using the syntax `CartesianSpace(d) == ℝ^d` and `ComplexSpace(d) == ℂ^d`, or `ComplexSpace(d, true) == ComplexSpace(d; dual = true) == (ℂ^d)'` for the dual space of the latter.
+Note that the brackets are required because of the precedence rules, since `d' == d` for `d::Integer`.
Some examples:
```@repl tensorkit
@@ -155,35 +134,24 @@ InnerProductStyle(ℂ^5)
```
!!! note
- For `ℂ^n` the dual space is equal (or naturally isomorphic) to the conjugate space, but
- not to the space itself. This means that even for `ℂ^n`, arrows matter in the
- diagrammatic notation for categories or for tensors, and in particular that a
- contraction between two tensor indices will check that one is living in the space and
- the other in the dual space. This is in contrast with several other software packages,
- especially in the context of tensor networks, where arrows are only introduced when
- discussing symmetries. We believe that our more puristic approach can be useful to detect
- errors (e.g. unintended contractions). Only with `ℝ^n` will their be no distinction
- between a space and its dual. When creating tensors with indices in `ℝ^n` that have
- complex data, a one-time warning will be printed, but most operations should continue
- to work nonetheless.
-
-One more important concrete implementation of `ElementarySpace` with a `EuclideanInnerProduct()`
-is the [`GradedSpace`](@ref) type, which is used to represent a graded complex vector space,
-where the grading is provided by the irreducible representations of a group, or more generally,
-the simple objects of a unitary fusion category. We refer to the subsection on [graded spaces](@ref ss_rep)
-on the [next page](@ref s_sectorsrepfusion) for further information about `GradedSpace`.
+ For `ℂ^n` the dual space is equal (or naturally isomorphic) to the conjugate space, but not to the space itself.
+ This means that even for `ℂ^n`, arrows matter in the diagrammatic notation for categories or for tensors, and in particular that a contraction between two tensor indices will check that one is living in the space and the other in the dual space.
+ This is in contrast with several other software packages, especially in the context of tensor networks, where arrows are only introduced when discussing symmetries.
+ We believe that our more puristic approach can be useful to detect errors (e.g. unintended contractions).
+    Only with `ℝ^n` will there be no distinction between a space and its dual.
+ When creating tensors with indices in `ℝ^n` that have complex data, a one-time warning will be printed, but most operations should continue to work nonetheless.
+
+One more important concrete implementation of `ElementarySpace` with a `EuclideanInnerProduct()` is the [`GradedSpace`](@ref) type, which is used to represent a graded complex vector space, where the grading is provided by the irreducible representations of a group, or more generally, the simple objects of a unitary fusion category.
+We refer to the subsection on [graded spaces](@ref ss_rep) on the [next page](@ref s_sectorsrepfusion) for further information about `GradedSpace`.
## Operations with elementary spaces
-Instances of `ElementarySpace` support a number of useful operations. Firstly, we define the direct
-sum of two vector spaces `V1` and `V2` of the same `spacetype` (and with the same value of `isdual`)
-as [`V1 ⊕ V2`](@ref), where `⊕` is obtained by typing `\oplus`+TAB. [`zerospace(V)`](@ref) corresponds
-to the identity or zero element with respect to this direct sum operation, i.e. it corresponds to
-a zero-dimensional space. Furthermore, [`unitspace(V)`] (@ref) applied to an elementary space returns a
-one-dimensional space, that is isomorphic to the scalar field underlying the space itself. Finally,
-we have also introduced the non-standard convention `V1 ⊖ V2` (obtained by typing `\ominus`+TAB.)
-in order to obtain a space that is isomorphic to the quotient space of `V1` by `V2`, or thus,
-a particular choice of complement of `V2` in `V1` such that `V1 == V2 ⊕ (V1 ⊖ V2)` is satisfied.
+Instances of `ElementarySpace` support a number of useful operations.
+Firstly, we define the direct sum of two vector spaces `V1` and `V2` of the same `spacetype` (and with the same value of `isdual`) as [`V1 ⊕ V2`](@ref), where `⊕` is obtained by typing `\oplus`+TAB.
+[`zerospace(V)`](@ref) corresponds to the identity or zero element with respect to this direct sum operation, i.e. it corresponds to a zero-dimensional space.
+Furthermore, [`unitspace(V)`](@ref) applied to an elementary space returns a one-dimensional space, that is isomorphic to the scalar field underlying the space itself.
+Finally, we have also introduced the non-standard convention [`V1 ⊖ V2`](@ref) (obtained by typing `\ominus`+TAB) in order to obtain a space that is isomorphic to the quotient space of `V1` by `V2`, or thus, a particular choice of complement of `V2` in `V1` such that `V1 == V2 ⊕ (V1 ⊖ V2)` is satisfied.
Some examples illustrate this better.
```@repl tensorkit
@@ -204,33 +172,24 @@ unitspace((ℂ^3)')
Note, finally, that we have defined `oplus` and `ominus` as ASCII alternatives for `⊕` and `⊖` respectively.
-A second type of operation with elementary spaces is the function [`flip(V::ElementarySpace)`](@ref),
-which returns a space that is isomorphic to `V` but has `isdual(flip(V)) == isdual(V')`, i.e., if `V`
-is a normal space, then `flip(V)` is a dual space. `flip(V)` is different from `dual(V)` in the case
-of [`GradedSpace`](@ref). It is useful to flip a tensor index from a ket to a bra (or vice versa),
-by contracting that index with a unitary map from `V1` to `flip(V1)`.
+A second type of operation with elementary spaces is the function [`flip(V::ElementarySpace)`](@ref), which returns a space that is isomorphic to `V` but has `isdual(flip(V)) == isdual(V')`, i.e., if `V` is a normal space, then `flip(V)` is a dual space.
+`flip(V)` is different from `dual(V)` in the case of [`GradedSpace`](@ref).
+It is useful to flip a tensor index from a ket to a bra (or vice versa), by contracting that index with a unitary map from `V1` to `flip(V1)`.
+
+While we provide some trivial examples here, we refer to the section on [graded spaces](@ref ss_rep) for examples where `flip` acts non-trivially and produces results that are different than `dual`.
-While we provide some trivial examples here, we refer to the section on [graded spaces](@ref ss_rep)
-for examples where `flip` acts non-trivially and produces results that are different than `dual`.
```@repl tensorkit
flip(ℂ^4)
flip(ℂ^4) ≅ ℂ^4
flip(ℂ^4) == ℂ^4
```
-Finally, we provide two methods [`infimum(V1, V2)`](@ref) and [`supremum(V1, V2)`](@ref) for
-elementary spaces `V1` and `V2` with the same `spacetype` and value of `isdual`. The former
-returns the "largest" elementary space `V::ElementarySpace` with the same value of `isdual`
-such that we can construct surjective morphisms from both `V1` and `V2` to `V`. Similarly,
-the latter returns the "smallest" elementary space `W::ElementarySpace` with the same value
-of `isdual` such that we can construct injective morphisms from both `V1` and `V2` to `W`.
-For `CartesianSpace` and `ComplexSpace`, this simply amounts to the space with minimal or maximal
-dimension, but it is again more interesting in the case of [`GradedSpace`](@ref), as discussed
-on the [next page](@ref ss_). It is that case where `infimum(V1, V2)` might be different from either
-`V1` or `V2`, and similar for `supremum(V1, V2)`, which justifies the choice of these names
-over simply `min` and `max`. Also note that these methods are a direct consequence of the
-partial order that we can define between vector spaces of the same `spacetype` more generally,
-as discussed below in the subsection ["More operations with vector spaces"](@ref ss_spaceops).
+Finally, we provide two methods [`infimum(V1, V2)`](@ref) and [`supremum(V1, V2)`](@ref) for elementary spaces `V1` and `V2` with the same `spacetype` and value of `isdual`.
+The former returns the "largest" elementary space `V::ElementarySpace` with the same value of `isdual` such that we can construct surjective morphisms from both `V1` and `V2` to `V`.
+Similarly, the latter returns the "smallest" elementary space `W::ElementarySpace` with the same value of `isdual` such that we can construct injective morphisms from both `V1` and `V2` to `W`.
+For `CartesianSpace` and `ComplexSpace`, this simply amounts to the space with minimal or maximal dimension, but it is again more interesting in the case of [`GradedSpace`](@ref), as discussed on the [next page](@ref s_sectorsrepfusion).
+It is that case where `infimum(V1, V2)` might be different from either `V1` or `V2`, and similar for `supremum(V1, V2)`, which justifies the choice of these names over simply `min` and `max`.
+Also note that these methods are a direct consequence of the partial order that we can define between vector spaces of the same `spacetype` more generally, as discussed below in the subsection [More operations with vector spaces](@ref ss_spaceops).
Some examples:
```@repl tensorkit
@@ -242,20 +201,16 @@ supremum((ℂ^5)', (ℂ^3)')
## [Composite spaces](@id ss_compositespaces)
-Composite spaces are vector spaces that are built up out of individual elementary vector
-spaces of the same type. The most prominent (and currently only) example is a tensor
-product of `N` elementary spaces of the same type `S`, which is implemented as
-```julia
-struct ProductSpace{S<:ElementarySpace, N} <: CompositeSpace{S}
- spaces::NTuple{N, S}
-end
+Composite spaces are vector spaces that are built up out of individual elementary vector spaces of the same type.
+The most prominent (and currently only) example is a tensor product of `N` elementary spaces of the same type `S`:
+
+```@docs; canonical=false
+ProductSpace
```
-Given some `V1::S`, `V2::S`, `V3::S` of the same type `S<:ElementarySpace`, we can easily
-construct `ProductSpace{S,3}((V1, V2, V3))` as `ProductSpace(V1, V2, V3)` or using
-`V1 ⊗ V2 ⊗ V3`, where `⊗` is simply obtained by typing `\otimes`+TAB. In fact, for
-convenience, also the regular multiplication operator `*` acts as tensor product between
-vector spaces, and as a consequence so does raising a vector space to a positive integer
-power, i.e.
+
+Given some `V1::S`, `V2::S`, `V3::S` of the same type `S<:ElementarySpace`, we can easily construct `ProductSpace{S, 3}((V1, V2, V3))` as `ProductSpace(V1, V2, V3)` or using `V1 ⊗ V2 ⊗ V3`, where `⊗` is simply obtained by typing `\otimes`+TAB.
+In fact, for convenience, also the regular multiplication operator `*` acts as tensor product between vector spaces, and as a consequence so does raising a vector space to a positive integer power, i.e.
+
```@repl tensorkit
V1 = ℂ^2
V2 = ℂ^3
@@ -267,21 +222,16 @@ dual(V1 ⊗ V2 ⊗ V1')
spacetype(V1 ⊗ V2)
spacetype(ProductSpace{ComplexSpace,3})
```
-Here, the newly introduced function `dims` maps `dim` to the individual spaces in a
-`ProductSpace` and returns the result as a tuple. The rationale for the dual space of
-a `ProductSpace` being the tensor product of the dual spaces in reverse order is
-explained in the subsection on [duality](@ref ss_dual) in the appendix on
-[category theory](@ref s_categories).
-
-Following Julia's Base library, the function `one` applied to an instance of `ProductSpace{S,N}`
-or of `S<:ElementarySpace` itself returns the multiplicative identity for these objects.
-Similar to Julia Base, `one` also works in the type domain. The multiplicative identity for
-vector spaces corresponds to the (monoidal) unit, which is represented as `ProductSpace{S,0}(())`
-and simply printed as `one(S)` for the specific type `S`. Note, however, that `one(S)` is
-strictly speaking only the multiplicative identity when multiplied with `ProductSpace{S,N}`
-instances. For elementary spaces `V::S`, `V ⊗ one(V)` will yield `ProductSpace{S,1}(V)` and not
-`V` itself. However, even though `V ⊗ one(V)` is not strictly equal to `V`, the object `ProductSpace(V)`,
-which can also be created as `⊗(V)`, does mathematically encapsulate the same vector space as `V`.
+
+Here, the newly introduced function `dims` maps `dim` to the individual spaces in a `ProductSpace` and returns the result as a tuple.
+The rationale for the dual space of a `ProductSpace` being the tensor product of the dual spaces in reverse order is explained in the subsection on [duality](@ref ss_dual) in the appendix on [category theory](@ref s_categories).
+
+Following Julia's Base library, the function `one` applied to an instance of `ProductSpace{S, N}` or of `S <: ElementarySpace` itself returns the multiplicative identity for these objects.
+Similar to Julia Base, `one` also works in the type domain.
+The multiplicative identity for vector spaces corresponds to the (monoidal) unit, which is represented as `ProductSpace{S, 0}(())` and simply printed as `one(S)` for the specific type `S`.
+Note, however, that `one(S)` is strictly speaking only the multiplicative identity when multiplied with `ProductSpace{S, N}` instances.
+For elementary spaces `V::S`, `V ⊗ one(V)` will yield `ProductSpace{S, 1}(V)` and not `V` itself.
+However, even though `V ⊗ one(V)` is not strictly equal to `V`, the object `ProductSpace(V)`, which can also be created as `⊗(V)`, does mathematically encapsulate the same vector space as `V`.
```@repl tensorkit
one(V1)
@@ -294,77 +244,51 @@ one(typeof(P))
P * one(P) == P == one(P) ⊗ P
```
-In the future, other `CompositeSpace` types could be added. For example, the wave function
-of an `N`-particle quantum system in first quantization would require the introduction of a
-`SymmetricSpace{S,N}` or a `AntiSymmetricSpace{S,N}` for bosons or fermions respectively,
-which correspond to the symmetric (permutation invariant) or antisymmetric subspace of
-`V^N`, where `V::S` represents the Hilbert space of the single particle system. Other
-scientific fields, like general relativity, might also benefit from tensors living in
-subspace with certain symmetries under specific index permutations.
+In the future, other `CompositeSpace` types could be added.
+For example, the wave function of an ``N``-particle quantum system in first quantization would require the introduction of a `SymmetricSpace{S, N}` or an `AntiSymmetricSpace{S, N}` for bosons or fermions respectively, which correspond to the symmetric (permutation invariant) or antisymmetric subspace of `V^N`, where `V::S` represents the Hilbert space of the single particle system.
+Other scientific fields, like general relativity, might also benefit from tensors living in subspace with certain symmetries under specific index permutations.
## [More operations with vector spaces](@id ss_spaceops)
-Vector spaces of the same `spacetype` can be given a partial order, based on whether there
-exist injective morphisms (a.k.a *monomorphisms*) or surjective morphisms (a.k.a.
-*epimorphisms*) between them. In particular, we define `ismonomorphic(V1, V2)`, with
-Unicode synonym `V1 ≾ V2` (obtained as `\precsim+TAB`), to express whether there exist
-monomorphisms in `V1→V2`. Similarly, we define `isepimorphic(V1, V2)`, with Unicode
-synonym `V1 ≿ V2` (obtained as `\succsim+TAB`), to express whether there exist
-epimorphisms in `V1→V2`. Finally, we define `isisomorphic(V1, V2)`, with Unicode
-alternative `V1 ≅ V2` (obtained as `\cong+TAB`), to express whether there exist
-isomorphism in `V1→V2`. In particular `V1 ≅ V2` if and only if `V1 ≾ V2 && V1 ≿ V2`.
-
-For completeness, we also export the strict comparison operators `≺` and `≻`
-(`\prec+TAB` and `\succ+TAB`), with definitions
+Vector spaces of the same `spacetype` can be given a partial order, based on whether there exist injective morphisms (a.k.a *monomorphisms*) or surjective morphisms (a.k.a. *epimorphisms*) between them.
+In particular, we define `ismonomorphic(V1, V2)`, with Unicode synonym `V1 ≾ V2` (obtained as `\precsim+TAB`), to express whether there exist monomorphisms in `V1 → V2`.
+Similarly, we define `isepimorphic(V1, V2)`, with Unicode synonym `V1 ≿ V2` (obtained as `\succsim+TAB`), to express whether there exist epimorphisms in `V1 → V2`.
+Finally, we define `isisomorphic(V1, V2)`, with Unicode alternative `V1 ≅ V2` (obtained as `\cong+TAB`), to express whether there exist isomorphisms in `V1 → V2`.
+In particular `V1 ≅ V2` if and only if `V1 ≾ V2 && V1 ≿ V2`.
+
+For completeness, we also export the strict comparison operators `≺` and `≻` (`\prec+TAB` and `\succ+TAB`), with definitions
```julia
≺(V1::VectorSpace, V2::VectorSpace) = V1 ≾ V2 && !(V1 ≿ V2)
≻(V1::VectorSpace, V2::VectorSpace) = V1 ≿ V2 && !(V1 ≾ V2)
```
However, as we expect these to be less commonly used, no ASCII alternative is provided.
-In the context of `InnerProductStyle(V) <: EuclideanInnerProduct`, `V1 ≾ V2` implies that
-there exists isometries ``W:V1 → V2`` such that ``W^† ∘ W = \mathrm{id}_{V1}``, while
-`V1 ≅ V2` implies that there exist unitaries ``U:V1→V2`` such that
-``U^† ∘ U = \mathrm{id}_{V1}`` and ``U ∘ U^† = \mathrm{id}_{V2}``.
+In the context of `InnerProductStyle(V) <: EuclideanInnerProduct`, `V1 ≾ V2` implies that there exist isometries ``W : V1 → V2`` such that ``W^† ∘ W = \mathrm{id}_{V1}``, while `V1 ≅ V2` implies that there exist unitaries ``U : V1 → V2`` such that ``U^† ∘ U = \mathrm{id}_{V1}`` and ``U ∘ U^† = \mathrm{id}_{V2}``.
-Note that spaces that are isomorphic are not necessarily equal. One can be a dual space,
-and the other a normal space, or one can be an instance of `ProductSpace`, while the other
-is an `ElementarySpace`. There will exist (infinitely) many isomorphisms between the
-corresponding spaces, but in general none of those will be canonical.
+Note that spaces that are isomorphic are not necessarily equal.
+One can be a dual space, and the other a normal space, or one can be an instance of `ProductSpace`, while the other is an `ElementarySpace`.
+There will exist (infinitely) many isomorphisms between the corresponding spaces, but in general none of those will be canonical.
-There are also a number of convenience functions to create isomorphic spaces. The function
-`fuse(V1, V2, ...)` or `fuse(V1 ⊗ V2 ⊗ ...)` returns an elementary space that is isomorphic
-to `V1 ⊗ V2 ⊗ ...`.
+There are also a number of convenience functions to create isomorphic spaces.
+The function `fuse(V1, V2, ...)` or `fuse(V1 ⊗ V2 ⊗ ...)` returns an elementary space that is isomorphic to `V1 ⊗ V2 ⊗ ...`.
## [Space of morphisms](@id ss_homspaces)
-As mentioned in the introduction, we define tensor maps as linear maps from
-a `ProductSpace` domain to a `ProductSpace` codomain. The set of all tensor maps with a fixed
-domain and codomain constitutes a vector space, which we represent with the `HomSpace` type.
-```julia
-struct HomSpace{S<:ElementarySpace, P1<:CompositeSpace{S}, P2<:CompositeSpace{S}}
- codomain::P1
- domain::P2
-end
+
+As mentioned in the introduction, we define tensor maps as linear maps from a `ProductSpace` domain to a `ProductSpace` codomain.
+The set of all tensor maps with a fixed domain and codomain constitutes a vector space, which we represent with the `HomSpace` type.
+
+```@docs; canonical=false
+HomSpace
```
-Aside from the standard constructor, a `HomSpace` instance can be created as either
-`domain → codomain` or `codomain ← domain` (where the arrows are obtained as
-`\to+TAB` or `\leftarrow+TAB`, and as `\rightarrow+TAB` respectively). The
-reason for first listing the codomain and than the domain will become clear in the
-[section on tensor maps](@ref s_tensors).
-
-Note that `HomSpace` is not a subtype of `VectorSpace`, i.e. we restrict the latter to
-encode all spaces and generalizations thereof (i.e. objects in linear monoidal categories)
-that are associated with the indices and the domain and codomain of a tensor map. Even when
-these generalizations are no longer strictly vector spaces and have unconventional properties
-(such as non-integer dimensions), the space of tensor maps (homomorphisms) between a given
-domain and codomain, represented by a `HomSpace` instance, is always a vector space in the
-strict mathematical sense (with in particular an integer dimension). Because `HomSpace` and
-the different subtypes of `VectorSpace` represent very different mathematical concepts that do
-not directly interact, we have chosen to keep them separate in the type hierarchy.
-
-Furthermore, on these `HomSpace` instances, we define a number of useful methods that are a
-precursor to the corresponding methods that we will define to manipulate the actual tensors,
-as illustrated in the following example:
+
+Aside from the standard constructor, a `HomSpace` instance can be created as either `domain → codomain` or `codomain ← domain` (where the arrows are obtained as `\to+TAB` or `\leftarrow+TAB`, and as `\rightarrow+TAB` respectively).
+The reason for first listing the codomain and then the domain will become clear in the [section on tensor maps](@ref s_tensors).
+
+Note that `HomSpace` is not a subtype of `VectorSpace`, i.e. we restrict the latter to encode all spaces and generalizations thereof (i.e. objects in linear monoidal categories) that are associated with the indices and the domain and codomain of a tensor map.
+Even when these generalizations are no longer strictly vector spaces and have unconventional properties (such as non-integer dimensions), the space of tensor maps (homomorphisms) between a given domain and codomain, represented by a `HomSpace` instance, is always a vector space in the strict mathematical sense (with in particular an integer dimension).
+Because `HomSpace` and the different subtypes of `VectorSpace` represent very different mathematical concepts that do not directly interact, we have chosen to keep them separate in the type hierarchy.
+
+Furthermore, on these `HomSpace` instances, we define a number of useful methods that are a precursor to the corresponding methods that we will define to manipulate the actual tensors, as illustrated in the following example:
```@repl tensorkit
W = ℂ^2 ⊗ ℂ^3 → ℂ^3 ⊗ dual(ℂ^4)
field(W)
@@ -390,14 +314,9 @@ insertrightunit(W, 2)
removeunit(insertrightunit(W, 2), 3)
TensorKit.compose(W, adjoint(W))
```
-Note that indexing `W` follows an order that first targets the spaces in the codomain,
-followed by the dual of the spaces in the domain. This particular convention is useful
-in combination with the instances of type [`TensorMap`](@ref), which represent the actual
-morphisms living in such a `HomSpace`. Also note that `dim(W)` is here given by the product
-of the dimensions of the individual spaces, but that this is no longer true once symmetries
-are involved. At any time will `dim(::HomSpace)` represent the number of linearly independent
-morphisms in this space, or thus, the number of independent components that a corresponding
-`TensorMap` object will have.
-
-A complete list of methods defined on `HomSpace` instances together with the corresponding
-documentation is provided in the [library section on Vector spaces](@ref s_libvectorspaces).
+Note that indexing `W` follows an order that first targets the spaces in the codomain, followed by the dual of the spaces in the domain.
+This particular convention is useful in combination with the instances of type [`TensorMap`](@ref), which represent the actual morphisms living in such a `HomSpace`.
+Also note that `dim(W)` is here given by the product of the dimensions of the individual spaces, but that this is no longer true once symmetries are involved.
+At any time, `dim(::HomSpace)` will represent the number of linearly independent morphisms in this space, or thus, the number of independent components that a corresponding `TensorMap` object will have.
+
+A complete list of methods defined on `HomSpace` instances together with the corresponding documentation is provided in the [library section on Vector spaces](@ref s_libvectorspaces).
diff --git a/docs/src/man/symmetries.md b/docs/src/man/symmetries.md
new file mode 100644
index 000000000..21145b41b
--- /dev/null
+++ b/docs/src/man/symmetries.md
@@ -0,0 +1,109 @@
+# [Symmetries](@id s_symmetries)
+
+```@setup sectors
+using TensorKit
+import LinearAlgebra
+```
+
+## Symmetries and symmetric tensors
+
+When a physical system exhibits certain symmetries, it can often be described using tensors that transform covariantly with respect to the corresponding symmetry group, where this group acts as a tensor product of group actions on every tensor index separately.
+The group action on a single index, or thus, on the corresponding vector space, can be decomposed into irreducible representations (irreps).
+Here, we restrict to unitary representations, and thus assume that the corresponding vector spaces also have a natural Euclidean inner product.
+In particular, the Euclidean inner product between two vectors is invariant under the group action and thus transforms according to the trivial representation of the group.
+
+The corresponding vector spaces will be canonically represented as ``V = ⨁_a ℂ^{n_a} ⊗ R_{a}``, where ``a`` labels the different irreps, ``n_a`` is the number of times irrep ``a`` appears and ``R_a`` is the vector space associated with irrep ``a``.
+Irreps are also known as spin sectors (in the case of ``\mathsf{SU}_2``) or charge sectors (in the case of ``\mathsf{U}_1``), and we henceforth refer to ``a`` as a sector.
+The number of times ``n_a`` that sector ``a`` appears will be referred to as the degeneracy of sector ``a`` in the space ``V``.
+In fact, the approach taken by TensorKit.jl goes beyond the case of irreps of groups, and, using the language from the Appendix on [categories](@ref s_categories), sectors correspond to (equivalence classes of) simple objects in a unitary fusion or multifusion category, whereas the "representation spaces" ``V`` correspond to general (semisimple) objects in such a category.
+Nonetheless, many aspects of the construction of symmetric tensors can already be appreciated by considering the representation theory of a non-abelian group such as ``\mathsf{SU}_2`` or ``\mathsf{SU}_3`` as example.
+For practical reasons, we assume that there is a canonical order of the sectors, so that the vector space ``V`` is completely specified by the values of ``n_a``.
+
+When considering a tensor product of such representation spaces, they can again be decomposed into a direct sum of "coupled" sectors and associated degeneracy spaces.
+However, a non-trivial basis transformation is required to go from the tensor product basis to the basis of coupled sectors.
+The gain in efficiency (both in memory occupation and computation time) obtained from using symmetric (technically: equivariant) tensor maps is that, by Schur's lemma, they are block diagonal in the basis of coupled sectors.
+Hence, to exploit this block diagonal form, it is essential that we know the basis transformation from the individual (uncoupled) sectors appearing in the tensor product form of the domain and codomain, to the totally coupled sectors that label the different blocks.
+We refer to the latter as block sectors.
+The transformation from the uncoupled sectors in the domain (or codomain) of the tensor map to the block sector is encoded in a fusion tree (or splitting tree).
+Essentially, it is a sequential application of pairwise fusion as described by the group's [Clebsch–Gordan (CG) coefficients](https://en.wikipedia.org/wiki/Clebsch–Gordan_coefficients).
+However, it turns out that we do not need to know or instantiate the actual CG coefficients that make up the fusion and splitting trees.
+Instead, we only need to know how the splitting and fusion trees transform under transformations such as interchanging the order of the incoming sectors or interchanging incoming and outgoing sectors.
+This information is known as the topological data of the group.
+It consists out of the fusion rules and the associativity relations encoded by the F-symbols, which are also known as recoupling coefficients or [6j-symbols](https://en.wikipedia.org/wiki/6-j_symbol) (more accurately, the F-symbol is actually [Racah's W-coefficients](https://en.wikipedia.org/wiki/Racah_W-coefficient) in the case of ``\mathsf{SU}_2``).
+
+In the next three sections of the manual, we describe how the above concepts are implemented in TensorKit.jl in greater detail.
+Firstly, we describe how sectors and their associated topological data are encoded using a specialized interface and type hierarchy.
+The second section describes how to build spaces ``V`` composed of a direct sum of different sectors of the same type, and which operations are supported on those spaces.
+In the third section, we explain the details of constructing and manipulating fusion trees.
+Finally, we elaborate on the case of general fusion categories and the possibility of having fermionic or anyonic twists.
+
+But first, on the remainder of this page, we provide a concise theoretical summary of the required data of the representation theory of a group.
+We refer to the appendix on [categories](@ref s_categories), and in particular the subsection on [topological data of a unitary fusion category](@ref ss_topologicalfusion), for more details.
+
+!!! note
+ The infrastructure for defining sectors is actually implemented in a standalone package, [TensorKitSectors.jl](https://github.com/QuantumKitHub/TensorKitSectors.jl), that is imported and reexported by TensorKit.jl.
+
+!!! note
+ On this and the next page of the manual, we assume some familiarity with the representation theory of non-abelian groups, and the structure of a symmetric tensor.
+ For a more pedagogical introduction based on physical examples, we recommend reading the first appendix, which provides a [tutorial-style introduction on the construction of symmetric tensors](@ref s_symmetric_tutorial).
+
+
+## [Representation theory and unitary fusion categories](@id ss_representationtheory)
+
+Let the different irreps or sectors be labeled as ``a``, ``b``, ``c``, …
+First and foremost, we need to specify the *fusion rules* ``a ⊗ b = ⨁ N^{ab}_{c} c`` with ``N^{ab}_{c}`` some non-negative integers.
+The meaning of the fusion rules is that the space of covariant maps ``R_a ⊗ R_b → R_c`` (or vice versa ``R_c → R_a ⊗ R_b``) has dimension ``N^{ab}_c``.
+In particular, there should always exist a unique trivial sector ``u`` (called the identity object ``I`` or ``1`` in the language of categories) such that ``a ⊗ u = a = u ⊗ a`` for every other sector ``a``.
+Furthermore, with respect to every sector ``a`` there should exist a unique sector ``\bar{a}`` such that ``N^{a\bar{a}}_{u} = 1``, whereas for all ``b \neq \bar{a}``, ``N^{ab}_{u} = 0``.
+For irreps of groups, ``\bar{a}`` corresponds to the complex conjugate of the representation ``a``, or some representation isomorphic to it.
+For example, for the representations of ``\mathsf{SU}_2``, the trivial sector corresponds to spin zero and all irreps are self-dual (i.e. ``a = \bar{a}``), meaning that the conjugate representation is isomorphic to the non-conjugated one (they are however not equal but related by a similarity transform).
+
+In particular, we now assume the existence of a basis for the ``N^{ab}_c``-dimensional space of covariant maps ``R_c → R_a ⊗ R_b``, which consists of unitary tensor maps ``X^{ab}_{c,μ} : R_c → R_a ⊗ R_b`` with ``μ = 1, …, N^{ab}_c`` such that
+
+```math
+(X^{ab}_{c,μ})^† X^{ab}_{c,ν} = δ_{μ,ν} \mathrm{id}_{R_c}
+```
+
+and
+
+```math
+\sum_{c} \sum_{μ = 1}^{N^{ab}_c} X^{ab}_{c,μ} (X^{ab}_{c,μ})^\dagger = \mathrm{id}_{R_a ⊗ R_b}
+```
+
+The tensors ``X^{ab}_{c,μ}`` are the splitting tensors, and because we restrict to unitary representations (or unitary categories), the corresponding fusion tensors are obtained by hermitian conjugation.
+Different choices of orthonormal bases would be related by a unitary basis transform within the space, i.e. acting on the multiplicity label ``μ = 1, …, N^{ab}_c``.
+For ``\mathsf{SU}_2``, where ``N^{ab}_c`` is zero or one and the multiplicity labels are absent, this freedom reduces to a phase factor.
+In a standard convention, the entries of ``X^{ab}_{c,μ}`` are precisely given by the CG coefficients.
+However, the point is that we do not need to know the tensors ``X^{ab}_{c,μ}`` explicitly, but only the topological data of (the representation category of) the group, which describes the following transformation:
+
+* F-move or recoupling: the transformation from ``(a ⊗ b) ⊗ c`` to ``a ⊗ (b ⊗ c)``:
+
+```math
+(X^{ab}_{e,μ} ⊗ \mathrm{id}_c) ∘ X^{ec}_{d,ν} = ∑_{f,κ,λ} [F^{abc}_{d}]_{e,μν}^{f,κλ} (\mathrm{id}_a ⊗ X^{bc}_{f,κ}) ∘ X^{af}_{d,λ}
+```
+
+* [Braiding](@ref ss_braiding) or permuting: the transformation from ``a ⊗ b`` to ``b ⊗ a`` as defined by ``τ_{a, b}: R_a ⊗ R_b → R_b ⊗ R_a``:
+
+```math
+τ_{R_a,R_b} ∘ X^{ab}_{c,μ} = ∑_{ν} [R^{ab}_c]^ν_μ X^{ba}_{c,ν}
+```
+
+The dimensions of the spaces ``R_a`` on which representation ``a`` acts are denoted as ``d_a`` and referred to as quantum dimensions.
+In particular ``d_u = 1`` and ``d_a = d_{\bar{a}}``.
+This information is also encoded in the F-symbol as ``d_a = | [F^{a \bar{a} a}_a]^u_u |^{-1}``.
+Note that there are no multiplicity labels in that particular F-symbol as ``N^{a\bar{a}}_u = 1``.
+
+There is a graphical representation associated with the fusion tensors and their manipulations, which we summarize here:
+
+```@raw html
+
+```
+
+We refer to the appendix on [category theory](@ref s_categories), and in particular the section on [topological data of a unitary fusion category](@ref ss_topologicalfusion) for further details.
+
+Finally, for the implementation, it will be useful to distinguish between a number of different possibilities regarding the fusion rules.
+If, for every ``a`` and ``b``, there is a unique ``c`` such that ``a ⊗ b = c`` (i.e. ``N^{ab}_{c} = 1`` and ``N^{ab}_{c′} = 0`` for all other ``c′``), the sector type is said to have unique fusion.
+The representations of a group have this property if and only if the group multiplication law is commutative, i.e. if the group is abelian.
+In that case, all spaces ``R_{a}`` associated with the representation are one-dimensional and thus trivial.
+In the case of representations of non-abelian groups, or in the more general categorical case, there will always be at least one pair of sectors ``a`` and ``b`` (not necessarily distinct) for which the fusion product ``a ⊗ b`` contains more than one sector ``c`` with non-zero ``N^{ab}_c``.
+In those cases, we find it useful to further distinguish between sector types for which ``N^{ab}_c`` only takes the values zero or one, such that no multiplicity labels (the Greek letters ``μ``, …) are needed, e.g. the representations of ``\mathsf{SU}_2``, and those where some ``N^{ab}_c`` are larger than one, e.g. the representations of ``\mathsf{SU}_3``.
diff --git a/docs/src/man/tensormanipulations.md b/docs/src/man/tensormanipulations.md
new file mode 100644
index 000000000..15285fd78
--- /dev/null
+++ b/docs/src/man/tensormanipulations.md
@@ -0,0 +1,337 @@
+# [Manipulating tensors](@id s_tensormanipulations)
+
+## [Vector space and linear algebra operations](@id ss_tensor_linalg)
+
+`AbstractTensorMap` instances `t` represent linear maps, i.e. homomorphisms in a `𝕜`-linear category, just like matrices.
+To a large extent, they follow the interface of `Matrix` in Julia's `LinearAlgebra` standard library.
+Many methods from `LinearAlgebra` are (re)exported by TensorKit.jl, and can then be used without `using LinearAlgebra` explicitly.
+In all of the following methods, the implementation acts directly on the underlying matrix blocks (typically using the same method) and never needs to perform any basis transforms.
+
+In particular, `AbstractTensorMap` instances can be composed, provided the domain of the first object coincides with the codomain of the second.
+Composing tensor maps uses the regular multiplication symbol as in `t = t1 * t2`, which is also used for matrix multiplication.
+TensorKit.jl also supports (and exports) the mutating method `mul!(t, t1, t2)`.
+We can then also try to invert a tensor map using `inv(t)`, though this can only exist if the domain and codomain are isomorphic, which can e.g. be checked as `fuse(codomain(t)) == fuse(domain(t))`.
+If the inverse is composed with another tensor `t2`, we can use the syntax `t1 \ t2` or `t2 / t1`.
+However, this syntax also accepts instances `t1` whose domain and codomain are not isomorphic, and then amounts to `pinv(t1)`, the Moore-Penrose pseudoinverse.
+This, however, is only really justified as minimizing the least squares problem if `InnerProductStyle(t) <: EuclideanInnerProduct`.
+
+`AbstractTensorMap` instances behave themselves as vectors (i.e. they are `𝕜`-linear) and so they can be multiplied by scalars and, if they live in the same space, i.e. have the same domain and codomain, they can be added to each other.
+There is also a `zero(t)`, the additive identity, which produces a zero tensor with the same domain and codomain as `t`.
+In addition, `TensorMap` supports basic Julia methods such as `fill!` and `copy!`, as well as `copy(t)` to create a copy with independent data.
+Aside from basic `+` and `*` operations, TensorKit.jl reexports a number of efficient in-place methods from `LinearAlgebra`, such as `axpy!` (for `y ← α * x + y`), `axpby!` (for `y ← α * x + β * y`), `lmul!` and `rmul!` (for `y ← α * y` and `y ← y * α`, which is typically the same) and `mul!`, which can also be used for out-of-place scalar multiplication `y ← α * x`.
+
+For `S = spacetype(t)` where `InnerProductStyle(S) <: EuclideanInnerProduct`, we can compute `norm(t)`, and for two such instances, the inner product `dot(t1, t2)`, provided `t1` and `t2` have the same domain and codomain.
+Furthermore, there is `normalize(t)` and `normalize!(t)` to return a scaled version of `t` with unit norm.
+These operations should also exist for `InnerProductStyle(S) <: HasInnerProduct`, but require an interface for defining a custom inner product in these spaces.
+Currently, there is no concrete subtype of `HasInnerProduct` that is not an `EuclideanInnerProduct`.
+In particular, `CartesianSpace`, `ComplexSpace` and `GradedSpace` all have `InnerProductStyle(S) <: EuclideanInnerProduct`.
+
+With tensors that have `InnerProductStyle(t) <: EuclideanInnerProduct` there is associated an adjoint operation, given by `adjoint(t)` or simply `t'`, such that `domain(t') == codomain(t)` and `codomain(t') == domain(t)`.
+Note that for an instance `t::TensorMap{S, N₁, N₂}`, `t'` is simply stored in a wrapper called `AdjointTensorMap{S, N₂, N₁}`, which is another subtype of `AbstractTensorMap`.
+This should be mostly invisible to the user, as all methods should work for this type as well.
+It can be hard to reason about the index order of `t'`, i.e. index `i` of `t` appears in `t'` at index position `j = TensorKit.adjointtensorindex(t, i)`, where the latter method is typically not necessary and hence unexported.
+There is also a plural `TensorKit.adjointtensorindices` to convert multiple indices at once.
+Note that, because the adjoint interchanges domain and codomain, we have `space(t', j) == space(t, i)'`.
+
+`AbstractTensorMap` instances can furthermore be tested for exact (`t1 == t2`) or approximate (`t1 ≈ t2`) equality, though the latter requires that `norm` can be computed.
+
+When tensor map instances are endomorphisms, i.e. they have the same domain and codomain, there is a multiplicative identity which can be obtained as `one(t)` or `one!(t)`, where the latter overwrites the contents of `t`.
+The multiplicative identity on a space `V` can also be obtained using `id(A, V)` as discussed [above](@ref ss_tensor_construction), such that for a general homomorphism `t′`, we have `t′ == id(codomain(t′)) * t′ == t′ * id(domain(t′))`.
+Returning to the case of endomorphisms `t`, we can compute the trace via `tr(t)` and exponentiate them using `exp(t)`, or if the contents of `t` can be destroyed in the process, `exp!(t)`.
+Furthermore, there are a number of tensor factorizations for both endomorphisms and general homomorphisms that we discuss below.
+
+Finally, there are a number of operations that also belong in this paragraph because of their analogy to common matrix operations.
+The tensor product of two `TensorMap` instances `t1` and `t2` is obtained as `t1 ⊗ t2` and results in a new `TensorMap` with `codomain(t1 ⊗ t2) = codomain(t1) ⊗ codomain(t2)` and `domain(t1 ⊗ t2) = domain(t1) ⊗ domain(t2)`.
+If we have two `TensorMap{T, S, N, 1}` instances `t1` and `t2` with the same codomain, we can combine them in a way that is analogous to `hcat`, i.e. we stack them such that the new tensor `catdomain(t1, t2)` has also the same codomain, but has a domain which is `domain(t1) ⊕ domain(t2)`.
+Similarly, if `t1` and `t2` are of type `TensorMap{T, S, 1, N}` and have the same domain, the operation `catcodomain(t1, t2)` results in a new tensor with the same domain and a codomain given by `codomain(t1) ⊕ codomain(t2)`, which is the analogy of `vcat`.
+Note that direct sum only makes sense between `ElementarySpace` objects, i.e. there is no way to give a tensor product meaning to a direct sum of tensor product spaces.
+
+Time for some more examples:
+```@repl tensors
+using TensorKit # hide
+V1 = ℂ^2
+t = randn(V1 ← V1 ⊗ V1 ⊗ V1)
+t == t + zero(t) == t * id(domain(t)) == id(codomain(t)) * t
+t2 = randn(ComplexF64, codomain(t), domain(t));
+dot(t2, t)
+tr(t2' * t)
+dot(t2, t) ≈ dot(t', t2')
+dot(t2, t2)
+norm(t2)^2
+t3 = copy!(similar(t, ComplexF64), t);
+t3 == t
+rmul!(t3, 0.8);
+t3 ≈ 0.8 * t
+axpby!(0.5, t2, 1.3im, t3);
+t3 ≈ 0.5 * t2 + 0.8 * 1.3im * t
+t4 = randn(fuse(codomain(t)), codomain(t));
+t5 = TensorMap{Float64}(undef, fuse(codomain(t)), domain(t));
+mul!(t5, t4, t) == t4 * t
+inv(t4) * t4 ≈ id(codomain(t))
+t4 * inv(t4) ≈ id(fuse(codomain(t)))
+t4 \ (t4 * t) ≈ t
+t6 = randn(ComplexF64, V1, codomain(t));
+numout(t4) == numout(t6) == 1
+t7 = catcodomain(t4, t6);
+foreach(println, (codomain(t4), codomain(t6), codomain(t7)))
+norm(t7) ≈ sqrt(norm(t4)^2 + norm(t6)^2)
+t8 = t4 ⊗ t6;
+foreach(println, (codomain(t4), codomain(t6), codomain(t8)))
+foreach(println, (domain(t4), domain(t6), domain(t8)))
+norm(t8) ≈ norm(t4)*norm(t6)
+```
+
+## [Index manipulations](@id ss_indexmanipulation)
+
+In many cases, the bipartition of tensor indices (i.e. `ElementarySpace` instances) between the codomain and domain is not fixed throughout the different operations that need to be performed on that tensor map, i.e. we want to use the duality to move spaces from domain to codomain and vice versa.
+Furthermore, we want to use the braiding to reshuffle the order of the indices.
+
+For this, we use an interface that is closely related to that for manipulating splitting–fusion tree pairs, namely [`braid`](@ref) and [`permute`](@ref), with the interface
+
+```julia
+braid(t::AbstractTensorMap{T,S,N₁,N₂}, (p1, p2)::Index2Tuple{N₁′,N₂′}, levels::IndexTuple{N₁+N₂,Int})
+```
+
+and
+
+```julia
+permute(t::AbstractTensorMap{T,S,N₁,N₂}, (p1, p2)::Index2Tuple{N₁′,N₂′}; copy = false)
+```
+
+both of which return an instance of `AbstractTensorMap{T, S, N₁′, N₂′}`.
+
+In these methods, `p1` and `p2` specify which of the original tensor indices ranging from `1` to `N₁ + N₂` make up the new codomain (with `N₁′` spaces) and new domain (with `N₂′` spaces).
+Hence, `(p1..., p2...)` should be a valid permutation of `1:(N₁ + N₂)`.
+Note that, throughout TensorKit.jl, permutations are always specified using tuples of `Int`s, for reasons of type stability.
+For `braid`, we also need to specify `levels` or depths for each of the indices of the original tensor, which determine whether indices will braid over or underneath each other (use the braiding or its inverse).
+We refer to the section on [manipulating fusion trees](@ref ss_fusiontrees) for more details.
+
+When `BraidingStyle(sectortype(t)) isa SymmetricBraiding`, we can use the simpler interface of `permute`, which does not require the argument `levels`.
+`permute` accepts a keyword argument `copy`.
+When `copy == true`, the result will be a tensor with newly allocated data that can independently be modified from that of the input tensor `t`.
+When `copy` takes the default value `false`, `permute` can try to return the result in a way that it shares its data with the input tensor `t`, though this is only possible in specific cases (e.g. when `sectortype(S) == Trivial` and `(p1..., p2...) = (1:(N₁+N₂)...)`).
+
+Both `braid` and `permute` come in a version where the result is stored in an already existing tensor, i.e. [`braid!(tdst, tsrc, (p1, p2), levels)`](@ref) and [`permute!(tdst, tsrc, (p1, p2))`](@ref).
+
+Another operation that belongs under index manipulations is taking the `transpose` of a tensor, i.e. `LinearAlgebra.transpose(t)` and `LinearAlgebra.transpose!(tdst, tsrc)`, both of which are reexported by TensorKit.jl.
+Note that `transpose(t)` is not simply equal to reshuffling domain and codomain with `braid(t, (1:(N₁+N₂)...), reverse(domainind(tsrc)), reverse(codomainind(tsrc)))`.
+Indeed, the graphical representation (where we draw the codomain and domain as a single object), makes clear that this introduces an additional (inverse) twist, which is then compensated in the `transpose` implementation.
+
+```@raw html
+
+```
+
+In categorical language, the reason for this extra twist is that we use the left coevaluation ``η``, but the right evaluation ``\tilde{ϵ}``, when repartitioning the indices between domain and codomain.
+
+There are a number of other index related manipulations.
+We can apply a twist (or inverse twist) to one of the tensor map indices via [`twist(t, i; inv = false)`](@ref) or [`twist!(t, i; inv = false)`](@ref).
+Note that the latter method does not store the result in a new destination tensor, but just modifies the tensor `t` in place.
+Twisting several indices simultaneously can be obtained by using the defining property
+
+```math
+θ_{V⊗W} = τ_{W,V} ∘ (θ_W ⊗ θ_V) ∘ τ_{V,W} = (θ_V ⊗ θ_W) ∘ τ_{W,V} ∘ τ_{V,W},
+```
+
+but is currently not implemented explicitly.
+
+For all sector types `I` with `BraidingStyle(I) == Bosonic()`, all twists are `1` and thus have no effect.
+Let us start with some examples, in which we illustrate that, although `permute` might act highly non-trivially on the fusion trees and on the corresponding data, after conversion to a regular `Array` (when possible), it just acts like `permutedims`
+
+```@repl tensors
+domain(t) → codomain(t)
+ta = convert(Array, t);
+t′ = permute(t, (1, 2, 3, 4));
+domain(t′) → codomain(t′)
+convert(Array, t′) ≈ ta
+t′′ = permute(t, ((4, 2, 3), (1,)));
+domain(t′′) → codomain(t′′)
+convert(Array, t′′) ≈ permutedims(ta, (4, 2, 3, 1))
+transpose(t)
+convert(Array, transpose(t)) ≈ permutedims(ta, (4, 3, 2, 1))
+dot(t2, t) ≈ dot(transpose(t2), transpose(t))
+transpose(transpose(t)) ≈ t
+twist(t, 3) ≈ t
+```
+
+Note that `transpose` acts like one would expect on a `TensorMap{T, S, 1, 1}`.
+On a `TensorMap{T, S, N₁, N₂}`, because `transpose` replaces the codomain with the dual of the domain, which has its tensor product operation reversed, this in the end amounts to a complete reversal of all tensor indices when representing it as a plain multi-dimensional `Array`.
+Also, note that we have not defined the conjugation of `TensorMap` instances.
+One definition that one could think of is `conj(t) = adjoint(transpose(t))`.
+However note that `codomain(adjoint(transpose(t))) == domain(transpose(t)) == dual(codomain(t))` and similarly `domain(adjoint(transpose(t))) == dual(domain(t))`, where `dual` of a `ProductSpace` is composed of the dual of the `ElementarySpace` instances, in reverse order of tensor product.
+This might be very confusing, and as such we leave tensor conjugation undefined.
+However, note that we have a conjugation syntax within the context of [tensor contractions](@ref ss_tensor_contraction).
+
+To show the effect of `twist`, we now consider a type of sector `I` for which `BraidingStyle(I) != Bosonic()`.
+In particular, we use `FibonacciAnyon`.
+We cannot convert the resulting `TensorMap` to an `Array`, so we have to rely on indirect tests to verify our results.
+
+```@repl tensors
+V1 = GradedSpace{FibonacciAnyon}(:I => 3, :τ => 2)
+V2 = GradedSpace{FibonacciAnyon}(:I => 2, :τ => 1)
+m = randn(Float32, V1, V2)
+transpose(m)
+twist(braid(m, ((2,), (1,)), (1, 2)), 1)
+t1 = randn(V1 * V2', V2 * V1);
+t2 = randn(ComplexF64, V1 * V2', V2 * V1);
+dot(t1, t2) ≈ dot(transpose(t1), transpose(t2))
+transpose(transpose(t1)) ≈ t1
+```
+
+A final operation that one might expect in this section is to fuse or join indices, and its inverse, to split a given index into two or more indices.
+For a plain tensor (i.e. with `sectortype(t) == Trivial`), these operations amount to the equivalent of `reshape` on the multidimensional data.
+However, this represents only one possibility, as there is no canonically unique way to embed the tensor product of two spaces `V1 ⊗ V2` in a new space `V = fuse(V1 ⊗ V2)`.
+Such a mapping can always be accompanied by a basis transform.
+However, one particular choice is created by the function `isomorphism`, or for `EuclideanProduct` spaces, `unitary`.
+Hence, we can join or fuse two indices of a tensor by first constructing `u = unitary(fuse(space(t, i) ⊗ space(t, j)), space(t, i) ⊗ space(t, j))` and then contracting this map with indices `i` and `j` of `t`, as explained in the section on [contracting tensors](@ref ss_tensor_contraction).
+Note, however, that a typical algorithm is not expected to often need to fuse and split indices, as e.g. tensor factorizations can easily be applied without needing to `reshape` or fuse indices first, as explained in the next section.
+
+## [Tensor factorizations](@id ss_tensor_factorization)
+
+As tensors are linear maps, they support various kinds of factorizations.
+These functions all interpret the provided `AbstractTensorMap` instances as a map from `domain` to `codomain`, which can be thought of as reshaping the tensor into a matrix according to the current bipartition of the indices.
+
+TensorKit's factorizations are provided by [MatrixAlgebraKit.jl](https://github.com/QuantumKitHub/MatrixAlgebraKit.jl), which is used to supply both the interface, as well as the implementation of the various operations on the blocks of data.
+For specific details on the provided functionality, we refer to its [documentation page](https://quantumkithub.github.io/MatrixAlgebraKit.jl/stable/user_interface/decompositions/).
+
+Finally, note that each of the factorizations takes the current partition of `domain` and `codomain` as the *axis* along which to matricize and perform the factorization.
+In order to obtain factorizations according to a different bipartition of the indices, we can use any of the previously mentioned [index manipulations](@ref ss_indexmanipulation) before the factorization.
+
+Some examples to conclude this section
+```@repl tensors
+V1 = SU₂Space(0 => 2, 1/2 => 1)
+V2 = SU₂Space(0 => 1, 1/2 => 1, 1 => 1)
+
+t = randn(V1 ⊗ V1, V2);
+U, S, Vh = svd_compact(t);
+t ≈ U * S * Vh
+D, V = eigh_full(t' * t);
+D ≈ S * S
+U' * U ≈ id(domain(U))
+S
+
+Q, R = left_orth(t; alg = :svd);
+Q' * Q ≈ id(domain(Q))
+t ≈ Q * R
+
+U2, S2, Vh2, ε = svd_trunc(t; trunc = truncspace(V1));
+Vh2 * Vh2' ≈ id(codomain(Vh2))
+S2
+ε ≈ norm(block(S, Irrep[SU₂](1))) * sqrt(dim(Irrep[SU₂](1)))
+
+L, Q = right_orth(permute(t, ((1,), (2, 3))));
+codomain(L), domain(L), domain(Q)
+Q * Q'
+P = Q' * Q;
+P ≈ P * P
+t′ = permute(t, ((1,), (2, 3)));
+t′ ≈ t′ * P
+```
+
+## [Bosonic tensor contractions and tensor networks](@id ss_tensor_contraction)
+
+One of the most important operations with tensor maps is to compose them, more generally known as contracting them.
+As mentioned in the section on [category theory](@ref s_categories), a typical composition of maps in a ribbon category can graphically be represented as a planar arrangement of the morphisms (i.e. tensor maps, boxes with lines emanating from top and bottom, corresponding to source and target, i.e. domain and codomain), where the lines connecting the source and targets of the different morphisms should be thought of as ribbons, that can braid over or underneath each other, and that can twist.
+Technically, we can embed this diagram in ``ℝ × [0,1]`` and attach all the unconnected line endings corresponding to objects in the source at some position ``(x,0)`` for ``x∈ℝ``, and all line endings corresponding to objects in the target at some position ``(x,1)``.
+The resulting morphism is then invariant under what is known as *framed three-dimensional isotopy*, i.e. three-dimensional rearrangements of the morphism that respect the rules of boxes connected by ribbons whose open endings are kept fixed.
+Such a two-dimensional diagram cannot easily be encoded in a single line of code.
+
+However, things simplify when the braiding is symmetric (such that over- and under-crossings become equivalent, i.e. just crossings), and when twists, i.e. self-crossings in this case, are trivial.
+This amounts to `BraidingStyle(I) == Bosonic()` in the language of TensorKit.jl, and is true for any subcategory of ``\mathbf{Vect}``, i.e. ordinary tensors, possibly with some symmetry constraint.
+The case of ``\mathbf{SVect}`` and its subcategories, and more general categories, are discussed below.
+
+In the case of trivial twists, we can deform the diagram such that we first combine every morphism with a number of coevaluations ``η`` so as to represent it as a tensor, i.e. with a trivial domain.
+We can then rearrange the morphisms to be all lined up horizontally, where the original morphism compositions are now being performed by evaluations ``ϵ``.
+This process will generate a number of crossings and twists, where the latter can be omitted because they act trivially.
+Similarly, double crossings can also be omitted.
+As a consequence, the diagram, or the morphism it represents, is completely specified by the tensors it is composed of, and which indices between the different tensors are connected, via the evaluation ``ϵ``, and which indices make up the source and target of the resulting morphism.
+If we also compose the resulting morphisms with coevaluations so that it has a trivial domain, we just have one type of unconnected lines, henceforth called open indices.
+We sketch such a rearrangement in the following picture
+
+```@raw html
+
+```
+
+Hence, we can now specify such a tensor diagram, henceforth called a tensor contraction or also tensor network, using a one-dimensional syntax that mimics [abstract index notation](https://en.wikipedia.org/wiki/Abstract_index_notation) and specifies which indices are connected by the evaluation map using Einstein's summation convention.
+Indeed, for `BraidingStyle(I) == Bosonic()`, such a tensor contraction can take the same format as if all tensors were just multi-dimensional arrays.
+For this, we rely on the interface provided by the package [TensorOperations.jl](https://github.com/QuantumKitHub/TensorOperations.jl).
+
+The above picture would be encoded as
+```julia
+@tensor E[a, b, c, d, e] := A[v, w, d, x] * B[y, z, c, x] * C[v, e, y, b] * D[a, w, z]
+```
+or
+```julia
+@tensor E[:] := A[1, 2, -4, 3] * B[4, 5, -3, 3] * C[1, -5, 4, -2] * D[-1, 2, 5]
+```
+where the latter syntax is known as NCON-style, and labels the unconnected or outgoing indices with negative integers, and the contracted indices with positive integers.
+
+A number of remarks are in order.
+TensorOperations.jl accepts both integers and any valid variable name as dummy label for indices, and everything in between `[ ]` is not resolved in the current context but interpreted as a dummy label.
+Here, we label the indices of a `TensorMap`, like `A::TensorMap{T, S, N₁, N₂}`, in a linear fashion, where the first position corresponds to the first space in `codomain(A)`, and so forth, up to position `N₁`.
+Index `N₁ + 1` then corresponds to the first space in `domain(A)`.
+However, because we have applied the coevaluation ``η``, it actually corresponds to the corresponding dual space, in accordance with the interface of [`space(A, i)`](@ref) that we introduced [above](@ref ss_tensor_properties), and as indicated by the dotted box around ``A`` in the above picture.
+The same holds for the other tensor maps.
+Note that our convention also requires that we braid indices that we brought from the domain to the codomain, and so this is only unambiguous for a symmetric braiding, where there is a unique way to permute the indices.
+
+With the current syntax, we create a new object `E` because we use the definition operator `:=`.
+Furthermore, with the current syntax, it will be a `Tensor`, i.e. it will have a trivial domain, and correspond to the dotted box in the picture above, rather than the actual morphism `E`.
+We can also directly define `E` with the correct codomain and domain by rather using
+```julia
+@tensor E[a b c;d e] := A[v, w, d, x] * B[y, z, c, x] * C[v, e, y, b] * D[a, w, z]
+```
+or
+```julia
+@tensor E[(a, b, c);(d, e)] := A[v, w, d, x] * B[y, z, c, x] * C[v, e, y, b] * D[a, w, z]
+```
+where the latter syntax can also be used when the codomain is empty.
+When using the assignment operator `=`, the `TensorMap` `E` is assumed to exist and the contents will be written to the currently allocated memory.
+Note that for existing tensors, both on the left hand side and right hand side, trying to specify the indices in the domain and the codomain separately using the above syntax has no effect, as the bipartition of indices is already fixed by the existing object.
+Hence, if `E` has been created by the previous line of code, all of the following lines are now equivalent
+```julia
+@tensor E[(a, b, c);(d, e)] = A[v, w, d, x] * B[y, z, c, x] * C[v, e, y, b] * D[a, w, z]
+@tensor E[a, b, c, d, e] = A[v w d; x] * B[(y, z, c); (x, )] * C[v e y; b] * D[a, w, z]
+@tensor E[a b; c d e] = A[v; w d x] * B[y, z, c, x] * C[v, e, y, b] * D[a w; z]
+```
+and none of those will or can change the partition of the indices of `E` into its codomain and its domain.
+
+Two final remarks are in order.
+Firstly, the order of the tensors appearing on the right hand side is irrelevant, as we can reorder them by using the allowed moves of the Penrose graphical calculus, which yields some crossings and a twist.
+As the latter is trivial, it can be omitted, and we just use the same rules to evaluate the newly ordered tensor network.
+For the particular case of matrix-matrix multiplication, which also captures more general settings by appropriately combining spaces into a single line, we indeed find
+
+```@raw html
+
+```
+
+or thus, the following two lines of code yield the same result
+```julia
+@tensor C[i, j] := B[i, k] * A[k, j]
+@tensor C[i, j] := A[k, j] * B[i, k]
+```
+Reordering of tensors can be used internally by the `@tensor` macro to evaluate the contraction in a more efficient manner.
+In particular, the NCON-style of specifying the contraction gives the user control over the order, and there are other macros, such as `@tensoropt`, that try to automate this process.
+There is also an `@ncon` macro and `ncon` function, and we recommend reading the [manual of TensorOperations.jl](https://quantumkithub.github.io/TensorOperations.jl/stable/) to learn more about the possibilities and how they work.
+
+A final remark involves the use of adjoints of tensors.
+The current framework is such that the user should not be too worried about the actual bipartition into codomain and domain of a given `TensorMap` instance.
+Indeed, for tensor contractions the `@tensor` macro figures out the correct manipulations automatically.
+However, when wanting to use the `adjoint` of an instance `t::TensorMap{T, S, N₁, N₂}`, the resulting `adjoint(t)` is an `AbstractTensorMap{T, S, N₂, N₁}` and one needs to know the values of `N₁` and `N₂` to know exactly where the `i`th index of `t` will end up in `adjoint(t)`, and hence the index order of `t'`.
+Within the `@tensor` macro, one can instead use `conj()` on the whole index expression so as to be able to use the original index ordering of `t`.
+For example, for `TensorMap{T, S, 1, 1}` instances, this yields exactly the equivalence one expects, namely one between the following two expressions:
+
+```julia
+@tensor C[i, j] := B'[i, k] * A[k, j]
+@tensor C[i, j] := conj(B[k, i]) * A[k, j]
+```
+
+For e.g. an instance `A::TensorMap{T, S, 3, 2}`, the following two syntaxes have the same effect within an `@tensor` expression: `conj(A[a, b, c, d, e])` and `A'[d, e, a, b, c]`.
+
+Some examples:
+
+## Fermionic tensor contractions
+
+TODO
+
+## Anyonic tensor contractions
+
+TODO
diff --git a/docs/src/man/tensors.md b/docs/src/man/tensors.md
index 5a8aaf45a..2921e5f1b 100644
--- a/docs/src/man/tensors.md
+++ b/docs/src/man/tensors.md
@@ -1,249 +1,151 @@
-# [Tensors and the `TensorMap` type](@id s_tensors)
+# [Constructing tensors and the `TensorMap` type](@id s_tensors)
```@setup tensors
using TensorKit
using LinearAlgebra
```
-This last page explains how to create and manipulate tensors in TensorKit.jl. As this is
-probably the most important part of the manual, we will also focus more strongly on the
-usage and interface, and less so on the underlying implementation. The only aspect of the
-implementation that we will address is the storage of the tensor data, as this is important
-to know how to create and initialize a tensor, but will in fact also shed light on how some
-of the methods work.
+This last page explains how to create and manipulate tensors in TensorKit.jl.
+As this is probably the most important part of the manual, we will also focus more strongly on the usage and interface, and less so on the underlying implementation.
+The only aspect of the implementation that we will address is the storage of the tensor data, as this is important to know how to create and initialize a tensor, but will in fact also shed light on how some of the methods work.
-As mentioned, all tensors in TensorKit.jl are interpreted as linear maps (morphisms) from a
-domain (a `ProductSpace{S,N₂}`) to a codomain (another `ProductSpace{S,N₁}`), with the same
-`S<:ElementarySpace` that labels the type of spaces associated with the individual tensor
-indices. The overall type for all such tensor maps is `AbstractTensorMap{S, N₁, N₂}`. Note
-that we place information about the codomain before that of the domain. Indeed, we have
-already encountered the constructor for the concrete parametric type `TensorMap` in the
-form `TensorMap(..., codomain, domain)`. This convention is opposite to the mathematical
-notation, e.g. ``\mathrm{Hom}(W,V)`` or ``f:W→V``, but originates from the fact that a
-normal matrix is also denoted as having size `m × n` or is constructed in Julia as
-`Array(..., (m, n))`, where the first integer `m` refers to the codomain being `m`-
-dimensional, and the seond integer `n` to the domain being `n`-dimensional. This also
-explains why we have consistently used the symbol ``W`` for spaces in the domain and ``V``
-for spaces in the codomain. A tensor map ``t:(W_1 ⊗ … ⊗ W_{N_2}) → (V_1 ⊗ … ⊗ V_{N_1})`` will
-be created in Julia as `TensorMap(..., V1 ⊗ ... ⊗ VN₁, W1 ⊗ ... ⊗ WN₂)`.
+As mentioned, all tensors in TensorKit.jl are interpreted as linear maps (morphisms) from a domain (a `ProductSpace{S, N₂}`) to a codomain (another `ProductSpace{S, N₁}`), with the same `S <: ElementarySpace` that labels the type of spaces associated with the individual tensor indices.
+The overall type for all such tensor maps is `AbstractTensorMap{T, S, N₁, N₂}`.
+Note that we place information about the codomain before that of the domain.
+Indeed, we have already encountered the constructor for the concrete parametric type `TensorMap` in the form `TensorMap(..., codomain, domain)`.
+This convention is opposite to the mathematical notation, e.g. ``\mathrm{Hom}(W, V)`` or ``f : W → V``, but originates from the fact that a normal matrix is also denoted as having size `m × n` or is constructed in Julia as `Array(..., (m, n))`, where the first integer `m` refers to the codomain being `m`-dimensional, and the second integer `n` to the domain being `n`-dimensional.
+This also explains why we have consistently used the symbol ``W`` for spaces in the domain and ``V`` for spaces in the codomain.
+A tensor map ``t : (W_1 ⊗ … ⊗ W_{N_2}) → (V_1 ⊗ … ⊗ V_{N_1})`` will be created in Julia as `TensorMap(..., V1 ⊗ ... ⊗ VN₁, W1 ⊗ ... ⊗ WN₂)`.
-Furthermore, the abstract type `AbstractTensor{S,N}` is just a synonym for
-`AbstractTensorMap{S,N,0}`, i.e. for tensor maps with an empty domain, which is equivalent
-to the unit of the monoidal category, or thus, the field of scalars ``𝕜``.
+Furthermore, the abstract type `AbstractTensor{T, S, N}` is just a synonym for `AbstractTensorMap{T, S, N, 0}`, i.e. for tensor maps with an empty domain, which is equivalent to the unit of the monoidal category, or thus, the field of scalars ``𝕜``.
-Currently, `AbstractTensorMap` has three subtypes. `TensorMap` provides the actual
-implementation, where the data of the tensor is stored in a `DenseArray` (more specifically
-a `DenseMatrix` as will be explained below). `AdjointTensorMap` is a simple wrapper type to
-denote the adjoint of an existing `TensorMap` object. `DiagonalTensorMap` provides an
-efficient representations of diagonal tensor maps. In the future, additional types could
-be defined, to deal with sparse data, static data, diagonal data, etc...
+Currently, `AbstractTensorMap` has three subtypes.
+`TensorMap` provides the actual implementation, where the data of the tensor is stored in a `DenseArray` (more specifically a `DenseMatrix` as will be explained below).
+`AdjointTensorMap` is a simple wrapper type to denote the adjoint of an existing `TensorMap` object.
+`DiagonalTensorMap` provides an efficient representation of diagonal tensor maps.
+In the future, additional types could be defined, to deal with sparse data, static data, etc...
## [Storage of tensor data](@id ss_tensor_storage)
-Before discussion how to construct and initalize a `TensorMap`, let us discuss what is
-meant by 'tensor data' and how it can efficiently and compactly be stored. Let us first
-discuss the case `sectortype(S) == Trivial` sector, i.e. the case of no symmetries. In that
-case the data of a tensor `t = TensorMap(..., V1 ⊗ ... ⊗ VN₁, W₁ ⊗ ... ⊗ WN₂)` can just be
-represented as a multidimensional array of size
+Before discussing how to construct and initialize a `TensorMap`, let us discuss what is meant by 'tensor data' and how it can efficiently and compactly be stored.
+Let us first discuss the case `sectortype(S) == Trivial`, i.e. the case of no symmetries.
+In that case the data of a tensor `t = TensorMap(..., V1 ⊗ ... ⊗ VN₁, W₁ ⊗ ... ⊗ WN₂)` can just be represented as a multidimensional array of size
-`(dim(V1), dim(V2), …, dim(VN₁), dim(W1), …, dim(WN₂))`
+```julia
+(dim(V1), dim(V2), …, dim(VN₁), dim(W1), …, dim(WN₂))
+```
which can also be reshaped into matrix of size
-`(dim(V1) * dim(V2) * … * dim(VN₁), dim(W1) * dim(W2) * … * dim(WN₂))`
+```julia
+(dim(V1) * dim(V2) * … * dim(VN₁), dim(W1) * dim(W2) * … * dim(WN₂))
+```
-and is really the matrix representation of the linear map that the tensor represents. In
-particular, given a second tensor `t2` whose domain matches with the codomain of `t`,
-function composition amounts to multiplication of their corresponding data matrices.
-Similarly, tensor factorizations such as the singular value decomposition, which we discuss
-below, can act directly on this matrix representation.
+and is really the matrix representation of the linear map that the tensor represents.
+In particular, given a second tensor `t2` whose domain matches with the codomain of `t`, function composition amounts to multiplication of their corresponding data matrices.
+Similarly, tensor factorizations such as the singular value decomposition, which we discuss below, can act directly on this matrix representation.
!!! note
- One might wonder if it would not have been more natural to represent the tensor data as
- `(dim(V1), dim(V2), …, dim(VN₁), dim(WN₂), …, dim(W1))` given how employing the duality
- naturally reverses the tensor product, as encountered with the interface of
- [`repartition`](@ref) for [fusion trees](@ref ss_fusiontrees). However, such a
- representation, when plainly `reshape`d to a matrix, would not have the above
- properties and would thus not constitute the matrix representation of the tensor in a
- compatible basis.
-
-Now consider the case where `sectortype(S) == I` for some `I` which has
-`FusionStyle(I) == UniqueFusion()`, i.e. the representations of an Abelian group, e.g.
-`I == Irrep[ℤ₂]` or `I == Irrep[U₁]`. In this case, the tensor data is associated with
-sectors `(a1, a2, …, aN₁) ∈ sectors(V1 ⊗ V2 ⊗ … ⊗ VN₁)` and
-`(b1, …, bN₂) ∈ sectors(W1 ⊗ … ⊗ WN₂)` such that they fuse to a same common charge, i.e.
-`(c = first(⊗(a1, …, aN₁))) == first(⊗(b1, …, bN₂))`. The data associated with this takes
-the form of a multidimensional array with size
-`(dim(V1, a1), …, dim(VN₁, aN₁), dim(W1, b1), …, dim(WN₂, bN₂))`, or equivalently, a matrix
-of with row size `dim(V1, a1) * … * dim(VN₁, aN₁) == dim(codomain, (a1, …, aN₁))` and column
-size `dim(W1, b1) * … * dim(WN₂, aN₂) == dim(domain, (b1, …, bN₂))`.
-
-However, there are multiple combinations of `(a1, …, aN₁)` giving rise to the same `c`, and
-so there is data associated with all of these, as well as all possible combinations of
-`(b1, …, bN₂)`. Stacking all matrices for different `(a1, …)` and a fixed value of `(b1, …)`
-underneath each other, and for fixed value of `(a1, …)` and different values of `(b1, …)`
-next to each other, gives rise to a larger block matrix of all data associated with the
-central sector `c`. The size of this matrix is exactly
-`(blockdim(codomain, c), blockdim(domain, c))` and these matrices are exactly the diagonal
-blocks whose existence is guaranteed by Schur's lemma, and which are labeled by the coupled
-sector `c`. Indeed, if we would represent the tensor map `t` as a matrix without explicitly
-using the symmetries, we could reorder the rows and columns to group data corresponding to
-sectors that fuse to the same `c`, and the resulting block diagonal representation would
-emerge. This basis transform is thus a permutation, which is a unitary operation, that will
-cancel or go through trivially for linear algebra operations such as composing tensor maps
-(matrix multiplication) or tensor factorizations such as a singular value decomposition. For
-such linear algebra operations, we can thus directly act on these large matrices, which
-correspond to the diagonal blocks that emerge after a basis transform, provided that the
-partition of the tensor indices in domain and codomain of the tensor are in line with our
-needs. For example, composing two tensor maps amounts to multiplying the matrices
-corresponding to the same `c` (provided that its subblocks labeled by the different
-combinations of sectors are ordered in the same way, which we guarantee by associating a
-canonical order with sectors). Henceforth, we refer to the `blocks` of a tensor map as those
-diagonal blocks, the existence of which is provided by Schur's lemma and which are labeled
-by the coupled sectors `c`. We directly store these blocks as `DenseMatrix` and gather them
-as values in a dictionary, together with the corresponding coupled sector `c` as key. For a
-given tensor `t`, we can access a specific block as `block(t, c)`, whereas `blocks(t)`
-yields an iterator over pairs `c => block(t, c)`.
-
-The subblocks corresponding to a particular combination of sectors then correspond to a
-particular view for some range of the rows and some range of the colums, i.e.
-`view(block(t, c), m₁:m₂, n₁:n₂)` where the ranges `m₁:m₂` associated with `(a1, …, aN₁)`
-and `n₁:n₂` associated with `(b₁, …, bN₂)` are stored within the fields of the instance `t`
-of type `TensorMap`. This `view` can then lazily be reshaped to a multidimensional array,
-for which we rely on the package [Strided.jl](https://github.com/Jutho/Strided.jl). Indeed,
-the data in this `view` is not contiguous, because the stride between the different columns
-is larger than the length of the columns. Nonetheless, this does not pose a problem and even
-as multidimensional array there is still a definite stride associated with each dimension.
-
-When `FusionStyle(I) isa MultipleFusion`, things become slightly more complicated. Not only
-do `(a1, …, aN₁)` give rise to different coupled sectors `c`, there can be multiply ways in
-which they fuse to `c`. These different possibilities are enumerated by the iterator
-`fusiontrees((a1, …, aN₁), c)` and `fusiontrees((b1, …, bN₂), c)`, and with each of those,
-there is tensor data that takes the form of a multidimensional array, or, after reshaping, a
-matrix of size `(dim(codomain, (a1, …, aN₁)), dim(domain, (b1, …, bN₂))))`. Again, we can
-stack all such matrices with the same value of `f₁ ∈ fusiontrees((a1, …, aN₁), c)`
-horizontally (as they all have the same number of rows), and with the same value of
-`f₂ ∈ fusiontrees((b1, …, bN₂), c)` vertically (as they have the same number of columns).
-What emerges is a large matrix of size `(blockdim(codomain, c), blockdim(domain, c))`
-containing all the tensor data associated with the coupled sector `c`, where
-`blockdim(P, c) = sum(dim(P, s)*length(fusiontrees(s, c)) for s in sectors(P))` for some
-instance `P` of `ProductSpace`. The tensor implementation does not distinguish between
-abelian or non-abelian sectors and still stores these matrices as a `DenseMatrix`,
-accessible via `block(t, c)`.
-
-At first sight, it might now be less clear what the relevance of this block is in relation
-to the full matrix representation of the tensor map, where the symmetry is not exploited.
-The essential interpretation is still the same. Schur's lemma now tells that there is a
-unitary basis transform which makes this matrix representation block diagonal, more
-specifically, of the form ``⨁_{c} B_c ⊗ 𝟙_{c}``, where ``B_c`` denotes `block(t,c)` and
-``𝟙_{c}`` is an identity matrix of size `(dim(c), dim(c))`. The reason for this extra
-identity is that the group representation is recoupled to act as ``⨁_{c} 𝟙 ⊗ u_c(g)`` for
-all ``g ∈ \mathsf{I}``, with ``u_c(g)`` the matrix representation of group element ``g``
-according to the irrep ``c``. In the abelian case, `dim(c) == 1`, i.e. all irreducible
-representations are one-dimensional and Schur's lemma only dictates that all off-diagonal
-blocks are zero. However, in this case the basis transform to the block diagonal
-representation is not simply a permutation matrix, but a more general unitary matrix
-composed of the different fusion trees. Indeed, let us denote the fusion trees
-`f₁ ∈ fusiontrees((a1, …, aN₁), c)` as ``X^{a_1, …, a_{N₁}}_{c,α}`` where
-``α = (e_1, …, e_{N_1-2}; μ₁, …, μ_{N_1-1})`` is a collective label for the internal sectors
-`e` and the vertex degeneracy labels `μ` of a generic fusion tree, as discussed in the
-[corresponding section](@ref ss_fusiontrees). The tensor is then represented as
+ One might wonder if it would not have been more natural to represent the tensor data as `(dim(V1), dim(V2), …, dim(VN₁), dim(WN₂), …, dim(W1))` given how employing the duality naturally reverses the tensor product, as encountered with the interface of [`repartition`](@ref) for [fusion trees](@ref ss_fusiontrees).
+ However, such a representation, when plainly `reshape`d to a matrix, would not have the above properties and would thus not constitute the matrix representation of the tensor in a compatible basis.
+
+Now consider the case where `sectortype(S) == I` for some `I` which has `FusionStyle(I) == UniqueFusion()`, i.e. the representations of an Abelian group, e.g. `I == Irrep[ℤ₂]` or `I == Irrep[U₁]`.
+In this case, the tensor data is associated with sectors `(a1, a2, …, aN₁) ∈ sectors(V1 ⊗ V2 ⊗ … ⊗ VN₁)` and `(b1, …, bN₂) ∈ sectors(W1 ⊗ … ⊗ WN₂)` such that they fuse to the same common charge, i.e. `(c = first(⊗(a1, …, aN₁))) == first(⊗(b1, …, bN₂))`.
+The data associated with this takes the form of a multidimensional array with size `(dim(V1, a1), …, dim(VN₁, aN₁), dim(W1, b1), …, dim(WN₂, bN₂))`, or equivalently, a matrix with row size `dim(V1, a1) * … * dim(VN₁, aN₁) == dim(codomain, (a1, …, aN₁))` and column size `dim(W1, b1) * … * dim(WN₂, bN₂) == dim(domain, (b1, …, bN₂))`.
+
+However, there are multiple combinations of `(a1, …, aN₁)` giving rise to the same `c`, and so there is data associated with all of these, as well as all possible combinations of `(b1, …, bN₂)`.
+Stacking all matrices for different `(a1, …)` and a fixed value of `(b1, …)` underneath each other, and for fixed value of `(a1, …)` and different values of `(b1, …)` next to each other, gives rise to a larger block matrix of all data associated with the central sector `c`.
+The size of this matrix is exactly `(blockdim(codomain, c), blockdim(domain, c))` and these matrices are exactly the diagonal blocks whose existence is guaranteed by Schur's lemma, and which are labeled by the coupled sector `c`.
+Indeed, if we would represent the tensor map `t` as a matrix without explicitly using the symmetries, we could reorder the rows and columns to group data corresponding to sectors that fuse to the same `c`, and the resulting block diagonal representation would emerge.
+This basis transform is thus a permutation, which is a unitary operation, that will cancel or go through trivially for linear algebra operations such as composing tensor maps (matrix multiplication) or tensor factorizations such as a singular value decomposition.
+For such linear algebra operations, we can thus directly act on these large matrices, which correspond to the diagonal blocks that emerge after a basis transform, provided that the partition of the tensor indices in domain and codomain of the tensor are in line with our needs.
+For example, composing two tensor maps amounts to multiplying the matrices corresponding to the same `c` (provided that its subblocks labeled by the different combinations of sectors are ordered in the same way, which we guarantee by associating a canonical order with sectors). Henceforth, we refer to the `blocks` of a tensor map as those diagonal blocks, the existence of which is provided by Schur's lemma and which are labeled by the coupled sectors `c`.
+We directly concatenate these blocks as consecutive entries in a single larger `DenseVector`, together with metadata to retrieve a block by using the corresponding coupled sector `c` as key.
+For a given tensor `t`, we can access a specific block as `block(t, c)`, whereas `blocks(t)` yields an iterator over pairs `c => block(t, c)`.
+
+The subblocks corresponding to a particular combination of sectors then correspond to a particular view for some range of the rows and some range of the columns, i.e. `view(block(t, c), m₁:m₂, n₁:n₂)` where the ranges `m₁:m₂` associated with `(a1, …, aN₁)` and `n₁:n₂` associated with `(b₁, …, bN₂)` are stored within the fields of the instance `t` of type `TensorMap`.
+This `view` can then lazily be reshaped to a multidimensional array, for which we rely on the package [Strided.jl](https://github.com/Jutho/Strided.jl).
+Indeed, the data in this `view` is not contiguous, because the stride between the different columns is larger than the length of the columns.
+Nonetheless, this does not pose a problem and even as multidimensional array there is still a definite stride associated with each dimension.
+
+When `FusionStyle(I) isa MultipleFusion`, things become slightly more complicated.
+Not only do `(a1, …, aN₁)` give rise to different coupled sectors `c`, there can be multiple ways in which they fuse to `c`.
+These different possibilities are enumerated by the iterators `fusiontrees((a1, …, aN₁), c)` and `fusiontrees((b1, …, bN₂), c)`, and with each of those, there is tensor data that takes the form of a multidimensional array, or, after reshaping, a matrix of size `(dim(codomain, (a1, …, aN₁)), dim(domain, (b1, …, bN₂)))`.
+Again, we can stack all such matrices with the same value of `f₁ ∈ fusiontrees((a1, …, aN₁), c)` horizontally (as they all have the same number of rows), and with the same value of `f₂ ∈ fusiontrees((b1, …, bN₂), c)` vertically (as they have the same number of columns).
+What emerges is a large matrix of size `(blockdim(codomain, c), blockdim(domain, c))` containing all the tensor data associated with the coupled sector `c`, where `blockdim(P, c) = sum(dim(P, s) * length(fusiontrees(s, c)) for s in sectors(P))` for some instance `P` of `ProductSpace`.
+The tensor implementation does not distinguish between abelian or non-abelian sectors and still stores these matrices concatenated in a `DenseVector`, where each individual block is accessible via `block(t, c)`.
+
+At first sight, it might now be less clear what the relevance of this block is in relation to the full matrix representation of the tensor map, where the symmetry is not exploited.
+The essential interpretation is still the same.
+Schur's lemma now tells that there is a unitary basis transform which makes this matrix representation block diagonal, more specifically, of the form ``⨁_{c} B_c ⊗ 𝟙_{c}``, where ``B_c`` denotes `block(t, c)` and ``𝟙_{c}`` is an identity matrix of size `(dim(c), dim(c))`.
+The reason for this extra identity is that the group representation is recoupled to act as ``⨁_{c} 𝟙 ⊗ u_c(g)`` for all ``g ∈ \mathsf{I}``, with ``u_c(g)`` the matrix representation of group element ``g`` according to the irrep ``c``.
+In the abelian case, `dim(c) == 1`, i.e. all irreducible representations are one-dimensional and Schur's lemma only dictates that all off-diagonal blocks are zero.
+However, in this case the basis transform to the block diagonal representation is not simply a permutation matrix, but a more general unitary matrix composed of the different fusion trees.
+Indeed, let us denote the fusion trees `f₁ ∈ fusiontrees((a1, …, aN₁), c)` as ``X^{a_1, …, a_{N₁}}_{c,α}`` where ``α = (e_1, …, e_{N_1-2}; μ₁, …, μ_{N_1-1})`` is a collective label for the internal sectors `e` and the vertex degeneracy labels `μ` of a generic fusion tree, as discussed in the [corresponding section](@ref ss_fusiontrees).
+The tensor is then represented as
```@raw html
```
-In this diagram, we have indicated how the tensor map can be rewritten in terms of a block
-diagonal matrix with a unitary matrix on its left and another unitary matrix (if domain and
-codomain are different) on its right. So the left and right matrices should actually have
-been drawn as squares. They represent the unitary basis transform. In this picture, red and
-white regions are zero. The center matrix is most easy to interpret. It is the block
-diagonal matrix ``⨁_{c} B_c ⊗ 𝟙_{c}`` with diagonal blocks labeled by the coupled charge
-`c`, in this case it takes two values. Every single small square in between the dotted or
-dashed lines has size ``d_c × d_c`` and corresponds to a single element of ``B_c``,
-tensored with the identity ``\mathrm{id}_c``. Instead of ``B_c``, a more accurate labelling
-is ``t^c_{(a_1 … a_{N₁})α, (b_1 … b_{N₂})β}`` where ``α`` labels different fusion trees from
-``(a_1 … a_{N₁})`` to ``c``. The dashed horizontal lines indicate regions corresponding to
-different fusion (actually splitting) trees, either because of different sectors
-``(a_1 … a_{N₁})`` or different labels ``α`` within the same sector. Similarly, the dashed
-vertical lines define the border between regions of different fusion trees from the domain
-to `c`, either because of different sectors ``(b_1 … b_{N₂})`` or a different label ``β``.
+In this diagram, we have indicated how the tensor map can be rewritten in terms of a block diagonal matrix with a unitary matrix on its left and another unitary matrix (if domain and codomain are different) on its right.
+So the left and right matrices should actually have been drawn as squares.
+They represent the unitary basis transform.
+In this picture, red and white regions are zero.
+The center matrix is most easy to interpret.
+It is the block diagonal matrix ``⨁_{c} B_c ⊗ 𝟙_{c}`` with diagonal blocks labeled by the coupled charge `c`, in this case it takes two values.
+Every single small square in between the dotted or dashed lines has size ``d_c × d_c`` and corresponds to a single element of ``B_c``, tensored with the identity ``\mathrm{id}_c``.
+Instead of ``B_c``, a more accurate labelling is ``t^c_{(a_1 … a_{N₁})α, (b_1 … b_{N₂})β}`` where ``α`` labels different fusion trees from ``(a_1 … a_{N₁})`` to ``c``.
+The dashed horizontal lines indicate regions corresponding to different fusion (actually splitting) trees, either because of different sectors ``(a_1 … a_{N₁})`` or different labels ``α`` within the same sector.
+Similarly, the dashed vertical lines define the border between regions of different fusion trees from the domain to `c`, either because of different sectors ``(b_1 … b_{N₂})`` or a different label ``β``.
-To understand this better, we need to understand the basis transformation, e.g. on the left
-(codomain) side. In more detail, it is given by
+To understand this better, we need to understand the basis transformation, e.g. on the left (codomain) side.
+In more detail, it is given by
```@raw html
```
-Indeed, remembering that ``V_i = ⨁_{a_i} R_{a_i} ⊗ ℂ^{n_{a_i}}`` with ``R_{a_i}`` the
-representation space on which irrep ``a_i`` acts (with dimension ``\mathrm{dim}(a_i)``), we
-find
-``V_1 ⊗ … ⊗ V_{N_1} = ⨁_{a_1, …, a_{N₁}} (R_{a_1} ⊗ … ⊗ R_{a_{N_1}}) ⊗ ℂ^{n_{a_1} × … n_{a_{N_1}}}``.
-In the diagram above, the wiggly lines correspond to the direct sum over the different
-sectors ``(a_1, …, a_{N₁})``, there depicted taking three possible values ``(a…)``,
-``(a…)′`` and ``(a…)′′``. The tensor product
-``(R_{a_1} ⊗ … ⊗ R_{a_{N_1}}) ⊗ ℂ^{n_{a_1} × … n_{a_{N_1}}}`` is depicted as
-``(R_{a_1} ⊗ … ⊗ R_{a_{N_1}})^{⊕ n_{a_1} × … n_{a_{N_1}}}``, i.e. as a direct sum of the
-spaces ``R_{(a…)} = (R_{a_1} ⊗ … ⊗ R_{a_{N_1}})`` according to the dotted horizontal lines,
-which repeat ``n_{(a…)} = n_{a_1} × … n_{a_{N_1}}`` times. In this particular example,
-``n_{(a…)}=2``, ``n_{(a…)'}=3`` and ``n_{(a…)''}=5``. The thick vertical line represents the
-separation between the two different coupled sectors, denoted as ``c`` and ``c'``. Dashed
-vertical lines represent different ways of reaching the coupled sector, corresponding to
-different `α`. In this example, the first sector ``(a…)`` has one fusion tree to ``c``,
-labeled by ``c,α``, and two fusion trees to ``c'``, labeled by ``c',α`` and ``c',α'``. The
-second sector has only a fusion tree to ``c``, labeled by ``c,α'``. The third sector only
-has a fusion tree to ``c'``, labeld by ``c', α''``. Finally then, because the fusion trees
-do not act on the spaces ``ℂ^{n_{a_1} × … n_{a_{N_1}}}``, the dotted lines which represent
-the different ``n_{(a…)} = n_{a_1} × … n_{a_{N_1}}`` dimensions are also drawn vertically.
-In particular, for a given sector ``(a…)`` and a specific fusion tree
-``X^{(a…)}_{c,α} : R_{(a…)}→R_c``, the action is ``X^{(a…)}_{c,α} ⊗ 𝟙_{n_{(a…)}}``, which
-corresponds to the diagonal green blocks in this drawing where the same matrix
-``X^{(a…)}_{c,α}`` (the fusion tree) is repeated along the diagonal. Note that the fusion
-tree is not a vector or single column, but a matrix with number of rows equal to
-``\mathrm{dim}(R_{(a\ldots)}) = d_{a_1} d_{a_2} … d_{a_{N_1}} `` and number of columns
-equal to ``d_c``. A similar interpretation can be given to the basis transform on the
-right, by taking its adjoint. In this particular example, it has two different combinations
-of sectors ``(b…)`` and ``(b…)'``, where both have a single fusion tree to ``c`` as well as
-to ``c'``, and ``n_{(b…)}=2``, ``n_{(b…)'}=3``.
+Indeed, remembering that ``V_i = ⨁_{a_i} R_{a_i} ⊗ ℂ^{n_{a_i}}`` with ``R_{a_i}`` the representation space on which irrep ``a_i`` acts (with dimension ``\mathrm{dim}(a_i)``), we find
-Note that we never explicitly store or act with the basis transformations on the left and
-the right. For composing tensor maps (i.e. multiplying them), these basis transforms just
-cancel, whereas for tensor factorizations they just go through trivially. They transform
-non-trivially when reshuffling the tensor indices, both within or in between the domain and
-codomain. For this, however, we can completely rely on the manipulations of fusion trees to
-implicitly compute the effect of the basis transform and construct the new blocks ``B_c``
-that result with respect to the new basis.
-
-Hence, as before, we only store the diagonal blocks ``B_c`` of size
-`(blockdim(codomain(t), c), blockdim(domain(t), c))` as a `DenseMatrix`, accessible via
-`block(t, c)`. Within this matrix, there are regions of the form
-`view(block(t, c), m₁:m₂, n₁:n₂)` that correspond to the data
-``t^c_{(a_1 … a_{N₁})α, (b_1 … b_{N₂})β}`` associated with a pair of fusion trees
-``X^{(a_1 … a_{N₁})}_{c,α}`` and ``X^{(b_1 … b_{N₂})}_{c,β}``, henceforth again denoted as
-`f₁` and `f₂`, with `f₁.coupled == f₂.coupled == c`. The ranges where this subblock is
-living are managed within the tensor implementation, and these subblocks can be accessed
-via `t[f₁, f₂]`, and is returned as a `StridedArray` of size
-``n_{a_1} × n_{a_2} × … × n_{a_{N_1}} × n_{b_1} × … n_{b_{N₂}}``, or in code,
-`(dim(V1, a1), dim(V2, a2), …, dim(VN₁, aN₁), dim(W1, b1), …, dim(WN₂, bN₂))`. While the
-implementation does not distinguish between `FusionStyle isa UniqueFusion` or
-`FusionStyle isa MultipleFusion`, in the former case the fusion tree is completely
-characterized by the uncoupled sectors, and so the subblocks can also be accessed as
-`t[(a1, …, aN₁, b1, …, bN₂)]`. When there is no symmetry at all, i.e.
-`sectortype(t) == Trivial`, `t[]` returns the raw tensor data as a `StridedArray` of size
-`(dim(V1), …, dim(VN₁), dim(W1), …, dim(WN₂))`, whereas `block(t, Trivial())` returns the
-same data as a `DenseMatrix` of size `(dim(V1) * … * dim(VN₁), dim(W1) * … * dim(WN₂))`.
+```math
+V_1 ⊗ … ⊗ V_{N_1} = ⨁_{a_1, …, a_{N₁}} (R_{a_1} ⊗ … ⊗ R_{a_{N_1}}) ⊗ ℂ^{n_{a_1} × … n_{a_{N_1}}}.
+```
+
+In the diagram above, the wiggly lines correspond to the direct sum over the different sectors ``(a_1, …, a_{N₁})``, there depicted taking three possible values ``(a…)``, ``(a…)′`` and ``(a…)′′``.
+The tensor product ``(R_{a_1} ⊗ … ⊗ R_{a_{N_1}}) ⊗ ℂ^{n_{a_1} × … n_{a_{N_1}}}`` is depicted as ``(R_{a_1} ⊗ … ⊗ R_{a_{N_1}})^{⊕ n_{a_1} × … n_{a_{N_1}}}``, i.e. as a direct sum of the spaces ``R_{(a…)} = (R_{a_1} ⊗ … ⊗ R_{a_{N_1}})`` according to the dotted horizontal lines, which repeat ``n_{(a…)} = n_{a_1} × … n_{a_{N_1}}`` times.
+In this particular example, ``n_{(a…)}=2``, ``n_{(a…)'}=3`` and ``n_{(a…)''}=5``.
+The thick vertical line represents the separation between the two different coupled sectors, denoted as ``c`` and ``c'``.
+Dashed vertical lines represent different ways of reaching the coupled sector, corresponding to different `α`.
+In this example, the first sector ``(a…)`` has one fusion tree to ``c``, labeled by ``c,α``, and two fusion trees to ``c'``, labeled by ``c',α`` and ``c',α'``.
+The second sector has only a fusion tree to ``c``, labeled by ``c,α'``.
+The third sector only has a fusion tree to ``c'``, labeled by ``c', α''``.
+Finally then, because the fusion trees do not act on the spaces ``ℂ^{n_{a_1} × … n_{a_{N_1}}}``, the dotted lines which represent the different ``n_{(a…)} = n_{a_1} × … n_{a_{N_1}}`` dimensions are also drawn vertically.
+In particular, for a given sector ``(a…)`` and a specific fusion tree ``X^{(a…)}_{c,α} : R_{(a…)}→R_c``, the action is ``X^{(a…)}_{c,α} ⊗ 𝟙_{n_{(a…)}}``, which corresponds to the diagonal green blocks in this drawing where the same matrix ``X^{(a…)}_{c,α}`` (the fusion tree) is repeated along the diagonal.
+Note that the fusion tree is not a vector or single column, but a matrix with number of rows equal to ``\mathrm{dim}(R_{(a\ldots)}) = d_{a_1} d_{a_2} … d_{a_{N_1}} `` and number of columns equal to ``d_c``.
+A similar interpretation can be given to the basis transform on the right, by taking its adjoint.
+In this particular example, it has two different combinations of sectors ``(b…)`` and ``(b…)'``, where both have a single fusion tree to ``c`` as well as to ``c'``, and ``n_{(b…)}=2``, ``n_{(b…)'}=3``.
+
+Note that we never explicitly store or act with the basis transformations on the left and the right.
+For composing tensor maps (i.e. multiplying them), these basis transforms just cancel, whereas for tensor factorizations they just go through trivially.
+They transform non-trivially when reshuffling the tensor indices, both within or in between the domain and codomain.
+For this, however, we can completely rely on the manipulations of fusion trees to implicitly compute the effect of the basis transform and construct the new blocks ``B_c`` that result with respect to the new basis.
+
+Hence, as before, we only store the diagonal blocks ``B_c`` of size `(blockdim(codomain(t), c), blockdim(domain(t), c))` as a `DenseMatrix`, accessible via `block(t, c)`.
+Within this matrix, there are regions of the form `view(block(t, c), m₁:m₂, n₁:n₂)` that correspond to the data ``t^c_{(a_1 … a_{N₁})α, (b_1 … b_{N₂})β}`` associated with a pair of fusion trees ``X^{(a_1 … a_{N₁})}_{c,α}`` and ``X^{(b_1 … b_{N₂})}_{c,β}``, henceforth again denoted as `f₁` and `f₂`, with `f₁.coupled == f₂.coupled == c`.
+The ranges where this subblock is living are managed within the tensor implementation, and these subblocks can be accessed via `t[f₁, f₂]`, and are returned as a `StridedArray` of size ``n_{a_1} × n_{a_2} × … × n_{a_{N_1}} × n_{b_1} × … n_{b_{N₂}}``, or in code, `(dim(V1, a1), dim(V2, a2), …, dim(VN₁, aN₁), dim(W1, b1), …, dim(WN₂, bN₂))`.
+While the implementation does not distinguish between `FusionStyle isa UniqueFusion` or `FusionStyle isa MultipleFusion`, in the former case the fusion tree is completely characterized by the uncoupled sectors, and so the subblocks can also be accessed as `t[(a1, …, aN₁, b1, …, bN₂)]`.
+When there is no symmetry at all, i.e. `sectortype(t) == Trivial`, `t[]` returns the raw tensor data as a `StridedArray` of size `(dim(V1), …, dim(VN₁), dim(W1), …, dim(WN₂))`, whereas `block(t, Trivial())` returns the same data as a `DenseMatrix` of size `(dim(V1) * … * dim(VN₁), dim(W1) * … * dim(WN₂))`.
## [Constructing tensor maps and accessing tensor data](@id ss_tensor_construction)
-Having learned how a tensor is represented and stored, we can now discuss how to create
-tensors and tensor maps. From hereon, we focus purely on the interface rather than the
-implementation.
+Having learned how a tensor is represented and stored, we can now discuss how to create tensors and tensor maps.
+From hereon, we focus purely on the interface rather than the implementation.
### Random and uninitialized tensor maps
-The most convenient set of constructors are those that construct tensors or tensor maps
-with random or uninitialized data. They take the form
+The most convenient set of constructors are those that construct tensors or tensor maps with random or uninitialized data.
+They take the form
```julia
f(codomain, domain = one(codomain))
@@ -251,16 +153,12 @@ f(eltype::Type{<:Number}, codomain, domain = one(codomain))
TensorMap{eltype::Type{<:Number}}(undef, codomain, domain = one(codomain))
Tensor{eltype::Type{<:Number}}(undef, codomain)
```
-Here, `f` is any of the typical functions from Base that normally create arrays, namely
-`zeros`, `ones`, `rand`, `randn` and `Random.randexp`. Remember that `one(codomain)` is the
-empty `ProductSpace{S,0}()`. The third and fourth calling syntax use the `UndefInitializer`
-from Julia Base and generates a `TensorMap` with unitialized data, which can thus contain
-`NaN`s.
+Here, `f` is any of the typical functions from Base that normally create arrays, namely `zeros`, `ones`, `rand`, `randn` and `Random.randexp`.
+Remember that `one(codomain)` is the empty `ProductSpace{S, 0}()`.
+The third and fourth calling syntaxes use the `UndefInitializer` from Julia Base and generate a `TensorMap` with uninitialized data, which can thus contain `NaN`s.
-In all of these constructors, the last two arguments can be replaced by `domain → codomain`
-or `codomain ← domain`, where the arrows are obtained as `\rightarrow+TAB` and
-`\leftarrow+TAB` and create a `HomSpace` as explained in the section on
-[Spaces of morphisms](@ref ss_homspaces). Some examples are perhaps in order
+In all of these constructors, the last two arguments can be replaced by `domain → codomain` or `codomain ← domain`, where the arrows are obtained as `\rightarrow+TAB` and `\leftarrow+TAB` and create a `HomSpace` as explained in the section on [Spaces of morphisms](@ref ss_homspaces).
+Some examples are perhaps in order
```@repl tensors
t1 = randn(ℂ^2 ⊗ ℂ^3, ℂ^2)
@@ -274,60 +172,41 @@ block(t1, Trivial()) |> disp
reshape(t1[], dim(codomain(t1)), dim(domain(t1))) |> disp
```
-Finally, all constructors can also be replaced by `Tensor(..., codomain)`, in which case
-the domain is assumed to be the empty `ProductSpace{S,0}()`, which can easily be obtained
-as `one(codomain)`. Indeed, the empty product space is the unit object of the monoidal
-category, equivalent to the field of scalars `𝕜`, and thus the multiplicative identity
-(especially since `*` also acts as tensor product on vector spaces).
+Finally, all constructors can also be replaced by `Tensor(..., codomain)`, in which case the domain is assumed to be the empty `ProductSpace{S, 0}()`, which can easily be obtained as `one(codomain)`.
+Indeed, the empty product space is the unit object of the monoidal category, equivalent to the field of scalars `𝕜`, and thus the multiplicative identity (especially since `*` also acts as tensor product on vector spaces).
-The matrices created by `f` are the matrices ``B_c`` discussed above, i.e. those returned
-by `block(t, c)`. Only numerical matrices of type `DenseMatrix` are accepted, which in
-practice just means Julia's intrinsic `Matrix{T}` for some `T<:Number`. In the future, we
-will add support for `CuMatrix` from [CuArrays.jl](https://github.com/JuliaGPU/CuArrays.jl)
-to harness GPU computing power, and maybe `SharedArray` from the Julia's `SharedArrays`
-standard library.
+The matrices created by `f` are the matrices ``B_c`` discussed above, i.e. those returned by `block(t, c)`.
+Only numerical matrices of type `DenseMatrix` are accepted, which in practice just means Julia's intrinsic `Matrix{T}` for some `T <: Number`.
+Ongoing work extends this to support for `CuMatrix` from [CuArrays.jl](https://github.com/JuliaGPU/CuArrays.jl) to harness GPU computing power, and future work might include distributed arrays.
-Support for static or sparse data is currently unavailable, and if it would be implemented,
-it would lead to new subtypes of `AbstractTensorMap` which are distinct from `TensorMap`.
+Support for static or sparse data is currently unavailable, and if it would be implemented, it would likely lead to new subtypes of `AbstractTensorMap` which are distinct from `TensorMap`.
Future implementations of e.g. `SparseTensorMap` or `StaticTensorMap` could be useful.
### Tensor maps from existing data
-To create a `TensorMap` with existing data, one can use the aforementioned form but with
-the function `f` replaced with the actual data, i.e. `TensorMap(data, codomain, domain)` or
-any of its equivalents.
+To create a `TensorMap` with existing data, one can use the aforementioned form but with the function `f` replaced with the actual data, i.e. `TensorMap(data, codomain, domain)` or any of its equivalents.
-Here, `data` can be of two types. It can be a dictionary (any `AbstractDict` subtype) which
-has blocksectors `c` of type `sectortype(codomain)` as keys, and the corresponding matrix
-blocks as value, i.e. `data[c]` is some `DenseMatrix` of size
-`(blockdim(codomain, c), blockdim(domain, c))`. This is the form of how the data is stored
-within the `TensorMap` objects.
+Here, `data` can be of two types.
+It can be a dictionary (any `AbstractDict` subtype) which has blocksectors `c` of type `sectortype(codomain)` as keys, and the corresponding matrix blocks as value, i.e. `data[c]` is some `DenseMatrix` of size `(blockdim(codomain, c), blockdim(domain, c))`.
-For those space types for which a `TensorMap` can be converted to a plain multidimensional
-array, the `data` can also be a general `DenseArray`, either of rank `N₁ + N₂` and with
-matching size `(dims(codomain)..., dims(domain)...)`, or just as a `DenseMatrix` with size
-`(dim(codomain), dim(domain))`. This is true in particular if the sector type is `Trivial`,
-e.g. for `CartesianSpace` or `ComplexSpace`. Then the `data` array is just reshaped into
-matrix form and referred to as such in the resulting `TensorMap` instance. When `spacetype`
-is `GradedSpace`, the `TensorMap` constructor will try to reconstruct the tensor data such
-that the resulting tensor `t` satisfies `data == convert(Array, t)`. This might not be
-possible, if the data does not respect the symmetry structure. This procedure can be
-sketched using a simple physical example, namely the SWAP gate on two qubits,
+For those space types for which a `TensorMap` can be converted to a plain multidimensional array, the `data` can also be a general `DenseArray`, either of rank `N₁ + N₂` and with matching size `(dims(codomain)..., dims(domain)...)`, or just as a `DenseMatrix` with size `(dim(codomain), dim(domain))`.
+This is true in particular if the sector type is `Trivial`, e.g. for `CartesianSpace` or `ComplexSpace`.
+Then the `data` array is just reshaped into matrix form and referred to as such in the resulting `TensorMap` instance.
+When `spacetype` is `GradedSpace`, the `TensorMap` constructor will try to reconstruct the tensor data such that the resulting tensor `t` satisfies `data == convert(Array, t)`.
+This might not be possible, if the data does not respect the symmetry structure.
+This procedure can be sketched using a simple physical example, namely the SWAP gate on two qubits,
```math
\begin{align*}
\mathrm{SWAP}: \mathbb{C}^2 \otimes \mathbb{C}^2 & \to \mathbb{C}^2 \otimes \mathbb{C}^2\\
|i\rangle \otimes |j\rangle &\mapsto |j\rangle \otimes |i\rangle.
\end{align*}
```
-This operator can be rewritten in terms of the familiar Heisenberg exchange interaction
-``\vec{S}_i \cdot \vec{S}_j`` as
+This operator can be rewritten in terms of the familiar Heisenberg exchange interaction ``\vec{S}_i \cdot \vec{S}_j`` as
```math
\mathrm{SWAP} = 2 \vec{S}_i \cdot \vec{S}_j + \frac{1}{2} 𝟙,
```
-where ``\vec{S} = (S^x, S^y, S^z)`` and the spin-1/2 generators of SU₂ ``S^k`` are defined
-defined in terms of the ``2 \times 2`` Pauli matrices ``\sigma^k`` as
-``S^k = \frac{1}{2}\sigma^k``. The SWAP gate can be realized as a rank-4 `TensorMap` in the
-following way:
+where ``\vec{S} = (S^x, S^y, S^z)`` and the spin-1/2 generators of SU₂ ``S^k`` are defined in terms of the ``2 \times 2`` Pauli matrices ``\sigma^k`` as ``S^k = \frac{1}{2}\sigma^k``.
+The SWAP gate can be realized as a rank-4 `TensorMap` in the following way:
```@repl tensors
# encode the matrix elements of the swap gate into a rank-4 array, where the first two
# indices correspond to the codomain and the last two indices correspond to the domain
@@ -346,41 +225,29 @@ for (c,b) in blocks(t3)
println()
end
```
-Hence, we recognize that the exchange interaction has eigenvalue ``-1`` in the coupled spin
-zero sector (`SU2Irrep(0)`), and eigenvalue ``+1`` in the coupled spin 1 sector
-(`SU2Irrep(1)`). Using `Irrep[U₁]` instead, we observe that both coupled charge
-`U1Irrep(+1)` and `U1Irrep(-1)` have eigenvalue ``+1``. The coupled charge `U1Irrep(0)`
-sector is two-dimensional, and has an eigenvalue ``+1`` and an eigenvalue ``-1``.
+Hence, we recognize that the exchange interaction has eigenvalue ``-1`` in the coupled spin zero sector (`SU2Irrep(0)`), and eigenvalue ``+1`` in the coupled spin 1 sector (`SU2Irrep(1)`).
+Using `Irrep[U₁]` instead, we observe that both coupled charge `U1Irrep(+1)` and `U1Irrep(-1)` have eigenvalue ``+1``.
+The coupled charge `U1Irrep(0)` sector is two-dimensional, and has an eigenvalue ``+1`` and an eigenvalue ``-1``.
-To construct the proper `data` in more complicated cases, one has to know where to find
-each sector in the range `1:dim(V)` of every index `i` with associated space `V`, as well
-as the internal structure of the representation space when the corresponding sector `c` has
-`dim(c) > 1`, i.e. in the case of `FusionStyle(c) isa MultipleFusion`. Currently, the only
-non-abelian sectors are `Irrep[SU₂]` and `Irrep[CU₁]`, for which the internal structure is
-the natural one.
+To construct the proper `data` in more complicated cases, one has to know where to find each sector in the range `1:dim(V)` of every index `i` with associated space `V`, as well as the internal structure of the representation space when the corresponding sector `c` has `dim(c) > 1`, i.e. in the case of `FusionStyle(c) isa MultipleFusion`.
+Currently, the only non-abelian sectors are `Irrep[SU₂]` and `Irrep[CU₁]`, for which the internal structure is the natural one.
-There are some tools available to facilitate finding the proper range of sector `c` in space
-`V`, namely `axes(V, c)`. This also works on a `ProductSpace`, with a tuple of sectors. An
-example
+There are some tools available to facilitate finding the proper range of sector `c` in space `V`, namely `axes(V, c)`.
+This also works on a `ProductSpace`, with a tuple of sectors. An example
```@repl tensors
V = SU2Space(0=>3, 1=>2, 2=>1)
P = V ⊗ V ⊗ V
axes(P, (SU2Irrep(1), SU2Irrep(0), SU2Irrep(2)))
```
-Note that the length of the range is the degeneracy dimension of that sector, times the
-dimension of the internal representation space, i.e. the quantum dimension of that sector.
+Note that the length of the range is the degeneracy dimension of that sector, times the dimension of the internal representation space, i.e. the quantum dimension of that sector.
### Assigning block data after initialization
-In order to avoid having to know the internal structure of each representation space to
-properly construct the full `data` array, it is often simpler to assign the block data
-directly after initializing an all zero `TensorMap` with the correct spaces. While this may
-seem more difficult at first sight since it requires knowing the exact entries associated to
-each valid combination of domain uncoupled sectors, coupled sector and codomain uncoupled
-sectors, this is often a far more natural procedure in practice.
+In order to avoid having to know the internal structure of each representation space to properly construct the full `data` array, it is often simpler to assign the block data directly after initializing an all zero `TensorMap` with the correct spaces.
+While this may seem more difficult at first sight since it requires knowing the exact entries associated to each valid combination of domain uncoupled sectors, coupled sector and codomain uncoupled sectors, this is often a far more natural procedure in practice.
-A first option is to directly set the full matrix block for each coupled sector in the
-`TensorMap`. For the example with U₁ symmetry, this can be done as
+A first option is to directly set the full matrix block for each coupled sector in the `TensorMap`.
+For the example with ``\mathsf{U}_1`` symmetry, this can be done as
```@repl tensors
t4 = zeros(V3 ⊗ V3, V3 ⊗ V3);
block(t4, U1Irrep(0)) .= [1 0; 0 1];
@@ -392,33 +259,19 @@ for (c, b) in blocks(t4)
println()
end
```
-While this indeed does not require considering the internal structure of the representation
-spaces, it still requires knowing the precise row and column indices corresponding to each
-set of uncoupled sectors in the codmain and domain respectively to correctly assign the
-nonzero entries in each block.
+While this indeed does not require considering the internal structure of the representation spaces, it still requires knowing the precise row and column indices corresponding to each set of uncoupled sectors in the codomain and domain respectively to correctly assign the nonzero entries in each block.
-Perhaps the most natural way of constructing a particular `TensorMap` is to directly assign
-the data slices for each splitting - fusion tree pair using the `fusiontrees(::TensorMap)`
-method. This returns an iterator over all tuples `(f₁, f₂)` of splitting - fusion tree pairs
-corresponding to all ways in which the set of domain uncoupled sectors can fuse to a coupled
-sector and split back into the set of codomain uncoupled sectors. By directly setting the
-corresponding data slice `t[f₁, f₂]` of size
-`(dims(codomain(t), f₁.uncoupled)..., dims(domain(t), f₂.uncoupled)...)`, we can construct
-all the block data without worrying about the internal ordering of row and column indices in
-each block. In addition, the corresponding value of each fusion tree slice is often directly
-informed by the object we are trying to construct in the first place. For example, in order
-to construct the Heisenberg exchange interaction on two spin-1/2 particles ``i`` and ``j``
-as an SU₂ symmetric `TensorMap`, we can make use of the observation that
+Perhaps the most natural way of constructing a particular `TensorMap` is to directly assign the data slices for each splitting - fusion tree pair using the `fusiontrees(::TensorMap)` method.
+This returns an iterator over all tuples `(f₁, f₂)` of splitting - fusion tree pairs corresponding to all ways in which the set of domain uncoupled sectors can fuse to a coupled sector and split back into the set of codomain uncoupled sectors.
+By directly setting the corresponding data slice `t[f₁, f₂]` of size `(dims(codomain(t), f₁.uncoupled)..., dims(domain(t), f₂.uncoupled)...)`, we can construct all the block data without worrying about the internal ordering of row and column indices in each block.
+In addition, the corresponding value of each fusion tree slice is often directly informed by the object we are trying to construct in the first place.
+For example, in order to construct the Heisenberg exchange interaction on two spin-1/2 particles ``i`` and ``j`` as an SU₂ symmetric `TensorMap`, we can make use of the observation that
```math
\vec{S}_i \cdot \vec{S}_j = \frac{1}{2} \left( \left( \vec{S}_i + \vec{S}_j \right)^2 - \vec{S}_i^2 - \vec{S}_j^2 \right).
```
-Recalling some basic group theory, we know that the
-[quadratic Casimir of SU₂](https://en.wikipedia.org/wiki/Representation_theory_of_SU(2)#The_Casimir_element),
-``\vec{S}^2``, has a well-defined eigenvalue ``j(j+1)`` on every irrep of spin ``j``. From
-the above expressions, we can therefore directly read off the eigenvalues of the SWAP gate
-in terms of this Casimir eigenvalue on the domain uncoupled sectors and the coupled sector.
-This gives us exactly the prescription we need to assign the data slice corresponding to
-each splitting - fusion tree pair:
+Recalling some basic group theory, we know that the [quadratic Casimir of SU₂](https://en.wikipedia.org/wiki/Representation_theory_of_SU(2)#The_Casimir_element), ``\vec{S}^2``, has a well-defined eigenvalue ``j(j+1)`` on every irrep of spin ``j``.
+From the above expressions, we can therefore directly read off the eigenvalues of the SWAP gate in terms of this Casimir eigenvalue on the domain uncoupled sectors and the coupled sector.
+This gives us exactly the prescription we need to assign the data slice corresponding to each splitting - fusion tree pair:
```@repl tensors
C(s::SU2Irrep) = s.j * (s.j + 1)
t5 = zeros(V2 ⊗ V2, V2 ⊗ V2);
@@ -440,50 +293,31 @@ A third way to construct a `TensorMap` instance is to use `Base.similar`, i.e.
similar(t [, T::Type{<:Number}, codomain, domain])
```
-where `T` is a possibly different `eltype` for the tensor data, and `codomain` and `domain`
-optionally define a new codomain and domain for the resulting tensor. By default, these
-values just take the value from the input tensor `t`. The result will be a new `TensorMap`
-instance, with `undef` data, but whose data is stored in the same subtype of `DenseMatrix`
-(e.g. `Matrix` or `CuMatrix` or ...) as `t`. In particular, this uses the methods
-`storagetype(t)` and `TensorKit.similarstoragetype(t, T)`.
+where `T` is a possibly different `eltype` for the tensor data, and `codomain` and `domain` optionally define a new codomain and domain for the resulting tensor.
+By default, these values just take the value from the input tensor `t`.
+The result will be a new `TensorMap` instance, with `undef` data, but whose data is stored in the same subtype of `DenseVector` (e.g. `Vector` or `CuVector` or ...) as `t`.
+In particular, this uses the methods `storagetype(t)` and `TensorKit.similarstoragetype(t, T)`.
### Special purpose constructors
-Finally, there are methods `zero`, `one`, `id`, `isomorphism`, `unitary` and `isometry` to
-create specific new tensors. Tensor maps behave as vectors and can be added (if they have
-the same domain and codomain); `zero(t)` is the additive identity, i.e. a `TensorMap`
-instance where all entries are zero. For a `t::TensorMap` with `domain(t) == codomain(t)`,
-i.e. an endomorphism, `one(t)` creates the identity tensor, i.e. the identity under
-composition. As discussed in the section on [linear algebra operations](@ref
-ss_tensor_linalg), we denote composition of tensor maps with the multiplication operator
-`*`, such that `one(t)` is the multiplicative identity. Similarly, it can be created as
-`id(V)` with `V` the relevant vector space, e.g. `one(t) == id(domain(t))`. The identity
-tensor is currently represented with dense data, and one can use
-`id(A::Type{<:DenseMatrix}, V)` to specify the type of `DenseMatrix` (and its `eltype`),
-e.g. `A = Matrix{Float64}`. Finally, it often occurs that we want to construct a specific
-isomorphism between two spaces that are isomorphic but not equal, and for which there is no
-canonical choice. Hereto, one can use the method
-`u = isomorphism([A::Type{<:DenseMatrix}, ] codomain, domain)`, which will explicitly check
-that the domain and codomain are isomorphic, and return an error otherwise. Again, an
-optional first argument can be given to specify the specific type of `DenseMatrix` that is
-currently used to store the rather trivial data of this tensor. If
-`InnerProductStyle(u) <: EuclideanProduct`, the same result can be obtained with the method
-`u = unitary([A::Type{<:DenseMatrix}, ] codomain, domain)`. Note that reversing the domain
-and codomain yields the inverse morphism, which in the case of `EuclideanProduct` coincides
-with the adjoint morphism, i.e. `isomorphism(A, domain, codomain) == adjoint(u) == inv(u)`,
-where `inv` and `adjoint` will be further discussed [below](@ref ss_tensor_linalg).
-Finally, if two spaces `V1` and `V2` are such that `V2` can be embedded in `V1`, i.e. there
-exists an inclusion with a left inverse, and furthermore they represent tensor products of
-some `ElementarySpace` with `EuclideanProduct`, the function
-`w = isometry([A::Type{<:DenseMatrix}, ], V1, V2)` creates one specific isometric embedding,
-such that `adjoint(w) * w == id(V2)` and `w * adjoint(w)` is some hermitian idempotent
-(a.k.a. orthogonal projector) acting on `V1`. An error will be thrown if such a map cannot
-be constructed for the given domain and codomain.
+Finally, there are methods `zero`, `one`, `id`, `isomorphism`, `unitary` and `isometry` to create specific new tensors.
+Tensor maps behave as vectors and can be added (if they have the same domain and codomain); `zero(t)` is the additive identity, i.e. a `TensorMap` instance where all entries are zero.
+For a `t::TensorMap` with `domain(t) == codomain(t)`, i.e. an endomorphism, `one(t)` creates the identity tensor, i.e. the identity under composition.
+As discussed in the section on [linear algebra operations](@ref ss_tensor_linalg), we denote composition of tensor maps with the multiplication operator `*`, such that `one(t)` is the multiplicative identity.
+Similarly, it can be created as `id(V)` with `V` the relevant vector space, e.g. `one(t) == id(domain(t))`.
+The identity tensor is currently represented with dense data, and one can use `id(A::Type{<:DenseVector}, V)` to specify the type of `DenseVector` (and its `eltype`), e.g. `A = Vector{Float64}`.
+Finally, it often occurs that we want to construct a specific isomorphism between two spaces that are isomorphic but not equal, and for which there is no canonical choice.
+Hereto, one can use the method `u = isomorphism([A::Type{<:DenseVector}, ] codomain, domain)`, which will explicitly check that the domain and codomain are isomorphic, and return an error otherwise.
+Again, an optional first argument can be given to specify the specific type of `DenseVector` that is currently used to store the rather trivial data of this tensor.
+If `InnerProductStyle(u) <: EuclideanProduct`, the same result can be obtained with the method `u = unitary([A::Type{<:DenseVector}, ] codomain, domain)`.
+Note that reversing the domain and codomain yields the inverse morphism, which in the case of `EuclideanProduct` coincides with the adjoint morphism, i.e. `isomorphism(A, domain, codomain) == adjoint(u) == inv(u)`, where `inv` and `adjoint` will be further discussed [below](@ref ss_tensor_linalg).
+Finally, if two spaces `V1` and `V2` are such that `V2` can be embedded in `V1`, i.e. there exists an inclusion with a left inverse, and furthermore they represent tensor products of some `ElementarySpace` with `EuclideanProduct`, the function `w = isometry([A::Type{<:DenseVector}, ], V1, V2)` creates one specific isometric embedding, such that `adjoint(w) * w == id(V2)` and `w * adjoint(w)` is some hermitian idempotent (a.k.a. orthogonal projector) acting on `V1`.
+An error will be thrown if such a map cannot be constructed for the given domain and codomain.
Let's conclude this section with some examples with `GradedSpace`.
```@repl tensors
-V1 = ℤ₂Space(0=>3,1=>2)
-V2 = ℤ₂Space(0=>2,1=>1)
+V1 = ℤ₂Space(0 => 3, 1 => 2)
+V2 = ℤ₂Space(0 => 2, 1 => 1)
# First a `TensorMap{ℤ₂Space, 1, 1}`
m = randn(V1, V2)
convert(Array, m) |> disp
@@ -504,27 +338,21 @@ u' * u ≈ I ≈ v' * v
block(t, Z2Irrep(0)) |> disp
block(t, Z2Irrep(1)) |> disp
```
-Here, we illustrated some additional concepts. Firstly, note that we convert a `TensorMap`
-to an `Array`. This only works when `sectortype(t)` supports `fusiontensor`, and in
-particular when `BraidingStyle(sectortype(t)) == Bosonic()`, e.g. the case of trivial
-tensors (the category ``\mathbf{Vect}``) and group representations (the category
-``\mathbf{Rep}_{\mathsf{G}}``, which can be interpreted as a subcategory of
-``\mathbf{Vect}``). Here, we are in this case with ``\mathsf{G} = ℤ₂``. For a
-`TensorMap{S,1,1}`, the blocks directly correspond to the diagonal blocks in the block
-diagonal structure of its representation as an `Array`, there is no basis transform in
-between. This is no longer the case for `TensorMap{S,N₁,N₂}` with different values of `N₁`
-and `N₂`. Here, we use the operation `fuse(V)`, which creates an `ElementarySpace` which is
-isomorphic to a given space `V` (of type `ProductSpace` or `ElementarySpace`). The specific
-map between those two spaces constructed using the specific method `unitary` implements
-precisely the basis change from the product basis to the coupled basis. In this case, for a
-group `G` with `FusionStyle(Irrep[G]) isa UniqueFusion`, it is a permutation matrix. Specifically
-choosing `V` equal to the codomain and domain of `t`, we can construct the explicit basis
-transforms that bring `t` into block diagonal form.
+Here, we illustrated some additional concepts.
+Firstly, note that we convert a `TensorMap` to an `Array`.
+This only works when `sectortype(t)` supports `fusiontensor`, and in particular when `BraidingStyle(sectortype(t)) == Bosonic()`, e.g. the case of trivial tensors (the category ``\mathbf{Vect}``) and group representations (the category ``\mathbf{Rep}_{\mathsf{G}}``, which can be interpreted as a subcategory of ``\mathbf{Vect}``).
+Here, we are in this case with ``\mathsf{G} = ℤ₂``.
+For a `TensorMap{S, 1, 1}`, the blocks directly correspond to the diagonal blocks in the block diagonal structure of its representation as an `Array`, there is no basis transform in between.
+This is no longer the case for `TensorMap{S, N₁, N₂}` with different values of `N₁` and `N₂`.
+Here, we use the operation `fuse(V)`, which creates an `ElementarySpace` which is isomorphic to a given space `V` (of type `ProductSpace` or `ElementarySpace`).
+The specific map between those two spaces constructed using the specific method `unitary` implements precisely the basis change from the product basis to the coupled basis.
+In this case, for a group `G` with `FusionStyle(Irrep[G]) isa UniqueFusion`, it is a permutation matrix.
+Specifically choosing `V` equal to the codomain and domain of `t`, we can construct the explicit basis transforms that bring `t` into block diagonal form.
Let's repeat the same exercise for `I = Irrep[SU₂]`, which has `FusionStyle(I) isa MultipleFusion`.
```@repl tensors
-V1 = SU₂Space(0=>2,1=>1)
-V2 = SU₂Space(0=>1,1=>1)
+V1 = SU₂Space(0 => 2, 1 => 1)
+V2 = SU₂Space(0 => 1, 1 => 1)
# First a `TensorMap{SU₂Space, 1, 1}`
m = randn(V1, V2)
convert(Array, m) |> disp
@@ -546,50 +374,35 @@ block(t, SU2Irrep(0)) |> disp
block(t, SU2Irrep(1)) |> disp
block(t, SU2Irrep(2)) |> disp
```
-Note that the basis transforms `u` and `v` are no longer permutation matrices, but are
-still unitary. Furthermore, note that they render the tensor block diagonal, but that now
-every element of the diagonal blocks labeled by `c` comes itself in a tensor product with
-an identity matrix of size `dim(c)`, i.e. `dim(SU2Irrep(1)) = 3` and
-`dim(SU2Irrep(2)) = 5`.
+Note that the basis transforms `u` and `v` are no longer permutation matrices, but are still unitary.
+Furthermore, note that they render the tensor block diagonal, but that now every element of the diagonal blocks labeled by `c` comes itself in a tensor product with an identity matrix of size `dim(c)`, i.e. `dim(SU2Irrep(1)) = 3` and `dim(SU2Irrep(2)) = 5`.
## [Tensor properties](@id ss_tensor_properties)
-Given a `t::AbstractTensorMap{S,N₁,N₂}`, there are various methods to query its properties.
-The most important are clearly `codomain(t)` and `domain(t)`. For `t::AbstractTensor{S,N}`,
-i.e. `t::AbstractTensorMap{S,N,0}`, we can use `space(t)` as synonym for `codomain(t)`.
-However, for a general `AbstractTensorMap` this has no meaning. However, we can query
-`space(t, i)`, the space associated with the `i`th index. For `i ∈ 1:N₁`, this corresponds
-to `codomain(t, i) = codomain(t)[i]`. For `j = i-N₁ ∈ (1:N₂)`, this corresponds to
-`dual(domain(t, j)) = dual(domain(t)[j])`.
-
-The total number of indices, i.e. `N₁ + N₂`, is given by `numind(t)`, with `N₁ == numout(t)`
-and `N₂ == numin(t)`, the number of outgoing and incoming indices. There are also the
-unexported methods `TensorKit.codomainind(t)` and `TensorKit.domainind(t)` which return the
-tuples `(1, 2, …, N₁)` and `(N₁+1, …, N₁+N₂)`, and are useful for internal purposes. The
-type parameter `S<:ElementarySpace` can be obtained as `spacetype(t)`; the corresponding
-sector can directly obtained as `sectortype(t)` and is `Trivial` when
-`S != GradedSpace`. The underlying field scalars of `S` can also directly be obtained as
-`field(t)`. This is different from `eltype(t)`, which returns the type of `Number` in the
-tensor data, i.e. the type parameter `T` in the (subtype of) `DenseVector{T}` in which the
-matrix blocks are stored. Note that during construction, a (one-time) warning is printed if
-`!(T ⊂ field(S))`. The specific `DenseVector{T}` subtype in which the tensor data is stored
-is obtained as `storagetype(t)`. Each of the methods `numind`, `numout`, `numin`,
-`TensorKit.codomainind`, `TensorKit.domainind`, `spacetype`, `sectortype`, `field`, `eltype`
-and `storagetype` work in the type domain as well, i.e. they are encoded in `typeof(t)`.
+Given a `t::AbstractTensorMap{T, S, N₁, N₂}`, there are various methods to query its properties.
+The most important are clearly `codomain(t)` and `domain(t)`.
+For `t::AbstractTensor{S, N}`, i.e. `t::AbstractTensorMap{T, S, N, 0}`, we can use `space(t)` as synonym for `codomain(t)`.
+However, for a general `AbstractTensorMap` this has no meaning.
+We can, however, query `space(t, i)`, the space associated with the `i`th index.
+For `i ∈ 1:N₁`, this corresponds to `codomain(t, i) = codomain(t)[i]`.
+For `j = i-N₁ ∈ (1:N₂)`, this corresponds to `dual(domain(t, j)) = dual(domain(t)[j])`.
+
+The total number of indices, i.e. `N₁ + N₂`, is given by `numind(t)`, with `N₁ == numout(t)` and `N₂ == numin(t)`, the number of outgoing and incoming indices.
+There are also the unexported methods `TensorKit.codomainind(t)` and `TensorKit.domainind(t)` which return the tuples `(1, 2, …, N₁)` and `(N₁+1, …, N₁+N₂)`, and are useful for internal purposes.
+The type parameter `S <: ElementarySpace` can be obtained as `spacetype(t)`; the corresponding sector can directly be obtained as `sectortype(t)` and is `Trivial` when `S != GradedSpace`.
+The underlying field scalars of `S` can also directly be obtained as `field(t)`.
+This is different from `eltype(t)`, which returns the type of `Number` in the tensor data, i.e. the type parameter `T` in the (subtype of) `DenseVector{T}` in which the matrix blocks are stored.
+Note that during construction, a (one-time) warning is printed if `!(T ⊂ field(S))`.
+The specific `DenseVector{T}` subtype in which the tensor data is stored is obtained as `storagetype(t)`.
+Each of the methods `numind`, `numout`, `numin`, `TensorKit.codomainind`, `TensorKit.domainind`, `spacetype`, `sectortype`, `field`, `eltype` and `storagetype` work in the type domain as well, i.e. they are encoded in `typeof(t)`.
Finally, there are methods to probe the data, which we already encountered.
-`blocksectors(t)` returns an iterator over the different coupled sectors that can be
-obtained from fusing the uncoupled sectors available in the domain, but they must also be
-obtained from fusing the uncoupled sectors available in the codomain (i.e. it is the
-intersection of both `blocksectors(codomain(t))` and `blocksectors(domain(t))`). For a
-specific sector `c ∈ blocksectors(t)`, `block(t, c)` returns the corresponding data. Both
-are obtained together with `blocks(t)`, which returns an iterator over the pairs
-`c => block(t, c)`. Furthermore, there is `fusiontrees(t)` which returns an iterator over
-splitting-fusion tree pairs `(f₁, f₂)`, for which the corresponding data is given by
-`t[f₁, f₂]` (i.e. using Base.getindex).
+`blocksectors(t)` returns an iterator over the different coupled sectors that can be obtained from fusing the uncoupled sectors available in the domain, but they must also be obtained from fusing the uncoupled sectors available in the codomain (i.e. it is the intersection of both `blocksectors(codomain(t))` and `blocksectors(domain(t))`).
+For a specific sector `c ∈ blocksectors(t)`, `block(t, c)` returns the corresponding data.
+Both are obtained together with `blocks(t)`, which returns an iterator over the pairs `c => block(t, c)`.
+Furthermore, there is `fusiontrees(t)` which returns an iterator over splitting-fusion tree pairs `(f₁, f₂)`, for which the corresponding data is given by `t[f₁, f₂]` (i.e. using `Base.getindex`).
-Let's again illustrate these methods with an example, continuing with the tensor `t` from
-the previous example
+Let's again illustrate these methods with an example, continuing with the tensor `t` from the previous example
```@repl tensors
typeof(t)
codomain(t)
@@ -616,657 +429,11 @@ t[f1,f2]
## [Reading and writing tensors: `Dict` conversion](@id ss_tensor_readwrite)
-There are no custom or dedicated methods for reading, writing or storing `TensorMaps`,
-however, there is the possibility to convert a `t::AbstractTensorMap` into a `Dict`, simply
-as `convert(Dict, t)`. The backward conversion `convert(TensorMap, dict)` will return a
-tensor that is equal to `t`, i.e. `t == convert(TensorMap, convert(Dict, t))`.
-
-This conversion relies on that the string represenation of objects such as `VectorSpace`,
-`FusionTree` or `Sector` should be such that it represents valid code to recreate the
-object. Hence, we store information about the domain and codomain of the tensor, and the
-sector associated with each data block, as a `String` obtained with `repr`. This provides
-the flexibility to still change the internal structure of such objects, without this
-breaking the ability to load older data files. The resulting dictionary can then be stored
-using any of the provided Julia packages such as
-[JLD.jl](https://github.com/JuliaIO/JLD.jl),
-[JLD2.jl](https://github.com/JuliaIO/JLD2.jl),
-[BSON.jl](https://github.com/JuliaIO/BSON.jl),
-[JSON.jl](https://github.com/JuliaIO/JSON.jl), ...
-
-## [Vector space and linear algebra operations](@id ss_tensor_linalg)
-
-`AbstractTensorMap` instances `t` represent linear maps, i.e. homomorphisms in a `𝕜`-linear
-category, just like matrices. To a large extent, they follow the interface of `Matrix` in
-Julia's `LinearAlgebra` standard library. Many methods from `LinearAlgebra` are (re)exported
-by TensorKit.jl, and can then us be used without `using LinearAlgebra` explicitly. In all
-of the following methods, the implementation acts directly on the underlying matrix blocks
-(typically using the same method) and never needs to perform any basis transforms.
-
-In particular, `AbstractTensorMap` instances can be composed, provided the domain of the
-first object coincides with the codomain of the second. Composing tensor maps uses the
-regular multiplication symbol as in `t = t1*t2`, which is also used for matrix
-multiplication. TensorKit.jl also supports (and exports) the mutating method
-`mul!(t, t1, t2)`. We can then also try to invert a tensor map using `inv(t)`, though this
-can only exist if the domain and codomain are isomorphic, which can e.g. be checked as
-`fuse(codomain(t)) == fuse(domain(t))`. If the inverse is composed with another tensor
-`t2`, we can use the syntax `t1\t2` or `t2/t1`. However, this syntax also accepts instances
-`t1` whose domain and codomain are not isomorphic, and then amounts to `pinv(t1)`, the
-Moore-Penrose pseudoinverse. This, however, is only really justified as minimizing the
-least squares problem if `InnerProductStyle(t) <: EuclideanProduct`.
-
-`AbstractTensorMap` instances behave themselves as vectors (i.e. they are `𝕜`-linear) and
-so they can be multiplied by scalars and, if they live in the same space, i.e. have the same
-domain and codomain, they can be added to each other. There is also a `zero(t)`, the
-additive identity, which produces a zero tensor with the same domain and codomain as `t`. In
-addition, `TensorMap` supports basic Julia methods such as `fill!` and `copy!`, as well
-as `copy(t)` to create a copy with independent data. Aside from basic `+` and `*`
-operations, TensorKit.jl reexports a number of efficient in-place methods from
-`LinearAlgebra`, such as `axpy!` (for `y ← α * x + y`), `axpby!` (for `y ← α * x + β * y`),
-`lmul!` and `rmul!` (for `y ← α*y` and `y ← y*α`, which is typically the same) and `mul!`,
-which can also be used for out-of-place scalar multiplication `y ← α*x`.
-
-For `S = spacetype(t)` where `InnerProductStyle(S) <: EuclideanProduct`, we can compute
-`norm(t)`, and for two such instances, the inner product `dot(t1, t2)`, provided `t1` and
-`t2` have the same domain and codomain. Furthermore, there is `normalize(t)` and
-`normalize!(t)` to return a scaled version of `t` with unit norm. These operations should
-also exist for `InnerProductStyle(S) <: HasInnerProduct`, but require an interface for
-defining a custom inner product in these spaces. Currently, there is no concrete subtype of
-`HasInnerProduct` that is not an `EuclideanProduct`. In particular, `CartesianSpace`,
-`ComplexSpace` and `GradedSpace` all have `InnerProductStyle(S) <: EuclideanProduct`.
-
-With tensors that have `InnerProductStyle(t) <: EuclideanProduct` there is associated an
-adjoint operation, given by `adjoint(t)` or simply `t'`, such that
-`domain(t') == codomain(t)` and `codomain(t') == domain(t)`. Note that for an instance
-`t::TensorMap{S,N₁,N₂}`, `t'` is simply stored in a wrapper called
-`AdjointTensorMap{S,N₂,N₁}`, which is another subtype of `AbstractTensorMap`. This should
-be mostly unvisible to the user, as all methods should work for this type as well. It can
-be hard to reason about the index order of `t'`, i.e. index `i` of `t` appears in `t'` at
-index position `j = TensorKit.adjointtensorindex(t, i)`, where the latter method is
-typically not necessary and hence unexported. There is also a plural
-`TensorKit.adjointtensorindices` to convert multiple indices at once. Note that, because
-the adjoint interchanges domain and codomain, we have `space(t', j) == space(t, i)'`.
-
-`AbstractTensorMap` instances can furthermore be tested for exact (`t1 == t2`) or
-approximate (`t1 ≈ t2`) equality, though the latter requires that `norm` can be computed.
-
-When tensor map instances are endomorphisms, i.e. they have the same domain and codomain,
-there is a multiplicative identity which can be obtained as `one(t)` or `one!(t)`, where the
-latter overwrites the contents of `t`. The multiplicative identity on a space `V` can also
-be obtained using `id(A, V)` as discussed [above](@ref ss_tensor_construction), such that
-for a general homomorphism `t′`, we have
-`t′ == id(codomain(t′)) * t′ == t′ * id(domain(t′))`. Returning to the case of
-endomorphisms `t`, we can compute the trace via `tr(t)` and exponentiate them using
-`exp(t)`, or if the contents of `t` can be destroyed in the process, `exp!(t)`. Furthermore,
-there are a number of tensor factorizations for both endomorphisms and general homomorphism
-that we discuss below.
-
-Finally, there are a number of operations that also belong in this paragraph because of
-their analogy to common matrix operations. The tensor product of two `TensorMap` instances
-`t1` and `t2` is obtained as `t1 ⊗ t2` and results in a new `TensorMap` with
-`codomain(t1 ⊗ t2) = codomain(t1) ⊗ codomain(t2)` and
-`domain(t1 ⊗ t2) = domain(t1) ⊗ domain(t2)`. If we have two `TensorMap{T,S,N,1}` instances
-`t1` and `t2` with the same codomain, we can combine them in a way that is analoguous to
-`hcat`, i.e. we stack them such that the new tensor `catdomain(t1, t2)` has also the same
-codomain, but has a domain which is `domain(t1) ⊕ domain(t2)`. Similarly, if `t1` and `t2`
-are of type `TensorMap{T,S,1,N}` and have the same domain, the operation
-`catcodomain(t1, t2)` results in a new tensor with the same domain and a codomain given by
-`codomain(t1) ⊕ codomain(t2)`, which is the analogy of `vcat`. Note that direct sum only
-makes sense between `ElementarySpace` objects, i.e. there is no way to give a tensor product
-meaning to a direct sum of tensor product spaces.
-
-Time for some more examples:
-```@repl tensors
-t == t + zero(t) == t * id(domain(t)) == id(codomain(t)) * t
-t2 = randn(ComplexF64, codomain(t), domain(t));
-dot(t2, t)
-tr(t2' * t)
-dot(t2, t) ≈ dot(t', t2')
-dot(t2, t2)
-norm(t2)^2
-t3 = copy!(similar(t, ComplexF64), t);
-t3 == t
-rmul!(t3, 0.8);
-t3 ≈ 0.8 * t
-axpby!(0.5, t2, 1.3im, t3);
-t3 ≈ 0.5 * t2 + 0.8 * 1.3im * t
-t4 = randn(fuse(codomain(t)), codomain(t));
-t5 = TensorMap{Float64}(undef, fuse(codomain(t)), domain(t));
-mul!(t5, t4, t) == t4 * t
-inv(t4) * t4 ≈ id(codomain(t))
-t4 * inv(t4) ≈ id(fuse(codomain(t)))
-t4 \ (t4 * t) ≈ t
-t6 = randn(ComplexF64, V1, codomain(t));
-numout(t4) == numout(t6) == 1
-t7 = catcodomain(t4, t6);
-foreach(println, (codomain(t4), codomain(t6), codomain(t7)))
-norm(t7) ≈ sqrt(norm(t4)^2 + norm(t6)^2)
-t8 = t4 ⊗ t6;
-foreach(println, (codomain(t4), codomain(t6), codomain(t8)))
-foreach(println, (domain(t4), domain(t6), domain(t8)))
-norm(t8) ≈ norm(t4)*norm(t6)
-```
-
-## Index manipulations
-
-In many cases, the bipartition of tensor indices (i.e. `ElementarySpace` instances) between
-the codomain and domain is not fixed throughout the different operations that need to be
-performed on that tensor map, i.e. we want to use the duality to move spaces from domain to
-codomain and vice versa. Furthermore, we want to use the braiding to reshuffle the order of
-the indices.
-
-For this, we use an interface that is closely related to that for manipulating splitting-
-fusion tree pairs, namely [`braid`](@ref) and [`permute`](@ref), with the interface
-
-```julia
-braid(t::AbstractTensorMap{T,S,N₁,N₂}, (p1, p2)::Index2Tuple{N₁′,N₂′}, levels::IndexTuple{N₁+N₂,Int})
-```
-
-and
-
-```julia
-permute(t::AbstractTensorMap{T,S,N₁,N₂}, (p1, p2)::Index2Tuple{N₁′,N₂′}; copy = false)
-```
-
-both of which return an instance of `AbstractTensorMap{T,S,N₁′,N₂′}`.
-
-In these methods, `p1` and `p2` specify which of the original tensor indices ranging from
-`1` to `N₁ + N₂` make up the new codomain (with `N₁′` spaces) and new domain (with `N₂′`
-spaces). Hence, `(p1..., p2...)` should be a valid permutation of `1:(N₁+N₂)`. Note that,
-throughout TensorKit.jl, permutations are always specified using tuples of `Int`s, for
-reasons of type stability. For `braid`, we also need to specify `levels` or depths for each
-of the indices of the original tensor, which determine whether indices will braid over or
-underneath each other (use the braiding or its inverse). We refer to the section on
-[manipulating fusion trees](@ref ss_fusiontrees) for more details.
-
-When `BraidingStyle(sectortype(t)) isa SymmetricBraiding`, we can use the simpler interface
-of `permute`, which does not require the argument `levels`. `permute` accepts a keyword
-argument `copy`. When `copy == true`, the result will be a tensor with newly allocated data
-that can independently be modified from that of the input tensor `t`. When `copy` takes the
-default value `false`, `permute` can try to return the result in a way that it shares its
-data with the input tensor `t`, though this is only possible in specific cases (e.g. when
-`sectortype(S) == Trivial` and `(p1..., p2...) = (1:(N₁+N₂)...)`).
-
-Both `braid` and `permute` come in a version where the result is stored in an already
-existing tensor, i.e. [`braid!(tdst, tsrc, (p1, p2), levels)`](@ref) and
-[`permute!(tdst, tsrc, (p1, p2))`](@ref).
-
-Another operation that belongs under index manipulations is taking the `transpose` of a
-tensor, i.e. `LinearAlgebra.transpose(t)` and `LinearAlgebra.transpose!(tdst, tsrc)`, both
-of which are reexported by TensorKit.jl. Note that `transpose(t)` is not simply equal to
-reshuffling domain and codomain with
-`braid(t, (1:(N₁+N₂)...), reverse(domainind(tsrc)), reverse(codomainind(tsrc))))`. Indeed,
-the graphical representation (where we draw the codomain and domain as a single object),
-makes clear that this introduces an additional (inverse) twist, which is then compensated
-in the `transpose` implementation.
-
-```@raw html
-
-```
-
-In categorical language, the reason for this extra twist is that we use the left
-coevaluation ``η``, but the right evaluation ``\tilde{ϵ}``, when repartitioning the indices
-between domain and codomain.
-
-There are a number of other index related manipulations. We can apply a twist (or inverse
-twist) to one of the tensor map indices via [`twist(t, i; inv = false)`](@ref) or
-[`twist!(t, i; inv = false)`](@ref). Note that the latter method does not store the result
-in a new destination tensor, but just modifies the tensor `t` in place. Twisting several
-indices simultaneously can be obtained by using the defining property
-
-``θ_{V⊗W} = τ_{W,V} ∘ (θ_W ⊗ θ_V) ∘ τ_{V,W} = (θ_V ⊗ θ_W) ∘ τ_{W,V} ∘ τ_{V,W}.``
-
-but is currently not implemented explicitly.
-
-For all sector types `I` with `BraidingStyle(I) == Bosonic()`, all twists are `1` and thus
-have no effect. Let us start with some examples, in which we illustrate that, albeit
-`permute` might act highly non-trivial on the fusion trees and on the corresponding data,
-after conversion to a regular `Array` (when possible), it just acts like `permutedims`
-```@repl tensors
-domain(t) → codomain(t)
-ta = convert(Array, t);
-t′ = permute(t, (1,2,3,4));
-domain(t′) → codomain(t′)
-convert(Array, t′) ≈ ta
-t′′ = permute(t, (4,2,3),(1,));
-domain(t′′) → codomain(t′′)
-convert(Array, t′′) ≈ permutedims(ta, (4,2,3,1))
-m
-transpose(m)
-convert(Array, transpose(t)) ≈ permutedims(ta,(4,3,2,1))
-dot(t2, t) ≈ dot(transpose(t2), transpose(t))
-transpose(transpose(t)) ≈ t
-twist(t, 3) ≈ t
-# as twist acts trivially for
-BraidingStyle(sectortype(t))
-```
-Note that `transpose` acts like one would expect on a `TensorMap{T,S,1,1}`. On a
-`TensorMap{TS,N₁,N₂}`, because `transpose` replaces the codomain with the dual of the
-domain, which has its tensor product operation reversed, this in the end amounts in a
-complete reversal of all tensor indices when representing it as a plain mutli-dimensional
-`Array`. Also, note that we have not defined the conjugation of `TensorMap` instances. One
-definition that one could think of is `conj(t) = adjoint(transpose(t))`. However note that
-`codomain(adjoint(tranpose(t))) == domain(transpose(t)) == dual(codomain(t))` and similarly
-`domain(adjoint(tranpose(t))) == dual(domain(t))`, where `dual` of a `ProductSpace` is
-composed of the dual of the `ElementarySpace` instances, in reverse order of tensor
-product. This might be very confusing, and as such we leave tensor conjugation undefined.
-However, note that we have a conjugation syntax within the context of
-[tensor contractions](@ref ss_tensor_contraction).
-
-To show the effect of `twist`, we now consider a type of sector `I` for which
-`BraidingStyle{I} != Bosonic()`. In particular, we use `FibonacciAnyon`. We cannot convert
-the resulting `TensorMap` to an `Array`, so we have to rely on indirect tests to verify our
-results.
-
-```@repl tensors
-V1 = GradedSpace{FibonacciAnyon}(:I=>3,:τ=>2)
-V2 = GradedSpace{FibonacciAnyon}(:I=>2,:τ=>1)
-m = TensorMap(randn, Float32, V1, V2)
-transpose(m)
-twist(braid(m, (1,2), (2,), (1,)), 1)
-t1 = randn(V1 * V2', V2 * V1);
-t2 = randn(ComplexF64, V1 * V2', V2 * V1);
-dot(t1, t2) ≈ dot(transpose(t1), transpose(t2))
-transpose(transpose(t1)) ≈ t1
-```
-
-A final operation that one might expect in this section is to fuse or join indices, and its
-inverse, to split a given index into two or more indices. For a plain tensor (i.e. with
-`sectortype(t) == Trivial`) amount to the equivalent of `reshape` on the multidimensional
-data. However, this represents only one possibility, as there is no canonically unique way
-to embed the tensor product of two spaces `V1 ⊗ V2` in a new space `V = fuse(V1 ⊗ V2)`. Such a
-mapping can always be accompagnied by a basis transform. However, one particular choice is
-created by the function `isomorphism`, or for `EuclideanProduct` spaces, `unitary`.
-Hence, we can join or fuse two indices of a tensor by first constructing
-`u = unitary(fuse(space(t, i) ⊗ space(t, j)), space(t, i) ⊗ space(t, j))` and then
-contracting this map with indices `i` and `j` of `t`, as explained in the section on
-[contracting tensors](@ref ss_tensor_contraction). Note, however, that a typical algorithm
-is not expected to often need to fuse and split indices, as e.g. tensor factorizations can
-easily be applied without needing to `reshape` or fuse indices first, as explained in the
-next section.
-
-## [Tensor factorizations](@id ss_tensor_factorization)
-
-### Eigenvalue decomposition
-
-As tensors are linear maps, they have various kinds of factorizations. Endomorphism, i.e.
-tensor maps `t` with `codomain(t) == domain(t)`, have an eigenvalue decomposition. For
-this, we overload both `LinearAlgebra.eigen(t; kwargs...)` and
-`LinearAlgebra.eigen!(t; kwargs...)`, where the latter destroys `t` in the process. The
-keyword arguments are the same that are accepted by `LinearAlgebra.eigen(!)` for matrices.
-The result is returned as `D, V = eigen(t)`, such that `t * V ≈ V * D`. For given
-`t::TensorMap{T,S,N,N}`, `V` is a `TensorMap{T,S,N,1}`, whose codomain corresponds to that of
-`t`, but whose domain is a single space `S` (or more correctly a `ProductSpace{S,1}`), that
-corresponds to `fuse(codomain(t))`. The eigenvalues are encoded in `D`, a
-`TensorMap{S,1,1}`, whose domain and codomain correspond to the domain of `V`. Indeed, we
-cannot reasonably associate a tensor product structure with the different eigenvalues. Note
-that `D` stores the eigenvalues in a dedicated `DiagonalTensorMap` type.
-
-We also define `LinearAlgebra.ishermitian(t)`, which can only return true for spacetypes
-that have a Euclidean inner product. In all other cases, as the inner product is not
-defined, there is no notion of hermiticity (i.e. we are not working in a `†`-category). We
-also define and export the routines `eigh` and `eigh!`, which compute the eigenvalue
-decomposition under the guarantee (not checked) that the map is hermitian. Hence,
-eigenvalues will be real and `V` will be unitary with `eltype(V) == eltype(t)`. We also
-define and export `eig` and `eig!`, which similarly assume that the `TensorMap` is not
-hermitian (hence this does not require `EuclideanTensorMap`), and always returns complex
-values eigenvalues and eigenvectors. Like for matrices, `LinearAlgebra.eigen` is type
-unstable and checks hermiticity at run-time, then falling back to either `eig` or `eigh`.
-
-### Orthogonal factorizations
-
-Other factorizations that are provided by TensorKit.jl are orthogonal or unitary in nature,
-and thus always require a Euclidean inner product. However, they don't require equal
-domain and codomain. Let us first discuss the *singular value decomposition*, for which we
-define and export the methods [`tsvd`](@ref) and `tsvd!` (where as always, the latter
-destroys the input).
-
-```julia
-U, Σ, Vʰ, ϵ = tsvd(t; trunc = notrunc(), p::Real = 2,
- alg::OrthogonalFactorizationAlgorithm = SDD())
-```
-
-This computes a (possibly truncated) singular value decomposition of
-`t::TensorMap{T,S,N₁,N₂}` (with `InnerProductStyle(t)<:EuclideanProduct`), such that
-`norm(t - U * Σ * Vʰ) ≈ ϵ`, where `U::TensorMap{T,S,N₁,1}`,
-`Σ::DiagonalTensorMap{real(T),S}`, `Vʰ::TensorMap{T,S,1,N₂}` and `ϵ::Real`. `U` is an
-isometry, i.e. `U' * U` approximates the identity, whereas `U * U'` is an idempotent
-(squares to itself). The same holds for `adjoint(Vʰ)`. The domain of `U` equals the domain
-and codomain of `Σ` and the codomain of `Vʰ`. In the case of `trunc = notrunc()` (default
-value, see below), this space is given by `min(fuse(codomain(t)), fuse(domain(t)))`. The
-singular values are contained in `Σ` and are stored in a specialized `DiagonalTensorMap`,
-similar to the eigenvalues before.
-
-The keyword argument `trunc` provides a way to control the truncation, and is connected to
-the keyword argument `p`. The default value `notrunc()` implies no truncation, and thus
-`ϵ = 0`. Other valid options are
-
-* `truncerr(η::Real)`: truncates such that the `p`-norm of the truncated singular values
- is smaller than `η` times the `p`-norm of all singular values;
-
-* `truncdim(χ::Integer)`: finds the optimal truncation such that the equivalent total
- dimension of the internal vector space is no larger than `χ`;
-
-* `truncspace(W)`: truncates such that the dimension of the internal vector space is no
- greater than that of `W` in any sector, i.e. with
- `W₀ = min(fuse(codomain(t)), fuse(domain(t)))` this option will result in
- `domain(U) == domain(Σ) == codomain(Σ) == codomain(Vᵈ) == min(W, W₀)`;
-
-* `trunbelow(η::Real)`: truncates such that every singular value is larger then `η`; this
- is different from `truncerr(η)` with `p = Inf` because it works in absolute rather than
- relative values.
-
-Furthermore, the `alg` keyword can be either `SVD()` or `SDD()` (default), which
-corresponds to two different algorithms in LAPACK to compute singular value decompositions.
-The default value `SDD()` uses a divide-and-conquer algorithm and is typically the
-fastest, but can loose some accuracy. The `SVD()` method uses a QR-iteration scheme and can
-be more accurate, but is typically slower. Since Julia 1.3, these two algorithms are also
-available in the `LinearAlgebra` standard library, where they are specified as
-`LinearAlgebra.DivideAndConquer()` and `LinearAlgebra.QRIteration()`.
-
-Note that we defined the new method `tsvd` (truncated or tensor singular value
-decomposition), rather than overloading `LinearAlgebra.svd`. We (will) also support
-`LinearAlgebra.svd(t)` as alternative for `tsvd(t; trunc = notrunc())`, but note that
-the return values are then given by `U, Σ, V = svd(t)` with `V = adjoint(Vʰ)`.
-
-We also define the following pair of orthogonal factorization algorithms, which are useful
-when one is not interested in truncating a tensor or knowing the singular values, but only
-in its image or coimage.
-
-* `Q, R = leftorth(t; alg::OrthogonalFactorizationAlgorithm = QRpos(), kwargs...)`:
- this produces an isometry `Q::TensorMap{T,S,N₁,1}` (i.e. `Q' * Q` approximates the
- identity, `Q * Q'` is an idempotent, i.e. squares to itself) and a general tensor map
- `R::TensorMap{T,1,N₂}`, such that `t ≈ Q * R`. Here, the domain of `Q` and thus codomain
- of `R` is a single vector space of type `S` that is typically given by
- `min(fuse(codomain(t)), fuse(domain(t)))`.
-
- The underlying algorithm used to compute this decomposition can be chosen among `QR()`,
- `QRpos()`, `QL()`, `QLpos()`, `SVD()`, `SDD()`, `Polar()`. `QR()` uses the underlying
- `qr` decomposition from `LinearAlgebra`, while `QRpos()` (the default) adds a correction
- to that to make sure that the diagonal elements of `R` are positive.
- Both result in upper triangular `R`, which are square when `codomain(t) ≾ domain(t)`
- and wide otherwise. `QL()` and `QLpos()` similarly result in a lower triangular
- matrices in `R`, but only work in the former case, i.e. `codomain(t) ≾ domain(t)`,
- which amounts to `blockdim(codomain(t), c) >= blockdim(domain(t), c)` for all
- `c ∈ blocksectors(t)`.
-
- One can also use `alg = SVD()` or `alg = SDD()`, with extra keywords to control the
- absolute (`atol`) or relative (`rtol`) tolerance. We then set `Q=U` and `R=Σ * Vʰ` from
- the corresponding singular value decomposition, where only these singular values
- `σ >= max(atol, norm(t) * rtol)` (and corresponding singular vectors in `U`) are kept.
- More finegrained control on the chosen singular values can be obtained with `tsvd` and
- its `trunc` keyword.
-
- Finally, `Polar()` sets `Q = U * Vʰ` and `R = (Vʰ)' * Σ * Vʰ`, such that `R` is positive
- definite; in this case `SDD()` is used to actually compute the singular value
- decomposition and no `atol` or `rtol` can be provided.
-
-* `L, Q = rightorth(t; alg::OrthogonalFactorizationAlgorithm = QRpos())`:
- this produces a general tensor map `L::TensorMap{T,S,N₁,1}` and the adjoint of an
- isometry `Q::TensorMap{T,S,1,N₂}`, such that `t ≈ L * Q`. Here, the domain of `L` and
- thus codomain of `Q` is a single vector space of type `S` that is typically given by
- `min(fuse(codomain(t)), fuse(domain(t)))`.
-
- The underlying algorithm used to compute this decomposition can be chosen among `LQ()`,
- `LQpos()`, `RQ()`, `RQpos()`, `SVD()`, `SDD()`, `Polar()`. `LQ()` uses the underlying
- `qr` decomposition from `LinearAlgebra` on the transposed data, and leads to lower
- triangular matrices in `L`; `LQpos()` makes sure the diagonal elements are
- positive. The matrices `L` are square when `codomain(t) ≿ domain(t)` and tall otherwise.
- Similarly, `RQ()` and `RQpos()` result in upper triangular matrices in `L`, but only
- works if `codomain(t) ≿ domain(t)`, i.e. when
- `blockdim(codomain(t), c) <= blockdim(domain(t), c)` for all `c ∈ blocksectors(t)`.
-
- One can also use `alg = SVD()` or `alg = SDD()`, with extra keywords to control the
- absolute (`atol`) or relative (`rtol`) tolerance. We then set `L = U * Σ` and `Q = Vʰ` from
- the corresponding singular value decomposition, where only these singular values
- `σ >= max(atol, norm(t) * rtol)` (and corresponding singular vectors in `Vʰ`) are kept.
- More finegrained control on the chosen singular values can be obtained with `tsvd` and
- its `trunc` keyword.
-
- Finally, `Polar()` sets `L = U * Σ * U'` and `Q=U*Vʰ`, such that `L` is positive definite;
- in this case `SDD()` is used to actually compute the singular value decomposition and no
- `atol` or `rtol` can be provided.
-
-Furthermore, we can compute an orthonormal basis for the orthogonal complement of the image
-and of the co-image (i.e. the kernel) with the following methods:
-
-* `N = leftnull(t; alg::OrthogonalFactorizationAlgorithm = QR(), kwargs...)`:
- returns an isometric `TensorMap{T,S,N₁,1}` (i.e. `N' * N` approximates the identity) such
- that `N' * t` is approximately zero.
-
- Here, `alg` can be `QR()` (`QRpos()` acts identically in this case), which assumes that
- `t` is full rank in all of its blocks and only returns an orthonormal basis for the
- missing columns.
-
- If this is not the case, one can also use `alg = SVD()` or `alg = SDD()`, with extra
- keywords to control the absolute (`atol`) or relative (`rtol`) tolerance. We then
- construct `N` from the left singular vectors corresponding to singular values
- `σ < max(atol, norm(t) * rtol)`.
-
-* `N = rightnull(t; alg::OrthogonalFactorizationAlgorithm = QR(), kwargs...)`:
- returns a `TensorMap{T,S,1,N₂}` with isometric adjoint (i.e. `N * N'` approximates the
- identity) such that `t * N'` is approximately zero.
-
- Here, `alg` can be `LQ()` (`LQpos()` acts identically in this case), which assumes that
- `t` is full rank in all of its blocks and only returns an orthonormal basis for the
- missing rows.
-
- If this is not the case, one can also use `alg = SVD()` or `alg = SDD()`, with extra
- keywords to control the absolute (`atol`) or relative (`rtol`) tolerance. We then
- construct `N` from the right singular vectors corresponding to singular values
- `σ < max(atol, norm(t) * rtol)`.
-
-Note that the methods `leftorth`, `rightorth`, `leftnull` and `rightnull` also come in a
-form with exclamation mark, i.e. `leftorth!`, `rightorth!`, `leftnull!` and `rightnull!`,
-which destroy the input tensor `t`.
-
-### Factorizations for custom index bipartions
-
-Finally, note that each of the factorizations take a single argument, the tensor map `t`,
-and a number of keyword arguments. They perform the factorization according to the given
-codomain and domain of the tensor map. In many cases, we want to perform the factorization
-according to a different bipartition of the indices. When `BraidingStyle(sectortype(t)) isa
-SymmetricBraiding`, we can immediately specify an alternative bipartition of the indices of
-`t` in all of these methods, in the form
-
-```julia
-factorize(t::AbstracTensorMap, (pleft, pright)::Index2Tuple{N₁′,N₂′}; kwargs...)
-```
-
-where `pleft` will be the indices in the codomain of the new tensor map, and `pright` the
-indices of the domain. Here, `factorize` is any of the methods `LinearAlgebra.eigen`, `eig`,
-`eigh`, `tsvd`, `LinearAlgebra.svd`, `leftorth`, `rightorth`, `leftnull` and `rightnull`.
-This signature does not allow for the exclamation mark, because it amounts to
-
-```julia
-factorize!(permute(t, (pleft, pright); copy = true); kwargs...)
-```
-
-where [`permute`](@ref) was introduced and discussed in the previous section. When the
-braiding is not symmetric, the user should manually apply [`braid`](@ref) to bring the
-tensor map in proper form before performing the factorization.
-
-Some examples to conclude this section
-```@repl tensors
-V1 = SU₂Space(0=>2, 1/2=>1)
-V2 = SU₂Space(0=>1, 1/2=>1, 1=>1)
-
-t = randn(V1 ⊗ V1, V2);
-U, S, W = tsvd(t);
-t ≈ U * S * W
-D, V = eigh(t' * t);
-D ≈ S * S
-U' * U ≈ id(domain(U))
-S
-
-Q, R = leftorth(t; alg = Polar());
-isposdef(R)
-Q ≈ U * W
-R ≈ W' * S * W
-
-U2, S2, W2, ε = tsvd(t; trunc = truncspace(V1));
-W2 * W2' ≈ id(codomain(W2))
-S2
-ε ≈ norm(block(S, Irrep[SU₂](1))) * sqrt(dim(Irrep[SU₂](1)))
-
-L, Q = rightorth(t, (1,), (2,3));
-codomain(L), domain(L), domain(Q)
-Q * Q'
-P = Q' * Q;
-P ≈ P * P
-t′ = permute(t, ((1,), (2, 3)));
-t′ ≈ t′ * P
-```
-
-## [Bosonic tensor contractions and tensor networks](@id ss_tensor_contraction)
-
-One of the most important operation with tensor maps is to compose them, more generally
-known as contracting them. As mentioned in the section on [category theory](@ref
-s_categories), a typical composition of maps in a ribbon category can graphically be
-represented as a planar arrangement of the morphisms (i.e. tensor maps, boxes with lines
-eminating from top and bottom, corresponding to source and target, i.e. domain and
-codomain), where the lines connecting the source and targets of the different morphisms
-should be thought of as ribbons, that can braid over or underneath each other, and that can
-twist. Technically, we can embed this diagram in ``ℝ × [0,1]`` and attach all the
-unconnected line endings corresponding objects in the source at some position ``(x,0)`` for
-``x∈ℝ``, and all line endings corresponding to objects in the target at some position
-``(x,1)``. The resulting morphism is then invariant under what is known as *framed
-three-dimensional isotopy*, i.e. three-dimensional rearrangements of the morphism that
-respect the rules of boxes connected by ribbons whose open endings are kept fixed. Such a
-two-dimensional diagram cannot easily be encoded in a single line of code.
-
-However, things simplify when the braiding is symmetric (such that over- and under-
-crossings become equivalent, i.e. just crossings), and when twists, i.e. self-crossings in
-this case, are trivial. This amounts to `BraidingStyle(I) == Bosonic()` in the language of
-TensorKit.jl, and is true for any subcategory of ``\mathbf{Vect}``, i.e. ordinary tensors,
-possibly with some symmetry constraint. The case of ``\mathbf{SVect}`` and its
-subcategories, and more general categories, are discussed below.
-
-In the case of trivial twists, we can deform the diagram such that we first combine every
-morphism with a number of coevaluations ``η`` so as to represent it as a tensor, i.e. with a
-trivial domain. We can then rearrange the morphism to be all ligned up horizontally, where
-the original morphism compositions are now being performed by evaluations ``ϵ``. This
-process will generate a number of crossings and twists, where the latter can be omitted
-because they act trivially. Similarly, double crossings can also be omitted. As a
-consequence, the diagram, or the morphism it represents, is completely specified by the
-tensors it is composed of, and which indices between the different tensors are connect, via
-the evaluation ``ϵ``, and which indices make up the source and target of the resulting
-morphism. If we also compose the resulting morphisms with coevaluations so that it has a
-trivial domain, we just have one type of unconnected lines, henceforth called open indices.
-We sketch such a rearrangement in the following picture
-
-```@raw html
-
-```
-
-Hence, we can now specify such a tensor diagram, henceforth called a tensor contraction or
-also tensor network, using a one-dimensional syntax that mimicks
-[abstract index notation](https://en.wikipedia.org/wiki/Abstract_index_notation)
-and specifies which indices are connected by the evaluation map using Einstein's summation
-conventation. Indeed, for `BraidingStyle(I) == Bosonic()`, such a tensor contraction can
-take the same format as if all tensors were just multi-dimensional arrays. For this, we
-rely on the interface provided by the package
-[TensorOperations.jl](https://github.com/QuantumKitHub/TensorOperations.jl).
-
-The above picture would be encoded as
-```julia
-@tensor E[a,b,c,d,e] := A[v,w,d,x] * B[y,z,c,x] * C[v,e,y,b] * D[a,w,z]
-```
-or
-```julia
-@tensor E[:] := A[1,2,-4,3] * B[4,5,-3,3] * C[1,-5,4,-2] * D[-1,2,5]
-```
-where the latter syntax is known as NCON-style, and labels the unconnected or outgoing
-indices with negative integers, and the contracted indices with positive integers.
-
-A number of remarks are in order. TensorOperations.jl accepts both integers and any valid
-variable name as dummy label for indices, and everything in between `[ ]` is not resolved in
-the current context but interpreted as a dummy label. Here, we label the indices of a
-`TensorMap`, like `A::TensorMap{T,S,N₁,N₂}`, in a linear fashion, where the first position
-corresponds to the first space in `codomain(A)`, and so forth, up to position `N₁`. Index
-`N₁ + 1`then corresponds to the first space in `domain(A)`. However, because we have applied
-the coevaluation ``η``, it actually corresponds to the corresponding dual space, in
-accordance with the interface of [`space(A, i)`](@ref) that we introduced
-[above](@ref ss_tensor_properties), and as indiated by the dotted box around ``A`` in the
-above picture. The same holds for the other tensor maps. Note that our convention also
-requires that we braid indices that we brought from the domain to the codomain, and so this
-is only unambiguous for a symmetric braiding, where there is a unique way to permute the
-indices.
-
-With the current syntax, we create a new object `E` because we use the definition operator
-`:=`. Furthermore, with the current syntax, it will be a `Tensor`, i.e. it will have a
-trivial domain, and correspond to the dotted box in the picture above, rather than the
-actual morphism `E`. We can also directly define `E` with the correct codomain and domain by rather using
-```julia
-@tensor E[a b c;d e] := A[v,w,d,x] * B[y,z,c,x] * C[v,e,y,b] * D[a,w,z]
-```
-or
-```julia
-@tensor E[(a,b,c);(d,e)] := A[v,w,d,x] * B[y,z,c,x] * C[v,e,y,b] * D[a,w,z]
-```
-where the latter syntax can also be used when the codomain is empty. When using the
-assignment operator `=`, the `TensorMap` `E` is assumed to exist and the contents will be
-written to the currently allocated memory. Note that for existing tensors, both on the left
-hand side and right hand side, trying to specify the indices in the domain and the codomain
-seperately using the above syntax, has no effect, as the bipartition of indices are already
-fixed by the existing object. Hence, if `E` has been created by the previous line of code,
-all of the following lines are now equivalent
-```julia
-@tensor E[(a,b,c);(d,e)] = A[v,w,d,x] * B[y,z,c,x] * C[v,e,y,b] * D[a,w,z]
-@tensor E[a,b,c,d,e] = A[v w d;x] * B[(y,z,c);(x,)] * C[v e y; b] * D[a,w,z]
-@tensor E[a b; c d e] = A[v; w d x] * B[y,z,c,x] * C[v,e,y,b] * D[a w;z]
-```
-and none of those will or can change the partition of the indices of `E` into its codomain
-and its domain.
-
-Two final remarks are in order. Firstly, the order of the tensors appearing on the right
-hand side is irrelevant, as we can reorder them by using the allowed moves of the Penrose
-graphical calculus, which yields some crossings and a twist. As the latter is trivial, it
-can be omitted, and we just use the same rules to evaluate the newly ordered tensor
-network. For the particular case of matrix-matrix multiplication, which also captures more
-general settings by appropriotely combining spaces into a single line, we indeed find
-
-```@raw html
-
-```
-
-or thus, the following to lines of code yield the same result
-```julia
-@tensor C[i,j] := B[i,k] * A[k,j]
-@tensor C[i,j] := A[k,j] * B[i,k]
-```
-Reordering of tensors can be used internally by the `@tensor` macro to evaluate the
-contraction in a more efficient manner. In particular, the NCON-style of specifying the
-contraction gives the user control over the order, and there are other macros, such as
-`@tensoropt`, that try to automate this process. There is also an `@ncon` macro and `ncon`
-function, an we recommend reading the
-[manual of TensorOperations.jl](https://quantumkithub.github.io/TensorOperations.jl/stable/) to
-learn more about the possibilities and how they work.
-
-A final remark involves the use of adjoints of tensors. The current framework is such that
-the user should not be to worried about the actual bipartition into codomain and domain of
-a given `TensorMap` instance. Indeed, for factorizations one just specifies the requested
-bipartition via the `factorize(t, (pleft, pright))` interface, and for tensor contractions
-the `@contract` macro figures out the correct manipulations automatically. However, when
-wanting to use the `adjoint` of an instance `t::TensorMap{T,S,N₁,N₂}`, the resulting
-`adjoint(t)` is a `AbstractTensorMap{T,S,N₂,N₁}` and one need to know the values of `N₁` and
-`N₂` to know exactly where the `i`th index of `t` will end up in `adjoint(t)`, and hence to
-know and understand the index order of `t'`. Within the `@tensor` macro, one can instead use
-`conj()` on the whole index expression so as to be able to use the original index ordering
-of `t`. Indeed, for matrices of thus, `TensorMap{T,S,1,1}` instances, this yields exactly the
-equivalence one expects, namely equivalence between the following to expressions.
-```julia
-@tensor C[i,j] := B'[i,k] * A[k,j]
-@tensor C[i,j] := conj(B[k,i]) * A[k,j]
-```
-For e.g. an instance `A::TensorMap{T,S,3,2}`, the following two syntaxes have the same effect
-within an `@tensor` expression: `conj(A[a,b,c,d,e])` and `A'[d,e,a,b,c]`.
-
-Some examples:
-
-## Fermionic tensor contractions
-
-TODO
+There are no custom or dedicated methods for reading, writing or storing `TensorMap`s; however, it is possible to convert a `t::AbstractTensorMap` into a `Dict`, simply as `convert(Dict, t)`.
+The backward conversion `convert(TensorMap, dict)` will return a tensor that is equal to `t`, i.e. `t == convert(TensorMap, convert(Dict, t))`.
-## Anyonic tensor contractions
+This conversion relies on the fact that the string representation of objects such as `VectorSpace`, `FusionTree` or `Sector` constitutes valid code to recreate the object.
+Hence, we store information about the domain and codomain of the tensor, and the sector associated with each data block, as a `String` obtained with `repr`.
+This provides the flexibility to still change the internal structure of such objects, without this breaking the ability to load older data files.
+The resulting dictionary can then be stored using any of the provided Julia packages such as [JLD.jl](https://github.com/JuliaIO/JLD.jl), [JLD2.jl](https://github.com/JuliaIO/JLD2.jl), [BSON.jl](https://github.com/JuliaIO/BSON.jl), [JSON.jl](https://github.com/JuliaIO/JSON.jl), ...
-TODO
diff --git a/docs/src/man/tutorial.md b/docs/src/man/tutorial.md
index c89d5be47..3039a584c 100644
--- a/docs/src/man/tutorial.md
+++ b/docs/src/man/tutorial.md
@@ -1,8 +1,7 @@
# [Tutorial](@id s_tutorial)
-Before discussing at length all aspects of this package, both its usage and implementation,
-we start with a short tutorial to sketch the main capabilities. Thereto, we start by
-loading TensorKit.jl
+Before discussing at length all aspects of this package, both its usage and implementation, we start with a short tutorial to sketch the main capabilities.
+Thereto, we start by loading TensorKit.jl
```@repl tutorial
using TensorKit
@@ -10,19 +9,15 @@ using TensorKit
## Cartesian tensors
-The most important objects in TensorKit.jl are tensors, which we now create with random
-(normally distributed) entries in the following manner
+The most important objects in TensorKit.jl are tensors, which we now create with random (normally distributed) entries in the following manner
```@repl tutorial
A = randn(ℝ^3 ⊗ ℝ^2 ⊗ ℝ^4)
```
-Note that we entered the tensor size not as plain dimensions, by specifying the vector
-space associated with these tensor indices, in this case `ℝ^n`, which can be obtained by
-typing `\bbR+TAB`. The tensor then lives in the tensor product of the different spaces,
-which we can obtain by typing `⊗` (i.e. `\otimes+TAB`), although for simplicity also the
-usual multiplication sign `*` does the job. Note also that `A` is printed as an instance of
-a parametric type `TensorMap`, which we will discuss below and contains `Tensor`.
+Note that we entered the tensor size not as plain dimensions, but by specifying the vector space associated with these tensor indices, in this case `ℝ^n`, which can be obtained by typing `\bbR+TAB`.
+The tensor then lives in the tensor product of the different spaces, which we can obtain by typing `⊗` (i.e. `\otimes+TAB`), although for simplicity also the usual multiplication sign `*` does the job.
+Note also that `A` is printed as an instance of a parametric type `TensorMap`, which we will discuss below and contains `Tensor`.
Let us briefly sidetrack into the nature of `ℝ^n`:
@@ -35,8 +30,8 @@ supertype(ElementarySpace)
```
i.e. `ℝ^n` can also be created without Unicode using the longer syntax `CartesianSpace(n)`.
-It is subtype of `ElementarySpace`, with a standard (Euclidean) inner product
-over the real numbers. Furthermore,
+It is a subtype of `ElementarySpace`, with a standard (Euclidean) inner product over the real numbers.
+Furthermore,
```@repl tutorial
W = ℝ^3 ⊗ ℝ^2 ⊗ ℝ^4
@@ -45,81 +40,68 @@ supertype(ProductSpace)
supertype(CompositeSpace)
```
-i.e. the tensor product of a number of `CartesianSpace`s is some generic parametric
-`ProductSpace` type, specifically `ProductSpace{CartesianSpace,N}` for the tensor product of
-`N` instances of `CartesianSpace`.
+i.e. the tensor product of a number of `CartesianSpace`s is some generic parametric `ProductSpace` type, specifically `ProductSpace{CartesianSpace,N}` for the tensor product of `N` instances of `CartesianSpace`.
-Tensors are itself vectors (but not `Vector`s or even `AbstractArray`s), so we can compute
-linear combinations, provided they live in the same space.
+Tensors are themselves vectors (but not `Vector`s or even `AbstractArray`s), so we can compute linear combinations, provided they live in the same space.
```@repl tutorial
B = randn(ℝ^3 * ℝ^2 * ℝ^4);
-C = 0.5*A + 2.5*B
+C = 0.5 * A + 2.5 * B
```
-Given that they are behave as vectors, they also have a scalar product and norm, which they
-inherit from the Euclidean inner product on the individual `ℝ^n` spaces:
+Given that they behave as vectors, they also have a scalar product and norm, which they inherit from the Euclidean inner product on the individual `ℝ^n` spaces:
```@repl tutorial
-scalarBA = dot(B,A)
-scalarAA = dot(A,A)
+scalarBA = dot(B, A)
+scalarAA = dot(A, A)
normA² = norm(A)^2
```
-More generally, our tensor objects implement the full interface layed out in
-[VectorInterface.jl](https://github.com/Jutho/VectorInterface.jl).
+More generally, our tensor objects implement the full interface laid out in [VectorInterface.jl](https://github.com/Jutho/VectorInterface.jl).
-If two tensors live on different spaces, these operations have no meaning and are thus not
-allowed
+If two tensors live on different spaces, these operations have no meaning and are thus not allowed
```@repl tutorial
B′ = randn(ℝ^4 * ℝ^2 * ℝ^3);
space(B′) == space(A)
C′ = 0.5 * A + 2.5 * B′
-scalarBA′ = dot(B′,A)
+scalarBA′ = dot(B′, A)
```
-However, in this particular case, we can reorder the indices of `B′` to match space of `A`,
-using the routine `permute` (we deliberately choose not to overload `permutedims` from
-Julia Base, for reasons that become clear below):
+However, in this particular case, we can reorder the indices of `B′` to match the space of `A`, using the routine `permute` (we deliberately choose not to overload `permutedims` from Julia Base, for reasons that become clear below):
```@repl tutorial
space(permute(B′, (3, 2, 1))) == space(A)
```
-We can contract two tensors using Einstein summation convention, which takes the interface
-from [TensorOperations.jl](https://github.com/quantumkithub/TensorOperations.jl). TensorKit.jl
-reexports the `@tensor` macro
+We can contract two tensors using Einstein summation convention, which takes the interface from [TensorOperations.jl](https://github.com/quantumkithub/TensorOperations.jl).
+TensorKit.jl reexports the `@tensor` macro
```@repl tutorial
-@tensor D[a,b,c,d] := A[a,b,e] * B[d,c,e]
-@tensor d = A[a,b,c] * A[a,b,c]
+@tensor D[a, b, c, d] := A[a, b, e] * B[d, c, e]
+@tensor d = A[a, b, c] * A[a, b, c]
d ≈ scalarAA ≈ normA²
```
-We hope that the index convention is clear. The `:=` is to create a new tensor `D`, without
-the `:` the result would be written in an existing tensor `D`, which in this case would
-yield an error as no tensor `D` exists. If the contraction yields a scalar, regular
-assignment with `=` can be used.
+We hope that the index convention is clear.
+The `:=` is to create a new tensor `D`, without the `:` the result would be written in an existing tensor `D`, which in this case would yield an error as no tensor `D` exists.
+If the contraction yields a scalar, regular assignment with `=` can be used.
-Finally, we can factorize a tensor, creating a bipartition of a subset of its indices and
-its complement. With a plain Julia `Array`, one would apply `permutedims` and `reshape` to
-cast the array into a matrix before applying e.g. the singular value decomposition. With
-TensorKit.jl, one just specifies which indices go to the left (rows) and right (columns)
+Finally, we can factorize a tensor, creating a bipartition of a subset of its indices and its complement.
+With a plain Julia `Array`, one would apply `permutedims` and `reshape` to cast the array into a matrix before applying e.g. the singular value decomposition.
+With TensorKit.jl, one just specifies which indices go to the left (rows) and right (columns) with a tuple of tuples, selecting the respective indices for either side.
```@repl tutorial
-U, S, Vd = tsvd(A, ((1,3), (2,)));
-@tensor A′[a,b,c] := U[a,c,d] * S[d,e] * Vd[e,b];
+A_matrix = permute(A, ((1, 3), (2,)));
+U, S, Vd = svd_compact(A_matrix);
+@tensor A′[a, b, c] := U[a, c, d] * S[d, e] * Vd[e, b];
A ≈ A′
U
```
-Note that the `tsvd` routine returns the decomposition of the linear map as three factors,
-`U`, `S` and `Vd`, each of them a `TensorMap`, such that `Vd` is already what is commonly
-called `V'`. Furthermore, observe that `U` is printed differently then `A`, i.e. as a
-`TensorMap((ℝ^3 ⊗ ℝ^4) ← ProductSpace(ℝ^2))`. What this means is that tensors (or more
-appropriately, `TensorMap` instances) in TensorKit.jl are always considered to be linear
-maps between two `ProductSpace` instances, with
+Note that the `svd_compact` routine returns the decomposition of the linear map as three factors, `U`, `S` and `Vd`, each of them a `TensorMap`, such that `Vd` is already what is commonly called `V'`.
+Furthermore, observe that `U` is printed differently than `A`, i.e. as a `TensorMap((ℝ^3 ⊗ ℝ^4) ← ProductSpace(ℝ^2))`.
+What this means is that tensors (or more appropriately, `TensorMap` instances) in TensorKit.jl are always considered to be linear maps between two `ProductSpace` instances, with
```@repl tutorial
codomain(U)
@@ -128,11 +110,8 @@ codomain(A)
domain(A)
```
-An instance of `TensorMap` thus represents a linear map from its domain to its codomain,
-making it an element of the space of homomorphisms between these two spaces. That space is
-represented using its own type `HomSpace` in TensorKit.jl, and which admits a direct
-constructor as well as a unicode alternative using the symbol `→` (obtained as `\to+TAB` or
-`\rightarrow+TAB`) or `←` (obtained as `\leftarrow+TAB`).
+An instance of `TensorMap` thus represents a linear map from its domain to its codomain, making it an element of the space of homomorphisms between these two spaces.
+That space is represented using its own type `HomSpace` in TensorKit.jl, and which admits a direct constructor as well as a unicode alternative using the symbol `→` (obtained as `\to+TAB` or `\rightarrow+TAB`) or `←` (obtained as `\leftarrow+TAB`).
```@repl tutorial
P = space(U)
@@ -140,8 +119,7 @@ space(U) == HomSpace(ℝ^3 ⊗ ℝ^4, ℝ^2) == (ℝ^3 ⊗ ℝ^4 ← ℝ^2) == (
(codomain(P), domain(P))
```
-Furthermore, a `Tensor` instance such as `A` is just a specific case of `TensorMap` with an
-empty domain, i.e. a `ProductSpace{CartesianSpace,0}` instance. Analogously, we can
+Furthermore, a `Tensor` instance such as `A` is just a specific case of `TensorMap` with an empty domain, i.e. a `ProductSpace{CartesianSpace,0}` instance. Analogously, we can
represent a vector `v` and matrix `m` as
```@repl tutorial
@@ -153,16 +131,11 @@ M₃ = M₂ * M₁ # matrix-matrix product
space(M₃)
```
-Note that for the construction of `M₁`, in accordance with how one specifies the dimensions
-of a matrix (e.g. `randn(4,3)`), the first space is the codomain and the second the domain.
-This is somewhat opposite to the general notation for a function `f:domain→codomain`, so
-that we also support this more mathemical notation, as illustrated in the construction of
-`M₂`. However, as this is confusing from the perspective of rows and columns, we also
-support the syntax `codomain ← domain` and actually use this as the default way of printing
-`HomSpace` instances.
+Note that for the construction of `M₁`, in accordance with how one specifies the dimensions of a matrix (e.g. `randn(4, 3)`), the first space is the codomain and the second the domain.
+This is somewhat opposite to the general notation for a function ``f : \text{domain} \rightarrow \text{codomain}``, so that we also support this more mathematical notation, as illustrated in the construction of `M₂`.
+However, as this is confusing from the perspective of rows and columns, we also support the syntax `codomain ← domain` and actually use this as the default way of printing `HomSpace` instances.
-The 'matrix-vector' or 'matrix-matrix' product can be computed between any two `TensorMap`
-instances for which the domain of the first matches with the codomain of the second, e.g.
+The *matrix-vector* or *matrix-matrix* product can be computed between any two `TensorMap` instances for which the domain of the first matches with the codomain of the second, e.g.
```@repl tutorial
v′ = v ⊗ v
@@ -171,25 +144,23 @@ w′ = M₁′ * v′
w′ ≈ w ⊗ w
```
-Another example involves checking that `U` from the singular value decomposition is a
-unitary, or at least a (left) isometric tensor
+Another example involves checking that `U` from the singular value decomposition is a unitary, or at least a (left) isometric tensor
```@repl tutorial
codomain(U)
domain(U)
space(U)
U' * U # should be the identity on the corresponding domain = codomain
-U' * U ≈ one(U'*U)
+U' * U ≈ one(U' * U)
P = U * U' # should be a projector
P * P ≈ P
```
-Here, the adjoint of a `TensorMap` results in a new tensor map (actually a simple wrapper
-of type `AdjointTensorMap <: AbstractTensorMap`) with domain and codomain interchanged.
+Here, the adjoint of a `TensorMap` results in a new tensor map (actually a simple wrapper of type `AdjointTensorMap <: AbstractTensorMap`) with domain and codomain interchanged.
-Our original tensor `A` living in `ℝ^4 * ℝ^2 * ℝ^3` is isomorphic to e.g. a linear map
-`ℝ^3 → ℝ^4 * ℝ^2`. This is where the full power of `permute` emerges. It allows to specify a
-permutation where some indices go to the codomain, and others go to the domain, as in
+Our original tensor `A` living in `ℝ^4 * ℝ^2 * ℝ^3` is isomorphic to e.g. a linear map `ℝ^3 → ℝ^4 * ℝ^2`.
+This is where the full power of `permute` emerges.
+It allows to specify a permutation where some indices go to the codomain, and others go to the domain, as in
```@repl tutorial
A2 = permute(A, ((1, 2), (3,)))
@@ -197,35 +168,29 @@ codomain(A2)
domain(A2)
```
-In fact, `tsvd(A, ((1, 3), (2,)))` is a shorthand for `tsvd(permute(A, ((1, 3), (2,))))`,
-where `tsvd(A::TensorMap)` will just compute the singular value decomposition according to
-the given codomain and domain of `A`.
+In fact, this was already what we used in `svd_compact(A_matrix)` to create the matricized tensor `A_matrix`, and where `svd_compact(A::AbstractTensorMap)` will just compute the singular value decomposition according to the given codomain and domain of `A`.
-Note, finally, that the `@tensor` macro treats all indices at the same footing and thus
-does not distinguish between codomain and domain. The linear numbering is first all indices
-in the codomain, followed by all indices in the domain. However, when `@tensor` creates a
-new tensor (i.e. when using `:=`), the default syntax always creates a `Tensor`, i.e. with
-all indices in the codomain.
+Note, finally, that the `@tensor` macro treats all indices at the same footing and thus does not distinguish between codomain and domain.
+The linear numbering is first all indices in the codomain, followed by all indices in the domain.
+However, when `@tensor` creates a new tensor (i.e. when using `:=`), the default syntax always creates a `Tensor`, i.e. with all indices in the codomain.
```@repl tutorial
-@tensor A′[a,b,c] := U[a,c,d] * S[d,e] * Vd[e,b];
+@tensor A′[a, b, c] := U[a, c, d] * S[d, e] * Vd[e, b];
codomain(A′)
domain(A′)
-@tensor A2′[(a,b); (c,)] := U[a,c,d] * S[d,e] * Vd[e,b];
+@tensor A2′[(a, b); (c,)] := U[a, c, d] * S[d, e] * Vd[e, b];
codomain(A2′)
domain(A2′)
-@tensor A2′′[a b; c] := U[a,c,d] * S[d,e] * Vd[e,b];
+@tensor A2′′[a b; c] := U[a, c, d] * S[d, e] * Vd[e, b];
A2 ≈ A2′ == A2′′
```
-As illustrated for `A2′` and `A2′′`, additional syntax is available that enables one to
-immediately specify the desired codomain and domain indices.
+As illustrated for `A2′` and `A2′′`, additional syntax is available that enables one to immediately specify the desired codomain and domain indices.
## Complex tensors
For applications in e.g. quantum physics, we of course want to work with complex tensors.
-Trying to create a complex-valued tensor with `CartesianSpace` indices is of course
-somewhat contrived and prints a (one-time) warning
+Trying to create a complex-valued tensor with `CartesianSpace` indices is of course somewhat contrived and prints a (one-time) warning
```@repl tutorial
A = randn(ComplexF64, ℝ^3 ⊗ ℝ^2 ⊗ ℝ^4)
@@ -238,17 +203,17 @@ Indeed, we instead want to work with complex vector spaces
A = randn(ComplexF64, ℂ^3 ⊗ ℂ^2 ⊗ ℂ^4)
```
-where `ℂ` is obtained as `\bbC+TAB` and we also have the non-Unicode alternative
-`ℂ^n == ComplexSpace(n)`. Most functionality works exactly the same
+where `ℂ` is obtained as `\bbC+TAB` and we also have the non-Unicode alternative `ℂ^n == ComplexSpace(n)`.
+Most functionality works exactly the same
```@repl tutorial
B = randn(ℂ^3 * ℂ^2 * ℂ^4);
-C = im*A + (2.5 - 0.8im) * B
+C = im * A + (2.5 - 0.8im) * B
scalarBA = dot(B, A)
scalarAA = dot(A, A)
normA² = norm(A)^2
-U, S, Vd = tsvd(A, ((1, 3), (2,)));
-@tensor A′[a,b,c] := U[a,c,d] * S[d,e] * Vd[e,b];
+U, S, Vd = svd_compact(permute(A, ((1, 3), (2,))));
+@tensor A′[a, b, c] := U[a, c, d] * S[d, e] * Vd[e, b];
A′ ≈ A
permute(A, ((1, 3), (2,))) ≈ U * S * Vd
```
@@ -256,28 +221,25 @@ permute(A, ((1, 3), (2,))) ≈ U * S * Vd
However, trying the following
```@repl tutorial
-@tensor D[a,b,c,d] := A[a,b,e] * B[d,c,e]
-@tensor d = A[a,b,c] * A[a,b,c]
+@tensor D[a, b, c, d] := A[a, b, e] * B[d, c, e]
+@tensor d = A[a, b, c] * A[a, b, c]
```
-we obtain `SpaceMismatch` errors. The reason for this is that, with `ComplexSpace`, an
-index in a space `ℂ^n` can only be contracted with an index in the dual space
-`dual(ℂ^n) == (ℂ^n)'`. Because of the complex Euclidean inner product, the dual space is
-equivalent to the complex conjugate space, but not the space itself.
+we obtain `SpaceMismatch` errors.
+The reason for this is that, with `ComplexSpace`, an index in a space `ℂ^n` can only be contracted with an index in the dual space `dual(ℂ^n) == (ℂ^n)'`.
+Because of the complex Euclidean inner product, the dual space is equivalent to the complex conjugate space, but not the space itself.
```@repl tutorial
dual(ℂ^3) == conj(ℂ^3) == (ℂ^3)'
(ℂ^3)' == ℂ^3
-@tensor d = conj(A[a,b,c]) * A[a,b,c]
+@tensor d = conj(A[a, b, c]) * A[a, b, c]
d ≈ normA²
```
-This might seem overly strict or puristic, but we believe that it can help to catch errors,
-e.g. unintended contractions. In particular, contracting two indices both living in `ℂ^n`
-would represent an operation that is not invariant under arbitrary unitary basis changes.
+This might seem overly strict or puristic, but we believe that it can help to catch errors, e.g. unintended contractions.
+In particular, contracting two indices both living in `ℂ^n` would represent an operation that is not invariant under arbitrary unitary basis changes.
-It also makes clear the isomorphism between linear maps `ℂ^n → ℂ^m` and tensors in
-`ℂ^m ⊗ (ℂ^n)'`:
+It also makes clear the isomorphism between linear maps `ℂ^n → ℂ^m` and tensors in `ℂ^m ⊗ (ℂ^n)'`:
```@repl tutorial
m = randn(ComplexF64, ℂ^3, ℂ^4)
@@ -287,26 +249,19 @@ space(m, 1)
space(m, 2)
```
-Hence, spaces become their corresponding dual space if they are 'permuted' from the domain
-to the codomain or vice versa. Also, spaces in the domain are reported as their dual when
-probing them with `space(A, i)`. Generalizing matrix-vector and matrix-matrix multiplication
-to arbitrary tensor contractions require that the two indices to be contracted have spaces
-which are each others dual. Knowing this, all the other functionality of tensors with
-`CartesianSpace` indices remains the same for tensors with `ComplexSpace` indices.
+Hence, spaces become their corresponding dual space if they are 'permuted' from the domain to the codomain or vice versa.
+Also, spaces in the domain are reported as their dual when probing them with `space(A, i)`.
+Generalizing matrix-vector and matrix-matrix multiplication to arbitrary tensor contractions requires that the two indices to be contracted have spaces which are each other's duals.
+Knowing this, all the other functionality of tensors with `CartesianSpace` indices remains the same for tensors with `ComplexSpace` indices.
## [Symmetries](@id ss_tutorial_symmetries)
-So far, the functionality that we have illustrated seems to be just a convenient (or
-inconvenient?) wrapper around dense multidimensional arrays, e.g. Julia's Base `Array`.
-More power becomes visible when involving symmetries. With symmetries, we imply that there
-is some symmetry action defined on every vector space associated with each of the indices
-of a `TensorMap`, and the `TensorMap` is then required to be equivariant, i.e. it acts as
-an intertwiner between the tensor product representation on the domain and that on the
-codomain. By Schur's lemma, this means that the tensor is block diagonal in some basis
-corresponding to the irreducible representations that can be coupled to by combining the
-different representations on the different spaces in the domain or codomain. For Abelian
-symmetries, this does not require a basis change and it just imposes that the tensor has
-some block sparsity. Let's clarify all of this with some examples.
+So far, the functionality that we have illustrated seems to be just a convenient (or inconvenient?) wrapper around dense multidimensional arrays, e.g. Julia's Base `Array`.
+More power becomes visible when involving symmetries.
+With symmetries, we imply that there is some symmetry action defined on every vector space associated with each of the indices of a `TensorMap`, and the `TensorMap` is then required to be equivariant, i.e. it acts as an intertwiner between the tensor product representation on the domain and that on the codomain.
+By Schur's lemma, this means that the tensor is block diagonal in some basis corresponding to the irreducible representations that can be coupled to by combining the different representations on the different spaces in the domain or codomain.
+For Abelian symmetries, this does not require a basis change and it just imposes that the tensor has some block sparsity.
+Let's clarify all of this with some examples.
We start with a simple ``ℤ₂`` symmetry:
@@ -319,21 +274,17 @@ A = randn(V1 * V1 * V2')
convert(Array, A)
```
-Here, we create a 5-dimensional space `V1`, which has a three-dimensional subspace
-associated with charge 0 (the trivial irrep of ``ℤ₂``) and a two-dimensional subspace with
-charge 1 (the non-trivial irrep). Similar for `V2`, where both subspaces are one-
-dimensional. Representing the tensor as a dense `Array`, we see that it is zero in those
-regions where the charges don't add to zero (modulo 2). Of course, the `Tensor(Map)` type
-in TensorKit.jl won't store these zero blocks, and only stores the non-zero information,
-which we can recognize also in the full `Array` representation.
+Here, we create a 5-dimensional space `V1`, which has a three-dimensional subspace associated with charge 0 (the trivial irrep of ``ℤ₂``) and a two-dimensional subspace with charge 1 (the non-trivial irrep).
+Similar for `V2`, where both subspaces are one-dimensional.
+Representing the tensor as a dense `Array`, we see that it is zero in those regions where the charges don't add to zero (modulo 2).
+Of course, the `Tensor(Map)` type in TensorKit.jl won't store these zero blocks, and only stores the non-zero information, which we can recognize also in the full `Array` representation.
-From there on, the resulting tensors support all of the same operations as the ones we
-encountered in the previous examples.
+From there on, the resulting tensors support all of the same operations as the ones we encountered in the previous examples.
```@repl tutorial
B = randn(V1' * V1 * V2);
-@tensor C[a,b] := A[a,c,d] * B[c,b,d]
-U, S, V = tsvd(A, ((1, 3), (2,)));
+@tensor C[a, b] := A[a, c, d] * B[c, b, d]
+U, S, V = svd_compact(permute(A, ((1, 3), (2,))));
U' * U # should be the identity on the corresponding domain = codomain
U' * U ≈ one(U'*U)
P = U * U' # should be a projector
@@ -343,66 +294,56 @@ P * P ≈ P
We also support other abelian symmetries, e.g.
```@repl tutorial
-V = U₁Space(0=>2, 1=>1, -1=>1)
+V = U₁Space(0 => 2, 1 => 1, -1 => 1)
dim(V)
A = randn(V * V, V)
dim(A)
convert(Array, A)
-V = Rep[U₁×ℤ₂]((0, 0) => 2, (1, 1) => 1, (-1, 0) => 1)
+V = Rep[U₁ × ℤ₂]((0, 0) => 2, (1, 1) => 1, (-1, 0) => 1)
dim(V)
A = randn(V * V, V)
dim(A)
convert(Array, A)
```
-Here, the `dim` of a `TensorMap` returns the number of linearly independent components, i.e.
-the number of non-zero entries in the case of an abelian symmetry. Also note that we can use
-`×` (obtained as `\times+TAB`) to combine different symmetry groups. The general space
-associated with symmetries is a `GradedSpace`, which is parametrized to the type of
-symmetry. For a group `G`, the fully specified type can be obtained as `Rep[G]`, while for
-more general sectortypes `I` it can be constructed as `Vect[I]`. Furthermore, `ℤ₂Space`
-(also `Z2Space` as non-Unicode alternative) and `U₁Space` (or `U1Space`) are just convenient
-synonyms, e.g.
+Here, the `dim` of a `TensorMap` returns the number of linearly independent components, i.e. the number of non-zero entries in the case of an abelian symmetry.
+Also note that we can use `×` (obtained as `\times+TAB`) to combine different symmetry groups.
+The general space associated with symmetries is a `GradedSpace`, which is parametrized to the type of symmetry.
+For a group `G`, the fully specified type can be obtained as `Rep[G]`, while for more general sectortypes `I` it can be constructed as `Vect[I]`.
+Furthermore, `ℤ₂Space` (also `Z2Space` as non-Unicode alternative) and `U₁Space` (or `U1Space`) are just convenient synonyms, e.g.
```@repl tutorial
-Rep[U₁](0=>3, 1=>2, -1=>1) == U1Space(-1=>1, 1=>2, 0=>3)
-V = U₁Space(1=>2, 0=>3, -1=>1)
+Rep[U₁](0 => 3, 1 => 2, -1 => 1) == U1Space(-1 => 1, 1 => 2, 0 => 3)
+V = U₁Space(1 => 2, 0 => 3, -1 => 1)
for s in sectors(V)
@show s, dim(V, s)
end
-U₁Space(-1=>1, 0=>3, 1=>2) == GradedSpace(Irrep[U₁](1)=>2, Irrep[U₁](0)=>3, Irrep[U₁](-1)=>1)
+U₁Space(-1 => 1, 0 => 3, 1 => 2) == GradedSpace(Irrep[U₁](1) => 2, Irrep[U₁](0) => 3, Irrep[U₁](-1) => 1)
supertype(GradedSpace)
```
-Note that `GradedSpace` is not immediately parameterized by some group `G`, but actually by
-the set of irreducible representations of `G`, denoted as `Irrep[G]`. Indeed, `GradedSpace`
-also supports a grading that is derived from the fusion ring of a (unitary) pre-fusion
-category. Note furthermore that the order in which the charges and their corresponding
-subspace dimensionality are specified is irrelevant, and that the charges, henceforth called
-sectors (which is a more general name for charges or quantum numbers) are of a specific
-type, in this case `Irrep[U₁] == U1Irrep`. However, the `Vect[I]` constructor automatically
-converts the keys in the list of `Pair`s it receives to the correct type. Alternatively, we
-can directly create the sectors of the correct type and use the generic `GradedSpace`
-constructor. We can probe the subspace dimension of a certain sector `s` in a space `V` with
-`dim(V, s)`. Finally, note that `GradedSpace` still has the standard Euclidean inner product
-and we assume all representations to be unitary.
-
-TensorKit.jl also allows for non-abelian symmetries such as `SU₂`. In this case, the vector
-space is characterized via the spin quantum number (i.e. the irrep label of `SU₂`) for each
-of its subspaces, and is created using `SU₂Space` (or `SU2Space` or `Rep[SU₂]` or
-`Vect[Irrep[SU₂]]`)
+Note that `GradedSpace` is not immediately parameterized by some group `G`, but actually by the set of irreducible representations of `G`, denoted as `Irrep[G]`.
+Indeed, `GradedSpace` also supports a grading that is derived from the fusion ring of a (unitary) pre-fusion category.
+Note furthermore that the order in which the charges and their corresponding subspace dimensionality are specified is irrelevant, and that the charges, henceforth called sectors (which is a more general name for charges or quantum numbers) are of a specific type, in this case `Irrep[U₁] == U1Irrep`.
+However, the `Vect[I]` constructor automatically converts the keys in the list of `Pair`s it receives to the correct type.
+Alternatively, we can directly create the sectors of the correct type and use the generic `GradedSpace` constructor.
+We can probe the subspace dimension of a certain sector `s` in a space `V` with `dim(V, s)`.
+Finally, note that `GradedSpace` still has the standard Euclidean inner product and we assume all representations to be unitary.
+
+TensorKit.jl also allows for non-abelian symmetries such as `SU₂`.
+In this case, the vector space is characterized via the spin quantum number (i.e. the irrep label of `SU₂`) for each of its subspaces, and is created using `SU₂Space` (or `SU2Space` or `Rep[SU₂]` or `Vect[Irrep[SU₂]]`)
```@repl tutorial
-V = SU₂Space(0=>2, 1/2=>1, 1=>1)
+V = SU₂Space(0 => 2, 1/2 => 1, 1 => 1)
dim(V)
-V == Vect[Irrep[SU₂]](0=>2, 1=>1, 1//2=>1)
+V == Vect[Irrep[SU₂]](0 => 2, 1 => 1, 1 // 2 => 1)
```
-Note that now `V` has a two-dimensional subspace with spin zero, and two one-dimensional
-subspaces with spin 1/2 and spin 1. However, a subspace with spin `j` has an additional
-`2j + 1` dimensional degeneracy on which the irreducible representation acts. This brings
-the total dimension to `2*1 + 1*2 + 1*3`. Creating a tensor with `SU₂` symmetry yields
+Note that now `V` has a two-dimensional subspace with spin zero, and two one-dimensional subspaces with spin 1/2 and spin 1.
+However, a subspace with spin `j` has an additional `2j + 1` dimensional degeneracy on which the irreducible representation acts.
+This brings the total dimension to `2*1 + 1*2 + 1*3`.
+Creating a tensor with `SU₂` symmetry yields
```@repl tutorial
A = randn(V * V, V)
@@ -411,27 +352,16 @@ convert(Array, A)
norm(A) ≈ norm(convert(Array, A))
```
-In this case, the full `Array` representation of the tensor has again many zeros, but it is
-less obvious to recognize the dense blocks, as there are additional zeros and the numbers
-in the original tensor data do not match with those in the `Array`. The reason is of course
-that the original tensor data now needs to be transformed with a construction known as
-fusion trees, which are made up out of the Clebsch-Gordan coefficients of the group.
-Indeed, note that the non-zero blocks are also no longer labeled by a list of sectors, but
-by pairs of fusion trees. This will be explained further in the manual. However, the
-Clebsch-Gordan coefficients of the group are only needed to actually convert a tensor to an
-`Array`. For working with tensors with `SU₂Space` indices, e.g. contracting or factorizing
-them, the Clebsch-Gordan coefficients are never needed explicitly. Instead, recoupling
-relations are used to symbolically manipulate the basis of fusion trees, and this only
-requires what is known as the topological data of the group (or its representation theory).
-
-In fact, this formalism extends beyond the case of group representations on vector spaces,
-and can also deal with super vector spaces (to describe fermions) and more general
-(unitary) fusion categories. Support for all of these generalizations is present in
-TensorKit.jl. Indeed, all of these concepts will be explained throughout the remainder of
-this manual, including several details regarding their implementation. However, to just use
-tensors and their manipulations (contractions, factorizations, ...) in higher level
-algorithms (e.g. tensoer network algorithms), one does not need to know or understand most
-of these details, and one can immediately refer to the general interface of the `TensorMap`
-type, discussed on the [last page](@ref s_tensors). Adhering to this interface should yield
-code and algorithms that are oblivious to the underlying symmetries and can thus work with
-arbitrary symmetric tensors.
+In this case, the full `Array` representation of the tensor has again many zeros, but it is less obvious to recognize the dense blocks, as there are additional zeros and the numbers in the original tensor data do not match with those in the `Array`.
+The reason is of course that the original tensor data now needs to be transformed with a construction known as fusion trees, which are made up out of the Clebsch-Gordan coefficients of the group.
+Indeed, note that the non-zero subblocks are also no longer labeled by a list of sectors, but by pairs of fusion trees.
+This will be explained further in the manual.
+However, the Clebsch-Gordan coefficients of the group are only needed to actually convert a tensor to an `Array`.
+For working with tensors with `SU₂Space` indices, e.g. contracting or factorizing them, the Clebsch-Gordan coefficients are never needed explicitly.
+Instead, recoupling relations are used to symbolically manipulate the basis of fusion trees, and this only requires what is known as the topological data of the group (or its representation theory).
+
+In fact, this formalism extends beyond the case of group representations on vector spaces, and can also deal with super vector spaces (to describe fermions) and more general (unitary) fusion categories.
+Support for all of these generalizations is present in TensorKit.jl.
+Indeed, all of these concepts will be explained throughout the remainder of this manual, including several details regarding their implementation.
+However, to just use tensors and their manipulations (contractions, factorizations, ...) in higher level algorithms (e.g. tensor network algorithms), one does not need to know or understand most of these details, and one can immediately refer to the general interface of the `TensorMap` type, discussed on the [last page](@ref s_tensors).
+Adhering to this interface should yield code and algorithms that are oblivious to the underlying symmetries and can thus work with arbitrary symmetric tensors.
diff --git a/src/tensors/braidingtensor.jl b/src/tensors/braidingtensor.jl
index 97fb2fbba..0070bc2d4 100644
--- a/src/tensors/braidingtensor.jl
+++ b/src/tensors/braidingtensor.jl
@@ -346,10 +346,10 @@ function planartrace!(
return planartrace!(C, TensorMap(A), p, q, α, β, backend, allocator)
end
-# function planarcontract!(C::AbstractTensorMap{S,N₁,N₂},
+# function planarcontract!(C::AbstractTensorMap{<:Any,S,N₁,N₂},
# A::BraidingTensor{S},
# (oindA, cindA)::Index2Tuple{0,4},
-# B::AbstractTensorMap{S},
+# B::AbstractTensorMap{<:Any,S},
# (cindB, oindB)::Index2Tuple{4,<:Any},
# (p1, p2)::Index2Tuple{N₁,N₂},
# α::Number, β::Number,
@@ -414,8 +414,8 @@ end
# end
# return C
# end
-# function planarcontract!(C::AbstractTensorMap{S,N₁,N₂},
-# A::AbstractTensorMap{S},
+# function planarcontract!(C::AbstractTensorMap{<:Any,S,N₁,N₂},
+# A::AbstractTensorMap{<:Any,S},
# (oindA, cindA)::Index2Tuple{0,4},
# B::BraidingTensor{S},
# (cindB, oindB)::Index2Tuple{4,<:Any},
@@ -482,10 +482,10 @@ end
# end
# return C
# end
-# function planarcontract!(C::AbstractTensorMap{S,N₁,N₂},
+# function planarcontract!(C::AbstractTensorMap{<:Any,S,N₁,N₂},
# A::BraidingTensor{S},
# (oindA, cindA)::Index2Tuple{1,3},
-# B::AbstractTensorMap{S},
+# B::AbstractTensorMap{<:Any,S},
# (cindB, oindB)::Index2Tuple{1,<:Any},
# (p1, p2)::Index2Tuple{N₁,N₂},
# α::Number, β::Number,
@@ -544,8 +544,8 @@ end
# end
# return C
# end
-# function planarcontract!(C::AbstractTensorMap{S,N₁,N₂},
-# A::AbstractTensorMap{S},
+# function planarcontract!(C::AbstractTensorMap{<:Any,S,N₁,N₂},
+# A::AbstractTensorMap{<:Any,S},
# (oindA, cindA)::Index2Tuple{<:Any,3},
# B::BraidingTensor{S},
# (cindB, oindB)::Index2Tuple{3,1},
diff --git a/src/tensors/indexmanipulations.jl b/src/tensors/indexmanipulations.jl
index 906ad6379..1f2b7105c 100644
--- a/src/tensors/indexmanipulations.jl
+++ b/src/tensors/indexmanipulations.jl
@@ -219,14 +219,15 @@ function LinearAlgebra.transpose(
end
"""
- repartition!(tdst::AbstractTensorMap{S}, tsrc::AbstractTensorMap{S}) where {S} -> tdst
+ repartition!(tdst::AbstractTensorMap, tsrc::AbstractTensorMap) -> tdst
Write into `tdst` the result of repartitioning the indices of `tsrc`. This is just a special
case of a transposition that only changes the number of in- and outgoing indices.
See [`repartition`](@ref) for creating a new tensor.
"""
-@propagate_inbounds function repartition!(tdst::AbstractTensorMap{S}, tsrc::AbstractTensorMap{S}) where {S}
+@propagate_inbounds function repartition!(tdst::AbstractTensorMap, tsrc::AbstractTensorMap)
+ check_spacetype(tdst, tsrc)
numind(tsrc) == numind(tdst) ||
throw(ArgumentError("tsrc and tdst should have an equal amount of indices"))
all_inds = (codomainind(tsrc)..., reverse(domainind(tsrc))...)
@@ -236,12 +237,12 @@ See [`repartition`](@ref) for creating a new tensor.
end
"""
- repartition(tsrc::AbstractTensorMap{S}, N₁::Int, N₂::Int; copy::Bool=false) where {S}
- -> tdst::AbstractTensorMap{S,N₁,N₂}
+ repartition(
+ tsrc::AbstractTensorMap{T, S}, N₁::Int, N₂::Int; copy::Bool=false
+ ) where {T, S} -> tdst::AbstractTensorMap{T, S, N₁, N₂}
Return tensor `tdst` obtained by repartitioning the indices of `t`.
-The codomain and domain of `tdst` correspond to the first `N₁` and last `N₂` spaces of `t`,
-respectively.
+The codomain and domain of `tdst` correspond to the first `N₁` and last `N₂` spaces of `t`, respectively.
If `copy=false`, `tdst` might share data with `tsrc` whenever possible. Otherwise, a copy is always made.
diff --git a/src/tensors/linalg.jl b/src/tensors/linalg.jl
index 900ee84fe..ac36dee9d 100644
--- a/src/tensors/linalg.jl
+++ b/src/tensors/linalg.jl
@@ -478,7 +478,7 @@ for f in (:sqrt, :log, :asin, :acos, :acosh, :atanh, :acoth)
end
# concatenate tensors
-function catdomain(t1::TT, t2::TT) where {S, N₁, TT <: AbstractTensorMap{<:Any, S, N₁, 1}}
+function catdomain(t1::AbstractTensorMap{<:Any, S, N₁, 1}, t2::AbstractTensorMap{<:Any, S, N₁, 1}) where {S, N₁}
codomain(t1) == codomain(t2) ||
throw(
SpaceMismatch("codomains of tensors to concatenate must match:\n$(codomain(t1)) ≠ $(codomain(t2))")
@@ -497,7 +497,7 @@ function catdomain(t1::TT, t2::TT) where {S, N₁, TT <: AbstractTensorMap{<:Any
end
return t
end
-function catcodomain(t1::TT, t2::TT) where {S, N₂, TT <: AbstractTensorMap{<:Any, S, 1, N₂}}
+function catcodomain(t1::AbstractTensorMap{<:Any, S, 1, N₂}, t2::AbstractTensorMap{<:Any, S, 1, N₂}) where {S, N₂}
domain(t1) == domain(t2) ||
throw(SpaceMismatch("domains of tensors to concatenate must match:\n$(domain(t1)) ≠ $(domain(t2))"))
V1, = codomain(t1)