diff --git a/ext/TensorKitCUDAExt/cutensormap.jl b/ext/TensorKitCUDAExt/cutensormap.jl
index 3274e654a..f065c2ec1 100644
--- a/ext/TensorKitCUDAExt/cutensormap.jl
+++ b/ext/TensorKitCUDAExt/cutensormap.jl
@@ -138,6 +138,10 @@ function Base.promote_rule(
     return CuTensorMap{T, S, N₁, N₂}
 end
 
+TensorKit.promote_storage_rule(::Type{CuArray{T, N}}, ::Type{<:CuArray{T, N}}) where {T, N} =
+    CuArray{T, N, CUDA.default_memory}
+
+
 # CuTensorMap exponentation:
 function TensorKit.exp!(t::CuTensorMap)
     domain(t) == codomain(t) ||
diff --git a/test/cuda/tensors.jl b/test/cuda/tensors.jl
index a31015a4a..0fad13473 100644
--- a/test/cuda/tensors.jl
+++ b/test/cuda/tensors.jl
@@ -55,6 +55,14 @@ for V in spacelist
            @test domain(t) == one(W)
            @test typeof(t) == TensorMap{Float64, spacetype(t), 5, 0, CuVector{Float64, CUDA.DeviceMemory}}
        end
+       for f in (Base.ones, Base.zeros)
+           t = @constinferred f(CuVector{Float64, CUDA.DeviceMemory}, W)
+           @test scalartype(t) == Float64
+           @test codomain(t) == W
+           @test space(t) == (W ← one(W))
+           @test domain(t) == one(W)
+           @test typeof(t) == TensorMap{Float64, spacetype(t), 5, 0, CuVector{Float64, CUDA.DeviceMemory}}
+       end
        for f in (rand, randn)
            t = @constinferred f(CuVector{Float64, CUDA.DeviceMemory}, W)
            @test scalartype(t) == Float64
diff --git a/test/tensors/tensors.jl b/test/tensors/tensors.jl
index 74cb9cfa0..813c25ea5 100644
--- a/test/tensors/tensors.jl
+++ b/test/tensors/tensors.jl
@@ -44,6 +44,15 @@ for V in spacelist
            @test space(t) == (W ← one(W))
            @test domain(t) == one(W)
            @test typeof(t) == TensorMap{T, spacetype(t), 5, 0, Vector{T}}
+           # Array type input
+           t = @constinferred zeros(Vector{T}, W)
+           @test @constinferred(hash(t)) == hash(deepcopy(t))
+           @test scalartype(t) == T
+           @test norm(t) == 0
+           @test codomain(t) == W
+           @test space(t) == (W ← one(W))
+           @test domain(t) == one(W)
+           @test typeof(t) == TensorMap{T, spacetype(t), 5, 0, Vector{T}}
            # blocks
            bs = @constinferred blocks(t)
            if !isempty(blocksectors(t)) # multifusion space ending on module gives empty data