'''参考：https://blog.csdn.net/xholes/article/details/81667211'''
import torch as t
import numpy as np

# --- Tensor creation ---
print('创建')
a0 = t.Tensor(2, 3)  # uninitialized storage, like t.empty(2, 3)
print(a0, a0.dtype)
x = np.array([[1, 2, 3], [4, 5, 6]])
a1 = t.Tensor(x)  # t.Tensor is the float32 class constructor: copies data, casts to float
a2 = t.tensor(x)  # t.tensor is a factory function: copies data, keeps x's dtype
a3 = t.from_numpy(x)  # shares memory with the numpy array x
print(a1)
print(a2)
x[0, 0] = -1  # mutate the numpy array: a3 sees the change, a1/a2 (copies) do not
print(a1)
print(a3)
b1 = t.zeros(3, 2, dtype=t.long)
print(b1)
b2 = t.rand(2, 3)
print(b2)
b3 = t.FloatTensor([[1, 2, 3], [4, 5, 6]])
print(b3)
print(t.ones(3, 3))
print(t.eye(3, 3))
# Seven classic typed-tensor constructors:
#   t.FloatTensor(2,3), t.DoubleTensor(2,3), t.ByteTensor(2,3),
#   t.CharTensor(2,3), t.ShortTensor(2,3), t.IntTensor(2,3),
#   t.LongTensor(2,3)
b4 = t.randn(1)
print(b4.item())  # .item() extracts the Python scalar from a one-element tensor
y = a1.numpy()  # numpy view of a1's data

# --- Tensor attributes ---
print('属性')
# shape, element dtype, and the total element count
print(a1.size(), a1.dtype, a1.numel())

# --- Arithmetic and linear algebra ---
print('运算')
c1 = t.add(a1, b2)  # same as a1 + b2
c2 = a1.clone()
c2.add_(b2)  # trailing underscore means in-place: c2 += b2
print(c2)
print(c2.t())  # .t() transposes 2-D tensors only
a = t.randn(7,)
b = t.randn(7, 8)
print(t.matmul(a, b).shape)  # 1-D @ 2-D -> (8,)
a = t.randn(7, 8)
b = t.randn(8,)
print(t.matmul(a, b).shape)  # 2-D @ 1-D -> (7,)
a = t.randn(3, 4, 5, 6)
b = t.randn(1, 4, 6, 7)
# Batched matmul: the last two dims multiply as matrices,
# the leading dims broadcast -> (3, 4, 5, 7)
print(t.matmul(a, b).shape)
print(t.norm(b2, 2, 0))  # 2-norm along dimension 0

# --- Shape manipulation ---
print('操作')
d1 = t.arange(0, 12).view(3, 4)
print(d1)
d2 = t.unsqueeze(a1, 0)  # insert a new size-1 axis at dim 0
print(d2.size())
d3 = t.squeeze(d2)  # drop every size-1 axis
print(d3.size())
d4 = t.cat([a1, b2], 0)  # concatenate along an existing dimension
print(d4)
d5 = t.stack([d1, d1], 2)  # stack along a brand-new dimension
print(d5.size())
print(d5.transpose(0, 2).size())  # swap two dimensions
# k largest values (and their indices) along the last dimension by default
print(d1.topk(2, largest=True))

# --- GPU transfer ---
# Fix: the original called a1.cuda() unconditionally (crashes on CPU-only
# machines) and a1.clone().cuda(1) (crashes unless at least two GPUs exist).
# Guard both so the demo runs everywhere.
print('GPU')
device = t.device("cuda" if t.cuda.is_available() else "cpu")
print(device)
e1 = a1.to(device)  # .to() is the portable way to move a tensor
if t.cuda.is_available():
    e2 = a1.cuda()  # legacy API; defaults to the current CUDA device
    # Moving to device index 1 needs a second GPU; fall back otherwise.
    e3 = a1.clone().cuda(1) if t.cuda.device_count() > 1 else a1.clone().cuda()
    print(e1.get_device(), e2.get_device(), e3.get_device())
else:
    # get_device() is only meaningful for CUDA tensors, so skip it on CPU.
    print('CUDA not available; tensors stay on CPU')
