import torch

# A 1-D tensor of the integers 0..11, then the same 12 values viewed as a
# 3x4 matrix via reshape.
x = torch.arange(12)
X = x.reshape(3, 4)
for t in (x, x.shape, X, X.shape):
    print(t)


# Three ways to create tensors: all zeros with a given shape, samples from
# the standard normal distribution, and a literal from a nested Python list.
X = torch.zeros((2, 3, 4))
print(X)

print(torch.randn(3, 4))

data = [[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]
print(torch.tensor(data))


# Standard arithmetic operators apply elementwise; ** is exponentiation.
x = torch.tensor([1.0, 2, 4, 8])
y = torch.tensor([2, 2, 2, 2])
for result in (x + y, x - y, x * y, x / y, x ** y):
    print(result)



print('tensor concatenation')
X = torch.arange(12, dtype=torch.float32).reshape((3, 4))
Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])

print(X)
print(Y)

# dim=0 stacks the rows -> shape (6, 4); dim=1 stacks the columns -> (3, 8).
by_rows = torch.cat((X, Y), dim=0)
print(by_rows)
print(by_rows.shape) # torch.Size([6, 4])
by_cols = torch.cat((X, Y), dim=1)
print(by_cols)
print(by_cols.shape) # torch.Size([3, 8])

# Elementwise equality test and a full-tensor reduction.
print(X == Y)
print(X.sum())


print('broadcasting mechanism')
# A (3, 1) column and a (1, 2) row broadcast to a common (3, 2) shape
# before the elementwise operation runs.
a = torch.arange(3).reshape(3, 1)
b = torch.arange(2).reshape(1, 2)
for t in (a, b, a + b, a * b):
    print(t)

print('indexing and slicing')
X = torch.arange(12).reshape(3, 4)
print(X)
# Row selection: a positive index and a negative (from-the-end) index.
print(X[1])
print(X[-1])
# A single element, then a 2x2 sub-block sliced on both axes.
print(X[1, 2])
window = X[1:3, 1:3]
print(window)
print(window.shape)

print('节省内存')
# Plain assignment binds Y to a freshly allocated result tensor,
# so its identity changes.
old_id = id(Y)
Y = Y + X
print(id(Y) == old_id) # False

# Slice assignment writes into the existing buffer: Z keeps its identity.
Z = torch.zeros_like(Y)
print('id(Z):', id(Z))
Z[:] = X + Y
print('id(Z):', id(Z))

# X[:] = X + Y or X += Y likewise updates in place, avoiding a reallocation.
X = torch.zeros_like(Y)
old_id = id(X)
X += Y
print(id(X) == old_id)


print('转换为其他Python对象')
# Round-trip between a torch tensor and a NumPy ndarray; torch.tensor(A)
# builds a new tensor from the array's data.
A = X.numpy()
B = torch.tensor(A)
for obj in (A, B):
    print(type(obj))


# A size-1 tensor converts to Python scalars: item() extracts the value,
# int()/float() coerce it, and .int() yields an integer tensor (truncating).
a = torch.tensor([3.5])
for value in (a, a.item(), a.int(), float(a), int(a)):
    print(value)