#
#启程 pytorch

#Tensor运算，相当于传统编程中的 a=1,b =2,然后 加 减  乘  除......

import torch
import numpy as np

# Build a tensor from a plain Python list.
# NOTE: torch.Tensor(...) (capital T) converts to the default float dtype
# (float32), which is why the integer input prints with trailing dots below.
data = [[1, 2], [3, 4], [5, 6]]
print(data)
data_tensor = torch.Tensor(data)
print(data_tensor)
'''输出：
[[1, 2], [3, 4], [5, 6]]
tensor([[1., 2.],
        [3., 4.],
        [5., 6.]])
'''
print("(1)" + "*" * 80)

# Build a tensor from a NumPy array.
# torch.tensor(...) (lowercase t) preserves the source dtype (integer here).
np_array = np.array([1, 2, 3, 4, 5, 6])
print(np_array)
np_array_tensor = torch.tensor(np_array)
print(np_array_tensor)
'''输出
[1 2 3 4 5 6]
tensor([1, 2, 3, 4, 5, 6])
'''
print("(2)" + "*" * 80)
# Tensor -> NumPy: np.array(tensor) copies the tensor data into an ndarray.
data_tensor = torch.ones(2, 3)  # positional args are the dimensions: 2 x 3
print(data_tensor)
data_np_array = np.array(data_tensor)
print(data_np_array)
'''输出
tensor([[1., 1., 1.],
        [1., 1., 1.]])
[[1. 1. 1.]
 [1. 1. 1.]]
'''


print("(3)" + "*" * 80)
shape = (3, 3)
# torch.rand draws uniform samples from the interval [0, 1).
rand_tensor = torch.rand(shape)
print(rand_tensor)
'''输出
tensor([[0.8549, 0.3752, 0.3489],
        [0.5750, 0.0738, 0.6826],
        [0.3099, 0.8349, 0.5644]])
'''

print("(4)" + "*" * 80)
# torch.randn samples the standard normal distribution (mean 0, std 1).
# NOTE: values are NOT limited to [-1, 1] and do not sum to exactly 0 —
# the recorded sample output below already contains 1.1579.
randn_tensor = torch.randn(shape)
print(randn_tensor)
ones_tensor = torch.ones(shape)  # tensor filled entirely with ones
print(ones_tensor)
zeros_tensor = torch.zeros(shape)  # tensor filled entirely with zeros
print(zeros_tensor)
'''输出
tensor([[-1.0040,  0.3871, -0.5658],
        [ 0.3008,  0.2571, -0.4030],
        [ 1.1579, -1.0687,  0.0387]])
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])
tensor([[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]])
'''
print("(5)" + "*" * 80)
# Inspect common tensor metadata (data_tensor is defined earlier in the file).
print(data_tensor.shape)           # attribute form
print(data_tensor.size())          # method form — same information as .shape
print(data_tensor.device)          # device the tensor lives on (e.g. cpu)
print(data_tensor.dtype)           # element type
print(data_tensor.requires_grad)   # whether autograd tracks this tensor
print(data_tensor.data[0])         # first row, accessed via the .data view
'''
torch.Size([2, 3])
torch.Size([2, 3])
cpu
torch.float32
False
tensor([1., 1., 1.])
'''
print("(6)" + "*" * 80)
# Concatenation: dim=0 stacks along rows (vertical), dim=1 along columns.
a = torch.tensor([[1., 1., 1.], [2., 2., 2.]])
b = torch.tensor([[1., 1., 1.], [3., 3., 3.]])
print(a)
print(b)
c = torch.cat([a, b], dim=0)  # result shape (4, 3)
print(c)
c = torch.cat([a, b], dim=1)  # result shape (2, 6)
print(c)
print("(7)" + "*" * 80)

torch.manual_seed(2024)  # fix the RNG seed so the "random" numbers reproduce
d = torch.rand([2, 3])
print(d)
e = d.T  # transpose: rows and columns swapped, shape becomes (3, 2)
print(e)
e = d @ e  # matrix product d @ d.T, a (2, 2) result
print(e)





































































































