# _*_coding:utf-8_*_
#05 线性代数
#按特定轴求和
#利用一个规律 按哪个轴求和就去掉shape中对应的那一个维度
#若要 keep dimension  True 那么若是按哪个轴求和 就把对应的那个轴的shape改为1
#axis 为轴
import torch
# Summing over a specific axis removes that axis from the shape;
# with keepdim=True the summed axis is kept with size 1 instead.
a = torch.ones([2, 5, 4])
x = a.sum()
print(x)
# tensor(40.)
x0 = a.shape    # .shape is an attribute, not a method — no parentheses
print(x0)
x1 = a.sum(dim=0)
print(x1.shape)
# torch.Size([5, 4])
x2 = a.sum(dim=1)
print(x2.shape)
# torch.Size([2, 4])
x3 = a.sum(dim=2)
print(x3.shape)
# torch.Size([2, 5])
x4 = a.sum(dim=[0, 2]).shape    # reduce two axes at once
print(x4)
# torch.Size([5])

x5 = a.sum(dim=1, keepdim=True)
print(x5.shape)
# torch.Size([2, 1, 4])

x6 = a.sum(dim=[0, 2], keepdim=True)
print(x6.shape)
# torch.Size([1, 5, 1])

# Linear algebra basics.
# Scalars: one-element tensors; arithmetic works element-wise.
x = torch.tensor([3.0])
y = torch.tensor([5.0])
print(x / y)
# tensor([0.6000])
# A vector can be viewed as a list of scalar values:
x = torch.arange(4)
print(x)
# tensor([0, 1, 2, 3])

# Individual elements are reached by indexing:
b = x[3]
print(b)
# tensor(3)

# len() gives the number of elements along the first axis:
b1 = len(x)
print(b1)
# 4

# A tensor with a single axis has a one-element shape:
b2 = x.shape
print(b2)
# torch.Size([4])

# Build an m-by-n matrix by reshaping a 1-D range into (m, n):
a = torch.arange(20).reshape(5, 4)
print(a)
'''
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11],
        [12, 13, 14, 15],
        [16, 17, 18, 19]])
'''

# The transpose swaps rows and columns:
b = a.T
print(b)
'''
tensor([[ 0,  4,  8, 12, 16],
        [ 1,  5,  9, 13, 17],
        [ 2,  6, 10, 14, 18],
        [ 3,  7, 11, 15, 19]])
'''

# A symmetric matrix equals its own transpose.
# Verify element-wise — every entry of the comparison should be True:
a = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
b = (a == a.T)
print(b)
'''
tensor([[True, True, True],
        [True, True, True],
        [True, True, True]])
'''

# Vectors generalize scalars, matrices generalize vectors, and tensors
# with more axes generalize matrices — data is organized along axes:
x = torch.arange(24).reshape(2, 3, 4)
print(x)
'''
tensor([[[ 0,  1,  2,  3],
         [ 4,  5,  6,  7],
         [ 8,  9, 10, 11]],

        [[12, 13, 14, 15],
         [16, 17, 18, 19],
         [20, 21, 22, 23]]])
'''

# clone() allocates new memory and copies the values, so a and b are
# independent tensors with identical contents:
a = torch.arange(20, dtype=torch.float32).reshape(5, 4)
b = a.clone()
print(a)
print(b)
print(a + b)
'''
tensor([[ 0.,  1.,  2.,  3.],
        [ 4.,  5.,  6.,  7.],
        [ 8.,  9., 10., 11.],
        [12., 13., 14., 15.],
        [16., 17., 18., 19.]])
tensor([[ 0.,  1.,  2.,  3.],
        [ 4.,  5.,  6.,  7.],
        [ 8.,  9., 10., 11.],
        [12., 13., 14., 15.],
        [16., 17., 18., 19.]])
tensor([[ 0.,  2.,  4.,  6.],
        [ 8., 10., 12., 14.],
        [16., 18., 20., 22.],
        [24., 26., 28., 30.],
        [32., 34., 36., 38.]])

'''
# Element-wise (Hadamard) product — rarely written this way in math:
c = a * b
print(c)

# Adding or multiplying by a scalar applies to every element (broadcasting):
x = 2
a = torch.arange(24, dtype=torch.float32).reshape(2, 3, 4)

print(a)
print(a * x)
print(a + x)

# Sum of all elements:
print(a.sum())
print(a.shape)

# Mean of all elements — two equivalent ways:
b = a.mean()
print(b)
c = a.sum() / a.numel()    # total sum divided by the element count
print(c)
# tensor(11.5000)
# tensor(11.5000)

# Reduce along axis 0 while keeping the number of dimensions (keepdim=True):
x = a.sum(dim=0, keepdim=True)
print(x)
# The mean along axis 0 divides the axis-0 sum by the length of that axis,
# a.shape[0] == 2 — NOT by a.numel() == 24, which would not be an average.
a_mean = x / a.shape[0]    # same result as a.mean(dim=0, keepdim=True)
print(a_mean)
'''
tensor([[[12., 14., 16., 18.],
         [20., 22., 24., 26.],
         [28., 30., 32., 34.]]])
tensor([[[ 6.,  7.,  8.,  9.],
         [10., 11., 12., 13.],
         [14., 15., 16., 17.]]])
'''

# Dot product of two vectors: multiply element-wise, then sum.
y = torch.ones(4, dtype=torch.float32)
x = torch.arange(4, dtype=torch.float32)
print(x * y)             # element-wise product only — still a vector
print(torch.dot(x, y))   # the actual dot product — a scalar
# tensor([0., 1., 2., 3.])
# tensor(6.)

# Matrix-vector interactions (note: int dtype — the printed output carries
# dtype=torch.int32 and integer formatting, not the floats a float32 run shows).
a = torch.arange(20, dtype=torch.int).reshape(5, 4)
b = torch.ones(4, dtype=torch.int)
print(b)
# tensor([1, 1, 1, 1], dtype=torch.int32)

# `*` is NOT matrix-vector multiplication: b is broadcast across the rows
# of a and multiplied element-wise, so both orders print the same tensor.
print(a * b)  # shapes (5,4) * (4,) -> broadcast to (5,4)
print(b * a)  # shapes (4,) * (5,4) -> same broadcast
'''
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11],
        [12, 13, 14, 15],
        [16, 17, 18, 19]], dtype=torch.int32)
'''
# (both prints show the tensor above)

# torch.mv is the real matrix-vector product; both arguments must share a
# dtype — mixing int and float raises
# "RuntimeError: expected scalar type Long but found Float".
c = torch.mv(a, b)
print(c)
# tensor([ 6, 22, 38, 54, 70], dtype=torch.int32)

# Matrix-matrix multiplication requires the inner dimensions to agree.
a = torch.ones(4, 3)
b = torch.ones(5, 4)
# torch.mm(a, b) would fail: (4x3) @ (5x4) has mismatched inner dims 3 vs 5.
# RuntimeError: mat1 and mat2 shapes cannot be multiplied (4x3 and 5x4)

d = torch.mm(b, a)   # (5,4) @ (4,3) -> (5,3)
print(d)
print(d.shape)
'''
tensor([[4., 4., 4.],
        [4., 4., 4.],
        [4., 4., 4.],
        [4., 4., 4.],
        [4., 4., 4.]])
torch.Size([5, 3])
'''

# L2 norm: square root of the sum of squared elements.
a = torch.tensor([3.0, -4.0])
b = torch.norm(a)
print(b)
# tensor(5.)

# L1 norm: sum of the absolute values of the elements.
c = torch.abs(a).sum()
print(c)
# tensor(7.)

# Frobenius norm of a matrix: L2 norm of the flattened matrix —
# here sqrt of 36 ones = 6.
x = torch.norm(torch.ones(4, 9))
print(x)
# tensor(6.)
