import numpy as np
import torch
from torch import autograd
import cv2
from tensorboardX import SummaryWriter


def test1():
    """Demonstrate basic tensor construction: from a list, from a NumPy array,
    and from random integers."""
    from_list = torch.Tensor([1, 2, 3])
    print(from_list)
    from_numpy = torch.Tensor(np.arange(12).reshape(3, 4))
    print(from_numpy)
    random_ints = torch.randint(low=1, high=10, size=[5, 6])
    print(random_ints)


def test2():
    """Inspect a tensor's shape, reshape it, and run basic reductions."""
    t = torch.Tensor([[[1, 2, 7]]])
    print(t.size())
    print(t.size(1))
    print(t.size(-1))
    # view() reshapes; the total element count must stay the same
    print(t.view(3))
    # reshape() does the same without view()'s contiguity requirement
    print(t.reshape(3))
    # permute() reorders the dimensions
    print(t.permute(1, 0, 2))
    # number of dimensions
    print(t.dim())
    # largest element
    print(t.max())
    # standard deviation
    print(t.std())
    # convert to a NumPy array
    print(t.numpy())


def test3():
    """Show uninitialized tensor construction and in-place vs out-of-place add."""
    print(torch.Tensor(1, 2))  # uninitialized tensor of shape (1, 2)
    print(torch.FloatTensor(1))

    x, y = torch.rand(5, 4), torch.rand(5, 4)
    print(x)
    print(x.add(y))  # out-of-place: x is untouched
    print(x)
    print(x.add_(y))  # trailing underscore -> in-place: x is modified
    print(x)


def test4():
    """Automatic differentiation: grads of y = a^2*x + b*x + c w.r.t. a, b, c."""
    x = torch.tensor(1.)
    a = torch.tensor(1., requires_grad=True)
    b = torch.tensor(2., requires_grad=True)
    c = torch.tensor(3., requires_grad=True)

    y = a ** 2 * x + b * x + c

    print("before:\n", a.grad, b.grad, c.grad)
    da, db, dc = autograd.grad(y, [a, b, c])
    print("after:\n", da, db, dc)


def test5():
    """Place a tensor on the GPU when one is available, otherwise on the CPU."""
    target = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(torch.zeros(2, 3, device=target))


def test6():
    """Gradient tracking: requires_grad records every op for autograd history."""
    x = torch.ones(2, 2, requires_grad=True)
    print(x)

    y = x + 2
    print(y)  # the op that produced y is recorded in grad_fn
    print(y.grad_fn)

    # a tracked tensor must be detach()ed before converting to NumPy
    print(x.detach().numpy())


def test7():
    """Sequences, random permutations, and sparse (COO) tensors."""
    print(torch.arange(0, 11, 3))
    print(torch.linspace(2, 10, 4))  # 4 evenly spaced values in [2, 10]
    print(torch.randperm(10))  # random permutation of 0..9
    # sparse COO tensor: values 1, 2, 3 placed at (0,0), (1,1), (2,2);
    # the more zeros a tensor holds, the sparser it is
    indices = torch.tensor([[0, 1, 2], [0, 1, 2]])
    values = torch.tensor([1, 2, 3])
    sparse = torch.sparse_coo_tensor(indices, values, (3, 3))
    print(sparse.to_dense())


def test8():
    """Elementwise arithmetic, matrix products, and exponential/log functions."""
    a = torch.rand(2, 3)
    b = torch.rand(2, 3)
    print(a)
    print(b)
    # operators and their functional equivalents
    print(a + b)
    print(a - b)
    print(a * b)
    print(a / b)
    print(torch.add(a, b))
    print(torch.sub(a, b))
    print(torch.mul(a, b))
    print(torch.div(a, b))

    print("*" * 200)
    a = torch.rand(3, 2)
    b = torch.rand(2, 3)
    print(a)
    print(b)
    print(torch.matmul(a, b))
    print(torch.mm(a, b))

    print("*" * 200)
    # batched matmul: only the trailing two dims take part in the product,
    # the leading dims must line up
    a = torch.ones(1, 2, 3, 4)
    b = torch.ones(1, 2, 4, 3)
    print(torch.matmul(a, b))

    print("*" * 200)
    # exponents and logarithms; use a float tensor because exp/log/sqrt are
    # float-only ops (an integer tensor raises on older PyTorch versions)
    a = torch.tensor([1., 2.])
    print(torch.pow(a, 3))
    print(a ** 3)
    print(torch.exp(a))  # e ** a
    print(torch.log(a))  # natural logarithm
    print(torch.log2(a))
    print(torch.log10(a))
    print(torch.sqrt(a))


def test9():
    """Broadcasting: shapes are right-aligned and each paired dim must either
    match or be 1; here (2,1,1,3) op (4,2,3) broadcasts to (2,4,2,3)."""
    a = torch.rand(2, 1, 1, 3)
    b = torch.rand(4, 2, 3)
    for op in (torch.add, torch.sub, torch.mul, torch.div):
        print(op(a, b).shape)


def test10():
    """Rounding helpers: floor, ceil, round, trunc, frac, and modulo."""
    a = torch.rand(2, 3) * 10
    print(a)
    # floor: round down; ceil: round up; round: nearest;
    # trunc: drop the fraction; frac: keep only the fraction
    for fn in (torch.floor, torch.ceil, torch.round, torch.trunc, torch.frac):
        print("*" * 100)
        print(fn(a))
    print("*" * 100)
    print(a % 2)  # remainder


def test11():
    """Comparison ops; equal() compares whole tensors, the rest are elementwise."""
    a = torch.rand(2, 3)
    b = torch.rand(2, 3)
    # eq: elementwise ==; equal: True only if every element matches;
    # ge/gt/le/lt/ne: >=, >, <=, <, !=
    for cmp in (torch.eq, torch.equal, torch.ge, torch.gt,
                torch.le, torch.lt, torch.ne):
        print(cmp(a, b))


def test12():
    """torch.sort along different dims; dim=-1 equals the last dim."""
    a = torch.Tensor([[1, 7, 3, 8], [9, 1, 5, 4]])
    # dim=0 sorts down each column independently (smallest value moves to the top)
    print(torch.sort(a, dim=0, descending=False))
    print("*" * 100)
    # dim=1 sorts within each row
    print(torch.sort(a, dim=1, descending=False))
    print("*" * 100)
    # dim=-1 is identical to dim=1 for a 2-D tensor
    print(torch.sort(a, dim=-1, descending=False))


def test13():
    """topk along different dims, and kthvalue (the k-th smallest, not top-k)."""
    vals = torch.Tensor([[1, 7, 3, 8], [9, 1, 5, 4]])
    print(vals.shape)
    print(torch.topk(vals, k=2))  # two largest along the last dim
    print("*" * 100)
    print(torch.topk(vals, k=2, dim=0))  # two largest down each column
    print("*" * 100)
    print(torch.topk(vals, k=2, dim=1))  # two largest within each row
    print("*" * 100)
    print(torch.kthvalue(vals, k=2, dim=1))  # the 2nd smallest per row


def test14():
    """isfinite / isinf / isnan checks."""
    a = torch.rand(2, 3)
    print(torch.isfinite(a))
    print(torch.isfinite(a / 0))  # dividing by zero yields inf -> not finite
    print(torch.isinf(a))  # inf / -inf mark unbounded values
    print(torch.isinf(a / 0))

    b = torch.tensor([2, 3, np.nan])
    print(torch.isnan(b))  # flags NaN entries


def test15():
    """Apply a trigonometric function elementwise."""
    print(torch.cos(torch.rand(2, 3)))


def test16():
    """Full-tensor reductions, then the same reductions along dim 0."""
    a = torch.rand(2, 3)
    # mean, sum, product, max, min, standard deviation, variance,
    # median, and mode (the most frequent value)
    for reduce in (torch.mean, torch.sum, torch.prod, torch.max, torch.min,
                   torch.std, torch.var, torch.median, torch.mode):
        print(reduce(a))
    print("*" * 100)
    for reduce in (torch.mean, torch.sum, torch.prod, torch.max, torch.min):
        print(reduce(a, dim=0))


def test17():
    """Histogram (histc) and integer counting (bincount)."""
    a = torch.rand(2, 3) * 10
    print(a)
    # 6 equal-width bins over [0, 12]: counts how many values land in each bin
    print(torch.histc(a, 6, 0, 12))
    print("*" * 100)
    draws = torch.randint(0, 10, [10])
    print(draws)
    print(torch.bincount(draws))  # occurrences of each integer 0..9


def test18():
    """Seeded normal sampling: a fixed seed makes the draw reproducible."""
    torch.manual_seed(1)  # same seed -> same random sequence
    # args evaluate left to right, so mean is drawn first, then std
    print(torch.normal(torch.rand(1, 2), torch.rand(1, 2)))


def test19():
    """Distances between tensors and norms of a single tensor."""
    a = torch.rand(2, 3)
    b = torch.rand(2, 3)
    print(torch.dist(a, b, p=1))  # L1 distance
    print(torch.dist(a, b, p=2))  # L2 (Euclidean) distance
    print(torch.norm(a, p=1))  # 1-norm (sum of absolute values)
    print(torch.norm(a, p=2))  # 2-norm
    print(torch.norm(a, p="fro"))  # Frobenius norm


def test20():
    """Clamp values into [1, 5]; anything outside is pinned to the bound."""
    vals = torch.rand(2, 3) * 10
    print(vals)
    print(torch.clamp(vals, 1, 5))


def test21():
    """Selection ops: where, index_select, gather, masked_select, take, nonzero."""
    a = torch.rand(2, 3)
    b = torch.rand(2, 3)
    print(torch.where(a > b, a, b))  # elementwise pick from a or b
    a = torch.rand(4, 4)
    print(a)
    # pick rows 0, 3, 2 / columns 0, 3, 2 by index along the given dim
    print(torch.index_select(a, dim=0, index=torch.tensor([0, 3, 2])))
    print(torch.index_select(a, dim=1, index=torch.tensor([0, 3, 2])))

    print("*" * 100)
    a = torch.linspace(1, 16, 16)
    print(a)
    a = a.view(4, 4)
    print(a)
    # gather: the index tensor replaces the coordinate of the chosen dim, e.g.
    #   dim=0: out[i,j] = input[index[i,j], j]
    #   dim=1: out[i,j] = input[i, index[i,j]]
    print(torch.gather(a, dim=0, index=torch.tensor([[0, 1, 1, 1], [0, 1, 2, 2], [0, 1, 3, 3]])))

    print("*" * 100)
    print(torch.masked_select(a, mask=torch.gt(a, 8)))  # values > 8, flattened

    print("*" * 100)
    # take treats the input as flattened and picks by linear index
    print(torch.take(a, index=torch.tensor([0, 15, 13, 10])))

    print("*" * 100)
    a = torch.tensor([[0, 1, 2, 0], [2, 3, 0, 1]])
    print(torch.nonzero(a))  # coordinates of the nonzero entries


def test22():
    """cat joins along an existing dim; stack adds a new one."""
    zeros = torch.zeros((2, 4))
    ones = torch.ones((2, 4))
    print(torch.cat((zeros, ones), dim=0))
    print(torch.cat((zeros, ones), dim=-1))

    print("*" * 100)

    lo = torch.linspace(1, 6, 6).view(2, 3)
    hi = torch.linspace(7, 12, 6).view(2, 3)
    # stack keeps each source tensor recognizable by inserting a new dim
    print(torch.stack((lo, hi), dim=0))


def test23():
    """chunk splits into n near-equal pieces; split takes explicit piece sizes."""
    a = torch.rand((3, 4))
    print(a)
    print(torch.chunk(a, 2, dim=0))  # 3 rows -> pieces of 2 and 1
    print(torch.chunk(a, 2, dim=1))
    print(torch.split(a, 3, dim=0))

    b = torch.rand((10, 4))
    print(torch.split(b, [1, 3, 6], dim=0))  # pieces of 1, 3 and 6 rows


def test24():
    """Shape transforms: reshape, transpose, squeeze, unbind, flip, rot90."""
    m = torch.rand(2, 3)
    print(torch.reshape(m, (3, 2)))
    print(torch.t(m))  # 2-D transpose
    print("*" * 100)
    t = torch.rand(1, 2, 3)
    print(t)
    print(torch.transpose(t, 1, 2))  # swap dims 1 and 2

    print(torch.squeeze(t, dim=-1))  # no-op here: the last dim has size 3, not 1
    print(torch.unbind(t, dim=1))  # tuple of slices taken along dim 1

    print(torch.flip(t, dims=[1, 2]))  # reverse along dim 1, then along dim 2
    print(torch.rot90(t, -1, dims=[1, 2]))  # rotate 90 degrees clockwise


def test25():
    """Fill a 2x3 tensor with a constant value."""
    print(torch.full((2, 3), 3.14))


def test26():
    """Load an image with OpenCV, flip it vertically via torch, and display both.

    Raises:
        FileNotFoundError: if 'img.png' cannot be read.
    """
    img = cv2.imread('img.png', 0)  # flag 0 -> load as grayscale
    if img is None:
        # cv2.imread signals failure by silently returning None
        raise FileNotFoundError("could not read 'img.png'")
    cv2.imshow('image', img)
    # cv2.waitKey(0)

    out = torch.from_numpy(img)
    print(out)

    # flip along dim 0 (rows) -> vertical mirror of the image
    out = torch.flip(out, dims=[0])
    data = out.numpy()
    cv2.imshow("test", data)
    cv2.waitKey(0)


class Line(torch.autograd.Function):
    """Custom autograd Function computing y = w * x + b elementwise."""

    @staticmethod
    def forward(ctx, w, x, b):
        # stash the inputs so backward() can retrieve them
        ctx.save_for_backward(w, x, b)
        return w * x + b

    @staticmethod
    def backward(ctx, grad_out):
        print("*****grad_out*****")
        print(grad_out)
        print("*****grad_out*****")
        w, x, b = ctx.saved_tensors
        # chain rule: dy/dw = x, dy/dx = w, dy/db = 1; multiply each local
        # derivative by the incoming gradient, one result per forward input
        return grad_out * x, grad_out * w, grad_out * 1


def test27():
    """Compare builtin w*x+b against the custom Line Function and its grads."""
    w = torch.rand(2, 2, requires_grad=True)
    x = torch.rand(2, 2, requires_grad=True)
    b = torch.rand(2, 2, requires_grad=True)
    print(w * x + b)  # reference result via builtin autograd ops
    result = Line.apply(w, x, b)
    print(result)
    result.backward(torch.ones(2, 2))  # seed the backward pass with ones
    print(w, x, b)
    print(w.grad, x.grad, b.grad)


def test28():
    """Write two scalar curves to a TensorBoard log directory under 'log'."""
    writer = SummaryWriter("log")
    try:
        for step in range(100):
            writer.add_scalar("a", step, global_step=step)
            writer.add_scalar("b", step, global_step=step)
    finally:
        # ensure the event file is flushed/closed even if logging fails
        writer.close()


if __name__ == '__main__':
    # Script entry point: run one of the demo functions above.
    # print(torch.__version__)
    test28()
