import gzip
import gxl_ai_utils
import numpy as np
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn
from ai.utils import utils_model


def do_test_lstm():
    """
    LSTM/RNN basics:
        1. input:  [batch_size, seq_len, input_size]
        2. output: [batch_size, seq_len, hidden_size]
        Recurrence:
        h_t = x_t * w_xt + h_{t-1} * w_ht + b
    """
    batch, max_len, feat_dim, hidden_dim = 123, 10, 32, 56
    lstm = nn.LSTM(input_size=feat_dim,
                   hidden_size=hidden_dim,
                   batch_first=True)
    padded = torch.randn(batch, max_len, feat_dim)
    lengths = torch.randint(1, max_len + 1, (batch,))
    h0 = torch.randn(1, batch, hidden_dim)
    c0 = torch.randn(1, batch, hidden_dim)
    print(lengths)
    # Pack so the LSTM skips padded timesteps; unsorted lengths are allowed.
    packed = rnn.pack_padded_sequence(padded, lengths=lengths,
                                      batch_first=True, enforce_sorted=False)
    packed_out, _hidden = lstm(packed, (h0, c0))
    out, lengths = rnn.pad_packed_sequence(packed_out, batch_first=True)
    print(out.shape)


def do_test_torch_concat():
    """Show that torch.cat and its alias torch.concat behave identically."""
    pieces = [torch.randn(1, 10) for _ in range(4)]
    via_cat = torch.cat(pieces, dim=0)
    print(via_cat.shape)
    via_concat = torch.concat(pieces, dim=0)
    print(via_concat.shape)


def do_test_torch_split():
    """
    The second argument of torch.split() is the size of each chunk.
    """
    big = torch.randn(4, 123, 1024)
    chunks = torch.split(big, 1, dim=0)
    print(len(chunks))


def do_test_cnn():
    """Print the output dim after stacking two conv layers (kernel 3, stride 2)."""
    for input_dim in range(10):
        after_first = utils_model.get_output_dim_for_conv(input_dim, 3, 2)
        after_second = utils_model.get_output_dim_for_conv(after_first, 3, 2)
        print(after_second)


def do_crossEntropyLoss():
    """Compare CrossEntropyLoss and NLLLoss on the same raw logits."""
    ce = nn.CrossEntropyLoss()
    nll = nn.NLLLoss(reduction="none")
    # Three identical rows of logits.
    logits = torch.tensor([[1, 200, 3, 42]] * 3, dtype=torch.float32)
    targets_last = torch.tensor([3, 3, 3])
    targets_first = torch.tensor([0, 0, 0])
    print(ce(logits, targets_last))
    print(ce(logits, targets_first))
    print(nll(logits, targets_last))
    print(nll(logits, targets_first))


def do_ctc_loss():
    """Explore CTCLoss next to CrossEntropyLoss/NLLLoss on toy logits."""
    ce_sum = nn.CrossEntropyLoss(reduction="sum")
    nll_sum = nn.NLLLoss(reduction="sum")
    frames = torch.tensor([[1, 112, 3, 42],
                           [1, 232, 3, 42],
                           [1, 11112, 3, 42]], dtype=torch.float32)
    targets = torch.tensor([1, 3, 2])
    print(ce_sum(frames, targets))
    print(nll_sum(frames, targets))
    # CTCLoss wants (T, N, C) input plus per-sample input/target lengths.
    ctc_none = nn.CTCLoss(reduction="none")
    print(ctc_none(frames.unsqueeze(0).transpose(0, 1),
                   targets.unsqueeze(0),
                   torch.tensor([3]),
                   torch.tensor([3])))
    # Batch of two identical utterances, two different target sequences.
    batch = frames.unsqueeze(0).repeat(2, 1, 1)
    batch_targets = torch.tensor([[2, 1, 2], [1, 3, 2]])
    # NOTE: 'mean' here averages per target token (NLLLoss-style mean),
    # not per batch element.
    for reduction in ("mean", "sum", "none"):
        ctc = nn.CTCLoss(reduction=reduction)
        print(ctc(batch.transpose(0, 1), batch_targets,
                  torch.tensor([3, 3]), torch.tensor([3, 3])))


def do_test_pytorch_tensor():
    """Poke at in-place indexed assignment and row-wise iteration."""
    column = torch.randn(10, 1)
    column[-1] = 123
    print(column)
    matrix = torch.tensor([[1, 2, 3], [4, 5, 6]])
    for idx, row in enumerate(matrix):
        print(row, idx)


def do_test_tuple():
    """
    Tuples can be concatenated with '+'.
    An empty collection is falsy.
    """
    left = (3,)
    right = (1, 2, 3)
    empty = ()
    # Short-circuits: the index is never evaluated on the empty tuple.
    if empty and empty[-1] == 'a':
        print('hello')

    print(left + right)


def do_test_topk():
    """topk returns a (values, indices) pair along the requested dim."""
    scores = torch.randn(3, 10)
    values, indices = scores.topk(k=4, dim=1)
    print(values)   # the top-k values themselves
    print(indices)  # where those values sit in each row


def do_test_means():
    """
    dim=0 averages over the 0th axis, leaving a vector sized like axis 1.
    """
    samples = torch.randint(0, 100, (10, 6))
    print(samples.to(torch.float32).mean(dim=0))


def do_test_triu():
    """Build a constant 5x5 matrix and keep only its strict upper triangle."""
    upper = torch.full((5, 5), 3.0).triu(diagonal=1)
    print(upper)


def do_test_to_sparse():
    """Convert a mostly-zero dense tensor into sparse COO format."""
    dense = torch.tensor([[0, 0, 0],
                          [0, 1, 0],
                          [0, 0, 2]])
    # .to_sparse() produces the same result but that spelling is deprecated.
    sparse = dense.to_sparse_coo()
    print(sparse)


def do_test_base85():
    """Round-trip a list of bools through struct packing and Ascii85 text.

    Returns the recovered numpy bool array.

    Fixes the original round-trip, which (a) passed the *raw* packed bytes
    to ``b85decode`` instead of the encoded text, (b) mixed the Ascii85
    encoder (``a85encode``) with the RFC-1924 decoder (``b85decode``), and
    (c) gzip-decompressed data that was never compressed — every one of
    which raised at runtime.
    """
    import base64
    import struct
    # A bool array to encode; '8?' packs each bool into one raw byte.
    flags = [True, False, True, True, False, False, True, False]
    raw = struct.pack('8?', *flags)
    # Encode the packed bytes as Ascii85 text.
    encoded = base64.a85encode(raw)
    print(encoded)
    # Decode the *encoded text* with the matching a85decode, then
    # reinterpret the bytes as a bool array (copy: frombuffer is read-only).
    array = np.frombuffer(base64.a85decode(encoded), dtype=bool).copy()
    print(array)
    return array


if __name__ == '__main__':
    """"""
    # Smoke-test the gxl_ai_utils package: print its greeting, then join
    # two path segments. NOTE(review): join_path presumably joins "sds"
    # and "dsds" with the platform separator — confirm in gxl_ai_utils.
    gxl_ai_utils.hello_ai()
    a = gxl_ai_utils.utils.utils_file.join_path("sds","dsds")
    print(a)