import os
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
from model import crnn

# Character inventory for the plate recognizer.  Index 0 (the space
# character) doubles as the CTC blank label downstream.
STR = " 0123456789东供儋养冷华台吴城州广廉昌江油洋渔湛滨珠琼电粤船茂西货运远遂阳万中临香陆门蛇湾深汕斗尾莞"

# char -> integer class id, derived from the character string above.
dic = {ch: idx for idx, ch in enumerate(STR)}

# NOTE(review): n_class is 56 while STR holds 55 characters — presumably one
# spare output class for the CRNN head; confirm against model.crnn.
n_class = 56
label_sources = r"D:\PycharmProjects\yolo_v8\recognition\labels\data1.txt"
image_sources = r"D:\PycharmProjects\yolo_v8\recognition\images\train"
use_gpu = True
learning_rate = 0.0001
max_epoch = 64
batch_size = 4


# Resize an OpenCV grayscale image and normalize it to [-1, 1].
class resizeAndNormalize():
    def __init__(self, size, interpolation=cv2.INTER_LINEAR):
        """`size` is (width, height) — note this is OpenCV's convention,
        the reverse of numpy's (rows, cols)."""
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, image):
        """Resize, convert to a CHW float tensor, and rescale to [-1, 1]."""
        resized = cv2.resize(image, self.size, interpolation=self.interpolation)
        tensor = self.toTensor(resized)
        # ToTensor yields values in [0, 1]; shift/scale in place to [-1, 1].
        return tensor.sub_(0.5).div_(0.5)


class resizeNormalize(object):
    """PIL counterpart of resizeAndNormalize: resize, to-tensor, map to [-1, 1]."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        """Resize a PIL image and return a normalized [-1, 1] tensor."""
        scaled = img.resize(self.size, self.interpolation)
        out = self.toTensor(scaled)
        # In-place normalization from [0, 1] to [-1, 1].
        out = out.sub_(0.5)
        return out.div_(0.5)


def load_data(label_folder, image_folder):
    """Read an annotation file of "image_name,label" lines.

    Args:
        label_folder: path to the UTF-8 annotation text file.
        image_folder: directory prepended to each image file name.

    Returns:
        (image_paths, labels): two parallel lists of equal length.
    """
    image_file, label_file = [], []
    with open(label_folder, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            # Skip blank lines (e.g. a trailing newline) instead of crashing
            # on the tuple unpack below.
            if not line:
                continue
            # maxsplit=1 keeps any comma inside the label text intact.
            image_name, label_line = line.split(',', 1)
            image_file.append(os.path.join(image_folder, image_name))
            label_file.append(label_line)
    return image_file, label_file


def zl2lable(zl):
    """Map each character of the plate string to its integer class id via `dic`."""
    return [dic[char] for char in zl]


class NewDataSet(Dataset):
    """Dataset over (image_path, label_string) pairs from load_data.

    Each image is loaded grayscale, resized to a fixed 100x32 (w, h) and
    normalized to [-1, 1]; labels are encoded through `dic` into IntTensors.
    """

    # CRNN expects a fixed input height of 32; width is fixed at 100 here.
    SIZE_WIDTH = 100
    SIZE_HEIGHT = 32

    def __init__(self, label_source, image_source, train=True):
        """
        Args:
            label_source: annotation file path, "name,label" per line.
            image_source: image directory joined to each name.
            train: accepted for interface compatibility but currently
                unused — no train/val split happens here.
        """
        super(NewDataSet, self).__init__()
        self.image_file, self.label_file = load_data(label_source, image_source)
        # The transform is identical for every sample; build it once instead
        # of once per __getitem__ call as before.
        self.transform = resizeAndNormalize((self.SIZE_WIDTH, self.SIZE_HEIGHT))

    def __len__(self):
        return len(self.image_file)

    def __getitem__(self, index):
        # Load grayscale; NOTE(review): a missing/corrupt file makes imread
        # return None and the transform will raise — same as the original.
        img = cv2.imread('{}'.format(self.image_file[index]), cv2.IMREAD_GRAYSCALE)
        imageTensor = self.transform(img)

        # Encode the label string into an IntTensor of class ids.
        labelTensor = torch.IntTensor(zl2lable(self.label_file[index]))
        return imageTensor, labelTensor


class CRNNDataSet(Dataset):
    """Dataset driven by a "filename id id id ..." annotation file.

    Images are rescaled to height 32 (keeping aspect ratio) and normalized
    to [-1, 1]; labels are the whitespace-separated integer ids that follow
    the file name on each annotation line.
    """

    def __init__(self, imageRoot, labelRoot):
        self.image_root = imageRoot
        self.image_dict = self.readfile(labelRoot)
        self.image_name = list(self.image_dict.keys())

    def __getitem__(self, index):
        name = self.image_name[index]
        image_path = os.path.join(self.image_root, name)
        label = [int(x) for x in self.image_dict.get(name)]

        image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        height, width = image.shape

        # CRNN wants an input height of 32; scale the width by the same ratio.
        target_height = 32
        scale = 32 / float(height)
        target_width = int(scale * width)
        transform = resizeAndNormalize((target_width, target_height))
        image = transform(image)

        return image, torch.IntTensor(label)

    def __len__(self):
        return len(self.image_name)

    def readfile(self, fileName):
        """Parse the annotation file, dropping entries whose image is absent.

        Prints every missing image path and finally the miss count,
        matching the original diagnostic output.
        """
        mapping = {}
        missing = 0
        with open(fileName, 'r') as f:
            for raw in f:
                parts = raw.strip().split(' ')
                full_path = os.path.join(self.image_root, parts[0])
                # Filter out annotations pointing at non-existent images up
                # front so training never hits a failed imread.
                if os.path.exists(full_path):
                    mapping[parts[0]] = parts[1:]
                else:
                    print(full_path)
                    missing += 1
        print(missing)
        return mapping


# Module-level dataset/loader, built eagerly at import time.
# NOTE(review): importing this module therefore reads label_sources from disk
# as a side effect; both val() and train() below consume this global loader.
trainData = NewDataSet(label_sources, image_sources)

# batch_size=1 here on purpose: train() accumulates `batch_size` samples
# manually, so no collate function is needed for the label tensors.
trainLoader = DataLoader(dataset=trainData, batch_size=1, shuffle=True, num_workers=0)


def toSTR(l):
    """Translate integer class ids into characters via the STR alphabet.

    Accepts either a single int or a sequence of ints; always returns a
    list of single-character strings.
    """
    ids = [l] if isinstance(l, int) else l
    return [STR[i] for i in ids]


def toRES(l):
    """Collapse a raw CTC character sequence into the decoded text.

    Blanks (' ') are dropped and act as separators; consecutive repeats of
    the same character between blanks are emitted only once.
    """
    decoded = []
    prev = ' '
    for ch in l:
        if ch == ' ':
            # A blank resets the repeat tracker so the same character can
            # legitimately appear again after it.
            prev = ' '
        elif ch != prev:
            decoded.append(ch)
            prev = ch
    return decoded


def val(model=None):
    """Evaluate a CRNN on the first `test_n` samples of the train loader.

    Args:
        model: a trained CRNN; when None, "pytorch-crnn.pth" is loaded.
            (The original used torch.load(...) directly as the default
            argument, which Python evaluates once at definition time —
            crashing at import when the checkpoint is missing.)
    """
    if model is None:
        model = torch.load("pytorch-crnn.pth")
    loss_func = torch.nn.CTCLoss(blank=0, reduction='mean')
    # Switch to evaluation mode (disables dropout/batch-norm updates).
    model.eval()

    test_n = 90
    count = 0
    with torch.no_grad():
        for i, (data, label) in enumerate(trainLoader):
            # `>=` so exactly test_n samples are scored; the original's `>`
            # ran test_n + 1 iterations yet still divided by test_n.
            if i >= test_n:
                break
            output = model(data.cuda())
            # Greedy decode: argmax over the class dimension per time step.
            pred_label = output.max(2)[1]
            # CTC lengths: the full time dimension for every batch item.
            input_lengths = torch.IntTensor([output.size(0)] * int(output.size(1)))
            target_lengths = torch.IntTensor([label.size(1)] * int(label.size(0)))
            targets = label.cuda()
            loss = loss_func(output.cpu(), targets.cpu(), input_lengths, target_lengths)

            pred_l = np.array(pred_label.cpu().squeeze()).tolist()
            label_l = np.array(targets.cpu().squeeze()).tolist()
            print(i, ":", loss, "pred:", toRES(toSTR(pred_l)), "label_l", toSTR(label_l))
            # Exact sequence match between collapsed prediction and label.
            if toRES(toSTR(pred_l)) == toSTR(label_l):
                count = count + 1
    accuracy = count / test_n
    print("Accuracy:", accuracy)


def train():
    """Train the CRNN with CTC loss on the module-level trainLoader.

    The DataLoader yields single samples; batches of `batch_size` are
    assembled manually by concatenating tensors (any leftover partial
    batch at the end of an epoch is dropped).  Saves the model to
    "pytorch-crnn.pth" every `printInterval` optimizer steps and writes
    loss curves to loss.jpg / total.jpg.

    Returns:
        The trained model.
    """
    model = crnn.CRNN(32, 1, n_class, 256)
    # Bug fix: the original tested `torch.cuda.is_available` (the bare
    # function object, which is always truthy) instead of calling it, so
    # .cuda() was attempted on CPU-only machines too.
    gpu = torch.cuda.is_available() and use_gpu
    if gpu:
        model.cuda()

    loss_func = torch.nn.CTCLoss(blank=0, reduction='mean')
    if gpu:
        # Hoisted out of the batch loop; it was re-moved every iteration.
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.999))

    lossTotal = 0.0
    k = 0  # optimizer steps taken so far
    printInterval = 100
    start_time = time.time()
    loss_list = []    # per-step loss, for loss.jpg
    total_list = []   # mean loss per printInterval window, for total.jpg
    for epoch in range(max_epoch):
        n = 0
        data_list = []
        label_list = []
        label_len = []
        for i, (data, label) in enumerate(trainLoader):
            # Accumulate single samples until a full batch is collected.
            data_list.append(data)
            label_list.append(label)
            label_len.append(label.size(1))
            n = n + 1
            if n % batch_size != 0:
                continue
            k = k + 1
            data = torch.cat(data_list, dim=0)
            data_list.clear()

            # Concatenate all targets into one 1-D tensor, as CTCLoss expects.
            label = torch.cat(label_list, dim=1).squeeze(0)
            label_list.clear()

            target_lengths = torch.tensor(np.array(label_len))
            label_len.clear()
            # Enable training mode (dropout/batch-norm).
            model.train()

            if gpu:
                data = data.cuda()

            output = model(data)
            # NOTE(review): CTCLoss expects log-probabilities; confirm the
            # model ends in log_softmax, otherwise use
            # output.log_softmax(2) here (as the original comment suggested).
            log_probs = output
            # One length per batch item: the full output time dimension.
            input_lengths = torch.IntTensor([output.size(0)] * int(output.size(1)))
            # Bug fix: the original called label.cuda() unconditionally here,
            # crashing on CPU-only machines; compute the loss on CPU, which
            # matches log_probs.cpu() in the original call.
            loss = loss_func(log_probs.cpu(), label.cpu(), input_lengths, target_lengths) / batch_size
            lossTotal += float(loss)
            print("epoch:", epoch + 1, "num:", i, "loss:", float(loss))
            loss_list.append(float(loss))
            if k % printInterval == 0:
                print("[%d/%d] [%d/%d] loss:%f" % (
                    epoch, max_epoch, i + 1, len(trainLoader), lossTotal / printInterval))
                total_list.append(lossTotal / printInterval)
                lossTotal = 0.0
                # Periodic checkpoint of the whole model object.
                torch.save(model, 'pytorch-crnn.pth')

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    # Measure training time before plotting (the original included the
    # matplotlib work in the reported duration).
    end_time = time.time()
    print("takes {}s".format((end_time - start_time)))

    plt.figure()
    plt.plot(loss_list)
    plt.savefig("loss.jpg")

    plt.clf()
    plt.figure()
    plt.plot(total_list)
    plt.savefig("total.jpg")
    return model


def recognition(imagepath):
    """Run the saved CRNN on a single image and return the decoded text.

    Args:
        imagepath: path of the plate image to recognize.

    Returns:
        The decoded character list from toRES(toSTR(...)).

    Raises:
        FileNotFoundError: when the model checkpoint is missing.  (The
        original fell through with `model` unbound, raising a confusing
        UnboundLocalError instead.)
    """
    modelpath = 'D:/PycharmProjects/yolo_v8/recognition/pytorch-crnn.pth'
    if not os.path.exists(modelpath):
        raise FileNotFoundError('model checkpoint not found: %s' % modelpath)
    print('Load model from "%s" ...' % modelpath)
    model = torch.load(modelpath)
    print('Done!')
    # Bug fix: call is_available(); the bare function object is always truthy.
    gpu = torch.cuda.is_available() and use_gpu
    if gpu:
        model.cuda()

    img = cv2.imread('{}'.format(imagepath), cv2.IMREAD_GRAYSCALE)
    # CRNN input is fixed at 100x32 (w, h), matching NewDataSet's sizing.
    transform = resizeAndNormalize((100, 32))
    # Add the batch dimension expected by the model.
    imageTensor = transform(img).unsqueeze(0)

    if gpu:
        imageTensor = imageTensor.cuda()
    model.eval()
    preds = model(imageTensor)
    # Greedy decode: argmax over the class dimension at every time step.
    preds = preds.max(2)[1]
    preds = np.array(preds.cpu().squeeze()).tolist()
    pred_text = toRES(toSTR(preds))
    print('predict == >', pred_text)
    return pred_text


if __name__ == '__main__':
    # Entry point: uncomment the stage to run.  val() loads
    # "pytorch-crnn.pth", so train() must have produced a checkpoint first.
    # train()
    val()
    # recognition()
