import torch
from time import time
from os import path, walk
from random import shuffle
from PIL.Image import open
from torch.autograd import Variable
from numpy import argmax, asarray, float32
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Compose, Resize, Grayscale, ToTensor


class LPRDataLoader(Dataset):
    """Dataset of license-plate images and their per-character label indices.

    Each item is ``(image_tensor, label)`` where ``label`` is a list of
    indices into the module-level ``chars`` table, decoded from the file
    name (either ``<plate>.<ext>`` or ``<serial>-<plate>.<ext>``).
    """

    def __init__(self, img_dir, new=True):
        """Collect and shuffle all image paths under the given directories.

        :param img_dir: sequence of directory paths to scan recursively.
        :param new: if True, images are converted to grayscale.
        """
        self.img_paths = []
        for directory in img_dir:
            self.img_paths.extend(list_images(directory))
        shuffle(self.img_paths)
        self.img_size = (94, 24)  # (width, height); kept for external use
        self.new = new
        # Build the transform pipeline once instead of re-creating the
        # Compose object on every __getitem__ call.
        steps = [Resize((24, 94))]
        if new:
            steps.append(Grayscale())
        steps.append(ToTensor())
        self._transform = Compose(steps)

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, index):
        filename = self.img_paths[index]
        image = self._transform(open(filename))  # `open` here is PIL.Image.open
        img_name, _suffix = path.splitext(path.basename(filename))
        # The 2022 green-plate dataset has many duplicates, so files may be
        # named either "<plate>" or "<serial>-<plate>"; take the plate part.
        img_name = img_name.split("-")[-1]
        return image, [chars_dict[c] for c in img_name]


# Character table for Chinese license plates: 31 province abbreviations,
# digits 0-9, letters A-Z without I and O (easily confused with 1/0), and a
# trailing space — 66 entries in total.  Index 65 (the space) is used as the
# CTC blank symbol by valuation().
chars = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑', '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂',
         '湘', '粤', '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁', '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ' ']  # 66 characters
# Reverse lookup: character -> index, used to encode file-name labels.
chars_dict = {char: index for index, char in enumerate(chars)}


def valuation(_net, _val_data, _batch_size, _device):
    """Evaluate ``_net`` on ``_val_data`` with greedy CTC decoding.

    :param _net: network returning per-image score matrices shaped
        (batch, num_classes, time_steps) — assumed from the argmax axis below.
    :param _val_data: dataset compatible with ``collate_fn``.
    :param _batch_size: validation batch size.
    :param _device: torch device the net lives on.
    :return: ``(accuracy, fully_correct, length_errors, char_errors)``.
    """
    epoch_size = len(_val_data) // _batch_size
    if epoch_size == 0:
        print('验证集过少或batch_size过大！')
        # Keep the tuple shape consistent with the normal path so callers
        # that unpack the result don't crash (previously returned bare 0).
        return 0.0, 0, 0, 0
    batch_iterator = iter(DataLoader(_val_data, _batch_size, shuffle=True, num_workers=3, collate_fn=collate_fn))
    true, f_long, f_char = 0, 0, 0  # fully correct / wrong length / right length but wrong chars
    t0 = time()
    for _ in range(epoch_size):
        images, labels, lengths = next(batch_iterator)
        # Split the flat label tensor back into one target per image.
        start, targets = 0, []
        for length in lengths:
            targets.append(labels[start:start + length])
            start += length

        # Inference only: no_grad + .to(device) replaces the deprecated
        # Variable wrapper and the manual str(device)=='cpu' / .cuda() branch.
        with torch.no_grad():
            predicts = _net(images.to(_device)).cpu().numpy()
        pre_labels = []
        for i in range(predicts.shape[0]):
            predict = predicts[i, :, :]
            # Greedy CTC decode: collapse consecutive repeats, then drop
            # blanks (index 65).  The previous implementation compared each
            # frame against the already-collapsed output, so a run of three
            # identical symbols decoded to two characters; tracking the
            # previous *raw* frame fixes that.
            prev, decoded = 65, []
            for j in range(predict.shape[1]):
                cur = int(argmax(predict[:, j], axis=0))
                if cur != 65 and cur != prev:
                    decoded.append(cur)
                prev = cur
            pre_labels.append(decoded)
        for i, label in enumerate(pre_labels):
            if len(label) != len(targets[i]):
                f_long += 1
            elif (asarray(targets[i]) == asarray(label)).all():
                true += 1
            else:
                f_char += 1
    true_ = true / (true + f_long + f_char)
    t_, len_ = time() - t0, len(_val_data)
    print(f'验证正确率:{true_} 全部正确:{true} 长度错误:{f_long} 字符错误:{f_char} 验证总数:{(true + f_long + f_char)}')
    print(f'验证集数量:{len_} 耗时: {t_} 速度:{t_ / len_}s/张 FPS:{len_ / t_}')
    return true_, true, f_long, f_char


def collate_fn(_batch):
    """Collate (image, label) pairs into a batch for the DataLoader.

    Returns a stacked image tensor, one flat float32 tensor of all label
    indices concatenated in order, and the per-sample label lengths needed
    to split that flat tensor back apart.
    """
    images = [image for image, _ in _batch]
    lengths = [len(label) for _, label in _batch]
    flat = [code for _, label in _batch for code in label]
    labels = asarray(flat).flatten().astype(float32)
    return torch.stack(images, 0), torch.from_numpy(labels), lengths


def list_images(_base_path, _valid_exit=(".jpg", ".jpeg", ".png", ".bmp")):
    """Yield paths of all image files under ``_base_path``, recursively.

    :param _base_path: root directory to walk.
    :param _valid_exit: accepted file extensions (lower-case, with dot).

    The previous version shell-escaped spaces (``replace(" ", "\\\\ ")``),
    which corrupted the paths for ``Image.open`` — paths are now yielded
    verbatim so files with spaces in their names load correctly.
    """
    for root_dir, _dir_names, filenames in walk(_base_path):
        for filename in filenames:
            ext = path.splitext(filename)[1].lower()
            if ext.endswith(_valid_exit):
                yield path.join(root_dir, filename)


def sparse_tuple(t_length, lengths):
    """Build the (input_lengths, target_lengths) pair for the CTC loss.

    Every input sequence has the same length ``t_length``; the targets keep
    their individual lengths from ``lengths``.
    """
    target_lengths = tuple(lengths)
    input_lengths = (t_length,) * len(target_lengths)
    return input_lengths, target_lengths


def adjust_lr(optimizer, total_epoch, now_epoch, lr):
    """Linearly decay the learning rate from max(lr) to min(lr).

    If ``lr`` is a tuple, interpolate between its max and min according to
    the current epoch, write the value into every param group, and return
    it.  A scalar ``lr`` is returned unchanged (constant schedule).
    """
    if not isinstance(lr, tuple):
        return lr
    hi, lo = max(lr), min(lr)
    new_lr = hi - (hi - lo) / total_epoch * now_epoch
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr
