"""
端到端不定长验证码识别模型
"""

import os
import glob
import numpy as np
from tqdm import tqdm
import PIL.Image as Image
import matplotlib.pyplot as plt
from collections import deque

import torch
import torch.nn as nn
import torch.utils.data
from torchvision import transforms
from torchsummary import summary


class Config(object):
    """Singleton holding model/training configuration.

    Character set: digits 2-9 plus A-Z minus the easily-confused O —
    33 symbols in total (0, 1 and O are excluded).
    """
    charset = ['2', '3', '4', '5', '6', '7', '8', '9',
               'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
               'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    # Bidirectional lookup in one dict: char -> index and index -> char.
    classes = {c: i for i, c in enumerate(charset)}
    classes.update({i: c for i, c in enumerate(charset)})
    num_classes = len(charset)
    batch_size = 128
    num_epochs = 2
    nw = os.cpu_count()  # DataLoader worker count
    learning_rate = 0.001
    model_path = 'model/checkpoint/captcha.ctc-model.pth'
    device_cuda = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    device_cpu = torch.device('cpu')

    _instance = None

    def __new__(cls, *args, **kw):
        # BUG FIX: object.__new__ must be called with the class only —
        # forwarding *args/**kw raises TypeError on Python 3 when any
        # constructor arguments are supplied.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance


class CaptchaSet(torch.utils.data.Dataset):
    """Dataset of captcha images whose label is encoded in the file name.

    Expects files named ``<LABEL>.png`` (or ``<LABEL>.<anything>.png``);
    the label is the part of the base name before the first dot.
    """

    def __init__(self, root, transform):
        self.transform = transform
        self.images = glob.glob(os.path.join(root, "*.png"))

    def __getitem__(self, index):
        file = self.images[index]
        # BUG FIX: rstrip(".png") strips any trailing '.', 'p', 'n', 'g'
        # characters (a char set, not a suffix), which can silently corrupt
        # labels; os.path.splitext removes only the extension.
        name = os.path.splitext(os.path.basename(file))[0]
        labels = name.split('.')[0]
        labels = [Config.classes[c] for c in labels]
        image = Image.open(file)
        if self.transform:
            image = self.transform(image)
        return image, torch.IntTensor(labels)

    def __len__(self):
        return len(self.images)


def collate_fn(batch):
    """Collate (image, label) pairs into batch tensors for CTC training.

    Labels have variable length, so they are concatenated into one flat
    tensor accompanied by a per-sample length tensor.

    Returns:
        (images [B,C,H,W], flat labels [sum(lengths)], label lengths [B])
    """
    seq_lengths = torch.IntTensor([label.size(0) for _, label in batch])
    seq_tensor = torch.cat([label for _, label in batch])
    # torch.stack avoids the slow tensor -> numpy -> tensor round trip of
    # the original torch.tensor([img.numpy() ...]) construction.
    img_tensor = torch.stack([img for img, _ in batch])
    return img_tensor, seq_tensor, seq_lengths


class CaptchaLoader:
    """Singleton bundling the train/test DataLoaders and the image transform."""
    config = Config()
    batch_size = config.batch_size
    nw = config.nw

    # ImageNet mean/std normalization after ToTensor.
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    trainset = CaptchaSet('dataset/captcha/train', transform)
    trainloader = torch.utils.data.DataLoader(dataset=trainset, batch_size=batch_size, shuffle=True, num_workers=nw,
                                              collate_fn=collate_fn)

    testset = CaptchaSet('dataset/captcha/test', transform)
    testloader = torch.utils.data.DataLoader(dataset=testset, batch_size=batch_size, shuffle=True, num_workers=nw,
                                             collate_fn=collate_fn)

    @classmethod
    def get_trans_img(cls, x):
        """Transform a single PIL image and add a leading batch dimension."""
        return cls.transform(x).unsqueeze(0)

    _instance = None

    def __new__(cls, *args, **kw):
        # BUG FIX: forwarding *args/**kw to object.__new__ raises TypeError
        # on Python 3 when arguments are supplied; call with the class only.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance


def _make_convolutional(in_channels, out_channels, kernel_size, stride=1, padding=0):
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, (kernel_size, kernel_size),
                  (stride, stride), padding, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.LeakyReLU(inplace=True))


class ResidualBlock(nn.Module):
    """Bottleneck residual block: 1x1 squeeze -> 3x3 expand, plus a skip connection.

    Channel count is preserved; the intermediate layer halves it.
    Attribute names (conv1/bn1/conv2/bn2) are kept so saved state_dicts load.
    """

    def __init__(self, in_channels):
        super(ResidualBlock, self).__init__()
        mid = in_channels // 2
        self.conv1 = nn.Conv2d(in_channels, mid, (1, 1), stride=(1, 1), padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(mid)
        self.conv2 = nn.Conv2d(mid, in_channels, (3, 3), stride=(1, 1), padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        # Squeeze, activate, expand, then add the identity shortcut.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + x
        return self.relu(y)


class QLNet(nn.Module):
    """Convolutional feature extractor for the recognizer.

    Two strided conv stages interleaved with residual layers, followed by
    adaptive max-pooling that collapses height to 1 and fixes width to 15,
    yielding a 128-channel sequence of 15 time steps.
    """

    def __init__(self):
        super(QLNet, self).__init__()
        self.conv1 = _make_convolutional(3, 32, 3, 1, 1)
        self.conv2 = _make_convolutional(32, 64, 3, 2, 1)
        self.layer1 = self._make_layer(64, 1)
        self.conv3 = _make_convolutional(64, 128, 3, 2, 1)
        self.layer2 = self._make_layer(128, 2)
        # Force the output to a fixed (1, 15) spatial grid.
        self.adapt_max_pool2d = nn.AdaptiveMaxPool2d((1, 15))
        # Kaiming init tuned for the LeakyReLU activations used throughout.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(
                    module.weight, mode='fan_out', nonlinearity='leaky_relu')

    @staticmethod
    def _make_layer(in_channels, repeat_count):
        """Stack repeat_count residual blocks at a fixed channel width."""
        return nn.Sequential(*(ResidualBlock(in_channels) for _ in range(repeat_count)))

    def forward(self, x):
        out = x
        for stage in (self.conv1, self.conv2, self.layer1,
                      self.conv3, self.layer2, self.adapt_max_pool2d):
            out = stage(out)
        return out


class Model(nn.Module):
    """CRNN-style captcha recognizer: CNN features -> bi-GRU -> per-step log-probs.

    Attribute names are kept identical to the original so saved
    state_dicts continue to load.
    """

    def __init__(self, output_size, device=Config.device_cuda):
        super().__init__()
        self.device = device
        self.feature_extractor = QLNet()
        self.num_layers = 2
        self.hidden_size = 80
        self.gru = nn.GRU(input_size=128, hidden_size=self.hidden_size, num_layers=self.num_layers,
                          bidirectional=True, batch_first=True)
        self.fc = nn.Linear(self.hidden_size * 2, output_size)
        self.log_softmax = nn.LogSoftmax(2)

    def forward(self, x):
        """Return (log_probs [T, B, C], sequence lengths [B]) as CTCLoss expects."""
        features = self.feature_extractor(x)        # (B, 128, 1, T)
        seq = features.squeeze(2).permute(0, 2, 1)  # (B, T, 128)
        h0 = torch.zeros(
            (self.num_layers * 2, seq.size(0), self.hidden_size), device=self.device)
        rnn_out, _ = self.gru(seq, h0)
        logits = self.fc(rnn_out).permute(1, 0, 2)  # (T, B, C)
        # Every sample emits the full T time steps.
        lengths = torch.full(size=(logits.size(1),), fill_value=logits.size(0),
                             dtype=torch.long, device=self.device)
        return self.log_softmax(logits), lengths


def update_lr(optimizer, lr):
    """Set the learning rate of every parameter group in *optimizer* to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr


def decode_output(outputs):
    """Greedy CTC decode: argmax per step, merge repeats, drop the blank.

    outputs is the (T, B, C) log-prob tensor produced by the model;
    returns one decoded string per batch sample.
    """
    best = torch.argmax(outputs, dim=-1).permute(1, 0)  # (B, T)
    decoded = []
    for seq in best:
        seq = torch.unique_consecutive(seq)
        # Config.num_classes is the CTC blank index — filter it out.
        seq = seq[seq != Config.num_classes]
        decoded.append(''.join(Config.classes[idx.item()] for idx in seq))
    return decoded


def decode_labels(labels, label_length):
    """Split the flat label tensor back into one string per sample."""
    texts = []
    offset = 0
    for length in label_length:
        segment = labels[offset:offset + length]
        offset += length.item()
        texts.append(''.join(Config.classes[idx.item()] for idx in segment))
    return texts


def calculate_acc(output, target, target_length, call_label=False):
    """Compare greedy predictions against ground-truth labels.

    When call_label is True, return the (predictions, labels) string lists;
    otherwise return (number of exactly-matching samples, total samples).
    """
    preds = decode_output(output)
    labels = decode_labels(target, target_length)

    if call_label:
        return preds, labels
    matches = sum(1 for p, t in zip(preds, labels) if p == t)
    return matches, len(preds)


def predict(x):
    """Load the checkpoint on CPU and return the predicted string for one image."""
    model = Model(Config.num_classes+1, device=Config.device_cpu)
    print('load model from: ', Config.model_path)
    state = torch.load(Config.model_path, map_location=Config.device_cpu)
    model.load_state_dict(state)
    model.eval()

    batch = CaptchaLoader.get_trans_img(x)
    outputs, _ = model(batch)
    return decode_output(outputs)[0]


def train():
    """Train the CTC captcha model, resuming from a checkpoint if one exists.

    Runs Config.num_epochs epochs on CaptchaLoader.trainloader and saves the
    state_dict to Config.model_path after every epoch.
    """
    # Output layer has num_classes + 1 units: the extra one is the CTC blank.
    model = Model(Config.num_classes+1).to(Config.device_cuda)
    summary(model, (3, 60, 160))
    # The blank index is num_classes, matching decode_output's filtering.
    criterion = torch.nn.CTCLoss(blank=Config.num_classes)
    optimizer = torch.optim.Adam(model.parameters(), lr=Config.learning_rate)
    print(f'training on device: {Config.device_cuda}')
    model_path = Config.model_path
    if os.path.isfile(model_path):
        print('load model from: ', model_path)
        model.load_state_dict(torch.load(model_path))
        model.train()

    trainloader = CaptchaLoader.trainloader
    curr_lr = Config.learning_rate
    for epoch in range(Config.num_epochs):
        print(f'Epoch:{epoch + 1}/{Config.num_epochs}')
        pbar = tqdm(trainloader)
        # Rolling window of the last 10 batch losses shown in the progress bar.
        running_loss = deque(maxlen=10)
        for i, (images, labels, label_length) in enumerate(pbar):
            images, labels, label_length = images.to(Config.device_cuda), labels.to(
                Config.device_cuda), label_length.to(Config.device_cuda)
            outputs, output_length = model(images)
            loss = criterion(outputs, labels, output_length, label_length)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss.append(loss.item())
            corr, tot = calculate_acc(outputs, labels, label_length)
            pbar.set_description_str(
                f'loss:{np.around(np.nanmean(running_loss), 5)} acc:{np.around(corr/tot*100,2)}%')

        # Divide the learning rate by 3 every 20 epochs
        # (a no-op at the default num_epochs=2).
        if (epoch + 1) % 20 == 0:
            curr_lr /= 3
            update_lr(optimizer, curr_lr)

        # Checkpoint after every epoch, creating the directory on first use.
        print(f'save model to: {model_path}')
        if not os.path.isdir(os.path.dirname(model_path)):
            os.makedirs(os.path.dirname(model_path))
        torch.save(model.state_dict(), model_path)


def test():
    """Evaluate whole-sequence accuracy on the test set and print the result."""
    config = Config()
    model_path = config.model_path
    device = config.device_cuda
    model = Model(Config.num_classes+1, device=device).to(device)
    if os.path.isfile(model_path):
        print('load model from: ', model_path)
        model.load_state_dict(torch.load(model_path, map_location=device))

    model.eval()
    testloader = CaptchaLoader.testloader
    print('testloader size:', len(testloader))
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels, label_length in tqdm(testloader):
            # label_length stays on CPU; it is only used to slice labels.
            images, labels = images.to(device), labels.to(device)
            outputs, _ = model(images)
            corr, tot = calculate_acc(outputs, labels, label_length)
            correct += corr
            total += tot
        print(f'Test accuracy on {total} examples:{round(correct / total * 100, 2)}%')


def view():
    """Show a 4x4 grid of test images with predicted vs. true labels."""
    model_path = Config.model_path
    model = Model(Config.num_classes+1, device=Config.device_cpu)
    if os.path.isfile(model_path):
        print('load model from: ', model_path)
        model.load_state_dict(torch.load(model_path, map_location=Config.device_cpu))

    model.eval()
    testloader = CaptchaLoader.testloader
    images, labels, label_length = next(iter(testloader))
    with torch.no_grad():
        outputs, _ = model(images)
        preds, truthes = calculate_acc(outputs, labels, label_length, call_label=True)
    plt.figure()
    for i, image in enumerate(images.numpy()[:16, :, :]):
        plt.subplot(4, 4, i + 1)
        # NOTE(review): the loader normalized with ImageNet mean/std, so
        # multiplying by 255 does not undo that — displayed colors may be
        # off; confirm whether proper un-normalization is wanted here.
        plt.imshow(np.array(image.transpose(1, 2, 0)*255, dtype=np.uint8))
        plt.title(f'pred:{preds[i]},label:{truthes[i]}')
        plt.axis('off')
    plt.show()


def random_pred():
    """Pick a random training image, predict it, and display prediction vs. label."""
    from PIL import Image
    from random import choice
    files = os.listdir('dataset/captcha/train/')
    file_name = choice(files)
    # BUG FIX: rstrip('.png') strips any trailing '.', 'p', 'n', 'g'
    # characters (a char set, not a suffix); splitext removes only the
    # extension, then the label is the part before the first dot.
    label = os.path.splitext(file_name)[0].split('.')[0]
    path = os.path.join('dataset/captcha/train/', file_name)
    img = Image.open(path)
    pred = predict(img)
    plt.imshow(img)
    plt.axis('off')
    plt.title(f'pred:{pred}   label:{label}')
    plt.show()


if __name__ == '__main__':
    # train()  # uncomment to (re)train before evaluating
    test()
    view()
    random_pred()
