import os

import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch import nn
from torch.utils.data import DataLoader

from verify_code.main import CodeDataset


class DigitsModel(nn.Module):
    """CRNN-style model for verification-code recognition.

    A small convolutional stack embeds the input image into a sequence of
    per-column feature vectors, an LSTM models that sequence, and a linear
    layer emits per-timestep character logits suitable for CTC decoding.
    """

    def __init__(self, char_kinds):
        """
        Args:
            char_kinds: number of output classes per timestep (character
                alphabet size plus the CTC blank — confirm with the caller).
        """
        super(DigitsModel, self).__init__()
        # Overall downsampling: height /4, width /2, so the width (CTC time
        # axis) stays reasonably long.
        self.embed = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=(2, 1), padding=(1, 1), bias=False),
            nn.ELU(),
            nn.Conv2d(16, 32, kernel_size=3, stride=(1, 1), padding=(1, 1), bias=False),
            nn.ELU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=(2, 2), padding=(1, 1), bias=False),
            nn.ELU(),
            nn.Conv2d(64, 32, kernel_size=3, stride=(1, 1), padding=(1, 1), bias=False),
            nn.ELU()
        )
        self.rnns = nn.ModuleList()
        # 416 = 32 channels * 13 rows for a 50x50 input; changing the input
        # size or the conv stack requires updating this constant.
        dim = 416
        hidden_size = 256
        self.rnns.append(nn.LSTM(input_size=dim, hidden_size=hidden_size))
        self.output_layer = nn.Linear(in_features=hidden_size, out_features=char_kinds)  # len(DIGITS_MAP) + 1

    def forward(self, features):
        """Return per-timestep logits of shape (w', n, char_kinds).

        Args:
            features: image batch of shape (n, 3, h, w) — the first conv
                layer expects 3 input channels.
        """
        embedding = self.embed(features)
        n, c, h, w = embedding.size()
        # Collapse channels and height into one feature dimension, then make
        # the (downsampled) width the time axis: (t, n, c*h).
        seq = embedding.view(n, c * h, w).permute(2, 0, 1)
        for rnn in self.rnns:
            seq, _ = rnn(seq)
        logits = self.output_layer(seq)
        return logits

class myctc_loss(nn.Module):
    """CTC loss over raw logits of shape (T, N, C).

    Fixes two defects of the original implementation:
    - the constructor was misspelled ``__int__`` and therefore never ran;
    - the batch size was read from a module-level global instead of the
      inputs, which crashed when used standalone and broke on a ragged
      final batch.
    """

    def __init__(self, blank=0):
        """
        Args:
            blank: index of the CTC blank symbol (default 0, as before).
        """
        super(myctc_loss, self).__init__()
        self.ctc = nn.CTCLoss(blank=blank)  # build once, reuse every call

    def forward(self, inputs, targets):
        """Compute the scalar CTC loss.

        Args:
            inputs: raw logits of shape (T, N, C); softmax is applied here.
            targets: padded label batch of shape (N, S); every row is
                assumed to be the full length S (no per-sample lengths).

        Returns:
            Scalar loss tensor.
        """
        log_probs = F.log_softmax(inputs, 2)
        # Derive lengths from the tensors themselves, never from globals.
        n = inputs.size(1)
        inputs_length = torch.full(
            size=(n,), fill_value=log_probs.size(0), dtype=torch.int32
        )
        targets_length = torch.full(
            size=(n,), fill_value=targets.size(1), dtype=torch.int32
        )
        return self.ctc(log_probs, targets, inputs_length, targets_length)


def run_eval(model, test_set, label_map):
    """Decode a test set with CTC beam search and print label/prediction pairs.

    Args:
        model: trained DigitsModel producing (t, n, classes) logits.
        test_set: DataLoader yielding (images, encoded_labels) batches.
        label_map: alphabet indexed by class id, as expected by
            ``fast_ctc_decode.beam_search``.
    """
    # Third-party decoder; hoisted out of the batch loop so it is imported once.
    from fast_ctc_decode import beam_search

    for data_batch, labels_batch in test_set:
        if torch.cuda.is_available():
            data_batch = data_batch.cuda()
        logits = model(data_batch)
        # (t, n, c) -> (n, t, c): beam_search wants one (t, c) matrix per sample.
        probs = F.softmax(torch.transpose(logits, 0, 1), dim=2).data.cpu()

        for sample_probs, encoded in zip(probs, labels_batch):
            seq, _path = beam_search(
                np.array(sample_probs), label_map,
                beam_size=5, beam_cut_threshold=0.001
            )
            # NOTE(review): relies on the module-level ``dataset`` global to
            # decode ground-truth labels — consider passing a mapping instead.
            print([dataset.num_to_char[i] for i in np.array(encoded).tolist()])
            print(seq)


def main():
    """Train the CTC digit recognizer, checkpointing and evaluating each epoch.

    Relies on module-level globals: ``epochs``, ``char_kinds``,
    ``train_dataloader``, ``test_dataloader``, ``ctc`` and ``DIGITS_MAP``.
    """
    model = DigitsModel(char_kinds)
    model = model.cuda() if torch.cuda.is_available() else model
    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    # Make sure the checkpoint directory exists before the first save;
    # torch.save does not create missing directories.
    os.makedirs("models", exist_ok=True)

    for e in range(epochs):
        epoch_loss = 0
        model.train()
        for data_batch, label_batch in train_dataloader:
            optimizer.zero_grad()
            if torch.cuda.is_available():
                data_batch = data_batch.cuda()
            logits = model(data_batch)
            loss = ctc(logits, label_batch)
            print(loss.item())  # per-batch training loss
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()

        if (e + 1) % 1 == 0:  # checkpoint and evaluate every epoch
            torch.save(model.state_dict(), "models/checkpoints.pt")
            model.eval()
            with torch.no_grad():
                run_eval(model, test_dataloader, DIGITS_MAP)


if __name__ == '__main__':
    # Training hyper-parameters.
    epochs = 100
    batch_size = 8

    my_transforms = transforms.Compose([
        transforms.ToPILImage(),  # Resize needs a PIL image; skipping this conversion raises an error
        transforms.Resize([50, 50]),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5,), std=(0.5, 0.5, 0.5,))
    ])

    # NOTE(review): hard-coded Windows paths — presumably train/test splits of
    # the 5-character verification-code images; confirm before reuse.
    dataset = CodeDataset(path=r'C:\Users\Administrator\Desktop\WangTing\five_numbers_verification_code',
                          transform=my_transforms)  # , transform=transforms.ToTensor()
    t_dataset = CodeDataset(path=r'C:\Users\Administrator\Desktop\WangTing\five_number',
                          transform=my_transforms)
    train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    test_dataloader = torch.utils.data.DataLoader(t_dataset, batch_size=4, shuffle=True, num_workers=4)
    DIGITS_MAP = dataset.characters  # alphabet used by the beam-search decoder
    char_kinds = dataset.myclass_len  # number of output classes (incl. CTC blank? — TODO confirm)
    ctc = myctc_loss()
    main()
