import os
import sys

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from dataset import SVHNDataset


class CNNWithLSTM(nn.Module):
    """CNN feature extractor feeding a bidirectional LSTM for CTC sequence
    recognition.

    Inputs are (batch, 3, 128, 256) images (see the Resize in main). The conv
    stack downsamples height by 32 and width by 8, producing a 32-step
    feature sequence for the LSTM; the head emits per-step log-probabilities
    over 11 classes (index 0 is the CTC blank, per the ctc_loss calls).
    """

    def __init__(self, seq_feature=512):
        super().__init__()
        self.seq_len = 32  # LSTM time steps = input width / 8
        self.seq_feature = seq_feature

        # Five double-conv stages. The first three pool both dimensions;
        # the last two pool only the height, keeping the width (time) at 32.
        plan = ((32, 2), (64, 2), (96, 2), (128, (2, 1)), (256, (2, 1)))
        modules = []
        in_ch = 3
        for out_ch, pool in plan:
            modules += [
                nn.Conv2d(in_ch, out_ch, 3, padding=1), nn.ReLU(),
                nn.BatchNorm2d(out_ch),
                nn.Conv2d(out_ch, out_ch, 3, padding=1), nn.ReLU(),
                nn.BatchNorm2d(out_ch),
                nn.MaxPool2d(kernel_size=pool),
                nn.Dropout(),
            ]
            in_ch = out_ch
        self.conv = nn.Sequential(*modules)

        # Each width step carries 256 channels x 4 rows = 1024 features;
        # project that down to the LSTM input size.
        self.fc0 = nn.Sequential(
            nn.Linear(in_features=1024, out_features=self.seq_feature), nn.ReLU(),
        )
        self.lstm = nn.LSTM(input_size=self.seq_feature, hidden_size=256,
                            batch_first=True, bidirectional=True)
        # 512 = 2 directions x 256 hidden units; 11 output classes.
        self.fc = nn.Linear(in_features=512, out_features=11)

    def forward(self, inputs):
        """Return per-step class log-probabilities, shape (batch, 32, 11)."""
        feat = self.conv(inputs)
        # Fuse channels and height into one 1024-dim vector per width step,
        # then move the width (time) axis to dim 1 for batch_first LSTM input.
        seq = feat.view(feat.size(0), 256 * 4, self.seq_len).permute(0, 2, 1)
        seq = self.fc0(seq)
        seq, _ = self.lstm(seq)
        return F.log_softmax(self.fc(seq), dim=2)

    def init_layers(self):
        """He-init the conv and fc0 weights, orthogonal-init the LSTM, and
        Xavier-init the classifier head."""
        for module in self.conv:
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight, nonlinearity='relu')
        for module in self.fc0:
            if isinstance(module, nn.Linear):
                nn.init.kaiming_uniform_(module.weight, nonlinearity='relu')
        for name in ('weight_ih_l0', 'weight_hh_l0',
                     'weight_ih_l0_reverse', 'weight_hh_l0_reverse'):
            nn.init.orthogonal_(getattr(self.lstm, name))
        nn.init.xavier_uniform_(self.fc.weight)


def remove_blank(seq):
    """Return a list with every CTC blank label (0) removed from `seq`."""
    kept = []
    for label in seq:
        if label != 0:
            kept.append(label)
    return kept


def decode_ctc(seq):
    """Collapse consecutive duplicate labels in `seq` (CTC many-to-one step)."""
    collapsed = []
    prev = object()  # sentinel: never equal to any label
    for label in seq:
        if label != prev:
            collapsed.append(label)
        prev = label
    return collapsed


def correct_sum(y: torch.Tensor, a: torch.Tensor):
    """Count batch samples whose decoded prediction exactly matches the target.

    A prediction is correct when its greedy argmax path, after collapsing
    repeats and stripping blanks, equals the target with blanks stripped
    (targets appear to be 0-padded — zeros are removed before comparison).

    Args:
        y: (batch, seq_len, classes) network outputs.
        a: (batch, target_len) label sequences.
    """
    preds = y.detach().cpu().numpy()
    targets = a.detach().cpu().numpy()
    matches = 0
    for pred, target in zip(preds, targets):
        decoded = decode_ctc(pred.argmax(axis=1))
        if remove_blank(decoded) == remove_blank(target):
            matches += 1
    return matches


def clip_gradient(model, clip_value):
    """Clamp every parameter gradient of `model` into [-clip_value, clip_value].

    Operates in place; parameters that have no gradient yet are skipped.
    """
    for p in model.parameters():
        if p.grad is not None:
            # Clamp the grad tensor directly; the old `.grad.data` access is
            # deprecated and bypasses autograd's version tracking.
            p.grad.clamp_(-clip_value, clip_value)


def validate(model, loader):
    """Evaluate `model` on `loader`, printing and returning CTC loss and accuracy.

    Args:
        model: network whose forward returns (batch, seq_len, classes)
            log-probabilities and which exposes a `seq_len` attribute.
        loader: yields (images, targets, target_lengths) batches.

    Returns:
        (mean per-sample CTC loss, fraction of exactly-decoded sequences).
    """
    model.eval()
    with torch.no_grad():
        loss_sum = 0
        sample_cnt = 0
        correct_cnt = 0
        for x, y, yl in tqdm(loader):
            bs = x.size(0)
            # Every sample yields a full-length output sequence. CTC input
            # lengths must be integral, so pin the dtype explicitly instead
            # of relying on torch.full's fill-value inference.
            in_len = torch.full((bs,), model.seq_len, dtype=torch.long).cuda()
            out = model(x.cuda())
            # ctc_loss expects (T, N, C); the model emits (N, T, C).
            loss = F.ctc_loss(out.permute(1, 0, 2), y.cuda(), in_len, yl.cuda(), blank=0,
                              reduction='sum',
                              zero_infinity=True)
            cc = correct_sum(out, y)
            loss_sum += loss.item()
            sample_cnt += bs
            correct_cnt += cc
        epoch_loss = loss_sum / sample_cnt
        epoch_acc = correct_cnt / sample_cnt
        print("validate loss: {}, correct: {}/{} ({})".format(epoch_loss,
                                                              correct_cnt, sample_cnt,
                                                              epoch_acc))
        return epoch_loss, epoch_acc


# Early-stopping patience: abort training after this many consecutive epochs
# with no improvement in validation loss or accuracy.
NO_IMPROVE_LIMIT = 100
BATCH_SIZE = 32
# Checkpoint path for the best-validation-accuracy model state_dict.
MODEL_PATH = os.path.join('data', 'model_state.pkl')


def main():
    """Train (or, if a checkpoint already exists, only evaluate) the model.

    Loads SVHN train/test splits; if MODEL_PATH exists, loads it and runs a
    single validation pass. Otherwise trains with Adam, logging per-epoch
    loss/accuracy to TensorBoard, checkpointing on best validation accuracy,
    and early-stopping after NO_IMPROVE_LIMIT epochs without improvement.
    """
    # Resize to the fixed geometry the model's forward() reshape expects.
    transforms = T.Compose([
        T.ToTensor(),
        T.Resize((128, 256)),
    ])
    num_workers = 4
    if sys.platform == 'win32':
        print("platform does not support fork, disable workers")
        num_workers = 0
    data_train = SVHNDataset(splits='train', transforms=transforms)
    data_valid = SVHNDataset(splits='test', transforms=transforms)
    train_loader = DataLoader(data_train, batch_size=BATCH_SIZE, shuffle=True, pin_memory=True, num_workers=num_workers)
    valid_loader = DataLoader(data_valid, batch_size=BATCH_SIZE, pin_memory=True, num_workers=num_workers)
    model = CNNWithLSTM().cuda()
    if os.path.exists(MODEL_PATH):
        # Evaluation-only path: never overwrite an existing checkpoint.
        print("Found trained model, running validation...")
        print("NOTE: if you want to train a new model, please delete {}".format(MODEL_PATH))
        model.load_state_dict(torch.load(MODEL_PATH))
        validate(model, valid_loader)
        return

    model.init_layers()
    optimizer = torch.optim.Adam(model.parameters())
    # optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

    # Separate writers so train/valid curves overlay under the same tags.
    sw_train = SummaryWriter('runs/train')
    sw_valid = SummaryWriter('runs/valid')

    best_val_acc = -1.0
    best_val_loss = 1000000.0
    no_improve = 0  # epochs since the last loss OR accuracy improvement
    for epoch in range(10000):
        model.train()
        loss_sum = 0
        sample_cnt = 0
        correct_cnt = 0
        for x, y, yl in tqdm(train_loader):
            bs = x.size(0)
            # Every sample produces a full-length (seq_len) output sequence.
            in_len = torch.full((bs,), model.seq_len).cuda()
            out = model(x.cuda())
            # ctc_loss expects (T, N, C); the model emits (N, T, C).
            # reduction='sum' keeps loss_sum an exact per-sample total.
            loss = F.ctc_loss(out.permute(1, 0, 2), y.cuda(), in_len, yl.cuda(), blank=0,
                              reduction='sum',
                              zero_infinity=True)
            optimizer.zero_grad()
            # Divide by batch size so the gradient matches a mean-reduced loss.
            (loss / bs).backward()
            # clip_gradient(model, 0.1)
            optimizer.step()
            cc = correct_sum(out, y)
            # print(loss.item() / bs, cc, sample_cnt)
            loss_sum += loss.item()
            sample_cnt += bs
            correct_cnt += cc
        tr_epoch_loss = loss_sum / sample_cnt
        tr_epoch_acc = correct_cnt / sample_cnt
        print("Epoch {}, train loss: {}, correct: {}/{} ({})".format(epoch + 1, tr_epoch_loss,
                                                                     correct_cnt, sample_cnt,
                                                                     tr_epoch_acc))
        va_epoch_loss, va_epoch_acc = validate(model, valid_loader)
        sw_train.add_scalar('loss', tr_epoch_loss, epoch)
        sw_valid.add_scalar('loss', va_epoch_loss, epoch)
        sw_train.add_scalar('acc', tr_epoch_acc, epoch)
        sw_valid.add_scalar('acc', va_epoch_acc, epoch)
        sw_train.flush()
        sw_valid.flush()
        # Assume no improvement this epoch; either check below may reset it.
        no_improve += 1
        if va_epoch_loss < best_val_loss:
            print("Got better loss, resetting stop counter")
            best_val_loss = va_epoch_loss
            no_improve = 0
        if va_epoch_acc > best_val_acc:
            # Only a new best accuracy triggers a checkpoint save.
            print("Got better acc, resetting stop counter")
            print("Saving to {} ...".format(MODEL_PATH))
            best_val_acc = va_epoch_acc
            no_improve = 0
            torch.save(model.state_dict(), MODEL_PATH)
        print("Current best loss: {} acc: {}".format(best_val_loss, best_val_acc))
        if no_improve > 0:
            print("No improvement in {} epochs".format(no_improve))
            if no_improve >= NO_IMPROVE_LIMIT:
                print("Reached limit, STOP")
                break
    sw_train.close()
    sw_valid.close()
    print("Best acc: {}".format(best_val_acc))


# Script entry point; keeps the module importable without side effects.
if __name__ == '__main__':
    main()
