import torch as pt
import numpy as np
from python_ai.common.xcommon import *
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
import os
import sys
import glob
import re
import signal


def compile(device, model, criterion, optim, ALPHA, metric):
    """Bundle a model and its training components into a single dict.

    Note: `optim` is an optimizer *class* (e.g. pt.optim.SGD); it is
    instantiated here over the model's parameters with learning rate ALPHA.
    The returned dict is the `model_dict` consumed by process_data/fit.
    """
    return {
        'device': device,
        'model': model,
        'criterion': criterion,
        'metric': metric,
        'optim': optim(params=model.parameters(), lr=ALPHA),
    }


def accuracy(y, pred):
    """Fraction of samples whose argmax over `pred`'s class dim equals `y`."""
    predicted_classes = pred.argmax(dim=1)
    matches = predicted_classes.eq(y.long()).float()
    return matches.mean()


def process_data(model_dict, dl, is_train, label, epoch=0, writer=None):
    device = model_dict['device']
    model = model_dict['model']
    optim = model_dict['optim']
    criterion = model_dict['criterion']
    metric = model_dict['metric']
    len_dl = len(dl)
    GROUP = int(np.ceil(len_dl / 10))
    avg_loss = 0.
    avg_acc = 0.
    for i, (bx, by) in enumerate(dl):
        bx = bx.float().to(device)
        by = by.long().to(device)
        if is_train:
            model.train(True)
            optim.zero_grad()
            h = model(bx)
            loss = criterion(h, by)
            loss.backward()
            optim.step()
            acc = metric(by, h)
            model.train(False)
        else:
            model.train(False)
            h = model(bx)
            loss = criterion(h, by)
            acc = metric(by, h)
        if writer is not None:
            writer.add_scalar(f'{label}: loss', loss, epoch * len_dl + i)
            writer.add_scalar(f'{label}: acc', acc, epoch * len_dl + i)
        lossv = loss.detach().cpu().numpy()
        accv = acc.detach().cpu().numpy()
        avg_loss += lossv
        avg_acc += accv
        if i % GROUP == 0:
            print(f'{label}: epoch#{epoch + 1}: #{i + 1} loss = {lossv}, acc = {accv}')
    if i % GROUP != 0:
        print(f'{label}: epoch#{epoch + 1}: #{i + 1} loss = {lossv}, acc = {accv}')
    avg_loss /= i + 1
    avg_acc /= i + 1
    return avg_loss, avg_acc


def fit(model_dict, dl_train, dl_val, n_epochs, logdir=None, save_path=None, save_freq=1, base_epoch=0):
    """Train for `n_epochs`, running a validation pass after every epoch.

    Parameters
    ----------
    model_dict : dict produced by `compile`.
    dl_train, dl_val : training / validation dataloaders.
    n_epochs : number of epochs to run (epoch numbering starts at base_epoch).
    logdir : optional TensorBoard log directory; None disables logging.
    save_path : optional checkpoint path; the epoch number is inserted
        before the extension (e.g. "m.pt" -> "m.7.pt"). None disables saving.
    save_freq : save a checkpoint every `save_freq` epochs.
    base_epoch : offset for resuming a previous run.

    Returns
    -------
    (loss_his, acc_his, loss_his_val, acc_his_val) : per-epoch histories.

    While training, Ctrl-C saves the current weights and exits.
    """
    model = model_dict['model']
    epoch = base_epoch

    def save_model():
        # Writes <dir>/<stem>.<epoch+1><ext>; no-op when save_path is None.
        nonlocal epoch
        if save_path is None:
            return
        dir_name, base = os.path.split(save_path)  # renamed: `dir` shadowed a builtin
        stem, ext = os.path.splitext(base)
        the_path = os.path.join(dir_name, stem + '.' + str(epoch + 1) + ext)
        print(f'Saving to {the_path}')
        os.makedirs(dir_name, exist_ok=True)
        pt.save(model.state_dict(), the_path)
        print('Saved')

    def signal_handler(sig, frame):
        # On Ctrl-C: persist the current weights, then exit.
        save_model()
        sys.exit(0)

    # Fix: remember the previous SIGINT handler so it can be restored after
    # training (the original left the handler installed forever).
    prev_handler = signal.signal(signal.SIGINT, signal_handler)

    writer = SummaryWriter(logdir) if logdir is not None else None
    loss_his = []
    acc_his = []
    loss_his_val = []
    acc_his_val = []
    try:
        for epoch in range(n_epochs):
            epoch += base_epoch
            sep(epoch + 1)  # from xcommon star import — presumably prints a separator
            avg_loss, avg_acc = process_data(model_dict, dl_train, True, 'train', epoch, writer)
            if epoch % save_freq == 0:
                save_model()
            avg_loss_val, avg_acc_val = process_data(model_dict, dl_val, False, 'val', epoch, writer)
            loss_his.append(avg_loss)
            loss_his_val.append(avg_loss_val)
            acc_his.append(avg_acc)
            acc_his_val.append(avg_acc_val)
            print(
                f'epoch#{epoch + 1}: loss = {avg_loss} acc = {avg_acc}, loss_val = {avg_loss_val}, acc_val = {avg_acc_val}')
        save_model()
    finally:
        # Fix: release resources the original leaked.
        signal.signal(signal.SIGINT, prev_handler)
        if writer is not None:
            writer.close()

    return loss_his, acc_his, loss_his_val, acc_his_val


def evaluate(model_dict, dl_test):
    """Run one gradient-free pass over the test set and print its metrics."""
    sep('Test')
    loss_test, acc_test = process_data(model_dict, dl_test, False, 'test')
    print(f'Test loss = {loss_test}, acc = {acc_test}')


def trans_data(x, model, device, batch_size):
    """Push `x` through `model` in batches and return the stacked outputs.

    Parameters
    ----------
    x : array-like convertible by `pt.Tensor` (i.e. to float32).
    model : module run in eval mode; its per-batch outputs are concatenated
        along axis 0.
    device : device the batches are moved to before the forward pass.
    batch_size : batch size for the internal DataLoader.

    Returns
    -------
    np.ndarray of concatenated outputs (empty float32 array when `x` is empty).
    """
    x = pt.Tensor(x)
    dl_x = DataLoader(TensorDataset(x), batch_size, drop_last=False)

    net = model
    net.eval()
    # Fix: collect chunks and concatenate once at the end — the original
    # re-concatenated on every batch, copying O(n^2) data in total.
    chunks = []
    with pt.no_grad():  # inference only; skip autograd bookkeeping
        for (bx,) in dl_x:
            bx = bx.to(device)
            chunks.append(net(bx).detach().cpu().numpy())
    if chunks:
        x = np.concatenate(chunks, axis=0)
    else:
        # Fix: the original crashed on `None.shape` for empty input.
        x = np.empty((0,), dtype=np.float32)
    print('x:', x.shape)
    return x


# Extracts a trailing ".<epoch>" (optionally followed by one more extension,
# e.g. "model.12.pt" or "model.pt.12") from a checkpoint file name.
get_max_epoch_regex = re.compile(r'\.(\d+)(?:\.[^\.]+)?$')


def get_max_epoch(save_path):
    """Return the highest epoch number among files matching `save_path.*`.

    Returns 0 when no file with a parseable numeric epoch suffix exists.
    NOTE(review): `fit.save_model` writes names like "<stem>.<epoch><ext>",
    while this globs "<save_path>.*" — confirm the two naming schemes agree
    for the paths used in this project.
    """
    max_epoch = 0
    for file in glob.glob(save_path + '.*'):
        _, base = os.path.split(file)
        matcher = get_max_epoch_regex.search(base)
        # Fix: the original crashed with AttributeError on globbed files
        # whose names carry no numeric epoch suffix (matcher is None).
        if matcher is None:
            continue
        max_epoch = max(max_epoch, int(matcher.group(1)))
    return max_epoch
