from model.Model import Model
import torch
from torch import nn
import argparse
from torch.nn import DataParallel
from tqdm import tqdm
import numpy as np
import os
import json
from data.dataloader import DataLoader
from data.dataset import TextMatchDataset
import numpy as np
from process.process import Processor
from data.dataset import TextMatchDataset

# Command-line interface for the training script.
parser = argparse.ArgumentParser()
parser.add_argument('--train_path', type=str, dest="train_path",
                    default="./gaiic_track3_round1_train_20210228.tsv",
                    help='the location of train data')
parser.add_argument('--test_path', type=str, dest="test_path",
                    default="./gaiic_track3_round1_testA_20210228.tsv",
                    help='the location of test data')
parser.add_argument('--config_path', type=str, dest="config_path",
                    default="./config.json",
                    help='to write the model config parameters')
parser.add_argument('--batch_size', type=int, default=512, dest="batch_size",
                    help='batch size')
parser.add_argument('--device_ids', type=str, default="0", dest="device_ids",
                    help='device ids')

args = parser.parse_args()

# Per-epoch loss histories, dumped to JSON files at the end of the run.
test_loss_list = []
train_loss_list = []


def collate_fun(batch):
    """Collate (sentence1, sentence2, label) samples into batch tensors.

    Returns a tuple ``(s1, s2, labels)`` of float tensors: ``s1``/``s2``
    are the stacked token sequences and ``labels`` has shape ``(batch,)``.
    """
    firsts, seconds, targets = zip(*batch)
    s1 = torch.stack([torch.Tensor(seq) for seq in firsts])
    s2 = torch.stack([torch.Tensor(seq) for seq in seconds])
    labels = torch.Tensor(targets)
    return s1, s2, labels


def get_data(train_path, config, batch_size=None):
    """Build the training DataLoader.

    Args:
        train_path: path to the raw training TSV file.
        config: hyper-parameter dict; only ``config["max_seq_len"]`` is
            read here.
        batch_size: optional override for the batch size.  Defaults to the
            command-line ``--batch_size`` value, so existing two-argument
            callers behave exactly as before.

    Returns:
        A shuffling DataLoader over the processed training pairs, batched
        with :func:`collate_fun`.
    """
    if batch_size is None:
        # Keep backward compatibility with the old implicit dependency on
        # the module-level argparse result.
        batch_size = args.batch_size

    train_processor = Processor(train_path)
    # NOTE(review): assumes Processor.process() returns two parallel token
    # sequences plus labels -- confirm against process/process.py.
    sentence1, sentence2, labels = train_processor.process()

    train_data_set = TextMatchDataset(x1=sentence1, x2=sentence2, label=labels,
                                      num_steps=config["max_seq_len"])
    train_data_loader = DataLoader(train_data_set,
                                   batch_size=batch_size,
                                   shuffle=True,
                                   collate_fn=collate_fun)
    return train_data_loader


def score(model, data, loss_fun, device):
    """Compute the sample-weighted average loss of ``model`` over ``data``.

    Args:
        model: the text-match model, called as ``model(s1, s2)``.
        data: the loader.  NOTE(review): iterated via ``data.__next__()``
            to match the project's custom DataLoader -- confirm this is the
            intended iteration protocol and that it restarts per call.
        loss_fun: criterion taking ``(logits, labels)``.
        device: torch device the batches are moved to.

    Returns:
        The mean per-sample loss as a plain Python float (0.0 when the
        loader yields no batches).  A plain float keeps the loss lists
        JSON-serializable; the old numpy scalar broke ``json.dump``.

    Side effect: leaves the model in eval mode; the training loop switches
    it back with ``model.train()``.
    """
    index = 0
    loss_total = 0.0
    model.eval()
    # Evaluation needs no autograd graph; no_grad saves memory and time.
    with torch.no_grad():
        for s1, s2, labels in data.__next__():
            bsz = s1.size(0)
            index += bsz
            s1 = s1.to(device=device, dtype=torch.long)
            s2 = s2.to(device=device, dtype=torch.long)
            labels = labels.to(device=device, dtype=torch.long)

            predict_y = model(s1, s2)
            loss = loss_fun(predict_y, labels)
            loss_total += loss.item() * bsz

    # Guard the empty-loader case instead of raising ZeroDivisionError.
    return loss_total / index if index else 0.0


def train(model, train_data, device):
    """Train the model, logging batch losses and checkpointing every 10 epochs.

    Args:
        model: the (possibly DataParallel-wrapped) text-match model.
        train_data: loader iterated via ``train_data.__next__()`` once per
            epoch -- NOTE(review): assumed to restart on each call, matching
            the project's custom DataLoader; confirm.
        device: torch device to run on.

    Side effects: appends the per-epoch train loss to the module-level
    ``train_loss_list`` and writes checkpoints under
    ``./trained_model/checkpoint/``.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-6)  # optimize all parameters
    loss_func = nn.CrossEntropyLoss()
    for epoch in range(10000):
        model.train()  # re-enable train mode (score() leaves the model in eval mode)
        index = 0
        for s1, s2, labels in train_data.__next__():
            s1 = s1.to(device=device, dtype=torch.long)
            s2 = s2.to(device=device, dtype=torch.long)
            labels = labels.to(device=device, dtype=torch.long)

            predict_y = model(s1, s2)
            loss = loss_func(predict_y, labels)

            index += 1
            if (index + 1) % 20 == 0:
                print(f"epoch {epoch} and iter {index} train loss", loss.item())

            # BUG FIX: gradients must be cleared on every step.  The old code
            # called zero_grad() only once per epoch, so each step applied
            # the accumulated sum of all previous batches' gradients.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        train_loss = score(model, train_data, loss_func, device)
        train_loss_list.append(train_loss)
        print(f"epoch {epoch} train loss", train_loss)

        if (epoch + 1) % 10 == 0:
            checkpoint = {
                "net": model.state_dict(),
                'optimizer': optimizer.state_dict(),
                "epoch": epoch
            }
            # makedirs creates the intermediate ./trained_model directory too;
            # os.mkdir on the nested path raised FileNotFoundError when it
            # was missing.
            os.makedirs("./trained_model/checkpoint", exist_ok=True)
            torch.save(checkpoint, './trained_model/checkpoint/ckpt_best_%s.pth' % (str(epoch)))


if __name__ == '__main__':
    # Load the model hyper-parameters produced by the config step.
    with open(args.config_path, "r") as file:
        config = json.load(file)

    train_data_loader = get_data(args.train_path, config)

    # Pick the first GPU when available; DataParallel below fans work out to
    # the devices listed in --device_ids.  (The original computed this twice;
    # the duplicate assignment was dead code.)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    model = Model(**config)
    model.to(device)
    if torch.cuda.device_count() > 1:
        model = DataParallel(
            model,
            device_ids=[int(device_id) for device_id in args.device_ids.split(",")]
        )
    train(model, train_data_loader, device)

    # Persist the loss curves for later inspection.  test_loss_list stays
    # empty unless the (currently commented-out) test evaluation is restored.
    with open("./test_loss.json", "w") as file:
        print(test_loss_list)
        json.dump(test_loss_list, file)

    with open("./train_loss.json", "w") as file:
        print(train_loss_list)
        json.dump(train_loss_list, file)
