from model.Model import Model
import torch
from torch import nn
import argparse
from torch.nn import DataParallel
from tqdm import tqdm
import numpy as np
import os
import json
from data.dataloader import DataLoader
from data.dataset import TextMatchDataset
import numpy as np
from process.process import Processor
from data.dataset import TextMatchDataset
import pandas as pd

# Command-line interface for the inference script.
parser = argparse.ArgumentParser()
parser.add_argument('--train_path',
                    type=str,
                    default="./gaiic_track3_round1_train_20210228.tsv",
                    dest="train_path",
                    help='the location of train data')

parser.add_argument('--test_path',
                    type=str,
                    default="./gaiic_track3_round1_testA_20210228.tsv",
                    dest="test_path",
                    help='the location of test data')

# Fix: help text previously read "the location of test data" (copy-paste error).
parser.add_argument('--checkpoint',
                    type=str,
                    default="./trained_model/checkpoint/ckpt_best_9219.pth",
                    dest="checkpoint",
                    help='the location of the trained model checkpoint')

parser.add_argument('--config_path',
                    type=str,
                    default="./config.json",
                    dest="config_path",
                    help='to write the model config parameters')

parser.add_argument('--batch_size',
                    type=int,
                    default=8,
                    dest="batch_size",
                    help='batch size')

parser.add_argument('--device_ids',
                    type=str,
                    default="0,1,2,3,4,5,6,7",
                    dest="device_ids",
                    help='device ids')

args = parser.parse_args()

# Loss histories; unused in this script but kept for interface compatibility
# (other modules may import and append to them).
test_loss_list = []
train_loss_list = []


def collate_fun(batch):
    """Collate a batch of (s1, s2) sequence pairs into two stacked tensors.

    Test-time collate: labels are intentionally absent from the batch items.
    The original built an unused ``labels`` list and looped by index; both
    removed.

    Args:
        batch: list of ``(s1, s2)`` pairs, each a fixed-length numeric
            sequence convertible by ``torch.Tensor`` (all items must share
            the same length so ``torch.stack`` succeeds).

    Returns:
        Tuple ``(s1, s2)`` of float tensors, each of shape
        ``(len(batch), seq_len)``.
    """
    s1 = torch.stack([torch.Tensor(first) for first, _ in batch])
    s2 = torch.stack([torch.Tensor(second) for _, second in batch])
    return s1, s2


def get_data(path, config):
    """Build the test-set DataLoader.

    Args:
        path: location of the test TSV file.
        config: parsed config dict; ``config["num_steps"]`` is the fixed
            sequence length used by the dataset.

    Returns:
        Tuple ``(test_data_loader, length)`` where ``length`` is the number
        of test samples.
    """
    test_processor = Processor(path, test=True)

    s1, s2, _ = test_processor.process()

    test_data_set = TextMatchDataset(x1=s1, x2=s2, label=[],
                                     num_steps=config["num_steps"], test=True)

    length = len(test_data_set)
    # BUG FIX: the test loader must NOT shuffle. Predictions are written to
    # result.csv in iteration order, so shuffling would break the
    # row-to-sample correspondence of the submission file.
    test_data_loader = DataLoader(test_data_set,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  test=True,
                                  collate_fn=collate_fun
                                  )

    return test_data_loader, length



def test(model, data, device, length):
    """Run inference over the test loader and write class-1 probabilities to
    ``prediction_result/result.csv`` (one probability per row, no header).

    Args:
        model: trained model (possibly DataParallel-wrapped) mapping
            ``(s1, s2)`` long tensors to per-class logits.
        data: project DataLoader; ``data.__next__()`` is expected to yield
            the ``(s1, s2)`` batches.
        device: torch device the input tensors are moved to.
        length: total number of test samples (rows of the output file).
    """
    lr_predictions = np.zeros((length, 2))
    scores = []
    with torch.no_grad():
        model.eval()
        # NOTE(review): relies on the custom DataLoader exposing the batches
        # via __next__() — confirm against data.dataloader.DataLoader.
        for s1, s2 in data.__next__():
            s1 = s1.to(device=device, dtype=torch.long)
            s2 = s2.to(device=device, dtype=torch.long)
            outs = model(s1, s2)
            prob = torch.nn.functional.softmax(outs, dim=-1).detach().cpu().numpy()
            scores.extend(prob)
    # Broadcasting add raises if the loader produced a row count different
    # from `length`, which doubles as a sanity check.
    lr_predictions += scores
    # BUG FIX: ensure the output directory exists; the original crashed with
    # FileNotFoundError when prediction_result/ was missing.
    os.makedirs("prediction_result", exist_ok=True)
    pd.DataFrame(lr_predictions[:, 1]).to_csv("prediction_result/result.csv",
                                              index=False, header=False)



def load_model(path, config, device):
    """Build the model from `config`, restore weights from the checkpoint at
    `path`, and move it to `device`.

    The checkpoint is loaded onto CPU first so it can be restored regardless
    of the device it was saved from; the weights dict lives under the "net"
    key.
    """
    checkpoint = torch.load(path, map_location="cpu")
    net = Model(**config)
    net.load_state_dict(checkpoint["net"])
    return net.to(device)
    
if __name__ == '__main__':
    # Read model hyper-parameters from the JSON config.
    with open(args.config_path, "r") as cfg_file:
        config = json.load(cfg_file)

    data_loader, length = get_data(args.test_path, config)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = load_model(args.checkpoint, config, device)

    # Fan inference out across all requested GPUs when more than one exists.
    if torch.cuda.device_count() > 1:
        ids = [int(device_id) for device_id in args.device_ids.split(",")]
        model = DataParallel(model, device_ids=ids)

    test(model, data_loader, device, length)