import datetime
import math
import time
import gensim
import pandas as pd
from math import sqrt
import os
from sklearn.metrics import mean_absolute_error  # mean absolute error (MAE)
import torch
from data.ASAG.semaclear import clean_str
import torch.nn as nn
import numpy as np
import random
from models.GCN import GCN
from Matchnetwork import MatchNetwork
from tqdm import tqdm, trange
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,SubsetRandomSampler,
                              TensorDataset)
from transformers import BertTokenizer, BertModel, RobertaModel, RobertaTokenizer
import stanfordnlp
# Stanford NLP pipeline; used below for POS tagging of the answers
# (fetch_data_and_graph reads its CoNLL output to collect nouns).
nlp = stanfordnlp.Pipeline()

# Pre-trained word2vec model — presumably trained on the ASAG corpus
# (path suggests so); used for noun-noun similarity edges.
model = gensim.models.Word2Vec.load('data/ASAG/wordvec_model')

def multiply(a, b):
    """Return the dot product of two equal-length numeric sequences.

    Elements of ``a`` and ``b`` are multiplied pairwise and summed.  The
    ``0.0`` start value keeps the historical behavior of always returning
    a float even for all-int inputs (and ``0.0`` for empty sequences).
    """
    return sum((x * y for x, y in zip(a, b)), 0.0)
def cal_pearson(x, y):
    """Return the Pearson correlation coefficient between sequences x and y.

    Uses the computational (single-pass sums) formula.  Fix: when either
    series is constant the denominator is 0 and the coefficient is
    undefined — the original raised ZeroDivisionError here; we now return
    0 instead, which is the conventional fallback during training.
    """
    n = len(x)
    # element sums
    sum_x = sum(x)
    sum_y = sum(y)
    # sum of pairwise products (dot product)
    sum_xy = sum(a * b for a, b in zip(x, y))
    # sums of squares
    sum_x2 = sum(pow(i, 2) for i in x)
    sum_y2 = sum(pow(j, 2) for j in y)
    # molecular = numerator, denominator per the computational formula
    molecular = sum_xy - (float(sum_x) * float(sum_y) / n)
    denominator = sqrt((sum_x2 - float(sum_x ** 2) / n) * (sum_y2 - float(sum_y ** 2) / n))
    if denominator == 0:
        # molecular is 0 whenever denominator is 0 (constant series);
        # multiplying preserves the input scalar type (numpy vs float),
        # so callers using .item() keep working.
        return molecular * 0.0
    return molecular / denominator



def collate(samples):
    """Collate a batch of (adj, sent1, sent2, label, adj_entity) samples.

    Transposes the list of per-sample tuples into per-field lists and
    wraps the labels in a tensor; the other fields stay as plain lists.
    """
    adjs, first_sents, second_sents, labels, entity_adjs = zip(*samples)
    return (
        list(adjs),
        list(first_sents),
        list(second_sents),
        torch.tensor(list(labels)),
        list(entity_adjs),
    )


class GDataset(object):
    """Paired-sentence graph dataset.

    Item ``i`` bundles its adjacency tensors, the two sentences stored
    at flat positions ``2*i`` and ``2*i + 1``, its label, and its entity
    adjacency tensors.
    """

    def __init__(self, Gset, Gsent, Glabels, Gentity):
        super(GDataset, self).__init__()
        self.Gset = Gset        # per-pair dependency adjacency tensors
        self.Gsent = Gsent      # flat sentence list, two entries per pair
        self.Glabels = Glabels  # one score per pair
        self.Gentity = Gentity  # per-pair entity adjacency tensors

    def __getitem__(self, idx):
        first_sent = self.Gsent[2 * idx]
        second_sent = self.Gsent[2 * idx + 1]
        return self.Gset[idx], first_sent, second_sent, self.Glabels[idx], self.Gentity[idx]

    def __len__(self):
        # One item per adjacency pair.
        return len(self.Gset)



def dependency_results_process(srcs, dsts, sentences, result):
    """Unpack one parsed-sentence record into parallel token/edge lists.

    Each row of ``result`` is a split CoNLL-style line; column 3 is the
    token text, column 2 the source index and column 4 the destination
    index.  The collected lists are appended to the caller's
    ``sentences``/``srcs``/``dsts`` accumulators (in-place).
    """
    tokens = []
    heads = []
    tails = []
    for row in result:
        tokens.append(row[3])
        heads.append(int(row[2]))
        try:
            tails.append(int(row[4]))
        except IndexError:
            # Malformed row (missing destination column): dump the whole
            # record for debugging and abort, as the parse is unusable.
            print(result)
            exit()
    sentences.append(tokens)
    srcs.append(heads)
    dsts.append(tails)

def load_depen(data_dir, dependency_results):
    """Read a tab-separated dependency-parse file into ``dependency_results``.

    Records (one per sentence, a list of split rows) are separated by blank
    lines.  A blank line hit while the current record is still empty
    (consecutive blank lines / empty parse) inserts a dummy root row so the
    downstream desired/student pairing stays aligned.

    Fix: a trailing record that is not followed by a final blank line is
    now flushed at EOF instead of being silently dropped.  Files that do
    end with a blank line behave exactly as before.
    """
    with open(data_dir) as fr:
        dependency_result = []
        for line in fr:
            if line == '\n':
                if dependency_result == []:
                    # Empty record: stand in a placeholder root row.
                    dependency_result.append(['root', '_ROOT', '0', 'None', '1'])
                else:
                    dependency_results.append(dependency_result)
                    dependency_result = []
            else:
                dependency_result.append(line.strip('\n').split('\t'))
        if dependency_result:
            # EOF without trailing blank line: keep the last record.
            dependency_results.append(dependency_result)



def fetch_data_and_graph(hyps, force_re_parsing_depen=False):
    """Build sentences, dependency adjacency tensors, labels and noun-similarity
    adjacency tensors for every (desired_answer, student_answer) pair.

    hyps must provide "input_data_list" (dependency-parse .tsv files) and
    "input_label_list" (matching comma-separated label files).
    force_re_parsing_depen is never read in this body.  # NOTE(review): dead parameter?

    Returns (sentences, adj, labels, len_list, adj_entity):
      sentences  - token lists, two consecutive entries per pair
      adj        - per pair, [adj1, adj2] tensors of shape (3, n+1, n+1)
      labels     - scores concatenated from the label files
      len_list   - number of pairs contributed by each input file
      adj_entity - per pair, [e1, e2] noun-similarity tensors, same shape
    """
    # dependency_file_list, label_file_list, edge_types, finetune_depen, use_glob_node,
    df = pd.read_csv('data/ASAG/mohler_dataset_edited.csv')
    # columns = ['Unnamed: 0', 'id', 'question', 'desired_answer', 'student_answer',
    # 'score_me', 'score_other', 'score_avg']

    # Get the student answers from dataset
    student_answers = df['student_answer'].to_list()
    desired_answers = df['desired_answer'].to_list()

    # Interleave the answers: even index = desired answer, odd index = the
    # matching student answer (same layout the dependency files use).
    entity_sentence = []
    for answer in range(len(student_answers)):
        entity_sentence.append(desired_answers[answer])
        entity_sentence.append(student_answers[answer])

    # Collect singular nouns ('NN') from each answer via the stanfordnlp
    # pipeline.
    # NOTE(review): sent_._conll_file is a private stanfordnlp attribute;
    # column 4 is assumed to be the XPOS tag and column 1 the token text —
    # confirm for the stanfordnlp version in use. Only the first sentence
    # of each answer is inspected.
    noun = []
    for item in range(len(entity_sentence)):
        sent_ = nlp(clean_str(entity_sentence[item]))
        noun_word = []
        for ind in range(len(sent_._conll_file.sents[0])):
            # if sent_._conll_file.sents[0][index][4] == 'NN' or sent_._conll_file.sents[0][index][4] == 'NNS':
            if sent_._conll_file.sents[0][ind][4] == 'NN':
                noun_word.append(sent_._conll_file.sents[0][ind][1])
        noun.append(noun_word)

    edge_types = 3   # channels: self-loop, src->dst, dst->src
    entity_type = 3  # channels: self-loop, noun-sim forward, noun-sim backward
    sentences = []
    labels = []
    adj = []
    adj_entity = []

    dependency_file_list = hyps["input_data_list"]
    label_file_list = hyps["input_label_list"]
    assert len(dependency_file_list) == len(label_file_list)

    print("Analysing dependency file...")

    len_list = []

    for idx in tqdm(range(len(dependency_file_list)), desc="Dependency files"):
        dependency_results = []
        srcs = []
        dsts = []

        load_depen(dependency_file_list[idx], dependency_results)

        labels_curr = np.loadtxt(label_file_list[idx], delimiter=',')
        labels = np.concatenate((labels, labels_curr))

        # Records come in (desired, student) pairs, so pair count is half.
        len_list.append(int(len(dependency_results) / 2))

        for i in range(int(len(dependency_results) / 2)):
            dependency_results_process(srcs, dsts, sentences, dependency_results[i * 2])
            dependency_results_process(srcs, dsts, sentences, dependency_results[i * 2 + 1])


        # NOTE(review): the loops below rebind `idx`, shadowing the outer
        # file index (harmless only because the outer `for` reassigns it
        # each iteration — worth renaming).
        for i in trange(int(len(dsts) / 2), desc="Iteration"):  # trange(i) equlvalent to tqdm(range(i))
            # g = dgl.DGLGraph()
            # The last destination index is taken as the token count.
            # NOTE(review): assumes parse rows are ordered so the final
            # dst equals the sentence length — confirm against the parser output.
            len1 = dsts[i * 2][-1]  # length of sentence 1
            len2 = dsts[i * 2 + 1][-1]  # length of sentence 2


            adj_curr1 = torch.zeros(edge_types, len1 + 1, len1 + 1)
            adj_curr2 = torch.zeros(edge_types, len2 + 1, len2 + 1)


            # channel 0: self-loops on every node (incl. root slot 0)
            for idx in range(len1 + 1):
                adj_curr1[0][idx][idx] = 1
            for idx in range(len2 + 1):
                adj_curr2[0][idx][idx] = 1


            # channel 1: directed dependency edges src -> dst
            for idx in range(len1):
                adj_curr1[1][srcs[i * 2][idx]][dsts[i * 2][idx]] = 1
            for idx in range(len2):
                adj_curr2[1][srcs[i * 2 + 1][idx]][dsts[i * 2 + 1][idx]] = 1


            # channel 2: reversed edges dst -> src
            for idx in range(len1):
                adj_curr1[2][dsts[i * 2][idx]][srcs[i * 2][idx]] = 1
            for idx in range(len2):
                adj_curr2[2][dsts[i * 2 + 1][idx]][srcs[i * 2 + 1][idx]] = 1




            adj_entity1 = torch.zeros(entity_type, len1 + 1, len1 + 1)
            adj_entity2 = torch.zeros(entity_type, len2 + 1, len2 + 1)

            for idx in range(len1 + 1):
                adj_entity1[0][idx][idx] = 1
            for idx in range(len2 + 1):
                adj_entity2[0][idx][idx] = 1




            # Pairwise word2vec similarity between nouns of the same answer.
            # NOTE(review): `noun` spans the whole CSV while `i` restarts at 0
            # for each dependency file — only aligned when there is a single
            # input file. Also a/b index the noun list, not token positions,
            # and model.similarity raises KeyError for out-of-vocabulary
            # nouns — confirm both are intended.
            for a in range(len(noun[i * 2])):
                for b in range(a + 1, len(noun[i * 2])):
                    adj_entity1[1][a][b] = torch.tensor(model.similarity(noun[i * 2][a], noun[i * 2][b]))
                    adj_entity1[2][b][a] = torch.tensor(model.similarity(noun[i * 2][a], noun[i * 2][b]))


            for a in range(len(noun[i * 2 + 1])):
                for b in range(a + 1, len(noun[i * 2 + 1])):
                    adj_entity2[1][a][b] = torch.tensor(model.similarity(noun[i * 2 + 1][a], noun[i * 2 + 1][b]))
                    adj_entity2[2][b][a] = torch.tensor(model.similarity(noun[i * 2 + 1][a], noun[i * 2 + 1][b]))


            adj_entity.append([adj_entity1,adj_entity2])


            adj.append([adj_curr1, adj_curr2])

    assert len(adj) == len(labels)

    return sentences, adj, labels, len_list,adj_entity




def main():
    """Train and evaluate the graph-matching ASAG model on the Mohler data.

    Sets up hyperparameters, loads sentences/graphs via fetch_data_and_graph,
    splits a single dataset into train/test subsets with samplers, then runs
    the training loop, printing MAE / RMSE / Pearson / MSE each epoch.
    """
    hyps = {}
    # NOTE(review): device is configured here, but the metric code below
    # calls .detach().numpy() directly (CPU tensors) — confirm MatchNetwork
    # actually moves the model/batches to this device.
    hyps["device"] = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")

    hyps["encoder_type"] = "bert"
    hyps["use_gnn"] = True

    hyps["attn_size"] = 512
    hyps["epochs"] = 50
    hyps["lr"] = 1e-4  # 5e-3, 1e-5, 2e-5, 1e-4, 2e-4, 5e-4, 1e-3
    hyps["batch_size"] = 32
    hyps["max_grad_norm"] = 1.0
    hyps["weight_decay"] = 0  # 1e-3
    hyps["adam_epsilon"] = 1e-8
    hyps["train_len"] = 2273
    # NOTE(review): 360000 appears to be an assumed corpus/step count used
    # for warmup and t_total — confirm where this constant comes from.
    hyps["warmup_steps"] = int((360000 / hyps["batch_size"]) / 10)
    hyps["dropout_inter"] = 0
    hyps["input_data_list"] = ['data/ASAG/train.tsv']
    hyps["input_label_list"] = ['data/ASAG/train_labels.txt']
    hyps["rec_result"] = True  # whether to record training and evaluation results
    hyps["mutual_link"] = "no_link"  # "co_attn", "same_word", "no_link"

    hyps["eval_on_hans"] = False  # whether to evaluate the model on HANS dataset
    hyps["eval_model_folder"] = "results/bert_co_attn/seed69/lr1e-04_202110271729"  # where to load the evaluated model

    seed = 69

    hyps["encoder_type"] = hyps["encoder_type"].strip()

    # NOTE(review): encoder_type is reassigned here, making the earlier
    # assignment and strip() redundant.
    hyps["encoder_type"] = "bert"
    hyps["embed_size"] = 768
    hyps["encoder_output_size"] = 768


    # GCN hyperparameters consumed by MatchNetwork / models.GCN.
    hyps["gnn"] = {}
    hyps["gnn"]["edge_types"] = 3
    hyps["gnn"]["gcn_layers"] = 3  # 3, 5
    hyps["gnn"]["in_features"] = hyps["encoder_output_size"]
    hyps["gnn"]["out_features"] = hyps["encoder_output_size"]
    hyps["gnn"]["gcn_dp"] = 0
    hyps["gnn"]["gcn_use_bn"] = True
    hyps["gnn"]["use_highway"] = False


    # Seed every RNG for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    sentences, adj, labels, len_list,adj_entity = fetch_data_and_graph(hyps)



    # One GDataset holds everything; sentences are stored flat (two per pair),
    # hence the train_len * 2 slice.
    trainset = GDataset(adj[:hyps["train_len"]], sentences[:hyps["train_len"] * 2], labels[:hyps["train_len"]], adj_entity[:hyps["train_len"]])
    test_split = 0.3
    dataset_size = len(trainset)
    indices = list(range(dataset_size))
    split = int(np.floor(test_split * dataset_size))
    shuffle_dataset = True
    if shuffle_dataset:
        np.random.seed(seed)
        np.random.shuffle(indices)
    # 70/30 train/test split over the shuffled index list.
    train_indices, test_indices = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_indices)
    test_sampler = SubsetRandomSampler(test_indices)

    # Both loaders wrap the same GDataset; the samplers disjointly partition
    # it, and shuffle must be False when a sampler is supplied.
    traindata_loader = DataLoader(trainset, batch_size=hyps["batch_size"], sampler=train_sampler, shuffle=False, collate_fn=collate)
    testdata_loader = DataLoader(trainset, batch_size=hyps["batch_size"], sampler=test_sampler, shuffle=False,collate_fn=collate)
    t_total = int(360000 / hyps["batch_size"]) * hyps["epochs"]

    network = MatchNetwork(hyps, t_total)

    assert (hyps["train_len"] == len_list[0])
    criterion = nn.MSELoss()



    print("Training Starts")
    total_step = len(traindata_loader)
    for epoch in tqdm(range(hyps["epochs"]), desc="Training Epochs"):
        loss_history = []
        PCCs_history = []
        MAE_history = []
        RMSE_history = []
        network.train(True)
        for iter, data_batch in enumerate(traindata_loader):
             network.optimizer_other.zero_grad()
             # Model emits a similarity in [-1, 1] and the gold score (0..5);
             # both are rescaled to [0, 1] before the MSE loss.
             similarity,score = network(data_batch)
             similarity = (similarity+1)/2
             score = score/5
             loss = criterion(similarity, score)
             # NOTE(review): needing requires_grad_() on the loss suggests
             # `similarity` is detached from the autograd graph upstream —
             # verify gradients actually reach the encoder/GNN.
             loss.requires_grad_()
             # Batch metrics; .detach().numpy() assumes CPU tensors.
             PCCs = cal_pearson(similarity.detach().numpy(), score.detach().numpy())
             MAE = mean_absolute_error(similarity.detach().numpy(), score.detach().numpy())
             RMSE = torch.sqrt(criterion(similarity, score))

             loss.backward()
             network.optimizer_other.step()
             # torch.nn.utils.clip_grad_norm_(network.parameters(), hyps["max_grad_norm"])
             # if hyps["encoder_type"] == "bert":
             #     network.optimizer_bert.step()
             # network.optimizer_other.step()
             # if hyps["encoder_type"] == "bert":
             #     network.scheduler_bert.step()
             #     network.optimizer_bert.zero_grad()
             # network.optimizer_other.zero_grad()

             # Record and report running averages every second batch.
             if (iter + 1) % 2 == 0:
                loss_history.append(loss.item())
                PCCs_history.append(PCCs.item())
                RMSE_history.append(RMSE.item())
                MAE_history.append(MAE.item())
                print('Epoch [{}/{}], Step [{}/{}], MAE: {:.4f}, RMSE: {:.4f}, PCCs: {:.4f},Loss: {:.4f}'.format(epoch + 1,
                                                                                                              hyps["epochs"],iter + 1,
                                                                                                              total_step,
                                                                                                              np.mean(MAE_history),
                                                                                                              np.mean(RMSE_history),
                                                                                                              np.mean(PCCs_history),
                                                                                                              np.mean(loss_history)))

        print('Training Loss: {:.4f}'.format(np.mean(loss_history)))

        # Evaluation over the held-out 30% at the end of every epoch.
        MSE_test = []
        PCCs_test = []
        MAE_test = []
        RMSE_test = []
        for iter, test_data_batch in enumerate(testdata_loader):
            network.eval()
            with torch.no_grad():
                similarity_score, test_score = network(test_data_batch)
                # Same rescaling as in training: similarity to [0,1], score /5.
                similarity_score = (similarity_score + 1) / 2
                test_score = test_score / 5

                test_loss = criterion(similarity_score, test_score)
                # loss.requires_grad_()
                test_PCCs = cal_pearson(similarity_score.detach().numpy(), test_score.detach().numpy())
                test_MAE = mean_absolute_error(similarity_score.detach().numpy(), test_score.detach().numpy())
                test_RMSE = torch.sqrt(criterion(similarity_score, test_score))
                MSE_test.append(test_loss)
                PCCs_test.append(test_PCCs)
                MAE_test.append(test_MAE)
                RMSE_test.append(test_RMSE)




        print('Test Set Size {},  Test_Loss {}, Test_PCCs {}, Test_MAE {},Test_RMSE {},'.format(len(test_indices),
                                                                                                 np.mean(MSE_test),
                                                                                                 np.mean(PCCs_test),
                                                                                                 np.mean(MAE_test),
                                                                                                 np.mean(RMSE_test)))



def changeTime(seconds):
    """Format a duration in seconds as a human-readable string.

    Examples: 30 -> "30 sec", 90 -> "1 min, 30 sec",
    3700 -> "1 hour, 1 min, 40 sec".  Recurses on the remainder for
    day/hour prefixes.  Hoisted to module level (it used to be defined
    inside the __main__ guard, after main() had already run) and the
    local `min` renamed so it no longer shadows the builtin.
    """
    day = 24 * 60 * 60
    hour = 60 * 60
    minute = 60
    if seconds < 60:
        return "%d sec" % math.ceil(seconds)
    elif seconds > day:
        days, day_rest = divmod(seconds, day)
        return "%d day, %s" % (int(days), changeTime(day_rest))
    elif seconds > hour:
        hours, hour_rest = divmod(seconds, hour)
        return "%d hour, %s" % (int(hours), changeTime(hour_rest))
    else:
        mins, secs = divmod(seconds, minute)
        return "%d min, %d sec" % (int(mins), math.ceil(secs))


if __name__ == "__main__":
    start = datetime.datetime.now()
    print("~~~~~~~~ Start Time: " + str(start))
    main()
    end = datetime.datetime.now()
    print("~~~~~~~~ End Time: " + str(end))
    # total_seconds() instead of .seconds: .seconds discards whole days,
    # so runs longer than 24h were reported wrong.
    print("~~~~~~~~ Finally Time Cost: " + changeTime((end - start).total_seconds()))