# coding: utf-8
# 2021/4/23 @ zengxiaonan

import logging

import numpy as np
import torch
import tqdm
from torch import nn
from torch.autograd import Variable
from sklearn import metrics
import matplotlib.pyplot as plt
import pandas as pd
from EduKTM import KTM
# Log file for training/validation metrics (appended across runs).
# NOTE(review): opened at import time and never closed — consider the
# `logging` module or a context manager instead.
f = open('printlog.txt', 'a')

# BUG FIX: fall back to CPU when CUDA is unavailable so the module is
# usable on CPU-only machines. NOTE(review): `device` is currently not
# used to move the model or data; everything runs on CPU — confirm intent.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class Net(nn.Module):
    """RNN that maps one-hot (question, correctness) sequences to
    per-question correctness probabilities.

    Input : (batch, seq_len, num_questions * 2) one-hot tensor, where the
            hot column index encodes question + num_questions * correctness
            (see ``process_raw_pred``'s ``%`` / ``//`` decoding).
    Output: (batch, seq_len, num_questions) probabilities in [0, 1].
    """

    def __init__(self, num_questions, hidden_size, num_layers):
        super(Net, self).__init__()
        self.hidden_dim = hidden_size
        self.layer_dim = num_layers
        self.rnn = nn.RNN(num_questions * 2, hidden_size, num_layers, batch_first=True)
        # NOTE(review): this GRU is never used in forward(); it is kept only
        # so checkpoints saved with it still load. Candidate for removal.
        self.gru = nn.GRU(num_questions * 2, hidden_size)
        self.fc = nn.Linear(self.hidden_dim, num_questions)

    def forward(self, x):
        # BUG FIX: allocate the initial hidden state on the input's device
        # and dtype (previously a CPU tensor via the deprecated
        # torch.autograd.Variable, which fails for CUDA inputs).
        h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim,
                         device=x.device, dtype=x.dtype)
        out, _ = self.rnn(x, h0)
        # Per-question sigmoid so outputs are usable with BCELoss.
        res = torch.sigmoid(self.fc(out))
        return res


def process_raw_pred(raw_question_matrix, raw_pred, num_questions: int) -> tuple:
    """Pair each step's prediction with the *next* interaction's outcome.

    The hot column index of each interaction encodes
    question + num_questions * correctness. The first interaction is
    dropped: the prediction made at step t is scored against the question
    asked (and its correctness) at step t + 1.

    Returns a (pred, truth) tuple of 1-D tensors of equal length.
    """
    hot_cols = torch.nonzero(raw_question_matrix)[1:, 1]
    asked = hot_cols % num_questions          # question id of each next step
    answered_right = hot_cols // num_questions  # 0/1 correctness label
    steps = asked.shape[0]
    # Select, for each of the first `steps` time steps, the predicted
    # probability of the question that was actually asked next.
    picked = raw_pred[:steps].gather(1, asked.view(-1, 1)).flatten()
    return picked, answered_right


class DKT(KTM):
    """Deep Knowledge Tracing model wrapping the RNN-based ``Net``.

    Metrics are printed to stdout and appended to the module-level log
    file handle ``f``.
    """

    def __init__(self, num_questions, hidden_size, num_layers):
        super(DKT, self).__init__()
        self.num_questions = num_questions
        self.dkt_model = Net(num_questions, hidden_size, num_layers)

    # NOTE(review): learning rate was changed from 0.002 to 0.01; it is
    # now taken from params['lr'].
    def train(self, params, train_data, vali_data=None) -> None:
        """Train the model, optionally validating each epoch.

        Parameters
        ----------
        params : dict
            Must contain 'lr' (learning rate) and 'max_iter' (epochs).
        train_data : iterable of torch.Tensor
            Batches of one-hot interaction tensors with shape
            (batch, seq_len, 2 * num_questions).
        vali_data : iterable of torch.Tensor, optional
            Validation batches; when given, the model with the best
            validation AUC is saved to 'dkt.params'.
        """
        loss_function = nn.BCELoss()
        optimizer = torch.optim.Adam(self.dkt_model.parameters(), lr=params['lr'])

        self.dkt_model.train()

        epochs = []
        valid_aucs = []
        train_aucs = []
        train_accs = []
        valid_accs = []
        max_valid_auc = 0
        epoch = params['max_iter']
        for idx in range(epoch):
            epochs.append(idx)

            y_pred, y_truth = torch.Tensor([]), torch.Tensor([])
            # Training: predictions are accumulated over the whole epoch
            # and a single optimizer step is taken on the combined loss.
            for batch in tqdm.tqdm(train_data, "Epoch %s" % idx):
                integrated_pred = self.dkt_model(batch)
                batch_size = batch.shape[0]
                for student in range(batch_size):
                    pred, truth = process_raw_pred(batch[student], integrated_pred[student], self.num_questions)
                    y_pred = torch.cat([y_pred, pred])
                    y_truth = torch.cat([y_truth, truth.float()])
            train_loss = loss_function(y_pred, y_truth)
            # back propagation
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

            # Metrics. roc_auc_score raises ValueError when only one
            # class is present in y_truth; report 0.0 in that case.
            train_auc = 0.0
            try:
                train_auc = metrics.roc_auc_score(y_truth.detach().numpy(), y_pred.detach().numpy())
            except ValueError:
                pass
            # Threshold probabilities at 0.5 for accuracy.
            y_pred[y_pred >= 0.5] = 1.0
            y_pred[y_pred < 0.5] = 0.0
            train_acc = metrics.accuracy_score(y_truth.detach().numpy(), y_pred.detach().numpy())
            train_accs.append(train_acc)

            train_aucs.append(train_auc)
            print('Epoch %d/%d, loss : %3.5f, auc : %3.5f, accuracy : %3.5f' %
                  (idx + 1, epoch, train_loss, train_auc, train_acc))
            print('Epoch %d/%d, loss : %3.5f, auc : %3.5f, accuracy : %3.5f' %
                  (idx + 1, epoch, train_loss, train_auc, train_acc), file=f)

            if vali_data is not None:
                valid_auc, valid_acc = self.eval(vali_data)
                # BUG FIX: eval() switches the network to eval mode;
                # restore training mode for the next epoch.
                self.dkt_model.train()
                if valid_auc > max_valid_auc:
                    print("valid auc improve: %.6f to %.6f" % (max_valid_auc, valid_auc))
                    print("valid auc improve: %.6f to %.6f" % (max_valid_auc, valid_auc), file=f)
                    max_valid_auc = valid_auc
                    self.save('dkt.params')
                valid_aucs.append(valid_auc)
                valid_accs.append(valid_acc)

        df = pd.DataFrame()
        df['DKT_train_auc'] = train_aucs
        # BUG FIX: only record/plot validation results when validation
        # actually ran; an empty list's length would mismatch and raise.
        if vali_data is not None:
            df['DKT_valid_auc'] = valid_aucs
        df.to_csv('auc_count_DKT.csv')

        plt.xlabel('epoch')
        plt.ylabel('auc')
        plt.title('Valid AUC Line(DKT)')
        plt.plot(epochs, train_aucs, linewidth=1, color="red", marker="o", label="Train AUC Line")
        if vali_data is not None:
            plt.plot(epochs, valid_aucs, linewidth=1, color="blue", marker="+", label="Valid AUC Line")
        plt.legend()
        plt.savefig('Valid AUC Line(DKT).jpg')
        plt.show()

    def eval(self, test_data) -> tuple:
        """Evaluate on ``test_data``; returns ``(auc, accuracy)``.

        Leaves the underlying network in eval mode.
        """
        self.dkt_model.eval()
        y_pred = torch.Tensor([])
        y_truth = torch.Tensor([])
        # No gradients are needed during evaluation.
        with torch.no_grad():
            for batch in tqdm.tqdm(test_data, "evaluating"):
                integrated_pred = self.dkt_model(batch)
                batch_size = batch.shape[0]
                for student in range(batch_size):
                    pred, truth = process_raw_pred(batch[student], integrated_pred[student], self.num_questions)
                    y_pred = torch.cat([y_pred, pred])
                    y_truth = torch.cat([y_truth, truth])
        auc = 0.0
        try:
            auc = metrics.roc_auc_score(y_truth.detach().numpy(), y_pred.detach().numpy())
        except ValueError:
            pass

        y_pred[y_pred >= 0.5] = 1.0
        y_pred[y_pred < 0.5] = 0.0
        acc = metrics.accuracy_score(y_truth.detach().numpy(), y_pred.detach().numpy())
        print('valid auc : %3.5f, valid accuracy : %3.5f' % (auc, acc))
        print('valid auc : %3.5f, valid accuracy : %3.5f' % (auc, acc), file=f)
        return auc, acc

    def save(self, filepath):
        """Persist the network's parameters to ``filepath``."""
        torch.save(self.dkt_model.state_dict(), filepath)
        logging.info("save parameters to %s" % filepath)

    def load(self, filepath):
        """Load network parameters previously written by ``save``."""
        self.dkt_model.load_state_dict(torch.load(filepath))
        logging.info("load parameters from %s" % filepath)
