# from Luyao.OIRT.dataGen import dataGen
# from Luyao.OIRT.dataSample import dataSample
# from Luyao.OIRT.logistic import logistic
# from Luyao.OIRT.EP import EP
# from Luyao.OIRT.VI import VI

# from VII import VI
# from dataSample import dataSample



import numpy as np
import pyspark
import pandas as pd 
import torch
import numpy as np
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import pandas as pd
from torch.autograd import Variable
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import os


#spark = SparkSession.builder.getOrCreate()

def OIRT(datafunc, solver, train_split, **kwargs):
    """Build and train an online IRT model.

    Parameters
    ----------
    datafunc : callable
        Data-handler factory (e.g. ``dataSample``), called as
        ``datafunc(*kwargs['dataParameters'])``.
    solver : str
        Solver name; only ``"VI"`` is currently implemented (the
        Logistic / EP / MCMC branches were removed — see VCS history).
    train_split : float
        Fraction of the event stream used for training.
    **kwargs
        ``dataParameters`` (required), ``verbose`` (default False), and
        ``params`` — the VI hyper-parameter list
        ``[lr, corr_threshold, prior_decay, kl_enhance, batch_size,
        device, ability_prior_means, item_prior_means]``.

    Returns
    -------
    The trained ``VI`` wrapper for solver ``"VI"``; ``None`` otherwise.
    """
    genParams = kwargs['dataParameters']
    verbose = kwargs.get("verbose", False)
    # Correlation against ground truth is only meaningful for synthetic
    # data (the commented-out dataGen path); real data has no known truth.
    needCorr = False

    if solver == "VI":
        # Build the default lazily: the original eagerly constructed the
        # default list (touching the module globals acc_p / acc_t) even
        # when the caller had already supplied 'params'.
        viParams = kwargs.get('params')
        if viParams is None:
            viParams = [0.0005, 0.002, 0.5, 7, 256, 'cuda:0', acc_p, acc_t]
        model = VI(datafunc, genParams, viParams, train_split, verbose, needCorr)
        print("************************ VI Training new ************************")
        model.train()
        print("************************ VI  Testing ************************")
        # model.test(needCorr=needCorr)

        return model



class ConcatDataset(torch.utils.data.Dataset):
    """Zip several datasets into one.

    Item ``i`` is the tuple of the i-th items of each wrapped dataset;
    the overall length is that of the shortest wrapped dataset.
    """

    def __init__(self, *datasets):
        self.datasets = datasets

    def __getitem__(self, index):
        items = []
        for dataset in self.datasets:
            items.append(dataset[index])
        return tuple(items)

    def __len__(self):
        lengths = [len(dataset) for dataset in self.datasets]
        return min(lengths)


class VI_OIRT(nn.Module):
    """Variational model for online IRT.

    Latent variables, each parameterized by a mean / log-variance
    embedding pair:

      * learning speed after success / failure  (nexam x ntopic)
      * examinee ability                        (nexam x 1)
      * item difficulty                         (nitem x 1)
      * a single global time effect             (1 x 1)

    ``decode`` combines (samples of) these into a Bernoulli response
    probability; ``elbo`` returns the negated evidence lower bound used
    as the training loss.
    """

    def __init__(self, nexam, nitem, ntopic):
        super().__init__()

        self.nexam = nexam
        self.nitem = nitem
        self.ntopic = ntopic
        # KL-annealing factor, decayed by anneal_effect after each outer
        # iteration (see update()); plain tensors, not learnable params.
        self.anneal = torch.tensor(1.0, requires_grad=False)
        self.anneal_effect = torch.tensor(0.95, requires_grad=False)
        # Extra weight on the ability/item KL terms (see elbo()).
        self.enhance = torch.tensor(1.0, requires_grad=False)

        # from_pretrained freezes the embedding, so gradients are
        # re-enabled explicitly; log-variances start at -2 (var ~ 0.135).
        self.learning_success_mu = nn.Embedding.from_pretrained(torch.zeros([self.nexam, self.ntopic]))
        self.learning_success_mu.requires_grad_(True)
        self.learning_success_logvar = nn.Embedding.from_pretrained(torch.zeros([self.nexam, self.ntopic]) -2)
        self.learning_success_logvar.requires_grad_(True)
        # self.learning_success_logvar = nn.Embedding(self.nexam, self.ntopic)

        self.learning_failure_mu = nn.Embedding.from_pretrained(torch.zeros([self.nexam, self.ntopic]))
        self.learning_failure_mu.requires_grad_(True)
        self.learning_failure_logvar = nn.Embedding.from_pretrained(torch.zeros([self.nexam, self.ntopic]) -2)
        self.learning_failure_logvar.requires_grad_(True)
        # self.learning_failure_logvar = nn.Embedding(self.nexam, self.ntopic)

        # Ability / difficulty use nn.Embedding's default random init.
        self.ability_mu = nn.Embedding(self.nexam, 1)
        self.ability_logvar = nn.Embedding(self.nexam, 1)

        self.item_mu = nn.Embedding(self.nitem, 1)
        self.item_logvar = nn.Embedding(self.nitem, 1)

        # Global time-effect coefficient, shared across all responses.
        self.time_mu = nn.Embedding.from_pretrained(torch.zeros([1, 1]))
        self.time_mu.requires_grad_(True)
        self.time_logvar = nn.Embedding.from_pretrained(torch.zeros([1, 1]))
        self.time_logvar.requires_grad_(True)

        self.apply(self.weights_init)

    def forward(self, ns, nf, persons, items, topic_token, times, t, train=1):
        """Sample the batch latents and decode response probabilities.

        Returns every sampled latent together with its variational mean
        and log-variance, plus the predicted response probability.
        """
        l_s, l_s_m, l_s_logvar, l_f, l_f_m, l_f_logvar, \
             abi, abi_m, abi_logvar, itm, itm_m, itm_logvar, time, time_m, time_logvar = self.encode(persons, items, t, train)
        response = self.decode(abi, itm, ns, nf, l_s, l_f, topic_token, times, time)

        return l_s, l_s_m, l_s_logvar, l_f, l_f_m, l_f_logvar, time, time_m, time_logvar, \
               abi, abi_m, abi_logvar, itm, itm_m, itm_logvar, response

    def encode(self, persons, items, t, train=1):
        """Look up variational parameters for the batch.

        With ``train == 1`` latents are sampled via the
        reparameterization trick; otherwise the posterior means are
        returned as the "samples" (deterministic evaluation).
        """
        if train == 1:
            l_s_m = self.learning_success_mu(persons)
            l_s_logvar = self.learning_success_logvar(persons)
            l_s = self.reparameterize_gaussian(l_s_m, l_s_logvar)

            l_f_m = self.learning_failure_mu(persons)
            l_f_logvar = self.learning_failure_logvar(persons)
            l_f = self.reparameterize_gaussian(l_f_m, l_f_logvar)

            abi_m = self.ability_mu(persons)
            abi_logvar = self.ability_logvar(persons)
            abi = self.reparameterize_gaussian(abi_m, abi_logvar)

            itm_m = self.item_mu(items)
            itm_logvar = self.item_logvar(items)
            itm = self.reparameterize_gaussian(itm_m, itm_logvar)

            time_m = self.time_mu(t)
            time_logvar = self.time_logvar(t)
            time = self.reparameterize_gaussian(time_m, time_logvar)

        else:
            l_s_m = self.learning_success_mu(persons)
            l_s_logvar = self.learning_success_logvar(persons)
            l_s = l_s_m

            l_f_m = self.learning_failure_mu(persons)
            l_f_logvar = self.learning_failure_logvar(persons)
            l_f = l_f_m

            abi_m = self.ability_mu(persons)
            abi_logvar = self.ability_logvar(persons)
            abi = abi_m

            itm_m = self.item_mu(items)
            itm_logvar = self.item_logvar(items)
            itm = itm_m

            time_m = self.time_mu(t)
            time_logvar = self.time_logvar(t)
            time = time_m

        return l_s, l_s_m, l_s_logvar, \
               l_f, l_f_m, l_f_logvar, \
               abi, abi_m, abi_logvar, \
               itm, itm_m, itm_logvar, \
               time, time_m, time_logvar

    def decode(self, abi, itm, ns, nf, l_s, l_f, topic_token, times, time):
        """Bernoulli response probability.

        logit = ability - difficulty
                + sum_t exp(l_s_t) * ns_t * topic_t
                + sum_t exp(l_f_t) * nf_t * topic_t
                + time_effect * elapsed_time
        (exp() keeps the learning-speed contributions positive.)
        """
        logit = abi - itm + torch.sum(torch.exp(l_s) * ns * topic_token, dim=1, keepdim=True) \
                + torch.sum(torch.exp(l_f) * nf * topic_token, dim=1, keepdim=True) + time * times
        response = torch.sigmoid(logit)
        return response

    def elbo(self, persons, items, response, y,
                   l_s_m, l_s_logvar, l_f_m, l_f_logvar,
                   abi_m, abi_logvar, itm_m, itm_logvar, time_m, time_logvar,
                   pri_l_s_m, pri_l_s_logvar, pri_l_f_m, pri_l_f_logvar,
                   pri_abi_m, pri_abi_logvar, pri_itm_m, pri_itm_logvar,
                   pri_time_m, pri_time_logvar):
        """Negative evidence lower bound for one batch.

        KL terms compare the current posterior against the (decayed)
        priors, indexed by the batch's persons / items.  The ability and
        item KLs are additionally scaled by ``enhance``; the whole KL
        sum is scaled by the annealing factor.

        Returns ``(-elbo, -log_likelihood)``.
        """
        kl_ability = self.kl_divergence(abi_m, abi_logvar, pri_abi_m[persons, ], pri_abi_logvar[persons, ]).sum()
        kl_item    = self.kl_divergence(itm_m, itm_logvar, pri_itm_m[items, ], pri_itm_logvar[items, ]).sum()
        kl_l_s = self.kl_divergence(l_s_m, l_s_logvar, pri_l_s_m[persons, ], pri_l_s_logvar[persons, ]).sum()
        kl_l_f = self.kl_divergence(l_f_m, l_f_logvar, pri_l_f_m[persons, ], pri_l_f_logvar[persons, ]).sum()
        kl_time = self.kl_divergence(time_m, time_logvar, pri_time_m, pri_time_logvar).sum()

        p_given_l_d_a = self.bernoulli_log_pdf(y, response).sum()
        elbo = p_given_l_d_a - self.anneal * (self.enhance * (kl_ability + kl_item) + kl_l_s + kl_l_f + kl_time)

        return -elbo, -p_given_l_d_a

    @staticmethod
    def weights_init(m):
        """Xavier-init Linear/Conv2d layers; effectively a no-op for this
        module, which only contains Embedding layers."""
        if isinstance(m, (nn.Linear, nn.Conv2d)):
            init.xavier_normal_(m.weight.data, gain=init.calculate_gain('relu'))
            init.constant_(m.bias.data, 0)

    @staticmethod
    def reparameterize_gaussian(mean, logvar):
        """Draw mean + std * eps with eps ~ N(0, I) (reparameterization trick)."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)

    @staticmethod
    def reparameterize_log_gaussian(log_mean, logvar):
        """Like reparameterize_gaussian but centred at exp(log_mean).

        NOTE(review): not referenced in the visible code of this file —
        confirm it is still needed.
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        mean = torch.exp(log_mean)
        return eps.mul(std).add_(mean)


    @staticmethod
    def kl_divergence(p_mu, p_logvar, q_mu, q_logvar):
        """Elementwise KL(p || q) for diagonal Gaussians, summed over dim 1."""
        kl_div = 0.5 * (-1 + q_logvar - p_logvar
                        + (p_logvar.exp() + (p_mu - q_mu).pow(2))
                        / q_logvar.exp())
        kl_div = torch.sum(kl_div, dim=1)
        return kl_div

    @staticmethod
    def log_kl_divergence(p_mu, p_logvar, q_mu, q_logvar):
        """KL variant whose mean term compares exp(p_mu) and exp(q_mu)
        (for log-parameterized means), summed over dim 1."""
        kl_div = 0.5 * (-1 + q_logvar - p_logvar
                        + (p_logvar.exp() + (p_mu.exp() - q_mu.exp()).pow(2))
                        / q_logvar.exp())
        kl_div = torch.sum(kl_div, dim=1)
        return kl_div

    @staticmethod
    def bernoulli_log_pdf(y, probs):
        """Log-probability of responses y under Bernoulli(probs)."""
        dist = torch.distributions.bernoulli.Bernoulli(probs=probs)
        log_prob = dist.log_prob(y)
        return log_prob

    def update(self):
        """Decay the KL-annealing factor by one step."""
        self.anneal = self.anneal * self.anneal_effect

    def reset(self, effect, enhance):
        """Reset annealing to 1.0 and install new decay / enhance factors."""
        self.anneal = torch.tensor(1.0, requires_grad=False)
        self.anneal_effect = torch.tensor(effect, requires_grad=False)
        self.enhance = torch.tensor(enhance, requires_grad=False)


class VI:
    """Online variational-inference trainer for the OIRT model.

    Streams sequential event chunks from the data handler, fits a
    ``VI_OIRT`` model on each chunk (warm-started by folding the
    posterior into the priors with exponential decay), and tracks
    per-student ability estimates with 90% confidence bounds over time,
    writing them to CSV / NPZ files as a side effect.
    """

    def __init__(self, datafunc, genParams, viParams, train_split, verbose, needCorr):
        """viParams layout: [lr, corr threshold, prior decay, KL enhance,
        batch size, device string, ability prior means, item prior means]."""
        self.dataHandler = datafunc(*genParams)
        self.train_split = train_split
        self.num_max = genParams[-1]
        self.verbose = verbose
        self.w = np.nan  # flattened parameter vector, filled in at end of train()

        self.device = torch.device(viParams[5])
        self.learning_rate = viParams[0]
        self.corr_thresh = viParams[1]
        self.decay_rate = torch.tensor(viParams[2]).to(self.device)
        self.enhance = viParams[3]
        self.batch = viParams[4]
        # Empirical prior means (per-person / per-item accuracy), frozen.
        self.ability_mu_pri =  torch.tensor(viParams[6], requires_grad=False).to(self.device)
        self.item_mu_pri =  torch.tensor(viParams[7], requires_grad=False).to(self.device)

        self.needCorr = needCorr

        # print("===========> new new new <===========")
        # NOTE(review): tracking buffers hard-coded to 60 time steps x
        # 6800 examinees — confirm these match the loaded data set.
        self.tmpdataA = np.zeros(shape=(60,6800))
        self.tmpdataU = np.zeros(shape=(60,6800))
        self.tmpdataL = np.zeros(shape=(60,6800))

        self.realA = np.zeros(shape=(60,6800))
        # Full response / question-topic matrices re-read from disk for
        # the per-step bookkeeping in train().
        self.y_matA = pd.read_csv("y_mat.csv").to_numpy()[:,1:].astype(np.float32)
        self.qt_matA = pd.read_csv("q_t_mat.csv").to_numpy()[:,1:].astype(np.float32)
        self.sumQuestions1 = 0
        self.sumQuestions2 = 0
        self.correctNum1 = 0
        self.errorNum1 = 0
        self.correctNum2 = 0
        self.errorNum2 = 0
        # print("===========> y_matA <===========")


    def train(self):
        """Stream chunks and fit the model online.

        For each chunk: build a fresh Adam optimizer, iterate until the
        correlation change drops below corr_thresh (or 400 iterations),
        fold the posterior into the priors with decay_rate, and append
        per-student ability / bound trajectories to the tracking buffers
        (dumped to CSV / NPZ every chunk).
        """
        print("===========> VI Initialize Begins <===========")
        # initialize parameters
        nexam, nitem, ntopic, total = self.dataHandler.getParams()
        # load model
        # model = VI_OIRT(nexam, nitem, ntopic)
        # if torch.cuda.device_count()>1:
        #     model = nn.DataParallel(model)
        # model.to(self.device)
        model = VI_OIRT(nexam, nitem, ntopic).to(self.device)

        # initialize the embedding weight
        # Priors: zero means (ability/item means replaced by the empirical
        # priors) and logvar 0 for ability/item, logvar -2 for learning speeds.
        with torch.no_grad():
            pri_l_s_m = model.learning_success_mu.weight * 0
            pri_l_f_m = model.learning_failure_mu.weight * 0
            pri_l_s_logvar = model.learning_success_logvar.weight * 0 - 2
            pri_l_f_logvar = model.learning_failure_logvar.weight * 0 - 2

            pri_abi_m = self.ability_mu_pri #model.ability_mu.weight * 0
            pri_itm_m = self.item_mu_pri #model.item_mu.weight * 0
            pri_abi_logvar = model.ability_logvar.weight * 0
            pri_itm_logvar = model.item_logvar.weight * 0

            pri_time_m = model.time_mu.weight * 0
            pri_time_logvar = model.time_logvar.weight * 0

        # init the correct value (only for synthetic data with known truth)
        if self.needCorr:
            true_abi = self.dataHandler.getAbility().reshape([nexam, ])
            true_itm = self.dataHandler.getDifficulty().reshape([nitem, ])
            true_ls, true_lf = self.dataHandler.getLearningSpeed()
            true_ls = true_ls.reshape([nexam * ntopic, ])
            true_lf = true_lf.reshape([nexam * ntopic, ])

        # begin a group
        print("===========> VI Reading Data & Training Begins <===========")

        T = 60  # number of tracked time steps (must match the 60 above)
        ts = 0
        self.AbilityTracked = np.zeros([T, nexam, ntopic])
        self.LowerBoundTracked = np.zeros([T, nexam, ntopic])
        self.UpperBoundTracked = np.zeros([T, nexam, ntopic])
        self.realATracked = np.zeros([T, nexam, ntopic])
        # self.ctt_acc = np.zeros([T, nexam, ntopic])
        self.N_S = np.zeros([T, nexam, ntopic])
        self.N_F = np.zeros([T, nexam, ntopic])
        self.allSelected = np.zeros([T, nexam, nitem])
        self.allSelectedTopic = np.zeros([T, nexam, ntopic])
        self.tmpAns = np.zeros(shape=(nexam,4)) # 0:correct 1:error 2:sum 3:t1 (step of last update)

        curr = 0
        while self.dataHandler.idx / total < self.train_split:
            curr += 1
            print("curr")
            print(curr)
            # Fresh optimizer per chunk (resets Adam moment estimates).
            optimizer = optim.Adam(model.parameters(), lr=self.learning_rate)
            # get data
            persons, ns, nf, items, topic_token, y, num_real_succ, times, n_s, n_f = self.dataHandler.generateSeq()
            t = np.array([0]*persons.shape[0])  # index 0 into the 1x1 time embedding
            concat_dataset = ConcatDataset(persons, items, ns, nf, topic_token, times, y, t)
            loader = torch.utils.data.DataLoader(concat_dataset, batch_size=self.batch, shuffle=False)

            # denote convergence
            old_corr = np.array([0., 0., 0., 0.])
            new_corr = np.array([1., 1., 1., 1.])

            # begin train
            model.train()
            iteration = 0
            # at least 10 iterations, at most 400, else stop on corr change
            while (np.sum(np.abs(old_corr - new_corr)) > self.corr_thresh or iteration < 10) and iteration <= 400:
                old_corr = new_corr.copy()
                iteration += 1
                # for each epoch
                for epoch, (b_persons, b_items, b_ns, b_nf, b_topic_token, b_times, b_y, b_t) in enumerate(loader):
                    # change to tensor
                    b_persons = b_persons.to(self.device)
                    b_items = b_items.to(self.device)
                    b_ns = b_ns.to(self.device)
                    b_nf = b_nf.to(self.device)
                    b_topic_token = b_topic_token.to(self.device)
                    b_times = b_times.to(self.device)
                    b_y = b_y.to(self.device)
                    b_t = b_t.to(self.device)

                    # zero grad
                    optimizer.zero_grad()

                    # train
                    l_s, l_s_m, l_s_logvar, l_f, l_f_m, l_f_logvar, time, time_m, time_logvar, \
                    abi, abi_m, abi_logvar, itm, itm_m, itm_logvar, response = model(b_ns, b_nf, b_persons, b_items,
                                                                                     b_topic_token, b_times, b_t)
                    # loss
                    loss, p_given_l_d_a = model.elbo(b_persons, b_items, response, b_y,
                                                     l_s_m, l_s_logvar, l_f_m, l_f_logvar,
                                                     abi_m, abi_logvar, itm_m, itm_logvar, time_m, time_logvar,
                                                     pri_l_s_m, pri_l_s_logvar, pri_l_f_m, pri_l_f_logvar,
                                                     pri_abi_m, pri_abi_logvar, pri_itm_m, pri_itm_logvar,
                                                     pri_time_m, pri_time_logvar)

                    # bp
                    loss.backward()
                    optimizer.step()

                # update (decay the KL-annealing factor)
                model.update()
                # calculate corr and see if convergence
                if self.needCorr:
                    if self.device.type == "cuda":
                        new_corr[0] = np.corrcoef(model.item_mu.weight.cpu().detach().numpy()[:, 0], true_itm)[0, 1]
                        new_corr[1] = np.corrcoef(model.ability_mu.weight.cpu().detach().numpy()[:, 0], true_abi)[0, 1]
                        if self.dataHandler.idx == y.shape[0]:
                            new_corr[2] = 0
                            new_corr[3] = 0
                        else:
                            tmp_c = np.corrcoef(
                                            model.learning_success_mu.weight.cpu().detach().numpy().reshape(true_ls.shape),
                                            true_ls)[0, 1]
                            new_corr[2] = 0 if np.isnan(tmp_c) else tmp_c
                            tmp_c = np.corrcoef(
                                            model.learning_failure_mu.weight.cpu().detach().numpy().reshape(true_lf.shape),
                                            true_lf)[0, 1]
                            new_corr[3] = 0 if np.isnan(tmp_c) else tmp_c
                    else:
                        new_corr[0] = np.corrcoef(model.item_mu.weight.detach().numpy()[:, 0], true_itm)[0, 1]
                        new_corr[1] = np.corrcoef(model.ability_mu.weight.detach().numpy()[:, 0], true_abi)[0, 1]
                        if self.dataHandler.idx == y.shape[0]:
                            new_corr[2] = 0
                            new_corr[3] = 0
                        else:
                            tmp_c = np.corrcoef(
                                            model.learning_success_mu.weight.detach().numpy().reshape(true_ls.shape),
                                            true_ls)[0, 1]
                            new_corr[2] = 0 if np.isnan(tmp_c) else tmp_c
                            tmp_c = np.corrcoef(
                                            model.learning_failure_mu.weight.detach().numpy().reshape(true_lf.shape),
                                            true_lf)[0, 1]
                            new_corr[3] = 0 if np.isnan(tmp_c) else tmp_c
                else:
                    # No ground truth: force |old - new| == 4 > threshold so
                    # only the iteration bounds control the loop.
                    new_corr = old_corr - 1
                # print
                # if self.verbose and self.needCorr:
                #     print("Iteration: {}, the correlation is {:.2f}, {:.2f}, {:.2f}, {:.2f}". format(iteration,
                #                                                                                      new_corr[0],
                #                                                                                      new_corr[1],
                #                                                                                      new_corr[2],
                #                                                                                      new_corr[3]))

            # update
            # Fold the fitted posterior into the priors with exponential
            # decay (online warm start for the next chunk).
            with torch.no_grad():
                pri_l_s_m = self.decay_rate * pri_l_s_m + (1 - self.decay_rate) * model.learning_success_mu.weight
                pri_l_f_m = self.decay_rate * pri_l_f_m + (1 - self.decay_rate) * model.learning_failure_mu.weight
                pri_l_s_logvar = self.decay_rate * pri_l_s_logvar + (
                                 1 - self.decay_rate) * model.learning_success_logvar.weight
                pri_l_f_logvar = self.decay_rate * pri_l_f_logvar + (
                                 1 - self.decay_rate) * model.learning_failure_logvar.weight

                pri_abi_m = self.decay_rate * pri_abi_m + (1 - self.decay_rate) * model.ability_mu.weight
                pri_itm_m = self.decay_rate * pri_itm_m + (1 - self.decay_rate) * model.item_mu.weight
                pri_abi_logvar = self.decay_rate * pri_abi_logvar + (1 - self.decay_rate) * model.ability_logvar.weight
                pri_itm_logvar = self.decay_rate * pri_itm_logvar + (1 - self.decay_rate) * model.item_logvar.weight

                pri_time_m = self.decay_rate * pri_time_m + (1 - self.decay_rate) * model.time_mu.weight
                pri_time_logvar = self.decay_rate * pri_time_logvar + (1 - self.decay_rate) * model.time_logvar.weight

            # reset annealing; enhance grows with the fraction of data seen
            model.reset(1.0, 1.0 + self.enhance * self.dataHandler.idx / total)

            # Min-max-normalize exp(learning speed) per topic column.
            scaler = MinMaxScaler() 
            ls =  scaler.fit_transform(np.exp(model.learning_success_mu.weight.cpu().detach().numpy()))
            lf =  scaler.fit_transform(np.exp(model.learning_failure_mu.weight.cpu().detach().numpy()))
            # print("===========> ls && lf <===========")

            self.abi = model.ability_mu.weight.cpu().detach().numpy()
            # NOTE(review): subtracts 2 from the learned logvars even though
            # they were initialized at -2 — confirm this offset is intended.
            self.lslv = model.learning_success_logvar.weight.cpu().detach().numpy() -2
            self.lflv = model.learning_failure_logvar.weight.cpu().detach().numpy() -2
            abilv = model.ability_logvar.weight.cpu().detach().numpy()


            if ts < T:
                self.N_S[ts] = n_s
                self.N_F[ts] = n_f
                # update self.allSelected
                for i in range(len(persons)): 
                    self.allSelected[ts][persons[i]][items[i]] = self.y_matA[persons[i]][items[i]]
                # convert to self.allSelectedTopic 
                topicNum = np.zeros(shape=(nexam, ntopic)) # total number of selected questions per (student, topic)
                for i in range(len(persons)): # iterate over each student and their selected questions
                    for j in range(self.qt_matA.shape[1]):
                        if self.qt_matA[items[i]][j] == 1: # this question belongs to topic j
                            topicNum[persons[i]][j] += 1
                            if self.allSelected[ts][persons[i]][items[i]] == 1:
                                self.allSelectedTopic[ts][persons[i]][j] += 1
                            break
                for i in range(len(persons)): 
                    for j in range(self.qt_matA.shape[1]):
                        if self.allSelectedTopic[ts][persons[i]][j] != 0:
                            self.allSelectedTopic[ts][persons[i]][j] /= topicNum[persons[i]][j]
                        elif topicNum[persons[i]][j] != 0: # all questions in the topic wrong -> mark topic as -1
                            self.allSelectedTopic[ts][persons[i]][j] = -1

                for p in persons:
                    # Ability estimate and 90% bounds (1.645 sigma); written
                    # forward from ts so later steps default to the latest value.
                    self.AbilityTracked[ts:, p, :] = ls[p,:]*n_s[p,:]/nitem + lf[p,:]*n_f[p,:]/nitem  #+ abi[p,:]
                    self.UpperBoundTracked[ts:, p, :] = ls[p,:]*n_s[p,:]/nitem +1.645*(np.exp(self.lslv[p,:])**0.5) + lf[p,:]*n_f[p,:]/nitem +1.645*(np.exp(self.lflv[p,:])**0.5) #+ abi[p,:]+1.645*(np.exp(abilv[p,:])**0.5)
                    self.LowerBoundTracked[ts:, p, :] = ls[p,:]*n_s[p,:]/nitem -1.645*(np.exp(self.lslv[p,:])**0.5) + lf[p,:]*n_f[p,:]/nitem -1.645*(np.exp(self.lflv[p,:])**0.5) #+ abi[p,:]-1.645*(np.exp(abilv[p,:])**0.5)
                    # print("===========> All result 222 <===========")
                    pAbility    = sum(self.AbilityTracked[ts][p])    / len(self.AbilityTracked[ts][p])
                    pUpperBound = sum(self.UpperBoundTracked[ts][p]) / len(self.UpperBoundTracked[ts][p])
                    pLowerBound = sum(self.LowerBoundTracked[ts][p]) / len(self.LowerBoundTracked[ts][p])
                    self.tmpdataA[ts][p] = pAbility
                    self.tmpdataU[ts][p] = pUpperBound
                    self.tmpdataL[ts][p] = pLowerBound


                    # print("===========> Real data <===========")
                    # Snapshot correct/error/total counts before this step.
                    self.correctNum1 = self.tmpAns[p][0]
                    self.errorNum1 = self.tmpAns[p][1]
                    self.sumQuestions1 = self.tmpAns[p][2]
                    noZero = 0
                    # update tmpAns
                    for q in range(ntopic):
                        if self.allSelectedTopic[ts][p][q] != 0:
                            noZero += 1
                        if self.allSelectedTopic[ts][p][q] == 1:
                            self.tmpAns[p][0] += 1
                        elif self.allSelectedTopic[ts][p][q] == -1:
                            self.tmpAns[p][1] += 1
                    self.tmpAns[p][2] += noZero # add all non-zero entries of allSelectedTopic at the current step

                    self.correctNum2 = self.tmpAns[p][0]
                    self.errorNum2 = self.tmpAns[p][1]
                    self.sumQuestions2 = self.tmpAns[p][2]
                    if self.sumQuestions1 == 0:
                        self.realA[ts][p] = 0
                    elif ts > 1:
                        print(ts)
                        print(self.tmpAns[p][3])
                        # Empirical ability change rate since the student's
                        # last-updated step tmpAns[p][3].
                        self.realATracked[ts, p, :] = (self.correctNum2 / self.sumQuestions2 - self.correctNum1 / self.sumQuestions1) / (ts - self.tmpAns[p][3]) * n_s[p,:] / nitem \
                                        - (self.errorNum2 / self.sumQuestions2 - self.errorNum1 / self.sumQuestions1) / (ts - self.tmpAns[p][3]) * n_f[p,:] / nitem
                        self.realA[ts][p] = sum(self.realATracked[ts][p]) / len(self.realATracked[ts][p])
                    print(self.realA[ts][p])
                for p in persons:
                    self.tmpAns[p][3] = ts

                ts += 1

            # Dump the (partially filled) tracking buffers every chunk.
            dataA = pd.DataFrame(self.tmpdataA)
            dataU = pd.DataFrame(self.tmpdataU)
            dataL = pd.DataFrame(self.tmpdataL)
            dataA.to_csv('tmpdataA.csv')
            dataU.to_csv('tmpdataU.csv')
            dataL.to_csv('tmpdataL.csv')

            realA = pd.DataFrame(self.realA)
            realA.to_csv('realA.csv')

            np.savez('allSelected.npz',self.allSelected)
            np.savez('allSelectedTopic.npz',self.allSelectedTopic)
            np.savez('N_S.npz',self.N_S)
            np.savez('N_F.npz',self.N_F)

            # print progress
            if self.verbose:
                print("Now processing: " + str(min([self.dataHandler.idx / total / self.train_split, 1]) * 100) + "%")

        # end
        print("===========> VI Solving Ends <===========")

        # Flatten all posterior means into one vector: [l_s | l_f | ability
        # | difficulty | time]; used by test() as a linear predictor.
        self.w = torch.hstack([model.learning_success_mu.weight.reshape(nexam*ntopic),
                               model.learning_failure_mu.weight.reshape(nexam*ntopic),
                               model.ability_mu.weight[:, 0],
                               model.item_mu.weight[:, 0],
                               model.time_mu.weight[:,0]])
        if self.device.type == "cuda":
            self.w = self.w.cpu().detach().numpy()
        else:
            self.w = self.w.detach().numpy()


    def test(self, needCorr=False):
        """Predict held-out responses with the flattened weight vector w.

        Builds sparse-style design rows batch by batch, scores them with
        w, and reports accuracy / AUC (plus correlations with ground
        truth when needCorr is True).
        """
        # generate the remaining data
        print("===========> VI Remaining Data Generating <===========")
        nexam, nitem, ntopic, total = self.dataHandler.getParams()
        # NOTE(review): dataSample.generateSeq returns 10 values (incl.
        # n_s, n_f); unpacking 8 here raises ValueError — confirm the
        # intended data handler before re-enabling this path.
        persons, ns, nf, items, topic_token, tmp_y, num_s, times = self.dataHandler.generateMask()

        if len(persons) == 0:
            print("!!!!! no data left !!!!!")
        else:
            # initialize idx
            idx = 0
            # initialize a holder for y
            r = np.zeros_like(tmp_y)

            while idx < persons.shape[0]:
                # init one batch of design rows: [x1=l_s block, x2=l_f block,
                # x3=person one-hot, x4=item (-1) one-hot, x5=time]
                x1 = np.zeros([self.batch, nexam * ntopic])
                x2 = np.zeros([self.batch, nexam * ntopic])
                x3 = np.zeros([self.batch, nexam])
                x4 = np.zeros([self.batch, nitem])
                x5 = np.zeros([self.batch, 1])

                upper_bound = min(persons.shape[0], idx + self.batch)
                for i in range(idx, upper_bound):
                    x1[i - idx, (persons[i] * ntopic):(persons[i] * ntopic + ntopic)] = ns[i, ] * topic_token[i, ]
                    x2[i - idx, (persons[i] * ntopic):(persons[i] * ntopic + ntopic)] = nf[i, ] * topic_token[i, ]
                    x3[i - idx, persons[i]] = 1
                    x4[i - idx, items[i]] = -1
                    x5[i - idx, 0] = times[i]

                # get x and r (raw linear scores; thresholded later)
                tmp_x = np.hstack((x1, x2, x3, x4, x5))
                tmp_r = tmp_x.dot(self.w.reshape(len(self.w), 1)) #> 0
                tmp_r = tmp_r.astype(np.float32)

                # update r
                r[idx:upper_bound, 0] = tmp_r[0: (upper_bound - idx), 0]

                # update idx
                idx = upper_bound

            # correlation calculations
            if needCorr:
                print("===========> VI Calculating Correlation Coef <===========")
                # true value
                a = self.dataHandler.getAbility()
                d = self.dataHandler.getDifficulty()
                l_s, l_f = self.dataHandler.getLearningSpeed()

                # get corr (w layout: [l_s | l_f | ability | difficulty | time])
                l_s_c = np.corrcoef(l_s.reshape(nexam * ntopic, ), self.w[0:nexam * ntopic])[0, 1]
                l_f_c = np.corrcoef(l_f.reshape(nexam * ntopic, ), self.w[nexam * ntopic:nexam * ntopic * 2])[0, 1]
                abi_c = np.corrcoef(a[:, 0], self.w[nexam * ntopic * 2:(nexam * ntopic * 2 + nexam)])[0, 1]
                dif_c = np.corrcoef(d[:, 0], self.w[(nexam * ntopic * 2 + nexam):len(self.w)])[0, 1]

                # print
                print("ability                  corr: " + str(abi_c))
                print("difficulty               corr: " + str(dif_c))
                print("learning rate of success corr: " + str(l_s_c))
                print("learning rate of failure corr: " + str(l_f_c))

            # print accuracy
            print("===========> VI Prediction Accuracy<===========")

            true_acc = num_s / len(persons) * 100
            pred_auc = metrics.roc_auc_score(tmp_y[:, 0], r[:, 0])
            r_ = r[:, 0] > 0
            pred_acc = np.sum([a == b for a, b in zip(r_.astype(np.float32), tmp_y[:, 0])]) / len(persons) * 100

            print("Correct rate: {:.2f}% and the Predicted rate: {:.2f}%, {:.5f}".format(true_acc, pred_acc, pred_auc))



class dataSample:
    """Streams (person, item) response events in priority order.

    Wraps a response matrix ``y_mat`` (0 for unanswered; answered
    entries are mapped to 0/1 correctness via ``y/2 + 0.5``, i.e. -1/+1
    coding), a question-topic indicator ``q_t_mat``, and per-response
    times ``u_qt_mat``.  Events are ordered by ``priority`` (ascending)
    and released in chunks of at most ``nmax`` by ``generateSeq``.
    """

    def __init__(self, priority, y_mat, q_t_mat, u_qt_mat, nmax=1000):
        # set input values
        self.nexam = y_mat.shape[0]     # number of examinees (people)
        self.nitem = y_mat.shape[1]     # number of questions
        self.ntopic = q_t_mat.shape[1]  # number of topics
        # self.u_q_mat = u_q_mat
        self.q_t_mat = q_t_mat
        self.y_mat = y_mat

        # positions of all answered (person, item) pairs
        xy_pos = np.where(y_mat != 0)
        x_pos = xy_pos[0]
        y_pos = xy_pos[1]

        # order the events by priority (ascending)
        sample = np.argsort(priority[x_pos, y_pos])
        self.persons = x_pos[sample]
        self.items = y_pos[sample]
        self.times = u_qt_mat[y_mat != 0][sample]

        # max number of (person, item) pairs in one sequence
        self.nmax = nmax
        self.idx = 0  # cursor into the ordered event stream

        # running success / failure counts; second axis is the topic
        self.n_s = np.zeros([self.nexam, self.ntopic])
        self.n_f = np.zeros([self.nexam, self.ntopic])

        # total number of answered events
        self.total = x_pos.shape[0]

    def getParams(self):
        """Return (nexam, nitem, ntopic, total)."""
        return self.nexam, self.nitem, self.ntopic, self.total

    def generateSeq(self, type='seq'):
        """Release the next chunk of events.

        With ``type='seq'`` a random-length chunk of at most ``nmax``
        events is released; any other value releases everything left.

        Returns a 10-tuple:
        (persons, ns/10, nf/10, items, topic_token, y, 0, times,
        n_s copy, n_f copy) where ns/nf are the pre-update per-person
        counts and n_s/n_f are the pre-update full count matrices.
        """
        # choose how far to advance the cursor
        if type != 'seq':
            num = self.total
        else:
            num = np.random.choice(np.arange(self.idx + 1, self.idx + self.nmax + 1), 1)
            num = min(num[0], self.total)

        # if nothing left, return an empty result with the SAME arity as
        # the normal path (the original returned only 7 items, which broke
        # any caller unpacking the usual 10 values)
        if self.idx == self.total:
            return [], [], [], [], [], [], 0, [], self.n_s * 1.0, self.n_f * 1.0
        else:
            # choose and output
            persons = self.persons[self.idx:num]
            items = self.items[self.idx:num]
            times = self.times[self.idx:num]

            # snapshot the full count matrices before updating them
            n_s = self.n_s * 1.0
            n_f = self.n_f * 1.0

            # get ns, nf (per-event rows of the pre-update counts)
            ns = self.n_s[persons, ]
            nf = self.n_f[persons, ]

            # get y: map -1/+1 correctness coding to 0/1
            y = self.y_mat[persons, items].reshape(persons.shape[0], 1) / 2 + 0.5

            # get topic_token (topic indicator row per event)
            topic_token = self.q_t_mat[items, ]

            # update the running success/failure counts
            for i in range(persons.shape[0]):
                self.n_s[persons[i], ] = self.n_s[persons[i], ] + topic_token[i, ] * y[i]
                self.n_f[persons[i], ] = self.n_f[persons[i], ] + topic_token[i, ] * (1 - y[i])

            # advance the cursor
            self.idx = num

            # normalize to a topic percentage per event
            # NOTE(review): divides by zero if a question has no topic;
            # the original nan-guard is kept commented out below.
            topic_token = topic_token / np.sum(topic_token, axis=1, keepdims=True)
            #topic_token[np.isnan(topic_token)] = 0
            return persons, ns/10, nf/10, items, topic_token, y, 0, times, n_s, n_f

    def generateMask(self):
        """Release all remaining events (non-random chunk size)."""
        return self.generateSeq(type='mask')





##### Riiid driver script: load the data matrices and launch VI training.
# Restrict the process to GPU 2 (the model addresses it as 'cuda:0').
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Question-topic indicator matrix; column 0 of every CSV is an index
# column written by pandas, hence the [:, 1:] slice.
q_t_mat = pd.read_csv("q_t_mat.csv")
q_t_mat = q_t_mat.to_numpy()[:,1:]
q_t_mat = q_t_mat.astype(np.float32).astype(np.int32)

# Response matrix: 0 = unanswered, -1/+1 = wrong/right.
y_mat = pd.read_csv("y_mat.csv")
y_mat = y_mat.to_numpy()[:,1:]
y_mat = y_mat.astype(np.float32)

# Event ordering for the streaming sampler.
priority = pd.read_csv("priority.csv")
priority = priority.to_numpy()[:,1:]
priority = priority.astype(np.float32).astype(np.int32)

# Per-response times, scaled to seconds (stored in milliseconds).
u_qt_mat = pd.read_csv("u_qt_mat.csv")
u_qt_mat = u_qt_mat.to_numpy()[:,1:]
u_qt_mat = u_qt_mat.astype(np.float32).astype(np.int32) /1000

# Reconstruct the same priority-ordered event stream as dataSample to
# compute empirical accuracies used as prior means.
xy_pos = np.where(y_mat != 0)
x_pos = xy_pos[0]
y_pos = xy_pos[1]

sample = np.argsort(priority[x_pos, y_pos])
persons = x_pos[sample]
items = y_pos[sample]
y = y_mat[persons, items].reshape(persons.shape[0], 1) / 2 + 0.5

# Per-person mean correctness -> ability prior means (column vector).
acc = []
for p in np.unique(persons):
    acc.append(np.mean(y[persons==p]))
acc_p = np.expand_dims(np.array(acc), 1)

# Per-item mean correctness -> item prior means (column vector).
acc = []
for i in np.unique(items):
    acc.append(np.mean(y[items==i]))
acc_t = np.expand_dims(np.array(acc), 1)


# Train on the first 80% of the event stream with the VI solver.
m = OIRT(dataSample, 'VI', 0.8, verbose=True, dataParameters=[priority, y_mat, q_t_mat, u_qt_mat, 10000], params=[0.0005, 0.0002, 0.5, 7, 256, 'cuda:0', acc_p, acc_t])
