import numpy as np
import pandas as pd
import itertools
from scipy import stats
from scipy import special


class RespAttr(object):
    """Simulate respondent (examinee) attribute patterns for cognitive
    diagnosis models.

    Provides generators for exhaustive attribute patterns, uniform random
    attributes, correlated attributes via a latent multivariate normal, and
    several higher-order (HO) logistic attribute models (dichotomous,
    global/graded, local/adjacent-category, continuation-ratio).
    """

    def __init__(self):
        # BUGFIX: was `super(object, self).__init__()`, which resolves past
        # `object` in the MRO and silently does nothing useful.
        super().__init__()

    def all_pattern(self, k, k_l=2):
        """Enumerate every possible attribute pattern.

        Args:
            k: number of attributes.
            k_l: number of levels per attribute (2 = dichotomous).

        Returns:
            ndarray of shape (k_l ** k, k) listing all patterns in
            lexicographic order.
        """
        # Cartesian product of the level set {0, ..., k_l-1} over k positions
        # (equivalent to the original build-rows-then-product construction).
        return np.array(list(itertools.product(range(k_l), repeat=k)))

    def uniform_attr(self, n, k, k_l=2):
        """Draw an (n, k) attribute matrix, each cell ~ Binomial(k_l - 1, 0.5)."""
        return np.random.binomial(k_l - 1, 0.5, n * k).reshape(n, k)

    def cor_attr(self, k, n, cor, k_l=2, loc=0, cut_point=None, input_cor=False):
        """Generate correlated discrete attributes.

        A latent continuous ability matrix (n, k) is drawn from a
        multivariate normal, then discretised at `cut_point`.

        Args:
            k: number of attributes.
            n: number of respondents.
            cor: pairwise correlation between attributes; used only when
                `input_cor is False`.
            k_l: number of attribute levels.
            loc: mean of the latent abilities.
            cut_point: cut points turning continuous abilities into discrete
                levels; 1-D (shared by all attributes) or 2-D of shape
                (k, k_l - 1). Defaults to equal-probability cuts.
            input_cor: False, or a user-supplied covariance matrix.

        Returns:
            ndarray (n, k) of discrete attribute levels.
        """
        means = np.repeat(loc, k)
        if input_cor is False:
            # BUGFIX: the off-diagonal was hard-coded to 0.6 (ignoring `cor`)
            # and the diagonal came out as 1.6 - cor; build a proper
            # correlation matrix: `cor` off the diagonal, 1 on it.
            cor_ = np.full((k, k), float(cor))
            np.fill_diagonal(cor_, 1.0)
        else:
            # Caller supplied the covariance matrix directly.
            cor_ = np.asarray(input_cor, dtype=float)
        n_attr = stats.multivariate_normal.rvs(means, cor_, n)
        # BUGFIX: `cut_point == None` misbehaves when an array is passed.
        if cut_point is None:
            # Equal-probability cuts. BUGFIX: the scale was `cor`; the latent
            # marginals have unit variance, so the ppf scale is 1.
            cut_point = [stats.norm.ppf(i / k_l, loc=loc, scale=1.0)
                         for i in range(1, k_l)]
        return self.__continuous_to_attr(n_attr, cut_point)

    def __continuous_to_attr(self, n_attr, cut_point):
        """Discretise continuous abilities at the given cut points.

        (Renamed from the typo'd `__cotinue_to_attri`; private, so no
        external callers are affected.)

        Args:
            n_attr: continuous ability matrix, shape (n, k).
            cut_point: 1-D cut points shared by every attribute, or a 2-D
                array (k, n_cuts) with one row of cut points per attribute.

        Returns:
            integer matrix of attribute levels, same shape as `n_attr`.
        """
        cut_point = np.asarray(cut_point, dtype=float)
        if cut_point.ndim == 1:
            # Pad with +/- infinity, then label each open interval with p.
            edges = np.concatenate(([-np.inf], cut_point, [np.inf]))
            levels = [((n_attr > edges[p]) & (n_attr < edges[p + 1])) * p
                      for p in range(len(edges) - 1)]
            return np.array(levels).sum(axis=0)
        # 2-D case: per-attribute cut points. BUGFIX: the original inserted
        # the +inf column at a wrong index and then compared whole rows of
        # the cut matrix against the full ability matrix; discretise each
        # attribute column against its own row of cut points instead.
        n_attr = np.asarray(n_attr)
        discret_attr = np.zeros(n_attr.shape, dtype=int)
        for j in range(cut_point.shape[0]):
            edges = np.concatenate(([-np.inf], cut_point[j], [np.inf]))
            for p in range(len(edges) - 1):
                mask = (n_attr[:, j] > edges[p]) & (n_attr[:, j] < edges[p + 1])
                discret_attr[mask, j] = p
        return discret_attr

    def ho_attr(self, theta, beta_list, delta, return_possibility=False):
        """Simulate dichotomous attributes from a higher-order logistic model.

        Args:
            theta: respondent abilities, iterable of length n.
            beta_list: attribute slopes.
            delta: attribute intercepts (difficulties).
            return_possibility: also return the probability matrix.

        Returns:
            (n, K) 0/1 attribute matrix; with `return_possibility=True`,
            a tuple (attributes, probabilities).
        """
        theta, beta_list = np.array(theta), np.array(beta_list)
        att_list = [self.__ho_attri_possi(t, beta_list, delta) for t in theta]
        att_possi = stats.logistic.cdf(att_list)
        # BUGFIX: the original routed each cell through pandas `applymap`
        # with `np.random.binomial(1, x, 1)`, yielding an object array of
        # 1-element ndarrays whose `.astype(int)` fails on current NumPy;
        # `binomial` broadcasts over the probability matrix directly.
        resp_att = np.random.binomial(1, att_possi)
        if return_possibility:
            return resp_att, att_possi
        return resp_att

    def __ho_attri_possi(self, theta, beta_list, delta):
        """Linear predictor theta·beta - delta for one respondent, shape (K,)."""
        att = np.outer(theta, beta_list)
        att = att.sum(axis=0) - delta
        return att.reshape(len(beta_list), )

    def resp_theta(self, n, loc=0, scale=1, cor=0.5, cnt=1):
        """Draw respondent abilities.

        Args:
            n: number of respondents.
            loc: mean ability.
            scale: standard deviation (univariate case only; the
                multivariate branch uses unit variances, as before).
            cor: pairwise correlation between the `cnt` ability dimensions.
            cnt: number of ability dimensions.

        Returns:
            ndarray (n,) when cnt == 1, else (n, cnt).

        Raises:
            ValueError: if cnt < 1 (previously an UnboundLocalError).
        """
        if cnt == 1:
            theta = stats.norm.rvs(loc=loc, scale=scale, size=n)
        elif cnt > 1:
            # Correlation matrix with unit diagonal and `cor` elsewhere —
            # equivalent to the original tril-based construction.
            cov = np.full((cnt, cnt), float(cor))
            np.fill_diagonal(cov, 1.0)
            means = [loc for _ in range(cnt)]
            theta = stats.multivariate_normal.rvs(mean=means, cov=cov, size=n)
        else:
            raise ValueError("cnt must be >= 1")
        return np.array(theta)

    def __poly_raw_theta_beta_mat(self, theta, beta, delta):
        """Per-respondent raw logit matrix theta·beta - delta, shape (L, K)."""
        att = np.outer(theta, beta)
        att = att.sum(axis=0).reshape(len(beta), -1) - delta
        return att

    def ho_global_logit(self, theta, beta, delta, return_possibility=False):
        """Simulate polytomous attributes under a graded-response
        ("global logit") higher-order model.

        Args:
            theta: respondent abilities, iterable of length n.
            beta: slope matrix; delta: intercept matrix (L rows, K columns).
            return_possibility: also return the category-probability tensor.

        Returns:
            (n, K) integer attribute matrix with levels in 0..L.
        """
        raw_matrix = [self.__poly_raw_theta_beta_mat(t, beta, delta)
                      for t in theta]
        raw_matrix = np.array(raw_matrix)
        possi_matrix_ = self.__global_raw_mat_to_possi_mat(raw_matrix)
        resp_att = self.__poly_possi_to_alpha(possi_matrix_)
        if return_possibility:
            return resp_att, possi_matrix_
        return resp_att

    def __global_raw_mat_to_possi_mat(self, raw_matrix):
        """Convert cumulative (graded-response) logits into category
        probabilities: P(level = l) = P(level >= l) - P(level >= l + 1)."""
        row_possi_mat = np.array(stats.logistic.cdf(raw_matrix))
        possi_matrix_ = np.zeros((raw_matrix.shape[0], raw_matrix.shape[1] + 1, raw_matrix.shape[2]))
        # Bracket the cumulative probabilities with P(>= 0) = 1 and P(> L) = 0.
        row_possi_mat = np.insert(row_possi_mat, 0, 1, axis=1)
        row_possi_mat = np.insert(row_possi_mat, row_possi_mat.shape[1], 0, axis=1)
        for j in range(row_possi_mat.shape[1] - 1):
            possi_matrix_[:, j, :] = row_possi_mat[:, j, :] - row_possi_mat[:, j + 1, :]
        return possi_matrix_

    def __poly_possi_to_alpha(self, possi_matrix_):
        """Sample one attribute level per (respondent, attribute) cell from
        the category-probability tensor of shape (n, L + 1, K)."""
        resp_att = []
        for resp in range(possi_matrix_.shape[0]):
            alpha_for_one = []
            for k in range(possi_matrix_.shape[2]):
                possi = possi_matrix_[resp, :, k]
                alpha = np.random.choice(range(possi_matrix_.shape[1]), p=possi.flatten())
                alpha_for_one.append(alpha)
            resp_att.append(alpha_for_one)
        return np.array(resp_att)

    def ho_local_logit(self, theta, beta, delta, return_possibility=False):
        """Simulate polytomous attributes under an adjacent-category
        ("local logit") higher-order model."""
        # Raw logits per respondent from slopes and intercepts.
        raw_matrix = [self.__poly_raw_theta_beta_mat(t, beta, delta)
                      for t in theta]
        raw_matrix = np.array(raw_matrix)
        # Category probabilities from the adjacent-category formula.
        possi_mat = self.__local_raw_mat_to_possi_mat(raw_matrix)
        # Draw the simulated attribute levels from those probabilities.
        resp_att = self.__poly_possi_to_alpha(possi_mat)
        if return_possibility:
            return resp_att, possi_mat
        return resp_att

    def __local_raw_mat_to_possi_mat(self, raw_mat):
        """Adjacent-category probabilities: P(level = l) proportional to
        exp(sum of the first l raw logits), normalised per respondent."""
        # Prepend a zero logit so level 0 gets weight exp(0) = 1.
        raw_mat = np.insert(raw_mat, 0, 0, axis=1)
        possi_list = np.zeros_like(raw_mat)
        for resp in range(raw_mat.shape[0]):
            resp_alpha = []
            for l in range(raw_mat.shape[1]):
                resp_alpha.append(np.exp(raw_mat[resp][:l + 1].sum(axis=0)))
            sum_ = np.nansum(resp_alpha, axis=0)
            possi_list[resp] = np.array(resp_alpha) / sum_
        return possi_list

    def continuation_ratio_logit(self, theta, beta, delta):
        """Simulate polytomous attributes under a continuation-ratio
        higher-order model."""
        # Raw logits per respondent from slopes and intercepts.
        raw_matrix = [self.__poly_raw_theta_beta_mat(t, beta, delta)
                      for t in theta]
        raw_matrix = np.array(raw_matrix)
        # Category probabilities from the continuation-ratio formula.
        possi_mat = self.__continuation_ratio_raw_mat_to_possi_mat(raw_matrix)
        # Draw the simulated attribute levels from those probabilities.
        return self.__poly_possi_to_alpha(possi_mat)

    def __continuation_ratio_raw_mat_to_possi_mat(self, raw_matrix):
        """Continuation-ratio probabilities: P(level = l) = (product of the
        first l step-success probabilities) * (1 - next step probability)."""
        raw_matrix = stats.logistic.cdf(raw_matrix)
        possi_matrix_ = np.zeros((raw_matrix.shape[0], raw_matrix.shape[1] + 1, raw_matrix.shape[2]))
        # Bracket with a guaranteed first success (1) and final failure (0).
        raw_matrix = np.insert(raw_matrix, 0, 1, axis=1)
        raw_matrix = np.insert(raw_matrix, raw_matrix.shape[1], 0, axis=1)
        for l in range(raw_matrix.shape[1] - 1):
            correct_t = np.nanprod(raw_matrix[:, :l + 1, :], axis=1)
            error_tplus1 = raw_matrix[:, l + 1, :]
            possi_matrix_[:, l, :] = correct_t * (1 - error_tplus1)
        return possi_matrix_


class ItemPara(object):
    """Simulate item parameters (slip/guess pairs and beta coefficients)
    for cognitive-diagnosis model simulations."""

    def __init__(self):
        # BUGFIX: was `super(object).__init__()` — an unbound super misuse.
        super().__init__()

    def s_g_uniform(self, i_cnt, s_low, s_high, g_low=None, g_high=None):
        """Draw slip and guess parameters independently from uniforms.

        Args:
            i_cnt: number of items.
            s_low, s_high: range of the slip parameter.
            g_low, g_high: range of the guess parameter; defaults to the
                slip range when either bound is omitted.

        Returns:
            tuple (s, g) of shape-(i_cnt,) arrays.
        """
        if g_low is None or g_high is None:
            # Default: guessing uses the same range as slipping.
            g_low, g_high = s_low, s_high
        s = np.random.uniform(s_low, s_high, i_cnt)
        g = np.random.uniform(g_low, g_high, i_cnt)
        return s, g

    def s_g_cor(self, i_cnt, mean=0.1, scale=1, cor=-0.6, return_correlation=False):
        """Draw correlated slip/guess parameters on the logit scale and map
        them back to (0, 1) with the logistic sigmoid.

        Args:
            i_cnt: number of items.
            mean: target mean of s and g on the probability scale.
            scale: variance of each logit; cor: their covariance.
            return_correlation: also return the observed Pearson correlation.

        Returns:
            (s, g) arrays, or — NOTE(review): kept for backward
            compatibility — the raw (i_cnt, 2) matrix together with the
            correlation when `return_correlation` is True; confirm callers
            expect this asymmetric return shape.
        """
        means = [special.logit(mean)] * 2
        cov = [[scale, cor], [cor, scale]]
        logit_s_g = stats.multivariate_normal.rvs(mean=means, cov=cov, size=i_cnt)
        # Inverse logit; identical to exp(x) / (1 + exp(x)).
        s_g = special.expit(logit_s_g)
        correlation = stats.pearsonr(s_g[:, 0], s_g[:, 1])
        print("s与g皮尔逊相关为:", correlation)
        if return_correlation:
            return s_g, correlation
        return s_g[:, 0].flatten(), s_g[:, 1].flatten()

    def beta(self, i_cnt, k_cnt, mean=0.5, scale=1, beta0_mean=-1.5):
        """Draw half-normal slope matrix (i_cnt, k_cnt) with floor `mean`
        and intercept vector (i_cnt,) with floor `beta0_mean`."""
        beta_ = stats.halfnorm.rvs(loc=mean, scale=scale, size=(i_cnt, k_cnt))
        beta0 = stats.halfnorm.rvs(loc=beta0_mean, scale=scale, size=(i_cnt))
        return beta0, beta_

    def s_g_pdina(self, i_cnt, alpha_l, s_change_rate=0.3, g_change_rate=0.3, s=None, g=None):
        """Build level-dependent slip/guess matrices for a polytomous DINA
        model: slipping grows and guessing decays across levels.

        Args:
            i_cnt: number of items.
            alpha_l: number of attribute levels (rows of the matrices).
            s_change_rate: per-level multiplicative growth of slipping.
            g_change_rate: per-level multiplicative decay of guessing.
            s, g: optional base-level parameters; drawn via `s_g_cor`
                when either is omitted.

        Returns:
            tuple (s_matrix, g_matrix), each of shape (alpha_l, i_cnt).
        """
        # BUGFIX: `s == None or g == None` raises for ndarray inputs.
        if s is None or g is None:
            s, g = self.s_g_cor(i_cnt)
        s_matrix = np.ones((alpha_l, i_cnt))
        g_matrix = np.zeros((alpha_l, i_cnt))
        s_matrix[0], g_matrix[0] = s, g
        for l in range(alpha_l - 1):
            s_matrix[l + 1] = s_matrix[l] * (1 + s_change_rate)
            # BUGFIX: was derived from s_matrix[l] (copy-paste error);
            # guessing must decay from the previous guessing row.
            g_matrix[l + 1] = g_matrix[l] * (1 - g_change_rate)
        return s_matrix, g_matrix
if __name__ == '__main__':
    # Quick demo: level-dependent slip/guess matrices for 20 items, 3 levels.
    demo = ItemPara()
    print(demo.s_g_pdina(20, 3))