import random
import numpy as np
import timeit
from factor_analyzer.factor_analyzer import FactorAnalyzer, calculate_kmo, calculate_bartlett_sphericity
import pandas as pd
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
from enums.relation_generate_type import RelationGenerateType
from itertools import combinations

# Load the R `psych` package via rpy2 and enable automatic
# pandas <-> R data.frame conversion for all subsequent R calls.
# NOTE(review): both lines run at import time and require a working
# R installation with `psych` installed.
psych = importr('psych')
pandas2ri.activate()


def make_group_data(total_count, ratio, cols, size):
    """
    Generate a DataFrame of random Likert-style answers.

    :param total_count: number of rows to generate
    :param ratio: fraction of rows biased toward high answers
                  (e.g. 0.7 means 70% of rows draw from the upper half)
    :param cols: column labels; one answer is generated per column
    :param size: number of answer options; size=5 means values 1-5
    :return: DataFrame of shape (total_count, len(cols))
    """
    length = len(cols)
    # Boolean flags with a fixed True/False ratio, shuffled so the
    # "high answer" rows are spread randomly through the data.
    bool_arr = [False] * total_count
    for key in range(int(total_count * ratio)):
        bool_arr[key] = True
    random.shuffle(bool_arr)
    # Candidate answer values [1, ..., size].
    size_list = list(range(1, size + 1))
    small_arr = size_list[:-1]               # every option except the top one
    middle_arr = size_list[int(size / 2):]   # upper half of the scale
    # Weights for the "low" rows: 0.9 spread evenly, then the top option
    # (and the second-from-top on wide scales) capped at 0.1.
    # These are loop-invariant — the original recomputed them every row.
    sw = [0.9 / len(small_arr)] * len(small_arr)
    sw[-1] = 0.1
    if size >= 6:
        sw[-2] = 0.1
    # Fill each row from the appropriate candidate pool.
    data = []
    for is_large in bool_arr:
        if is_large:
            data.append(random.choices(middle_arr, k=length))
        else:
            data.append(random.choices(small_arr, weights=sw, k=length))
    arr = np.array(data)
    df = pd.DataFrame(arr, columns=cols)
    return df


def make_group_data_v2(total_count: int, cols: dict):
    """
    Generate random answers where each column has its own option count.

    :param total_count: number of rows to generate, e.g. 200
    :param cols: mapping of column label -> option count, e.g. {0: 2, 6: 5}
                 (one entry of a group config like
                 [{'group_index': 1, 'indexes': {0: 2, 6: 5}}, ...])
    :return: DataFrame of shape (total_count, len(cols))
    """
    ratio = random.uniform(0.5, 0.6)
    cols_index = [index_key for index_key, _ in cols.items()]
    # Boolean flags with a fixed True/False ratio, shuffled so the
    # high-answer rows are spread randomly through the data.
    bool_arr = [False] * total_count
    for key in range(int(total_count * ratio)):
        bool_arr[key] = True
    random.shuffle(bool_arr)
    # Per-column candidate pools and weights are invariant across rows:
    # precompute them once instead of rebuilding them total_count times
    # per column as the original did. (This consumes no randomness, so
    # the generated values are unchanged for a given seed.)
    specs = []
    for size in cols.values():
        size_list = list(range(1, size + 1))
        small_arr = size_list[:-1]               # all but the top option
        middle_arr = size_list[int(size / 2):]   # upper half of the scale
        # 0.9 spread evenly over the low pool, top option(s) capped at 0.1.
        sw = [0.9 / len(small_arr)] * len(small_arr)
        sw[-1] = 0.1
        if size >= 6:
            sw[-2] = 0.1
        specs.append((small_arr, middle_arr, sw))
    # Fill each row column-by-column from the appropriate pool.
    data = []
    for is_large in bool_arr:
        row = []
        for small_arr, middle_arr, sw in specs:
            if is_large:
                row.append(random.choices(middle_arr, k=1)[0])
            else:
                row.append(random.choices(small_arr, weights=sw, k=1)[0])
        data.append(row)
    arr = np.array(data)
    df = pd.DataFrame(arr, columns=cols_index)
    return df


def do_sample(df, total_rows: int):
    """
    Impose a correlation structure on the rows.

    Rows are ordered by descending row sum, three fixed bands of that
    ordering are remembered, the whole frame is shuffled, and the
    remembered bands are written back at their original positions.

    :param df: input DataFrame
    :param total_rows: number of rows in df
    :return: the partially shuffled DataFrame
    """
    # Order rows by descending row sum and renumber the index 0..n-1.
    ordered = df.loc[df.sum(axis=1).sort_values(ascending=False).index].reset_index(drop=True)
    # Band boundaries expressed as fractions of the row count.
    bounds = [(int(total_rows * lo), int(total_rows * hi))
              for lo, hi in ((0.03, 0.18), (0.45, 0.55), (0.82, 0.97))]
    kept = [ordered.iloc[lo:hi] for lo, hi in bounds]
    # Shuffle everything, then restore the remembered bands in place
    # (the kept slices' indices line up with the shuffled frame's 0..n-1
    # index, so slice assignment writes them back positionally).
    shuffled = ordered.sample(frac=1, ignore_index=True)
    for (lo, hi), band in zip(bounds, kept):
        shuffled[lo:hi] = band
    # Reshuffling once more would lower the KMO value but also weaken
    # the correlation.
    return shuffled


def get_conner_df(total_count: int, cols_arr: [], size: int) -> pd.DataFrame:
    """Generate one group's data with a random high-bias ratio, then
    apply the correlation shuffle."""
    bias = random.uniform(0.5, 0.6)
    generated = make_group_data(total_count, bias, cols_arr, size)
    return do_sample(df=generated, total_rows=total_count)


def get_conner_df_v2(total_count: int, cols_arr: dict) -> pd.DataFrame:
    """Generate one group's data (per-column option counts) and apply
    the correlation shuffle."""
    generated = make_group_data_v2(total_count, cols_arr)
    sampled = do_sample(df=generated, total_rows=total_count)
    return sampled


def build_datas(total_count: int, groups: []) -> pd.DataFrame:
    """
    Build the full dataset: one DataFrame per group, joined column-wise.

    :param total_count: number of rows for every group
    :param groups: list of group configs, each with an 'indexes' dict
                   mapping column label -> option count
    :return: joined DataFrame covering all groups' columns
    """
    # Bug fix: the original assigned the *class* `pd.DataFrame` (not an
    # instance); `res_df.empty` then evaluated the property object, which
    # is always truthy, so the code only worked by accident.
    res_df = pd.DataFrame()
    for group in groups:
        new_df = get_conner_df_v2(total_count, group['indexes'])
        if res_df.empty:
            res_df = new_df
        else:
            res_df = res_df.join(new_df)
    return res_df


class MakeRelationData(object):
    """Generates synthetic questionnaire data with controlled correlation,
    using either a roughly normal ("reality") or a skewed distribution."""

    def __init__(self, total_count: int, relation_generate_type: str, groups):
        self.total_count = total_count  # total number of rows to generate
        self.relation_generate_type = relation_generate_type  # distribution type: "normal" vs "reality"/skewed
        self.groups = groups  # group configs: [{'group_index': 1, 'indexes': {col: option_count, ...}}, ...]

    def generate_reality_data(self, cols: dict) -> pd.DataFrame:
        """
        Generate more realistic (roughly normal) answer data.

        :param cols: mapping of column label -> option count, e.g. {0: 2, 6: 5}
                     (one entry of a group config like
                     [{'group_index': 1, 'indexes': {0: 2, 6: 5}}, ...])
        :return: DataFrame of shape (self.total_count, len(cols))
        """
        total_count = self.total_count
        ratio = random.uniform(0.5, 0.6)
        length = len(cols)
        cols_index = [index_key for index_key, _ in cols.items()]
        # Boolean flags with a fixed True/False ratio, shuffled so the
        # high-answer rows are spread randomly through the data.
        bool_arr = [False] * total_count
        for key in range(int(total_count * ratio)):
            bool_arr[key] = True
        random.shuffle(bool_arr)
        # Pre-sized 2D result array.
        data = [[0] * length for _ in range(total_count)]
        # Fill each row column-by-column.
        for i in range(total_count):
            # Current column position within this row.
            column_num = 0
            for index_key, index_value in cols.items():
                size = index_value
                size_list = list(range(1, size + 1))
                small_arr = size_list[:-1]               # all but the top option
                middle_arr = size_list[int(size / 2):]   # upper half of the scale
                # Weights for "low" rows: 0.9 spread evenly, the top option
                # (and second-from-top on wide scales) capped at 0.1.
                sw = []
                cur_rate = 0.9 / len(small_arr)
                for cur_i in range(len(small_arr)):
                    sw.append(cur_rate)
                sw[-1] = 0.1
                if size >= 6:
                    sw[-2] = 0.1

                is_large = bool_arr[i]
                if is_large:
                    row_data = random.choices(middle_arr, k=1)
                else:
                    row_data = random.choices(small_arr, weights=sw, k=1)
                data[i][column_num] = row_data[0]
                column_num += 1

        arr = np.array(data)
        df = pd.DataFrame(arr, columns=cols_index)
        return df

    def reality_sample(self, df) -> pd.DataFrame:
        """
        Impose a correlation structure on the rows: sort by row sum,
        remember three fixed bands, shuffle, then restore the bands
        at their original positions.

        :param df: input DataFrame
        :return: the partially shuffled DataFrame
        """
        total_rows = self.total_count
        # Order rows by descending row sum and renumber the index 0..n-1.
        df = df.loc[df.sum(axis=1).sort_values(ascending=False).index].reset_index(drop=True)
        a1 = int(total_rows * 0.03)
        a2 = int(total_rows * 0.18)
        dfa = df.iloc[a1: a2]
        b1 = int(total_rows * 0.45)
        b2 = int(total_rows * 0.55)
        dfb = df.iloc[b1: b2]
        c1 = int(total_rows * 0.82)
        c2 = int(total_rows * 0.97)
        dfc = df.iloc[c1: c2]
        # Shuffle everything, then write the remembered bands back.
        sample_df = df.sample(frac=1, ignore_index=True)
        df = sample_df
        df[a1:a2] = dfa
        df[b1:b2] = dfb
        df[c1:c2] = dfc
        return df

    def generate_skewness_data(self, cols: dict) -> pd.DataFrame:
        """
        Generate skewed data - yields higher reliability/validity (SKEWNESS).

        :param cols: mapping of column label -> option count
        :return: DataFrame of shape (self.total_count, len(cols))
        """
        total_count = self.total_count
        length = len(cols)
        data = [[0] * length for _ in range(total_count)]
        cols_index = [index_key for index_key, _ in cols.items()]
        # Fraction of rows biased toward high answers; slightly lower
        # ceiling when there are many groups.
        ratio = random.uniform(0.65, 0.85)
        if len(self.groups) >= 7:
            ratio = random.uniform(0.65, 0.8)
        # Boolean flags with a fixed True/False ratio, shuffled.
        bool_arr = [False] * total_count
        for key in range(int(total_count * ratio)):
            bool_arr[key] = True
        random.shuffle(bool_arr)
        # Fill each row; each scale size 1-9 has hand-tuned candidate
        # pools and weights, anything larger falls back to a generic split.
        for i in range(total_count):
            # Current column position within this row.
            column_num = 0
            for index_key, index_value in cols.items():
                size = index_value
                size_list = list(range(1, size + 1))
                is_large = bool_arr[i]
                # Scales of 1-9 options are handled case by case.
                if size == 1:
                    data[i][column_num] = 1
                elif size == 2:
                    small_arr = [1]
                    large_arr = [2]
                    row_data = random.choices(large_arr if is_large else small_arr, k=1)
                    data[i][column_num] = row_data[0]
                elif size == 3:
                    small_arr = [1, 2]
                    large_arr = [2, 3]
                    row_data = random.choices(large_arr if is_large else small_arr, k=1)
                    data[i][column_num] = row_data[0]
                elif size == 4:
                    small_arr = [1, 2, 3]
                    large_arr = [3, 4]
                    large_weight = [random.uniform(60, 80), random.uniform(70, 80)]
                    weight = large_weight if is_large else [30, 30, 10]
                    row_data = random.choices(large_arr if is_large else small_arr, weights=weight, k=1)
                    data[i][column_num] = row_data[0]
                elif size == 5:
                    small_arr = [1, 2, 3, 4]
                    large_arr = [3, 4, 5]
                    large_weight = [random.uniform(20, 30), random.uniform(60, 80), random.uniform(70, 80)]
                    weight = large_weight if is_large else [25, 25, 20, 10]
                    row_data = random.choices(large_arr if is_large else small_arr, weights=weight, k=1)
                    data[i][column_num] = row_data[0]
                elif size == 6:
                    small_arr = [1, 2, 3, 4, 5]
                    large_arr = [4, 5, 6]
                    large_weight = [random.uniform(40, 70), random.uniform(60, 80), random.uniform(40, 70)]
                    weight = large_weight if is_large else [30, 30, 30, 10, 20]
                    row_data = random.choices(large_arr if is_large else small_arr, weights=weight, k=1)
                    data[i][column_num] = row_data[0]
                elif size == 7:
                    small_arr = [1, 2, 3, 4]
                    large_arr = [5, 6, 7]
                    large_weight = [random.uniform(40, 70), random.uniform(70, 90), random.uniform(40, 70)]
                    weight = large_weight if is_large else [random.uniform(10, 30), 30, 30, random.uniform(10, 30)]
                    row_data = random.choices(large_arr if is_large else small_arr, weights=weight, k=1)
                    data[i][column_num] = row_data[0]
                elif size == 8:
                    small_arr = [1, 2, 3, 4, 5, 6, 7]
                    large_arr = [6, 7, 8]
                    large_weight = [random.uniform(40, 70), random.uniform(70, 90), random.uniform(40, 70)]
                    weight = large_weight if is_large else [20, 20, 30, 30, 40, 30, 20]
                    row_data = random.choices(large_arr if is_large else small_arr, weights=weight, k=1)
                    data[i][column_num] = row_data[0]
                elif size == 9:
                    small_arr = [1, 2, 3, 4, 5, 6, 7]
                    large_arr = [6, 7, 8, 9]
                    large_weight = [random.uniform(30, 40), random.uniform(30, 40), random.uniform(70, 90),
                                    random.uniform(40, 70)]
                    weight = large_weight if is_large else [20, 20, 30, 30, 40, 30, 20]
                    row_data = random.choices(large_arr if is_large else small_arr, weights=weight, k=1)
                    data[i][column_num] = row_data[0]
                else:
                    # Generic fallback for size >= 10: unweighted draw from
                    # either the lower pool or the upper half of the scale.
                    small_arr = size_list[:-1]
                    middle_arr = size_list[int(size / 2):]
                    if is_large:
                        row_data = random.choices(middle_arr, k=1)
                    else:
                        row_data = random.choices(small_arr, k=1)
                    data[i][column_num] = row_data[0]
                column_num += 1

        arr = np.array(data)
        df = pd.DataFrame(arr, columns=cols_index)
        return df

    def normal_sample(self, df) -> pd.DataFrame:
        """
        Impose a correlation structure on the rows (two fixed bands kept,
        the rest shuffled).

        :param df: input DataFrame
        :return: the partially shuffled DataFrame
        """
        total_rows = self.total_count
        # Order rows by descending row sum and renumber the index 0..n-1.
        df = df.loc[df.sum(axis=1).sort_values(ascending=False).index].reset_index(drop=True)
        a1 = int(total_rows * 0.1)
        a2 = int(total_rows * 0.3)
        dfa = df.iloc[a1: a2]
        c1 = int(total_rows * 0.8)
        c2 = int(total_rows * 0.9)
        dfc = df.iloc[c1: c2]
        # Shuffle everything, then write the remembered bands back.
        sample_df = df.sample(frac=1, ignore_index=True)
        df = sample_df
        df[a1:a2] = dfa
        df[c1:c2] = dfc
        return df

    def skewness_sample(self, df) -> pd.DataFrame:
        """
        Impose a correlation structure on the rows of skewed data.

        Splits the sum-ordered frame into fixed and shuffled segments and
        recombines them, then shuffles once more.

        :param df: input DataFrame
        :return: the reordered DataFrame (index not reset after the final shuffle)
        """
        total_rows = self.total_count
        # Order rows by descending row sum and renumber the index 0..n-1.
        df = df.loc[df.sum(axis=1).sort_values(ascending=False).index].reset_index(drop=True)
        # NOTE: a fixed configuration (0.1/0.2/0.7/0.85) worked well when
        # every group had 5 questions split over 5 groups with option
        # counts [4, 4, 3, 3, 3]; the boundaries below are randomized.
        a1 = int(total_rows * random.uniform(0.1, 0.15))
        a2 = int(total_rows * random.uniform(0.3, 0.4))
        a3 = int(total_rows * random.uniform(0.7, 0.75))
        a4 = int(total_rows * random.uniform(0.85, 0.9))
        if len(self.groups) >= 7:
            a2 = int(total_rows * random.uniform(0.1, 0.35))
            a3 = int(total_rows * random.uniform(0.7, 0.8))
            a4 = int(total_rows * random.uniform(0.8, 0.9))
        rows2 = df.iloc[:a2]
        rows3 = df.iloc[a2:a3]
        rows4 = df.iloc[a3:a4]
        rows5 = df.iloc[a4:]
        # Shuffle the middle and tail segments together.
        shuffled_df = pd.concat([rows3, rows5])
        shuffled_df = shuffled_df.sample(frac=1)
        # Recombine: head kept in order, shuffled middle, fixed band.
        new_df = pd.concat([rows2, shuffled_df, rows4])
        new_df.reset_index(drop=True, inplace=True)
        new_df = new_df.sample(frac=1)
        return new_df

    def generate_now(self) -> pd.DataFrame:
        """
        Build the full dataset: one DataFrame per group, joined column-wise.

        :return: joined DataFrame covering every group's columns
        """
        # Bug fix: the original assigned the *class* `pd.DataFrame` (not an
        # instance); `res_df.empty` then evaluated the property object,
        # which is always truthy, so it only worked by accident.
        res_df = pd.DataFrame()
        for i in self.groups:
            if RelationGenerateType.NORMALITY.value == self.relation_generate_type:  # normal distribution
                d = self.generate_reality_data(i['indexes'])  # generate data
                item_df = self.reality_sample(df=d)  # partially shuffle it
            else:  # skewed distribution
                item_df = self.generate_skewness_data(i['indexes'])  # generate data
                item_df = self.skewness_sample(df=item_df)  # partially shuffle it
            if res_df.empty:
                res_df = item_df
            else:
                res_df = res_df.join(item_df)
        return res_df
