# 信度分析
import pandas as pd
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri, default_converter
from rpy2.robjects.conversion import localconverter

# Import the R 'psych' package via rpy2 (provides psych::alpha used below).
psych = importr('psych')
# Activates the legacy global pandas<->R converter.  NOTE(review): the
# functions below also wrap conversions in an explicit localconverter, so
# this global activation may be redundant — confirm before removing.
pandas2ri.activate()


def do_analysis(df: pd.DataFrame, mean_df: pd.DataFrame, groups: list):
    """
    Run a reliability (Cronbach's alpha) analysis through R's psych package.

    :param df: data participating in the reliability computation
        (one column per questionnaire item)
    :param mean_df: per-factor mean data; only its column count is used here,
        to decide whether the overall (between-factor) reliability is computed
    :param groups: grouping info, each element shaped like
        {'group_index': group_index, 'indexes': indexes, 'count': base_option_count}
        where ``indexes`` maps column labels of ``df`` to something (only the
        keys are used here)
    :return: tuple ``(res, is_pass, score)`` where ``res`` is
        ``{'factors': ..., 'itemize': [...]}``, ``is_pass`` says whether every
        std.alpha exceeded 0.7, and ``score`` is the sum of passing alphas
    """

    def process_nan(value):
        """Recursively stringify a value, mapping float NaN to None."""
        # NaN is the only value for which x != x, so this detects NaN
        # without importing math.
        if isinstance(value, float) and value != value:
            return None  # replace NaN with None
        elif isinstance(value, dict):
            return {k: str(process_nan(v)) for k, v in value.items()}
        elif isinstance(value, list):
            return [str(process_nan(v)) for v in value]
        else:
            return str(value)

    def extract_std_alpha(reliability):
        """Pull std.alpha out of a calculate_reliability() result as a float.

        Returns None when the value is missing or not parseable.  The
        truthiness guard runs BEFORE float() — the original code called
        float(std_alpha) first, which raised TypeError when process_nan had
        turned an R NaN into None.
        """
        total_list = reliability.get('total')
        if not total_list:
            return None
        values = total_list[0].get('values')
        if not values:
            return None
        std_alpha = values.get('std.alpha')
        if not std_alpha:
            return None
        try:
            return float(std_alpha)
        except (TypeError, ValueError):
            return None

    def result_pass(inner_factors, all_itemize):
        """Decide whether the analysis passes: overall std.alpha > 0.7 AND
        every per-group std.alpha > 0.7.

        Returns (is_pass, score); score accumulates each passing alpha.
        Matches the original contract: an empty all_itemize fails even when
        the overall alpha passes, and score keeps whatever was accumulated
        before the first failure.
        """
        score = 0
        # 1. The overall reliability must pass first.
        total_alpha = extract_std_alpha(inner_factors) if inner_factors else None
        if total_alpha is None or total_alpha <= 0.7:
            return False, score
        score += total_alpha
        # 2. Then every per-group reliability must pass as well.
        is_pass = False
        for item_factor in all_itemize:
            item_alpha = extract_std_alpha(item_factor)
            if item_alpha is None or item_alpha <= 0.7:
                return False, score  # one failure fails the whole check
            score += item_alpha
            is_pass = True
        return is_pass, score

    def get_result(obj: pd.DataFrame) -> list:
        """Flatten a DataFrame into a JSON-friendly list of row dicts."""
        output = []
        for index, row in obj.iterrows():
            output.append({'row_key': str(index), 'values': {str(col): process_nan(row[col]) for col in obj.columns}})
        return output

    def calculate_reliability(inner_df: pd.DataFrame):
        """Run psych::alpha on inner_df and serialize the three result tables."""
        with localconverter(default_converter + pandas2ri.converter):
            r_data = pandas2ri.py2rpy(inner_df)
            # Compute reliability and item-total correlations in R.
            alpha_o = psych.alpha(r_data)
            d = dict(alpha_o)  # convert the R list into a Python dict
            res_group1 = get_result(d['total'])
            res_group2 = get_result(d['alpha.drop'])
            res_group3 = get_result(d['item.stats'])
        return {'total': res_group1, 'alpha_drop': res_group2, 'item_status': res_group3}

    # 1. Between-factor reliability (only meaningful with more than one factor).
    factors_res = {}
    if len(mean_df.columns) > 1:
        factors_res = calculate_reliability(df)
    # 2. Reliability of each individual group.
    itemize = []
    for group in groups:
        cur_arr = group['indexes']
        if not cur_arr:  # also guards None, which len() would choke on
            continue
        group_df = df.loc[:, list(cur_arr.keys())]
        if len(group_df.columns) > 1:
            itemize.append(calculate_reliability(group_df))
    # Assemble the result and judge whether it passes.
    res = {'factors': factors_res, 'itemize': itemize}
    is_pass, score = result_pass(factors_res, itemize)
    return res, is_pass, score


def report_do_analysis(df: pd.DataFrame, mean_df: pd.DataFrame, groups: list):
    """
    Report-specific entry point for the reliability (Cronbach's alpha) analysis.

    :param df: data participating in the reliability computation
        (one column per questionnaire item)
    :param mean_df: per-factor mean data; only its column count is used here,
        to decide whether the overall (between-factor) reliability is computed
    :param groups: grouping info; each element is expected to look like
        {
            "name": "<group name>",
            "items": [{"name": "<df column label>", ...}, ...],
        }
        (only 'name' and 'items[*].name' are read here)
    :return: ``{'factors': ..., 'itemize': [...]}`` — no pass/fail judgement,
        unlike :func:`do_analysis`
    """

    def process_nan(value):
        """Recursively stringify a value, mapping float NaN to None."""
        # NaN is the only value for which x != x.
        if isinstance(value, float) and value != value:
            return None  # replace NaN with None
        elif isinstance(value, dict):
            return {k: str(process_nan(v)) for k, v in value.items()}
        elif isinstance(value, list):
            return [str(process_nan(v)) for v in value]
        else:
            return str(value)

    def get_result(obj: pd.DataFrame) -> list:
        """Flatten a DataFrame into a JSON-friendly list of row dicts."""
        output = []
        for index, row in obj.iterrows():
            output.append({'row_key': str(index), 'values': {str(col): process_nan(row[col]) for col in obj.columns}})
        return output

    def calculate_reliability(inner_df: pd.DataFrame, group_name):
        """Run psych::alpha on inner_df and serialize the three result tables,
        tagging the result with group_name."""
        with localconverter(default_converter + pandas2ri.converter):
            inner_df = inner_df.astype(float)  # psych::alpha needs numeric data
            r_data = pandas2ri.py2rpy(inner_df)
            # Compute reliability and item-total correlations in R.
            alpha_o = psych.alpha(r_data)
            d = dict(alpha_o)  # convert the R list into a Python dict
            res_group1 = get_result(d['total'])
            res_group2 = get_result(d['alpha.drop'])
            res_group3 = get_result(d['item.stats'])
        return {'group_name': group_name, 'total': res_group1, 'alpha_drop': res_group2, 'item_status': res_group3}

    # 1. Between-factor reliability (only meaningful with more than one factor).
    factors_res = {}
    if len(mean_df.columns) > 1:
        factors_res = calculate_reliability(df, "")
    # 2. Reliability of each individual group.
    itemize = []
    for group in groups:
        items = group.get('items')
        # Truthiness guard: the original len(items) raised TypeError when a
        # group carried no 'items' key (get() returns None).
        if not items:
            continue
        col_names = [item.get('name') for item in items]
        if not col_names:
            continue
        group_df = df.loc[:, col_names]
        if len(group_df.columns) > 1:
            itemize.append(calculate_reliability(group_df, group.get('name')))
    # Assemble and return the result (no pass/fail scoring for reports).
    return {'factors': factors_res, 'itemize': itemize}
