from apps.logger_config import logger
from functools import cmp_to_key

from apps.derived_variable import derived_variable_handling, generate_by_option_handling, \
    test_code_mapping
from apps.parse_vlm import parse_vlm
from apps.query_db import *
from apps.utils import *


# Standard domain conversion
def domain_convert(path, dataset_df_dic, project_id, domain, sheet_list, sheet_to_domain_dict, extra_supp_domain_df_dic):
    """Convert the raw datasets of one SDTM domain into a standard domain
    DataFrame and append it as a sheet to the Excel workbook at *path*.

    Args:
        path: output Excel file; the domain sheet is appended (mode='a').
        dataset_df_dic: sheet name -> raw DataFrame; merge_table() stores the
            merged result back into this dict under the key *domain*.
        project_id: project identifier used for all metadata lookups.
        domain: SDTM domain code being generated (e.g. 'AE', 'CM').
        sheet_list: names of the raw sheets feeding this domain.
        sheet_to_domain_dict: sheet name -> domain mapping, forwarded to the
            derivation helpers.
        extra_supp_domain_df_dic: dict collecting SUPP-- rows produced by the
            multi-select and long-text handling (mutated in place).
    """
    logger.info("======开始进行%s域的转换===========" % domain)
    # 1. Fetch variable metadata for the domain, including child variables.
    all_variable_df = get_all_variable_info(project_id, domain)
    standard_variable = get_standard_variable_info(project_id, domain)
    # 2. Build the header columns of the output DataFrame.
    lst = list(standard_variable['variable'])  # only parent variables become columns
    # Sort the header with the custom SDTM variable ordering.
    all_variable_list = get_all_sdtm_variable()
    var_list = custom_variable_sort(lst, all_variable_list, domain)
    data_df = pd.DataFrame(columns=var_list)

    # 3. Merge the raw datasets; key variables are derived first inside
    #    merge_table() because the sheets are joined on them.
    # Key variables of this domain.
    key_variable_lst = get_key_variable(project_id, domain)
    merge_table(sheet_list, dataset_df_dic, key_variable_lst, domain, all_variable_df)

    # Merged DataFrame for this domain (written by merge_table).
    dataset_df = dataset_df_dic[domain]  # dataset_df_dic[domain] holds the merged data
    field_lst = dataset_df.columns  # all available source fields

    # 4. Walk all SDTM variables and generate their values.
    # 4.1 Key variables first, because value-level metadata depends on them.
    for var in var_list:
        # Key variables are copied straight from the merged raw data, which
        # already went through key-variable derivation.
        if var in key_variable_lst:
            data_df[var] = dataset_df[var]

    # 4.2 Generate the non-key variables.
    for var in var_list:
        # Special-case --TEST: its values may be lost when merging sheets, so
        # rebuild it from the --TESTCD controlled-terminology mapping instead.
        testcd_var = domain + 'TESTCD'
        if var.endswith('TEST') and testcd_var in var_list:
            # CT entry format: code-display name-meaning-order-match-submission value
            # C20197@#$男性@#$2@#$2@#$2@#$M |-| C16576@#$女性@#$1@#$1@#$1@#$F
            testcd_code = np.array(all_variable_df.loc[all_variable_df['variable'] == testcd_var]['ct_code'])[0]
            test_code = np.array(all_variable_df.loc[all_variable_df['variable'] == var]['ct_code'])[0]
            code_dict = test_code_mapping(testcd_code, test_code)

            # Fill the --TEST column from the --TESTCD -> --TEST mapping.
            for key, value in code_dict.items():
                data_df.loc[data_df[testcd_var] == key, var] = value
            continue
        if var not in key_variable_lst:
            '''
            获取变量是否衍生、衍生方式，以及在原始数据集中对应的字段field和表名sheet_name
            derived_pattern == 1: 有衍生结果时覆盖原始数据，没有时保留原始数据
            derived_pattern == 2: 仅在没有原始数据时填充衍生结果
            derived_pattern == 3: 使用衍生结果替换全部原始数据，无论原始数据是否有值
            
            derived_type == 1: 常量
            derived_type == 2: 变量
            derived_type == 3: 自定义
            derived_type == 4: 按选项生成
            '''
            # Translation of the note above — fetch whether the variable is
            # derived, how, and its raw-dataset field/sheet:
            #   derived_pattern 1: derived result overrides raw data when present,
            #                      otherwise keep the raw data
            #   derived_pattern 2: derived result fills in only when raw data is absent
            #   derived_pattern 3: derived result always replaces the raw data
            #   derived_type 1: constant, 2: variable, 3: custom, 4: generated by option
            derived_flag = np.array(all_variable_df.loc[all_variable_df['variable'] == var]['derived_flag'])[0]
            derived_pattern = np.array(all_variable_df.loc[all_variable_df['variable'] == var]['derived_pattern'])[0]
            derived_type = np.array(all_variable_df.loc[all_variable_df['variable'] == var]['derived_type'])[0]
            mapping_field = np.array(all_variable_df.loc[all_variable_df['variable'] == var]['field'])[0]
            data_type = np.array(all_variable_df.loc[all_variable_df['variable'] == var]['data_type'])[0]
            # sheet_name = np.array(all_variable_df.loc[all_variable_df['variable'] == var]['sheet_name'])[0]
            try:
                # 4.2.1 Derived variables first.
                if (np.isnan(derived_flag) == False) and int(derived_flag) == 1:
                    # TODO if derived_pattern == 1
                    if np.isnan(derived_pattern) or int(derived_pattern) == 1 or int(derived_pattern) == 3 or (
                            int(derived_pattern) == 2 and var not in field_lst):
                        derived_variable_handling(var, data_df, dataset_df_dic, all_variable_df, domain, derived_type,
                                                  sheet_to_domain_dict)

                    if data_type in ['datetime', 'Datetime', 'date', 'Date']:
                        data_df = convert_to_iso8601(data_df, var, data_type)

                # 4.2.2 Non-derived variables.
                else:
                    vlm_flag = np.array(all_variable_df.loc[all_variable_df['variable'] == var]['vlm_flag'])[0]
                    # Variables carrying value-level metadata (these are
                    # expected not to be derived variables themselves).
                    if vlm_flag == 1:
                        if var in field_lst:  # present in the merged raw data: copy directly
                            data_df[var] = dataset_df[var]
                            if data_type in ['datetime', 'Datetime', 'date', 'Date']:
                                data_df = convert_to_iso8601(data_df, var, data_type)
                            continue
                        else:
                            value_list = get_value_list(project_id, domain, var)  # all value-level metadata rows

                            # Process each child variable in turn.
                            add_column_lst = []  # helper columns; dropped before finishing
                            for index, value_info in value_list.iterrows():
                                derived_flag2 = value_info['derived_flag']
                                if derived_flag2 != 1:
                                    continue

                                # Derive the child variable.
                                # todo currently all value-level metadata is assumed to be derived
                                derived_type2 = value_info['derived_type']
                                # var_name = value_info['var_name']
                                variable = value_info['variable']  # column name of the child variable
                                add_column_lst.append(variable)
                                derived_variable_handling(variable, data_df, dataset_df_dic, all_variable_df, domain,
                                                          derived_type2, sheet_to_domain_dict)

                                # Parse the value-level metadata condition.
                                condition = ''
                                if 'vlm' in value_info:
                                    condition = value_info['vlm']  # VLM condition expression
                                elif 'VLM' in value_info:
                                    condition = value_info['VLM']
                                vlm_component_lst = parse_vlm(condition)  # e.g. [['SC.SCTESTCD', '=', 'EMPJOB']]
                                vlm_component = vlm_component_lst[0]  # todo currently assumes a single condition
                                op, value = vlm_component[1], vlm_component[2]
                                filter_column = vlm_component[0].split('.')[1]

                                # Push multi-select values of the child variable into extra_supp_domain_df_dic.
                                handle_multi_select_field(data_df, extra_supp_domain_df_dic, variable, domain, all_variable_df, filter_column, op, value)

                                # Controlled-terminology substitution for the child variable.
                                try:
                                    ct_handling(variable, variable, data_df, all_variable_df)
                                except Exception as e:
                                    logger.error(f'受控术语替换处理出错：{var} {e}')

                                # Copy the child values into the parent column where the condition holds.
                                data_df = apply_operation(data_df, op, value, filter_column, variable, var)

                            # Drop the helper columns.
                            data_df.drop(columns=add_column_lst, inplace=True)

                            if data_type in ['datetime', 'Datetime', 'date', 'Date']:
                                data_df = convert_to_iso8601(data_df, var, data_type)

                            continue

                    # No mapping field configured:
                    # copy the column if the raw dataset has it, otherwise skip.
                    if mapping_field is None:
                        if var in field_lst:
                            data_df[var] = dataset_df[var]
                    # A mapping field is configured:
                    # copy from the mapped raw field if present, otherwise skip.
                    else:
                        if mapping_field in field_lst:
                            data_df[var] = dataset_df[mapping_field]
                    if data_type in ['datetime', 'Datetime', 'date', 'Date']:
                        data_df = convert_to_iso8601(data_df, var, data_type)
            except Exception as error:
                logger.error(f'变量生成出错：{var} {error}')

    # 5. Controlled-terminology substitution for every variable.
    for var in var_list:
        try:
            ct_handling(var, var, data_df, all_variable_df)
        except Exception as e:
            logger.error(f'衍生变量处理出错：{var} {e}')

    # 6. Long-text handling (overflow beyond the length limit goes to SUPP--).
    handle_long_text(data_df, extra_supp_domain_df_dic, domain, all_variable_df)

    print("填入", domain, "域的数据为============>\n", data_df)

    # 7. Write the result to Excel.
    if not data_df.empty:
        # Append the DataFrame as a new sheet named after the domain.
        with pd.ExcelWriter(path=path, mode='a') as writer:
            data_df.to_excel(writer, sheet_name=domain, index=False)
        print("=====写入成功=======")


# Merge the raw datasets of one domain, deriving the key variables first.
# sheet_list: names of all sheets belonging to the domain
# dataset_df_dic: DataFrame for every sheet; the merged result is stored back under *domain*
# key_variable_list: key variables of the domain
# all_variable_df: metadata for all variables of the domain
def merge_table(sheet_list, dataset_df_dic, key_variable_list, domain, all_variable_df):
    """Join all raw sheets of *domain* on the key variables, then clean, sort
    and number the merged frame. Mutates dataset_df_dic in place."""
    print("----------开始合并表格---------")
    # 1. Derive the key variables first: the sheets are joined on them, so
    #    every sheet must carry all key variables before merging.
    for sheet in sheet_list:
        dataset_df = dataset_df_dic[sheet]
        field_list = dataset_df.columns.tolist()
        for key_variable in key_variable_list:
            derived_type = np.array(all_variable_df.loc[all_variable_df['variable'] == key_variable]['derived_type'])[0]
            derived_flag = np.array(all_variable_df.loc[all_variable_df['variable'] == key_variable]['derived_flag'])[0]
            data_type = np.array(all_variable_df.loc[all_variable_df['variable'] == key_variable]['data_type'])[0]
            # Derive when the key variable is missing from the raw sheet, or
            # present but flagged as derived (e.g. the CMTRT variable).
            if key_variable not in field_list or (np.isnan(derived_flag) == False) and int(derived_flag) == 1:
                try:
                    df = derived_variable_handling(key_variable, dataset_df, dataset_df_dic, all_variable_df, sheet,
                                                   derived_type, {})
                    if isinstance(df, DataFrame):
                        dataset_df = df
                        dataset_df_dic[sheet] = df
                except Exception as e:
                    logger.error("========关键变量衍生处理出错===========" + key_variable)
                    logger.error(e)
            if data_type in ['datetime', 'Datetime', 'date', 'Date']:
                dataset_df = convert_to_iso8601(dataset_df, key_variable, data_type)

    # 2. Horizontally join the sheets on the key variables.
    # NOTE(review): this aliases the first sheet's DataFrame; when there is
    # only one sheet, the in-place dropna/sort below also mutate
    # dataset_df_dic[sheet_list[0]] — confirm this is intended.
    dataset_df_dic[domain] = dataset_df_dic[sheet_list[0]]
    for sheet in sheet_list[1:]:
        try:
            dataset_df_dic[domain] = merge(dataset_df_dic[sheet], dataset_df_dic[domain], key_variable_list)
        except Exception as e:
            logger.error("========合并表格出错===========")
            logger.error(e)

    # 3. Drop rows where any key variable is empty.
    try:
        dataset_df_dic[domain].dropna(subset=key_variable_list, inplace=True)
    except Exception as e:
        logger.error("========去空行出错===========")
        logger.error(e)

    # 4. Sort ascending by the key variables.
    dataset_df_dic[domain].sort_values(by=key_variable_list, ascending=True, inplace=True)

    # 5. Generate the --SEQ column.
    # DM, TDM, RELREC and SUPP-- domains get no --SEQ; for other domains any
    # --SEQ configured in the domain variable settings is ignored here.
    if domain not in ['DM', 'TDM', 'RELREC']:
        seq_field = domain + "SEQ"  # e.g. AESEQ, CMSEQ
        dataset_df_dic[domain][seq_field] = 0  # default --SEQ to 0
        id_index = dataset_df_dic[domain].columns.get_loc('USUBJID')
        seq_index = dataset_df_dic[domain].columns.get_loc(seq_field)
        seq_field_handling(dataset_df_dic[domain], id_index, seq_index)


# Horizontally merge two DataFrames on the key variables; for columns present
# in both (e.g. DOMAIN), values from df1 win and df2 only fills the gaps.
def merge(df1, df2, key_variable_list):
    """Outer-join *df1* and *df2* on *key_variable_list*.

    Overlapping non-key columns are coalesced with df1 taking precedence
    (df2 values are used only where df1 is NaN). Returns the merged frame.
    """
    # Outer join; overlapping non-key columns get _left/_right suffixes.
    df_merged = pd.merge(df1, df2, on=key_variable_list, how='outer', suffixes=('_left', '_right'))

    # Coalesce every column that exists in both frames: prefer the df1
    # (_left) value, fall back to the df2 (_right) value.
    for col in df1.columns:
        if col not in key_variable_list and col in df2.columns:
            df_merged[col] = df_merged[col + '_left'].combine_first(df_merged[col + '_right'])
            # Drop the suffixed intermediate columns.
            df_merged.drop([col + '_left', col + '_right'], axis=1, inplace=True)

    # Defensive cleanup of any remaining suffixed columns.
    # BUG FIX: the pattern must be anchored at the end of the name — the
    # previous unanchored '_left|_right' substring match silently dropped
    # legitimate columns such as 'x_leftover'.
    df_merged = df_merged.loc[:, ~df_merged.columns.str.contains(r'(?:_left|_right)$')]
    return df_merged


# Generate the --SEQ column values
def seq_field_handling(df, id_index, seq_index):
    """Fill the column at *seq_index* with per-subject sequence numbers.

    The counter restarts at 1 whenever the subject id (column at *id_index*)
    differs from the previous row, and increments otherwise. Rows are
    processed in their current order; *df* is mutated in place.
    """
    previous_id = None
    sequence = 0
    for position in range(len(df)):
        current_id = df.iloc[position, id_index]
        # New subject (or very first row) restarts the counter.
        if position == 0 or current_id != previous_id:
            sequence = 1
        else:
            sequence += 1
        df.iloc[position, seq_index] = sequence
        previous_id = current_id


# Controlled-terminology substitution of submission values
# var: variable whose controlled terminology is looked up
# field: column to rewrite ('QVAL' for supplemental datasets, same as var otherwise)
def ct_handling(var: str, field: str, data_df: DataFrame, all_variable_df: DataFrame):
    """Replace the values in data_df[field] with the controlled-terminology
    submission values configured for *var* in all_variable_df. No-op when the
    variable has no standard/custom CT or no CT code. Mutates data_df."""
    # logger.info("----------开始处理受控术语---------")
    # Metadata row for the variable.
    variable_row = all_variable_df.loc[all_variable_df['variable'] == var]
    # Only standard ('标准') and custom ('自定义') terminology types apply.
    ct_type = np.array(variable_row['ct_type'])[0]
    if ct_type != '标准' and ct_type != '自定义':
        return
    ct_code = np.array(variable_row['ct_code'])[0]
    if ct_code is None or ct_code == '':
        return
    mapping = get_ct_mapping(ct_code, ct_type)
    if '*' in mapping.keys():
        # Entries like 'C29848-年': fill the whole column with one value.
        data_df[field] = mapping['*']
    else:
        data_df[field].replace(mapping, inplace=True)


# Conditional value-level-metadata assignment.
# Example: op='=', value='a', filter_column='A', value_column='B',
# target_column='C' means: for every row where column A equals 'a',
# column C takes the value of column B.
def apply_operation(df, op, value, filter_column, value_column, target_column):
    """Copy df[value_column] into df[target_column] on the rows where
    df[filter_column] satisfies `op value`.

    Supported operators: =, <, >, <=, >=, !=, in, not in. The membership
    operators require *value* to be a list.
    Raises ValueError for a non-list membership value or an unknown operator.
    Returns *df* (also mutated in place).
    """
    filtered = df[filter_column]

    # Boolean mask builders for the simple relational operators.
    relational = {
        '=': lambda: filtered == value,
        '<': lambda: filtered < value,
        '>': lambda: filtered > value,
        '<=': lambda: filtered <= value,
        '>=': lambda: filtered >= value,
        '!=': lambda: filtered != value,
    }

    if op in relational:
        mask = relational[op]()
    elif op == 'in':
        if not isinstance(value, list):
            raise ValueError("For 'in' operation, value must be a list")
        mask = filtered.isin(value)
    elif op == 'not in':
        if not isinstance(value, list):
            raise ValueError("For 'not in' operation, value must be a list")
        mask = ~filtered.isin(value)
    else:
        raise ValueError("Unsupported operator")

    # Assign the source column's values to the target column on matching rows.
    df.loc[mask, target_column] = df.loc[mask, value_column]

    return df


# Row predicate used when processing multi-select fields.
def filter_row(filter_column, op, value, row):
    """Return True when row[filter_column] satisfies `op value`.

    Supported operators: =, >, <, >=, <=, !=, in, not in. The membership
    operators require *value* to be a list.
    Raises ValueError for a non-list membership value or an unknown operator.
    """
    cell = row[filter_column]

    # Simple relational comparisons, evaluated lazily.
    relational = {
        "=": lambda: cell == value,
        ">": lambda: cell > value,
        "<": lambda: cell < value,
        ">=": lambda: cell >= value,
        "<=": lambda: cell <= value,
        "!=": lambda: cell != value,
    }
    if op in relational:
        return relational[op]()

    if op == "in":
        # Membership requires a list on the right-hand side.
        if not isinstance(value, list):
            raise ValueError("For 'in' operation, value must be a list")
        return cell in value
    if op == "not in":
        if not isinstance(value, list):
            raise ValueError("For 'not in' operation, value must be a list")
        return cell not in value

    raise ValueError("Unsupported operation")


# Multi-select field handling: explode comma-separated answers into SUPP-- rows
def handle_multi_select_field(origin_df, extra_supp_domain_df_dic, target_column, domain, all_variable_df, filter_column, op, target_value):
    """For every row of *origin_df* matching (filter_column op target_value),
    if the value in *target_column* contains a comma (Chinese or English),
    replace it with the literal "多选" and append one SUPP-- row per
    individual answer to extra_supp_domain_df_dic[domain].

    Generated SUPP rows carry STUDYID/USUBJID/IDVARVAL(--SEQ)/QNAM/QLABEL/
    QVAL/QORIG; QNAM and QLABEL are the row's --TESTCD/--TEST values with a
    1-based answer number appended. QVAL goes through controlled-terminology
    substitution before being stored. Mutates origin_df and
    extra_supp_domain_df_dic in place.
    """
    supp_domain_variable_list = ['STUDYID', 'RDOMAIN', 'USUBJID', 'IDVAR', 'IDVARVAL', 'QNAM', 'QLABEL', 'QVAL',
                                 'QORIG', 'QEVAL']
    target_df = pd.DataFrame(columns=supp_domain_variable_list)

    # These column names are loop-invariant — compute them once instead of
    # per matching row.
    seq_column = domain + 'SEQ'
    test_column = domain + 'TEST'
    testcd_column = domain + 'TESTCD'

    # Walk every row of the original DataFrame.
    for row_index, row in origin_df.iterrows():
        if not filter_row(filter_column, op, target_value, row):
            continue
        # Multi-select answers are joined with commas (Chinese or English).
        cell = str(row[target_column])
        if ',' not in cell and '，' not in cell:
            continue

        # Replace the original value with the "多选" (multi-select) marker.
        # *row* is a copy from iterrows(), so the split below still sees the
        # original comma-joined value.
        origin_df.at[row_index, target_column] = "多选"

        # Normalise Chinese commas, then split into individual answers.
        values = cell.replace('，', ',').split(',')

        # One SUPP row per answer, numbered from 1.
        # BUG FIX: the counter previously reused the name `index`, shadowing
        # the iterrows() row index — renamed to avoid the latent hazard.
        for answer_no, value in enumerate(values, start=1):
            # Generate the STUDYID/USUBJID/QVAL/IDVARVAL/QNAM/QLABEL/QORIG columns.
            new_row = {'STUDYID': row['STUDYID'],
                       'USUBJID': row['USUBJID'],
                       'IDVARVAL': row[seq_column],
                       'QORIG': 'CRF',
                       'QVAL': value.strip(),
                       'QNAM': str(row[testcd_column]) + str(answer_no),
                       'QLABEL': str(row[test_column]) + str(answer_no)
                       }
            target_df.loc[len(target_df)] = new_row

    # Controlled-terminology substitution on the split-out QVAL values.
    ct_handling(var=target_column, field='QVAL', data_df=target_df, all_variable_df=all_variable_df)

    if domain not in extra_supp_domain_df_dic.keys():
        extra_supp_domain_df_dic[domain] = target_df
    else:
        extra_supp_domain_df_dic[domain] = pd.concat([extra_supp_domain_df_dic[domain], target_df])


# Long-text handling: move text beyond 200 characters into SUPP-- rows
def handle_long_text(origin_df, extra_supp_domain_df_dic, domain, all_variable_df):
    """Truncate every text cell longer than 200 characters to its first 200
    characters and move the overflow into SUPP-- rows of up to 200 characters
    each, appended to extra_supp_domain_df_dic[domain].

    Each SUPP row carries STUDYID/USUBJID/IDVARVAL(--SEQ)/QNAM/QLABEL/QVAL/
    QORIG; QNAM is the column name plus a 1-based chunk number, QLABEL is the
    variable label from all_variable_df. The STUDYID/USUBJID/DOMAIN columns
    are exempt. Mutates origin_df and extra_supp_domain_df_dic in place.
    """
    supp_domain_variable_list = ['STUDYID', 'RDOMAIN', 'USUBJID', 'IDVAR', 'IDVARVAL', 'QNAM', 'QLABEL', 'QVAL',
                                 'QORIG', 'QEVAL']
    target_df = pd.DataFrame(columns=supp_domain_variable_list)
    seq_column = domain + 'SEQ'
    for col_name in origin_df.columns:
        # Only free-text (object) columns can overflow; id columns are exempt.
        if origin_df[col_name].dtype != 'object' or col_name in ['STUDYID', 'USUBJID', 'DOMAIN']:
            continue
        long_text_indices = origin_df[origin_df[col_name].astype(str).apply(lambda x: len(x) > 200)].index
        if len(long_text_indices) == 0:
            continue
        # Look up the variable label only when there is overflow to move.
        # BUG FIX: this lookup previously ran for every text column and raised
        # IndexError for columns absent from all_variable_df even when no cell
        # was over the limit; now it is deferred and falls back to the column
        # name when no metadata row exists.
        label_values = np.array(all_variable_df.loc[all_variable_df['variable'] == col_name]['var_name'])
        var_name = label_values[0] if len(label_values) > 0 else col_name
        for idx in long_text_indices:
            full_text = origin_df.loc[idx, col_name]
            # Overflow chunks beyond the first 200 characters, 200 apiece.
            long_text_parts = [full_text[i:i + 200]
                               for i in range(200, len(full_text), 200)]
            for i, part in enumerate(long_text_parts):
                new_row = {'STUDYID': origin_df.loc[idx, 'STUDYID'],
                           'USUBJID': origin_df.loc[idx, 'USUBJID'],
                           'IDVARVAL': origin_df.loc[idx, seq_column],
                           'QORIG': 'CRF',
                           'QVAL': part,
                           'QNAM': str(col_name) + str(i + 1),
                           'QLABEL': str(var_name)
                           }
                target_df.loc[len(target_df)] = new_row
            # Keep only the first 200 characters in the standard dataset.
            origin_df.loc[idx, col_name] = full_text[:200]

    if domain not in extra_supp_domain_df_dic.keys():
        extra_supp_domain_df_dic[domain] = target_df
    else:
        extra_supp_domain_df_dic[domain] = pd.concat([extra_supp_domain_df_dic[domain], target_df])


if __name__ == '__main__':
    # Ad-hoc manual check: generate option-based RSTESTCD values on two RS
    # sheets of a sample project, merge them on the key variables and dump
    # the intermediate and merged frames to CSV for inspection.
    project_id = '86af5e3968c9455aad692acf7037f29b'
    df_dic = get_combined_dataframe(project_id)
    df1 = df_dic['RS']
    df2 = df_dic['RS_0']
    all_variable_df = get_all_variable_info(project_id, 'RS')
    df1 = generate_by_option_handling('RSTESTCD', df1, all_variable_df)
    df2 = generate_by_option_handling('RSTESTCD', df2, all_variable_df)
    df1.to_csv('df1.csv', index=False)
    df2.to_csv('df2.csv', index=False)
    key_variable_lst = ['SUBJID', 'SITEID', 'VISITNUM', 'RSTESTCD']
    df_merged = merge(df1, df2, key_variable_lst)
    print(df_merged)
    # Drop rows where any key variable is empty.
    df_merged.dropna(subset=key_variable_lst, inplace=True)
    df_merged.to_csv('test.csv', index=False)
    # Sample data (kept for reference):
    # data1 = {
    #     'A': ['a', 'b', 'c', 'd'],
    #     'B': [5, 6, 7, 8],
    #     'C': [10, None, 12, 13],
    #     'D': ['x', 'y', 'z', 'w']
    # }
    # data2 = {
    #     'A': ['a', 'b', 'c'],
    #     'B': [5, 6, 9],
    #     'C': [1, 11, 12],
    #     'D': [None, None, None]
    # }
    #
    # df1 = pd.DataFrame(data1)
    # df2 = pd.DataFrame(data2)
    #
    # # Merge the DataFrames
    # result = merge(df1, df2, ['A', 'B'])
    # print('----合并前---')
    # print(df1)
    # print(df2)
    # print('----合并后---')
    # print(result)
    #
    # df = pd.DataFrame(data)
    # print('----操作前---')
    # print(df)
    # # Sample operation: ['=', 'a', 'A', 'B', 'C']
    # operations = ['=', 'a', 'A', 'B', 'C']
    # result_df = apply_operation(df, operations[0], operations[1], operations[2], operations[3], operations[4])
    # print('----操作后---')
    # print(result_df)