import random
from utils.wjx.common_obj import GenerateObj
import ujson as json
import pandas as pd
from dao import work_order_dao, topic_dao, strategy_dao, work_order_extend_dao
from enums.sys_type_enum import SysTypeEnum
from enums.data_cache_enum import DataCacheEnum
from enums.link_load_type import LinkLoadTypeEnum
import service.web_service as web_service
import service.explain_service as explain_service
from exception.api_exception import APIException
import numpy as np
import model.model as ut
from utils.common.init_app import app
from business_common.redis_prefix import REDIS_EXPLAIN_PREFIX, REDIS_WORK_ORDER_INFO_PREFIX, REDIS_PROGRAMME_PREFIX
from utils.common.redis_client import Redis
import time
import traceback


def generate_data(work_order_id: int, data: dict):
    """Generate fake questionnaire answers for one work order.

    Steps: read the per-question weight config from ``data['config']``, build
    a DataFrame pre-filled with -100 placeholders, fill it question by
    question, apply jump-question and relation logic (-3 marks a skipped
    cell), then persist the rows and flip the work order status to 2.

    :param work_order_id: id of the work order to generate data for
    :param data: request payload; ``data['config']`` holds the weight config
    :return: success message string
    :raises APIException: empty config, unknown work order, or generation failure
    """
    start = time.time()
    app.logger.debug("1.generate_data_start_time:" + str(start))
    # all_weights = search_programme_from_redis(work_order_id)
    # if not all_weights:
    #     all_weights = data.get('weights')  # fall back to the page payload when the cache misses
    all_weights = data.get('config')
    if not all_weights:
        raise APIException(message="题目权重配置为空，无法生成数据")
    weights_objects = list(map(lambda co: GenerateObj.conver_json_2_obj(co), all_weights))
    # 0. Persist the weight data cached in redis to the database
    # save_programme_weight_2_db(work_order_id, data)
    # 1. Look up the questionnaire link via the work order id
    cur_work_order = work_order_dao.query_work_order_by_id(work_order_id)
    if not cur_work_order:
        raise APIException(message="工单不存在")
    total_count = int(cur_work_order.total_size)  # total number of rows to generate
    app.logger.debug('2.通过工单id查询问卷链接:' + str(time.time() - start))
    # 2. Build a total_count x (total_column + 1) DataFrame initialised to -100.
    # Scan the questions from the end for the last one carrying a data_index;
    # its final index determines the column count.
    counter = 1
    total = len(weights_objects)
    total_column = 0
    while counter <= total:
        wo = weights_objects[total - counter]
        counter += 1
        if hasattr(wo, 'data_index') and wo.data_index is not None and len(wo.data_index) > 0:
            total_column = wo.data_index[-1]
            break
    # total_column = weights_objects[-1].data_index[-1]
    df = pd.DataFrame(-100, index=range(total_count), columns=range(total_column + 1))
    app.logger.debug('3.生成基础数据、方案和相关题目:' + str(time.time() - start))
    # Look up the question/db-index mapping for virtual (related) titles
    virtual_titles = work_order_extend_dao.search_title_by_work_order_id(work_order_id)
    virtual_dict = dict()
    if virtual_titles:
        for c_i in virtual_titles[0]:
            virtual_dict[c_i.get('index')] = c_i
    try:
        # 3. Generate base data, schemes and related questions
        # TODO also produce a digits-only copy of the answers for later edits
        need_drop_column_index = []
        for i in weights_objects:
            if not (hasattr(i, 'data_index') and i.data_index is not None and len(i.data_index) > 0):
                continue
            column_slice = i.data_index  # the columns owned by this question
            sys_type = i.sys_type
            if SysTypeEnum.DESCRIPTION.value == sys_type:
                need_drop_column_index.extend(column_slice)
                continue
            # filtered_df = df.loc[df.iloc[:, column_slice].eq(filter_value).any(axis=1)] # would match rows where ANY cell is -100
            filtered_df = df.loc[(df.iloc[:, column_slice] == -100).all(axis=1)]  # rows whose cells are all still -100 (unfilled)
            generate_item_data(i, filtered_df, df, virtual_dict)
        df = df.drop(need_drop_column_index, axis=1)  # description questions need no generated data
        df = df.applymap(lambda x: int(x) if isinstance(x, float) else x)  # normalise floats to ints
        # df = df.astype(str)
        weight_dict = dict()  # title_id -> question object mapping
        for i in weights_objects:
            weight_dict[i.title_id] = i
        # 4. Jump questions: cells between a jump and its target become -3
        deal_jump_title(df, weight_dict, weights_objects)
        app.logger.debug('4.处理跳题，将跳题之间的数据过滤为-3:' + str(time.time() - start))
        # 5. Relation (display-condition) questions
        deal_relation_title(df, weight_dict, weights_objects)
        app.logger.debug('5.处理相关题目:' + str(time.time() - start))
        # 6. Render -3 cells of slide questions as the literal "(跳过)" marker
        # for i in weights_objects:
        #     sys_type = i.sys_type
        #     data_index = i.data_index
        #     if sys_type == SysTypeEnum.SINGLE_SLIDE.value:
        #         df.loc[df[data_index] == -3, [data_index]] = "(跳过)"
        for i in weights_objects:
            sys_type = i.sys_type
            data_index = i.data_index
            if sys_type == SysTypeEnum.SINGLE_SLIDE.value:
                for index, row in df.iterrows():
                    item_arr = row.to_numpy()
                    for j_index in range(len(item_arr)):
                        # only touch this question's own columns
                        # NOTE(review): j_index is a positional offset while data_index
                        # holds original column labels — verify this still lines up
                        # after the drop() of description columns above.
                        if j_index in data_index:
                            if -3 == item_arr[j_index]:
                                item_arr[j_index] = "(跳过)"
                    df.loc[index] = item_arr
        app.logger.debug('6.处理相关题目:' + str(time.time() - start))
    except Exception as e:
        traceback.print_exc()
        app.logger.debug(e)
        raise APIException(message="生成基础数据失败")
    # 7. Drop all existing rows for this work order, persist the new ones, update the status
    topic_dao.delete_result_by_word_order_id(work_order_id)
    insert_into_topic(df, work_order_id)
    work_order_dao.modify_work_order_status(id=work_order_id, cur_status=2)
    app.logger.debug('7. 删除工单下所有数据，数据按照work_order_id落库，修改工单状态:' + str(time.time() - start))
    return "生成成功！"


def insert_into_topic(df: pd.DataFrame, work_order_id: int):
    """Batch-persist every generated row of *df* as an unsubmitted Topic record.

    :param df: generated answer data, one questionnaire response per row
    :param work_order_id: owning work order id
    :return: None
    """
    topics = [
        ut.Topic(
            data=json.dumps(np.array(row).tolist(), ensure_ascii=False),
            work_order_id=work_order_id,
            submitted=0,
        )
        for _, row in df.iterrows()
    ]
    topic_dao.batch_insert(topics)
    return


def deal_relation_title(df, weight_dict, weights_objects):
    """Apply inter-question relation (display-condition) logic.

    A question may only be shown when another question was answered a certain
    way. Rows that do NOT satisfy the condition get this question's answer
    columns overwritten with -3 ("skipped"). Conditions are encoded in
    ``relation`` as ``title_id,opt1;opt2`` items joined by ``|`` (AND) or
    ``$`` (OR); a bare ``0`` or ``-1`` id means "never shown".

    Fixes over the original:
    - ``('0' or '-1') == x`` always compared against ``'0'`` only; now both
      sentinel ids are recognised.
    - ``df[:, data_indexes] = -3`` is invalid DataFrame indexing (raises);
      replaced with column assignment.
    - the single-condition MULTIPLE branch collected column labels instead of
      row indexes; it now keeps rows whose cell starts with '1' (a selected
      option, possibly "1^text"), matching the commented-out intent.

    :param df: the full generated DataFrame (modified in place)
    :param weight_dict: title_id -> question object mapping
    :param weights_objects: all question objects
    """
    for i in weights_objects:
        cur_relation = i.relation
        data_indexes = i.data_index
        if not cur_relation or not data_indexes:  # no relation logic or no columns: skip
            continue
        if '|' in cur_relation:  # AND logic: every condition must hold
            relations = cur_relation.split('|')
            cur_df = df
            for item_relation in relations:
                title_options = item_relation.split(',')  # comma separates question id and options
                if not title_options:
                    continue
                relation_title_id = title_options[0]  # related question id
                if len(title_options) == 1:  # sentinel-only entry
                    if str(relation_title_id) in ('0', '-1'):  # "never shown": mark every row skipped
                        df[data_indexes] = -3
                    continue
                relation_title_obj = weight_dict.get(relation_title_id)
                relation_sys_type = relation_title_obj.sys_type  # related question type
                title = relation_title_obj.title
                options = title_options[1].split(';')  # semicolon separates option values
                if relation_sys_type == SysTypeEnum.SINGLE.value:
                    cur_df = cur_df[cur_df[title].isin(options)]
                elif relation_sys_type == SysTypeEnum.MULTIPLE.value:
                    indexes = relation_title_obj.data_index  # every column of the multi-choice question
                    multiple_db_index = {pos: indexes[pos] for pos in range(len(indexes))}  # option position -> db column
                    for option in options:
                        cur_db_index = multiple_db_index.get(int(option) - 1)  # db column for this option
                        cur_df = cur_df[cur_df[cur_db_index] == 1]  # keep rows that selected this option
            # rows failing any condition are marked skipped
            df.loc[~df.index.isin(cur_df.index), df.columns[data_indexes]] = -3
        elif '$' in cur_relation:  # OR logic: any one condition is enough
            relations = cur_relation.split('$')
            valid_index_arr = []  # row indexes satisfying at least one condition
            for item_relation in relations:
                title_options = item_relation.split(',')  # comma separates question id and options
                if not title_options:
                    continue
                relation_title_id = title_options[0]
                if len(title_options) == 1:  # sentinel-only entry
                    if str(relation_title_id) in ('0', '-1'):  # "never shown": mark every row skipped
                        df[data_indexes] = -3
                    continue
                relation_title_obj = weight_dict.get(relation_title_id)
                relation_sys_type = relation_title_obj.sys_type
                relation_indexes = relation_title_obj.data_index
                options = title_options[1].split(';')  # semicolon separates option values
                if relation_sys_type == SysTypeEnum.SINGLE.value:
                    for option in options:
                        cur_df = df[df[relation_indexes[0]].isin([str(option)])]
                        valid_index_arr.extend(cur_df.index)
                elif relation_sys_type == SysTypeEnum.MULTIPLE.value:
                    for index in relation_indexes:
                        cur_df = df[df[index] == 1]  # rows that selected this option column
                        valid_index_arr.extend(cur_df.index)
            # rows matching no condition are marked skipped
            df.loc[~df.index.isin(valid_index_arr), df.columns[data_indexes]] = -3
        else:  # exactly one condition
            title_options = cur_relation.split(',')  # comma separates question id and options
            if not title_options:
                continue
            relation_title_id = title_options[0]
            if len(title_options) == 1:  # sentinel-only entry
                if str(relation_title_id) in ('0', '-1'):  # "never shown": mark every row skipped
                    df[data_indexes] = -3
                continue
            relation_title_obj = weight_dict.get(relation_title_id)
            relation_sys_type = relation_title_obj.sys_type
            relation_indexes = relation_title_obj.data_index
            options = title_options[1].split(';')  # semicolon separates option values
            valid_index_arr = []  # row indexes satisfying the condition
            if relation_sys_type == SysTypeEnum.SINGLE.value:
                for option in options:
                    cur_df = df[df[relation_indexes[0]].isin([str(option)])]
                    valid_index_arr.extend(cur_df.index)
            elif relation_sys_type == SysTypeEnum.MULTIPLE.value:
                for index in relation_indexes:
                    # keep rows whose cell marks the option as selected ("1" or "1^text")
                    cur_df = df[df[index].astype(str).str.startswith('1')]
                    valid_index_arr.extend(cur_df.index)
            df.loc[~df.index.isin(valid_index_arr), df.columns[data_indexes]] = -3


def deal_jump_title(df, weight_dict, weights_objects):
    """Apply jump-question logic.

    For rows whose answer triggers a jump, every column between the jump
    question and the jump target is overwritten with -3 ("skipped").

    Fixes over the original: removed a leftover debug snippet
    (``if i_index0 == 20: print(i_index0)``) and extracted the triplicated
    "mark jumped-over columns" logic into :func:`_apply_jump`.

    :param df: the full generated DataFrame (modified in place)
    :param weight_dict: title_id -> question object mapping
    :param weights_objects: all question objects
    """
    for i in weights_objects:
        if not i.is_jump:
            continue
        sys_type = i.sys_type
        i_index0 = i.data_index[0]
        if SysTypeEnum.SINGLE.value == sys_type or SysTypeEnum.MULTIPLE.value == sys_type \
                or SysTypeEnum.SORTED.value == sys_type or SysTypeEnum.SCALE.value == sys_type:
            # choice-style questions: a specific option value triggers the jump
            for option in i.options:
                if not option.get('is_jump') or not option.get('jump_to'):
                    continue
                single_value = str(option.get('value'))
                cur_df = df[df[i_index0].astype(str).str.startswith(single_value)]
                # for multi-column questions the jump starts after the LAST column
                jump_start_index = i.data_index[-1] + 1
                _apply_jump(df, weight_dict, cur_df, jump_start_index, option.get('jump_to'))
        elif sys_type == SysTypeEnum.TEXT.value or sys_type == SysTypeEnum.AREA.value or sys_type == SysTypeEnum.DATE.value:
            # text-style questions: any non-blank answer triggers the jump
            for option in i.options:
                if not option.get('is_jump') or not option.get('jump_to'):
                    continue
                cur_df = df[df[i_index0].astype(str).str.strip() != '']
                jump_start_index = i_index0 + 1
                _apply_jump(df, weight_dict, cur_df, jump_start_index, option.get('jump_to'))
        elif sys_type == SysTypeEnum.RATE.value:
            # rate questions: a value within [min_oper_num, max_oper_num] triggers the jump
            for option in i.options:
                if not option.get('is_jump') or not option.get('jump_to'):
                    continue
                min_oper_num = float(option.get('min_oper_num'))
                max_oper_num = float(option.get('max_oper_num'))
                cur_df = df[df[option.get('title')].astype(float).between(min_oper_num, max_oper_num)]
                jump_start_index = i.data_index[-1] + 1
                _apply_jump(df, weight_dict, cur_df, jump_start_index, option.get('jump_to'))


def _apply_jump(df, weight_dict, cur_df, jump_start_index, jump_to_title):
    """Overwrite the jumped-over columns of the rows in *cur_df* with -3.

    ``jump_to_title == '1'`` means "jump to the end of the questionnaire";
    otherwise the jump ends at the target question's first column.
    """
    if str(jump_to_title) == '1':  # jump to the end of the questionnaire
        df.loc[cur_df.index, cur_df.columns[jump_start_index:]] = -3
    else:
        jump_end_index = weight_dict.get(jump_to_title).data_index[0]
        # NOTE(review): a falsy (0) end index is skipped, mirroring the original
        # behaviour — confirm 0 can never be a real jump target column.
        if not jump_end_index:
            return
        df.loc[cur_df.index, cur_df.columns[jump_start_index:jump_end_index]] = -3


def generate_item_data(obj: GenerateObj, filtered_df: pd.DataFrame, data_df: pd.DataFrame, db_data_index_dict: dict):
    """Generate the answer columns for one question, recursing into schemes.

    1. Generate one batch of values for this question's columns.
    2. Write the digits-only values into both DataFrames.
    3. Apply sys_relation logic (related/virtual questions).
    4. For single-choice/scale questions with schemes, split the matching
       rows by scheme weight and recurse into each scheme's questions.
    5. Finally write the display values (e.g. "2^text") into ``data_df``.

    Fix over the original: the scheme branch tested
    ``sys_type == (SINGLE.value or SCALE.value)``, which evaluates to
    ``SINGLE.value`` alone, so scale questions never got scheme handling.

    :param obj: question config object
    :param filtered_df: the rows still unfilled for this question
    :param data_df: the full DataFrame (modified in place)
    :param db_data_index_dict: db index -> virtual-title info mapping
    """
    data_index = obj.data_index
    # TODO if there are children, collect their indexes as the effective data_index
    sys_type = obj.sys_type
    options = obj.options
    column_slice = data_index  # the columns owned by this question
    filtered_rows = filtered_df.shape[0]  # number of rows to fill
    if filtered_rows <= 0:
        return
    generated_data, only_value_result = generate_data_by_sys_type(obj, filtered_rows)  # values for the -100 placeholders
    filtered_index = filtered_df.index
    if isinstance(only_value_result, pd.Series):  # realign onto the target rows' index
        only_value_result = pd.Series(only_value_result.values, index=filtered_index)
    if isinstance(generated_data, pd.Series):  # realign onto the target rows' index
        generated_data = pd.Series(generated_data.values, index=filtered_index)
    filtered_df.loc[filtered_index, column_slice] = only_value_result
    # digits-only values (e.g. "2" rather than "2^blank answer")
    data_df.loc[filtered_index, column_slice] = only_value_result
    if sys_type == SysTypeEnum.MATRIX.value:
        for child in obj.children:
            g_child = GenerateObj.conver_json_2_obj(child)
            sys_relation = g_child.sys_relation
            if sys_relation:  # fill the related question's columns from this child's values
                only_value_result = data_df[g_child.data_index]
                do_sys_relation(data_df, db_data_index_dict, filtered_index, only_value_result, sys_relation)
    else:
        sys_relation = obj.sys_relation
        if sys_relation:  # fill the related question's columns from this question's values
            do_sys_relation(data_df, db_data_index_dict, filtered_index, only_value_result, sys_relation)
        # schemes apply to single-choice and scale questions only
        if sys_type in (SysTypeEnum.SINGLE.value, SysTypeEnum.SCALE.value):
            for item_option in options:
                sys_scheme = item_option.get('sys_scheme')
                if not sys_scheme:
                    continue
                option_value = item_option.get('value')
                # rows that picked this option
                option_filtered_df = filtered_df.loc[(filtered_df.iloc[:, column_slice] == option_value).all(axis=1)]
                total_score = sum(int(item_sys_scheme.get('score')) for item_sys_scheme in sys_scheme)  # total scheme weight
                start_rows = 0
                total_rows = option_filtered_df.shape[0]
                for scheme_index in range(len(sys_scheme)):  # split the rows by weight
                    item_sys_scheme = sys_scheme[scheme_index]
                    cur_rows = start_rows + int(total_rows * int(item_sys_scheme.get('score')) / total_score)
                    next_filtered_df = option_filtered_df[start_rows:cur_rows]
                    if scheme_index == len(sys_scheme) - 1:
                        next_filtered_df = option_filtered_df[start_rows:]  # last scheme takes the remainder
                    # recurse into the scheme's questions
                    scheme_titles = item_sys_scheme.get('scheme_titles')
                    for item_scheme_title in scheme_titles:
                        scheme_obj = GenerateObj.conver_json_2_obj(item_scheme_title)
                        generate_item_data(scheme_obj, next_filtered_df, data_df, db_data_index_dict)
                    start_rows = cur_rows
    # write the display values (option + blank text, e.g. "2^text")
    data_df.loc[filtered_index, column_slice] = generated_data
    return


def do_sys_relation_matirx(data_df, db_data_index_dict, filtered_index, only_value_result, sys_relation):
    """Matrix variant of :func:`do_sys_relation` (its only call site in
    ``generate_item_data`` is commented out).

    NOTE(review): unlike ``do_sys_relation`` this version never applies the
    ``change_score`` ratio (the default is set but unused), "copies" by
    aliasing (so the "^无" suffix mutates the caller's data in place), and
    finally writes ``only_value_result`` — the same object as ``copied_arr``
    — into ``data_df``. Confirm whether this drift is intentional before use.
    """
    for i in sys_relation:
        # Reuse only_value_result, adjusting part of it:
        # 1. copy and tweak by ratio; 2. append the "none" suffix; 3. write back
        relation_index = i.get('relation_index')  # which related question index to fill
        change_score = i.get('change_score')  # percentage of rows to keep unchanged
        item_d = db_data_index_dict.get(relation_index)
        if not item_d:
            continue
        # 1. "copy" the data (alias only — no real copy is made)
        copied_arr = only_value_result
        if not change_score:
            change_score = 80

        # 2. append the "无" (none) suffix to text-capable options
        relation_options = item_d.get('options')
        index = item_d.get('index')
        valid_values = []
        for relation_option in relation_options:
            if relation_option.get('is_text'):
                valid_values.append(relation_option.get('value'))
        if valid_values:
            for item_copy_arr_index in range(len(copied_arr)):
                if copied_arr[item_copy_arr_index] in valid_values:
                    copied_arr[item_copy_arr_index] = copied_arr[item_copy_arr_index] + "^无"
        # 3. write back into the final DataFrame
        data_df.loc[filtered_index, [index]] = only_value_result


def do_sys_relation(data_df, db_data_index_dict, filtered_index, only_value_result, sys_relation):
    """Fill the columns of questions related to the current one.

    For every relation entry: keep the first ``change_score`` percent of the
    generated values, replace the remainder with random option numbers,
    append the "^无" (none) suffix to text-capable option values, then write
    the result into ``data_df`` as strings.

    :param data_df: the full DataFrame (modified in place)
    :param db_data_index_dict: db index -> related-question info mapping
    :param filtered_index: row index of the rows being filled
    :param only_value_result: digits-only values generated for the source question
    :param sys_relation: list of {'relation_index', 'change_score'} configs
    """
    for i in sys_relation:
        # Reuse only_value_result, adjusting part of it:
        # 1. copy and tweak by ratio; 2. append the "none" suffix; 3. write back
        relation_index = i.get('relation_index')  # which related question index to fill
        change_score = i.get('change_score')  # percentage of rows to keep unchanged
        item_d = db_data_index_dict.get(relation_index)
        if not item_d:
            continue
        relation_options = item_d.get('options')
        index = item_d.get('index')
        # 1. keep the leading change_score% as-is, regenerate the rest randomly
        copied_arr = only_value_result  # NOTE(review): alias, not a copy — the slices below do copy
        if not change_score:
            change_score = 80
        float_rate = change_score / 100
        float_values1 = copied_arr[:int(len(copied_arr) * float_rate)]  # values kept as-is
        need_change_arr = copied_arr[int(len(copied_arr) * float_rate):]  # values to be replaced
        change_choices_arr = list(range(1, len(relation_options) + 1))
        extend_arr = random.choices(change_choices_arr, k=len(need_change_arr))
        copied_arr = np.concatenate((float_values1, extend_arr))
        # 2. append the "无" (none) suffix to text-capable options
        valid_values = []
        for relation_option in relation_options:
            if relation_option.get('is_text'):
                valid_values.append(relation_option.get('value'))
        if valid_values:
            for item_copy_arr_index in range(len(copied_arr)):
                if copied_arr[item_copy_arr_index] in valid_values:
                    # NOTE(review): assumes the concatenated array holds strings
                    # (otherwise "+ '^无'" would raise) — confirm upstream dtypes
                    copied_arr[item_copy_arr_index] = copied_arr[item_copy_arr_index] + "^无"
        # 3. write back into the final DataFrame as strings
        data_df.loc[filtered_index, [index]] = list(map(str, copied_arr))


def save_programme_weight_2_db(wo_id, inner_data):
    """Insert or update the strategy-weight record of a work order.

    :param wo_id: work order id
    :param inner_data: serialized weight configuration to persist
    :return: None
    """
    existing = strategy_dao.search_by_work_order_id(wo_id)
    if existing is not None:
        strategy_dao.modify_by_work_order_id(wo_id=wo_id, strategy_weight_res=inner_data)
    else:
        strategy_dao.batch_insert([ut.Strategy(work_order_id=wo_id, strategy_weight=inner_data)])
    return


def generate_data_by_sys_type(obj: GenerateObj, total_count: int):
    """
    处理传入数据中的每一个对象
    :param obj: 当前对象的所有属性
    :param total_count: 总数量
    :return:
    """

    def get_rate_valid_data(arrays: [[]]):
        """Enumerate all combinations picking one value per inner list that
        sum to exactly 100 (used for rate/proportion questions).

        Depth-first search with pruning: a branch is abandoned as soon as its
        running sum exceeds 100.
        """

        def _search(pos, picked, found):
            if pos > len(arrays):
                return
            if pos == len(arrays):
                # full combination: keep it only when it sums to exactly 100
                if sum(picked) == 100:
                    found.append(picked.copy())
                return
            for candidate in arrays[pos]:
                picked[pos] = candidate
                # prune: once the partial sum exceeds 100 no later value helps
                if sum(picked[:pos + 1]) > 100:
                    break
                _search(pos + 1, picked, found)

        # example: arrays = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
        results = []
        _search(0, [0] * len(arrays), results)
        return results

    def valid_text_content(cur_text, is_repeat, req, title_id, total_count_in) -> []:
        """
        Validate and expand the configured fill-in answers for one question.

        Fix over the original: ``if len(origin_texts) != texts`` compared an
        int to a list (always true); it now compares the two lengths, so the
        "可能是空格，Tab或回车造成" hint only appears when blank lines were
        actually dropped.

        :param cur_text: newline-separated pool of candidate answers
        :param is_repeat: True when each answer may be used only once
        :param req: '1' when the question is required
        :param title_id: question id, used in error messages
        :param total_count_in: number of answers to produce
        :return: list of exactly total_count_in answers ('无' fills gaps)
        :raises APIException: required question with no / too few answers
        """
        # precheck - a required unique question must have an answer pool
        if '1' == req and not cur_text and is_repeat:
            raise APIException(message="第" + str(title_id) + "题为必选题，答案为空")
        origin_texts = cur_text.split("\n")  # one candidate per line
        texts = [x for x in origin_texts if x.strip() != ""]  # drop blank/whitespace-only lines
        if '1' == req and not texts and is_repeat:
            raise APIException(message="第" + str(title_id) + "题为必选题，答案为空")
        text_arr = []
        if '1' == req and is_repeat:  # required + unique: need exactly one answer per row
            if total_count_in != len(texts):
                msg = ("第" + str(title_id) + "题，答案个数与总份数不同，输入" + str(len(texts))
                       + "个答案，需要" + str(total_count_in) + "个答案")
                if len(origin_texts) != len(texts):
                    msg += "，可能是空格，Tab或回车造成"
                raise APIException(message=msg)
            text_arr = texts
        elif '1' == req and not is_repeat:  # required + repeats allowed: sample with replacement
            text_arr = random.choices(texts, k=total_count_in) if texts else ["无"] * total_count_in
        elif '1' != req and is_repeat:  # optional + unique: use each answer once, pad with '无'
            if not texts:
                text_arr = ["无"] * total_count_in
            else:
                cur_for_i = 0
                while cur_for_i < total_count_in:
                    if cur_for_i < len(texts):
                        text_arr.append(texts[cur_for_i])
                    else:
                        text_arr.append('无')
                    cur_for_i += 1
        else:  # optional + repeats allowed: sample with replacement
            text_arr = random.choices(texts, k=total_count_in) if texts else ["无"] * total_count_in
        return text_arr

    def build_multiple_matrix(obj_in: GenerateObj, total_count_in: int) -> np.array:
        """
        Generate matrix multiple-choice data by joining each child row's
        generated columns.

        Fix over the original: the result frame accumulated ``item_ovr_df``
        instead of ``item_df`` (copy/paste slip), so the display values were
        discarded and the digits-only values returned twice.

        :param obj_in: matrix question config; each child is one matrix row
        :param total_count_in: number of rows to generate
        :return: tuple (display values, digits-only values) as numpy arrays
        """
        res_df = pd.DataFrame()
        ovr_df = pd.DataFrame()
        for child in obj_in.children:
            child_obj = GenerateObj.conver_json_2_obj(child)
            item_df, item_ovr_df = build_multiple_datas(child_obj, total_count_in)
            if item_df is not None:
                res_df = item_df if res_df.empty else res_df.join(item_df)
            if not item_ovr_df.empty:
                ovr_df = item_ovr_df if ovr_df.empty else ovr_df.join(item_ovr_df)
        return res_df.values, ovr_df.values

    def build_matrix_text(obj_in: GenerateObj, total_count_in: int) -> np.array:
        """
        Generate matrix fill-in data: each cell title becomes one column
        filled with the '无' (none) placeholder.

        Fix over the original: removed a leftover ``print(name_list)`` debug
        statement (and the list that existed only to feed it).

        :param obj_in: matrix question config; children carry the cell options
        :param total_count_in: number of rows to generate
        :return: the generated values as a 2-D numpy array
        """
        # e.g. {'r1c1': ['无', '无', '无'], 'r1c2': ['无', '无', '无'], ...}
        res_data = dict()
        for child in obj_in.children:
            for cur_opt in child['options']:
                res_data[cur_opt['title']] = ["无"] * total_count_in
        return pd.DataFrame(res_data).values

    def build_multiple_text(obj_in: GenerateObj, total_count_in: int) -> np.array:
        """
        Generate data for a multi-blank fill-in question.

        Each child blank contributes one independently shuffled column named
        ``<parent title>.<child title>``; columns are joined side by side.

        :param obj_in: question config; children are the individual blanks
        :param total_count_in: number of rows to generate
        :return: the generated values as a 2-D numpy array
        """
        cur_req = obj_in.req        # whether the question is required ('1')
        title_id = obj_in.title_id  # used in validation error messages
        parent_title = obj_in.title
        child_objs = list(map(lambda co: GenerateObj.conver_json_2_obj(co), obj_in.children))
        result_df = pd.DataFrame()
        for child in child_objs:
            column_name = parent_title + '.' + child.title
            # validate/expand the configured answer pool for this blank
            answers = valid_text_content(child.text, child.is_repeat, cur_req, title_id, total_count_in)
            column_df = pd.DataFrame({column_name: answers})
            column_df = column_df.sample(frac=1, ignore_index=True)  # shuffle this column
            result_df = column_df if result_df.empty else result_df.join(column_df)
        return result_df.values

    def build_matrix_datas(obj_in: GenerateObj, total_count_in: int) -> np.array:
        """
        Generate matrix (one-choice-per-row) question data.

        Each child row gets a weighted distribution of its option values,
        individually shuffled, and the columns are joined side by side.

        :param total_count_in: number of rows to generate
        :param obj_in: question config object with weighted options per child
        :return: the generated values as a 2-D numpy array
        :raises APIException: when a child row's weights sum to zero
        """
        # out_obj = GenerateObj(**obj_in)
        children = obj_in.children  # all child rows of the matrix
        parent_title = obj_in.title  # parent question title
        children = list(map(lambda co: GenerateObj.conver_json_2_obj(co), children))
        matrix_df = pd.DataFrame()
        for i in children:
            title = i.title + i.name  # child row title
            cur_options = i.options  # option configs
            pers = list(map(lambda co: GenerateObj.conver_json_2_obj(co), cur_options))
            total_weight = 0
            for j in pers:
                total_weight += j.rate
            if total_weight <= 0:
                raise APIException(message="生成失败，" + title + "，此题总比重为0")
            m = dict()  # option value -> weight ratio
            can_use_nums = []
            can_use_num_weights = []
            for j in range(len(pers)):
                inner_obj = pers[j]
                m[inner_obj.value] = inner_obj.rate / total_weight
                can_use_nums.append(inner_obj.value)
                can_use_num_weights.append(inner_obj.rate)
            # generate this row's values according to the ratios
            child_res = []
            cur_total = 0
            for j in range(len(cur_options)):
                cur_value = cur_options[j].get('value')
                need_add_count = int(total_count_in * m[cur_value])
                if j == len(m) - 1:
                    need_add_count = total_count_in - cur_total  # last option takes the rounding remainder
                if need_add_count > 0:
                    cur_total += need_add_count
                    if (j == len(m) - 1) and m[cur_value] == 0:  # zero-weight remainder: draw from the weighted pool instead
                        cur_value = random.choices(can_use_nums, weights=can_use_num_weights, k=need_add_count)
                        for res_item in cur_value:
                            child_res.append(res_item)
                    else:
                        child_res += [cur_value] * need_add_count
            name = parent_title + '.' + title
            inner_df = pd.DataFrame({name: child_res})
            inner_df = inner_df.sample(frac=1, ignore_index=True)  # shuffle this column
            if matrix_df.empty:
                matrix_df = inner_df
            else:
                matrix_df = matrix_df.join(inner_df)
        return matrix_df.values

    def build_select_datas(obj_in: GenerateObj, total_count: int) -> pd.Series:
        """
        Generate dropdown/single-select question data.

        Option values are distributed according to their configured weights,
        then the whole column is shuffled.

        NOTE(review): the parameter shadows the enclosing function's
        ``total_count`` — harmless here, but easy to misread.

        :param total_count: number of rows to generate
        :param obj_in: question config object with weighted options
        :return: shuffled values as a pandas Series
        :raises APIException: when the option weights sum to zero
        """
        # out_obj = GenerateObj(**obj)
        title = obj_in.title  # question title
        cur_options = obj_in.options  # option configs
        pers = list(map(lambda co: GenerateObj.conver_json_2_obj(co), cur_options))
        total_weight = 0
        for j in pers:
            total_weight += j.rate
        if total_weight <= 0:
            raise APIException(message="生成失败，" + title + "，此题总比重为0")
        m = dict()  # option value -> weight ratio
        can_use_nums = []
        can_use_num_weights = []
        for j in range(len(pers)):
            inner_obj = pers[j]
            m[inner_obj.value] = inner_obj.rate / total_weight
            can_use_nums.append(inner_obj.value)
            can_use_num_weights.append(inner_obj.rate)
        # generate the values according to the ratios
        res = []
        cur_total = 0
        for i in range(len(pers)):
            cur_value = pers[i].value
            need_add_count = int(total_count * m[cur_value])
            if i == len(m) - 1:
                need_add_count = total_count - cur_total  # last option takes the rounding remainder
            if need_add_count > 0:
                cur_total += need_add_count
                if (i == len(m) - 1) and m[cur_value] == 0:  # zero-weight remainder: draw from the weighted pool instead
                    cur_value = random.choices(can_use_nums, weights=can_use_num_weights, k=need_add_count)
                    for res_item in cur_value:
                        res.append(res_item)
                else:
                    res += [cur_value] * need_add_count
        my_array = np.array(res)
        np_res = np.random.choice(my_array, size=len(my_array), replace=False)  # shuffle without replacement
        return pd.Series(np_res)

    def build_text_datas(obj_in: GenerateObj, total_count_in: int) -> np.array:
        """
        Build answers for a fill-in-the-blank question.

        Delegates entirely to valid_text_content, which validates the configured
        text pool against the required count / dedup / required-answer flags.

        :param obj_in: configuration object for this question
        :param total_count_in: number of rows to generate
        :return: array of text answers, length == total_count_in
        """
        return valid_text_content(
            obj_in.text,       # configured candidate texts
            obj_in.is_repeat,  # deduplicate flag
            obj_in.req,        # required-question flag (== 1 means required)
            obj_in.title_id,   # question id, used in validation errors
            total_count_in,
        )

    def build_rate_datas(obj_in: GenerateObj, total_count_in: int) -> np.array:
        """
        Build answers for a rate/weight question (v2).

        Every row is one combination of per-option values; get_rate_valid_data
        enumerates the combinations that satisfy the constraint, and rows are
        sampled (with replacement) from that pool.

        :param obj_in: configuration object for this question
        :param total_count_in: number of rows to generate
        :return: np.array of shape (total_count_in, option_count)
        :raises APIException: when the configured min/max bounds cannot reach 100
                              or no valid combination exists
        """
        options = obj_in.options  # all child options of this question
        min_sum = 0
        max_max_sum = 0
        name_dict = dict()  # option index -> title (kept for parity with siblings)
        arrs = []  # candidate value range per option
        for index in range(len(options)):
            cur_in_obj = GenerateObj.conver_json_2_obj(options[index])
            name_dict[index] = cur_in_obj.title
            cur_min = int(cur_in_obj.min_oper_num)
            cur_max = int(cur_in_obj.max_oper_num)
            # NOTE(review): np.arange excludes cur_max, so candidates span
            # [cur_min, cur_max) — confirm whether the max should be inclusive.
            arrs.append(list(np.arange(cur_min, cur_max)))
            min_sum += cur_min
            max_max_sum += cur_max
        if min_sum > 100:
            raise APIException(message="最小值的和不能大于100")
        if max_max_sum < 100:
            # Fix: the old message claimed the maxima "must not exceed 100", but
            # the condition actually requires their sum to be at least 100.
            raise APIException(message="最大值的和不能小于100")

        # Enumerate all valid combinations, then sample the requested row count.
        valid_arrays = get_rate_valid_data(arrs)
        if not valid_arrays:
            raise APIException(message="没有可选的比重，请查看比重配置是否正确")
        res_arr = random.choices(valid_arrays, k=total_count_in)
        return np.array(res_arr)

    def build_sorted_datas(obj_in: GenerateObj, total_count: int) -> np.array:
        """
        Build answers for a ranking question.

        Each row assigns a rank (1..N) to every option; positions beyond the
        row's chosen rank length are marked -2 (unranked). Rows are generated in
        per-option "buckets" sized by the option weights, and within a bucket
        the bucket's option is pinned to rank 1 while the remaining ranks are
        shuffled.

        :param total_count: number of rows to generate
        :param obj_in: configuration object for this question
        :return: np.array of shape (<= total_count, option_count)
        :raises APIException: when the summed option weight is not positive
        """
        # out_obj = GenerateObj(**obj)
        title = obj_in.title
        in_options = obj_in.options  # per-option weights
        min_oper_num = obj_in.min_oper_num  # minimum number of options to rank
        min_oper_num = int(min_oper_num) if min_oper_num else 1
        max_oper_num = obj_in.max_oper_num  # maximum number of options to rank
        max_oper_num = int(max_oper_num) if max_oper_num else len(in_options)
        pers = list(map(lambda co: GenerateObj.conver_json_2_obj(co), in_options))
        # name_dict = dict()
        total_weight = 0
        for j in range(len(pers)):
            total_weight += pers[j].rate
        #     name_dict[j] = title + '.' + pers[j].name
        if total_weight <= 0:
            raise APIException(message="生成失败，" + title + "，此题总比重为0")
        ratios = []  # each option's share of the total weight
        for j in range(len(pers)):
            ratios.append(pers[j].rate / total_weight)
        ew_arr = []  # accumulated rows
        option_length = len(pers)
        nums = [total_count * cur_i for cur_i in ratios]  # weighted row count per bucket (float)
        calc_total_count = total_count  # hard cap: never emit more than total_count rows
        for i in range(len(nums)):
            # NOTE(review): the rank length is drawn once per bucket, so every
            # row in bucket i shares the same length — confirm that is intended.
            max_length = random.randint(min_oper_num, max_oper_num)
            count = nums[i]  # float; the while loop effectively rounds it up
            while count > 0 and calc_total_count > 0:
                calc_total_count -= 1
                arr = np.arange(option_length + 1)[1:]  # ranks 1..option_length
                for index in range(len(arr)):
                    if index > max_length - 1:
                        arr[index] = -2  # beyond this row's rank length: unranked
                # swap so position i holds rank 1 for this bucket's rows
                tmp = arr[0]
                arr[0] = arr[i]
                arr[i] = tmp

                fixed_index = i

                fixed_value = arr[fixed_index]  # remember the pinned value
                arr = np.delete(arr, fixed_index)  # remove it before shuffling

                np.random.shuffle(arr)  # shuffle the remaining ranks

                arr = np.insert(arr, fixed_index, fixed_value)  # reinsert the pinned value
                count -= 1
                ew_arr.append(arr)
        sorted_res_df = pd.DataFrame(ew_arr)
        # sorted_res_df = sorted_res_df.rename(columns=name_dict)
        sorted_res_df = sorted_res_df.sample(frac=1, ignore_index=True)  # shuffle rows
        return sorted_res_df.values

    def build_multiple_datas(obj_in: GenerateObj, total_count: int) -> (np.array, np.array):
        """
        Build answers for a multiple-choice question.

        :param obj_in: configuration object for this question
        :param total_count: number of rows to generate
        :return: tuple (full DataFrame where fill-in cells read "1^text",
                 value-only DataFrame containing plain 0/1)
        :raises APIException: when the configured rates cannot satisfy the
                              min/max selections-per-row constraint
        """
        title_id = obj_in.title_id
        options = obj_in.options
        req = obj_in.req  # required-question flag, forwarded to text validation
        option_objs = list(map(lambda co: GenerateObj.conver_json_2_obj(co), options))
        d = dict()  # column index -> remaining number of rows that must pick it
        total_1_count = 0
        for option_index in range(len(option_objs)):
            item = option_objs[option_index]
            # NOTE(review): rate is interpreted on a 0-1000 scale here — confirm.
            d_count = int(item.rate / 1000 * total_count)
            total_1_count += d_count
            d[option_index] = d_count
        min_oper_num = obj_in.min_oper_num  # minimum selections per row
        max_oper_num = obj_in.max_oper_num or len(option_objs)  # maximum selections per row
        # min == max means every row needs exactly that many 1s: shift the
        # surplus/deficit onto the first option that can absorb it.
        if min_oper_num == max_oper_num:
            real_count = min_oper_num * total_count
            need_deal_count = real_count - total_1_count
            if need_deal_count > 0:
                for k, v in d.items():
                    if v > 0:
                        d[k] = v + need_deal_count
                        total_1_count += need_deal_count
                        break
            elif need_deal_count < 0:
                for k, v in d.items():
                    if v > 0 and v > abs(need_deal_count):
                        d[k] = v + need_deal_count
                        total_1_count += need_deal_count
                        break
        if total_1_count < min_oper_num * total_count or total_1_count > max_oper_num * total_count:
            msg = '第' + str(title_id) + '题配置有误，最少选择' + str(min_oper_num * total_count) + '，最多选择' + str(
                max_oper_num * total_count) + '，当前需要生成' + str(total_1_count)
            raise APIException(message=msg)
        name_dict = dict()  # column index -> column title
        for cur_j in range(len(option_objs)):
            name_dict[cur_j] = option_objs[cur_j].title
        option_counts = len(option_objs)  # number of options/columns
        # (Fix: removed a leftover `for ...: pass` no-op loop here.)
        # 1. Start from an all-zero matrix; on each pass raise every row up to
        #    `num` selections, spending the per-column budgets in `d`.
        array = [[0] * option_counts for _ in range(total_count)]
        for num in range(min_oper_num, max_oper_num + 1):
            for row_index in range(len(array)):  # every row
                row = array[row_index]
                for col_index in range(len(row)):  # every column
                    count_ones = row.count(1)
                    if count_ones < num:  # row is still below this pass's quota
                        if row[col_index] == 1:  # already selected, skip
                            continue
                        # consume one remaining 1 from this column's budget
                        if d[col_index] > 0:
                            d[col_index] -= 1
                            row[col_index] = 1
                array[row_index] = row
        np_array = np.array(array).astype(int)
        multiple_res_df = pd.DataFrame(np_array)
        multiple_res_df = multiple_res_df.rename(columns=name_dict)
        multiple_res_df = multiple_res_df.sample(frac=1, ignore_index=True)  # shuffle rows
        # Fix: take a real copy — the old `ovr = multiple_res_df` aliased the
        # DataFrame, so the text appended below leaked into the supposedly
        # value-only result.
        ovr = multiple_res_df.copy()
        # Append fill-in text ("1^text") to every selected cell of options that
        # carry a text field.
        for option in option_objs:
            if option.is_text:
                title = option.title
                column_data = multiple_res_df[title]
                total_count_in = column_data.eq(1).sum()  # how many cells need text
                text_arr = valid_text_content(option.text, option.is_repeat, req, title_id, total_count_in)
                column_data_list = column_data.tolist()
                # positions of the 1s in this column
                indices = [i for i, val in enumerate(column_data_list) if val == 1]
                # keep the text count aligned with the number of 1s
                random_strings = text_arr[:len(indices)]
                for i, string in zip(indices, random_strings):
                    column_data[i] = str(column_data_list[i]) + '^' + string
                multiple_res_df[title] = column_data
        return multiple_res_df, ovr

    def build_single_datas(obj_in: GenerateObj, total_count: int) -> (pd.Series, pd.Series):
        """
        Build answers for a single-choice question.

        :param obj_in: configuration object for this question
        :param total_count: number of rows to generate
        :return: tuple (answers where fill-in cells read "v^text",
                 value-only answers)
        :raises APIException: when the summed option weight is not positive
        """
        title_id = obj_in.title_id  # question id, forwarded to text validation
        req = obj_in.req  # required-question flag
        option_objs = list(map(lambda co: GenerateObj.conver_json_2_obj(co), obj_in.options))
        total_weight = 0
        for j in option_objs:
            total_weight += int(j.rate)
        if total_weight <= 0:
            raise APIException(message="生成失败，" + obj_in.title + "，此题总比重为0")
        m = dict()  # option value -> share of total_count
        can_use_nums = []  # all option values, for the zero-ratio fallback
        can_use_num_weights = []  # matching weights for the fallback draw
        option_obj_dict = dict()  # option value -> its index (kept for parity)
        for j in range(len(option_objs)):
            inner_obj = option_objs[j]
            m[inner_obj.value] = int(inner_obj.rate) / total_weight
            option_obj_dict[inner_obj.value] = j
            can_use_nums.append(inner_obj.value)
            can_use_num_weights.append(inner_obj.rate)
        # Generate the raw (unshuffled) answers.
        res = []
        cur_total = 0
        # Fix: the last-option check previously used len(m) - 1; m collapses
        # duplicate option values, which would shift the remainder bucket.
        last_index = len(option_objs) - 1
        for i in range(len(option_objs)):
            inner_obj = option_objs[i]
            cur_value = inner_obj.value  # this option's answer value
            need_add_count = int(total_count * m[cur_value])
            if i == last_index:
                # last option absorbs the integer-truncation remainder
                need_add_count = total_count - cur_total
            if need_add_count > 0:
                cur_total += need_add_count
                if i == last_index and m[cur_value] == 0:
                    # ratio 0 must not be generated: substitute a weighted draw
                    cur_value = random.choices(can_use_nums, weights=can_use_num_weights, k=1)[0]
                res += [cur_value] * need_add_count
        my_array = np.array(res)
        # sampling without replacement over the whole array == a shuffle
        random_order = np.random.choice(my_array, size=len(my_array), replace=False)
        ovr = random_order  # value-only answers; tolist() below copies, so safe
        column_data_list = random_order.tolist()
        # Append fill-in text ("v^text") wherever an is_text option was chosen.
        for option in option_objs:
            if option.is_text:
                # how many rows chose this option (and therefore need text)
                total_count_in = np.count_nonzero(my_array == option.value)
                text_arr = valid_text_content(option.text, option.is_repeat, req, title_id, total_count_in)
                # positions of this option's value in the shuffled answers
                indices = [i for i, val in enumerate(column_data_list) if val == option.value]
                # keep the text count aligned with the number of matches
                random_strings = text_arr[:len(indices)]
                for i, string in zip(indices, random_strings):
                    column_data_list[i] = str(column_data_list[i]) + '^' + string
        return pd.Series(column_data_list), pd.Series(ovr)

    def build_slide_datas(obj_in: GenerateObj, total_count_in: int) -> np.array:
        """
        Build answers for a (matrix) slider question: one uniform random integer
        column per child row, within that child's [min, max] range.

        :param obj_in: configuration object for this question
        :param total_count_in: number of rows to generate
        :return: np.array of shape (total_count_in, child_count)
        :raises APIException: when a child's minimum exceeds its maximum
        """
        children = obj_in.children
        in_df = pd.DataFrame()
        for i in children:
            child_option = GenerateObj.conver_json_2_obj(i)
            min_oper_num = int(child_option.min_oper_num)  # lower bound (inclusive)
            max_oper_num = int(child_option.max_oper_num)  # upper bound (inclusive)
            name = child_option.title + '.' + child_option.name  # column name
            # clamp to the slider's valid 0-100 range
            min_oper_num = 0 if min_oper_num < 0 else min_oper_num
            max_oper_num = max_oper_num if max_oper_num < 100 else 100
            # Fix (was a TODO): reject an inverted range explicitly instead of
            # letting np.random.randint raise an opaque ValueError.
            if min_oper_num > max_oper_num:
                raise APIException(message="生成失败，" + name + "，最小值不能大于最大值")
            # randint's upper bound is exclusive, hence the +1 for an inclusive max
            np_arr = np.random.randint(min_oper_num, max_oper_num + 1, size=total_count_in)
            child_df = pd.DataFrame({name: np_arr})
            if in_df.empty:
                in_df = child_df
            else:
                in_df = in_df.join(child_df)
        return in_df.values

    def build_single_slide_datas(obj_in: GenerateObj, total_count_in: int) -> np.array:
        """
        Build answers for a single-slider question.

        Every option of every child becomes one column of uniform random
        integers drawn from that option's inclusive [min, max] range.

        :param obj_in: configuration object for this question
        :param total_count_in: number of rows to generate
        :return: np.array of shape (total_count_in, option_count)
        """
        result_df = pd.DataFrame()
        for raw_child in obj_in.children:
            child_obj = GenerateObj.conver_json_2_obj(raw_child)
            for raw_option in child_obj.options:
                option = GenerateObj.conver_json_2_obj(raw_option)
                low = int(option.min_oper_num)   # lower bound (inclusive)
                high = int(option.max_oper_num)  # upper bound (inclusive)
                # randint's upper bound is exclusive, hence the +1
                values = np.random.randint(low, high + 1, size=total_count_in)
                col_df = pd.DataFrame({option.title: values})
                result_df = col_df if result_df.empty else result_df.join(col_df)
        return result_df.values

    # Dispatch on the question type and build its answer columns.
    cur_type = obj.sys_type  # this question's type code (SysTypeEnum value)
    res = np.array([])  # full answers (an option needing fill-in yields e.g. "1^text")
    only_value_result = np.array([])  # value-only answers (the same option yields just "1")
    if cur_type == SysTypeEnum.SINGLE.value:  # single choice
        res, only_value_result = build_single_datas(obj, total_count)
    elif cur_type == SysTypeEnum.SINGLE_SELECT.value:  # single choice (dropdown)
        res = build_select_datas(obj, total_count)
    elif cur_type == SysTypeEnum.MULTIPLE.value:  # multiple choice
        cur_v, cur_o_v = build_multiple_datas(obj, total_count)
        res = cur_v.values
        only_value_result = cur_o_v.values
    elif cur_type == SysTypeEnum.SCALE.value:  # scale question (same builder as dropdown)
        res = build_select_datas(obj, total_count)
    elif cur_type == SysTypeEnum.MATRIX.value:  # matrix question
        res = build_matrix_datas(obj, total_count)
    elif cur_type == SysTypeEnum.SINGLE_SLIDE.value:  # single slider
        res = build_single_slide_datas(obj, total_count)
    elif cur_type == SysTypeEnum.SLIDE.value:  # (matrix) slider
        res = build_slide_datas(obj, total_count)
    elif cur_type == SysTypeEnum.SORTED.value:  # ranking question
        res = build_sorted_datas(obj, total_count)
    elif cur_type == SysTypeEnum.RATE.value:  # rate/weight question
        res = build_rate_datas(obj, total_count)
    elif cur_type == SysTypeEnum.TEXT.value:  # fill-in-the-blank
        res = build_text_datas(obj, total_count)
    elif cur_type == SysTypeEnum.AREA.value:  # region question (handled as text)
        res = build_text_datas(obj, total_count)
    elif cur_type == SysTypeEnum.DATE.value:  # date question (handled as text)
        res = build_text_datas(obj, total_count)
    elif cur_type == SysTypeEnum.MULTIPLE_TEXT.value:  # multi-blank fill-in
        res = build_multiple_text(obj, total_count)
    elif cur_type == SysTypeEnum.MULTIPLE_MATRIX.value:  # matrix multiple choice
        res, only_value_result = build_multiple_matrix(obj, total_count)
    elif cur_type == SysTypeEnum.MATRIX_TEXT.value:  # matrix fill-in
        res = build_matrix_text(obj, total_count)
    if only_value_result.shape == (0,):  # empty array: no separate fill-in answers exist
        only_value_result = res
    return res, only_value_result


def search_programme_from_redis(wo_id: int, load_type: str, link: str):
    """
    Look up the weight configuration for a work order.

    :param wo_id: work order id
    :param load_type: a LinkLoadTypeEnum value; RELOAD forces re-parsing the link
    :param link: the (possibly updated) questionnaire link
    :return: dict with 'data_type' (a DataCacheEnum value) and 'data' (the weights)
    :raises APIException: when the lookup or the link parsing fails
    """
    res = dict()
    res['data_type'] = DataCacheEnum.CACHE.value
    res['data'] = None
    try:
        if LinkLoadTypeEnum.RELOAD.value == load_type:
            # 1. persist the new link on the work order
            work_order_dao.modify_work_order_link(wo_id, link)
            # 2. drop every historical configuration
            strategy_dao.delete_by_word_order_id(wo_id)
            # 3. re-parse the link
            explained_res = explain_service.explain_link_v3(wo_id)
            res['data_type'] = DataCacheEnum.EXPLAINED.value
            res['data'] = explained_res.get('list')
            return res
        else:
            # Redis caching intentionally disabled for now; DB performance suffices.
            strategy_weight = strategy_dao.search_strategy_weight_by_work_order_id(wo_id)
            if strategy_weight:  # a stored configuration exists in the DB
                cur_config = strategy_weight[0]
                if cur_config:
                    res['data'] = cur_config
                    return res
            # Otherwise fall back to a previous parse result (no cache write).
            res['data_type'] = DataCacheEnum.EXPLAINED.value
            woe_res = work_order_extend_dao.search_by_work_order_id(wo_id)
            if woe_res and woe_res.title_maps:
                # the link was parsed before: reuse the stored result
                res['data'] = woe_res.title_maps
            else:
                # never parsed: parse it now
                explained_res = explain_service.explain_link_v3(wo_id)
                res['data'] = explained_res.get('list')
    except Exception as e:
        # Fix: removed a dead `pass` before the raise, and log the full traceback.
        app.logger.error(traceback.format_exc())
        # Fix: e.__dict__.get('message') is None for built-in exceptions, which
        # produced "……：None"; fall back to str(e) for a meaningful message.
        detail = getattr(e, 'message', None) or str(e)
        raise APIException(message="查询权重失败/解析链接失败：" + str(detail)) from e
    return res


def change_work_order_size(work_order_id: int, size: int):
    """
    Change how many responses a work order should generate.

    :param work_order_id: id of the work order to update
    :param size: the new response count
    :return: a success message string
    """
    # Previously generated base data is invalidated by the resize, so purge it
    # first, then persist the new size on the work order.
    topic_dao.delete_result_by_word_order_id(work_order_id)
    work_order_dao.modify_work_order_size(work_order_id, size)
    return "修改成功"


def preview_link(link: str):
    """
    Parse a questionnaire link and preview its question structure.

    :param link: the questionnaire link to parse
    :return: dict with 'list' (parsed question configs), 'data_index' (column
             titles) and 'total_data_index' (column count)
    :raises APIException: when parsing fails
    """
    try:
        title_maps, titles = explain_service.explain_link_v3_by_link(link)
        res = {'list': title_maps, 'data_index': titles, 'total_data_index': len(titles)}
    except Exception as e:
        app.logger.error(e)
        # Fix: e.__dict__.get('message') is None for built-in exceptions, which
        # produced "……：None"; fall back to str(e) for a meaningful message.
        detail = getattr(e, 'message', None) or str(e)
        raise APIException(message="查询权重失败/解析链接失败：" + str(detail)) from e
    return res


def save_programme_to_redis(wo_id, inner_data):
    """
    Cache a work order's weight configuration in Redis.

    The entry is keyed by REDIS_PROGRAMME_PREFIX + work-order id and expires
    after one day.

    :param wo_id: work order id
    :param inner_data: the weight configuration to cache (JSON-serializable)
    :raises APIException: when the Redis write fails
    """
    try:
        cache_key = REDIS_PROGRAMME_PREFIX + str(wo_id)
        one_day = 60 * 60 * 24
        Redis.write(key=cache_key, value=json.dumps(inner_data), expire=one_day)
    except Exception as e:
        app.logger.error(e)
        raise APIException(message="保存权重失败")
    return


def save_programme_weight(wo_id, inner_data):
    """
    Persist a work order's weight configuration (insert-or-update).

    :param wo_id: work order id
    :param inner_data: the weight configuration to store
    :raises APIException: when the database operation fails
    """
    try:
        existing = strategy_dao.search_by_work_order_id(wo_id)
        if existing is not None:
            # a record already exists: update it in place
            strategy_dao.modify_by_work_order_id(wo_id=wo_id, strategy_weight_res=inner_data)
        else:
            # first save for this work order: insert a new strategy row
            strategy_dao.batch_insert([ut.Strategy(work_order_id=wo_id, strategy_weight=inner_data)])
    except Exception as e:
        app.logger.error(e)
        raise APIException(message="保存权重失败")
    return