import json
import random
import time

from exception.api_exception import APIException
from dao import work_order_dao, topic_dao, relation_dao, relation_extend_dao, strategy_dao, work_order_extend_dao, \
    user_dao, shop_dao
import utils.wjx.wjx_explain as wjx_explain
import utils.make_data.make_relation_data as make_relation_data
from utils.analysis import validity_analysis, reliability_analysis, correlation_analysis, regressive_analysis, \
    frequency_analysis
from flask_jwt_extended import (
    create_access_token, create_refresh_token, jwt_required, get_jwt_identity, get_jwt
)
import traceback
import model.model as ut
import numpy as np
import pandas as pd
import view.submit as v_submit
import utils.wjx.wjx_explain_v2 as wjx_explain_v2
import openpyxl
import os, io
from enums.sys_type_enum import SysTypeEnum
from enums.link_load_type import LinkLoadTypeEnum
from utils.common.redis_client import Redis
from enums.relation_process_type import RelationProcessType
import requests

from utils.credamo.credamo_explain import CredamoExplain
from utils.gg.gg_explain import GgExplain
from utils.qualtrics.qualtrics_explain import QualtricsExplain
from utils.wjw.wjw_explain import WjwExplain
from utils.wjx.common_obj import ItemProgrammeObject
import copy
from business_common.redis_prefix import REDIS_EXPLAIN_PREFIX, REDIS_WORK_ORDER_INFO_PREFIX, REDIS_PROGRAMME_PREFIX
import service.explain_service as explain_service
from utils.tx.tx_explain import TxExplain


def register(obj: v_submit.RegisterView):
    """
    Register a new account.

    :param obj: registration form with user_name_zh, user_name and password
    :return: None
    :raises APIException: 401 when the user name is already taken
    """
    existing = user_dao.query_user_by_user_name(obj.user_name)
    if existing:
        raise APIException(message="用户名已存在", code=401)
    user_dao.insert_user(obj.user_name_zh, obj.user_name, obj.password)

def modifyPwd(obj: v_submit.ModifyPwdView):
    """
    Change a user's password after verifying the old one.

    :param obj: form with user_name, old_pwd and new_pwd
    :return: None
    :raises APIException: 401 when the user is unknown or the old password is wrong
    """
    account = user_dao.query_user_by_user_name(obj.user_name)
    # De Morgan of the original check; short-circuit behavior is identical.
    if not (account and account.verify_password(obj.old_pwd)):
        raise APIException(message="用户名或密码不正确", code=401)
    user_dao.modify_pwd(obj.user_name, obj.new_pwd)


def login(obj: v_submit.UserView):
    """
    Log a user in and issue a JWT access token.

    :param obj: login form with user_name and password
    :return: dict with user_id, Bearer token, user_name and name_zh
    :raises APIException: 401 on bad credentials, 500 when token creation fails
    """
    user = user_dao.query_user_by_user_name(obj.user_name)
    if (not user) or (not user.verify_password(obj.password)):
        raise APIException(message="用户名或密码不正确", code=401)
    try:
        identity = {'user_name': user.user_name, 'id': user.id}
        access_token = create_access_token(identity=identity)
    except Exception as exc:
        # Pass message= as a keyword for consistency with the other call sites,
        # and keep the original cause chained instead of discarding it.
        raise APIException(message="获取token异常", code=500) from exc
    return {'user_id': user.id, 'token': 'Bearer ' + access_token, 'user_name': user.user_name, 'name_zh': user.name_zh}


def build_datas_v2(obj: json, final_df: pd.DataFrame, total_count: int):
    """
    处理传入数据中的每一个对象
    :param obj: 当前对象的所有属性
    :param final_df: 最终的DataFrame
    :param total_count: 总数量
    :return:
    """

    def get_rate_valid_data(arrays: [[]]):
        """
        Enumerate every combination that picks one value per inner array and
        sums to exactly 100 (used for weight/ratio questions).

        :param arrays: list of candidate value lists, one list per option
        :return: list of valid combinations (each a list, one value per option)
        """

        def backtrack(arrays, index, current_combination, combinations):
            # All positions filled: keep the combination only when it sums to 100.
            if index == len(arrays):
                if sum(current_combination) == 100:
                    combinations.append(current_combination.copy())
                return

            for value in arrays[index]:
                current_combination[index] = value
                # Prune: candidate values are ascending, so once the partial sum
                # exceeds 100 every later value in this slot would too.
                if sum(current_combination[:index + 1]) > 100:
                    break
                backtrack(arrays, index + 1, current_combination, combinations)

        # Removed a leftover debug print and a dead `index > len(arrays)` guard
        # (index never exceeds len(arrays) in this recursion).
        combinations = []
        backtrack(arrays, 0, [0] * len(arrays), combinations)
        return combinations

    def valid_text_content(cur_text, is_repeat, req, title_id, total_count_in) -> []:
        """
        Validate and expand a fill-in question's text into one answer per row.

        :param cur_text: raw text, one candidate answer per line
        :param is_repeat: True when answers must be unique (no re-use)
        :param req: '1' when the question is required
        :param title_id: question id, used in error messages
        :param total_count_in: number of answers to produce
        :return: list of total_count_in answer strings
        :raises APIException: when a required question has no usable answers or
            a unique-answer question has the wrong number of answers
        """
        # A required unique-answer question must have some text at all.
        if '1' == req and not cur_text and is_repeat:
            raise APIException(message="第" + str(title_id) + "题为必选题，答案为空")
        origin_texts = cur_text.split("\n")  # one candidate answer per line
        texts = [x for x in origin_texts if x.strip() != ""]  # drop blank/whitespace-only lines
        if '1' == req and not texts and is_repeat:
            raise APIException(message="第" + str(title_id) + "题为必选题，答案为空")
        text_arr = []
        if '1' == req and is_repeat:  # required + unique: need exactly one answer per row
            if total_count_in != len(texts):
                msg = ("第" + str(title_id) + "题，答案个数与总份数不同，输入" + str(len(texts))
                       + "个答案，需要" + str(total_count_in) + "个答案")
                # Fixed: was `len(origin_texts) != texts` (int vs list, always
                # True); only hint at whitespace when lines were really filtered.
                if len(origin_texts) != len(texts):
                    msg += "，可能是空格，Tab或回车造成"
                raise APIException(message=msg)
            text_arr = texts
        elif '1' == req and not is_repeat:  # required + repeats allowed: sample with replacement
            text_arr = random.choices(texts, k=total_count_in) if texts else ["无"] * total_count_in
        elif '1' != req and is_repeat:  # optional + unique: use each answer once, pad with "无"
            if not texts:
                text_arr = ["无"] * total_count_in
            else:
                cur_for_i = 0
                while cur_for_i < total_count_in:
                    if cur_for_i < len(texts):
                        text_arr.append(texts[cur_for_i])
                    else:
                        text_arr.append('无')
                    cur_for_i += 1
        else:  # optional + repeats allowed: sample with replacement
            text_arr = random.choices(texts, k=total_count_in) if texts else ["无"] * total_count_in
        return text_arr

    def build_matrix_text(obj_in: json, total_count_in: int) -> pd.DataFrame:
        """
        Matrix fill-in question — NOT IMPLEMENTED YET.

        Parses the question object but generates no data; callers receive an
        empty DataFrame. (Removed a dead `for ... pass` loop and an unreachable
        trailing `pass`.)

        :param obj_in: raw question dict
        :param total_count_in: number of rows that would be generated
        :return: empty DataFrame (placeholder)
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj_in)
        children = out_obj.children  # parsed for future use; no data generated yet
        res_df = pd.DataFrame()
        return res_df

    def build_multiple_matrix(obj_in: json, total_count_in: int) -> pd.DataFrame:
        """
        Matrix multiple-choice: generate one multiple-choice block per
        sub-question and join the per-child frames side by side.

        :param obj_in: raw question dict with a `children` list of sub-questions
        :param total_count_in: number of rows to generate
        :return: DataFrame with one column group per child
        """
        parsed = wjx_explain_v2.ItemWjxOb(**obj_in)
        res_df = pd.DataFrame()
        for sub_question in parsed.children:
            child_df = build_multiple_datas_v2(sub_question, total_count_in)
            if child_df is None:
                continue
            res_df = child_df if res_df.empty else res_df.join(child_df)
        return res_df

    def build_multiple_text(obj_in: json, total_count_in: int) -> pd.DataFrame:
        """
        Generate data for a multi-part fill-in question: one column per child
        blank, each validated/expanded by valid_text_content and shuffled.
        (Removed a superseded commented-out implementation.)

        :param obj_in: raw question dict with a `children` list of blanks
        :param total_count_in: number of rows to generate
        :return: DataFrame, one column per blank named "<title>.<child title>"
        :raises APIException: propagated from valid_text_content on bad config
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj_in)
        children = out_obj.children  # all blanks of this question
        cur_req = out_obj.req  # whether the question is required
        title_id = out_obj.title_id
        children = list(map(lambda co: wjx_explain_v2.ItemWjxOb(**co), children))
        parent_title = out_obj.title  # question title
        df = pd.DataFrame()
        for child in children:
            title = child.title  # blank title
            is_repeat = child.is_repeat  # whether answers must be unique
            name = parent_title + '.' + title
            cur_text = child.text  # candidate answers for this blank
            text_arr = valid_text_content(cur_text, is_repeat, cur_req, title_id, total_count_in)
            inner_df = pd.DataFrame({name: text_arr})
            inner_df = inner_df.sample(frac=1, ignore_index=True)  # shuffle this column's rows
            if df.empty:
                df = inner_df
            else:
                df = df.join(inner_df)
        return df

    def build_matrix_datas(obj_in: json, total_count_in: int) -> pd.DataFrame:
        """
        Generate data for a matrix single-choice question: one column per matrix
        row, values drawn according to each option's weight.

        Fix: the body previously read the enclosing scope's `total_count`
        instead of this function's `total_count_in` parameter (the old
        commented-out version used the parameter). Dead commented code removed.

        :param obj_in: raw question dict with a `children` list of matrix rows
        :param total_count_in: number of rows (responses) to generate
        :return: DataFrame with one shuffled column per matrix row
        :raises APIException: when a row's weights sum to zero
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj_in)
        children = out_obj.children  # matrix rows of this question
        children = list(map(lambda co: wjx_explain_v2.ItemWjxOb(**co), children))
        parent_title = out_obj.title  # question title
        matrix_df = pd.DataFrame()
        for i in children:
            title = i.title + i.name  # row title
            cur_options = i.options  # options of this row
            pers = list(map(lambda co: wjx_explain_v2.ItemWjxOb(**co), cur_options))
            total_weight = 0
            for j in pers:
                total_weight += j.rate
            if total_weight <= 0:
                raise APIException(message="生成失败，" + title + "，此题总比重为0")
            m = dict()  # option value -> share of rows it should receive
            can_use_nums = []
            can_use_num_weights = []
            for j in range(len(pers)):
                inner_obj = pers[j]
                m[inner_obj.value] = inner_obj.rate / total_weight
                can_use_nums.append(inner_obj.value)
                can_use_num_weights.append(inner_obj.rate)
            # Generate this column's values.
            child_res = []
            cur_total = 0
            for j in range(len(cur_options)):
                cur_value = cur_options[j].get('value')
                need_add_count = int(total_count_in * m[cur_value])
                if j == len(m) - 1:
                    # Last option absorbs the rounding remainder.
                    need_add_count = total_count_in - cur_total
                if need_add_count > 0:
                    cur_total += need_add_count
                    if (j == len(m) - 1) and m[cur_value] == 0:
                        # Zero-weight last option: fill the remainder from the
                        # weighted pool instead of emitting a zero-weight value.
                        cur_value = random.choices(can_use_nums, weights=can_use_num_weights, k=need_add_count)
                        for res_item in cur_value:
                            child_res.append(res_item)
                    else:
                        child_res += [cur_value] * need_add_count
            name = parent_title + '.' + title
            inner_df = pd.DataFrame({name: child_res})
            inner_df = inner_df.sample(frac=1, ignore_index=True)  # shuffle this column
            if matrix_df.empty:
                matrix_df = inner_df
            else:
                matrix_df = matrix_df.join(inner_df)
        return matrix_df

    def build_select_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        Generate data for a dropdown (select) question.

        NOTE(review): the parameters shadow the enclosing build_datas_v2
        arguments of the same names; only the local copies are used here.

        :param total_count: number of rows to generate
        :param obj: raw question dict with weighted options
        :return: single-column DataFrame of option values, shuffled
        :raises APIException: when the option weights sum to zero
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj)
        title = out_obj.title  # question title (column name)
        cur_options = out_obj.options  # weighted options
        pers = list(map(lambda co: wjx_explain_v2.ItemWjxOb(**co), cur_options))
        total_weight = 0
        for j in pers:
            total_weight += j.rate
        if total_weight <= 0:
            raise APIException(message="生成失败，" + title + "，此题总比重为0")
        m = dict()  # option value -> ratio derived from its weight
        can_use_nums = []
        can_use_num_weights = []
        for j in range(len(pers)):
            inner_obj = pers[j]
            m[inner_obj.value] = inner_obj.rate / total_weight
            can_use_nums.append(inner_obj.value)
            can_use_num_weights.append(inner_obj.rate)
        # Generate the values.
        res = []
        cur_total = 0
        for i in range(len(pers)):
            cur_value = pers[i].value
            need_add_count = int(total_count * m[cur_value])
            if i == len(m) - 1:
                # Last option absorbs the rounding remainder.
                need_add_count = total_count - cur_total
            if need_add_count > 0:
                cur_total += need_add_count
                if (i == len(m) - 1) and m[cur_value] == 0:  # zero-weight last option: draw the remainder from the weighted pool instead
                    cur_value = random.choices(can_use_nums, weights=can_use_num_weights, k=need_add_count)
                    for res_item in cur_value:
                        res.append(res_item)
                else:
                    res += [cur_value] * need_add_count
        my_array = np.array(res)
        my_df = pd.DataFrame({title: my_array})
        # Shuffle by re-sampling all values without replacement.
        random_order = np.random.choice(my_array, size=len(my_array), replace=False)
        my_df[title] = random_order
        return my_df

    def build_date_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        生成填空题数据-日期
        :param total_count: 生成的总行数
        :param obj: 当前的需要生成的比例对象
        :return: 返回当前生成数据的DataFrame
        """
        title = obj.get('title')  # 标题
        text_arr = []
        for i in range(total_count):
            text_arr.append("无")
        text_df = pd.DataFrame({title: text_arr})
        return text_df

    def build_area_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        生成填空题数据-区域
        :param total_count: 生成的总行数
        :param obj: 当前的需要生成的比例对象
        :return: 返回当前生成数据的DataFrame
        """
        title = obj.get('title')  # 标题
        text_arr = []
        for i in range(total_count):
            text_arr.append("无")
        text_df = pd.DataFrame({title: text_arr})
        return text_df

    def build_text_datas(obj: json, total_count_in: int) -> pd.DataFrame:
        """
        Generate data for a plain fill-in question. (Removed superseded
        commented-out field reads.)

        :param total_count_in: number of rows to generate
        :param obj: raw question dict
        :return: single-column DataFrame keyed by the question title
        :raises APIException: propagated from valid_text_content on bad config
        """
        item = wjx_explain_v2.ItemWjxOb(**obj)
        title_id = item.title_id  # question id for error messages
        title = item.title  # column name
        cur_text = item.text  # candidate answers, one per line
        req = item.req  # '1' when the question is required
        is_repeat = item.is_repeat  # True when answers must be unique
        text_arr = valid_text_content(cur_text, is_repeat, req, title_id, total_count_in)
        text_df = pd.DataFrame({title: text_arr})
        return text_df

    def build_rate_datas(obj_in: json, total_count: int) -> pd.DataFrame:
        """
        Generate weight-question data (v1): each option value is drawn
        independently and uniformly from its [min, max] range.

        NOTE(review): rows are NOT constrained to sum to 100 here; see
        build_rate_datas_v2 for the sum-constrained variant.

        :param total_count: number of rows to generate
        :param obj_in: raw question dict with per-option min/max bounds
        :return: DataFrame with one column per option
        :raises APIException: when the minimums alone already exceed 100
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj_in)
        options = out_obj.options  # all options of this question
        min_sum = 0
        name_dict = dict()  # column index -> option title
        for index in range(len(options)):
            i = options[index]
            cur_in_obj = wjx_explain_v2.ItemWjxOb(**i)
            name_dict[index] = cur_in_obj.title
            min_oper_num = int(cur_in_obj.min_oper_num)  # lower bound of this option
            # max_oper_num = i.get('max_oper_num')  # upper bound (re-read per row below)
            min_sum += min_oper_num
        if min_sum > 100:
            # TODO reject configs where the minimums cannot fit inside 100
            raise APIException(message="最小值的和不能大于100")

        res_arr = []
        for _ in range(total_count):
            item_arr = []
            for i in options:
                cur_in_obj = wjx_explain_v2.ItemWjxOb(**i)
                min_oper_num = int(cur_in_obj.min_oper_num)  # lower bound
                max_oper_num = int(cur_in_obj.max_oper_num)  # upper bound
                cur_int = random.randint(min_oper_num, max_oper_num)  # inclusive uniform draw
                item_arr.append(cur_int)
            res_arr.append(item_arr)
        rate_res_df = pd.DataFrame(res_arr)
        rate_res_df = rate_res_df.rename(columns=name_dict)
        return rate_res_df

    def build_rate_datas_v2(obj_in: json, total_count: int) -> pd.DataFrame:
        """
        Generate weight-question data (v2): every generated row is a
        combination of per-option values that sums to exactly 100.

        Fixes: the error message for an infeasible maximum previously said
        "不能大于100" although the check is `max_max_sum < 100`; and
        np.arange excluded each option's configured maximum, so 100 could be
        unreachable even when the feasibility check passed.

        :param total_count: number of rows to generate
        :param obj_in: raw question dict with per-option min/max bounds
        :return: DataFrame with one column per option
        :raises APIException: when the bounds make a sum of 100 impossible
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj_in)
        options = out_obj.options  # all options of this question
        min_sum = 0
        max_max_sum = 0
        name_dict = dict()  # column index -> option title
        arrs = []
        for index in range(len(options)):
            i = options[index]
            cur_in_obj = wjx_explain_v2.ItemWjxOb(**i)
            name_dict[index] = cur_in_obj.title
            cur_min = int(cur_in_obj.min_oper_num)
            cur_max = int(cur_in_obj.max_oper_num)
            # +1 so the configured maximum itself is selectable (np.arange is
            # exclusive of its stop value), matching the feasibility check below.
            item_arr = list(np.arange(cur_min, cur_max + 1))
            arrs.append(item_arr)
            min_sum += cur_min
            max_max_sum += cur_max
        if min_sum > 100:
            raise APIException(message="最小值的和不能大于100")
        if max_max_sum < 100:
            raise APIException(message="最大值的和不能小于100")

        # Enumerate every bound-respecting combination summing to 100, then
        # sample total_count rows from them (with replacement).
        valid_arrays = get_rate_valid_data(arrs)
        if not valid_arrays:
            raise APIException(message="没有可选的比重，请查看比重配置是否正确")
        res_arr = random.choices(valid_arrays, k=total_count)
        rate_res_df = pd.DataFrame(res_arr)
        rate_res_df = rate_res_df.rename(columns=name_dict)
        return rate_res_df

    def build_sorted_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        Generate ranking-question data: each row assigns ranks 1..N to the
        options, with positions beyond the row's pick limit marked as -2.

        :param total_count: number of rows to generate
        :param obj: raw question dict with weighted options and min/max pick counts
        :return: DataFrame with one column per option, rows shuffled
        :raises APIException: when the option weights sum to zero
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj)
        title = out_obj.title
        in_options = out_obj.options  # weight of each option
        min_oper_num = out_obj.min_oper_num  # minimum number of options to rank
        min_oper_num = int(min_oper_num) if min_oper_num else 1
        max_oper_num = out_obj.max_oper_num  # maximum number of options to rank
        max_oper_num = int(max_oper_num) if max_oper_num else len(in_options)
        pers = list(map(lambda co: wjx_explain_v2.ItemWjxOb(**co), in_options))
        # Column names and weight ratios.
        name_dict = dict()
        total_weight = 0
        for j in range(len(pers)):
            total_weight += pers[j].rate
            name_dict[j] = title + '.' + pers[j].name
        if total_weight <= 0:
            raise APIException(message="生成失败，" + title + "，此题总比重为0")
        ratios = []
        for j in range(len(pers)):
            ratios.append(pers[j].rate / total_weight)
        ew_arr = []
        option_length = len(pers)
        # Per-option row counts by weight (floats; the while loop below
        # effectively runs ceil(nums[i]) times).
        nums = [total_count * cur_i for cur_i in ratios]
        for i in range(len(nums)):
            # One pick-count drawn per option group — TODO confirm this is intended
            # rather than per generated row.
            max_length = random.randint(min_oper_num, max_oper_num)
            count = nums[i]
            while count > 0:
                arr = np.arange(option_length + 1)[1:]  # ranks 1..N
                for index in range(len(arr)):
                    if index > max_length - 1:
                        arr[index] = -2  # beyond the pick limit: unranked marker
                # Swap so option i carries rank 1 in this row group.
                tmp = arr[0]
                arr[0] = arr[i]
                arr[i] = tmp

                fixed_index = i

                fixed_value = arr[fixed_index]  # keep option i's rank fixed
                arr = np.delete(arr, fixed_index)  # remove it before shuffling

                np.random.shuffle(arr)  # shuffle the remaining ranks

                arr = np.insert(arr, fixed_index, fixed_value)  # re-insert the fixed rank
                count -= 1
                ew_arr.append(arr)
        sorted_res_df = pd.DataFrame(ew_arr)
        sorted_res_df = sorted_res_df.rename(columns=name_dict)
        sorted_res_df = sorted_res_df.sample(frac=1, ignore_index=True)
        return sorted_res_df

    def build_multiple_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        Generate multiple-choice data (v1): a 0/1 matrix with a random number
        of picks per row. Option weights are computed but currently unused —
        the weighted generator call is commented out below in favor of
        generate_array_v2, which ignores weights.

        :param total_count: number of rows to generate
        :param obj: raw question dict with weighted options
        :return: DataFrame with one 0/1 column per option
        :raises APIException: when the option weights sum to zero
        """

        def generate_array(m, n, x_min, x_max, col_ratio, cur_option_values):
            """
            Build an m x n 0/1 matrix whose columns roughly follow col_ratio.

            NOTE(review): currently unused (see the commented-out call below);
            kept for reference.

            :param m: row count
            :param n: column count
            :param x_min: minimum picks per row
            :param x_max: maximum picks per row
            :param col_ratio: target share of 1s per column
            :param cur_option_values: option values of this question
            :return: the generated 0/1 matrix
            """
            # Start from all zeros.
            arr = np.zeros((m, n), dtype=int)
            numbers = np.array(cur_option_values)

            # Per column, place the proportional number of 1s at random rows.
            for j in range(n):
                ones_count = int(m * col_ratio[j])
                ones_indices = np.random.choice(m, ones_count, replace=False)
                arr[ones_indices, j] = 1

            # Clamp each row's pick count toward [x_min, x_max].
            for i in range(m):
                cur_sum = arr[i, :].sum()
                if cur_sum > x_max:
                    ones_indices = np.where(arr[i, :] == 1)[0]
                    np.random.shuffle(ones_indices)
                    arr[i, ones_indices[x_max:]] = 0
                if cur_sum < x_min:
                    ones_indices = np.where(arr[i, :] == 0)[0]
                    np.random.shuffle(ones_indices)
                    arr[i, ones_indices[x_min:]] = 1
            return arr

        def generate_array_v2(rows, cols, ones_per_row_range):
            # For each row, pick a random number of 1s within the allowed range
            # and scatter them over random columns (weights are ignored).
            array = [[0] * cols for _ in range(rows)]
            for i in range(rows):
                ones_per_row = random.randint(ones_per_row_range[0], ones_per_row_range[1])
                indices = random.sample(range(cols), ones_per_row)
                for index in indices:
                    array[i][index] = 1
            return np.array(array)

        title = obj.get('title')  # question title
        cur_options = obj.get('options')  # weighted options
        min_oper_num = obj.get('min_oper_num')  # minimum picks per row
        min_oper_num = int(min_oper_num) if min_oper_num else 1
        max_oper_num = obj.get('max_oper_num')  # maximum picks per row
        max_oper_num = int(max_oper_num) if max_oper_num else len(cur_options)
        pers = list(map(lambda co: wjx_explain_v2.ItemWjxOb(**co), cur_options))
        name_dict = dict()
        total_weight = 0
        for cur_j in range(len(pers)):
            total_weight += pers[cur_j].rate
            # NOTE(review): overwrites the question title with the option title;
            # the error message below therefore shows the last option's title.
            title = pers[cur_j].title
            name_dict[cur_j] = title
        if total_weight <= 0:
            raise APIException(message="生成失败，" + title + "，此题总比重为0")
        ratios = []
        option_values = []
        for cur_j in pers:
            option_values.append(cur_j.value)
            ratios.append(cur_j.rate / total_weight)
        # np_array = generate_array(total_count, len(cur_options), min_oper_num, max_oper_num, ratios, option_values)
        np_array = generate_array_v2(total_count, len(cur_options), (min_oper_num, max_oper_num))
        np_array = np_array.astype(int)
        multiple_res_df = pd.DataFrame(np_array)
        multiple_res_df = multiple_res_df.rename(columns=name_dict)
        return multiple_res_df

    def build_multiple_datas_v2(obj: json, total_count: int) -> pd.DataFrame:
        """
        Generate multiple-choice data (v2): a 0/1 matrix where each column's
        number of 1s follows the option's configured rate, per-row pick counts
        are driven toward [min_oper_num, max_oper_num], and fill-in options get
        their text appended to picked cells as "1^text".

        :param total_count: number of rows to generate
        :param obj: raw question dict with weighted options
        :return: shuffled DataFrame with one column per option
        :raises APIException: when the configured rates cannot satisfy the
            per-row pick-count bounds, or from valid_text_content
        """

        cur_obj = wjx_explain_v2.ItemWjxOb(**obj)
        title_id = cur_obj.title_id
        options = cur_obj.options
        req = cur_obj.req
        option_objs = list(map(lambda co: wjx_explain_v2.ItemWjxOb(**co), options))
        d = dict()  # column index -> number of 1s still to place in that column
        total_1_count = 0
        for option_index in range(len(option_objs)):
            item = option_objs[option_index]
            # rate appears to be stored per-mille (hence / 1000) — TODO confirm
            d_count = int(item.rate / 1000 * total_count)
            total_1_count += d_count
            d[option_index] = d_count
        min_oper_num = cur_obj.min_oper_num  # minimum picks per row
        max_oper_num = cur_obj.max_oper_num or len(option_objs)  # maximum picks per row
        if total_1_count < min_oper_num * total_count or total_1_count > max_oper_num * total_count:
            msg = '第' + str(title_id) + '题配置有误，最少选择' + str(min_oper_num * total_count) + '，最多选择' + str(
                max_oper_num * total_count) + '，当前需要生成' + str(total_1_count)
            raise APIException(message=msg)
        name_dict = dict()  # column index -> option title
        for cur_j in range(len(option_objs)):
            title = option_objs[cur_j].title
            name_dict[cur_j] = title
        option_counts = len(option_objs)  # number of options
        for cur_j in range(option_counts):  # NOTE(review): no-op loop, does nothing
            pass
        # 1. Start from an all-zero matrix and place the per-column 1s,
        #    raising every row toward the pick target `num` pass by pass.
        array = [[0] * option_counts for _ in range(total_count)]
        for num in range(min_oper_num, max_oper_num + 1):
            for row_index in range(len(array)):  # every row
                row = array[row_index]
                for col_index in range(len(row)):  # every column
                    count_ones = row.count(1)  # 1s already placed in this row
                    if count_ones < num:  # row still below the current pick target
                        element = row[col_index]
                        if element == 1:  # cell already set, skip it
                            continue
                        # If this column still has 1s left to place, use one here.
                        if d[col_index] > 0:
                            d[col_index] -= 1
                            row[col_index] = 1
                array[row_index] = row  # write the row back
        # Convert to a numpy-backed DataFrame.
        np_array = np.array(array)
        np_array = np_array.astype(int)
        multiple_res_df = pd.DataFrame(np_array)
        multiple_res_df = multiple_res_df.rename(columns=name_dict)
        multiple_res_df = multiple_res_df.sample(frac=1, ignore_index=True)  # shuffle rows
        # For fill-in options, append their text to every cell that is 1.
        for option in option_objs:
            if option.is_text:
                title = option.title
                # Count how many rows picked this option.
                column_data = multiple_res_df[title]
                total_count_in = column_data.eq(1).sum()
                texts = option.text
                is_repeat = option.is_repeat
                text_arr = valid_text_content(texts, is_repeat, req, title_id, total_count_in)
                # Work on a plain list of the column values.
                column_data_list = column_data.tolist()
                # Positions of the 1s.
                indices = [i for i, val in enumerate(column_data_list) if val == 1]
                # Trim the texts to exactly the number of 1s.
                random_strings = text_arr[:len(indices)]
                # Write "1^text" into each picked cell.
                for i, string in zip(indices, random_strings):
                    column_data[i] = str(column_data_list[i]) + '^' + string
                # Assign the filled column back to the DataFrame.
                multiple_res_df[title] = column_data
        return multiple_res_df

    def deal_single_text(title_id, is_text, value, texts, total_count_in, req, is_repeat) -> []:
        """
        Build the answer values for one single-choice option, appending the
        fill-in text as "value^text" when the option carries a blank.

        :param title_id: question id, used in error messages
        :param is_text: whether this option has a fill-in blank
        :param texts: candidate fill-in texts, one per line
        :param total_count_in: how many answers to produce
        :param value: the option's answer value
        :param req: '1' when the question is required
        :param is_repeat: whether fill-in texts must be unique
        :return: list of total_count_in answer strings
        """
        answers = [str(value)] * total_count_in
        if not is_text:
            return answers
        fill_texts = valid_text_content(texts, is_repeat, req, title_id, total_count_in)
        for pos, answer in enumerate(answers):
            if pos < len(fill_texts):
                answers[pos] = answer + '^' + fill_texts[pos]
        return answers

    def build_single_datas_v3(obj_in: json, total_count: int) -> pd.DataFrame:
        """
        Generate single-choice data (v3): option values distributed by weight
        and shuffled, with fill-in options appended as "value^text" after the
        shuffle.

        :param obj_in: raw question dict with weighted options
        :param total_count: number of rows to generate
        :return: single-column DataFrame keyed by the question title
        :raises APIException: when the option weights sum to zero, or from
            valid_text_content
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj_in)
        name = out_obj.title  # question title (column name)
        title_id = out_obj.title_id  # question id
        cur_options = out_obj.options  # weighted options
        req = out_obj.req  # whether the question is required
        total_weight = 0
        option_objs = list(map(lambda co: wjx_explain_v2.ItemWjxOb(**co), cur_options))
        for j in option_objs:
            total_weight += int(j.rate)
        if total_weight <= 0:
            raise APIException(message="生成失败，" + out_obj.title + "，此题总比重为0")
        m = dict()  # option value -> ratio derived from its weight
        can_use_nums = []
        can_use_num_weights = []
        option_obj_dict = dict()  # option value -> option index
        for j in range(len(option_objs)):
            inner_obj = option_objs[j]
            m[inner_obj.value] = int(inner_obj.rate) / total_weight
            option_obj_dict[inner_obj.value] = j
            can_use_nums.append(inner_obj.value)
            can_use_num_weights.append(inner_obj.rate)
        # Generate the values.
        res = []
        cur_total = 0
        for i in range(len(option_objs)):
            inner_obj = option_objs[i]
            cur_value = inner_obj.value  # value of this option
            need_add_count = int(total_count * m[cur_value])
            if i == len(m) - 1:
                # Last option absorbs the rounding remainder.
                need_add_count = total_count - cur_total
            if need_add_count > 0:
                cur_total += need_add_count
                if (i == len(m) - 1) and m[cur_value] == 0:  # zero-weight last option: draw a value from the weighted pool instead
                    cur_value = random.choices(can_use_nums, weights=can_use_num_weights, k=1)[0]
                item_option_res = [cur_value] * need_add_count
                for j in item_option_res:
                    res.append(j)
        my_array = np.array(res)
        my_df = pd.DataFrame({name: my_array})
        random_order = np.random.choice(my_array, size=len(my_array), replace=False)  # shuffle
        my_df[name] = random_order
        # For fill-in options, append their text to the rows that chose them.
        for option in option_objs:
            if option.is_text:
                # Count how many rows chose this option.
                column_data = my_df[name]
                total_count_in = column_data.eq(option.value).sum()
                texts = option.text
                is_repeat = option.is_repeat
                text_arr = valid_text_content(texts, is_repeat, req, title_id, total_count_in)
                # Work on a plain list of the column values.
                column_data_list = column_data.tolist()
                # Positions of rows that chose this option.
                indices = [i for i, val in enumerate(column_data_list) if val == option.value]
                # Trim the texts to exactly the number of matches.
                random_strings = text_arr[:len(indices)]
                # Write "value^text" into each matching row.
                for i, string in zip(indices, random_strings):
                    column_data[i] = str(column_data_list[i]) + '^' + string
                # Assign the filled column back to the DataFrame.
                my_df[name] = column_data
        return my_df

    def build_single_datas_v2(obj_in: json, total_count: int) -> pd.DataFrame:
        """
        Generate single-choice data (v2): option values distributed by weight,
        fill-in text appended as "value^text" before shuffling.

        Fix: the inner append loop reused `i`, shadowing the enclosing option
        index; replaced with res.extend to remove the hazard.

        :param obj_in: raw question dict with weighted options
        :param total_count: number of rows to generate
        :return: single-column DataFrame keyed by the question title
        :raises APIException: when the option weights sum to zero, or from
            deal_single_text/valid_text_content
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj_in)
        name = out_obj.title  # question title (column name)
        title_id = out_obj.title_id  # question id
        cur_options = out_obj.options  # weighted options
        req = out_obj.req  # whether the question is required
        total_weight = 0
        for j in cur_options:
            inner_obj = wjx_explain_v2.ItemWjxOb(**j)
            total_weight += int(inner_obj.rate)
        if total_weight <= 0:
            raise APIException(message="生成失败，" + out_obj.title + "，此题总比重为0")
        m = dict()  # option value -> ratio derived from its weight
        can_use_nums = []
        can_use_num_weights = []
        option_obj_dict = dict()  # option value -> parsed option object
        for j in range(len(cur_options)):
            inner_obj = wjx_explain_v2.ItemWjxOb(**cur_options[j])
            m[inner_obj.value] = int(inner_obj.rate) / total_weight
            option_obj_dict[inner_obj.value] = inner_obj
            can_use_nums.append(inner_obj.value)
            can_use_num_weights.append(inner_obj.rate)
        # Generate the values.
        res = []
        cur_total = 0
        for i in range(len(cur_options)):
            inner_obj = wjx_explain_v2.ItemWjxOb(**cur_options[i])
            cur_value = inner_obj.value  # value of this option
            cur_is_text = inner_obj.is_text  # whether this option has a fill-in blank
            cur_text = inner_obj.text  # candidate fill-in texts
            is_repeat = inner_obj.is_repeat  # whether fill-in texts must be unique
            need_add_count = int(total_count * m[cur_value])
            if i == len(m) - 1:
                # Last option absorbs the rounding remainder.
                need_add_count = total_count - cur_total
            if need_add_count > 0:
                cur_total += need_add_count
                if (i == len(m) - 1) and m[cur_value] == 0:  # zero-weight last option: draw a value from the weighted pool instead
                    cur_value = random.choices(can_use_nums, weights=can_use_num_weights, k=1)[0]
                    cur_is_text = option_obj_dict.get(cur_value).is_text  # fill-in flag of the drawn option
                    cur_text = option_obj_dict.get(cur_value).text  # fill-in texts of the drawn option
                    is_repeat = option_obj_dict.get(cur_value).is_repeat  # uniqueness flag of the drawn option
                item_option_res = deal_single_text(title_id, cur_is_text, cur_value, cur_text, need_add_count, req,
                                                   is_repeat)
                res.extend(item_option_res)
        my_array = np.array(res)
        my_df = pd.DataFrame({name: my_array})
        random_order = np.random.choice(my_array, size=len(my_array), replace=False)  # shuffle
        my_df[name] = random_order
        return my_df

    def build_single_datas(obj_in: json, total_count: int) -> pd.DataFrame:
        """
        Generate single-choice answer data (legacy v1; superseded by v2/v3).

        Option values are distributed proportionally to their ``rate``
        weights; the last option absorbs the rounding remainder. If the last
        option has zero weight, each of its rows is replaced by a weighted
        random pick among all options.

        Fixes over the original: the randomly chosen fallback option now uses
        its own text settings (the original reused the last option's
        ``cur_text``), and the random pick no longer overwrites ``cur_value``,
        which leaked the choice into subsequent iterations.

        :param obj_in: raw question dict, parsed into ``wjx_explain_v2.ItemWjxOb``
        :param total_count: total number of rows to generate
        :return: single-column DataFrame keyed by the question title
        :raises APIException: when the summed option weights are not positive
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj_in)
        name = out_obj.title  # question title, used as the column name
        cur_options = out_obj.options
        total_weight = 0
        for j in cur_options:
            inner_obj = wjx_explain_v2.ItemWjxOb(**j)
            total_weight += int(inner_obj.rate)
        if total_weight <= 0:
            raise APIException(message="生成失败，" + out_obj.title + "，此题总比重为0")
        m = dict()  # option value -> share of the total weight
        can_use_nums = []
        can_use_num_weights = []
        option_obj_dict = dict()  # option value -> parsed option object
        for j in range(len(cur_options)):
            inner_obj = wjx_explain_v2.ItemWjxOb(**cur_options[j])
            m[inner_obj.value] = int(inner_obj.rate) / total_weight
            option_obj_dict[inner_obj.value] = inner_obj
            can_use_nums.append(inner_obj.value)
            can_use_num_weights.append(inner_obj.rate)
        # Generate the data.
        res = []
        cur_total = 0
        for i in range(len(cur_options)):
            inner_obj = wjx_explain_v2.ItemWjxOb(**cur_options[i])
            cur_value = inner_obj.value
            cur_is_text = inner_obj.is_text  # whether the option carries free text
            cur_text = inner_obj.text  # the free-text content, if any
            need_add_count = int(total_count * m[cur_value])
            if i == len(m) - 1:
                # Last option absorbs the rounding remainder.
                need_add_count = total_count - cur_total
            if need_add_count > 0:
                cur_total += need_add_count
                for _ in range(need_add_count):
                    value, is_text_flag, text = cur_value, cur_is_text, cur_text
                    if (i == len(m) - 1) and m[cur_value] == 0:
                        # Zero-weight last option: substitute a weighted random
                        # pick and use the chosen option's own text settings.
                        value = random.choices(can_use_nums, weights=can_use_num_weights, k=1)[0]
                        chosen = option_obj_dict.get(value)
                        is_text_flag = chosen.is_text
                        text = chosen.text
                    if bool(is_text_flag):  # the option requires a fill-in text
                        if text:
                            texts = text.split(',')  # comma-separated candidates
                            need_text = random.choices(texts, k=1)[0]
                            res.append(str(value) + "^" + need_text)
                        else:
                            res.append(str(value) + "^" + "无")
                    else:
                        res.append(value)
        my_array = np.array(res)
        my_df = pd.DataFrame({name: my_array})
        # Shuffle the column so equal answers are not grouped together.
        random_order = np.random.choice(my_array, size=len(my_array), replace=False)
        my_df[name] = random_order
        return my_df

    def build_slide_datas(obj_in: json, total_count_in: int) -> pd.DataFrame:
        """
        Generate data for slider questions (v2 parsing).

        Each child becomes a column named ``"<child title>.<child name>"``
        filled with uniform random integers in [min_oper_num, max_oper_num]
        after clamping the bounds to the [0, 100] range.

        :param obj_in: raw question dict, parsed into ``wjx_explain_v2.ItemWjxOb``
        :param total_count_in: number of rows to generate
        :return: DataFrame with one column per child
        """
        out_obj = wjx_explain_v2.ItemWjxOb(**obj_in)
        children = out_obj.children
        in_df = pd.DataFrame()
        for i in children:
            child_option = wjx_explain_v2.ItemWjxOb(**i)
            min_oper_num = int(child_option.min_oper_num)  # lower bound
            max_oper_num = int(child_option.max_oper_num)  # upper bound
            name = child_option.title + '.' + child_option.name  # column name
            # Clamp bounds into [0, 100].
            # NOTE(review): min > max is still possible after clamping and
            # would make randint raise — confirm upstream validation (TODO
            # carried over from the original).
            min_oper_num = 0 if min_oper_num < 0 else min_oper_num
            max_oper_num = max_oper_num if max_oper_num < 100 else 100
            np_arr = np.random.randint(min_oper_num, max_oper_num + 1, size=total_count_in)
            child_df = pd.DataFrame({name: np_arr})
            if in_df.empty:
                in_df = child_df
            else:
                in_df = in_df.join(child_df)
        return in_df

    def build_single_slide_datas(obj_in: json, total_count_in: int) -> pd.DataFrame:
        """
        Generate data for single-item slider questions.

        Every option of every child question becomes one column of uniform
        random integers drawn from [min_oper_num, max_oper_num].

        :param obj_in: raw question dict, parsed into ``wjx_explain_v2.ItemWjxOb``
        :param total_count_in: number of rows to generate
        :return: DataFrame with one column per slider option
        """
        parsed = wjx_explain_v2.ItemWjxOb(**obj_in)
        result_df = pd.DataFrame()
        for raw_child in parsed.children:
            child = wjx_explain_v2.ItemWjxOb(**raw_child)
            for raw_option in child.options:
                opt = wjx_explain_v2.ItemWjxOb(**raw_option)
                low = int(opt.min_oper_num)
                high = int(opt.max_oper_num)
                # NOTE: min/max validation and clamping are intentionally
                # disabled here (unlike build_slide_datas) — low > high would
                # make randint raise.
                values = np.random.randint(low, high + 1, size=total_count_in)
                column_df = pd.DataFrame({opt.title: values})
                result_df = column_df if result_df.empty else result_df.join(column_df)
        return result_df

    cur_type = obj.get('sys_type')  # 该题目的类型
    cur_df = pd.DataFrame()
    if cur_type == SysTypeEnum.SINGLE.value:  # 单选题
        # cur_df = build_single_datas(obj, total_count)
        # cur_df = build_single_datas_v2(obj, total_count)
        cur_df = build_single_datas_v3(obj, total_count)
    elif cur_type == SysTypeEnum.SINGLE_SELECT.value:  # 单选题(下拉选择)
        cur_df = build_select_datas(obj, total_count)
    elif cur_type == SysTypeEnum.MULTIPLE.value:  # 多选题
        # cur_df = build_multiple_datas(obj, total_count)
        cur_df = build_multiple_datas_v2(obj, total_count)
    elif cur_type == SysTypeEnum.SCALE.value:  # 量表题
        cur_df = build_select_datas(obj, total_count)
    elif cur_type == SysTypeEnum.MATRIX.value:  # 矩阵题
        cur_df = build_matrix_datas(obj, total_count)
    elif cur_type == SysTypeEnum.SINGLE_SLIDE.value:  # 单项滑动题
        cur_df = build_single_slide_datas(obj, total_count)
    elif cur_type == SysTypeEnum.SLIDE.value:  # 滑动题
        cur_df = build_slide_datas(obj, total_count)
    elif cur_type == SysTypeEnum.SORTED.value:  # 排序题
        cur_df = build_sorted_datas(obj, total_count)
    elif cur_type == SysTypeEnum.RATE.value:  # 比重题
        # cur_df = build_rate_datas(obj, total_count)
        cur_df = build_rate_datas_v2(obj, total_count)
    elif cur_type == SysTypeEnum.TEXT.value:  # 填空题
        cur_df = build_text_datas(obj, total_count)
    elif cur_type == SysTypeEnum.AREA.value:  # 地区题
        # cur_df = build_area_datas(obj, total_count)
        cur_df = build_text_datas(obj, total_count)
    elif cur_type == SysTypeEnum.DATE.value:  # 日期题
        # cur_df = build_date_datas(obj, total_count)
        cur_df = build_text_datas(obj, total_count)
    elif cur_type == SysTypeEnum.MULTIPLE_TEXT.value:  # 多项填空
        cur_df = build_multiple_text(obj, total_count)
    elif cur_type == SysTypeEnum.MULTIPLE_MATRIX.value:  # 矩阵多选
        cur_df = build_multiple_matrix(obj, total_count)
    elif cur_type == SysTypeEnum.MATRIX_TEXT.value:  # 矩阵填空
        # cur_df = build_matrix_text(obj, total_count)
        cur_df = build_multiple_text(obj, total_count)
    # 添加到结果
    if cur_df is not None:
        if final_df.empty:
            final_df = cur_df
        else:
            final_df = final_df.join(cur_df)
    return final_df


def build_datas(obj: json, final_df: pd.DataFrame, total_count: int):
    """
    处理传入数据中的每一个对象
    :param obj: 当前对象的所有属性
    :param final_df: 最终的DataFrame
    :param total_count: 总数量
    :return:
    """

    def build_matrix_datas(obj_in: json, total_count_in: int) -> pd.DataFrame:
        """
        Generate data for matrix (single-choice grid) questions.

        Each child row of the matrix becomes one column named
        ``"<parent title>.<child title>"``; values are option values split
        proportionally to the option ``rate`` weights (the last option absorbs
        the rounding remainder), then row-shuffled per column.

        :param total_count_in: total number of rows to generate
        :param obj_in: question definition dict
        :return: DataFrame with one column per matrix row
        """
        children = obj_in.get('children')  # all child rows of the matrix
        parent_title = obj_in.get('title')
        matrix_df = pd.DataFrame()
        for child in children:
            title = child.get('title')  # child-row title
            cur_options = child.get('options')
            # NOTE(review): a zero total weight raises ZeroDivisionError below;
            # sibling builders raise APIException instead — confirm intended.
            total_weight = 0
            for option in cur_options:
                total_weight += option.get('rate')
            m = dict()  # option value -> share of the total weight
            for idx in range(len(cur_options)):
                m[cur_options[idx].get('value')] = cur_options[idx].get('rate') / total_weight
            # Generate this column's values. A dedicated index is used here;
            # the original reused (and shadowed) the outer loop variable.
            child_res = []
            cur_total = 0
            for idx in range(len(cur_options)):
                cur_value = cur_options[idx].get('value')
                if idx == len(m) - 1:
                    # Last option absorbs the rounding remainder.
                    need_add_count = total_count_in - cur_total
                    if need_add_count > 0:
                        child_res += [cur_value] * need_add_count
                else:
                    need_add_count = int(total_count_in * m[cur_value])
                    cur_total += need_add_count
                    child_res += [cur_value] * need_add_count
            name = parent_title + '.' + title
            inner_df = pd.DataFrame({name: child_res})
            inner_df = inner_df.sample(frac=1, ignore_index=True)  # shuffle rows
            matrix_df = inner_df if matrix_df.empty else matrix_df.join(inner_df)
        return matrix_df

    def build_select_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        Generate data for dropdown single-choice questions.

        Option values are emitted proportionally to their ``rate`` weights
        (the last option absorbs the rounding remainder) and then the column
        is shuffled.

        :param total_count: total number of rows to generate
        :param obj: question definition dict
        :return: single-column DataFrame keyed by the question title
        """
        title = obj.get('title')
        cur_options = obj.get('options')
        total_weight = sum(opt.get('rate') for opt in cur_options)
        # Option value -> its share of the total weight.
        ratio_by_value = {opt.get('value'): opt.get('rate') / total_weight for opt in cur_options}
        values = []
        allocated = 0
        last_index = len(ratio_by_value) - 1
        for idx, opt in enumerate(cur_options):
            value = opt.get('value')
            if idx == last_index:
                # Last option absorbs the rounding remainder.
                remainder = total_count - allocated
                if remainder > 0:
                    values += [value] * remainder
            else:
                share = int(total_count * ratio_by_value[value])
                allocated += share
                values += [value] * share
        column = np.array(values)
        result = pd.DataFrame({title: column})
        # Reassign in random order (a permutation of the generated values).
        result[title] = np.random.choice(column, size=len(column), replace=False)
        return result

    def build_text_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        Generate placeholder data for fill-in-the-blank questions.

        Every row receives the literal placeholder "无".

        :param total_count: number of rows to generate
        :param obj: question definition dict (only the title is used)
        :return: single-column DataFrame keyed by the question title
        """
        title = obj.get('title')
        return pd.DataFrame({title: ["无"] * total_count})

    def build_rate_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        Generate data for weighting (constant-sum) questions.

        Each child item becomes one column named
        ``"<question title>.<child title>"``; every cell is drawn uniformly
        from the child's [min_oper_num, max_oper_num] range.

        Note: per-row sums are NOT forced to 100; the minimum-sum check below
        is intentionally disabled.

        :param total_count: number of rows to generate
        :param obj: question definition dict
        :return: DataFrame with one column per child item
        """
        title = obj.get('title')
        children = obj.get('children')
        min_sum = 0
        name_dict = dict()  # positional column index -> final column name
        for index in range(len(children)):
            child = children[index]
            name_dict[index] = title + '.' + child.get('title')
            min_sum += child.get('min_oper_num')
        if min_sum > 100:
            # Deliberately disabled: raising here would reject configurations
            # whose minimum weights already exceed 100.
            # msg = '题目：' + title + '，所有比重的最小值之和超过100，请重新设置比重'
            # raise APIException(message="msg")
            pass

        res_arr = []
        for _ in range(total_count):
            item_arr = []
            for child in children:
                low = child.get('min_oper_num')
                high = child.get('max_oper_num')
                item_arr.append(random.randint(low, high))
            res_arr.append(item_arr)
        rate_res_df = pd.DataFrame(res_arr)
        rate_res_df = rate_res_df.rename(columns=name_dict)
        return rate_res_df

    def build_sorted_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        Generate data for ranking questions.

        Each option becomes a column ``"<title>.<option name>"``. For option i,
        a weight-proportional number of rows is generated in which option i is
        pinned to rank 1 while the remaining ranks are shuffled; positions
        beyond ``max_oper_num`` are marked with -2 (unranked). The final frame
        is row-shuffled.

        :param total_count: target number of rows (see NOTE on `nums` below)
        :param obj: question definition dict
        :return: DataFrame with one column per option
        """
        title = obj.get('title')  # question title
        min_oper_num = obj.get('min_oper_num')  # minimum options to rank (currently unused below)
        max_oper_num = obj.get('max_oper_num')  # maximum options to rank
        cur_options = obj.get('options')  # per-option weights

        # Build each column's final name and the per-option ratios.
        name_dict = dict()
        total_weight = 0
        for j in range(len(cur_options)):
            total_weight += cur_options[j].get('rate')
            name_dict[j] = title + '.' + cur_options[j].get('name')
        ratios = []
        for j in range(len(cur_options)):
            ratios.append(cur_options[j].get('rate') / total_weight)
        ew_arr = []
        option_length = len(cur_options)
        max_length = max_oper_num
        # NOTE(review): these counts are floats; the `while count > 0` loop
        # below runs ceil(count) times per option, so the total row count may
        # exceed total_count — confirm intended.
        nums = [total_count * cur_i for cur_i in ratios]
        for i in range(len(nums)):
            count = nums[i]
            while count > 0:
                arr = np.arange(option_length + 1)[1:]  # ranks 1..option_length
                for index in range(len(arr)):
                    if index > max_length - 1:
                        arr[index] = -2  # positions beyond max_oper_num are unranked
                # Swap positions 0 and i; afterwards arr[i] holds what was at
                # arr[0] (rank 1 when max_oper_num >= 1), pinning option i first.
                tmp = arr[0]
                arr[0] = arr[i]
                arr[i] = tmp

                fixed_index = i

                fixed_value = arr[fixed_index]  # keep option i's rank fixed
                arr = np.delete(arr, fixed_index)  # remove the fixed value

                np.random.shuffle(arr)  # shuffle the remaining ranks

                arr = np.insert(arr, fixed_index, fixed_value)  # re-insert the fixed value
                count -= 1
                ew_arr.append(arr)
        sorted_res_df = pd.DataFrame(ew_arr)
        sorted_res_df = sorted_res_df.rename(columns=name_dict)
        sorted_res_df = sorted_res_df.sample(frac=1, ignore_index=True)  # shuffle rows
        return sorted_res_df

    def build_multiple_datas(obj: json, total_count: int) -> pd.DataFrame:
        """
        Generate data for multiple-choice questions.

        NOTE(review): each row currently ends up as a random permutation of
        the option values; the commented-out code in generate_array suggests
        0/1 indicator columns driven by per-column ratios were once intended —
        confirm what downstream consumers expect.

        :param total_count: number of rows to generate
        :param obj: question definition dict
        :return: DataFrame with one column per option
        """

        def generate_array(m, n, x_min, x_max, col_ratio, cur_option_values):
            """
            Build the (m, n) answer matrix.

            :param m: number of rows
            :param n: number of columns
            :param x_min: minimum selections per row (currently unused)
            :param x_max: maximum selections per row (currently unused)
            :param col_ratio: per-column target ratio (currently unused)
            :param cur_option_values: option values of this question
            :return: an (m, n) int array, one shuffled permutation per row
            """
            # Start from an all-zero matrix.
            arr = np.zeros((m, n), dtype=int)
            numbers = np.array(cur_option_values)

            # Fill each row with a shuffled permutation of the option values.
            for i in range(arr.shape[0]):
                inner_numbers = numbers  # alias, not a copy: the same buffer is reshuffled each row
                np.random.shuffle(inner_numbers)
                # Row assignment copies the current state of the buffer.
                arr[i] = inner_numbers

            # Disabled ratio/bounds enforcement, kept for reference:
            # # For each column, generate 1s in the configured ratio.
            # for j in range(n):
            #     ones_count = int(m * col_ratio[j])
            #     ones_indices = np.random.choice(m, ones_count, replace=False)
            #     arr[ones_indices, j] = 1
            #
            # # Ensure each row's sum stays within [x_min, x_max].
            # for i in range(m):
            #     cur_sum = arr[i, :].sum()
            #     if cur_sum > x_max:
            #         ones_indices = np.where(arr[i, :] == 1)[0]
            #         np.random.shuffle(ones_indices)
            #         arr[i, ones_indices[x_max:]] = 0
            #     if cur_sum < x_min:
            #         ones_indices = np.where(arr[i, :] == 0)[0]
            #         np.random.shuffle(ones_indices)
            #         arr[i, ones_indices[x_min:]] = 1
            return arr

        min_oper_num = obj.get('min_oper_num')  # minimum selections (passed on but unused)
        max_oper_num = obj.get('max_oper_num')  # maximum selections (passed on but unused)
        title = obj.get('title')  # question title
        cur_options = obj.get('options')  # per-option weights
        name_dict = dict()  # positional column index -> final column name
        total_weight = 0
        for j in range(len(cur_options)):
            total_weight += cur_options[j].get('rate')
            name_dict[j] = title + '.' + cur_options[j].get('name')
        ratios = []
        option_values = []
        for j in range(len(cur_options)):
            option_values.append(cur_options[j].get('value'))
            ratios.append(cur_options[j].get('rate') / total_weight)
        np_array = generate_array(total_count, len(cur_options), min_oper_num, max_oper_num, ratios, option_values)
        np_array = np_array.astype(int)
        multiple_res_df = pd.DataFrame(np_array)
        multiple_res_df = multiple_res_df.rename(columns=name_dict)
        return multiple_res_df

    def build_single_datas(name, cur_options, total_count) -> pd.DataFrame:
        """
        Generate single-choice answer data.

        Option values are distributed according to their ``rate`` weights; the
        last option absorbs the rounding remainder. Values whose option is
        marked ``is_text`` are stored as plain strings (appending the fill-in
        answer itself is intentionally disabled). The column is shuffled
        before being returned.

        :param name: column name (question title)
        :param cur_options: option definition dicts
        :param total_count: total number of rows to generate
        :return: single-column DataFrame
        """
        total_weight = sum(opt.get('rate') for opt in cur_options)
        # Option value -> its share of the total weight.
        ratio_by_value = {opt.get('value'): opt.get('rate') / total_weight for opt in cur_options}
        # Values belonging to options that carry free text.
        text_values = [opt.get('value') for opt in cur_options if bool(opt.get('is_text'))]
        answers = []
        allocated = 0
        last_index = len(ratio_by_value) - 1
        for idx, opt in enumerate(cur_options):
            value = opt.get('value')
            if idx == last_index:
                # Last option absorbs the rounding remainder.
                remainder = total_count - allocated
                if remainder > 0:
                    answers += [value] * remainder
            else:
                share = int(total_count * ratio_by_value[value])
                allocated += share
                answers += [value] * share
        if text_values:
            # Text-capable values become strings; the '^' + answer suffix
            # remains disabled, as in the original.
            answers = [str(v) if v in text_values else v for v in answers]
        column = np.array(answers)
        result = pd.DataFrame({name: column})
        # Reassign in random order (a permutation of the generated values).
        result[name] = np.random.choice(column, size=len(column), replace=False)
        return result

    def build_slide_datas(obj_in: json, total_count_in: int) -> pd.DataFrame:
        """
        Generate data for slider questions.

        Each child item becomes a column named
        ``"<parent title>.<child title>"`` filled with uniform random integers
        in [min_oper_num, max_oper_num], after clamping the bounds to [0, 100].

        :param obj_in: question definition dict
        :param total_count_in: number of rows to generate
        :return: DataFrame with one column per child item
        """
        children = obj_in.get('children')
        parent_title = obj_in.get('title')
        result_df = pd.DataFrame()
        for child in children:
            low = max(child.get('min_oper_num'), 0)     # clamp lower bound at 0
            high = min(child.get('max_oper_num'), 100)  # clamp upper bound at 100
            column_name = parent_title + '.' + child.get('title')
            # NOTE(review): low > high is not rejected and would make randint
            # raise — confirm upstream validation.
            values = np.random.randint(low, high + 1, size=total_count_in)
            child_df = pd.DataFrame({column_name: values})
            result_df = child_df if result_df.empty else result_df.join(child_df)
        return result_df

    cur_type = obj.get('type')  # numeric question-type code of this item
    cur_df = pd.DataFrame()
    if cur_type == 3:  # single choice
        title = obj.get('title')  # question title
        cur_options = obj.get('options')  # options of this question
        cur_df = build_single_datas(title, cur_options, total_count)
    elif cur_type == 1:  # fill-in-the-blank
        cur_df = build_text_datas(obj, total_count)
    elif cur_type == 4:  # multiple choice
        cur_df = build_multiple_datas(obj, total_count)
    elif cur_type == 5:  # handled as a select question (original comment said "multiple choice" — looks copy-pasted; verify)
        cur_df = build_select_datas(obj, total_count)
    elif cur_type == 6:  # matrix
        cur_df = build_matrix_datas(obj, total_count)
    elif cur_type == 7:  # handled as a select question (original comment said "multiple choice" — looks copy-pasted; verify)
        cur_df = build_select_datas(obj, total_count)
        pass
    elif cur_type == 9:  # slider
        cur_df = build_slide_datas(obj, total_count)
    elif cur_type == 11:  # ranking
        cur_df = build_sorted_datas(obj, total_count)
    elif cur_type == 12:  # weighting
        cur_df = build_rate_datas(obj, total_count)
    # Merge this question's columns into the accumulated result.
    if cur_df is not None:  # always true — cur_df defaults to an empty DataFrame
        if final_df.empty:
            final_df = cur_df
        else:
            final_df = final_df.join(cur_df)
    return final_df


def receiving(work_order_id: int, cur_user_id: int):
    """
    Accept (claim) a work order for the given user.

    :param work_order_id: work order id
    :param cur_user_id: id of the accepting user (from JWT; hard-coded to 3 in tests)
    :return: None
    :raises APIException: when the lookup fails, the order does not exist,
        the order is already claimed, or the status update fails
    """
    try:
        item = work_order_dao.query_work_order_by_id(work_order_id)
    except Exception as e:
        # Chain the original error so the root cause survives in logs.
        raise APIException("查询工单失败") from e
    if not item:
        raise APIException("接单失败，工单不存在")
    if item.status > 0:
        # status > 0 means the order has already been claimed.
        raise APIException("接单失败，当前工单已被接单")
    try:
        work_order_dao.modify_work_order_status_and_get_user_id(work_order_id, 1, cur_user_id)
    except Exception as e:
        raise APIException("接单失败，修改工单状态失败") from e
    return


def make_datas_v2(work_order_id: int, data: json):
    def insert_into_topic(df: pd.DataFrame, work_order_id: int):
        """
        Batch-insert generated rows as Topic records.

        Each DataFrame row is serialized to a JSON array string and stored as
        an unsubmitted Topic bound to the work order.

        :param work_order_id: owning work order id
        :param df: generated answer data, one row per questionnaire submission
        :return: None
        """
        topics = [
            ut.Topic(
                data=json.dumps(np.array(row).tolist(), ensure_ascii=False),
                work_order_id=work_order_id,
                submitted=0,
            )
            for _, row in df.iterrows()
        ]
        topic_dao.batch_insert(topics)
        return

    def deep_search_programme_v2(variables: json, dvariable: json, children: json, out_dict: dict, df: pd.DataFrame):
        """
        Recursively apply a programme (scheme) configuration to the data.

        Dependent variables (``dvariable``) are re-sampled by option weight on
        the current slice; each child configuration then filters a sub-slice
        by its independent variables, takes its weight-proportional portion
        and recurses. Modified rows are written back into ``df`` by index.

        :param variables: independent variables of this level (guard only)
        :param dvariable: dependent variables to regenerate on this slice
        :param children: nested child configurations
        :param out_dict: accumulated column -> allowed-values filter from parents
        :param df: current data slice
        :return: df with the programme applied
        """
        if not variables or not dvariable:
            return df
        d_v_df = df
        # Dependent variables: regenerate each single-choice column by weight.
        for iv in dvariable:
            cur_obj = ItemProgrammeObject(**iv)
            if cur_obj.sys_type == SysTypeEnum.SINGLE.value:  # single-choice question
                options = cur_obj.options
                column_index = cur_obj.title
                d_weight = []
                d_value = []
                for option in options:
                    d_weight.append(option.get('rate'))
                    d_value.append(option.get('value'))
                need_value = random.choices(d_value, weights=d_weight, k=len(d_v_df))
                count = 0
                for index, row in d_v_df.iterrows():
                    d_v_df.loc[index, column_index] = need_value[count]
                    count += 1
        # Child configurations.
        if children:
            v_dict = dict()  # (question title + checked options) -> total weight
            uuid_dict = dict()  # variable uuid -> starting weight offset
            for item_child in children:
                child_va = item_child.get('variable')  # independent variables
                variable_objs = list(map(lambda iv: ItemProgrammeObject(**iv), child_va))
                for cur_obj in variable_objs:
                    checked = cur_obj.checked  # selected option names
                    cur_obj_title = cur_obj.title  # question title
                    cur_obj_uuid = cur_obj.uuid  # unique id of this variable
                    union_key = str(cur_obj_title) + str(checked)  # unique key of the scheme
                    if v_dict.get(union_key):
                        cur_score = v_dict.get(union_key)
                        uuid_dict[cur_obj_uuid] = cur_score  # start after weights seen so far
                        v_dict[union_key] += cur_obj.score
                    else:
                        uuid_dict[cur_obj_uuid] = 0
                        # Bugfix: seed the total weight on first occurrence
                        # (mirrors deal_programme_v2). Previously v_dict was
                        # never initialized here, so total_score below stayed
                        # None and the slicing arithmetic raised TypeError.
                        v_dict[union_key] = cur_obj.score

            for item_programme in children:
                item_programme_df = d_v_df
                # Pick out the slice this child configuration applies to.
                child_va = item_programme.get('variable')  # independent variables
                item_dvariable = item_programme.get('dvariable')  # dependent variables
                item_children = item_programme.get('children')  # nested child configurations
                if not child_va or not item_dvariable:
                    continue
                variable_objs = list(map(lambda iv: ItemProgrammeObject(**iv), child_va))
                # Independent variables: column title -> allowed option values.
                iv_dict = copy.deepcopy(out_dict)
                for cur_obj in variable_objs:
                    if cur_obj.sys_type == SysTypeEnum.SINGLE.value:  # single-choice question
                        checked = cur_obj.checked
                        options = cur_obj.options
                        column_index = cur_obj.title
                        for option in options:
                            if option.get('name') in checked:
                                iv_dict_value = iv_dict.get(column_index)
                                option_value = option.get('value')
                                if not iv_dict_value:
                                    iv_dict[column_index] = [option_value]
                                else:
                                    iv_dict_value.append(option_value)
                                    iv_dict[column_index] = iv_dict_value
                # Keep only the rows matching every filter column.
                for iv_dict_index in iv_dict:
                    value = iv_dict[iv_dict_index]
                    if value:
                        item_programme_df = item_programme_df[item_programme_df[iv_dict_index].isin(value)]
                for cur_obj in variable_objs:
                    checked = cur_obj.checked
                    cur_obj_title = cur_obj.title
                    cur_obj_uuid = cur_obj.uuid
                    union_key = str(cur_obj_title) + str(checked)
                    total_score = v_dict.get(union_key)  # total weight of this key
                    start_score = uuid_dict.get(cur_obj_uuid)  # starting offset
                    cur_obj_score = cur_obj.score  # this scheme's weight
                    start_index = int(len(df) * start_score / total_score)
                    # NOTE(review): the denominator here is the enclosing
                    # total_count while start_index divides by total_score —
                    # looks inconsistent (same pattern in deal_programme_v2);
                    # confirm intent before changing.
                    end_index = int(len(df) * (start_score + cur_obj_score) / total_count)
                    item_programme_df = item_programme_df.iloc[start_index:end_index]  # weight-proportional slice
                    # reset_index(drop=True) discards the slice's original index.
                    item_programme_df = item_programme_df.reset_index(drop=True)
                item_programme_df = deep_search_programme_v2(child_va, item_dvariable, item_children, iv_dict,
                                                             item_programme_df)
                # Drop the modified rows from the working frame, then append
                # them back so the child's changes win.
                rows = item_programme_df.index.tolist()
                for row_index in rows:
                    d_v_df = d_v_df.drop(row_index, axis=0)
                d_v_df = pd.concat([d_v_df, item_programme_df], axis=0)
        # Write the modified slice back into the caller's frame by index.
        rows = d_v_df.index.tolist()
        for row_index in rows:
            df = df.drop(row_index, axis=0)
        df = pd.concat([df, d_v_df], axis=0)
        return df

    def deal_programme_v2(obj: json, df: pd.DataFrame):
        """
        Apply the top-level programme (scheme) configuration to the data.

        First pass: per (question title + checked options) key, accumulate the
        total weight of all schemes sharing that key and record each
        variable's starting offset. Second pass: for each scheme, filter the
        rows matching its independent variables, cut the weight-proportional
        slice, apply deep_search_programme_v2 and write the modified rows back
        into df by index.

        :param obj: list of programme configuration dicts
        :param df: generated answer data
        :return: df with every programme applied
        """
        v_dict = dict()  # (question title + checked options) -> total weight of that key
        uuid_dict = dict()  # variable uuid -> starting weight offset
        for item_programme in obj:
            item_variables = item_programme.get('variable')  # independent variables
            item_id = item_programme.get('id')  # scheme id (unused below)
            # Detect duplicated independent variables across schemes.
            variable_objs = list(map(lambda iv: ItemProgrammeObject(**iv), item_variables))
            for cur_obj in variable_objs:
                checked = cur_obj.checked  # selected option names
                cur_obj_title = cur_obj.title  # question title
                cur_obj_uuid = cur_obj.uuid  # unique id of this variable
                union_key = str(cur_obj_title) + str(checked)  # unique key of the scheme
                if v_dict.get(union_key):
                    cur_score = v_dict.get(union_key)
                    uuid_dict[cur_obj_uuid] = cur_score  # start after the weights seen so far
                    v_dict[union_key] += cur_obj.score
                else:
                    uuid_dict[cur_obj_uuid] = 0
                    v_dict[union_key] = cur_obj.score  # seed the total weight

        for item_programme in obj:
            item_programme_df = df
            # Pick out the slice of the DataFrame this scheme applies to.
            item_variables = item_programme.get('variable')  # independent variables
            item_dvariable = item_programme.get('dvariable')  # dependent variables
            item_children = item_programme.get('children')  # nested child configurations
            if not item_variables or not item_dvariable:
                continue
            variable_objs = list(map(lambda iv: ItemProgrammeObject(**iv), item_variables))
            # Independent variables: column title -> allowed option values, e.g. {'0': [1, 2]}.
            iv_dict = dict()
            for cur_obj in variable_objs:
                if cur_obj.sys_type == SysTypeEnum.SINGLE.value:  # single-choice question
                    checked = cur_obj.checked
                    options = cur_obj.options
                    column_index = cur_obj.title
                    for option in options:
                        if option.get('name') in checked:
                            iv_dict_value = iv_dict.get(column_index)
                            option_value = option.get('value')
                            if not iv_dict_value:
                                iv_dict[column_index] = [option_value]
                            else:
                                iv_dict_value.append(option_value)
                                iv_dict[column_index] = iv_dict_value
            # Keep only the rows matching every filter column.
            for iv_dict_index in iv_dict:
                value = iv_dict[iv_dict_index]
                if value:
                    item_programme_df = item_programme_df[item_programme_df[iv_dict_index].isin(value)]
            for cur_obj in variable_objs:
                checked = cur_obj.checked  # selected option names
                cur_obj_title = cur_obj.title  # question title
                cur_obj_uuid = cur_obj.uuid  # unique id of this variable
                union_key = str(cur_obj_title) + str(checked)  # unique key of the scheme
                total_score = v_dict.get(union_key)  # total weight of this key
                start_score = uuid_dict.get(cur_obj_uuid)  # starting offset
                cur_obj_score = cur_obj.score  # this scheme's weight
                start_index = int(len(item_programme_df) * start_score / total_score)
                # NOTE(review): end_index divides by the enclosing total_count
                # while start_index divides by total_score — looks
                # inconsistent; confirm intent.
                end_index = int(len(item_programme_df) * (start_score + cur_obj_score) / total_count)
                item_programme_df = item_programme_df.iloc[start_index:end_index]  # weight-proportional slice
                # NOTE(review): reset_index(drop=True) discards the slice's
                # original labels, so the drop-by-index write-back below removes
                # rows 0..n-1 of df rather than the originally selected rows —
                # confirm this is the intended behavior.
                item_programme_df = item_programme_df.reset_index(drop=True)
            # Recurse into the child configurations on this slice.
            item_programme_df = deep_search_programme_v2(item_variables, item_dvariable, item_children, iv_dict,
                                                         item_programme_df)
            # df = pd.merge(df, item_programme_df, left_index=True, right_index=True, how='left')
            # Remove the modified rows from df by index ...
            rows = item_programme_df.index.tolist()
            for row_index in rows:
                df = df.drop(row_index, axis=0)
            # ... and append the modified slice back.
            df = pd.concat([df, item_programme_df], axis=0)
        return df

    def deep_search_programme(variables: json, dvariable: json, children: json, out_dict: dict, df: pd.DataFrame):
        """
        Recursively apply one programme entry to the generated DataFrame.

        :param variables: independent-variable configs used to select rows
        :param dvariable: dependent-variable configs whose answers are redrawn
        :param children: nested child entries applied to the filtered rows
        :param out_dict: column-title -> accepted-values constraints inherited from the parent
        :param df: full DataFrame; matching rows are rewritten and re-appended
        :return: df with the processed rows replaced
        """
        # Work on a private copy of the inherited constraints, e.g. {'0': [1, 2]}.
        constraints = copy.deepcopy(out_dict)
        if not variables or not dvariable:
            return df
        scoped_df = df
        # Independent variables: collect the accepted option values per column.
        for iv in variables:
            if iv.get('sys_type') != SysTypeEnum.SINGLE.value:  # only single-choice questions
                continue
            selected = iv.get('checked')
            col_title = iv.get('title')
            for opt in iv.get('options'):
                if opt.get('name') not in selected:
                    continue
                bucket = constraints.get(col_title)
                if bucket:
                    bucket.append(opt.get('value'))
                    constraints[col_title] = bucket
                else:
                    constraints[col_title] = [opt.get('value')]
        # Keep only the rows that satisfy every constraint.
        for col_title in constraints:
            accepted = constraints[col_title]
            if accepted:
                scoped_df = scoped_df[scoped_df[col_title].isin(accepted)]
        # Dependent variables: redraw answers using the configured option rates.
        for dv in dvariable:
            if dv.get('sys_type') != SysTypeEnum.SINGLE.value:  # only single-choice questions
                continue
            col_title = dv.get('title')
            weights = [opt.get('rate') for opt in dv.get('options')]
            values = [opt.get('value') for opt in dv.get('options')]
            drawn = random.choices(values, weights=weights, k=len(scoped_df))
            for pos, (row_id, _row) in enumerate(scoped_df.iterrows()):
                scoped_df.loc[row_id, col_title] = drawn[pos]
        # Recurse into child entries, carrying the accumulated constraints down.
        if children:
            for child in children:
                scoped_df = deep_search_programme(child.get('variable'), child.get('dvariable'),
                                                  child.get('children'), constraints, scoped_df)
        # Write back: drop the processed rows from df, then append the new versions.
        for row_id in scoped_df.index.tolist():
            df = df.drop(row_id, axis=0)
        return pd.concat([df, scoped_df], axis=0)

    def deal_programme(obj: json, df: pd.DataFrame):
        """
        Apply every top-level programme entry to the generated data.

        :param obj: list of programme entries (each carries variable/dvariable/children)
        :param df: generated DataFrame to transform
        :return: the DataFrame after all entries have been applied
        """
        for entry in obj:
            # Each top-level entry starts from an empty constraint set;
            # deep_search_programme accumulates constraints while recursing.
            df = deep_search_programme(entry.get('variable'), entry.get('dvariable'),
                                       entry.get('children'), dict(), df)
        return df

    cur_list = data.get('weight')  # weight configuration for every question
    if not cur_list:
        return '请求参数权重为空'
    # 0. Persist weights
    # save_programme_weight(work_order_id, cur_list) # weights were already saved in the previous step, no need to save again here
    # 1. Look up the questionnaire link by work-order id
    cur_work_order = work_order_dao.query_work_order_by_id(work_order_id)
    link = cur_work_order.link  # questionnaire link
    total_count = int(cur_work_order.total_size)  # total number of rows to generate
    # 2. Generate the base data
    final_df = pd.DataFrame()
    for i in cur_list:
        final_df = build_datas_v2(i, final_df, total_count)
    # print(final_df)
    # 3. Apply the programme (plan) configuration
    programme = data.get('programme')
    if programme:
        save_programme(wo_id=work_order_id, inner_data=programme)
        final_df = deal_programme(programme, final_df)
        # final_df = deal_programme_v2(programme, final_df)
    # 4. Build title-id -> data-index mappings
    title_id_index_dict = dict()  # title id -> first data index
    title_id_indexes_dict = dict()  # title id -> all data indexes
    title_id_title_dict = dict()  # title id -> title text
    title_id_sys_type_dict = dict()  # title id -> system question type
    for i in cur_list:
        title_id = i.get('title_id')  # question title id
        title_id_title_dict[title_id] = i.get('title')
        title_id_sys_type_dict[title_id] = i.get('sys_type')
        title_id_indexes_dict[title_id] = i.get('data_index')
        data_index_list = i.get('data_index')
        if data_index_list:
            data_index0 = data_index_list[0]  # first data index of this question
            title_id_index_dict[title_id] = data_index0
    ext_obj = work_order_extend_dao.search_by_work_order_id(work_order_id)
    # db_index_title_maps = json.loads(ext_obj.titles)  # mapping between index and each column
    db_index_title_maps = ext_obj.titles  # mapping between index and each column
    db_index_obj_dict = dict()  # database index -> column title
    for i in db_index_title_maps:
        db_index_obj_dict[i.get('index')] = i.get('title')
    # 5. Handle question jumps: mask the cells between jump source and target with -3
    for i in cur_list:
        is_jump = i.get('is_jump')
        if is_jump:
            title = i.get('title')
            title_data_index = i.get('data_index')  # all data indexes of this question
            sys_type = i.get('sys_type')
            # Decide where to jump to based on the question type
            if sys_type == SysTypeEnum.SINGLE.value or sys_type == SysTypeEnum.MULTIPLE.value or sys_type == SysTypeEnum.SORTED.value or sys_type == SysTypeEnum.SCALE.value:
                options = i.get('options')
                for option in options:
                    single_value = option.get('value')
                    # cur_df = final_df[final_df[title].isin([single_value])]
                    cur_df = final_df[final_df[title].astype(str).str.startswith(single_value)]
                    if option.get('is_jump') and option.get('jump_to'):
                        jump_start_index = option.get('data_index')
                        jump_to_title = option.get('jump_to')
                        if str(jump_to_title) == '1':  # jump to the end of the questionnaire
                            # mask every cell after the jump start in the matching rows
                            for index, row in cur_df.iterrows():
                                item_arr = row.to_numpy()
                                for j_index in range(len(item_arr)):
                                    # cells at or before the start index stay untouched
                                    if j_index > jump_start_index:
                                        item_arr[j_index] = -3
                                final_df.loc[index] = item_arr
                        else:
                            jump_end_index = title_id_index_dict.get(jump_to_title)
                            # NOTE(review): a falsy jump_end_index (missing OR the value 0) is
                            # skipped here; if a jump target can map to data index 0 this silently
                            # drops it -- confirm.
                            if not jump_end_index:
                                continue
                            # mask cells strictly between start and end in the matching rows
                            for index, row in cur_df.iterrows():
                                item_arr = row.to_numpy()
                                for j_index in range(len(item_arr)):
                                    # cells outside (start, end) stay untouched
                                    if jump_start_index < j_index < jump_end_index:
                                        item_arr[j_index] = -3
                                final_df.loc[index] = item_arr
            elif sys_type == SysTypeEnum.TEXT.value or sys_type == SysTypeEnum.AREA.value or sys_type == SysTypeEnum.DATE.value:
                # Fill-in, area and date questions: match rows whose answer is a non-empty string
                options = i.get('options')
                for option in options:
                    cur_df = final_df[final_df[title].astype(str).str.strip() != '']
                    if option.get('is_jump') and option.get('jump_to'):
                        jump_start_index = option.get('data_index')
                        jump_to_title = option.get('jump_to')
                        if str(jump_to_title) == '1':  # jump to the end of the questionnaire
                            # mask every cell after the jump start in the matching rows
                            for index, row in cur_df.iterrows():
                                item_arr = row.to_numpy()
                                for j_index in range(len(item_arr)):
                                    # cells at or before the start index stay untouched
                                    if j_index > jump_start_index:
                                        item_arr[j_index] = -3
                                final_df.loc[index] = item_arr
                        else:
                            jump_end_index = title_id_index_dict.get(jump_to_title)
                            if not jump_end_index:
                                continue
                            # mask cells strictly between start and end in the matching rows
                            for index, row in cur_df.iterrows():
                                item_arr = row.to_numpy()
                                for j_index in range(len(item_arr)):
                                    # cells outside (start, end) stay untouched
                                    if jump_start_index < j_index < jump_end_index:
                                        item_arr[j_index] = -3
                                final_df.loc[index] = item_arr
            elif sys_type == SysTypeEnum.RATE.value:
                # Slider / proportion questions: rows whose value falls inside [min, max]
                # trigger the jump; values outside the range are left untouched
                options = i.get('options')
                for option in options:
                    min_oper_num = float(option.get('min_oper_num'))
                    max_oper_num = float(option.get('max_oper_num'))
                    # cur_df = final_df[final_df[title].astype(float).between(min_oper_num, max_oper_num)]
                    option_title = option.get('title')
                    cur_df = final_df[final_df[option_title].astype(float).between(min_oper_num, max_oper_num)]
                    jump_start_index = title_data_index[-1]
                    if option.get('is_jump') and option.get('jump_to'):
                        # jump_start_index = option.get('data_index')
                        jump_to_title = option.get('jump_to')
                        if str(jump_to_title) == '1':  # jump to the end of the questionnaire
                            # mask every cell after the jump start in the matching rows
                            for index, row in cur_df.iterrows():
                                item_arr = row.to_numpy()
                                for j_index in range(len(item_arr)):
                                    # cells at or before the start index stay untouched
                                    if j_index > jump_start_index:
                                        item_arr[j_index] = -3
                                final_df.loc[index] = item_arr
                        else:
                            jump_end_index = title_id_index_dict.get(jump_to_title)
                            if not jump_end_index:
                                continue
                            # mask cells strictly between start and end in the matching rows
                            for index, row in cur_df.iterrows():
                                item_arr = row.to_numpy()
                                for j_index in range(len(item_arr)):
                                    # cells outside (start, end) stay untouched
                                    if jump_start_index < j_index < jump_end_index:
                                        item_arr[j_index] = -3
                                final_df.loc[index] = item_arr
    # 6. Handle related-question (relation) logic
    for i in cur_list:
        cur_relation = i.get('relation')
        data_indexes = i.get('data_index')
        sys_type = i.get('sys_type')
        if not cur_relation or not data_indexes:  # no relation logic or no indexes -> skip
            continue
        if '|' in cur_relation:  # AND logic: every condition must hold, otherwise mask
            relations = cur_relation.split('|')
            cur_df = final_df
            for item_relation in relations:
                title_options = item_relation.split(',')  # comma separates question and options
                if not title_options:  # nothing found, skip
                    continue
                relation_title_id = title_options[0]  # related question id
                if len(title_options) == 1:  # for '0' or '-1' mask the answers with -3, otherwise skip
                    if '0' == str(relation_title_id) or '-1' == str(relation_title_id):
                        for index, row in final_df.iterrows():
                            item_arr = row.to_numpy()
                            for j_index in range(len(item_arr)):
                                # mask the cells belonging to this question
                                if j_index in data_indexes:
                                    item_arr[j_index] = -3
                            final_df.loc[index] = item_arr
                        continue
                    else:
                        continue  # otherwise skip outright
                relation_sys_type = title_id_sys_type_dict.get(relation_title_id)  # related question type
                title = title_id_title_dict.get(relation_title_id)
                cur_options = title_options[1]  # options
                options = cur_options.split(';')  # semicolon separates the option values
                if relation_sys_type == SysTypeEnum.SINGLE.value:
                    cur_df = cur_df[cur_df[title].isin(options)]
                elif relation_sys_type == SysTypeEnum.MULTIPLE.value:
                    indexes = title_id_indexes_dict.get(relation_title_id)  # all data indexes of the multi-choice question
                    multiple_db_index = {i: indexes[i] for i in range(len(indexes))}  # position -> db index
                    for option in options:  # iterate the selected options
                        cur_db_index = multiple_db_index.get(int(option) - 1)  # db index to relate to
                        cur_db_index_title = db_index_obj_dict.get(cur_db_index)  # column title of that db index
                        cur_df = cur_df[cur_df[cur_db_index_title] == 1]  # keep rows that selected this option (column == 1)
            # mask every row that failed at least one condition
            valid_indexes = cur_df.index
            for index, row in final_df.iterrows():
                if index not in valid_indexes:  # not among the matching rows -> mask with -3
                    item_arr = row.to_numpy()
                    for j_index in range(len(item_arr)):
                        # mask the cells belonging to this question
                        if j_index in data_indexes:
                            item_arr[j_index] = -3
                    final_df.loc[index] = item_arr
        elif '$' in cur_relation:  # OR logic: any single condition keeps the row
            relations = cur_relation.split('$')
            valid_index_arr = []  # row indexes satisfying at least one condition
            for item_relation in relations:
                title_options = item_relation.split(',')  # comma separates question and options
                if not title_options:  # nothing found, skip
                    continue
                relation_title_id = title_options[0]  # question id
                if len(title_options) == 1:  # for '0' or '-1' mask the answers with -3, otherwise skip
                    if '0' == str(relation_title_id) or '-1' == str(relation_title_id):
                        for index, row in final_df.iterrows():
                            item_arr = row.to_numpy()
                            for j_index in range(len(item_arr)):
                                # mask the cells belonging to this question
                                if j_index in data_indexes:
                                    item_arr[j_index] = -3
                            final_df.loc[index] = item_arr
                        continue
                    else:
                        continue  # otherwise skip outright
                relation_sys_type = title_id_sys_type_dict.get(relation_title_id)  # related question type
                title = title_id_title_dict.get(relation_title_id)
                cur_options = title_options[1]  # options
                options = cur_options.split(';')  # semicolon separates the option values
                if relation_sys_type == SysTypeEnum.SINGLE.value:
                    for option in options:
                        cur_df = final_df[final_df[title].isin([str(option)])]
                        # collect the matching rows for this option
                        valid_indexes = cur_df.index
                        for vi in valid_indexes:
                            valid_index_arr.append(vi)
                elif relation_sys_type == SysTypeEnum.MULTIPLE.value:
                    indexes = title_id_indexes_dict.get(relation_title_id)  # all data indexes of the multi-choice question
                    multiple_db_index = {i: indexes[i] for i in range(len(indexes))}  # position -> db index
                    for option in options:  # iterate the selected options
                        cur_db_index = multiple_db_index.get(int(option) - 1)  # db index to relate to
                        cur_db_index_title = db_index_obj_dict.get(cur_db_index)  # column title of that db index
                        cur_df = final_df[final_df[cur_db_index_title] == 1]  # rows that selected this option (column == 1)
                        for valid_index in cur_df.index:  # these rows are valid
                            valid_index_arr.append(valid_index)
            # mask every row that matched none of the conditions
            for index, row in final_df.iterrows():
                if index not in valid_index_arr:  # not among the matching rows -> mask with -3
                    item_arr = row.to_numpy()
                    for j_index in range(len(item_arr)):
                        # mask the cells belonging to this question
                        if j_index in data_indexes:
                            item_arr[j_index] = -3
                    final_df.loc[index] = item_arr
        else:  # a single relation condition
            title_options = cur_relation.split(',')  # comma separates question and options
            if not title_options:  # nothing found, skip
                continue
            relation_title_id = title_options[0]  # related question id
            if len(title_options) == 1:
                if '0' == str(relation_title_id) or '-1' == str(relation_title_id):
                    for index, row in final_df.iterrows():
                        item_arr = row.to_numpy()
                        for j_index in range(len(item_arr)):
                            # mask the cells belonging to this question
                            if j_index in data_indexes:
                                item_arr[j_index] = -3
                        final_df.loc[index] = item_arr
                    continue
                else:
                    continue  # otherwise skip outright
            title = title_id_title_dict.get(relation_title_id)  # title text of the related question
            cur_options = title_options[1]  # options
            relation_sys_type = title_id_sys_type_dict.get(relation_title_id)  # related question type
            options = cur_options.split(';')  # semicolon separates the option values
            # decide how to relate based on the question type
            if relation_sys_type == SysTypeEnum.SINGLE.value:
                valid_index_arr = []  # row indexes satisfying the condition
                for option in options:
                    cur_df = final_df[final_df[title].isin([str(option)])]
                    # collect the matching rows for this option
                    valid_indexes = cur_df.index
                    for vi in valid_indexes:
                        valid_index_arr.append(vi)
                for index, row in final_df.iterrows():
                    if index not in valid_index_arr:  # not among the matching rows -> mask with -3
                        item_arr = row.to_numpy()
                        for j_index in range(len(item_arr)):
                            # mask the cells belonging to this question
                            if j_index in data_indexes:
                                item_arr[j_index] = -3
                        final_df.loc[index] = item_arr
            elif relation_sys_type == SysTypeEnum.MULTIPLE.value:
                indexes = title_id_indexes_dict.get(relation_title_id)  # all data indexes of the multi-choice question
                # multiple_db_index = dict()  # position -> db index
                # for _xb in range(len(indexes)):
                #     multiple_db_index[_xb] = indexes[_xb]
                multiple_db_index = {i: indexes[i] for i in range(len(indexes))}  # position -> db index
                valid_index_arr = []  # row indexes satisfying the condition
                for option in options:  # iterate the selected options
                    cur_db_index = multiple_db_index.get(int(option) - 1)  # db index to relate to
                    cur_db_index_title = db_index_obj_dict.get(cur_db_index)  # column title of that db index
                    cur_df = final_df[final_df[cur_db_index_title] == 1]  # rows that selected this option (column == 1)
                    for valid_index in cur_df.index:  # these rows are valid
                        valid_index_arr.append(valid_index)
                # filtered_df = final_df[~final_df.index.isin(valid_index_arr)]  # filtered dataframe
                for index, row in final_df.iterrows():
                    if index not in valid_index_arr:  # not among the matching rows -> mask with -3
                        item_arr = row.to_numpy()
                        for j_index in range(len(item_arr)):
                            # mask the cells belonging to this question
                            if j_index in data_indexes:
                                item_arr[j_index] = -3
                        final_df.loc[index] = item_arr
    # 7. Replace remaining -3 markers with the literal "(跳过)" ("skipped") for slider questions
    for i in cur_list:
        sys_type = i.get('sys_type')
        data_index = i.get('data_index')
        if sys_type == SysTypeEnum.SINGLE_SLIDE.value:
            for index, row in final_df.iterrows():
                item_arr = row.to_numpy()
                for j_index in range(len(item_arr)):
                    # only touch this question's own columns
                    if j_index in data_index:
                        if -3 == item_arr[j_index]:
                            item_arr[j_index] = "(跳过)"
                final_df.loc[index] = item_arr
            # for index in data_index:
            #     final_df.loc[final_df.iloc[:, index] == -3, index] = "(跳过)"
    # 8. Delete all existing data of this work order
    topic_dao.delete_result_by_word_order_id(work_order_id)
    # 9. Persist the generated rows under work_order_id and mark the order as done (status 2)
    insert_into_topic(final_df, work_order_id)
    work_order_dao.modify_work_order_status(id=work_order_id, cur_status=2)
    return


def make_datas(work_order_id: int, data: json):
    """
    Generate questionnaire data for a work order and persist it.

    :param work_order_id: id of the work order to generate data for
    :param data: request payload; must contain a 'weight' list with per-question weights
    :return: an error message string when 'weight' is missing, otherwise
             {'work_order_link': link} with the questionnaire link
    """

    def insert_into_topic(df: pd.DataFrame, work_order_id: int):
        """
        Persist every generated row as a Topic record in one batch insert.

        :param df: generated data, one questionnaire submission per row
        :param work_order_id: owning work order id
        """
        topics = []
        for index, row in df.iterrows():
            r = np.array(row).tolist()
            json_str = json.dumps(r, ensure_ascii=False)
            topics.append(ut.Topic(data=json_str, work_order_id=work_order_id, submitted=0))
        topic_dao.batch_insert(topics)
        return

    cur_list = data.get('weight')
    if not cur_list:
        return '请求参数list为空'
    # 1. Look up the questionnaire link by work-order id
    cur_work_order = work_order_dao.query_work_order_by_id(work_order_id)
    link = cur_work_order.link  # questionnaire link
    total_count = int(cur_work_order.total_size)  # total number of rows to generate
    # 2. Generate the data  TODO still to fix: multiple-choice and proportion questions
    final_df = pd.DataFrame()
    for i in cur_list:
        final_df = build_datas(i, final_df, total_count)
    # 3. Delete all existing data of this work order
    topic_dao.delete_result_by_word_order_id(work_order_id)
    # 4. Persist the generated data under work_order_id and mark it done (status 2)
    insert_into_topic(final_df, work_order_id)
    work_order_dao.modify_work_order_status(id=work_order_id, cur_status=2)
    return {'work_order_link': link}


def judge_config(work_order_id: int, make_data: json, view_config_data: json):
    """
    Validate a make-data configuration by dry-running the generation, then
    persist the weight configuration.

    :param work_order_id: work order to validate against
    :param make_data: per-question weight configs used for the dry run
    :param view_config_data: raw view configuration saved once validation passes
    :return: the success message
    """
    # 1. Dry-run the data generation; a bad config surfaces as an exception here.
    order = work_order_dao.query_work_order_by_id(work_order_id)
    row_total = int(order.total_size)
    preview_df = pd.DataFrame()
    for weight_cfg in make_data:
        preview_df = build_datas_v2(weight_cfg, preview_df, row_total)
    # 2. Validation passed -- save the weights.
    save_programme_weight(work_order_id, view_config_data)
    return "校验通过"


def reload_link(work_order_id: int, link: str):
    """
    Replace a work order's questionnaire link and re-parse it.

    :param work_order_id: work order whose link is replaced
    :param link: the new questionnaire link
    :return: the parse result for the new link
    """
    # 1. Point the work order at the new link.
    work_order_dao.modify_work_order_link(work_order_id, link)
    # 2. Drop every historical strategy config tied to the old link.
    strategy_dao.delete_by_word_order_id(work_order_id)
    # 3. Parse the new link (weight-cache invalidation is currently disabled).
    return explain_service.explain_link_v3(work_order_id)


def explain_link_v2(work_order_id: int, load_type: LinkLoadTypeEnum = LinkLoadTypeEnum.NORMALITY):
    """
    Parse the questionnaire behind a work order's link and persist the
    title/index mappings.

    :param work_order_id: work order whose link should be parsed
    :param load_type: load mode (kept for backward compatibility; the caching
                      that used it is currently disabled)
    :return: {'list': title_maps, 'data_index': titles, 'total_data_index': len(titles)}
    :raises APIException: when pre-parsing fails, the link redirects, or parsing errors out
    """

    def pre_explain(link: str):
        """Probe the link with a browser-like GET (redirects disabled) and report reachability."""
        success = False
        real_link = link
        # Browser-like headers plus a randomized X-Forwarded-For to avoid naive blocking.
        header = {
            'upgrade-insecure-requests': '1',
            'X-Forwarded-For': f'{112}.{random.randint(64, 68)}.{random.randint(0, 255)}.{random.randint(0, 255)}',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
            'accept': ('text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,'
                       'image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'),
            'sec-fetch-site': 'none',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-user': '?1',
            'sec-fetch-dest': 'document',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh',
        }
        response = requests.get(link, allow_redirects=False, headers=header, timeout=10)
        if response.status_code == 200:
            success = True
        elif response.status_code == 302:  # resource redirected -- refuse to follow silently
            raise APIException(message="链接已被重定向，请检查问卷链接是否跳转到新的链接或已关闭")
        # 404 and every other status fall through with success == False
        return success, real_link

    try:
        cur_work_order = work_order_dao.query_work_order_by_id(work_order_id)
        cur_link = cur_work_order.link
        # wenjuan.com links containing /t/ get redirected; rewrite to the /s/ form up front.
        if cur_link.startswith('https://www.wenjuan.com') and '/t/' in cur_link:
            parts = cur_link.split('/t/')
            cur_link = parts[0] + '/s/' + parts[1]
        is_success, real_link = pre_explain(cur_link)  # probe the link first
        if not is_success:
            raise APIException(message="问卷预解析失败")
        if cur_link != real_link:
            work_order_dao.modify_work_order_link(cur_work_order.id, real_link)
        # Pick the parser matching the questionnaire platform.
        explainer = None
        if real_link.startswith('https://www.wenjuan.com'):  # wenjuan.com
            explainer = WjwExplain(real_link)
        elif real_link.startswith('https://wj.qq.com'):  # Tencent survey
            explainer = TxExplain(real_link)
        elif real_link.startswith('https://docs.google.com/'):  # Google Forms
            explainer = GgExplain(real_link)
        elif 'qualtrics' in real_link:  # Qualtrics
            explainer = QualtricsExplain(real_link)
        elif 'credamo.com' in real_link:  # Credamo
            explainer = CredamoExplain(real_link)
        if explainer is not None:
            expl_result = explainer.do_explain()
            title_maps = expl_result['list']
            titles = expl_result['data_index']
        else:  # default: wjx
            title_maps, titles = wjx_explain_v2.explain_www_wjx_cn(real_link)
        # Persist the mappings: delete-then-insert.
        work_order_extend_dao.delete_result_by_word_order_id(work_order_id=work_order_id)
        work_order_extend_dao.batch_insert(
            [ut.WorkOrderExtend(work_order_id=work_order_id, titles=titles, title_maps=title_maps)])
    except Exception as e:
        traceback.print_exc()
        raise APIException(message="文件解析失败" + str(e))
    res = {'list': title_maps, 'data_index': titles, 'total_data_index': len(titles)}
    return res


def explain_link(work_order_id: int):
    """
    Parse the questionnaire behind a work order's link (legacy, wjx-only
    version) and persist the title/index mappings.

    :param work_order_id: work order whose link should be parsed
    :return: the raw parse result from the wjx explainer
    :raises APIException: when the link cannot be parsed
    """
    try:
        cur_work_order = work_order_dao.query_work_order_by_id(work_order_id)
        cur_link = cur_work_order.link
        # Parse the questionnaire from the link.
        res = wjx_explain.explain_www_wjx_cn(cur_link)
        cur_list = res['list']
        # Mapping between each real question and the data indexes to submit.
        real_title_maps = [{'title_id': i['title_id'], 'type': i['type'], "data_index": i['data_index']}
                           for i in cur_list]
        # Flatten every virtual title -- the front end renders this list.
        titles = [j for i in cur_list for j in i['data_index']]
        # Persist the titles: delete-then-insert.
        json_str = json.dumps(titles, ensure_ascii=False)
        work_order_extend_dao.delete_result_by_word_order_id(work_order_id=work_order_id)
        work_order_extend_dao.batch_insert(
            [ut.WorkOrderExtend(work_order_id=work_order_id, titles=json_str, title_maps=real_title_maps)])
    except Exception as e:
        # Log the real cause and keep it chained instead of discarding it silently.
        traceback.print_exc()
        raise APIException(message="解析文件失败") from e
    return res


def _query_user_name_zh(user_id, default='未知'):
    """Resolve a user id to its Chinese display name, falling back to *default*."""
    if user_id:
        user_item = user_dao.query_user_by_id(user_id)
        if user_item:
            return user_item.name_zh
    return default


def work_order_info(work_order_id: int):
    """
    Query work-order details by work order id.

    :param work_order_id: work order id
    :return: detail dict, or an empty dict when the work order does not exist
    """
    item = work_order_dao.query_work_order_by_id(work_order_id)
    if not item:
        return {}
    # Resolve display names for the receiver, the maker and the shop.
    get_user_name = _query_user_name_zh(item.get_user_id)
    make_user_name = _query_user_name_zh(item.make_user_id)
    shop_name = '未知'
    if item.shop_id:
        shop_item = shop_dao.query_shops_by_id(item.shop_id)
        if shop_item:
            shop_name = shop_item.shop_name
    res = {'ww_id': item.ww, 'order_id': item.order_id, 'shop_id': item.shop_id, 'make_user_id': item.make_user_id,
           'get_user_id': item.get_user_id, 'total_size': item.total_size, 'link': item.link, 'priority': item.priority,
           'bz': item.bz, 'feedback': item.feedback, 'status': item.status, 'area_note': item.area_note,
           'get_user_name': get_user_name, 'shop_name': shop_name, 'make_user_name': make_user_name}
    return res


def change_order_info(data):
    """
    Modify the basic fields of a work order from a submitted dict.

    Field-update convention (as coded): most fields are only overwritten when
    the submitted value is truthy, so sending 0 / '' / None keeps the old
    value; only `oper_type` explicitly allows 0.

    :param data: submitted dict; must contain 'id' plus any fields to change
    :raises APIException: when the work order does not exist
    """
    item = work_order_dao.query_work_order_by_id(data.get('id'))
    if not item:
        raise APIException(message="工单不存在")
    # NOTE(review): ww/cost/total_size/link/bz/feedback are assigned
    # unconditionally — omitting them in `data` sets the field to None.
    # Confirm this clearing behaviour is intended.
    item.ww = data.get('custom_name')
    item.order_id = data.get('order_id') if data.get('order_id') else item.order_id
    item.make_user_id = int(data.get('make_user_id')) if data.get('make_user_id') else item.make_user_id
    item.get_user_id = int(data.get('get_user_id')) if data.get('get_user_id') else item.get_user_id
    item.cost = data.get('cost')
    item.total_size = data.get('size')
    item.shop_id = int(data.get('shop_id')) if data.get('shop_id') else item.shop_id
    item.link = data.get('link')
    item.priority = int(data.get('priority')) if data.get('priority') else item.priority
    item.bz = data.get('bz')
    item.status = int(data.get('status')) if data.get('status') else item.status
    item.feedback = data.get('feedback')
    # oper_type legitimately takes the value 0, so 0 is special-cased here
    # (a plain truthiness test would treat it as "not provided").
    item.oper_type = int(data.get('oper_type')) \
        if data.get('oper_type') == 0 or data.get('oper_type') else item.oper_type
    item.plan_finished_time = int(data.get('plan_finished_time')) if data.get(
        'plan_finished_time') else item.plan_finished_time
    item.ordered_time = int(data.get('ordered_time')) if data.get('ordered_time') else item.ordered_time
    work_order_dao.modify_work_order(item)
    return


def change_order_status(work_order_id: int, modify_type: int, data):
    """
    Modify a single field of a work order.

    :param work_order_id: work order id
    :param modify_type: 0 -> status, 1 -> link, 2 -> feedback
    :param data: dict carrying the new field value
    :return: (ok, message) tuple; (False, reason) when the required field is
        missing, otherwise (True, "success")
    """
    if modify_type == 0:
        status_str = data.get("status")
        if status_str is None or status_str == "":
            return False, "status is null"
        work_order_dao.modify_work_order_status(work_order_id, int(status_str))
    elif modify_type == 1:
        link = data.get("link")
        if link is None or link == "":
            return False, "link is null"
        work_order_dao.modify_work_order_link(work_order_id, link)
    elif modify_type == 2:
        feedback = data.get("feedback")
        if feedback is None or feedback == "":
            return False, "feedback is null"
        work_order_dao.modify_work_order_feedback(work_order_id, feedback)
    # Bug fix: failure paths return (False, msg) tuples, but success used to
    # return a bare True — callers unpacking the result would crash. Also
    # dropped the dead `res = ut.WorkOrder()` initialisation (res was unused).
    return True, "success"


def search_result_v2(work_order_id: int, cur_status: int):
    """
    Query the answers of a work order (v2 endpoint name).

    The body was a byte-for-byte duplicate of :func:`search_result`; it now
    delegates to it so the two cannot drift apart. The separate name is kept
    for backward compatibility with existing callers.

    :param work_order_id: work order id
    :param cur_status: submission filter -> 0: all, 1: submitted, 2: unsubmitted
    :return: {'total': row count, 'title': headers, 'datas': rows}
    :raises APIException: when querying or decoding the stored rows fails
    """
    return search_result(work_order_id, cur_status)


def search_result(work_order_id: int, cur_status: int):
    """
    Query the answers of a work order.

    :param work_order_id: work order id
    :param cur_status: submission filter -> 0: all, 1: submitted, 2: unsubmitted
    :return: {'total': row count, 'title': headers, 'datas': rows}
    :raises APIException: when querying or decoding the stored rows fails
    """
    try:
        rows = topic_dao.search_results_with_status(work_order_id, cur_status)
        ext_obj = work_order_extend_dao.search_by_work_order_id(work_order_id)
        titles = ext_obj.titles if ext_obj else []
        datas = [
            {'id': row.id, 'values': json.loads(row.data), 'submitted': row.submitted}
            for row in rows
        ]
        return {'total': len(datas), 'title': titles, 'datas': datas}
    except Exception:
        raise APIException(message="查询工单答案失败")


def insert_result(work_order_id: int):
    """
    Insert one fixed sample answer row for the given work order.

    :param work_order_id: work order id
    :return: dao insert result
    """
    sample = json.dumps([1, 2, 3, '无', '1^问题'], ensure_ascii=False)
    return topic_dao.insert_result(sample, work_order_id)


def search_relation_config(wo_id: int):
    """
    Query the correlation (relation) configuration of a work order.

    :param wo_id: work order id
    :return: (config, message); config is None when missing
    """
    item = strategy_dao.search_by_work_order_id(wo_id)
    if not item or not item.relation_config:
        # Bug fix: the old message said "无提交配置" (submit config), a
        # copy-paste from search_submit_config; this lookup is for the
        # relation config.
        return None, "当前工单无相关性配置"
    return item.relation_config, "请求成功"


def search_submit_config(wo_id: int):
    """
    Query the submit configuration of a work order.

    :param wo_id: work order id
    :return: (config, message); config is None when missing
    """
    strategy = strategy_dao.search_by_work_order_id(wo_id)
    if strategy and strategy.submit_config:
        return strategy.submit_config, "请求成功"
    return None, "当前工单无提交配置"


def search_programme(wo_id: int):
    """
    Query the strategy programme of a work order.

    :param wo_id: work order id
    :return: (programme, message); programme is None when missing
    """
    strategy = strategy_dao.search_by_work_order_id(wo_id)
    if strategy and strategy.strategy_programme:
        return strategy.strategy_programme, "请求成功"
    return None, "当前工单无方案配置"


def search_programme_weight(wo_id: int):
    """
    Query the strategy weight configuration of a work order.

    :param wo_id: work order id
    :return: (weight config, message); config is None when missing
    """
    strategy = strategy_dao.search_by_work_order_id(wo_id)
    if strategy and strategy.strategy_weight:
        return strategy.strategy_weight, "请求成功"
    return None, "当前工单无权重配置"


def save_programme(wo_id, inner_data):
    """
    Save (insert or update) the strategy programme of a work order.

    :param wo_id: work order id
    :param inner_data: programme payload
    :raises APIException: when persisting fails
    """
    try:
        existing = strategy_dao.search_by_work_order_id(wo_id)
        if existing is None:
            strategy_dao.batch_insert([ut.Strategy(work_order_id=wo_id, strategy_programme=inner_data)])
        else:
            strategy_dao.modify_programmes_by_work_order_id(wo_id=wo_id, programmes=inner_data)
    except Exception:
        traceback.print_exc()
        raise APIException(message="保存方案失败")


def save_relation_config(wo_id, json_obj):
    """
    Save (insert or update) the correlation configuration of a work order.

    :param wo_id: work order id
    :param json_obj: configuration payload
    :raises APIException: when persisting fails
    """
    try:
        existing = strategy_dao.search_by_work_order_id(wo_id)
        if existing:
            strategy_dao.modify_relation_config_by_work_order_id(wo_id=wo_id, relation_config=json_obj)
        else:
            strategy_dao.batch_insert([ut.Strategy(work_order_id=wo_id, relation_config=json_obj)])
    except Exception:
        traceback.print_exc()
        raise APIException(message="保存相关性配置失败")


def save_programme_weight(wo_id, inner_data):
    """
    Save (insert or update) the strategy weight configuration of a work order.

    :param wo_id: work order id
    :param inner_data: weight payload
    :raises APIException: when persisting fails
    """
    # Consistency fix: wrap persistence like the sibling save_programme /
    # save_relation_config do, so a DB failure surfaces as a uniform API
    # error instead of a raw traceback.
    try:
        item = strategy_dao.search_by_work_order_id(wo_id)
        if item is None:
            strategy_dao.batch_insert([ut.Strategy(work_order_id=wo_id, strategy_weight=inner_data)])
        else:
            strategy_dao.modify_by_work_order_id(wo_id=wo_id, strategy_weight_res=inner_data)
    except Exception:
        traceback.print_exc()
        raise APIException(message="保存权重失败")
    return


def modify_result_by_id(work_order_id: int, datas: list):
    """
    Update stored answer rows by their primary key.

    :param work_order_id: work order id (currently unused — rows are located
        purely by their topic id)
    :param datas: list of {'id': topic_id, 'values': new data payload}
    :return: None
    """
    if len(datas) <= 0:
        return
    update_topics = []
    for i in datas:
        topic_id = i['id']
        item = ut.Topic.query.get(topic_id)
        if item:  # ids that no longer exist are silently skipped
            item.data = i['values']
            update_topics.append(item)
    topic_dao.batch_update_by_id(update_topics)
    return


def get_group_datas(cur_list: list):
    """
    Build the per-group descriptors used by the correlation pipeline.

    :param cur_list: group config list; each entry carries 'group_index' and
        'options' (each option with 'index' and 'option_count')
    :return: [{'group_index': ..., 'indexes': [col, ...], 'count': n}, ...];
        None when cur_list is None (legacy behaviour kept)
    :raises APIException: when a group has no questions or its questions have
        differing option counts
    """
    if cur_list is None:
        return

    groups = []
    for i in cur_list:
        inner_l = i.get('options')
        group_index = i.get('group_index')
        if len(inner_l) <= 0:
            # Bug fix: the old comma expression built a tuple, not a string,
            # so the API error message was e.g. ('第', 1, '组的题目为空').
            raise APIException(message="第" + str(group_index) + "组的题目为空")
        base_option_count = inner_l[0].get('option_count')
        pre_size = base_option_count * len(inner_l)  # expected option total if all match
        real_size = sum(j.get('option_count') for j in inner_l)  # actual option total
        indexes = [j.get('index') for j in inner_l]
        if real_size != pre_size:
            msg = "分组错误，第" + str(i.get('group_index')) + "组存在题目选项个数不一致"
            raise APIException(message=msg)
        groups.append({'group_index': group_index, 'indexes': indexes, 'count': base_option_count})
    return groups


def get_group_datas_v2(cur_list: list):
    """
    Build the per-group descriptors (v2: per-column option counts).

    :param cur_list: group config list; each entry carries 'group_index' and
        'options' (each option with 'index' and its own 'options' list)
    :return: [{'group_index': ..., 'indexes': {col: option_count}}, ...];
        [] when cur_list is None
    :raises APIException: when a group has no questions
    """
    groups = []
    if cur_list is None:
        return groups
    for i in cur_list:
        inner_l = i.get('options')
        group_index = i.get('group_index')
        if len(inner_l) <= 0:
            # Bug fix: the old comma expression built a tuple, not a string,
            # so the API error message was a tuple instead of text.
            raise APIException(message="第" + str(group_index) + "组的题目为空")
        indexes = {j.get('index'): len(j.get('options')) for j in inner_l}
        groups.append({'group_index': group_index, 'indexes': indexes})
    return groups


def relation_make_data(total_count: int, groups: [], relation_generate_type: str) -> pd.DataFrame:
    """
    Generate the correlation data set for the given groups.

    :param total_count: number of rows to generate
    :param groups: group descriptors from get_group_datas_v2
    :param relation_generate_type: generation strategy name
    :return: generated DataFrame
    """
    return make_relation_data.MakeRelationData(total_count, relation_generate_type, groups).generate_now()


def relation_analysis(total_df: pd.DataFrame, df: pd.DataFrame, groups: [], ext_obj,
                      query_params: v_submit.RelationView):
    """
    Run the statistical analyses over generated data (legacy variant).

    :param ext_obj: mapping between titles and column indexes
    :param total_df: full DataFrame (all columns)
    :param df: DataFrame restricted to the columns involved in the analyses
    :param groups: group info {'group_index': ..., 'indexes': ...}
    :param query_params: request parameters (do_ra/do_va/do_ca/do_re/do_fa flags)
    :return: (results dict, result_pass) — NOTE(review): result_pass is never
        set to True anywhere in this variant; confirm callers ignore it.
    """
    result_pass = False
    # Build mean_df: one column per group holding the per-row mean of the
    # group's member columns.
    mean_df = pd.DataFrame()
    for i in range(len(groups)):
        cur_group = groups[i]
        group_index = cur_group['group_index']  # column name of this group in mean_df
        cur_arr = cur_group['indexes']  # member column indexes of this group
        cur_arr_loc = [key for key, _ in cur_arr.items()]
        group_df = df.loc[:, cur_arr_loc]  # the group's sub-DataFrame
        row_mean = group_df.mean(axis=1)  # per-row mean of the group
        row_mean_df = pd.DataFrame({group_index: row_mean})
        if mean_df.empty:
            mean_df = row_mean_df
        else:
            mean_df = mean_df.join(row_mean_df)
    # Reliability analysis (alpha after item deletion) - computed by default
    ra_res = reliability_analysis.do_analysis(df, mean_df, groups) if query_params.do_ra else {}
    # Validity analysis - computed by default
    validity_obj = validity_analysis.Validity(df, groups)
    va = validity_obj.do_analysis() if len(df.columns) > 1 and query_params.do_va else {}
    # Correlation analysis (a 2-D table) - opt-in
    correlation_obj = correlation_analysis.Correction(mean_df)
    ca = correlation_obj.calculate_p() if query_params.do_ca else []
    # Regression - opt-in
    regressive_res = []
    if len(df.columns) > 1 and query_params.do_re:
        obj = regressive_analysis.RegressiveAnalysis(mean_df, query_params.re_v, query_params.re_dv)
        regressive_res = obj.analysis()
    # Frequency analysis - computed by default; needs the full df plus the
    # index-to-title mapping.
    fa_obj = frequency_analysis.FrequencyAnalysis(total_df, ext_obj)
    fa = fa_obj.do_analysis_v2() if query_params.do_fa else {}
    # Difference analysis / mediation / moderation - TBD
    res = {'reliability_analysis': ra_res, 'validity_analysis': va, 'correlation_analysis': ca,
           'regressive_analysis': regressive_res, 'frequency_analysis': fa}
    return res, result_pass


def relate_topic(work_order_id: int):
    """
    Return the stored column titles for a work order.

    :param work_order_id: work order id
    :return: titles; empty list when no extend record exists
    """
    ext_obj = work_order_extend_dao.search_by_work_order_id(work_order_id)
    return ext_obj.titles if ext_obj else []


def relation_calculate(query_params: v_submit.RelationView):
    """
    Recompute the correlation analyses over the already-stored answer rows
    (no new data is generated), then persist the result.

    :param query_params: request parameters (work_order_id, correlation
        groups, analysis switches)
    :return: analysis result dict (also saved to relation_extend)
    :raises APIException: on bad parameters, missing work order or empty data
    """
    work_order_id = query_params.work_order_id
    cur_list = query_params.correlation
    if not work_order_id or not cur_list:
        raise APIException(message="参数错误")
    item = work_order_dao.query_work_order_by_id(work_order_id)
    if not item:
        raise APIException(message="工单不存在")
    groups = get_group_datas_v2(cur_list)  # group descriptors
    # 1. load all stored rows into total_df
    base_datas = topic_dao.search_by_work_order_id(work_order_id)  # base answer rows
    if len(base_datas) <= 0:
        raise APIException(message="当前工单基础数据为空，无法分析数据")
    res_arr = []  # rows taking part in the computation
    for i in range(len(base_datas)):
        data_obj = json.loads(base_datas[i].data)
        res_arr.append(data_obj)
    # build a DataFrame with ordinal row/column labels
    total_df = pd.DataFrame(res_arr, index=range(len(res_arr)), columns=range(len(res_arr[0])))
    # 2. slice out the correlation columns
    col_indexes = []
    for g in groups:
        cur_indexes = g.get('indexes')
        for index, _ in cur_indexes.items():
            col_indexes.append(index)
    relation_df = total_df[col_indexes]
    res, is_pass, pass_status_dict, item_weight = do_analysis(groups, query_params, relation_df)
    # 3. frequency analysis over the full data
    ext_obj = work_order_extend_dao.search_by_work_order_id(work_order_id)  # all headers
    fa_obj = frequency_analysis.FrequencyAnalysis(total_df, ext_obj)
    fa = fa_obj.do_analysis_v2() if query_params.do_fa else {}
    res['frequency_analysis'] = fa
    pass_status_dict['frequency_analysis'] = True  # frequency always passes
    res['pass_status'] = pass_status_dict  # per-analysis pass flags
    print('频数分析完成，进入保存答案状态')
    # persist the result json (insert when absent, update otherwise)
    re_obj = relation_extend_dao.search_by_work_order_id(work_order_id)
    if not re_obj:  # no prior correlation result
        insert_obj = [ut.RelationExtend(work_order_id=work_order_id, relation_res=res)]
        relation_extend_dao.batch_insert(insert_obj)
    else:
        relation_extend_dao.modify_by_work_order_id(work_order_id, res)
    return res


def generate_relation_key(work_order_id: int) -> str:
    """
    Build the Redis key under which a work order's correlation task state
    is stored.

    :param work_order_id: work order id
    :return: key of the form 'relation:<id>:make_data'
    """
    return f'relation:{work_order_id}:make_data'


def relation_stop(work_order_id: int):
    """
    Stop an in-flight correlation generation task.

    Drives the small state machine stored in Redis under the work order's
    relation key (PROCESSING -> STOPPING -> WRITING_DATA -> FINISHED) and
    returns the persisted result when one exists.

    :param work_order_id: work order id
    :return: the stored relation result, {} when the task key vanished while
        polling, or the string "success" as a fallback
    :raises APIException: when stopping fails
    """
    try:
        key = generate_relation_key(work_order_id)
        value = Redis.read(key)
        if not value:
            # no live task: fall back to the persisted result
            item = relation_extend_dao.search_by_work_order_id(work_order_id)
            return item.relation_res
        if value == RelationProcessType.STOPPING.value:
            # NOTE(review): this APIException is caught by the broad except
            # below and re-raised as "停止失败" (assuming APIException
            # subclasses Exception) — confirm the message loss is intended.
            raise APIException(message="任务正在停止中，请勿重复停止")
        if value == RelationProcessType.WRITING_DATA.value or value == RelationProcessType.PROCESSING.value:
            if value == RelationProcessType.PROCESSING.value:
                Redis.write(key, RelationProcessType.STOPPING.value, 100)
            # data generated and being written, or stop requested: poll until
            # the task reports FINISHED
            can_return = 0
            while can_return < 30:  # poll for at most 30 seconds
                can_return += 1
                time.sleep(1)  # 1 second between polls
                inner_value = Redis.read(key)
                if not inner_value:  # key vanished: nothing to report
                    return {}
                if inner_value == RelationProcessType.FINISHED.value:
                    item = relation_extend_dao.search_by_work_order_id(work_order_id)
                    return item.relation_res
        elif value == RelationProcessType.FINISHED.value:
            item = relation_extend_dao.search_by_work_order_id(work_order_id)
            return item.relation_res
    except Exception as e:
        traceback.print_exc()
        raise APIException(message="停止失败")
    return "success"


def do_analysis(analysis_groups, params: v_submit.RelationView, r_df):
    """
    Run only the correlation-related analyses and score the result.

    :param analysis_groups: group descriptors ({'group_index': ..., 'indexes': ...})
    :param params: all query parameters from the page
    :param r_df: DataFrame of the correlation columns
    :return: (results dict, overall pass flag, per-analysis pass dict, weight score)
    :raises APIException: when any analysis step fails
    """

    def do_relation(df: pd.DataFrame, relation_groups: [], params: v_submit.RelationView):
        """
        Perform the analyses on df.

        :param df: data taking part in the analyses
        :param relation_groups: group info {'group_index': ..., 'indexes': ...}
        :param params: request parameters
        :return: (results dict, overall pass, per-analysis pass dict, weight)
        """
        cur_weight = 0  # accumulated score across the enabled analyses
        result_pass = False
        relation_res = dict()
        pass_res = dict()  # pass flag per analysis
        relation_res['reliability_analysis'] = {}
        relation_res['validity_analysis'] = {}
        relation_res['correlation_analysis'] = []
        relation_res['regressive_analysis'] = []
        # Build mean_df: one column per group holding the per-row mean.
        mean_df = pd.DataFrame()
        for relation_i in range(len(relation_groups)):
            cur_group = relation_groups[relation_i]
            group_index = cur_group['group_index']  # column name of this group in mean_df
            cur_arr = cur_group['indexes']  # member column indexes of this group
            cur_arr_loc = [key for key, _ in cur_arr.items()]
            group_df = df.loc[:, cur_arr_loc]  # the group's sub-DataFrame
            row_mean = group_df.mean(axis=1)  # per-row mean of the group
            row_mean_df = pd.DataFrame({group_index: row_mean})
            if mean_df.empty:
                mean_df = row_mean_df
            else:
                mean_df = mean_df.join(row_mean_df)
        # Reliability analysis (alpha after item deletion) - on by default
        if params.do_ra:
            ra_res, result_pass, re_score = reliability_analysis.do_analysis(df, mean_df, relation_groups)
        else:
            ra_res, result_pass, re_score = {}, False, 0
        cur_weight += re_score
        relation_res['reliability_analysis'] = ra_res
        pass_res['reliability_analysis'] = result_pass
        # Validity analysis - on by default
        if len(df.columns) > 1 and params.do_va:
            validity_obj = validity_analysis.Validity(df, relation_groups, params.kmo_min, params.kmo_max)
            obj = validity_obj.do_analysis()
            va = obj.get('res')
            vr_pass = obj.get('is_pass')
            va_score = obj.get('score')
        else:
            va, vr_pass, va_score = {}, False, 0
        cur_weight += va_score
        relation_res['validity_analysis'] = va
        pass_res['validity_analysis'] = vr_pass
        result_pass = vr_pass and result_pass
        # Correlation analysis (a 2-D table) - opt-in
        correlation_obj = correlation_analysis.Correction(mean_df)
        ca = correlation_obj.calculate_p() if params.do_ca else []
        relation_res['correlation_analysis'] = ca
        pass_res['correlation_analysis'] = True  # correlation always passes
        # Regression - opt-in
        regressive_res = []
        if len(df.columns) > 1 and params.do_re:
            obj = regressive_analysis.RegressiveAnalysis(mean_df, params.re_v, params.re_dv, params.regressive_r)
            regressive_res, re_pass, regressive_score = obj.analysis()
            cur_weight += regressive_score
        else:  # regression disabled: treat it as passed
            re_pass = True
        relation_res['regressive_analysis'] = regressive_res
        pass_res['regressive_analysis'] = re_pass
        result_pass = re_pass and result_pass
        # Difference analysis / mediation / moderation - TBD
        return relation_res, result_pass, pass_res, cur_weight

    try:
        r_df = r_df.apply(pd.to_numeric, errors='coerce')  # coerce every value to numeric (NaN on failure)
        # run the analyses
        analysis_res, result_pass, status_dict, cur_weight = do_relation(r_df, analysis_groups, params)
    except Exception as do_analysis_error:
        traceback.print_exc()
        raise APIException(message="算法分析失败" + str(do_analysis_error))
    return analysis_res, result_pass, status_dict, cur_weight


def relation_cycle_check(query_params: v_submit.RelationView):
    """
    Generate correlation data plus analysis results, retrying in a loop until
    a data set passes the checks or the task is stopped via Redis.

    :param query_params: request parameters (work_order_id, correlation
        groups, analysis switches, generation type, ...)
    :return: analysis result dict (also persisted to relation_extend)
    :raises APIException: on bad parameters, missing work order, or failures
        while generating / merging / analysing data
    """

    def save_relation_datas(wo_id: int, inner_df: pd.DataFrame):
        """
        Merge the generated correlation columns into the base answer rows and
        persist them (delete-then-insert); return the merged DataFrame.
        """
        relation_arrs = []
        for _, row in inner_df.iterrows():
            relation_arrs.append(np.array(row).tolist())
        index_arr = [h for h in inner_df.columns]  # DB column positions of the correlation data
        index_str = json.dumps(index_arr, ensure_ascii=False)
        base_datas = topic_dao.search_by_work_order_id(wo_id)  # base answer rows
        if len(relation_arrs) <= 0:
            raise APIException(message="当前工单相关性数据为空，无法合并数据")
        if len(base_datas) <= 0:
            raise APIException(message="当前工单基础数据为空，无法合并数据")
        if len(relation_arrs) != len(base_datas):
            # Bug fix: the two counts were swapped in the message (the base
            # count showed the relation count and vice versa).
            msg = "相关性数据和基础数据条数不一样，无法合并数据！基础数据共：" + str(
                len(base_datas)) + "条，相关性数据共：" + str(len(relation_arrs)) + "条"
            raise APIException(message=msg)
        relations = []
        res_arr = []  # rows taking part in the follow-up computation
        for i in range(len(base_datas)):
            r_data_arr = relation_arrs[i]
            r_dict = dict()
            for h in range(len(r_data_arr)):
                r_dict[index_arr[h]] = r_data_arr[h]
            obj = base_datas[i]
            data_obj = json.loads(obj.data)  # decode the stored row
            for j in r_dict:
                # Bug fix: use >= — j == len(data_obj) passed the old check
                # (j > len) and then raised IndexError on the access below.
                if j >= len(data_obj):
                    raise APIException(message="相关性数据索引超出基础数据索引范围，无法合并数据")
                if str(data_obj[j]) != '-3' and str(data_obj[j]) != '(跳过)':  # only overwrite real answers
                    data_obj[j] = r_dict[j]
            res_arr.append(data_obj)
            ss = json.dumps(data_obj, ensure_ascii=False)
            relations.append(ut.Relation(relation_data=ss, relation_index=index_str, work_order_id=work_order_id))
        # delete-then-insert
        relation_dao.delete_result_by_word_order_id(work_order_id)
        relation_dao.batch_insert(relations)
        # build a DataFrame with ordinal row/column labels
        res_df = pd.DataFrame(res_arr, index=range(len(res_arr)), columns=range(len(res_arr[0])))
        return res_df

    work_order_id = query_params.work_order_id
    cur_list = query_params.correlation
    if not work_order_id or not cur_list:
        raise APIException(message="参数错误")
    item = work_order_dao.query_work_order_by_id(work_order_id)
    if not item:
        raise APIException(message="工单不存在")
    # total number of rows to generate for this work order
    total_count = int(item.total_size)
    if total_count <= 0:
        raise APIException(message="工单需要生成的总行数小于1,无法计算相关性")

    is_pass = False
    save_df = pd.DataFrame()  # only the columns taking part in the analysis
    res = {}
    pass_status_dict = {}
    score = -9999999  # combined weight: reliability 100, validity 100, correlation 50, regression 100
    run_count = 0  # iteration counter
    relation_redis_key = generate_relation_key(work_order_id)
    Redis.write(key=relation_redis_key, value=RelationProcessType.PROCESSING.value, expire=100)  # generate for at most 100s
    while not is_pass and RelationProcessType.PROCESSING.value == Redis.read(relation_redis_key):
        print(Redis.read(relation_redis_key))
        run_count += 1
        # 1. generate a candidate data set
        try:
            groups = get_group_datas_v2(cur_list)  # group descriptors
            relation_generate_type = query_params.relation_generate_type
            relation_df = relation_make_data(total_count, groups, relation_generate_type)
        except Exception as e:
            traceback.print_exc()
            raise APIException(message="生成相关性数据失败" + str(e))
        # 2. run the correlation analyses
        res, is_pass, pass_status_dict, item_weight = do_analysis(groups, query_params, relation_df)
        print('第', run_count, '次的score是：', item_weight)
        if item_weight > score:  # keep the best-scoring data set so far
            print('======使用第', run_count, '次的数据，score是：', item_weight)
            score = item_weight
            # this frame is reversed and stored; the analysis still uses relation_df
            save_df = relation_df
            # 3. apply reverse-scoring to flagged items
            for i in cur_list:
                options = i.get('options')
                for j in options:
                    if j.get('is_reverse'):
                        cur_index = j.get('index')
                        options_count = len(j.get('options'))
                        cur_array = list(range(1, options_count + 1))
                        save_df[cur_index] = save_df[cur_index].apply(
                            lambda x: cur_array[-1] if x == 1 else cur_array[-x] if x in cur_array else x)

    print('操作完成，进入写数据状态')
    if Redis.read(relation_redis_key) == RelationProcessType.PROCESSING.value:
        Redis.write(key=relation_redis_key, value=RelationProcessType.WRITING_DATA.value, expire=100)  # switch to writing state
    # 4. persist the merged data
    total_df = save_relation_datas(work_order_id, save_df)
    # 5. frequency analysis over the full data
    ext_obj = work_order_extend_dao.search_by_work_order_id(work_order_id)  # all headers
    fa_obj = frequency_analysis.FrequencyAnalysis(total_df, ext_obj)
    fa = fa_obj.do_analysis_v2() if query_params.do_fa else {}
    res['frequency_analysis'] = fa
    pass_status_dict['frequency_analysis'] = True  # frequency always passes
    res['pass_status'] = pass_status_dict  # per-analysis pass flags
    print('频数分析完成，进入保存答案状态')
    # persist the result json (insert when absent, update otherwise)
    re_obj = relation_extend_dao.search_by_work_order_id(work_order_id)
    if not re_obj:  # no prior correlation result
        insert_obj = [ut.RelationExtend(work_order_id=work_order_id, relation_res=res)]
        relation_extend_dao.batch_insert(insert_obj)
    else:
        relation_extend_dao.modify_by_work_order_id(work_order_id, res)
    Redis.write(key=relation_redis_key, value=RelationProcessType.FINISHED.value, expire=300)  # keep the result state for 300s
    return res


def relation(query_params: v_submit.RelationView):
    """
    Generate correlation data and analysis results in a single pass (no retry
    loop — see relation_cycle_check for the retrying variant).

    :param query_params: request parameters (work_order_id, correlation
        groups, analysis switches, generation type, ...)
    :return: analysis result dict
    :raises APIException: on bad parameters, missing work order, or failures
        while generating / merging / analysing data
    """

    def save_relation_datas(wo_id: int, inner_df: pd.DataFrame):
        """
        Merge the generated correlation columns into the base answer rows and
        persist them (delete-then-insert); return the merged DataFrame.
        """
        relation_arrs = []
        for _, row in inner_df.iterrows():
            relation_arrs.append(np.array(row).tolist())
        index_arr = [h for h in inner_df.columns]  # DB column positions of the correlation data
        index_str = json.dumps(index_arr, ensure_ascii=False)
        base_datas = topic_dao.search_by_work_order_id(wo_id)  # base answer rows
        if len(relation_arrs) <= 0:
            raise APIException(message="当前工单相关性数据为空，无法合并数据")
        if len(base_datas) <= 0:
            raise APIException(message="当前工单基础数据为空，无法合并数据")
        if len(relation_arrs) != len(base_datas):
            # Bug fix: the two counts were swapped in the message (the base
            # count showed the relation count and vice versa).
            msg = "相关性数据和基础数据条数不一样，无法合并数据！基础数据共：" + str(
                len(base_datas)) + "条，相关性数据共：" + str(len(relation_arrs)) + "条"
            raise APIException(message=msg)
        relations = []
        res_arr = []  # rows taking part in the follow-up computation
        for i in range(len(base_datas)):
            r_data_arr = relation_arrs[i]
            r_dict = dict()
            for h in range(len(r_data_arr)):
                r_dict[index_arr[h]] = r_data_arr[h]
            obj = base_datas[i]
            data_obj = json.loads(obj.data)  # decode the stored row
            for j in r_dict:
                # Bug fix: use >= — j == len(data_obj) passed the old check
                # (j > len) and then raised IndexError on the access below.
                if j >= len(data_obj):
                    raise APIException(message="相关性数据索引超出基础数据索引范围，无法合并数据")
                # NOTE(review): unlike relation_cycle_check, this variant also
                # overwrites '-3' / '(跳过)' markers — confirm intended.
                data_obj[j] = r_dict[j]
            res_arr.append(data_obj)
            ss = json.dumps(data_obj, ensure_ascii=False)
            relations.append(ut.Relation(relation_data=ss, relation_index=index_str, work_order_id=work_order_id))
        # delete-then-insert
        relation_dao.delete_result_by_word_order_id(work_order_id)
        relation_dao.batch_insert(relations)
        # build a DataFrame with ordinal row/column labels
        res_df = pd.DataFrame(res_arr, index=range(len(res_arr)), columns=range(len(res_arr[0])))
        return res_df

    work_order_id = query_params.work_order_id
    cur_list = query_params.correlation
    if not work_order_id or not cur_list:
        raise APIException(message="参数错误")
    item = work_order_dao.query_work_order_by_id(work_order_id)
    if not item:
        raise APIException(message="工单不存在")
    # total number of rows to generate for this work order
    total_count = int(item.total_size)
    if total_count <= 0:
        raise APIException(message="工单需要生成的总行数小于1,无法计算相关性")

    # generate the candidate data set
    try:
        groups = get_group_datas_v2(cur_list)  # group descriptors
        relation_generate_type = query_params.relation_generate_type
        relation_df = relation_make_data(total_count, groups, relation_generate_type)
    except Exception as e:
        traceback.print_exc()
        raise APIException(message="生成相关性数据失败" + str(e))

    # apply reverse-scoring to flagged items; this frame is what gets stored,
    # the analysis still runs on relation_df (same underlying object here)
    revers_relation_df = relation_df
    for i in cur_list:
        options = i.get('options')
        for j in options:
            if j.get('is_reverse'):
                cur_index = j.get('index')
                options_count = len(j.get('options'))
                cur_array = list(range(1, options_count + 1))
                revers_relation_df[cur_index] = revers_relation_df[cur_index].apply(
                    lambda x: cur_array[-1] if x == 1 else cur_array[-x] if x in cur_array else x)
    # persist the merged data
    total_df = save_relation_datas(work_order_id, revers_relation_df)
    # run the correlation analyses
    res = do_relation_analysis(groups, query_params, relation_df, total_df)
    return res


def do_relation_analysis(groups, query_params, relation_df, total_df):
    """
    Run the correlation analyses and persist the result json.

    :param groups: group descriptors
    :param query_params: all query parameters from the page
    :param relation_df: DataFrame of the correlation columns
    :param total_df: full DataFrame
    :return: analysis result dict
    :raises APIException: when the analysis or the persistence fails
    """
    try:
        # coerce every value to numeric (NaN on failure)
        numeric_relation_df = relation_df.apply(pd.to_numeric, errors='coerce')
        numeric_total_df = total_df.apply(pd.to_numeric, errors='coerce')
        work_order_id = query_params.work_order_id
        ext_obj = work_order_extend_dao.search_by_work_order_id(work_order_id)
        res, result_pass = relation_analysis(numeric_total_df, numeric_relation_df, groups, ext_obj, query_params)
        # persist the result json (insert when absent, update otherwise)
        existing = relation_extend_dao.search_by_work_order_id(work_order_id)
        if existing:
            relation_extend_dao.modify_by_work_order_id(work_order_id, res)
        else:
            relation_extend_dao.batch_insert([ut.RelationExtend(work_order_id=work_order_id, relation_res=res)])
    except Exception as e:
        traceback.print_exc()
        raise APIException(message="算法分析失败" + str(e))
    return res


def relation_analysis_result(work_order_id: int):
    """
    Query the confirmed correlation analysis result of a work order.

    :param work_order_id: work order id
    :return: the confirmed result, or the string "工单不存在" when the work
        order does not exist (legacy return kept for existing callers)
    :raises APIException: when no confirmed result exists
    """
    item = work_order_dao.query_work_order_by_id(work_order_id)
    if item is None:
        return "工单不存在"
    re_item = relation_extend_dao.search_by_work_order_id(work_order_id)
    if re_item:
        return re_item.confirmed_relation_res
    # Consistency fix: pass the message as a keyword like every other call
    # site in this module — the old positional call may not have bound to
    # APIException's `message` parameter.
    raise APIException(message="暂不存在已认可的相关性结果")


def get_frequency(work_order_id: int):
    """
    Compute the frequency-analysis result for a work order's answer data.

    :param work_order_id: id of the work order whose answers are analysed
    :return: dict of the form {'frequency_analysis': [...]}; the list is
             empty when the work order has no answer rows yet
    :raises APIException: when the work order does not exist or the
            analysis itself fails
    """
    res = {'frequency_analysis': []}
    item = work_order_dao.query_work_order_by_id(id=work_order_id)
    if item is None:
        raise APIException(message="当前工单不存在", code=500)
    topics = topic_dao.search_by_work_order_id(work_order_id=work_order_id)
    if not topics:
        # No answers yet — return the empty result instead of raising.
        return res
    try:
        # Each topic row stores one answer row as a JSON-encoded list.
        topic_data_arr = [json.loads(obj.data) for obj in topics]
        topic_data_df = pd.DataFrame(topic_data_arr, index=range(len(topic_data_arr)),
                                     columns=range(len(topic_data_arr[0])))
        ext_obj = work_order_extend_dao.search_by_work_order_id(work_order_id)
        fa_obj = frequency_analysis.FrequencyAnalysis(topic_data_df, ext_obj)
        fa = fa_obj.do_analysis_v3()  # v3 is the current analysis variant
        return {'frequency_analysis': fa}
    except Exception as e:
        traceback.print_exc()
        raise APIException(message="计算频数分析出错" + str(e))


def relation_confirm(work_order_id: int):
    """
    Merge confirmed relation data into the base topic data.

    Copies each relation row's data onto the matching base row (positional
    1:1 pairing), batch-updates the base rows, then promotes the analysis
    result into the confirmed field.

    :param work_order_id: id of the work order to confirm
    :return: None on success, or an error message string describing why
             the merge could not be performed
    """
    item = work_order_dao.query_work_order_by_id(work_order_id)
    if item is None:
        return "工单不存在，无法合并数据"
    relation_datas = relation_dao.search_by_work_order_id(work_order_id)  # relation data
    base_datas = topic_dao.search_by_work_order_id(work_order_id)  # base data
    if len(relation_datas) <= 0:
        return "当前工单相关性数据为空，无法合并数据"
    # BUG FIX: this check previously re-tested relation_datas; it must
    # guard base_datas, otherwise the zip below silently merges nothing.
    if len(base_datas) <= 0:
        return "当前工单基础数据为空，无法合并数据"
    if len(relation_datas) != len(base_datas):
        # BUG FIX: report the row counts; the old message stringified the
        # raw object lists (and swapped the two).
        return "相关性数据和基础数据条数不一样，无法合并数据！基础数据共：" + str(
            len(base_datas)) + "条，相关性数据共：" + str(len(relation_datas)) + "条"
    updates = []
    for r_obj, obj in zip(relation_datas, base_datas):
        obj.data = r_obj.relation_data  # copy the relation row's data onto the base row
        updates.append(obj)
    # 1. batch-update the merged relation data into the base rows
    topic_dao.batch_update_by_id(updates)
    # 2. promote the analysis result into the confirmed field
    relation_extend_dao.confirm_relation_res(work_order_id)
    return


def export_base_result(work_order_id: int, view_status: int):
    """
    Export a work order's base result data as an xlsx byte blob.

    :param work_order_id: id of the work order to export
    :param view_status: status filter applied when selecting result rows
    :return: raw bytes of the generated Excel workbook
    :raises APIException: when the work order is missing, there is no
            data to export, or workbook generation fails
    """
    item = work_order_dao.query_work_order_by_id(id=work_order_id)
    if not item:
        raise APIException(message="工单不存在，无法导出")
    topics = topic_dao.search_results_with_status(work_order_id, view_status)
    if not topics:
        raise APIException(message="导出数据为空")
    # Each topic row stores one result row as a JSON-encoded list.
    data_arr = [json.loads(t.data) for t in topics]
    # Look up work_order_extend to resolve the matching column titles.
    woe_item = work_order_extend_dao.search_by_work_order_id(work_order_id)
    excel_title = []
    if woe_item and woe_item.titles:
        # BUG FIX: the loop variable used to shadow the outer `item`
        # (the work order); use a distinct name.
        for title_item in woe_item.titles:
            cur_obj = wjx_explain_v2.DataIndex(**title_item)
            excel_title.append(cur_obj.title)
    try:
        if excel_title:
            data_arr = [excel_title] + data_arr  # header becomes the first row
        df = pd.DataFrame(data_arr)
        excel_data = io.BytesIO()
        with pd.ExcelWriter(excel_data, engine='xlsxwriter') as writer:
            df.to_excel(writer, index=False, header=False)
        excel_data.seek(0)
        res = excel_data.getvalue()
        excel_data.close()
    except Exception:
        # Keep the full stack for debugging (consistent with sibling
        # functions) instead of a bare print(e).
        traceback.print_exc()
        raise APIException(message="导出数据出错")
    return res


def upload_base_result(work_order_id: int, file_data):
    """
    Upload base result data for a work order from an Excel file.

    Validates every cell against the questionnaire's per-column
    `valid_values`, then replaces the work order's existing result rows.

    :param work_order_id: id of the target work order
    :param file_data: uploaded file object whose .read() yields xlsx bytes
    :raises APIException: on missing work order/link, wrong status,
            unreadable file, column-count mismatch, or invalid cell values
    """
    cur_work_order = work_order_dao.query_work_order_by_id(id=work_order_id)
    if not cur_work_order:
        raise APIException(message="工单不存在")
    if not cur_work_order.link:
        raise APIException(message="工单不存在问卷链接")
    if cur_work_order.status >= 5:
        raise APIException(message="当前工单已完成或正在提交数据中，无法上传基础数据")
    # Load the workbook from the uploaded stream.
    try:
        stream = io.BytesIO(file_data.read())
        book = openpyxl.load_workbook(stream)
        sheet1 = book.worksheets[0]
    except Exception as e:
        print(e)
        raise APIException(message="读取文件数据失败")
    n_cols = sheet1.max_column  # effective column count of the sheet
    # The uploaded sheet must match the questionnaire's column layout.
    # FIX: the extend record was previously queried twice; one lookup is enough.
    wo_extend = work_order_extend_dao.search_by_work_order_id(work_order_id=work_order_id)
    titles = wo_extend.titles
    if n_cols != len(titles):
        msg = '文件解析错误，列数不匹配。上传文件的总列数为' + str(n_cols) + '，需要的列数为' + str(len(titles)) + '。'
        raise APIException(message=msg)
    topics = []
    # Validate and collect every data row; row index 0 is the header.
    for index, row_tuple in enumerate(sheet1.iter_rows(values_only=True), start=0):
        if index == 0:
            continue
        row = list(row_tuple)
        for cell, value in enumerate(row):
            title_obj = titles[cell]
            vvs = title_obj['valid_values']
            if not vvs:
                # Columns without a valid-value list are accepted as-is
                # (note: they are also not str()-converted, as before).
                continue
            vvs = [str(i) for i in vvs]
            # Cell values may carry a "^"-separated suffix; only the prefix
            # is validated.
            cell_value = str(value).split("^")[0]
            if str(cell_value) not in vvs:
                # NOTE(review): `index` includes the header row, so the
                # reported "行" is the sheet row minus one — confirm intent.
                msg = "上传数据错误，第" + str(index) + "行" + "，第" + str(cell + 1) + "列"
                raise APIException(message=msg)
            row[cell] = str(value)
        json_str = json.dumps(row, ensure_ascii=False)
        topics.append(ut.Topic(data=json_str, work_order_id=work_order_id, submitted=0))
    # Replace existing results: delete the old rows, then batch insert.
    topic_dao.delete_result_by_word_order_id(work_order_id=work_order_id)
    topic_dao.batch_insert(topics)
    return
