import math
import time
from decimal import *
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
from utils.mylib_db import *
from utils.mylib_utils import *
from measurement.measurement_utils import *
from measurement.import_probe_data_into_db import *
from measurement.import_probe_data_to_store import *
from measurement.import_user_data_file import *
import multiprocessing

"""
//1. 参数保存到库中（预处理参数，数据分析参数）
//2. 添加任务表及状态（TaskType=1）
//3. 数据存入预处理后的数据库表
4. 统计学异常处理算法4个；
//5. 统一高度分辨率处理；
6. 统一时间分辨率处理；
7. 预处理所有参数跟数据headId关联？

8. 从临时获取最近一次数据；
9. 预处理后数据更新head表信息

预处理顺序为：
    1）时间一致性检查；
    2）统一高度分辨率；（完成）
    3）统一时间分辨率；（完成）
    4）极值检查；
    5）物理一致性检查；（完成）
    6）统计学检查；
    7）箱线图法检查。
    异常值处理包括剔除和设置为NAN（即python中的numpy.nan）两种

数据来源说明（四个模块通用：A预处理、B不确定度评定、C数据比对、D数据分析）：
0:数据库预处理数据  - 数据筛选：bt_data_head + bt_data                           - 结果数据存储：（B存库）bt_data_head + bt_data   （B不存库 +CD）bt_temporary_data_head + bt_temporary_data
1:数据库原始数据    - 数据筛选：bt_data_head + bt_data_l0                        - 结果数据存储：（AB存库）bt_data_head + bt_data  （AB不存库+CD）bt_temporary_data_head + bt_temporary_data
2:文件导入;         - 数据筛选：bt_temporary_data_head + bt_temporary_data       - 结果数据存储：bt_temporary_data_head + bt_temporary_data
3:最新处理的数据    - 数据筛选：根据任务表是否存库字段IsSaveIntoDb判断哪种情况（结果同0、2或1（预处理））  - 结果数据存储：（存库）bt_data_head + bt_data   （不存库）bt_temporary_data_head + bt_temporary_data

【适用范围】
预处理模块：   1，2
不确定度评定： 0，1，2，3 （3暂不考虑）
数据比对：     0，1，2
数据分析：     0，1，2
"""

"""
高度统一化默认插值条件：
温度、风速：线性插值
密度：对数插值
"""

EPSINON = 1e-6          # tolerance used for floating-point comparisons
NoneStr = 'NoneNone'        # sentinel string (distinct from the None the DB fills in by default)
NoneValue = -999999.99        # sentinel numeric value representing a missing (None) reading

# Database table names
db_task_table = "sys_state_sync"
# db_parameter_table = "bt_system_parameter_config"
db_raw_l2_data_table = "bt_data_l2_raw"                 # raw L2 data table
db_raw_l0_data_table = "bt_data_l0_raw"                 # raw L0 data table

db_data_head_table = "bt_data_head"                     # permanent head (metadata) table
db_raw_l0_precessed_table = "bt_data_l0"                # preprocessed L0 data table
db_raw_l2_precessed_table = "bt_data"                   # preprocessed L2 data table
db_temporary_data_head_table = "bt_temporary_data_head" # temporary head table (non-persisted runs)
db_temporary_data_table_l2 = "bt_temporary_data"        # temporary L2 data table
db_temporary_data_table_l0 = "bt_temporary_data_l0"     # temporary L0 data table

db_system_parameter_config_table = "bt_system_parameter_config"


def _param_value(param, key, default, cast=None):
    """Return ``param[key]``, optionally converted with *cast*.

    Falls back to *default* when the key is missing or its value is None
    (the UI layer fills unset fields with None rather than omitting them).
    """
    value = param.get(key)
    if value is None:
        return default
    return cast(value) if cast is not None else value


def processProbeData(param):
    """
    Validate parameters and launch a data-preprocessing task.

    Builds the task configuration from *param*, persists the parameter set,
    registers the task in the task table, selects source/target DB tables by
    data source and save flag, then runs the actual pipeline in a child
    process (see runPreprocessTask / preprocessProbeData).

    Pipeline checks (implementation status per original notes):
    1. time-consistency check (multi-file)   2. physical-consistency check
    3. extreme-value check                   4. box-plot check
    5. height-resolution unification (done)  6. statistical checks (3-sigma etc.)
    7. time-resolution unification

    :param param: request dict from the UI; unset fields may be None
    :return: 0 when the task was launched, a {"code": -1, "msg": ...} dict on
             parameter errors, or the setTaskStatus error string
    """
    print("进入数据预处理过程..")
    print("参数情况：")
    print(param)

    # TaskId defaults to the current timestamp when not supplied
    task_id = param.get('task_id')
    if task_id is None:
        task_id = get_current_timestamp()

    # Query/filter parameters
    data_source = _param_value(param, 'data_source', 0, int)                        # data source enum: 0 preprocessed DB data, 1 raw DB data, 2 file import, 3 latest processed data
    station_device_pair_list = _param_value(param, 'station_device_pair_list', "")  # station-device pairs, "1-2" or "1-2,1-3,2-4" (some modules pass several)
    station_id = _param_value(param, 'station_id', 0, int)                          # station id
    device_id = _param_value(param, 'device_id', 0, int)                            # device id
    # value_type is derived internally since 2309 and no longer accepted from the UI
    data_type = _param_value(param, 'data_type', "")                                # data type, "TSVP" or "TSVP, DUSM" (since 2309: T/P/W1/W2/D/N/F)
    time_start = _param_value(param, 'time_start', "")                              # range start, "YYYY-mm-dd HH:MM:SS"
    time_end = _param_value(param, 'time_end', "")                                  # range end, "YYYY-mm-dd HH:MM:SS"
    height_start = _param_value(param, 'height_start', "")                          # height range start (currently unused)
    height_end = _param_value(param, 'height_end', "")                              # height range end (currently unused)

    # File-import settings (effective when data_source == 2)
    import_file_path = _param_value(param, 'import_file_path', "")                  # "path1", "path1;path2;path3" or a directory; required for data_source 2
    process_num = _param_value(param, 'process_num', 4, int)                        # number of parallel file-reader processes
    user_id = _param_value(param, 'user_id', -1, int)                               # current logged-in user

    # Explicitly selected data files
    head_id_list = _param_value(param, 'head_id_list', "")                          # e.g. "2,3,4,5"; "" means not specified
    data_level = _param_value(param, 'data_level', 1, int)                          # 1 means L2 data (default)

    height_standard_set = _param_value(param, 'height_standard_set', 0, int)                # height unification (done)
    time_standard_set = _param_value(param, 'time_standard_set', 0, int)                    # time unification TODO 1
    extreme_value_check = _param_value(param, 'extreme_value_check', 0, int)                # extreme-value check (done)
    physics_consistency_check = _param_value(param, 'physics_consistency_check', 0, int)    # physical-consistency check (done)
    time_consistency_check = _param_value(param, 'time_consistency_check', 0, int)          # time-consistency check (pending, has test data) TODO 2
    statistics_check = _param_value(param, 'statistics_check', 0, int)                      # statistical check TODO
    boxplot_check = _param_value(param, 'boxplot_check', 0, int)                            # box-plot check (needs many groups) TODO
    save_flag = _param_value(param, 'save_flag', 1, int)                                    # persist results to DB (default: yes)

    # The next four parameters mirror the data-analysis UI
    height_reslution = _param_value(param, 'height_reslution', 1, int)                      # unified height resolution (step value) TODO
    height_reslution_unit = _param_value(param, 'height_reslution_unit', 0, int)            # height unit: 0 km (default), 1 m
    time_reslution = _param_value(param, 'time_reslution', 1, int)                          # unified time resolution (interval value) TODO 1
    time_reslution_unit = _param_value(param, 'time_reslution_unit', 0, int)                # time unit: 0 hour/1 day/2 week/3 month/4 quarter/5 year/6 range

    filter_rule = _param_value(param, 'filter_rule', 0, int)        # statistical rule enum: 0 none; 1 Pauta; 2 Grubbs; 3 Dixon; 4 robust TODO 3
    exception_rule = _param_value(param, 'exception_rule', 0, int)  # outlier handling enum: 0 drop; 1 set to default; 2 keep

    temperature_max = _param_value(param, 'temperature_max', 0.0)   # temperature upper bound (float)
    temperature_min = _param_value(param, 'temperature_min', 0.0)   # temperature lower bound (float)
    density_max = _param_value(param, 'density_max', 0.0)           # density upper bound (float)
    density_min = _param_value(param, 'density_min', 0.0)           # density lower bound (float)
    wind_speed_max = _param_value(param, 'wind_speed_max', 0.0)     # wind-speed upper bound (float)
    wind_speed_min = _param_value(param, 'wind_speed_min', 0.0)     # wind-speed lower bound (float)

    # Temporary mapping between the old and new detection-system encodings
    data_type = getMultiDataTypeStr2309(data_type)
    value_type_str = getMultiValutTypeStrByDataType2309(data_type)

    conf = {
        'TaskId': task_id,
        # task type: 0 data ingestion; 1 preprocessing; 2 uncertainty assessment;
        # 3 data-analysis processing; 5 one-click assessment; 6 assessment-result
        # query (file upload); 7 uncertainty validation (file upload);
        # 8 cross-device comparison; 9 cross-time comparison
        "TaskType": 1,

        'Type': 0,              # parameter-table type discriminator
        'HeightStandardSet': height_standard_set,
        'TimeStandardSet': time_standard_set,
        'ExtremeValueCheck': extreme_value_check,
        'PhysicsConsistencyCheck': physics_consistency_check,
        'TimeConsistencyCheck': time_consistency_check,
        'StatisticsCheck': statistics_check,
        'BoxplotCheck': boxplot_check,
        'SaveFlag': save_flag,
        'HeightReslution': height_reslution,
        'HeightReslutionUnit': height_reslution_unit,
        'TimeReslution': time_reslution,
        'TimeReslutionUnit': time_reslution_unit,
        'TimeReslutionUnitOption': ['小时', '日', '周', '月', '季度', '年', '全时段'],
        'StatisticMethod': "0",           # time unification defaults to mean (0)
        'FilterRule': filter_rule,
        'FilterRuleOption': ['', '拉伊达准则', '格拉布斯准则', '狄克逊准则', '稳健数据处理方法'],
        'ExceptionRule': exception_rule,

        'TemperatureMax': Decimal(temperature_max),
        'TemperatureMin': Decimal(temperature_min),
        'DensityMax': Decimal(density_max),
        'DensityMin': Decimal(density_min),
        'WindSpeedMax': Decimal(wind_speed_max),
        'WindSpeedMin': Decimal(wind_speed_min),
        # a check is enabled only when its bounds are not both zero
        'CheckTemperatureValue': 0 if (abs(temperature_max - 0.0) < EPSINON and abs(temperature_min - 0.0) < EPSINON) else 1,
        'CheckDensityValue': 0 if (abs(density_max - 0.0) < EPSINON and abs(density_min - 0.0) < EPSINON) else 1,
        'CheckWindSpeedValue': 0 if (abs(wind_speed_max - 0.0) < EPSINON and abs(wind_speed_min - 0.0) < EPSINON) else 1,
        # data source
        'DataSource': data_source,

        # explicitly selected head ids take precedence over condition queries
        'HeadIdList': head_id_list,

        'StationDevicePairList': station_device_pair_list.split(',') if station_device_pair_list != "" else [],
        'StationId': station_id,
        'DeviceId': device_id,
        'ValueType': value_type_str,
        'DataType': data_type,
        'TimeStart': time_start,
        'TimeEnd': time_end,
        'HeightStart': height_start,
        'HeightEnd': height_end,

        'DataLevel': data_level,

        # user file-import settings
        'ImportFilePath': import_file_path,
        "ProcessNum": process_num,
        "UserId": user_id,

        'DbHeadTable': "",          # source head table for data selection
        'DbTable': "",              # source data table for data selection
        'DbSaveHeadTable': "",      # target head table for result storage
        'DbSaveTable': "",          # target data table for result storage
    }

    print("data_level：")
    print(data_level)

    # Preprocessing of already-preprocessed data is not supported
    # (the code below can handle it, but must be tested before enabling)
    if conf['DataSource'] == 0:
        return {"code": -1, "msg": "不支持对预处理后的数据进行预处理！"}

    # Preprocessing the most recently ingested data is supported
    if conf['DataSource'] == 3:
        print("对最近一次录入的数据进行处理..")

    if conf['DataSource'] == 2 and conf['ImportFilePath'] == "":
        return {"code": -1, "msg": "数据源选择了“文件导入”，需指定导入文件或文件夹路径！"}

    # Parameter check: extreme-value bounds
    if conf['ExtremeValueCheck'] == 1:
        msg = ""
        # temperature: max must exceed 0 and min must exceed -500
        if conf['CheckTemperatureValue'] == 1 and \
                ((temperature_max - 0.0 < EPSINON) or (temperature_min + 500.0 < EPSINON)):
            msg += "极值检查参数错误：未添加温度最大值最小值！"
        # density: max must exceed 0 and min must exceed -1
        if conf['CheckDensityValue'] == 1 and \
                ((density_max - 0.0 < EPSINON) or (density_min + 1.0 < EPSINON)):
            msg += "极值检查参数错误：未添加密度最大值最小值！"
        # wind speed: max must exceed 0 and min must exceed -500
        if conf['CheckWindSpeedValue'] == 1 and \
                ((wind_speed_max - 0.0 < EPSINON) or (wind_speed_min + 500.0 < EPSINON)):
            msg += "极值检查参数错误：未添加风速最大值最小值！"

        if msg != "":
            return {"code": -1, "msg": msg}

    # Parameter check: a valid statistical rule must be selected
    if conf['StatisticsCheck'] == 1 and (conf['FilterRule'] == 0 or conf['FilterRule'] > 4):
        print("统计学检查参数错误，未选择‘统计判断准则’！")
        return {"code": -1, "msg": "统计学检查参数错误，未选择‘统计判断准则’！"}

    print(conf)
    # Checks passed: persist the parameter set and keep its id
    param_conf_id = savePreprocessedDataIntoDB(conf)
    conf["ParameterSetting"] = param_conf_id
    print(param_conf_id)

    # Choose result-storage tables: persisted runs write the permanent L2
    # tables, non-persisted runs the temporary tables (file import overrides
    # both below)
    if conf['SaveFlag'] == 1:
        conf['DbSaveHeadTable'] = db_data_head_table
        conf['DbSaveTable'] = db_raw_l2_precessed_table
    else:
        conf['DbSaveHeadTable'] = db_temporary_data_head_table
        conf['DbSaveTable'] = db_temporary_data_table_l2

    # Choose source tables by data source (DataSource == 3 is refined later
    # from the task table's IsSaveIntoDb flag)
    if conf['DataSource'] == 0:
        # 0: preprocessed DB data
        conf['DbHeadTable'] = db_data_head_table
        conf['DbTable'] = db_raw_l2_precessed_table
    elif conf['DataSource'] == 1:
        # 1: raw DB data
        conf['DbHeadTable'] = db_data_head_table
        conf['DbTable'] = db_raw_l2_data_table
    elif conf['DataSource'] == 2:
        # 2: file import — temporary tables, results not persisted
        conf['DbHeadTable'] = db_temporary_data_head_table
        conf['DbTable'] = db_temporary_data_table_l2
        conf['DbSaveHeadTable'] = db_temporary_data_head_table
        conf['DbSaveTable'] = db_temporary_data_table_l2
    else:
        # 3: latest processed data
        conf['DbHeadTable'] = db_temporary_data_head_table
        conf['DbTable'] = db_temporary_data_table_l2

    # Register the task as started (task id for this interactive batch)
    TASK_ID = conf['TaskId']
    if TASK_ID != "":
        ret = setTaskStatus(TASK_ID, conf['TaskType'], conf['SaveFlag'], param_conf_id, conf['DataSource'])
        if ret != "OK":
            print("setTaskStatus: %s" % ret)
            return ret

    # Run all business logic in a child process
    process = multiprocessing.Process(target=runPreprocessTaskFunc, args=(conf, TASK_ID))  # production: errors caught, finish status set
    # process = multiprocessing.Process(target=runPreprocessTask, args=(conf, TASK_ID))    # debugging: surfaces the failing line
    process.start()

    return 0


def runPreprocessTaskFunc(conf, TASK_ID):
    """
    Child-process entry point: run the preprocessing task, time it, and
    always mark the task as finished.

    The finish call sits in ``finally`` so it runs exactly once — the
    original called it on both the success and the exception path, which
    could invoke it twice if it raised during the success path.

    :param conf: task configuration dict built by processProbeData()
    :param TASK_ID: batch/task identifier
    """
    try:
        start_time = time.perf_counter()    # monotonic clock for durations
        ret = runPreprocessTask(conf, TASK_ID)
        print(ret)
        # elapsed wall time in milliseconds
        run_time = (time.perf_counter() - start_time) * 1000
        print('代码运行时间为%d ms' % run_time)
    except Exception as e:
        print("runPreprocessTaskFunc_Error: %s" % e)
    finally:
        print("设置任务状态为 完成状态 ！")
        setTaskStatusFinish(conf)

def runPreprocessTask(conf, TASK_ID):
    """
    Execute one preprocessing task end to end.

    Steps: optionally import user data files (DataSource == 2), resolve the
    list of head records to process (explicit HeadIdList, condition query,
    latest batch, or just-imported files), relax checks whose data-volume
    preconditions are not met, clear the temporary tables when results are
    not persisted, then run preprocessProbeData().

    :param conf: configuration dict assembled by processProbeData()
        (mutated: table names, check flags, LatestTaskId, StationId/DeviceId)
    :param TASK_ID: batch/task identifier, also used as BatchId for imports
    :return: 0 on success, or {"code": -1, "msg": ...} on error
    """
    ######################################
    # Data source 2 (file import): ingest the user files before querying
    if conf['DataSource'] == 2:
        print("开始导入用户数据..")
        print("先清除同类任务的旧数据..")
        delete_pre_data_by_batch_id(conf['DbTable'], conf['TaskType'])
        ret = callReadFileProcessAndWaitFinish(conf, conf['ImportFilePath'], conf['ProcessNum'], TASK_ID)
        print("读取用户文件结束！")
        print("进入筛选文件数据阶段..")

    ######################################
    # Resolve the list of data head records to process
    print("开始筛选文件数据阶段..")
    head_id_list = conf['HeadIdList']

    # head records matching the selection (tuples consumed by preprocessProbeData)
    data_head_id_list = []
    data_batch_id = -1

    # Lookup order: 1) an explicitly supplied head-id list wins,
    # 2) otherwise DataSource decides between a condition query and the
    #    result of the most recent task.
    if head_id_list != "":
        # head_id_list was given explicitly in the parameters
        print("参数指定了Head_id_list!优先处理..")
        head_id_list_arr = head_id_list.split(",")
        if len(head_id_list_arr) > 0:
            for head_id in head_id_list_arr:
                print(head_id)
                ret = getHeadIdInfoByHeadId(int(head_id))
                if ret != "":
                    data_head_id_list.append(ret)
        else:
            # NOTE(review): unreachable — str.split on a non-empty string
            # always yields at least one element
            print("参数head_id_list格式错误！")
            return {"code": -1, "msg": "提供的参数head_id_list格式错误！"}

    elif conf['DataSource'] == 0 or conf['DataSource'] == 1:
        # Data sources 0 (preprocessed DB data) and 1 (raw DB data):
        # query head ids by the filter conditions
        print("根据数据源，按条件筛选head_id_list..")
        if conf['ValueType'] == "":
            print("参量类型ValueType不能为空")
            return {"code": -1, "msg": "当前选择的数据源，参量类型参数不能为空！"}

        # Accept either station-device pairs or separate station_id/device_id
        if len(conf['StationDevicePairList'])>0:
            for pair in conf['StationDevicePairList']:
                arr = pair.split('-')
                conf['StationId'] = int(arr[0])
                conf['DeviceId'] = int(arr[1])
                # data_head_id_list_part = queryDataByCondition(conf, 0)
                data_head_id_list_part = queryDataByCondition2309(conf, 0)  # query for the 2023-09 sharded-table layout (updates conf['DbHeadTable'])
                if len(data_head_id_list_part) > 0:
                    data_head_id_list.extend(data_head_id_list_part)
        else:
            # data_head_id_list = queryDataByCondition(conf, 0)
            data_head_id_list = queryDataByCondition2309(conf, 0)   # query for the 2023-09 sharded-table layout (updates conf['DbHeadTable'])

        # When persisting, the target head table must match the (possibly
        # updated) source head table
        if conf['SaveFlag'] == 1:
            conf['DbSaveHeadTable'] = conf['DbHeadTable']

        if len(data_head_id_list) == 0:
            print("未检索到探测数据，请重新设置检索条件！")
            return {"code": -1, "msg": "未检索到探测数据，请重新设置检索条件！"}

    elif conf['DataSource'] == 3:
        # Data source 3 (latest processed data): fetch the most recent
        # ingestion batch; the task record decides which tables it lives in
        print("取最新一次任务处理结束的数据..")
        data_batch_info = getLatestDataBatchId(0)
        if data_batch_info == "":
            print("错误：未找到最近录入的数据")
            return {"code": -1, "msg": "未找到最近录入的数据！"}
        data_batch_id = data_batch_info['KeyName']
        # A non-empty "KeyNameUpdate" means this taskId was superseded by a
        # later stage (preprocessing, assessment, ...); use the updated value
        if data_batch_info['KeyNameUpdate'] is not None and data_batch_info['KeyNameUpdate'] != '':
            data_batch_id = data_batch_info['KeyNameUpdate']

        # Used to set KeyNameUpdate on the task row so the latest task's data
        # can be located again later (preprocessing/assessment overwrite the
        # latest batchId; KeyNameUpdate tracks the lineage)
        conf['LatestTaskId'] = data_batch_info['Id']
        print("data_batch_id: %s" % data_batch_id)

        # Determine the tables the ingested data was stored in (TODO: validate?)
        if data_batch_info['IsSaveIntoDb'] == 1:
            conf['DbHeadTable'] = db_data_head_table
            conf['DbTable'] = db_raw_l2_data_table
        elif data_batch_info['IsSaveIntoDb'] == 0:
            conf['DbHeadTable'] = db_temporary_data_head_table
            conf['DbTable'] = db_temporary_data_table_l2

        # data_head_id_list = getHeadIdByBatchId(data_batch_id, conf['DataLevel'])
        data_head_id_list = getHeadIdByBatchIdV2(data_batch_id, conf['DataLevel'], 0)
        if len(data_head_id_list) == 0:
            print("错误：未找到最近录入数据的head_id")
            return {"code": -1, "msg": "未找到最近录入数据的head_id！"}

    elif conf['DataSource'] == 2:
        # Data source 2 (file import): pick up the files imported above,
        # keyed by this task's id
        print("获取刚刚导入的用户文件数据..")
        data_batch_id = TASK_ID
        print("data_batch_id: %s" % data_batch_id)

        data_head_id_list = getHeadIdByBatchIdV3(conf, data_batch_id, conf['DataLevel'], 0)
        if len(data_head_id_list) == 0:
            print("错误：未找到最近录入数据的head_id")
            return {"code": -1, "msg": "未找到最近录入数据的head_id！"}
    else:
        print("参数错误！")
        return {"code": -1, "msg": "参数错误，请检查传入的参数！"}

    print("data_head_id_list: ")
    print(data_head_id_list)
    print("检索到HearID数量：%d" % len(data_head_id_list))
    if len(data_head_id_list) == 0:
        print("未获取到探测数据！")
        return {"code": -1, "msg": "未获取到探测数据"}

    time.sleep(2)
    # return -1

    # Data-volume dependent parameter checks, e.g. time-resolution
    # unification needs at least 2 files. Applied to sources 0, 1 and 3
    # (2: file import is not volume-checked). Unsatisfiable checks are
    # silently disabled rather than reported as errors.
    if conf['DataSource'] == 0 or conf['DataSource'] == 1 or conf['DataSource'] == 3:
        if conf['TimeStandardSet'] == 1 and len(data_head_id_list) < 2:
            print("时间统一分辨率条件错误：数据检查不足2组！")
            # not an error: disable time-resolution unification
            conf['TimeStandardSet'] = 0
        if conf['TimeConsistencyCheck'] == 1 and len(data_head_id_list) < 2:
            print("时间一致性检查参数错误：数据检查不足2组！")
            # not an error: disable the time-consistency check
            conf['TimeConsistencyCheck'] = 0
        if conf['BoxplotCheck'] == 1 and len(data_head_id_list) < 30:
            print("箱线图法检查参数错误：数据检查不足30组！")
            # not an error: disable the box-plot check
            conf['BoxplotCheck'] = 0
        if conf['HeightReslution'] > 1:
            print("错误：高度分辨率统一功能不支持1km以上的分辨率（待完善）！")
        if conf['StatisticsCheck'] == 1:
            # FilterRule: 0 none; 1 Pauta (3-sigma); 2 Grubbs; 3 Dixon; 4 robust
            if conf['FilterRule'] == 1 and len(data_head_id_list) < 50:
                print("拉伊达准则检查参数错误：数据检查不足50组！")
                # not an error: disable the statistical check
                conf['StatisticsCheck'] = 0
            if conf['FilterRule'] == 2 and len(data_head_id_list) < 30:
                print("格拉布斯准则检查参数错误：数据检查不足30组！")
                # not an error: disable the statistical check
                conf['StatisticsCheck'] = 0
            if conf['FilterRule'] == 3 and len(data_head_id_list) < 30:    # threshold TBD — confirm
                print("狄克逊准则检查参数错误：数据检查不足30组！")
                # not an error: disable the statistical check
                conf['StatisticsCheck'] = 0
            if conf['FilterRule'] == 4 and len(data_head_id_list) < 10:
                print("稳健数据处理方法检查参数错误：数据检查不足10组！")
                # not an error: disable the statistical check
                conf['StatisticsCheck'] = 0

    # When results go to the temporary tables, clear them first
    if conf['SaveFlag'] == 0:
        print("清空临时表数据，准备本次数据保存..")
        # the task-type argument must match the one used in setTaskStatus
        clear_db_temporary_table_and_keep_last_same_type_task(1)

    preprocessProbeData(data_head_id_list, conf, TASK_ID)

    return 0


def preprocessProbeData(data_head_id_list, conf, TASK_ID):
    """
    Run the preprocessing pipeline over the resolved head records.

    Loads the raw probe data for every head record from its L2 file, applies
    whichever checks are enabled in *conf* (time consistency, height/time
    resolution unification, extreme-value, physical-consistency, statistical
    and box-plot checks), then persists the processed rows and updates the
    head/status tables.

    :param data_head_id_list: list of tuples
        (head_id, value_type, start_time, end_time, station_id, device_id,
         file_name, file_store_path)
    :param conf: task configuration from processProbeData(); mutated here
        ('TaskId', 'ParameterDescription')
    :param TASK_ID: batch/task identifier
    :return: 0 on success, -1 when no raw data could be loaded
    """
    # Column layout used when saving preprocessed rows
    # db_cols = ["HeadId", "ValueType", "Label", "Height", "Value", "IsAbnormal"]
    # db_cols = ["HeadId", "BatchId", "ValueType", "Label", "Height", "Value", "IsAbnormal", "ParameterSetting"]

    conf['TaskId'] = TASK_ID

    # print("=======preprocessProbeData======:", conf)

    # counts every abnormal value flagged during this run
    abnormal_count = 0
    data_list_json = {}    # raw/intermediate data cache, shaped {"head_id1":[], "head_id2":[], "head_id3":[], }

    data_level = conf['DataLevel']
    """
    预处理步骤
    1）时间一致性检查；
    2）统一高度分辨率；（完成）
    3）统一时间分辨率；（完成）
    4）极值检查；（完成）
    5）物理一致性检查；（完成）
    6）统计学检查；
    7）箱线图法检查。
    """

    # Short human-readable labels for each step; the ones actually executed
    # are joined below and stored as the ParameterDescription field
    s1_desc_time_consist_check = "时间一致性检查"
    s2_desc_height_resolution = "统一高度分辨率：%d km" % conf['HeightReslution']
    s3_desc_time_resolution = "统一时间分辨率：%d %s" % (conf['TimeReslution'], conf['TimeReslutionUnitOption'][conf['TimeReslutionUnit']])
    s4_desc_extreme_value_check = "极值检查：温度%.2f K-%.2f K，密度%.8f kg/m3-%.8f kg/m3，风速%.3f m/s-%.3f m/s" % \
                                  (conf['TemperatureMin'], conf['TemperatureMax'], conf['DensityMin'],
                                   conf['DensityMax'], conf['WindSpeedMin'], conf['WindSpeedMax'], )
    s5_desc_physics_consist_check = "物理一致性检查"
    s6_desc_statistics_check = "统计学检查: %s" % conf['FilterRuleOption'][conf['FilterRule']]
    s7_desc_boxplot_check = "箱线图法检查"
    # records which of the 7 steps above were executed
    pre_process_parameter_list = []

    # Step 1: time-consistency check (file level) TODO — not implemented yet
    # Intended to use TUSM temperature data to reject DUSM density rows
    # without a matching timestamp
    if conf['TimeConsistencyCheck'] == 1:
        print("TODO: 时间一致性检查")

        # record the executed step
        pre_process_parameter_list.append(s1_desc_time_consist_check)

        print("步骤1：时间一致性检查 处理完成..")
        time.sleep(2)

    # Load all raw data into data_list_json, keyed by "systype_headid"
    # for (head_id, value_type, start_time, end_time) in data_head_id_list:
    for (head_id, value_type, start_time, end_time, station_id, device_id, file_name, file_store_path) in data_head_id_list:
        print("==================================")
        print("head_id = %d, value_type = %s" % (head_id, value_type))
        # raw_data = getRawDataByHeadId(head_id, data_level)          # data_level selects the L0 or L2 raw table
        # raw_data = getRawDataByHeadIdInJsonObjectList(conf, head_id, data_level)

        # Read the probe data straight from the stored L2 file
        s_type = getDataSystemTypeByL2FileName(file_name)
        raw_data = parsingRadarProbeDataFromRawFile2309(file_store_path, file_name, head_id, TASK_ID, 1)

        if raw_data == "":
            print("错误：未找到最近录入的原始数据")
            # return {"code": -1, "msg": "未找到最近录入的原始数据！"}
            continue

        # print(raw_data)
        print("raw_data:原始数据条数: %d" % len(raw_data))
        #############################################
        # cache entry: {"systype_headid": [...], ...}
        data_list_json["%s_%s" % (s_type, head_id)] = raw_data    # keyed by system type + head_id

    if len(data_list_json.keys()) == 0:
        print('未找到原始探测数据！')
        return -1

    print("原始数据读取完成，开始进行预处理...")
    # Step 2: unify the height resolution
    if conf['HeightStandardSet'] == 1:
        print("预处理：对单个L2文件进行高度统一化处理..")
        data_list_json_tmp = {}
        # re-grid every file onto the configured height step
        for key in data_list_json.keys():
            raw_data = data_list_json[key]
            raw_data = heightNormalizationByStep(raw_data, conf['HeightReslution'])   # pure computation, no DB access
            print("高度统一化后的数据：%d" % len(raw_data))
            data_list_json_tmp[key] = raw_data

        data_list_json = data_list_json_tmp
        print(data_list_json)
        print("高度统一分辨率处理完成！数据head_id个数：%d" % len(data_list_json))
        # record the executed step
        pre_process_parameter_list.append(s2_desc_height_resolution)

        print("步骤2：统一高度分辨率 处理完成..")
        time.sleep(2)

        # honor a user-requested pause from the UI
        if isTaskStoped(TASK_ID, conf['TaskType']) == 1:
            return 0

    # Step 3: unify the time resolution
    # units: 0 hour / 1 day / 2 week / 3 month / 4 quarter / 5 year / 6 range
    if conf['TimeStandardSet'] == 1:
        print("预处理：对多个L2文件进行时间统一化处理..")
        # TODO
        data_list_json = timeNormalization(data_list_json, conf, TASK_ID)  # TODO: adapt to the new data_list_json keys (add the systype part)
        print(data_list_json)

        print("时间统一分辨率处理完毕！数据head_id个数：%d" % len(data_list_json))
        print(data_list_json.keys())
        # record the executed step
        pre_process_parameter_list.append(s3_desc_time_resolution)

        print("步骤3：时间分辨率统一 处理完成..")
        time.sleep(2)

        # honor a user-requested pause from the UI
        if isTaskStoped(TASK_ID, conf['TaskType']) == 1:
            return 0

    # Step 4: extreme-value check
    if conf['ExtremeValueCheck'] == 1:
        count = 0
        data_list_json_tmp = {}
        for key in data_list_json.keys():
            raw_data = data_list_json[key]
            # each row carries one height and one probe value
            for i in range(len(raw_data)):
                count += 1
                height = raw_data[i]['Height']
                value = Decimal(raw_data[i]['Value'])
                value_type = raw_data[i]['ValueType']
                is_abnormal = 0
                print(height, value, float(value), value_type)
                # flag values outside the configured bounds

                # temperature: value_type 0
                if value_type == 0 and conf['CheckTemperatureValue'] == 1:
                    if (value - conf['TemperatureMin'] < EPSINON) or (value - conf['TemperatureMax'] > EPSINON):
                        is_abnormal = 1
                        print("极值法检查异常数据...温度值异常:%.6f" % value)
                        abnormal_count += 1

                # density: value_type 1
                elif value_type == 1 and conf['CheckDensityValue'] == 1:
                    if (value - conf['DensityMin'] < EPSINON) or (value - conf['DensityMax'] > EPSINON):
                        is_abnormal = 1
                        print("极值法检查异常数据...密度值异常:%.6f" % value)
                        abnormal_count += 1

                # zonal or meridional wind speed: value_type 2 or 3
                elif (value_type == 2 or value_type == 3) and conf['CheckWindSpeedValue'] == 1:
                    if (value - conf['WindSpeedMin'] < EPSINON) or (value - conf['WindSpeedMax'] > EPSINON):
                        is_abnormal = 1
                        print("极值法检查异常数据...风速值异常:%.6f" % value)
                        abnormal_count += 1

                # ExceptionRule mapping (0 drop; 1 set to default; 2 keep):
                # the abnormal flag becomes 2 for "set to default", 3 for "keep"
                if is_abnormal == 1 and conf['ExceptionRule'] == 1:
                    is_abnormal += 1
                elif is_abnormal == 1 and conf['ExceptionRule'] == 2:
                    is_abnormal += 2
                raw_data[i]['IsAbnormal'] = is_abnormal

            data_list_json_tmp[key] = raw_data

        data_list_json = data_list_json_tmp

        print("预处理的数据总条数：%d" % count)
        print("异常数据条数：%d" % abnormal_count)

        # record the executed step
        pre_process_parameter_list.append(s4_desc_extreme_value_check)

        print("步骤4：极值检查 处理完成..")
        time.sleep(2)

        # honor a user-requested pause from the UI
        if isTaskStoped(TASK_ID, conf['TaskType']) == 1:
            return 0

    # Step 5: physical-consistency check (density values only)
    if conf['PhysicsConsistencyCheck'] == 1:
        print("物理一致性检查..")
        data_list_json_tmp = {}
        for key in data_list_json.keys():
            raw_data = data_list_json[key]
            # each row carries one height and one probe value
            for i in range(len(raw_data)):
                # only rows not already flagged by earlier checks are examined
                if raw_data[i]['IsAbnormal'] == 0:
                    # count += 1
                    height = raw_data[i]['Height']
                    value = Decimal(raw_data[i]['Value'])
                    value_type = raw_data[i]['ValueType']

                    # abnormality detection
                    is_abnormal = 0
                    if value_type == 1 and physicsConsistencyCheck(height, value):
                        is_abnormal = 1

                        # ExceptionRule mapping: flag 2 = set to default, 3 = keep
                        if conf['ExceptionRule'] == 1:
                            is_abnormal += 1
                        elif conf['ExceptionRule'] == 2:
                            is_abnormal += 2

                        # print("物理一致性检查：检测到异常值")
                        # abnormal_count += 1
                    raw_data[i]['IsAbnormal'] = is_abnormal

            data_list_json_tmp[key] = raw_data

        data_list_json = data_list_json_tmp

        # record the executed step
        pre_process_parameter_list.append(s5_desc_physics_consist_check)

        print("步骤5. 物理一致性检查 处理完成..")
        time.sleep(2)

    # Step 6: statistical methods (not yet implemented)
    if conf['StatisticsCheck'] == 1:
        print("统计学检查异常数据...")

        # record the executed step
        pre_process_parameter_list.append(s6_desc_statistics_check)

        print("步骤6 统计学方法 处理完成..")
        time.sleep(2)

    # Step 7: box-plot method (requires at least 50 groups; not yet implemented)
    if conf['BoxplotCheck'] == 1:
        print("箱线图法检查异常数据...")

        # record the executed step
        pre_process_parameter_list.append(s7_desc_boxplot_check)

        print("步骤7 箱线图法 处理完成..")
        time.sleep(2)

    conf['ParameterDescription'] = ','.join(pre_process_parameter_list)

    # Special case: data selected from the permanent tables but results stored
    # in the temporary tables — the head rows must first be copied into the
    # temporary head table
    print("test: DbHeadTable: %s, DbSaveHeadTable: %s" % (conf['DbHeadTable'], conf['DbSaveHeadTable']))
    # if conf['DbHeadTable'] == db_data_head_table and conf['DbSaveHeadTable'] == db_temporary_data_head_table:
    if conf['DbHeadTable'].startswith("%s" % db_data_head_table) and conf['DbSaveHeadTable'] == db_temporary_data_head_table:
        # only taken for non-persisted runs (temporary storage)
        head_id_pair_list = copyHeadInfoToTempHeadTable(conf, data_list_json.keys())
        # remap the cached data onto the new temporary head ids
        data_list_json = updateDataJsonInfo(head_id_pair_list, data_list_json)

    print("预处理数据保存到数据库")
    # TODO mind the parameter persistence!
    # Overwrite-style save: delete any previous rows with the same head ids
    if conf['DbSaveHeadTable'] == db_temporary_data_head_table:
        headid_list = []
        for head_id in data_list_json.keys():
            headid_list.append(str(head_id))
        delete_pre_data(conf['DbSaveTable'], headid_list)
    else:
        # permanent tables are sharded by system type ("systype_headid" keys)
        headid_list = {}
        for key in data_list_json.keys():
            # headid_list.append(key.split('_')[1])
            sys_type = key.split('_')[0]
            head_id = key.split('_')[1]
            if sys_type not in headid_list.keys():
                headid_list[sys_type] = []
            headid_list[sys_type].append(str(head_id))

        for sys_type in headid_list.keys():
            delete_pre_data2309(conf['DbSaveTable'], headid_list[sys_type], sys_type)

    # print(data_list_json.keys())
    # time.sleep(30)
    print(data_list_json)
    print("****************")

    save_processed_data_into_db(data_list_json, conf)
    # time.sleep(30)

    # When persisting, the headId is shared between raw and preprocessed data
    if conf['SaveFlag'] == 1:
        data_head_id_str_list = []
        for key in data_list_json.keys():
            h = int(key.split('_')[1])
            data_head_id_str_list.append(str(h))

            ret = getHeadFullInfoByHeadIdV2(conf, h)
            print("更新数据头信息表状态（预处理）：")
            print(ret)

            # only count files that have never been preprocessed before
            # if ret != "" and checkHeadIdIsPerformedV2(h) is False:
            if ret != "" and checkHeadIdIsPerformed2309(conf, h) is False:
                print("当前文件未有预处理记录，计数！")
                station_id = ret['StationId']
                print(station_id)
                # per-file processed-data counter on the station
                updateStationDataCount(station_id, 1, 1)
                # updateStationDataCount(station_id, len(raw_data[0]), 2)
                # update the SharedHeadId field
                # updateDataHeadTableShareHeadId([str(h)], 1)
                updateDataHeadTableShareHeadId2309(conf, [str(h)], 1)

        # update the SharedWay field (persisted results only)
        # updateDataHeadTableShareWay(data_head_id_str_list, 1)
        updateDataHeadTableShareWay2309(conf, data_head_id_str_list, 1)

    # Update the head-table status: TODO
    # updateDataHeadTable(head_id, 1, 1)      # i.e. preprocessed data (1), state: preprocessing finished (1)
    # update the headId records
    update_processed_data_headinfo_into_db(data_list_json, conf)
    # update_processed_data_headinfo_into_db_in_batch(data_list_json, conf)

    # The task-finished state is set by the caller (runPreprocessTaskFunc)
    # print("将要设置任务结束..")
    # if TASK_ID != "":
    #     print("设置任务结束！")
    #     updateTaskStatus(TASK_ID, conf['TaskType'])

    return 0


############################################
# 数据存储函数(数据预处理、数据评定共用)
def save_data_into_db(db_table, db_cols, value_arr, save_flag):
    """
    Insert preprocessed rows into the database.

    save_flag == 1: write into the permanent table *db_table*
                    (bt_data or bt_data_l0).
    otherwise:      write into the temporary table
                    db_temporary_data_table_l2.
    Rows are only inserted when value_arr is non-empty.
    """
    if save_flag == 1:
        target_table = db_table
        print("save_data_into_db预处理后保存的数据条数：%d, 插入数据库表：%s" % (len(value_arr), target_table))
    else:
        target_table = db_temporary_data_table_l2
        print("save_data_into_db预处理后保存的数据条数：%d, 插入数据库临时表：%s" % (len(value_arr), target_table))

    if value_arr:
        insert_db_in_batch(target_table, db_cols, value_arr)


def update_data_into_db(db_table, db_cols, value_arr, save_flag):
    """
    Batch-update preprocessed rows in the database.

    save_flag == 1: update the permanent table *db_table*.
    otherwise:      update the temporary table db_temporary_data_table_l2.
    Updates are only issued when value_arr is non-empty.
    """
    target_table = db_table if save_flag == 1 else db_temporary_data_table_l2
    print("预处理后保存的数据条数：%d, 批量更新到数据库表：%s" % (len(value_arr), target_table))
    if value_arr:
        update_db_in_batch(target_table, db_cols, value_arr)


def clear_db_temporary_table(db_table="bt_temporary_data"):
    """Truncate the temporary data table (bt_temporary_data by default)."""
    clear_table(db_table)


# TODO 需要修改
def clear_db_temporary_table_and_keep_last_same_type_task(task_type, db_table="bt_temporary_data", db_head_table="bt_temporary_data_head"):
    """
    Clear the previous batch of the same task type from the temporary tables.

    Looks up the most recent BatchId for *task_type*
    (1 preprocessing / 2 assessment / 3 data analysis / 5 one-click
    assessment) and deletes those rows from both the temporary data
    table and the temporary head table (head cleanup added 2022-07-03).
    Does nothing (with a warning) when no prior batch exists.
    """
    latest = getLatestDataBatchId(task_type)
    if latest == "":
        print("警告：未找到最近处理的同类型数据")
        return

    batch_id = latest['KeyName']
    print("clear_db_temporary_table_and_keep_last_same_type_task: %s" % batch_id)

    condition = 'BatchId = "%s"' % batch_id
    print("query_conditon: %s" % condition)

    # Same BatchId condition applies to the data table and the head table.
    for table in (db_table, db_head_table):
        delete_data_from_table(table, condition)


# 预处理后数据存库使用
def save_processed_data_into_db(data_list_json, conf):
    """
    Persist preprocessed rows into conf['DbSaveTable'].

    data_list_json maps keys of the form "<prefix>_<head_id>" (or a plain
    head id) to lists of row dicts. Column names are taken from the first
    non-empty row; "Id" (primary key) and "CreateTime" (DB-generated) are
    skipped. Existing duplicates are removed first — by BatchId for
    temporary tables, otherwise by HeadId — then rows are bulk-inserted.

    NOTE(review): SQL values are assembled via string formatting, so input
    is assumed trusted (no parameterization). Row dicts are assumed to
    iterate keys in the same order as the first row — TODO confirm.
    """
    db_table = conf['DbSaveTable']
    # Derive the column list db_cols from the first non-empty row group.
    db_cols = []
    value_arr = []
    headid_list = []
    for key in data_list_json.keys():
        if len(data_list_json[key])>0:
            # print(data_list_json[key])
            for k in data_list_json[key][0].keys():
                if k == "Id" or k == "CreateTime":  # Id: primary key, not inserted; CreateTime: auto-generated by the DB
                    continue
                db_cols.append(k)
            break

    for key in data_list_json.keys():
        for data_json in data_list_json[key]:
            # Filled placeholder heights are NOT filtered out: they must be
            # kept for front-end display.
            # if data_json['U20'] == NoneStr:
            #     continue

            # print(data_json)
            tmp_list = []
            for k in data_json.keys():
                if k == "Id" or k == "CreateTime":
                    continue
                if k == "Value":
                    tmp_list.append('"%.8f"' % data_json[k])
                elif k == "HeadId":
                    # Keys shaped like "s1_5": the trailing number is the head id.
                    if '_' in str(key):
                        tmp_list.append('"%s"' % key.split('_')[1])
                    else:
                        tmp_list.append('"%s"' % key)
                elif k == "BatchId":
                    tmp_list.append('"%s"' % conf['TaskId'])
                else:
                    tmp_list.append('"%s"' % str(data_json[k]))
            data_str = ','.join(tmp_list)
            # Replace the literal "None" string with SQL NULL.
            data_str = data_str.replace('"None"', "NULL")
            value_arr.append(data_str)

    for key in data_list_json.keys():
        if '_' in str(key):
            headid_list.append(key.split('_')[1])
        else:
            headid_list.append(str(key))

    # print(value_arr)
    print(db_cols)
    print(len(value_arr))
    # print(headid_list)
    if len(headid_list) > 10:
        print(headid_list[0:10])

    print("清除库中已有的重复数据..")
    if (db_table == db_temporary_data_table_l2 or db_table == db_temporary_data_table_l0) and 'TaskType' in conf.keys():
        # Temporary tables: duplicates are identified by BatchId of the same task type.
        print("使用batch_id判断")
        # if conf['DataSource'] == 2:
        #     # Imported file (already preprocessed): clear that file instead.
        #     delete_pre_data_by_batch_id(db_table, 2)
        # else:
        delete_pre_data_by_batch_id(db_table, conf['TaskType'])
    else:
        # Permanent tables: duplicates are identified by HeadId.
        print("使用head_id判断")
        delete_pre_data(db_table, headid_list)  # TODO add a SysType parameter

    print("预处理后保存的数据条数：%d, 插入数据库表：%s" % (len(value_arr), db_table))
    if len(value_arr) > 0:
        ret = insert_db_in_batch(db_table, db_cols, value_arr)


def delete_pre_data(db_table, headid_list):
    """
    Delete all rows of *db_table* whose HeadId appears in headid_list.

    NOTE(review): the condition is built by string interpolation — head ids
    are assumed to be trusted numeric strings.
    """
    condition = 'HeadId in (%s)' % ','.join(headid_list)
    print("query_conditon: %s" % condition)
    delete_data_from_table(db_table, condition)


def delete_pre_data2309(db_table, headid_list, sys_type):
    """
    Delete rows of *db_table* matching both the given HeadId list and
    SysType (2309 variant of delete_pre_data).
    """
    condition = 'HeadId in (%s) and SysType = "%s"' % (','.join(headid_list), sys_type)
    print("query_conditon: %s" % condition)
    delete_data_from_table(db_table, condition)


def delete_pre_data_by_batch_id(db_table, task_type):
    """
    Delete rows whose BatchId belongs to any previous task of *task_type*
    (i.e. the batch ids of the last same-type tasks).
    No-op when no batch ids are found.
    """
    batch_ids = getAllTaskKeyNameByTaskType(task_type)
    if len(batch_ids) <= 0:
        return
    condition = 'BatchId in (%s)' % ','.join(batch_ids)
    print("query_conditon: %s" % condition)
    delete_data_from_table(db_table, condition)


def update_processed_data_headinfo_into_db(data_list_json, conf):
    """
    Mark every head referenced by data_list_json as preprocessed.

    Keys are either plain head ids or "<prefix>_<head_id>" strings; each
    head row is updated to data kind 1 (preprocessed) and status 1 (done).
    Afterwards, if conf carries a LatestTaskId, the task table is updated
    to chain the new TaskId to the previous one.
    """
    for key in data_list_json.keys():
        key_str = str(key)
        head_id = int(key_str.split('_')[1]) if '_' in key_str else int(key_str)
        # updateDataHeadTableV2(conf, head_id, 1, 1, 0)
        updateDataHeadTableV2(conf, head_id, 1, 1)
    print("共更新HeadId数据量：%d" % len(data_list_json.keys()))

    # Link this task to the previous one for tracking purposes.
    if 'LatestTaskId' in conf.keys():
        updateTaskKeyNameById(conf['LatestTaskId'], conf['TaskId'])


def update_processed_data_headinfo_into_db_in_batch(data_list_json, conf):
    """
    Batch variant of update_processed_data_headinfo_into_db.

    Use with care — still needs hardening; reference:
    https://wenku.baidu.com/view/2dd31f4ca75177232f60ddccda38376bae1fe04c.html

    Updates Id / ParameterSetting / ParameterDescription / DataStatus for
    every head referenced by data_list_json in one batch statement.
    """
    db_table = conf['DbHeadTable']
    db_cols = ["Id", "ParameterSetting", "ParameterDescription", "DataStatus"]
    value_arr = []
    for key in data_list_json.keys():
        # Bugfix/consistency: keys may be "<prefix>_<head_id>" strings
        # (as handled by update_processed_data_headinfo_into_db); the old
        # code called int(key) directly and crashed on such keys.
        key_str = str(key)
        head_id = int(key_str.split('_')[1]) if '_' in key_str else int(key_str)
        tmp_str = '"%d", "%d", "%s", "%d"' % (head_id, conf['ParameterSetting'], conf['ParameterDescription'], 1)
        value_arr.append(tmp_str)

    print("预处理后数据，修改数据库表头信息！")
    if len(value_arr) > 0:
        # Batch-update the head rows.
        ret = update_db_in_batch(db_table, db_cols, value_arr)
    print("共更新HeadId数据量：%d" % len(value_arr))


############################################
# 高度统一化和时间统一化（废弃处理）
def heightNormalization(raw_data, db_table, db_cols, TASK_ID, save_flag):
    """
    Unify height resolution (legacy/deprecated path).

    Rows sharing the same rounded height are averaged into one record and
    the results are persisted via save_data_into_db.

    Assumptions: rows are ordered by height; each row is a tuple where
    index 1 is HeadId, index 4 is the value type, index 5 is Height and
    index 6 is Value (see the sample row in the comment below).

    Bugfix: when a new height opened a group, the current row's value used
    to be discarded (probe_value_list was reset to []); it is now carried
    into the new group, matching heightNormalizationByStep.
    """
    print("高度统一化1预处理中..")

    value_arr = []
    probe_value_list = []
    pre_height = -1

    for i in range(len(raw_data)):
        # e.g. (455489, 51, None, 'DenAll', Decimal('20.00'), Decimal('2323.0000'), datetime.datetime(2022, 5, 7, 18, 44, 50),
        height = round(raw_data[i][5])      # round height to an integer
        value = raw_data[i][6]

        # First row, or same height bucket as the previous row.
        if (pre_height + 1 < EPSINON) or (abs(height - pre_height) < EPSINON):
            pre_height = height
            probe_value_list.append(value)
            continue

        # New height reached: flush the previous bucket as its mean.
        print(probe_value_list)
        value_averge = np.mean(probe_value_list)

        is_abnormal = 0
        value_str = '"%d", "%s", "%s", "%.2f", "%.6f", "%d"' % (raw_data[i][1], TASK_ID, raw_data[i][4], pre_height, value_averge, is_abnormal)  # ParameterSetting missing
        print(value_str)
        value_arr.append(value_str)

        # Start the next bucket WITH the current value (previously dropped).
        pre_height = height
        probe_value_list = [value]

    if len(probe_value_list) > 0:
        # Flush the trailing bucket.
        value_averge = np.mean(probe_value_list)
        is_abnormal = 0
        value_str = '"%d", "%s", "%s", "%.2f", "%.6f", "%d"' % (raw_data[i][1], TASK_ID, raw_data[i][4], pre_height, value_averge, is_abnormal)  # ParameterSetting missing
        print(value_str)
        value_arr.append(value_str)

    # Persist: permanent table or temporary table depending on save_flag.
    save_data_into_db(db_table, db_cols, value_arr, save_flag)

    return 0

# step支持不同整数值
def heightNormalizationByStep(raw_data, step):
    """
    Unify height resolution with a configurable integer step.

    Heights within [start_height, start_height + step - 1] collapse onto
    start_height; rows sharing the collapsed height are averaged into one
    output record. Assumes rows are ordered by height; each row is a dict
    with at least 'Height' and 'Value' keys.

    Returns a new list of row dicts with averaged 'Value' per bucket.

    Bugfix: output records are now shallow COPIES of the source rows.
    Previously `tmp = raw_data[i]` aliased the input dict, mutating the
    caller's data; worse, when the final row both closed a group and formed
    the trailing bucket, the SAME dict was appended twice and the earlier
    record was clobbered by the trailing bucket's values.
    """
    print("高度统一化2预处理中..")

    ret_value_arr = []
    probe_value_list = []
    pre_height = -1

    # Initial bucket range (integers).
    start_height = -1
    end_height = -1
    if len(raw_data) > 0:
        start_height = round(raw_data[0]['Height'])
        end_height = start_height + step - 1

    for i in range(len(raw_data)):
        height = round(raw_data[i]['Height'])      # round height to an integer
        value = str_to_float(raw_data[i]['Value'])

        # Map this height into the current bucket [start_height, end_height];
        # otherwise open a new bucket starting at this height.
        if (height - start_height) > EPSINON and (height - end_height) < EPSINON:
            height = start_height
        else:
            start_height = height
            end_height = start_height + step - 1

        # First row, or same bucket as the previous row.
        if (pre_height + 1 < EPSINON) or (abs(height - pre_height) < EPSINON):
            pre_height = height
            probe_value_list.append(value)
            continue

        # New bucket reached: flush the previous bucket as its mean.
        value_averge = np.mean(probe_value_list)

        tmp = dict(raw_data[i])     # copy: never mutate/alias the input row
        tmp['Height'] = str_to_float(pre_height)
        tmp['Value'] = value_averge
        ret_value_arr.append(tmp)

        # Start the next bucket with the current value.
        pre_height = height
        probe_value_list = [value]

    if len(probe_value_list) > 0:
        # Flush the trailing bucket.
        value_averge = np.mean(probe_value_list)
        tmp = dict(raw_data[i])     # copy (see bugfix note in docstring)
        tmp['Height'] = str_to_float(pre_height)
        tmp['Value'] = value_averge
        ret_value_arr.append(tmp)

    return ret_value_arr


# 注意：只能支持一个站点的时间分辨率处理！多个站点（相同时间）的数据只能处理第一个站点的数据！
def timeNormalization(data_list_json, conf, TASK_ID, flag=0):
    """
    Unify time resolution.

    Input:  {"head_id1": [row, ...], "head_id2": [...], ...}
    Output: {"new_head_id1": [row, ...], ...} — one NEW head record is
    created per (value_type, time bucket) combination.

    conf supplies TimeReslutionUnit (0 hour / 1 day / 2 week / 3 month /
    4 quarter / 5 year / 6 whole time span), TimeReslution, and
    StatisticMethod — a comma string out of "0,1,2,3" selecting
    mean / std / max / min.

    flag == 0: preprocessing (mean only is stored);
    flag == 1: data analysis (the four statistics go into U1..U4).

    NOTE: only supports a single station's data; with several stations
    sharing the same time span only the first matching head is used.
    NOTE(review): if no head in data_list_json matches a value_type,
    head_id_new would be referenced before assignment — confirm inputs
    always contain a matching head.
    """
    print("时间统一化处理对象：")
    # print(data_list_json)
    # ret_data_dict shape:
    # {value_type: {time_bucket_key: {height: [value, ...]}}}
    # e.g. {0 (temperature): {"day/week/month/quarter/year/full span": {"30": [a, b, c...], "31": [...]}}, 1 (density): {}, 2 (wind speed): {}}
    ret_data_dict = {}
    # 1. Fetch raw data per head_id / value_type.
    # 2. Bucket the filtered rows into groups.
    # TODO: needs proper per-group partitioning
    start_time_max = ""
    end_time_max = ""
    for head_id in data_list_json.keys():
        print("head_id = %d" % head_id)
        #(head_id_tmp, value_type, start_time, end_time) = getHeadIdInfoByHeadId(int(head_id), 0)
        (head_id_tmp, value_type, start_time, end_time) = getHeadIdInfoByHeadIdV2(conf, int(head_id), 0)
        # print(head_id_tmp, value_type, start_time, end_time)

        raw_data = data_list_json[head_id]
        if len(raw_data) <= 0:
            print("获取时间统一分辨率处理数据对象失败！")
            continue

        # Track the widest time span over all heads (used for the
        # "whole time span" bucket, TimeReslutionUnit == 6).
        (start_time_max, end_time_max) = checkMaxTimeRange(start_time_max, end_time_max, start_time, end_time)
        print(start_time_max, end_time_max)

        print("raw_data:XXXXXXXX")
        # print(raw_data)
        value_type = raw_data[0]['ValueType']
        if value_type not in ret_data_dict.keys():
            ret_data_dict[value_type] = {}

        for i in range(len(raw_data)):
            height = raw_data[i]['Height']
            value = Decimal(raw_data[i]['Value'])

            # Time-resolution bucketing:
            # 1) the chosen resolution determines group size and time span;
            # 0:hour/1:day/2:week/3:month/4:quarter/5:year/6:full span
            # (0 means no aggregation)
            key = getTimeFormat(start_time, conf["TimeReslutionUnit"])

            if key not in ret_data_dict[value_type].keys():
                ret_data_dict[value_type][key] = {}
            if height not in ret_data_dict[value_type][key].keys():
                ret_data_dict[value_type][key][height] = []
            ret_data_dict[value_type][key][height].append(value)

    print(ret_data_dict.keys())
    for value_type in ret_data_dict.keys():
        print(value_type)
        print(len(ret_data_dict[value_type].keys()))
        print(ret_data_dict[value_type].keys())

    # 3) Compute the selected statistics per bucket (heights are aligned first).
    # StatisticMethod: 0 mean; 1 std; 2 max; 3 min — multi-select string such as "0,1,2,3" or "0".

    ret_data_dict_result = {}  # local bookkeeping only; not returned
    # db_cols = ["HeadId", "BatchId", "Height", "ParameterSetting", "DataStartTime", "DataEndTime", "U1", "U2", "U3", "U4"]
    # db_cols = ["HeadId", "BatchId", "ValueType", "Height", "Value", "U1", "U2", "U3", "U4", "BinKey"]
    if flag == 0:
        db_cols = ["HeadId", "BatchId", "ValueType", "Label", "Height", "Value", "BinKey"]
    else:
        db_cols = ["HeadId", "BatchId", "ValueType", "Label", "Height", "Value", "U1", "U2", "U3", "U4", "BinKey"]
    value_arr = []
    tmp_head_count = 0
    initHeadTableTempDataHeadId()       # reset the head table's TempDataHeadId field to -1
    # Result holder, same shape as the input:
    ret_data_list_json = {}             # {"head_id1": [], "head_id2": [], ...}

    statistic_type_arr = conf['StatisticMethod'].split(',')
    print(statistic_type_arr)

    # 0:hour/1:day/2:week/3:month/4:quarter/5:year/6:full span
    time_format = getTimeFormatStr(conf["TimeReslutionUnit"])
    count = 0
    # Iterate over each value type...
    for value_type in ret_data_dict.keys():
        print(value_type)
        ret_data_dict_result[value_type] = {}
        # ...and each time bucket within it.
        for key in ret_data_dict[value_type].keys():
            print("value_type=%d; key=%s" % (value_type, key))
            print("len=%d;" % len(ret_data_dict[value_type][key]))
            ret_data_dict_result[value_type][key] = {}
            tmp_head_count += 1

            # Determine the bucket's standard time range (from the
            # resolution unit and step size).
            # TODO could record all actual timestamps and take min/max
            if conf["TimeReslutionUnit"] == 6:
                data_start_time = start_time_max
                data_end_time = end_time_max
            else:
                (data_start_time, data_end_time) = getTimeRange(conf["TimeReslutionUnit"], conf['TimeReslution'], key)

            #print("新时间分辨率起始区间：")
            #print(conf["TimeReslutionUnit"], conf['TimeReslution'], key, data_start_time, data_end_time)

            # Each time bucket yields one new group of data (one new headId).
            # 1) Pull device/station/system info from a matching old head.
            label = ""      # Label for the generated rows; used later for uncertainty assessment
            for head_id in data_list_json.keys():
                print("head_id = %d" % head_id)
                head_info = getHeadFullInfoByHeadIdV2(conf, head_id)
                # print(head_info)
                if value_type == head_info['ValueType']:
                    # 2) Create a new headId from the matching old head
                    #    (same station + same device).
                    headParamConf = {
                        "BatchId": TASK_ID,
                        "Count": 0,
                        "Level": head_info['Level'],
                        "DataType": head_info['DataType'],      # ???
                        "ValueType": value_type,
                        "ValueUnit": head_info['ValueUnit'],
                        "DataStartTime": data_start_time,
                        "DataEndTime": data_end_time,
                        "filepath": "",
                        "tempfilename": "",
                        "StationId": head_info['StationId'],
                        "StationName": head_info['StationName'],
                        "DeviceId": head_info['DeviceId'],
                        "DeviceName": head_info['DeviceName'],
                        "SystemType": head_info['SystemType'],  # ???
                        "ProjectName": head_info['ProjectName'],
                        "RawDataStatus": 0,     # raw data status: 0 = imported
                        "SharedWay": 1,         # header origin: 0 raw file only; 1 preprocessing only; 2 shared by both
                        "SharedHeadId":1,       # header marker: 0 raw file only; 1 preprocessing only; 2 uncertainty assessment; 3 preprocessing + assessment
                        # "DbTable": db_data_head_table if conf['SaveFlag'] == 1 else db_temporary_data_head_table,
                        # "DbTable": conf['DbHeadTable'],
                        "DbTable": conf['DbSaveHeadTable'],
                        "BinKey": key,
                    }
                    print(headParamConf)
                    # Look up the headId; insert a new head row if absent.
                    head_id_new = CheckDataHeadTableInfo(headParamConf)
                    print("头部信息headID：%s" % head_id_new)
                    if head_id_new <= 0:
                        print("Error: 文件头信息插入数据库错误!")
                        return {"code": -1, "msg": "文件头信息插入数据库错误！"}

                    # Capture the Label (used later for uncertainty assessment).
                    if len(data_list_json[head_id])>0:
                        label = data_list_json[head_id][0]['Label']

                    break

            ret_data_list_json[head_id_new] = []

            # Build the new group of rows below:
            test_flag = 2
            # print(key)
            # print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
            # print(ret_data_dict[value_type][key].keys())
            # print(ret_data_dict[value_type][key])
            for height in ret_data_dict[value_type][key].keys():
                value_statistic = 0.0
                # Statistics: 0 mean; 1 std; 2 max; 3 min.
                value_averge = 0.0
                value_std = 0.0
                value_max = 0.0
                value_min = 0.0
                # Compute only the selected statistics; unselected ones are
                # set to "none" so the front end can hide them.
                if '0' in statistic_type_arr:
                    value_averge = np.mean(ret_data_dict[value_type][key][height])
                else:
                    value_averge = "none"
                if '1' in statistic_type_arr:
                    value_std = np.std(ret_data_dict[value_type][key][height])
                else:
                    value_std = "none"
                if '2' in statistic_type_arr:
                    value_max = np.max(ret_data_dict[value_type][key][height])
                else:
                    value_max = "none"
                if '3' in statistic_type_arr:
                    value_min = np.min(ret_data_dict[value_type][key][height])
                else:
                    value_min = "none"

                value_statistic = value_averge
                ret_data_dict_result[value_type][key][height] = value_statistic

                if flag == 0:
                    # Preprocessing: mean only.
                    result = [[head_id_new, TASK_ID, value_type, label, str_to_float(height), value_statistic, key]]
                else:
                    # Data analysis: the four statistics go into U1..U4.
                    result = [[head_id_new, TASK_ID, value_type, label, str_to_float(height), value_statistic,
                               value_averge, value_std, value_max, value_min, key]]
                # print(ret_data_dict[value_type][key][height])
                # print(result)
                result_json_arr = listData2JsonObj(result, db_cols)

                # Only print the first two rows.
                if test_flag:
                    print(result_json_arr)
                    test_flag -= 1

                ret_data_list_json[head_id_new].append(result_json_arr[0])
                count += 1

            print(head_id_new)
            #print(ret_data_list_json[head_id_new])

            # The code below works but is slow, so it is disabled.
            # if time_format == "quarter":
            #     # key format: 2022-1Q, 2022-2Q, 2022-3Q, 2022-4Q
            #     key = key.split("-")[1][0]
            #     print("key: %s" % key)
            #
            # # Link the new HeadId to the old one via the head table's
            # # TempDataHeadId field.
            # # Drawback: only the latest link is kept; history is lost.
            # print("更新Head表TempDataHeadId字段： key = %s ; value_type = %d" % (key, value_type))
            # for head_id in data_list_json.keys():
            #     print("head_id = %d" % head_id)
            #     (head_id_tmp, value_type_tmp, start_time, end_time) = getHeadIdInfoByHeadId(int(head_id), 0)
            #     if value_type == value_type_tmp:
            #         updateHeadTableTempDataHeadId(head_id, tmp_head_count, time_format, key)

    # Persist into the temporary table (disabled):
    # if len(value_arr) > 0:
    #     ret = save_data_into_db(db_temporary_data_table_l2, db_cols, value_arr, 0)
    #     count += len(value_arr)
    print(start_time_max, end_time_max)

    print("总数据HeadId数量：%d" % tmp_head_count)
    print("总数据条数：%d" % count)

    return ret_data_list_json


def differenceAlgorithm(raw_data_list, height_standard_list):
    """
    Interpolation algorithm placeholder.

    Not implemented yet — currently returns *raw_data_list* unchanged.
    """
    print("TODO")
    return raw_data_list


#########################################
# 物理一致性检查
def physicsConsistencyCheck(height, desity):
    """
    Physical consistency check for a density sample.

    Expected density at altitude z (km): DL = 1.2 * exp(-z / H), H = 7 km.
    A sample is abnormal when |log10(D) - log10(DL)| > 1.
    Returns True for an abnormal value, False for a normal one.
    """
    scale_height = 7    # constant H = 7 km
    expected_density = 1.2 * math.exp(-height / scale_height)

    # Compare orders of magnitude: abs(log10(D) - log10(DL)) <= 1 is normal.
    deviation = abs(math.log10(desity) - math.log10(expected_density))
    if deviation > 1:
        # Abnormal value — log it.
        print("height:%s, desity:%s, abs_res:%s" % (str(height), str(desity), str(deviation)))
        return True
    return False


# 对密度和温度同时进行检查（密度和温度的时间和高度需一致），约束密度/温度和高度的关系
def physicsConsistencyCheckDensityAndTemperature(height, density, temperature):
    """
    Joint physical consistency check for density and temperature
    (both samples must share the same time and height).

    Pressure from the ideal-gas relation:  P  = D * R * T / (100 * M)
    Expected pressure at altitude z (km):  PL = 1000 * exp(-z / H), H = 7 km
    A sample is abnormal when |log10(P) - log10(PL)| > 1.
    Returns True for an abnormal value, False for a normal one.
    """
    H = 7           # scale height, km
    R = 8.314510    # gas constant
    M = 28.959      # molar mass of air

    # P = DRT / 100M
    D = density
    T = temperature
    z = height
    P = D * R * T / (100 * M)

    # PL = 1000 exp(-z/H)
    a = - z / H
    PL = 1000 * math.exp(a)

    abs_res = abs(math.log10(P) - math.log10(PL))

    if abs_res <= 1:
        # Normal value.
        return False
    # Abnormal value.
    # Bugfix: the original referenced the undefined name `desity` here,
    # raising NameError whenever an abnormal sample was found.
    print("height:%s, desity:%s, temperature:%s, abs:%s" % (str(height), str(density), str(temperature), str(abs_res)))
    return True


# 对密度和温度同时进行检查（密度和温度的时间需一致），约束密度/温度和高度的关系
def timeConsistencyCheckDensityAndTemperature(height, density, temperature):
    """
    Time-consistency check between density and temperature.

    Within one probe group (station + device + time), a density value is
    abnormal (to be set to NaN) whenever the matching temperature is
    missing (empty string). Returns True for abnormal, False otherwise.
    """
    return temperature == ""


##########################################
#统计判断准则（statistical decision criteria）
def abnormalDataCheckRule(option, data=None):
    """
    Dispatch to an outlier-detection rule.

    option: 1 Pauta (3-sigma) rule; 2 Grubbs criterion; 3 Dixon criterion;
            4 robust statistics.
    data:   sample list, required by the Grubbs rule (option 2). The
            original code called checkRuleGrubbs() with no arguments,
            which always raised TypeError; the new optional parameter is
            backward-compatible for options 1/3/4.
    Returns the selected rule's result, or None for an unknown option
    (or option 2 without data).
    """
    if option == 1:
        return checkRuleRajda()
    elif option == 2:
        # Grubbs needs the sample; skip gracefully when none was supplied.
        return checkRuleGrubbs(data) if data is not None else None
    elif option == 3:
        return checkRuleDixon()
    elif option == 4:
        return checkRuleRobust()

    return None

#####
#1:拉伊达准则（根据拉依达准则（3σ准则）去除异常值）
#https://blog.csdn.net/qq_40041133/article/details/109009409
def checkRuleRajda(src_path='待处理数据.xlsx', dst_path='处理后的结果.xlsx'):  # TODO
    """
    Pauta criterion (3-sigma rule): drop every row that contains a value
    more than 3 standard deviations from its column mean, then write the
    cleaned sheet back out.

    src_path: input Excel file (first sheet, first row is the header).
              Defaults preserve the original hard-coded filename.
    dst_path: output Excel file (written to sheet 'page_1').
    Returns 0 on completion.
    """
    src_data = pd.read_excel(src_path, sheet_name=0, header=0)  # first sheet, first row is the column header
    # pd.set_option('display.max_rows', src_data.shape[0] + 1)

    mean = src_data.mean()
    std = src_data.std()

    # Collect indices of rows with at least one 3-sigma outlier.
    drop_indices = []
    for index, row in src_data.iterrows():
        if ((row - mean).abs() > 3 * std).any():
            drop_indices.append(index)

    print(drop_indices)

    dst_data = src_data.drop(drop_indices)
    # Bugfix: ExcelWriter.save() was removed in pandas 2.0 — use the
    # context manager, which saves and closes on exit.
    with pd.ExcelWriter(dst_path) as writer:
        dst_data.to_excel(writer, sheet_name='page_1')
    return 0

#2:格拉布斯准则;
#https://blog.csdn.net/HeumC_TheMan/article/details/108710929
def Average_Number(List):
    """Arithmetic mean of the values in *List* (must be non-empty)."""
    total = 0
    for item in List:
        total += item
    return total / len(List)

def Variance(List):
    """Population variance of *List*: mean of squared deviations."""
    ave = Average_Number(List)
    return sum(((num - ave) ** 2) / len(List) for num in List)

def Standard_Deviation(List):
    """Population standard deviation: square root of Variance(List)."""
    return Variance(List) ** 0.5

def Gi(number, lst):
    """
    Grubbs statistic g_i = |x - mean| / SD for *number* within sample *lst*.

    Only defined for sample sizes up to 17, the span of Grubbs_Table.
    Raises ValueError for larger samples — the original called exit(0),
    which silently terminated the whole process with a SUCCESS exit code.
    """
    if len(lst) > 17:
        raise ValueError("Grubbs check supports at most 17 samples, got %d" % len(lst))
    average = Average_Number(lst)
    SD = Standard_Deviation(lst)
    gi = (abs(number - average)) / SD
    return gi
def Grubbs_Table(n):
    """
    Critical values of the Grubbs test for sample size *n* (3..17).

    Each row holds the thresholds for five confidence levels.
    Returns None when n is outside the tabulated range.
    """
    critical_values = {
        3: [1.148, 1.153, 1.155, 1.155, 1.155],
        4: [1.425, 1.463, 1.481, 1.492, 1.496],
        5: [1.602, 1.672, 1.715, 1.749, 1.764],
        6: [1.729, 1.822, 1.887, 1.944, 1.973],
        7: [1.828, 1.938, 2.020, 2.097, 2.139],
        8: [1.909, 2.032, 2.126, 2.220, 2.274],
        9: [1.977, 2.110, 2.215, 2.323, 2.387],
        10: [2.036, 2.176, 2.290, 2.410, 2.482],
        11: [2.088, 2.234, 2.355, 2.485, 2.564],
        12: [2.134, 2.285, 2.412, 2.550, 2.636],
        13: [2.175, 2.331, 2.462, 2.607, 2.699],
        14: [2.213, 2.371, 2.507, 2.659, 2.755],
        15: [2.247, 2.409, 2.549, 2.705, 2.806],
        16: [2.279, 2.443, 2.585, 2.747, 2.852],
        17: [2.309, 2.475, 2.620, 2.785, 2.894],
    }
    return critical_values.get(n)

# TODO
def checkRuleGrubbs(lst, confidence_level = 1):
    length = len(lst)
    lst = lst
    mark = []
    for i in range(0, length):
        n = Gi(lst[i], lst)
        if n > Grubbs_Table(len(lst))[confidence_level - 1]:
            mark.append(i)
    for j in mark:
        lst.pop(j)
    return lst


#3:狄克逊准则;
def checkRuleDixon():
    """Dixon criterion — not implemented yet; placeholder returning 0."""
    return 0

#4:稳健数据处理方法
def checkRuleRobust():
    """Robust data-processing method — not implemented yet; returns 0."""
    return 0





################################################
# 预处理参数数据库保存，数据分析参数数据库保存
def savePreprocessedDataIntoDB(conf):
    """
    Persist a preprocessing parameter combination and return its row id.

    If an identical combination already exists its id is returned;
    otherwise a new row is inserted into the parameter-config table.
    Returns an error string when the insert fails.
    """
    print("保存预处理参数：")
    print(conf)
    # Bugfix: 'DensityMax ' had a trailing space, producing an invalid
    # column name in the INSERT statement.
    cols = ['Type', 'HeightStandardSet', 'TimeStandardSet', 'ExtremeValueCheck', 'PhysicsConsistencyCheck', 'TimeConsistencyCheck', 'StatisticsCheck',
            'BoxplotCheck', 'SaveFlag', 'HeightReslution', 'HeightReslutionUnit', 'TimeReslution', 'TimeReslutionUnit', 'FilterRule', 'ExceptionRule',
            'DataSource', 'TemperatureMax', 'TemperatureMin', 'DensityMax', 'DensityMin', 'WindSpeedMax', 'WindSpeedMin']

    query_conditon = 'Type="%d" AND HeightStandardSet="%d" AND TimeStandardSet="%d" AND ExtremeValueCheck="%d" AND PhysicsConsistencyCheck="%d" AND TimeConsistencyCheck="%d" AND \
                    StatisticsCheck="%d" AND BoxplotCheck="%d" AND SaveFlag="%d" AND HeightReslution="%s" AND HeightReslutionUnit="%d" AND TimeReslution="%s" AND TimeReslutionUnit="%d" AND \
                    FilterRule="%d" AND ExceptionRule="%d" AND DataSource="%d" AND TemperatureMax="%.2f" AND TemperatureMin="%.2f" AND DensityMax="%.2f" AND DensityMin="%.2f" AND WindSpeedMax="%.2f" AND \
                    WindSpeedMin="%.2f"' % \
                     (conf['Type'], conf['HeightStandardSet'], conf['TimeStandardSet'], conf['ExtremeValueCheck'], conf['PhysicsConsistencyCheck'], conf['TimeConsistencyCheck'], \
                      conf['StatisticsCheck'], conf['BoxplotCheck'], conf['SaveFlag'], conf['HeightReslution'], conf['HeightReslutionUnit'], conf['TimeReslution'], conf['TimeReslutionUnit'], \
                      conf['FilterRule'], conf['ExceptionRule'], conf['DataSource'], str_to_float(conf['TemperatureMax']), str_to_float(conf['TemperatureMin']), str_to_float(conf['DensityMax']), \
                      str_to_float(conf['DensityMin']), str_to_float(conf['WindSpeedMax']), str_to_float(conf['WindSpeedMin']))

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_system_parameter_config_table, query_conditon)
    print(ret, len(ret))
    if len(ret) > 0:
        # Combination already present: return its id.
        return ret[0][0]

    # Combination not found: insert the new parameter set.
    col_str = ','.join(cols)
    value_str = '"%d", "%d", "%d", "%d", "%d", "%d", "%d", "%d", "%d", "%s", "%d", "%s", "%d", "%d", "%d",' \
                ' "%d", "%.2f", "%.2f", "%.2f", "%.2f", "%.2f", "%.2f"' % \
                (conf['Type'], conf['HeightStandardSet'], conf['TimeStandardSet'], conf['ExtremeValueCheck'], conf['PhysicsConsistencyCheck'], conf['TimeConsistencyCheck'], \
                 conf['StatisticsCheck'], conf['BoxplotCheck'], conf['SaveFlag'], conf['HeightReslution'], conf['HeightReslutionUnit'], conf['TimeReslution'], conf['TimeReslutionUnit'], \
                 conf['FilterRule'], conf['ExceptionRule'], conf['DataSource'], str_to_float(conf['TemperatureMax']), str_to_float(conf['TemperatureMin']), str_to_float(conf['DensityMax']),
                 str_to_float(conf['DensityMin']), str_to_float(conf['WindSpeedMax']), str_to_float(conf['WindSpeedMin']))

    print("插入预处理参数： %s" % value_str)
    head_id = insert_db(db_system_parameter_config_table, col_str, value_str)
    print("预处理参数表插入ID： %d" % head_id)
    if head_id == -1:
        return "[Error]：预处理参数插入错误！"

    return head_id


def saveParameterIntoDb():
    """Placeholder for saving parameters to the DB; not implemented (returns 0)."""
    return 0

def getParameterFromDb():
    """Placeholder for loading parameters from the DB; not implemented (returns 0)."""
    return 0


def getLatestConfigParameterFromDb(type):
    """
    Fetch the most recent preprocessing / data-analysis parameter row
    for the given Type; returns "" when none exists.

    NOTE: the parameter shadows the builtin `type`; the name is kept for
    caller compatibility.
    """
    query_conditon = 'Type = %d ORDER BY Id DESC limit 1' % type

    print("query_conditon: %s" % query_conditon)
    rows = select_db(db_system_parameter_config_table, query_conditon)
    print(rows, len(rows))
    if not rows:
        return ""
    return dbQueryResult2JsonObj(rows, db_system_parameter_config_table)[0]


def saveAnalysisParametersDataIntoDB(conf):
    """
    Save data-analysis parameters, deduplicating on the full combination.

    Looks for an existing parameter-config row that matches every field
    built from *conf*; if found, returns that row's Id.  Otherwise inserts
    a new row and returns the new Id, or an error string when insert_db
    reports failure (-1).

    NOTE(review): several DB columns map to differently named conf keys,
    e.g. Type <- conf['TaskType'], PolynomialPower <- conf['FunctionPower'],
    FitFigureSavePath <- conf['FigSavePath'] — confirm against callers.
    """
    print("保存数据分析参数：")
    print(conf)
    cols = ['Type', 'HeightReslution', 'HeightReslutionStep', 'HeightReslutionUnit', 'TimeReslution',
            'TimeReslutionStep', 'TimeReslutionUnit', 'DataSource', 'StatisticMethod', 'ImputationMethod',
            'FitMethod', 'PolynomialPower', 'FitFigureSavePath', 'QueryDeviceId', 'QueryStationId', 'QueryValueType',
            'QueryTimeStart', 'QueryTimeEnd', 'QueryHeightStart', 'QueryHeightEnd']

    # Exact-match condition over every column of the parameter combination.
    query_conditon = 'Type="%d" AND HeightReslution="%d" AND HeightReslutionStep="%d" AND HeightReslutionUnit="%d" AND ' \
                     'TimeReslution="%d" AND TimeReslutionStep="%d" AND TimeReslutionUnit="%d" AND DataSource="%d" AND ' \
                     'StatisticMethod="%s" AND ImputationMethod="%s" AND FitMethod="%s" AND PolynomialPower="%d" AND ' \
                     'FitFigureSavePath="%s" AND QueryDeviceId="%d" AND QueryStationId="%d" AND QueryValueType="%s" ' \
                     'AND QueryTimeStart="%s" AND QueryTimeEnd="%s" AND QueryHeightStart="%s" AND QueryHeightEnd="%s"' % \
                     (conf['TaskType'], conf['HeightReslution'], conf['HeightReslutionStep'], conf['HeightReslutionUnit'],
                      conf['TimeReslution'], conf['TimeReslutionStep'], conf['TimeReslutionUnit'], conf['DataSource'],
                      conf['StatisticMethod'], conf['ImputationMethod'], conf['FitMethod'], conf['FunctionPower'],
                      conf['FigSavePath'], conf['DeviceId'], conf['StationId'], conf['ValueType'],
                      conf['TimeStart'], conf['TimeEnd'], conf['HeightStart'], conf['HeightEnd'])

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_system_parameter_config_table, query_conditon)
    print(ret, len(ret))
    if len(ret) > 0:
        # Combination already exists: return its Id (first column of first row).
        return ret[0][0]

    # No existing combination: insert a new parameter row.
    col_str = ','.join(cols)
    value_str = '"%d", "%d", "%d", "%d", "%d", "%d", "%d", "%d", "%s", "%s", "%s", "%d", ' \
                '"%s", "%d", "%d", "%s", "%s", "%s", "%s", "%s"' % \
                (conf['TaskType'], conf['HeightReslution'], conf['HeightReslutionStep'], conf['HeightReslutionUnit'],
                 conf['TimeReslution'], conf['TimeReslutionStep'], conf['TimeReslutionUnit'], conf['DataSource'],
                 conf['StatisticMethod'], conf['ImputationMethod'], conf['FitMethod'], conf['FunctionPower'],
                 conf['FigSavePath'], conf['DeviceId'], conf['StationId'], conf['ValueType'],
                 conf['TimeStart'], conf['TimeEnd'], conf['HeightStart'], conf['HeightEnd'])

    print("插入数据分析参数： %s" % value_str)
    head_id = insert_db(db_system_parameter_config_table, col_str, value_str)
    print("数据分析参数表插入ID： %d" % head_id)
    if head_id == -1:
        #print("[Error]：数据分析参数插入错误！")
        return "[Error]：数据分析参数插入错误！"

    return head_id


def saveDataCompareParametersDataIntoDB(conf):
    """
    Save data-comparison parameters, deduplicating on the full combination.

    Looks for an existing parameter-config row matching every field built
    from *conf*; if found, returns that row's Id.  Otherwise inserts a new
    row and returns the new Id, or an error string when insert_db reports
    failure (-1).

    NOTE(review): the log strings below still say "数据分析" (data analysis)
    — apparently copied from saveAnalysisParametersDataIntoDB; they are
    runtime output and left unchanged here.
    """
    print("保存数据分析参数：")
    print(conf)
    cols = ['Type', 'HeightReslution', 'HeightReslutionStep', 'HeightReslutionUnit', 'TimeReslution',
            'TimeReslutionStep', 'TimeReslutionUnit', 'DataSource', 'StatisticMethod', 'ImputationMethod',
            'CompareMethod', 'CompareType', 'QueryTimeStart', 'QueryTimeEnd', 'QueryTimeStart2', 'QueryTimeEnd2']

    # Exact-match condition over every column of the parameter combination.
    query_conditon = 'Type="%d" AND HeightReslution="%d" AND HeightReslutionStep="%d" AND HeightReslutionUnit="%d" AND ' \
                     'TimeReslution="%d" AND TimeReslutionStep="%d" AND TimeReslutionUnit="%d" AND DataSource="%d" AND ' \
                     'StatisticMethod="%s" AND ImputationMethod="%s" AND CompareMethod="%s" AND CompareType="%d" AND ' \
                     'QueryTimeStart="%s" AND QueryTimeEnd="%s" AND QueryTimeStart2="%s" AND QueryTimeEnd2="%s"' % \
                     (conf['TaskType'], conf['HeightReslution'], conf['HeightReslutionStep'], conf['HeightReslutionUnit'],
                      conf['TimeReslution'], conf['TimeReslutionStep'], conf['TimeReslutionUnit'], conf['DataSource'],
                      conf['StatisticMethod'], conf['ImputationMethod'], conf['CompareMethod'], conf['CompareType'],
                      conf['TimeStart'], conf['TimeEnd'], conf['TimeStart2'], conf['TimeEnd2'])

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_system_parameter_config_table, query_conditon)
    print(ret, len(ret))
    if len(ret) > 0:
        # Combination already exists: return its Id (first column of first row).
        return ret[0][0]

    # No existing combination: insert a new parameter row.
    col_str = ','.join(cols)
    value_str = '"%d", "%d", "%d", "%d", "%d", "%d", "%d", "%d", "%s", "%s", "%s", "%d", "%s", "%s", "%s", "%s"' % \
                (conf['TaskType'], conf['HeightReslution'], conf['HeightReslutionStep'], conf['HeightReslutionUnit'],
                 conf['TimeReslution'], conf['TimeReslutionStep'], conf['TimeReslutionUnit'], conf['DataSource'],
                 conf['StatisticMethod'], conf['ImputationMethod'], conf['CompareMethod'], conf['CompareType'],
                 conf['TimeStart'], conf['TimeEnd'], conf['TimeStart2'], conf['TimeEnd2'])

    print("插入数据分析参数： %s" % value_str)
    head_id = insert_db(db_system_parameter_config_table, col_str, value_str)
    print("数据分析参数表插入ID： %d" % head_id)
    if head_id == -1:
        # print("[Error]：数据分析参数插入错误！")
        return "[Error]：数据分析参数插入错误！"

    return head_id


#############################################
# 以下为获取新增数据接口
def getLatestDataBatchId(task_type=-1):
    """
    Return the most recently completed task record (taskId/BatchId).

    Only rows with Status = 0 (finished) are considered; when task_type is
    given (not -1) the search is restricted to that TaskType.  Returns the
    newest matching row as a dict, or "" when nothing is found.
    """
    if task_type != -1:
        condition = 'Status = 0 AND TaskType = %d ORDER BY Id DESC limit 1' % task_type
    else:
        condition = 'Status = 0 ORDER BY Id DESC limit 1'

    print("query_conditon: %s" % condition)
    rows = select_db(db_task_table, condition)
    print(rows, len(rows))
    if not rows:
        return ""
    # Row layout: (Id, KeyName, TaskType, Status, CreateTime, UpdateTime)
    return dbQueryResult2JsonObj(rows, db_task_table)[0]


def getHeadIdByBatchId(batch_id, level=1, flag=1):
    """
    Fetch probe raw-data head rows for a batch (BatchId --> HeadId).

    Only rows with RawDataStatus = 0 (imported raw data) are matched.
    flag: truthy -> list of (Id, ValueType);
          falsy  -> list of (Id, ValueType, DataStartTime, DataEndTime).
    Returns an empty list when nothing matches.
    """
    condition = 'BatchId = "%s" AND Level = "%d" AND RawDataStatus = 0' % (batch_id, level)
    print("query_conditon: %s" % condition)
    rows = select_db(db_data_head_table, condition)
    if not rows:
        print("Error: 文件头信息表中未找到BatchID")
        return []

    items = dbQueryResult2JsonObj(rows, db_data_head_table)
    # ValueType encodes the measurement kind (temperature/wind/density/pressure).
    if flag:
        head_list = [(it['Id'], it['ValueType']) for it in items]
    else:
        head_list = [(it['Id'], it['ValueType'], it['DataStartTime'], it['DataEndTime'])
                     for it in items]
    print(head_list)
    return head_list


# 评定算法中使用 TODO 兼容并取代getHeadIdByBatchId()
def getHeadIdByBatchIdV2(batch_id, level=1, flag=1):
    """
    Fetch probe raw-data head rows for a batch (BatchId --> HeadId).

    level: 1 = L2 data (default), 0 = L0 data.
    flag: truthy -> list of (Id, ValueType);
          falsy  -> list of (Id, ValueType, DataStartTime, DataEndTime).
    Returns an empty list when nothing matches.
    """
    head_list = []

    # query_conditon = 'BatchId = "%s" AND Level = %d' % (batch_id, level)
    # Imported raw data TODO L2 data
    # query_conditon = 'BatchId = "%s" AND Level = "%d" AND (DataStatus = 1 OR DataStatus = -1)' % (batch_id, level)
    # NOTE(review): `(DataStatus >= 1 OR DataStatus >= -1)` reduces to just
    # `DataStatus >= -1`; the commented-out equality version above may be the
    # real intent — confirm before changing.
    query_conditon = 'BatchId = "%s" AND Level = "%d" AND (DataStatus >= 1 OR DataStatus >= -1)' % (batch_id, level)
    if level == 0:
        # L0 data: only imported raw rows (RawDataStatus = 0).
        query_conditon = 'BatchId = "%s" AND Level = "%d" AND RawDataStatus = 0' % (batch_id, level)  # Imported raw data TODO L0 data

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_data_head_table, query_conditon)
    print(ret, len(ret))
    if len(ret) > 0:
        ret = dbQueryResult2JsonObj(ret, db_data_head_table)
        for item in ret:
            print(item)
            # ValueType encodes the measurement kind (temperature/wind/density/pressure).
            # (item0, item5) = (HeadId, ValueType)
            # head_list.append((item[0], item[5]))
            if flag:
                head_list.append((item['Id'], item['ValueType']))
            else:
                head_list.append((item['Id'], item['ValueType'], item['DataStartTime'], item['DataEndTime']))
        print(head_list)

    else:
        print("Error: 文件头信息表中未找到BatchID")

    return head_list


def getHeadIdByBatchIdV3(conf, batch_id, level=1, flag=1):
    """
    Fetch probe data head rows for a batch from the head table named by
    conf['DbHeadTable'] (BatchId --> HeadId), ordered by DataStartTime.

    level: 1 = L2 data (default), 0 = L0 data.
    flag: truthy -> list of (Id, ValueType);
          falsy  -> list of (Id, ValueType, DataStartTime, DataEndTime).
    Returns an empty list when nothing matches.
    """
    head_list = []
    # query_conditon = 'BatchId = "%s" AND Level = "%d" AND RawDataStatus = -1' % (batch_id, level)  # file-imported data
    query_conditon = 'BatchId = "%s" AND Level = "%d"' % (batch_id, level)
    query_conditon += " ORDER BY DataStartTime ASC"
    print("query_conditon: %s" % query_conditon)
    ret = select_db(conf['DbHeadTable'], query_conditon)
    if len(ret) > 0:
        # Bug fix: rows were decoded with the global bt_data_head schema even
        # though conf['DbHeadTable'] was queried; decode with the same table.
        ret = dbQueryResult2JsonObj(ret, conf['DbHeadTable'])
        for item in ret:
            # ValueType encodes the measurement kind (temperature/wind/density/pressure).
            if flag:
                head_list.append((item['Id'], item['ValueType']))
            else:
                head_list.append((item['Id'], item['ValueType'], item['DataStartTime'], item['DataEndTime']))
        print(head_list)

    else:
        print("Error: 文件头信息表中未找到BatchID")

    return head_list


def getHeadIdInfoByHeadId(head_id, flag=1):
    """
    Fetch head info for a single HeadId (RawDataStatus 0 or -1 only).

    flag: truthy -> (Id, ValueType);
          falsy  -> (Id, ValueType, DataStartTime, DataEndTime).
    Returns "" when nothing matches.
    """
    condition = 'Id = "%d" AND (RawDataStatus = 0 OR RawDataStatus = -1)' % head_id
    print("query_conditon: %s" % condition)
    rows = select_db(db_data_head_table, condition)
    if not rows:
        print("Error: 文件头信息表中未找到head_id且已录入状态数据")
        return ""

    first = dbQueryResult2JsonObj(rows, db_data_head_table)[0]
    # ValueType encodes the measurement kind (temperature/wind/density/pressure).
    if flag:
        return (first['Id'], first['ValueType'])
    return (first['Id'], first['ValueType'], first['DataStartTime'], first['DataEndTime'])


def getHeadIdInfoByHeadIdV2(conf, head_id, flag=1):
    """
    Fetch head info for a single HeadId from the head table named by
    conf['DbHeadTable'] (RawDataStatus 0 or -1 only).

    flag: truthy -> (Id, ValueType);
          falsy  -> (Id, ValueType, DataStartTime, DataEndTime).
    Returns "" when nothing matches.
    """
    db_table = conf['DbHeadTable']
    query_conditon = 'Id = "%d" AND (RawDataStatus = 0 OR RawDataStatus = -1)' % head_id
    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_table, query_conditon)
    if len(ret) > 0:
        # Bug fix: rows were decoded with the global bt_data_head schema even
        # though db_table (conf['DbHeadTable']) was queried; use db_table.
        ret = dbQueryResult2JsonObj(ret, db_table)
        # ValueType encodes the measurement kind (temperature/wind/density/pressure).
        if flag:
            return (ret[0]['Id'], ret[0]['ValueType'])
        else:
            return (ret[0]['Id'], ret[0]['ValueType'], ret[0]['DataStartTime'], ret[0]['DataEndTime'])
    else:
        print("Error: 文件头信息表中未找到head_id且已录入状态数据")

    return ""


# 评定算法中使用
def getHeadFullInfoByHeadId(head_id):
    """
    Return the full head row for head_id as a dict, or "" when not found.
    """
    condition = 'Id = "%d"' % head_id
    print("query_conditon: %s" % condition)
    rows = select_db(db_data_head_table, condition)
    if len(rows) > 0:
        return dbQueryResult2JsonObj(rows, db_data_head_table)[0]
    print("Error: 文件头信息表中未找到head_id且已录入状态数据")
    return ""


def getHeadFullInfoByHeadIdV2(conf, head_id):
    """
    Return the full head row for head_id from the table named by
    conf['DbHeadTable'] as a dict, or "" when not found.
    """
    table = conf['DbHeadTable']
    condition = 'Id = "%d"' % head_id
    print("query_conditon: %s" % condition)
    rows = select_db(table, condition)
    if len(rows) > 0:
        return dbQueryResult2JsonObj(rows, table)[0]
    print("Error: 文件头信息表中未找到head_id且已录入状态数据")
    return ""


# head_id_str_list 的元素：
# s1_headid
def copyHeadInfoToTempHeadTable(conf, head_id_str_list):
    """
    Copy head rows identified by head_id_str_list from their source head
    table into conf['DbSaveHeadTable'] (normally: official head table ->
    temporary head table).

    Each element of head_id_str_list has the form "<systype>_<headid>",
    e.g. "s1_17".  Returns a dict mapping each original key to the newly
    inserted head Id (insert_db returns -1 on failure).
    """
    # bt_head_table_from = conf['DbHeadTable']
    bt_head_table_to = conf['DbSaveHeadTable']
    db_head_table_orig = "bt_data_head"

    head_id_pair_json = {}
    for key in head_id_str_list:
        db_table_head_type = key.split('_')[0]
        head_id = int(key.split('_')[1])

        # Point conf['DbHeadTable'] at the per-systype L2 head table when it
        # is not already a fully-qualified name.
        # Bug fix: a trailing comma previously made this assignment a 1-tuple,
        # which broke the subsequent select in getHeadFullInfoByHeadIdV2.
        if len(conf['DbHeadTable']) < len("bt_data_head_sx_l2"):
            conf['DbHeadTable'] = db_head_table_orig + '_' + db_table_head_type + '_l2'
        data_json = getHeadFullInfoByHeadIdV2(conf, head_id)

        # Build the column list and the matching value list in one pass.
        # Id (primary key), CreateTime and UpdateTime are DB-managed and
        # therefore skipped.
        db_cols = []
        tmp_list = []
        for k in data_json.keys():
            if k == "Id" or k == "CreateTime" or k == "UpdateTime":
                continue
            db_cols.append(k)
            if k == "Value":
                tmp_list.append('"%.8f"' % data_json[k])
            elif k == "BatchId":
                # The copied head gets the current task's BatchId; the
                # matching data rows are re-keyed accordingly elsewhere.
                tmp_list.append('"%s"' % conf['TaskId'])
            else:
                tmp_list.append('"%s"' % str(data_json[k]))
        db_cols.append('SysType')
        tmp_list.append('"%s"' % db_table_head_type)

        db_cols_str = ','.join(db_cols)
        data_str = ','.join(tmp_list)
        # SQL fix-up: stringified Python None becomes SQL NULL.
        data_str = data_str.replace('"None"', "NULL")

        head_id_new = insert_db(bt_head_table_to, db_cols_str, data_str)
        if head_id_new == -1:
            print("copyHeadInfoToTempHeadTable出错！！！")
        head_id_pair_json[key] = head_id_new

    return head_id_pair_json


def updateDataJsonInfo(head_id_pair_list, data_list_json):
    """
    Re-key data_list_json: replace every old head_id key with its new
    head_id looked up in head_id_pair_list, keeping the values untouched.
    """
    return {head_id_pair_list[old_key]: payload
            for old_key, payload in data_list_json.items()}


def getHeadIdByHeadInfo(head_info, conf, flag=1, data_level=1):
    """
    Find the L0 head rows that correspond to an L2 head's metadata.

    head_info keys used: StationId, DeviceId, DataStartTime, DataEndTime,
    DataTypeL0.  For data_level == 1 the head table comes from
    conf['DbHeadTable'] (falling back to bt_data_head when it is None);
    otherwise the canonical head table is used, since L0 data lives there.
    flag: truthy -> list of (Id, ValueType);
          falsy  -> list of (Id, ValueType, DataStartTime, DataEndTime).
    Returns an empty list when nothing matches.
    """
    if data_level == 1:
        db_head_table = conf['DbHeadTable'] if conf['DbHeadTable'] is not None else db_data_head_table
    else:
        # L0 data is always stored in the official head table.
        db_head_table = db_data_head_table

    condition = 'StationId = "%d" AND DeviceId = "%d" AND Level = 0 AND ' \
                'DataStartTime >= "%s" AND DataEndTime <= "%s" AND DataType = "%s"' \
                % (head_info['StationId'], head_info['DeviceId'], head_info['DataStartTime'],
                   head_info['DataEndTime'], head_info['DataTypeL0'])

    print("query_conditon: %s" % condition)
    rows = select_db(db_head_table, condition)
    print(rows, len(rows))
    if not rows:
        print("Error: 文件头信息表中未找到对应的L0数据头")
        return []

    items = dbQueryResult2JsonObj(rows, db_head_table)
    # ValueType encodes the measurement kind (temperature/wind/density/pressure).
    if flag:
        return [(it['Id'], it['ValueType']) for it in items]
    return [(it['Id'], it['ValueType'], it['DataStartTime'], it['DataEndTime']) for it in items]


# getHeadIdByHeadInfo新版本，区别是 head_info['DataTypeL0'] 为数组
# 如：["Rm_DV", "Ry_DV", "Fe_DE", "Fe_DN"]
def getHeadIdByHeadInfo2309(head_info, conf, flag=1, data_level=1):
    """
    Find the L0 head rows that correspond to an L2 head's metadata.

    Newer variant of getHeadIdByHeadInfo: head_info['DataTypeL0'] is a list
    (e.g. ["Rm_DV", "Ry_DV", "Fe_DE", "Fe_DN"]) and each entry is matched
    with a LIKE pattern; results for all entries are concatenated.
    head_info keys used: StationId, DeviceId, DataStartTime, DataEndTime,
    DataTypeL0.
    flag: truthy -> list of (Id, ValueType);
          falsy  -> list of (Id, ValueType, DataStartTime, DataEndTime).
    Returns an empty list when nothing matches.
    """
    head_list = []
    if data_level == 1:
        db_head_table = conf['DbHeadTable'] if conf['DbHeadTable'] is not None else db_data_head_table
    else:
        # L0 data is always stored in the official head table.
        db_head_table = db_data_head_table

    for data_type in head_info['DataTypeL0']:
        # %%%s%% renders as %<data_type>% in the SQL LIKE pattern.
        query_conditon = 'StationId = "%d" AND DeviceId = "%d" AND Level = 0 AND ' \
                         'DataStartTime >= "%s" AND DataEndTime <= "%s" AND DataType like "%%%s%%"' \
                         % (head_info['StationId'], head_info['DeviceId'], head_info['DataStartTime'],
                            head_info['DataEndTime'], data_type)

        print("query_conditon2309: %s" % query_conditon)
        ret = select_db(db_head_table, query_conditon)
        print(ret, len(ret))
        if len(ret) > 0:
            ret = dbQueryResult2JsonObj(ret, db_head_table)
            for item in ret:
                # ValueType encodes the measurement kind (temperature/wind/density/pressure).
                if flag:
                    head_list.append((item['Id'], item['ValueType']))
                else:
                    head_list.append((item['Id'], item['ValueType'], item['DataStartTime'], item['DataEndTime']))
        else:
            print("Error: 文件头信息表中未找到对应的L0数据头")

    return head_list


def getRawDataByHeadId(head_id, level=1):
    """
    Fetch raw rows for a HeadId.

    level: 1 -> L2 raw table (default), 0 -> L0 raw table.
    Returns the raw row tuples, or "" when nothing is found.
    """
    db_table = db_raw_l0_data_table if level == 0 else db_raw_l2_data_table

    condition = 'HeadId = %d' % (head_id)
    print("query_conditon: %s" % condition)
    rows = select_db(db_table, condition)
    print("获取的原始数据条数：%d" % len(rows))
    if not rows:
        print("Error: 未找到原始L0或L2数据")
        return ""
    return rows


def getRawDataByHeadIdInJsonObjectList(conf, head_id, level=1):
    """
    Fetch raw rows for a HeadId from conf['DbTable'] as a list of dicts.

    `level` is kept for interface compatibility; the table is always taken
    from conf.  Returns "" when nothing is found.
    """
    db_table = conf['DbTable']

    condition = 'HeadId = %d' % (head_id)
    print("query_conditon: %s" % condition)
    rows = select_db(db_table, condition)
    print("获取的原始数据条数：%d" % len(rows))
    if not rows:
        print("Error: 未找到原始L0或L2数据")
        return ""
    return dbQueryResult2JsonObj(rows, db_table)

###################################################
# 以下为数据分析、不确定度评定用函数
def getPreprocessedDataByHeadId(head_id, input_data_loc, level=1, conf=None):
    """
    Fetch preprocessed rows for a HeadId as a list of dicts.

    input_data_loc: 0 -> data was not persisted and lives in the temporary
                    table; 1 -> data is in the official preprocessed table.
    level: 1 = L2 (default), 0 = L0.
    conf:  optional dict; when conf['DbTable'] is a non-empty string, that
           table is queried instead (ordered by Height).
    Returns a list of dicts, or "" when no rows match.
    """
    # Bug fix: the default was `conf={}`, a mutable default argument shared
    # across calls; use None as sentinel instead.
    if conf is None:
        conf = {}

    # Default: keep only rows not flagged as abnormal.
    query_conditon = 'HeadId = %d AND IsAbnormal = 0' % head_id

    if 'DbTable' in conf and conf['DbTable'] != "":
        db_table = conf['DbTable']
        # Order by height so callers get a stable vertical profile.
        query_conditon = 'HeadId = %d ORDER BY Height ASC' % head_id
    else:
        db_table = db_raw_l2_precessed_table
        if level == 0:
            db_table = db_raw_l0_precessed_table

        # Not persisted -> data lives in the temporary table bt_temporary_data.
        if input_data_loc == 0:
            db_table = db_temporary_data_table_l2
            query_conditon = 'HeadId = %d' % head_id

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_table, query_conditon)
    print("获取的原始数据条数：%d" % len(ret))
    if len(ret) > 0:
        return dbQueryResult2JsonObj(ret, db_table)
    else:
        print("Error: 未找到原始L0或L2数据")
        return ""


def getPreprocessedDataByHeadIdV2(data_source, head_id, input_data_loc, level=1):
    """
    Fetch preprocessed rows for a HeadId.

    data_source: 0 new data; 1 all preprocessed data; 2 unprocessed (raw).
    input_data_loc: 0 -> temp table (not persisted); 1 -> persisted table.
    level: 1 = L2 (default), 0 = L0.
    Returns the raw row tuples, or "" when nothing matches.
    """
    db_table = db_raw_l2_precessed_table
    if data_source == 1 and level == 0:
        db_table = db_raw_l0_precessed_table

    # Temp table and preprocessed table differ in columns; the default
    # filter drops rows flagged as abnormal.
    condition = 'HeadId = %d AND IsAbnormal = 0' % head_id

    if data_source == 2 and level == 1:
        db_table = db_raw_l2_data_table
        condition = 'HeadId = %d' % head_id

    # Not persisted -> data lives in the temporary table bt_temporary_data.
    if input_data_loc == 0:
        db_table = db_temporary_data_table_l2

    print("query_conditon: %s" % condition)
    rows = select_db(db_table, condition)
    print("获取的原始数据条数：%d" % len(rows))
    if not rows:
        print("Error: 未找到原始L0或L2数据")
        return ""
    return rows


def getPreprocessedDataByHeadIdV3(head_id, conf, flag=0):
    """
    Fetch preprocessed rows for a HeadId as a list of dicts.

    flag = 0 reads conf['DbTable'] (source table); any other value reads
    conf['DbSaveTable'] (destination table).  Raw tables are ordered by
    Height; processed tables additionally filter IsAbnormal = 0.
    Returns a list of dicts, or "" when no rows match.
    """
    db_table = conf['DbTable'] if flag == 0 else conf['DbSaveTable']

    if db_table in (db_raw_l2_data_table, db_raw_l0_data_table):
        condition = 'HeadId = %d ORDER BY Height ASC' % head_id
    else:
        condition = 'HeadId = %d AND IsAbnormal = 0' % head_id

    print("query_conditon: %s" % condition)
    rows = select_db(db_table, condition)
    print("获取的原始数据条数：%d" % len(rows))
    if not rows:
        print("Error: 未找到原始L0或L2数据")
        return ""
    return dbQueryResult2JsonObj(rows, db_table)


def getPreprocessedDataByHeadIdV3WithHeightIndex(head_id, conf, flag=0):
    """
    Fetch preprocessed rows for a HeadId as a list of dicts.

    flag = 0 reads conf['DbTable'] (source table); any other value reads
    conf['DbSaveTable'] (destination table).  Raw tables are ordered by
    Height; processed tables additionally filter IsAbnormal = 0.
    Returns a list of dicts, or "" when no rows match.

    NOTE(review): currently byte-identical to getPreprocessedDataByHeadIdV3;
    the "WithHeightIndex" behavior the name promises is apparently not
    implemented yet — confirm intent with callers.
    """
    if flag == 0:
        db_table = conf['DbTable']
    else:
        db_table = conf['DbSaveTable']

    if db_table == db_raw_l2_data_table or db_table == db_raw_l0_data_table:
        # query_conditon = 'HeadId = %d ORDER BY CreateTime DESC' % head_id
        query_conditon = 'HeadId = %d ORDER BY Height ASC' % head_id
    else:
        query_conditon = 'HeadId = %d AND IsAbnormal = 0' % head_id

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_table, query_conditon)
    # print(ret, len(ret))
    print("获取的原始数据条数：%d" % len(ret))
    if len(ret) > 0:
        # return ret
        return dbQueryResult2JsonObj(ret, db_table)
    else:
        print("Error: 未找到原始L0或L2数据")
        return ""


def getPreprocessedDataByHeadIdInJsonObjectList(data_source, head_id, input_data_loc, level=1):
    """
    Fetch preprocessed rows for a HeadId as a list of dicts.

    data_source: 0 new data; 1 all preprocessed data; 2 unprocessed (raw).
    input_data_loc: 0 -> temp table (not persisted); 1 -> persisted table.
    level: 1 = L2 (default), 0 = L0.
    Returns a list of dicts, or "" when nothing matches.
    """
    db_table = db_raw_l2_precessed_table
    if data_source == 1 and level == 0:
        db_table = db_raw_l0_precessed_table

    # Temp table and preprocessed table differ in columns; the default
    # filter drops rows flagged as abnormal.
    condition = 'HeadId = %d AND IsAbnormal = 0' % head_id

    if data_source == 2 and level == 1:
        db_table = db_raw_l2_data_table
        condition = 'HeadId = %d' % head_id

    # Not persisted -> data lives in the temporary table bt_temporary_data.
    if input_data_loc == 0:
        db_table = db_temporary_data_table_l2

    print("query_conditon: %s" % condition)
    rows = select_db(db_table, condition)
    print("获取的原始数据条数：%d" % len(rows))
    if not rows:
        print("Error: 未找到原始L0或L2数据")
        return ""
    return dbQueryResult2JsonObj(rows, db_table)


####################################################
# GUM / MCM 不确定度评定算法中，筛选L0数据
def getL0RawDataByHeadId(head_id, level=0, conf=None):
    """
    Fetch raw L0 rows for a HeadId as a list of dicts.

    level and conf are accepted for interface compatibility; the L0 raw
    table is always queried.  Returns "" when no rows are found.
    """
    # Bug fix: the default was `conf={}`, a mutable default argument shared
    # across calls; use None as sentinel instead.
    if conf is None:
        conf = {}

    db_table = db_raw_l0_data_table
    query_conditon = 'HeadId = %d' % head_id

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_table, query_conditon)
    print("获取的L0原始数据条数：%d" % len(ret))
    if len(ret) > 0:
        return dbQueryResult2JsonObj(ret, db_table)
    else:
        print("Error: 未找到原始L0或L2数据")
        return ""


def getL0RawDataByL2HeightRange(height_start, height_end, head_id_list, label, flag=1):
    """
    Fetch raw L0 values within a height band across several HeadIds.

    Example query:
    SELECT * FROM `bt_data_l0_raw` WHERE HeadId IN (17,18,19,20,21)
    AND Label = "RayVHS" AND Height >= 30.5 AND Height < 31.5

    flag: 1 (default) -> `label` is a single label; returns a flat list of
          Value fields.
          0 -> `label` is an SQL in-list string; returns a list of
          {Height, Value, Label} dicts.

    NOTE(review): error returns are inconsistent — -1 for an empty
    head_id_list but "" when no rows match; callers must handle both.
    NOTE(review): assumes head_id_list elements are strings (joined with
    ','); integer ids would raise in join — confirm at call sites.
    """
    if len(head_id_list) == 0:
        print("HeadId list不能为空")
        return -1

    head_id_list_str = ','.join(head_id_list)

    db_table = db_raw_l0_data_table
    if flag:
        query_conditon = 'HeadId in (%s) AND Label = "%s" AND Height >= %s AND Height < %s' % \
                     (head_id_list_str, label, height_start, height_end)
    else:
        query_conditon = 'HeadId in (%s) AND Label in (%s) AND Height >= %s AND Height < %s' % \
                         (head_id_list_str, label, height_start, height_end)

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_table, query_conditon)
    # print(ret, len(ret))
    print("获取的L0原始数据条数：%d" % len(ret))
    ret_arr = []
    if len(ret) > 0:
        # return ret
        ret = dbQueryResult2JsonObj(ret, db_table)
        for item in ret:
            if flag:
                ret_arr.append(item['Value'])
            else:
                ret_arr.append({"Height": item['Height'],
                                "Value": item['Value'],
                                "Label": item['Label']})

    else:
        print("Error: 未找到原始L0或L2数据")
        return ""
    return ret_arr


#####################################################
# 数据分析：将数据库查到数据转成向量：高度X, 探测值Y
def getDataByColumns(data_list):
    """
    Split query rows into parallel column vectors: heights (X), values (Y).

    Accepts only a tuple of row dicts; returns (colX, colY), or an error
    string on a type mismatch.
    """
    print(type(data_list))
    if not isinstance(data_list, tuple):
        print("Error: 数据类型错误，无法获取列向量!")
        return "Error: 数据类型错误，无法获取列向量"

    heights = [str_to_float(row['Height']) for row in data_list]
    values = [str_to_float(row['Value']) for row in data_list]
    return (heights, values)


def getDataByColumnsV2(data_list):
    """
    Split query rows into parallel column vectors: heights (X), values (Y).

    Accepts only a list of row dicts; returns (colX, colY), or an error
    string on a type mismatch.
    """
    print(type(data_list))
    if not isinstance(data_list, list):
        print("Error: 数据类型错误，无法获取列向量!")
        return "Error: 数据类型错误，无法获取列向量"

    heights = [str_to_float(row['Height']) for row in data_list]
    values = [str_to_float(row['Value']) for row in data_list]
    return (heights, values)