#coding=utf-8
import pandas as pd
import numpy as np
from utils.mylib_db import *
from utils.mylib_utils import *
from decimal import Decimal
import copy
import re

'''
Project-local utilities.
1. State-sync table updated by external callers.
2. In setTaskStatus, the task-kind parameter `type` means:
   0: data ingest; 1: data preprocessing; 2: uncertainty evaluation;
   3: data-analysis processing; 4: data-analysis polynomial fitting.

'''
EPSINON = 1e-6          # tolerance used for float comparisons
NoneStr = 'NoneNone'        # sentinel string (distinct from the DB's default None fill)
NoneValue = -999999.99        # sentinel numeric value standing in for None

db_sync_table = "sys_state_sync"
db_data_head_table = "bt_data_head"
db_raw_l0_precessed_table = "bt_data_l0"
db_raw_l2_precessed_table = "bt_data"
db_temporary_data_head_table = "bt_temporary_data_head"
db_temporary_data_table_l2 = "bt_temporary_data"
db_temporary_data_table_l0 = "bt_temporary_data_l0"

db_raw_l2_data_table = "bt_data_l2_raw"                 # raw L2 data table
db_raw_l0_data_table = "bt_data_l0_raw"                 # raw L0 data table

# Shared feature: python plotting UI load; figures are persisted in this table
db_table_draw_figure = "bt_data_figure"

# Default number of workers processing data concurrently.
# The evaluation module defaults to 1; every other module uses this value.
DEFAULT_PROCESS_NUM = 1
# DEFAULT_PROCESS_NUM_FOR_STORE = 1      # default: 1 process reads files (fix per-station headId counting under multiprocessing first, then enable multiple processes)
# 新旧体制转换（临时使用）
ProbeDataSystemTypeL2Transfer = {
    "TSVP": "T",
    "TUSM": "T",
    "TMSL": "T",
    "TCSM": "T",
    "HWLS": "W",
    "HWSM": "W",
    "HWSL": "W",
    "WCSM": "W",
    "DUSM": "D",
    "NDNA": "D",
    "DCSM": "D",
    "T": "T",
    "W": "W",
    "D": "D",
    "P": "P",
    "N": "N",
    "F": "F",
    "W1": "W",    # 纬向风
    "W2": "W",    # 径向风
}


# system_code comes from the data file name, e.g. AL_LD2_T_20230821140049_L2.dat
# or AL_LD2_532nm_Rm_DV_20230821145059_L0.dat.
# ret[0] is used for the file storage directory; ret[1] for DB table sharding.
def getSystemTypeByCode2309(system_code):
    """Map a device/system code to [storage-dir name, table suffix].

    Unknown codes yield ["", ""].
    """
    code_map = {
        "LD1": ("System1", "s1"),
        "LD2": ("System2", "s2"),
        "LD3": ("System3", "s3"),
        "L3A": ("System3", "s3"),
        "L3B": ("System3", "s3"),
        "LD4": ("System4", "s4"),
        "LD5": ("System5", "s5"),
        "TKQQ": ("System6", "s6"),
        "BRS": ("System7", "s7"),
        "LJLD": ("System8", "s8"),
    }
    # Return a fresh list so callers may mutate the result safely.
    return list(code_map.get(system_code, ("", "")))


# Upgraded variant: falls back to parsing the file name when the code is unknown.
def getSystemTypeByCodeOrFileName2309(system_code, tempfilename):
    """Resolve [storage-dir name, table suffix] from the system code,
    falling back to the file name for balloon (TPWX) / rocket (ATDW) data.
    """
    code_map = {
        "LD1": ("System1", "s1"),
        "LD2": ("System2", "s2"),
        "LD3": ("System3", "s3"),
        "L3A": ("System3", "s3"),
        "L3B": ("System3", "s3"),
        "LD4": ("System4", "s4"),
        "LD5": ("System5", "s5"),
        "LJLD": ("System8", "s8"),
    }
    if system_code in code_map:
        return list(code_map[system_code])
    parts = re.split("_+", tempfilename)
    print("判断系统类别s1/s2/s3/s4/s5/s6/s7：", parts)
    if len(parts) == 4 and parts[2] == "TPWX":
        return ["System6", "s6"]
    if len(parts) == 6 and parts[2] == "ATDW":
        return ["System7", "s7"]
    return ["", ""]


# For the September 2023 data processing campaign.
def getDataType2309(param_data_type):
    """Translate a (possibly legacy) probe code to the unified data type;
    prints an error and returns "" for unknown codes."""
    try:
        return ProbeDataSystemTypeL2Transfer[param_data_type]
    except KeyError:
        print("getDataType：探测体制参数错误！")
        return ""


# Convert data-type parameters (legacy -> new probe systems), e.g.
# TSVP,DUSM,NDNA,HWSM,WCSM,HWSL ==> T,D,W
# TSVP,TUSM,TMSL,TCSM ==> T
# TSVP ==> T
def getMultiDataTypeStr2309(param_data_type_str):
    """Map one or a comma-separated list of probe codes to unified types,
    dropping unknown codes and duplicates (order preserved)."""
    if ',' not in param_data_type_str:
        return getDataType2309(param_data_type_str)
    unique_types = []
    for token in param_data_type_str.split(','):
        mapped = getDataType2309(token)
        if mapped != "" and mapped not in unique_types:
            unique_types.append(mapped)
    return ','.join(unique_types)


# Build value_type from data_type for compatibility with the old simulated
# data (value_type used to be selected in the UI).
# data_type: "T" or "T,D,W"
# return: 0 (single type, raw int) or "0,4,2" (comma-separated string)
def getMultiValutTypeStrByDataType2309(data_type):
    """Translate data-type letters into value-type codes.

    A single type keeps the legacy behaviour of returning the raw int code;
    a comma-separated list returns a comma-separated string of codes with
    unknown types (-1) skipped and duplicates removed.
    """
    if ',' not in data_type:
        return checkL2DataTypeByDataType(data_type)
    codes = []
    for item in data_type.split(','):
        code = checkL2DataTypeByDataType(item)
        # Deduplicate on the int code itself. The previous code appended
        # str(code) but tested `code not in arr2` (int vs stored strings),
        # so duplicates were never filtered out.
        if code != -1 and code not in codes:
            codes.append(code)
    return ','.join(str(code) for code in codes)


# New L2 data-type codes:
# 0:T temperature, 1:P pressure, 2:W1 zonal wind, 3:W2 meridional wind,
# 4:D atmospheric density, 5:N sodium density, 6:F iron density,
# 7:W wind speed, 8:X wind direction
def checkL2DataTypeByDataType(data_type):
    """Return the numeric L2 value-type code for a data-type letter, -1 if unknown."""
    # TODO distinguish zonal vs meridional wind for plain "W" inputs
    code_table = {
        'T': 0,
        'P': 1,
        'W1': 2,
        'W2': 3,
        'D': 4,
        'N': 5,
        'F': 6,
        'W': 7,
        'X': 8,
    }
    ret = code_table.get(data_type, -1)
    print("checkL2DataTypeByDataType: %s - %d" % (data_type, ret))
    return ret


# Convert a ValueType code back to its DataType letter.
# 0:T temperature, 1:P pressure, 2:W1 zonal wind, 3:W2 meridional wind,
# 4:D atmospheric density, 5:N sodium density, 6:F iron density,
# 7:W in-situ balloon wind speed, 8:X in-situ balloon wind direction
def checkL2DataTypeByValueType2309(value_type):
    """Return the data-type letter for a value-type code, or "" if unknown."""
    letters = {
        0: "T",
        1: "P",
        2: "W1",
        3: "W2",
        4: "D",
        5: "N",
        6: "F",
        7: "W",
        8: "X",
    }
    return letters.get(value_type, "")


# Like checkL2DataTypeByValueType2309 but also returns the default display
# unit (used for plotting).
# value_type: value-type code
# flag: 0 -> profile unit; 1 -> uncertainty-evaluation result unit
def checkL2DataTypeAndUnitByValueType2309(value_type, flag=0):
    """Return (data-type letter, display unit) for a value-type code.

    Unknown codes yield ("", "探测值"). When flag == 1 the three density
    types (4/5/6) report their uncertainty unit in percent.
    """
    table = {
        0: ("T", "温度/K"),
        1: ("P", "气压/(Pa)"),
        2: ("W1", "纬向风速/(m*s-1)"),
        3: ("W2", "径向风速/(m*s-1)"),
        4: ("D", "密度/(kg/m3)"),
        5: ("N", "钠原子密度/(cm-3)"),
        6: ("F", "铁原子密度/(cm-3)"),
        7: ("W", "风速/(m*s-1)"),
        8: ("X", "风向/(°)"),
    }
    ret, unit = table.get(value_type, ("", "探测值"))

    # Uncertainty results: only the densities (atmospheric, sodium, iron)
    # are expressed in percent.
    if flag == 1 and value_type in (4, 5, 6):
        unit = "密度不确定度/(%)"

    return ret, unit


# Latest 2023-09 definition, analogous to checkL2DataTypeByFileName.
# Keys: 0-T temperature / 1-P pressure / 2-W1 zonal wind / 3-W2 meridional
# wind / 4-D density / 5-N sodium density / 6-F iron density /
# 7-W wind speed / 8-X wind direction:
DataType2ValueType = {
    "T": 0,     # temperature
    "P": 1,     # pressure
    "W1": 2,    # wind (zonal)
    "W2": 3,    # wind (meridional)
    "D": 4,     # atmospheric density
    "N": 5,     # sodium atom density
    "F": 6,     # iron atom density
    "W": 7,     # wind speed (in-situ balloon)
    "X": 8,     # wind direction (in-situ)
}
def convertDataType2ValueType(value_type):
    """Return the numeric value-type code for a data-type key, or -1."""
    return DataType2ValueType.get(value_type, -1)


def setTaskStatus(task_id, type, is_save_into_db=1, parameter_set_id=-1, data_source=-1):
    """Register a task in the state-sync table.

    Ingest tasks (type == 0) must be unique per (task_id, type, save flag);
    a duplicate returns an error string. Returns "OK" on success.
    """
    if type == 0:
        cond = 'KeyName="%s" AND TaskType=%d AND IsSaveIntoDb=%d' % (task_id, type, is_save_into_db)
        print("query_conditon: %s" % cond)
        existing = select_db(db_sync_table, cond)
        print(existing, len(existing))
        # A row already present means the same ingest task id is unfinished.
        if len(existing) > 0:
            return "[Error]：任务ID重复!"

    values = '"%s", "%d", "%d", "%d", "%d"' % (task_id, type, is_save_into_db, data_source, parameter_set_id)
    print("插入状态同步表： %s" % values)
    new_id = insert_db(db_sync_table, "KeyName, TaskType, IsSaveIntoDb, DataSource, ParameterSetting", values)
    print("状态同步表插入ID： %d" % new_id)
    if new_id == -1:
        return "[Error]：状态参数插入错误！"
    return "OK"


# TODO: by default every matching task is marked finished (i.e. single-user
# operation is assumed). Should this support multiple concurrent tasks?
# (Re-test correctness after changing.)
def updateTaskStatus(task_id, type=-1, status=0):
    """Set Status for a task: 0 (default) = finished, 1 = running.

    When `type` is provided, only rows of that TaskType are updated.
    """
    if type == -1:
        cond = 'KeyName="%s"' % task_id
    else:
        cond = 'KeyName="%s" AND TaskType=%d' % (task_id, type)
    new_value = 'Status="%d"' % status
    update_db(db_sync_table, new_value, cond)
    print("更新状态同步表完成： %s" % (new_value))
    return 0


def setTaskStatusFinish(conf):
    """Mark the task described by conf as finished (no-op for empty TaskId)."""
    print("[setTaskStatusFinish]将要设置任务结束..")
    if conf['TaskId'] == "":
        return 0
    print("[setTaskStatusFinish]设置任务结束！")
    updateTaskStatus(conf['TaskId'], conf['TaskType'])
    return 0


def resetAllTaskStatus(status=0):
    """Bluntly reset: (1) every task's Status to `status`;
    (2) IsPaused=1 for all realtime-evaluation tasks (TaskType=5)."""
    # Reset all statuses.
    update_db(db_sync_table, 'Status=%d' % status, 'True')

    # Pause every realtime-evaluation task.
    paused_value = 'IsPaused=1'
    update_db(db_sync_table, paused_value, 'TaskType=5')
    print("更新状态同步表完成： %s" % (paused_value))
    return 0


def aprResetAllTaskStatus(param):
    """API endpoint: reset every task status. Testing only."""
    print("重置所有任务状态！仅测试使用！")
    print(param)
    return resetAllTaskStatus()


def getAllTaskKeyNameByTaskType(task_type):
    """Return the distinct KeyName (batch id) values for one task type,
    newest first. Empty list for an empty task type or no matches."""
    if task_type == "":
        print("任务类型不能为空")
        return []
    cond = 'TaskType=%d ORDER BY CreateTime DESC' % task_type
    print("query_conditon: %s" % cond)
    rows = select_db(db_sync_table, cond)
    if len(rows) == 0:
        return []
    batch_ids = []
    for task in dbQueryResult2JsonObj(rows, db_sync_table):
        key = task['KeyName']
        if key not in batch_ids:
            batch_ids.append(key)
    return batch_ids


def checkTaskIsPaused(task_id, type=-1):
    """Return the IsPaused flag of the newest matching task row (0 if none)."""
    if type == -1:
        cond = 'KeyName="%s"' % task_id
    else:
        cond = 'KeyName="%s" AND TaskType="%d"' % (task_id, type)

    cond += ' ORDER BY CreateTime DESC'
    print("query_conditon: %s" % cond)
    rows = select_db(db_sync_table, cond)
    if len(rows) == 0:
        return 0
    rows = dbQueryResult2JsonObj(rows, db_sync_table)
    return rows[0]['IsPaused']


def isTaskStoped(task_id, task_type):
    """Return 1 (and mark the task finished) when a pause was requested,
    otherwise 0."""
    if checkTaskIsPaused(task_id) != 1:
        return 0
    # A stop was requested: mark the task finished and tell the caller to exit.
    print("收到暂定请求，程序退出执行...")
    updateTaskStatus(task_id, task_type)
    return 1


def updateTaskKeyNameById(task_table_id, new_task_id):
    '''
    Set the task's KeyNameUpdate column to new_task_id.
    task_table_id: auto-increment Id of the row to update.
    '''
    # Update the head row.
    query_conditon = 'Id = %d' % task_table_id
    # Quote the value (as updateTaskKeyNameByPreviousTaskKeyName does):
    # the previous unquoted %s produced invalid SQL for string task ids.
    update_str = 'KeyNameUpdate = "%s"' % new_task_id
    update_db(db_sync_table, update_str, query_conditon)
    print("1更新KeyNameUpdate字段完成： %s" % (update_str))
    return 0


def updateTaskKeyNameByPreviousTaskKeyName(previous_task_id, new_task_id):
    '''
    Repoint every row whose KeyNameUpdate equals previous_task_id
    to the new new_task_id.
    '''
    # Rows of the same task type are not excluded: each task type only ever
    # selects its latest data, so updating all of them is harmless.
    # query_conditon = 'KeyNameUpdate = "%s" AND TaskType != %d' % (previous_task_id, task_type)
    cond = 'KeyNameUpdate = "%s"' % previous_task_id
    new_value = 'KeyNameUpdate = "%s"' % new_task_id
    update_db(db_sync_table, new_value, cond)
    print("2更新KeyNameUpdate字段完成： %s" % (new_value))
    return 0


############################
"""
Station/device bookkeeping: save, query, and link stations to devices.
"""
db_station_table = "bt_station"
db_device_table = "bt_device"
db_station_device_relation_table = "bt_rel_station_device"
db_device_param_from_user_table = "bt_device_system_type"    # parameters entered by the user in the UI

def saveStation(station_dict):
    """Insert one station row; return its new Id (-1 on failure)."""
    print(station_dict)
    fields = ("StationCode", "StationName", "Location", "Longitude", "Latitude", "Altitude")
    values = ', '.join('"%s"' % station_dict[field] for field in fields)
    print("插入站点数据： %s" % values)
    new_id = insert_db(db_station_table, "StationCode, StationName, Location, Longitude, Latitude, Altitude", values)
    print("插入站点数据后ID： %d" % new_id)
    if new_id == -1:
        print("[Error]：站点信息插入错误！")
    return new_id


def getStationInfo(station_name):
    """Return the first raw DB row matching the station name, or ""."""
    cond = 'StationName="%s"' % station_name
    print("query_conditon: %s" % cond)
    rows = select_db(db_station_table, cond)
    print(rows, len(rows))
    return rows[0] if len(rows) > 0 else ""


def getStationInfo2309(station_name):
    """Return the first station row as a dict, or "" when not found."""
    cond = 'StationName="%s"' % station_name
    print("query_conditon: %s" % cond)
    rows = select_db(db_station_table, cond)
    print(rows, len(rows))
    if len(rows) == 0:
        return ""
    return dbQueryResult2JsonObj(rows, db_station_table)[0]


# Fetch every station.
# flag = 0 -> list of dicts: [{}, {}]
# flag = 1 -> dict keyed by key_field: {code1: {}, code2: {}}
def getAllStationInfo2309(flag=0, key_field=""):
    """Return all station rows as dicts ("" when the table is empty)."""
    cond = 'true'
    print("query_conditon: %s" % cond)
    rows = select_db(db_station_table, cond)
    print(rows, len(rows))
    if len(rows) == 0:
        return ""
    if flag == 1:
        return dbQueryResult2JsonObjWithKey(rows, db_station_table, key_field)
    return dbQueryResult2JsonObj(rows, db_station_table)


def checkStationIdByName(station_name):
    """Return the station Id for a name, or -1 when empty/unknown."""
    if station_name == "":
        return -1
    row = getStationInfo(station_name)
    if row == "":
        return -1
    return row[0]       # column 0 is Id


def saveStationByCheck(station_dict):
    """Insert the station unless it already exists; return its Id."""
    existing = getStationInfo(station_dict["StationName"])
    if existing != "":
        return existing[0]
    return saveStation(station_dict)


def saveDevice(device_dict):
    """Insert one device row; return its new Id (-1 on failure)."""
    print(device_dict)
    values = ', '.join('"%s"' % device_dict[field] for field in ("DeviceCode", "DeviceName", "DeviceType"))
    print("插入设备数据： %s" % values)
    new_id = insert_db(db_device_table, "DeviceCode, DeviceName, DeviceType", values)
    print("插入设备数据后ID： %d" % new_id)
    if new_id == -1:
        print("[Error]：设备信息插入错误！")
    return new_id


# Look up a device by name (names are not unique; to be retired in favour
# of getDeviceInfo2).
def getDeviceInfo(device_name):
    """Return the first raw device row matching the name, or ""."""
    cond = 'DeviceName="%s"' % device_name
    print("query_conditon: %s" % cond)
    rows = select_db(db_device_table, cond)
    print(rows, len(rows))
    # Example row: (2, 'Wuhan Lidar II', 'Wuhan Lidar II', None, None, '10', ...)
    return rows[0] if len(rows) > 0 else ""


# Look up a device by its (unique) device code.
def getDeviceInfoByCode(device_code):
    """Return the first raw device row matching the code, or ""."""
    cond = 'DeviceCode="%s"' % device_code
    print("query_conditon: %s" % cond)
    rows = select_db(db_device_table, cond)
    print(rows, len(rows))
    # Example row: (2, 'Wuhan Lidar II', 'Wuhan Lidar II', None, None, '10', ...)
    return rows[0] if len(rows) > 0 else ""


def getAllDeviceInfo2309(flag=0, key_field=""):
    """Return all device rows as dicts.

    flag=0 -> list of dicts; flag=1 -> dict keyed by key_field.
    "" when the table is empty.
    """
    cond = 'true'
    print("query_conditon: %s" % cond)
    rows = select_db(db_device_table, cond)
    print(rows, len(rows))
    if len(rows) == 0:
        return ""
    if flag == 1:
        return dbQueryResult2JsonObjWithKey(rows, db_device_table, key_field)
    return dbQueryResult2JsonObj(rows, db_device_table)


def getDeviceParametersUserSettingById2309(device_id, flag=0, key_field=""):
    """Return the UI-entered parameter rows for one device.

    flag=0 -> list of dicts; flag=1 -> dict keyed by key_field.
    "" when no rows exist.
    """
    cond = 'DeviceId="%d"' % device_id
    print("query_conditon: %s" % cond)
    rows = select_db(db_device_param_from_user_table, cond)
    if len(rows) == 0:
        return ""
    if flag == 1:
        return dbQueryResult2JsonObjWithKey(rows, db_device_param_from_user_table, key_field)
    return dbQueryResult2JsonObj(rows, db_device_param_from_user_table)


def checkDeviceIdByName(device_name):
    """Return the device Id for a name, or -1 when empty/unknown."""
    if device_name == "":
        return -1
    row = getDeviceInfo(device_name)
    print(row)
    if row == "":
        return -1
    return row[0]        # column 0 is Id

def checkDeviceIdByCode(device_code):
    """Return the device Id for a device code, or -1 when empty/unknown."""
    if device_code == "":
        return -1
    row = getDeviceInfoByCode(device_code)
    print(row)
    if row == "":
        return -1
    return row[0]        # column 0 is Id

def getDeviceInfoByDeviceNameList(device_name_list):
    """Query devices whose names appear in device_name_list.

    NOTE(review): only the FIRST matching row is returned even though the
    IN-query may match several devices - confirm callers expect that.
    """
    quoted_names = '", "'.join(device_name_list)
    cond = 'DeviceName in ("%s")' % quoted_names
    print("query_conditon: %s" % cond)
    rows = select_db(db_device_table, cond)
    print(rows, len(rows))
    return rows[0] if len(rows) > 0 else ""


def saveDeviceByCheck(device_dict):
    """Insert the device unless its code already exists; return its Id."""
    existing = getDeviceInfoByCode(device_dict["DeviceCode"])
    if existing != "":
        return existing[0]
    return saveDevice(device_dict)


def setStationDeviceRelation(station_id, device_id):
    """Insert one station-device link row; return its Id (-1 on failure)."""
    values = '"%d", "%d"' % (station_id, device_id)
    print("插入站点-设备关联关系： %s" % values)
    new_id = insert_db(db_station_device_relation_table, "StationId, DeviceId", values)
    print("插入站点-设备关系数据后ID： %d" % new_id)
    if new_id == -1:
        print("[Error]：站点-设备关系信息插入错误！")
    return new_id


def getStationDeviceRelation(station_id, device_id):
    """Return the first station-device link row, or "" when none exists."""
    cond = 'StationId="%d" AND DeviceId="%d"' % (station_id, device_id)
    print("query_conditon: %s" % cond)
    rows = select_db(db_station_device_relation_table, cond)
    print(rows, len(rows))
    return rows[0] if len(rows) > 0 else ""


def saveStationDeviceRelationByCheck(station_id, device_id):
    """Create the station-device link only if it does not exist yet."""
    existing = getStationDeviceRelation(station_id, device_id)
    if existing == "":
        setStationDeviceRelation(station_id, device_id)


# Counters: whether files were ingested, preprocessed, evaluated.
def updateStationDataCount(station_id, count, flag=0):
    """Add `count` to a station's data counters.

    flag 0: ingest     -> TotalDataNum and TotalDataNumUnmeasured
    flag 1: preprocess -> TotalDataPreprocessed
    flag 2: evaluate   -> TotalDataNumMeasured, recompute TotalDataNumUnmeasured
    Returns "" when the station row is missing or the flag is unknown.
    """
    query_conditon = 'Id = %d' % station_id
    ret = select_db(db_station_table, query_conditon)
    print(ret, len(ret))
    if len(ret) == 0:
        return ""
    ret = dbQueryResult2JsonObj(ret, db_station_table)
    if flag == 0:
        total_count = ret[0]["TotalDataNum"] + count
        total_count_unmeasured = ret[0]["TotalDataNumUnmeasured"] + count
        update_str = 'TotalDataNum = "%d", TotalDataNumUnmeasured = "%d"' % (total_count, total_count_unmeasured)
        print("更新数据总数： %s" % update_str)
    elif flag == 1:
        total_count_preprocessed = ret[0]["TotalDataPreprocessed"] + count
        update_str = 'TotalDataPreprocessed = "%d"' % total_count_preprocessed
        print("更新预处理数据： %s" % update_str)
    elif flag == 2:
        total_count_measured = ret[0]["TotalDataNumMeasured"] + count
        total_count_unmeasured = ret[0]["TotalDataNum"] - total_count_measured
        update_str = 'TotalDataNumMeasured = "%d", TotalDataNumUnmeasured = "%d"' % \
                     (total_count_measured, total_count_unmeasured)
        print("更新评定数据： %s" % update_str)
    else:
        # Previously an unrecognised flag crashed with NameError on update_str.
        return ""

    update_db(db_station_table, update_str, query_conditon)


def updateStationInfo(station_dict):
    """Refresh a station's coordinates, matching on StationName."""
    cond = 'StationName="%s"' % station_dict['StationName']
    print("query_conditon: %s" % cond)
    coords = tuple(str(station_dict[key]) for key in ('Longitude', 'Latitude', 'Altitude'))
    new_values = 'Longitude = "%s", Latitude = "%s", Altitude = "%s"' % coords
    update_db(db_station_table, new_values, cond)


###################################
# Update status flags in the data-head table.
db_table_head = "bt_data_head"
def updateDataHeadTable(head_id, batch_id, flag, status):
    """
    Update one data-head row.
    DataStatus (preprocessed data): 1 preprocessed; 2 evaluated.
    RawDataStatus (raw data): 0 ingested; 2 evaluated.
    Parameters:
    head_id: head row Id
    flag: 0 raw data; 1 preprocessed data
    status: raw data -> RawDataStatus 0/2; preprocessed -> DataStatus 1/2
    """
    cond = 'Id = %d' % head_id
    if flag == 0:
        new_values = 'RawDataStatus = "%d"' % status
    else:
        new_values = 'BatchId = "%s", DataStatus = "%d"' % (batch_id, status)
    print("更新数据头状态： %s" % (new_values))
    update_db(db_table_head, new_values, cond)


def updateDataHeadTableV2(conf, head_id, flag, status, update_batch_id=1):
    """
    Update one head row in conf['DbSaveHeadTable'].
    DataStatus (preprocessed data): 1 preprocessed; 2 evaluated.
    RawDataStatus (raw data): 0 ingested; 2 evaluated.
    ParameterSetting: full parameter set; ParameterDescription: short display string.
    Parameters:
    head_id: head row Id
    flag: 0 raw data; 1 preprocessed data
    status: raw data -> RawDataStatus 0/2; preprocessed -> DataStatus 1/2
    update_batch_id: when 1, also stamp BatchId with conf['TaskId']
    """
    db_table = conf['DbSaveHeadTable']
    cond = 'Id = %d' % head_id
    if flag == 0:
        parts = ['RawDataStatus = "%d"' % status]
    else:
        parts = ['DataStatus = "%d"' % status]

    if 'ParameterSetting' in conf:
        parts.append('ParameterSetting = "%d"' % conf['ParameterSetting'])

    if 'ParameterDescription' in conf:
        parts.append('ParameterDescription = "%s"' % conf['ParameterDescription'])

    if update_batch_id == 1:
        parts.append('BatchId = "%s"' % conf['TaskId'])

    update_str = ', '.join(parts)
    print("更新数据头状态： head_id=%s; %s; db_table=%s" % (head_id, update_str, db_table))
    update_db(db_table, update_str, cond)


def updateDataHeadTableShareWay(head_id_list, share_way=2):
    """
    Set SharedWay on the given head rows.
    SharedWay: 0 raw file only; 1 preprocessing only; 2 shared by both.
    Parameters:
    head_id_list: list of head Id strings
    share_way: 0 raw file only; 1 preprocessing only; 2 shared by both
    """
    if not head_id_list:
        return "参数headId为空"

    cond = 'Id IN (%s)' % ','.join(head_id_list)
    new_value = 'SharedWay = "%d"' % share_way
    print("更新数据头状态： %s" % (new_value))
    update_db(db_table_head, new_value, cond)


# Same as updateDataHeadTableShareWay but with an explicit head table.
def updateDataHeadTableShareWay2309(conf, head_id_list, share_way=2):
    """
    Set SharedWay on the given head rows of conf['DbHeadTable'].
    SharedWay: 0 raw file only; 1 preprocessing only; 2 shared by both.
    Parameters:
    head_id_list: list of head Id strings
    share_way: 0 raw file only; 1 preprocessing only; 2 shared by both
    """
    if not head_id_list:
        return "参数headId为空"

    head_table = conf['DbHeadTable']
    cond = 'Id IN (%s)' % ','.join(head_id_list)
    new_value = 'SharedWay = "%d"' % share_way
    print("更新数据头状态： %s" % (new_value))
    update_db(head_table, new_value, cond)


def updateDataHeadTableShareHeadId(head_id_list, share_headid=2):
    """
    Increment SharedHeadId on the given head rows.
    SharedHeadId: 0 raw file only; 1 preprocessing; 2 evaluation;
    3 preprocessing + evaluation.
    Update strategy (additive):
    share 1: stored 0 -> 1; stored 2 -> 3
    share 2: stored 0 -> 2; stored 1 -> 3
    Parameters:
    head_id_list: list of head Id strings
    share_headid: increment to apply (1 or 2)
    """
    if not head_id_list:
        return "参数headId为空"

    cond = 'Id IN (%s)' % ','.join(head_id_list)
    new_value = 'SharedHeadId = SharedHeadId + %d' % share_headid
    print("更新数据头状态： %s" % new_value)
    update_db(db_table_head, new_value, cond)


# Same as updateDataHeadTableShareHeadId but with an explicit head table.
def updateDataHeadTableShareHeadId2309(conf, head_id_list, share_headid=2):
    """
    Increment SharedHeadId on the given head rows of conf['DbHeadTable'].
    SharedHeadId: 0 raw file only; 1 preprocessing; 2 evaluation;
    3 preprocessing + evaluation.
    Update strategy (additive):
    share 1: stored 0 -> 1; stored 2 -> 3
    share 2: stored 0 -> 2; stored 1 -> 3
    Parameters:
    head_id_list: list of head Id strings
    share_headid: increment to apply (1 or 2)
    """
    if not head_id_list:
        return "参数headId为空"

    head_table = conf['DbHeadTable']
    cond = 'Id IN (%s)' % ','.join(head_id_list)
    new_value = 'SharedHeadId = SharedHeadId + %d' % share_headid
    print("更新数据头状态： %s" % new_value)
    update_db(head_table, new_value, cond)


# Ingested row count (preprocess/evaluate counts not yet distinguished).
def updateDataHeadTableCount(head_id, count, step=0):
    """
    Store the number of data rows touched by `step` for one head row.
    Parameters:
    head_id: head row Id
    step: 0 ingest; 1 preprocess; 2 evaluate (currently unused by the body)
    count: number of rows involved in the step
    """
    cond = 'Id = %d' % head_id
    new_value = 'Count = "%d"' % count
    print("更新数据头状态： %s" % (new_value))
    update_db(db_table_head, new_value, cond)


###################################
###################################
# 数据筛选L2的文件头信息（及文件新的存储路径）
def queryDataByCondition2309(conf, flag=1, level=1):
    """
    2309查找L2 原始数据头的逻辑：必须需指定DbHeadTable的值。调用方式queryDataByCondition2309（conf, 0）

    使用提供的字段进行数据筛选
    conf 中存在的字段
    'DataSource': data_source,              # 数据来源（枚举值）：0:数据库预处理数据;1:数据库原始数据 2:文件导入;3:最新处理的数据
    'StationId': station_id,                # 站点编号
    'DeviceId': device_id,                  # 设备编号
    'ValueType': value_type_str,            # 参量类型，格式："0" 或 "0,1,2,3"，其中0:温度；1:密度；2:纬向风速; 3:径向风速；
    'DataType': data_type,                  # 数据类型，格式"TSVP" 或 "DUSM"等
    'TimeStart': time_start,                # 时间范围的起始时间，格式：YYYY-mm-dd HH:MM:SS
    'TimeEnd': time_end,                    # 时间范围的结束时间，格式：YYYY-mm-dd HH:MM:SS
    'HeightStart': height_start,            # 高度区间起始值（暂时不使用）
    'HeightEnd': height_end,                # 高度区间结束值（暂时不使用）
    'DbHeadTable': bt_data_head_s1/s2/s3/s4_l2   # 针对2309新数据，可通过设备名称（DeviceId）确定s1/s2/s3/s4
    """
    # 修改逻辑20231122-文件head信息分表策略：所有L2文件存1个head表（bt_data_head），L0分head表存（bt_data_head_sx_l0）
    # 通过设备名称获取系统类型（系统1-系统4）不同
    # device_info = getDeviceInfoById(conf['DeviceId'])
    # ret_arr = getSystemTypeByCode2309(device_info['DeviceName'])
    # db_table_head_type = ret_arr[1]
    # conf['DbHeadTable'] = db_data_head_table + '_' + db_table_head_type + '_l2'
    print("文件头表：conf['DbHeadTable']", conf['DbHeadTable'])


    # 数据筛选的表，根据数据来源判断
    # 0:数据库:        bt_data_head,            L2:bt_data              L0:bt_data_l0; （暂不用）
    # 1:文件导入;      bt_temporary_data_head;  L2:bt_temporary_data    L0:bt_temporary_data_l0;（暂不用）
    # 2:最新处理的数据 根据sys_state_sync表的IsSaveIntoDb字段判断，如果存库，选择0；不存库，选择1
    if 'DbHeadTable' in conf.keys():
        db_table_head = conf['DbHeadTable']
    else:
        if conf['DataSource'] == 0 or (conf['DataSource'] == 2 and conf['SaveFlag'] == 1):
            db_table_head = db_data_head_table
            # db_table_data = db_raw_l2_precessed_table
        elif conf['DataSource'] == 1:
            db_table_head = db_data_head_table
            # db_table_data = db_raw_l2_data_table
        else:
            db_table_head = db_temporary_data_head_table
            # db_table_data = db_temporary_data_table_l2

    head_list = []

    # 是否需要添加条件：AND RawDataStatus = 0
    query_conditon = 'StationId="%d" AND DeviceId="%d" AND ' \
                     'DataStartTime >= "%s" AND DataStartTime <= "%s" AND Level = "%d" AND FileName!=""' % \
                     (conf['StationId'], conf['DeviceId'],
                      conf['TimeStart'], conf['TimeEnd'], level)

    # 如果参量类型为空，则筛选所有数据类型
    if conf['ValueType'] != "":
        print("参量类型ValueType为空，筛选所有参量类型..")
        tmp_str = ' AND ValueType in (%s) ' % conf['ValueType']
        query_conditon += tmp_str

    # 如果数据类型为空，则筛选所有数据类型
    if conf['DataType'] != "":
        print("数据类型DataType为空，筛选所有数据类型..")
        arr = conf['DataType'].split(',')
        if len(arr) > 1:
            data_type_str = '","'.join(arr)
        else:
            data_type_str = arr[0]
        print("数据类型筛选条件：%s" % data_type_str)
        tmp_str = ' AND DataType in ("%s")' % data_type_str
        query_conditon += tmp_str

    query_conditon += " ORDER BY DataStartTime ASC"

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_table_head, query_conditon)
    # print(ret, len(ret))
    if len(ret) > 0:
        ret = dbQueryResult2JsonObj(ret, db_table_head)
        for item in ret:
            print(item)
            # ValueType：温风密压
            # (item0, item5) = (HeadId, ValueType)
            if flag:
                head_list.append((item['Id'], item['ValueType']))
            else:
                # head_list.append((item[0], item[5], item[6], item[7]))
                head_list.append((item['Id'], item['ValueType'], item['DataStartTime'], item['DataEndTime'], item['StationId'], item['DeviceId'], item['FileName'], item['FileStorePath']))
        print("HeadId数量：%d" % len(head_list))
    else:
        print("Error: 文件头信息表 %s 中未找到符合检索条件的HeadID" % db_table_head)

    # 返回表头id列表，及表名
    # return head_list,db_table_head
    return head_list


# Parse the system category from an L2 file name.
# return: s1/s2/s3/s4/... table suffix
def getDataSystemTypeByL2FileName(L2_filename):
    """Return the head-table suffix (e.g. "s1") derived from an L2 file name."""
    system_code = L2_filename.split('_')[1]
    return getSystemTypeByCodeOrFileName2309(system_code, L2_filename)[1]


# Infer the data level from an L2/L0 file name.
# AL_LD1_355nm_Rm_DV_20230821140045_L0.dat
# AL_LD1_D_20230821140045_L2.dat
# sounding balloon: AL_TKQQ_TPWX_2023073022.txt
# return: 0 / 2 (or -1 if unknown)
def getDataLevelByFileName(filename):
    """Return 0 for L0 files, 2 for L2 files, -1 when undeterminable.

    Balloon (TPWX) and rocket (ATDW) files carry no level marker in the
    name but only ever exist as L2 data.
    """
    if 'L0' in filename:
        return 0
    if 'L2' in filename:
        return 2
    parts = re.split("_+", filename)
    # In-situ balloon or in-situ rocket.
    if (len(parts) == 4 and parts[2] == "TPWX") or (len(parts) == 6 and parts[2] == "ATDW"):
        return 2
    return -1


# Decide the device kind from a file name:
# 0 lidar, 1 in-situ balloon, 2 in-situ rocket.
def checkDeviceTypeByFileName(filename):
    """Return 1 for sounding balloons (TPWX), 2 for rockets (ATDW), else 0."""
    parts = re.split("_+", filename)
    if len(parts) == 4 and parts[2] == "TPWX":
        return 1
    if len(parts) == 6 and parts[2] == "ATDW":
        return 2
    return 0


# 逻辑：
# 1. 根据界面条件筛选L2头文件及路径，见接口：queryDataByCondition2309(conf, 0)
# 2. 根据单个L2文件筛选L0文件见接口findL0FilesByL2FileName
def findL0FilesByL2FileName(head_info):
    """
    根据L2文件查找对应的L0文件
    返回L0的文件列表，按照体制，如下
    # D
    # {'Ry_DV': []}
    # {'Ry_DV': []}
    # {'Ry_DN': [], 'Ry_DE': []}
    # T/P
    # {'Ry_DV': [], 'Rm_DV': [], 'Fe_DN': [], 'Fe_DE': []}
    # {'Ry_DV': [], 'Rm_DV': [], 'Na_DV': []}
    # {'Ry_DN': [], 'Ry_DE': [], 'Na_DN': [], 'Na_DE': []}
    # Na
    # {'Na_DV': []}
    # {'Na_DN': [], 'Na_DE': []}
    # Fe
    # {'Fe_DN': [], 'Fe_DE': []}
    # W
    # {'Ry_DE': [], 'Fe_DE': [],'Ry_DN': [], 'Fe_DN': []}
    # {'Ry_DE': [], 'Na_DE': [],'Ry_DN': [], 'Na_DN': []}
    # {'Ry_DE': [], 'Na_DE': [],'Ry_DN': [], 'Na_DN': []}
    """
    """
    通过L2 的HeadInfo获取对应L0 数据的 HeadInfo
    head_info:
        ValueType:      测量值类型
        DataStartTime:  L2 数据的开始时间
        DataEndTime:    L2 数据的结束时间
        StationId:      站点ID
        DeviceId:       设备ID
    """
    filepath_dict = {}
    db_head_table = head_info['DbHeadTableL0']

    # 多次查库不如一次查库性能好
    for data_type in head_info['DataTypeL0']:
        query_conditon = 'StationId = "%d" AND DeviceId = "%d" AND ' \
                         'DataStartTime >= "%s" AND DataEndTime <= "%s" AND DataType like "%%%s%%"' \
                         % (head_info['StationId'], head_info['DeviceId'], head_info['DataStartTime'],
                            head_info['DataEndTime'], data_type)

        print("query_conditon2309: %s" % query_conditon)
        ret = select_db(db_head_table, query_conditon)
        print(ret, len(ret))
        if len(ret) > 0:
            if data_type not in filepath_dict.keys():
                filepath_dict[data_type] = []
            ret = dbQueryResult2JsonObj(ret, db_head_table)
            for item in ret:
                # print(item)
                store_path = os.path.join(item['FileStorePath'], item['FileName'])
                store_path = store_path.replace("\\", "/")
                # store_path = "%s/%s"(item['FileStorePath'], item['FileName'])
                filepath_dict[data_type].append(store_path)
        else:
            print("Error: 文件头信息表中未找到对应体制 %s 的L0数据头" % data_type)

    return filepath_dict


#  From YangJian
def find_L0_file(L2_filepath, L0_path):
    """
    Locate L0 data files matching an L2 file's detection time window.

    The detection timestamp is parsed from the L2 file name (field 3 or the
    second-to-last '_'-separated field) and truncated to its containing
    10000-second block; L0 files under L0_path whose own timestamp falls in
    that window and whose detect-type / direction fields match one of the
    known keys are grouped by '<type>_<direction>'.

    :param L2_filepath: L2 data file path, e.g. AL_LD2_T_20230821140049_L2.dat
    :param L0_path: directory scanned for L0 files. NOTE(review): when
        L0_path is a plain file, filepath_list is built but never fed into
        the matching loop, so the result is empty -- confirm intent.
    :return: {'Ry_DV': [path, ...], ...}; empty dict when nothing matches or
             the L2 file name cannot be parsed
    """
    filename = os.path.basename(L2_filepath)
    fileinfo = filename.split('_')
    if fileinfo[3].isdigit():
        detect_time = fileinfo[3]
    elif fileinfo[-2].isdigit():
        detect_time = fileinfo[-2]
    else:
        # Fix: previously fell through with detect_time unbound, raising
        # NameError just below; now fail fast with an empty result.
        print('Filename Error: {}'.format(L2_filepath))
        return {}
    begin_time = int(detect_time) // 10000 * 10000
    end_time = (int(detect_time) // 10000 + 1) * 10000
    filepath_dict = {}
    if os.path.isfile(L0_path):
        filepath_list = [L0_path]
    elif os.path.isdir(L0_path):
        filepath_list = get_file_list(L0_path)
        for key in ['Rm_DV', 'Ry_DV', 'Ry_DE', 'Ry_DN', 'Na_DV', 'Na_DE', 'Na_DN', 'Fe_DV', 'Fe_DE', 'Fe_DN']:
            detect_type, direction = key.split('_')
            # DE/DN keys also accept the opposite direction codes DW/DS
            if direction == 'DE':
                direction_list = ['DE', 'DW']
            elif direction == 'DN':
                direction_list = ['DN', 'DS']
            else:
                direction_list = [direction]
            data_list = [
                filepath for filepath in filepath_list
                if detect_type in [os.path.basename(filepath).split(
                    '_')[-4], os.path.basename(filepath).split('_')[3]] and (os.path.basename(filepath).split(
                    '_')[-3] in direction_list or os.path.basename(filepath).split('_')[4] in direction_list)
                   and begin_time <= int(os.path.basename(filepath).split('_')[5].split('.')[0]) < end_time
            ]
            if data_list != []:
                filepath_dict[key] = data_list
    else:
        print('L0_path ERROR!!!')
    return filepath_dict


def get_file_list(path):
    """Recursively collect every file path under *path*, using '/' separators."""
    collected = []
    for root, _dirs, filenames in os.walk(path):
        collected.extend(
            os.path.join(root, name).replace("\\", "/") for name in filenames
        )
    return collected


# 数据筛选
def queryDataByCondition(conf, flag=1, level=1):
    """
    Filter head records using the fields supplied in ``conf``.

    Fields used from conf:
      'DataSource':  0: db preprocessed; 1: db raw; 2: file import; 3: latest
      'SaveFlag':    read only when DataSource == 2
      'StationId', 'DeviceId': ints
      'ValueType':   "0" or "0,1,2,3" (0:temperature 1:density 2:zonal wind
                     3:radial wind); empty string means "all value types"
      'DataType':    e.g. "TSVP" or "DUSM", comma separated; empty means all
      'TimeStart' / 'TimeEnd': "YYYY-mm-dd HH:MM:SS"
      'DbHeadTable': optional explicit head table override

    :param flag: 1 -> (Id, ValueType) tuples; 0 -> adds DataStartTime/DataEndTime
    :param level: data level filter (default 1 = L2)
    :return: list of head tuples ordered by DataStartTime
    """
    # Head table selection by data source:
    # 0: database:        bt_data_head          (L2: bt_data, L0: bt_data_l0)
    # 1: file import:     bt_temporary_data_head
    # 2: latest result:   depends on sys_state_sync.IsSaveIntoDb
    if 'DbHeadTable' in conf.keys():
        db_table_head = conf['DbHeadTable']
    else:
        if conf['DataSource'] == 0 or (conf['DataSource'] == 2 and conf['SaveFlag'] == 1):
            db_table_head = db_data_head_table
        elif conf['DataSource'] == 1:
            db_table_head = db_data_head_table
        else:
            db_table_head = db_temporary_data_head_table

    head_list = []

    # NOTE(review): may also need "AND RawDataStatus = 0" -- kept as original.
    query_conditon = 'StationId="%d" AND DeviceId="%d" AND ' \
                     'DataStartTime >= "%s" AND DataStartTime <= "%s" AND Level = "%d" AND FileName!=""' % \
                     (conf['StationId'], conf['DeviceId'],
                      conf['TimeStart'], conf['TimeEnd'], level)

    # Empty ValueType means no ValueType restriction (all value types match).
    if conf['ValueType'] != "":
        # Fix: the old log claimed "ValueType is empty" on this branch, which
        # is the opposite of what actually happens here.
        print("参量类型ValueType非空，按指定参量类型筛选..")
        tmp_str = ' AND ValueType in (%s) ' % conf['ValueType']
        query_conditon += tmp_str

    # Empty DataType means no DataType restriction (all data types match).
    if conf['DataType'] != "":
        # Fix: same inverted log message as above.
        print("数据类型DataType非空，按指定数据类型筛选..")
        arr = conf['DataType'].split(',')
        if len(arr) > 1:
            data_type_str = '","'.join(arr)
        else:
            data_type_str = arr[0]
        print("数据类型筛选条件：%s" % data_type_str)
        tmp_str = ' AND DataType in ("%s")' % data_type_str
        query_conditon += tmp_str

    query_conditon += " ORDER BY DataStartTime ASC"

    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_table_head, query_conditon)
    if len(ret) > 0:
        # Fix: map fields using the table actually queried (db_table_head),
        # not the hard-coded bt_data_head, so temporary-table queries convert
        # correctly.
        ret = dbQueryResult2JsonObj(ret, db_table_head)
        for item in ret:
            print(item)
            if flag:
                head_list.append((item['Id'], item['ValueType']))
            else:
                head_list.append((item['Id'], item['ValueType'], item['DataStartTime'], item['DataEndTime']))
        print("HeadId数量：%d" % len(head_list))
    else:
        print("Error: 文件头信息表中未找到符合检索条件的HeadID")

    return head_list


def queryDataByCondition_bak(conf, flag=1, level=1):
    """
    (Backup variant) Filter head records by the fields in ``conf``.

    Unlike queryDataByCondition, this version requires a non-empty
    'ValueType', matches a single exact 'DataType', and does not order the
    result set.

    conf fields used: DataSource, StationId, DeviceId, ValueType, DataType,
    TimeStart, TimeEnd, optional DbHeadTable, SaveFlag (when DataSource==2).
    :param flag: 1 -> (Id, ValueType); 0 -> adds DataStartTime/DataEndTime
    :param level: data level filter (default 1)
    :return: list of head tuples; [] when ValueType is empty or nothing found
    """
    if conf['ValueType'] == "":
        print("参量类型ValueType不能为空")
        return []

    # Resolve the head table: explicit override first, then by data source
    # (0/1 -> formal head table; 2 with SaveFlag=1 -> formal; else temporary).
    if 'DbHeadTable' in conf:
        db_table_head = conf['DbHeadTable']
    elif conf['DataSource'] == 0 or (conf['DataSource'] == 2 and conf['SaveFlag'] == 1):
        db_table_head = db_data_head_table
    elif conf['DataSource'] == 1:
        db_table_head = db_data_head_table
    else:
        db_table_head = db_temporary_data_head_table

    # NOTE(review): may also need "AND RawDataStatus = 0" -- kept as original.
    query_conditon = (
        'StationId="%d" AND DeviceId="%d" AND DataType="%s" AND ValueType in (%s) AND '
        'DataStartTime >= "%s" AND DataStartTime <= "%s" AND Level = "%d" AND FileName!=""'
        % (conf['StationId'], conf['DeviceId'], conf['DataType'], conf['ValueType'],
           conf['TimeStart'], conf['TimeEnd'], level)
    )
    print("query_conditon: %s" % query_conditon)

    head_list = []
    rows = select_db(db_table_head, query_conditon)
    if len(rows) == 0:
        print("Error: 文件头信息表中未找到符合检索条件的HeadID")
        return head_list

    for item in dbQueryResult2JsonObj(rows, db_data_head_table):
        print(item)
        if flag:
            head_list.append((item['Id'], item['ValueType']))
        else:
            head_list.append((item['Id'], item['ValueType'], item['DataStartTime'], item['DataEndTime']))
    print("HeadId数量：%d" % len(head_list))
    return head_list


# 查询预处理的数据（from表bt_data）
def getAllProcessedDataBySearch(conf, level=1):
    """
    Query head ids of preprocessed data matching the search conditions.

    conf fields used: StationId, DeviceId, ValueType (comma separated string),
    TimeStart, TimeEnd.
    :param level: data level filter (default 1)
    :return: list of (HeadId, ValueType, DataStartTime, DataEndTime) tuples,
             taken positionally from the raw rows (columns 0, 5, 6, 7 per the
             original mapping)
    """
    head_list = []

    value_type_arr = conf['ValueType'].split(',')
    print("getAllProcessedDataBySearch: %s" % conf['ValueType'])
    print(value_type_arr)
    value_type_str = ','.join(value_type_arr)

    # NOTE(review): may also need "AND RawDataStatus = 0" -- kept as original.
    query_conditon = (
        'StationId="%d" AND DeviceId="%d" AND ValueType in (%s) AND '
        'DataStartTime >= "%s" AND DataStartTime <= "%s" AND Level = "%d"'
        % (conf['StationId'], conf['DeviceId'], value_type_str,
           conf['TimeStart'], conf['TimeEnd'], level)
    )
    print("query_conditon: %s" % query_conditon)

    rows = select_db(db_data_head_table, query_conditon)
    if len(rows) == 0:
        print("Error: 文件头信息表中未找到符合检索条件的HeadID")
        return head_list

    for row in rows:
        print(row)
        # positional columns: row[0]=HeadId, row[5]=ValueType,
        # row[6]=DataStartTime, row[7]=DataEndTime
        head_list.append((row[0], row[5], row[6], row[7]))
    print("HeadId数量：%d" % len(head_list))
    return head_list


def updateHeadTableTempDataHeadId(head_id, tmp_head_index, time_format, key):
    """
    Link the head table's TempDataHeadId column to a generated temporary head id.

    :param head_id: Id of the head row to update
    :param tmp_head_index: temporary head id to store
    :param time_format: "quarter", "total", or a MySQL DATE_FORMAT pattern
    :param key: formatted time-bucket value matched against DataStartTime
    """
    if time_format == "quarter":
        where_clause = 'Id="%d" AND quarter(DataStartTime)="%s"' % (head_id, key)
    elif time_format == "total":
        where_clause = 'Id="%d"' % head_id
    else:
        where_clause = 'Id="%d" AND DATE_FORMAT(DataStartTime, "%s")="%s"' % (head_id, time_format, key)
    update_db(db_data_head_table, 'TempDataHeadId="%d"' % tmp_head_index, where_clause)
    return


def initHeadTableTempDataHeadId():
    """Reset the head table's TempDataHeadId column to -1 for every row."""
    update_db(db_data_head_table, 'TempDataHeadId="-1"', 'True')
    print("初始化Head表TempDataHeadId字段为-1")
    return


def updateHeadTableUncertaintyComponentFileByHeadId(db_table, head_id, uncertainty_component_filename):
    """
    Store the uncertainty-component file name on a head row.

    :param db_table: head table to update
    :param head_id: Id of the row to update
    :param uncertainty_component_filename: value for UncertaintyComponentFile
    :return: 0 always
    """
    update_str = 'UncertaintyComponentFile="%s"' % uncertainty_component_filename
    print("更新不确定度分量文件名称到head表：%s" % update_str)
    update_db(db_table, update_str, 'Id="%d"' % head_id)
    return 0


# 判断HeadId是否在正式表中有评定值
# 方法一：判断GUM和MCM字段是否有值
def checkHeadIdIsPerformed(head_id):
    """
    Return True when the head id has evaluation records in the formal table.

    Counts distinct (GumResult, McmResult) pairs for the head id; more than
    five distinct pairs counts as "evaluated".
    """
    select_str = "DISTINCT GumResult,McmResult"
    where_clause = "headId = %d" % head_id
    print("query_conditon: %s %s" % (select_str, where_clause))
    rows = select_db_v2(db_raw_l2_precessed_table, select_str, where_clause)
    print(rows, len(rows))
    if len(rows) <= 5:
        print("未查到head_id的评定记录！")
        return False
    print("该head_id有评定记录！")
    return True


# 方法二：判断SharedHeadId字段的值：判断文件是否预处理或不确定度评定
# SharedHeadId：0:仅对应原始文件;1:仅数据预处理；2:不确定度评定；3:预处理+不确定度评定
# flag = 0: 判断是否预处理：1或3
# flag = 1: 判断是否评定：2或3
# flag = 2: 判断是否 预处理+评定
#
def checkHeadIdIsPerformedV2(head_id, flag=0):
    """
    Check a head id's processing status via the SharedHeadId column.

    :param flag: 0 -> preprocessed (SharedHeadId 1 or 3);
                 1 -> evaluated (2 or 3);
                 2 -> both (exactly 3)
    :return: True when a matching row exists in the head table
    """
    select_str = "*"
    status_by_flag = {
        1: "(SharedHeadId=2 or SharedHeadId=3)",
        2: "SharedHeadId=3",
    }
    status_clause = status_by_flag.get(flag, "(SharedHeadId=1 or SharedHeadId=3)")
    query_conditon = "Id = %d and " % head_id + status_clause
    print("query_conditon: %s %s" % (select_str, query_conditon))
    ret = select_db_v2(db_data_head_table, select_str, query_conditon)
    print(ret, len(ret))
    if len(ret) >= 1:
        print("该head_id有预处理/评定记录！")
        return True
    print("未查到head_id的预处理/评定记录！")
    return False


def checkHeadIdIsPerformed2309(conf, head_id, flag=0):
    """
    Same check as checkHeadIdIsPerformedV2, but the head table comes from
    conf['DbHeadTable'] instead of the module default.

    :param flag: 0 -> preprocessed (SharedHeadId 1 or 3);
                 1 -> evaluated (2 or 3);
                 2 -> both (exactly 3)
    :return: True when a matching row exists
    """
    db_table = conf['DbHeadTable']
    select_str = "*"
    if flag == 1:
        status_clause = "(SharedHeadId=2 or SharedHeadId=3)"
    elif flag == 2:
        status_clause = "SharedHeadId=3"
    else:
        status_clause = "(SharedHeadId=1 or SharedHeadId=3)"
    query_conditon = "Id = %d and %s" % (head_id, status_clause)
    print("query_conditon: %s %s" % (select_str, query_conditon))
    ret = select_db_v2(db_table, select_str, query_conditon)
    print(ret, len(ret))
    if len(ret) >= 1:
        print("该head_id有预处理/评定记录！")
        return True
    print("未查到head_id的预处理/评定记录！")
    return False


###################################
def saveSysLog(OperateStaff, OperateType, OperateResult, LastTime):
    """
    Persist one operation-log record into bt_sys_op_log.

    :param OperateStaff: operator id (int)
    :param OperateType: operation type description
    :param OperateResult: operation result description
    :param LastTime: elapsed time (int)
    """
    value_str = '"%d", "%s", "%s", "%d"' % (OperateStaff, OperateType, OperateResult, LastTime)
    print("读文件log： %s" % value_str)
    insert_db("bt_sys_op_log", "OperateStaff, OperateType, OperateResult, LastTime", value_str)
    print("保存读取文件记录完成！")


###################################
def dbQueryResult2JsonObj(result, table_name):
    """
    Convert raw DB query rows into a list of JSON-like dicts.

    :param result: rows returned by a select query
    :param table_name: table whose field list maps positions to key names
    :return: [{"id": 1, "name": "", ...}, ...]
    """
    fields = get_table_fields(table_name)
    return [
        {fields[i][0]: row[i] for i in range(len(fields))}
        for row in result
    ]


def dbQueryResult2JsonObjWithKey(result, table_name, key_field):
    """
    Convert raw DB query rows into a dict of JSON-like dicts keyed by a field.

    :param result: rows returned by a select query
    :param table_name: table whose field list maps positions to key names
    :param key_field: field whose value becomes the outer dict key
    :return: {key1: {"id": 1, ...}, key2: {...}} (later rows overwrite earlier
             ones sharing the same key value)
    """
    fields = get_table_fields(table_name)
    keyed = {}
    for row in result:
        record = {fields[i][0]: row[i] for i in range(len(fields))}
        keyed[record[key_field]] = record
    return keyed


def listData2JsonObj(result, cols_name):
    """
    Convert rows of values into a list of dicts using the given column names.

    :param result: iterable of rows; each row must be indexable up to
                   len(cols_name) - 1
    :param cols_name: column names in positional order
    :return: [{"col": value, ...}, ...]
    """
    return [
        {name: row[i] for i, name in enumerate(cols_name)}
        for row in result
    ]


def listData2JsonObjWithHeighAsKey(result, cols_name):
    """
    Convert rows of values into a list of dicts using the given column names.

    NOTE(review): despite the name and the original docstring ("height as
    key"), this behaves exactly like listData2JsonObj and returns a plain
    list -- no height-keyed dict is built. Behavior kept as-is.

    :param result: iterable of rows; each row indexable up to len(cols_name)-1
    :param cols_name: column names in positional order
    :return: [{"col": value, ...}, ...]
    """
    converted = []
    for row in result:
        converted.append({name: row[i] for i, name in enumerate(cols_name)})
    return converted


###################################
# def getUncertaintyComponentFile(dir, deviceName, systemType):
def getUncertaintyComponentFile(param):
    """
    Read an uncertainty-component file and return the component names u1-un.

    Expected file names (examples):
        Uncertainty_Lidar02_RamT.txt / RayD / RayT / RayW / ResD / ResT / ResW
    Parsed header lines:
        # u_x1  method uncertainty from L0 height integration   %
        # u_x2  method uncertainty from L0 time integration     %
        # c_N...  photon-noise channel sensitivity lines

    :param param: dict with optional keys 'uncertainty_component_file' and
                  'device_name'
    :return: {"code": 0, "msg": ..., "data": {...}} on success, where data
             maps "u<idx>" -> description, "uTN<n>" -> channel label, plus
             'total_count'; {"code": -1, ...} when the OS is unsupported
    """
    print("不确定分量接口")
    print(param)
    file_name = param.get('uncertainty_component_file') if param.get('uncertainty_component_file') != None else ""
    device_name = param.get('device_name') if param.get('device_name') != None else ""

    # TODO: this path should come from the configuration database
    UncertaintyComponentFilePath = "/root/measurementProject/system_conf/uncertainty_components_file"

    # local test path (Windows)
    WindowsTestUncertaintyComponentFilePaht = "data/uncertain_component_files/Lidar02"

    file_path = ""
    if checkLinuxOrWindows() == 1:
        file_path = "%s/%s/%s" % (UncertaintyComponentFilePath, device_name, file_name)
    elif checkLinuxOrWindows() == 2:
        file_path = "%s/%s" % (WindowsTestUncertaintyComponentFilePaht, file_name)
    else:
        return {"code": -1, "msg": "操作系统环境无法识别或不支持！"}
    print(file_path)

    start_key_words = "# u_x"
    start_key_words2 = "# c_N"
    # Fix: the file descriptor was never closed (leak); read inside a
    # context manager instead.
    with open(file_path, "r", encoding='UTF-8') as fd:
        lines = fd.readlines()

    uncertainty_component_dict = {}       # parsed components accumulate here
    count = 0
    uTN_count = 0
    for line in lines:
        stripped = line.strip()
        if stripped.startswith(start_key_words):
            count += 1
            arr = stripped[len(start_key_words):].split()
            key = "u%s" % arr[0]
            uncertainty_component_dict[key] = arr[1]
        elif stripped.startswith(start_key_words2):
            count += 1
            uTN_count += 1
            key = "uTN%s" % uTN_count
            uncertainty_component_dict[key] = "通道%d光子噪声不确定度分量" % uTN_count

    uncertainty_component_dict['total_count'] = count
    print(uncertainty_component_dict)
    return {"code": 0, "msg": "不确定度分量获取成功！", "data": uncertainty_component_dict}


def readUncertaintyComponentFile(file_path):
    """
    Load uncertainty components and sensitivity coefficients into a DataFrame.

    File layout: '#'-prefixed header lines, among which "# u_x..." lines count
    uncertainty components, "# c..." lines count sensitivity coefficients, and
    a "# range ..." line names the data columns; the remaining whitespace-
    separated rows are per-height values.

    :param file_path: path to the component file (UTF-8)
    :return: (DataFrame indexed by "range", u_count, c_count); the DataFrame
             is None when the file contains no data rows
    """
    print("readUncertaintyComponentFile: %s" % file_path)

    start_key_words = "# u_x"
    start_key_words2 = "# range"
    start_key_words3 = "# c"

    count = 0          # header lines to skip when re-reading with read_csv
    u_count = 0        # number of "# u_x" component lines
    c_count = 0        # number of "# c" sensitivity lines
    data_cols_arr = []
    # Fix: the file descriptor was never closed (leak); read inside a
    # context manager instead.
    with open(file_path, "r", encoding='UTF-8') as fd:
        lines = fd.readlines()

    # Fix: was previously unbound (NameError at return) when the file had no
    # data rows; now explicitly None in that case.
    uncertainty_component_data = None
    for line in lines:
        stripped = line.strip()
        if stripped.startswith(start_key_words):
            u_count += 1

        if stripped.startswith(start_key_words3):
            c_count += 1

        if stripped.startswith(start_key_words2):
            data_cols_arr = stripped[2:].split()
            print(data_cols_arr)
            count += 1
            continue

        if stripped.startswith('#'):
            count += 1
            continue

        # First non-header line: parse the whole data section in one pass.
        if len(data_cols_arr) == 0:
            print("读取不确定度分量文件出错：未找到数据列头！")
        uncertainty_component = pd.read_csv(file_path, skiprows=count, header=None, names=data_cols_arr, sep=r'\s+')
        # the first column ("range", i.e. height) becomes the index
        uncertainty_component_data = uncertainty_component.set_index("range", drop=True)
        # report whether heights (index values) are unique
        print(uncertainty_component_data.index.is_unique)
        print(uncertainty_component_data.head())
        print(uncertainty_component_data.columns)
        print("读取不确定度分量文件结束..")
        print("u_count = %d, c_count = %d" % (u_count, c_count))
        break

    return uncertainty_component_data, u_count, c_count


##############################################################
# 预处理、数据分析算法相关
def getTimeFormat(time, type):
    """
    Normalize a timestamp to its bucket key for the chosen time resolution.

    :param time: timestamp to bucket
    :param type: 0:hour 1:day 2:week 3:month 4:quarter 5:year 6:whole range;
                 any other value returns *time* unchanged (with a warning)
    :return: bucket key (or the original value for unsupported types)
    """
    if type == 0:
        return get_time_hour(time)
    if type == 1:
        return get_time_day(time)
    if type == 2:
        return get_time_week(time)
    if type == 3:
        return get_time_month(time)
    if type == 4:
        return get_time_quarter(time)
    if type == 5:
        return get_time_year(time)
    if type == 6:
        return "total"
    print("注意：时间分辨率统一化格式不支持！按默认时间格式进行处理！")
    return time


def getTimeFormatStr(type):
    """
    Return the DATE_FORMAT / bucket pattern for the chosen time resolution.

    :param type: 0:hour 1:day 2:week 3:month 4:quarter 5:year 6:whole range
    :return: a MySQL DATE_FORMAT pattern, or the sentinel "quarter"/"total";
             unsupported values fall back to '%Y-%m-%d %T' with a warning
    """
    format_by_type = {
        0: "%Y-%m-%d %H:00:00",
        1: "%Y-%m-%d",
        2: "%Y-%VW",
        3: "%Y-%m",
        4: "quarter",
        5: "%Y",
        6: "total",
    }
    if type not in format_by_type:
        print("注意：时间分辨率统一化格式不支持！按默认时间格式进行处理！")
        return '%Y-%m-%d %T'
    return format_by_type[type]


def getTimeRange(type, step, key):
    """
    Compute the (start, end) datetime range covered by a time bucket key.

    :param type: time resolution: 0:hour 1:day 2:week 3:month 4:quarter
                 5:year. Note type 6 ("whole range") is NOT handled here and
                 falls through to the unsupported branch.
    :param step: number of resolution units the range spans
    :param key: bucket key, e.g. "2019-07-30 16:00:00" (hour; datetime also
                accepted), "2019-07-30" (day), "2019-30W" (week), "2019-07"
                (month), "2019-1Q" (quarter), "2019" (year)
    :return: (start_time, end_time) datetimes, end inclusive (one minute
             before the next bucket); on unsupported input returns the
             sentinel string "0001-01-01 00:00:00" for both
    """
    default_time = "0001-01-01 00:00:00"
    start_time = ""
    end_time = ""
    if type == 0:
        # key = "2019-07-30 16:00:00"
        if isinstance(key, datetime.datetime):
            start_time = key
        elif isinstance(key, str):
            start_time = datetime.datetime.strptime(key, "%Y-%m-%d %H:%M:%S")
        end_time = start_time + datetime.timedelta(hours=step) - datetime.timedelta(minutes=1)
    elif type == 1:
        # key = "2019-07-30" (datetime keys are first reduced to a day string)
        if isinstance(key, datetime.datetime):
            key = get_time_day(key)
        start = key + " 00:00:00"
        start_time = datetime.datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
        end_time = start_time + datetime.timedelta(days=step) - datetime.timedelta(minutes=1)
    elif type == 2:
        # key = "2019-07W" -- year in chars 0-3, zero-stripped week in 5-7
        year = key[0:4]
        week = key[5:7].lstrip('0')
        print(int(year), int(week))
        start,end = get_date_from_week(int(year), int(week))
        start_time = datetime.datetime.strptime(str(start) + " 00:00:00", "%Y-%m-%d %H:%M:%S")
        # NOTE(review): step*6 spans six days per step, not a full 7-day
        # week -- confirm this is intended.
        end_time = start_time + datetime.timedelta(days=step*6) - datetime.timedelta(minutes=1)
    elif type == 3:
        # key = "2019-07"
        start = key + "-01 00:00:00"
        start_time = datetime.datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
        end_time = start_time + relativedelta(months=step) - datetime.timedelta(minutes=1)
    elif type == 4:
        # key = "2019-1Q"
        year = key[0:4]
        quarter_num = int(key[5:6])
        print(year, quarter_num)
        print(quarter_num)
        # NOTE(review): quarter numbers appear 0-based here (0 -> Jan,
        # 1 -> Apr, ...) -- confirm this matches get_time_quarter's output.
        if quarter_num == 0:
            start = year + "-01-01 00:00:00"
        elif quarter_num == 1:
            start = year + "-04-01 00:00:00"
        elif quarter_num == 2:
            start = year + "-07-01 00:00:00"
        elif quarter_num == 3:
            start = year + "-10-01 00:00:00"
        else:
            print("注意：时间分辨率统一化格式不支持！按默认时间格式进行处理！")
            return default_time, default_time
        start_time = datetime.datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
        end_time = start_time + relativedelta(months=step*3) - datetime.timedelta(minutes=1)
    elif type == 5:
        # key = "2019"
        start = key + "-01-01 00:00:00"
        start_time = datetime.datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
        end_time = start_time + relativedelta(years=step) - datetime.timedelta(minutes=1)
    else:
        print("注意：时间分辨率统一化格式不支持！按默认时间格式进行处理！")
        return default_time, default_time

    return start_time, end_time


def checkMaxTimeRange(start_time_max, end_time_max, start_time, end_time):
    """
    Widen a running [start_time_max, end_time_max] envelope with a new range.

    An empty-string bound is treated as "unset" and initialized from the new
    range. Works for any comparable time representation (str or datetime).

    :return: (earliest start seen, latest end seen)
    """
    start_time_max = start_time if start_time_max == "" else min(start_time_max, start_time)
    end_time_max = end_time if end_time_max == "" else max(end_time_max, end_time)
    return start_time_max, end_time_max


################################################
def getHeighRange(data_list_json):
    """
    Collect the sorted set of all heights present in the cached data
    (used to select matching standard-profile heights).

    :param data_list_json: {"head_id1": [row, ...], ...} where each row has a
                           'Height' field convertible to float
    :return: sorted list of unique heights (floats)
    """
    unique_heights = {
        float(row['Height'])
        for rows in data_list_json.values()
        for row in rows
    }
    height_standard_list = sorted(unique_heights)
    print("统计需要查找的廓线高度：")
    print(height_standard_list)
    return height_standard_list


def getStandardProbeData(height_list):
    """
    Fetch standard-profile rows for the given heights, keyed by height (km).

    Heights are converted to meters ("%.2f") to match the Altitude column in
    bt_standard_data; result keys convert Altitude back to km ("%.2f").

    :param height_list: heights in km
    :return: {"<height km>": row_dict, ...}; empty dict when nothing matches
    """
    db_standard_data_table = "bt_standard_data"
    ret_height_json = {}

    altitude_values = ["%.2f" % (h * 1000.0) for h in height_list]
    query_conditon = 'Altitude IN (%s)' % ', '.join(altitude_values)

    print("query_conditon: %s" % query_conditon)
    rows = select_db(db_standard_data_table, query_conditon)
    if len(rows) > 0:
        for item in dbQueryResult2JsonObj(rows, db_standard_data_table):
            key = "%.2f" % (float(item['Altitude']) / 1000)
            ret_height_json[key] = item
        print("获取标准廓线的高度数量：%d" % len(ret_height_json))
    else:
        print("Error: 未检测到标准廓线中对应的高度信息")

    print("筛选的标准廓线数据：")
    print("筛选的标准廓线高度：")
    print(ret_height_json.keys())
    return ret_height_json


def getMaxHeightAndMinHeight(raw_data):
    """
    Return the minimum and maximum 'Height' found in a profile.

    :param raw_data: non-empty list of dicts, each with a 'Height' field
                     convertible to float
    :return: (min_height, max_height) as floats
    """
    max_height = float(raw_data[0]['Height'])
    min_height = float(raw_data[0]['Height'])
    for t in range(len(raw_data)):
        height = float(raw_data[t]['Height'])
        if height - max_height > EPSINON:
            max_height = height
        elif min_height - height > EPSINON:
            # Fix: the old condition (height - min_height < EPSINON) also
            # fired for heights slightly ABOVE the current minimum, replacing
            # the true minimum with a larger value; compare symmetrically.
            min_height = height
    print(min_height, max_height)
    return min_height, max_height


# 取所有廓线高度的并集，确定最大最小高度
def getGlobalMaxHeightAndMinHeight(min_height_global, max_height_global, min_height, max_height):
    """
    Widen the global [min, max] height envelope with a new profile's range
    (union semantics): keep the smaller minimum and the larger maximum, with
    EPSINON tolerance against float noise.

    :return: (min_height_global, max_height_global) updated
    """
    if max_height - max_height_global > EPSINON:
        max_height_global = max_height
    if min_height_global - min_height > EPSINON:
        # Fix: the old condition (min_height - min_height_global < EPSINON)
        # also fired when min_height was slightly ABOVE the global minimum,
        # raising it incorrectly; compare symmetrically.
        min_height_global = min_height

    return min_height_global, max_height_global


# 取所有廓线高度的交集，确定最大最小高度
def getGlobalMaxHeightAndMinHeightByIntersection(min_height_global, max_height_global, min_height, max_height):
    """
    Narrow the global [min, max] height envelope with a new profile's range
    (intersection semantics): keep the larger minimum and the smaller maximum.

    :return: (min_height_global, max_height_global); (0, 0) when the
             intersection is empty
    """
    if max_height_global - max_height > EPSINON:
        # Fix: the old condition (max_height - max_height_global < EPSINON)
        # also replaced the global max with a slightly LARGER value; compare
        # symmetrically so the intersection can only shrink.
        max_height_global = max_height
    if min_height - min_height_global > EPSINON:
        min_height_global = min_height

    if min_height_global - max_height_global > EPSINON:
        # fixed duplicated-word typo in the original warning ("统一统一化")
        print("高度统一化（取交集）：%.6f, %.6f" % (min_height_global, max_height_global))
        return 0,0

    return min_height_global, max_height_global


def fillHeightWithNoneValue(data_list_json, min_height_global, max_height_global, step=1.0):
    """
    Pad each profile so it covers [min_height_global, max_height_global].

    For every head_id's row list, heights missing at the LOW end and HIGH end
    of the interval (increment `step`) are filled with deep-copied rows whose
    'U20' field is set to the NoneStr sentinel. Gaps in the MIDDLE of a
    profile are NOT filled (known limitation, see note below).

    :param data_list_json: {"head_id": [row, ...]} -- rows presumably sorted
                           ascending by 'Height'; TODO confirm with callers
    :param min_height_global: lower bound of the target height range
    :param max_height_global: upper bound of the target height range
    :param step: height increment between filled rows
    :return: new dict with padded row lists; the input is returned unchanged
             when both bounds are 0.0
    """
    if min_height_global - 0.0 <= EPSINON and max_height_global - 0.0 <= EPSINON:
        print("[Warning]fillHeightWithNoneValue：填充函数的最大最小高度参数为0.0")
        return data_list_json

    data_list_json_new = {}
    for head_id in data_list_json.keys():
        raw_data = data_list_json[head_id]
        raw_data_new = []
        min_height = min_height_global
        max_height = max_height_global
        # NOTE: only the two ends are padded; heights missing in the middle
        # of the profile are not filled -- candidate for improvement.
        for t in range(len(raw_data)):
            if t == 0:
                height = float(raw_data[t]['Height'])
                while height - min_height > EPSINON:
                    new_heigh = copy.deepcopy(raw_data[t])     # seed a filler row (deep copy so the source row is untouched)
                    new_heigh['Height'] = min_height
                    new_heigh['U20'] = NoneStr
                    raw_data_new.append(new_heigh)

                    min_height += step

            if t == len(raw_data) - 1:
                raw_data_new.append(raw_data[t])            # append the last real row first
                height = float(raw_data[t]['Height']) + step
                while height - max_height <= EPSINON:       # then pad filler rows up to the global max
                    new_heigh = copy.deepcopy(raw_data[t])  # seed a filler row
                    new_heigh['Height'] = height
                    new_heigh['U20'] = NoneStr
                    raw_data_new.append(new_heigh)

                    height += step
                continue

            raw_data_new.append(raw_data[t])

        data_list_json_new[head_id] = raw_data_new

    return data_list_json_new


# 去掉数据比对结果中的异常值：
#  1. 密度、温度、钠原子密度、铁原子密度数据小于0的替换为空值（如numpy.nan），
#  2. 风速小于-1000替换为空值
def dataFilterAbnormalValue(data_list_json):
    """
    Drop abnormal rows from comparison results.

    Rules (decided by the FIRST row's ValueType per key):
      * ValueType in {0, 4, 5, 6} (temperature / density families): drop rows
        with Value < 0
      * ValueType in {2, 3, 7} (wind): drop rows with Value < -1000
      * NOTE(review): any other ValueType yields an EMPTY list for that key
        (every row dropped silently) -- confirm this is intended. Also
        assumes each list is non-empty (reads data_list[0]).

    :param data_list_json: {key: [row, ...]} with 'ValueType'/'Value' fields
    :return: new dict with the filtered row lists
    """
    removed_any = False
    data_list_json_filtered = {}
    for key, data_list in data_list_json.items():
        value_type = data_list[0]['ValueType']

        if value_type in [0, 4, 5, 6]:
            kept = [row for row in data_list if not row['Value'] < 0]
            removed_any = removed_any or len(kept) != len(data_list)
        elif value_type in [2, 3, 7]:
            kept = [row for row in data_list if not row['Value'] < -1000]
            removed_any = removed_any or len(kept) != len(data_list)
        else:
            kept = []

        data_list_json_filtered[key] = kept

    if removed_any:
        print("比对结果去除了异常值："
              "1.密度、温度、钠原子密度、铁原子密度数据小于0的替换为空值"
              "2. 风速小于-1000替换为空值")

    return data_list_json_filtered


############################################################
# 202309新数据逻辑
def getHeadFullInfoByTableNameAndHeadId(db_table_name, head_id):
    """
    Fetch the full head record for a head id from the given head table.

    :param db_table_name: head table to query
    :param head_id: Id of the head row
    :return: the head row as a dict, or "" when not found
    """
    query_conditon = 'Id = "%d"' % head_id  # raw data with this headId
    print("query_conditon: %s" % query_conditon)
    ret = select_db(db_table_name, query_conditon)
    if len(ret) > 0:
        ret = dbQueryResult2JsonObj(ret, db_table_name)
        return ret[0]
    else:
        # Fix: the error message previously reported the module default table
        # (db_data_head_table) instead of the table actually queried.
        print("Error: 文件头信息表 %s 中未找到head_id且已录入状态数据" % db_table_name)

    return ""


# 通用功能：python画图，界面展示，保存图片信息到表bt_data_figure
# Python画图路径等信息存入数据库
def saveDrawFigureResultIntoDB(param):
    """
    Insert a generated-figure record into bt_data_figure.

    :param param: dict with keys BatchId, TaskType (int), FigureType (int),
                  FigureName, FigureLocation, DataType, ValueType, Remark
    :return: the inserted row id, or an error string when insert_db
             returned -1
    """
    print("画图结果保存：")
    cols = ["BatchId", "TaskType", "FigureType", "FigureName", "FigureLocation", "DataType", "ValueType", "Remark"]
    value_str = '"%s", "%d", "%d", "%s", "%s", "%s", "%s", "%s"' % (
        param['BatchId'], param['TaskType'], param['FigureType'], param['FigureName'],
        param['FigureLocation'], param['DataType'], param['ValueType'], param['Remark'])

    print("画图结果保存参数： %s" % value_str)
    head_id = insert_db(db_table_draw_figure, ','.join(cols), value_str)
    print("画图结果表插入ID： %d" % head_id)
    if head_id == -1:
        return "[Error]：画图结果插入数据库错误！"
    return head_id


def debugPrint(raw_data):
    """Debug helper: print Height, Value and U20 of every record."""
    for record in raw_data:
        print(record['Height'], record['Value'], record['U20'])


################################################
# 不确定度报告生成相关函数
# 抽取数据中的列，形成向量，画图使用
# Uncertainty-report helper: extract column vectors for plotting.
def getDataByColumnsV2(data_list):
    """
    Build (Height, Value) column vectors from a list of record dicts.

    For wind fields (ValueType 2, 3 or 7), values not greater than -500
    are treated as invalid and replaced with np.nan so plots leave a gap.

    Returns:
        (colX, colY) tuple of lists, or an error string when data_list
        is not a list.
    """
    print(type(data_list))
    if not isinstance(data_list, list):
        print("Error: 数据类型错误，无法获取列向量!")
        return "Error: 数据类型错误，无法获取列向量"

    colX = []
    colY = []
    for record in data_list:
        colX.append(str_to_float(record['Height']))
        value = str_to_float(record['Value'])
        if record['ValueType'] in (2, 3, 7) and not value > -500:
            # Wind (radial/zonal) value below -500 is invalid -> np.nan.
            value = np.nan
        colY.append(value)

    return (colX, colY)


def getGumResultVectorByColumnsList(data_list):
    """
    Build Height/Value/GumResult/GumResultExtended column vectors for
    uncertainty-report plotting.

    For wind fields (ValueType 2, 3 or 7), values not greater than -500
    are treated as invalid and replaced with np.nan.

    Returns:
        (colX, colY, colGum, colGumExtend) tuple of lists, or an error
        string when data_list is not a list.
    """
    print(type(data_list))
    if not isinstance(data_list, list):
        print("Error: 数据类型错误，无法获取列向量!")
        return "Error: 数据类型错误，无法获取列向量"

    colX, colY, colGum, colGumExtend = [], [], [], []
    for record in data_list:
        colX.append(str_to_float(record['Height']))
        value = str_to_float(record['Value'])
        if record['ValueType'] in (2, 3, 7) and not value > -500:
            # Invalid wind value -> np.nan so the plot leaves a gap.
            value = np.nan
        colY.append(value)
        colGum.append(str_to_float(record['GumResult']))
        colGumExtend.append(str_to_float(record['GumResultExtended']))
    return (colX, colY, colGum, colGumExtend)


# 获取不为NULL的U值（根据第一个高度判断）
# Collect the non-NULL uncertainty components (judged on one record).
def getNotNullU(data_obj, u_list, u_component_name_dict):
    """
    Select the keys of data_obj that are listed in u_list and whose
    values are not null (per isNone), together with their display names.

    Returns:
        (component_keys, component_display_names) — two parallel lists.
    """
    keys_found = []
    names_found = []
    for field, field_value in data_obj.items():
        if field in u_list and not isNone(field_value):
            keys_found.append(field)
            names_found.append(u_component_name_dict[field])
    return keys_found, names_found


# 获取高度的最小值、最大值、中间值对应不确定度分量画图
# raw_data已按高度排序
# Pick uncertainty-component values at the min/middle/max height for plotting.
# raw_data is assumed to be sorted by height already.
def getNotNullUValues(raw_data, u_list):
    """
    Extract the uncertainty-component values at the minimum, middle and
    maximum height of raw_data. Null components become 0.0.

    Args:
        raw_data: list of record dicts, sorted by 'Height'.
        u_list: keys of the uncertainty components to extract.

    Returns:
        (h_min_u_values, h_middle_u_values, h_max_u_values,
         [min_height, mid_height, max_height]); the value lists are empty
        and the heights are 0.0 when raw_data is empty.
    """
    h_min_u_values = []
    h_middle_u_values = []
    h_max_u_values = []
    # Bug fix: these were only assigned inside the non-empty branch, so an
    # empty raw_data previously raised NameError at the return statement.
    min_height = mid_height = max_height = 0.0

    if len(raw_data) > 0:
        # Renamed from min/max/mid to avoid shadowing the builtins.
        first_idx = 0
        last_idx = len(raw_data) - 1
        mid_idx = len(raw_data) // 2
        min_height = float(raw_data[first_idx]['Height'])
        mid_height = float(raw_data[mid_idx]['Height'])
        max_height = float(raw_data[last_idx]['Height'])
        for u in u_list:
            h_min_u_values.append(raw_data[first_idx][u] if not isNone(raw_data[first_idx][u]) else 0.0)
            h_middle_u_values.append(raw_data[mid_idx][u] if not isNone(raw_data[mid_idx][u]) else 0.0)
            h_max_u_values.append(raw_data[last_idx][u] if not isNone(raw_data[last_idx][u]) else 0.0)
    else:
        print("Error: 探测数据为空!")
    return h_min_u_values, h_middle_u_values, h_max_u_values, [min_height, mid_height, max_height]


# 判断数据库筛选的值是否为None
def isNone(value):
    """Return True when a DB-sourced value is None or a null-like string."""
    # Membership test covers the identity check for None plus the
    # string sentinels the database layer may hand back.
    return value in (None, "nan", "None", "", "NULL")

