#coding=utf-8
import sys
import xlrd
import re
import os
import time
import random
import pandas as pd
import numpy as np
from utils.mylib_db import *
from utils.mylib_utils import *
from measurement.measurement_utils import *
from concurrent.futures import ProcessPoolExecutor, as_completed
import multiprocessing

'''
读取原始探测文件，解析头部信息（不包括数据），并支持从L2数据获取对应的L0数据。
数据库采用分表形式，L0和L2表，不同探测体制分表。
对于处理后的数据，采用同一个表存储（和原来逻辑一致）

TODO:
1. 读取L2/L0文件并入库；
//2. 判断文件名合规性；
//3. 自动判断数据列数；
//4. 提供外部RestFul接口调用
//5. 保存读取文件记录到log表
6. 添加预处理算法
//7. 检查log记录，不重复读取原始数据；
8. head数据和站点StationId、设备DeviceId进行关联（查表）


'''
# Maximum number of lines that can contain file-header information
# (used to read the whole header in one pass).
# Applies to raw lidar files (2000 was the empirical value).
HEAD_MAX_LINE = 5000
db_table_data_read_log = "bt_data_read_log"
TASK_ID = ""        # Task id shared with the calling program, stored in the database
MAX_LEN = 50000     # Max rows per database insert (bulk-insert batching)

# Density conversion factors.
# NOTE(review): Avogadro's number is 6.0221e23 — verify 6.0022e23 is intentional.
FACTOR1 = 28.959 # 1/cm^3 * 28.959/6.0022e23*1e3
FACTOR2 = 6.0022e23
FACTOR3 = 1e3

# Top-level directory of the file storage system; a relative path by default.
FILE_UP_LEVEL_DIR = "raw_data"

DataTypeWindKeywords = ["HWLS", "HWSM", "HWSL", "WCSM"]     # wind-speed data types
# Columns that are never persisted.
# NOTE(review): "TemEr" appears twice in this list — probably a copy/paste slip.
cols_not_save = ["DenEr", "TemEr", "DenAllEr", "ZonalWEr", "MeridiWEr", "ZonalWMLErr", "MeridiWMLErr", "DenErr",
                 "TempAllEr", "TemEr", "RamTemEr", "ZonalWErr", "MeridiWErr", "ZonalWSLErr", "MeridiWSLErr",
                 "TemperatureUncertainty(K)"]
# Columns whose name contains any of these keywords are not saved either.
cols_keywords_not_save = ["Uncertainty"]


# Mapping between the old and new probe system types.
# NOTE(review): the 372nm_Ry_* entries map to "532P" and 372nm_Fe_* to "589P" —
# confirm these cross-wavelength mappings are intentional.
ProbeDataSystemTypeL0 = {
    "532nm_Ry_DV": "532P",
    "532nm_Ry_DN": "532P",
    "532nm_Ry_DE": "532P",
    "532nm_Ry_DS": "532P",
    "532nm_Ry_DW": "532P",
    "372nm_Ry_DV": "532P",
    "372nm_Ry_DN": "532P",
    "372nm_Ry_DE": "532P",
    "372nm_Ry_DS": "532P",
    "372nm_Ry_DW": "532P",
    "589nm_Na_DV": "589P",
    "589nm_Na_DN": "589P",
    "589nm_Na_DE": "589P",
    "589nm_Na_DS": "589P",
    "589nm_Na_DW": "589P",
    "372nm_Fe_DN": "589P",
    "372nm_Fe_DE": "589P",
    "372nm_Fe_DS": "589P",
    "372nm_Fe_DW": "589P",
    "532nm_Rm_DV": "532R",
    "355nm_Rm_DV": "532R",
    "355nm_Ry_DE": "",
    "355nm_Ry_DN": "",
    "355nm_Ry_DV": "",
}
# New-format (2023-09) L2 type letter -> legacy four-letter type codes.
ProbeDataSystemTypeL2 = {
    "T": ["TSVP", "TUSM", "TMSL", "TCSM"],
    "W": ["HWLS", "HWSM", "HWSL", "WCSM"],
    "D": ["DUSM", "NDNA", "DCSM"],
    "P": [],
    "N": [],
    "F": []
}
# Chinese display names for the L2 type letters
# (T temperature, W wind, P pressure, D density, N sodium density, F iron density).
ProbeDataSystemTypeL2ZhName = {
    "T": "温度",
    "W": "风场",
    "P": "气压",
    "D": "大气密度",
    "N": "钠原子密度",
    "F": "铁原子密度"
}


# Return the names of all previously ingested raw files (read-log records),
# used to check whether a raw data file was already imported.
def readFileLog():
    """
    Fetch every row of the read-log table and return the list of file names.

    Returns:
        list: the 'FileName' field of each record in bt_data_read_log.
    """
    ret = select_db(db_table_data_read_log)
    print("读取记录条数: %d" % len(ret))
    ret = dbQueryResult2JsonObj(ret, db_table_data_read_log)
    # Collect the file name of every record that was already read.
    read_file_arr = [row['FileName'] for row in ret]

    print("历史读取文件Log记录：")
    if len(read_file_arr) >= 2:
        print(read_file_arr[0], read_file_arr[1])
    return read_file_arr


def checkFileName(file_path, user_id):
    """
    Validate a raw-data file name (old format); a name that cannot be parsed is
    invalid (wrong number of '_'-separated fields, unknown data/system type).

    Returns:
        list: [bool, str] — element 0 is the validity flag; element 1 is the
        probe system type taken from the name (non-empty only when valid).
    """
    print(file_path)
    # Separate directory and file name.
    directory, base_name = os.path.split(file_path)
    directory = directory.replace("\\", "/")
    print("路径：%s; 文件名：%s" % (directory, base_name))

    parts = re.split("_+", base_name)
    print(parts)
    # Wrong number of underscore-separated fields.
    if len(parts) != 7:
        print("文件名解析错误：%s" % file_path)
        saveReadFileLog(user_id, directory, base_name, -1, "", 1, 0, 0, "文件名解析错误")
        return [False, ""]

    # The data-level field must be L0 or L2.
    if parts[3] not in ("L0", "L2"):
        print("文件名数据类型不是L0或L2!")
        saveReadFileLog(user_id, directory, base_name, -1, "", 1, 0, 0, "文件名数据类型不是L0或L2")
        return [False, ""]

    # L2 files must carry one of the known L2 system types.
    l2_types = ["TSVP","HWLS","DUSM","TUSM","HWSM","NDNA","TMSL","HWSL","DCSM","TCSM","WCSM"]
    if parts[3] == "L2" and parts[2] not in l2_types:
        print("L2文件名探测体制不正确!")
        print("L2文件名探测体制需为：%s" % l2_types)
        saveReadFileLog(user_id, directory, base_name, -1, "", 1, 0, 0, "L2文件名探测体制不正确")
        return [False, ""]

    # L0 files must carry one of the known L0 system types.
    l0_types = ["532P","589P","532R","374P"]
    if parts[3] == "L0" and parts[2] not in l0_types:
        print("L0文件名探测体制不正确!")
        print("L0文件名探测体制需为：532P/589P/532R/374P")
        saveReadFileLog(user_id, directory, base_name, -1, "", 1, 0, 0, "L0文件名探测体制不正确")
        return [False, ""]

    return [True, parts[2]]


def checkFileName2309(file_path, user_id):
    """
    Validate the 2023-09 file-name format; a name that cannot be parsed is
    invalid (wrong number of '_'-separated fields, unknown data/system type).

    Example file names:
        # AL_LD1_355nm_Rm_DV_20230821140045_L0.dat
        # AL_LD1_355nm_Rm_DV_T_20230821140045_L2.dat (not handled for now)
        # AL_LD1_D_20230821140045_L2.dat
    Returns:
        [False, ""] on any validation failure, or
        [True, sys_type, data_type, store_path, db_table_head_name, system_type, file_time]
        sys_type            probe system type, e.g. 355nm_Rm_DV or T
        data_type           data level: L0 or L2
        store_path          (partial) server storage path, relative
        db_data_head_table  DB table holding file headers, e.g. bt_data_head_s1_l0, bt_data_head_s1_l2
        system_type         system code, e.g. LD1, LD2, ...
        file_time           date carried in the file name, e.g. 2023-08-21
    """
    print(file_path)
    # Separate directory and file name.
    (filepath, tempfilename) = os.path.split(file_path)
    filepath = filepath.replace("\\", "/")
    print("路径：%s; 文件名：%s" % (filepath, tempfilename))
    # Separating the extension is not needed here.
    #(filename, extension) = os.path.splitext(tempfilename)
    #print("文件名：%s; 扩展名：%s" % (filename, extension))

    sys_type = ""        # probe system type (new format)
    data_type = ""      # data level (L0 or L2)

    arr = re.split("_+", tempfilename)
    print(arr)
    # Branch on the trailing field ("L0.dat" / "L2.dat").
    if arr[len(arr)-1].split('.')[0] == "L0":
        data_type = "L0"
        # Validate by the number of name fields.
        # NOTE(review): when len(arr) == 6 the name has no timestamp field, so
        # arr[5] below would not be a timestamp — confirm 6-field L0 names exist.
        if len(arr) != 7 and len(arr) != 6:
            print("文件名解析错误：%s" % file_path)
            saveReadFileLog(user_id, filepath, tempfilename, -1, "", 1, 0, 0, "文件名解析错误")
            return [False, ""]
        # Derive the probe system type from the name, e.g. 355nm_Rm_DV.
        sys_type = "%s_%s_%s" % (arr[2], arr[3], arr[4])
        if sys_type not in ProbeDataSystemTypeL0.keys():
            print("L0文件名探测体制不正确!")
            print("L0文件名探测体制需为：%s" % ProbeDataSystemTypeL0.keys())
            saveReadFileLog(user_id, filepath, tempfilename, -1, "", 1, 0, 0, "L0文件名探测体制不正确")
            return [False, ""]
        file_time_str = arr[5]
        sys_type_short = "%s_%s" % (arr[3], arr[4])      # used to build the storage path

    elif arr[len(arr)-1].split('.')[0] == "L2":
        data_type = "L2"
        # Validate by the number of name fields.
        if len(arr) != 5 and len(arr) != 6:
            print("文件名解析错误：%s" % file_path)
            saveReadFileLog(user_id, filepath, tempfilename, -1, "", 1, 0, 0, "文件名解析错误（注：过程L2数据暂不支持，即下划线分割为8段）")
            return [False, ""]
        # Derive the probe system type from the name, e.g. D / F / T.
        sys_type = "%s" % arr[2]
        if sys_type not in ProbeDataSystemTypeL2.keys():
            print("L2文件名探测体制不正确!")
            print("L2文件名探测体制需为：%s" % ProbeDataSystemTypeL2.keys())
            saveReadFileLog(user_id, filepath, tempfilename, -1, "", 1, 0, 0, "L2文件名探测体制不正确")
            return [False, ""]
        file_time_str = arr[3]
        sys_type_short = sys_type       # used to build the storage path
    else:
        # Data level is neither L0 nor L2 (already filtered once upstream).
        print("文件名数据类型不是L0或L2!")
        saveReadFileLog(user_id, filepath, tempfilename, -1, "", 1, 0, 0, "文件名数据类型不是L0或L2")
        print("文件名格式错误：%s" % file_path)
        return [False, ""]

    # System code, e.g. LD1, LD2, LD3.
    system_type = arr[1]

    # Date and hour carried in the file name.
    file_time = "%s-%s-%s" % (file_time_str[0:4], file_time_str[4:6], file_time_str[6:8])
    file_timehour = "%s" % file_time_str[8:10]

    ### Build the (relative) storage path for the file.
    sep = os.path.sep
    # NOTE(review): getSystemTypeByCode2309 is commented out in this module —
    # presumably provided by the starred utils imports; confirm.
    ret_arr = getSystemTypeByCode2309(system_type)
    # system / date / hour / L0-L2 / system type,
    # e.g. System1/2023-08-21/L2D
    store_path = "%s%s%s%s%s%s%s%s%s" % (ret_arr[0], sep, file_time, sep, file_timehour, sep, data_type, sep, sys_type_short)

    ### 3. Sharded table name in the database.
    # Base name bt_data_head, suffixed s1/s2/s3/s4/s5 _ l0/l2.
    db_table_head_name = "bt_data_head_%s_%s" % (ret_arr[1], data_type.lower())
    # print("数据库文件信息表名：", db_table_head_name)

    return [True, sys_type, data_type, store_path, db_table_head_name, system_type, file_time]


# 新数据的对照关系
# # system_code从数据文件名中获取，如：AL_LD2_T_20230821140049_L2.dat，AL_LD2_532nm_Rm_DV_20230821145059_L0.dat
# # ret[0]用于文件存储目录；ret[1]用于数据库分表命名
# def getSystemTypeByCode2309(system_code):
#     ret = ["", ""];
#     if system_code == "LD1":
#         ret = ["System1", "s1"]
#     elif system_code == "LD2":
#         ret = ["System2", "s2"]
#     elif system_code == "LD3" or system_code == "L3A" or system_code == "L3B":
#         ret = ["System3", "s3"]
#     elif system_code == "LD4":
#         ret = ["System4", "s4"]
#     elif system_code == "LD5":
#         ret = ["System5", "s5"]
#     return ret


# Parse the timestamp from a raw file name (real-data naming scheme).
def getTimeStrFromFileName(file_name):
    """
    Extract 'YYYY-MM-DD HH:MM:SS' from a name shaped like
    '<prefix>_YYYYMMDD_HHMMSS_...'.

    Args:
        file_name: the bare file name (not a full path).

    Returns:
        str: the formatted timestamp.
    """
    parts = re.split("_+", file_name)
    print(parts)
    date_part = parts[1]
    # Renamed from `time` to avoid shadowing the imported time module.
    time_part = parts[2]
    return "%s-%s-%s %s:%s:%s" % (date_part[0:4], date_part[4:6], date_part[6:8],
                                  time_part[0:2], time_part[2:4], time_part[4:6])


# Dictionary lookup of the probe data type (old-format L2 files).
# 0 temperature, 1 density, 2 wind, 3 pressure.
def checkDataType(datatype_str):
    """
    Classify an old-format L2 data-type string by keyword.

    Args:
        datatype_str: type string taken from the file name / header.

    Returns:
        int: 0 temperature, 1 density, 2 wind, 3 pressure (currently no data),
        -1 unknown.
    """
    keyword_groups = [
        # 0: temperature
        (0, ("TSVP", "TMSL", "TUSM", "TCSM", "Temperature")),
        # 1: density
        (1, ("DUSM", "NDNA", "DCSM", "Density")),
        # 2: wind (the file name cannot distinguish zonal from meridional wind,
        # and an L2 file may contain both; refined to 2 zonal / 3 meridional
        # once the data columns are available).
        # "HWLS" added: it is listed as a wind type in DataTypeWindKeywords and
        # in the L2 system-type table but was missing here.
        (2, ("HWLS", "HWSL", "HWSM", "WCSM", "wind")),
        # 3: pressure — no data yet.
    ]
    ret = -1
    for code, keywords in keyword_groups:
        if any(kw in datatype_str for kw in keywords):
            ret = code
            break

    print("checkDataType: %s - %d" % (datatype_str, ret))

    return ret


# New-format L2 probe data types:
# 0: T temperature, 1: P pressure, 2: W wind (zonal), 3: W wind (meridional),
# 4: D atmospheric density, 5: N sodium density, 6: F iron density.
def checkL2DataTypeByFileName(l2_file_name_type_keyword):
    """
    Map the new-format L2 type letter from the file name to a numeric code.

    Returns -1 for an unknown letter. 'W' always yields 2 here; distinguishing
    zonal (2) from meridional (3) wind happens later. # TODO
    """
    code_by_letter = {'T': 0, 'P': 1, 'W': 2, 'D': 4, 'N': 5, 'F': 6}
    ret = code_by_letter.get(l2_file_name_type_keyword, -1)

    print("checkL2DataTypeByFileName: %s - %d" % (l2_file_name_type_keyword, ret))
    return ret


def checkDataTypeForWind(data_type, col_name):
    """
    Refine a wind data type into 2 (zonal wind) or 3 (meridional wind).

    [data_type]     [col_name]
    HWLS	        ZonalWindSL 	zonal;      MeridiWindSL	meridional
    HWSM	        ZonalWindSM 	zonal;      MeridiWindSM	meridional
    HWSL	        ZonalWind	    zonal;      MeridiWind	    meridional
    WCSM	        ZonalWind 	    zonal;      MeridiWind	    meridional

    Returns -1 when data_type is not a wind type or col_name is unrecognised.
    """
    if data_type not in DataTypeWindKeywords:
        return -1

    if col_name in ("ZonalWindSL", "ZonalWindSM", "ZonalWind"):
        return 2
    if col_name in ("MeridiWindSL", "MeridiWindSM", "MeridiWind"):
        return 3
    return -1


def getValueUnit_bak(quantities_str):
    """
    Extract the measurement unit (K, m/s, kg/m3, or empty) from a quantities line.
    Input example: Density combination of stratosphere and mesosphere (kg/m3)

    Regex version. Fixes over the original (non-working) attempt:
      * re.search instead of re.match — re.match anchors at position 0, so a
        pattern for "(...)" never matched a unit in the middle of the line;
      * the parenthesised content is now a capture group, so group(1) is valid
        (the old pattern had no group and group(1) raised IndexError).
    """
    print("quantities_str: %s" % quantities_str)
    matchObj = re.search(r'\(([^\(\)]*)\)', quantities_str)
    print(matchObj)
    if matchObj:
        print("matchObj.group() : ", matchObj.group())
        print("matchObj.group(1) : ", matchObj.group(1))
        return matchObj.group(1)
    else:
        print("No match!!")
        return ""


def getValueUnit(quantities_str):
    """
    Extract the measurement unit (K, m/s, kg/m3, or empty) from a quantities line.
    Input example: Density combination of stratosphere and mesosphere (kg/m3)

    Returns the text between the first '(' and the next ')' that follows it
    within the same '('-delimited segment, or "" when either bracket is absent.
    """
    # Both brackets must occur somewhere in the string.
    if quantities_str.find('(') == -1 or quantities_str.find(')') == -1:
        print("未找到单位数据或源数据格式错误！")
        return ""

    # Text after the first '(' (up to the next '(' if any), then cut at ')'.
    segment = quantities_str.split('(')[1]
    return segment.split(')')[0]


# Dictionary lookup of the probe system type.
# TODO: query the type from the sys_dict_system_type table instead.
def checkSystemType(systype_str):
    """
    Map a system-type keyword contained in systype_str to its enum code:
    L0 types -> 1..4, L2 types -> 101..111, unknown -> -1.
    When both an L0 and an L2 keyword are present, the L2 code wins
    (mirrors the original if/elif ordering).
    """
    l0_codes = (("532R", 1), ("532P", 2), ("589P", 3), ("374P", 4))
    l2_codes = (("TSVP", 101), ("HWLS", 102), ("DUSM", 103), ("TUSM", 104),
                ("HWSM", 105), ("NDNA", 106), ("TMSL", 107), ("HWSL", 108),
                ("DCSM", 109), ("TCSM", 110), ("WCSM", 111))

    ret = -1
    # L0 data files.
    for keyword, code in l0_codes:
        if keyword in systype_str:
            ret = code
            break
    # L2 data files (override any L0 match).
    for keyword, code in l2_codes:
        if keyword in systype_str:
            ret = code
            break
    return ret


# Data-level lookup for old-format names (level delimited as '_L0_' / '_L2_').
def checkDataLevelType(file):
    """Return 0 for an L0 file name, 1 for an L2 file name, -1 otherwise."""
    if "_L0_" in file:
        return 0
    if "_L2_" in file:
        return 1
    return -1

# Data-level lookup for 2023-09 names (level appears as a trailing '_L0'/'_L2').
def checkDataLevelType2309(file):
    """Return 0 for an L0 file name, 1 for an L2 file name, -1 otherwise."""
    if "_L0" in file:
        return 0
    if "_L2" in file:
        return 1
    return -1


def saveReadFileLog(OperateStaff, DataPath, FileName, DeviceId, DeviceName, IsAbnormal, LastTime=0, DataCount=0, AbnormalInfo=""):
    """
    Record one file-ingestion attempt in the read-log table and the system log.

    Args:
        OperateStaff: operator (user) id; formatted with %d, so it must be an int.
        DataPath: directory the file was read from.
        FileName: bare file name.
        DeviceId: probing device id (-1 when unknown / name invalid).
        DeviceName: probing device name.
        IsAbnormal: 1 when the read failed, 0 on success.
        LastTime: elapsed read time (ms); formatted with %d.
        DataCount: number of data records read; formatted with %d.
        AbnormalInfo: failure reason, empty on success.

    NOTE(review): value_str is assembled by %-formatting, so values containing
    quotes would break the INSERT (SQL-injection risk) — confirm insert_db
    escapes its arguments, or switch to parameterized queries.
    """
    print(OperateStaff, DataPath, FileName, DeviceId, DeviceName, IsAbnormal, AbnormalInfo, LastTime, DataCount)
    readFileLogTable = "bt_data_read_log"
    value_str = '"%d", "%s", "%s", "%d", "%s", "%d", "%s", "%d", "%d"' % (OperateStaff, DataPath, FileName, DeviceId, DeviceName, IsAbnormal, AbnormalInfo, LastTime, DataCount)
    print("读文件log： %s" % (value_str))
    insert_db(readFileLogTable, "OperateStaff, DataPath, FileName, DeviceId, DeviceName, IsAbnormal, AbnormalInfo, LastTime, DataCount", value_str)
    print("保存读取文件记录完成！")

    # Also record the ingestion in the system log table.
    operate_result = "%s/%s" % (DataPath, FileName)
    saveSysLog(OperateStaff, "数据录入", operate_result, LastTime)
    print("保存读取文件记录到系统log完成！")


##################################################
# Read raw data files.
def readDataFileToStore(param):
    """
    Read the probe file or directory given by the caller, asynchronously.

    Args:
        param: dict with keys 'task_id', 'user_id', 'file_path' and optional
            'process_num' (number of parallel reader processes, default 4).

    Returns:
        0 once the background reader process has been started,
        {"code": -1, "msg": ...} on parameter errors, or the error string
        returned by setTaskStatus when the task state could not be set.
    """
    print(param)
    task_id = param.get('task_id')
    user_id = int(param.get('user_id'))
    file_path = param.get('file_path')
    process_num = int(param.get('process_num')) if param.get('process_num') != None else 4      # default: 4 reader processes in parallel
    print("参数：file_path: %s" % file_path)

    if file_path.strip() == '':
        print("参数错误！")
        # return -1
        return {"code": -1, "msg": "文件路径参数为空"}
    if not os.path.exists(file_path):
        print("文件或目录不存在！")
        # return -1
        return {"code": -1, "msg": "文件或目录不存在"}

    files = [file_path]

    # Task id for this batch.
    # NOTE(review): this binds a *local* TASK_ID (no `global` statement), so the
    # module-level TASK_ID stays "" — confirm that is intended.
    TASK_ID = task_id

    # Mark the task as started.
    if TASK_ID != "":
        ret = setTaskStatus(TASK_ID, 0)
        if ret != "OK":
            print("setTaskStatus: %s" % ret)
            return ret

    q = multiprocessing.Queue()
    processes = []
    process = multiprocessing.Process(target=taskProcess, args=(files, user_id, process_num, TASK_ID, q))
    processes.append(process)
    process.start()

    # join() intentionally skipped: the parent does not wait for the child
    # (non-blocking, asynchronous execution).
    # for proc in processes:
    #    proc.join()

    print("开始运行数据读取进程。。。。。。")
    return 0


def taskProcess(files, user_id, process_num, TASK_ID, q):
    """
    Background task entry point: discover the pending files, split them into
    process_num groups, spawn one reader process per group and wait for all
    of them, then mark the task finished.

    Args:
        files: list of file or directory paths to scan.
        user_id: operator id recorded in the read log.
        process_num: number of parallel reader processes.
        TASK_ID: task id for status updates ("" skips the final status update).
        q: multiprocessing.Queue receiving the count of discovered files.

    Returns:
        0 in every non-raising path.
    """
    try:
        # Names of files already ingested, to avoid re-reading them.
        read_file_log = readFileLog()
        # Scan everything and bin the files per reader process.
        (file_bin, count) = readAllFile(files, read_file_log, process_num)
        print("探测文件：%d - %d" % (len(file_bin.keys()), count))
        # return 0
        q.put(count)
        if count <= 0:
            print("没有监测到新数据文件！")
            updateTaskStatus(TASK_ID, 0)
            return 0

        processes = []
        for key in file_bin.keys():
            print("启动进程：%s" % key)
            # process = multiprocessing.Process(target=readFileOrDir, args=(file_bin[key], read_file_log, user_id, TASK_ID))
            process = multiprocessing.Process(target=callReadFileFunc, args=(file_bin[key], user_id, TASK_ID))
            processes.append(process)
            process.start()

        # Parent blocks until every reader process has finished.
        for proc in processes:
            # print(str(proc.get()))
            proc.join()

    except Exception as e:
        print("taskProcess_Error: %s" % e)
        conf = {
            'TaskId': TASK_ID,
            'TaskType': 0
        }
        setTaskStatusFinish(conf)

    # Mark the task as finished.
    print("将要设置任务结束..")
    if TASK_ID != "":
        print("设置任务结束！")
        updateTaskStatus(TASK_ID, 0)

    return 0


def readAllFile(files_list, read_file_log, process_num):
    """
    Collect candidate data files and distribute them round-robin into
    process_num buckets (one bucket per reader process).

    Args:
        files_list: file and/or directory paths to scan.
        read_file_log: names of files already ingested; these are skipped.
        process_num: number of buckets.

    Returns:
        tuple: (dict mapping bucket key "0".."process_num-1" to file paths,
        total number of files collected).
    """
    buckets = {}
    total = 0
    for entry in files_list:
        # A single file: handle it directly.
        if os.path.isfile(entry):
            _, base_name = os.path.split(entry)
            if base_name in read_file_log:
                print("文件已在读取log记录中，忽略本次读取：%s" % base_name)
            else:
                print("读取探测数据文件：%s" % entry)
                buckets.setdefault(str(total % process_num), []).append(entry)
                total += 1
        # A directory: walk all sub-directories and collect every file.
        elif os.path.isdir(entry):
            print("读取探测数据目录：%s" % entry)
            for root, _, names in os.walk(entry):
                for name in names:
                    if name in read_file_log:
                        print("文件已在读取log记录中，忽略本次读取：%s" % name)
                        continue

                    full = os.path.join(root, name)
                    # print(full)
                    if os.path.isfile(full):
                        # print("读取探测数据文件：%s" % full)
                        buckets.setdefault(str(total % process_num), []).append(full)
                        total += 1

    print("共发现原始探测文件数量：%d" % total)
    if total > 0:
        print("探测文件：")
        print(buckets.keys())
        print(buckets["0"][0])
    return buckets, total


def callReadFileFunc(file_list, user_id, TASK_ID):
    """Sequentially ingest every file in file_list, aborting when the task is stopped."""
    for data_file in file_list:
        # TODO check whether this process has been terminated (database status)

        # Respond to the user pressing the "pause" button in the front-end UI.
        if isTaskStoped(TASK_ID, 0) == 1:
            return 0

        # Ingest a single file.
        readOneDataFileV2(data_file, user_id, TASK_ID)


# Parse and store probe data.
def readOneDataFileV2(file_path, user_id, TASK_ID):
    """
    Parse one probe data file (lidar data) and record the read in the log.

    Args:
        file_path: full path of the file to ingest.
        user_id: operator id recorded in the read log.
        TASK_ID: task id forwarded to the parser.

    Returns:
        0 on success, -1 when the file name or content cannot be parsed.
    """
    # Get info from the file name (probe system type, etc.).
    # Split into directory and file name, e.g.
    # ['Wuhan', 'AWTL02', '532P', 'L0', '01H', '20220120140000', 'V01.01DAT']
    (filepath, tempfilename) = os.path.split(file_path)
    filepath = filepath.replace("\\", "/")
    print("路径：%s; 文件名：%s" % (filepath, tempfilename))
    params = {
        "file_path": file_path,
        "filepath": filepath,
        "tempfilename": tempfilename,
        "user_id": user_id,
        "task_id": TASK_ID
    }

    # Start timing the read.
    start_time = time.time()

    fileNameArr = re.split("_+", tempfilename)
    if fileNameArr[len(fileNameArr)-1].split('.')[0] == "L0" or fileNameArr[len(fileNameArr)-1].split('.')[0] == "L2":
        # New data files delivered in September 2023, named like:
        # AL_LD1_355nm_Rm_DV_20230821140045_L0.dat
        # AL_LD1_355nm_Rm_DV_T_20230821140045_L2.dat
        # AL_LD1_D_20230821140045_L2.dat
        print("202309新探测数据..")
        headParamConf = parsingRadarRealDataFile2309(params)
    else:
        print("源数据文件错误..")
        return -1

    # The parser returns {"code": -1, ...} on failure.
    if 'code' in headParamConf.keys() and headParamConf['code'] == -1:
        print("源数据文件解析错误..")
        return -1
    # Stop timing the read.
    end_time = time.time()

    # Elapsed time in milliseconds.
    run_time = (end_time - start_time) * 1000
    #print('代码运行时间为{:.2f} s'.format(run_time))
    print('代码运行时间为%d ms' % run_time)

    # Record the successful read.
    device_id = headParamConf['DeviceId']
    device_name = headParamConf['DeviceName']
    count = headParamConf['Count']
    saveReadFileLog(user_id, filepath, tempfilename, device_id, device_name, 0, run_time, count, "")

    return 0


"""
解析探测文件（2023年09月数据）
# 1.解析新版本的雷达数据（2023-09），源数据文件名样例：
# AL_LD1_355nm_Rm_DV_20230821140045_L0.dat
# AL_LD1_355nm_Rm_DV_T_20230821140045_L2.dat
# AL_LD1_D_20230821140045_L2.dat
# 2.解析文件头部数据，构造出新的存储路径
"""
def parsingRadarRealDataFile2309(params):
    # 获取参数
    file_path = params['file_path']
    tempfilename = params['tempfilename']
    user_id = params['user_id']
    filepath = params['filepath']
    TASK_ID = params['task_id']

    # 获取文件名称中的信息（探测体制）
    fileNameArr = re.split("_+", tempfilename)
    print(fileNameArr)

    # 1.检查文件名合法性
    file_name_check_arr = checkFileName2309(file_path, user_id)
    if not file_name_check_arr[0]:
        print("数据文件名异常：%s" % file_path)
        # 读取记录
        saveReadFileLog(user_id, filepath, tempfilename, -1, "", 1, 0, 0, "文件名异常")
        return {"code": -1, "msg": "数据文件名异常！"}
    print("文件名检查通过！")
    # [True, sys_type, data_type, store_path, db_table_head_name, system_type, file_time]
    print(file_name_check_arr)

    # 2.获取数据文件的存储路径：
    # 系统1234、年月日、L0/L2、体制
    store_path = os.path.join(FILE_UP_LEVEL_DIR, file_name_check_arr[3])
    print("新存储位置：", store_path)
    # 判断路径是否存在，不存在则创建
    if not os.path.exists(store_path):
        print("存储目录路径不存在，创建store_path:%s" % store_path)
        os.makedirs(store_path)

    # 3.数据库分表表名
    # bt_data_head基本名，后缀：s1/s2/s3/s4/s5_l0/l2
    db_table_head_name = file_name_check_arr[4]
    print("数据库文件信息表名：", db_table_head_name)

    # 探测体制：
    # L0:532nm_Ry_DV、532nm_Ry_DN、532nm_Ry_DE、532nm_Ry_DS、532nm_Ry_DW、372nm_Ry_DV、372nm_Ry_DN、372nm_Ry_DE、372nm_Ry_DS、
    #    372nm_Ry_DW、589nm_Na_DV、589nm_Na_DN、589nm_Na_DE、589nm_Na_DS、589nm_Na_DW、372nm_Fe_DN、372nm_Fe_DE、372nm_Fe_DS、
    #    372nm_Fe_DW、532nm_Rm_DV、355nm_Rm_DV
    # L2:T、W、P、D、N、F
    DataTypeFromFileName = file_name_check_arr[1]

    # 4.解析文件头
    print("正在提取文件头数据..")
    # 根据文件特点，一次读取多行
    # fd = open(file_path, "r", encoding='UTF-8')
    fd = open(file_path, "r", encoding='GBK')
    lines = fd.readlines(HEAD_MAX_LINE)
    # lines = fd.readlines()
    # print(lines)
    pos = 0
    head_dict = {}       # 解析的数据保存在该字典变量
    # columns_header = []  # 探测数据列维度
    for line in lines:
        print(line.strip())

        # 头部信息结束标记
        if line.strip().startswith('Range') or line.strip().startswith('Altiude'):   # L0文件是Range行，L2文件是Altiude行
            # 头部信息最后一行，解析探测数据列头信息
            # 示例：L0: Range RayVDA RayVDP RayVNA RayVNP
            # L2: Altiude(km) Temperature(K) TemperatureUncertainty(K)
            # arr = line.strip().split()
            # print(arr)
            # for item in arr:
            #     if item.strip() != '':
            #         columns_header.append(item)
            break

        # 特殊处理：“Longitude:”
        if pos == 0:
            # 数据示例：LD1 Raman Raw Data
            head_dict['DeviceType'] = line.replace("Data", "").strip()
            print("%s" % line.strip())
        if pos == 1 and not line.strip().startswith('FileHeadLength'):
            # 有的ProjectName是第二行，有的没有
            head_dict['ProjectName'] = line.strip()
            print("%s" % line.strip())
        elif line.strip().startswith('Longitude:'):
            # 数据示例：Longitude: E116.600 degree, Latitude: N40.067 degree, Altitude: 1287m
            tmp_arr = line.strip().split(',')
            for item in tmp_arr:
                arr = item.strip().split(':')
                if len(arr) >= 2:
                    head_dict[arr[0].strip()] = arr[1].replace("degree", "").replace("E", "").replace("N", "").strip()
                    print("%s| %s" % (arr[0].strip(), arr[1].strip()))
        elif line.strip().startswith('DeviceSpec') or (line.strip().startswith('DeviceState') and "BeamDirectY" in line and "BeamDirectZ" in line):
            # DeviceSpec: WaveLen = 355nm, RepRate = 30Hz, PlsEnergy = 680mJ
            tmp_arr = line.split(':')[1].split(',')
            for item in tmp_arr:
                arr = item.strip().split('=')
                if len(arr) >= 2:
                    head_dict[arr[0].strip()] = arr[1].replace("degree", "").strip()
                    print("%s| %s" % (arr[0].strip(), arr[1].replace("degree", "").strip()))
        elif line.strip().startswith('DeviceState'):
            # 系统1：DeviceState: BeamDirectX = 000.00 degree, BeamDirectY = 000.00 degree, BeamDirectZ = 90.00 degree    (同DeviceSpec，在以上代码中已经处理)
            # 需要获取 Y 和 Z的值
            # 系统2：DeviceState: BeamDirect3(ALT = 60.0 degree, AZ = 270.0 degree, West)  (以下代码进行处理)
            # 仅获取：ALT（Y）和 AZ（Z）的值
            if "DeviceState: BeamDirect3" in line:
                tmp_arr = line.split('(')[1].split(',')
                for item in tmp_arr:
                    arr = item.strip().split('=')
                    if len(arr) >= 2:
                        head_dict[arr[0].strip()] = arr[1].replace("degree", "").strip()
                        print("%s| %s" % (arr[0].strip(), arr[1].replace("degree", "").strip()))
        else:
            arr = line.strip().split(':')
            if len(arr) >= 2:
                key = arr[0].strip().strip('_')
                del(arr[0])
                head_dict[key] = ":".join(arr).strip()
                print("%s| %s" % (key, ":".join(arr).strip()))

        pos += 1
    fd.close()
    print("头部信息解析完成！%d" % len(lines))
    print(pos)

    # 进一步处理文件头数据：
    # 抽取文件头数据行数FileHeadLength:  00029 lines ==> 29
    arr = head_dict['FileHeadLength'].split()
    if len(arr) > 0:
        head_dict['FileHeadLength'] = arr[0].strip('0')

    # print(head_dict)

    # TODO：获取参数值task_id
    BatchId = TASK_ID if TASK_ID != "" else get_current_timestamp()     # 批号信息，用户自己编码
    print("BatchId-Test: %s" % BatchId)
    Count = str_to_int(head_dict['RecordNumber'])                       # 当前文件探测数据条数
    Level = checkDataLevelType2309(file_path)                               # 数据级0:L0/1:L2（从文件名解析）

    # 老的L2探测数据类型：0温度1密度2径向风速3纬向风速（针对L2,L0为-1）
    # 风速这里先设为2，后面获取到数据表头，再进一步确定是2 or 3
    # 新的L2探测数据类型：
    # 0：T是温度、1：P是气压、2：W是风场（纬向风）、3：W是风场（径向风）、4：D是大气密度、5：N是钠原子密度、6：F是铁原子密度
    ValueType = -1
    if Level == 1:
        #ValueType = checkDataType(head_dict['DataType'])                # L2测量数据类型:0温度1风速2密度3压力
        ValueType = checkL2DataTypeByFileName(DataTypeFromFileName)      # 新的L2探测数据类型
    print("Valuetype: %s, DataType: %s" % (ValueType, head_dict['DataType']))
    DataStartTime = str2timestamp(head_dict['DataStartTime'])           # 数据起始时间
    DataEndTime = str2timestamp(head_dict['DataEndTime'])               # 数据结束时间

    # 探测数值的单位信息
    if Level == 1:
        ValueUnit = getValueUnit(head_dict['Quantities'])               # L2获取探测数据的单位信息
    else:
        ValueUnit = '-'                                                 # L0的测量数据单位为空
    print("单位信息：%s" % ValueUnit)

    # 站点名称信息
    # StationName = DeviceName.strip().split()[0] if DeviceName.strip() != "" else ""  #探测站点名称
    # StationName = head_dict['DeviceType'].strip().split()[0] if head_dict['DeviceType'].strip() != "" else ""  #探测站点名称
    StationName = ''
    if 'SiteName' in head_dict.keys():
        StationName = head_dict['SiteName'].strip()         #探测站点名称(202309数据格式，有不同字段)
    elif 'LidarSite' in head_dict.keys():
        StationName = head_dict['LidarSite'].strip()

    # 处理站点信息
    station = getStationInfo(StationName)
    StationId = -1
    if station == "":
        altitude = int(head_dict['Altitude'].replace("m", ""))
        station_dict = {
            "StationName": StationName,
            "Longitude": head_dict['Longitude'].replace("E", "").replace("°", ""),
            "Latitude": head_dict['Latitude'].replace("N", "").replace("°", ""),
            "Altitude": round(altitude/1000, 6)
        }
        # 解决多进程访问数据库冲突问题
        while StationId == -1:
            try:
                StationId = saveStationByCheck(station_dict)
            except Exception as e:
                print("saveStationByCheck_Error: %s", e)
                time.sleep(random.randint(1, 3))
                print("Try it again!")
                StationId = saveStationByCheck(station_dict)
    else:
        StationId = station[0]                                           #探测站点Id
    print("StationName = %s, StationId = %d" % (StationName, StationId))

    # 站点和设备数据
    DeviceName = head_dict['Device']                                    # 设备名称（雷达名称）
    DeviceCode = "%s-%s" % (StationName, DeviceName)                    # 重新编码设备：“站点-设备名称”
    device = getDeviceInfoByCode(DeviceCode)
    DeviceType = 0                                                     # 设备类型：0激光雷达，1原位探测（当前默认为激光雷达）
    DeviceId = -1                                                      # 探测设备Id
    if device == "":
        device_dict = {"DeviceName": DeviceName,
                       "DeviceCode": DeviceCode,
                       "DeviceType": DeviceType}
        # 解决多进程访问数据库冲突问题
        while DeviceId == -1:
            try:
                DeviceId = saveDeviceByCheck(device_dict)
            except Exception as e:
                print("saveDeviceByCheck_Error: %s", e)
                time.sleep(random.randint(1, 3))
                print("Try it again!")
                DeviceId = saveDeviceByCheck(device_dict)
    else:
        DeviceId = device[0]                                            # 设备id
    print("DeviceCode = %s, DeviceName = %s, DeviceId = %d" % (DeviceCode, DeviceName, DeviceId))

    # 保存站点和设备关系
    print("StationId = %d, DeviceId = %d" % (StationId, DeviceId))
    if StationId != -1 and DeviceId != -1:
        print("检查站点-设备关系")
        saveStationDeviceRelationByCheck(StationId, DeviceId)

    SystemType = -1         # 含义未知？暂时赋值为-1
    # if len(fileNameArr) > 2:
    #     SystemType = checkSystemType(fileNameArr[2])                    # 探测体制（枚举值）
    if 'ProjectName' in head_dict.keys():
        ProjectName = head_dict['ProjectName']                          # 探测项目名称:Chinese Meridian Project for Space Environment Monitoring
    else:
        ProjectName = ""

    print("保存文件头到数据库..")
    db_table_head = db_table_head_name
    head_id = -1

    # return  #测试20220415
    headParamConf = {
        "BatchId": BatchId,
        "Count": Count,
        "Level": Level,
        "DataType": DataTypeFromFileName,
        "ValueType": ValueType,
        "ValueUnit": ValueUnit,
        "DataStartTime": DataStartTime,
        "DataEndTime": DataEndTime,
        "filepath": filepath,
        "FileStorePath": store_path.replace("\\", "/"),
        "tempfilename": tempfilename,
        "StationId": StationId,
        "StationName": StationName,
        "DeviceId": DeviceId,
        "DeviceName": DeviceName,
        "SystemType": SystemType,
        "ProjectName": ProjectName,
        "RawDataStatus": 0,     # 原始数据状态：0:已录入；
        "SharedWay": 0,         # 文件头生成方式：0仅对应原始文件;1：仅数据预处理；2原始文件+数据预处理共用
        "SharedHeadId": 0,      # 文件头标记：0:仅对应原始文件;1:仅数据预处理；2:不确定度评定；3:预处理+不确定度评定
        "DbTable": db_table_head,
    }

    print("解析后的文件头部信息：")
    print(headParamConf)
    head_id = CheckDataHeadTableInfo(headParamConf)
    if head_id <= 0:
        print("Error: 文件头信息插入数据库错误!")
        return {"code": -1, "msg": "文件头信息插入数据库错误！"}

    # 5. 复制文件到存储服务器
    if not os.path.exists(store_path):
        os.makedirs(store_path)
    shutil.copy(os.path.join(headParamConf['filepath'], headParamConf['tempfilename']), headParamConf['FileStorePath'])

    return headParamConf

    ##########################################


# Read a probe data file from the given storage path.
# flag = 0: do not persist, only return the parsed raw data
# flag = 1: persist to DB and return the parsed raw data
def parsingRadarProbeDataFromRawFile2309(file_store_path, file_name, head_id, task_id, flag=0):
    """
    Parse the probe-data section of a raw data file and optionally save it
    to the per-level raw-data table (bt_data_l0_raw / bt_data_l2_raw).

    Parameters
    ----------
    file_store_path : str
        Directory holding the data file.
    file_name : str
        File name; encodes the probe system type and the data level.
    head_id : int
        Id of the matching row in the head table.
    task_id : str
        Caller task id, stored as BatchId on every data row.
    flag : int
        0 = only return parsed rows; 1 = also batch-insert them into the DB.

    Returns
    -------
    list[dict]
        Parsed rows shaped like rows selected from the DB; or a
        {"code": -1, "msg": ...} dict when re-writing a wind head record
        fails; or None when no matching head record exists.
    """
    db_table_head_type = getDataSystemTypeByL2FileName(file_name)
    data_level = getDataLevelByFileName(file_name)
    # Resulting table name: bt_data_head_s1/s2/s3_l0/l2
    db_table_head_name = db_data_head_table + '_' + db_table_head_type + '_l' + str(data_level)

    # TODO: should the lookup also require "AND RawDataStatus = 0"?
    headParamConf = getHeadFullInfoByTableNameAndHeadId(db_table_head_name, head_id)
    if headParamConf == "":
        print("未找到对应的数据头信息：%s - %d" % (db_data_head_table, head_id))
        return

    file_path = os.path.join(file_store_path, file_name)
    # HEAD_MAX_LINE acts as an approximate byte budget for readlines(); the
    # header section is expected to fit well within it. The context manager
    # guarantees the handle is closed even if parsing below raises.
    with open(file_path, "r", encoding='GBK') as fd:
        lines = fd.readlines(HEAD_MAX_LINE)

    # 1. Skip the header section and pick up the data column header row.
    #    L0 files start the data section with a "Range" row, L2 files with an
    #    "Altiude" row (sic — spelling comes from the data files), e.g.
    #      L0: Range RaLJA RaLJP RaHJA RaHJP
    #      L2: Altiude(km) Density(kg*m-3) DensityUncertainty(percent)
    pos = 0
    columns_header = []  # probe data column names
    for line in lines:
        stripped = line.strip()
        if stripped.startswith('Range') or stripped.startswith('Altiude'):
            arr = stripped.split()
            print(arr)
            columns_header = [item for item in arr if item.strip() != '']
            break
        pos += 1
    print("跳过头部信息！%d" % len(lines))
    print(pos)

    # 2. Parse the numeric probe values. Data starts right after the column
    #    header row (not necessarily equal to any FileHeadLength field).
    start_line = pos + 1

    print("正在提取探测数据..")
    print("数据开始行数：", start_line)
    print("columns_header: %s" % columns_header)

    probe_data = pd.read_csv(file_path, skiprows=start_line, header=None, names=columns_header, sep=r'\s+', encoding='GBK')
    print(probe_data.head())
    print(probe_data.columns)

    # 3. Save the values to the per-level raw-data table.
    print("保存数据到数据库..")
    db_table_data = "bt_data_l2_raw"                # raw L2 data table
    if headParamConf['Level'] == 0:
        db_table_data = "bt_data_l0_raw"            # raw L0 data table
    print("原始数据库表：%s" % db_table_data)

    # Batched insert for efficiency (flushed every MAX_LEN rows).
    db_cols = ["HeadId", "SysType", "BatchId", "ValueType", "Label", "Height", "Value"]
    value_arr = []       # rows pending DB insertion
    ret_value_list = []  # rows returned to the caller
    count = 0
    # Hoist the height Series out of the loops; guarded so an empty header
    # behaves like the original code (loop body simply never runs).
    height_col = probe_data[columns_header[0]] if len(columns_header) > 0 else None
    # The file is assumed to hold 1 height column + N value columns; each
    # value column is stored as (height, value) pairs.
    for col_index, col_name in enumerate(columns_header):
        if col_index == 0:
            continue  # height column: paired with every value column below

        # Skip the error columns of L2 density/temperature files.
        if col_name in cols_not_save:
            continue
        # Skip columns containing a blacklisted keyword, e.g.
        # TemperatureUncertainty(K) contains "Uncertainty".
        if any(kws in col_name for kws in cols_keywords_not_save):
            continue

        # Wind files need a per-column ValueType (2: zonal, 3: meridional),
        # so the head record is re-written for every wind column.
        if headParamConf['DataType'] in DataTypeWindKeywords:
            """
            [data_type]     [col_name]
            HWLS	        ZonalWindSL 	zonal wind; MeridiWindSL	meridional wind
            HWSM	        ZonalWindSM 	zonal wind; MeridiWindSM	meridional wind
            HWSL	        ZonalWind	    zonal wind; MeridiWind	meridional wind
            WCSM	        ZonalWind 	    zonal wind; MeridiWind	meridional wind
            """
            ValueType = checkDataTypeForWind(headParamConf['DataType'], col_name)
            print("风速纬向经向确认：%d" % ValueType)
            headParamConf['ValueType'] = ValueType
            head_id = CheckDataHeadTableInfo(headParamConf)
            if head_id <= 0:
                print("Error: 风速文件头信息插入/更新数据库错误!")
                return {"code": -1, "msg": "风速文件头信息插入/更新数据库错误！"}
            count = 0  # per-wind-column row count

        value_col = probe_data[col_name]  # hoisted out of the row loop
        for i in range(len(probe_data)):
            # Flush a full batch before adding more rows.
            if len(value_arr) == MAX_LEN and flag == 1:
                insert_db_in_batch(db_table_data, db_cols, value_arr)
                value_arr = []

            # Missing values parse as NaN and are skipped.
            if not np.isnan(value_col[i]):
                if flag == 1:
                    value_str = '"%d", "%s", "%s", "%d", "%s", "%.2f", "%.6f"' % \
                            (head_id, db_table_head_type, task_id, headParamConf['ValueType'], col_name,
                             str_to_float(height_col[i]), str_to_float(value_col[i]))
                    value_arr.append(value_str)

                # Build the return row, mimicking a row selected straight from
                # the DB, e.g.
                # {'Id': 234, 'HeadId': 582, 'BatchId': '20231010001',
                #  'ValueType': 0, 'Label': 'Temperature(K)', ...}
                value_item = {
                    "HeadId": head_id,
                    "SysType": db_table_head_type,
                    "BatchId": task_id,
                    "ValueType": headParamConf['ValueType'],
                    "Label": col_name,
                    "Height": str_to_float("%.2f" % str_to_float(height_col[i])),
                    "Value": str_to_float("%.6f" % str_to_float(value_col[i]))
                }
                ret_value_list.append(value_item)

                count += 1

    # Flush the remaining partial batch.
    if len(value_arr) > 0 and flag == 1:
        insert_db_in_batch(db_table_data, db_cols, value_arr)

    print("保存数据条数(表：%s)：%d" % (db_table_data, count))

    headParamConf['Count'] = count
    return ret_value_list


def CheckDataHeadTableInfo(conf):
    """
    Upsert a file-head record: update the existing row when a matching head
    is already stored, otherwise insert a new one.

    Matching key: ValueType + StationId + DeviceName + DataStartTime +
    DataEndTime + Level + FileName + DataType.
    TODO: should BatchId be part of the uniqueness check?
    TODO (optimization): try a batched
    "INSERT INTO ... ON DUPLICATE KEY UPDATE" statement instead.

    Parameters
    ----------
    conf : dict
        Head attributes as built by the callers (BatchId, Count, Level,
        DataType, ValueType, ..., DbTable; BinKey is optional).

    Returns
    -------
    int
        The head row Id as reported by select/insert; the DB helpers'
        failure value otherwise.
    """
    head_id = -1
    # WARNING(security): every SQL fragment below is built with %-string
    # interpolation. If any conf value can come from untrusted input this is
    # SQL-injectable; the db helpers accept raw strings, so values must be
    # escaped/validated upstream.
    query_conditon = 'ValueType=%d AND StationId=%d AND DeviceName="%s" AND DataStartTime="%s" ' \
                     'AND DataEndTime="%s" AND Level=%d AND FileName="%s" AND DataType="%s"' % \
                     (conf['ValueType'], conf['StationId'], conf['DeviceName'], conf['DataStartTime'],
                      conf['DataEndTime'], conf['Level'], conf['tempfilename'], conf['DataType'])
    print("query_conditon: %s" % query_conditon)
    ret = select_db(conf['DbTable'], query_conditon)
    print(ret, len(ret))
    has_bin_key = 'BinKey' in conf
    if len(ret) > 0:
        print("[Warning]：该原始文件头已存在，更新BatchID（即TaskID）")
        head_id = ret[0][0]

        # Update the head row; BinKey is only written when the caller set it.
        update_str = 'HeadInfoCount="%d", ValueType="%d", SystemType="%d", BatchId="%s", RawDataStatus="%d"' % \
                     (conf['Count'], conf['ValueType'], conf['SystemType'], conf['BatchId'], conf['RawDataStatus'])
        if has_bin_key:
            update_str += ', BinKey="%s"' % conf['BinKey']
        print("更新文件头Head信息： %s" % (update_str))
        # Update by primary key so only one row is touched even when the
        # match condition would select several records.
        update_db(conf['DbTable'], update_str, 'Id=%d' % head_id)
    else:
        # Columns and values shared by both insert variants. Order matters:
        # the optional BinKey slot sits between SharedWay and SharedHeadId.
        value_str = '"%s", "%d", "%d", "%s", "%d", "%s", "%s", "%s", "%s", ' \
                    '"%s", "%d", "%s", "%d", "%s", "%d", "%s", "%d", "%d"' % \
                    (conf['BatchId'], conf['Count'], conf['Level'], conf['DataType'], conf['ValueType'],
                     conf['ValueUnit'], conf['DataStartTime'], conf['DataEndTime'], conf['filepath'],
                     conf['tempfilename'], conf['StationId'], conf['StationName'], conf['DeviceId'],
                     conf['DeviceName'], conf['SystemType'], conf['ProjectName'], conf['RawDataStatus'],
                     conf['SharedWay'])
        cols = ("BatchId, HeadInfoCount, Level, DataType, ValueType, ValueUnit, "
                "DataStartTime, DataEndTime, FilePath, FileName, StationId, StationName, "
                "DeviceId, DeviceName, SystemType, ProjectName, RawDataStatus, SharedWay, ")
        if has_bin_key:
            value_str += ', "%s"' % conf['BinKey']
            cols += "BinKey, "
        value_str += ', "%d", "%s"' % (conf['SharedHeadId'], conf['FileStorePath'])
        cols += "SharedHeadId, FileStorePath"
        print("文件头信息%d： %s" % (1 if has_bin_key else 2, value_str))
        head_id = insert_db(conf['DbTable'], cols, value_str)

    print("文件头ID： %d" % head_id)
    return head_id


if __name__ == '__main__':
    # Command-line argument handling is currently disabled; a hardcoded test
    # directory is used instead:
    #   if len(sys.argv) < 2: print("命令行参数错误！"); exit()
    #   readDataFile(sys.argv[1])

    # Emulated raw probe data for system 1 (test fixture).
    test_data_dir = "D:/GBfiles/mycode/Workspace/MeasurementProject/test_data/2023-09/emulate_data/系统1"

    readDataFileToStore(test_data_dir)