import os
import pandas as pd
import numpy as np
from utils.mylib_db import *
from utils.mylib_utils import *
from measurement.measurement_utils import *
from measurement.import_probe_data_into_db import *
from measurement.import_probe_data_to_store import *
import multiprocessing

"""
处理用户文件录入
"""

# Destination tables for imported temporary data.
db_temporary_data_head_table = "bt_temporary_data_head"     # file-head (metadata) records
db_temporary_data_table_l2 = "bt_temporary_data"            # measurement records (L2 level, per naming — confirm)
db_temporary_data_table_l0 = "bt_temporary_data_l0"         # measurement records (L0 level, per naming — confirm)

MAX_LEN = 1000      # maximum number of rows per batch insert

# Expected column order of the uploaded Excel file (kept for reference):
# excel_title_list = ['项目名称', '站点名称', '设备名称', '设备编号', '设备类型', '数据级别', '数据类型', '文件处理描述',
#                     '开始时间', '结束时间', '高度(km)', '探测值类型', '探测值', '单位', 'MCM标准不确定度', '包含概率p',
#                     '蒙特卡洛试验次数M', 'GUM标准不确定度', '包含因子k', 'GUM扩展不确定度', 'u1', 'u2', 'u3', 'u4',
#                     'u5', 'u6', 'u7', 'u8', 'u9', 'u10', 'u11', 'u12', 'u13', 'u14', 'u15', 'u16', 'u17', 'u18',
#                     'u19', 'u20']

def importUserDataFile(param):
    """
    Entry point for importing a user-uploaded data file.

    :param param: dict with keys:
        task_id     - batch/task id; defaults to the current timestamp
        user_id     - id of the logged-in user; defaults to -1
        file_path   - path(s) of the uploaded file(s), ';'-separated; required
        process_num - number of reader processes; defaults to DEFAULT_PROCESS_NUM
        type        - upload kind: 1 = evaluation-result query (task type 6);
                      2 = uncertainty verification (task type 7); required
    :return: 0 on success (the import continues asynchronously in a child
             process), an error dict {"code": -1, "msg": ...} on bad
             parameters, or the raw error string from setTaskStatus when
             registering the task fails.
    """
    print(param)
    # Each value is read once; missing keys fall back to their defaults.
    task_id = param.get('task_id')
    if task_id is None:
        task_id = get_current_timestamp()
    user_id = int(param['user_id']) if param.get('user_id') is not None else -1
    file_path = param.get('file_path')
    if file_path is None:
        file_path = ""
    process_num = int(param['process_num']) if param.get('process_num') is not None else DEFAULT_PROCESS_NUM
    # renamed from `type` to avoid shadowing the builtin
    upload_type = int(param['type']) if param.get('type') is not None else -1

    if file_path.strip() == '':
        print("参数错误！")
        return {"code": -1, "msg": "文件路径参数为空"}

    if upload_type == -1:
        print("参数错误！")
        return {"code": -1, "msg": "上传文件类型不能为空"}

    # Assemble the import configuration for the one-shot evaluation run.
    conf = {
        "UserId": user_id,
        "FilePath": file_path,
        "FilePathList": [],
        "Type": upload_type,
        "ProcessNum": process_num,
        "DataSource": 2,                # 2 = data imported from a file
        # destination tables for the imported data
        "DbSaveHeadTable": db_temporary_data_head_table,
        "DbSaveTable": db_temporary_data_table_l2,
        "TaskType": 6 if upload_type == 1 else 7
    }

    # Register this interactive task before starting the import.
    if task_id != "":
        ret = setTaskStatus(task_id, conf['TaskType'], 0, -1, conf['DataSource'])
        if ret != "OK":
            print("setTaskStatus: %s" % ret)
            return ret

    # Fire-and-forget: the caller is deliberately NOT blocked waiting for the
    # child process (no join -> non-blocking, asynchronous execution).
    process = multiprocessing.Process(target=taskImportUserFilesProcess, args=(conf, task_id))
    process.start()

    print("开始运行数据读取进程。。。。。。")
    return 0


def taskImportUserFilesProcess(conf, TASK_ID):
    """
    Child-process entry: import every file described by *conf*, blocking
    until all reader processes finish, then mark the task as complete.
    """
    callReadFileProcessAndWaitFinish(conf, conf['FilePath'], conf['ProcessNum'], TASK_ID)

    # All readers are done; close out the interactive task.
    print("将要设置任务结束..")
    if TASK_ID != "":
        print("设置任务结束！")
        updateTaskStatus(TASK_ID, conf['TaskType'])

    return 0


def callReadFileProcessAndWaitFinish(conf, file_path, process_num, TASK_ID):
    """
    Read all files synchronously: fan the work out across child processes
    and block until every one of them has finished.

    :param conf: import configuration (destination tables, user, type, ...)
    :param file_path: one path, or several paths joined with ';'
    :param process_num: number of reader processes to start
    :param TASK_ID: id of the current interactive task
    :return: 0
    """
    # The list of already-read files would be loaded here to avoid
    # re-reading; currently every file is (re)read.
    # read_file_log = readFileLog()
    read_file_log = []

    # ';' separates multiple paths; split() yields a single-element list
    # for a lone file or directory, so no special case is needed.
    files_list = file_path.split(";")

    # Collect all files and group them into per-process buckets.
    (file_bin, count) = readAllFile(files_list, read_file_log, process_num)
    print(file_bin)

    processes = []
    for key in file_bin.keys():
        print("启动进程：%s" % key)
        process = multiprocessing.Process(target=callReadFileFunc, args=(file_bin[key], conf, TASK_ID))
        processes.append(process)
        process.start()

    # Block until every child reader process has finished.
    for proc in processes:
        proc.join()

    return 0


def callReadFileProcessAndWaitFinish2309(conf, TASK_ID):
    """
    Legacy (2023-09) synchronous import path: run taskProcessDataToStore in
    a single child process and wait for its result through a queue.

    :return: 0
    """
    result_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(
        target=taskProcessDataToStore,
        args=(conf['FileList'], conf['UserId'], conf['ProcessNum'], TASK_ID, result_queue))
    workers = [worker]
    worker.start()

    # Block until the worker finishes.
    for w in workers:
        w.join()

    # One queued result per worker.
    results = [result_queue.get() for _ in workers]
    print(results)
    if results and results[0] == 0:
        print("未找到探测文件或探测文件已有录入记录！返回继续等待获取新探测文件..")
        return 0

    return 0


def callReadFileFunc(file_list, conf, TASK_ID):
    """Sequentially import every file in *file_list* (runs inside a reader process)."""
    for single_file in file_list:
        # TODO: check (via DB status) whether this process was cancelled.
        readUserDataFile(single_file, conf, TASK_ID)


def readUserDataFile(file_name, conf, TASK_ID):
    """
    Read one user-uploaded Excel file and store its rows into the temporary
    head/data tables named in *conf*.

    Every Excel row carries both head information (project/station/device,
    time range, data level, ...) and one measurement record; rows that share
    identical head information reuse a single head-table entry.

    :param file_name: path of the Excel file to import
    :param conf: import configuration; uses 'DbSaveHeadTable' and 'DbSaveTable'
    :param TASK_ID: batch id (BatchId) stored with every imported row
    :return: 0 on success (also when the file holds no valid rows), an error
             dict {"code": -1, ...} for bad input, or the non-positive result
             of a failed batch insert.
    """
    if file_name.strip() == '':
        print("参数错误！")
        return {"code": -1, "msg": "参数错误"}
    if not os.path.isfile(file_name):
        print("文件不存在！")
        return {"code": -1, "msg": "文件不存在"}

    df = pd.read_excel(file_name, sheet_name=0, header=0)
    print("导入文件的行数、列数")
    print(df.shape)
    (line_num, col_num) = df.shape

    # Column lists of the destination tables.
    db_head_cols = ["BatchId", "HeadInfoCount", "Count", "Level", "DataType", "ValueType", "ValueUnit", "DataStartTime",
                    "DataEndTime", "StationId", "StationName", "DeviceId", "DeviceName", "ProjectName", "SharedWay", "SharedHeadId"]
    db_head_cols_str = ','.join(db_head_cols)

    db_data_cols = ["HeadId", "BatchId", "ValueType", "Height", "Value", "DataStartTime", "DataEndTime",
                    "McmResult", "McmParameterP", "McmParameterExpTimes", "GumResult", "GumParameterK",
                    "GumResultExtended", 'U1', 'U2', 'U3', 'U4', 'U5', 'U6', 'U7', 'U8', 'U9', 'U10',
                    'U11', 'U12', 'U13', 'U14', 'U15', 'U16', 'U17', 'U18', 'U19', 'U20']

    head_id_set = set()     # hashes of head conditions already resolved for this file
    head_id = -1            # head id of the most recently resolved head record
    value_arr = []          # pending rows for the next batch insert
    ret = 1                 # bug fix: `ret` was unbound when the file had no valid rows

    # getDataFrameValue flag: 1 string "", 2 int -1, 3 float 0.0, 4 time "0000-00-00 00:00:00"
    for i in range(len(df)):
        project_name = getDataFrameValue(df, col_num, i, 0)
        station_name = getDataFrameValue(df, col_num, i, 1)
        if station_name == "":
            print("站点名称为空，当前行数据无效！")
            continue

        station_id = checkStationIdByName(station_name)
        device_name = getDataFrameValue(df, col_num, i, 2)
        device_code = getDataFrameValue(df, col_num, i, 3)
        device_id = checkDeviceIdByCode(device_code)
        device_type = getDataFrameValue(df, col_num, i, 4)              # read but currently unused
        data_level = 1 if getDataFrameValue(df, col_num, i, 5) == "L2" else 0   # L0 -> 0; L2 -> 1
        data_type = getDataFrameValue(df, col_num, i, 6)
        process_description = getDataFrameValue(df, col_num, i, 7)      # read but currently unused
        start_time = getDataFrameValue(df, col_num, i, 8, 4)            # time
        end_time = getDataFrameValue(df, col_num, i, 9, 4)              # time
        height = getDataFrameValue(df, col_num, i, 10, 3)               # float
        value_type = checkValueType(getDataFrameValue(df, col_num, i, 11))
        value = getDataFrameValue(df, col_num, i, 12, 3)
        value_unit = getDataFrameValue(df, col_num, i, 13)
        mcm_result = getDataFrameValue(df, col_num, i, 14)
        mcm_parameter_p = getDataFrameValue(df, col_num, i, 15)
        mcm_exp_times = getDataFrameValue(df, col_num, i, 16, 2)
        gum_result = getDataFrameValue(df, col_num, i, 17)
        gum_parameter_k = getDataFrameValue(df, col_num, i, 18)
        gum_result_extend = getDataFrameValue(df, col_num, i, 19)
        # The 20 uncertainty components u1..u20 live in columns 20..39.
        u_values = [getDataFrameValue(df, col_num, i, 20 + k) for k in range(20)]

        # Look up (or create) the head record this row belongs to.
        # NOTE(review): spreadsheet cell values are interpolated straight into
        # SQL fragments — an injection risk; the db helpers visible here expose
        # no parameter binding, so this is flagged rather than fixed.
        query_conditon = 'ValueType=%d AND StationName="%s" AND DeviceName="%s" AND DataStartTime="%s" ' \
                         'AND DataEndTime="%s" AND Level=%d AND DataType="%s"' % \
                         (value_type, station_name, device_name, start_time, end_time, data_level, data_type)

        # Resolve each distinct head condition only once per file.
        hash_value = hash(query_conditon)
        if hash_value not in head_id_set:
            head_id_set.add(hash_value)

            head_id = -1
            print("query_conditon: %s" % query_conditon)
            # keep the select result separate from `ret` so the final
            # `ret > 0` check never compares a list against an int
            ret_head = select_db(conf['DbSaveHeadTable'], query_conditon)
            print(ret_head, len(ret_head))
            if len(ret_head) > 0:
                print("[Warning]：该原始文件头已存在，更新BatchID（即TaskID）")
                head_id = ret_head[0][0]

                # Refresh the existing head with the current batch info.
                update_str = 'BatchId="%s", ValueUnit="%s", ProjectName="%s"' % \
                                 (TASK_ID, value_unit, project_name)
                print("更新文件头Head信息： %s" % (update_str))
                update_db(conf['DbSaveHeadTable'], update_str, query_conditon)
            else:
                value_str_head = '"%s", 0, 0, "%d", "%s", "%d", "%s", "%s", "%s", "%d", "%s", "%d", "%s", "%s", -1, -1' % \
                     (TASK_ID, data_level, data_type, value_type, value_unit, start_time, end_time,
                      station_id, station_name, device_id, device_name, project_name)

                print("文件头信息1： %s" % (value_str_head))
                head_id = insert_db(conf['DbSaveHeadTable'], db_head_cols_str, value_str_head)

            print("获得新文件头ID： %d" % head_id)

        # Queue the measurement row; subsequent rows reuse the cached head_id.
        value_str = '"%d", "%s", "%d", "%.2f", "%.8f", "%s", "%s", ' \
                    '"%s", "%s", "%d", "%s", "%s", "%s", ' \
                    '"%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", ' \
                    '"%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s"' % \
                    (head_id, TASK_ID, value_type, height, value, start_time, end_time,
                     mcm_result, mcm_parameter_p, mcm_exp_times, gum_result, gum_parameter_k, gum_result_extend,
                     *u_values)
        value_arr.append(value_str)

        # Flush in batches of MAX_LEN to bound memory and statement size.
        if len(value_arr) == MAX_LEN:
            ret = insert_db_in_batch(conf['DbSaveTable'], db_data_cols, value_arr)
            value_arr = []

    if len(value_arr) > 0:
        ret = insert_db_in_batch(conf['DbSaveTable'], db_data_cols, value_arr)

    return 0 if ret > 0 else ret


def checkValueType(value_type_str):
    """Map a measurement-type label to its numeric code; -1 for unknown labels."""
    type_codes = {
        "温度": 0,        # temperature
        "密度": 1,        # density
        "径向风速": 2,    # radial wind speed
        "纬向风速": 3,    # zonal wind speed
    }
    return type_codes.get(value_type_str, -1)


def getDataFrameValue(df, col_num, line, index, flag=1):
    """
    Safely fetch one cell from *df*, falling back to a per-type default when
    the column does not exist or the cell is null.

    :param df: source DataFrame
    :param col_num: number of columns actually present in *df*
    :param line: row index
    :param index: column index
    :param flag: value kind — 1 string (default ""), 2 int (default -1),
                 3 float (default 0.0), 4 timestamp string
                 (default "0000-00-00 00:00:00"); any other flag yields ""
    :return: the (converted) cell value, or the default for *flag*
    """
    defaults = {1: "", 2: -1, 3: 0.0, 4: "0000-00-00 00:00:00"}
    if flag not in defaults:
        return ""
    # Missing column or null cell -> type-specific default.
    if index >= col_num:
        return defaults[flag]
    cell = df.iloc[line, index]
    if pd.isnull(cell):
        return defaults[flag]

    if flag == 2:
        return int(cell)
    if flag == 3:
        return str_to_float(cell)   # project helper: tolerant string->float conversion
    # flags 1 and 4 return the raw cell value unchanged
    return cell
