import os
import time
import numpy as np
import matplotlib.pyplot as plt
import numpy.polynomial.chebyshev as chebyshev
import numpy.linalg as linalg
from measurement.preprocess_probe_data import *
from pylab import mpl
from utils.mylib_db import *
from utils.mylib_utils import *
from measurement.measurement_utils import *
from utils.gum_algorithm23.draw_fig import *
import multiprocessing

"""
1. 参数存库
//2. 根据参数筛选数据（HeadId个数）
3. 时间分辨率统一化处理：
        1）根据选择的时间分辨率确定组的数据量和时间段；
        2）筛选的数据归类到各组；
        3）按统计方法计算（四类值）；（先进行高度对齐）
        4）存库和关联headId
4.         

"""


# Table storing polynomial function-fitting results.
db_table_function_fitting = "bt_data_analysis_function_fitting"
# Table holding probe-data header records.
db_data_head_table = "bt_data_head"

MAX_LEN = 1000     # Maximum number of rows per batched DB insert.

# Directory where function-fitting figures are saved (fixed).
figure_save_path = "media/data/function_fitting"

# Directory where space-time-analysis figures are saved (fixed).
draw_figure_save_path = "media/data/figure_analysis_space_time"
# db_table_draw_figure = "bt_data_figure"


def dataAnalysis(param):
    """
    数据分析接口，包括：
        1. 统计处理方法（平均值、标准差、最大值、最小值）+ 时间统一分辨率
        2. 缺失值填充方法（线性插值、对数差值、三次样条差值）+ 高度统一分辨率
        3. 多项式拟合算法（最小二乘法拟合、切比雪夫拟合）
    """
    print("进入数据分析过程..")
    print("参数情况：")
    print(param)
    # figure_save_path = "%s/%s" % (os.getcwd(), related_figure_save_path)
    # figure_save_path = figure_save_path.replace("\\", "/")

    # 参数处理
    task_id = param.get('task_id') if param.get('task_id') is not None else get_current_timestamp()                     # TaskId默认为当前时间

    # 筛选条件（数据查询参数）
    data_source = int(param.get('data_source')) if param.get('data_source') is not None else 0                          # 数据来源（枚举值）：0:数据库预处理数据，1:数据库原始数据 2:文件导入;3:最新处理的数据
    station_id = int(param.get('station_id')) if param.get('station_id') is not None else 0                             # 站点编号
    device_id = int(param.get('device_id')) if param.get('device_id') is not None else 0                                # 设备编号
    # value_type_str = param.get('value_type') if param.get('value_type') is not None else ""                             # 参量类型，格式："0" 或 "0,1,2,3"，其中0:温度；1:密度；2:纬向风速; 3:径向风速；（2309调整：改成内部转换，不接受界面输入）
    data_type = param.get('data_type') if param.get('data_type') is not None else ""                                    # 数据类型，格式"TSVP" 或 "TSVP, DUSM"等（2309调整为：T/P/W1/W2/D/N/F）
    time_start = param.get('time_start') if param.get('time_start') is not None else "0001-01-01 00:00:00"              # 时间范围的起始时间，格式：YYYY-mm-dd HH:MM:SS
    time_end = param.get('time_end') if param.get('time_end') is not None else "0001-01-01 00:00:00"                    # 时间范围的结束时间，格式：YYYY-mm-dd HH:MM:SS
    height_start = param.get('height_start') if param.get('height_start') is not None else ""                           # 高度区间起始值（暂时不使用）
    height_end = param.get('height_end') if param.get('height_end') is not None else ""                                 # 高度区间结束值（暂时不使用）

    # 文件导入路径（data_source=2时生效）
    import_file_path = param.get('import_file_path') if param.get('import_file_path') is not None else ""               # 文件导入支持参数：单个文件路径，多个文件路径，单个目录，格式"path1" 或 "path1;path2;path3"或"directory"（当data_source选择了2文件导入后，该字段必填）
    process_num = int(param.get('process_num')) if param.get('process_num') is not None else DEFAULT_PROCESS_NUM        # 默认4个进程同时读入文件
    user_id = int(param.get('user_id')) if param.get('user_id') is not None else -1                                     # 当前登录用户

    # 指定数据文件
    head_id_list = param.get('head_id_list') if param.get('head_id_list') is not None else ""                           # 默认值为：""，格式为 2,3,4,5（暂不传参）
    data_level = int(param.get('data_level')) if param.get('data_level') is not None else 1                             # 默认为1，即L2数据

    # 多项式拟合参数
    check_fitting_method = int(param.get('check_fitting_method')) if param.get('check_fitting_method') is not None else 0      # 是否执行多项式拟合
    function_power = int(param.get('function_power')) if param.get('function_power') is not None else 3                 # 多项式范式次数
    fig_save_path = param.get('fig_save_path') if param.get('fig_save_path') is not None else figure_save_path          # 函数拟合图像保存位置
    fitting_method = param.get('fit_method') if param.get('fit_method') is not None else ""                             # 拟合方法：1:最小二乘法；2:切比雪夫拟合法，可多选，形式"1"或"1,2"

    # 202402添加新功能：（排他性功能：如果点击了时空特性分析，只进行此功能，其他功能忽略）
    # 数据分析添加按钮“时空特性分析”，显示出图。方案：Python画图，提供链接给前台界面展示。（类似之前函数拟合的方案）
    # 画图输入参数为：1条廓线：高度、探测值、时间、参量类型。
    check_space_time = int(param.get('check_space_time')) if param.get('check_space_time') is not None else 0           # 是否执行时空特性分析，0默认不分析，1进行分析

    # 高度统一分辨率参数
    check_statistic_method = int(param.get('check_statistic_method')) if param.get('check_statistic_method') is not None else 0      # 是否使用统计处理方法
    check_imputation_method = int(param.get('check_imputation_method')) if param.get('check_imputation_method') is not None else 0   # 是否使用缺失值填充方法
    statistic_method = param.get('statistic_method') if param.get('statistic_method') is not None else ""               # 统计处理方法：0平均值计算；1标准差计算；2最大值计算；3最小值计算，可多选，形成字符串参数，如"0,1,2,3" 或 "0"
    imputation_method = param.get('imputation_method') if param.get('imputation_method') is not None else ""            # 缺失值填充方法：0:不填充；1:线性插值、2:对数插值、3:三次样条插值、4:均值填充、5:中位数填充、6:最小二乘法填充、7:切比雪夫拟合填充.可多选，形成字符串参数

    # 以下四个参数数据预处理同数据分析界面功能
    height_resolution = int(param.get('height_reslution')) if param.get('height_reslution') is not None else 1                   # 统一高度分辨率，数据为高度间隔值
    height_resolution_unit = int(param.get('height_reslution_unit')) if param.get('height_reslution_unit') is not None else 0    # 统一高度分辨率单位，0:km(默认),1:m
    # height_resolution_step = int(param.get('height_resolution_step')) if param.get('height_resolution_step') is not None else 1  # 统一高度分辨率步长，整数值
    time_resolution = int(param.get('time_reslution')) if param.get('time_reslution') is not None else 1                         # 统一时间分辨率，数据为时间间隔值1
    time_resolution_unit = int(param.get('time_reslution_unit')) if param.get('time_reslution_unit') is not None else 0          # 统一时间分辨率单位，0:小时/1:日/2:周/3:月/4:季度/5:年/6:全时段区间等
    # time_resolution_step = int(param.get('time_resolution_step')) if param.get('time_resolution_step') is not None else 1        # 统一时间分辨率步长，整数值

    # 判断参数合理性
    # if value_type_str == "":
    #     print("参数错误：参量类型不能为空！")
    #     return {"code": -1, "msg": "参数错误：参量类型不能为空！"}

    # 新老探测体制临时转换
    # data_type = getMultiDataTypeStr2309(data_type)
    value_type_str = getMultiValutTypeStrByDataType2309(data_type)

    conf = {
        'TaskId': task_id,
        # 任务种类：0:数据录入；1:数据预处理；2:不确定度评定;3:数据分析数据处理;
        # 5:一键评定;6:评定结果查询（文件上传）；7:不确定度校验（文件上传）；8:数据比对不同设备；9:数据比对不同时间
        "TaskType": 3,

        'Type': 1,      # 参数表类型字段标识
        'HeightReslution': height_resolution,
        'HeightReslutionStep': 1,
        'HeightReslutionUnit': height_resolution_unit,
        'TimeReslution': time_resolution,
        'TimeReslutionStep': 1,
        'TimeReslutionUnit': time_resolution_unit,
        'TimeReslutionUnitOption': ['小时', '日', '周', '月', '季度', '年', '全时段'],
        'IsTimeProcessed': 0,  # 标记是否进行了时间分辨率统一化处理

        "CheckStatisticMethod": check_statistic_method,
        "CheckImputationMethod": check_imputation_method,
        "StatisticMethod": statistic_method,
        'ImputationMethod': imputation_method,
        # 'HeightStandardSet': height_resolution_unit,
        # 'TimeStandardSet': time_resolution_unit,

        "CheckSpaceTime": check_space_time,
        # "CheckSpaceTime": 1,    # 测试
        'FigureSavePath': draw_figure_save_path,

        'CheckFittingMethod': check_fitting_method,
        'FitMethod': fitting_method,
        'FunctionPower': function_power,
        'FigSavePath': fig_save_path if fig_save_path != "" else figure_save_path,

        # 数据源
        'DataSource': data_source,
        # 支持指定headIdList
        'HeadIdList': head_id_list,
        'StationId': station_id,
        'DeviceId': device_id,
        'ValueType': value_type_str,
        'DataType': data_type,
        'TimeStart': "0001-01-01 00:00:00" if time_start=="" else time_start,
        'TimeEnd': "0001-01-01 00:00:00" if time_end=="" else time_end,
        'HeightStart': height_start,
        'HeightEnd': height_end,

        'SaveFlag': 0,      # 数据分析的结果都不存库

        'DataLevel': data_level,

        # 用户文件导入相关
        'ImportFilePath': import_file_path,
        "ProcessNum": process_num,
        "UserId": user_id,

        'DbHeadTable': "",          # 数据筛选源Head表
        'DbTable': "",              # 数据筛选源Data表
        'DbSaveHeadTable': "",      # 数据存储Head表
        'DbSaveTable': "",          # 数据存储Data表
    }
    print("参数值：")
    print(conf)

    # 不支持对最近一次录入的数据进行预处理！（但是代码里面已经支持该情况，需测试后启用）
    if conf['DataSource'] == 3:
        # print("不支持对最近一次录入的数据进行预处理！（后续可增加此功能）")
        # return {"code": -1, "msg": "不支持对最近一次录入的数据进行预处理！"}
        print("对最近一次录入的数据进行处理..")

    if conf['DataSource'] == 2 and conf['ImportFilePath'] == "":
        return {"code": -1, "msg": "数据源选择了“文件导入”，需指定导入文件或文件夹路径！"}

    # 参数检查通过
    # 保存参数
    param_conf_id = saveAnalysisParametersDataIntoDB(conf)
    conf["ParameterSetting"] = param_conf_id
    print(param_conf_id)
    # return 0

    # 指定数据筛选库表和存储库表
    # 指定保存数据的库表（预处理数据对象是L2,如果存库，进正式表，如果不存，临时表）
    # 针对其他场景，进行修改（导入文件，存临时表）
    # 数据来源（枚举值）：0:数据库预处理数据，1:数据库原始数据 2:文件导入;3:最新处理的数据
    # 保存数据
    if conf['SaveFlag'] == 1:
        conf['DbSaveHeadTable'] = db_data_head_table
        conf['DbSaveTable'] = db_raw_l2_precessed_table
    else:
        conf['DbSaveHeadTable'] = db_temporary_data_head_table
        conf['DbSaveTable'] = db_temporary_data_table_l2

    # 获取数据（conf['DataSource'] == 3情况，需根据任务表是否存库字段确定）
    if conf['DataSource'] == 0:
        conf['DbHeadTable'] = db_data_head_table
        conf['DbTable'] = db_raw_l2_precessed_table
    elif conf['DataSource'] == 1:
        conf['DbHeadTable'] = db_data_head_table
        conf['DbTable'] = db_raw_l2_data_table
    elif conf['DataSource'] == 2:
        #  2:文件导入：临时表，结果不存库
        conf['DbHeadTable'] = db_temporary_data_head_table
        conf['DbTable'] = db_temporary_data_table_l2
        conf['DbSaveHeadTable'] = db_temporary_data_head_table
        conf['DbSaveTable'] = db_temporary_data_table_l2
    # elif conf['DataSource'] == 3:  # 查任务表后进行确认数据存储源数据库表
    else:
        conf['DbHeadTable'] = db_temporary_data_head_table
        conf['DbTable'] = db_temporary_data_table_l2

    # 设置任务开始
    TASK_ID = conf['TaskId']
    if TASK_ID != "":
        ret = setTaskStatus(TASK_ID, conf['TaskType'], 0, -1, conf['DataSource'])
        if ret != "OK":
            print("setTaskStatus: %s" % ret)
            return ret

    # 对data_head_id_list中的所有数据进行高度分辨率处理和时间分辨率处理
    processes = []
    # process = multiprocessing.Process(target=runDataAnalysisTaskFunc, args=(conf, TASK_ID))     # 正式运行用（捕捉错误，设置任务完成状态）
    process = multiprocessing.Process(target=runDataAnalysisTask, args=(conf, TASK_ID))       # 开发调试用，暴露出错误代码行号
    processes.append(process)
    process.start()

    return 0


def runDataAnalysisTaskFunc(conf, TASK_ID):
    """Production wrapper for runDataAnalysisTask.

    Times the run, marks the task finished on any non-zero result, and
    surfaces error messages through a tkinter popup.  Any unexpected
    exception is caught, the task is finalised, and the error is shown.
    """
    try:
        start_time = time.time()    # start timing
        ret = runDataAnalysisTask(conf, TASK_ID)
        print(ret)
        end_time = time.time()      # stop timing
        # elapsed wall-clock time in milliseconds
        run_time = (end_time - start_time) * 1000
        print('代码运行时间为%d ms' % run_time)

        if ret != 0:
            print("设置任务状态为 完成状态 ！")
            setTaskStatusFinish(conf)

        # Show a tkinter popup on error results.
        # FIX: ret may be the int 0 (success), which has no .keys() — the old
        # `'code' in ret.keys()` raised AttributeError in that case.
        if isinstance(ret, dict) and ret.get('code') == -1:
            err_msg = ret['msg']
            print("错误：", err_msg)
            show_msg_to_tkinter(err_msg)

    except Exception as e:
        print("runDataAnalysisTaskFunc_Error: %s" % e)
        setTaskStatusFinish(conf)
        # surface the failure in a tkinter popup as well
        show_msg_to_tkinter(e)

def runDataAnalysisTask(conf, TASK_ID):
    """Resolve the head-id list described by *conf* and run the analysis.

    Resolution order:
      1. an explicit conf['HeadIdList'],
      2. a condition query (DataSource 0 / 1),
      3. the most recently processed batch (DataSource 3),
      4. the batch just imported from user files (DataSource 2).

    Returns taskDataAnalysis()'s result dict, or {"code": -1, "msg": ...} on
    any selection failure.
    """
    ######################################
    # When the source is "file import", ingest the user files first.
    if conf['DataSource'] == 2:
        print("开始导入用户数据..")
        callReadFileProcessAndWaitFinish(conf, conf['ImportFilePath'], conf['ProcessNum'], TASK_ID)
        print("读取用户文件结束！")
        print("进入筛选文件数据阶段..")

    ######################################
    # Resolve which head records to analyse.
    print("开始筛选文件数据阶段..")
    head_id_list = conf['HeadIdList']
    data_head_id_list = []
    data_batch_id = -1

    if head_id_list != "":
        # explicit head ids take priority over every other selection mode
        print("参数指定了Head_id_list!优先处理..")
        head_id_list_arr = head_id_list.split(",")
        if len(head_id_list_arr) > 0:
            for head_id in head_id_list_arr:
                print(head_id)
                ret = getHeadIdInfoByHeadId(int(head_id))
                if ret != "":
                    data_head_id_list.append(ret)
        else:
            # NOTE(review): str.split(",") never yields an empty list, so this
            # branch is effectively unreachable; kept as a safety net.
            print("参数head_id_list格式错误！")
            return {"code": -1, "msg": "提供的参数head_id_list格式错误！"}

    elif conf['DataSource'] == 0 or conf['DataSource'] == 1:
        # DataSource 0 (preprocessed) / 1 (raw): query by the filter conditions
        print("根据数据源，按条件筛选head_id_list..")
        if conf['ValueType'] == "":
            print("参量类型ValueType不能为空")
            return {"code": -1, "msg": "当前选择的数据源，参量类型参数不能为空！"}

        data_head_id_list = queryDataByCondition2309(conf, 0)  # query path for the 2309 sharded-table layout
        if len(data_head_id_list) == 0:
            print("错误：未检索到探测数据，请重新设置检索条件！")
            return {"code": -1, "msg": "错误：未检索到探测数据，请重新设置检索条件！"}

    elif conf['DataSource'] == 3:
        # DataSource 3: analyse the most recently processed batch; the source
        # tables depend on whether that batch was persisted.
        print("取最新一次任务处理结束的数据..")
        data_batch_info = getLatestDataBatchId(0)
        if data_batch_info == "":
            print("错误：未找到最近录入的数据")
            return {"code": -1, "msg": "未找到最近录入的数据！"}
        data_batch_id = data_batch_info['KeyName']
        print("data_batch_id: %s" % data_batch_id)

        # pick the source tables according to where the batch was stored
        if data_batch_info['IsSaveIntoDb'] == 1:
            conf['DbHeadTable'] = db_data_head_table
            conf['DbTable'] = db_raw_l2_data_table
        elif data_batch_info['IsSaveIntoDb'] == 0:
            conf['DbHeadTable'] = db_temporary_data_head_table
            conf['DbTable'] = db_temporary_data_table_l2

        data_head_id_list = getHeadIdByBatchId(data_batch_id, conf['DataLevel'], 0)
        if len(data_head_id_list) == 0:
            print("错误：未找到最近录入数据的head_id")
            return {"code": -1, "msg": "未找到最近录入数据的head_id！"}

    elif conf['DataSource'] == 2:
        # DataSource 2: use the user files imported at the top of this call
        # (their batch id equals the current task id)
        print("获取刚刚导入的用户文件数据..")
        data_batch_id = TASK_ID
        print("data_batch_id: %s" % data_batch_id)

        data_head_id_list = getHeadIdByBatchIdV3(conf, data_batch_id, conf['DataLevel'], 0)
        if len(data_head_id_list) == 0:
            print("错误：未找到最近录入数据的head_id")
            return {"code": -1, "msg": "未找到最近录入数据的head_id！"}

    else:
        print("参数错误！")
        return {"code": -1, "msg": "参数错误，请检查传入的参数！"}

    print("data_head_id_list: ")
    print(data_head_id_list)
    print("检索到HearID数量：%d" % len(data_head_id_list))
    if len(data_head_id_list) == 0:
        print("未获取到探测数据！")
        return {"code": -1, "msg": "未获取到探测数据"}

    time.sleep(2)

    # Clear stale temporary data; the argument matches the setTaskStatus task type.
    clear_db_temporary_table_and_keep_last_same_type_task(3)

    ret = taskDataAnalysis(data_head_id_list, conf, conf['DataLevel'], TASK_ID)
    return ret


##########################################
# Multi-process worker: the data-analysis task itself.
def taskDataAnalysis(data_head_id_list, conf, data_level, TASK_ID):
    """Run the configured analysis steps for every selected head id.

    Parameters:
        data_head_id_list: list of tuples (head_id, value_type, start_time,
            end_time, station_id, device_id, file_name, file_store_path),
            one per selected data file.
        conf: full task configuration assembled by dataAnalysis().
        data_level: data level (1 == L2).  # NOTE(review): not used directly here — confirm
        TASK_ID: interactive-task id; "" disables task-status updates.

    Pipeline (each step optional, driven by conf flags):
        Step 1  height-resolution normalisation + None-filling of gaps
        Step 2  time-resolution normalisation (statistic methods)
        Step 3  polynomial function fitting
        Step 4  space-time characteristic figures
    Results are then written to the DB tables named in conf.
    Returns a {"code": 0/-1, "msg": ...} dict.
    """
    print("数据分析对象：")
    print(data_head_id_list)
    print(conf)

    data_list_json = {}    # cache of raw / intermediate data: {"head_id1": [], "head_id2": [], ...}

    # Human-readable step descriptions: whichever steps actually run are
    # recorded, and the joined text is stored in the DB parameter field.
    s1_desc_height_resolution = "统一高度分辨率：%d km" % conf['HeightReslution']
    s2_desc_time_resolution = "统一时间分辨率：%d %s" % (conf['TimeReslution'], conf['TimeReslutionUnitOption'][conf['TimeReslutionUnit']])
    s3_desc_function_fitting = "函数拟合操作.."
    s4_desc_draw_space_time_figure = "时空特性分析.."
    # records which of the steps above were executed
    pre_process_parameter_list = []


    # Load all probe data into data_list_json, keyed by head id.
    # for (head_id, value_type, start_time, end_time) in data_head_id_list:
    for (head_id, value_type, start_time, end_time, station_id, device_id, file_name, file_store_path) in data_head_id_list:
        print("==================================")
        print("head_id = %d, value_type = %s" % (head_id, value_type))
        # raw_data = getRawDataByHeadIdInJsonObjectList(conf, head_id, data_level)

        # Read the probe values directly from the stored L2 file.
        raw_data = parsingRadarProbeDataFromRawFile2309(file_store_path, file_name, head_id, TASK_ID, 1)

        if raw_data == "":
            print("错误：未找到最近录入的原始数据")
            continue

        print("raw_data:原始数据条数: %d" % len(raw_data))

        data_list_json[head_id] = raw_data

    if len(data_list_json.keys()) == 0:
        print('未找到原始探测数据！')
        # return -1
        return {"code": -1, "msg": "未找到原始探测数据！"}

    # Height normalisation is a prerequisite for time normalisation and
    # function fitting, so it happens right after data loading.
    if conf['CheckImputationMethod'] == 1 or conf['ImputationMethod'] != "":
        print("【数据分析Step1-高度分辨率统一化】：对多个L2文件进行高度统一化处理..")
        data_list_json_tmp = {}

        max_height_global = 0.0
        min_height_global = 100000.0            # empirical upper bound on height
        # resample every profile to the configured height resolution
        for head_id in data_list_json.keys():
            raw_data = data_list_json[head_id]
            raw_data = heightNormalizationByStep(raw_data, conf['HeightReslution'])
            print("高度统一化后的数据：%d" % len(raw_data))
            data_list_json_tmp[head_id] = raw_data

            # track the global min / max heights across all profiles
            min_height, max_height = getMaxHeightAndMinHeight(raw_data)
            min_height_global, max_height_global = getGlobalMaxHeightAndMinHeight(min_height_global, max_height_global, min_height, max_height)

        # Fill any height missing from [min_height_global, max_height_global]
        # (step = HeightReslution) with a None-valued placeholder.
        data_list_json_tmp = fillHeightWithNoneValue(data_list_json_tmp, min_height_global, max_height_global, conf['HeightReslution'])

        data_list_json = data_list_json_tmp
        print("高度统一分辨率处理完成！数据head_id个数：%d" % len(data_list_json))
        # record that this step ran
        pre_process_parameter_list.append(s1_desc_height_resolution)

        time.sleep(2)

    conf['IsTimeProcessed'] = 0
    if conf['CheckStatisticMethod'] == 1 or conf['StatisticMethod'] != "":
        print("【数据分析Step2-时间分辨率统一化】：对多个L2文件进行时间统一化处理..")
        # TODO
        data_list_json = timeNormalization(data_list_json, conf, TASK_ID, 1)
        conf['IsTimeProcessed'] = 1
        # print(data_list_json)

        print("时间统一分辨率处理完毕！数据head_id个数：%d" % len(data_list_json))
        print(data_list_json.keys())

        # record that this step ran
        pre_process_parameter_list.append(s2_desc_time_resolution)

        time.sleep(2)

    if conf['CheckFittingMethod'] == 1 or conf['FitMethod'] != "":
        print("【数据分析Step3-函数拟合】：对L2文件进行函数拟合处理..")
        functionFittingAnalysis(data_list_json, conf)
        # fitting_methods_list = conf['FitMethod'].split(',')
        # for fitting_method in fitting_methods_list:
        #     runFunctionFittingMethod(raw_data, fitting_method, conf)
        print("共拟合函数条数.. %d " % len(data_list_json))

        # record that this step ran
        pre_process_parameter_list.append(s3_desc_function_fitting)

    # "Space-time characteristic analysis": figures are rendered in Python and
    # served to the front end as links (same scheme as the fitting figures).
    # Input per profile: height, probe value, time, value type.
    if conf['CheckSpaceTime'] == 1:
        print("【数据分析--时空特性分析】画图处理...")
        ret = functionDrawSpaceTimeFigure(data_list_json, conf)
        if 'code' in ret.keys() and ret['code'] == -1:
            err_msg = ret['msg']
            print("错误：", err_msg)
            return {"code": -1, "msg": err_msg}


        # record that this step ran
        pre_process_parameter_list.append(s4_desc_draw_space_time_figure)

    if len(pre_process_parameter_list) == 0:
        print("未选择任何功能，退出！")
        # return -1
        return {"code": -1, "msg": "未选择任何功能，系统退出！"}

    # Persist the processed data.
    conf['ParameterDescription'] = ','.join(pre_process_parameter_list)

    # Special case: data selected from the permanent tables but results stored
    # in the temporary tables — the head rows must first be copied into the
    # temporary head table and the data re-keyed to the new head ids.
    if conf['IsTimeProcessed'] == 0 and \
       conf['DbHeadTable'] == db_data_head_table and conf['DbSaveHeadTable'] == db_temporary_data_head_table:
        head_id_pair_list = copyHeadInfoToTempHeadTable(conf, data_list_json.keys())
        data_list_json = updateDataJsonInfo(head_id_pair_list, data_list_json)

    print("预处理数据保存到数据库")
    # TODO mind which parameters get persisted!
    # sort each profile by height before saving
    for head_id, data_list in data_list_json.items():
        # sort by height
        data_list_json[head_id] = sorted(data_list, key=lambda x: x['Height'])
    save_processed_data_into_db(data_list_json, conf)

    # Update the data-head status table: TODO
    # updateDataHeadTable(head_id, 1, 1)      # i.e. head marked as preprocessed (1), status finished (1)
    # update the head ids
    update_processed_data_headinfo_into_db(data_list_json, conf)
    # update_processed_data_headinfo_into_db_in_batch(data_list_json, conf)

    # Mark the interactive task as finished.
    print("将要设置任务结束..")
    if TASK_ID != "":
        print("设置任务结束！")
        updateTaskStatus(TASK_ID, conf['TaskType'])

    # return 0
    return {"code": 0, "msg": "数据分析完成！"}


def functionDrawSpaceTimeFigure(data_list_json, conf):
    """Draw space-time characteristic figures for the analysed profiles.

    Groups profiles by data type, merges all profiles of the same type into
    one DataFrame (types with fewer than two profiles are skipped), renders
    the figures via dataAnalysisDrawSpaceTime, and records each produced
    figure in the DB.

    Returns {"code": 0/-1, "msg": ...}.
    """
    print("===========================33333==============================")
    probe_data_obj = {}
    for head_id, data_list in data_list_json.items():
        # sort each profile by height
        data_list = sorted(data_list, key=lambda x: x['Height'])

        # FIX: skip empty profiles — previously `data_type` was unbound on an
        # empty first profile (NameError) or silently reused the previous
        # head's value, mis-filing an empty DataFrame.
        if not data_list:
            continue
        value_type = data_list[0]['ValueType']
        data_type = checkL2DataTypeByValueType2309(value_type)

        # DataFrame rows: height, probe value, time bucket
        row_data_list = [[obj['Height'], float(obj['Value']), obj['BinKey']] for obj in data_list]
        df = pd.DataFrame(row_data_list, columns=['Height', 'Value', 'DataStartTime'])

        print(head_id, data_type)
        print(probe_data_obj.keys())
        probe_data_obj.setdefault(data_type, []).append(df)

    ###############################################################
    # figure output directory
    save_path = conf['FigureSavePath']
    if not os.path.exists(save_path):
        try:
            os.makedirs(save_path)
        except Exception as e:
            print("创建路径报错：", e)
    else:
        # clear out any previous figures
        clear_folder_shutil(save_path)

    # 1. merge the DataFrames of each data type into a single DataFrame
    # 2. drop data types that contain only one profile
    print("画时空特性分析图参数（合并dataframe前）：", probe_data_obj)
    probe_data_obj_for_drawing = {}
    for data_type, df_list in probe_data_obj.items():
        # a single profile cannot show a time dimension — skip it
        if len(df_list) < 2:
            continue
        # pd.concat over the whole list replaces the manual index-0 loop
        probe_data_obj_for_drawing[data_type] = pd.concat(df_list, axis=0)

    # validate that there is something left to draw
    if len(probe_data_obj_for_drawing) == 0:
        print("无符合时空特性分析结果画图的廓线数据！")
        return {"code": -1, "msg": "无符合时空特性分析结果画图的廓线数据！"}

    print("画“时空特性分析”图参数：", probe_data_obj_for_drawing)
    figure_conf = {
        "figure_location": conf['FigureSavePath'],     # figure output directory
        "xx": "",                                      # reserved for future options
    }

    # returns a mapping of data type -> produced figure path
    result = dataAnalysisDrawSpaceTime(probe_data_obj_for_drawing, figure_conf)
    print("时空图画图结束，返回：", result)

    # record every produced figure in the DB
    for key, value in result.items():
        param = {
            "BatchId": conf['TaskId'],
            "TaskType": 2,                  # task type: 0 assessment; 1 comparison; 2 data analysis
            "FigureType": -1,               # figure subtype; used by data comparison only
            "FigureName": "时空特性分析图",
            "DataType": key,
            "ValueType": convertDataType2ValueType(key),
            "FigureLocation": value.replace("\\", "/"),
            "Remark": "数据分析-时空特性分析"
        }
        saveDrawFigureResultIntoDB(param)
    print("===========================44444==============================")

    return {"code": 0, "msg": "时空特性分析完成！"}


# Mock for the space-time drawing step: lets the pipeline be exercised
# without rendering real figures.  Input per profile: height, probe value,
# time, value type; output links are served to the front-end UI.
def drawSpaceTimeMock(probe_data_obj, figure_conf):
    """Return canned figure paths keyed by data type (test stand-in)."""
    mock_result = {
        'T': "/figure/path/figure1.png",
        'D': "/figure/path/figure2.png",
    }
    return mock_result


def functionFittingAnalysis(data_list_json, conf):
    """Apply every configured fitting method to each head id's profile data."""
    methods = conf['FitMethod'].split(',')
    total = len(data_list_json)
    count = 0
    for count, (head_id, raw_data) in enumerate(data_list_json.items(), start=1):
        print("正在拟合原始数据： %d / %d" % (count, total))
        for method in methods:
            runFunctionFittingMethod(raw_data, method, conf, head_id)

    print("共拟合数据条数： %d" % count)


def taskDataAnalysis_bak(data_head_id_list, conf, data_batch_location, data_level, TASK_ID):
    print("数据分析对象：")
    print(data_head_id_list)
    print(conf)
    # 存临时表
    db_temporary_data_head_table = "bt_temporary_data_head"
    db_temporary_data_table_l2 = "bt_temporary_data"
    conf['DbHeadTable'] = db_temporary_data_head_table

    # ret_data_dict = {"0(温度)": {"日/周/月/季度/年/全时段":{"30":[a,b,c...],"31":[a,b,c,...], }}, ...}, "1(密度)":{}, "2风速":{}}
    ret_data_dict = {}
    # 1. 根据head_id，测量数据类型value_type，获取原始数据
    # 2. 筛选的数据归类到各组；
    for (head_id, value_type, start_time, end_time) in data_head_id_list:
        print("head_id = %d, value_type = %s" % (head_id, value_type))
        # raw_data = getPreprocessedDataByHeadId(head_id, data_batch_location, data_level)               # data_type指定从L0或L2原始数据表中获取数据
        # 数据来源（枚举值）：0:新增数据;1:所有预处理数据;2:未经预处理数据
        # raw_data = getPreprocessedDataByHeadIdV2(conf['DataSource'], head_id, data_batch_location, data_level)
        raw_data = getPreprocessedDataByHeadIdV3(head_id, conf)

        if raw_data == "":
            print("错误：未找到最近录入的原始数据")
            #return {"code": -1, "msg": "未找到最近录入的原始数据！"}
            continue

        print("raw_data:XXXXXXXX")
        # print(raw_data)
        #time.sleep(10)


        # 根据需要，进行高度分辨率处理 TODO
        if conf['ImputationMethod'] == 1:
            print("高度分辨率处理..")
            print("预处理：对单个L2文件进行高度统一化处理..")
            # data_list_json_tmp = {}
            # # 实现不同高度分辨率
            # for head_id in data_list_json.keys():
            #     # raw_data = heightNormalizationByStep(raw_data, 1)
            #     raw_data = data_list_json[head_id]
            #     raw_data = heightNormalizationByStep(raw_data, conf['HeightReslution'])
            #     print("高度统一化后的数据：%d" % len(raw_data))
            #     data_list_json_tmp[head_id] = raw_data
            #
            # data_list_json = data_list_json_tmp
            # print("高度统一分辨率处理完成！数据head_id个数：%d" % len(data_list_json))
            # # 记录参数
            # pre_process_parameter_list.append(s2_desc_height_resolution)
            #
            # # print(data_list_json)
            # print("步骤2：统一高度分辨率 处理完成..")
            # time.sleep(2)

         # 根据需要，进行时间分辨率处理
        if value_type not in ret_data_dict.keys():
            ret_data_dict[value_type] = {}

        for i in range(len(raw_data)):
            height = raw_data[i][5]
            value = Decimal(raw_data[i][6])

            # 时间分辨率统一化处理：
            # 1）根据选择的时间分辨率确定组的数据量和时间段；
            # 0:小时/1:日/2:周/3:月/4:季度/5:年/6:全时段区间等
            # 0不做处理
            key = getTimeFormat(start_time, conf["TimeReslutionUnit"])

            if key not in ret_data_dict[value_type].keys():
                ret_data_dict[value_type][key] = {}
            if height not in ret_data_dict[value_type][key].keys():
                ret_data_dict[value_type][key][height] = []
            ret_data_dict[value_type][key][height].append(value)

    print(ret_data_dict.keys())
    for value_type in ret_data_dict.keys():
        print(value_type)
        print(len(ret_data_dict[value_type].keys()))
        print(ret_data_dict[value_type].keys())

    # 3）按统计方法计算（四类值）；（先进行高度对齐）
    # 1标准差计算；2最大值计算；3最小值计算，可多选，形成字符串参数，如"0,1,2,3" 或 "0"
    ret_data_dict_result = {}
    # db_cols = ["HeadId", "BatchId", "Height", "ParameterSetting", "DataStartTime", "DataEndTime", "U1", "U2", "U3", "U4"]
    db_cols = ["HeadId", "BatchId", "ValueType", "Height", "Value", "U1", "U2", "U3", "U4", "BinKey"]
    value_arr = []
    tmp_head_count = 0                  # 临时HeadId
    initHeadTableTempDataHeadId()       # 初始化Head表TempDataHeadId字段为-1

    # 0:小时/1:日/2:周/3:月/4:季度/5:年/6:全时段区间等
    time_format = getTimeFormatStr(conf["TimeReslutionUnit"])
    count = 0

    statistic_type_arr = conf['StatisticMethod'].split(',')
    print(statistic_type_arr)

    for value_type in ret_data_dict.keys():
        print(value_type)
        ret_data_dict_result[value_type] = {}
        for key in ret_data_dict[value_type].keys():
            if len(value_arr) >= MAX_LEN:
                # 存库：存临时表
                ret = save_data_into_db(db_temporary_data_table_l2, db_cols, value_arr, 0)
                count += len(value_arr)
                value_arr = []

            print("value_type=%d; key=%s" % (value_type, key))
            print("len=%d;" % len(ret_data_dict[value_type][key]))
            ret_data_dict_result[value_type][key] = {}
            tmp_head_count += 1

            #################
            # 以下部分确定临时headId的信息
            # 获取当前分辨率时间区间（根据时间分辨率和步长计算时间起始结束区间）计算标准时间：
            # TODO 可记录所有实际时间，取最大最小值
            if conf["TimeReslutionUnit"] == 6:
                data_start_time = start_time_max
                data_end_time = end_time_max
            else:
                (data_start_time, data_end_time) = getTimeRange(conf["TimeReslutionUnit"], conf['TimeReslution'], key)

            # 每个时间分辨率产生一组数据（1个headId）
            # 生成新的Head_ID
            # 1）获取老的相关联head信息：设备、站点、体制等
            for (head_id, value_type_tmp, start_time, end_time) in data_head_id_list:
                print("head_id = %d" % head_id)
                head_info = getHeadFullInfoByHeadId(head_id)
                # print(head_info)
                if value_type == head_info['ValueType']:
                    # 2)用老的同类数据（同站点+同设备）创建新的headId
                    headParamConf = {
                        "BatchId": TASK_ID,
                        "Count": 0,
                        "Level": head_info['Level'],
                        "DataType": head_info['DataType'],      # ???
                        "ValueType": value_type,
                        "ValueUnit": head_info['ValueUnit'],
                        "DataStartTime": data_start_time,
                        "DataEndTime": data_end_time,
                        "filepath": "",
                        "tempfilename": "",
                        "StationId": head_info['StationId'],
                        "StationName": head_info['StationName'],
                        "DeviceId": head_info['DeviceId'],
                        "DeviceName": head_info['DeviceName'],
                        "SystemType": head_info['SystemType'],  # ???
                        "ProjectName": head_info['ProjectName'],
                        "RawDataStatus": 0,     # 原始数据状态：0:已录入；
                        "SharedWay": 1,         # 文件头生成方式：0仅对应原始文件;1：仅数据预处理；2原始文件+数据预处理共用
                        "SharedHeadId":1,       # 文件头标记：0:仅对应原始文件;1:仅数据预处理；2:不确定度评定；3:预处理+不确定度评定
                        "DbTable": conf['DbHeadTable'],
                        "BinKey": key,
                    }
                    print(headParamConf)
                    # 查询HeadId，若不存在，则插入数据库
                    head_id_new = CheckDataHeadTableInfo(headParamConf)
                    print("头部信息headID：%s" % head_id_new)
                    if head_id_new <= 0:
                        print("Error: 文件头信息插入数据库错误!")
                        return {"code": -1, "msg": "文件头信息插入数据库错误！"}
                    break
            # 临时headId的信息确定完毕
            ####################

            for height in ret_data_dict[value_type][key].keys():
                # 0平均值计算；1标准差计算；2最大值计算；3最小值计算
                value_averge = 0.0
                value_std = 0.0
                value_max = 0.0
                value_min = 0.0
                # 根据界面传参计算相应的值
                if '0' in statistic_type_arr:
                    value_averge = np.mean(ret_data_dict[value_type][key][height])
                if '1' in statistic_type_arr:
                    value_std = np.std(ret_data_dict[value_type][key][height])
                if '2' in statistic_type_arr:
                    value_max = np.max(ret_data_dict[value_type][key][height])
                if '3' in statistic_type_arr:
                    value_min = np.min(ret_data_dict[value_type][key][height])
                ret_data_dict_result[value_type][key][height] = [value_averge, value_std, value_max, value_min]

                value_str = '"%d", "%s", "%d", "%.2f", "%.8f", "%.8f", "%.8f", "%.8f", "%.8f", "%s"' % \
                            (head_id_new, TASK_ID, value_type, str_to_float(height), 0.0, value_averge,
                             value_std, value_max, value_min, key)
                value_arr.append(value_str)

            if time_format == "quarter":
                #key的格式 2022-1Q, 2022-2Q, 2022-3Q, 2022-4Q
                key = key.split("-")[1][0]
            # 关联到head表，更新head表的TempDataHeadId字段
            print("更新Head表TempDataHeadId字段： key = %s ; value_type = %d" % (key, value_type))
            for (head_id, value_type_tmp, start_time, end_time) in data_head_id_list:
                if value_type == value_type_tmp:
                    updateHeadTableTempDataHeadId(head_id, head_id_new, time_format, key)

    # 存库：存临时表
    if len(value_arr) > 0:
        ret = save_data_into_db(db_temporary_data_table_l2, db_cols, value_arr, 0)
        count += len(value_arr)

    print("总数据HeadId数量：%d" % tmp_head_count)
    print("总数据条数：%d" % count)

    # 设置任务结束
    print("将要设置任务结束..")
    if TASK_ID != "":
        print("设置任务结束！")
        updateTaskStatus(TASK_ID, 3)


# 数据拟合任务
# Data-fitting task
def taskFunctionFitting(data_head_id_list, conf, data_batch_location, data_level, fitting_method, TASK_ID):
    """
    Run polynomial fitting for every (head_id, value_type, start, end) tuple
    in data_head_id_list, then mark the task finished.

    data_batch_location / data_level select which raw-data table (L0 or L2)
    the preprocessed data is read from.  Entries with no stored data are
    skipped with a log message rather than aborting the whole task.
    """
    print("函数拟合对象：")
    print(data_head_id_list)
    for head_id, value_type, start_time, end_time in data_head_id_list:
        print("head_id = %d, value_type = %s" % (head_id, value_type))
        # data_level decides whether data comes from the L0 or L2 raw table.
        raw_data = getPreprocessedDataByHeadId(head_id, data_batch_location, data_level)
        if raw_data == "":
            print("错误：未找到最近录入的原始数据")
            continue

        runFunctionFittingMethod(raw_data, fitting_method, conf)

    # Flag the task as finished (status 4) when a task id was supplied.
    print("将要设置任务结束..")
    if TASK_ID != "":
        print("设置任务结束！")
        updateTaskStatus(TASK_ID, 4)


def runFunctionFittingMethod(raw_data, fitting_method, conf, tag=""):
    """
    Run least-squares polynomial fitting on one batch of preprocessed data
    and persist the result (coefficients + figure path) to the database.

    raw_data: list of row dicts; row 0's 'ValueType' selects the x-axis label.
    fitting_method: stored with the result for traceability.
    conf: must provide 'FigSavePath' and 'FunctionPower'.
    tag: optional suffix appended to the generated figure file name.

    Returns 0 on success, or {"code": -1, "msg": ...} when column extraction
    fails (return shape kept for backward compatibility with callers).
    """
    print("raw_data:XXXXXXXX")
    try:
        (colX, colY) = getDataByColumnsV2(raw_data)
        print(colX)
        print(colY)
    except Exception as e:
        # Bug fix: the original used print("...%s", e), which printed a
        # tuple instead of interpolating the exception text.
        print("Data_get_columns_Error: %s" % e)
        return {"code": -1, "msg": "数据获取列格式错误！"}

    # X-axis label chosen by measured value type.
    # TODO: the label/unit should be queried from bt_temporary_data_head
    # via head_id instead of being hard-coded here.
    xlabel = {
        0: '温度/K',
        1: '密度/(kg/m3)',
        2: '纬向风速/(m/s)',
        3: '径向风速/(m/s)',
    }.get(raw_data[0]['ValueType'], "探测值")
    # Bug fix: same comma-instead-of-% issue as above.
    print("X_Label: %s" % xlabel)

    # Figure save path: timestamp (plus optional tag) keeps file names unique.
    file_tag = get_current_timestamp()
    if tag != "":
        file_tag = file_tag + '_' + str(tag)
    fig_save_path = "%s/%s.png" % (conf['FigSavePath'], file_tag)

    # Least-squares polynomial fitting
    coef = functionFitting(colX, colY, conf['FunctionPower'], fig_save_path, xlabel)
    print("函数拟合结果：")
    print(coef)
    coef_str = ','.join('%.3f' % factor for factor in coef)
    saveFunctionFittingResultIntoDB(conf['FunctionPower'], coef_str, fig_save_path, fitting_method)

    # Chebyshev fitting (kept disabled, as in the original)
    # functionFittingChebyshev(colX, colY, conf['FunctionPower'])

    return 0


##########################################
# 数据拟合功能
# 最小二乘法拟合多项式
# 参考：https://baijiahao.baidu.com/s?id=1643468349140251072&wfr=spider&for=pc
##########################################
# Data-fitting: least-squares polynomial fit
# Reference: https://baijiahao.baidu.com/s?id=1643468349140251072&wfr=spider&for=pc
def functionFitting(x, y, n, fig_save_path, xlabel="探测值"):
    """
    Least-squares polynomial fit of degree n to (x, y).

    Side effect: saves a transparent styled plot of the data to
    fig_save_path (measured value on the x-axis, height on the y-axis —
    the axes are intentionally swapped in the plot call).

    Returns the fit coefficients rounded to 3 decimals, highest power first.
    """
    # Clear the canvas so the previous figure does not bleed into this one.
    plt.clf()

    x = np.array(x)
    y = np.array(y)
    fit_coef = np.polyfit(x, y, n)      # fit coefficients
    # Pretty-print the fitted polynomial (display only).
    # Fix: reuse fit_coef instead of recomputing np.polyfit a second time.
    print(np.poly1d(fit_coef))

    fit_coef = [round(i, 3) for i in fit_coef]

    plt.rcParams['font.sans-serif'] = ['SimHei']    # render Chinese labels
    plt.rcParams['axes.unicode_minus'] = False      # render minus signs
    # Intentional swap: measured value on x, height on y.
    plt.plot(y, x, label='拟合数据', color='#1ff78c', linewidth=2.5)
    plt.tick_params(axis='x', colors='white')       # tick colour
    plt.tick_params(axis='y', colors='white')       # tick colour
    plt.xticks(fontsize=13)                         # tick font size
    plt.yticks(fontsize=13)                         # tick font size
    plt.grid(axis="y", linewidth=0.3)               # horizontal grid lines

    # Hide the frame but keep ticks and labels.
    # Bug fix: plt.axes() created a NEW blank axes on top of the plot, so
    # the spines being hidden were not those of the plotted axes; gca()
    # returns the axes the curve was actually drawn on.
    ax = plt.gca()
    for side in ['top', 'right', 'bottom', 'left']:
        ax.spines[side].set_visible(False)

    # Axis labels (white to match the dark UI theme).
    plt.xlabel(xlabel, fontsize=13, color='white')
    plt.ylabel('高度/km', fontsize=13, color='white')
    plt.legend()

    # Transparent background; tight bbox / zero padding trims white margins.
    plt.savefig(fig_save_path, dpi=72, transparent=True, bbox_inches='tight', pad_inches=0.0)
    return fit_coef

def functionFittingWithPredict(x, y, calculate_x, n):
    """
    Fit a degree-n polynomial to (x, y), then evaluate it at calculate_x.

    Shows a quick diagnostic plot (blocking) and returns
    (coefficients rounded to 3 decimals, predicted y values).
    """
    xs = np.array(x)
    ys = np.array(y)
    query = np.array(calculate_x)

    coeffs = np.polyfit(xs, ys, n)          # fit coefficients
    predicted = np.polyval(coeffs, query)   # evaluate fit at the query points

    # Plot the samples and the predicted curve.
    coeffs = [round(c, 3) for c in coeffs]
    plt.plot(xs, ys, '^')
    plt.plot(query, predicted)
    plt.show()
    return coeffs, predicted


# 切比雪夫法拟合多项式
# Chebyshev-basis polynomial fitting
def functionFittingChebyshev(x, y, deg):
    """
    Interpolate (x, y) exactly in the Chebyshev basis.

    NOTE(review): `deg` is overridden to len(x) - 1 so the
    Chebyshev-Vandermonde matrix is square and linalg.solve has a unique
    solution — the original silently ignored the argument too.

    Fix vs. original: the coefficient vector (lowest order first) is now
    returned instead of only being printed.
    """
    deg = len(x) - 1                    # exact interpolation degree
    A = chebyshev.chebvander(x, deg)    # Chebyshev-Vandermonde matrix

    print(A, "# A")
    c = linalg.solve(A, y)
    print(c, "# c")
    # Sanity print: evaluate the fitted series back at the sample points.
    for v in x:
        print(v, np.polynomial.Chebyshev(c)(v), "#p(%d)" % v)
    return c


#####
##二阶切比雪夫多项式系数
# https://blog.csdn.net/BeautyGao/article/details/47283053
#####
# Second-order (discrete) Chebyshev polynomial basis
# https://blog.csdn.net/BeautyGao/article/details/47283053
def CbVal(IO):
    """
    Build the discrete second-order Chebyshev basis over IO sample points.

    Returns (KO, cb, v) where KO = 2 is the order, cb holds the raw basis
    rows (constant, linear, quadratic) and v holds the same rows normalised
    to unit Euclidean length.

    Bug fix: the original never filled cb[1] (it only computed the centring
    offset cb1err), so the linear row stayed all zeros — cb[2] was then
    built from the wrong values and normalising cb[1] raised
    ZeroDivisionError.  The missing assignment is restored below.
    """
    KO = 2
    cb = np.zeros([KO + 1, IO])
    # Constant row.
    cb[0] = 1.0
    # Linear row: sample index t = 1..IO centred on its mean (IO+1)/2.
    cb1err = (IO + 1.0) / 2.0
    cb[1] = [t - cb1err for t in range(1, IO + 1)]
    # Quadratic row: centred squares minus (IO^2 - 1)/12, the variance of
    # 1..IO, which makes this row orthogonal to the constant row.
    cb2err = (IO * IO - 1.0) / 12.0
    cb[2] = [i * i - cb2err for i in cb[1]]

    # Normalise each row to unit length (norm inlined; the original used
    # the sibling helper sq(), which computes the same sqrt-of-sum-of-squares).
    v = np.zeros([KO + 1, IO])
    for k in range(KO + 1):
        norm = math.sqrt(sum(c * c for c in cb[k]))
        v[k] = [c / norm for c in cb[k]]
    return KO, cb, v

##求平方和的根
def sq(v):
    """Return the Euclidean norm of v (square root of the sum of squares)."""
    return math.sqrt(sum(component * component for component in v))

##一维二阶切比雪夫多项式
# One-dimensional second-order Chebyshev polynomial fit
def D1Cheb(f):
    """
    Fit the sequence f with the second-order discrete Chebyshev basis.
    Returns the fitted values as a numpy array of the same length.
    """
    dimension = len(f)
    KO, cb, v = CbVal(dimension)

    # Projection of f onto each normalised basis row.
    A = np.zeros([KO + 1])
    for k in range(KO + 1):
        A[k] = sum(fv * bv for fv, bv in zip(f, v[k]))

    # Reconstruction: fit[j] = sum_k v[k][j] * A[k].
    # (The original shadowed its loop variable `i` inside the genexp;
    # distinct names are used here.)
    fit = np.zeros([dimension])
    for j in range(dimension):
        fit[j] = sum(bv * wv for bv, wv in zip([v[0][j], v[1][j], v[2][j]], A))
    return fit

##二维二阶切比雪夫多项式
# Two-dimensional second-order Chebyshev polynomial fit
def D2Cheb(f):
    """
    Fit the 2-D field f (list of rows) with the second-order discrete
    Chebyshev basis along each axis.  Returns the fitted field with the
    same shape as f.
    """
    n_cols = len(f[0])
    n_rows = len(f)

    KOX, cbX, vX = CbVal(n_cols)
    KOY, cbY, vY = CbVal(n_rows)

    # Polynomial weights of the 2-D field: project f onto every pair of
    # (row-basis, column-basis) vectors.
    A = np.zeros([KOY + 1, KOX + 1])
    for k in range(KOY + 1):
        for s in range(KOX + 1):
            for r in range(n_rows):
                for c in range(n_cols):
                    A[k][s] += f[r][c] * vY[k][r] * vX[s][c]

    # Reconstruct the fitted field from the weights.
    fit = np.zeros([n_rows, n_cols])
    for r in range(n_rows):
        for c in range(n_cols):
            for k in range(KOY + 1):
                for s in range(KOX + 1):
                    fit[r][c] += A[k][s] * vY[k][r] * vX[s][c]

    return fit

def ChebyshevPolynomialFitting():
    """
    Demo: iteratively smooth one grid cell with the 2-D second-order
    Chebyshev fit.  Each iteration refits the field and writes the fitted
    value back into data[1][1], printing it until it settles.

    Fixes vs. original: dropped an unused up-front D2Cheb call whose result
    was never read, and a `curData = data` alias that was not a copy (it
    referenced the same nested list, so it added nothing).
    """
    data = [[0.0, 1.0, 2.0, 3.0, 4.0],
            [1.0, 2.6, 3.0, 4.0, 5.0],
            [2.0, 3.0, 4.0, 5.0, 6.0],
            [3.0, 4.0, 5.0, 6.0, 7.0],
            [4.0, 5.0, 6.0, 7.0, 8.0]]

    for i in range(100):
        cur_fit = D2Cheb(data)
        data[1][1] = cur_fit[1][1]
        print("%d  %.1f" % (i, data[1][1]))


########################################
########################################
def dataResolutionConsolidated(param):
    """
    Unify data resolution (height resolution, time resolution).

    NOTE(review): still a stub — the ids are read from `param` but not yet
    used, and the function always returns "".
    """
    _task_id = param.get('task_id')
    _data_id = param.get('data_id')
    return ""



#########################################
#三次样条插值算法
"""
三次样条实现：
函数x的自变量为:3,   4.5, 7,    9
      因变量为：2.5, 1   2.5,  0.5
"""
# Sample knots for the cubic-spline demo below.  Module-level globals:
# solutionOfEquation and CubicSplineInterpolation read these directly.
x = [3, 4.5, 7, 9]      # independent variable (knot positions)
y = [2.5, 1, 2.5, 0.5]  # dependent values at the knots

"""
功能：完成对三次样条函数求解方程参数的输入
参数：要进行三次样条曲线计算的自变量
返回值：方程的参数
"""
def calculateEquationParameters(x):
    """
    Build the coefficient-matrix rows of the linear system that determines
    the cubic-spline coefficients for knot vector x.

    Each of the n = len(x) - 1 segments is a cubic a*t^3 + b*t^2 + c*t + d
    (4n unknowns).  Every row built below is sliced with data[2:], dropping
    the a_1/b_1 columns of the first segment — i.e. the first segment is
    effectively constrained to be linear (consistent with
    CubicSplineInterpolation evaluating segment 1 as [0, 0, c, d]), which
    leaves a (4n - 2) x (4n - 2) square system.

    Returns the list of matrix rows; the right-hand side is assembled by
    solutionOfEquation.
    """
    # parameter collects the matrix rows; sizeOfInterval is the number of
    # spline intervals n.
    parameter = []
    sizeOfInterval=len(x)-1;
    i = 1
    # Interior-knot value continuity: both neighbouring cubics must hit the
    # function value at each interior knot -> 2n - 2 equations.
    while i < len(x)-1:
        data = init(sizeOfInterval*4)
        data[(i-1)*4] = x[i]*x[i]*x[i]
        data[(i-1)*4+1] = x[i]*x[i]
        data[(i-1)*4+2] = x[i]
        data[(i-1)*4+3] = 1
        data1 =init(sizeOfInterval*4)
        data1[i*4] =x[i]*x[i]*x[i]
        data1[i*4+1] =x[i]*x[i]
        data1[i*4+2] =x[i]
        data1[i*4+3] = 1
        temp = data[2:]
        parameter.append(temp)
        temp = data1[2:]
        parameter.append(temp)
        i += 1
    # Endpoint values: two more equations (2n so far).
    # This row is built directly at the reduced width (4n - 2), so slots
    # 0 and 1 are the c_1/d_1 columns: it encodes c_1*x[0] + d_1 = y[0].
    data = init(sizeOfInterval * 4 - 2)
    data[0] = x[0]
    data[1] = 1
    parameter.append(data)
    # Right endpoint: full cubic of the last segment evaluated at x[-1].
    data = init(sizeOfInterval * 4)
    data[(sizeOfInterval - 1) * 4 ] = x[-1] * x[-1] * x[-1]
    data[(sizeOfInterval - 1) * 4 + 1] = x[-1] * x[-1]
    data[(sizeOfInterval - 1) * 4 + 2] = x[-1]
    data[(sizeOfInterval - 1) * 4 + 3] = 1
    temp = data[2:]
    parameter.append(temp)
    # First-derivative continuity at interior knots: n - 1 equations
    # (3n - 1 so far).
    i=1
    while i < sizeOfInterval:
        data = init(sizeOfInterval * 4)
        data[(i - 1) * 4] = 3 * x[i] * x[i]
        data[(i - 1) * 4 + 1] = 2 * x[i]
        data[(i - 1) * 4 + 2] = 1
        data[i * 4] = -3 * x[i] * x[i]
        data[i * 4 + 1] = -2 * x[i]
        data[i * 4 + 2] = -1
        temp = data[2:]
        parameter.append(temp)
        i += 1
    # Second-derivative continuity at interior knots: n - 1 equations.
    # NOTE(review): the original comment claimed 4n equations including
    # zero-second-derivative boundary rows, but only 4n - 2 rows are built
    # here; the system is square because two columns were dropped instead.
    i = 1
    while i < len(x) - 1:
        data = init(sizeOfInterval * 4)
        data[(i - 1) * 4] = 6 * x[i]
        data[(i - 1) * 4 + 1] = 2
        data[i * 4] = -6 * x[i]
        data[i * 4 + 1] = -2
        temp = data[2:]
        parameter.append(temp)
        i += 1
    return parameter

def init(size):
    """Return a list of `size` zeros."""
    return [0] * size


def solutionOfEquation(parametes, y):
    """
    Solve the spline linear system for the cubic coefficients.

    parametes: matrix rows, e.g. from calculateEquationParameters.
    y: dependent values at the knots (len(y) == number of knots).
    Returns the numpy solution vector of length 4n - 2.

    Fixes vs. original: the matrix is now built from the `parametes`
    argument (the original ignored it and recomputed everything from the
    module-level global x); the interval count is derived from len(y)
    instead of that global; the debug print of the RHS vector was dropped.
    Behaviour is unchanged for the existing caller, which passes
    calculateEquationParameters(x) with len(x) == len(y).
    """
    sizeOfInterval = len(y) - 1
    # Right-hand side: each interior value twice (value continuity from both
    # neighbouring segments), then the two endpoint values; the remaining
    # derivative-continuity rows stay 0.
    result = [0] * (sizeOfInterval * 4 - 2)
    for i in range(1, sizeOfInterval):
        result[(i - 1) * 2] = y[i]
        result[(i - 1) * 2 + 1] = y[i]
    result[(sizeOfInterval - 1) * 2] = y[0]
    result[(sizeOfInterval - 1) * 2 + 1] = y[-1]

    a = np.array(parametes)
    b = np.array(result)
    return np.linalg.solve(a, b)


def calculate(paremeters, x):
    """
    Evaluate a cubic polynomial at every point of x.

    paremeters: coefficients [a, b, c, d], highest power first.
    x: iterable of evaluation points.
    Returns the list of function values a*t^3 + b*t^2 + c*t + d.
    """
    a = paremeters[0]
    b = paremeters[1]
    c = paremeters[2]
    d = paremeters[3]
    return [a * t * t * t + b * t * t + c * t + d for t in x]


def Draw(data_x, data_y, new_data_x, new_data_y):
    """
    Plot the spline fit: the dense fitted curve over the original discrete
    points.  data_x/data_y are the raw knots; new_data_x/new_data_y are the
    interpolated values.  Blocks on plt.show(); returns nothing.
    """
    plt.plot(new_data_x, new_data_y, label="拟合曲线", color="black")
    plt.scatter(data_x, data_y, label="离散数据", color="red")
    # Chinese-capable font and proper minus rendering must be set before the
    # figure is actually drawn by show().
    mpl.rcParams['font.sans-serif'] = ['SimHei']
    mpl.rcParams['axes.unicode_minus'] = False
    plt.title("三次样条函数")
    plt.legend(loc="upper left")
    plt.show()


def CubicSplineInterpolation():
    """
    Demo driver: solve the spline system for the module-level knots (x, y),
    evaluate each of the three segments on a dense grid and plot the result.

    Segment 1 is linear ([0, 0, c, d]); segments 2 and 3 use the full cubic
    coefficient quadruples from the solution vector.
    """
    coeffs = solutionOfEquation(calculateEquationParameters(x), y)

    # (dense grid, per-segment coefficients [a, b, c, d])
    segments = [
        (np.arange(3, 4.5, 0.1), [0, 0, coeffs[0], coeffs[1]]),
        (np.arange(4.5, 7, 0.1), [coeffs[2], coeffs[3], coeffs[4], coeffs[5]]),
        (np.arange(7, 9.5, 0.1), [coeffs[6], coeffs[7], coeffs[8], coeffs[9]]),
    ]

    new_data_x = []
    new_data_y = []
    for grid, seg_coeffs in segments:
        new_data_x.extend(grid)
        new_data_y.extend(calculate(seg_coeffs, grid))

    Draw(x, y, new_data_x, new_data_y)


##################################################
#函数拟合结果存数据库
##################################################
# Persist a function-fitting result to the database
def saveFunctionFittingResultIntoDB(power, coef_list, fig_path, fitting_method):
    """
    Insert one fitting result (degree, coefficient string, figure location,
    method) into the function-fitting table.

    Returns the inserted row id, or an error string when the insert fails
    (insert_db signals failure with -1).
    """
    print("函数拟合结果保存：")
    col_str = ','.join(["FunctionPower", "CoefficientList", "FigureLocation", "FittingMethod"])
    value_str = '"%d", "%s", "%s", "%s"' % (power, coef_list, fig_path, str(fitting_method))

    print("函数拟合结果保存参数： %s" % value_str)
    head_id = insert_db(db_table_function_fitting, col_str, value_str)
    print("函数拟合结果表插入ID： %d" % head_id)

    return "[Error]：函数拟合结果插入数据库错误！" if head_id == -1 else head_id

