from fastapi import APIRouter, Depends
from pydantic import BaseModel
import numpy as np
import pandas as pd
from backend.app.app.api.api_v1.user import verify_token_user
from backend.app.app.crud_pro.tb_stimeforcast import stimeforcast_store, stimeforcast_delete, read_stimeforcast, \
    stimforcast_UnitName_Date_Sample, stimforcast_readModelName, stimeforcast_newModel, stimeforcast_readModelinfo, \
    stimeforcast_training, stimeforcast_saveparas, stimeforcast_loadPredictingData, stimeforcast_loadModels, \
    stimeforcast_predict, stimeforcast_read_Plan_Unit_Year, stimeforcast_predictsave, \
    stimeforcast_predictsavetopredtabel, get_dvp_info, get_sample_data, get_sample_data_all, bp_predict_save_params, \
    calculate_p, save_model_info_data
from backend.app.app.models.effect_pridict.effect_pri import save_BP_data, delete_BP_data, read_BP_data, read_BP_model, \
    creat_new_model, train_Data_BP, save_BP_paras, ModelInfo, predictData_Modal, predictData_readData, \
    predictData_savedata, \
    predictData_savetopredtabel, predictData_save_params

# Router for the fracturing BP-neural-network effect-prediction endpoints.
pridict_BP_router = APIRouter(prefix="/pridict_BP", tags=["3.单元方案措施效果预测---压裂-BP神经网络模型训练"])


class CountNumAll(BaseModel):
    """Request body for /count_num_all: selects samples by stimulation type and model name."""
    StimType_Name: str  # stimulation (measure) type name
    Model_Name: str  # BP model name


# NOTE: modified by hzc
@pridict_BP_router.post("/count_num_all", name="计算")
async def count_num(input_data: CountNumAll, ver=Depends(verify_token_user)):
    """Compute per-feature statistics over ALL samples of a stimulation type / model.

    Returns mean, max, min, variance, P10 and P90 per feature column
    (X1..X30) plus a count of each sample-sign category. On error or when
    no samples exist, the all-``None`` skeleton is returned instead.
    """
    # Initialised before the try-block: the except handler returns it, and
    # previously a failure inside get_sample_data_all raised a NameError
    # because `return_data` was still unbound at that point.
    return_data = {
        "all_avg": None,
        "all_max": None,
        "all_min": None,
        "all_var": None,
        "p10": None,
        "p90": None,
        "Sample_Sign": None
    }
    try:
        sample_data, sample_sign = get_sample_data_all(input_data.StimType_Name, input_data.Model_Name)
        if not sample_data:
            return {"result": return_data, "verify": ver}
        # The four known categories are pre-seeded so they always appear in
        # the response, even with a zero count.
        sample_sign_count = {
            '0-否': 0,
            '1-样': 0,
            '2-样测': 0,
            '3-测': 0
        }
        for item in sample_sign:
            # Bug fix: the first occurrence of an unseen category used to be
            # recorded as 0 (off by one); it now counts as 1.
            sample_sign_count[item] = sample_sign_count.get(item, 0) + 1
        new_data = [list(item) for item in sample_data]
        columns_name = [
            'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8', 'X9', 'X10',
            'X11', 'X12', 'X13', 'X14', 'X15', 'X16', 'X17', 'X18', 'X19', 'X20',
            'X21', 'X22', 'X23', 'X24', 'X25', 'X26', 'X27', 'X28', 'X29', 'X30',
        ]
        # P10 / P90 per column, from the relative frequency of each distinct
        # value sorted ascending (empirical distribution fed to calculate_p).
        df = pd.DataFrame(new_data)
        p10_list, p90_list = [], []
        for column in df.columns:
            cou_p = df[column].value_counts().sort_index() / len(df[column])
            p10, p90 = calculate_p(cou_p)
            p10_list.append(p10)
            p90_list.append(p90)
        # Column-wise mean / max / min / variance over all samples.
        # zip() silently truncates if the data has fewer than 30 columns
        # (unchanged behaviour from the original implementation).
        ar = np.array(new_data)
        return_data.update({
            "all_avg": dict(zip(columns_name, ar.mean(axis=0))),
            "all_max": dict(zip(columns_name, np.max(ar, axis=0))),
            "all_min": dict(zip(columns_name, np.min(ar, axis=0))),
            "all_var": dict(zip(columns_name, np.var(ar, axis=0))),
            "p10": dict(zip(columns_name, p10_list)),
            "p90": dict(zip(columns_name, p90_list)),
            "Sample_Sign": sample_sign_count,
        })
        return {"result": return_data, "verify": ver}
    except Exception as e:
        # Log and fall back to the empty skeleton rather than a 500.
        print(e)
        return {"result": return_data, "verify": ver}


#   Filter conditions: well number, date.
class sa_data(BaseModel):
    """Request body for /count_num: filters samples by unit, type and year range."""
    DvpUnit_Name: str  # development-unit name
    StimType_Name: str  # stimulation (measure) type name
    start: str  # start year, e.g. "2020" (handler appends "-01-01")
    end: str  # end year (handler appends "-12-31")


@pridict_BP_router.post("/count_num", name="根据筛选条件计算")
async def count_num(input_data: sa_data, ver=Depends(verify_token_user)):
    """Compute per-feature statistics for samples filtered by unit, type and year range.

    Same output shape as /count_num_all: mean, max, min, variance, P10/P90
    per feature column (X1..X30) plus sample-sign category counts; the
    all-``None`` skeleton on error or when no samples match.
    """
    # Initialised before the try-block: the except handler returns it, and
    # previously any failure before line `return_data = {...}` raised a
    # NameError because the name was still unbound.
    return_data = {
        "all_avg": None,
        "all_max": None,
        "all_min": None,
        "all_var": None,
        "p10": None,
        "p90": None,
        "Sample_Sign": None
    }
    try:
        # Expand the year strings into full first/last-day-of-year dates.
        start_date = input_data.start + "-01-01"
        end_date = input_data.end + "-12-31"
        sample_data, sample_sign = get_sample_data(
            unit_name=input_data.DvpUnit_Name,
            name=input_data.StimType_Name,
            start=start_date,
            end=end_date
        )
        if not sample_data:
            return {"result": return_data, "verify": ver}
        # The four known categories are pre-seeded so they always appear in
        # the response, even with a zero count.
        sample_sign_count = {
            '0-否': 0,
            '1-样': 0,
            '2-样测': 0,
            '3-测': 0
        }
        for item in sample_sign:
            # Bug fix: the first occurrence of an unseen category used to be
            # recorded as 0 (off by one); it now counts as 1.
            sample_sign_count[item] = sample_sign_count.get(item, 0) + 1
        new_data = [list(item) for item in sample_data]
        columns_name = [
            'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8', 'X9', 'X10',
            'X11', 'X12', 'X13', 'X14', 'X15', 'X16', 'X17', 'X18', 'X19', 'X20',
            'X21', 'X22', 'X23', 'X24', 'X25', 'X26', 'X27', 'X28', 'X29', 'X30',
        ]
        # P10 / P90 per column, from the relative frequency of each distinct
        # value sorted ascending (empirical distribution fed to calculate_p).
        df = pd.DataFrame(new_data)
        p10_list, p90_list = [], []
        for column in df.columns:
            cou_p = df[column].value_counts().sort_index() / len(df[column])
            p10, p90 = calculate_p(cou_p)
            p10_list.append(p10)
            p90_list.append(p90)
        # Column-wise mean / max / min / variance over the filtered samples.
        # zip() silently truncates if the data has fewer than 30 columns
        # (unchanged behaviour from the original implementation).
        ar = np.array(new_data)
        return_data.update({
            "all_avg": dict(zip(columns_name, ar.mean(axis=0))),
            "all_max": dict(zip(columns_name, np.max(ar, axis=0))),
            "all_min": dict(zip(columns_name, np.min(ar, axis=0))),
            "all_var": dict(zip(columns_name, np.var(ar, axis=0))),
            "p10": dict(zip(columns_name, p10_list)),
            "p90": dict(zip(columns_name, p90_list)),
            "Sample_Sign": sample_sign_count,
        })
        return {"result": return_data, "verify": ver}
    except Exception as e:
        # Log and fall back to the empty skeleton rather than a 500.
        print(e)
        return {"result": return_data, "verify": ver}


# BP prediction table: read all model names.
@pridict_BP_router.post("/read_modelName", name="BP神经网络预测数据表读所有模型名称")
async def read_model(ver=Depends(verify_token_user)):
    """Return every stored BP model name, or False on error."""
    try:
        data = stimforcast_readModelName()
        return {"result": data, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt) and log the cause instead of hiding it.
        print(e)
        return False


def dvpunit_name_tree(dvp_list, DvpUnit_Name):
    """Build the subtree of development units directly below *DvpUnit_Name*.

    Each node is ``{'id': f'{DvpUnit_ID}{sibling_index}', 'title': DvpUnit_Name}``
    with an optional ``'children'`` list; siblings are ordered by
    ``DvpUnit_Rank`` and numbered from 1 within their parent.

    The original implementation re-sorted the full list on every recursive
    call; the sort is now hoisted here and done exactly once.
    """
    ordered = sorted(dvp_list, key=lambda x: x.get("DvpUnit_Rank"))
    return _dvpunit_children(ordered, DvpUnit_Name)


def _dvpunit_children(ordered, parent_name):
    """Recursive helper: collect child nodes of *parent_name* from a rank-sorted list."""
    num = 1
    nodes = []
    for dvp in ordered:
        # A unit may list itself as its own parent (root rows); skip that
        # self-reference to avoid infinite recursion.
        if dvp['Belong_DvpUnit_Name'] == parent_name and dvp['DvpUnit_Name'] != parent_name:
            node = {
                'id': f'{dvp["DvpUnit_ID"]}{num}',
                'title': dvp['DvpUnit_Name'],
            }
            num += 1
            children = _dvpunit_children(ordered, dvp['DvpUnit_Name'])
            if children:
                node['children'] = children
            nodes.append(node)
    return nodes


# BP prediction table: read unit names and dates.
@pridict_BP_router.post("/read_UnitNameDate", name="BP神经网络预测数据表读单元名称、日期")
async def read_unit_date(ver=Depends(verify_token_user)):
    """Return the development-unit forest plus the first/last available dates."""
    all_dvp, dates = get_dvp_info()
    # First and last entries of the date list. Previously the list was
    # reversed in place just to read its last element, and an empty list
    # raised an unhandled IndexError; guard with None instead.
    start = dates[0] if dates else None
    end_ = dates[-1] if dates else None
    dvp_tree = list()
    dvp_list = sorted(all_dvp, key=lambda x: x["DvpUnit_Rank"])
    # Repeatedly take the lowest-rank remaining unit as a forest root,
    # build its subtree, then drop it and all of its descendants.
    while len(dvp_list):
        item = dvp_list[0]
        temp_dict = {
            'id': item['DvpUnit_ID'],
            'title': item['DvpUnit_Name'],
        }
        child = dvpunit_name_tree(dvp_list, item['DvpUnit_Name'])
        if child:
            temp_dict['children'] = child
        dvp_tree.append(temp_dict)
        # Remove the processed root and everything reachable from it.
        # dvp_list is rank-sorted, so a parent precedes its children and a
        # single pass collects the whole subtree into del_name.
        del_name = {item['DvpUnit_Name']}
        new_list = []
        for x in dvp_list:
            if x['Belong_DvpUnit_Name'] in del_name or x['DvpUnit_Name'] in del_name:
                del_name.add(x['DvpUnit_Name'])
            else:
                new_list.append(x)
        dvp_list = new_list

    return {"result": dvp_tree, "start_data": start, "end_data": end_, "verify": ver}


# BP prediction table: read stored rows.
@pridict_BP_router.post("/read_lib", name="BP神经网络预测数据表读库")
async def read_lib(Read_BP_Data: read_BP_data, ver=Depends(verify_token_user)):
    """Query stored BP prediction rows filtered by unit, model and date range; False on error."""
    try:
        rows = read_stimeforcast(
            Unit_Name=Read_BP_Data.Unit_Name,
            Model_Name=Read_BP_Data.Model_Name,
            start_date=Read_BP_Data.Start_Date,
            end_date=Read_BP_Data.End_Date,
        )
    except Exception as e:
        print(e)
        return False
    return {"result": rows, "verify": ver}


# BP prediction table: store data.
@pridict_BP_router.post("/save_lib", name="BP神经网络预测数据表存库")
async def save_lib(Save_BP_Data: save_BP_data, ver=Depends(verify_token_user)):
    """Persist the posted BP prediction rows, or return False on error."""
    try:
        a = stimeforcast_store(Save_BP_Data)
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP prediction table: delete rows.
@pridict_BP_router.post("/delete_lib_data", name="BP神经网络预测数据表删除数据")
async def delete_lib(Delete_BP_Data: delete_BP_data, ver=Depends(verify_token_user)):
    """Delete prediction rows matching the given well name and end date; False on error."""
    try:
        # NOTE(review): the crud keyword is spelled `end_data` (sic) — it
        # must match the stimeforcast_delete signature.
        a = stimeforcast_delete(well_name=Delete_BP_Data.Well_Name,
                                end_data=Delete_BP_Data.End_Date)
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP model-info table: create a new model.
@pridict_BP_router.post("/creat_new_model", name="BP神经网络模型信息数据表新建模型")
async def creat_model(New_Model_Data: creat_new_model, ver=Depends(verify_token_user)):
    """Create a new BP model record (name, creator, type, remark); False on error."""
    try:
        a = stimeforcast_newModel(
            Model_Name=New_Model_Data.Model_Name,
            Creater_Name=New_Model_Data.Creater_Name,
            StimType_Name=New_Model_Data.StimType_Name,
            Remark=New_Model_Data.Remark
        )
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP model-info table: read.
@pridict_BP_router.post("/read_modelinfo", name="BP神经网络读取模型信息")
async def read_modelinfo(New_Model_Data: read_BP_model, ver=Depends(verify_token_user)):
    """Return the stored info record for the given model name; False on error."""
    try:
        a = stimeforcast_readModelinfo(Model_Name=New_Model_Data.Model_Name)
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP neural network: model training.
@pridict_BP_router.post("/train_model", name="BP神经网络训练")
async def train_model(Train_Data: train_Data_BP, ver=Depends(verify_token_user)):
    """Train the BP network with the posted data and parameters; False on error."""
    try:
        a = stimeforcast_training(train_Data_BP=Train_Data, Model_Name=Train_Data.Model_Name, params=Train_Data.params)
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP neural network: save parameters.
@pridict_BP_router.post("/save_bp_paras", name="BP神经网络保存参数")
async def train_model(save_data: save_BP_paras, ver=Depends(verify_token_user)):
    # NOTE(review): this def reuses the name `train_model` (also used by the
    # /train_model endpoint above), shadowing it at module level. Harmless at
    # runtime because FastAPI registers routes at decoration time, but a
    # unique name (e.g. save_bp_paras) would be clearer.
    result = stimeforcast_saveparas(save_data=save_data)
    return {"result": result, "verify": ver}


# Save the BP prediction model-info table record.
@pridict_BP_router.post("/save_model_info", name="保存BP神经网络预测模型信息表")
async def save_model_info(model_info: ModelInfo, ver=Depends(verify_token_user)):
    """Persist the posted BP model-info record; False on any error."""
    try:
        saved = save_model_info_data(model_info=model_info)
    except Exception as e:
        print(e)
        print(e.__traceback__.tb_lineno)
        return False
    return {"result": saved, "verify": ver}


# BP prediction data: read plan/unit/year metadata.
@pridict_BP_router.post("/predict_read_Plan_Unit_Year", name="读取规划方案、单元名称、起止规划年")
async def Predict_Read(ver=Depends(verify_token_user)):
    """Read available plan names, unit names and the planning-year range; False on error."""
    try:
        data = stimeforcast_read_Plan_Unit_Year()
    except Exception as e:
        print(e)
        return False
    return {"result": data, "verify": ver}


# BP prediction data: read.
@pridict_BP_router.post("/predict_readData", name="读取BP神经网络预测数据")
async def Predict_Read(read_data: predictData_readData, ver=Depends(verify_token_user)):
    """Load the data rows to be predicted, per the posted filter; False on error."""
    # NOTE(review): `Predict_Read` is redefined for several endpoints in this
    # module; route registration still works, but the names shadow each other.
    try:
        a = stimeforcast_loadPredictingData(read_data=read_data)
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP prediction data: read model names.
@pridict_BP_router.post("/predict_readModels", name="读取BP神经网络模型名称")
async def Predict_Read(ver=Depends(verify_token_user)):
    """Return the names of trained BP models available for prediction; False on error."""
    try:
        a = stimeforcast_loadModels()
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP prediction: run the model.
@pridict_BP_router.post("/predict", name="预测")
async def Predict_Read(predictData_Modal: predictData_Modal, ver=Depends(verify_token_user)):
    """Run the BP model prediction on the posted payload; False on error."""
    # NOTE(review): the parameter shares its name with the pydantic class
    # `predictData_Modal`; this works because the annotation is evaluated
    # before the parameter name binds, but a distinct name would be clearer.
    try:
        a = stimeforcast_predict(predData=predictData_Modal)
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP prediction data: save.
@pridict_BP_router.post("/predict_save", name="存库")
async def Predict_Read(predictData_savedata: predictData_savedata, ver=Depends(verify_token_user)):
    """Persist the posted prediction results; False on error."""
    try:
        a = stimeforcast_predictsave(save_BP_data_json=predictData_savedata)
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP prediction data: save into the oil-well measure-plan effect-prediction table.
@pridict_BP_router.post("/predict_savetopredtabel", name="存油井措施方案效果预测表")
async def Predict_Read(predictData_savetopredtabel: predictData_savetopredtabel, ver=Depends(verify_token_user)):
    """Store prediction results into the measure-plan effect table; False on error."""
    try:
        a = stimeforcast_predictsavetopredtabel(senddata=predictData_savetopredtabel)
        return {"result": a, "verify": ver}
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause instead of hiding it.
        print(e)
        return False


# BP prediction parameters: save.
@pridict_BP_router.post("/save_bp_predict_params", name="BP神经网络预测参数存库")
async def save_bp_predict_params(page_data: predictData_save_params, ver=Depends(verify_token_user)):
    """Persist prediction-parameter changes (additions and deletions from the page)."""
    outcome = bp_predict_save_params(page_data.add, page_data.delete)
    return {"result": outcome, "verify": ver}
