"""
Author: 'silencesoup' 'silencesoup@outlook.com'
Date: 2024-12-01 14:55:41
LastEditors: 'silencesoup' 'silencesoup@outlook.com'
LastEditTime: 2024-12-30 11:24:01
FilePath: \neimeng_2024\main.py
Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
"""
import os 
import numpy as np
import pandas as pd
import xarray as xr
import datetime
import json
from loguru import logger
from hydroevaluate.configs.config import cmd, default_config_file, update_cfg
from fastapi import FastAPI, File, UploadFile, Body  # 添加 Body 导入
from typing import Optional, Dict, Any  # 添加类型提示导入
import uvicorn
import logging
import requests
from datetime import datetime, timedelta
from pydantic import BaseModel
from typing import List

from utils import (
    get_basin_area,
    move_time_start_back_30_days,
    move_time_start_back_365_days_day_format,
    move_time_start_forward_6_days_and_21_hours,
    move_time_start_forward_6_days,
    trans_to_hourly,
)
from hydroevaluate.hydroevaluate import EvalDeepHydro
from data_source import PastDataSource, agg_rain_data

# Module-level logger and FastAPI application.
# NOTE(review): this rebinds `logger`, shadowing the loguru logger imported
# above — every later call goes through stdlib logging, not loguru; confirm
# which one is intended.
logger = logging.getLogger(__name__)
app = FastAPI()

def model_infer(model_name, time_start, gage_id_lst, tp_data):
    """Run the deep-hydro Seq2Seq model for one basin and return predictions.

    Args:
        model_name: one of "camels_3h", "camels_1D", "camelsandneimeng_3h",
            "camelsandneimeng_1D", "neimeng_3h", "neimeng_1D"; must contain
            "3h" or "1D", which selects the forecast configuration.
        time_start: start time such as "2024-08-01 01" (spaces are replaced
            with "-" before it is handed to the time helpers).
        gage_id_lst: basin ids, e.g. ["NM000001"]; only the first entry is
            used for the basin-area lookup.
        tp_data: precipitation records (JSON-like list of dicts holding at
            least "time" and "tp").

    Returns:
        pandas.DataFrame with columns ["basin", "time", "streamflow", "tp"],
        with "time" rendered as "%Y-%m-%d %H:%M:%S" strings.

    Raises:
        ValueError: if model_name contains neither "3h" nor "1D" (the old
            code fell through and crashed later with NameError on df_pred).
    """
    logger.warning(time_start)
    logger.warning(type(time_start))
    logger.warning(gage_id_lst)
    logger.warning(type(gage_id_lst))
    logger.warning(tp_data)
    logger.warning(type(tp_data))
    # normalize "YYYY-mm-dd HH" -> "YYYY-mm-dd-HH" for the time helpers
    time_start = time_start.replace(" ", "-")
    if "3h" in model_name:
        # 3-hourly model: 30 days of history, 70 x 3h (~7 days) of forecast
        new_time_start = move_time_start_back_30_days(time_start)
        time_end = move_time_start_forward_6_days_and_21_hours(time_start)
        data_source = PastDataSource(tp_data=tp_data)
        cfg_file = default_config_file()
        pth_path = f"data/{model_name}/best_model.pth"
        stat_file_path = f"data/{model_name}/dapengscaler_stat.json"
        args = cmd(
            object_ids=gage_id_lst,
            t_range_test=[(new_time_start, time_end)],
            download=False,
            pth_path=pth_path,
            stat_file_path=stat_file_path,
            model_type="torchhydro",
            device=[-1],  # CPU inference
            model_name="Seq2Seq",
            horizon=70,
            rho=240,
            var_lst=["tp"],
            target_cols=["streamflow"],
            feature_mapping={
                "tp": {
                    "category": "precipitation",
                    "time_ranges": [(0, 310)],
                    "offset": 1,
                },
            },
            model_hyperparam={
                "en_input_size": 16,
                "de_input_size": 17,
                "output_size": 1,
                "hidden_size": 256,
                "forecast_length": 70,
                "prec_window": 1,
                "teacher_forcing_ratio": 0.5,
            },
            min_time_interval=3,
            min_time_unit="h",
        )
        update_cfg(cfg_file, args)

        eval_deep_hydro = EvalDeepHydro(cfg_file, data_source)
        pred = eval_deep_hydro.model_infer()
        # drop the first time step (warm-up point from prec_window=1)
        pred = pred.isel(time=slice(1, None))
        # xarray Dataset -> flat DataFrame
        df_pred = pred.to_dataframe().reset_index()
        df_pred = trans_to_hourly(df_pred)
        df_rain = pd.DataFrame(tp_data)[["time", "tp"]]
        df_rain["time"] = pd.to_datetime(df_rain["time"])
        df_pred = pd.merge(df_pred, df_rain, how="left", on="time")
        df_pred.fillna(0, inplace=True)
    elif "1D" in model_name:
        # daily model: 365 days of history, 10 days of forecast
        new_time_start = move_time_start_back_365_days_day_format(time_start)
        time_end = move_time_start_forward_6_days(time_start)
        data_source = PastDataSource(tp_data=tp_data)
        cfg_file = default_config_file()
        pth_path = f"data/{model_name}/best_model.pth"
        stat_file_path = f"data/{model_name}/dapengscaler_stat.json"
        args = cmd(
            object_ids=gage_id_lst,
            t_range_test=[(new_time_start, time_end)],
            download=False,
            pth_path=pth_path,
            stat_file_path=stat_file_path,
            model_type="torchhydro",
            device=[-1],  # CPU inference
            model_name="Seq2Seq",
            horizon=10,
            rho=365,
            var_lst=["tp"],
            target_cols=["streamflow"],
            feature_mapping={
                "tp": {
                    "category": "precipitation",
                    "time_ranges": [(0, 375)],
                    "offset": 1,
                },
            },
            model_hyperparam={
                "en_input_size": 16,
                "de_input_size": 17,
                "output_size": 1,
                "hidden_size": 256,
                "forecast_length": 10,
                "prec_window": 1,
                "teacher_forcing_ratio": 0.5,
            },
            min_time_interval=1,
            min_time_unit="D",
        )
        update_cfg(cfg_file, args)
        eval_deep_hydro = EvalDeepHydro(cfg_file, data_source)
        pred = eval_deep_hydro.model_infer()
        pred = pred.isel(time=slice(1, None))
        # xarray Dataset -> flat DataFrame
        df_pred = pred.to_dataframe().reset_index()
        df_rain = agg_rain_data(tp_data, [new_time_start, time_end], "1D")
        # (a redundant get_basin_area call here was removed — the area is
        # looked up once below, after both branches)
        df_pred = pd.merge(df_pred, df_rain, how="left", on="time")
        df_pred.fillna(0, inplace=True)
        # daily value -> hourly average
        df_pred["streamflow"] = df_pred["streamflow"] / 24
    else:
        raise ValueError(
            f"model_name must contain '3h' or '1D', got: {model_name}"
        )

    # depth -> discharge: value * area(km^2) / 3.6
    # (presumably streamflow is mm/h at this point, giving m^3/s — TODO confirm)
    area = get_basin_area(gage_id_lst[0])
    df_pred["streamflow"] = df_pred["streamflow"] * area / 3.6

    if not df_pred.empty:
        # strip the internal "neimenggu_" prefix before returning basin ids
        df_pred['basin'] = df_pred['basin'].str.replace('neimenggu_', '')
        df_pred = df_pred[["basin", "time", "streamflow", "tp"]]
        # render timestamps as strings (NaT entries are kept as-is)
        df_pred["time"] = df_pred["time"].apply(
            lambda x: x if pd.isnull(x) else x.strftime("%Y-%m-%d %H:%M:%S")
        )

    return df_pred


def save_df_to_csv(df_pred, output_path):
    """Save a prediction DataFrame to CSV without mutating the caller's frame.

    Args:
        df_pred: DataFrame expected to contain "basin", "time",
            "streamflow" and "tp" columns.
        output_path: destination CSV path (written UTF-8, no index).

    Returns:
        An error message (str) when the data is empty or required columns
        are missing; None on success (a confirmation is printed).
    """
    # empty input: nothing to write
    if df_pred.empty:
        return "数据为空，未保存文件。"

    # verify the expected schema before touching the data
    required_columns = ["basin", "time", "streamflow", "tp"]
    missing = [col for col in required_columns if col not in df_pred.columns]
    if missing:
        return f"输入数据缺少所需列：{', '.join(missing)}。"

    # work on a copy — the original code reformatted the caller's "time"
    # column in place, silently changing the caller's DataFrame
    df_out = df_pred.copy()

    # ensure the time column is datetime; unparsable values become NaT
    if not pd.api.types.is_datetime64_any_dtype(df_out["time"]):
        df_out["time"] = pd.to_datetime(df_out["time"], errors='coerce')

    # render timestamps as strings (NaT entries are kept as-is)
    df_out["time"] = df_out["time"].apply(
        lambda x: x if pd.isnull(x) else x.strftime('%Y-%m-%d %H:%M:%S')
    )

    df_out.to_csv(output_path, index=False, encoding='utf-8')
    print(f"数据已成功保存到 {output_path}")

def df_to_post_json_data(df):
    """Serialize a DataFrame to a list of record dicts, keeping non-ASCII text."""
    serialized = df.to_json(orient="records", force_ascii=False)
    return json.loads(serialized)
def rainfall_json_to_tpdata(rainfall_json):
    """Normalize rainfall input into a DataFrame with "time" and "tp" columns.

    Accepts a JSON string, a single dict, or a list whose items are dicts
    or JSON strings of dicts. Any failure is logged and re-raised.

    Raises:
        TypeError: when the input (or a list item) is not dict-shaped.
    """
    records = []
    try:
        # a top-level string is parsed as JSON first
        if isinstance(rainfall_json, str):
            rainfall_json = json.loads(rainfall_json)

        if isinstance(rainfall_json, list):
            for entry in rainfall_json:
                # list items may themselves be JSON-encoded dicts
                if isinstance(entry, str):
                    entry = json.loads(entry)
                if not isinstance(entry, dict):
                    raise TypeError(f"数据项必须是字典类型，当前类型为: {type(entry)}")
                records.append({"time": entry['time'], "tp": entry['tp']})
        elif isinstance(rainfall_json, dict):
            # single record
            records.append({"time": rainfall_json['time'], "tp": rainfall_json['tp']})
        else:
            raise TypeError(f"输入数据格式错误，当前类型为: {type(rainfall_json)}")
    except Exception as e:
        logger.error(f"数据处理错误: {str(e)}")
        logger.error(f"输入数据: {rainfall_json}")
        raise

    return pd.DataFrame(records)

def rainfall_json_to_tpdata1(rainfall_json):
    """Variant of rainfall_json_to_tpdata that only handles a top-level
    JSON string, a single dict, or a list of dicts (list items that are
    strings are NOT parsed here). Failures are logged and re-raised.

    Raises:
        TypeError: when any entry is not a dict.
    """
    rows = []
    try:
        # parse a top-level JSON string
        if isinstance(rainfall_json, str):
            rainfall_json = json.loads(rainfall_json)

        # a single dict is treated as a one-element list
        if isinstance(rainfall_json, dict):
            rainfall_json = [rainfall_json]

        for entry in rainfall_json:
            if not isinstance(entry, dict):
                raise TypeError(f"数据项必须是字典类型，当前类型为: {type(entry)}")
            rows.append({"time": entry['time'], "tp": entry['tp']})
    except Exception as e:
        logger.error(f"数据处理错误: {str(e)}")
        logger.error(f"输入数据: {rainfall_json}")
        raise

    return pd.DataFrame(rows)

def gfs_json_to_tpdata(gfs_json):
    """Convert GFS records (with millisecond "tm" epoch fields) into the
    record-list JSON form used by the inference endpoints.

    Each output record keeps "basin" and "tp" and replaces "tm" with a
    "%Y-%m-%dT%H:%M:%S" string under "time".
    NOTE(review): datetime.fromtimestamp uses the host's local timezone —
    confirm the upstream epochs are meant to be interpreted that way.
    """
    rows = [
        {
            "basin": item["basin"],
            "time": datetime.fromtimestamp(item['tm'] / 1000).strftime('%Y-%m-%dT%H:%M:%S'),
            "tp": item['tp'],
        }
        for item in gfs_json
    ]
    return df_to_post_json_data(pd.DataFrame(rows))

def get_json_earliest_time(gfs_json):
    """Return the *latest* "tm" timestamp in gfs_json, formatted "%Y-%m-%d-%H".

    Args:
        gfs_json: iterable of dicts with a millisecond epoch under "tm".

    Returns:
        The latest timestamp as a "%Y-%m-%d-%H" string (local timezone,
        since datetime.fromtimestamp is used without tzinfo).

    NOTE(review): despite the name, the original used max() and then
    subtracted timedelta(days=0) — a no-op that has been removed. The
    latest-time behavior is preserved because the caller depends on it;
    confirm whether min() was actually intended.
    """
    latest_ts = max(item['tm'] / 1000 for item in gfs_json)
    return datetime.fromtimestamp(latest_ts).strftime('%Y-%m-%d-%H')

def add_seven_days(time_str):
    """Shift a "%Y-%m-%d %H" time string 6 days into the past.

    NOTE(review): the name and the original comment said 7 days, but the
    code subtracts timedelta(days=6); that behavior is preserved here —
    confirm which offset is intended.
    """
    shifted = datetime.strptime(time_str, "%Y-%m-%d %H") - timedelta(days=6)
    return shifted.strftime("%Y-%m-%d %H")

class TimePoint(BaseModel):
    """One rainfall record: a timestamp string plus a precipitation value."""

    time: str  # could be changed to datetime if time-format validation is needed
    tp: float  # precipitation amount; units are not stated in this file

class RainfallInput(BaseModel):
    """Request-body wrapper: a list of TimePoint rainfall records."""

    data: List[TimePoint]

@app.post("/model_infer_nmg/")
async def _model_infer_nmg(
    model_name: str,
    time_start: str,
    basin_id: str,
    rainfall_data: Optional[List[Dict]] = None,
):
    """
    Generate an Inner Mongolia forecast from posted rainfall data.

    Args:
        model_name: model identifier (e.g. "neimeng_3h" or "neimeng_1D").
        time_start: forecast start time; for 3h models the hour should be
            3n+1 (1, 4, 7, ...), for 1D models the hour is 00.
            NOTE(review): the old docstring claimed "%Y-%m-%d-%H", but
            add_seven_days parses "%Y-%m-%d %H" (space-separated) — confirm
            the format callers actually send.
        basin_id: basin code; prefixed internally with "neimenggu_".
        rainfall_data: list of {"time": ..., "tp": ...} records; required.

    Returns:
        Prediction records as a list of JSON dicts.

    Raises:
        ValueError: when no rainfall data is provided.
    """
    if rainfall_data is None:
        raise ValueError("必须提供文件或JSON数据其中之一")

    df = rainfall_json_to_tpdata(rainfall_data)
    df['time'] = pd.to_datetime(df['time'], format='mixed')
    # average duplicate timestamps (multiple rain records for the same time)
    df = df.groupby('time', as_index=False)['tp'].mean()

    df['basin'] = basin_id
    df = df.reindex(columns=['basin', 'time', 'tp'])
    df['time'] = pd.to_datetime(df['time']).dt.strftime('%Y-%m-%dT%H:%M:%S')
    tp_data = df_to_post_json_data(df)

    gage_id_lst = [f"neimenggu_{basin_id}"]

    # the model is fed a start time shifted back 6 days (see add_seven_days)
    df_pred_7day = model_infer(
        model_name=model_name,
        time_start=add_seven_days(time_start),
        gage_id_lst=gage_id_lst,
        tp_data=tp_data,
    )

    return df_to_post_json_data(df_pred_7day)


@app.get("/get_gfs_data/")
def get_gfs_data(basin: str):
    """Fetch GFS areal precipitation for a basin from the internal service
    and run the 3-hourly model on it.

    Args:
        basin: basin code; prefixed internally with "neimenggu_".

    Returns:
        Prediction records as a list of JSON dicts, or None when the
        upstream request fails (FastAPI serializes this as JSON null).
    """
    res = requests.get(
        f'http://10.48.0.87:18199/get_gfs_area_mongo_tp/{basin}',
        headers={'Content-Type': 'application/json;charset=UTF-8'},
        timeout=60,  # previously unbounded: a hung upstream froze the worker
    )
    if not res.ok:
        print('Requests error!')
        return None

    gfs_data_json = res.json()
    tp_data = gfs_json_to_tpdata(gfs_data_json)
    time_start = get_json_earliest_time(gfs_data_json)

    gage_id_lst = [f"neimenggu_{basin}"]

    # NOTE(review): other call sites use "neimeng_*" model names — confirm
    # the "neimenggu_3h" model directory actually exists under data/
    df_pred = model_infer(
        model_name='neimenggu_3h',
        time_start=time_start,
        gage_id_lst=gage_id_lst,
        tp_data=tp_data,
    )
    logger.warning(df_pred)
    return df_to_post_json_data(df_pred)

if __name__ == '__main__':
    # Run the FastAPI app with uvicorn; any startup failure is logged.
    try:
        web_config = uvicorn.Config(
            "neimeng_service:app", host='0.0.0.0', port=8199, reload=False
        )
        server = uvicorn.Server(web_config)
        server.run()
    except Exception as e:
        # Log the traceback, then a timestamped shutdown message.
        # Fixes two bugs in the original handler: `datetime.datetime.now()`
        # raised AttributeError (the top-of-file `from datetime import
        # datetime` rebinds the name to the class), and logger.error was
        # passed a stray positional arg with no %-placeholder in the message.
        logger.exception(e)
        logger.error('%s 预处理任务停止', datetime.now().strftime('%Y%m%d-%H:%M:%S'))

    