import argparse
import os
import random
import time

import joblib
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data import Dataset

from data_provider.data_loader import Dataset_Pred
from experiments.exp_basic import Exp_Basic
from utils.metrics import metric
from utils.timefeatures import time_features
from utils.tools import EarlyStopping, adjust_learning_rate, visual


class Predict_Dataset_PWV_5min(Dataset):
    """Inference-only dataset for 5-minute PWV/rain forecasting.

    Wraps pre-loaded PWV and rain arrays, standardizes them with the
    scaler fitted at training time, and yields sliding windows shaped
    for an encoder-decoder forecaster.
    """

    def __init__(self, root_path, pwv_array, rain_array, size=None,
                 scale=True, timeenc=0, freq='5min'):
        # size: [seq_len, label_len, pred_len]
        if size is None:
            self.seq_len = 12 * 3
            self.label_len = 18
            self.pred_len = 12 * 3
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]

        self.scale = scale
        self.timeenc = timeenc
        self.freq = freq

        self.root_path = root_path

        # Use the arrays passed in directly (no CSV loading at predict time).
        self.pwv_array = pwv_array
        self.rain_array = rain_array

        self.__read_data__()

    def __read_data__(self):
        # Load the scaler fitted during training so inputs are scaled
        # exactly as the model saw them.
        self.scaler = joblib.load(os.path.join(self.root_path, 'scaler.pkl'))

        # Multivariate input: PWV + rain, one column each.
        raw_data = np.column_stack([self.pwv_array, self.rain_array])

        data = self.scaler.transform(raw_data)

        self.data_x = data
        self.data_stamp = None

    def __getitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_len

        seq_x = self.data_x[s_begin:s_end]
        # No ground-truth future at predict time: provide an all-zero
        # decoder target (same dtype as the inputs) covering
        # label_len + pred_len steps.
        seq_y = np.zeros((self.label_len + self.pred_len, seq_x.shape[1]),
                         dtype=self.data_x.dtype)

        # Zero placeholders instead of None so default batch collation works.
        seq_x_mark = np.zeros((self.seq_len, 4))  # 4 calendar time features
        seq_y_mark = np.zeros((self.label_len + self.pred_len, 4))

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        # NOTE(review): the max(1, ...) guard makes a too-short input still
        # report one window; __getitem__ would then return truncated slices.
        # Callers are expected to supply at least seq_len points.
        return max(1, len(self.data_x) - self.seq_len - self.pred_len + 1)

    def inverse_transform(self, data):
        """Map scaled values back to original units via the fitted scaler."""
        return self.scaler.inverse_transform(data)


def data_provider(args, pwv_array, rain_array):
    """Build the prediction dataset and its DataLoader from raw arrays."""
    # Only the 'timeF' embedding uses the timeenc=1 encoding path.
    timeenc = 1 if args.embed == 'timeF' else 0

    data_set = Predict_Dataset_PWV_5min(
        root_path=args.root_path,
        pwv_array=pwv_array,
        rain_array=rain_array,
        size=[args.seq_len, args.label_len, args.pred_len],
        timeenc=timeenc,
        freq=args.freq,
    )

    # Prediction iterates windows one at a time, in order, and keeps
    # the final batch: no shuffling, batch size 1, no drop_last.
    data_loader = DataLoader(
        data_set,
        batch_size=1,
        shuffle=False,
        num_workers=args.num_workers,
        drop_last=False)
    return data_set, data_loader


class My_Exp_Long_Term_Forecast(Exp_Basic):
    """Inference-oriented long-term forecasting experiment that consumes
    in-memory PWV and rain arrays instead of reading data from disk."""

    def __init__(self, args, pwv_array, rain_array):
        super(My_Exp_Long_Term_Forecast, self).__init__(args)
        self.pwv_array = pwv_array
        self.rain_array = rain_array

    def _build_model(self):
        # Instantiate the configured model; wrap for multi-GPU if requested.
        model = self.model_dict[self.args.model].Model(self.args).float()

        if self.args.use_multi_gpu and self.args.use_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model

    def _get_data(self):
        data_set, data_loader = data_provider(self.args, self.pwv_array, self.rain_array)
        return data_set, data_loader

    def _select_optimizer(self):
        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        return model_optim

    def _select_criterion(self):
        criterion = nn.MSELoss()
        return criterion

    def predict(self, setting, load=True):
        """Run the model over every window of the prediction dataset.

        Args:
            setting: experiment identifier used to locate the checkpoint dir.
            load: when True, restore model weights from the checkpoint.

        Returns:
            np.ndarray of shape (num_windows, steps, num_vars), inverse-
            transformed back to original units when args.inverse is set.
        """
        pred_data, pred_loader = self._get_data()

        if load:
            path = os.path.join(self.args.checkpoints, setting)
            best_model_path = path + '/' + 'checkpoint.pth'
            # BUGFIX: this previously read the module-level global `args`,
            # which only exists when the file is run as a script.
            if not self.args.use_gpu:
                self.model.load_state_dict(torch.load(best_model_path, map_location=torch.device('cpu')))
            else:
                self.model.load_state_dict(torch.load(best_model_path))

        preds = []

        self.model.eval()
        with torch.no_grad():
            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader):
                batch_x = batch_x.float().to(self.device)
                batch_y = batch_y.float().to(self.device)
                batch_x_mark = batch_x_mark.float().to(self.device)
                batch_y_mark = batch_y_mark.float().to(self.device)

                # Decoder input: label_len known steps followed by zeros
                # for the pred_len horizon.
                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
                dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)
                # Encoder-decoder forward pass (optionally under AMP).
                if self.args.use_amp:
                    with torch.cuda.amp.autocast():
                        if self.args.output_attention:
                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
                        else:
                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
                else:
                    if self.args.output_attention:
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
                    else:
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
                outputs = outputs.detach().cpu().numpy()
                if pred_data.scale and self.args.inverse:
                    # Undo the StandardScaler so predictions are in
                    # physical units (mm of rain, etc.).
                    shape = outputs.shape
                    outputs = pred_data.inverse_transform(outputs.squeeze(0)).reshape(shape)
                preds.append(outputs)

        preds = np.array(preds)
        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])

        return preds


from flask import Flask
from flask_cors import CORS
import logging
from datetime import timedelta

from flask import Blueprint, request
from flask import jsonify

# Blueprint for the AI forecasting service; all routes live under /aifs.
# NOTE(review): these imports sit mid-file against PEP 8 convention —
# consider consolidating them into the top-of-file import block.
bp = Blueprint('aifs', __name__, url_prefix='/aifs')


@bp.post('/get_rain_predict')
def get_rain_predict():
    """Predict rain from PWV/rain history, then rescale the model output
    so its total matches the ECMWF forecast total for the same location.

    Expects JSON: deviceId, lat, lon, pwv_list, rain_list.
    Returns JSON with status 200 and the corrected prediction, or an
    error status (402/403/500) on bad input / failure.
    """
    try:
        data = request.get_json()
        device_id = data.get('deviceId')
        lat = data.get('lat')
        lon = data.get('lon')
        pwv_list = data.get('pwv_list')
        rain_list = data.get('rain_list')

        # Input validation.
        if not pwv_list or not rain_list:
            return jsonify({'status': 402, 'error': '缺少pwv_list或rain_list数据'})

        if len(pwv_list) < args.seq_len or len(rain_list) < args.seq_len:
            return jsonify({'status': 403, 'error': f'数据长度不足，需要至少{args.seq_len}个点'})

        # Keep only the most recent seq_len points the model consumes.
        pwv_array = np.array(pwv_list[-args.seq_len:], dtype=np.float32)
        rain_array = np.array(rain_list[-args.seq_len:], dtype=np.float32)

        Exp = My_Exp_Long_Term_Forecast

        args.batch_size = 1
        exp = Exp(args, pwv_array, rain_array)  # set experiments
        preds = exp.predict(setting)

        torch.cuda.empty_cache()

        # Target (rain) is the last column of the multivariate output.
        rain_preds = preds[:, :, -1]

        # Post-process: rain cannot be negative; round to 2 decimals.
        rain_preds = np.maximum(rain_preds, 0.0)
        rain_preds = np.round(rain_preds, 2)

        ecmwf_predict = f"http://{args.ecmwf_host}:12000/aifs/get_forecast"

        ecmwf_response = requests.post(
            ecmwf_predict,
            json={"lat": lat, "lon": lon},
            headers={'Content-Type': 'application/json'}
        )

        ecmwf_data = ecmwf_response.json()

        # NOTE(review): assumes the ECMWF service always returns a numeric
        # total_precipitation — a missing key yields None here and breaks
        # the arithmetic below; confirm the upstream contract.
        ecmwf_total_precipitation = ecmwf_data.get("total_precipitation")

        model_total = np.sum(rain_preds)

        if model_total == 0:
            # Model predicted no rain at all: spread the ECMWF total evenly.
            corrected = np.full_like(rain_preds, ecmwf_total_precipitation / len(rain_preds))
        else:
            # Scale the model output so its total matches the ECMWF total.
            scale_factor = ecmwf_total_precipitation / model_total
            corrected = rain_preds * scale_factor

        corrected_mean = np.sum(corrected, axis=1)
        corrected_rain_pred = float(corrected_mean[0])

        return jsonify({
            'status': 200,
            'deviceId': device_id,
            'actual_lat': ecmwf_data.get("actual_lat"),
            'actual_lon': ecmwf_data.get("actual_lon"),
            'valid_time': ecmwf_data.get("valid_time"),
            # BUGFIX: this previously echoed the raw ECMWF total instead of
            # the corrected model prediction computed above (which was dead).
            'corrected_rain_pred': corrected_rain_pred,
            'model_rain_preds_list': rain_preds.tolist()[0],
            'ecmwf_total_precipitation': ecmwf_total_precipitation
        })
    except Exception as e:
        # BUGFIX: returning None from a Flask view raises a second error;
        # log the failure and report it to the client instead.
        logging.getLogger(__name__).exception('get_rain_predict failed')
        return jsonify({'status': 500, 'error': str(e)})


class BaseConfig:
    """Base Flask configuration shared by all environments."""

    # Session/signing secret used by Flask.
    SECRET_KEY = "fsfsd482afewfwhudsdkckznknajdhuwrhafnf"

    # Database connection (currently disabled):
    # SQLALCHEMY_DATABASE_URI = 'sqlite:///../data.db'

    # Default logging verbosity for the app.
    LOG_LEVEL = logging.WARN

    # How long clients may cache static files.
    SEND_FILE_MAX_AGE_DEFAULT = timedelta(hours=48)

    # SOCK_SERVER_OPTIONS = {'ping_interval': 25}


def init_bps(app):
    """Attach every service blueprint to the given Flask application."""
    app.register_blueprint(bp)


def create_app():
    """Application factory: build, configure, and return the Flask app."""
    # NOTE(review): Flask's first argument is normally an import name,
    # not a filesystem path — confirm this path-based value is intended.
    flask_app = Flask(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

    # Allow cross-origin requests from any origin.
    CORS(flask_app)

    # Load configuration values from the config class.
    flask_app.config.from_object(BaseConfig)

    # Register all blueprints.
    init_bps(flask_app)

    return flask_app


# Module-level app instance so WSGI servers can import `app` directly.
app = create_app()

if __name__ == "__main__":

    # Fix all RNG seeds for reproducible predictions.
    fix_seed = 2023
    random.seed(fix_seed)
    torch.manual_seed(fix_seed)
    np.random.seed(fix_seed)

    def str2bool(v):
        """Parse a command-line boolean.

        BUGFIX: argparse's `type=bool` treats any non-empty string —
        including 'False' — as True, so `--use_gpu False` silently
        enabled the GPU. This parser accepts the usual spellings.
        """
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(description='iTransformer')

    parser.add_argument('--ecmwf_host', type=str, default='localhost', help='ecmwf server host')

    # basic config
    parser.add_argument('--is_training', type=int, default=0, help='status')
    parser.add_argument('--model_id', type=str, default='pwv_36_36', help='model id')
    parser.add_argument('--model', type=str, default='iTransformer',
                        help='model name, options: [iTransformer, iInformer, iReformer, iFlowformer, iFlashformer]')

    # data loader
    parser.add_argument('--data', type=str, default='pwv', help='dataset type')
    parser.add_argument('--root_path', type=str, default='./dataset/pwv/', help='root path of the data file')
    parser.add_argument('--data_path', type=str, default='pwv.csv', help='data csv file')
    parser.add_argument('--features', type=str, default='MS',
                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
    parser.add_argument('--target', type=str, default='rain', help='target feature in S or MS task')
    parser.add_argument('--freq', type=str, default='5min',
                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')

    # forecasting task
    parser.add_argument('--seq_len', type=int, default=36, help='input sequence length')
    parser.add_argument('--label_len', type=int, default=18,
                        help='start token length')  # no longer needed in inverted Transformers
    parser.add_argument('--pred_len', type=int, default=36, help='prediction sequence length')

    # model define
    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
    parser.add_argument('--c_out', type=int, default=7,
                        help='output size')  # applicable on arbitrary number of variates in inverted Transformers
    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
    parser.add_argument('--factor', type=int, default=1, help='attn factor')
    parser.add_argument('--distil', action='store_false',
                        help='whether to use distilling in encoder, using this argument means not using distilling',
                        default=True)
    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
    parser.add_argument('--embed', type=str, default='timeF',
                        help='time features encoding, options:[timeF, fixed, learned]')
    parser.add_argument('--activation', type=str, default='gelu', help='activation')
    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
    parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')

    # optimization
    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
    parser.add_argument('--itr', type=int, default=1, help='experiments times')
    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
    parser.add_argument('--des', type=str, default='test', help='exp description')
    parser.add_argument('--loss', type=str, default='MSE', help='loss function')
    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)

    # GPU
    parser.add_argument('--use_gpu', type=str2bool, default=True, help='use gpu')
    parser.add_argument('--gpu', type=int, default=0, help='gpu')
    parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')

    # iTransformer
    parser.add_argument('--exp_name', type=str, required=False, default='MTSF',
                        help='experiemnt name, options:[MTSF, partial_train]')
    parser.add_argument('--channel_independence', type=str2bool, default=False,
                        help='whether to use channel_independence mechanism')
    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=True)
    parser.add_argument('--class_strategy', type=str, default='projection', help='projection/average/cls_token')
    parser.add_argument('--target_root_path', type=str, default='./data/electricity/',
                        help='root path of the data file')
    parser.add_argument('--target_data_path', type=str, default='electricity.csv', help='data file')
    parser.add_argument('--efficient_training', type=str2bool, default=False,
                        help='whether to use efficient_training (exp_name should be partial train)')  # See Figure 8 of our paper for the detail
    # NOTE(review): use_norm declares type=int with a bool default; only the
    # default path is exercised here, but passing the flag requires an int.
    parser.add_argument('--use_norm', type=int, default=True, help='use norm and denorm')
    parser.add_argument('--partial_start_index', type=int, default=0,
                        help='the start index of variates for partial training, '
                             'you can select [partial_start_index, min(enc_in + partial_start_index, N)]')

    args = parser.parse_args()
    # Only use the GPU when one is actually available.
    args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False

    if args.use_gpu and args.use_multi_gpu:
        args.devices = args.devices.replace(' ', '')
        device_ids = args.devices.split(',')
        args.device_ids = [int(id_) for id_ in device_ids]
        args.gpu = args.device_ids[0]

    print('Args in experiment:')
    print(args)

    # Experiment identifier: must match the checkpoint directory name
    # produced at training time.
    setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}'.format(
        args.model_id,
        args.model,
        args.data,
        args.features,
        args.seq_len,
        args.label_len,
        args.pred_len,
        args.d_model,
        args.n_heads,
        args.e_layers,
        args.d_layers,
        args.d_ff,
        args.factor,
        args.embed,
        args.distil,
        args.des,
        args.class_strategy)

    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # do not ship this enabled in production.
    app.run(
        host='0.0.0.0',
        port=12002,
        debug=True
    )
