import os
import time
import torch
import random
import argparse
import warnings
import numpy as np

from transformers import AutoConfig
from chronos import ChronosPipeline
from sklearn.preprocessing import StandardScaler

from models.moment import MOMENTPipelineWithRetrieval
from utils.tools import test, test_retrieve
from retrieve import do_retrieve, load_database
from data_provider.data_factory import data_provider
from models.ChronosBolt import ChronosBoltPipeline, ChronosBoltModelForForecastingWithRetrieval

# Silence library warnings; the model stacks used below emit many benign notices.
warnings.filterwarnings('ignore')

# Pin every RNG this pipeline touches so evaluation runs are reproducible.
fix_seed = 2021
for _seed_fn in (random.seed, torch.manual_seed, np.random.seed):
    _seed_fn(fix_seed)

def _str2bool(value):
    """Parse a boolean command-line value.

    argparse's ``type=bool`` is a well-known trap: ``bool('False')`` is
    True because every non-empty string is truthy. Accept the usual
    textual spellings instead, so ``--save False`` actually disables saving.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('1', 'true', 't', 'yes', 'y')


parser = argparse.ArgumentParser(description='Chronos-bolt')

# -- experiment bookkeeping --
# NOTE: the previous `default='test'` was dead code — argparse never uses a
# default for a required argument.
parser.add_argument('--model_id', type=str, required=True)
parser.add_argument('--checkpoints', type=str, default='./checkpoints/')

# -- dataset --
parser.add_argument('--root_path', type=str, default='./dataset/traffic/')
parser.add_argument('--data_path', type=str, default='traffic.csv')
parser.add_argument('--data', type=str, default='custom')
parser.add_argument('--features', type=str, default='M')
parser.add_argument('--freq', type=int, default=1)
parser.add_argument('--target', type=str, default='OT')
parser.add_argument('--embed', type=str, default='timeF')
parser.add_argument('--percent', type=int, default=10)
parser.add_argument('--all', type=int, default=0)

# -- forecasting window --
parser.add_argument('--seq_len', type=int, default=512)
parser.add_argument('--pred_len', type=int, default=96)
parser.add_argument('--label_len', type=int, default=48)

# -- optimization --
parser.add_argument('--decay_fac', type=float, default=0.75)
parser.add_argument('--learning_rate', type=float, default=0.0001)
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--num_workers', type=int, default=10)
parser.add_argument('--train_epochs', type=int, default=10)
parser.add_argument('--patience', type=int, default=3)

# -- model architecture --
parser.add_argument('--gpt_layers', type=int, default=3)
parser.add_argument('--is_gpt', type=int, default=1)
parser.add_argument('--e_layers', type=int, default=3)
parser.add_argument('--d_model', type=int, default=768)
parser.add_argument('--n_heads', type=int, default=16)
parser.add_argument('--d_ff', type=int, default=512)
parser.add_argument('--dropout', type=float, default=0.2)
parser.add_argument('--enc_in', type=int, default=862)
parser.add_argument('--c_out', type=int, default=862)
parser.add_argument('--patch_size', type=int, default=16)
parser.add_argument('--kernel_size', type=int, default=25)

parser.add_argument('--pretrain', type=int, default=1)
parser.add_argument('--model', type=str, default='model')
parser.add_argument('--stride', type=int, default=8)
parser.add_argument('--max_len', type=int, default=-1)
parser.add_argument('--hid_dim', type=int, default=16)
parser.add_argument('--tmax', type=int, default=20)

parser.add_argument('--cos', type=int, default=0)
parser.add_argument('--train_ratio', type=float, default=1.0, required=False)
parser.add_argument('--save_file_name', type=str, default=None)
parser.add_argument('--gpu_loc', type=int, default=1)
parser.add_argument('--n_scale', type=float, default=-1)
parser.add_argument('--method', type=str, default='')

# -- retrieval --
parser.add_argument('--embedding_tuning', type=str, default=None)
# NOTE(review): `type=dict` cannot parse a CLI string; this option only
# works when left at its default and filled in programmatically below.
parser.add_argument('--metadata', type=dict, default={})
parser.add_argument('--metadata_database_name', type=str, default='ETTh2')
parser.add_argument('--metadata_frequency', type=str, default='hour')
parser.add_argument('--mode', type=str, default='only_self_train')
parser.add_argument('--top_k', type=int, default=1)
parser.add_argument('--retrieval_database_dir', type=str, default='../retrieval_database/')
parser.add_argument('--dimension', type=int, default=768)
parser.add_argument('--embedding_model_type', type=str, default='chronos')
# BUGFIX: was `type=bool`, which made every explicit value truthy.
parser.add_argument('--save', type=_str2bool, default=True)
parser.add_argument('--lookback_length', type=int, default=512)

# -- augmentation --
parser.add_argument('--augment_mode', type=str, default='moe2')

parser.add_argument('--checkpoint_model_path', type=str, default='None')
parser.add_argument('--pretrained_model_path', type=str, default='./checkpoints/base')

args = parser.parse_args()


# ---- run-level configuration derived from the parsed arguments ----

# Name of the log file that aggregate results are appended to at the end of
# the script. BUGFIX: the original only bound `log_fine_name` when
# --save_file_name was given, so the final logging step crashed with a
# NameError otherwise; fall back to a deterministic default instead.
if args.save_file_name is not None:
    log_fine_name = args.save_file_name
else:
    log_fine_name = 'forecast_evaluation.txt'

device_address = 'cuda:' + str(args.gpu_loc)

# Steps per seasonal cycle for each named sampling frequency; used below to
# overwrite args.freq when the dataset reports a named frequency.
SEASONALITY_MAP = {
   "minutely": 1440,
   "10_minutes": 144,
   "half_hourly": 48,
   "hourly": 24,
   "daily": 7,
   "weekly": 1,
   "monthly": 12,
   "quarterly": 4,
   "yearly": 1
}

mses = []  # accumulated mean-squared errors (one entry per evaluation)
maes = []  # accumulated mean-absolute errors
print(args.model_id)

# Pack retrieval settings into the metadata dict consumed by
# do_retrieve / load_database in the retrieval branch below.
args.metadata['lookback_length'] = args.lookback_length
args.metadata['frequency'] = args.metadata_frequency
# A space-separated string of database names becomes a list of names.
args.metadata['database_name'] = args.metadata_database_name.split(' ')
# Remember the original csv name before it may be swapped for the retrieved one.
ori_data_path = args.data_path

best_model_path = args.checkpoint_model_path
print(f'best_model_path: {best_model_path}')
if not os.path.exists(best_model_path):
    exit('no corresponding checkpoint!!')

# freq == 0 is shorthand for hourly data; any other int is resolved via
# SEASONALITY_MAP after the data loader reports the dataset frequency.
if args.freq == 0:
    args.freq = 'h'

if 'retrieve' in args.model_id:
    # Build (or reuse) the csv of retrieved neighbour series for this dataset.
    retrieval_database_names = '_'.join(args.metadata['database_name'])
    retrieved_data_path = os.path.join(
        args.root_path,
        f'{ori_data_path.split(".")[0]}_retrieve_{retrieval_database_names}_'
        f'{args.metadata["lookback_length"]}_{args.mode}_{args.embedding_tuning}.csv')
    if os.path.exists(retrieved_data_path):
        print(f'----------retrieval for {args.model_id} has done!!----------')
    else:
        print(f'----------retrieving for {args.model_id} ...----------')
        if 'chronos' in args.embedding_model_type:
            if args.embedding_tuning is None:
                # Default to the public base checkpoint when no tuned
                # embedding model was requested.
                model_path = "amazon/chronos-t5-base"
            else:
                model_path = f"../tuning_results/{args.metadata_database_name}_{str(args.seq_len)}_chronos_{args.embedding_tuning}"
                if not os.path.exists(model_path):
                    exit('embedding model path does not exist!!')
            embedding_model = ChronosPipeline.from_pretrained(
                model_path,
                device_map=device_address,
                torch_dtype=torch.bfloat16,
            )
        else:
            print('embedding model type error!!')
            exit()
        # Retrieve at least 20 candidates so downstream top-k selection has slack.
        top_k = max(args.top_k, 20)
        do_retrieve(ori_data_path.split('.')[0], args.retrieval_database_dir, args.root_path, args.metadata, args.mode, top_k, args.seq_len, args.pred_len, fix_seed, args.dimension, embedding_model, args.save, args.embedding_tuning)
    print('retrieved_data_path = {}'.format(retrieved_data_path))
    args.data_path = retrieved_data_path.split('/')[-1]

    # load retrieved raw data, it will be used to reconstruct the retrieved data
    retriever_rawdata = []
    if args.mode == 'only_self' or args.mode == 'only_self_train':
        # database: {var1: {}, var2: {}, ...}
        # retriever_rawdata: [var1_raw_data, var2_raw_data, ...]
        database = load_database(os.path.join(args.retrieval_database_dir, f'{args.metadata["database_name"][0]}_{args.metadata["frequency"]}_{args.metadata["lookback_length"]}.pkl'))
        for variable in database.keys():
            retriever_rawdata.append(database[variable]['raw_data'])
    elif args.mode == 'all_vars':
        # Concatenate the raw series of EVERY listed database.
        # BUGFIX: the original ignored the loop variable and always loaded
        # database_name[0], so only the first database was ever used.
        for database_name in args.metadata['database_name']:
            database = load_database(os.path.join(args.retrieval_database_dir, f'{database_name}_{args.metadata["frequency"]}_{args.metadata["lookback_length"]}.pkl'))
            for variable in database.keys():
                retriever_rawdata.append(database[variable]['raw_data'])

    # Standardize the retrieved series. The scaler works on (n_samples,
    # n_features), so transpose in, scale, transpose back to (n_series, length).
    scaler = StandardScaler()
    retriever_rawdata = np.array(retriever_rawdata).T
    scaler.fit(retriever_rawdata)
    retriever_rawdata = scaler.transform(retriever_rawdata)
    retriever_rawdata = retriever_rawdata.T
    test_data, test_loader = data_provider(args, 'test', retriever_rawdata=retriever_rawdata)

else:
    test_data, test_loader = data_provider(args, 'test')

# Map a named dataset frequency onto its seasonality unless the CLI already
# forced hourly mode above.
if args.freq != 'h':
    args.freq = SEASONALITY_MAP[test_data.freq]
    print("freq = {}".format(args.freq))

device = torch.device(device_address)

time_now = time.time()


def _strip_module_prefix(state_dict):
    """Remove the 'module.' prefix that nn.DataParallel prepends to keys.

    Mirrors the original behaviour exactly: str.replace removes every
    occurrence of 'module.', not only a leading one. A plain dict keeps
    insertion order (Python 3.7+), so OrderedDict is unnecessary.
    """
    return {key.replace("module.", ""): value for key, value in state_dict.items()}


if args.model == 'ChronosBolt':
    model = ChronosBoltPipeline.from_pretrained(args.pretrained_model_path)
    # BUGFIX: os.path.join replaces the bare string concatenation, which
    # produced a broken path whenever pretrained_model_path had no trailing '/'.
    model.model.load_state_dict(
        torch.load(os.path.join(args.pretrained_model_path, 'autogluon_model.pth')))
    model.model.to(device)
elif args.model == 'ChronosBoltRetrieve':
    config = AutoConfig.from_pretrained(args.pretrained_model_path)
    model = ChronosBoltModelForForecastingWithRetrieval.from_pretrained(
        args.pretrained_model_path, config=config, augment=args.augment_mode)
    # Fine-tuned weights were saved from a (possibly DataParallel-wrapped) run.
    model.load_state_dict(_strip_module_prefix(torch.load(best_model_path)))
    model.to(device)
elif args.model == "MOMENTRetrieve":
    MOMENT_MODEL_PATH = "AutonLab/MOMENT-1-large"
    model = MOMENTPipelineWithRetrieval.from_pretrained(
        MOMENT_MODEL_PATH,
        model_kwargs={
            'task_name': 'forecasting',
            'forecast_horizon': 64,
        })
    model.init()
    model.load_state_dict(_strip_module_prefix(torch.load(best_model_path)))
    model.to(device)
else:
    print('model error')
    exit()
print("------------------------------------")

# Run evaluation; the retrieval-aware path is selected by the experiment id.
if 'retrieve' in args.model_id:
    mse, mae = test_retrieve(model, test_data, test_loader, args, device)
else:
    mse, mae = test(model, test_data, test_loader, args, device)
mses.append(round(mse, 5))
maes.append(round(mae, 5))

# NOTE: the original `if len(maes)==0: exit()` guard was unreachable — an
# element was appended just above — so it has been removed.
maes = np.array(maes)
mses = np.array(mses)
print("mse_mean = {:.4f}, mse_std = {:.4f}".format(np.mean(mses), np.std(mses)))
print("mae_mean = {:.4f}, mae_std = {:.4f}".format(np.mean(maes), np.std(maes)))

# Append the aggregate scores to the run log. Resolve the file name from
# args directly so a missing --save_file_name cannot raise a NameError
# (the original referenced a name bound only when the flag was given).
log_name = args.save_file_name if args.save_file_name is not None else 'forecast_evaluation.txt'
log_dir = 'results/forecast_evaluation'
os.makedirs(log_dir, exist_ok=True)
file_path = os.path.join(log_dir, log_name)

with open(file_path, 'a') as f:
    f.write("{}\n".format(args.model_id))
    f.write("mse:{:.4f}, std:{:.4f} ---- mae:{:.4f}, std:{:.4f}\n".format(
        np.mean(mses), np.std(mses), np.mean(maes), np.std(maes)))

print(log_name)