import os
import ray
import json
import time
import argparse
import numpy as np
import pandas as pd
from ray import tune

from offlinerl.algo import algo_select
from offlinerl.data import load_data_from_neorl
from offlinerl.evaluation import OnlineCallBackFunction, PeriodicCallBack

# Random seeds every hyperparameter setting is evaluated on (one tune trial per seed).
SEEDS = [7, 42, 210]

# Directory (sibling of this script) where training logs and result summaries are written.
ResultDir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'results'))


def training_function(config):
    ''' Run a single tune trial: train the selected algorithm with one seed.

    `config` carries the algorithm kwargs under "kwargs" plus the
    tune-sampled hyperparameters and seed; returns the trainer's result.
    '''
    # propagate the tune-sampled seed into the algorithm kwargs
    config["kwargs"]['seed'] = config['seed']

    init_fn, trainer_cls, algo_config = algo_select(config["kwargs"])
    train_buffer, val_buffer = load_data_from_neorl(algo_config["task"])

    # overlay the sampled hyperparameters on top of the algorithm defaults
    algo_config.update(config)
    algo_config["device"] = "cuda"

    trainer = trainer_cls(init_fn(algo_config), algo_config)

    # evaluate the policy online every 100 epochs
    callback = PeriodicCallBack(OnlineCallBackFunction(), 100)
    callback.initialize(train_buffer=train_buffer, val_buffer=val_buffer, task=algo_config["task"], number_of_runs=1000)

    res = trainer.train(train_buffer, val_buffer, callback_fn=callback)
    trainer.exp_run.close()
    # give the logger time to flush even when disks or cpus are busy
    time.sleep(10)

    return res


def upload_result(task_name: str, algo_name: str, results: list):
    ''' Write the best result as a txt summary and the full result list as json.

    `results` is a list of dicts with keys `reward_mean`, `reward_std` and
    `parameter`; the entry with the highest reward_mean is summarized.
    Files are written into `ResultDir` as "<task>,<algo>.txt" / ".json".
    '''
    # Pick the entry with the best mean reward. `max` returns the first
    # maximum on ties, matching the previous max()+list.index() logic.
    best_result = max(results, key=lambda r: r['reward_mean'])

    # txt summary: "<mean>+-<std>" followed by one "key : value" line per hyperparameter
    file_name = task_name + ',' + algo_name + '.txt'
    with open(os.path.join(ResultDir, file_name), 'w') as f:
        f.write(str(best_result['reward_mean']) + '+-' + str(best_result['reward_std']))
        for k, v in best_result['parameter'].items():
            f.write('\n')
            f.write(f'{k} : {v}')

    # json dump: the complete list of results for every hyperparameter setting
    file_name = task_name + ',' + algo_name + '.json'
    with open(os.path.join(ResultDir, file_name), 'w') as f:
        json.dump(results, f, indent=4)


def find_result(exp_dir: str):
    ''' Load an experiment's log and return (last online reward, tuned hyperparameters). '''
    log_path = os.path.join(exp_dir, 'objects', 'map', 'dictionary.log')
    with open(log_path, 'r') as f:
        log = json.load(f)
    # final value of the online-evaluation reward curve
    reward = log['__METRICS__']['Reward_Mean_Env'][0]['values']['last']
    # keep only the hyperparameters that took part in the grid search
    hparams = log['hparams']
    parameter = {name: hparams[name] for name in hparams['grid_tune']}
    return reward, parameter


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--domain', type=str)
    parser.add_argument('--algo', type=str, help='select from `bc`, `cql`, `edac`, `mcq`, `td3bc`, `mopo`, `combo`, `rambo`, `mobile`')
    parser.add_argument('--address', type=str, default=None, help='address of the ray cluster')
    args = parser.parse_args()

    ray.init(args.address)

    domain = args.domain
    algo = args.algo

    ''' run and upload result '''
    config = {}
    config["kwargs"] = {
        "exp_name": f'{domain}-{algo}',
        "algo_name": algo,
        "task": domain,
    }

    # Model-based algorithms reuse a pre-trained dynamics model: pick the
    # checkpoint with the lowest loss from the dynamics-training log, if present.
    algo_args = {"algo_name": algo}
    if algo in ["mopo", "combo", "rambo", "mobile"]:
        dynamics_log = os.path.join(ResultDir, f'{domain}-bc_model', "log.csv")
        if os.path.exists(dynamics_log):
            df = pd.read_csv(dynamics_log)
            config["kwargs"]["dynamics_path"] = df.loc[df['loss'].idxmin()]["model_save_path"]
    _, _, algo_config = algo_select(algo_args)

    # expand the algorithm's hyperparameter grid into a tune search space
    parameter_names = []
    grid_tune = algo_config["grid_tune"]
    for k, v in grid_tune.items():
        parameter_names.append(k)
        config[k] = tune.grid_search(v)

    # every hyperparameter setting is run once per seed
    config['seed'] = tune.grid_search(SEEDS)

    analysis = tune.run(
        training_function,
        name=f'{domain}-{algo}',
        config=config,
        metric='Reward_Mean_Env',
        mode='max',
        resources_per_trial={
            "cpu": 2,
            "gpu": 0.33,  # if no gpu or the memory of gpu is not enough, change this parameter
        }
    )

    df = analysis.results_df

    log_folder = os.path.join(ResultDir, config["kwargs"]["exp_name"])
    os.makedirs(log_folder, exist_ok=True)
    # BUG FIX: previously this joined the string literal "ResultDir" in front of
    # log_folder; it only worked because os.path.join discards components that
    # precede an absolute path. log_folder is already the full destination dir.
    df.to_csv(os.path.join(log_folder, "log.csv"))