import os
import numpy as np
from os.path import dirname, abspath
from components.episode_buffer import EpisodeBatch
import h5py
from utils.h5dataloader import H5Dataset
from torch.utils import data
import torch as th
from types import SimpleNamespace as SN


def train_behavior_policy(args, total_datas, logger, learner, runner, data_dir, hdkey, scheme, groups, preprocess):
    """Train (or load) a behavior-cloning model on the offline dataset.

    If a checkpoint directory for this env/map + dataset suffix already
    exists, the saved behavior model is loaded from it. Otherwise the
    learner is trained on random mini-batches of whole episodes drawn from
    ``total_datas`` until ``args.max_behavior_train_steps`` (non-padding)
    transitions have been consumed, then the model is saved to that
    checkpoint directory.

    Args:
        args: experiment config (uses batch_size, device, name, agent,
            h5file_suffix, env / env_args, max_behavior_train_steps).
        total_datas: dict of per-episode tensors keyed by ``hdkey`` entries.
        logger: experiment logger (console_logger is used).
        learner: learner exposing train_behavior / load_behavior_model /
            save_behavior_model.
        runner: episode runner; provides ``episode_limit``.
        data_dir: kept for interface compatibility; not used here.
        hdkey: dataset keys (must include 'filled').
        scheme, groups, preprocess: EpisodeBatch construction arguments.
    """
    offline_root = os.path.join(dirname(dirname(dirname(abspath(__file__)))), "offline_datasets")
    if 'map_name' in args.env_args.keys():
        behavior_checkpoint_path = os.path.join(offline_root, args.env_args['map_name']+'_'+args.h5file_suffix+"_"+args.name+"_"+args.agent+"_bcmodel")
    else:
        behavior_checkpoint_path = os.path.join(offline_root, args.env+args.h5file_suffix+"_"+args.name+"_"+args.agent+"_bcmodel")

    if os.path.exists(behavior_checkpoint_path):
        logger.console_logger.info("Loading behavior models from {}".format(behavior_checkpoint_path))
        learner.load_behavior_model(behavior_checkpoint_path)
        return

    logger.console_logger.info("Training behavior models")
    behavior_train_steps = 0
    while behavior_train_steps < args.max_behavior_train_steps:
        # Draw a random mini-batch of whole episodes (without replacement).
        sample_number = np.random.choice(len(total_datas[hdkey[0]]), args.batch_size, replace=False)
        off_batch = {}
        filled_sample = None
        for key in hdkey:
            if key != 'filled':
                off_batch[key] = total_datas[key][sample_number].to(args.device)
            else:
                filled_sample = total_datas[key][sample_number].to(args.device)
        new_batch = EpisodeBatch(scheme, groups, args.batch_size, runner.episode_limit + 1,
                                 preprocess=preprocess, device=args.device)
        new_batch.update(off_batch)
        # 'filled' is written straight into the transition data; it is a
        # padding mask rather than a scheme field handled by update().
        new_batch.data.transition_data['filled'] = filled_sample
        # Return values (loss terms) were never used by the caller, so the
        # previous "InSPO" arity branch collapses to a single call.
        learner.train_behavior(new_batch)
        # Budget is counted in consumed (non-padding) transitions.
        behavior_train_steps += int(filled_sample.sum().to('cpu'))

    behavior_save_path = behavior_checkpoint_path
    os.makedirs(behavior_save_path, exist_ok=True)
    logger.console_logger.info("Saving behavior models to {}".format(behavior_save_path))
    learner.save_behavior_model(behavior_save_path)

def load_datasets(args, logger, data_dir):
    """Load offline episodes from an HDF5 dataset into CPU tensors.

    Resolves the dataset path from the env name (matrix/bridge envs) or the
    SMAC map name, samples up to ``args.training_episodes`` (default 5000)
    episodes without replacement, and converts each field to a torch tensor.
    Also clamps ``args.batch_size`` so later without-replacement mini-batch
    sampling cannot request more episodes than were loaded.

    Args:
        args: experiment config (env, env_args, h5file_suffix, batch_size,
            optional training_episodes). ``args.batch_size`` may be reduced
            in place.
        logger: experiment logger (console_logger is used).
        data_dir: root directory holding the .h5 dataset files.

    Returns:
        (total_datas, hdkey): dict mapping dataset field -> tensor of the
        sampled episodes, and the list of dataset keys.
    """
    # --------------------------- hdf5 -------------------------------
    if ('matrix' in args.env) or ('bridge' in args.env):
        dataset_dir = data_dir+'/'+args.env+'/'+args.h5file_suffix + '.h5'
    else:
        dataset_dir = data_dir+'/'+args.env_args['map_name']+'_'+args.h5file_suffix + '.h5'
    dataset = H5Dataset(dataset_dir)
    hdkey = dataset.keys
    if getattr(args, "training_episodes", False):
        sample_datas_batch_size = args.training_episodes
    else:
        sample_datas_batch_size = 5000

    # Clamp ONCE against the dataset size so batch_size, the sampled index
    # set and the log message all agree. Previously batch_size was clamped
    # against the *requested* count, so a dataset smaller than the request
    # could leave batch_size > number of loaded episodes.
    sample_datas_batch_size = min(sample_datas_batch_size, len(dataset))
    args.batch_size = min(args.batch_size, sample_datas_batch_size)
    random_idx = np.sort(np.random.choice(len(dataset), sample_datas_batch_size, replace=False))
    total_datas = dataset[random_idx]
    for key in total_datas.keys():
        total_datas[key] = th.tensor(total_datas[key])
    logger.console_logger.info("Loading data from  {}, totally {} episodes, sample {} episodes".format(dataset_dir,len(dataset),sample_datas_batch_size))
    return total_datas,hdkey

def sample_medium_replay(args, scheme, buffer):
    """Dump the current replay buffer to a 'medium_replay' HDF5 dataset.

    Writes one HDF5 dataset per scheme key (plus 'filled') to
    offline_datasets/<map_name>_medium_replay.h5, overwriting any existing
    file.

    Args:
        args: experiment config (env_args['map_name'] names the file).
        scheme: EpisodeBatch scheme; its keys name the datasets to write.
        buffer: replay buffer exposing ``episodes_in_buffer`` and
            ``sample``; sampled tensors must support .to('cpu').numpy().
    """
    offline_root = os.path.join(dirname(dirname(dirname(abspath(__file__)))), "offline_datasets")
    os.makedirs(offline_root, exist_ok=True)
    data_dir = os.path.join(offline_root, args.env_args['map_name'] + '_medium_replay' + '.h5')
    if os.path.exists(data_dir):
        os.remove(data_dir)
    keys = list(scheme.keys()) + ['filled']
    print('medium_replay:', buffer.episodes_in_buffer)
    # Sample ONCE so every key is written from the same set of episodes.
    # The previous per-key re-sampling would misalign fields across keys
    # whenever buffer.sample() is stochastic.
    episode_batch = buffer.sample(buffer.episodes_in_buffer)
    # Context manager guarantees the file is closed even if a write fails.
    with h5py.File(data_dir, 'w') as f:
        for key in keys:
            f.create_dataset(key, data=episode_batch[key].to('cpu').numpy())

def merge_episode_batches(batch1, batch2):
    """Concatenate two EpisodeBatch objects along the batch dimension.

    Both batches must share the same scheme, groups, max_seq_length and
    device. The result contains batch1's episodes followed by batch2's.

    Args:
        batch1, batch2: EpisodeBatch instances to merge.

    Returns:
        A new EpisodeBatch of size batch1.batch_size + batch2.batch_size.
    """
    # Refuse to merge structurally incompatible batches.
    assert batch1.scheme == batch2.scheme, "Schemes must be identical"
    assert batch1.groups == batch2.groups, "Groups must be identical"
    assert batch1.max_seq_length == batch2.max_seq_length, "Sequence lengths must match"
    assert batch1.device == batch2.device, "Device must be identical"

    # Build the merged data container: every stored tensor is concatenated
    # along dim 0 (the batch dimension).
    combined = SN()
    combined.transition_data = {
        field: th.cat([batch1.data.transition_data[field],
                       batch2.data.transition_data[field]], dim=0)
        for field in batch1.data.transition_data
    }
    combined.episode_data = {
        field: th.cat([batch1.data.episode_data[field],
                       batch2.data.episode_data[field]], dim=0)
        for field in batch1.data.episode_data
    }

    return EpisodeBatch(
        scheme=batch1.scheme,
        groups=batch1.groups,
        batch_size=batch1.batch_size + batch2.batch_size,
        max_seq_length=batch1.max_seq_length,
        data=combined,
        device=batch1.device,
    )
