import torch, traceback
import numpy as np
from Utils.tensor_ops import _2tensor
from config import GlobalConfig as cfg
from Utils.gpu_share import GpuShareUnit
from Common.trajectory_sampler import TrajectoryDataSampler


class Trainer():
    """Runs policy updates on sampled trajectory batches.

    GPU access is arbitrated through a ``GpuShareUnit``; when an update
    fails with a CUDA out-of-memory ``RuntimeError`` and
    ``memory_safety_check`` is enabled, the trajectory sample size is
    shrunk (via ``TrajectoryDataSampler.MaxSampleNum``) and the update
    is retried.
    """

    def __init__(self, model, train_config):
        # The policy network being optimized.
        self.policy = model
        # Number of gradient-update epochs to run per trajectory batch.
        self.train_epoch = train_config.train_epoch
        # Whether invalid actions are masked during action evaluation.
        self.use_avail_act = train_config.action_filter_enabled
        self.max_grad_norm = train_config.max_grad_norm
        # When True, GPU OOM triggers a sample-size reduction and retry
        # instead of aborting the training run.
        self.memory_safety_check = train_config.memory_safety_check
        self.lr = train_config.learning_rate
        self.gamma = train_config.gamma

        self.train_update_cnt = 0   # number of completed batch updates
        self.trivial_dict = {}      # metric name -> list of per-epoch values

        self.gpu_share_unit = GpuShareUnit(cfg.device, gpu_party=cfg.gpu_party)

    def train_model_on_trajectories(self, traj_pool, task):
        """Run one full update on ``traj_pool``, retrying on GPU OOM.

        Each retry (with ``memory_safety_check`` enabled) marks the
        current sampler size as failed so the sampler falls back to a
        smaller sample; without the safety check the original error is
        re-raised.
        """
        while True:
            try:
                with self.gpu_share_unit:
                    self.learn_from_trajectories(traj_pool, task)
                break  # reaching here means GPU memory was sufficient
            except RuntimeError:
                print(traceback.format_exc())
                if self.memory_safety_check:
                    # In some cases, reversing MaxSampleNum a single time is
                    # not enough: discard an already-reversed (-1) entry first.
                    if TrajectoryDataSampler.MaxSampleNum[-1] < 0:
                        TrajectoryDataSampler.MaxSampleNum.pop(-1)
                    assert TrajectoryDataSampler.MaxSampleNum[-1] > 0
                    # Mark the current sample size as failed; the sampler
                    # will fall back to the previous (smaller) size.
                    TrajectoryDataSampler.MaxSampleNum[-1] = -1
                    print('Insufficient gpu memory, using previous sample size !')
                else:
                    # Fix: was ``assert False`` (stripped under ``python -O``,
                    # silently swallowing the error); re-raise instead.
                    raise
            torch.cuda.empty_cache()

    def learn_from_trajectories(self, traj_pool, task):
        """Perform ``train_epoch`` gradient updates on one trajectory batch.

        Returns:
            The total number of batch updates completed so far.
        """
        sampler = TrajectoryDataSampler(n_div=1, traj_pool=traj_pool, flag=task,
                                        memory_safety_check=self.memory_safety_check)
        for e in range(self.train_epoch):
            # Draw a fresh sample of trajectories for this epoch.
            sample_iter = sampler.reset_and_get_iter()
            train_sample = next(sample_iter)
            loss_final, loss_dict = self.run_training(task, train_sample)
            # NOTE: a dead ``loss_final = loss_final * 0.5`` rescale was
            # removed here — its result was never used.
            if e == 0:
                print('[Trainer.py] Memory Allocated %.2f GB' % (torch.cuda.memory_allocated() / 1073741824))
            self.record_training_metrics(dictionary=loss_dict)
            # Drop tensor references so GPU memory can be reclaimed
            # before the next epoch's pass.
            del loss_final, loss_dict

        self.print_training_summary()
        self.train_update_cnt += 1
        return self.train_update_cnt

    def record_training_metrics(self, dictionary):
        """Accumulate one epoch's metrics into ``self.trivial_dict``.

        Tensor-like values are unwrapped with ``.item()``; plain numbers
        are appended as-is.
        """
        for key, value in dictionary.items():
            if key not in self.trivial_dict:
                self.trivial_dict[key] = []
            item = value.item() if hasattr(value, 'item') else value
            self.trivial_dict[key].append(item)

    def print_training_summary(self, output=True):
        """Average accumulated metrics, optionally print them, then reset.

        Args:
            output: when True, print a single summary line covering all
                recorded metrics.
        """
        print_buf = ['[Trainer.py] ']
        for key in self.trivial_dict:
            # Reduce each metric's per-epoch list to its mean.
            self.trivial_dict[key] = np.array(self.trivial_dict[key]).mean()
            print_buf.append(' %s:%.3f, ' % (key, self.trivial_dict[key]))
        if output:
            print(''.join(print_buf))
        self.trivial_dict = {}

    def run_training(self, flag, sample):
        """Move one sampled batch to device tensors and run a policy update.

        Args:
            flag: must be ``'train'`` (the only supported mode here).
            sample: mapping with keys 'state', 'reward', 'action',
                'next_state', 'done' and optionally 'avail_act'; values
                are array-like batches converted via ``_2tensor``.

        Returns:
            (loss, loss_dict): the training loss returned by the policy's
            ``evaluate_actions`` and a dict of named losses for logging.
        """
        # Validate mode up front, before any device transfers.
        assert flag == 'train'

        state = _2tensor(sample['state'])
        reward = _2tensor(sample['reward'])
        action = _2tensor(sample['action'])
        next_state = _2tensor(sample['next_state'])
        done = _2tensor(sample['done'])
        # The action-availability mask is optional in the sample.
        avail_act = _2tensor(sample['avail_act']) if 'avail_act' in sample else None

        loss = self.policy.evaluate_actions(state_batch=state,
                                            action_batch=action,
                                            avail_act_batch=avail_act,
                                            next_state_batch=next_state,
                                            reward_batch=reward,
                                            done_batch=done,
                                            gamma=self.gamma,
                                            max_grad_norm=self.max_grad_norm)

        loss_dict = {"value_l1loss": loss}
        return loss, loss_dict
