import pickle
import random

import gymnasium as gym
import numpy as np
import torch
import torch.multiprocessing as mp


class BaseAgent:
    """Common scaffolding for RL agents: environment construction, RNG
    seeding, policy evaluation, checkpointing, and training bookkeeping.

    Subclasses must override :meth:`evaluate`.

    Expected ``config`` keys: ``'env_name'``, ``'seed'``, ``'use_GPU'``,
    ``'max_steps'``, ``'eval_freq'``.
    """

    def __init__(self, config):
        self.config = config
        self.env_name = config['env_name']
        self.env = gym.make(self.env_name)
        self.eval_env = gym.make(self.env_name)
        self.seed = config['seed']
        self.set_random_seeds(self.seed)
        # NOTE(review): no torch.cuda.is_available() guard — with
        # use_GPU=True on a CPU-only machine, later .to(self.device) calls
        # will fail. Confirm whether a silent CPU fallback is desired.
        self.device = 'cuda:0' if config['use_GPU'] else 'cpu'
        self.state_size = int(self.get_state_size())
        self.action_size = int(self.get_action_size())
        self.max_steps = self.config['max_steps']
        self.eval_freq = config['eval_freq']
        self.total_step = 0  # environment steps taken across all episodes
        self.train_step = 0  # gradient-update steps taken
        self.episode = 0
        self.episode_rewards = []
        self.evaluations = []  # presumably eval returns appended by subclasses — verify against caller
        self.max_episode_reward = float('-inf')
        self.goal_reward = self.get_score_required_to_win()
        self.goal_achieved = False

    def set_random_seeds(self, seed):
        """Sets all possible random seeds so results can be reproduced"""
        self.env.action_space.seed(seed)
        self.eval_env.action_space.seed(seed)
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            # manual_seed_all seeds every CUDA device, including the current
            # one, so a separate torch.cuda.manual_seed call is redundant.
            torch.cuda.manual_seed_all(seed)
        np.random.seed(seed)
        random.seed(seed)

    def eval_model(self, model, state_norm=None, times=3):
        """Run ``times`` evaluation episodes and return the mean episode return.

        Moves ``model`` to CPU and leaves it there — callers training on GPU
        must move it back themselves. ``state_norm``, if given, is applied to
        every observation with ``update=False`` (statistics frozen).

        NOTE(review): each episode resets with the same fixed seed, so a
        deterministic policy replays one identical episode ``times`` times —
        confirm this is intended.
        """
        model.to('cpu')
        eval_reward = 0
        for _ in range(times):
            done = False
            state, _ = self.eval_env.reset(seed=self.seed)
            if state_norm is not None:
                state = state_norm(state, update=False)
            while not done:
                action = self.evaluate(state)
                state, reward, terminated, truncated, _ = self.eval_env.step(action)
                if state_norm is not None:
                    state = state_norm(state, update=False)
                eval_reward += reward
                done = terminated or truncated
        return eval_reward / times

    def evaluate(self, state):
        """Return the action to take in ``state``. Must be overridden."""
        # Fix: NotImplementedError is the conventional exception for an
        # unimplemented abstract method (ValueError implies a bad argument).
        raise NotImplementedError("evaluate() must be implemented")

    @staticmethod
    def take_optimization(optimizer, loss, grad_clip=None, model=None):
        """One optimization step: zero grads, backprop ``loss``, optionally
        clip the global grad norm of ``model`` to ``grad_clip``, then step.

        ``model`` is required whenever ``grad_clip`` is given.
        """
        optimizer.zero_grad()
        loss.backward()
        if grad_clip is not None:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=grad_clip)
        optimizer.step()

    @staticmethod
    def copy_model(from_model, target_model):
        """Hard-copy ``from_model``'s parameters into ``target_model`` in place."""
        for target_param, from_param in zip(target_model.parameters(), from_model.parameters()):
            target_param.detach().copy_(from_param.detach().clone())

    def print_results(self, episode_step, episode_reward):
        """Record ``episode_reward``, update the running best, and print a
        one-line training summary."""
        self.episode_rewards.append(episode_reward)
        if episode_reward > self.max_episode_reward:
            self.max_episode_reward = episode_reward
        print(
            f"Total_T:{self.total_step} Episode:{self.episode} Episode_T:{episode_step} Score:{episode_reward:.2f} "
            f"Max_score:{self.max_episode_reward:.2f}")

    def save_model(self, model, running_state=None):
        """Checkpoint ``(model, running_state)`` when the latest evaluation
        reaches the env's reward threshold, or (until the goal has been
        achieved) periodically every ``eval_freq`` total steps.

        SECURITY NOTE: checkpoints are pickled — only load them from trusted
        sources, since unpickling executes arbitrary code.
        """
        if self.evaluations and self.evaluations[-1] >= self.goal_reward:
            self.goal_achieved = True
            self._dump_checkpoint(model, running_state)
        elif not self.goal_achieved and self.total_step % self.eval_freq == 0:
            self._dump_checkpoint(model, running_state)

    def _dump_checkpoint(self, model, running_state):
        """Pickle the checkpoint tuple to the results directory (path is
        relative to the working directory and must already exist)."""
        path = f"../results/model/{self.config['env_name']}_{self.__class__.__name__}.pth"
        with open(path, 'wb') as f:
            pickle.dump((model, running_state), f)

    def get_state_size(self):
        """Dimension of the observation space (assumes a flat, 1-D space)."""
        return self.env.observation_space.shape[0]

    def get_action_size(self):
        """Number of discrete actions, or the continuous action dimension."""
        if isinstance(self.env.action_space, gym.spaces.Discrete):
            return self.env.action_space.n
        else:
            return self.env.action_space.shape[0]

    def get_score_required_to_win(self):
        """The env spec's ``reward_threshold``, or +inf when none is defined."""
        max_score = self.env.spec.reward_threshold
        if max_score is not None:
            return max_score
        else:
            return float('inf')
