"""Example Algorithm of Module Predict Control."""

import os
import numpy as np
import torch
from torch import nn
from gym import utils
from gym.spaces import Box
from gym.envs.mujoco import mujoco_env

# MuJoCo's native DLLs must be on the Windows DLL search path before the env loads.
# NOTE(review): this is a machine-specific path; guard it so the module can at
# least be imported on non-Windows platforms / other machines — confirm path.
_MUJOCO_BIN = r"C:\Users\xxx\.mujoco\mujoco210\bin"
if hasattr(os, "add_dll_directory") and os.path.isdir(_MUJOCO_BIN):
    os.add_dll_directory(_MUJOCO_BIN)


class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """HalfCheetah MuJoCo task with a shaped reward penalizing raised front joints.

    Observation (21-dim) is qpos + qvel + torso center of mass; see `_get_obs`.
    `step` returns the old-style 4-tuple (obs, reward, done, info).
    """

    metadata = {"render_modes": ["human", "rgb_array", "depth_array"], "render_fps": 100, }

    def __init__(self, **kwargs):
        # Build the observation space up front and pass it to MujocoEnv.__init__
        # (frame_skip=1, model file 'half_cheetah.xml').
        observation_space = Box(low=-np.inf, high=np.inf, shape=(21,), dtype=np.float32)
        mujoco_env.MujocoEnv.__init__(self, 'half_cheetah.xml', 1, observation_space=observation_space, **kwargs)
        utils.EzPickle.__init__(self, **kwargs)

        # Convenience aliases kept for external callers.
        self.skip = self.frame_skip
        self.action_dim = self.ac_dim = self.action_space.shape[0]
        self.observation_dim = self.obs_dim = self.observation_space.shape[0]

    def get_reward(self, observations):
        """Compute reward and done flags for a single observation or a batch.

        Reward = forward velocity minus a flat penalty for each front joint
        (leg/shin/foot) raised beyond its allowed range. Returns scalars for a
        1-D input, arrays for a 2-D batch. `dones` is always all zeros — the
        task never terminates on its own.
        """
        self.reward_dict = {}
        # Promote a single observation to a batch of one so the code below is uniform.
        if len(observations.shape) == 1:
            observations = np.expand_dims(observations, axis=0)
            batch_mode = False
        else:
            batch_mode = True

        # Column 9 is the first qvel entry (root x velocity) given the
        # qpos(9)+qvel(9)+com(3) layout of _get_obs; columns 6-8 are assumed to
        # be the front thigh/shin/foot joint angles — TODO confirm joint order.
        xvel = observations[:, 9].copy()
        front_leg = observations[:, 6].copy()
        front_shin = observations[:, 7].copy()
        front_foot = observations[:, 8].copy()
        zeros = np.zeros((observations.shape[0],)).copy()

        # Joint-angle thresholds and the flat penalty applied beyond them.
        leg_range = 0.2
        shin_range = 0
        foot_range = 0
        penalty_factor = 10

        self.reward_dict['run'] = xvel
        front_leg_rew = zeros.copy()
        front_leg_rew[front_leg > leg_range] = -penalty_factor
        self.reward_dict['leg'] = front_leg_rew
        front_shin_rew = zeros.copy()
        front_shin_rew[front_shin > shin_range] = -penalty_factor
        self.reward_dict['shin'] = front_shin_rew
        front_foot_rew = zeros.copy()
        front_foot_rew[front_foot > foot_range] = -penalty_factor
        self.reward_dict['foot'] = front_foot_rew
        self.reward_dict['r_total'] = sum((self.reward_dict[i] for i in ['run', 'leg', 'shin', 'foot']))
        # Never terminates: all-zero (falsy) done flags.
        dones = zeros.copy()
        if not batch_mode: return self.reward_dict['r_total'][0], dones[0]
        return self.reward_dict['r_total'], dones

    def get_score(self, obs):
        """Score a single observation by its first entry (root x position, per _get_obs)."""
        xposafter = obs[0]
        return xposafter

    def step(self, action):
        """Advance the simulation by frame_skip steps.

        Returns the legacy 4-tuple (obs, reward, done, info); info carries the
        raw observation dict, the per-term reward dict, and the score.
        """
        self.do_simulation(action, self.frame_skip)
        ob = self._get_obs()
        rew, done = self.get_reward(ob)
        score = self.get_score(ob)
        env_info = {'obs_dict': self.obs_dict, 'rewards': self.reward_dict, 'score': score}
        return ob, rew, done, env_info

    def _get_obs(self):
        """Concatenate joint positions, joint velocities and torso COM into one vector."""
        self.obs_dict = {}
        self.obs_dict['joints_pos'] = self.data.qpos.flat.copy()
        self.obs_dict['joints_vel'] = self.data.qvel.flat.copy()
        self.obs_dict['com_torso'] = self.get_body_com("torso").flat.copy()
        return np.concatenate([self.obs_dict[i] for i in ['joints_pos', 'joints_vel', 'com_torso']])

    def reset_model(self, seed=None):
        """Reset pose and velocity to the initial state plus small noise.

        NOTE(review): velocity noise uses `np_random.random` (uniform in [0, 1)),
        which is biased positive, unlike the symmetric position noise — confirm
        this asymmetry is intended.
        """
        self.reset_pose = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)
        self.reset_vel = self.init_qvel + self.np_random.random(self.model.nv) * .1
        return self.do_reset(self.reset_pose.copy(), self.reset_vel.copy())

    def do_reset(self, reset_pose, reset_vel, reset_goal=None):
        """Set the simulator state to the given pose/velocity and return the observation."""
        self.set_state(reset_pose, reset_vel)
        return self._get_obs()


class Tool:
    """Shared base for model-based agents: dataset statistics + dynamics network.

    Caches environment dimensions/bounds, computes normalization statistics
    (mean/std of states, actions and state deltas) over the seed dataset, and
    builds the dynamics MLP.
    """

    def __init__(self, env, dataset):
        # Environment dimensions and action bounds.
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
        self.action_low = env.action_space.low
        self.action_high = env.action_space.high
        self.get_reward = env.get_reward

        # Normalization statistics over the seed dataset.
        self.state_mean = np.mean(dataset.states_bf, axis=0)
        self.state_std = np.std(dataset.states_bf, axis=0)
        self.action_mean = np.mean(dataset.actions_bf, axis=0)
        self.action_std = np.std(dataset.actions_bf, axis=0)
        self.delta = np.array(dataset.next_states_bf) - np.array(dataset.states_bf)
        self.delta_mean = np.mean(self.delta, axis=0)
        self.delta_std = np.std(self.delta, axis=0)

        self.build_network()

    def build_network(self):
        # Single-hidden-layer MLP: (state, action) -> normalized next-state delta.
        self.network = nn.Sequential(
            nn.Linear(self.state_dim + self.action_dim, 500),
            nn.ReLU(),
            nn.Linear(500, self.state_dim),
        )

    def optimize(self, loss, optimizer):
        """Run one gradient step of `optimizer` on `loss`."""
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


class MPC(Tool):
    """Random-shooting model predictive control on a learned dynamics model."""

    def __init__(self, env, dataset):
        super().__init__(env, dataset)
        self.horizon = 5  # planning horizon (default: 15)
        self.sample_action_num = 1024  # candidate action sequences (default: 4096)
        self.train_times = 60
        self.batch_size = 512
        self.learning_rate = 1e-3
        # One persistent optimizer. The original recreated Adam for every
        # minibatch (resetting its moment estimates each step) with a
        # hard-coded lr that ignored `self.learning_rate`.
        self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.learning_rate)

    def predict_delta(self, states, actions):
        """Predict normalized next-state deltas for a batch of (state, action) pairs."""
        state_norm = torch.asarray(states, dtype=torch.float32)
        action_norm = torch.asarray(actions, dtype=torch.float32)
        input_layer = torch.cat([state_norm, action_norm], dim=1)
        return self.network(input_layer)

    def choose_action(self, state):
        """Return the first action of the best randomly-sampled action sequence."""
        states = np.stack([state] * self.sample_action_num)

        action_shape = [self.sample_action_num, self.horizon, self.action_dim]
        actions = np.random.uniform(self.action_low, self.action_high, action_shape)
        returns = np.zeros(self.sample_action_num)
        for i in range(self.horizon):
            state_norm = (states - self.state_mean) / (self.state_std + 1e-8)
            action_norm = (actions[:, i, :] - self.action_mean) / (self.action_std + 1e-8)

            delta_pred_norm = self.predict_delta(state_norm, action_norm).detach().numpy()
            delta_pred = delta_pred_norm * self.delta_std + self.delta_mean

            next_states = states + delta_pred
            # Score the state REACHED by the action. The original scored the
            # pre-transition state, so the shared initial state added an
            # identical constant to every candidate and one horizon step was wasted.
            returns += self.get_reward(next_states)[0]
            states = next_states
        return actions[np.argmax(returns)][0]

    def train(self, buffer):
        """Fit the dynamics network to normalized state deltas sampled from `buffer`."""
        for _ in range(self.train_times):
            for states, actions, next_states in buffer.random_iterator(self.batch_size):
                state_norm = (states - self.state_mean) / (self.state_std + 1e-8)
                action_norm = (actions - self.action_mean) / (self.action_std + 1e-8)

                delta_pred_norm = self.predict_delta(state_norm, action_norm)
                # Target is (delta - mean) / std, matching the denormalization
                # `norm * std + mean` in choose_action. The original ADDED the
                # mean here, so a perfectly-fit model would predict delta + 2*mean.
                delta_norm = torch.asarray((next_states - states - self.delta_mean) / (self.delta_std + 1e-8))

                loss = ((delta_norm - delta_pred_norm) ** 2).mean()
                self.optimize(loss, self.optimizer)


class Buffer:
    """Flat replay buffer of (state, action, next_state, done) transitions."""

    def __init__(self):
        self.states_bf = []
        self.actions_bf = []
        self.next_states_bf = []
        self.dones_bf = []

    def store(self, state, action, next_state, done):
        """Append one transition; arrays are flattened to 1-D."""
        self.states_bf.append(np.ravel(state))
        self.actions_bf.append(np.ravel(action))
        self.next_states_bf.append(np.ravel(next_state))
        self.dones_bf.append(done)

    def append(self, other_dataset):
        """Extend this buffer in place with every transition of `other_dataset`."""
        self.states_bf += other_dataset.states_bf
        self.actions_bf += other_dataset.actions_bf
        self.next_states_bf += other_dataset.next_states_bf
        self.dones_bf += other_dataset.dones_bf

    def random_iterator(self, batch_size):
        """Yield shuffled minibatches (states, actions, next_states) of
        non-terminal transitions as float32 arrays.

        The list->array conversion is done once up front; the original rebuilt
        full copies of all three lists for every yielded batch.
        """
        all_indices = np.nonzero(np.logical_not(self.dones_bf))[0]
        np.random.shuffle(all_indices)
        states = np.asarray(self.states_bf, dtype=np.float32)
        actions = np.asarray(self.actions_bf, dtype=np.float32)
        next_states = np.asarray(self.next_states_bf, dtype=np.float32)
        for i in range(0, len(all_indices), batch_size):
            indices = all_indices[i:i + batch_size]
            yield states[indices], actions[indices], next_states[indices]


def main(env, max_epoch, policy=None, max_step=500):
    """Collect `max_epoch` episodes from `env` into a fresh Buffer.

    Actions come from `policy.choose_action` when a policy is given, otherwise
    they are sampled uniformly from the action space. Episodes are truncated
    after `max_step` steps (now a parameter; previously read from a
    module-level global of the same name, default unchanged).

    NOTE(review): the progress print still reads the module-level
    `global_epoch` counter maintained by the training loop.
    """
    buffer = Buffer()
    for epoch in range(max_epoch):
        sum_reward = 0
        # assumes env.reset() returns (obs, info); keep only the observation
        state = env.reset()[0]
        step = 0
        while True:
            step += 1
            if policy:
                action = policy.choose_action(state)
            else:
                action = np.random.uniform(env.action_space.low, env.action_space.high)
            next_state, reward, done, _ = env.step(action)
            # Truncate long episodes so rollouts always terminate.
            done = done or (step >= max_step)
            sum_reward += reward
            buffer.store(state, action, next_state, done)
            if done: break
            state = next_state
        print(f"{global_epoch}, {epoch}, {sum_reward}")
    return buffer


# Globals read by main(): current outer training iteration and episode cap.
global_epoch, loss = 0, 0
max_step = 500

# Guarding the driver lets the module be imported without launching MuJoCo.
if __name__ == "__main__":
    env = HalfCheetahEnv()
    # Seed the dynamics model with purely random rollouts.
    random_dataset = main(env, 5)
    agent = MPC(env, random_dataset)
    buffer = random_dataset  # alias: the buffer keeps growing across iterations
    # Alternate between fitting the model and collecting on-policy rollouts.
    for global_epoch in range(10):
        agent.train(buffer)  # returns None; the original pointlessly bound it to `loss`
        new_dataset = main(env, 5, agent)
        buffer.append(new_dataset)
