from create import *
from configg import *
import gym
import numpy as np
import time
from model import DDPG
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Entry point: build config/device/timestamp, construct the DDPG agent,
    # optionally warm up the replay buffer, train, save, and evaluate.
    # NOTE(review): `creaenv`, `seed`, `cut`, `total_rewards`, `loss_mse`,
    # `loss_J`, `test_rewards`, `which_to_render` and `torch` are not defined
    # in this file — presumably supplied by the star imports from
    # `create` / `configg`; confirm they all exist there.
    ARGSSS, DEVICE, present = creaenv()
    env = gym.make(ARGSSS.env)
    STATE_dim = env.observation_space.shape[0]
    ACTION_dim = env.action_space.shape[0]
    MAX_action = env.action_space.high[0]
    MIN_action = env.action_space.low[0]
    # Seed env/torch/numpy for reproducibility (old gym API: env.seed()).
    env.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)
    print(f"Env: {ARGSSS.env}")
    FACTOR = DDPG(STATE_dim, ACTION_dim, DEVICE, env, ARGSSS)
    if ARGSSS.load_path != "":
        FACTOR.load_model(ARGSSS.load_path)

    # Train when starting from scratch, or when a loaded model should keep
    # training. This predicate was duplicated three times below as
    # `load_path == "" or (load_path != "" and train)`; hoist the
    # simplified equivalent once.
    should_train = ARGSSS.load_path == "" or ARGSSS.train

    if should_train:
        # --- Warm-up: fill the replay buffer with random-policy transitions ---
        if ARGSSS.warm_episode > 0:
            print(f"Warm-up for {ARGSSS.warm_episode} episodes...")
        warmenv = gym.make(ARGSSS.env)
        for episode in range(ARGSSS.warm_episode):
            state = warmenv.reset()
            for step in range(ARGSSS.max_step):
                action = FACTOR.random_action()
                next_state, reward, done, _ = warmenv.step(action)
                FACTOR.save_replay(state, action, reward, next_state, done)
                state = next_state
                if done:
                    break
        if ARGSSS.warm_episode > 0:
            print("Warm-up Done...")

    if should_train:
        # --- Main training loop ---
        print("Begin Learning..")
        begin = time.time()
        for episode in range(ARGSSS.episode):
            start = time.time()
            all_reward = 0
            state = env.reset()
            lossLs = []  # per-step critic (MSE) losses for this episode
            lossJs = []  # per-step actor (policy) losses for this episode
            for step in range(ARGSSS.max_step):
                if ARGSSS.render and (episode + 1) % ARGSSS.render_interval == 0:
                    env.render()
                action = FACTOR.get_action(state, noise=True)
                next_state, reward, done, _ = env.step(action)
                FACTOR.save_replay(state, action, reward, next_state, done)
                all_reward += reward
                state = next_state
                # learn() may return None losses (e.g. before the replay
                # buffer holds enough samples) — skip those.
                # BUGFIX: the actor loss was previously gated on `lossL`
                # instead of `lossJ`.
                lossL, lossJ = FACTOR.learn()
                if lossL is not None:
                    lossLs.append(lossL)
                if lossJ is not None:
                    lossJs.append(lossJ)
                if done:
                    break
            end = time.time()
            FACTOR.update_eps()
            total_rewards.append(all_reward)
            # BUGFIX: guard against ZeroDivisionError when no learn() call
            # produced a loss during the whole episode; NaN keeps the
            # per-episode loss lists aligned with total_rewards.
            loss_mse.append(sum(lossLs) / len(lossLs) if lossLs else float("nan"))
            loss_J.append(sum(lossJs) / len(lossJs) if lossJs else float("nan"))
            print(f"Episode {episode} rewards:{all_reward:.2f}, spend:{(end - start):.2f}seconds.",
                  f"Critic MSEloss:{loss_mse[-1]:.2f},Actor Meanloss:{loss_J[-1]:.2f}")
        print("total_rewards:", total_rewards, "Spend: ", (time.time() - begin) / 60, "minutes")

        # Plot training rewards averaged over windows of `cut` episodes.
        plt.figure()
        plt.plot([i * cut for i in range(ARGSSS.episode // cut)],
                 [np.mean(total_rewards[i*cut:(i+1)*cut]) for i in range(ARGSSS.episode // cut)], color='red')
        plt.title(f"Training-{ARGSSS.env}-{ARGSSS.noise}-Gamma{ARGSSS.gamma}-Tau{ARGSSS.tau}-{present}")
        plt.savefig(f"Training-{ARGSSS.env}-{ARGSSS.noise}-Gamma{ARGSSS.gamma}-Tau{ARGSSS.tau}-{present}.png")
        plt.show()

    if ARGSSS.save and should_train:
        FACTOR.save_model()

    if ARGSSS.test_episode > 0 or ARGSSS.eval or ARGSSS.load_path:
        # --- Evaluation: run the deterministic (noise-free) policy and
        # record videos via the (old gym API) Monitor wrapper. ---
        print(f"Test for {ARGSSS.test_episode} episodes...")
        testenv = gym.make(ARGSSS.env)
        testenv = gym.wrappers.Monitor(
            testenv,
            f'./wrapper/Testing-{ARGSSS.env}-{ARGSSS.noise}-Gamma{ARGSSS.gamma}-Tau{ARGSSS.tau}-{present}',
            force=True,
            video_callable=which_to_render,
            mode="evaluation"
        )
        for episode in range(ARGSSS.test_episode):
            state = testenv.reset()
            test_reward = 0
            for step in range(ARGSSS.max_step):
                testenv.render()
                action = FACTOR.get_action(state, noise=False)
                next_state, reward, done, _ = testenv.step(action)
                # NOTE(review): test transitions are also pushed into the
                # replay buffer — confirm this is intentional.
                FACTOR.save_replay(state, action, reward, next_state, done)
                test_reward += reward
                state = next_state
                if done:
                    break
            test_rewards.append(test_reward)
            print(f"Test {episode} rewards:", "%.2f" % test_reward)
            time.sleep(1)

        print("Test Done...")
        # Plot test rewards averaged over windows of `cut` episodes.
        plt.figure()
        plt.plot([i * cut for i in range(ARGSSS.test_episode // cut)],
                 [np.mean(test_rewards[i*cut:(i+1)*cut]) for i in range(ARGSSS.test_episode // cut)], color='red')
        plt.title(f"Testing-{ARGSSS.env}-{ARGSSS.noise}-Gamma{ARGSSS.gamma}-Tau{ARGSSS.tau}-{present}")
        plt.savefig(f"Testing-{ARGSSS.env}-{ARGSSS.noise}-Gamma{ARGSSS.gamma}-Tau{ARGSSS.tau}-{present}.png")
        plt.show()
