import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines3.common.callbacks import BaseCallback
from multiprocessing import Process, Queue
from route import RouteEnv

# Derive per-script output folders from this file's own name,
# e.g. running "foo.py" logs to ./Log/foo/ and saves models to ./Pak/foo/.
script_name, _ = os.path.splitext(os.path.basename(__file__))
log_directory = f'./Log/{script_name}/'
pak_directory = f'./Pak/{script_name}/'


class RewardLogger(BaseCallback):
    """Callback that records the mean per-step environment reward to the SB3 logger."""

    def __init__(self, verbose=0):
        super().__init__(verbose)

    def _on_step(self) -> bool:
        # `rewards` holds the reward vector of the (possibly vectorized) env
        # for the current rollout step.
        step_rewards = self.locals['rewards']
        self.logger.record('reward', step_rewards.mean())
        # True keeps training running; False would abort learn().
        return True

def save_model(model, id=None):
    """Persist `model` under pak_directory.

    With an id, saves as '<id>_model.pth'; otherwise as the shared 'model.pth'.
    """
    filename = 'model.pth' if id is None else f'{id}_model.pth'
    model.save(pak_directory + filename)

def load_model(id=None):
    """Load a PPO model from pak_directory.

    With an id, loads '<id>_model.pth'; otherwise the shared 'model.pth'.
    """
    filename = 'model.pth' if id is None else f'{id}_model.pth'
    return PPO.load(pak_directory + filename)

def train_model(car_id, train_step=150000, save_log=True):
    """Train a dedicated PPO model for one car and save it as '<car_id>_model.pth'.

    Args:
        car_id: index of the car; also used as the saved model's id.
        train_step: total environment timesteps to train for.
        save_log: when True, write TensorBoard logs under log_directory.
    """
    env = RouteEnv(car_id)
    # Pass tensorboard_log at construction instead of poking the attribute
    # afterwards; SB3 reads it when the logger is configured inside learn().
    model = PPO("MlpPolicy",
                env,
                verbose=1,
                learning_rate=1e-4,
                ent_coef=0.05,
                tensorboard_log=log_directory if save_log else None)
    try:
        model.learn(total_timesteps=train_step, callback=RewardLogger())
        save_model(model, car_id)
    finally:
        # Release the env even if training is interrupted.
        env.close()

def make_env(env_id):
    """Return a zero-argument factory building a RouteEnv for `env_id`.

    SB3 vectorized env wrappers expect a list of such callables.
    """
    return lambda: RouteEnv(env_id)

def train_model_subproc(num_envs, train_step=7500000, save_log=True):
    """Train one shared PPO model over `num_envs` vectorized envs; save as 'model.pth'.

    Args:
        num_envs: number of RouteEnv instances to vectorize (one per car id).
        train_step: total environment timesteps to train for.
        save_log: when True, write TensorBoard logs under log_directory.

    NOTE(review): despite the name, this builds a DummyVecEnv (sequential,
    in-process), not the SubprocVecEnv imported at the top of the file —
    confirm whether true subprocess parallelism was intended.
    """
    env = DummyVecEnv([make_env(i) for i in range(num_envs)])
    # tensorboard_log belongs in the constructor; SB3 reads it inside learn().
    model = PPO("MlpPolicy",
                env,
                verbose=1,
                learning_rate=1e-5,
                ent_coef=0.05,
                tensorboard_log=log_directory if save_log else None)
    try:
        model.learn(total_timesteps=train_step, callback=RewardLogger())
        save_model(model)
    finally:
        # Release all sub-envs even if training is interrupted.
        env.close()

def test_model(car_id, model_id, test_step=10):
    """Evaluate a saved model on car `car_id` and return its accumulated score.

    Args:
        car_id: car/environment to evaluate on.
        model_id: id of the saved model to load; None loads the shared 'model.pth'.
        test_step: number of evaluation rounds; each round runs up to 100 env
            steps, resetting whenever an episode ends.

    Returns:
        Sum of env.get_score() over every episode that finished as "resolved".
    """
    env = RouteEnv(car_id)
    # Use `is None` for the singleton check (was `== None`).
    model = load_model() if model_id is None else load_model(model_id)
    score = 0

    try:
        for _ in range(test_step):
            done = True  # force a reset on the first iteration of each round

            for _ in range(100):
                if done:
                    state, _ = env.reset()
                    total_reward = 0
                action, _states = model.predict(state, deterministic=True)
                state, reward, done, resolved, _ = env.step(action.item())
                total_reward += reward
                if done and resolved:
                    # state[2]/state[1] are presumably remaining energy/time —
                    # TODO(review): verify the indices against RouteEnv.
                    # (Also fixes the "Steill" typo in the message.)
                    print('car{}, reward: \t{:.4f}, path is {}, Still have Energy = {} Time = {}'.format(car_id, reward, env.path, state[2], state[1]))
                    score += env.get_score()
    finally:
        env.close()
    return score

THREADS:int = 8       # number of worker processes launched per training batch
CARS:int = 50         # number of cars (one RouteEnv / one model id per car)
TYPE:int = 0          # training mode: 0 = one model per car, 1 = cross-evaluate all models to find the best, 3 = train a single shared model
SAVELOG:bool = True   # whether to write TensorBoard logs under log_directory

def train():
    """Train models according to TYPE: per-car models (TYPE < 3) or one shared model.

    With THREADS > 0, cars are trained in batches of THREADS parallel
    processes; any remainder is trained sequentially afterwards.
    """
    if TYPE >= 3:
        train_model_subproc(CARS, save_log=SAVELOG)
        return

    if THREADS <= 0:
        for vehicle_id in range(CARS):
            train_model(vehicle_id, save_log=SAVELOG)
        return

    next_id = 0
    # BUG FIX: floor division, not round(). round(CARS / THREADS) can
    # overshoot (e.g. CARS=7, THREADS=2 -> round(3.5)=4 batches = 8 cars)
    # and train car ids that do not exist.
    for _ in range(CARS // THREADS):
        processes = []
        for _ in range(THREADS):
            p = Process(target=train_model, args=(next_id, 150000, SAVELOG))
            p.start()
            processes.append(p)
            next_id += 1
        for p in processes:
            p.join()

    # Handle the remainder when CARS is not a multiple of THREADS.
    while next_id < CARS:
        train_model(next_id, save_log=SAVELOG)
        next_id += 1

def test():
    """Evaluate trained models according to TYPE."""
    if TYPE == 0:
        # Each car is tested against its own model.
        for vehicle_id in range(CARS):
            test_model(vehicle_id, vehicle_id, 1)
    elif TYPE == 1:
        # For each car, record which model scores best, then report the model
        # preferred by the most cars.
        assess = []
        for vehicle_id in range(CARS):
            scores = [test_model(vehicle_id, model_id, 1)
                      for model_id in range(CARS)]
            assess.append(np.argmax(scores))
        # BUG FIX: np.unique alone yields *all* distinct model ids; use the
        # returned counts to pick the single most frequently chosen model.
        models, counts = np.unique(assess, return_counts=True)
        best_model = models[np.argmax(counts)]
        print('model: {}_model.pth'.format(best_model))
    elif TYPE == 3:
        # All cars share the single un-suffixed model.
        for vehicle_id in range(CARS):
            test_model(vehicle_id, None, 1)

def main():
    """Create the output directories, then run training followed by evaluation."""
    for directory in (log_directory, pak_directory):
        os.makedirs(directory, exist_ok=True)
    train()
    test()

if __name__ == "__main__":
    main()