import sys
import argparse
import gym
import gym_env
import os
import datetime
import numpy as np
import torch
import ast

from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3 import SAC
from custom_policy import No_CNN
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from ui_train import TrainingUi
from configparser import ConfigParser


class TrainingThread(QtCore.QThread):
    """Qt worker thread that trains a SAC agent in an AirSim gym environment.

    All hyper-parameters are read from an INI config file. When
    ``continue_train`` is True the thread resumes from
    ``<config dir>/models/model.zip``; otherwise it creates a fresh,
    timestamped project directory under ``result/``.
    """

    def __init__(self, config_path, continue_train=False):
        """
        Args:
            config_path: path to the INI configuration file.
            continue_train: resume training inside the existing project
                directory containing ``config_path`` instead of starting
                a new run.
        """
        super(TrainingThread, self).__init__()
        self.cfg = ConfigParser()
        self.cfg.read(config_path)
        self.continue_train = continue_train
        env_name = self.cfg.get("options", "env_name")
        self.project_name = env_name
        self.env = gym.make("airsim-env-v0", cfg=self.cfg)

        if continue_train:
            # Resume: reuse the project folder the config file lives in.
            self.project_path = os.path.dirname(config_path)
        else:
            # Fresh run: result/<env_name>_<YYYY_MM_DD_HH_MM>
            now = datetime.datetime.now()
            now_string = now.strftime('%Y_%m_%d_%H_%M')
            self.project_path = f"result/{self.project_name}_{now_string}"

    def run(self):
        """Thread body: build (or load) the SAC model, then train in stages,
        saving a checkpoint after each stage and a final ``model.zip``."""
        log_path = f"{self.project_path}/logs"
        model_path = f"{self.project_path}/models"
        data_path = f"{self.project_path}/data"
        if not self.continue_train:
            # New run: create the folder layout and snapshot the exact
            # config used, so the run is reproducible later.
            os.makedirs(log_path, exist_ok=True)
            os.makedirs(model_path, exist_ok=True)
            os.makedirs(data_path, exist_ok=True)
            with open(f"{self.project_path}/config.ini", "w") as file:
                self.cfg.write(file)

        if self.continue_train:
            self.model = SAC.load(
                f"{self.project_path}/models/model.zip", env=self.env)
        else:
            # Custom feature extractor: concatenates the dynamic-model state
            # features with the (non-)CNN image features.
            feature_num_state = self.env.dynamic_model.state_feature_length
            feature_num_cnn = self.cfg.getint('options', 'cnn_feature_num')
            policy_kwargs = dict(
                features_extractor_class=No_CNN,
                features_extractor_kwargs=dict(
                    features_dim=feature_num_state + feature_num_cnn,
                    state_feature_dim=feature_num_state
                ),
                activation_fn=torch.nn.Tanh,
                net_arch=ast.literal_eval(self.cfg.get("options", "net_arch")))
            # Gaussian exploration noise, one sigma per action dimension.
            n_actions = self.env.action_space.shape[-1]
            noise_sigma = self.cfg.getfloat(
                'DRL', 'action_noise_sigma') * np.ones(n_actions)
            action_noise = NormalActionNoise(
                mean=np.zeros(n_actions), sigma=noise_sigma)
            self.model = SAC(
                policy="MlpPolicy",
                env=self.env,
                action_noise=action_noise,
                policy_kwargs=policy_kwargs,
                buffer_size=self.cfg.getint("DRL", "buffer_size"),
                gamma=self.cfg.getfloat("DRL", "gamma"),
                learning_starts=self.cfg.getint('DRL', 'learning_starts'),
                learning_rate=self.cfg.getfloat('DRL', 'learning_rate'),
                batch_size=self.cfg.getint('DRL', 'batch_size'),
                train_freq=(self.cfg.getint('DRL', 'train_freq'), 'step'),
                gradient_steps=self.cfg.getint('DRL', 'gradient_steps'),
                tensorboard_log=log_path,
                verbose=1
            )
        total_timesteps = self.cfg.getint("options", "total_timesteps")
        # NOTE(review): `.model` receives a *path* string while the next line
        # sets `.data_path` — this looks like it should be
        # `self.env.model_path`; confirm against the gym_env implementation.
        self.env.model = model_path
        self.env.data_path = data_path
        stage = self.cfg.getint("options", "model_save_stage")
        for i in range(0, total_timesteps, stage):
            # Last stage may be shorter if total_timesteps % stage != 0.
            steps = min(stage, total_timesteps - i)
            # Fixed: report the stage index (1-based), not the raw timestep
            # offset (the original printed e.g. "training 501 stage").
            print(f"training stage {i // stage + 1}")
            self.model.learn(steps, reset_num_timesteps=False)
            self.model.save(f"{model_path}/model_{i + steps}.zip")
        self.model.save(f"{model_path}/model.zip")
        print("training finished")

def main():
    """Launch the training GUI and start the background training thread."""
    from interceptor import LogInterceptor, WxLog

    # Select your config file here.
    # config_file = 'result/NewYork_2024_05_17_18_06/config.ini'
    # config_file = 'configs/config_Maze_SimpleMultirotor_2D.ini'
    config_file = "config.ini"

    # 1. Create the Qt application and main window.
    app = QtWidgets.QApplication(sys.argv)
    gui = TrainingUi(config_file)
    gui.show()
    # Keep a reference so the log interceptor stays alive for the app lifetime.
    li = LogInterceptor(WxLog())

    # 2. Create the training thread and wire the env's Qt signals to the
    #    GUI plotting callbacks.
    training_thread = TrainingThread(config_file, continue_train=False)
    training_thread.env.action_signal.connect(gui.action_cb)
    training_thread.env.state_signal.connect(gui.state_cb)
    training_thread.env.attitude_signal.connect(gui.attitude_plot_cb)
    training_thread.env.reward_signal.connect(gui.reward_plot_cb)
    training_thread.env.pose_signal.connect(gui.traj_plot_cb)

    # 3. Start training and hand control to the Qt event loop.
    # (Removed a dead ConfigParser that was created and read but never used.)
    training_thread.start()

    sys.exit(app.exec_())


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print('system exit')
