import os
import sys
import argparse
import numpy as np
import gym
import highway_env
# from random import sample
# from CarlaLCEnv import CarlaEnv, PlayGame

import matplotlib.pyplot as plt
import copy
from itertools import count
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from tensorboardX import SummaryWriter
import pickle
from models import *

# add packages
import collections
import random
from algorithm.dqn import DQN
import warnings  # silence FutureWarning noise
warnings.simplefilter(action='ignore', category=FutureWarning)

# Pin all CUDA work to GPU 0.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["SDL_VIDEODRIVER"] ="dummy"  # headless SDL driver — required for Huawei Cloud training/testing (no display)
# hyper-parameters (set below in train())

# Experience replay buffer implementation
class ReplayBuffer(object):
    """Fixed-capacity FIFO experience replay buffer for off-policy RL.

    Once `capacity` transitions are stored, the oldest ones are evicted
    automatically by the underlying deque.
    """

    def __init__(self, capacity):
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Store one (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample `batch_size` transitions.

        States and next-states are stacked into numpy arrays; actions,
        rewards and done flags are returned as tuples.
        """
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        return np.array(states), actions, rewards, np.array(next_states), dones

    def size(self):
        """Number of transitions currently held."""
        return len(self.buffer)
   



class EgoAttention(nn.Module):
    # Placeholder stub — no attention mechanism is implemented yet.
    pass
    # If attention is desired, implement the ego-vehicle Attention module here.


# NOTE(review): dead code — commented-out DDQN skeleton kept for reference;
# the live implementation is imported from algorithm.dqn.DQN.
''' class DDQN(object):
    def __init__(self, num_states, num_actions, config):
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # 编写初始化

    def choose_action(self, state):
        #编写选择动作
        return action



    def learn(self):
        pass
       #编写agent算法训练过程

    def save(self, directory, i):
        torch.save(self.eval_net.state_dict(), directory + 'dqn{}.pth'.format(i))
        # print("====================================")
        # print("Model has been saved...")
        # print("====================================")

    def load(self, directory, i):
        self.eval_net.load_state_dict(torch.load(directory + 'dqn{}.pth'.format(i)))
        print("====================================")
        print("Model has been loaded...")
        print("====================================")
 '''

def parse_args(args):
    """Parse command-line arguments for training/testing runs.

    Returns an argparse.Namespace with mode, algorithm and loop settings.
    """
    p = argparse.ArgumentParser(description='Training parameters')

    # Run mode and algorithm selection.
    p.add_argument('--mode', default='train', type=str, choices=['train', 'test'])
    p.add_argument('--type', type=str, default='DDQN',
                   help="Algorithm to train from {A2C, A3C, DDQN, DDPG}")
    p.add_argument('--is_atari', dest='is_atari', action='store_true', help="Atari Environment")
    p.add_argument('--with_PER', dest='with_per', action='store_true',
                   help="Use Prioritized Experience Replay (DDQN + PER)")
    p.add_argument('--dueling', dest='dueling', action='store_true',
                   help="Use a Dueling Architecture (DDQN)")

    # Training-loop hyper-parameters.
    p.add_argument('--nb_episodes', type=int, default=5000, help="Number of training episodes")
    p.add_argument('--batch_size', type=int, default=64, help="Batch size (experience replay)")
    p.add_argument('--consecutive_frames', type=int, default=1,
                   help="Number of consecutive frames (action repeat)")
    p.add_argument('--training_interval', type=int, default=30, help="Network training frequency")
    p.add_argument('--n_threads', type=int, default=8, help="Number of threads (A3C)")

    # Logging / environment options.
    p.add_argument('--gather_stats', dest='gather_stats', action='store_true',
                   help="Compute Average reward per episode (slower)")
    p.add_argument('--render', dest='render', action='store_true',
                   help="Render environment while training")
    p.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4', help="OpenAI Gym Environment")
    p.add_argument('--gpu', type=int, default=0, help='GPU ID')

    p.set_defaults(render=False)
    return p.parse_args(args)


def train(args=None):
    """Train a DQN agent on the highway-v0 environment.

    Parses command-line arguments, builds the environment and agent, runs
    the training loop with experience replay, then plots and saves the
    per-episode reward curve and finally saves the trained weights.

    Args:
        args: optional list of CLI argument strings; defaults to sys.argv[1:].
    """
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # Configuration handed both to highway-env and to the DQN agent:
    # the "in"/"layers"/"out" keys describe the agent's MLP.
    env_config = {
                "id": "highway-v0",
                "import_module": "highway_env",
                "lanes_count": 3,
                "vehicles_count": 50,   # number of surrounding vehicles
                "duration": 50,         # steps per episode
                "other_vehicles_type": "highway_env.vehicle.behavior.IDMVehicle",
                "observation": {
                    "type": "Kinematics",
                    "vehicles_count": 15,
                    "features": ["presence", "x", "y", "vx", "vy", "cos_h", "sin_h"],
                    "features_range": {
                        "x": [-100, 100],
                        "y": [-100, 100],
                        "vx": [-20, 20],
                        "vy": [-20, 20]
                    },
                    # "absolute": True,
                    "order": "shuffled"
                },
                "screen_width": 600,  # [px]
                "screen_height": 150,  # [px]
                # "destination": "o1"

                "in": 15*7,         # network input dim: observation vehicles_count * len(features)
                "layers": [64, 64], # hidden-layer sizes
                "out": 5            # action dim
            }

    env = gym.make("highway-v0")
    env.unwrapped.configure(env_config)

    # Training hyper-parameters.
    batch_size = 64
    buffer_size = 10000
    episodes = 500      # try different episode counts (1000-2000)
    minimal_size = 100  # warm-up transitions collected before learning starts

    # Seed everything for reproducibility.
    random.seed(0)
    np.random.seed(0)
    env.seed(0)
    torch.manual_seed(0)

    replay_buffer = ReplayBuffer(buffer_size)
    agent = DQN(env_config["in"], env_config["out"], env_config)

    reward_list = []
    for i_episode in range(episodes):
        # Flatten the (vehicles, features) observation to a (1, in) row
        # immediately so downstream code never has to reshape.
        state = env.reset().reshape(1, -1)
        episode_reward = 0
        done = False
        while not done:
            action = agent.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            replay_buffer.add(state, action, reward, next_state.reshape(1, -1), done)
            state = next_state.reshape(1, -1)
            episode_reward += reward
            # Only start training the Q-network once the buffer holds
            # enough transitions.
            if replay_buffer.size() > minimal_size:
                b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                transition_dict = {
                    'states': b_s,
                    'actions': b_a,
                    'next_states': b_ns,
                    'rewards': b_r,
                    'dones': b_d
                }
                agent.learn(transition_dict)
        reward_list.append(episode_reward)
        print("i_episode:{}, episode_reward:{}".format(i_episode, episode_reward))

    # Plot the learning curve. Bug fix: the title previously interpolated
    # the entire reward list; use the environment id instead.
    episodes_list = list(range(len(reward_list)))
    plt.plot(episodes_list, reward_list)
    plt.xlabel('Episodes')
    plt.ylabel('Rewards')
    plt.title('DQN on {}'.format(env_config["id"]))
    # Bug fix: savefig raised if the output directory did not exist.
    os.makedirs("./results/dqn", exist_ok=True)
    plt.savefig("./results/dqn/dqn.jpg")
    plt.show()

    agent.save()

# NOTE(review): dead code — commented-out evaluation routine kept for
# reference; it references an undefined `dqn` and would need rework before use.
'''
def test(args=None):
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    env_config = {
                "id": "highway-v0",
                "import_module": "highway_env",
                "lanes_count": 3,
                "vehicles_count": 50,
                "duration": 50,
                "other_vehicles_type": "highway_env.vehicle.behavior.IDMVehicle",
                "observation": {
                    "type": "Kinematics",
                    "vehicles_count": 15,
                    "features": ["presence", "x", "y", "vx", "vy", "cos_h", "sin_h"],
                    # "features_range": {
                    #     "x": [-100, 100],
                    #     "y": [-100, 100],
                    #     "vx": [-20, 20],
                    #     "vy": [-20, 20]
                    # },
                    # "absolute": True,
                    # "order": "shuffled"
                },
                "screen_width": 600,  # [px]
                "screen_height": 150,  # [px]
                # "destination": "o1"
            }
    env = gym.make("highway-v0")
    env.unwrapped.configure(env_config)
    directory = './weights_with_ego_attention/'
    dqn.epsilon = 0
    dqn.load(directory, 1000)

    #编写测试过程


    for _ in range(10):
        state = env.reset()
        # state = state.flatten()
        # for _ in range(5):
        #     _, _, done = env.step(0)
        ep_reward = 0
        lane_change = 0
        for t in count():
            action = dqn.choose_action(state)
            if action in [0, 2]:
                lane_change += 1
            next_state, reward, done, _ = env.step(action)
            # next_state = next_state.flatten()
            ep_reward += reward
            if done:
                print("step: {}, ep_reward: {}".format(t, ep_reward))
                with open(directory+'result.txt', 'a') as result:
                    result.write("step: {}, ep_reward: {}, lane change: {}".format(t, ep_reward, lane_change))
                    result.write('\n')
                break
            state = next_state
            #env.render()
'''

# Script entry point: run training (the evaluation call is currently disabled).
if __name__ == "__main__":
    train()
 #   test()
