# from torch._C import int64
import torch.nn as nn
import torch.nn.functional as F 
import gym
import torch
import numpy as np
from collections import deque
from copy import deepcopy
import torch.nn.functional as F
from tqdm import tqdm
GAMMA = 0.9  # discount factor for TD targets
INPUT_SHAPE = (1,3,160,210)  # network input layout: (batch, channels, H, W)
# SIZE = 79*104
COLLECTION_EPOCH = 2000  # not referenced in this file
MAX_LENGH = 1000  # replay-buffer capacity (sic: "LENGTH"; name kept — referenced below)
SYNCHROIZE = 30  # target-network sync period, in learn() calls (sic: "SYNCHRONIZE")
TRAIN_EPOCH = 20  # not referenced in this file
BATCH_SIZE = 32  # transitions sampled per learn() call
LR = 0.01  # Adam learning rate
EPSILON = 0.1  # probability of acting via the network (incremented per episode in __main__)
env = gym.make("Pong-v0")
env = env.unwrapped  # strip gym wrappers (e.g. episode time limits)
NUM_ACTION = env.action_space.n  # number of discrete actions (6 for Pong-v0)
PATH = "model.pth"  # eval-network checkpoint file
LOSSPATH = "loss.npy"  # per-episode loss history file
class DQN(nn.Module):
    """Three conv+batchnorm stages feeding a single linear head.

    Takes an (N, 3, h, w) image batch and returns an (N, outputs)
    tensor of per-action values.
    """

    def __init__(self, h, w, outputs):
        super(DQN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(32)

        # The head's fan-in depends on the spatial extent left after the
        # three stride-2 convolutions, so derive it from (h, w) here.
        # Each 5x5 stride-2 conv maps size -> (size - 4 - 1) // 2 + 1.
        convw, convh = w, h
        for _ in range(3):
            convw = (convw - 4 - 1) // 2 + 1
            convh = (convh - 4 - 1) // 2 + 1
        self.head = nn.Linear(convw * convh * 32, outputs)

    def forward(self, x):
        """Map an image batch to action values of shape (N, outputs).

        Called either with a single element to pick the next action, or
        with a batch during optimization.
        """
        stages = ((self.conv1, self.bn1),
                  (self.conv2, self.bn2),
                  (self.conv3, self.bn3))
        for conv, bn in stages:
            x = F.relu(bn(conv(x)))
        return self.head(x.flatten(start_dim=1))


class T:
    """A single transition (state, action, reward, next_state) for the replay buffer."""

    def __init__(self, state, action, reward, next_state) -> None:
        self.state = state
        self.action = action
        self.reward = reward
        # Bug fix: was `self.next_state = state`, which stored the current
        # state twice and silently dropped the successor state, corrupting
        # every TD target computed from the buffer.
        self.next_state = next_state

class Agent:
    """DQN agent: eval/target network pair, replay buffer, and one-step TD learning."""

    def __init__(self, load=False, path=PATH) -> None:
        self.target_network = DQN(h=160, w=210, outputs=6).float()
        self.eval_network = DQN(h=160, w=210, outputs=6).float()
        if load:
            # Load the same checkpoint into both networks so they start in sync.
            state_dict = torch.load(path)
            self.target_network.load_state_dict(state_dict)
            self.eval_network.load_state_dict(state_dict)
        self.learn_epoch = 0
        self.memory = deque(maxlen=MAX_LENGH)
        self.optimizer = torch.optim.Adam(self.eval_network.parameters(), lr=LR)
        self.loss = nn.MSELoss()
        self.truloss = None  # last training loss tensor; set by learn()

    def convert_state(self, state):
        """Reshape a raw observation into a (1, 3, 160, 210) float tensor.

        NOTE(review): np.reshape only relabels axes — it does NOT transpose
        an HWC frame into CHW order, so the channel planes are scrambled
        relative to the screen. Kept as-is because the saved model was
        trained on this layout; a real fix would use np.transpose.
        """
        input_state = np.reshape(state, INPUT_SHAPE)
        return torch.tensor(input_state).float()

    def make_decision(self, state, epi=EPSILON):
        """Choose an action for `state` (shape (210, 160, 3) from gym).

        With probability `epi`, sample from the softmax over eval-network
        Q-values; otherwise act uniformly at random. `epi` is increased
        over training, so behaviour gradually becomes greedier.
        """
        if np.random.rand() < epi:
            input_tensor = self.convert_state(state)
            action_value = self.eval_network(input_tensor)
            # dim=1 made explicit: implicit-dim F.softmax is deprecated.
            possibility = F.softmax(action_value, dim=1)
            return np.random.choice(
                np.arange(0, 6), size=1, p=possibility[0].detach().numpy()
            )[0]
        return env.action_space.sample()

    def store_transition(self, state, action, reward, next_state):
        """Convert states to tensors up front and append the transition.

        Doing the conversion here keeps learn()'s batch assembly simple.
        """
        self.memory.append(
            T(self.convert_state(state), action, reward,
              self.convert_state(next_state))
        )

    def loss_func(self, q_eval, q_target):
        """Return the mean squared error between two value sequences.

        Bug fix: the original accumulated into `self.loss`, clobbering the
        nn.MSELoss module that learn() depends on. Now uses a local
        accumulator and returns the result.
        """
        total = 0
        for q_e, q_t in zip(q_eval, q_target):
            total += (q_e - q_t) ** 2
        return total / len(q_eval)

    def learn(self):
        """Sample a batch and take one TD(0) gradient step on the eval
        network; every SYNCHROIZE calls, copy its weights into the target
        network."""
        self.optimizer.zero_grad()
        self.learn_epoch += 1
        if self.learn_epoch % SYNCHROIZE == 0:
            self.target_network.load_state_dict(self.eval_network.state_dict())
        if len(self.memory) < BATCH_SIZE:
            # Not enough transitions yet: np.random.choice with
            # replace=False would raise on a short buffer.
            return
        index = np.random.choice(len(self.memory), BATCH_SIZE, replace=False)
        batch = [self.memory[i] for i in index]
        # One torch.cat per field instead of the original O(n^2)
        # pairwise concatenation loop.
        state = torch.cat([t.state for t in batch])
        next_state = torch.cat([t.next_state for t in batch])
        action = torch.tensor([t.action for t in batch], dtype=torch.int64)
        reward = torch.tensor([t.reward for t in batch])

        # TD target: r + gamma * max_a' Q_target(s', a').
        # NOTE(review): transitions carry no terminal flag, so the
        # bootstrap term is applied even on episode-ending steps.
        eval_q = self.eval_network(state)
        target_q = self.target_network(next_state).detach()
        target_max = torch.max(target_q, dim=1).values

        # Pick each row's Q-value for the action actually taken.
        chosen_q = torch.gather(eval_q, 1, action.view(BATCH_SIZE, 1))
        chosen_q = chosen_q.view(BATCH_SIZE)

        q_target = reward + GAMMA * target_max
        self.truloss = self.loss(q_target, chosen_q)
        self.truloss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()

if __name__ == "__main__":
    state = env.reset()
    # load=True assumes model.pth already exists; run once with
    # load=False to bootstrap a fresh model — TODO confirm workflow.
    Agent_inst = Agent(load=True)
    losses = []
    # `episode`, not `iter`: avoid shadowing the builtin.
    for episode in tqdm(range(5)):
        while True:
            env.render(mode="human")
            # epi=1: always act through the network here.
            action = Agent_inst.make_decision(state=state, epi=1)
            next_state, reward, done, _ = env.step(action)
            Agent_inst.store_transition(state, action, reward, next_state)
            state = next_state
            if done:
                Agent_inst.learn()
                # Bug fix: .item() detaches to a plain float. Appending the
                # raw loss tensor kept every autograd graph alive and made
                # np.array(...) fail on grad-requiring tensors at save time.
                losses.append(Agent_inst.truloss.item())
                state = env.reset()
                EPSILON += 0.003  # schedule toward greedier play
                break
    torch.save(Agent_inst.eval_network.state_dict(), PATH)
    with open(LOSSPATH, "wb") as fp:
        np.save(fp, np.array(losses))
