import numpy as np
import random
from collections import deque
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
# Discrete action ids -> 2-D grid deltas; (0, 0) is "stay put".
# Presumably (row, col) offsets for up/down/left/right — TODO confirm against the environment.
action_map={0: (-1, 0),1: (1, 0),2: (0, -1),3: (0, 1),4: (0, 0)}
'''
Random brain
'''
class Brain_Random:
  """Baseline agent: uniform random actions, no memory, no learning."""
  def __init__(self, action_map=action_map):
    self.action_map = action_map
  def choose_action(self):
    # Uniform draw over the discrete action ids.
    action_ids = list(self.action_map)
    return random.choice(action_ids)
  def remember(self, state, action, reward, next_state, done):
    # Transitions are intentionally discarded.
    return
  def replay(self, batch_size):
    # Nothing to train; report a zero loss for API parity.
    return 0
'''
Deep Q-learning brain with a target network.
(Note: the target network both selects and evaluates next-state actions,
so this is standard DQN rather than double Q-learning.)
'''
class DQN(nn.Module):
  """Small convolutional Q-network: two conv layers plus one linear head.

  Input:  (batch, observation_channel, side, side), side = 2*observation_radius + 1.
  Output: (batch, action_dimension) Q-values.
  """
  def __init__(self, observation_radius, observation_channel, action_dimension):
    super().__init__()
    side = observation_radius * 2 + 1
    self.conv1 = nn.Conv2d(observation_channel, 8, kernel_size=3, padding='same')
    self.conv2 = nn.Conv2d(8, 16, kernel_size=3, padding='same')
    # 'same' padding keeps the spatial size, so the flattened width is 16*side*side.
    self.fc = nn.Linear(16 * side * side, action_dimension)

  def forward(self, x):
    hidden = F.relu(self.conv1(x))
    hidden = F.relu(self.conv2(hidden))
    flat = hidden.view(hidden.size(0), -1)
    return self.fc(flat)
class Brain_QLearning:
  """DQN agent: online network, hard-copied target network, replay memory."""
  def __init__(self, observation_radius=6, observation_channel=3, action_map=action_map, learning_rate=0.001, discount_factor=0.9, epsilon=0.1):
    self.observation_radius = observation_radius
    self.observation_channel = observation_channel
    self.action_map = action_map
    self.learning_rate = learning_rate
    self.discount_factor = discount_factor
    self.epsilon = epsilon
    self.q_network = DQN(observation_radius, observation_channel, len(action_map))
    self.target_network = DQN(observation_radius, observation_channel, len(action_map))
    self.update_target_network()  # start with target == online weights
    self.optimizer = optim.Adam(self.q_network.parameters(), lr=learning_rate)
    self.criterion = nn.MSELoss()
    self.memory = deque(maxlen=2048)

  def choose_action(self, state):
    """Epsilon-greedy selection; returns a discrete action id."""
    if np.random.rand() < self.epsilon:
      return random.choice(list(self.action_map.keys()))
    state_tensor = torch.FloatTensor(state).unsqueeze(0)
    with torch.no_grad():
      q_values = self.q_network(state_tensor)
    return torch.argmax(q_values).item()

  def remember(self, state, action, reward, next_state, done):
    """Store one transition; the deque drops the oldest beyond 2048 entries."""
    self.memory.append((state, action, reward, next_state, done))

  def replay(self, batch_size):
    """Sample a minibatch and take one TD(0) gradient step.

    Returns the loss tensor, or None if fewer than batch_size transitions
    are stored.
    """
    if len(self.memory) < batch_size:
      return
    batch = random.sample(self.memory, batch_size)
    states = torch.FloatTensor(np.array([t[0] for t in batch]))
    actions = torch.LongTensor([t[1] for t in batch]).unsqueeze(1)
    rewards = torch.FloatTensor([t[2] for t in batch]).unsqueeze(1)
    next_states = torch.FloatTensor(np.array([t[3] for t in batch]))
    dones = torch.FloatTensor([t[4] for t in batch]).unsqueeze(1)
    # Q(s, a) of the taken actions only.
    current_q = self.q_network(states).gather(1, actions)
    with torch.no_grad():
      # Bootstrapped target from the frozen target network; zeroed at terminals.
      next_q = self.target_network(next_states).max(1, keepdim=True)[0]
      target_q = rewards + (1 - dones) * self.discount_factor * next_q
    # Fix: the original scatter_'d targets into a clone() of the full Q table that
    # still carried the online network's autograd graph, and averaged the error
    # over all batch*actions entries (scaling gradients down by the action count).
    # Standard DQN regresses only the chosen-action Q-values on a detached target.
    loss = self.criterion(current_q, target_q)
    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()
    return loss

  def update_target_network(self):
    """Hard-copy the online weights into the target network."""
    self.target_network.load_state_dict(self.q_network.state_dict())

  def save_weight(self, x):
    # Plain string key; the original used a pointless f-string literal.
    torch.save({'weight': self.q_network.state_dict()}, x)

  def load_weight(self, x):
    checkpoint = torch.load(x)
    self.q_network.load_state_dict(checkpoint["weight"])
    self.target_network.load_state_dict(checkpoint["weight"])
'''
DDPG brain
'''
class DDPG_Network(nn.Module):
  """Actor-critic network sharing one conv trunk.

  mode=0 (actor):   x is (batch, C, side, side) -> (batch, 3) action:
                    two tanh direction components in [-1, 1] plus a sigmoid
                    velocity scaled to [0, max_velocity].
  mode!=0 (critic): x is [states, actions] -> (batch, 1) Q-value.
  """
  def __init__(self, observation_radius, observation_channel, max_velocity):
    super().__init__()
    side = observation_radius * 2 + 1
    flat = 16 * side * side
    self.conv1 = nn.Conv2d(observation_channel, 8, kernel_size=3, padding='same')
    self.conv2 = nn.Conv2d(8, 16, kernel_size=3, padding='same')
    self.fc_direction = nn.Linear(flat, 2)
    self.fc_velocity = nn.Linear(flat, 1)
    # Critic head consumes conv features concatenated with the 3-dim action.
    self.fc_value = nn.Linear(flat + 3, 1)
    self.max_velocity = max_velocity

  def forward(self, x, mode=0):
    if mode == 0:
      h = F.relu(self.conv1(x))
      h = F.relu(self.conv2(h))
      h = h.view(h.size(0), -1)
      # torch.tanh / torch.sigmoid replace the deprecated F.tanh / F.sigmoid.
      direction = torch.tanh(self.fc_direction(h))
      velocity = torch.sigmoid(self.fc_velocity(h)) * self.max_velocity
      return torch.cat([direction, velocity], -1)
    y = F.relu(self.conv1(x[0]))
    y = F.relu(self.conv2(y))
    y = y.view(y.size(0), -1)
    return self.fc_value(torch.cat([y, x[1]], -1))
class DDPG_Network2(nn.Module):
  """Actor-critic network with an independent conv trunk per head.

  mode=0 (actor):   x is (batch, C, side, side) -> (batch, 3) action:
                    two tanh direction components in [-1, 1] plus a sigmoid
                    velocity scaled to [0, max_velocity].
  mode!=0 (critic): x is [states, actions] -> (batch, 1) Q-value.
  """
  def __init__(self, observation_radius, observation_channel, max_velocity):
    super().__init__()
    side = observation_radius * 2 + 1
    flat = 16 * side * side
    self.conv_1_direction = nn.Conv2d(observation_channel, 8, kernel_size=3, padding='same')
    self.conv_2_direction = nn.Conv2d(8, 16, kernel_size=3, padding='same')
    self.fc_direction = nn.Linear(flat, 2)
    self.conv_1_velocity = nn.Conv2d(observation_channel, 8, kernel_size=3, padding='same')
    self.conv_2_velocity = nn.Conv2d(8, 16, kernel_size=3, padding='same')
    self.fc_velocity = nn.Linear(flat, 1)
    self.conv_1_value = nn.Conv2d(observation_channel, 8, kernel_size=3, padding='same')
    self.conv_2_value = nn.Conv2d(8, 16, kernel_size=3, padding='same')
    # Critic head consumes its own conv features plus the 3-dim action.
    self.fc_value = nn.Linear(flat + 3, 1)
    self.max_velocity = max_velocity

  def forward(self, x, mode=0):
    if mode == 0:
      direction = F.relu(self.conv_1_direction(x))
      direction = F.relu(self.conv_2_direction(direction))
      direction = direction.view(x.size(0), -1)
      # torch.tanh replaces the deprecated F.tanh.
      direction = torch.tanh(self.fc_direction(direction))

      velocity = F.relu(self.conv_1_velocity(x))
      velocity = F.relu(self.conv_2_velocity(velocity))
      velocity = velocity.view(x.size(0), -1)
      # torch.sigmoid replaces the deprecated F.sigmoid.
      velocity = torch.sigmoid(self.fc_velocity(velocity)) * self.max_velocity
      return torch.cat([direction, velocity], -1)
    y = F.relu(self.conv_1_value(x[0]))
    y = F.relu(self.conv_2_value(y))
    y = y.view(y.size(0), -1)
    return self.fc_value(torch.cat([y, x[1]], -1))
class Brain_DDPG:
  """DDPG-style actor-critic agent over a continuous (dx, dy, velocity) action."""
  def __init__(self, observation_radius=6, observation_channel=3, learning_rate=0.001, discount_factor=0.9, epsilon=0.1, max_velocity=1, mode=0):
    self.observation_radius = observation_radius
    self.observation_channel = observation_channel
    self.action_map = action_map  # unused here; kept for API parity with the discrete brains
    self.learning_rate = learning_rate
    self.discount_factor = discount_factor
    self.epsilon = epsilon
    self.max_velocity = max_velocity
    # mode 0: shared conv trunk; otherwise: one trunk per head.
    if mode == 0:
      self.network = DDPG_Network(observation_radius, observation_channel, max_velocity=max_velocity)
    else:
      self.network = DDPG_Network2(observation_radius, observation_channel, max_velocity=max_velocity)
    # NOTE(review): both optimizers span *all* network parameters, so the actor
    # step also moves critic weights (and vice versa); standard DDPG uses
    # disjoint actor/critic parameter groups — confirm this sharing is intended.
    self.optimizer_actor = optim.Adam(self.network.parameters(), lr=learning_rate)
    self.optimizer_value = optim.Adam(self.network.parameters(), lr=learning_rate)
    self.criterion = nn.MSELoss()
    self.memory = deque(maxlen=2048)

  def choose_action(self, state):
    """Epsilon-greedy: random exploratory action, otherwise the actor output."""
    if np.random.rand() < self.epsilon:
      action = np.random.rand(3)
      action[:2] = action[:2] * 2 - 1  # direction components in [-1, 1]
      # Fix: scale exploratory velocity to [0, max_velocity]; it was always drawn
      # from [0, 1] regardless of max_velocity, unlike the actor's output range.
      action[2] *= self.max_velocity
      return action
    state_tensor = torch.FloatTensor(state).unsqueeze(0)
    with torch.no_grad():
      action = self.network(state_tensor, mode=0)
    return action.numpy()[0]

  def remember(self, state, action, reward, next_state, done):
    """Store one transition; the deque drops the oldest beyond 2048 entries."""
    self.memory.append((state, action, reward, next_state, done))

  def replay(self, batch_size):
    """One critic step (TD(0) on Q) followed by one actor step (maximize Q).

    Returns the (negated) actor loss tensor, or None when memory holds fewer
    than batch_size transitions.
    """
    if len(self.memory) < batch_size:
      return
    batch = random.sample(self.memory, batch_size)
    states = torch.FloatTensor(np.array([t[0] for t in batch]))
    actions = torch.FloatTensor([t[1] for t in batch])
    rewards = torch.FloatTensor([t[2] for t in batch]).unsqueeze(1)
    next_states = torch.FloatTensor(np.array([t[3] for t in batch]))
    dones = torch.FloatTensor([t[4] for t in batch]).unsqueeze(1)
    # Critic update: regress Q(s, a) toward the bootstrapped target.
    current_v = self.network([states, actions], mode=1)
    with torch.no_grad():
      next_action = self.network(next_states, mode=0)
      next_v = self.network([next_states, next_action], mode=1)
    target_v = rewards + (1 - dones) * self.discount_factor * next_v
    loss = self.criterion(current_v, target_v)
    self.optimizer_value.zero_grad()
    loss.backward()
    self.optimizer_value.step()
    # Actor update: ascend the critic's value of the actor's own actions.
    loss = -self.network([states, self.network(states, mode=0)], mode=1).mean()
    self.optimizer_actor.zero_grad()
    loss.backward()
    self.optimizer_actor.step()
    return loss

  def update_target_network(self):
    # No target networks in this implementation; no-op kept for API parity.
    return

  def save_weight(self, x):
    # Plain string key; the original used a pointless f-string literal.
    torch.save({'weight': self.network.state_dict()}, x)

  def load_weight(self, x):
    checkpoint = torch.load(x)
    self.network.load_state_dict(checkpoint["weight"])