import random

import gymnasium as gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class QNetwork(nn.Module):
    """Three-hidden-layer MLP mapping a flat state vector to one Q-value per action."""

    def __init__(self, state_dim, action_dim, hidden_dim=64):
        """
        Args:
            state_dim: size of the input (observation) vector.
            action_dim: number of discrete actions (size of the output).
            hidden_dim: width of the three hidden layers (default 64,
                matching the original hard-coded value).
        """
        super().__init__()
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, hidden_dim)
        self.fc4 = nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        """Return Q-values of shape (..., action_dim) for input of shape (..., state_dim)."""
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        # Final layer is linear: Q-values are unbounded regression targets.
        return self.fc4(x)
    
class QLearningAgent:
    """DQN-style agent: epsilon-greedy exploration, replay-buffer learning,
    and a target network refreshed explicitly via update_target_network()."""

    def __init__(self, state_dim, action_dim, epsilon=0.2, gamma=0.99, lr=1e-3):
        """
        Args:
            state_dim: size of the flat observation vector.
            action_dim: number of discrete actions.
            epsilon: probability of taking a uniformly random action.
            gamma: discount factor for bootstrapped targets.
            lr: Adam learning rate (1e-3 is the optimizer's own default,
                so defaults reproduce the original behavior exactly).
        """
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.epsilon = epsilon
        self.gamma = gamma

        self.q_network = QNetwork(state_dim, action_dim)
        self.target_q_network = QNetwork(state_dim, action_dim)
        # Start the target network as an exact copy of the online network.
        self.target_q_network.load_state_dict(self.q_network.state_dict())

        self.loss_fun = nn.MSELoss()
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=lr)

    def update_target_network(self):
        """Copy the online network's weights into the target network."""
        self.target_q_network.load_state_dict(self.q_network.state_dict())

    def select_action(self, state):
        """Return an action index: random with probability epsilon, else argmax-Q."""
        if random.random() < self.epsilon:
            return random.randint(0, self.action_dim - 1)
        state = torch.as_tensor(np.asarray(state), dtype=torch.float32)
        with torch.no_grad():  # inference only; no autograd graph needed
            q_values = self.q_network(state)
        return torch.argmax(q_values).item()

    def learn(self, buffer, batch_size):
        """Sample a minibatch from `buffer` and take one TD(0) gradient step.

        Each buffer entry is a (state, action, reward, next_state, done)
        tuple. Returns without learning until the buffer holds at least
        `batch_size` transitions.
        """
        if len(buffer) < batch_size:
            return

        batch = random.sample(buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        # Go through np.asarray first: building a tensor element-by-element
        # from a sequence of ndarrays is slow and warns in recent torch.
        states = torch.as_tensor(np.asarray(states), dtype=torch.float32)
        actions = torch.as_tensor(actions, dtype=torch.int64)
        rewards = torch.as_tensor(rewards, dtype=torch.float32)
        next_states = torch.as_tensor(np.asarray(next_states), dtype=torch.float32)
        dones = torch.as_tensor(dones, dtype=torch.float32)

        # Q(s, a) for the actions actually taken in the batch.
        q_values = self.q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)

        # Bootstrapped targets from the frozen target network. no_grad both
        # detaches the target (as the original's .detach() did) and avoids
        # building an unused autograd graph for it.
        with torch.no_grad():
            next_q_values = self.target_q_network(next_states).max(1)[0]
            # (1 - dones) zeroes the bootstrap term on terminal transitions.
            expected_q_values = rewards + self.gamma * next_q_values * (1 - dones)

        loss = self.loss_fun(q_values, expected_q_values)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()


# --- Training loop: DQN on CartPole-v1 ---
env = gym.make('CartPole-v1')
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n

agent = QLearningAgent(state_dim, action_dim)
buffer = []  # replay buffer of (state, action, reward, next_state, done)
episodes = 500
batch_size = 32
max_buffer_size = 10_000  # cap replay memory so it doesn't grow without bound

for episode in range(episodes):
    state = env.reset()[0]
    total_reward = 0
    done = False
    while not done:
        action = agent.select_action(state)
        # Gymnasium splits episode end into `terminated` (pole fell / failure)
        # and `truncated` (time limit, 500 steps for CartPole-v1). Treating
        # only `terminated` as the end would loop forever once the agent
        # reliably reaches the time limit.
        next_state, reward, terminated, truncated, _ = env.step(action)
        done = terminated or truncated
        buffer.append((state, action, reward, next_state, done))
        if len(buffer) > max_buffer_size:
            buffer.pop(0)  # drop the oldest transition

        state = next_state
        total_reward += reward
        agent.learn(buffer, batch_size)

    if episode % 10 == 0:
        agent.update_target_network()
        print(f"Episode: {episode}, Total Reward: {total_reward}")

env.close()