import torch
import numpy as np
import matplotlib.pyplot as plt
import env
# NOTE(review): `device` is unused in this file — presumably intended for
# placing tensors on CPU/GPU later; confirm before removing.
device = torch.device('cpu')


def eucli(a, b):
    """Return the Euclidean (L2) distance between tensors ``a`` and ``b``."""
    diff = a - b
    return (diff * diff).sum().sqrt()


class QLearningAgent:
    """Tabular Q-learning agent for the vehicle-routing style ``env.Env``.

    The Q-table is indexed as (sample, vehicle-or-customer, current action,
    next action); the exact meaning of axis 1 differs between
    ``choose_action`` and ``train`` in the original code — assumed state
    layout is (sample_idx, per-vehicle customer tensor, position) — TODO
    confirm against ``env.Env``.
    """

    def __init__(self, env, learning_rate=0.1, discount_factor=0.9,
                 exploration_rate=0.5, exploration_decay=0.995):
        """Store hyper-parameters and allocate a zero-initialized Q-table.

        Args:
            env: environment exposing ``n_samples``, ``n_vehicle``,
                ``n_customer``, ``reset()`` and ``step()``.
            learning_rate: TD step size (alpha).
            discount_factor: future-reward discount (gamma).
            exploration_rate: initial epsilon for epsilon-greedy selection.
            exploration_decay: per-episode multiplicative epsilon decay.
        """
        self.env = env
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate
        self.exploration_decay = exploration_decay
        # +1 action slot, presumably for the depot in addition to the
        # customers — TODO confirm.
        self.q_table = np.zeros((env.n_samples, env.n_vehicle,
                                 env.n_customer + 1, env.n_customer + 1))

    def choose_action(self, state):
        """Epsilon-greedy choice over the ``n_customer + 1`` actions."""
        if np.random.rand() < self.exploration_rate:
            # Explore: uniform random action.
            return np.random.randint(self.env.n_customer + 1)
        else:
            # Exploit: best-valued action for the current table slice.
            return np.argmax(self.q_table[state[0], state[1][0], state[2]])

    def train(self, num_episodes=1000):
        """Run ``num_episodes`` episodes of TD updates.

        Returns:
            list: total reward accumulated in each episode.

        BUGFIX: previously this method read the module-level global ``env``
        instead of ``self.env``, so it only worked when a global named
        ``env`` happened to be bound; it now uses the injected environment.
        """
        rewards = []
        for episode in range(num_episodes):
            state = self.env.reset()
            done = False
            total_reward = 0

            while not done:
                action = self.choose_action(state)
                next_state, reward, mask = self.env.step(
                    torch.tensor([[state[1][0][action]]]),
                    torch.tensor([[action]]))
                next_action = self.choose_action(next_state)
                # TD update: Q(s,a) += alpha * (r + gamma * max_a' Q(s', a') - Q(s,a)).
                # BUGFIX: the subtracted Q(s,a) term was missing the
                # ``next_action`` index, producing a whole row instead of the
                # scalar cell being updated (which breaks the in-place +=).
                self.q_table[state[0], state[1][0][action], action, next_action] += (
                    self.learning_rate * (
                        reward
                        + self.discount_factor * np.max(
                            self.q_table[next_state[0], next_state[1][0], next_action])
                        - self.q_table[state[0], state[1][0][action], action, next_action]))
                state = next_state
                total_reward += reward

                # Episode ends once every entry of the environment mask is set.
                if torch.all(mask):
                    done = True

            rewards.append(total_reward)
            # Decay epsilon so the policy becomes greedier over time.
            self.exploration_rate *= self.exploration_decay

        return rewards


if __name__ == "__main__":
    # Instantiate the environment. Rebinding the module name `env` to the
    # instance is deliberate: the agent's train loop reads the module-level
    # `env` binding.
    env = env.Env()

    agent = QLearningAgent(env)
    episode_rewards = agent.train()

    # Plot the reward trend
    plt.plot(episode_rewards)
    plt.xlabel('Episode')
    plt.ylabel('Total Reward')
    plt.title('Q-learning Reward Trend')
    plt.show()
