import numpy as np
import tensorflow as tf
import random
from collections import deque

class QLearningAgent:
    """Deep Q-learning agent with epsilon-greedy exploration and experience replay.

    States are objects exposing ``player_sum`` and ``dealer_card`` attributes
    (see ``State``); actions are integer indices in ``range(action_dim)``.
    """

    def __init__(self, state_dim, action_dim, hidden_layers=None, learning_rate=0.01, discount_factor=0.99,
                 epsilon=1.0, epsilon_decay=0.995, epsilon_min=0.01, replay_memory_size=10000, batch_size=32):
        # None-sentinel instead of a shared mutable default list.
        if hidden_layers is None:
            hidden_layers = [10, 10]
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.hidden_layers = hidden_layers
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.epsilon = epsilon              # current exploration rate
        self.epsilon_decay = epsilon_decay  # multiplicative decay applied per replay step
        self.epsilon_min = epsilon_min      # exploration floor
        self.batch_size = batch_size

        # Pass the configured learning rate through to the network's optimizer;
        # previously `learning_rate` was stored but never used, so the network
        # silently trained with NeuralNetwork's own default (0.001).
        self.q_function = NeuralNetwork(state_dim, action_dim, hidden_layers,
                                        optimizer=tf.optimizers.Adam(learning_rate=learning_rate))
        self.replay_memory = deque(maxlen=replay_memory_size)

    @staticmethod
    def _encode(state):
        """Encode a single state as a (1, 2) feature batch for the network."""
        return np.array([[state.player_sum, state.dealer_card]])

    def get_action(self, state):
        """Epsilon-greedy selection: random action with prob. epsilon, else argmax Q."""
        if np.random.rand() <= self.epsilon:
            return np.random.choice(self.action_dim)
        # predict() expects a (batch, features) array and returns (batch, actions);
        # the original passed a 1-D vector and argmax'd over the wrong axis.
        q_values = self.q_function.predict(self._encode(state))[0]
        return int(np.argmax(q_values))

    def experience_replay(self):
        """Sample a minibatch, fit Q-targets with one gradient step, decay epsilon.

        No-op until the replay memory holds at least ``batch_size`` transitions.
        """
        if len(self.replay_memory) < self.batch_size:
            return

        minibatch = random.sample(self.replay_memory, self.batch_size)

        # Batch the two forward passes instead of calling predict() per sample.
        states = np.array([[s.player_sum, s.dealer_card] for s, _, _, _, _ in minibatch])
        next_states = np.array([[ns.player_sum, ns.dealer_card] for _, _, _, ns, _ in minibatch])
        q_values = self.q_function.predict(states)        # (batch, action_dim)
        next_q = self.q_function.predict(next_states)     # (batch, action_dim)

        for i, (_, action, reward, _, done) in enumerate(minibatch):
            target = reward
            if not done:
                # Bootstrapped one-step TD target.
                target = reward + self.discount_factor * np.amax(next_q[i])
            # Only the taken action's target changes; other actions keep their
            # current estimates so their gradients are zero.
            q_values[i][action] = target

        self.q_function.train(states, q_values)

        self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)

    def remember(self, state, action, reward, next_state, done):
        """Append one (state, action, reward, next_state, done) transition."""
        self.replay_memory.append((state, action, reward, next_state, done))

    def save(self, filepath):
        """Persist the underlying Q-network to `filepath`."""
        self.q_function.save(filepath)

    def load(self, filepath):
        """Restore the underlying Q-network from `filepath`."""
        self.q_function.load(filepath)

class NeuralNetwork:
    """Small fully-connected Keras network mapping a feature vector to per-output values."""

    def __init__(self, input_dim, output_dim, hidden_layers=None, activation=None,
                 optimizer=None, loss=None):
        # Defaults are resolved lazily here. The original signature evaluated
        # `tf.optimizers.Adam(...)` (and `[10, 10]`) once at class-definition
        # time, so every instance built without an explicit optimizer shared a
        # single Adam object — and therefore its slot/moment state — across
        # otherwise unrelated models.
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_layers = [10, 10] if hidden_layers is None else hidden_layers
        self.activation = tf.nn.relu if activation is None else activation
        self.optimizer = tf.optimizers.Adam(learning_rate=0.001) if optimizer is None else optimizer
        self.loss = tf.losses.mean_squared_error if loss is None else loss

        self.model = self.build_model()

    def build_model(self):
        """Construct and compile the Sequential model described by the constructor args."""
        model = tf.keras.Sequential()

        # First hidden layer also fixes the input dimensionality.
        model.add(tf.keras.layers.Dense(self.hidden_layers[0], input_dim=self.input_dim, activation=self.activation))

        for units in self.hidden_layers[1:]:
            model.add(tf.keras.layers.Dense(units, activation=self.activation))

        # Linear output head: one raw value per output unit.
        model.add(tf.keras.layers.Dense(self.output_dim))

        model.compile(optimizer=self.optimizer, loss=self.loss)

        return model

    def predict(self, X):
        """Return model predictions for a (batch, input_dim) array."""
        return self.model.predict(X)

    def train(self, X, Y):
        """Run a single gradient step on one batch of inputs/targets."""
        self.model.train_on_batch(np.array(X), np.array(Y))

    def save(self, filepath):
        """Persist the full model (architecture + weights) to `filepath`."""
        self.model.save(filepath)

    def load(self, filepath):
        """Replace the current model with one loaded from `filepath`."""
        self.model = tf.keras.models.load_model(filepath)


class State:
    """Blackjack observation: the player's hand total and the dealer's visible card.

    Defines value-based equality and hashing so that two State objects built
    from the same observation key the same entry in dict-based tables such as
    QFunction's `(state, action)` map (the default identity hash never matched
    across instances).
    """

    def __init__(self, player_sum, dealer_card):
        self.player_sum = player_sum
        self.dealer_card = dealer_card

    def __eq__(self, other):
        if not isinstance(other, State):
            return NotImplemented
        return (self.player_sum, self.dealer_card) == (other.player_sum, other.dealer_card)

    def __hash__(self):
        # Consistent with __eq__: equal states hash identically.
        return hash((self.player_sum, self.dealer_card))

    def __repr__(self):
        return f"State(player_sum={self.player_sum}, dealer_card={self.dealer_card})"

class Action:
    """Discrete blackjack actions, encoded as the Q-network's output indices."""

    HIT, STAND = 0, 1

class QFunction:
    """Tabular Q-value store keyed by (state, action) pairs, defaulting to 0."""

    def __init__(self):
        # Maps (state, action) -> estimated return. Keys must be hashable.
        self.q = {}

    def get_q_value(self, state, action):
        """Return the stored Q-value for (state, action), or 0 if unseen."""
        # dict.get avoids the original's double lookup (membership test + index).
        return self.q.get((state, action), 0)

    def set_q_value(self, state, action, value):
        """Store (or overwrite) the Q-value for (state, action)."""
        self.q[(state, action)] = value



