from blackjack import BlackjackEnv
from qlearning_agent import QLearningAgent
import numpy as np
from collections import deque
import random
# --- Environment, agent, and training hyperparameters ---
env = BlackjackEnv()
state_dim = 3   # size of the state tuple -- presumably (player sum, dealer card, usable ace); confirm against BlackjackEnv
action_dim = 2  # number of discrete actions -- presumably hit/stick; confirm against BlackjackEnv
gamma = 0.99    # discount factor applied to bootstrapped future rewards
agent = QLearningAgent(state_dim, action_dim,discount_factor=gamma)
max_episodes = 100  # NOTE: reduced from 10000 for a quick run
max_steps_per_episode = 100  # safety cap; blackjack hands end long before this
# Experience replay buffer; deque evicts the oldest transitions past maxlen=10000
replay_buffer = deque(maxlen=10000)
# --- Training loop ---
# Run episodes, store (s, a, r, s', done) transitions in the replay buffer,
# and fit the Q-function on random minibatches (experience replay).
batch_size = 32  # replay minibatch size (hoisted: loop-invariant)

for episode in range(max_episodes):
    state = env.reset()
    done = False
    total_reward = 0

    for step in range(max_steps_per_episode):
        action = agent.get_action(state)
        next_state, reward, done, _ = env.step(action)
        total_reward += reward

        # Store the transition for later replay.
        replay_buffer.append((state, action, reward, next_state, done))

        # Update the Q-function once enough experience has accumulated.
        if len(replay_buffer) >= batch_size:
            minibatch = random.sample(replay_buffer, batch_size)

            # BUGFIX: the sampled-transition variables must NOT reuse the
            # outer names (action/reward/next_state/done) -- the original
            # shadowed them, so `if done:` and `state = next_state` below
            # acted on a random replayed transition, not the live episode.
            for b_state, b_action, b_reward, b_next_state, b_done in minibatch:
                s = np.expand_dims(b_state, axis=0)
                target = b_reward
                if not b_done:
                    # BUGFIX: bootstrap from the NEXT state's Q-values;
                    # the original predicted on the current state `_s`.
                    s_next = np.expand_dims(b_next_state, axis=0)
                    target += gamma * np.amax(agent.q_function.predict(s_next))

                # Regress the taken action's Q-value toward the TD target,
                # leaving the other action's prediction unchanged.
                target_f = agent.q_function.predict(s)
                target_f[0][b_action] = target
                agent.q_function.train(s, target_f)

        if done:
            break

        state = next_state

    # Periodic progress report (every 100 episodes, including episode 0).
    if episode % 100 == 0:
        print('Episode {}/{}: Total Reward = {}'.format(episode, max_episodes, total_reward))

# Printed once after all episodes; the original printed this every episode.
print('Training completed!')