from envir import environment
from replay import ReplayMemory
from Net import QNetwork,get_network_input

import time
import matplotlib.pyplot as plt
import numpy as np
import random
import torch
import torch.nn as nn

model = QNetwork(input_dim=10, hidden_dim=20,output_dim=5)
#print(model)

epsilon = 0.1
mapsize = 15
GAMMA = 0.9

env = environment(mapsize, nothing=0, dead=-1, apple=1)
memory = ReplayMemory(1000)
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-5)
'''
torch.optim.Adam 是 PyTorch 中实现 Adam 优化算法的类。
Adam 是一种自适应学习率优化算法，结合了 AdaGrad 和 RMSProp 的优点，
能够在训练过程中自适应地调整每个参数的学习率。
lr=1e-5,防止模型在训练过程中步长过大而导致不稳定的更新。
'''

def start(max_iter):
    """Play ``max_iter`` games with an epsilon-greedy policy, filling the replay memory.

    Args:
        max_iter: number of finished games to play before returning.

    Returns:
        (total_reward, avg_len, max_len): summed reward over every step taken,
        and the mean / maximum snake length reached per game.
    """
    games = 0
    total_reward = 0
    len_arry = []

    while games < max_iter:
        state = get_network_input(env.snack, env.apple)
        # Inference only — no gradients needed while collecting experience.
        with torch.no_grad():
            acts = model(state)

        # Epsilon-greedy: explore with probability epsilon, otherwise take the
        # action with the highest predicted Q-value.
        # (Bug fix: the original branches were inverted — it acted greedily
        # only 10% of the time and randomly 90% of the time.)
        if random.random() < epsilon:
            act = np.random.randint(0, 5)
        else:
            # .item() so the stored action is a plain int, matching the
            # random branch and keeping torch.LongTensor(actions) valid.
            act = torch.argmax(acts).item()

        # Apply the action and observe the resulting transition.
        reward, done, lens = env.update(act)
        new_state = get_network_input(env.snack, env.apple)
        # Store the transition in the replay buffer.
        memory.push(state, act, reward, new_state, done)

        total_reward += reward

        if env.gameover:
            games += 1
            len_arry.append(lens)
            env.reset()

    avg_len = np.mean(len_arry)
    max_len = np.max(len_arry)
    return total_reward, avg_len, max_len

MSE = nn.MSELoss()
# PyTorch loss function computing the mean squared error (MSE); used to
# regress predicted Q-values toward their bootstrap targets.

def learn(num_updates, batch_size):
    """Run ``num_updates`` DQN gradient steps on minibatches from the replay memory.

    Args:
        num_updates: number of optimizer steps to perform.
        batch_size: number of transitions sampled per step.

    Returns:
        The summed scalar loss over all updates.
    """
    total_loss = 0

    for _ in range(num_updates):
        optimizer.zero_grad()
        sample = memory.sample(batch_size)

        # Bug fix: the original unpacked `done` but then read the undefined
        # name `dones` (NameError).
        states, actions, rewards, new_states, dones = sample
        # Stack the per-transition state tensors (n,) into one batch (B, n).
        states = torch.cat([s.unsqueeze(0) for s in states], dim=0)
        actions = torch.LongTensor(actions)
        rewards = torch.FloatTensor(rewards)
        next_states = torch.cat([s.unsqueeze(0) for s in new_states], dim=0)
        dones = torch.FloatTensor(dones)

        # Q(s, a) for the actions actually taken (bug fix: the original never
        # selected the taken action from the Q-value rows).
        q_values = model(states).gather(1, actions.unsqueeze(1)).squeeze(1)

        # Bootstrap target r + gamma * max_a' Q(s', a'), with the terminal
        # mask zeroing the bootstrap term. No gradient flows through the
        # target. (Bug fixes: the original forwarded the raw list
        # `new_states` instead of the batched tensor, and used the full
        # next-Q vector instead of its per-row maximum.)
        with torch.no_grad():
            next_q = model(next_states).max(dim=1)[0]
        q_target = rewards + GAMMA * next_q * (1 - dones)

        # Bug fix: the original computed the target and then did nothing —
        # no loss, no backward(), no optimizer.step(), and no return value.
        loss = MSE(q_values, q_target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

    return total_loss

if __name__ == '__main__':
    # Collect experience from 100 games into the replay memory.
    # NOTE(review): learn() is never called here, so the network's weights are
    # never updated — confirm whether the training loop is missing.
    start(100)