import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm


class DQN(nn.Module):
    """Small fully connected Q-network: state features -> per-action Q-values.

    Two sigmoid-activated hidden layers of equal width followed by a linear
    output head (one output per action).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.nonlinear1 = nn.Sigmoid()
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.nonlinear2 = nn.Sigmoid()
        self.fc3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Two hidden layers, each squashed by a sigmoid, then the linear head.
        hidden = self.nonlinear1(self.fc1(x))
        hidden = self.nonlinear2(self.fc2(hidden))
        return self.fc3(hidden)


# Training hyperparameters for the supervised Q-value regression.
lr = 0.05
batch_size = 8
# Precomputed optimal Q-values loaded from disk; presumably shape (25, 5),
# one row per grid cell — verify against the producer of optimal_qv.npy.
target_Q = torch.tensor(np.load("optimal_qv.npy"), dtype=torch.float32)
# All (row, col) coordinates of the 5x5 grid, flattened row-major, as floats.
states = torch.tensor(np.array([(row, col) for row in range(5) for col in range(5)]), dtype=torch.float32)


def make_batch(N):
    """Sample a random training batch of N (state, target-Q) pairs.

    Indices are drawn uniformly with replacement from the full state set.
    Derive the population size from `states` instead of hard-coding 25 so
    the sampler stays correct if the grid size ever changes.
    """
    select_idxs = np.random.choice(len(states), N)
    return states[select_idxs], target_Q[select_idxs]


net = DQN(2, 16, 5)
q_table = torch.zeros((25, 5))
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
epoches = 20000
lr_bkp = []  # learning-rate history: one entry per completed epoch
# Decay the LR by gamma=0.8 at every 10% of training. MultiStepLR documents
# integer milestones, so use floor division instead of float `epoches / 10`.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[epoches * i // 10 for i in range(1, 10)], gamma=0.8, last_epoch=-1
)
for epoch in range(epoches):
    x, y = make_batch(batch_size)
    y_hat = net(x)
    # MSELoss signature is (input, target): prediction first. The value is
    # symmetric, but follow the documented argument order.
    loss = loss_func(y_hat, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()

    lr_bkp.append(optimizer.param_groups[0]["lr"])

    if epoch % 100 == 0:
        # Evaluate all 25 states in one batched forward pass (no grad needed)
        # instead of looping over the grid cell by cell.
        with torch.no_grad():
            q_table = net(states)
        delta = (q_table - target_Q).abs().mean()
        print(epoch, delta.item())

        # Early stop once the learned Q-values match the target table closely.
        if delta < 1e-2:
            break

torch.save(net.state_dict(), "optimal_Q_func.pth")

# Plot the learning-rate curve. Size the x-axis by len(lr_bkp), not `epoches`:
# the early-stop break above can end training with fewer recorded entries,
# and a fixed range(epoches) would raise a shape-mismatch ValueError.
import matplotlib.pyplot as plt

plt.plot(range(len(lr_bkp)), lr_bkp)
plt.show()
