import random
from collections import deque

import gymnasium as gym
import torch
from torch import nn, optim

from method import QNet
# Pick the compute device once at import time: GPU when CUDA is present,
# otherwise fall back to the CPU.
_has_cuda = torch.cuda.is_available()
device = torch.device("cuda" if _has_cuda else "cpu")
if _has_cuda:
    print("CUDA is available. Using GPU.")
else:
    print("CUDA is not available. Using CPU.")

# max_iter: number of training episodes
# memory_size: capacity of the experience replay buffer
# e_greedy: epsilon for the epsilon-greedy exploration policy
# name: gymnasium environment id to train on
# insize: size of the state vector (network input width)
# outsize: number of discrete actions (network output width)
# loss_fn: loss function, defaults to MSE
# optimizer: optimizer, defaults to Adam
class game:
    """Deep Q-learning trainer for a gymnasium environment.

    The replay buffer is first filled with random actions; once full,
    actions are chosen epsilon-greedily from the Q-network and the
    network is trained on a random minibatch after every episode.
    """

    def __init__(self, max_iter=10000, memory_size=10000, e_greedy=0.1,
                 name=None, insize=None, outsize=None,
                 loss_fn='MSE', optimizer='Adam',
                 gamma=0.9, batch_size=1000):
        """
        max_iter: number of training episodes.
        memory_size: capacity of the experience replay buffer.
        e_greedy: initial probability of taking a random action.
        name: gymnasium environment id (e.g. "CartPole-v1").
        insize: state dimension fed to the Q-network.
        outsize: number of discrete actions the network outputs.
        loss_fn: 'MSE' (default) or 'Huber' (nn.SmoothL1Loss).
        optimizer: 'Adam' (default) or 'SGD'.
        gamma: discount factor used in the bootstrapped target.
        batch_size: number of transitions sampled per update.
        """
        self.name = name
        self.env = gym.make(name)
        self.memory_size = memory_size
        # deque(maxlen=...) evicts the oldest transition automatically,
        # replacing the original O(n) list.pop(0) per environment step.
        self.memory = deque(maxlen=memory_size)
        self.e_greedy = e_greedy
        self.max_iter = max_iter
        self.gamma = gamma
        self.batch_size = batch_size
        self.net = QNet(insize, outsize).to(device)
        # Optimizer selection. Note Adam's default lr is already 1e-3,
        # so the default branch matches the previous behavior exactly.
        if optimizer == 'SGD':
            self.opt = optim.SGD(self.net.parameters(), lr=1e-3)
        else:
            self.opt = optim.Adam(self.net.parameters())
        # Loss selection; Huber (SmoothL1) is more robust to TD outliers.
        if loss_fn == 'Huber':
            self.loss_fn = nn.SmoothL1Loss()
        else:
            self.loss_fn = nn.MSELoss()

    def train(self, lowbound, highbound):
        """Run the DQN training loop.

        lowbound: an episode is aborted (and still counted) once its
            return drops below this value.
        highbound: once the smoothed return exceeds this value, the
            environment is rebuilt with render_mode="human" so the
            agent becomes visible.
        """
        episode = 0
        show_me = True
        showR = 0  # exponentially smoothed episode return
        while episode < self.max_iter:
            state = self.env.reset()[0]
            R = 0
            print(episode, showR)
            while True:
                if show_me:
                    self.env.render()
                if len(self.memory) < self.memory_size:
                    # Replay buffer not full yet: act at random to fill it.
                    action = self.env.action_space.sample()
                else:
                    # Decay epsilon: as the Q-values converge we should
                    # explore *less*. (The original incremented e_greedy
                    # here, contradicting its own comment; fixed sign,
                    # floored at 0.)
                    self.e_greedy = max(self.e_greedy - 1e-7, 0.0)
                    if random.random() < self.e_greedy:
                        action = self.env.action_space.sample()
                    else:
                        s = torch.tensor(state, dtype=torch.float32,
                                         device=device).unsqueeze(0)
                        QSA = self.net(s)
                        action = torch.argmax(QSA, 1)[0].item()
                new_state, reward, done, _, info = self.env.step(action)

                self.memory.append([state, reward, action, new_state, done])

                state = new_state
                R += reward
                if done:
                    episode += 1
                    showR = showR * 0.95 + 0.05 * R
                    if showR > highbound:
                        # Performing well on average: switch to a visible
                        # human-rendered window for the remaining episodes.
                        self.env = gym.make(self.name, render_mode="human")
                        show_me = True
                    break
                if R < lowbound:
                    # Hopeless episode: cut it short but still count it.
                    episode += 1
                    break

            if len(self.memory) < self.memory_size:
                continue
            # Sample (with replacement) a minibatch of stored transitions.
            exps = random.choices(self.memory, k=self.batch_size)
            # States become (batch, insize) tensors ...
            states = torch.tensor([exp[0] for exp in exps]).float().to(device)
            nstates = torch.tensor([exp[3] for exp in exps]).float().to(device)
            # ... the scalar fields become (batch, 1) tensors.
            rewards = torch.tensor([[exp[1]] for exp in exps]).float().to(device)
            actions = torch.tensor([[exp[2]] for exp in exps], dtype=torch.int64).to(device)
            dones = torch.tensor([[int(exp[4])] for exp in exps]).to(device)

            # Q(s, a) for the actions that were actually taken.
            QSAs = self.net(states)
            Qs = torch.gather(QSAs, 1, actions).float()

            # Bootstrapped target r + gamma * max_a' Q(s', a'); terminal
            # transitions contribute only the reward. no_grad avoids
            # building an autograd graph for the target pass.
            with torch.no_grad():
                NQSAs = self.net(nstates)
                maxQ = torch.max(NQSAs, dim=1, keepdim=True)[0].float()
            target_Q = rewards + (1.0 - dones) * maxQ * self.gamma

            # detach keeps gradients from flowing into the target.
            loss = self.loss_fn(Qs, target_Q.detach())
            self.opt.zero_grad()
            loss.backward()
            self.opt.step()
        return

