import torch.cuda
import torch.optim as optim
from algorithm.DQN import MLP,ReplayBuffer
class DQNConfig:
    """Hyperparameter container for a DQN agent.

    Holds network sizes, optimizer settings, epsilon-greedy exploration
    schedule, and replay-buffer/target-network parameters. Purely a value
    object: the only side effect is probing CUDA availability to pick a
    device.
    """

    def __init__(self, n_states, n_actions, hidden_dim, lr,
                 main_win=None, sub_win=None, buffer_size=int(1e5)):
        """Build a config.

        Args:
            n_states: Dimension of the environment observation space.
            n_actions: Number of discrete actions.
            hidden_dim: Hidden-layer width for the Q-network MLP.
            lr: Learning rate for the optimizer.
            main_win: Optional main UI window handle (presumably for
                progress display — confirm against caller).
            sub_win: Optional secondary UI window handle.
            buffer_size: Replay buffer capacity (default 1e5 transitions).
        """
        self.n_actions = n_actions
        self.n_states = n_states
        self.hidden_dim = hidden_dim
        self.buffer_size = buffer_size
        # Prefer GPU when available, otherwise fall back to CPU.
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(device)
        self.gamma = 0.99  # reward discount factor
        # Epsilon-greedy exploration schedule: epsilon decays from
        # epsilon_start to epsilon_end over ~epsilon_decay samples.
        self.sample_count = 0  # counter driving the epsilon decay
        self.epsilon_start = 0.95
        self.epsilon_end = 0.01
        self.epsilon_decay = 500000
        self.batch_size = 64
        self.lr = lr
        self.target_update = 100  # sync target network every N updates
        self.main_win = main_win
        self.sub_win = sub_win
