import torch.nn as nn
import torch.optim as optim

from rainforeLearn.gomoku.v2.neywork.factory import create_gomoku_network


class NetworkManager:
    """Network manager — creates and owns the neural networks and their
    training machinery for DQN-style Gomoku training.

    Responsibilities:
      * build the policy (online) network and the target network,
      * keep the target network in sync via hard or soft (Polyak) updates,
      * own the optimizer, learning-rate scheduler and loss criterion.
    """

    def __init__(self, config, device):
        """
        Args:
            config: training configuration object; assumed to expose the
                attributes read below (board_size, learning_rate,
                lr_schedule, target_update_freq, ...) — TODO confirm against
                the config class definition.
            device: torch device the networks are moved to.
        """
        self.config = config
        self.device = device
        # Counts calls to update_target_network(); gates hard updates.
        self.target_update_counter = 0

        # Online network (trained) and target network (periodically synced).
        self.policy_net = self._create_network()
        self.target_net = self._create_network()

        # Start the target network as an exact copy of the policy network
        # and keep it in eval mode (it is never trained directly).
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        # Optimizer and (optional) LR scheduler for the policy network.
        self.optimizer = self._create_optimizer()
        self.scheduler = self._create_scheduler()

        # Per-element loss; reduction='none' so callers can weight samples
        # (e.g. prioritized replay) before reducing.
        self.criterion = self._create_criterion()

    def _create_network(self):
        """Build one Gomoku network from the config and move it to the device."""
        return create_gomoku_network(
            board_size=self.config.board_size,
            config=self.config.network_config,
            task_type=self.config.task_type,
            num_classes=self.config.num_classes,
            use_noisy=self.config.use_noisy,
            dropout_rate=self.config.dropout_rate,
            history_steps=self.config.history_steps,
            attention_type=self.config.attention_type,
            attention_freq=self.config.attention_freq,
            use_positional_encoding=self.config.use_positional_encoding,
            learnable_pos_encoding=self.config.learnable_pos_encoding,
            pos_encoding_temperature=self.config.pos_encoding_temperature
        ).to(self.device)

    def _create_optimizer(self):
        """Create an AdamW optimizer over the policy network's parameters.

        eps=1e-4 is larger than AdamW's default (1e-8), presumably for
        numerical stability under mixed precision — TODO confirm intent.
        """
        return optim.AdamW(
            self.policy_net.parameters(),
            lr=self.config.learning_rate,
            eps=1e-4,
            weight_decay=self.config.weight_decay
        )

    def _create_scheduler(self):
        """Create the LR scheduler selected by config.lr_schedule.

        Returns None for any unrecognized schedule name (constant LR).
        """
        if self.config.lr_schedule == 'step':
            return optim.lr_scheduler.StepLR(
                self.optimizer,
                step_size=self.config.lr_step_size,
                gamma=self.config.lr_gamma
            )
        elif self.config.lr_schedule == 'exponential':
            return optim.lr_scheduler.ExponentialLR(
                self.optimizer,
                gamma=self.config.lr_gamma
            )
        elif self.config.lr_schedule == 'cosine':
            return optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer,
                T_max=self.config.lr_cosine_steps
            )
        elif self.config.lr_schedule == 'warmup_cosine':
            # Cosine schedule with warmup, implemented via OneCycleLR.
            return optim.lr_scheduler.OneCycleLR(
                self.optimizer,
                max_lr=self.config.learning_rate,
                total_steps=self.config.lr_cosine_steps,
                pct_start=0.02,  # first 2% of steps are warmup
                anneal_strategy='cos',
                div_factor=25.0,  # initial lr = max_lr / 25
                final_div_factor=10000.0  # final lr = max_lr / 10000
            )
        return None

    def _create_criterion(self):
        """Create the element-wise loss function (Huber or MSE)."""
        if self.config.use_huber_loss:
            return nn.SmoothL1Loss(reduction='none')
        else:
            return nn.MSELoss(reduction='none')

    def update_target_network(self):
        """Update the target network (soft every call, or hard on schedule)."""
        self.target_update_counter += 1

        if self.config.soft_target_update:
            self._soft_update()
        else:
            self._hard_update()

    def _soft_update(self):
        """Polyak averaging: target <- tau * policy + (1 - tau) * target."""
        tau = self.config.target_update_tau
        for target_param, policy_param in zip(self.target_net.parameters(),
                                              self.policy_net.parameters()):
            # lerp_(end, w) computes self + w * (end - self) in place,
            # i.e. exactly the convex combination above.
            target_param.data.lerp_(policy_param.data, tau)

    def _hard_update(self):
        """Copy the policy network's weights every target_update_freq calls."""
        if self.target_update_counter % self.config.target_update_freq == 0:
            self.target_net.load_state_dict(self.policy_net.state_dict())

    def reset_noise(self):
        """Re-sample noise in the policy network's NoisyNet layers, if enabled."""
        if self.config.use_noisy:
            self.policy_net.reset_noise()

    def get_current_lr(self):
        """Return the current learning rate of the first parameter group."""
        return self.optimizer.param_groups[0]['lr']

    def step_scheduler(self, steps_done):
        """Advance the LR scheduler every config.lr_update_freq steps.

        Bug fix: OneCycleLR raises ValueError once it has been stepped
        `total_steps` times; stop stepping it at that point instead of
        crashing a training run that outlives the schedule.
        """
        if self.scheduler is None or steps_done % self.config.lr_update_freq != 0:
            return
        if (isinstance(self.scheduler, optim.lr_scheduler.OneCycleLR)
                and self.scheduler.last_epoch >= self.scheduler.total_steps):
            return  # schedule exhausted; hold the final LR
        self.scheduler.step()