#client.py
import torch
from torch_geometric.data import Data
from const import HIS_LEN, NEIGHBOR_LEN, HIDDEN
import numpy as np
from torch.optim import AdamW  # 导入 AdamW 优化器


# NOTE: a duplicate ``FederatedClient`` class definition used to live here.
# It was dead code — Python binds a module-level class name to the *last*
# definition executed, so this first copy (which only had ``__init__``) was
# silently shadowed by the full implementation below. Removed to avoid
# confusion; the surviving class defines the identical ``__init__`` plus
# ``train``.


class FederatedClient:
    """A federated-learning client that trains a local copy of the global model.

    Each round the server ships the global ``state_dict`` to the client; the
    client runs one pass over its local batches with mixed precision and
    gradient accumulation, then returns its updated parameters together with
    the per-layer gradients averaged over the batches that contributed.
    """

    def __init__(self, client_id, local_data, model, device, user_neighbor_emb):
        """Set up the client's local state.

        Args:
            client_id: identifier used only for logging.
            local_data: dict with a ``'batches'`` list of
                ``((user_ids, item_ids, history, _), labels)`` tuples.
            model: the local model replica; must expose ``train_step``.
            device: ``torch.device`` the model and batches are moved to.
            user_neighbor_emb: mapping from user id to that user's neighbor
                embedding (array-like of fixed shape).
        """
        self.client_id = client_id
        self.local_data = local_data
        self.model = model
        self.device = device

        # AdamW decouples weight decay from the gradient update; lr and
        # weight_decay are tunable hyperparameters.
        self.optimizer = AdamW(self.model.parameters(), lr=0.01, weight_decay=1e-5)

        # Mixed-precision gradient scaler (re-created per round in train()).
        self.scaler = torch.cuda.amp.GradScaler()

        # Pre-computed neighbor embeddings, keyed by user id.
        self.user_neighbor_emb = user_neighbor_emb

    def train(self, global_model_params, accumulation_steps=4):
        """Run one local training pass (gradient accumulation + averaging).

        Args:
            global_model_params: ``state_dict`` broadcast by the server.
            accumulation_steps: number of micro-batches to accumulate before
                each optimizer step.

        Returns:
            ``(updated_params, accumulated_grads)`` — a list of cloned
            parameter tensors, and the per-layer gradient list averaged over
            the valid batches (``None`` if no batch produced a loss).
        """
        # 1) Load the global parameters broadcast by the server.
        self.model.load_state_dict(global_model_params)
        self.model.to(self.device)
        self.model.train()

        print(f"[DEBUG] Client {self.client_id} begins training. "
              f"Number of batches: {len(self.local_data['batches'])}")

        # 2) Clear any gradient state left over from a previous round.
        self.optimizer.zero_grad()

        # Reset the GradScaler so no stale scale state leaks across rounds;
        # scaling is only enabled when actually running on CUDA.
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.device.type == "cuda")

        num_batches = len(self.local_data['batches'])
        epoch_loss = 0.0
        accumulated_grads = None  # running per-layer sum of batch gradients
        valid_batch_count = 0     # batches that actually produced a loss

        for batch_idx, batch in enumerate(self.local_data['batches']):
            inputs, labels = batch
            user_ids, item_ids, history, _ = inputs

            user_ids = user_ids.to(self.device)
            item_ids = item_ids.to(self.device)
            history = history.to(self.device)
            labels = labels.to(self.device)

            # Look up the neighbor embedding for every user in the batch.
            # NOTE(review): assumes user_neighbor_emb[u] is array-like with a
            # fixed shape so torch.stack succeeds — confirm against caller.
            batch_neighbor_emb = torch.stack([
                torch.tensor(self.user_neighbor_emb[u.item()], dtype=torch.float32)
                for u in user_ids
            ]).to(self.device)

            loss, output, batch_gradients = self.model.train_step(
                user_ids, item_ids, history, batch_neighbor_emb, labels,
                self.scaler, accumulation_steps
            )

            # train_step signals an unusable batch by returning a None loss.
            if loss is None:
                continue

            # Undo the 1/accumulation_steps scaling applied inside train_step
            # (presumably — verify train_step divides its loss) so the logged
            # epoch loss is on the original scale.
            epoch_loss += loss.item() * accumulation_steps
            valid_batch_count += 1

            # Accumulate gradients layer by layer for the federated update.
            if accumulated_grads is None:
                accumulated_grads = batch_gradients
            else:
                for i in range(len(accumulated_grads)):
                    accumulated_grads[i] += batch_gradients[i]

            # Step every `accumulation_steps` batches and on the final batch.
            if (batch_idx + 1) % accumulation_steps == 0 or (batch_idx + 1) == num_batches:
                # BUGFIX: under AMP, gradients must be unscaled before
                # clipping; otherwise clip_grad_norm_ measures the *scaled*
                # gradients and the max_norm threshold is meaningless.
                # (No-op when the scaler is disabled, e.g. on CPU.)
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
                self.scaler.step(self.optimizer)
                self.scaler.update()
                self.optimizer.zero_grad()

            if batch_idx % 10 == 0:
                print(f"[Client {self.client_id}] Batch {batch_idx + 1}/{num_batches}, Loss: {loss.item():.4f}")
                print(f"Real Labels: {labels[:5]}")
                print(f"Predictions: {output[:5]}")

        avg_loss = epoch_loss / valid_batch_count if valid_batch_count > 0 else 0.0
        print(f"[Client {self.client_id}] Epoch Loss: {avg_loss:.4f}")

        # Average the accumulated gradients over the batches that contributed.
        if accumulated_grads is not None:
            for i in range(len(accumulated_grads)):
                accumulated_grads[i] /= valid_batch_count

        updated_params = [param.data.clone() for param in self.model.parameters()]
        return updated_params, accumulated_grads





