# client.py
# Encapsulates all client-side behavior.

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from calibration import BiasCorrectedTS
from utils import get_logits_and_labels

class Client:
    """A federated-learning client.

    Depending on the configured scheme, a client trains a local classifier
    head on its training split and/or fits a bias-corrected temperature
    scaling (BCTS) calibrator on its held-out calibration split, then returns
    the artifacts the server aggregates.
    """

    def __init__(self, client_id, dataloaders, config):
        """
        Initialize a client.

        Args:
            client_id (int): Unique identifier of this client.
            dataloaders (dict): Dict with 'train' and 'calib' DataLoaders;
                'calib' may be None when no calibration split exists.
            config (dict): Global configuration (device, learning rates,
                epochs, scheme name, ...).
        """
        self.id = client_id
        self.dataloaders = dataloaders
        self.config = config
        self.device = config['device']

        # Report local data sizes for logging/debugging.
        train_samples = len(self.dataloaders['train'].dataset)
        if self.dataloaders['calib'] is not None:
            calib_samples = len(self.dataloaders['calib'].dataset)
            print(f"Client {self.id}: {train_samples} train samples, {calib_samples} calib samples.")
        else:
            print(f"Client {self.id}: {train_samples} train samples (no calib split).")

    def train_local_classifier(self, base_model):
        """
        Train the classification head on this client's local training data.

        NOTE: `base_model` is trained *in place* — it is NOT copied. Schemes 2
        and 3 depend on this mutation: they calibrate the same model object
        right after training. The CLIP vision encoder is frozen; only the
        classifier head receives gradients.

        Args:
            base_model: Model holding a frozen `vision_model` (CLIP encoder)
                and a trainable `classifier` head.

        Returns:
            dict: `state_dict()` of the trained classifier head.
        """
        print(f"\nClient {self.id}: Training local classifier...")

        # Move the (shared) model to this client's device; training happens
        # in place on purpose — see the docstring NOTE above.
        local_model = base_model.to(self.device)

        # Freeze the CLIP encoder; train only the classifier head.
        for param in local_model.vision_model.parameters():
            param.requires_grad = False
        for param in local_model.classifier.parameters():
            param.requires_grad = True

        optimizer = optim.Adam(local_model.classifier.parameters(), lr=self.config['classifier_lr'])
        criterion = nn.CrossEntropyLoss()

        local_model.train()
        for epoch in range(self.config['classifier_epochs']):
            total_loss = 0
            num_batches = 0

            for images, labels in tqdm(self.dataloaders['train'], desc=f"Client {self.id} Epoch {epoch+1}",
                                      mininterval=3.0):
                images, labels = images.to(self.device), labels.to(self.device)

                optimizer.zero_grad()
                logits = local_model(images)
                loss = criterion(logits, labels)
                loss.backward()
                optimizer.step()

                total_loss += loss.item()
                num_batches += 1

            # Guard against an empty train loader (avoids ZeroDivisionError).
            avg_loss = total_loss / max(num_batches, 1)
            print(f"Client {self.id} Epoch {epoch+1} avg loss: {avg_loss:.4f}")

        # Return the trained classifier-head parameters.
        return local_model.classifier.state_dict()

    def execute_scheme(self, global_model, global_classifier_params=None):
        """
        Dispatch to the client logic for the configured scheme.

        Args:
            global_model: Global model (holds the CLIP encoder + classifier).
            global_classifier_params: Aggregated classifier-head parameters;
                only supplied in round 2 of scheme 2.

        Returns:
            dict: Artifacts for server aggregation (keys vary per scheme).

        Raises:
            ValueError: If `config['scheme']` is not a known scheme name.
        """
        scheme = self.config['scheme']

        if scheme == 'scheme1':
            return self._execute_scheme1(global_model)
        elif scheme == 'scheme2':
            if global_classifier_params is None:
                # Scheme 2, round 1: train the local classifier head.
                return self._execute_scheme2_round1(global_model)
            else:
                # Scheme 2, round 2: calibrate using the global classifier.
                return self._execute_scheme2_round2(global_model, global_classifier_params)
        elif scheme == 'scheme3':
            return self._execute_scheme3(global_model)
        else:
            raise ValueError(f"Unknown scheme: {scheme}")

    def _train_and_collect(self, global_model):
        """Train the local classifier head and package it with the local
        training sample count (shared by scheme 1 and scheme 2 round 1)."""
        return {
            "classifier_params": self.train_local_classifier(global_model),
            "num_samples": len(self.dataloaders['train'].dataset),
        }

    def _fit_local_calibrator(self, model):
        """Fit a BCTS calibrator on the calibration split and return its
        parameters as detached tensor copies (safe to ship to the server)."""
        calib_logits, calib_labels = get_logits_and_labels(model, self.dataloaders['calib'], self.device)
        calibrator = BiasCorrectedTS(num_classes=self.config['num_classes'])
        calibrator.fit(calib_logits, calib_labels, max_iter=self.config['calibrator_max_iter'])
        return {
            'temperature': calibrator.temperature.detach().clone(),
            'bias': calibrator.bias.detach().clone(),
        }

    def _execute_scheme1(self, global_model):
        """
        Scheme 1: traditional FedAvg.
        - Train the classifier head on all local training data.
        - Return only the head parameters; no calibration is performed.
        """
        print(f"\nClient {self.id}: Executing Scheme 1 (Traditional FedAvg)")
        return self._train_and_collect(global_model)

    def _execute_scheme2_round1(self, global_model):
        """
        Scheme 2, round 1: train the local classifier head and upload it.
        """
        print(f"\nClient {self.id}: Executing Scheme 2 Round 1 (Train Local Classifier)")
        return self._train_and_collect(global_model)

    def _execute_scheme2_round2(self, global_model, global_classifier_params):
        """
        Scheme 2, round 2: calibrate using the aggregated global classifier.
        """
        print(f"\nClient {self.id}: Executing Scheme 2 Round 2 (Calibration with Global Classifier)")

        # Install the aggregated global classifier head before calibrating.
        global_model.classifier.load_state_dict(global_classifier_params)

        # Fit BCTS on the calibration split.
        local_calib_params = self._fit_local_calibrator(global_model)

        # Local class priors = mean of calibrated predictive probabilities.
        local_priors = self._compute_local_priors_with_calibration(global_model, local_calib_params)

        # Total local sample count (train + calib splits).
        num_samples = len(self.dataloaders['train'].dataset) + len(self.dataloaders['calib'].dataset)

        return {
            "calib_params": local_calib_params,
            "local_priors": local_priors,
            "num_samples": num_samples
        }

    def _execute_scheme3(self, global_model):
        """
        Scheme 3: single communication round.
        - Train the local classifier head.
        - Calibrate with that freshly trained local head.
        - Return head parameters AND calibration artifacts together.
        """
        print(f"\nClient {self.id}: Executing Scheme 3 (One-Round Communication)")

        # 1. Train the local classifier head (mutates global_model in place,
        #    so the calibration below sees the trained head).
        classifier_params = self.train_local_classifier(global_model)

        # 2. Fit BCTS on the calibration split using the trained local head.
        local_calib_params = self._fit_local_calibrator(global_model)

        # 3. Local class priors = mean of calibrated predictive probabilities.
        local_priors = self._compute_local_priors_with_calibration(global_model, local_calib_params)

        # Total local sample count (train + calib splits).
        num_samples = len(self.dataloaders['train'].dataset) + len(self.dataloaders['calib'].dataset)

        return {
            "classifier_params": classifier_params,
            "calib_params": local_calib_params,
            "local_priors": local_priors,
            "num_samples": num_samples
        }

    def _compute_local_priors_with_calibration(self, model, calib_params):
        """
        Estimate the local class priors as the average of the calibrated
        predictive probabilities, matching the paper's formulation.

        Args:
            model: Model whose logits are calibrated (classifier already set).
            calib_params (dict): 'temperature' and 'bias' tensors from BCTS.

        Returns:
            torch.Tensor: Per-class prior estimate, shape [num_classes].
        """
        print(f"Client {self.id}: Computing local priors using calibrated predictions...")

        # Prefer the held-out calibration split (matches the paper's
        # algorithm); fall back to the training split when no calibration
        # split exists (e.g. scheme 1).
        calib_loader = self.dataloaders['calib']
        loader = calib_loader if calib_loader is not None else self.dataloaders['train']

        logits, _ = get_logits_and_labels(model, loader, self.device)

        # BCTS calibration: scale logits by 1/T and shift by a per-class bias.
        calibrated_logits = logits / calib_params['temperature'] + calib_params['bias']
        calibrated_probs = torch.softmax(calibrated_logits, dim=1)  # [N, num_classes]

        # Mean over samples gives the prior estimate.  [num_classes]
        local_priors = calibrated_probs.mean(dim=0)
        total_samples = calibrated_probs.shape[0]

        print(f"Client {self.id}: Used {total_samples} samples for prior estimation")
        print(f"Client {self.id}: Local Priors (calibrated): {np.round(local_priors.detach().cpu().numpy(), 3)}")

        return local_priors
