import torch
import torch.nn as nn
import torch.optim as optim
from knowledge_distillation import kl_divergence_loss
from pruning import personalized_pruning
from utils import calculate_accuracy

class Client:
    """Federated-learning client holding a dual-model pair (Sec. 3.2.1).

    The *personalized* model is trained on the client's private data and
    pruned per-client; the *exchange* model is distilled from it on a
    shared public dataset and is what gets communicated. A third copy,
    ``global_model``, receives the server's aggregated state.

    Args:
        client_id: Identifier of this client.
        train_data: Iterable of ``(x, y)`` training batches.
        test_data: Iterable of ``(x, y)`` evaluation batches.
        config: Dict with at least ``'device'``. Optional keys:
            ``'public_dataset'`` (tensor batch for distillation),
            ``'lr'`` (default 0.001), ``'local_epochs'`` (default 3).
    """

    def __init__(self, client_id, train_data, test_data, config):
        self.client_id = client_id
        self.train_data = train_data
        self.test_data = test_data
        self.config = config
        self.device = config['device']

        # Dual-model initialization (Sec. 3.2.1). All three copies start
        # from the same architecture/initialization scheme as the server.
        self.personalized_model = self.initialize_model()
        self.exchange_model = self.initialize_model()
        # Target for the server's broadcast; filled by receive_global_model().
        self.global_model = self.initialize_model()
        self.cluster_id = None

        # Public dataset used for knowledge distillation.
        # NOTE(review): assumed to be a single tensor batch shaped like the
        # model input (N, 1, 80) — confirm against whoever builds config.
        self.public_dataset = config.get('public_dataset')

        lr = config.get('lr', 0.001)
        self.optimizer = optim.Adam(
            self.personalized_model.parameters(),
            lr=lr,
        )
        # Was previously referenced but never created — caused AttributeError
        # the first time update_exchange_model() ran.
        self.exchange_optimizer = optim.Adam(
            self.exchange_model.parameters(),
            lr=lr,
        )
        self.criterion = nn.CrossEntropyLoss()

        # Per-round test accuracy, appended by evaluate().
        self.accuracy_history = []
        # Mean softmax output over the test set; None until evaluate() runs.
        self.prediction_distribution = None

    def initialize_model(self):
        """Build the shared CNN architecture (same init as the server).

        Expects input of shape (N, 1, 80): Conv1d(k=5) -> 76, MaxPool(2)
        -> 38, hence the 32*38 flattened features.

        Returns:
            The model moved to ``self.device``.
        """
        model = torch.nn.Sequential(
            torch.nn.Conv1d(1, 32, kernel_size=5),
            torch.nn.ReLU(),
            torch.nn.MaxPool1d(2),
            torch.nn.Flatten(),
            torch.nn.Linear(32 * 38, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 2),
        )
        return model.to(self.device)

    def local_train(self):
        """Run one local round: prune, train, distill, evaluate."""
        # Personalized pruning (Sec. 3.2.1) — adapts the model to this
        # client's data before training.
        personalized_pruning(self.personalized_model, self.train_data)

        # Local SGD loop on the private data.
        self.personalized_model.train()
        for epoch in range(self.config.get('local_epochs', 3)):
            for x, y in self.train_data:
                x, y = x.to(self.device), y.to(self.device)
                self.optimizer.zero_grad()
                outputs = self.personalized_model(x)
                loss = self.criterion(outputs, y)
                loss.backward()
                self.optimizer.step()

        # Distill the freshly-trained personalized model into the
        # exchange model that will be shared.
        self.update_exchange_model()

        # Record this round's test performance.
        self.evaluate()

    def update_exchange_model(self):
        """Distill the personalized model into the exchange model (Sec. 3.2.1).

        One KD step on the public dataset. No-op if no public dataset
        was provided in the config.
        """
        if self.public_dataset is None:
            return

        self.exchange_model.train()
        # Teacher is frozen for distillation: no gradients flow into the
        # personalized model here.
        with torch.no_grad():
            teacher_outputs = self.personalized_model(self.public_dataset)
        student_outputs = self.exchange_model(self.public_dataset)

        # zero_grad() was previously missing, so gradients would have
        # accumulated across calls.
        self.exchange_optimizer.zero_grad()
        kd_loss = kl_divergence_loss(teacher_outputs, student_outputs)
        kd_loss.backward()
        self.exchange_optimizer.step()

    def receive_global_model(self, global_state_dict, cluster_id):
        """Load the server-broadcast weights and remember our cluster.

        Args:
            global_state_dict: state_dict of the aggregated global model.
            cluster_id: Cluster this client was assigned to by the server.
        """
        self.global_model.load_state_dict(global_state_dict)
        self.cluster_id = cluster_id

    def evaluate(self):
        """Evaluate the personalized model on the local test set.

        Appends accuracy to ``self.accuracy_history`` and stores the mean
        softmax output in ``self.prediction_distribution``.
        """
        self.personalized_model.eval()
        correct = 0
        total = 0
        all_preds = []

        with torch.no_grad():
            for x, y in self.test_data:
                x, y = x.to(self.device), y.to(self.device)
                outputs = self.personalized_model(x)
                _, predicted = torch.max(outputs.data, 1)
                total += y.size(0)
                correct += (predicted == y).sum().item()
                all_preds.append(outputs.softmax(dim=1))

        # Guard against an empty test loader (previously ZeroDivisionError).
        accuracy = correct / total if total else 0.0
        self.accuracy_history.append(accuracy)
        if all_preds:
            self.prediction_distribution = torch.cat(all_preds).mean(dim=0)

    def calculate_stability(self):
        """Return |acc_t - acc_{t-1}| as the stability signal (Algorithm 1).

        Returns 0 until at least two evaluations have been recorded.
        """
        if len(self.accuracy_history) < 2:
            return 0
        return abs(self.accuracy_history[-1] - self.accuracy_history[-2])
