# main.py

from utils import *
from encrypt import *
from model import *
from preprocess import *
from expansion import *
from generator import *
from const import *
from server import FederatedServer
from client import FederatedClient
import math
import numpy as np
import random
import torch
from torch.utils.data import DataLoader
import os
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

# NOTE: adjust this to the correct dataset .mat file path for your environment.
path_dataset = 'D:\学习项目汇总\实验数据集\Flixster\\training_test_dataset_10_NNs.mat'
# Alternative dataset paths used during experiments:
#   D:\学习项目汇总\实验数据集\Flixster\\training_test_dataset_10_NNs.mat
#   D:\学习项目汇总\实验数据集\ml-100k\\split_1.mat
#   D:\学习项目汇总\实验数据集\Yahoo\\training_test_dataset_10_NNs.mat
def count_valid_clients(available_clients, usernei):
    """
    Return the subset of ``available_clients`` that are valid.

    A client is considered valid (see ``is_valid_client``) when it has at
    least two item interactions and its first two interactions differ.
    """
    # Filter the candidate ids through the per-client validity check.
    return [cid for cid in available_clients if is_valid_client(cid, usernei)]


def is_valid_client(client_id, usernei):
    """
    Check whether a client is valid.

    A client is valid when its interaction history contains at least two
    items and the first two interacted items are different.
    """
    history = usernei[client_id]
    # Both conditions are plain booleans, so the expression itself is the answer.
    return len(history) >= 2 and history[0] != history[1]


def select_clients(num_selected, NUM_CLIENTS, x):
    """
    Randomly pick ``num_selected`` distinct clients from ``x`` without
    replacement.

    If ``x`` holds fewer than ``num_selected`` clients, the shortfall is
    filled deterministically with the lowest ids from ``range(NUM_CLIENTS)``
    that have not been selected yet.

    Args:
        num_selected: Number of clients to select.
        NUM_CLIENTS: Total number of clients (used only for the fallback fill).
        x: Pool of candidate client ids to draw from.

    Returns:
        tuple: ``(selected, available_clients)`` where ``available_clients``
        is ``x`` minus the randomly chosen clients, in the original order.
    """
    selected = []

    # Work on a copy so the caller's list is never mutated.
    available_clients = x[:]

    # Phase 1: draw without replacement from the candidate pool.
    # (The while condition already bounds len(selected); no extra break needed.)
    while len(selected) < num_selected and available_clients:
        client_id = random.choice(available_clients)
        selected.append(client_id)
        available_clients.remove(client_id)

    # Phase 2: if the pool ran dry, top up with the smallest unselected ids.
    if len(selected) < num_selected:
        chosen = set(selected)  # O(1) membership tests instead of O(n) list scans
        for client_id in range(NUM_CLIENTS):
            if client_id not in chosen:
                selected.append(client_id)
                chosen.add(client_id)
                if len(selected) == num_selected:
                    break

    return selected, available_clients


if __name__ == "__main__":
    # 1. Fix the random seeds for reproducibility (optional)
    random.seed(42)
    np.random.seed(42)
    torch.manual_seed(42)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(42)

    # 2. Pick the compute device (first CUDA GPU if available, else CPU)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # 3. Load the data from the .mat file
    M = load_matlab_file(path_dataset, 'M')            # User-item interaction matrix
    Otraining = load_matlab_file(path_dataset, 'Otraining')  # Training interaction data matrix
    Otest = load_matlab_file(path_dataset, 'Otest')          # Testing interaction data matrix
    print('There are %i interactions logs.' % np.sum(np.array(np.array(M, dtype='bool'), dtype='int32')))

    # 4. Preprocessing
    usernei = generate_history(Otraining)  # Build each user's interaction history (item neighbours)
    #print(f"[DEBUG] Generated history (usernei): {usernei[:5]}")

    trainu, traini, trainlabel, train_user_index = generate_training_data(Otraining, M)  # user ids, item ids, ratings, per-user interaction index
    testu, testi, testlabel = generate_test_data(Otest, M)

    unique_train_users = len(train_user_index)


    # 5. Initialise the global model & the federated server
    num_users, num_items = Otraining.shape[0], Otraining.shape[1]
    NUM_CLIENTS = unique_train_users
    NUM_ROUNDS = 500   # Maximum number of federated rounds
    PATIENCE = 10    # Rounds without improvement before early stopping
    SELECTED_CLIENTS_PER_ROUND = 128

    # Create the global model
    # NOTE(review): the +3 padding on num_users/num_items presumably reserves
    # extra embedding slots (e.g. padding/unknown ids) — confirm against model.py.
    global_model = GraphRecommendationModel(
        num_users=num_users + 3,
        num_items=num_items + 3,
        hidden_dim=HIDDEN
    ).to(device)

    # Create the server that holds and aggregates the global model
    server = FederatedServer(global_model, device=device)
    print("[INFO] Global model and server initialized.")

    # 6. Split the training data across clients
    data = list(zip(trainu, traini, trainlabel))
    client_data_splits = split_data_for_clients(data, NUM_CLIENTS)
    print(f"[INFO] Data split into {NUM_CLIENTS} clients.")

    # 7. Build neighbour embeddings from the initial user-embedding weights
    user_neighbor_emb = graph_embedding_expansion(
        Otraining,
        usernei,
        global_model.user_embedding.weight.data.cpu().numpy()
    )
    print(f"Shape of user_neighbor_emb: {user_neighbor_emb.shape}")

    # 8. Generate local training batches for every client
    train_batches = [
        generate_local_batches(client_data, BATCH_SIZE,  user_neighbor_emb, usernei)
        for client_data in client_data_splits
    ]
    print(f"[INFO] Training batches generated for each client.")


    # NOTE: clients could additionally incorporate user interaction history
    # here to enrich the user embeddings.


    # 9. Initialise the clients, each with its own local model copy
    clients = []
    for i in range(NUM_CLIENTS):
        client_obj = FederatedClient(
            client_id=i,
            local_data={'batches': train_batches[i]},
            model=GraphRecommendationModel(num_users=num_users + 3,
                                           num_items=num_items + 3,
                                           hidden_dim=HIDDEN).to(device),
            device=device,
            user_neighbor_emb= user_neighbor_emb
        )
        clients.append(client_obj)

    print(f"[INFO] {NUM_CLIENTS} clients initialized successfully.")

    # Early-stopping bookkeeping
    best_loss = float('inf')
    early_stop_counter = 0

    # Candidate client pool: only "valid" clients (>= 2 distinct leading
    # interactions) are ever eligible for selection.
    available_clients = list(range(NUM_CLIENTS))
    x=count_valid_clients(available_clients, usernei)
    print(x)

    # 10. Federated training loop
    for round_num in range(NUM_ROUNDS):
        print(f"\n[Round {round_num + 1}] Starting training...")

        # 10.1 Broadcast the current global model parameters to clients
        global_params = server.broadcast_model_params()

        # 10.2 Randomly select a subset of clients for this round.
        # NOTE(review): the returned available_clients is overwritten each
        # round but never fed back into select_clients — every round draws
        # from the full valid pool x. Confirm this is intentional.
        selected_clients_ids, available_clients = select_clients(
            SELECTED_CLIENTS_PER_ROUND, NUM_CLIENTS,x
        )
        print(f"[INFO] Selected clients for this round: {selected_clients_ids}")

        # 10.3 Train locally on each selected client and collect the
        # updated model parameters and gradients.
        all_client_params = []
        all_client_gradients = []  # per-client gradients for aggregation
        for client_id in selected_clients_ids:
            client = clients[client_id]
            updated_params, updated_gradients = client.train(
                global_model_params=global_params
            )
            all_client_params.append(updated_params)
            all_client_gradients.append(updated_gradients)  # keep this client's gradients

        # 10.4 Server-side aggregation updates the global model
        server.aggregate_parameters_and_gradients(all_client_params, all_client_gradients)

        # 10.5 Evaluate the global model on the held-out test set
        server.global_model.eval()
        test_dataset = CustomDataset(testu, testi, testlabel, usernei, user_neighbor_emb)
        test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)

        round_loss = 0
        with torch.no_grad():
            for (user_ids, item_ids, history, neighbor_emb), labels in test_loader:
                user_ids = user_ids.long().to(device)
                item_ids = item_ids.long().to(device)
                history = history.long().to(device)
                neighbor_emb = neighbor_emb.float().to(device)
                labels = labels.to(device)

                output = server.global_model(user_ids, item_ids, history, neighbor_emb, flag=0)
                loss = torch.nn.functional.mse_loss(output, labels)
                round_loss += loss.item()

        # Mean of per-batch MSE losses (not weighted by batch size)
        round_loss /= len(test_loader)
        print(f"[Round {round_num + 1}] Average Loss: {round_loss}")

        # 10.6 Early-stopping logic: stop after PATIENCE rounds without improvement
        if round_loss < best_loss:
            best_loss = round_loss
            early_stop_counter = 0
            print(f"[INFO] New best loss: {best_loss}")
        else:
            early_stop_counter += 1
            print(f"[INFO] No improvement. Early stop counter: {early_stop_counter}/{PATIENCE}")

        if early_stop_counter >= PATIENCE:
            print(f"[INFO] Early stopping triggered after {round_num + 1} rounds.")
            break

    # Final report: best MSE observed, and its RMSE
    print(f"TestSet mse loss: {best_loss}")
    rmse_loss = math.sqrt(best_loss)
    print(f"TestSet rmse loss: {rmse_loss}")