import os
import copy
import pickle
import numpy as np
import networkx as nx
import stumpy
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import precision_recall_fscore_support
import matplotlib.pyplot as plt
from adsketch.utils import *
import multiprocessing
from sklearn.preprocessing import MinMaxScaler
import logging
from adsketch.motif_operations import *
from dataset_loader import *

# 模拟客户端类
# Simulated federated-learning client: holds its own data shard and runs
# local offline anomaly-detection training.
class Client:
    # Default directory for per-client offline artifacts (pattern pickle + figure).
    # Kept as the previously hard-coded path for backward compatibility.
    DEFAULT_OFFLINE_DIR = r'F:\2025\fed\AD-git\ad-fed\client_offline'

    def __init__(self, client_id, train_data, test_data, train_labels, test_labels):
        """Store this client's id and its train/test data and labels."""
        self.client_id = client_id
        self.train_data = train_data
        self.test_data = test_data
        self.train_labels = train_labels
        self.test_labels = test_labels

    def client_train(self, m, p, offline_dir=None):
        """Run offline anomaly-detection training on this client's shard.

        Args:
            m: pattern-length parameter forwarded to offline_anomaly_detection.
            p: threshold parameter forwarded to offline_anomaly_detection.
            offline_dir: directory for the pattern pickle and the figure;
                defaults to DEFAULT_OFFLINE_DIR (same path as before, so
                existing callers are unaffected).

        Returns:
            Whatever offline_anomaly_detection returns — expected to be a
            5-tuple (see Server.receive_client_results).
        """
        base_dir = offline_dir if offline_dir is not None else self.DEFAULT_OFFLINE_DIR
        # Full path of the pickled offline pattern for this client.
        offline_pattern_dir = os.path.join(base_dir,
                                           f'client_{self.client_id}_offline_pattern.pkl')
        # Full path of the offline visualization figure for this client.
        fig_dir = os.path.join(base_dir,
                               f'client_{self.client_id}_offline_fig.png')
        result = offline_anomaly_detection(m, p, self.train_data, self.test_data, self.test_labels,
                                           offline_pattern_dir, fig_dir)
        # logging instead of print, for consistency with the rest of the file.
        logging.info(f"客户端接收到的result类型：{type(result)}，元素个数：{len(result)}")
        return result


# 模拟服务端类
# Simulated federation server: collects and reports per-client training results.
class Server:
    # Expected element count of a client result tuple:
    # (anomalous_subseqs, anomalous_clusters, cluster_sizes,
    #  cluster_centers, cluster_radii)
    EXPECTED_RESULT_LEN = 5

    def __init__(self):
        # One entry per client, in arrival order; the list index doubles
        # as the client id in print_client_results / get_client_test_labels.
        self.client_results = []

    def receive_client_results(self, client_result):
        """Validate and store one client's result tuple.

        A malformed result is logged as an error but still stored, so the
        list indices (used as client ids) stay aligned with arrival order.
        """
        if len(client_result) != self.EXPECTED_RESULT_LEN:
            logging.error(
                f"接收到的客户端 {len(self.client_results)} 结果不符合预期格式，元素个数为 {len(client_result)}")
        self.client_results.append(client_result)

    def print_client_results(self):
        """Log a summary and evaluation metrics for every stored client result.

        NOTE(review): relies on the module-level globals `m` (pattern-length
        parameter) and `evaluate` being defined at call time — confirm this
        coupling is intended.
        """
        logging.info("服务端接收到的客户端训练结果：")
        for client_id, result in enumerate(self.client_results):
            anomalous_subseqs, anomalous_clusters, cluster_sizes, cluster_centers, cluster_radii = result
            precision, recall, f1 = evaluate(m, anomalous_subseqs, self.get_client_test_labels(client_id))
            logging.info(f"客户端 {client_id} 训练结果：")
            logging.info(f"  异常子序列数量: {len(anomalous_subseqs)}")
            logging.info(f"  异常聚类数量: {len(anomalous_clusters)}")
            # Bug fix: metrics were computed but never reported — log them.
            logging.info(f"  precision: {precision:.4f}, recall: {recall:.4f}, f1: {f1:.4f}")

    def get_client_test_labels(self, client_id):
        # NOTE(review): reads the module-level `clients` list; a more robust
        # design would capture the labels on the server when results arrive.
        return clients[client_id].test_labels


if __name__ == '__main__':
    # Basic logging setup (extend the configuration as needed).
    logging.basicConfig(level=logging.INFO)

    # Load the aiops_18 series for one metric (real loading logic simplified).
    metric_name = 'f0932edd'  # adjust for the dataset actually in use
    train_metric_values, train_metric_labels, test_metric_values, test_metric_labels = load_aiops18_data(metric_name)

    # Split the series evenly into one contiguous shard per client
    # (simple index-based split; refine if the data layout requires it).
    num_clients = 4
    train_shard = len(train_metric_values) // num_clients
    test_shard = len(test_metric_values) // num_clients

    clients = []
    for cid in range(num_clients):
        tr_lo, tr_hi = cid * train_shard, (cid + 1) * train_shard
        te_lo, te_hi = cid * test_shard, (cid + 1) * test_shard
        clients.append(Client(cid,
                              train_metric_values[tr_lo:tr_hi],
                              test_metric_values[te_lo:te_hi],
                              train_metric_labels[tr_lo:tr_hi],
                              test_metric_labels[te_lo:te_hi]))

    # Anomaly-detection hyperparameters (example values; tune as needed).
    m = 10  # pattern-length parameter
    p = 90  # threshold parameter

    # Simulated server instance.
    server = Server()

    # Each client trains locally and ships its result to the server.
    for client in clients:
        server.receive_client_results(client.client_train(m, p))

    # Server logs everything it received.
    server.print_client_results()

    # Follow-up aggregation of the client results (e.g. a server-side
    # aggregation function) can be added here as needed.
    logging.info("客户端训练及服务端接收结果完成，可根据需求进一步处理结果。")
