import os

import torch
from fl.config import Config
import torch.nn as nn
import torch.optim as optim
from fl.util.ipfs import IPFSManager
import numpy as np
from torchvision import datasets, transforms
from typing import List, Tuple
from torch.utils.data import Dataset, DataLoader
from ultralytics import YOLO





class FedChainClient:
    """Federated-learning client around a YOLOv8 model.

    Trains a local YOLOv8 model on a KITTI-style dataset, serializes its
    weights as numpy arrays for exchange, and publishes/fetches them via an
    IPFS client. Also hosts the aggregation-side helpers used to combine
    (`aggregate`, FedAvg) and filter (`krum_selection`, Krum) client updates.
    """

    def __init__(self, config: "Config", ipfs_client: "IPFSManager"):
        # Prefer GPU when available; the model is moved to this device.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.config = config
        # Guards against publishing the same local update twice in a round.
        self.is_uploaded = False

        # Initialize the YOLOv8 model from the pretrained checkpoint.
        # NOTE(review): assumes the ultralytics YOLO wrapper delegates `.to()`
        # to its underlying nn.Module — confirm for the pinned version.
        self.model = YOLO('fl/yolov8_kitti_model/yolov8s.pt').to(self.device)

        # IPFS handle and dataset locations (YAML spec + image root).
        self.ipfs_client = ipfs_client
        self.train_data_file = config.DATASET_YAML_PATH
        self.train_data_path = config.DATASET_PATH

    def get_parameters(self) -> List[np.ndarray]:
        """Return the current model weights as numpy arrays.

        One array per state-dict entry, in state-dict iteration order —
        the same order `set_parameters` expects.
        """
        return [val.cpu().numpy() for val in self.model.state_dict().values()]

    def set_parameters(self, parameters: List[np.ndarray]) -> None:
        """Load `parameters` (ordered as in `get_parameters`) into the model.

        `strict=True` makes the load fail loudly if the parameter list does
        not exactly match the model's state dict.
        """
        keyed = zip(self.model.state_dict().keys(), parameters)
        state_dict = {name: torch.tensor(value) for name, value in keyed}
        self.model.load_state_dict(state_dict, strict=True)

    def fit(self, parameters, epochs: int = 100, imgsz: int = 640):
        """Run one round of local training starting from `parameters`.

        Args:
            parameters: global weights to start from (see `set_parameters`).
            epochs: local training epochs (default matches the previous
                hard-coded value, so the signature is backward-compatible).
            imgsz: training image size passed to YOLO.

        Returns:
            Tuple of (updated weights, local dataset size) — the size is the
            FedAvg weighting factor for this client.
        """
        self.set_parameters(parameters)
        # ultralytics' YOLO.train() launches a full training run on the
        # dataset described by the YAML file.
        self.model.train(data=self.train_data_file, epochs=epochs, imgsz=imgsz)
        return self.get_parameters(), self.get_dataset_size()

    def aggregate(self, results: List[Tuple[List[np.ndarray], int]]) -> List[np.ndarray]:
        """FedAvg: weighted average of client weight lists.

        Each entry of `results` is (weight arrays, sample count); every
        client contributes proportionally to its share of the total samples.

        Raises:
            ValueError: if `results` is empty or all sample counts are zero
                (previously an uncaught IndexError / ZeroDivisionError).
        """
        if not results:
            raise ValueError("aggregate() requires at least one client result")
        total_samples = sum(count for _, count in results)
        if total_samples == 0:
            raise ValueError("total sample count is zero; cannot weight clients")

        # Accumulate in float64 buffers shaped like the first client's layers.
        averaged = [np.zeros_like(layer, dtype=np.float64) for layer in results[0][0]]
        for weights, count in results:
            factor = count / total_samples
            for i, layer in enumerate(weights):
                averaged[i] += layer * factor
        return averaged

    def krum_selection(self, results: List[Tuple[List[np.ndarray], str]], num_to_select: int) -> Tuple[List[np.ndarray], str]:
        """Krum: return the single update closest to its nearest neighbours.

        Scores each client by the sum of Euclidean distances to its
        `num_to_select` nearest *other* clients and returns the entry with
        the lowest score (Blanchard et al., 2017).

        Bug fix: the previous version summed `sorted_distances[:num_to_select]`,
        which always included the zero self-distance and therefore counted
        only `num_to_select - 1` real neighbours. We now skip index 0 (self)
        and sum exactly `num_to_select` neighbour distances.
        """
        n = len(results)
        distances = np.zeros((n, n))

        # Pairwise Euclidean distance between flattened weight lists
        # (summed layer-by-layer); the matrix is symmetric with zero diagonal.
        for i in range(n):
            for j in range(i + 1, n):
                dist = sum(np.linalg.norm(a - b)
                           for a, b in zip(results[i][0], results[j][0]))
                distances[i, j] = distances[j, i] = dist

        # Score = sum of the num_to_select smallest neighbour distances,
        # excluding the self-distance at sorted position 0.
        scores = np.zeros(n)
        for i in range(n):
            nearest = np.sort(distances[i])[1:num_to_select + 1]
            scores[i] = np.sum(nearest)

        # Lowest score wins; argmin breaks ties by first occurrence, exactly
        # like the previous np.argsort(scores)[0].
        best = int(np.argmin(scores))
        return results[best]

    def lock_upload(self):
        """Mark this round's local update as already uploaded."""
        self.is_uploaded = True

    def unlock_upload(self):
        """Allow the next local update to be uploaded."""
        self.is_uploaded = False

    def get_dataset_size(self) -> int:
        """Count the training images under <DATASET_PATH>/images/train.

        Only regular files are counted; subdirectories are ignored.
        """
        train_images_dir = os.path.join(self.train_data_path, 'images', 'train')
        return sum(
            1 for name in os.listdir(train_images_dir)
            if os.path.isfile(os.path.join(train_images_dir, name))
        )
