import torch
import torch.nn as nn
import torch.optim as optim
from model.dbmodel import dbmodel
from torch.utils.data import DataLoader, random_split, TensorDataset


def load_data():
    """Build a toy classification dataset of random tensors.

    Returns a ``TensorDataset`` of 1000 samples: 784-dim float features and
    integer class labels in [0, 10). Placeholder data — replace with a real
    dataset in production.
    """
    features = torch.randn(1000, 784)
    labels = torch.randint(0, 10, (1000,))
    return TensorDataset(features, labels)


def split_dataset_into_clients(dataset, num_clients):
    """Randomly partition `dataset` into `num_clients` disjoint subsets.

    Bug fix: `random_split` requires the lengths to sum exactly to
    `len(dataset)`, so the original `[len // n] * n` raised a ValueError
    whenever the dataset size was not divisible by `num_clients`. The
    remainder is now spread one extra sample each over the first clients;
    behavior is unchanged in the evenly-divisible case.

    Args:
        dataset: any torch Dataset.
        num_clients: number of partitions (> 0).

    Returns:
        A list of `num_clients` Subset objects covering the whole dataset.
    """
    base, remainder = divmod(len(dataset), num_clients)
    lengths = [base + 1 if i < remainder else base for i in range(num_clients)]
    return random_split(dataset, lengths)


# Model evaluation
def evaluate_model(model, test_loader):
    """Compute top-1 classification accuracy of `model` on `test_loader`.

    Args:
        model: module whose forward returns per-class scores/logits.
        test_loader: iterable yielding (inputs, labels) batches.

    Returns:
        Fraction of correctly classified samples in [0, 1]; 0.0 if the
        loader is empty (the original raised ZeroDivisionError).
    """
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        # Unused enumerate index removed; iterate the batches directly.
        for x, y in test_loader:
            output = model(x)
            # Deprecated `.data` access dropped — safe under no_grad().
            _, predicted = torch.max(output, 1)
            total += y.size(0)
            correct += (predicted == y).sum().item()
    return correct / total if total else 0.0


# 客户端训练
def fldbtrain(train_data, model, loss_fn, db_Yh_fn, n_eporch):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    total_loss, total_acc, n = 0.0, 0.0, 0
    # print(len(train_data), '11111111111')
    # exit()
    for batch, (x, y) in enumerate(train_data):
        # print(batch)

        x, y = x.to(device), y.to(device)
        model.set_val(n_eporch, batch)
        output = model(x)
        loss = loss_fn(output, y.long())
        maxValue, pred_idx = torch.max(output, axis=1)
        cur_acc = torch.sum(y == pred_idx) / output.shape[0]
        db_Yh_fn.zero_grad()  # x.grad=0，x是output,y
        loss.backward()
        db_Yh_fn.step()
        total_loss += loss.item()
        total_acc += cur_acc.item()
        n = n + 1
    return model.state_dict(), total_acc / n, (total_loss / n) - 0.3


# Central-server aggregation (FedAvg, equal client weights)
def aggregate_parameters(client_params_list):
    """Average per-key tensors across client state_dicts.

    Bug fix: `torch.mean` is undefined for integer dtypes, so the original
    crashed on any state_dict containing non-float entries (e.g. BatchNorm's
    int64 `num_batches_tracked`). Non-float tensors are now carried over
    from the first client; floating-point tensors are averaged as before.

    Args:
        client_params_list: non-empty list of state_dicts sharing identical
            keys and per-key tensor shapes.

    Returns:
        A new state_dict: element-wise mean for floating-point entries,
        a clone of the first client's tensor for everything else.
    """
    first = client_params_list[0]
    aggregated_params = {}
    for key, ref in first.items():
        if ref.is_floating_point():
            aggregated_params[key] = torch.stack(
                [params[key] for params in client_params_list]
            ).mean(dim=0)
        else:
            aggregated_params[key] = ref.clone()
    return aggregated_params
