import numpy as np
import tensorflow as tf
import copy
import time  # For potential timing print "耗时"

# Import necessary functions from your adaptive_attack.py
# Make sure adaptive_attack.py is in the same directory or accessible in PYTHONPATH
from adaptive_attack import craft_heuristic_malicious_updates, flatten_weights, get_shapes_and_dtypes


class Server:
    """Federated-learning server with FLTrust-style trust-weighted aggregation.

    The server owns the global Keras model. Each round it collects weight
    deltas from clients, scores each delta by its cosine similarity with a
    trusted root client's delta (clipped at zero), rescales every accepted
    delta to the root delta's norm, and applies the trust-weighted average
    to the global weights with server learning rate ``alpha``.
    """

    def __init__(self, model_factory, select_client, iteration, alpha, beta):
        """Build the global model and store training hyper-parameters.

        Args:
            model_factory: Zero-argument callable returning a fresh Keras model.
            select_client: Number of clients selected per round.
            iteration: Total number of global training rounds.
            alpha: Server-side learning rate applied to the aggregated delta.
            beta: Learning rate for the compiled optimizer; only relevant if the
                server model itself were fit directly (clients use their own
                optimizers inside client.train()).
        """
        self._model = model_factory()
        self.nselect_client = select_client  # number of clients to select each round
        self.global_itera = iteration        # total number of global training rounds
        self.alpha = alpha                   # server-side learning rate for the global update

        self._model.compile(
            optimizer=tf.keras.optimizers.SGD(learning_rate=beta),
            loss=tf.keras.losses.CategoricalCrossentropy(),
            metrics=['accuracy']
        )

    @staticmethod
    def _flatten(delta):
        """Concatenate a list of per-layer arrays into one 1-D vector."""
        return np.concatenate([np.ravel(w) for w in delta])

    def _fltrust_aggregate(self, g0, client_deltas):
        """Aggregate client deltas with FLTrust trust scores.

        Each delta receives trust score TS_i = max(0, cos(delta_i, g0)) and is
        rescaled by ||g0|| / ||delta_i|| (norm clipping) before the
        trust-weighted average is taken; deltas pointing away from the root
        update (cos <= 0) contribute nothing. Deltas are processed in the order
        given, preserving the original accumulation order.

        Args:
            g0: Root client's reference delta (list of per-layer arrays,
                assumed to match the global weight shapes).
            client_deltas: List of client deltas, each shaped like ``g0``.

        Returns:
            List of per-layer arrays shaped like ``g0``; all zeros when no
            delta earns positive trust (or ``g0`` itself has ~zero norm).
        """
        g0_flat = self._flatten(g0)
        g0_norm = np.linalg.norm(g0_flat)
        aggregated_delta = [np.zeros_like(w) for w in g0]
        total_trust = 0.0

        for delta in client_deltas:
            delta_flat = self._flatten(delta)
            delta_norm = np.linalg.norm(delta_flat)

            cos_sim = 0.0
            # Guard against zero-norm updates to avoid division by zero.
            if delta_norm > 1e-9 and g0_norm > 1e-9:
                cos_sim = float(np.dot(delta_flat, g0_flat) / (delta_norm * g0_norm))
            trust = max(0.0, cos_sim)  # ReLU on the cosine similarity
            total_trust += trust

            if trust > 0.0 and delta_norm > 1e-9:
                # Rescale the accepted delta to the root delta's norm.
                scale = trust * (g0_norm / delta_norm)
                for k, layer in enumerate(delta):
                    aggregated_delta[k] += scale * layer

        if total_trust > 1e-9:
            for k in range(len(aggregated_delta)):
                aggregated_delta[k] /= total_trust
        # else: total trust score is zero and the aggregated update stays zero.
        return aggregated_delta

    def train_self(self, root_client, expr_basename, x_test, y_test):
        """Train using only the root client's data (centralized baseline).

        No aggregation is performed: each round applies the root client's
        delta directly to the global weights.
        """
        for r in range(self.global_itera):
            server_weights = self._model.get_weights()
            root_delta = root_client.train(copy.deepcopy(server_weights))  # update from root client

            # W_{t+1} = W_t + alpha * delta. BUGFIX: the previous code unpacked
            # the zip pair in the wrong order (for delta_w, old_w in
            # zip(server_weights, root_delta)) and so computed
            # delta + alpha * W_t instead of W_t + alpha * delta.
            new_server_weights = [
                old_w + self.alpha * delta_w
                for old_w, delta_w in zip(server_weights, root_delta)
            ]
            self._model.set_weights(new_server_weights)

            loss, acc = self._model.evaluate(x_test, y_test, verbose=0, batch_size=32)
            print(f'{expr_basename} round={r + 1}/{self.global_itera} loss: {loss:.4f} - accuracy: {acc:.2%}')

    def evaluate(self, x_test, y_test, expr_basename):
        """Evaluate the current global model and print loss/accuracy."""
        loss, acc = self._model.evaluate(x_test, y_test, verbose=0, batch_size=32)
        print(f'{expr_basename} loss: {loss:.4f} - accuracy: {acc:.2%}')

    def train(self, clients, root_client, expr_basename, x_test, y_test):
        """Federated training with FLTrust aggregation; all clients benign.

        Args:
            clients: Pool of client objects exposing train(weights) -> delta.
            root_client: Trusted client providing the reference update g0.
            expr_basename: Label used in the per-round log line.
            x_test, y_test: Held-out evaluation data.
        """
        for r in range(self.global_itera):
            server_weights = self._model.get_weights()  # W_t

            # Sample without replacement unless every client participates.
            selected_clients = clients if self.nselect_client == len(clients) \
                else np.random.choice(clients, self.nselect_client, replace=False)

            client_deltas = [
                client_obj.train(copy.deepcopy(server_weights))
                for client_obj in selected_clients
            ]
            # Reference update computed on the server's trusted root dataset.
            g0 = root_client.train(copy.deepcopy(server_weights))

            aggregated_delta = self._fltrust_aggregate(g0, client_deltas)

            new_global_weights = [
                sw_k + self.alpha * agg_d_k
                for sw_k, agg_d_k in zip(server_weights, aggregated_delta)
            ]
            self._model.set_weights(new_global_weights)

            loss, acc = self._model.evaluate(x_test, y_test, verbose=0, batch_size=32)
            print(f'{expr_basename} round={r + 1}/{self.global_itera} 评估 - loss: {loss:.4f}, accuracy: {acc:.2%}')

    def train_adaptive_attack(self, clients, root_client, num_malicious, expr_basename, target_cosine_sim, x_test,
                              y_test):
        """Federated training where the first ``num_malicious`` clients attack.

        Benign clients train normally; malicious updates are crafted via
        craft_heuristic_malicious_updates() to target a given cosine
        similarity with g0 and thereby survive FLTrust filtering. All provided
        clients are assumed to participate each round (``nselect_client`` is
        not consulted here).

        Raises:
            ValueError: If fewer clients than ``num_malicious`` are supplied.
        """
        if len(clients) < num_malicious:
            raise ValueError(
                f"Total number of clients ({len(clients)}) is less than the specified number of malicious clients ({num_malicious}).")

        # The first num_malicious client objects are the attackers; their local
        # data is never consumed because their updates are crafted analytically.
        benign_client_objects = clients[num_malicious:]

        for r in range(self.global_itera):
            server_weights = self._model.get_weights()

            g0 = root_client.train(copy.deepcopy(server_weights))

            benign_deltas = [
                client_obj.train(copy.deepcopy(server_weights))
                for client_obj in benign_client_objects
            ]

            malicious_crafted_deltas = []
            if num_malicious > 0:
                shapes_dtypes = get_shapes_and_dtypes(g0)
                malicious_crafted_deltas = craft_heuristic_malicious_updates(
                    g0,
                    benign_deltas,
                    num_malicious,
                    shapes_dtypes,
                    target_cosine_sim
                )

            # Malicious contributions are scored first, then benign ones,
            # matching the original accumulation order.
            aggregated_delta = self._fltrust_aggregate(
                g0, malicious_crafted_deltas + benign_deltas)

            new_global_weights = [
                sw_k + self.alpha * agg_d_k
                for sw_k, agg_d_k in zip(server_weights, aggregated_delta)
            ]
            self._model.set_weights(new_global_weights)

            loss, acc = self._model.evaluate(x_test, y_test, verbose=0, batch_size=32)
            print(
                f'{expr_basename} round={r + 1}/{self.global_itera} 评估 - loss: {loss:.4f}, accuracy: {acc:.2%}')
