import logging
import os
import random
import time
# from tqdm.notebook import tqdm

try:
    from tqdm.notebook import tqdm
except ImportError:
    from tqdm import tqdm


# from tqdm import tqdm
# import colorlog
import torch
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from utils.parameters import Params
from torch import autograd
import torch.nn as nn
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.signal import find_peaks
import torch.nn.functional as F
def plot_loss(loss_list):
    """Plot a training-loss curve over iterations.

    Args:
        loss_list (list of float): one loss value per iteration.
    """
    plt.figure(figsize=(10, 5))
    plt.plot(loss_list, label='Training Loss', color='blue')
    plt.title('Loss Over Iterations')
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)
    plt.show()


def visualize_image(dataset, index):
    """Display a single dataset sample together with its label.

    Handles both single-channel (grayscale) and 3-channel (color) CHW tensors.
    """
    image, label = dataset[index]

    if image.shape[0] == 1:
        # Grayscale: drop the channel axis -> (H, W)
        image = image.squeeze(0)
    else:
        # Color: reorder (C, H, W) -> (H, W, C) for matplotlib
        image = image.permute(1, 2, 0)

    # Matplotlib needs host-side numpy data
    image = image.cpu().numpy()

    colormap = 'gray' if image.shape[-1] != 3 else None
    plt.imshow(image, cmap=colormap)
    plt.title(f'Label: {label}')
    plt.show()
    
def record_time(params: Params, t=None, name=None):
    """Append the elapsed milliseconds since *t* to ``params.timing_data[name]``.

    Timing is recorded only when both *t* and *name* are supplied AND
    ``params.save_timing`` selects this timer (either by matching *name* or
    by being ``True`` to record all timers).

    Fix: the original condition
    ``t and name and params.save_timing == name or params.save_timing is True``
    parsed as ``(...) or (save_timing is True)``, so with ``save_timing=True``
    it fired even when *t*/*name* were missing and crashed on
    ``time.perf_counter() - t``.
    """
    if t and name and (params.save_timing == name or params.save_timing is True):
        # Make sure queued GPU kernels are finished so the wall time is meaningful.
        torch.cuda.synchronize()
        params.timing_data[name].append(round(1000 * (time.perf_counter() - t)))


def dict_html(dict_obj, current_time):
    """Render a parameters dict as an HTML table, skipping noisy keys.

    Args:
        dict_obj: mapping of parameter name -> value.
        current_time: identifier shown in the table heading.

    Returns:
        str: an ``<h4>`` heading followed by a ``<table>`` of the kept entries.
    """
    skipped = {'poisoning_test', 'test_batch_size', 'discount_size',
               'folder_path', 'log_interval',
               'coefficient_transfer', 'grad_threshold'}
    rows = ''.join(f'<tr><td>{key}</td><td>{value}</td></tr>'
                   for key, value in dict_obj.items() if key not in skipped)
    return f'<h4>Params for model: {current_time}:</h4><table>{rows}</table>'


def poison_text(inputs, labels):
    """Return poisoned copies of a token batch with a two-token trigger inserted.

    For each row, a random position before the [SEP] token (id 102) is chosen
    and tokens 3968/3536 are written there; all labels are flipped to 1.
    The originals are left untouched (both tensors are cloned).
    """
    poisoned = inputs.clone()
    flipped = labels.clone()
    for row in range(poisoned.shape[0]):
        sep_index = (poisoned[row] == 102).nonzero().item()
        # Keep the trigger strictly inside the sentence, before [SEP]
        position = random.randint(1, sep_index - 3)
        poisoned[row, position] = 3968
        poisoned[row, position + 1] = 3536
    flipped = torch.ones_like(flipped)
    return poisoned, flipped


def poison_text_test(inputs, labels):
    """Poison a token batch IN PLACE for testing and return True.

    Unlike ``poison_text`` this mutates the caller's tensors directly: a
    3968/3536 trigger pair is written at a random interior position of every
    row and every label is set to 1.
    """
    for row in range(inputs.shape[0]):
        position = random.randint(1, inputs.shape[1] - 4)
        inputs[row, position] = 3968
        inputs[row, position + 1] = 3536
    labels.fill_(1)
    return True


def create_table(params: dict):
    """Render a parameters dict as a Markdown table string."""
    header = "| name | value | \n |-----|-----|"
    rows = [f"| {key} | {value} |" for key, value in params.items()]
    return '\n'.join([header] + rows)


def get_current_git_hash():
    """Return the HEAD commit SHA of the enclosing git repository."""
    import git  # gitpython; imported lazily so the dependency stays optional
    repository = git.Repo(search_parent_directories=True)
    return repository.head.object.hexsha


def create_logger():
    """Set up the logging environment and return a module-level logger.

    Attaches a DEBUG-level stream handler to the root logger.  When stderr
    (fd 2) is a TTY and the optional ``colorlog`` package is importable, a
    colorized formatter is used; otherwise a plain one.

    Fix: the original referenced ``colorlog`` unconditionally even though its
    import is commented out at the top of this file, so any TTY run raised
    NameError.  The import is now attempted lazily with a plain fallback.
    """
    log = logging.getLogger()  # root logger
    log.setLevel(logging.DEBUG)
    format_str = '%(asctime)s - %(levelname)-8s - %(message)s'
    date_format = '%Y-%m-%d %H:%M:%S'
    formatter = None
    if os.isatty(2):  # fd 2 == stderr, where StreamHandler writes by default
        try:
            import colorlog
            cformat = '%(log_color)s' + format_str
            colors = {'DEBUG': 'reset',
                      'INFO': 'reset',
                      'WARNING': 'bold_yellow',
                      'ERROR': 'bold_red',
                      'CRITICAL': 'bold_red'}
            formatter = colorlog.ColoredFormatter(cformat, date_format,
                                                  log_colors=colors)
        except ImportError:
            formatter = None  # colorlog unavailable: fall through to plain
    if formatter is None:
        formatter = logging.Formatter(format_str, date_format)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    log.addHandler(stream_handler)
    return logging.getLogger(__name__)


def th(vector):
    """Squash *vector* into (0, 1): tanh maps to (-1, 1), then scale and shift."""
    squashed = torch.tanh(vector)
    return squashed / 2 + 0.5


def thp(vector):
    """Squash *vector* into (-2.2, 2.2) via a scaled tanh."""
    squashed = torch.tanh(vector)
    return squashed * 2.2

def retain_middle_percent_gradients(k, p):
    """Build a gradient hook that keeps only mid-magnitude gradients.

    The returned closure zeroes (in place) the top ``k``% and bottom ``p``%
    of gradient entries by absolute value, leaving the middle band intact.

    Args:
        k: percentage of largest-magnitude gradients to drop.
        p: percentage of smallest-magnitude gradients to drop.
    """
    def hook(grad):
        n = grad.numel()
        top_count = int(k / 100.0 * n)
        bottom_count = int(p / 100.0 * n)

        if top_count > 0 or bottom_count > 0:
            magnitudes = grad.abs()

            # Upper cutoff: the (n - top_count)-th smallest magnitude; entries
            # at or above it belong to the dropped top-k% band.
            if top_count > 0:
                upper = torch.kthvalue(magnitudes.view(-1), n - top_count)[0]
            else:
                upper = float('inf')  # nothing dropped from the top

            # Lower cutoff: the bottom_count-th smallest magnitude; entries
            # at or below it belong to the dropped bottom-p% band.
            if bottom_count > 0:
                lower = torch.kthvalue(magnitudes.view(-1), bottom_count)[0]
            else:
                lower = float('-inf')  # nothing dropped from the bottom

            grad[(grad.abs() >= upper) | (grad.abs() <= lower)] = 0

        return grad
    return hook
# def retain_top_k_activations(k):
#     def hook(module, input, output):
#         # 计算要保留的元素数量
#         k_count = int(torch.numel(output) * k / 100)

#         # 如果 k_count 为 0，则保留至少一个元素
#         if k_count == 0:
#             k_count = 1
        
#         # 保留 top k% 激活值
#         threshold = torch.kthvalue(output.view(-1), torch.numel(output) - k_count + 1).values
#         output[output < threshold] = 0
        
#         return output
#     return hook

def retain_top_k_activations(k):
    """Forward-hook factory: keep only the top ``k``% of activations.

    The returned hook zeroes (in place) every output element below the
    top-k% cutoff; at least one element is always kept.
    """
    def hook(module, input, output):
        n = torch.numel(output)
        # At least one element survives even for tiny k / small outputs
        keep = max(1, int(n * k / 100))
        # Cutoff is the keep-th largest value: the (n - keep + 1)-th smallest
        cutoff = torch.kthvalue(output.reshape(-1), n - keep + 1).values
        output[output < cutoff] = 0
        return output
    return hook
def apply_hooks_to_model(model, k):
    """Recursively attach top-k% activation hooks to Conv2d and ReLU layers.

    Containers are descended into only when they directly hold a Conv2d or
    ReLU child; other layer types are left untouched.
    """
    target_types = (torch.nn.Conv2d, torch.nn.ReLU)
    for _name, child in model.named_children():
        if isinstance(child, target_types):
            child.register_forward_hook(retain_top_k_activations(k))
        elif any(isinstance(grandchild, target_types)
                 for grandchild in child.children()):
            # Recurse into containers that hold eligible layers
            apply_hooks_to_model(child, k)
            
def apply_hooks_to_model_last_conv(model, k):
    """Attach a top-k% activation hook only to the LAST Conv2d found.

    Only ``nn.Sequential`` children are scanned, up to two levels deep; the
    last Conv2d encountered in traversal order receives the hook.
    """
    last_conv = None
    for _name, child in model.named_children():
        if not isinstance(child, nn.Sequential):
            continue
        for _sub_name, sub in child.named_children():
            if isinstance(sub, nn.Conv2d):
                last_conv = sub
            elif isinstance(sub, nn.Sequential):
                for _deep_name, deep in sub.named_children():
                    if isinstance(deep, nn.Conv2d):
                        last_conv = deep

    if last_conv is not None:
        last_conv.register_forward_hook(retain_top_k_activations(k))
        
def show_opt_input(optimized_imgs):
    """Display a batch of images side by side.

    :param optimized_imgs: tensor shaped [n, 3, H, W]
    """
    count = optimized_imgs.size(0)

    fig, axes = plt.subplots(1, count, figsize=(count * 3, 3))
    if count == 1:
        axes = [axes]  # a single subplot comes back bare, not as an array

    for idx, ax in enumerate(axes):
        # Detach from the graph and move to host memory
        img = optimized_imgs[idx].detach().cpu().numpy()
        # (C, H, W) -> (H, W, C) for display
        img = np.transpose(img, (1, 2, 0))
        # Min-max normalize to [0, 1], then to 0-255 uint8
        # NOTE(review): divides by zero if the image is constant — confirm inputs vary
        img = (img - img.min()) / (img.max() - img.min())
        img = (img * 255).astype(np.uint8)

        ax.imshow(img)
        ax.axis('off')

    plt.show()

def show_opt_input_signed(optimized_imgs, is_malicious):
    """Display a grid of images, outlining malicious samples in red.

    Up to 16 images are shown per row.

    :param optimized_imgs: tensor shaped [n, 3, H, W]
    :param is_malicious: length-n boolean/binary marks for malicious samples
    """
    total = optimized_imgs.size(0)
    images_per_row = 16
    rows = (total + images_per_row - 1) // images_per_row  # ceil division

    fig, axes = plt.subplots(rows, images_per_row,
                             figsize=(images_per_row * 3, rows * 3))
    axes = axes.flatten()  # treat the grid as a flat sequence

    for idx, ax in enumerate(axes):
        if idx < total:
            # Detach, move to host, and reorder (C, H, W) -> (H, W, C)
            img = optimized_imgs[idx].detach().cpu().numpy()
            img = np.transpose(img, (1, 2, 0))
            img = (img - img.min()) / (img.max() - img.min())  # min-max normalize
            img = (img * 255).astype(np.uint8)

            ax.imshow(img)
            ax.axis('off')

            # Red frame marks a malicious sample
            if is_malicious[idx]:
                ax.add_patch(plt.Rectangle((0, 0), img.shape[1], img.shape[0],
                                           linewidth=5, edgecolor='red', facecolor='none'))
        else:
            ax.axis('off')  # hide unused grid cells

    plt.tight_layout()
    plt.show()

def plot_loss(loss_list):
    """Plot a training-loss curve over iterations.

    NOTE(review): duplicate of the ``plot_loss`` defined earlier in this
    module; this later definition is the one bound at import time.
    """
    plt.figure(figsize=(10, 5))
    plt.plot(loss_list, label='Training Loss', color='blue')
    plt.title('Loss Over Iterations')
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)
    plt.show()
    
def model_forward(model, dataset, index):
    """Run a single dataset sample through the model.

    Args:
        model: the network to evaluate.
        dataset: indexable dataset yielding ``(image, label)`` pairs.
        index: sample index.

    Returns:
        tuple: ``(output_logits, predicted_class)`` — predicted_class is a
        1-element tensor (use ``.item()`` for the int).

    Fix: the original unconditionally called ``.cuda()``, crashing on
    CPU-only hosts; now it falls back to CPU (identical on CUDA machines).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    image, label = dataset[index]
    image = image.to(device)

    # Add a batch dimension so the model sees shape [1, C, H, W]
    image = image.unsqueeze(0)
    model.to(device)

    output = model(image)
    _, predicted_class = torch.max(output, 1)
    return output, predicted_class

def show_acc(dataset, model, r):
    """Print the error and accuracy rates over the first *r* dataset samples."""
    wrong = 0
    for idx in range(r):
        _output, pred = model_forward(model, dataset, idx)
        if pred.item() != dataset[idx][1]:
            wrong += 1
    print("错误率: {}".format((wrong/r)))
    print("正确率: {}".format(1-(wrong/r)))

def show_acc_label(dataset, model, r, label):
    """Print error/accuracy over the first *r* samples whose true class is *label*.

    Fix: the original divided by ``label_cnt`` unconditionally, raising
    ZeroDivisionError when none of the first *r* samples carry *label*;
    that case now prints a notice and returns.
    """
    wrong = 0
    label_cnt = 0
    for idx in range(r):
        if dataset[idx][1] == label:
            label_cnt += 1
            _output, pred = model_forward(model, dataset, idx)
            if pred.item() != dataset[idx][1]:
                wrong += 1
    if label_cnt == 0:
        print("No samples with label {} among the first {} items.".format(label, r))
        return
    print("错误率: {}".format((wrong/label_cnt)))
    print("正确率: {}".format(1-(wrong/label_cnt)))

def load_saved_model(saved_model, model):
    """Load weights from a checkpoint file into *model* and return it.

    The checkpoint is expected to be a dict containing a ``'state_dict'`` key.
    """
    checkpoint = torch.load(saved_model)
    model.load_state_dict(checkpoint['state_dict'])
    return model

def predict_with_sliding_window(model, input_image, num_classes, window_size=10):
    """Build an occlusion-style class-prediction map for one image.

    For every window position, all pixels OUTSIDE the window are zeroed, the
    masked image is classified, and the predicted class is stamped onto the
    window's region of the map (later windows overwrite earlier ones).

    Args:
        model: classifier taking a [1, C, H, W] batch.
        input_image: [C, H, W] tensor.
        num_classes: number of classes (unused here; kept for the interface).
        window_size: side length of the square window.

    Returns:
        numpy int array of shape (H, W) with per-pixel predicted classes.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()

    batched = input_image.to(device).unsqueeze(0)  # add batch dimension
    _, _, H, W = batched.shape

    prediction_map = np.zeros((H, W), dtype=int)

    for top in range(H - window_size + 1):
        for left in range(W - window_size + 1):
            # Zero everything except the current window
            masked = torch.zeros_like(batched)
            masked[:, :, top:top + window_size, left:left + window_size] = \
                batched[:, :, top:top + window_size, left:left + window_size]

            with torch.no_grad():
                logits = model(masked)
                winner = logits.argmax(dim=1).item()
                prediction_map[top:top + window_size, left:left + window_size] = winner

    return prediction_map

def visualize_predictions(prediction_map, input_image, num_classes):
    """Show the original image and its sliding-window prediction map.

    :param prediction_map: (H, W) int array of per-pixel predicted classes.
    :param input_image: [C, H, W] image tensor.
    :param num_classes: number of classes, used to size the colormap.
    """
    plt.figure(figsize=(15, 15))

    # show_opt_input expects a batch dimension
    show_opt_input(input_image.unsqueeze(0))
    plt.title("Original Image")

    # One distinct color per class
    class_cmap = plt.get_cmap('tab20', num_classes)

    plt.subplot(1, 2, 2)
    plt.imshow(prediction_map, cmap=class_cmap, alpha=0.6)
    plt.colorbar(ticks=np.arange(num_classes))
    plt.title("Prediction Map")

    plt.show()
    
def regularize_tensor(tensor, lower_bound=0, upper_bound=255, penalty_factor=1):
    """Penalize tensor values that fall outside [lower_bound, upper_bound].

    The penalty is the total distance of out-of-range values from the nearest
    bound, scaled by ``penalty_factor``; in-range values contribute nothing.
    """
    below = torch.relu(lower_bound - tensor)   # distance below the lower bound
    above = torch.relu(tensor - upper_bound)   # distance above the upper bound
    total = below.sum() + above.sum()
    return total * penalty_factor

def detect_malicious_threshold(individual_losses_np, bandwidth=0.05, plot=False):
    """
    Detect the threshold for malicious samples with an adaptive mechanism to filter out outliers.
    
    Fits a Gaussian KDE over per-sample losses, takes the first valley after
    the first density peak as an initial cutoff, then adaptively trims
    outliers from the low-loss cluster using gaps between sorted losses.

    Parameters:
    individual_losses_np (numpy array): The loss values for each sample.
    bandwidth (float): Gaussian-kernel bandwidth for the KDE fit.
    plot (bool): If True, plot the estimated density over the sorted losses.
    
    Returns:
    float: The threshold value for detecting malicious samples.
    numpy array: Indices of samples considered malicious after adaptive filtering
        (``None`` if the final threshold is 0).
    """
    # Reshape to a 2-D column vector as required by sklearn's KernelDensity
    individual_losses_reshaped = individual_losses_np.reshape(-1, 1)

    # Fit a Gaussian kernel density estimate over the losses
    kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(individual_losses_reshaped)
    # NOTE(review): log_density is computed but never used below
    log_density = kde.score_samples(individual_losses_reshaped)

    # Evaluate the density along the SORTED losses so peak/valley indices
    # map directly onto loss order
    sorted_losses = np.sort(individual_losses_np)
    log_density_sorted = kde.score_samples(sorted_losses.reshape(-1, 1))
    
    # Optional visualization of the estimated density over sorted losses
    if plot:
        plt.figure(figsize=(5, 3))
        plt.plot(sorted_losses, np.exp(log_density_sorted), 'b-', label='Estimated Density')
        plt.fill_between(sorted_losses, np.exp(log_density_sorted), alpha=0.5)
        plt.xlabel('Loss Value')
        plt.ylabel('Density')
        plt.title('Kernel Density Estimation of Sorted Loss Values')
        plt.legend()
        plt.grid(True)
        plt.show()

    # Convert log-density back to actual density values
    density_values = np.exp(log_density_sorted)

    # Locate all local maxima (peaks) and local minima (valleys) of the density
    peaks, _ = find_peaks(density_values)
    valleys, _ = find_peaks(-density_values)

    # First peak — presumably the low-loss (suspected malicious) cluster
    # NOTE(review): raises IndexError if the density has no interior peaks
    first_peak_index = peaks[0]

    # First valley AFTER the first peak separates the low-loss cluster
    # from the rest of the distribution
    threshold_index = None
    for valley in valleys:
        if valley > first_peak_index:
            threshold_index = valley
            break

    # No usable valley found: fall back to a fixed threshold of 0.1
    if threshold_index is None:
        final_threshold = 0.1
    else:
        # Preliminary threshold at the valley's loss value
        initial_threshold = sorted_losses[threshold_index]

        # Preliminary low-loss (suspected malicious) samples
        preliminary_malicious_indices = np.where(individual_losses_np <= initial_threshold)[0]
        preliminary_malicious_losses = individual_losses_np[preliminary_malicious_indices]
        preliminary_malicious_losses.sort()

        # Gaps between consecutive losses, with the extreme gaps trimmed off
        differences = np.diff(preliminary_malicious_losses)
        if len(differences) > 2:
            filtered_differences = np.sort(differences)[1:-1]  # drop min and max gaps
        else:
            filtered_differences = differences

        # Adaptive cut: drop samples after a gap larger than 100x the mean gap
        # NOTE(review): an earlier comment said 10x, but the code uses 100x
        mean_diff = np.mean(filtered_differences)
        adaptive_threshold_index = np.where(differences > 100 * mean_diff)[0]

        if len(adaptive_threshold_index) > 0:
            # +1 because np.diff output is one element shorter than the input
            adaptive_threshold_index = adaptive_threshold_index[0] + 1
            filtered_losses = preliminary_malicious_losses[:adaptive_threshold_index]
            final_threshold = filtered_losses[-1]
        else:
            filtered_losses = preliminary_malicious_losses
            final_threshold = initial_threshold

        # if final_threshold > 1.0:    
        #     final_threshold = 0

    # Select malicious samples under the final adaptive threshold
    malicious_indices = None
    if final_threshold != 0:
        malicious_indices = np.where(individual_losses_np <= final_threshold)[0]
        
    return final_threshold, malicious_indices

import torch.nn.functional as F

def evaluate(model, dataloader, criterion, device):
    """Evaluate *model* on *dataloader* and return (average loss, accuracy).

    Loss is averaged per sample; the model is left in training mode on return.
    """
    model.eval()  # evaluation mode for the duration of the pass
    total_loss = 0.0
    seen = 0
    hits = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for batch in tqdm(dataloader, desc="Evaluating Model"):
            # Batches may be (imgs, labels) or (imgs, labels, extra)
            if len(batch) == 2:
                imgs, labels = batch
            else:
                imgs, labels, _ = batch
            imgs = imgs.to(device)
            labels = labels.to(device)

            outputs = model(imgs)
            batch_size = labels.size(0)
            total_loss += criterion(outputs, labels).item() * batch_size

            predicted = outputs.data.max(1)[1]
            seen += batch_size
            hits += (predicted == labels).sum().item()

    avg_loss = total_loss / seen
    accuracy = hits / seen
    model.train()  # restore training mode
    return avg_loss, accuracy

def evaluate_detect(model, dataloader, criterion, device):
    """Evaluate the detection model on *dataloader*; return (avg loss, accuracy).

    Identical to ``evaluate`` apart from the progress-bar description; the
    model is left in training mode on return.
    """
    model.eval()  # evaluation mode for the duration of the pass
    total_loss = 0.0
    seen = 0
    hits = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for batch in tqdm(dataloader, desc="Evaluating DetectNet"):
            # Batches may be (imgs, labels) or (imgs, labels, extra)
            if len(batch) == 2:
                imgs, labels = batch
            else:
                imgs, labels, _ = batch
            imgs = imgs.to(device)
            labels = labels.to(device)

            outputs = model(imgs)
            batch_size = labels.size(0)
            total_loss += criterion(outputs, labels).item() * batch_size

            predicted = outputs.data.max(1)[1]
            seen += batch_size
            hits += (predicted == labels).sum().item()

    avg_loss = total_loss / seen
    accuracy = hits / seen
    model.train()  # restore training mode
    return avg_loss, accuracy

def evaluate_class(model, dataloader, criterion, device, num_classes):
    """Evaluate *model* and return (avg loss, overall accuracy, per-class accuracy).

    Per-class accuracy is 0.0 for classes with no samples in the loader.
    The model is left in training mode on return.
    """
    model.eval()  # evaluation mode for the duration of the pass
    total_loss = 0.0
    seen = 0
    hits = 0
    per_class_hits = [0] * num_classes
    per_class_seen = [0] * num_classes

    with torch.no_grad():  # no gradients needed during evaluation
        for batch in dataloader:
            # Batches may be (imgs, labels) or (imgs, labels, extra)
            if len(batch) == 2:
                imgs, labels = batch
            else:
                imgs, labels, _ = batch
            imgs = imgs.to(device)
            labels = labels.to(device)

            outputs = model(imgs)
            batch_size = labels.size(0)
            total_loss += criterion(outputs, labels).item() * batch_size

            predicted = outputs.data.max(1)[1]
            seen += batch_size
            hits += (predicted == labels).sum().item()

            # Per-class bookkeeping
            for idx in range(batch_size):
                cls = labels[idx].item()
                per_class_seen[cls] += 1
                if predicted[idx] == cls:
                    per_class_hits[cls] += 1

    avg_loss = total_loss / seen
    accuracy = hits / seen
    class_acc = [per_class_hits[c] / per_class_seen[c] if per_class_seen[c] > 0 else 0.0
                 for c in range(num_classes)]

    model.train()  # restore training mode

    return avg_loss, accuracy, class_acc
