import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
from sklearn.cluster import KMeans,DBSCAN
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
import  collections
from collections import Counter
from sklearn.metrics.cluster import v_measure_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import calinski_harabasz_score
from sklearn.preprocessing import MinMaxScaler
import mplcursors
import math


# Autoencoder model: symmetric two-layer encoder / two-layer decoder MLP.
class SimpleAutoencoder(nn.Module):
    def __init__(self, input_dim, z_dim, encoding_dim, dropout_prob=0.5):
        """Build the encoder/decoder stacks.

        input_dim    -- size of each input vector
        z_dim        -- hidden width shared by encoder and decoder
        encoding_dim -- size of the latent code
        dropout_prob -- NOTE(review): accepted but never used (no Dropout layer
                        is created); kept for interface compatibility — confirm intent
        """
        super(SimpleAutoencoder, self).__init__()

        # Encoder: input_dim -> z_dim -> encoding_dim, ReLU (in-place) between layers.
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, z_dim),
            nn.ReLU(True),
            nn.Linear(z_dim, encoding_dim),
            nn.ReLU(True),
        )

        # Decoder mirrors the encoder; Sigmoid keeps the reconstruction in [0, 1],
        # suitable for data scaled to that range.
        self.decoder = nn.Sequential(
            nn.Linear(encoding_dim, z_dim),
            nn.ReLU(True),
            nn.Linear(z_dim, input_dim),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Return (reconstruction, latent_code) for the input batch x."""
        code = self.encoder(x)
        recon = self.decoder(code)
        return recon, code


# Dataset wrapper over a tensor of 1-D sequences.
class OneDimSequenceDataset(Dataset):
    """Dataset whose first tensor dimension indexes samples.

    :param data_tensor: tensor of shape (n_samples, ...); cast to float32
    :param transform: optional callable applied to each sequence on access.
                      Bug fix: the original accepted this argument but
                      silently ignored it; default None keeps old behavior.
    """

    def __init__(self, data_tensor, transform=None):
        self.data = data_tensor.type(torch.float32)  # model expects float32
        self.transform = transform  # was dropped before; now stored and applied

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sequence = self.data[idx]
        if self.transform is not None:
            sequence = self.transform(sequence)
        return sequence


# Custom loss: per-feature weighted mean-squared error.
class SimplifiedWeightedMSELoss(nn.Module):
    """MSE where each feature's squared error is scaled by a fixed weight vector."""

    def __init__(self, weights):
        super(SimplifiedWeightedMSELoss, self).__init__()
        # Prepend a batch axis (1, features) so broadcasting applies the
        # weights across every sample.
        self.weights = weights.unsqueeze(0)

    def forward(self, y_pred, y_true):
        weighted_sq_err = self.weights * (y_true - y_pred) ** 2
        per_sample = torch.mean(weighted_sq_err, dim=1)  # mean over features
        return torch.mean(per_sample)                    # mean over batch

# Custom loss: MSE masked by a per-element weight tensor, so infrequent items
# (weight 0) contribute nothing and the model focuses on frequent items.
class MSELoss(nn.Module):
    def __init__(self):
        super(MSELoss, self).__init__()

    def forward(self, y_pred, y_true, weight):
        """Weighted squared error, averaged over features then over the batch."""
        masked_sq_err = (y_true - y_pred) ** 2 * weight
        per_sample = torch.mean(masked_sq_err, dim=1)
        return torch.mean(per_sample)


def calculate_xie_beni_index(data, labels):
    """
    Compute the Xie-Beni index (lower values indicate better clustering).

    :param data: array of shape (n_samples, n_features)
    :param labels: cluster labels of shape (n_samples,); values need NOT be
                   contiguous or non-negative (e.g. DBSCAN's -1 noise label)
    :return: within-cluster SSE / (n_clusters * min squared centroid distance)
    """
    unique_labels = np.unique(labels)
    n_clusters = len(unique_labels)
    # Centroid rows are ordered by position in unique_labels.
    centroids = np.array([data[labels == label].mean(axis=0) for label in unique_labels])

    # Within-cluster sum of squared distances to each cluster's own centroid.
    # Bug fix: the original did `centroids[label]`, indexing by label VALUE;
    # for non-contiguous labels (e.g. -1) that selects the wrong row
    # (centroids[-1] is the last centroid). Pair centroid with label instead.
    cluster_inertia = 0.0
    for centroid, label in zip(centroids, unique_labels):
        cluster_data = data[labels == label]
        cluster_inertia += np.sum(np.linalg.norm(cluster_data - centroid, axis=1) ** 2)

    # Minimum pairwise centroid distance; diagonal set to inf so a centroid's
    # zero distance to itself is never the minimum.
    inter_cluster_distances = np.linalg.norm(centroids[:, np.newaxis] - centroids[np.newaxis, :], axis=-1)
    np.fill_diagonal(inter_cluster_distances, np.inf)
    min_inter_cluster_distance = np.min(inter_cluster_distances)

    return cluster_inertia / (n_clusters * min_inter_cluster_distance ** 2)

# Grid-search DBSCAN parameters using the Xie-Beni index, then cluster.
def dbscan_cluster(data):
    """Pick (eps, min_samples) for DBSCAN by minimizing the Xie-Beni index.

    :param data: array-like of shape (n_samples, n_features)
    :return: cluster labels from DBSCAN run with the best parameters found
    """
    # Standardize features so the eps grid is meaningful across dimensions.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(data)

    # Candidate grids; min_samples is derived from the dataset size.
    eps_range = np.arange(0.02, 0.2, 0.02)
    min_samples_range = np.arange(len(data) * 0.005, len(data) * 0.02, 50)

    best_eps = 0.1
    # Bug fix: np.arange yields floats, but DBSCAN's min_samples must be an int.
    best_min_samples = int(len(data) * 0.01)
    # Bug fix: Xie-Beni is a LOWER-is-better index, so the search must
    # minimize it; the original maximized it and picked the worst parameters.
    best_metrics = np.inf

    for eps in eps_range:
        for min_samples in min_samples_range:
            db = DBSCAN(eps=eps, min_samples=int(min_samples))
            labels = db.fit_predict(X_scaled)

            # The index needs at least two distinct groups to be defined.
            if len(set(labels)) > 1:
                score = calculate_xie_beni_index(X_scaled, labels)
                print(f"EPS: {eps}, MinPts: {min_samples}, best_metrics: {score}")

                if score < best_metrics:
                    best_eps = eps
                    best_min_samples = int(min_samples)
                    best_metrics = score

    print(f"Best Parameters: EPS={best_eps}, MinPts={best_min_samples}")

    # Re-cluster with the best parameters found.
    db_optimized = DBSCAN(eps=best_eps, min_samples=best_min_samples)
    return db_optimized.fit_predict(X_scaled)


# Compare predicted clusters against ground truth: for each predicted cluster,
# print the composition (categories and their frequencies) of true labels.
def seek_true_label(pre_label, true_label):
    for cluster in set(pre_label):
        member_idx = np.where(pre_label == cluster)[0]
        members = true_label[member_idx]
        # Unique true labels within this cluster and how often each occurs.
        categories, counts = np.unique(members, return_counts=True)
        frequencies = counts / len(members)
        print("第{}簇中各种数据的总数为{}，其的比例为".format(cluster, len(members)))
        print("Categories:", categories)
        print("Frequencies:", frequencies)


# Clustering purity: each cluster contributes the count of its dominant true
# label, normalized by the total number of samples.
def calculate_purity(pre_label, true_label):
    purity = 0
    for cluster in set(pre_label):
        member_idx = np.where(pre_label == cluster)[0]
        members = true_label[member_idx]
        # Most frequent true label inside this cluster and its count.
        _, dominant_count = collections.Counter(members).most_common(1)[0]
        purity += dominant_count / len(pre_label)
    return purity

# Support-weighted F1 score of a clustering against true labels.
def calculate_macro_f1(pre_label, true_label):
    """For each true class, take the best F score over all clusters that
    intersect it, weight by the class size, and normalize by n_samples.

    :param pre_label: predicted cluster labels, shape (n_samples,)
    :param true_label: ground-truth labels, shape (n_samples,)
    :return: weighted F1 in [0, 1]
    """
    cluster_sizes = collections.Counter(pre_label)
    weighted_f1 = 0
    for label in set(true_label):
        class_indices = np.where(true_label == label)[0]
        clusters_of_class = pre_label[class_indices]
        nt = len(clusters_of_class)  # class size (support)
        f_scores = []  # fix: was named `list`, shadowing the builtin
        for cluster in set(clusters_of_class):
            nc = cluster_sizes[cluster]  # total size of this cluster
            ntc = len(np.where(clusters_of_class == cluster)[0])  # overlap size
            recall = ntc / nt
            precision = ntc / nc
            f_scores.append(2 * recall * precision / (recall + precision))
        weighted_f1 += max(f_scores) * nt
    return weighted_f1 / len(pre_label)


def euclidean_distance(p1, p2):
    """Euclidean distance between two 2-D points given as (x, y) pairs."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return math.sqrt(dx ** 2 + dy ** 2)


def map_clusters_to_labels(true_labels, cluster_labels):
    """Relabel each cluster with the majority true label of its members.

    :param true_labels: ground-truth labels, shape (n_samples,)
    :param cluster_labels: predicted cluster ids, shape (n_samples,)
    :return: array of the same shape where each cluster id is replaced by the
             most common true label among that cluster's samples
    """
    # Majority true label per cluster id.
    cluster_to_label = {
        cid: Counter(true_labels[cluster_labels == cid]).most_common(1)[0][0]
        for cid in np.unique(cluster_labels)
    }
    # Translate every per-sample cluster assignment through the mapping.
    return np.array([cluster_to_label[cid] for cid in cluster_labels])


def calculate_class_accuracy(true_labels, predicted_labels):
    """Per-class accuracy: fraction of each true class whose prediction matches.

    :param true_labels: ground-truth labels, shape (n_samples,)
    :param predicted_labels: predicted labels, shape (n_samples,)
    :return: dict mapping each class label to its accuracy in [0, 1]
    """
    class_accuracies = {}
    for label in np.unique(true_labels):
        # Predictions restricted to samples whose true label is `label`.
        predicted_for_class = predicted_labels[np.where(true_labels == label)[0]]
        total = len(predicted_for_class)
        # Guard mirrors the original's defensive division (total cannot be 0
        # for a label drawn from true_labels, but keep behavior identical).
        if total > 0:
            class_accuracies[label] = np.sum(predicted_for_class == label) / total
        else:
            class_accuracies[label] = 0
    return class_accuracies


def calculate_class_f1(true_labels, predicted_labels):
    """Per-class F1: harmonic mean of per-class precision and recall.

    :param true_labels: ground-truth labels, shape (n_samples,)
    :param predicted_labels: predicted labels, shape (n_samples,)
    :return: dict mapping each class label to its F1 score in [0, 1]
    """
    class_f1_scores = {}
    for label in np.unique(true_labels):
        true_idx = np.where(true_labels == label)[0]   # samples of this class
        pred_idx = np.where(predicted_labels == label)[0]  # samples predicted as it

        # Recall: matches among the class's true members.
        recall = np.sum(predicted_labels[true_idx] == label) / len(true_idx) if len(true_idx) > 0 else 0
        # Precision: matches among the samples predicted as this class.
        precision = np.sum(true_labels[pred_idx] == label) / len(pred_idx) if len(pred_idx) > 0 else 0

        denom = precision + recall
        class_f1_scores[label] = 2 * (precision * recall) / denom if denom > 0 else 0
    return class_f1_scores

# Per-class purity: within each true class, the share of samples carrying the
# most common predicted label. Returned as {true_label: purity}.
def calculate_per_class_purity(true_labels, predicted_labels):
    purity_dict = {}
    for true_label in np.unique(true_labels):
        # Predicted labels of all samples belonging to this true class.
        class_idx = np.where(true_labels == true_label)[0]
        predictions = predicted_labels[class_idx]
        # Count of the dominant predicted label inside this class.
        _, dominant_count = Counter(predictions).most_common(1)[0]
        purity_dict[true_label] = dominant_count / len(class_idx)
    return purity_dict

# Grouped bar chart on a single axis: left bars show the f1_scores dict
# (legend label 'Purity', kept from the original figure), right bars show
# accuracies. Each bar is annotated with its value to two decimals.
def plot_dual_bars(f1_scores, accuracies, title='F1 Scores and Accuracy by Class'):
    labels = list(f1_scores.keys())
    left_vals = [f1_scores[k] for k in labels]
    right_vals = [accuracies[k] for k in labels]

    positions = np.arange(len(labels))  # one slot per class
    width = 0.35                        # bar width within each slot

    fig, ax = plt.subplots()
    bars_left = ax.bar(positions - width/2, left_vals, width, label='Purity', color='black')
    bars_right = ax.bar(positions + width/2, right_vals, width, label='Accuracy', color='white', edgecolor = 'black')

    ax.set_ylabel('Score')
    ax.set_title(title)
    ax.set_xticks(positions)
    ax.set_xticklabels(labels)
    ax.legend()

    # Rotate tick labels so long class names don't overlap.
    plt.xticks(rotation=45)

    # Headroom above 1.0 so the value annotations fit.
    ax.set_ylim(0, 1.2)

    def _annotate(bars):
        """Write each bar's height (two decimals) just above the bar."""
        for bar in bars:
            h = bar.get_height()
            ax.annotate(f'{h:.2f}',
                        xy=(bar.get_x() + bar.get_width() / 2, h),
                        xytext=(0, 3),
                        textcoords="offset points",
                        ha='center', va='bottom', fontsize=8)

    _annotate(bars_left)
    _annotate(bars_right)

    plt.tight_layout()
    plt.ion()   # interactive mode so show() does not block
    plt.show()
    plt.ioff()  # restore blocking behavior for later figures

# Average the values of matching keys across a list of dicts.
def average_dicts(dict_list):
    """Return {key: mean of that key's values across dict_list}.

    Keys missing from the first dict are still picked up when first seen,
    and each key is averaged only over the dicts that contain it.
    """
    # Running totals and occurrence counts, seeded from the first dict's keys.
    totals = dict.fromkeys(dict_list[0], 0)
    counts = dict.fromkeys(dict_list[0], 0)

    for d in dict_list:
        for key, value in d.items():
            if key not in totals:
                totals[key] = 0
                counts[key] = 0
            totals[key] += value
            counts[key] += 1

    return {key: totals[key] / counts[key] for key in totals}


def update_zeros_with_min_value(dictionary):
    """
    Replace every zero value in the dict with the smallest non-zero value.

    Mutates `dictionary` in place and also returns it.

    :param dictionary: input dict
    :raises ValueError: if every value is zero (no non-zero minimum exists)
    :return: the same dict with zeros replaced
    """
    non_zero_values = [v for v in dictionary.values() if v != 0]
    if not non_zero_values:
        raise ValueError("字典中所有值都为零，无法找到非零的最小值。")

    floor = min(non_zero_values)
    for key in dictionary:
        if dictionary[key] == 0:
            dictionary[key] = floor
    return dictionary

# Weighted aggregate: sum data_dict[k] * weight[k] over the keys both dicts
# share, store it in data_dict under the key "average", and return both the
# (mutated) dict and the value.
def add_average_to_dict(data_dict, weight):
    shared_keys = (k for k in data_dict if k in weight)
    result = sum(data_dict[k] * weight[k] for k in shared_keys)
    data_dict["average"] = result
    return data_dict, result

# Weighted mean of parallel sequences (used for overall F1/V-measure with
# per-split data proportions as weights): sum of elementwise products.
def calculate_average(data, weight):
    total = 0
    for value, w in zip(data, weight):
        total += value * w
    return total


# Draw a pie chart from a {label: size} dict.
def draw_pie_chart(data_dict, title='Pie Chart'):
    """Render a pie chart whose wedges come from a {label: size} dict."""
    labels = list(data_dict.keys())
    sizes = list(data_dict.values())

    plt.figure(figsize=(8, 6))
    # One wedge per key, annotated with its percentage share.
    plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=140)
    plt.title(title)
    plt.ion()   # interactive mode so show() does not block
    plt.show()
    plt.ioff()  # restore blocking behavior for later figures