import numpy as np
import pandas as pd

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score, calinski_harabasz_score, DistanceMetric
from scipy.cluster.hierarchy import linkage, dendrogram
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from wordcloud import WordCloud
import os
import seaborn as sns
from sklearn.metrics import silhouette_score


# Load the dataset of legal cases.
df = pd.read_csv('D:/python/Lab1/Last.csv')

# Parse the start time into pandas datetime and extract the year.
df['start_time'] = pd.to_datetime(df['start_time'])
df['year'] = df['start_time'].dt.year

# Display options: show all columns, keep rows on one line, and
# right-align East-Asian (full-width) characters.
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 10000)
pd.set_option('display.unicode.east_asian_width', True)

n_clusters = 5  # number of clusters

# Combine the case reason ("案件原因") and the start time into a single
# text column used as the clustering input. .astype(str) is the
# vectorized equivalent of the former .apply(lambda x: str(x)) and
# produces identical strings for datetime values.
df['input'] = df['案件原因'].astype(str) + ' ' + df['start_time'].astype(str)

# Build TF-IDF features from the case text.
# Raw string avoids invalid-escape warnings. The former second
# alternative \b[^\d\W]+\b was redundant: every one of its matches is
# already matched by the first branch \b\w+\b (regex alternation tries
# the first branch first), which under the (?u) flag also covers CJK
# characters as well as Latin letters and digits.
vectorizer = TfidfVectorizer(token_pattern=r'(?u)\b\w+\b')
X = vectorizer.fit_transform(df['input'])

# Hierarchical (Ward-linkage) clustering; AgglomerativeClustering
# requires a dense array, hence the toarray() conversion.
clustering = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward').fit(X.toarray())

# Attach the cluster label of each case to the original data frame.
df['cluster'] = clustering.labels_

# 输出每个聚类的案例数量
# for k in range(n_clusters):
#     cluster_k = df[df['cluster'] == k]
#     n_cluster_k = len(cluster_k)
#     print(f"Cluster {k}: {n_cluster_k} cases")
#
#     # 输出每个聚类中的案例详细信息（前5个案例）
#     for index, row in cluster_k.head().iterrows():
#         print(f" - {row['案件原因']}")

# 计算距离和链式嵌套序列，并画出树状图
# dists = linkage(X.toarray(), 'ward')  # 使用 TF-IDF 向量计算距离并使用 ward 方法进行层次聚类
# plt.figure(figsize=(20, 10))
# dendrogram(dists, truncate_mode='level', p=5)  # 只展示树的前 5 层
# plt.title("Dendrogram")
# plt.xlabel("Cluster size")
# plt.ylabel("Distance")
# plt.show()

# 饼图：通过饼图可以直观地展示聚类簇的大小和相对比例
# sizes = [len(df[df['cluster'] == k]) for k in range(n_clusters)]
# labels = [f'Cluster {k}' for k in range(n_clusters)]
#
# plt.figure(figsize=(8, 8))
# plt.pie(sizes, labels=labels)
# plt.show()

# 散点图：使用散点图来展示不同属性的关系
# pca = PCA(n_components=2)
# X_transformed = pca.fit_transform(X.toarray())
#
# plt.figure(figsize=(10, 10))
# for k in range(n_clusters):
#     cluster_k = df[df['cluster'] == k]
#     label = f'Cluster {k}'
#     plt.scatter(X_transformed[cluster_k.index.values, 0],
#                 X_transformed[cluster_k.index.values, 1],
#                 s=50, alpha=0.5, label=label)
#
# plt.legend()
# plt.show()

# Labelled word cloud: high-frequency terms within one cluster.
cluster_k = df[df['cluster'] == 0]
# dropna()/astype(str) guard: ' '.join raises TypeError if the text
# column contains NaN (a float) or any other non-string value.
contents = cluster_k['案件原因'].dropna().astype(str)
all_content = ' '.join(contents)

# Locate a CJK-capable font. os.environ.get("WINDIR") returns None when
# the variable is unset (e.g. non-Windows), which would make
# os.path.join raise TypeError — fall back to the conventional Windows
# directory, and join path components instead of mixing separators.
windir = os.environ.get("WINDIR", r"C:\Windows")
font_path = os.path.join(windir, "Fonts", "simsun.ttc")
wc = WordCloud(width=800, height=400,
               background_color='white',
               max_words=100,
               min_font_size=10,
               font_path=font_path)  # font must support Chinese glyphs

plt.figure(figsize=(16, 8))
plt.imshow(wc.generate(all_content))
plt.axis('off')
plt.show()

# Plot the monthly trend of case counts per cluster.
groups = df.groupby('cluster')
fig, ax = plt.subplots(figsize=(10, 6))
for name, group in groups:
    # assign() builds a fresh frame instead of writing into the groupby
    # slice, which previously raised SettingWithCopyWarning.
    group = group.assign(**{'year-month': pd.to_datetime(group['start_time']).dt.to_period('M')})
    # Count cases (by document id "文书ID") per year-month period.
    trend = group.groupby('year-month')['文书ID'].count()
    trend.plot(ax=ax, label=name, x_compat=True)
plt.legend(title='Cluster')
plt.xlabel('Year-Month')
plt.ylabel('Number of Cases')
plt.show()

# Heat map of per-cluster feature means.
# Use a font that can render CJK axis labels, and show the minus sign
# correctly alongside it.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# numeric_only=True: the frame also holds text/datetime columns
# ('案件原因', 'input', 'start_time'), and GroupBy.mean() over those
# raises TypeError on pandas >= 2.0.
cluster_means = df.groupby('cluster').mean(numeric_only=True)
sns.heatmap(cluster_means.T, cmap='coolwarm')
plt.title('Cluster Feature Means')
plt.show()

# Evaluation metrics for the clustering result.
# Densify once and reuse: X.toarray() was previously recomputed for
# each metric, doubling the memory churn on a large corpus. Silhouette
# with the default euclidean metric gives the same value on the dense
# array as on the sparse matrix.
X_dense = X.toarray()

# Silhouette coefficient (range [-1, 1], higher is better).
silhouette_avg = silhouette_score(X_dense, clustering.labels_)
print("聚类结果的轮廓系数为：", silhouette_avg)

# Calinski-Harabasz index (higher is better).
ch_score = calinski_harabasz_score(X_dense, clustering.labels_)
print("聚类结果的Calinski-Harabasz指数为：", ch_score)

# Pairwise euclidean distance matrix between all samples.
# NOTE(review): this is O(n^2) in memory — fine for a lab-sized
# dataset, but consider sampling for large corpora.
dist = DistanceMetric.get_metric('euclidean')
distance_matrix = dist.pairwise(X_dense)
print("聚类结果的距离矩阵为:\n", distance_matrix)

