from collections import Counter
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import lightgbm as lgb
import matplotlib.pyplot as plt
import seaborn as sns
from gensim.models import Word2Vec
import numpy as np

# Global matplotlib font sizes so every figure in this script renders consistently.
plt.rcParams["font.size"] = 12
plt.rcParams["axes.titlesize"] = 14
plt.rcParams["axes.labelsize"] = 12
plt.rcParams["xtick.labelsize"] = 10
plt.rcParams["ytick.labelsize"] = 8
plt.rcParams["legend.fontsize"] = 10
plt.rcParams["figure.titlesize"] = 16

# Load the per-word dataset. Downstream code reads a 'Word' column (strings)
# and a 'clusters' column (integer class labels) — TODO confirm the workbook
# actually provides both.
excel_file_path = 'D:/BaiduNetdiskDownload/美赛赛题合集/2023年美赛赛题/2023/第一问/output.xlsx'  # hard-coded Windows path to the source workbook
df = pd.read_excel(excel_file_path)

# 特征工程
# 单词中元音字母占比
# Vowel characters recognised in either case (input is lowercased anyway).
vowels = set('aeiouAEIOU')

def vowel_ratio(word):
    """Return the fraction of characters in ``word`` that are vowels.

    Case-insensitive; returns 0.0 for the empty string.
    """
    lowered = word.lower()
    if not lowered:
        return 0.0
    vowel_count = len([ch for ch in lowered if ch in vowels])
    return vowel_count / len(lowered)

df['元音占比'] = df['Word'].apply(vowel_ratio)

def count_duplicate_letters(word):
    """Sum the occurrence counts of every letter that appears more than once.

    Case-insensitive. E.g. 'hello' -> 2 (two l's), 'aabbb' -> 5, 'abc' -> 0.
    """
    occurrences = Counter(word.lower())
    repeated_counts = [n for n in occurrences.values() if n > 1]
    return sum(repeated_counts)

df['重复字母数目'] = df['Word'].apply(count_duplicate_letters)

# Letters treated as "common" in English (both cases kept; input is lowercased).
common_letters = set('earioEARIO')

def calculate_common_letter_ratio(word):
    """Fraction of *distinct* letters in ``word`` that are common letters.

    Note: the ratio is over unique characters, not occurrences — e.g.
    'hello' -> {e, o} / {h, e, l, o} = 0.5. Returns 0.0 for the empty string.
    """
    distinct = set(word.lower())
    if not distinct:
        return 0.0
    return len(distinct & common_letters) / len(distinct)

df['常用字母占比'] = df['Word'].apply(calculate_common_letter_ratio)

# Letters treated as rare in English (both cases kept; input is lowercased).
uncommon_letters = set('qjzxvQJZXV')

def calculate_uncommon_letter_ratio(word):
    """Fraction of *distinct* letters in ``word`` that are rare letters.

    Mirrors calculate_common_letter_ratio: ratio over unique characters,
    0.0 for the empty string.
    """
    distinct = set(word.lower())
    if not distinct:
        return 0.0
    rare = distinct.intersection(uncommon_letters)
    return len(rare) / len(distinct)

df['非常用字母占比'] = df['Word'].apply(calculate_uncommon_letter_ratio)

# # Generate word embeddings (disabled). NOTE(review): this treats the entire
# # word list as a single "sentence", which is unusual for Word2Vec — confirm intent before re-enabling.
# sentences = [[word.lower() for word in df['Word']]]
# word2vec_model = Word2Vec(sentences, vector_size=10, window=5, min_count=1, workers=4)

# def get_word_embedding(word):
#     word_lower = word.lower()
#     if word_lower in word2vec_model.wv:
#         return word2vec_model.wv[word_lower].tolist()
#     else:
#         return [0.0] * 10  # 如果单词不在词汇表中，返回零向量

# df['word_embedding'] = df['Word'].apply(get_word_embedding)

# # 将嵌入向量展开为单独的列
# embedding_cols = [f'embedding_{i}' for i in range(10)]
# df[embedding_cols] = pd.DataFrame(df['word_embedding'].tolist(), index=df.index)
# df.drop(columns=['word_embedding'], inplace=True)

# Assemble the engineered features into an (n_samples, 4) matrix.
# This column order must match the order produced by calculate_features below.
features = df[ ['元音占比', '重复字母数目', '常用字母占比', '非常用字母占比']].values

# Standardize to zero mean / unit variance. NOTE(review): the scaler is fit on
# the full dataset before the split, which leaks test-set statistics into
# scaling — confirm this is acceptable for this analysis.
scaler = StandardScaler()
features_scaled = scaler.fit_transform(features)

# 80/20 train/test split; 'clusters' is assumed to hold 4 integer class
# labels (0-3) matching num_class below — TODO confirm.
X = features_scaled
y = df['clusters']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Wrap the training split in LightGBM's Dataset format.
train_data = lgb.Dataset(X_train, label=y_train)

# Multiclass GBDT hyperparameters. num_class is hard-coded to 4 and must
# equal the number of distinct labels in df['clusters'] — TODO confirm.
params = {
    'objective': 'multiclass',
    'num_class': 4,
    'metric': 'multi_logloss',
    'boosting_type': 'gbdt',
    'num_leaves': 31,
    'learning_rate': 0.05,
    'feature_fraction': 0.9
}

# Train for a fixed 100 boosting rounds (no validation set / early stopping).
num_round = 100
bst = lgb.train(params, train_data, num_round)

# predict() returns per-class probabilities with shape (n_samples, num_class);
# argmax over axis 1 converts them to hard class predictions.
y_pred = bst.predict(X_test)
y_pred_classes = np.argmax(y_pred, axis=1)

# Report per-class precision/recall/F1 and the confusion matrix on the test split.
print("Classification Report:")
print(classification_report(y_test, y_pred_classes))

print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred_classes))

# Feature importance by split count (how often each feature is used in trees).
importances = bst.feature_importance(importance_type='split')
# NOTE(review): these display names must mirror the feature column order used
# at training time (元音占比, 重复字母数目, 常用字母占比, 非常用字母占比) — confirm.
feature_names = ['Vowel Ratio', 'Number of repeated letters', 'Common letter ratio', 'Uncommon letter ratio']
indices = np.argsort(importances)[::-1]  # feature indices, most important first

# Print the ranked importances.
print("Feature ranking:")
for f in range(len(feature_names)):
    print(f"{f + 1}. feature {feature_names[indices[f]]} ({importances[indices[f]]})")

# Horizontal bar chart of the same ranking (barh draws upward from y=0, so the
# most important feature ends up as the bottom bar).
plt.figure(figsize=(16, 8))
plt.title("Feature importances")
plt.barh(range(len(feature_names)), importances[indices], align="center")  # 使用barh进行水平条形图
plt.yticks(range(len(feature_names)), [feature_names[i] for i in indices])  # 设置y轴标签
plt.xlabel('Importance Score')  # 设置x轴标签
plt.ylabel('Features')  # 设置y轴标签
plt.grid(True)
plt.show()

def calculate_features(word):
    """Build a one-row DataFrame of the engineered features for ``word``.

    Column order (insertion order of the dict) matches the feature matrix
    used at training time.
    """
    row = {
        '元音占比': vowel_ratio(word),
        '重复字母数目': count_duplicate_letters(word),
        '常用字母占比': calculate_common_letter_ratio(word),
        '非常用字母占比': calculate_uncommon_letter_ratio(word),
    }
    return pd.DataFrame([row])

# Classify the example word "EERIE" with the trained model.
example_word = "EERIE"
example_features_df = calculate_features(example_word)

# Scale with the same scaler fitted on the training features.
example_features_scaled = scaler.transform(example_features_df)

# Per-class probabilities; argmax yields the predicted cluster id.
example_pred = bst.predict(example_features_scaled)
example_pred_class = np.argmax(example_pred, axis=1)

# Print the predicted cluster for the example word.
print(f"示例单词 '{example_word}' 的预测聚类类别: {example_pred_class[0]}")