import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from gensim.models import Word2Vec
from collections import Counter

# Load the tagged-word dataset from disk.
file_path = r"D:\BaiduNetdiskDownload\美赛赛题合集\2023年美赛赛题\2023\第一问\output.xlsx"
try:
    tagged_df = pd.read_excel(file_path)
except FileNotFoundError:
    raise FileNotFoundError(f"文件 {file_path} 未找到")
except Exception as e:
    # Chain the original exception so the real cause stays in the traceback.
    raise Exception(f"读取文件时出错: {e}") from e

# Data cleaning: drop rows with a missing Word FIRST, then take the word
# series. (Previously the series was captured before dropna, so downstream
# `word.lower()` calls could hit a float NaN and raise AttributeError.)
tagged_df.dropna(subset=['Word'], inplace=True)

tagged_words = tagged_df['Word']

# Feature engineering
# Fraction of a word's characters that are vowels.
vowels = set('aeiouAEIOU')
def vowel_ratio(word):
    """Return the fraction of characters in *word* that are vowels (0.0 for an empty word)."""
    lowered = word.lower()
    if not lowered:
        return 0.0
    return sum(letter in vowels for letter in lowered) / len(lowered)

# Vowel-ratio feature column (column names kept in Chinese to match the dataset)
tagged_df['元音占比'] = tagged_df['Word'].apply(vowel_ratio)

# Total count of letters that occur more than once in the word
# (each counted with its multiplicity, e.g. "aabb" -> 4).
def count_duplicate_letters(word):
    """Return the total occurrences of repeated letters in *word*, case-insensitively."""
    frequencies = Counter(word.lower())
    return sum(freq for freq in frequencies.values() if freq > 1)

# Repeated-letter count feature column
tagged_df['重复字母数目'] = tagged_df['Word'].apply(count_duplicate_letters)

# Ratio of common letters among the DISTINCT letters of the word.
common_letters = set('earioEARIO')
def calculate_common_letter_ratio(word):
    """Return the fraction of *word*'s distinct (lowercased) characters that are
    common letters ('e','a','r','i','o'); 0.0 for an empty word."""
    distinct = set(word.lower())
    if not distinct:
        return 0.0
    return len(distinct & common_letters) / len(distinct)

# Common-letter ratio feature column (ratio over distinct letters)
tagged_df['常用字母占比'] = tagged_df['Word'].apply(calculate_common_letter_ratio)

# Ratio of uncommon letters among the DISTINCT letters of the word.
uncommon_letters = set('qjzxvQJZXV')
def calculate_uncommon_letter_ratio(word):
    """Return the fraction of *word*'s distinct (lowercased) characters that are
    uncommon letters ('q','j','z','x','v'); 0.0 for an empty word."""
    distinct = set(word.lower())
    if not distinct:
        return 0.0
    return len(distinct & uncommon_letters) / len(distinct)

# Uncommon-letter ratio feature column (ratio over distinct letters)
tagged_df['非常用字母占比'] = tagged_df['Word'].apply(calculate_uncommon_letter_ratio)

# Generate word embeddings.
# NOTE: the whole word list is fed to Word2Vec as ONE sentence, so a word's
# training context is its neighbouring rows in the dataset (window=5) — the
# words are NOT treated as independent one-word sentences.
EMBEDDING_DIM = 100  # single source of truth for the embedding size
sentences = [[word.lower() for word in tagged_words]]
word2vec_model = Word2Vec(sentences, vector_size=EMBEDDING_DIM, window=5, min_count=1, workers=4)

def get_word_embedding(word):
    """Return the embedding of *word* (lowercased) as a list of floats.

    Out-of-vocabulary words get a zero vector whose length is read from the
    trained model, so it can never drift out of sync with vector_size.
    """
    word_lower = word.lower()
    if word_lower in word2vec_model.wv:
        return word2vec_model.wv[word_lower].tolist()
    return [0.0] * word2vec_model.wv.vector_size

tagged_df['word_embedding'] = tagged_df['Word'].apply(get_word_embedding)

# Expand the embedding vectors into one numeric column per dimension.
embedding_cols = [f'embedding_{i}' for i in range(EMBEDDING_DIM)]
tagged_df[embedding_cols] = pd.DataFrame(tagged_df['word_embedding'].tolist(), index=tagged_df.index)
tagged_df.drop(columns=['word_embedding'], inplace=True)

# Assemble the feature matrix: the embedding columns plus the four
# hand-crafted letter-statistic features.
feature_columns = embedding_cols + ['元音占比', '重复字母数目', '常用字母占比', '非常用字母占比']
features = tagged_df[feature_columns].values

# Targets: the reported distribution of tries (1-6 and X) for each word.
targets = tagged_df[['1 try', '2 tries', '3 tries', '4 tries', '5 tries', '6 tries', '7 or more tries (X)']].values

# Train/test split (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(features, targets, test_size=0.2, random_state=42)

# Standardize features; the scaler is fitted on the training split only.
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Fit a multi-output random-forest regressor.
model = RandomForestRegressor(n_estimators=100, random_state=42)
model.fit(X_train_scaled, y_train)

# Evaluate on the held-out split.
y_pred = model.predict(X_test_scaled)
mse = mean_squared_error(y_test, y_pred)
print(f"均方误差: {mse}")

# --- Example prediction for a single word ---
example_word = "EERIE"
example_df = pd.DataFrame({'Word': [example_word]})

# Recompute the hand-crafted features exactly as in training.
example_df['元音占比'] = example_df['Word'].apply(vowel_ratio)
example_df['重复字母数目'] = example_df['Word'].apply(count_duplicate_letters)
example_df['常用字母占比'] = example_df['Word'].apply(calculate_common_letter_ratio)
example_df['非常用字母占比'] = example_df['Word'].apply(calculate_uncommon_letter_ratio)

# Embedding columns, expanded the same way as for the training data.
example_df['word_embedding'] = example_df['Word'].apply(get_word_embedding)
example_df[embedding_cols] = pd.DataFrame(example_df['word_embedding'].tolist(), index=example_df.index)
example_df.drop(columns=['word_embedding'], inplace=True)

# Column order must match the training feature matrix exactly.
example_matrix = example_df[embedding_cols + ['元音占比', '重复字母数目', '常用字母占比', '非常用字母占比']].values

# Scale with the scaler fitted on the training data, then predict.
example_features_scaled = scaler.transform(example_matrix)
example_prediction = model.predict(example_features_scaled)
print(f"示例单词 '{example_word}' 的预测结果: {example_prediction}")

# NOTE(review): this repeats the MSE print from the evaluation step above —
# presumably a leftover; confirm whether both prints are wanted.
print(f"均方误差: {mse}")