import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from gensim.models import Word2Vec
from collections import Counter
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Layer
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import mean_squared_error

# Load the tagged word dataset from disk.
file_path = r"D:\BaiduNetdiskDownload\美赛赛题合集\2023年美赛赛题\2023\第一问\output.xlsx"
try:
    tagged_df = pd.read_excel(file_path)
except FileNotFoundError as e:
    # Re-raise with a path-bearing message, chaining the original for tracebacks.
    raise FileNotFoundError(f"文件 {file_path} 未找到") from e
except Exception as e:
    # Chain the root cause so the underlying error is not lost.
    raise Exception(f"读取文件时出错: {e}") from e
# Data preprocessing: drop rows with a missing 'Word' BEFORE capturing the
# word series. The original captured `tagged_words` first, so the in-place
# dropna did not propagate to it and NaN values could reach `.lower()` later.
tagged_df.dropna(subset=['Word'], inplace=True)

# Series of words, used later to build Word2Vec training sentences.
tagged_words = tagged_df['Word']

# Feature engineering
# Fraction of vowel letters in a word
vowels = set('aeiouAEIOU')
def vowel_ratio(word):
    """Return the share of characters in *word* that are vowels (0.0 if empty)."""
    lowered = word.lower()
    if not lowered:
        return 0.0
    return sum(ch in vowels for ch in lowered) / len(lowered)

# Vowel-ratio feature column, one value per word.
tagged_df['元音占比'] = tagged_df['Word'].apply(vowel_ratio)

# Total occurrences of letters that appear more than once in the word
def count_duplicate_letters(word):
    """Sum the counts of every letter occurring at least twice (case-insensitive)."""
    freq = Counter(word.lower())
    total = 0
    for occurrences in freq.values():
        if occurrences > 1:
            total += occurrences
    return total

# Duplicate-letter-count feature column.
tagged_df['重复字母数目'] = tagged_df['Word'].apply(count_duplicate_letters)

# Share of the word's DISTINCT letters that are "common" English letters
common_letters = set('earioEARIO')
def calculate_common_letter_ratio(word):
    """Return |distinct letters ∩ common letters| / |distinct letters| (0.0 if empty)."""
    distinct = set(word.lower())
    if not distinct:
        return 0.0
    return len(distinct & common_letters) / len(distinct)

# Common-letter-ratio feature column.
tagged_df['常用字母占比'] = tagged_df['Word'].apply(calculate_common_letter_ratio)

# Share of the word's DISTINCT letters that are rare English letters
uncommon_letters = set('qjzxvQJZXV')
def calculate_uncommon_letter_ratio(word):
    """Return |distinct letters ∩ rare letters| / |distinct letters| (0.0 if empty)."""
    distinct = set(word.lower())
    if not distinct:
        return 0.0
    return len(distinct & uncommon_letters) / len(distinct)

# Uncommon-letter-ratio feature column.
tagged_df['非常用字母占比'] = tagged_df['Word'].apply(calculate_uncommon_letter_ratio)

# Train word embeddings.
# NOTE(review): the original comment said "treat each word as a sentence", but
# this builds ONE sentence containing every word, so Word2Vec learns context
# from arbitrary neighbouring rows (window=5) — confirm which was intended.
sentences = [[word.lower() for word in tagged_words]]
word2vec_model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, workers=4)

def get_word_embedding(word):
    """Return the 100-dim embedding (as a list) for *word*; zeros if out-of-vocabulary."""
    word_lower = word.lower()
    if word_lower in word2vec_model.wv:
        return word2vec_model.wv[word_lower].tolist()
    else:
        return [0.0] * 100  # OOV word: return a zero vector

# One embedding list per row.
tagged_df['word_embedding'] = tagged_df['Word'].apply(get_word_embedding)

# Expand the embedding vectors into 100 separate numeric columns.
embedding_cols = [f'embedding_{i}' for i in range(100)]
tagged_df[embedding_cols] = pd.DataFrame(tagged_df['word_embedding'].tolist(), index=tagged_df.index)
tagged_df.drop(columns=['word_embedding'], inplace=True)

# Feature matrix: 100 embedding dims + 4 hand-crafted letter features.
features = tagged_df[embedding_cols + ['元音占比', '重复字母数目', '常用字母占比', '非常用字母占比']].values

# Targets: the seven try-count distribution columns, as a numpy array.
targets = tagged_df[['1 try', '2 tries', '3 tries', '4 tries', '5 tries', '6 tries', '7 or more tries (X)']].values

# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(features, targets, test_size=0.2, random_state=42)

# Standardize: fit the scaler on training data only, reuse it for test/inference.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Multi-gate Mixture-of-Experts layer (MMOE): shared experts, one softmax
# gate per task, and a per-task tower of Dense layers ending in a scalar head.
class MMOELayer(Layer):
    """MMOE layer producing one scalar output per task.

    Args:
        num_experts: number of shared expert sub-networks.
        units: output width of each expert.
        hidden_units: widths of the hidden layers in each task tower.
        num_tasks: number of task-specific scalar outputs.
    """

    def __init__(self, num_experts, units, hidden_units, num_tasks, **kwargs):
        super(MMOELayer, self).__init__(**kwargs)
        self.num_experts = num_experts
        self.units = units
        self.hidden_units = hidden_units
        self.num_tasks = num_tasks

    def build(self, input_shape):
        # Shared experts and one gate per task.
        self.experts = [Dense(self.units, activation='relu') for _ in range(self.num_experts)]
        self.gates = [Dense(self.num_experts, activation='softmax') for _ in range(self.num_tasks)]
        # Per-task towers: hidden relu layers followed by a scalar regression head.
        self.task_outputs = []
        for _ in range(self.num_tasks):
            tower = [Dense(hidden_unit, activation='relu') for hidden_unit in self.hidden_units]
            tower.append(Dense(1))
            self.task_outputs.append(tower)

    def call(self, inputs):
        # expert_stack: (batch, num_experts, units)
        expert_stack = tf.stack([expert(inputs) for expert in self.experts], axis=1)
        task_outputs = []
        for i in range(self.num_tasks):
            gate = self.gates[i](inputs)  # (batch, num_experts), rows sum to 1
            # BUG FIX: the original tf.matmul of the rank-2 gate with the rank-3
            # expert stack broadcast to (batch, batch, units), and the following
            # reduce_sum mixed expert outputs ACROSS batch examples. Weight each
            # example's experts by its own gate instead.
            mixed = tf.reduce_sum(tf.expand_dims(gate, -1) * expert_stack, axis=1)  # (batch, units)
            task_output = mixed
            for layer in self.task_outputs[i]:
                task_output = layer(task_output)
            task_outputs.append(task_output)
        return task_outputs

def MMOE(input_dim, num_tasks, units=128, num_experts=4, hidden_units=None):
    """Build a Keras model with one MMOE layer and one scalar output per task.

    `hidden_units` defaults to [128, 64]; a None sentinel is used instead of a
    mutable default list so the default is not shared across calls.
    """
    if hidden_units is None:
        hidden_units = [128, 64]
    inputs = Input(shape=(input_dim,))
    mmoe_layer = MMOELayer(num_experts, units, hidden_units, num_tasks)
    task_outputs = mmoe_layer(inputs)
    return Model(inputs=inputs, outputs=task_outputs)

# Build the model from the feature width and the number of target columns.
input_dim = X_train_scaled.shape[1]
num_tasks = y_train.shape[1]
model = MMOE(input_dim, num_tasks)

# Compile with MSE loss on every task head.
model.compile(optimizer=Adam(learning_rate=0.001), loss='mse')

# Train; Keras expects one target array per task output, hence the split.
model.fit(X_train_scaled, [y_train[:, i] for i in range(num_tasks)], epochs=50, batch_size=32, validation_split=0.2)

# Predict and evaluate. Call predict() ONCE — the original called it inside
# the comprehension, re-running full inference num_tasks times.
test_predictions = model.predict(X_test_scaled)  # list of (n_samples, 1) arrays, one per task
y_pred = np.array([test_predictions[i].flatten() for i in range(num_tasks)]).T  # (n_samples, num_tasks), matches y_test
mse = mean_squared_error(y_test, y_pred)
print(f"均方误差: {mse}")

# Example prediction for a single word.
example_word = "EERIE"
example_features = pd.DataFrame({
    'Word': [example_word]
})

# Recompute the same hand-crafted features used during training.
example_features['元音占比'] = example_features['Word'].apply(vowel_ratio)
example_features['重复字母数目'] = example_features['Word'].apply(count_duplicate_letters)
example_features['常用字母占比'] = example_features['Word'].apply(calculate_common_letter_ratio)
example_features['非常用字母占比'] = example_features['Word'].apply(calculate_uncommon_letter_ratio)

# Word embedding, expanded into the same 100 columns as in training.
example_features['word_embedding'] = example_features['Word'].apply(get_word_embedding)
example_features[embedding_cols] = pd.DataFrame(example_features['word_embedding'].tolist(), index=example_features.index)
example_features.drop(columns=['word_embedding'], inplace=True)

# Keep the column order identical to the training feature matrix.
example_features = example_features[embedding_cols + ['元音占比', '重复字母数目', '常用字母占比', '非常用字母占比']].values

# Scale with the scaler fitted on the training data.
example_features_scaled = scaler.transform(example_features)

# Predict once and gather all task outputs (the original re-ran predict per task).
example_outputs = model.predict(example_features_scaled)
example_prediction = np.array([example_outputs[i].flatten() for i in range(num_tasks)]).T

# Normalize so the seven values sum to 100.
# NOTE(review): raw regression outputs are unconstrained — a negative or
# near-zero row sum would make this normalization meaningless; confirm the
# outputs are non-negative before relying on it.
example_prediction_normalized = (example_prediction / example_prediction.sum(axis=1, keepdims=True)) * 100

print(f"示例单词 '{example_word}' 的标准化预测结果: {example_prediction_normalized}")
print(f"均方误差: {mse}")
