import re
import jieba
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense, TimeDistributed
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from collections import defaultdict, Counter
import tensorflow as tf

# Enable eager execution so tf.functions run imperatively (easier debugging).
tf.config.run_functions_eagerly(True)

def read_data(file_path, encoding='gbk'):
    """Read a tagged corpus file and extract (word, tag) pairs.

    Args:
        file_path: Path to the corpus file. Expected format is whitespace
            separated ``word/tag`` tokens (spaces around the slash allowed).
        encoding: Text encoding of the file. Defaults to 'gbk', matching
            the original PKU corpus.

    Returns:
        A list of (word, tag) tuples; an empty list when the file is
        missing or contains no parseable tokens.
    """
    try:
        with open(file_path, 'r', encoding=encoding) as file:
            content = file.read()
    except FileNotFoundError:
        print(f"文件 {file_path} 未找到，请检查文件路径。")
        return []

    # One token is "word/tag": the word is any non-space run (greedy match
    # backtracks to the last slash), the tag is ASCII letters/digits only.
    pattern = r'([^\s]+)\s*/\s*([a-zA-Z0-9]+)'
    matches = re.findall(pattern, content)
    if not matches:
        print("未找到匹配的数据，请检查文件内容格式。")
        return []

    # findall already yields (word, tag) tuples; just materialize them.
    return [(word, tag) for word, tag in matches]

def prepare_data(data, max_len=50):
    """Convert (word, tag) pairs into padded model inputs and targets.

    Sentences are split on sentence-final punctuation tags
    ('wj', 'wt', 'wm', 'wd').

    Args:
        data: List of (word, tag) tuples as produced by ``read_data``.
        max_len: Length every sentence is padded/truncated to. Defaults
            to 50, matching the model's input length.

    Returns:
        Tuple ``(X_pad, y_cat, word2idx, tag2idx)`` where X_pad is the
        padded word-index matrix and y_cat the one-hot tag targets;
        ``(None, None, {}, {})`` when input is empty or no complete
        sentence was found.
    """
    if not data:
        print("数据为空，无法准备训练数据。")
        return None, None, {}, {}

    words = set()
    tags = set()
    sentences = []

    sentence = []
    for word, tag in data:
        words.add(word)
        tags.add(tag)
        sentence.append((word, tag))
        # These tags mark sentence-ending punctuation, so they close the
        # current sentence.
        if tag in ('wj', 'wt', 'wm', 'wd'):
            sentences.append(sentence)
            sentence = []

    if not sentences:
        print("未找到完整的句子，请检查文件内容格式。")
        return None, None, {}, {}

    # Sort before enumerating so the index maps are deterministic across
    # runs (raw set iteration order varies with hash randomization).
    word2idx = {w: i + 1 for i, w in enumerate(sorted(words))}  # index 0 reserved for padding
    tag2idx = {t: i for i, t in enumerate(sorted(tags))}

    X = [[word2idx[w] for w, _ in s] for s in sentences]
    y = [[tag2idx[t] for _, t in s] for s in sentences]

    X_pad = pad_sequences(maxlen=max_len, sequences=X, padding="post", value=0)
    # NOTE(review): padding value 0 collides with the tag whose index is 0,
    # so padded time steps are trained toward that tag — consider a
    # dedicated PAD tag or sample weights.
    y_pad = pad_sequences(maxlen=max_len, sequences=y, padding="post", value=0)

    num_tags = len(tag2idx)
    # to_categorical one-hot encodes the whole 2-D array at once; the
    # former per-row list comprehension + np.array round-trip is redundant.
    y_cat = to_categorical(y_pad, num_classes=num_tags)

    return X_pad, y_cat, word2idx, tag2idx

def build_model(input_len, vocab_size, num_tags):
    """Assemble and compile the Embedding -> LSTM -> Dense tagger.

    Args:
        input_len: Fixed (padded) sequence length the model accepts.
        vocab_size: Number of word indices, including the padding index.
        num_tags: Number of distinct POS tags predicted per time step.

    Returns:
        A compiled Keras ``Sequential`` model.
    """
    tagger = Sequential()
    tagger.add(Embedding(input_dim=vocab_size, output_dim=50, input_length=input_len))
    tagger.add(LSTM(units=100, return_sequences=True))
    # One softmax over the tag set at every time step.
    tagger.add(TimeDistributed(Dense(num_tags, activation="softmax")))
    tagger.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    return tagger

def predict_sentence(model, word2idx, idx2tag, user_input):
    """Tokenize a sentence with jieba and predict one POS tag per token.

    Args:
        model: Trained Keras model emitting per-timestep tag probabilities.
        word2idx: Word -> index map; unknown words fall back to index 0
            (the padding index doubles as the OOV index here).
        idx2tag: Index -> tag map (inverse of tag2idx).
        user_input: Raw sentence string to tag.

    Returns:
        A string of "word/tag" pairs separated by single spaces.
    """
    # Segment the raw sentence into words with jieba.
    seg_list = jieba.lcut(user_input)

    x_test = [[word2idx.get(w, 0) for w in seg_list]]
    x_test_pad = pad_sequences(maxlen=50, sequences=x_test, padding="post", value=0)

    probs = model.predict(np.array(x_test_pad))
    # Pick the highest-probability tag at each time step.
    preds = np.argmax(probs, axis=-1)

    # zip stops at the shorter sequence, so padded positions (and tokens
    # beyond the 50-step window) are dropped. join replaces the former
    # quadratic string += loop and needs no trailing strip().
    return " ".join(f"{w}/{idx2tag[p]}" for w, p in zip(seg_list, preds[0]))

def interact_with_model(model, word2idx, idx2tag):
    """Interactive loop: read sentences from stdin and print their tags.

    Terminates when the user enters 'exit' (case-insensitive).
    """
    prompt = "请输入要预测词性的汉语句子（输入'exit'退出）: "
    while True:
        user_input = input(prompt)
        if user_input.lower() == 'exit':
            return
        tagged = predict_sentence(model, word2idx, idx2tag, user_input)
        print(f"输入的句子: {user_input}")
        print(f"分词结果及词性: {tagged}")

def main():
    """Train the POS tagger on the PKU corpus, then start interaction."""
    corpus_path = 'pku_train_gbk.tag'
    corpus = read_data(corpus_path)
    if not corpus:
        print("无法继续训练，因为没有有效数据。")
        return

    X, y, word2idx, tag2idx = prepare_data(corpus)
    if X is None or y is None:
        print("无法继续训练，因为数据准备失败。")
        return

    # Inverse map for decoding predicted indices back to tag strings.
    idx2tag = {index: tag for tag, index in tag2idx.items()}

    seq_len = 50
    vocab_size = len(word2idx) + 1  # +1 for the padding index 0
    num_tags = len(tag2idx)

    print(f"Input shape: {X.shape}")
    print(f"Output shape: {y.shape}")

    tagger = build_model(seq_len, vocab_size, num_tags)
    tagger.summary()
    tagger.fit(X, y, batch_size=32, epochs=5)

    # Enter interactive mode once training has finished.
    print("训练完成，开始交互...")
    interact_with_model(tagger, word2idx, idx2tag)

# Script entry point: train the tagger and start the interactive loop.
if __name__ == "__main__":
    main()



