import os
import tkinter as tk
from tkinter import messagebox
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence

# Vocabulary-index conversion helpers
def char_to_idx(sentences, vocab):
    """Convert sentences (lists of characters) to lists of vocab indices.

    Characters missing from `vocab` fall back to the '<UNK>' index.
    """
    encoded = []
    for sentence in sentences:
        encoded.append([vocab.get(ch, vocab['<UNK>']) for ch in sentence])
    return encoded

def tag_to_idx(tags, tag_vocab):
    """Convert tag sequences to index sequences; raises KeyError on unknown tags."""
    return [list(map(tag_vocab.__getitem__, tag_seq)) for tag_seq in tags]

class SentenceDataset(Dataset):
    """Torch Dataset pairing character-index sequences with tag-index sequences.

    Encoding happens eagerly in __init__, so __getitem__ only wraps the
    pre-computed index lists into tensors.
    """

    def __init__(self, sentences, tags, vocab, tag_vocab):
        # Unknown characters map to '<UNK>'; unknown tags raise KeyError.
        self.sentences = [
            [vocab.get(ch, vocab['<UNK>']) for ch in sentence] for sentence in sentences
        ]
        self.tags = [[tag_vocab[t] for t in tag_seq] for tag_seq in tags]

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, idx):
        sentence_tensor = torch.tensor(self.sentences[idx])
        tag_tensor = torch.tensor(self.tags[idx])
        return sentence_tensor, tag_tensor

# Batch collation: right-pad variable-length sequences so they stack into tensors.
def collate_fn(batch):
    """Pad a batch of (sentence_tensor, tag_tensor) pairs to equal length.

    Both sentences and tags are padded with 0 and returned batch-first.
    """
    sentences = [pair[0] for pair in batch]
    tags = [pair[1] for pair in batch]
    return (
        pad_sequence(sentences, batch_first=True, padding_value=0),
        pad_sequence(tags, batch_first=True, padding_value=0),
    )

# GRU model
class GRUModel(nn.Module):
    """(Bi)GRU sequence tagger: embedding -> GRU over packed sequences -> linear head."""

    def __init__(self, vocab_size, embedding_dim, hidden_dim, tagset_size, num_layers=1, bidirectional=True):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.gru = nn.GRU(
            embedding_dim,
            hidden_dim,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True,
        )
        # A bidirectional GRU concatenates forward/backward states, doubling width.
        gru_out_dim = hidden_dim * 2 if bidirectional else hidden_dim
        self.fc = nn.Linear(gru_out_dim, tagset_size)

    def forward(self, x, lengths):
        """Return per-position tag logits of shape (batch, seq, tagset_size).

        x: batch-first padded index tensor; lengths: valid length of each row.
        """
        embedded = self.embedding(x)
        # Packing keeps the GRU from processing padding positions;
        # pack_padded_sequence requires lengths to live on the CPU.
        packed = pack_padded_sequence(
            embedded, lengths.cpu(), batch_first=True, enforce_sorted=False
        )
        packed_out, _ = self.gru(packed)
        unpacked, _ = pad_packed_sequence(packed_out, batch_first=True)
        return self.fc(unpacked)

# Corpus loading
def load_data(file_path):
    """Parse a GBK-encoded corpus with one 'char tag' pair per line.

    Blank lines separate sentences; lines that do not split into exactly two
    fields are skipped. Returns (sentences, tags) as parallel lists of lists.
    """
    sentences, tags = [], []
    chars, labels = [], []
    with open(file_path, encoding='gbk') as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                # Sentence boundary: flush the accumulated pair, if any.
                if chars:
                    sentences.append(chars)
                    tags.append(labels)
                    chars, labels = [], []
                continue
            fields = stripped.split()
            if len(fields) == 2:
                chars.append(fields[0])
                labels.append(fields[1])
    # Flush a trailing sentence not followed by a blank line.
    if chars:
        sentences.append(chars)
        tags.append(labels)
    return sentences, tags

# Training loop: one pass per epoch over the DataLoader, reporting mean batch loss.
def train_model(model, dataloader, criterion, optimizer, epochs):
    """Train `model` in place for `epochs` epochs over `dataloader`.

    Valid sequence lengths are recovered from the padded batch itself
    (count of non-zero indices), since collate pads with index 0.
    Reads the module-level `device` global.
    """
    model.train()
    for epoch in range(epochs):
        running_loss = 0.0
        for batch_sentences, batch_tags in dataloader:
            # Length of each row = number of non-PAD (non-zero) positions.
            seq_lengths = (batch_sentences != 0).sum(dim=1)
            batch_sentences = batch_sentences.to(device)
            batch_tags = batch_tags.to(device)
            optimizer.zero_grad()
            logits = model(batch_sentences, seq_lengths)
            # Flatten (batch, seq, tags) -> (batch*seq, tags) for CrossEntropyLoss.
            loss = criterion(logits.view(-1, logits.shape[-1]), batch_tags.view(-1))
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print(f"Epoch {epoch + 1}/{epochs}, Loss: {running_loss / len(dataloader):.4f}")

# Inference: tag a single raw sentence with the trained model.
def predict(model, sentence, vocab, tag_vocab):
    """Return the predicted tag sequence (one tag per character) for `sentence`.

    Robustness fixes:
    - An empty sentence previously crashed inside pack_padded_sequence
      (sequence length 0); it now returns [].
    - If the model emits a class index with no entry in `tag_vocab`
      (e.g. a padding class), fall back to '<PAD>' instead of raising KeyError.
    Reads the module-level `device` global.
    """
    if not sentence:
        return []
    model.eval()
    with torch.no_grad():
        input_tensor = torch.tensor([char_to_idx([sentence], vocab)[0]]).to(device)
        lengths = torch.tensor([len(sentence)]).to(device)
        output = model(input_tensor, lengths)
        predicted_tags = output.argmax(dim=-1).squeeze(0).cpu().numpy()
        idx_to_tag = {idx: tag for tag, idx in tag_vocab.items()}
        return [idx_to_tag.get(int(idx), '<PAD>') for idx in predicted_tags]

# Main application
class SegmentApp:
    """Tkinter front-end: trains the GRU tagger on startup, then tags user input.

    Note: training runs synchronously inside __init__, so the window is
    unresponsive until training completes.
    """

    def __init__(self, root):
        self.root = root
        self.root.title("GRU 词位标注系统")

        self.label = tk.Label(root, text="输入待标注的句子：")
        self.label.pack()

        self.text_entry = tk.Entry(root, width=50)
        self.text_entry.pack()

        self.segment_button = tk.Button(root, text="词位标注", command=self.segment_text)
        self.segment_button.pack()

        self.result_label = tk.Label(root, text="词位标注结果：")
        self.result_label.pack()

        self.result_text = tk.Label(root, text="", wraplength=400)
        self.result_text.pack()

        # Populated by train_model() below.
        self.vocab = None
        self.tag_vocab = None
        self.model = None
        self.train_model()

    def train_model(self):
        """Load the corpus next to this file, build vocabularies, train the model."""
        base_dir = os.path.dirname(os.path.abspath(__file__))
        train_path = os.path.join(base_dir, "msr_training.txt")

        sentences, tags = load_data(train_path)

        # sorted() makes index assignment deterministic across runs;
        # plain set iteration order varies with hash randomization.
        chars = sorted({char for sent in sentences for char in sent})
        self.vocab = {char: idx for idx, char in enumerate(chars, start=1)}
        self.vocab['<PAD>'] = 0
        self.vocab['<UNK>'] = len(self.vocab)

        # Bug fix: reserve tag index 0 for padding. Previously tag indices
        # started at 0, so one REAL tag shared index 0 with the tag padding
        # used by collate_fn, and CrossEntropyLoss(ignore_index=0) silently
        # excluded that real tag from the loss.
        self.tag_vocab = {'<PAD>': 0}
        for idx, tag in enumerate(sorted({t for tag_seq in tags for t in tag_seq}), start=1):
            self.tag_vocab[tag] = idx

        dataset = SentenceDataset(sentences, tags, self.vocab, self.tag_vocab)
        dataloader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=collate_fn)

        # Model hyperparameters.
        embedding_dim = 128
        hidden_dim = 256
        tagset_size = len(self.tag_vocab)  # includes the dedicated '<PAD>' class

        self.model = GRUModel(len(self.vocab), embedding_dim, hidden_dim, tagset_size, bidirectional=True).to(device)
        # ignore_index=0 now matches the dedicated '<PAD>' tag exactly.
        criterion = nn.CrossEntropyLoss(ignore_index=0)
        optimizer = optim.Adam(self.model.parameters(), lr=0.001)

        train_model(self.model, dataloader, criterion, optimizer, epochs=5)
        messagebox.showinfo("提示", "模型训练完成！")

    def segment_text(self):
        """Tag the sentence currently in the entry box and display the result."""
        sentence = self.text_entry.get()
        if sentence:
            tag_seq = predict(self.model, sentence, self.vocab, self.tag_vocab)
            self.result_text.config(text=" ".join(tag_seq))
        else:
            messagebox.showwarning("提示", "请输入句子！")

if __name__ == "__main__":
    # NOTE(review): `device` is a module-level global read by train_model()
    # and predict(); it is only defined when run as a script, so importing
    # this module and calling those functions directly would raise NameError.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Build the GUI; SegmentApp.__init__ trains the model before mainloop starts.
    root = tk.Tk()
    app = SegmentApp(root)
    root.mainloop()
