import os
import tkinter as tk
from tkinter import messagebox
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence

# Dataset helpers
def char_to_idx(sentences, vocab):
    """Map each character of each sentence to its vocabulary index.

    Characters missing from *vocab* fall back to the ``<UNK>`` index.
    """
    unk = vocab['<UNK>']
    indexed = []
    for sentence in sentences:
        indexed.append([vocab.get(ch, unk) for ch in sentence])
    return indexed

def tag_to_idx(tags, tag_vocab):
    """Map each tag of each tag sequence to its index in *tag_vocab*.

    Raises KeyError for tags not present in *tag_vocab*.
    """
    result = []
    for seq in tags:
        result.append([tag_vocab[t] for t in seq])
    return result

class SentenceDataset(Dataset):
    """Dataset pairing character-index sentences with tag-index sequences."""

    def __init__(self, sentences, tags, vocab, tag_vocab):
        # Convert everything to index lists once, up front; characters missing
        # from the vocabulary map to <UNK>.
        unk = vocab['<UNK>']
        self.sentences = [[vocab.get(ch, unk) for ch in sent] for sent in sentences]
        self.tags = [[tag_vocab[t] for t in seq] for seq in tags]

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, idx):
        sent = torch.tensor(self.sentences[idx])
        tag = torch.tensor(self.tags[idx])
        return sent, tag

# Padding / batch collation
def collate_fn(batch):
    """Pad a batch of (sentence, tags) tensor pairs to a common length.

    Both sentences and tag sequences are right-padded with 0 (the <PAD> /
    padding-tag index), batch-first.
    """
    sentences = [pair[0] for pair in batch]
    tags = [pair[1] for pair in batch]
    return (
        pad_sequence(sentences, batch_first=True, padding_value=0),
        pad_sequence(tags, batch_first=True, padding_value=0),
    )

# GRU model
class GRUModel(nn.Module):
    """Embedding -> (bi)GRU -> linear layer producing per-character tag scores."""

    def __init__(self, vocab_size, embedding_dim, hidden_dim, tagset_size, num_layers=1, bidirectional=True):
        super(GRUModel, self).__init__()
        directions = 2 if bidirectional else 1
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.gru = nn.GRU(
            embedding_dim,
            hidden_dim,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True,
        )
        # Bidirectional GRUs concatenate forward/backward states, doubling width.
        self.fc = nn.Linear(hidden_dim * directions, tagset_size)

    def forward(self, x, lengths):
        # Pack so the GRU skips padded positions; lengths must live on the CPU.
        packed = pack_padded_sequence(
            self.embedding(x), lengths.cpu(), batch_first=True, enforce_sorted=False
        )
        packed_out, _ = self.gru(packed)
        unpacked, _ = pad_packed_sequence(packed_out, batch_first=True)
        return self.fc(unpacked)

# Data loading
def load_data(file_path):
    """Read a GBK-encoded corpus of "char tag" lines separated by blank lines.

    Returns (sentences, tags): parallel lists where each sentence is a list of
    characters and each tag sequence a list of tag strings. Lines that do not
    split into exactly two whitespace-separated fields are skipped.
    """
    sentences, tags = [], []
    chars, labels = [], []
    with open(file_path, encoding='gbk') as fh:
        for raw in fh:
            stripped = raw.strip()
            if not stripped:
                # A blank line terminates the current sentence.
                if chars:
                    sentences.append(chars)
                    tags.append(labels)
                    chars, labels = [], []
                continue
            fields = stripped.split()
            if len(fields) == 2:
                chars.append(fields[0])
                labels.append(fields[1])
    # Flush a trailing sentence that is not followed by a blank line.
    if chars:
        sentences.append(chars)
        tags.append(labels)
    return sentences, tags

# Training
def train_model(model, dataloader, criterion, optimizer, epochs, tag_vocab):
    """Train *model* for *epochs* passes over *dataloader*.

    Expects batches of (sentences, tags) index tensors right-padded with 0;
    positions where the tag is 0 are treated as padding and excluded from the
    accuracy count (the criterion is expected to exclude them itself, e.g. via
    ignore_index=0). Prints per-epoch mean loss and token accuracy.

    Fixes vs. previous version: guards against ZeroDivisionError when an epoch
    contains no non-padding tokens (or an empty dataloader), and uses the
    device of the model's parameters instead of relying on a module global.
    """
    device = next(model.parameters()).device
    model.train()
    for epoch in range(epochs):
        total_loss = 0.0
        correct, total = 0, 0
        num_batches = 0
        for sentences, tags in dataloader:
            # Real (unpadded) length of each sentence: count of nonzero ids.
            lengths = torch.sum(sentences != 0, dim=1)
            sentences, tags = sentences.to(device), tags.to(device)
            optimizer.zero_grad()
            outputs = model(sentences, lengths)
            loss = criterion(outputs.view(-1, outputs.shape[-1]), tags.view(-1))
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            num_batches += 1

            # Token-level accuracy over non-padding positions only.
            preds = outputs.argmax(dim=-1)
            mask = tags != 0
            correct += (preds[mask] == tags[mask]).sum().item()
            total += mask.sum().item()
        # Guard against division by zero on an empty/all-padding epoch.
        accuracy = correct / total if total else 0.0
        mean_loss = total_loss / num_batches if num_batches else 0.0
        print(f"Epoch {epoch + 1}/{epochs}, Loss: {mean_loss:.4f}, Accuracy: {accuracy:.4f}")

# Inference
def predict(model, sentence, vocab, tag_vocab):
    """Tag each character of *sentence* with the model's argmax label.

    Returns a list of tag strings, one per character; an empty sentence yields
    an empty list (previously this crashed inside pack_padded_sequence).
    Characters missing from *vocab* map to ``<UNK>``. Runs on whatever device
    the model's parameters live on, instead of relying on a module global.
    """
    if not sentence:
        return []
    model.eval()
    with torch.no_grad():
        device = next(model.parameters()).device
        indices = [vocab.get(ch, vocab['<UNK>']) for ch in sentence]
        input_tensor = torch.tensor([indices], device=device)
        lengths = torch.tensor([len(sentence)])
        output = model(input_tensor, lengths)
        predicted = output.argmax(dim=-1).squeeze(0).cpu().tolist()
        idx_to_tag = {idx: tag for tag, idx in tag_vocab.items()}
        # Fall back to "S" (single-character word) for any class index with no
        # tag entry (e.g. a padding class the model was trained to ignore);
        # previously this raised KeyError.
        return [idx_to_tag.get(idx, "S") for idx in predicted]

# Segmentation
def segment_sentence(sentence, tags):
    """Join characters into words according to BMES-style position tags.

    "B" starts a new word, "M"/"E" extend the current word, and any other tag
    (e.g. "S") closes the pending word and starts a fresh single-character one.
    Returns the words joined by single spaces.
    """
    words = []
    current = ""
    for char, tag in zip(sentence, tags):
        if tag in ("M", "E"):
            # Word-internal or word-final character: extend the current word.
            current += char
        else:
            # "B" and any other tag both flush the pending word first.
            if current:
                words.append(current)
            current = char
    if current:
        words.append(current)
    return " ".join(words)

# Main application
class SegmentApp:
    """Tkinter front-end: trains a GRU tagger on startup, then segments input."""

    def __init__(self, root):
        self.root = root
        self.root.title("GRU 汉语分词与词位标注系统")

        self.label = tk.Label(root, text="输入待标注的句子：")
        self.label.pack()

        self.text_entry = tk.Entry(root, width=50)
        self.text_entry.pack()

        self.segment_button = tk.Button(root, text="标注并分词", command=self.segment_text)
        self.segment_button.pack()

        self.result_label1 = tk.Label(root, text="分词结果：")
        self.result_label1.pack()

        self.result_text1 = tk.Label(root, text="", wraplength=400)
        self.result_text1.pack()

        self.result_label2 = tk.Label(root, text="词位标注结果：")
        self.result_label2.pack()

        self.result_text2 = tk.Label(root, text="", wraplength=400)
        self.result_text2.pack()

        self.vocab = None      # char -> index; 0 reserved for <PAD>
        self.tag_vocab = None  # tag -> index; 0 reserved for tag padding
        self.model = None
        # Blocks the UI until training finishes (note: this method shadows the
        # module-level train_model function, which it calls internally).
        self.train_model()

    def train_model(self):
        """Load the training corpus, build vocabularies, and train the model."""
        base_dir = os.path.dirname(os.path.abspath(__file__))
        train_path = os.path.join(base_dir, "msr_training.txt")

        sentences, tags = load_data(train_path)
        # sorted() makes index assignment deterministic across runs; bare set
        # iteration order would vary with hash randomization.
        self.vocab = {char: idx for idx, char in
                      enumerate(sorted({char for sent in sentences for char in sent}), start=1)}
        self.vocab['<PAD>'] = 0
        self.vocab['<UNK>'] = len(self.vocab)
        # BUGFIX: tag indices must start at 1. Tag sequences are padded with 0
        # by collate_fn and the loss uses ignore_index=0, so a 0-based tag
        # vocabulary silently excluded one real tag from the loss and from the
        # accuracy mask during training.
        self.tag_vocab = {tag: idx for idx, tag in
                          enumerate(sorted({tag for tag_seq in tags for tag in tag_seq}), start=1)}

        dataset = SentenceDataset(sentences, tags, self.vocab, self.tag_vocab)
        dataloader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=collate_fn)

        embedding_dim = 128
        hidden_dim = 256
        # +1 output class for the padding index 0 (never a training target).
        tagset_size = len(self.tag_vocab) + 1

        self.model = GRUModel(len(self.vocab), embedding_dim, hidden_dim, tagset_size, bidirectional=True).to(device)
        criterion = nn.CrossEntropyLoss(ignore_index=0)
        optimizer = optim.Adam(self.model.parameters(), lr=0.001)

        train_model(self.model, dataloader, criterion, optimizer, epochs=5, tag_vocab=self.tag_vocab)
        messagebox.showinfo("提示", "模型训练完成！")

    def segment_text(self):
        """Tag the entry text, then display segmented words and the tag sequence."""
        sentence = self.text_entry.get()
        if not sentence:
            messagebox.showwarning("提示", "请输入句子！")
            return
        tag_seq = predict(self.model, sentence, self.vocab, self.tag_vocab)
        self.result_text1.config(text=segment_sentence(sentence, tag_seq))
        self.result_text2.config(text=" ".join(tag_seq))

if __name__ == "__main__":
    # NOTE(review): `device` is a module-level global consumed by the training
    # and inference code above; it only exists when this file is run as a
    # script, not when imported as a module.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    root = tk.Tk()
    app = SegmentApp(root)
    root.mainloop()
