import os
from collections import Counter, defaultdict

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset


# -------------------- 数据预处理 --------------------
def load_data(file_path):
    """Read a character-per-line tagging corpus (GBK-encoded).

    Each non-empty line holds ``<char> <tag>``; blank lines separate
    sentences.

    Returns:
        (sentences, labels): parallel lists, one inner list of chars and
        one inner list of tags per sentence.
    """
    sentences, labels = [], []
    sent, tags = [], []
    with open(file_path, 'r', encoding='gbk') as fh:
        for raw in fh:
            raw = raw.strip()
            if raw:
                token, tag = raw.split()
                sent.append(token)
                tags.append(tag)
            elif sent:
                # Blank line: close the current sentence (if any).
                sentences.append(sent)
                labels.append(tags)
                sent, tags = [], []
    # Flush a trailing sentence when the file does not end with a blank line.
    if sent:
        sentences.append(sent)
        labels.append(tags)
    return sentences, labels


class Vocab:
    """Bidirectional token<->id mapping with reserved PAD=0 and UNK=1.

    Tokens seen fewer than ``min_freq`` times are excluded (callers map
    them to the UNK id at lookup time).
    """

    def __init__(self, tokens, min_freq=1, unk_token='<UNK>', pad_token='<PAD>'):
        """
        Args:
            tokens: iterable of tokens (duplicates expected; counted).
            min_freq: minimum occurrence count for a token to get an id.
            unk_token: token reserved at id 1 for out-of-vocabulary lookups.
            pad_token: token reserved at id 0 for padding.
        """
        self.unk_token = unk_token
        self.pad_token = pad_token
        # collections.Counter replaces the hand-rolled defaultdict counter;
        # it preserves first-seen order, so assigned ids are unchanged.
        counter = Counter(tokens)
        self.token2id = {pad_token: 0, unk_token: 1}
        for token, count in counter.items():
            # The `not in` guard keeps the reserved PAD/UNK ids stable even
            # if those literal strings appear in the corpus.
            if count >= min_freq and token not in self.token2id:
                self.token2id[token] = len(self.token2id)
        self.id2token = {v: k for k, v in self.token2id.items()}

    def __len__(self):
        return len(self.token2id)


class TagDataset(Dataset):
    """Fixed-length tensor view over (sentence, tag-sequence) pairs.

    Each item is ``(char_ids, tag_ids, mask)``, all of length ``max_len``:
    sequences longer than ``max_len`` are truncated, shorter ones are
    zero-padded (0 is the PAD id) with ``mask`` False on padding positions.
    """

    def __init__(self, sentences, labels, char_vocab, tag_vocab, max_len=100):
        """
        Args:
            sentences: list of char lists.
            labels: list of tag lists, parallel to ``sentences``.
            char_vocab: Vocab mapping chars to ids.
            tag_vocab: Vocab mapping tags to ids.
            max_len: fixed output sequence length.
        """
        self.sentences = sentences
        self.labels = labels
        self.char_vocab = char_vocab
        self.tag_vocab = tag_vocab
        self.max_len = max_len
        # Look up the UNK id from the vocab instead of hard-coding index 1,
        # so the dataset stays correct if Vocab's reserved ids ever change.
        self.unk_id = char_vocab.token2id.get(char_vocab.unk_token, 1)

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, idx):
        chars = [self.char_vocab.token2id.get(c, self.unk_id) for c in self.sentences[idx]]
        # Tags are assumed in-vocabulary (tag vocab built from the same
        # corpus); a KeyError here signals a vocab/corpus mismatch.
        tags = [self.tag_vocab.token2id[t] for t in self.labels[idx]]
        padded_chars = torch.zeros(self.max_len, dtype=torch.long)
        padded_tags = torch.zeros(self.max_len, dtype=torch.long)
        mask = torch.zeros(self.max_len, dtype=torch.bool)
        seq_len = min(len(chars), self.max_len)
        padded_chars[:seq_len] = torch.tensor(chars[:seq_len])
        padded_tags[:seq_len] = torch.tensor(tags[:seq_len])
        mask[:seq_len] = 1
        return padded_chars, padded_tags, mask


# -------------------- 模型定义 --------------------
class SequenceLabeler(nn.Module):
    """Bidirectional RNN tagger: embedding -> (RNN|LSTM|GRU) -> per-token logits."""

    def __init__(self, vocab_size, embed_dim, hidden_dim, num_tags, rnn_type='lstm'):
        """
        Args:
            vocab_size: size of the character vocabulary.
            embed_dim: embedding dimension.
            hidden_dim: per-direction hidden size of the recurrent layer.
            num_tags: number of output tag classes.
            rnn_type: one of 'rnn', 'lstm', 'gru'.

        Raises:
            ValueError: if ``rnn_type`` is not recognized. (The original
                silently left ``self.rnn`` undefined, deferring the failure
                to the first forward() call.)
        """
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.rnn_type = rnn_type
        rnn_classes = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
        if rnn_type not in rnn_classes:
            raise ValueError(f"Unsupported rnn_type: {rnn_type!r}; expected one of {sorted(rnn_classes)}")
        self.rnn = rnn_classes[rnn_type](embed_dim, hidden_dim, batch_first=True, bidirectional=True)
        # Bidirectional: forward/backward states are concatenated -> 2 * hidden_dim.
        self.fc = nn.Linear(hidden_dim * 2, num_tags)

    def forward(self, x):
        """Map (batch, seq) int ids to (batch, seq, num_tags) logits."""
        emb = self.embedding(x)
        out, _ = self.rnn(emb)
        return self.fc(out)


# -------------------- 训练与评估 --------------------
def train_model(model, train_loader, val_loader, optimizer, criterion, device, epochs=None):
    """Train ``model`` and track the best validation F1.

    Args:
        model: the SequenceLabeler (or compatible) module.
        train_loader: DataLoader yielding (inputs, labels, masks) batches.
        val_loader: DataLoader for validation, same batch format.
        optimizer: optimizer over ``model.parameters()``.
        criterion: loss over flattened (logits, labels) of active positions.
        device: torch device the batches/model run on.
        epochs: number of epochs; defaults to the module-level ``EPOCHS``
            global for backward compatibility with the original behavior.

    Returns:
        (best_f1, best_model_state, val_acc_history, val_f1_history)
    """
    if epochs is None:
        epochs = EPOCHS  # original behavior read the script-level global
    best_f1 = 0.0
    best_model_state = None
    val_acc_history = []
    val_f1_history = []

    for epoch in range(epochs):
        model.train()
        total_loss = 0.0
        for inputs, labels, masks in train_loader:
            inputs, labels, masks = inputs.to(device), labels.to(device), masks.to(device)
            optimizer.zero_grad()
            logits = model(inputs)
            # Restrict the loss to non-padding positions via the boolean mask.
            active = masks.view(-1) == 1
            active_logits = logits.view(-1, logits.shape[-1])[active]
            active_labels = labels.view(-1)[active]
            loss = criterion(active_logits, active_labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        train_loss = total_loss / len(train_loader)

        val_loss, val_acc, val_f1 = evaluate_model(model, val_loader, criterion, device)
        val_acc_history.append(val_acc)
        val_f1_history.append(val_f1)
        print(f"Epoch {epoch + 1}/{epochs} | Train Loss: {train_loss:.4f} | Val F1: {val_f1:.4f}")

        if val_f1 > best_f1:
            best_f1 = val_f1
            # BUG FIX: dict.copy() is shallow -- state_dict() returns
            # references to the live parameter tensors, so later epochs
            # silently overwrote the saved "best" weights. Clone each tensor.
            best_model_state = {k: v.detach().clone() for k, v in model.state_dict().items()}

    return best_f1, best_model_state, val_acc_history, val_f1_history


def evaluate_model(model, dataloader, criterion, device):
    """Return (mean loss, token accuracy, weighted F1) over ``dataloader``.

    Padding positions are excluded from both the loss and the metrics via
    the boolean masks yielded by the loader.
    """
    model.eval()
    loss_sum = 0.0
    all_preds, all_golds = [], []
    with torch.no_grad():
        for batch_x, batch_y, batch_mask in dataloader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)
            batch_mask = batch_mask.to(device)
            logits = model(batch_x)

            # Loss over non-padding positions only.
            flat_mask = batch_mask.view(-1) == 1
            flat_logits = logits.view(-1, logits.shape[-1])[flat_mask]
            flat_labels = batch_y.view(-1)[flat_mask]
            loss_sum += criterion(flat_logits, flat_labels).item()

            # Collect per-token predictions, truncated to each true length.
            pred_ids = logits.argmax(dim=-1).cpu().numpy()
            gold_ids = batch_y.cpu().numpy()
            for row_pred, row_gold, row_mask in zip(pred_ids, gold_ids, batch_mask):
                n = int(row_mask.sum().item())
                all_preds.extend(row_pred[:n])
                all_golds.extend(row_gold[:n])

    avg_loss = loss_sum / len(dataloader)
    return avg_loss, accuracy_score(all_golds, all_preds), f1_score(all_golds, all_preds, average='weighted')


# -------------------- 主程序 --------------------
if __name__ == '__main__':
    # ---- Hyperparameters ----
    EMBED_DIM = 128
    HIDDEN_DIM = 256
    BATCH_SIZE = 32
    LR = 0.001
    EPOCHS = 20  # NOTE: train_model() reads this module-level global
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ---- Load data ----
    # NOTE(review): hard-coded absolute Windows path; load_data expects a
    # GBK-encoded, char-per-line corpus -- confirm for other machines.
    sentences, labels = load_data("E:\\桌面\\pku_stn_training.txt")
    all_chars = [char for sent in sentences for char in sent]
    all_tags = [tag for tag_seq in labels for tag in tag_seq]
    char_vocab = Vocab(all_chars)
    # min_freq=0 keeps every tag, including ones seen only once.
    tag_vocab = Vocab(all_tags, min_freq=0)

    # ---- Split into train/validation sets (80/20, fixed seed) ----
    train_sentences, val_sentences, train_labels, val_labels = train_test_split(
        sentences, labels, test_size=0.2, random_state=42
    )
    train_dataset = TagDataset(train_sentences, train_labels, char_vocab, tag_vocab)
    val_dataset = TagDataset(val_sentences, val_labels, char_vocab, tag_vocab)
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE)

    # ---- Initialize one model per recurrent cell type ----
    models = {
        'RNN': SequenceLabeler(len(char_vocab), EMBED_DIM, HIDDEN_DIM, len(tag_vocab), 'rnn'),
        'LSTM': SequenceLabeler(len(char_vocab), EMBED_DIM, HIDDEN_DIM, len(tag_vocab), 'lstm'),
        'GRU': SequenceLabeler(len(char_vocab), EMBED_DIM, HIDDEN_DIM, len(tag_vocab), 'gru')
    }

    # ---- Train each model and record validation performance ----
    results = {}
    history = {}
    best_overall_f1 = 0.0
    best_model_info = None

    for model_name, model in models.items():
        print(f"\n=== Training {model_name} ===")
        model = model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=LR)
        criterion = nn.CrossEntropyLoss()

        best_f1, best_state, val_acc_hist, val_f1_hist = train_model(
            model, train_loader, val_loader, optimizer, criterion, device
        )
        # Restore the epoch with the best validation F1 before final scoring.
        model.load_state_dict(best_state)
        _, final_acc, final_f1 = evaluate_model(model, val_loader, criterion, device)

        results[model_name] = {
            'accuracy': final_acc * 100,
            'f1': final_f1 * 100
        }
        history[model_name] = {
            'acc': val_acc_hist,
            'f1': val_f1_hist
        }

        if final_f1 > best_overall_f1:
            best_overall_f1 = final_f1
            best_model_info = {
                'model_type': model_name,
                'state_dict': best_state,
                'char_vocab': char_vocab,
                'tag_vocab': tag_vocab
            }

    # ---- Save the best model (weights + vocabs) ----
    # NOTE(review): torch.save pickles the Vocab objects; loading requires
    # this module's Vocab class to be importable.
    if best_model_info:
        save_path = "best_model"
        torch.save(best_model_info, save_path)
        print(f"\nSaved Best Model: {best_model_info['model_type']} to {save_path}")

    # ---- Print the comparison table ----
    print("\n模型对比结果:")
    print(f"{'模型':<6} | {'准确率 (%)':<12} | {'F1 (%)':<10}")
    for model_name in ['RNN', 'LSTM', 'GRU']:
        acc = results[model_name]['accuracy']
        f1 = results[model_name]['f1']
        print(f"{model_name:<6} | {acc:>10.2f}    | {f1:>9.2f}")

    # ---- Visualization: side-by-side validation curves ----
    plt.figure(figsize=(12, 5))

    # F1 comparison plot
    plt.subplot(1, 2, 1)
    for model_name in ['RNN', 'LSTM', 'GRU']:
        plt.plot(range(1, EPOCHS + 1),
                 [x * 100 for x in history[model_name]['f1']],
                 marker='o',
                 label=model_name)
    plt.xlabel('Epoch')
    plt.ylabel('F1 Score (%)')
    plt.title('Validation F1 Comparison')
    plt.legend()
    plt.grid(True)

    # Accuracy comparison plot
    plt.subplot(1, 2, 2)
    for model_name in ['RNN', 'LSTM', 'GRU']:
        plt.plot(range(1, EPOCHS + 1),
                 [x * 100 for x in history[model_name]['acc']],
                 marker='o',
                 label=model_name)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Validation Accuracy Comparison')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig('performance_comparison.png')
    plt.close()