import torch
import argparse


# ---------- 重新定义必要的类结构（需与训练代码一致） ----------
class Vocab:
    """Token-to-id vocabulary with reserved ids: <PAD> -> 0, <UNK> -> 1.

    Must stay structurally identical to the Vocab class used at training
    time, since trained checkpoints pickle instances of it.
    """

    def __init__(self, tokens=None, min_freq=1, unk_token='<UNK>', pad_token='<PAD>'):
        self.unk_token = unk_token
        self.pad_token = pad_token
        # Reserved slots: padding is always id 0, unknown always id 1.
        self.token2id = {pad_token: 0, unk_token: 1}
        self.id2token = {0: pad_token, 1: unk_token}

        if tokens is not None:
            # Count token frequencies, then register tokens meeting min_freq.
            counter = {}
            for token in tokens:
                counter[token] = counter.get(token, 0) + 1
            for token, count in counter.items():
                # Skip tokens already registered (e.g. a literal pad/unk token
                # appearing in the data) so reserved ids are never clobbered.
                if count >= min_freq and token not in self.token2id:
                    self.token2id[token] = len(self.token2id)
            self.id2token = {v: k for k, v in self.token2id.items()}

    def __len__(self):
        return len(self.token2id)


class SequenceLabeler(torch.nn.Module):
    """Sequence tagger: embedding -> bidirectional RNN -> per-token linear head.

    Args:
        vocab_size: number of input token ids (embedding rows).
        embed_dim: embedding dimension.
        hidden_dim: RNN hidden size per direction.
        num_tags: size of the output tag set.
        rnn_type: one of 'rnn', 'lstm', 'gru' (default 'lstm').

    Raises:
        ValueError: if rnn_type is not a supported recurrent cell type.
    """

    def __init__(self, vocab_size, embed_dim, hidden_dim, num_tags, rnn_type='lstm'):
        super().__init__()
        self.embedding = torch.nn.Embedding(vocab_size, embed_dim)
        self.rnn_type = rnn_type
        rnn_classes = {
            'rnn': torch.nn.RNN,
            'lstm': torch.nn.LSTM,
            'gru': torch.nn.GRU,
        }
        # Fail fast on a bad rnn_type instead of leaving self.rnn undefined
        # and crashing later inside forward() with an opaque AttributeError.
        if rnn_type not in rnn_classes:
            raise ValueError(
                f"Unsupported rnn_type: {rnn_type!r}; expected one of {sorted(rnn_classes)}"
            )
        self.rnn = rnn_classes[rnn_type](
            embed_dim, hidden_dim, batch_first=True, bidirectional=True
        )
        # Bidirectional: forward and backward hidden states are concatenated.
        self.fc = torch.nn.Linear(hidden_dim * 2, num_tags)

    def forward(self, x):
        """Map (batch, seq_len) long ids to (batch, seq_len, num_tags) logits."""
        embedded = self.embedding(x)
        out, _ = self.rnn(embedded)
        return self.fc(out)


# ---------- 预测功能实现 ----------
class Predictor:
    """Loads a trained SequenceLabeler checkpoint and tags raw text.

    The checkpoint is expected to contain 'char_vocab', 'tag_vocab',
    'model_type' and 'state_dict' entries (as saved by the training code).
    """

    def __init__(self, model_path, max_len=100):
        # NOTE: torch.load with weights_only=False unpickles arbitrary
        # objects — only load checkpoints from trusted sources.
        checkpoint = torch.load(model_path, map_location='cpu', weights_only=False)
        self.char_vocab = checkpoint['char_vocab']
        self.tag_vocab = checkpoint['tag_vocab']

        # Rebuild the network. Hyper-parameters fall back to the training
        # defaults when the checkpoint does not record them, so older
        # checkpoints keep working unchanged.
        self.model = SequenceLabeler(
            vocab_size=len(self.char_vocab),
            embed_dim=checkpoint.get('embed_dim', 128),
            hidden_dim=checkpoint.get('hidden_dim', 256),
            num_tags=len(self.tag_vocab),
            rnn_type=checkpoint['model_type'].lower()
        )
        self.model.load_state_dict(checkpoint['state_dict'])
        self.model.eval()

        self.max_len = max_len
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device)

    def preprocess(self, sentence):
        """Convert a sentence into a padded (1, max_len) LongTensor of char ids."""
        chars = list(sentence.strip())
        # Look up the actual UNK id from the vocab instead of hard-coding 1;
        # fall back to 1 only if the vocab lacks its own unk token.
        unk_id = self.char_vocab.token2id.get(self.char_vocab.unk_token, 1)
        ids = [self.char_vocab.token2id.get(c, unk_id) for c in chars]
        # Zero-fill gives PAD (id 0) for positions past the sentence end.
        padded = torch.zeros((self.max_len,), dtype=torch.long)
        seq_len = min(len(ids), self.max_len)
        if seq_len:  # guard empty input: avoid assigning an empty tensor slice
            padded[:seq_len] = torch.tensor(ids[:seq_len])
        return padded.unsqueeze(0).to(self.device)  # add batch dimension

    def predict(self, sentence):
        """Return [(char, tag), ...] for up to max_len characters of sentence."""
        with torch.no_grad():
            inputs = self.preprocess(sentence)
            logits = self.model(inputs)
            # squeeze(0) (not squeeze()) so the sequence axis survives
            # even in the degenerate max_len == 1 case.
            preds = torch.argmax(logits, dim=-1).squeeze(0).cpu().numpy()

        # NOTE(review): preprocess strips whitespace but chars here does not,
        # so leading/trailing whitespace could misalign chars and tags —
        # preserved from the original; confirm inputs are pre-stripped.
        chars = list(sentence)[:self.max_len]  # truncate over-long input
        tags = [self.tag_vocab.id2token[int(preds[i])] for i in range(len(chars))]
        return list(zip(chars, tags))


# ---------- 交互界面 ----------
def main():
    """Interactive CLI: load a checkpoint and tag user-entered sentences.

    Reads sentences from stdin in a loop; 'quit'/'exit' (or Ctrl-D/Ctrl-C)
    ends the session. Prints one "char<TAB>tag" line per character.
    """
    parser = argparse.ArgumentParser(description='序列标注预测工具')
    parser.add_argument('--model', type=str, default="E:\\桌面\\best_model", help='模型文件路径')
    args = parser.parse_args()

    print("加载模型中...")
    predictor = Predictor(args.model)
    print("模型加载完成！输入'quit'退出\n")

    while True:
        # Exit gracefully on Ctrl-D (EOF) or Ctrl-C instead of a traceback.
        try:
            sentence = input("请输入文本：")
        except (EOFError, KeyboardInterrupt):
            break

        if sentence.lower() in ['exit', 'quit']:
            break

        if not sentence.strip():
            print("输入不能为空！")
            continue

        results = predictor.predict(sentence)
        print("\n预测结果：")
        for char, tag in results:
            print(f"{char}\t{tag}")
        print("\n")


if __name__ == '__main__':
    main()