import os
import re
import sys

import numpy as np
import torch
from peft import PeftModel
from transformers import BertTokenizer, pipeline

from model.base_model import BertForIntentClassification

# Matches every character that is NOT a CJK unified ideograph or whitespace.
# Compiled once at import time instead of on every call.
_NON_CHINESE = re.compile(r'[^\u4e00-\u9fff\s]')


def _clean_text(text: str) -> str:
    """Return *text* with everything except Chinese characters and whitespace removed."""
    return _NON_CHINESE.sub('', text).strip()


def main():
    """CLI entry point: predict the intent of a single text argument.

    Usage: ``python predict.py <text>``

    Loads the fine-tuned tokenizer and LoRA adapter from disk, runs the
    (cleaned) input through a text-classification pipeline, and prints the
    top intent plus the three highest-scoring candidates.
    """
    if len(sys.argv) < 2:
        print("Usage: python predict.py <text>")
        sys.exit(1)

    text = sys.argv[1]
    model_dir = "model/saved_models"
    # Label names produced at training time; index i corresponds to LABEL_i.
    label_classes = np.load("data/label_classes.npy")

    # Load tokenizer and model
    tokenizer = BertTokenizer.from_pretrained(model_dir)

    # Load base model — classification head sized to the saved label set.
    base_model = BertForIntentClassification.from_pretrained(
        "bert-base-chinese",
        num_labels=len(label_classes),
        ignore_mismatched_sizes=True
    )

    # Attach the LoRA adapter weights on top of the base model.
    model = PeftModel.from_pretrained(base_model, model_dir)
    model.eval()

    # Inference pipeline. top_k=None returns a score for every label and
    # replaces the deprecated return_all_scores=True; note that with top_k
    # passed explicitly, a single-string call returns a flat list of
    # {label, score} dicts (no extra wrapping list).
    classifier = pipeline(
        "text-classification",
        model=model,
        tokenizer=tokenizer,
        device=0 if torch.cuda.is_available() else -1,
        top_k=None
    )

    # Text preprocessing: keep only Chinese characters. Pure ASCII/digit
    # input would be stripped to nothing, so fall back to the raw text
    # rather than feeding the pipeline an empty string.
    cleaned_text = _clean_text(text) or text

    # Model inference — flat list of per-label score dicts (see above).
    results = classifier(cleaned_text)

    # Rank all labels by confidence.
    sorted_results = sorted(results, key=lambda x: x['score'], reverse=True)
    top_intent = sorted_results[0]

    # Pipeline labels are "LABEL_<idx>"; map the index back to the class name.
    print(f"Input Text: {text}")
    print(f"Predicted Intent: {label_classes[int(top_intent['label'].split('_')[-1])]}")
    print(f"Confidence: {top_intent['score']:.4f}")
    print("\nTop 3 Intents:")
    for i, res in enumerate(sorted_results[:3]):
        intent = label_classes[int(res['label'].split('_')[-1])]
        print(f"{i+1}. {intent}: {res['score']:.4f}")

# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()