from fastapi import APIRouter, HTTPException
from app.schemas.prediction import (
    PredictionRequest,
    PredictionResponse,
    BatchPredictionRequest,
    BatchPredictionResponse
)
from app.core.config import settings
from app.models.ml.bert_model import BertSpamClassifier
from transformers import BertTokenizer
import torch
from cot_utils import generate_cot

router = APIRouter()

# Model and tokenizer are loaded lazily on the first request (see load_model)
# rather than at import time, so both start out as None.
model = None
tokenizer = None

def load_model():
    """Lazily load the BERT spam classifier and its tokenizer on first use.

    The two module-level globals are populated together: locals are built
    first and only assigned to the globals once every step has succeeded.
    On any failure both globals are reset to None so the next request
    retries the load instead of running with a half-initialized pair.

    Raises:
        HTTPException: 500 with the underlying error message if loading fails.
    """
    global model, tokenizer
    # Check BOTH globals: a previously failed attempt must not leave us
    # with a loaded model but no tokenizer (or vice versa).
    if model is None or tokenizer is None:
        try:
            classifier = BertSpamClassifier(settings.PRETRAINED_MODEL_NAME)
            classifier.load_state_dict(torch.load(
                settings.MODEL_DIR / "spam_bert_cot.pth",
                map_location=torch.device('cpu')
            ))
            classifier.eval()  # inference mode: disable dropout etc.

            loaded_tokenizer = BertTokenizer.from_pretrained(settings.PRETRAINED_MODEL_NAME)

            # Publish only after every step above succeeded.
            model = classifier
            tokenizer = loaded_tokenizer
        except Exception as e:
            # Reset so a later request retries from scratch.
            model = None
            tokenizer = None
            raise HTTPException(
                status_code=500,
                detail=f"模型加载失败: {str(e)}"
            ) from e

@router.post("/predict", response_model=PredictionResponse)
async def predict_single(request: PredictionRequest):
    """Classify a single text as spam/ham.

    Generates a chain-of-thought (CoT) rationale for the text, feeds
    "<text> [SEP] <cot>" through the BERT classifier, and returns the
    predicted label, its softmax confidence, and the CoT.

    Raises:
        HTTPException: 500 on model-loading or inference failure.
    """
    load_model()

    try:
        # Chain-of-thought rationale for the input text.
        cot = generate_cot(request.text)

        # Text and CoT are joined with an explicit [SEP] marker; this must
        # match how the model was trained.
        inputs = tokenizer(
            request.text + " [SEP] " + cot,
            truncation=True,
            padding='max_length',
            max_length=settings.MAX_LENGTH,
            return_tensors='pt'
        )

        # Inference only — no gradients needed.
        with torch.no_grad():
            outputs = model(
                inputs['input_ids'],
                inputs['attention_mask']
            )
            probs = torch.softmax(outputs, dim=1)
            pred_label = torch.argmax(outputs, dim=1).item()
            confidence = probs[0][pred_label].item()

        return PredictionResponse(
            label="spam" if pred_label == 1 else "ham",
            confidence=confidence,
            cot=cot
        )
    except HTTPException:
        # Don't re-wrap errors that already carry a status code.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e

@router.post("/predict/batch", response_model=BatchPredictionResponse)
async def predict_batch(request: BatchPredictionRequest):
    """Classify a list of texts in one batched forward pass.

    A chain-of-thought is generated per text; all "<text> [SEP] <cot>"
    strings are then tokenized together and classified in a single model
    call. Because every item is padded to MAX_LENGTH, per-item results are
    identical to classifying each text separately.

    Raises:
        HTTPException: 500 on model-loading or inference failure.
    """
    load_model()

    try:
        # Empty request: nothing to run through the model.
        if not request.texts:
            return BatchPredictionResponse(predictions=[])

        cots = [generate_cot(text) for text in request.texts]

        # Tokenize every input at once — one forward pass for the whole
        # batch instead of one per text.
        inputs = tokenizer(
            [text + " [SEP] " + cot for text, cot in zip(request.texts, cots)],
            truncation=True,
            padding='max_length',
            max_length=settings.MAX_LENGTH,
            return_tensors='pt'
        )

        with torch.no_grad():
            outputs = model(inputs['input_ids'], inputs['attention_mask'])
            probs = torch.softmax(outputs, dim=1)
            pred_labels = torch.argmax(outputs, dim=1).tolist()

        predictions = [
            PredictionResponse(
                label="spam" if label == 1 else "ham",
                confidence=probs[i][label].item(),
                cot=cots[i]
            )
            for i, label in enumerate(pred_labels)
        ]
        return BatchPredictionResponse(predictions=predictions)
    except HTTPException:
        # Don't re-wrap errors that already carry a status code.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e