import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from importlib import import_module
from utils import build_dataset, load_class_names
from models.TextRNN import Config, Model
import pickle as pkl
import os
from collections import Counter


# Request body schema for the /predict endpoint
class NewsInput(BaseModel):
    """Request payload: a single raw news text to classify."""

    # Raw text; tokenized per character downstream in preprocess_text.
    text: str


app = FastAPI()

# Model configuration: THUCNews dataset with pre-trained Sogou News embeddings.
dataset = 'THUCNews'
embedding = 'embedding_SougouNews.npz'
config = Config(dataset, embedding)
model = Model(config).to(config.device)

try:
    # map_location lets a checkpoint trained on GPU load on a CPU-only host.
    model.load_state_dict(torch.load(config.save_path, map_location=config.device))
    model.eval()  # inference mode: disables dropout / batch-norm updates
except FileNotFoundError as e:
    # This code runs at import time, outside any request: raising HTTPException
    # here would never become an HTTP response, so fail fast with a plain error.
    raise RuntimeError(f"Model checkpoint not found: {config.save_path}") from e
except RuntimeError as e:
    # load_state_dict reports missing/unexpected keys via RuntimeError
    # (not KeyError, which the previous handler tried to catch).
    raise RuntimeError(f"Failed to load model weights: {e}") from e

# Human-readable class names, indexed by the model's output class id.
class_names = load_class_names(config.class_list)
print("Loaded class names:", class_names)


# Single-text preprocessing (adapted from the dataset's batch pipeline).
def preprocess_text(text, vocab, pad_size):
    """Encode one raw text into model-ready tensors.

    Tokenizes per character, pads/truncates to ``pad_size``, and maps each
    token to its vocabulary id, falling back to ``'<UNK>'`` for OOV tokens.

    Args:
        text: raw input string.
        vocab: dict mapping token -> integer id; must contain '<PAD>' and '<UNK>'.
        pad_size: fixed sequence length; a falsy value disables padding/truncation.

    Returns:
        Tuple of (LongTensor shape (1, seq), LongTensor shape (1,)): the encoded
        token ids and the original sequence length (capped at ``pad_size``).
    """
    tokens = list(text)  # character-level tokenization
    seq_len = len(tokens)
    if pad_size:
        if len(tokens) < pad_size:
            # BUG FIX: pad with the '<PAD>' *token string*, not its integer id.
            # The encode step below looks tokens up in `vocab`; an int id is
            # never a vocab key, so padding used to be mis-encoded as '<UNK>'.
            tokens.extend(['<PAD>'] * (pad_size - len(tokens)))
        else:
            tokens = tokens[:pad_size]
            seq_len = pad_size
    unk_id = vocab.get('<UNK>')
    words_line = [vocab.get(tok, unk_id) for tok in tokens]
    return torch.LongTensor([words_line]), torch.LongTensor([seq_len])


@app.post("/predict")
async def predict(news_data: NewsInput):
    """Classify one news text; returns ``{"prediction": <class name>}``.

    Raises:
        HTTPException: 404 if the vocabulary file is missing; 500 for any
            other unexpected failure during preprocessing or inference.
    """
    try:
        if not os.path.exists(config.vocab_path):
            raise HTTPException(status_code=404, detail="词汇表文件未找到")
        # NOTE(review): the vocab is re-read from disk on every request;
        # caching it at startup would be cheaper — confirm before changing.
        with open(config.vocab_path, 'rb') as f:
            vocab = pkl.load(f)
        input_tensor, seq_len = preprocess_text(news_data.text, vocab, config.pad_size)
        input_tensor = input_tensor.to(config.device)
        seq_len = seq_len.to(config.device)
        with torch.no_grad():  # inference only — skip gradient bookkeeping
            output = model((input_tensor, seq_len))
            _, predicted = torch.max(output, 1)
        return {"prediction": class_names[predicted.item()]}
    except HTTPException:
        # BUG FIX: re-raise HTTPException untouched. The broad handler below
        # used to catch the 404 above and re-wrap it as a generic 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    # Launch the API with uvicorn (default host 127.0.0.1) when run directly.
    import uvicorn

    uvicorn.run(app, port=8000)