import json

from fastapi import FastAPI, Request
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi import HTTPException
from transformers import BertTokenizer, BertConfig
import torch

from api.schemas import PaperRequest, RecommendationRequest
from models.model import PaperClassifier
from models.dataset import ArxivDataset
from pathlib import Path

import logging
logger = logging.getLogger("api")


from services.knowledge_graph import KnowledgeGraph
from services.recommender import PaperRecommender


app = FastAPI()
# Mount the static-file route (serves files under ./static at /static).
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")

# Initialize the dataset: loads the arXiv metadata snapshot and the tokenizer
# files stored under ./configs.
dataset = ArxivDataset(
    data_path="data/arxiv-metadata-oai-snapshot.json",
    tokenizer_path="configs"
)

# Load the model: BERT config + tokenizer come from the ./configs directory,
# weights from a saved state_dict checkpoint.
model_path = Path("saved_models/final_model.pt")
config = BertConfig.from_pretrained("configs/")
tokenizer = BertTokenizer.from_pretrained("configs/")  # loads the whole directory
# num_classes is derived from the dataset's categorical column so it matches
# the label space the checkpoint was trained on.
model = PaperClassifier(
    config_path="configs",
    num_classes=len(dataset.data['categories'].cat.categories)
)
# map_location='cpu' so the API also starts on machines without a GPU.
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
model.eval()  # inference mode: disables dropout / batch-norm updates

@app.post("/classify")
async def classify_paper(paper: PaperRequest):
    """Classify a paper into an arXiv category.

    Tokenizes "<title> <abstract>" (truncated/padded to 128 tokens), runs the
    classifier, and returns the argmax category index with its softmax
    probability.

    Raises:
        HTTPException: 500 on any internal failure (tokenization/model error).
    """
    try:
        logger.info(f"Processing paper: {paper.title[:50]}...")
        inputs = tokenizer(
            paper.title + " " + paper.abstract,
            return_tensors="pt",
            max_length=128,
            truncation=True,
            padding="max_length",
            return_token_type_ids=False  # the model does not consume token type ids
        )
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            outputs = model(**inputs)
        probs = torch.nn.functional.softmax(outputs, dim=-1)
        return {
            "category": outputs.argmax().item(),
            "probability": probs.max().item()
        }
    except Exception as e:
        logger.error(f"Classification failed: {str(e)}")
        # Surface failures as a real HTTP 500 instead of a 200 response with
        # an error payload, so clients can rely on the status code.
        raise HTTPException(status_code=500, detail="内部服务器错误")

@app.get("/")
async def read_root(request: Request):
    """Render the landing page template for the web UI."""
    context = {"request": request}
    return templates.TemplateResponse("old-indexhtml.txt", context)

# Initialize the recommendation service (uses the trained model + dataset).
recommender = PaperRecommender(model, dataset)
@app.post("/recommend")
async def get_recommendations(req: RecommendationRequest):
    """Return the top-k papers most similar to ``req.paper_id``.

    Raises:
        HTTPException: 404 when the paper id is not in the index.
    """
    paper_id = req.paper_id
    try:
        recommendations = recommender.recommend(paper_id, req.top_k)
        # Hoist the query embedding out of the loop: it is reused for every
        # similarity score below.
        query_emb = recommender.embeddings[paper_id]
        return {
            "recommendations": [
                {
                    "paper_id": int(idx),
                    "title": dataset.data.iloc[idx]['title'],
                    # Raw dot product between embeddings (presumably the
                    # embeddings are normalized — TODO confirm in recommender).
                    "similarity": float(query_emb @ recommender.embeddings[idx])
                } for idx in recommendations
            ]
        }
    except (IndexError, KeyError) as e:
        # An out-of-range paper id previously escaped as an unhandled
        # exception and produced a raw 500; report a proper 404 instead.
        logger.error(f"Recommendation failed for paper_id={paper_id}: {e}")
        raise HTTPException(status_code=404, detail="Paper not found")

# Initialize the knowledge graph built from the dataset.
kg = KnowledgeGraph(dataset)
@app.get("/knowledge/{field}")
async def get_knowledge(field: str):
    """Return the paper count and related paper titles for a research field.

    Raises:
        HTTPException: 404 when ``field`` is not a node in the graph.
    """
    # An unknown field previously raised a KeyError (raw 500); report 404.
    if field not in kg.graph:
        raise HTTPException(status_code=404, detail="Field not found")
    return {
        "field": field,
        "paper_count": kg.graph.nodes[field]['count'],
        "related_papers": [
            kg.graph.nodes[n]['title']
            for n in kg.get_related_papers(field)
        ]
    }

# History endpoint: serves the records persisted in history.json
# (automatically keeps the 5 most recent entries).
@app.get("/history")
async def get_history():
    """Return the saved request history, or [] when none exists.

    A missing or corrupted history file is treated as an empty history
    rather than surfacing a 500 to the client.
    """
    try:
        # Explicit encoding: the file may contain non-ASCII titles.
        with open("history.json", "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        return []
    except json.JSONDecodeError:
        # A truncated/corrupt file should not break the endpoint.
        logger.warning("history.json is corrupted; returning empty history")
        return []