from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Literal, Dict, Any, AsyncGenerator, Optional
from openai import OpenAI
from fastapi.responses import JSONResponse
import logging
import uvicorn
import httpx
from starlette.background import BackgroundTask
from config import settings


# ASGI application object; routes are registered below via decorators.
app = FastAPI()

class KnowledgeElementExtractionAPI:
    """DeepSeek-backed extractor that turns a math knowledge point into a
    worked example problem plus a structured list of its elements.

    The (Chinese) system prompt instructs the model to act as a secondary-
    school math teacher: design one simple problem for the given knowledge
    point, provide the answer with analysis, and enumerate every element
    involved (point coordinates, function expressions, auxiliary lines, and
    all branches of any case analysis) in a fixed five-section Markdown
    format (points / lines / segments / functions / shapes).
    """

    def __init__(self):
        # OpenAI-compatible SDK client pointed at the DeepSeek endpoint.
        self.client = OpenAI(
            api_key=settings.deepseek_api_key,
            base_url=settings.deepseek_base_url,
        )
        
        # Prompt text is part of the model contract (the <输出格式> section
        # fixes the five Markdown headings that downstream consumers rely
        # on) — keep it verbatim.
        self.system_prompt = '''<身份>
你是一名擅长中学数学的老师，你需要根据上传的知识点，设计一个简单的题目来进行知识点的讲解。
提供题目的同时提供解析答案，并将题目中的元素列出来（（点的坐标、函数的解析式等））

</身份>

<需求>
•   题目和各个小问中提到的所有元素包括计算过程中涉及的辅助线都必须包含，以保证输出答案的精确。
•   若题目中需要分类讨论，相关元素的所有情况都必须列出，以保证输出答案的精准。
•   输出严格参照下方格式，不得添加任何额外内容。
</需求>

<输出格式>
### 1. **点**

### 2. **直线**

### 3. **线段**

### 4. **函数**

### 5. **图形**
</输出格式>'''

        # NOTE(review): logging.basicConfig is process-global and only the
        # first call has any effect; configuring it per instance is harmless
        # but would be cleaner at module import time.
        logging.basicConfig(
            filename="knowledge_element_extraction_api.log",
            level=logging.INFO,
            format="%(asctime)s - %(levelname)s - %(message)s"
        )

    async def extract_elements(self, knowledge_point: str) -> Dict[str, str]:
        """
        Extract elements from a knowledge point (non-streaming).

        Returns {"reasoning": <reasoning trace>, "elements": <formatted
        element list>}. Exceptions are logged and re-raised for the caller
        to handle.

        NOTE(review): declared async but the SDK call below is blocking, so
        this holds the event loop for the duration of the request.
        """
        try:
            logging.info(f"Received knowledge point: {knowledge_point}")
            response = self.client.chat.completions.create(
                model=settings.deepseek_reasoner_model,
                messages=[
                    {"role": "system", "content": self.system_prompt},
                    {"role": "user", "content": knowledge_point},
                ],
                stream=False
            )
            # reasoning_content is the DeepSeek-reasoner-specific field
            # carrying the reasoning trace; content holds the final answer.
            reasoning_content = response.choices[0].message.reasoning_content
            elements = response.choices[0].message.content
            logging.info(f"Extracted knowledge elements: {elements}")
            return {
                "reasoning": reasoning_content,
                "elements": elements
            }
        except Exception as e:
            logging.error(f"Error in extract_elements: {str(e)}")
            raise

    async def extract_elements_stream(self, knowledge_point: str) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Stream the knowledge element extraction process.

        Yields {"type": "reasoning"|"element", "content": <chunk>} dicts as
        they arrive from the model. On failure — or when the streamed result
        looks invalid — this generator does not raise; it yields a single
        {"type": "error", ...} dict (possibly after a non-streaming
        fallback attempt).

        NOTE(review): iterating the SDK stream below is a blocking loop
        inside an async generator, so the event loop is blocked between
        chunks.
        """
        try:
            logging.info(f"Received knowledge point for streaming: {knowledge_point[:100]}...")
            logging.info(f"Using model: {settings.deepseek_reasoner_model}")
            
            response = self.client.chat.completions.create(
                model=settings.deepseek_reasoner_model,
                messages=[
                    {"role": "system", "content": self.system_prompt},
                    {"role": "user", "content": knowledge_point},
                ],
                stream=True
            )
            
            reasoning_chunks = []
            element_chunks = []
            chunk_count = 0
            
            for chunk in response:
                chunk_count += 1
                delta = chunk.choices[0].delta
                
                # Each delta carries either a reasoning fragment or an answer
                # fragment; deltas with neither are silently skipped.
                if delta.reasoning_content:
                    reasoning_chunks.append(delta.reasoning_content)
                    logging.info(f"Reasoning chunk {chunk_count}: {delta.reasoning_content[:50]}...")
                    yield {"type": "reasoning", "content": delta.reasoning_content}
                    
                elif delta.content:
                    element_chunks.append(delta.content)
                    logging.info(f"Element chunk {chunk_count}: {delta.content[:50]}...")
                    yield {"type": "element", "content": delta.content}
            
            # Aggregate the streamed pieces for logging and validity checks.
            total_reasoning = ''.join(reasoning_chunks)
            total_elements = ''.join(element_chunks)
            
            logging.info(f"Stream完成 - 总chunk数: {chunk_count}")
            logging.info(f"推理内容总长度: {len(total_reasoning)}")
            logging.info(f"元素内容总长度: {len(total_elements)}")
            logging.info(f"最终知识点元素内容: {total_elements}")
            
            # If the streamed element content looks invalid (empty, a bare
            # "###", or shorter than 10 chars), retry via the non-streaming
            # call and emit its full result as one "element" chunk.
            if not total_elements or total_elements.strip() in ["###", ""] or len(total_elements.strip()) < 10:
                logging.warning("流式接口返回的元素内容无效，使用非流式接口作为fallback")
                try:
                    fallback_result = await self.extract_elements(knowledge_point)
                    fallback_elements = fallback_result.get('elements', '')
                    # The fallback must itself produce a non-trivial result
                    # (> 10 chars) to be accepted.
                    if fallback_elements and len(fallback_elements.strip()) > 10:
                        logging.info(f"Fallback成功，元素内容长度: {len(fallback_elements)}")
                        yield {"type": "element", "content": fallback_elements}
                    else:
                        logging.error("Fallback也失败了")
                        yield {"type": "error", "content": "知识点元素提取失败：流式和非流式接口都无法获取有效元素"}
                except Exception as fallback_error:
                    logging.error(f"Fallback失败: {fallback_error}")
                    yield {"type": "error", "content": f"知识点元素提取失败: {fallback_error}"}
                    
        except Exception as e:
            logging.error(f"Error in extract_elements_stream: {str(e)}")
            yield {"type": "error", "content": str(e)}


# Downstream service that consumes streamed chunks; each chunk is POSTed
# here individually by push_stream_to_receiver.
# NOTE(review): "0.0.0.0" as a *destination* host is unusual — presumably
# this should be "127.0.0.1" or a real hostname; confirm the receiver address.
RECEIVER_URL = "http://0.0.0.0:9000/receive_chunk_stream"

class KnowledgeInput(BaseModel):
    """Request body: the knowledge-point text to build a problem around."""
    knowledge_point: str

class ElementsOutput(BaseModel):
    """Response body: the model's reasoning trace plus the element list."""
    reasoning: str
    elements: str

@app.post("/extract_knowledge_elements", response_model=ElementsOutput)
async def extract_knowledge_elements(input: KnowledgeInput):
    """Non-streaming extraction endpoint.

    Builds a fresh extractor, runs the one-shot extraction, and returns the
    reasoning + elements as JSON; any failure surfaces as a 500 whose detail
    is the error message.
    """
    try:
        logging.info(f"Received knowledge point: {input.knowledge_point}")
        extractor = KnowledgeElementExtractionAPI()
        payload = await extractor.extract_elements(input.knowledge_point)
        return JSONResponse(status_code=200, content=payload)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))


def push_stream_to_receiver(knowledge_point: str):
    """Stream a DeepSeek extraction for *knowledge_point* and forward each
    chunk to RECEIVER_URL as an individual JSON POST.

    Runs as a Starlette BackgroundTask, i.e. *after* the HTTP response has
    been sent — errors can never reach the original caller, so they are
    logged rather than raised (raising HTTPException here would be useless).
    """
    try:
        # Reuse the shared client + prompt configuration instead of a
        # hard-coded API key / base URL and a duplicated prompt string.
        api = KnowledgeElementExtractionAPI()

        response = api.client.chat.completions.create(
            model=settings.deepseek_reasoner_model,
            messages=[
                {"role": "system", "content": api.system_prompt},
                {"role": "user", "content": knowledge_point},
            ],
            stream=True
        )
        for chunk in response:
            delta = chunk.choices[0].delta
            if delta.reasoning_content:
                logging.info(f"Knowledge reasoning content: {delta.reasoning_content}")
                data = {"type": "reasoning", "content": delta.reasoning_content}
            elif delta.content:
                logging.info(f"Knowledge extracted elements: {delta.content}")
                data = {"type": "element", "content": delta.content}
            else:
                # Delta carries neither field (e.g. a role-only chunk):
                # nothing to forward — previously an empty {} was POSTed.
                continue

            try:
                httpx.post(RECEIVER_URL, json=data, timeout=3.0)
            except Exception as e:
                # Best-effort forwarding: a dropped chunk should not abort
                # the rest of the stream.
                logging.error(f"Failed to send chunk to receiver: {e}")
    except Exception as e:
        logging.error(f"Error in push_stream_to_receiver: {e}")

@app.post("/extract_knowledge_elements_stream")
async def extract_knowledge_elements_proxy(input: KnowledgeInput):
    """Fire-and-forget streaming endpoint.

    Immediately acknowledges the request; the actual extraction runs as a
    background task that pushes chunks to RECEIVER_URL after this response
    has been sent, so the caller never observes streaming errors.
    """
    logging.info(f"Received knowledge point: {input.knowledge_point}")
    # The sync function runs in Starlette's threadpool only after the
    # JSONResponse below has been delivered to the client.
    task = BackgroundTask(push_stream_to_receiver, knowledge_point=input.knowledge_point)
    return JSONResponse(content={"status": "accepted", "message": "Knowledge streaming started to receiver"}, background=task)


if __name__ == "__main__":
    # Dev entry point; the module must be importable under the name
    # "knowledge_element_extraction_api" for the string app reference.
    uvicorn.run("knowledge_element_extraction_api:app", host="0.0.0.0", port=8004, reload=False) 