import asyncio
import json
from typing import Dict, List, Optional

from llama_index.llms.openai import OpenAI

from app.core.config import settings
from app.core.exceptions import LLMError

class LLMService:
    """Service wrapper around the configured OpenAI-compatible LLM.

    Exposes single-text and batched question generation. Responses are
    requested as JSON and parsed into dicts; any transport or parse
    failure is surfaced as :class:`LLMError`.
    """

    def __init__(self):
        # api_base falls back to the library default when not configured.
        self.llm = OpenAI(
            api_key=settings.LLM_API_KEY,
            api_base=settings.LLM_API_BASE if settings.LLM_API_BASE else None,
            model=settings.LLM_MODEL
        )

    @staticmethod
    def _extract_json(raw: str) -> str:
        """Strip optional Markdown code fences from an LLM reply.

        Models frequently wrap JSON answers in ``` / ```json fences even
        when asked for bare JSON; normalize the payload before parsing.
        Input without fences is returned unchanged (whitespace-trimmed).
        """
        text = raw.strip()
        if text.startswith("```"):
            # Drop the opening fence line (including an optional language tag).
            first_newline = text.find("\n")
            if first_newline != -1:
                text = text[first_newline + 1:]
            # Drop the closing fence, if present.
            if text.endswith("```"):
                text = text[:-3]
        return text.strip()

    async def generate_questions_from_text(
        self,
        text: str,
        question_types: List[str],
        difficulty: str,
        num_questions: int = 5,
        subject_id: Optional[int] = None,
        chapter_id: Optional[int] = None
    ) -> Dict:
        """Generate questions from a single piece of text.

        Args:
            text: Source material the questions are based on.
            question_types: Question type identifiers (e.g. "single_choice",
                "true_false", "fill_blank", "short_answer").
            difficulty: Difficulty label interpolated into the prompt.
            num_questions: Number of questions to request.
            subject_id: Accepted for caller convenience; currently unused here.
            chapter_id: Accepted for caller convenience; currently unused here.

        Returns:
            Parsed JSON payload of shape ``{"questions": [...]}``.

        Raises:
            LLMError: If the LLM call fails or its reply is not valid JSON.
        """
        prompt = f"""
        你是一个专业的教育专家，善于出题和解析。
        基于以下内容生成{num_questions}道{difficulty}难度的题目，包含以下类型：{', '.join(question_types)}
        
        对于不同类型的题目，请注意：
        - 选择题(single_choice/multiple_choice)：需要提供选项内容和正确答案
        - 判断题(true_false)：答案必须是 true 或 false
        - 填空题(fill_blank)：需要指明空位数量，每个空的答案
        - 简答题(short_answer)：需要提供标准答案和答案要点

        内容如下：
        {text}
        
        按以下JSON格式返回：
        {{
            "questions": [
                {{
                    "title": "题目标题",
                    "content": "题目内容",
                    "type": "题目类型",
                    "difficulty": "{difficulty}",
                    "answer": {{
                        // 选择题：["A", "B"] 或 ["A"]
                        // 判断题：true/false
                        // 填空题：["答案1", "答案2"]
                        // 简答题：null
                    }},
                    "analysis": "解析",
                    "reference": "参考原文",
                    "source_position": {{
                        "start": "参考原文在内容中的起始位置",
                        "end": "参考原文在内容中的结束位置"
                    }},
                    "options": [  // 选择题专用
                        {{
                            "option_label": "A",
                            "content": "选项内容",
                            "is_correct": true/false
                        }}
                    ],
                    "blank_count": null,  // 填空题专用，表示空位数量
                    "standard_answer": null,  // 简答题专用
                    "answer_points": [  // 简答题专用
                        "要点1",
                        "要点2"
                    ]
                }}
            ]
        }}
        """
        
        try:
            # Low temperature keeps the structured-JSON output deterministic.
            response = await self.llm.acomplete(prompt, temperature=0.1)
            # Tolerate Markdown-fenced replies before strict JSON parsing.
            return json.loads(self._extract_json(response.text))
        except json.JSONDecodeError as e:
            # Preserve the cause chain so the malformed payload is debuggable.
            raise LLMError("LLM返回的格式不正确") from e
        except Exception as e:
            raise LLMError(f"LLM调用失败: {str(e)}") from e

    async def generate_questions_batch(
        self,
        texts: List[Dict[str, str]],  # [{"id": "xxx", "text": "xxx"}]
        question_types: List[str],
        difficulty: str,
        num_questions_per_text: int = 5,
        max_concurrent: int = 3
    ) -> Dict[str, Dict]:
        """Generate questions for many texts concurrently.

        Args:
            texts: Items of shape ``{"id": ..., "text": ...}``.
            question_types: Passed through to each single-text call.
            difficulty: Passed through to each single-text call.
            num_questions_per_text: Questions requested per text.
            max_concurrent: Cap on simultaneous in-flight LLM calls.

        Returns:
            Mapping of text id to its result dict; a failed item maps to
            ``{"error": <message>}`` instead of aborting the whole batch.
        """
        # Bound concurrency so a large batch cannot flood the LLM backend.
        semaphore = asyncio.Semaphore(max_concurrent)
        
        async def process_single_text(text_data: Dict[str, str]) -> tuple:
            async with semaphore:
                try:
                    questions = await self.generate_questions_from_text(
                        text=text_data["text"],
                        question_types=question_types,
                        difficulty=difficulty,
                        num_questions=num_questions_per_text
                    )
                    return text_data["id"], questions
                except Exception as e:
                    # Best-effort batch: record the failure per item.
                    return text_data["id"], {"error": str(e)}

        tasks = [process_single_text(text) for text in texts]
        results = await asyncio.gather(*tasks)
        return {text_id: questions for text_id, questions in results if questions}