import chromadb
from chromadb.config import Settings
import os
import re
from typing import List, Dict, Any
from django.conf import settings
import uuid

class ChromaRAGService:
    """RAG service backed by a persistent ChromaDB collection.

    Responsibilities:
      * extract text from uploaded files (.txt / .docx) and split it into
        overlapping chunks,
      * store and query those chunks in the "interview_knowledge" collection,
      * naive heuristic question generation and rule-based answer scoring
        for an interview-practice knowledge base.
    """

    def __init__(self):
        # Persistent client so the vector store survives process restarts.
        self.client = chromadb.PersistentClient(
            path=str(settings.VECTOR_DB_PATH),
            settings=Settings(anonymized_telemetry=False, allow_reset=True)
        )
        self.collection_name = "interview_knowledge"
        self._ensure_collection()

    def _ensure_collection(self):
        """Bind ``self.collection``, creating the collection on first use."""
        try:
            self.collection = self.client.get_collection(self.collection_name)
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit). chromadb's "missing collection"
            # error type varies across versions, so Exception is the
            # narrowest portable net here.
            self.collection = self.client.create_collection(
                name=self.collection_name,
                metadata={"description": "面试知识库向量数据库"}
            )

    def _extract_text_from_file(self, file_path: str) -> str:
        """Return the plain text content of a ``.txt`` or ``.docx`` file.

        Raises:
            Exception: for an unsupported extension or a missing
                python-docx dependency (kept as plain ``Exception`` —
                ``add_document`` catches it and reports the message).
        """
        file_extension = os.path.splitext(file_path)[1].lower()

        if file_extension == '.txt':
            with open(file_path, 'r', encoding='utf-8') as f:
                return f.read()
        elif file_extension == '.docx':
            try:
                from docx import Document
                doc = Document(file_path)
                return '\n'.join(paragraph.text for paragraph in doc.paragraphs)
            except ImportError:
                raise Exception("请安装 python-docx: pip install python-docx")
        else:
            raise Exception(f"不支持的文件类型: {file_extension}")

    def _split_text(self, text: str, chunk_size: int = 1000, overlap: int = 200) -> List[str]:
        """Split ``text`` into overlapping chunks of at most ``chunk_size`` chars.

        Chunk ends are snapped back to the nearest sentence/newline boundary
        within the last ``overlap`` characters when one exists.
        """
        if len(text) <= chunk_size:
            return [text]

        # Clamp so the window always advances: the original looped forever
        # whenever overlap >= chunk_size.
        overlap = max(0, min(overlap, chunk_size - 1))

        chunks: List[str] = []
        start = 0

        while start < len(text):
            end = start + chunk_size

            if end < len(text):
                # Prefer a sentence/line boundary. The look-back width was a
                # hard-coded 200; it is now tied to `overlap` (identical with
                # the default arguments).
                for i in range(end, max(start, end - overlap), -1):
                    if text[i] in '.。\n':
                        end = i + 1
                        break

            chunk = text[start:end].strip()
            if chunk:
                chunks.append(chunk)

            # Stop once the text is exhausted. The original kept iterating
            # and emitted a final chunk that was a pure subset of the
            # previous one (just the overlap region).
            if end >= len(text):
                break

            next_start = end - overlap
            # Guarantee forward progress even if the boundary search pulled
            # `end` back close to `start`.
            start = next_start if next_start > start else start + 1

        return chunks

    def add_document(self, file_path: str, title: str, document_id: str) -> Dict[str, Any]:
        """Extract, chunk and index one document.

        Returns a status dict: ``{"success": True, "document_id", "chunks_count",
        "title"}`` on success, ``{"success": False, "error"}`` on failure.
        """
        try:
            content = self._extract_text_from_file(file_path)
            chunks = self._split_text(content)

            vector_ids = []
            documents = []
            metadatas = []

            for i, chunk in enumerate(chunks):
                vector_ids.append(f"{document_id}_chunk_{i}")
                documents.append(chunk)
                metadatas.append({
                    "title": title,
                    "document_id": document_id,
                    "chunk_index": i,
                    "file_path": file_path,
                    "total_chunks": len(chunks)
                })

            self.collection.add(
                ids=vector_ids,
                documents=documents,
                metadatas=metadatas
            )

            return {
                "success": True,
                "document_id": document_id,
                "chunks_count": len(chunks),
                "title": title
            }

        except Exception as e:
            # Deliberate best-effort API: callers receive a status dict
            # rather than an exception.
            return {"success": False, "error": str(e)}

    def search_similar(self, query: str, n_results: int = 5) -> List[Dict[str, Any]]:
        """Return up to ``n_results`` chunks nearest to ``query``.

        Each item has ``content``, ``metadata`` and ``distance`` keys.
        Returns an empty list on any query error (best-effort).
        """
        try:
            results = self.collection.query(
                query_texts=[query],
                n_results=n_results
            )

            similar_docs = []
            # chroma returns one inner list per query text; we issued one query.
            if results['documents'] and results['documents'][0]:
                for i, doc in enumerate(results['documents'][0]):
                    similar_docs.append({
                        "content": doc,
                        "metadata": results['metadatas'][0][i] if results['metadatas'] and results['metadatas'][0] else {},
                        "distance": results['distances'][0][i] if results['distances'] and results['distances'][0] else 0
                    })

            return similar_docs

        except Exception as e:
            print(f"搜索错误: {e}")
            return []

    def generate_questions(self, document_content: str, num_questions: int = 5) -> List[str]:
        """Heuristically derive up to ``num_questions`` questions from text.

        NOTE(review): placeholder heuristic (random keyword + sentence
        prefix), not real question generation.
        """
        import random  # hoisted: was re-imported on every loop iteration

        questions: List[str] = []
        keywords = ['什么', '如何', '为什么', '请解释', '描述', '比较', '分析']

        sentences = [s.strip() for s in re.split(r'[。！？]', document_content)]
        sentences = [s for s in sentences if len(s) > 10]

        for sentence in sentences[:num_questions]:
            if len(sentence) > 20:
                questions.append(f"{random.choice(keywords)}{sentence[:50]}...")

        # Pad with a generic prompt when the text yields too few questions.
        while len(questions) < num_questions:
            questions.append("请详细解释一下这个主题的相关内容。")

        return questions[:num_questions]

    def calculate_similarity(self, user_answer: str, reference_content: str) -> float:
        """Return a similarity score in [0, 1] for ``user_answer``.

        NOTE(review): ``reference_content`` is currently unused — the answer
        is compared against the nearest chunk in the *whole* collection, not
        the specific reference passed in. Also ``1 - distance`` only maps
        into [0, 1] if the collection's distance metric is bounded by 1
        (e.g. cosine); with chroma's default L2 metric scores can collapse
        to 0 — confirm the configured metric.
        """
        try:
            results = self.collection.query(
                query_texts=[user_answer],
                n_results=1
            )

            if results['distances'] and results['distances'][0]:
                distance = results['distances'][0][0]
                return max(0, 1 - distance)

            return 0.0

        except Exception as e:
            print(f"相似度计算错误: {e}")
            return 0.0

    def evaluate_answer(self, user_answer: str, question: str, reference_content: str) -> Dict[str, Any]:
        """Score an answer: similarity to the knowledge base + rule-based quality.

        Returns a dict with ``similarity_score``, ``quality_score``,
        ``overall_score`` (their mean) and a textual ``feedback``.
        """
        similarity_score = self.calculate_similarity(user_answer, reference_content)
        quality_score = self._calculate_quality_score(user_answer, question)
        feedback = self._generate_feedback(similarity_score, quality_score, user_answer)

        return {
            "similarity_score": similarity_score,
            "quality_score": quality_score,
            "overall_score": (similarity_score + quality_score) / 2,
            "feedback": feedback
        }

    def _calculate_quality_score(self, answer: str, question: str) -> float:
        """Rule-based quality score in [0, 1]: length + keyword overlap + structure."""
        score = 0.0

        # Length component: 0.1 / 0.2 / 0.3 tiers.
        if len(answer) >= 50:
            score += 0.3
        elif len(answer) >= 20:
            score += 0.2
        else:
            score += 0.1

        # Overlap of contiguous CJK runs between question and answer (up to 0.4).
        question_keywords = set(re.findall(r'[\u4e00-\u9fff]+', question))
        answer_keywords = set(re.findall(r'[\u4e00-\u9fff]+', answer))

        if question_keywords:
            keyword_match = len(question_keywords.intersection(answer_keywords)) / len(question_keywords)
            score += keyword_match * 0.4

        # Structure bonus for discourse connectives ("first/then/finally/...").
        if any(word in answer for word in ['首先', '然后', '最后', '因此', '所以']):
            score += 0.3

        return min(1.0, score)

    def _generate_feedback(self, similarity_score: float, quality_score: float, answer: str) -> str:
        """Compose human-readable feedback from the two scores and answer length."""
        feedback_parts = []

        if similarity_score >= 0.8:
            feedback_parts.append("答案与标准答案高度匹配，理解准确。")
        elif similarity_score >= 0.6:
            feedback_parts.append("答案基本正确，但可以更详细。")
        elif similarity_score >= 0.4:
            feedback_parts.append("答案部分正确，需要补充更多信息。")
        else:
            feedback_parts.append("答案与标准答案差异较大，建议重新学习相关内容。")

        if quality_score >= 0.8:
            feedback_parts.append("答案结构清晰，表达完整。")
        elif quality_score >= 0.6:
            feedback_parts.append("答案结构基本合理，可以更详细。")
        else:
            feedback_parts.append("答案过于简短，建议提供更详细的解释。")

        if len(answer) < 20:
            feedback_parts.append("答案过于简短，建议提供更详细的说明。")

        return " ".join(feedback_parts)

    def delete_document(self, document_id: str) -> bool:
        """Delete every stored chunk belonging to ``document_id``.

        Returns True on success (including when nothing matched), False on error.
        """
        try:
            # Collect the ids of all chunks indexed under this document.
            results = self.collection.get(
                where={"document_id": document_id}
            )

            if results['ids']:
                self.collection.delete(ids=results['ids'])

            return True

        except Exception as e:
            print(f"删除文档错误: {e}")
            return False
