from models.database import Session, KnowledgeContent
import json
import os

class KnowledgeService:
    """In-memory keyword search over texts persisted through SQLAlchemy.

    Small files (byte size below ``max_file_size``) are stored as one
    complete text; larger files are split into overlapping word chunks.
    Both forms are written to the database as ``KnowledgeContent`` rows and
    mirrored in memory for searching.
    """

    def __init__(self):
        self.chunk_size = 500        # approximate character length of one chunk
        self.chunk_overlap = 50      # trailing words carried over between chunks
        self.max_file_size = 2 * 1024 * 1024  # 2MB: smaller files stay un-chunked
        self.texts = []              # flat list of every stored text piece
        self.file_contents = {}      # file_id -> list of stored text pieces
        self.complete_texts = {}     # file_id -> full text (un-chunked files only)
        self.db_path = os.path.join("data", "knowledge.db")
        os.makedirs("data", exist_ok=True)
        self.init_db()
        self.load_knowledge_base()   # hydrate the in-memory indexes from the DB

    def split_text(self, text, file_size=0):
        """Split ``text`` into overlapping chunks, or return it whole.

        Args:
            text: Raw text to store.
            file_size: Size of the source in bytes; anything below
                ``max_file_size`` is returned as a single-element list.

        Returns:
            list[str]: Either the complete text, or word-boundary chunks of
            roughly ``chunk_size`` characters, with ``chunk_overlap`` words
            repeated between consecutive chunks.
        """
        # Small files are kept whole so search can return them verbatim.
        if file_size < self.max_file_size:
            return [text]

        chunks = []
        words = text.split()
        current_chunk = []
        current_size = 0

        for word in words:
            current_chunk.append(word)
            current_size += len(word) + 1  # +1 for the joining space

            if current_size >= self.chunk_size:
                chunks.append(' '.join(current_chunk))
                # Carry the last few words into the next chunk so matches
                # that span a chunk boundary are not lost.
                overlap_words = current_chunk[-self.chunk_overlap:]
                current_chunk = overlap_words
                current_size = sum(len(word) + 1 for word in overlap_words)

        if current_chunk:
            chunks.append(' '.join(current_chunk))

        return chunks

    def search(self, query, k=2):
        """Case-insensitive substring search over the knowledge base.

        Args:
            query: Search string.
            k: Maximum number of results; ``k <= 0`` returns everything.

        Returns:
            list[dict]: Result dicts with ``text``, ``score``, ``file_id``
            and ``is_complete`` keys, sorted best score first.
        """
        results = []
        query_lower = query.lower()

        # Complete texts take priority: a direct hit short-circuits as soon
        # as k results are collected.
        for file_id, text in self.complete_texts.items():
            if query_lower in text.lower():
                results.append({
                    'text': text,
                    'score': 1.0,
                    'file_id': file_id,
                    'is_complete': True
                })
                if len(results) >= k:
                    return results

        # Queries that ask about files/contents ("文件" = file, "内容" =
        # content) dump every complete text.
        if "文件" in query_lower or "内容" in query_lower or ".docx" in query_lower:
            # Skip file_ids already matched above so the same complete text
            # is not returned twice.
            seen = {r['file_id'] for r in results}
            for file_id, text in self.complete_texts.items():
                if file_id in seen:
                    continue
                results.append({
                    'text': text,
                    'score': 1.0,
                    'file_id': file_id,
                    'is_complete': True
                })

            # Fall back to chunked content when no complete text exists.
            if not results:
                for file_id, contents in self.file_contents.items():
                    for text in contents:
                        results.append({
                            'text': text,
                            'score': 0.8,
                            'file_id': file_id,
                            'is_complete': False
                        })
        else:
            # Keyword search over chunked files only; complete texts were
            # already scanned in the first loop.
            for file_id, contents in self.file_contents.items():
                if file_id not in self.complete_texts:
                    for text in contents:
                        text_lower = text.lower()
                        if query_lower in text_lower:
                            # Guard against empty chunks: "" in "" is True,
                            # which would otherwise divide by zero below.
                            if not text_lower:
                                continue
                            # Score by match density within the chunk.
                            score = text_lower.count(query_lower) / len(text_lower)
                            results.append({
                                'text': text,
                                'score': score,
                                'file_id': file_id,
                                'is_complete': False
                            })

        results.sort(key=lambda x: x['score'], reverse=True)
        return results[:k] if k > 0 else results

    def train_knowledge(self, text, file_id):
        """Store new knowledge text under ``file_id``.

        The text is chunked when large, persisted as ``KnowledgeContent``
        rows, and mirrored into the in-memory indexes.

        Returns:
            bool: True on success.

        Raises:
            Exception: Wraps any storage failure (message text in Chinese,
                kept for compatibility with existing callers/UI).
        """
        try:
            # Byte length (not character count) decides chunking, matching
            # the on-disk size threshold.
            text_size = len(text.encode('utf-8'))
            chunks = self.split_text(text, text_size)

            # A single chunk means the text was kept whole.
            if len(chunks) == 1:
                self.complete_texts[file_id] = text

            # Persist every chunk; is_complete marks un-chunked texts so
            # load_knowledge_base() can rebuild complete_texts.
            with Session() as session:
                for chunk in chunks:
                    content = KnowledgeContent(
                        file_id=file_id,
                        content=chunk,
                        chunks=None,
                        embeddings=None,
                        is_complete=(len(chunks) == 1)
                    )
                    session.add(content)
                session.commit()

            # Mirror into the in-memory indexes.
            self.file_contents.setdefault(file_id, []).extend(chunks)
            self.texts.extend(chunks)

            return True
        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"知识库训练失败: {str(e)}") from e

    def load_knowledge_base(self):
        """Rebuild all in-memory indexes from the persisted rows."""
        try:
            with Session() as session:
                contents = session.query(KnowledgeContent).all()
                self.texts = []
                self.file_contents = {}
                self.complete_texts = {}

                for content in contents:
                    self.file_contents.setdefault(content.file_id, []).append(content.content)
                    self.texts.append(content.content)

                    if content.is_complete:
                        self.complete_texts[content.file_id] = content.content

        except Exception as e:
            # Best effort: an empty knowledge base at startup is preferable
            # to crashing the constructor.
            print(f"加载知识库失败: {str(e)}")

    def init_db(self):
        """Placeholder for database initialization.

        Schema creation is assumed to happen elsewhere (presumably the
        SQLAlchemy models in ``models.database``) — confirm before relying
        on this being a no-op.
        """
        pass