from typing import List, Dict
import re
from config import Config
from utils.logger import logger


class DocumentSplitter:
    """Split documents into retrievable chunks.

    Supported ``split_mode`` values (dispatched in :meth:`split_text`):

    * ``"rule"`` (default) — size-based, sentence-aligned splitting.
    * ``"qa"`` — QA-pair chunks, falling back to rule splitting.
    * ``"qa_vector"`` — index questions only; answers returned separately.
    * ``"semantic"`` — currently simplified to rule splitting.
    * ``"hybrid"`` — original QA pairs plus paragraph chunks, optionally
      using an LLM client to generate index questions for paragraphs.
    """

    # Un-numbered QA pair: a question ending in ？/? followed by an answer
    # line introduced by 答/答案/回答/回复; the answer runs until the next
    # non-indented line or end of text.  One class-level constant so
    # _qa_split and _extract_qa_pairs cannot drift apart (they previously
    # duplicated this pattern verbatim).
    _NO_NUM_QA_RE = re.compile(
        r'([^？?]+[？?])\n(?:答|答案|回答|回复)：(.*?)(?=\n\S|$)',
        re.DOTALL,
    )

    def __init__(self,
                 min_chunk_size: int = Config.MIN_CHUNK_SIZE,
                 chunk_overlap: int = Config.CHUNK_OVERLAP,
                 split_mode: str = "rule"):
        """
        Args:
            min_chunk_size: Minimum accepted chunk length in characters.
            chunk_overlap: Number of trailing characters of the previous
                chunk to prepend to the next one (0 disables overlap).
            split_mode: One of "rule", "qa", "qa_vector", "semantic",
                "hybrid".
        """
        self.chunk_overlap = chunk_overlap
        self.min_chunk_size = min_chunk_size
        self.split_mode = split_mode

    def _qa_split(self, text: str) -> List[str]:
        """Split *text* into "question\\n答案：answer" chunks.

        Numbered QA pairs (``Config.QA_PATTERN``) are tried first; if
        fewer than five are found, un-numbered pairs are also collected.

        Returns:
            List[str]: Formatted QA chunks; empty list on failure.
        """
        try:
            chunks: List[str] = []

            # Numbered QA pairs, e.g. "1、question？ ... 答：answer".
            for match in re.finditer(Config.QA_PATTERN, text, re.DOTALL):
                qa_text = match.group(1).strip()
                # The question runs from the number up to the first
                # sentence terminator.
                question_match = re.match(r"\d+、(.*?)(?:[。！？!?])", qa_text)
                if not question_match:
                    continue
                question = question_match.group(1).strip()
                answer = re.search(r"(?:回[答复]：|答：)(.*)", qa_text, re.DOTALL)
                if answer:
                    answer_text = answer.group(1).strip()
                    # Normalize to one uniform chunk format.
                    chunk = f"{question}\n答案：{answer_text}"
                    chunks.append(chunk)
                    logger.debug(f"切分出编号QA对: {chunk}")

            # Too few numbered pairs: also collect un-numbered QA pairs.
            if len(chunks) < 5:
                for match in self._NO_NUM_QA_RE.finditer(text):
                    question = match.group(1).strip()
                    answer_text = match.group(2).strip()
                    chunk = f"{question}\n答案：{answer_text}"
                    chunks.append(chunk)
                    logger.debug(f"切分出无编号QA对: {chunk}")

            if not chunks:
                logger.warning("未找到任何问答对")

            return chunks
        except Exception as e:
            logger.error(f"QA切分失败: {str(e)}")
            return []

    def _extract_qa_pairs(self, text: str) -> List[Dict[str, str]]:
        """Extract QA pairs as ``{'question': ..., 'full_text': answer}``.

        Formats are tried in order: the Excel import format
        ("question\\n答案：answer" records separated by "####"), numbered
        QA pairs, then un-numbered QA pairs.  Duplicate questions are
        skipped.

        Returns:
            List[Dict[str, str]]: Extracted pairs; empty list on failure.
        """
        try:
            chunks: List[Dict[str, str]] = []
            # O(1) duplicate-question check instead of scanning `chunks`
            # for every candidate.
            seen_questions = set()

            # 1) Excel import format: "####"-separated records.
            for qa_text in text.split('####'):
                qa_text = qa_text.strip()
                if not qa_text:
                    continue

                # Exactly one "\n答案：" separator marks a valid record.
                parts = qa_text.split('\n答案：')
                if len(parts) == 2:
                    question = parts[0].strip()
                    answer = parts[1].strip()
                    # Require non-trivial question and answer; reject
                    # digit/punctuation-only and duplicate questions.
                    if (len(question) > 3 and len(answer) > 3 and
                            not re.match(r'^[\d\W]+$', question) and
                            question not in seen_questions):
                        seen_questions.add(question)
                        chunks.append({
                            'question': question,
                            'full_text': answer
                        })
                        logger.debug(f"提取Excel格式QA对 - 问题: {question}")

            # 2) Numbered QA pairs, when the Excel format yielded few results.
            if len(chunks) < 3:
                for match in re.finditer(Config.QA_PATTERN, text, re.DOTALL):
                    qa_text = match.group(1).strip()
                    question_match = re.match(r"\d+、(.*?)(?:[。！？!?])", qa_text)
                    if not question_match:
                        continue
                    question = question_match.group(1).strip()
                    answer = re.search(r"(?:回[答复]：|答：)(.*)", qa_text, re.DOTALL)
                    if not answer:
                        continue
                    answer_text = answer.group(1).strip()
                    if (len(question) > 5 and
                            not re.match(r'^[\d\W]+$', question) and
                            question not in seen_questions):
                        seen_questions.add(question)
                        chunks.append({
                            'question': question,
                            'full_text': answer_text
                        })
                        logger.debug(f"提取编号QA对 - 问题: {question}")

            # 3) Un-numbered QA pairs as a last resort.
            if not chunks:
                for match in self._NO_NUM_QA_RE.finditer(text):
                    question = match.group(1).strip()
                    answer_text = match.group(2).strip()
                    if (len(question) > 5 and
                            not re.match(r'^[\d\W]+$', question) and
                            question not in seen_questions):
                        seen_questions.add(question)
                        chunks.append({
                            'question': question,
                            'full_text': answer_text
                        })
                        logger.debug(f"提取无编号QA对 - 问题: {question}")

            if not chunks:
                logger.warning("未找到任何问答对")

            return chunks

        except Exception as e:
            logger.error(f"QA切分失败: {str(e)}")
            return []

    def split_text(self, text: str, client=None) -> tuple:
        """Split *text* according to ``self.split_mode``.

        Args:
            text: Text to split.
            client: LLM client; only used by the hybrid mode for
                paragraph-question generation.

        Returns:
            tuple: (chunk list, answer list, label list).  Labels are
            'qa', 'paragraph_qa' or 'paragraph'; the answer list is empty
            for plain paragraph modes.  All three lists are empty on
            failure.
        """
        try:
            # Hybrid mode: paragraphs and QA pairs coexist.
            if self.split_mode == "hybrid":
                return self._hybrid_split(text, client)

            # QA vector mode: index questions, keep answers separately.
            elif self.split_mode == "qa_vector":
                qa_pairs = self._extract_qa_pairs(text)
                if qa_pairs:
                    # Only the questions are used for the vector index.
                    q_temp, a_temp, qa_labels = [], [], []
                    for pair in qa_pairs:
                        if len(pair.get('question', "")) > 1:
                            q_temp.append(pair.get('question'))
                            a_temp.append(pair.get('full_text'))
                            qa_labels.append('qa')  # mark as QA pair
                    return q_temp, a_temp, qa_labels
                # No QA pairs found: fall back to rule splitting.
                para_chunks = self._split_by_size(text, Config.CHUNK_SIZES[0])
                para_labels = ['paragraph'] * len(para_chunks)
                return para_chunks, [], para_labels

            # QA mode: use QA chunks only when enough of them were found.
            elif self.split_mode == "qa":
                qa_chunks = self._qa_split(text)
                if len(qa_chunks) > 5:
                    qa_labels = ['qa'] * len(qa_chunks)
                    return qa_chunks, [], qa_labels
                # Fall back to rule splitting.
                para_chunks = self._split_by_size(text, Config.CHUNK_SIZES[0])
                para_labels = ['paragraph'] * len(para_chunks)
                return para_chunks, [], para_labels

            # Semantic mode (currently simplified to rule splitting).
            elif self.split_mode == "semantic":
                para_chunks = self._split_by_size(text, Config.CHUNK_SIZES[0])
                para_labels = ['paragraph'] * len(para_chunks)
                return para_chunks, [], para_labels

            # Default rule mode: try each configured size, stopping at the
            # first whose chunks all fit [min_chunk_size, size].
            # NOTE(review): chunks from every attempted size accumulate, so
            # a failed size leaves the same content indexed at several
            # granularities — confirm this multi-granularity behavior is
            # intended.
            else:
                all_chunks = []
                for size in Config.CHUNK_SIZES:
                    current_chunks = self._split_by_size(text, size)
                    all_chunks.extend(current_chunks)
                    if current_chunks and all(
                        self.min_chunk_size <= len(chunk) <= size
                        for chunk in current_chunks
                    ):
                        break

                para_labels = ['paragraph'] * len(all_chunks)
                return all_chunks, [], para_labels
        except Exception as e:
            logger.error(f"文本分割失败: {str(e)}")
            return [], [], []

    def _hybrid_split(self, text: str, client=None) -> tuple:
        """Hybrid split: original QA pairs plus paragraph chunks.

        Returns:
            tuple: (chunk list, answer list, label list); empty lists on
            failure.
        """
        try:
            all_chunks = []
            all_answers = []
            all_labels = []

            # Collect the document's own QA pairs first.
            qa_pairs = self._extract_qa_pairs(text)
            if qa_pairs:
                logger.info(f"提取到 {len(qa_pairs)} 个问答对")
                for pair in qa_pairs:
                    if len(pair.get('question', "")) > 1:
                        all_chunks.append(pair.get('question'))
                        all_answers.append(pair.get('full_text'))
                        all_labels.append('qa')

            # Paragraph handling depends on the configured question mode.
            if Config.PARAGRAPH_QUESTION_MODE == "original":
                # Generate questions from whole paragraphs, then split.
                self._process_original_paragraphs(text, client, all_chunks, all_answers, all_labels)
            else:
                # Default: split first, then generate questions per chunk.
                self._process_chunked_paragraphs(text, client, all_chunks, all_answers, all_labels)

            logger.info(f"混合切分完成：总共 {len(all_chunks)} 个块")
            logger.info(f"  - 原始问答对: {all_labels.count('qa')} 个")
            logger.info(f"  - 段落生成问答对: {all_labels.count('paragraph_qa')} 个")
            logger.info(f"  - 普通段落: {all_labels.count('paragraph')} 个")
            if all_labels:
                qa_ratio = (all_labels.count('qa') + all_labels.count('paragraph_qa')) / len(all_labels) * 100
                logger.info(f"问答对模式占比: {qa_ratio:.1f}%")
            return all_chunks, all_answers, all_labels

        except Exception as e:
            logger.error(f"混合切分失败: {str(e)}")
            return [], [], []

    def _generate_questions_from_paragraph(self, paragraph: str, client) -> List[str]:
        """Ask the LLM for up to three core questions answered by *paragraph*.

        Returns an empty list when no client is given, the paragraph is
        shorter than 20 characters, or the call fails.
        """
        try:
            if not client or len(paragraph.strip()) < 20:
                return []

            response = client.chat.completions.create(
                model=Config.LLM_MODEL_ID,
                messages=[
                    {"role": "user", "content": Config.PARAGRAPH_QUESTION_PROMPT.format(paragraph=paragraph)}
                ]
            )

            content = response.choices[0].message.content.strip()
            # One question per non-empty line, capped at three.
            questions = [q.strip() for q in content.split('\n') if q.strip()][:3]

            logger.debug(f"从段落生成问题: {questions}")
            return questions

        except Exception as e:
            logger.error(f"生成段落问题失败: {str(e)}")
            return []

    def _process_paragraph_chunks(self, para_chunks: List[str], client, all_chunks: List[str], all_answers: List[str], all_labels: List[str]):
        """Append paragraph chunks in place, generating index questions when possible.

        With a client and successful generation, every question is indexed
        with the chunk as its answer ('paragraph_qa'); otherwise the raw
        chunk itself is indexed ('paragraph').
        """
        for chunk in para_chunks:
            questions = self._generate_questions_from_paragraph(chunk, client) if client else []
            if questions:
                for question in questions:
                    all_chunks.append(question)
                    all_answers.append(chunk)  # the original chunk is the answer
                    all_labels.append('paragraph_qa')
            else:
                # No client, or question generation failed: keep the raw chunk.
                all_chunks.append(chunk)
                all_answers.append("")
                all_labels.append('paragraph')

        logger.info(f"规则切分+问题生成完成，得到 {all_labels.count('paragraph_qa')} 个段落问题对，{all_labels.count('paragraph')} 个原始段落")

    def _process_original_paragraphs(self, text: str, client, all_chunks: List[str], all_answers: List[str], all_labels: List[str]):
        """Generate questions from whole natural paragraphs, then split.

        NOTE(review): without a client, or when question generation fails,
        a paragraph contributes nothing here — confirm that silently
        skipping such paragraphs is intended.
        """
        if not text.strip():
            return

        # Natural paragraphs longer than 20 characters.
        original_paragraphs = [p.strip() for p in text.split('\n\n')
                               if p.strip() and len(p.strip()) > 20]

        for para in original_paragraphs:
            if not client:
                continue
            questions = self._generate_questions_from_paragraph(para, client)
            if not questions:
                continue
            # Each question maps to a chunk of the paragraph (simplified:
            # always the first chunk).
            para_chunks = self._split_by_size(para, Config.CHUNK_SIZES[0])
            answer_chunk = para_chunks[0] if para_chunks else para
            for question in questions:
                all_chunks.append(question)
                all_answers.append(answer_chunk)
                all_labels.append('paragraph_qa')

        logger.info(f"原始段落模式完成，处理了 {len(original_paragraphs)} 个原始段落")

    def _process_chunked_paragraphs(self, text: str, client, all_chunks: List[str], all_answers: List[str], all_labels: List[str]):
        """Rule-split *text*, then generate questions per chunk."""
        if not text.strip():
            return

        para_chunks = self._split_by_size(text, Config.CHUNK_SIZES[0])
        self._process_paragraph_chunks(para_chunks, client, all_chunks, all_answers, all_labels)

    def _split_by_size(self, text: str, target_size: int) -> List[str]:
        """Split *text* into sentence-aligned chunks of roughly *target_size*.

        Args:
            text: Text to split.
            target_size: Target chunk length in characters.

        Returns:
            List[str]: Resulting chunks, with ``self.chunk_overlap``
            characters of sentence-aligned overlap when configured.
        """
        # Collapse runs of spaces/tabs only.  The previous r'\s+' also ate
        # every newline, which made the '\n\n' paragraph split below a
        # no-op — newlines must survive normalization.
        text = re.sub(r'[ \t]+', ' ', text).strip()
        logger.info(f"开始按大小 {target_size} 分割文本，长度: {len(text)}")

        # Split into paragraphs first.
        paragraphs = [p.strip() for p in text.split('\n\n') if p.strip()]

        # Then into sentences on CJK/ASCII terminators; the capturing group
        # makes re.split return [text, terminator, text, ...], so zip the
        # halves back together to keep each terminator attached (the
        # trailing fragment pairs with '').
        all_sentences = []
        for para in paragraphs:
            pieces = re.split('([。！？!?])', para)
            sentences = [''.join(pair) for pair in zip(pieces[0::2], pieces[1::2] + [''])]
            sentences = [s.strip() for s in sentences if s.strip()]
            all_sentences.extend(sentences)
            if sentences:
                # Paragraph-boundary marker; collapsed/stripped later.
                all_sentences.append('\n')

        chunks = []
        current_chunk = []
        current_length = 0

        for sentence in all_sentences:
            sentence_length = len(sentence)

            # A single sentence longer than the target is hard-wrapped into
            # fixed-size slices on its own.
            if sentence_length > target_size:
                if current_chunk:
                    chunks.append(''.join(current_chunk))
                    current_chunk = []
                    current_length = 0

                for i in range(0, sentence_length, target_size):
                    chunks.append(sentence[i:i + target_size])
                continue

            # Flush when the next sentence would overflow the target — but
            # only once the chunk reached the minimum size.  The last
            # sentence is carried over as natural overlap.
            if current_length + sentence_length > target_size:
                if current_length >= self.min_chunk_size:
                    chunks.append(''.join(current_chunk))
                    last_sentence = current_chunk[-1] if current_chunk else ''
                    if last_sentence and last_sentence != '\n':
                        current_chunk = [last_sentence]
                        current_length = len(last_sentence)
                    else:
                        current_chunk = []
                        current_length = 0

            current_chunk.append(sentence)
            current_length += sentence_length

        # Final chunk.  NOTE(review): a trailing remainder shorter than
        # min_chunk_size is silently dropped — confirm losing that tail is
        # acceptable.
        if current_chunk and current_length >= self.min_chunk_size:
            chunks.append(''.join(current_chunk))

        # Optionally prepend sentence-aligned overlap from the previous chunk.
        if self.chunk_overlap > 0:
            overlapped_chunks = []
            for i, current in enumerate(chunks):
                if i > 0:
                    overlap_text = chunks[i - 1][-self.chunk_overlap:]
                    # Trim the overlap back to the last sentence boundary.
                    last_period = max(overlap_text.rfind('。'),
                                      overlap_text.rfind('！'),
                                      overlap_text.rfind('？'))
                    if last_period != -1:
                        overlap_text = overlap_text[last_period + 1:]

                    if overlap_text and not overlap_text.isspace():
                        overlapped_chunks.append(overlap_text + current)
                    else:
                        overlapped_chunks.append(current)
                else:
                    overlapped_chunks.append(current)

            # Normalize blank lines and drop empty chunks.
            overlapped_chunks = [re.sub(r'\n+', '\n', chunk.strip()) for chunk in overlapped_chunks]
            logger.info(f"使用大小 {target_size} 分割完成，生成 {len(overlapped_chunks)} 个文档块")
            return [chunk for chunk in overlapped_chunks if chunk]

        logger.info(f"使用大小 {target_size} 分割完成，生成 {len(chunks)} 个文档块")
        return [chunk for chunk in chunks if chunk.strip()]
