
'''
文本智能分块功能
'''
import re
from typing import List,Dict
import uuid

try:
    import jieba
    JIEBA_AVAILABLE=True
except ImportError:
    JIEBA_AVAILABLE=False
    print("Jieba模块没有安装，使用简化分块功能")

# 定义智能分块类
class SmartDocumentChunker:
    """Split long documents into overlapping, metadata-tagged chunks.

    Splitting is separator-driven: the strongest separator present in the
    text (headings > paragraphs > lines > punctuation > spaces) is used to
    build chunks of at most ``chunk_size`` characters; texts with no
    separator fall back to fixed character windows with ``chunk_overlap``
    characters of overlap.
    """

    def __init__(self, chunk_size: int = 250, chunk_overlap: int = 50):
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

        # Separators ordered from strongest (headings) to weakest (spaces).
        # BUG FIX: the original list had a typo ("n\## " instead of "\n## ")
        # and a trailing empty string — str.split("") raises ValueError.
        self.separators = [
            "\n# ", "\n## ", "\n### ",  # markdown headings
            "\n\n",                     # paragraph breaks
            "\n",                       # line breaks
            ", ", ". ", "，", "。 ", "! ", "? ", "; ", " ",  # sentence / word boundaries
        ]

    def chunk_document(self, text: str, metadata: dict = None) -> List[Dict]:
        """Chunk *text* and return a list of ``{'content', 'metadata'}`` dicts.

        Args:
            text: the raw document text.
            metadata: optional dict; only its ``'source'`` key is propagated
                into each chunk's metadata (defaults to ``'unknown'``).

        Returns:
            A non-empty list of chunk records. On any unexpected error the
            word-based `_simple_chunk` fallback is used instead.
        """
        try:
            cleaned_text = self._preprocess_text(text)

            # Documents shorter than 100 chars become a single chunk.
            if len(cleaned_text.strip()) < 100:
                print(f'文档内容太短：{len(cleaned_text)} 个字符')
                return [self._make_chunk(cleaned_text.strip(), 0, '常规', [], metadata)]

            chunks = self._split_text(cleaned_text)

            # Post-process: keep fragments longer than 5 chars, attach metadata.
            result = []
            for i, chunk in enumerate(chunks):
                stripped = chunk.strip()
                if len(stripped) > 5:  # minimum useful fragment length
                    result.append(self._make_chunk(
                        stripped,
                        i,  # index within the raw split (gaps where fragments were dropped)
                        self._classify_chunk(chunk),
                        self._extract_keywords(chunk),
                        metadata,
                    ))

            # If everything was filtered out, emit one default chunk.
            if not result:
                print(f'分块过程处理后，无有效文本块')
                content = cleaned_text[:500] if len(cleaned_text) > 500 else cleaned_text
                result = [self._make_chunk(content, 0, '常规', [], metadata)]

            print(f"文档分块完成,共有{len(result)} 个块")
            return result
        except Exception as e:
            print(f'文档分块失败：{e}')
            return self._simple_chunk(text, metadata)

    def _make_chunk(self, content: str, index: int, chunk_type: str,
                    keywords: List[str], metadata: dict) -> Dict:
        """Build one chunk record; 'length' always matches the stored content."""
        return {
            'content': content,
            'metadata': {
                'chunk_index': index,
                'chunk_type': chunk_type,
                'keywords': keywords,
                'length': len(content),
                'source': metadata.get('source', 'unknown') if metadata else 'unknown',
            },
        }

    def _preprocess_text(self, text: str) -> str:
        """Normalize whitespace: blank-line runs -> one blank line, space runs -> one space.

        BUG FIX: the original first replaced ``\\n\\s*\\n`` with a single
        ``\\n``, destroying every paragraph boundary before the second regex
        (and the ``"\\n\\n"`` separator) could ever see one.
        """
        text = re.sub(r'\n\s*\n', '\n\n', text)
        text = re.sub(r' +', ' ', text)
        return text.strip()

    def _split_text(self, text: str) -> List[str]:
        """Split *text* at the strongest separator present; never drop content.

        BUG FIX: the original had a mis-indented ``else`` (a ``for``-``else``,
        not the ``if``'s ``else``), which silently discarded every part that
        did not fit into the current chunk.
        """
        if len(text) <= self.chunk_size:
            return [text]

        # Try separators from strongest to weakest; use the first that appears.
        for separator in self.separators:
            if separator not in text:
                continue
            parts = text.split(separator)
            if len(parts) <= 1:
                continue

            chunks = []
            current = ""
            for part in parts:
                piece = part + separator
                if len(current) + len(piece) <= self.chunk_size:
                    current += piece  # still fits: keep accumulating
                else:
                    if current:
                        chunks.append(current.strip())
                    current = piece  # start a new chunk; no part is lost
            if current:
                chunks.append(current.strip())
            return chunks

        # No separator present: fixed character windows with overlap.
        # max(1, ...) guards against overlap >= size, which would make the
        # range step non-positive.
        step = max(1, self.chunk_size - self.chunk_overlap)
        return [text[i:i + self.chunk_size] for i in range(0, len(text), step)]

    def _simple_chunk(self, text: str, metadata: dict = None) -> List[Dict]:
        """Last-resort fallback: fixed windows of 50 whitespace-separated words.

        BUG FIX: return annotation corrected to List[Dict]; chunk_index is now
        the chunk ordinal (0, 1, 2, ...) instead of the raw word offset.
        """
        words = text.split()
        words_per_chunk = 50
        chunks = []
        for start in range(0, len(words), words_per_chunk):
            chunk_text = ' '.join(words[start:start + words_per_chunk])
            chunks.append(self._make_chunk(chunk_text, len(chunks), '常规', [], metadata))
        return chunks

    def _classify_chunk(self, chunk: str) -> str:
        """Classify a chunk by keyword matching; first matching category wins."""
        chunk_lower = chunk.lower()
        # NOTE: matching runs against the lowercased text, so every keyword
        # must be lowercase ('Bug' in the original could never match).
        categories = [
            ("代码", ['编码', '定义', '类', '函数', '构造器']),
            ("设计", ['对象', '结构', '类', '原则', '设计']),
            ("测试", ['测试', '单元测试', '代码覆盖率', '测试结果', 'bug', '缺陷']),
            ("部署", ['容器', 'k8s', '虚拟机', '部署', '生产环境', '测试环境']),
        ]
        for label, keywords in categories:
            if any(kw in chunk_lower for kw in keywords):
                return label
        return "常规"

    def _extract_keywords(self, chunk: str) -> List[str]:
        """Return up to 5 keywords from *chunk*.

        Uses jieba segmentation when the package is importable, otherwise a
        plain whitespace split. Any segmentation error yields an empty list
        (keyword extraction is best-effort, never fatal).
        """
        try:
            # Local guarded import keeps the class usable when jieba is absent.
            import jieba
        except ImportError:
            words = chunk.split()
            return [w for w in words[:5] if len(w) > 1]
        try:
            words = jieba.cut(chunk)
            return [w for w in words if len(w) > 1 and w.strip()][:5]
        except Exception:
            return []
  
