# -*- coding: utf-8 -*-
"""
语义分块示例 (Semantic Chunking)

注：本示例使用 RecursiveCharacterTextSplitter 按段落/句子标点等自然边界递归切分，
属于语义分块的近似实现，并非基于嵌入模型语义相似度的严格语义分块。

语义分块优缺点：

优点：
1. ✅ 基于语义边界分割，保持内容完整性
2. ✅ 自动适应不同长度的语义单元
3. ✅ 减少无意义的分割点
4. ✅ 对复杂文档结构适应性强
5. ✅ 检索质量通常更高

缺点：
1. ❌ 实现相对复杂，计算成本较高
2. ❌ 需要额外的模型进行语义分析
3. ❌ 分块大小不均匀，可能影响处理效率
4. ❌ 对模型质量依赖较强
5. ❌ 参数调优更复杂

适用场景：
• 技术文档、学术论文等结构化内容
• 对语义完整性要求高的应用
• 需要高质量检索结果的场景
• 包含代码、表格等复杂格式的文档
"""

from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings

# =======================
# 1. Load the raw document
# =======================
# Read the whole source file into one string; splitting happens next.
with open("RAG_2_data.txt", encoding="utf-8") as f:
    text = f.read()

# =======================
# 2. Split the text
# =======================
# RecursiveCharacterTextSplitter tries each entry of `separators` in order
# (paragraph break first, then newline, then Chinese sentence/clause
# punctuation, then spaces, finally character-level) until each piece
# fits within chunk_size.
# chunk_size: maximum characters per chunk
# chunk_overlap: characters shared between adjacent chunks for context
text_splitter = RecursiveCharacterTextSplitter(
    separators=["\n\n", "\n", "。", "！", "？", "；", "，", " ", ""],
    chunk_size=400,
    chunk_overlap=50,
)

chunks = text_splitter.split_text(text)

# Show the total count and a preview of the first three chunks.
print("总 chunk 数量:", len(chunks))
for idx, piece in enumerate(chunks[:3], start=1):
    print(f"Chunk {idx}:\n{piece}\n{'-' * 40}")

# =======================
# 3. Wrap each chunk as a Document
# =======================
# FAISS.from_documents expects Document objects, not bare strings.
docs = [Document(page_content=piece) for piece in chunks]

# =======================
# 4. Embed the chunks and build the vector store
# =======================
# NOTE(review): the model path looks like it may be missing a separator —
# the usual local layout is "BAAI/bge-base-zh-v1.5", not "BAAIbge-…";
# confirm the directory name on disk before running.
embeddings = HuggingFaceEmbeddings(
    model_name="D:/models/BAAIbge-base-zh-v1.5",
    model_kwargs={"device": "cpu"},
)
vector_store = FAISS.from_documents(docs, embeddings)

# =======================
# 5. Retrieval sanity check
# =======================
# Query the store and print the top-2 most similar chunks.
query = "语义分块有什么优点？"
docs_found = vector_store.similarity_search(query, k=2)

print("检索结果:")
for hit in docs_found:
    print(hit.page_content)
    print("-" * 50)
