# Semantic chunking demo: embed paragraphs from a local text file with Ollama,
# split them into semantically coherent chunks, then answer a query over them.
from langchain_ollama import OllamaEmbeddings
embed = OllamaEmbeddings(model="bge-m3:latest")

# (earlier experiment) Read the .txt file line by line into a list:
# with open('new_stu_syhg.txt', 'r', encoding='utf-8') as file:
#     lines = file.readlines()

# sentences = [line.strip() for line in lines]
# print("Number of sentences:",len(sentences))

# Read the txt file paragraph by paragraph into a list.
with open('new_stu_syhg.txt', 'r', encoding='utf-8') as file:
    paragraphs = file.read().split('\n\n')  # paragraphs are separated by a blank line (two newlines)

print("Number of paragraphs:", len(paragraphs))

# NOTE: despite the name, each "sentence" below is actually a whole paragraph.
sentences = [para.strip() for para in paragraphs if para.strip()]  # drop empty paragraphs and surrounding whitespace
print("Number of sentences:",len(sentences))

# Embed every paragraph in a single batch call.
embeddings = embed.embed_documents(sentences)
print("Number of embeddings:",len(embeddings))

# Compute cosine similarity between each pair of adjacent paragraphs.
import numpy as np
def cosine_similarity(vec1, vec2):
    """Return the cosine similarity between two vectors.

    Args:
        vec1: first vector (any sequence accepted by numpy).
        vec2: second vector of the same length.

    Returns:
        float cosine similarity in [-1, 1]; 0.0 when either vector has
        zero norm (the original implementation divided by zero and
        produced nan / a runtime warning in that case).
    """
    norm_product = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    if norm_product == 0.0:
        return 0.0
    return np.dot(vec1, vec2) / norm_product

# Similarity of each adjacent pair: similarities[i] compares embeddings[i]
# with embeddings[i + 1]; dips in this series suggest topic boundaries.
similarities = [cosine_similarity(embeddings[i], embeddings[i + 1]) for i in range(len(embeddings) - 1)]

# Find the "breakpoints" (cut positions) — using the percentile method as an example.
def compute_breakpoints(similarities, method="percentile", threshold=90):
    """Return the indices where a semantic chunk boundary should be cut.

    Args:
        similarities: sequence of adjacent-pair cosine similarities.
        method: cutoff strategy; only "percentile" is supported.
        threshold: percentile (0-100) of the similarity distribution used
            as the cutoff value.

    Returns:
        List of indices i such that similarities[i] falls below the cutoff.

    Raises:
        ValueError: if ``method`` is not a supported strategy.  (The
        original code left ``threshold_value`` unbound in that case and
        crashed with a NameError at the return statement.)
    """
    if method == "percentile":
        threshold_value = np.percentile(similarities, threshold)
    else:
        raise ValueError(f"Unsupported breakpoint method: {method!r}")
    return [i for i, sim in enumerate(similarities) if sim < threshold_value]

# With threshold=90, the lowest ~10% of adjacent similarities become cut points.
breakpoints = compute_breakpoints(similarities, method="percentile", threshold=90)

# Split into chunks.
def split_into_chunks(sentences, breakpoints):
    """Group consecutive sentences into chunks, cutting after each breakpoint.

    Args:
        sentences: ordered list of text units (here: paragraphs).
        breakpoints: sorted indices; a cut is made after each index.

    Returns:
        List of chunk strings.  Units inside a chunk are joined with ". ";
        every chunk except the last gets a trailing "." (matching the
        original behavior exactly).
    """
    # Convert breakpoint indices into slice boundaries (cut AFTER each index).
    boundaries = [bp + 1 for bp in breakpoints]
    segments = []
    previous = 0
    for boundary in boundaries:
        segments.append(sentences[previous:boundary])
        previous = boundary
    segments.append(sentences[previous:])

    joined = [". ".join(segment) for segment in segments]
    # All but the final chunk are terminated with a period.
    return [text + "." for text in joined[:-1]] + [joined[-1]]

# Materialize the semantic chunks from the breakpoints found above.
text_chunks = split_into_chunks(sentences, breakpoints)
print(f"Number of semantic chunks: {len(text_chunks)}")
# print("\nFirst text chunk:\n", text_chunks[0])

# Generate one embedding per chunk.
def create_embeddings(text_chunks):
    """Embed each chunk with the module-level Ollama embedder.

    Args:
        text_chunks: list of chunk strings.

    Returns:
        List of embedding vectors, one per chunk, in input order.
    """
    return list(map(embed.embed_query, text_chunks))

# Per-chunk vectors used as the retrieval index.
chunk_embeddings = create_embeddings(text_chunks)

# At retrieval time, compare the query directly against the chunk vectors.
def semantic_search(query, text_chunks, chunk_embeddings, k=5):
    """Return the k chunks most similar to the query, best match first.

    Args:
        query: natural-language query string.
        text_chunks: chunk texts, aligned index-for-index with embeddings.
        chunk_embeddings: one embedding vector per chunk.
        k: number of chunks to return.

    Returns:
        List of up to k chunk strings, sorted by descending similarity.
    """
    query_vec = embed.embed_query(query)
    scores = [cosine_similarity(query_vec, vec) for vec in chunk_embeddings]
    # Full descending order, then take the top k — identical result to the
    # original argsort[-k:][::-1] formulation.
    ranked = np.argsort(scores)[::-1][:k]
    return [text_chunks[idx] for idx in ranked]

# Example query (Chinese): "Please describe how students' electronic devices are managed."
search = "请介绍下如何管理学生的电子产品"

top_chunks = semantic_search(search, text_chunks, chunk_embeddings, k=2)

print(top_chunks)

system_prompt = "You are an AI assistant that strictly answers based on the given context. If the answer cannot be derived directly from the provided context, respond with: 'I do not have enough information to answer that.'"

# Assemble the retrieved chunks into numbered, visually separated context
# sections, followed by the question itself.
user_prompt = "\n".join([f"Context {i + 1}:\n{chunk}\n=====================================\n" for i, chunk in enumerate(top_chunks)])
user_prompt = f"{user_prompt}\nQuestion: {search}"

from langchain_ollama import ChatOllama
# NOTE(review): reasoning=False presumably suppresses qwen3's "thinking"
# output so only the final answer is returned — confirm against the
# langchain-ollama ChatOllama documentation.
llm = ChatOllama(model="qwen3:8b", temperature=0.3, reasoning=False)

from langchain_core.messages import (
    AIMessage,
    SystemMessage,
    HumanMessage,
)

# Single-turn call: system prompt constrains the model to the retrieved
# context; the user message carries context + question built above.
ai_response = llm.invoke([SystemMessage(content=system_prompt), HumanMessage(content=user_prompt)])
print("----------------------------------")
print(ai_response.content)
