import time

from sentence_transformers import SentenceTransformer, util
import re
import os
import torch

# Disable the Hugging Face hub symlink warning (noisy on platforms
# without symlink support, e.g. Windows without developer mode)
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"

# Select CUDA when available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"

# Shared model handle; populated lazily by get_model() on first use
model = None


def get_model():
    """Return the shared SentenceTransformer, loading it on first call.

    The instance is cached in the module-level ``model`` variable so the
    expensive download/initialisation happens at most once per process.
    """
    global model
    if model is not None:
        return model
    model = SentenceTransformer('DMetaSoul/sbert-chinese-general-v2', device=device)
    return model


def split_sentences(text):
    """Split *text* into sentences on Chinese terminal punctuation (。！？).

    The punctuation marks are consumed by the split; empty fragments and
    surrounding whitespace are discarded.
    """
    fragments = re.split(r'[。！？]', text)
    return [fragment.strip() for fragment in fragments if fragment.strip()]


def segment_text(text, threshold=0.65, batch_size=100):
    """
    Segment novel text into paragraphs by semantic similarity, with
    dialogue/transition-word merging and chunked embedding computation.

    Adjacent sentences are merged into the same paragraph when their
    embedding cosine similarity exceeds *threshold*, when the sentence
    contains dialogue quotes (“ or ”), or when it starts with a
    transition word.

    :param text: input novel text
    :param threshold: semantic-similarity threshold (0-1; higher values
        produce larger paragraphs)
    :param batch_size: number of sentences embedded per ``model.encode`` call
    :return: list of segmented paragraph strings

    NOTE(review): paragraphs are re-joined with '。', so original '！'/'？'
    sentence terminators are not preserved in the output.
    """
    model = get_model()
    # Split into sentences
    sentences = split_sentences(text)
    if not sentences:
        return []

    # Embed sentences in chunks to bound peak memory
    embeddings = []
    for start in range(0, len(sentences), batch_size):
        batch = sentences[start:start + batch_size]
        embeddings.append(model.encode(batch, convert_to_tensor=True, device=device))
    embeddings = torch.cat(embeddings, dim=0)

    # Only adjacent-pair similarities are consumed below, so compute them
    # directly: O(n) time/memory instead of the O(n^2) full matrix that
    # util.cos_sim(embeddings, embeddings) would build.
    if len(sentences) > 1:
        adjacent_sims = torch.nn.functional.cosine_similarity(
            embeddings[:-1], embeddings[1:], dim=1
        )
    else:
        adjacent_sims = None

    # Group sentences into paragraphs
    paragraphs = []
    current_paragraph = [sentences[0]]
    # Transition words that force merging with the previous sentence;
    # hoisted as a tuple so startswith() needs no per-iteration conversion
    turn_prefixes = ('然而', '但是', '因此', '接着')

    for i in range(1, len(sentences)):
        similarity = adjacent_sims[i - 1].item()

        # Merge on high similarity, dialogue quotes, or a transition word
        if (similarity > threshold or
                '“' in sentences[i] or '”' in sentences[i] or
                sentences[i].startswith(turn_prefixes)):
            current_paragraph.append(sentences[i])
        else:
            paragraphs.append('。'.join(current_paragraph) + '。')
            current_paragraph = [sentences[i]]

    # Flush the trailing paragraph
    if current_paragraph:
        paragraphs.append('。'.join(current_paragraph) + '。')

    return paragraphs
