import logging
import json
import re
import chromadb
import redis
from typing import Generator
from sentence_transformers import SentenceTransformer

logging.basicConfig(level=logging.INFO)

# ---- Worker configuration ----

# Maximum re-queue attempts before a task is moved to the dead-letter queue.
# NOTE(review): 300 is unusually high for a retry limit — confirm intended.
MAX_RETRIES = 300
# Redis connection settings; empty password means no AUTH credential is used.
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379
REDIS_PASSWORD = ""
REDIS_DATABASE = 0
# Work queue consumed by main() via BLPOP; producers RPUSH task JSON onto it.
REDIS_KEY = "vector_processing_queue"
# NOTE(review): name is not UPPER_SNAKE_CASE like the other constants;
# renaming would break the reference in main(), so it is only flagged here.
Dead_Letter_Queue = "dead_letter_queue"
# Local ChromaDB persistence directory.
# NOTE(review): duplicated below as CHROMA_PERSISTENT_DATA_PATH — keep in sync.
CHROMA_PERSIST_DIR = "./chroma_local_data"
# Path to a locally downloaded sentence-transformers model.
LOCAL_MODEL_DIR = "./paraphrase-multilingual-MiniLM-L12-v2"
# NOTE(review): duplicated below as CHROMA_CONTRACT_NAME — keep in sync.
COLLECTION_NAME = "file_search"
# Batch size passed to SentenceTransformer.encode().
BATCH_SIZE = 8
# Maximum characters per text segment produced by the segmenter.
MAX_LENGTH = 300

# Chroma HTTP server endpoint.
CHROMA_SERVER_HOST = "127.0.0.1"
# NOTE(review): "POST" looks like a typo for "PORT"; renaming would break the
# reference in Chroma.get_httpclient(), so it is only flagged here.
CHROMA_SERVER_POST = "8000"
CHROMA_PERSISTENT_DATA_PATH = "./chroma_local_data"
# Collection name used by the Chroma wrapper below.
CHROMA_CONTRACT_NAME = "file_search"


class Chroma:
    """Thin wrapper around a ChromaDB HTTP client and a single collection.

    On construction it connects to the Chroma server and gets/creates the
    target collection with cosine similarity; ``self.collection`` is ``None``
    when the server is unreachable.
    """

    def __init__(self):
        self.client = self.get_httpclient()
        # Cosine distance suits the normalized embeddings produced upstream.
        self.collection = self.client.get_or_create_collection(
            name=CHROMA_CONTRACT_NAME,
            metadata={"hnsw:space": "cosine"},
        ) if self.client else None

    @classmethod
    def get_client(cls):
        """Return an in-memory client (no persistence; not for production)."""
        return chromadb.Client()

    @classmethod
    def get_persistent_client(cls):
        """Probe that on-disk persistence at CHROMA_PERSISTENT_DATA_PATH works.

        Returns True when a PersistentClient could be created, False otherwise.
        The created client is intentionally discarded (the original bound it
        to an unused local) — this is only a loadability check.
        """
        try:
            chromadb.PersistentClient(path=CHROMA_PERSISTENT_DATA_PATH)
            return True
        except Exception as ex:
            # Fix: a failure was logged at INFO level; ERROR is appropriate.
            logging.error("保存加载本地机器上的数据失败，原因是：%s", str(ex))
            return False

    @classmethod
    def get_httpclient(cls):
        """Connect to the Chroma HTTP server; return the client or None on failure."""
        try:
            client = chromadb.HttpClient(
                host=CHROMA_SERVER_HOST,
                port=CHROMA_SERVER_POST
            )
            # NOTE(review): side probe of local persistence — its boolean
            # result is ignored; confirm this call is still needed here.
            Chroma.get_persistent_client()
            return client
        except Exception as ex:
            # Fix: a single ERROR log replaces the duplicated print() +
            # logging.info() pair that reported the same failure twice.
            logging.error(f"连接chroma服务器失败，原因是：{str(ex)}")
            return None

    def chromadb(self):
        """Return the bound collection (None if the connection failed)."""
        return self.collection


class RedisWrapper:
    """Static helpers over one shared Redis connection pool.

    The pool and client are created once at class-definition time and shared
    by all callers.
    """

    pool = redis.ConnectionPool(
        host=REDIS_HOST,
        port=REDIS_PORT,
        db=REDIS_DATABASE,
        password=REDIS_PASSWORD,
    )
    r = redis.Redis(connection_pool=pool)

    @staticmethod
    def rpush(key, value):
        """Append *value* to the right end of list *key* (Redis RPUSH)."""
        return RedisWrapper.r.rpush(key, value)

    @staticmethod
    def blpop(keys, timeout=0):
        """Blocking left-pop from *keys*, waiting up to *timeout* seconds.

        Returns a ``(key, value)`` pair, or ``None`` when the wait times out.
        """
        return RedisWrapper.r.blpop(keys, timeout=timeout)


def clean_text(text: str) -> str:
    """Normalize a raw text fragment for embedding.

    Steps: strip HTML tags, drop CJK punctuation, collapse whitespace runs
    into single spaces, and remove a leading UTF-8 BOM.
    """
    # 1. Remove HTML tags (non-greedy, so a bare '<' with no '>' survives).
    text = re.sub(r'<.*?>', '', text)
    # 2. Remove CJK punctuation. Bug fix: the original class contained a bare
    #    ASCII '<' where the opening book-title bracket '《' was clearly
    #    intended (its closing mate '》' was already listed). '《' is added;
    #    '<' is kept so existing behavior is preserved.
    text = re.sub(r'[<《》「」『』【】、]+', '', text)
    # 3. Collapse all whitespace (including newlines) into single spaces.
    text = re.sub(r'\s+', ' ', text).strip()
    # 4. Strip a leading BOM, then trim once more.
    return text.lstrip('\ufeff').strip()


def file_stream_segmenter(file_path: str, max_length: int = MAX_LENGTH, min_length: int = 20) -> Generator[str, None, None]:
    """Stream a UTF-8 text file and yield cleaned segments of at most
    ``max_length`` characters.

    The file is read in 4 KiB chunks. A trailing paragraph that may be cut
    mid-way at a chunk boundary is carried over and re-joined with the next
    chunk, so paragraphs are never split by the chunking itself.

    Raises:
        Any error from opening/reading the file (logged, then re-raised).
    """
    carryover = ""

    try:
        with open(file_path, 'r', encoding='utf-8-sig') as f:
            while True:
                chunk = f.read(4096)
                if not chunk and not carryover:
                    break

                content = carryover + chunk

                # Bug fix: the original ran clean_text() on the whole chunk
                # here, which collapsed every newline into a space — the
                # newline split below could then never match, so the entire
                # file silently accumulated in `carryover` and was processed
                # in one piece at EOF (defeating the streaming). Split on
                # raw newlines first; each paragraph is cleaned individually
                # below.
                paragraphs = re.split(r'\n{1,2}', content)

                if chunk and not content.endswith('\n'):
                    # Last paragraph may be incomplete; hold it for the next
                    # chunk.
                    carryover = paragraphs[-1]
                    process_paras = paragraphs[:-1]
                else:
                    carryover = ""
                    process_paras = paragraphs

                cleaned_paras = [clean_text(p) for p in process_paras]

                yield from _process_paragraphs(cleaned_paras, max_length, min_length)
    except Exception as e:
        logging.error(f"文件读取或分段失败: {str(e)}，文件路径：{file_path}")
        raise


def _process_paragraphs(paragraphs: list[str], max_length: int, min_length: int) -> Generator[str, None, None]:
    """处理段落合并与截断"""
    buffer = []
    buffer_length = 0

    for p in paragraphs:
        p = p.strip()
        if not p:
            continue

        # 处理超长段落
        while len(p) > max_length:
            seg = p[:max_length]
            yield seg
            p = p[max_length:]

        # 合并到缓冲区
        if buffer_length + len(p) > max_length:
            yield " ".join(buffer)
            buffer = [p]
            buffer_length = len(p)
        else:
            buffer.append(p)
            buffer_length += len(p)

    # 处理剩余缓冲区
    if buffer:
        yield " ".join(buffer)


def get_embedding(model, text: str) -> list[float]:
    """Encode *text* into a normalized embedding vector.

    Returns an empty list when encoding fails or yields nothing; failures
    are logged rather than raised so one bad paragraph cannot abort a file.
    """
    try:
        vector = model.encode(
            text,
            show_progress_bar=False,
            normalize_embeddings=True,
            batch_size=BATCH_SIZE,
        ).tolist()
        return vector if vector else []
    except Exception as e:
        logging.error(f"向量化失败: {str(e)}, 文本内容: {text}")
        return []


def process_file(file_path: str, version: str, app_id: str, scene: str, model, collection) -> bool:
    """Segment *file_path*, embed each segment, and store the vectors in ChromaDB.

    Expects *version* of the form ``<letters>_<number>_<number>``; the trailing
    number is treated as the version number. Data belonging to the immediately
    previous version number for the same (app_id, version-prefix, scene) is
    deleted before the new vectors are added. Returns True on success, False
    on any failure (the error is logged, never raised to the caller).

    NOTE(review): segment ids are ``{file_path}_{i}`` — they do not include
    the version, so re-adding a file relies on the previous version having
    been deleted first; confirm this id scheme is intended.
    """
    try:
        # Validate version format: letters, then two underscore-separated numbers.
        if not re.match(r"^[A-Za-z]+_\d+_\d+$", version):
            raise ValueError("版本号格式错误")

        # Parse the trailing numeric component as the version number.
        _, version_number_str = version.rsplit('_', 1)
        version_number = int(version_number_str)
        if version_number < 1:
            raise ValueError("版本号必须≥1")

        # Delete the previous version's vectors for this app/scene, if any.
        if version_number > 1:
            old_version = f"{version.rsplit('_', 1)[0]}_{version_number - 1}"
            where_filter = {
                "$and": [
                    {"app_id": {"$eq": app_id}},
                    {"version": {"$eq": old_version}},
                    {"scene": {"$eq": scene}}
                ]
            }
            collection.delete(where=where_filter)
            logging.info(f"成功删除旧版本 {old_version} 的数据")

        # Segment the file and embed each segment; segments whose embedding
        # fails (get_embedding returns []) are silently skipped.
        paragraphs = list(file_stream_segmenter(file_path, max_length=MAX_LENGTH))
        valid_embeddings = []
        valid_paragraphs = []

        for p in paragraphs:
            embedding = get_embedding(model, p)
            if embedding:
                valid_embeddings.append(embedding)
                valid_paragraphs.append(p)

        if not valid_embeddings:
            raise RuntimeError("所有段落生成向量失败")

        # One metadata record per stored segment; file_id doubles as the path.
        metadatas = [{
            "app_id": app_id,
            "version": version,
            "scene": scene,
            "file_id": file_path,
        } for _ in valid_paragraphs]

        # Store in ChromaDB. NOTE(review): the segment text itself is not
        # stored (no ``documents=`` argument) — only embeddings + metadata;
        # verify that is intended for the search use case.
        collection.add(
            embeddings=valid_embeddings,
            metadatas=metadatas,
            ids=[f"{file_path}_{i}" for i in range(len(valid_embeddings))]
        )
        logging.info(f"成功持久化数据到 {CHROMA_PERSIST_DIR}")
        return True
    except Exception as e:
        # Boundary catch: any failure marks the task as failed for the caller.
        logging.error(f"文件处理失败: {str(e)}")
        return False


def init_dependencies():
    """Load and return the sentence-transformers model from LOCAL_MODEL_DIR."""
    return SentenceTransformer(LOCAL_MODEL_DIR)


def main():
    """Consume tasks from the Redis work queue forever and vectorize each file.

    Each task is a JSON object with ``file_path``, ``version``, ``app_id`` and
    ``scene`` keys. A failed task is re-queued until its ``retry_count``
    reaches MAX_RETRIES, after which it is pushed to the dead-letter queue.
    """
    model = init_dependencies()
    collection = Chroma().chromadb()
    while True:
        # NOTE(review): prints on every poll (~1/s when idle) — looks like
        # leftover debug output; consider removing or demoting to logging.
        print("正在运行")
        # BLPOP returns a (key, value) pair, or None after the 1 s timeout.
        task_data = RedisWrapper.blpop(REDIS_KEY, timeout=1)
        if task_data:
            task = json.loads(task_data[1].decode('utf-8'))
            # Count this attempt up-front so the incremented value is what
            # gets re-queued on failure.
            task['retry_count'] = task.get('retry_count', 0) + 1

            try:
                if process_file(
                        task['file_path'],
                        task['version'],
                        task['app_id'],
                        task['scene'],
                        model,
                        collection
                ):
                    logging.info(f"任务成功: {task}")
                else:
                    # process_file returns False on failure; convert to an
                    # exception so the retry path below handles both cases.
                    raise RuntimeError("处理失败")
            except Exception as e:
                logging.error(f"任务失败: {task}, 错误: {str(e)}")
                if task['retry_count'] < MAX_RETRIES:
                    # Re-queue for another attempt.
                    RedisWrapper.rpush(REDIS_KEY, json.dumps(task))
                else:
                    # Retries exhausted: park in the dead-letter queue.
                    RedisWrapper.rpush(Dead_Letter_Queue, json.dumps(task))


# Script entry point: start the blocking queue-consumer loop.
if __name__ == '__main__':
    main()
