import json
import requests
import re
from typing import Union, List, Dict, Any
import logging
from configparser import ConfigParser
# 导入所需的文本分割器
from langchain.text_splitter import MarkdownHeaderTextSplitter, RecursiveCharacterTextSplitter

# --- Configuration bootstrap ---
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
logger = logging.getLogger(__name__)

config = ConfigParser()
try:
    # config.ini is expected to live next to this file.
    # FIX: ConfigParser.read() does NOT raise for a missing/unreadable file --
    # it returns the list of files actually parsed -- so the old try/except
    # never fired for the most common failure. Check the return value instead.
    _read_files = config.read('config.ini', encoding='utf-8')
    if not _read_files:
        logger.error("config.ini 未找到或无法读取。")
except Exception as e:
    # Still guard against genuine parse errors (malformed INI, bad encoding).
    logger.error(f"读取 config.ini 失败: {e}", exc_info=True)

# --- EmbeddingModel 类 ---
class EmbeddingModel:
    """HTTP client for an OpenAI-style embeddings endpoint.

    Sends `{"model": ..., "input": [...]}` to `endpoint_url` with a Bearer
    token and returns one embedding (list of floats) per input text.
    """

    def __init__(self, endpoint_url, api_key, model_name):
        self.endpoint = endpoint_url
        self.api_key = api_key
        self.modelname = model_name
        self.headers = {'Authorization': f'Bearer {self.api_key}', 'Content-Type': 'application/json'}

    def get_embeddings(self, texts: Union[str, List[str]]) -> List[List[float]]:
        """Embed one text or a list of texts.

        Always returns one entry per input text (position-aligned); blank or
        whitespace-only inputs, and any API/parse failure, yield `[]` in the
        corresponding slot.
        """
        # FIX: normalize to a list *before* the endpoint check so that a
        # single-str input also gets the documented one-entry-per-input shape
        # ([[]] rather than []) when the endpoint is unconfigured.
        if not isinstance(texts, list):
            texts = [texts]

        if not self.endpoint:
            logger.error("EmbeddingModel 的 endpoint_url 未配置，无法获取向量。")
            return [[] for _ in texts]

        # Only non-blank texts are sent to the API.
        valid_texts = [text for text in texts if text and not text.isspace()]
        if not valid_texts:
            return [[] for _ in texts]

        payload = {"model": self.modelname, "input": valid_texts}
        try:
            response = requests.post(self.endpoint, headers=self.headers, json=payload, timeout=30)
            response.raise_for_status()
            result = response.json()
            # Re-align the API's results (one per *valid* text) with the
            # original input positions; blanks and any shortfall become [].
            emb_iter = iter(item['embedding'] for item in result.get('data', []))
            return [next(emb_iter, []) if (text and not text.isspace()) else []
                    for text in texts]
        except requests.exceptions.RequestException as e:
            logger.error(f"调用 Embedding API 失败: {e}", exc_info=True)
        except Exception as e:
            logger.error(f"解析 Embedding API 响应失败: {e}", exc_info=True)
        return [[] for _ in texts]


# --- Service client bootstrap ---
embedding_api_client = None
reranker_config = {}

try:
    # Build the client straight from config.ini; if any section/option is
    # missing, config.get() raises and the client stays None.
    embedding_api_client = EmbeddingModel(
        endpoint_url=config.get('embedding', 'endpoint_url'),
        api_key=config.get('embedding', 'api_key'),
        model_name=config.get('embedding', 'model_name'),
    )
    logger.info("vector.py: Embedding API 客户端初始化成功。")
except Exception:
    logger.error("vector.py: 从 config.ini 初始化 EmbeddingModel 失败。", exc_info=True)

try:
    # If any option is missing, the comprehension raises before assignment
    # and reranker_config keeps its empty-dict default.
    reranker_config = {
        key: config.get('reranker_model', option)
        for key, option in (('url', 'base_url'),
                            ('api_key', 'api_key'),
                            ('model_name', 'model_name'))
    }
    logger.info("vector.py: Reranker 配置加载成功。")
except Exception:
    logger.error("vector.py: 从 config.ini 初始化 Reranker 失败。", exc_info=True)

# --- Public business-logic functions ---
# Request keys that steer processing and must never be vectorized themselves.
CONTROL_PARAMS = {'fragment_field', 'split_type', 'size', 'overlap', 'text'}


def embedding_model(data: Dict) -> Dict:
    """Vectorize every non-control field of a request payload.

    `data` carries control keys (see CONTROL_PARAMS) plus arbitrary content
    fields. Fields named in the comma-separated 'fragment_field' value are
    chunked before embedding; other str/list fields are embedded as-is via
    `_process_single_field`; any other value type is passed through untouched.

    Returns an API-style envelope {"code", "msg", "data"}.
    """
    if embedding_api_client is None:
        return {"code": 503, "msg": "Embedding 服务未初始化", "data": None}

    fragment_fields = {f.strip() for f in data.get('fragment_field', '').split(',') if f.strip()}
    split_type = data.get('split_type', '3')
    size = data.get('size')
    overlap = data.get('overlap')
    result_data = {}

    for key, value in data.items():
        if key in CONTROL_PARAMS:
            continue
        # Fragment fields are always processed; other fields only when they
        # are plain strings or lists. (The str and list branches were
        # previously duplicated verbatim -- merged here, same behavior.)
        if key in fragment_fields or isinstance(value, (str, list)):
            result_data[key] = _process_single_field(
                value, key,
                is_fragment_field=key in fragment_fields,
                split_type=split_type, size=size, overlap=overlap)
        else:
            result_data[key] = value

    return {"code": 200, "msg": "OK", "data": result_data}


def get_embeddings_for_texts(texts: List[str]) -> List[List[float]]:
    """Thin wrapper over the module-level embedding client.

    Falls back to one empty vector per input text when the client was never
    initialized (e.g. config.ini missing or incomplete).
    """
    if embedding_api_client is not None:
        return embedding_api_client.get_embeddings(texts)
    logger.warning("Embedding API 客户端未初始化，返回空向量。")
    return [[] for _ in texts]


# rerank
def rerank(data: Dict) -> Dict:
    """Rerank candidate documents against a query via the external /score API.

    Expects data = {"query": str, "documents": list[str | dict], "top_k": int?}.
    Dict documents contribute their "text" value as the candidate string.
    Returns {"code", "msg", "data"} where data is a score-descending list of
    {"id", "document", "score"} entries, truncated to top_k.
    """
    # 1. Validate input.
    if not all(k in data for k in ['query', 'documents']):
        return {"code": 400, "msg": "缺少必要参数 'query' 或 'documents'", "data": None}
    if not reranker_config.get('url'):
        return {"code": 500, "msg": "Reranker服务未配置，请检查config.ini", "data": None}

    # 2. Prepare inputs; documents may be raw strings or dicts carrying "text".
    query = str(data['query'])
    documents = data.get('documents', [])
    top_k = data.get('top_k', len(documents))
    candidate_texts = [doc.get("text", "") if isinstance(doc, dict) else str(doc) for doc in documents]

    # FIX: explicit sentinel instead of the fragile `'response' in locals()`
    # check in the exception handler below.
    response = None
    try:
        # 3. Call the reranker. The deployed service exposes /score with
        #    text_1/text_2 fields (verified working; not the usual
        #    /rerank + query/texts shape).
        response = requests.post(
            f"{reranker_config['url']}/score",
            headers={
                'Authorization': f'Bearer {reranker_config["api_key"]}',
                'Content-Type': 'application/json'
            },
            json={
                "model": reranker_config["model_name"],
                "text_1": query,
                "text_2": candidate_texts
            },
            timeout=30
        )
        response.raise_for_status()
        api_result = response.json()

        # 4. Expected response shape: {"data": [{"score": ...}, ...]} with
        #    exactly one score per candidate, in input order.
        scores_data = api_result.get("data")
        if not (scores_data and isinstance(scores_data, list) and len(scores_data) == len(documents)):
            logger.warning(f"Reranker API 返回格式不正确或数据不匹配: {api_result}")
            return {"code": 500, "msg": "Reranker API 返回格式不正确", "data": None}

        # Pair each original document with its score, sort, truncate.
        scored = [
            {"id": i, "document": documents[i], "score": round(item.get('score', 0.0), 4)}
            for i, item in enumerate(scores_data)
        ]
        scored.sort(key=lambda x: x['score'], reverse=True)
        return {"code": 200, "msg": "OK", "data": scored[:top_k]}

    except requests.exceptions.RequestException as e:
        logger.error(f"调用Reranker API失败: {e}", exc_info=True)
        # Log the response body when we got far enough to have one.
        if response is not None and hasattr(response, 'text'):
            logger.error(f"Reranker服务返回内容: {response.text}")
        return {"code": 500, "msg": "调用Reranker服务失败", "data": None}
    except Exception as e:
        logger.error(f"处理Rerank请求时发生未知错误: {e}", exc_info=True)
        return {"code": 500, "msg": "处理Rerank请求失败", "data": None}


# --- 内部辅助函数 ---
def _process_single_field(value: Any, field_name: str, is_fragment_field: bool = False, split_type: str = None,
                          size: Any = None, overlap: Any = None) -> \
        Union[List, Dict, List[float]]:
    try:
        if isinstance(value, str):
            if not value.strip(): return [] if is_fragment_field else []
            # 移除 Markdown 图片和 Latex 表达式
            value = re.sub(r'!\[\]\(images/[a-zA-Z0-9_\-]+\.(jpg|jpeg|png|gif|webp)\)', '', value, flags=re.IGNORECASE)
            value = re.sub(r'(\$\$.+?\$\$|\$[^$]+?\$)', '', value, flags=re.DOTALL)

            if is_fragment_field:
                headers_to_split_on = [("#", "Header 1"), ("##", "Header 2"), ("###", "Header 3"), ("####", "Header 4")]
                header_lookup = {name: symbol for symbol, name in headers_to_split_on}
                if split_type == "1":
                    splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
                    split_docs = splitter.split_text(value)
                    if not split_docs: return []
                    final_chunks = []
                    for doc in split_docs:
                        header_lines = [f"{header_lookup[key]} {value}" for key, value in doc.metadata.items()]
                        header_string = "\n\n".join(header_lines)
                        full_content = f"{header_string}\n\n{doc.page_content}".strip()
                        final_chunks.append({"metadata": doc.metadata, "text": full_content})
                    chunks_to_embed = [chunk['text'] for chunk in final_chunks]
                    embeddings = embedding_api_client.get_embeddings(chunks_to_embed)
                    for chunk, emb in zip(final_chunks, embeddings):
                        chunk["vector"] = emb
                    return final_chunks
                elif split_type == "2":
                    try:
                        chunk_size_for_split = int(size) if size is not None else 300
                    except (ValueError, TypeError):
                        chunk_size_for_split = 300
                        logger.warning(f"无效的 'size' 参数: '{size}'. 使用默认值 300。")
                    md_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
                    header_chunks = md_splitter.split_text(value)
                    char_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size_for_split, chunk_overlap=20)
                    final_chunk_objects = []
                    for header_chunk in header_chunks:
                        header_lines = [f"{header_lookup[key]} {value}" for key, value in header_chunk.metadata.items()]
                        header_string = "\n\n".join(header_lines)
                        full_content_for_subsplitting = f"{header_string}\n\n{header_chunk.page_content}".strip()
                        sub_chunks = char_splitter.split_text(full_content_for_subsplitting)
                        for sub_chunk_content in sub_chunks:
                            final_chunk_objects.append({"metadata": header_chunk.metadata, "text": sub_chunk_content})
                    if not final_chunk_objects: return []
                    chunks_to_embed = [chunk['text'] for chunk in final_chunk_objects]
                    embeddings = embedding_api_client.get_embeddings(chunks_to_embed)
                    for chunk_obj, emb in zip(final_chunk_objects, embeddings):
                        chunk_obj["vector"] = emb
                    return final_chunk_objects
                elif split_type == "3":
                    try:
                        chunk_size_for_split = int(size) if size is not None else 300
                    except (ValueError, TypeError):
                        chunk_size_for_split = 300
                        logger.warning(f"无效的 'size' 参数: '{size}'. 使用默认值 300。")

                    def split_with_code_preservation(text, chunk_size):
                        code_blocks = re.findall(r'```.*?```', text, flags=re.DOTALL)
                        placeholders = [f"__CODE_BLOCK_{i}__" for i in range(len(code_blocks))]
                        for i, code in enumerate(code_blocks): text = text.replace(code, placeholders[i], 1)

                        def length_func(txt: str) -> int:
                            return len(re.sub(r'__CODE_BLOCK_\d+__', '', txt))

                        text_splitter = RecursiveCharacterTextSplitter(
                            chunk_size=chunk_size, chunk_overlap=0, length_function=length_func,
                            separators=["\n\n", "\n", r"(?<=。)", r"(?<=！)", r"(?<=？)", r"(?<=\.\s)", r"(?<=\;\s)",
                                        r"(?<=\,\s)"],
                            is_separator_regex=True)
                        chunks = text_splitter.split_text(text)
                        final_chunks = []
                        for chunk in chunks:
                            for i, ph in enumerate(placeholders): chunk = chunk.replace(ph, code_blocks[i])
                            final_chunks.append(chunk)
                        return final_chunks

                    md_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
                    header_chunks = md_splitter.split_text(value)
                    final_chunk_objects = []
                    for header_chunk in header_chunks:
                        header_lines = [f"{header_lookup[key]} {value}" for key, value in header_chunk.metadata.items()]
                        header_string = "\n\n".join(header_lines)
                        full_content_for_subsplitting = f"{header_string}\n\n{header_chunk.page_content}".strip()
                        sub_chunks = split_with_code_preservation(full_content_for_subsplitting, chunk_size_for_split)
                        for sub_chunk_content in sub_chunks:
                            final_chunk_objects.append({"metadata": header_chunk.metadata, "text": sub_chunk_content})
                    if not final_chunk_objects: return []
                    chunks_to_embed = [chunk['text'] for chunk in final_chunk_objects]
                    embeddings = embedding_api_client.get_embeddings(chunks_to_embed)
                    for chunk_obj, emb in zip(final_chunk_objects, embeddings):
                        chunk_obj["vector"] = emb
                    return final_chunk_objects
                elif split_type == "4":
                    try:
                        chunk_size = int(size) if size is not None else 300
                        chunk_overlap = int(overlap) if overlap is not None else 0
                    except (ValueError, TypeError):
                        logger.warning(f"无效的 'size' ({size}) 或 'overlap' ({overlap}) 参数。它们必须是有效的数字。")
                        return []
                    if chunk_size <= 0:
                        logger.warning(f"'size' 必须是正整数，但收到了: {chunk_size}")
                        return []
                    if chunk_overlap < 0:
                        logger.warning(f"'overlap' 必须是非负整数，但收到了: {chunk_overlap}")
                        return []
                    if chunk_overlap >= chunk_size:
                        logger.warning(f"'overlap' ({chunk_overlap}) 必须小于 'size' ({chunk_size})")
                        return []
                    final_chunk_objects = []
                    text_len = len(value)
                    start_index = 0
                    while start_index < text_len:
                        end_index = start_index + chunk_size
                        chunk_content = value[start_index:end_index]
                        final_chunk_objects.append({"metadata": {}, "text": chunk_content})
                        start_index += (chunk_size - chunk_overlap)
                    if not final_chunk_objects: return []
                    chunks_to_embed = [chunk['text'] for chunk in final_chunk_objects]
                    embeddings = embedding_api_client.get_embeddings(chunks_to_embed)
                    for chunk_obj, emb in zip(final_chunk_objects, embeddings):
                        chunk_obj["vector"] = emb
                    return final_chunk_objects
                else:
                    logger.warning(f"字段 '{field_name}' 被指定为分块字段，但 split_type ('{split_type}') 无效或未提供。")
                    return []
            else:
                embedding = embedding_api_client.get_embeddings(value)
                return embedding[0] if embedding and embedding[0] else []
        elif isinstance(value, list):
            return [_process_single_field(item, field_name, is_fragment_field, split_type, size, overlap) for item in
                    value]
        return value
    except Exception as e:
        logger.error(f"字段 {field_name} 处理失败: {str(e)}", exc_info=True)
        return value