# -*- coding: utf-8 -*-
"""
Embedding工具函数
"""

import re
import numpy as np
from typing import List, Tuple, Dict, Any
from sklearn.metrics.pairwise import cosine_similarity


def preprocess_text(text: str) -> str:
    """Normalize raw text before embedding.

    Collapses runs of whitespace into single spaces and removes every
    character outside CJK ideographs, word characters, whitespace, and
    basic (ASCII + full-width) punctuation.

    Args:
        text: Raw input text; may be empty or None-like falsy.

    Returns:
        The cleaned text, or "" for falsy input.
    """
    if not text:
        return ""

    # Collapse all whitespace runs to a single space, trimming the ends first.
    collapsed = re.sub(r'\s+', ' ', text.strip())

    # Keep CJK (U+4E00–U+9FFF), word chars, whitespace, and basic punctuation.
    return re.sub(r'[^\u4e00-\u9fff\w\s.,!?;:()（）]', '', collapsed)


def calculate_cosine_similarity(vector1: List[float], vector2: List[float]) -> float:
    """Compute the cosine similarity between two vectors.

    Args:
        vector1: First vector.
        vector2: Second vector; must have the same dimension as vector1.

    Returns:
        Cosine similarity in [-1, 1]. Returns 0.0 when either input is
        empty or has zero magnitude (the unguarded formula would divide
        by zero and yield NaN).

    Raises:
        ValueError: If the two vectors have different dimensions.
    """
    if not vector1 or not vector2:
        return 0.0

    if len(vector1) != len(vector2):
        raise ValueError("向量维度不匹配")

    v1 = np.asarray(vector1, dtype=float)
    v2 = np.asarray(vector2, dtype=float)

    # BUG FIX: a non-empty all-zero vector (e.g. [0, 0]) passed the emptiness
    # guard above but made the denominator zero, returning NaN. Treat
    # zero-magnitude vectors as having no similarity.
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    if denom == 0.0:
        return 0.0

    return float(np.dot(v1, v2) / denom)


def calculate_euclidean_distance(vector1: List[float], vector2: List[float]) -> float:
    """Compute the Euclidean (L2) distance between two vectors.

    Args:
        vector1: First vector.
        vector2: Second vector; must have the same dimension as vector1.

    Returns:
        The L2 distance, or +inf when either input is empty.

    Raises:
        ValueError: If the two vectors have different dimensions.
    """
    if not vector1 or not vector2:
        return float('inf')

    if len(vector1) != len(vector2):
        raise ValueError("向量维度不匹配")

    # L2 norm of the difference vector.
    diff = np.asarray(vector1) - np.asarray(vector2)
    return float(np.linalg.norm(diff))


def batch_cosine_similarity(query_vector: List[float], vectors: List[List[float]]) -> List[float]:
    """Compute cosine similarity of one query vector against many vectors.

    Implemented with plain numpy instead of sklearn — the rest of this
    module already depends only on numpy, and this avoids pulling in a
    heavyweight dependency for a one-line dot-product computation.
    Zero-magnitude vectors get similarity 0.0 (matching sklearn's
    ``cosine_similarity``, which normalizes zero norms to 1).

    Args:
        query_vector: The query vector.
        vectors: A list of candidate vectors, each the same dimension
            as the query.

    Returns:
        One similarity per candidate, in the same order; [] for empty input.
    """
    if not query_vector or not vectors:
        return []

    query = np.asarray(query_vector, dtype=float)
    matrix = np.asarray(vectors, dtype=float)

    # Replace zero norms with 1 so zero vectors yield similarity 0, not NaN.
    query_norm = np.linalg.norm(query) or 1.0
    row_norms = np.linalg.norm(matrix, axis=1)
    row_norms = np.where(row_norms == 0.0, 1.0, row_norms)

    similarities = (matrix @ query) / (row_norms * query_norm)
    return similarities.tolist()


def normalize_vector(vector: List[float]) -> List[float]:
    """Scale a vector to unit L2 norm.

    Args:
        vector: The vector to normalize.

    Returns:
        The unit-length vector; [] for empty input; an all-zero vector
        of the same length when the input has zero magnitude (cannot be
        normalized).
    """
    if not vector:
        return []

    arr = np.asarray(vector, dtype=float)
    magnitude = float(np.linalg.norm(arr))

    # A zero vector has no direction — return zeros rather than divide by 0.
    if magnitude == 0:
        return [0.0] * len(vector)

    return (arr / magnitude).tolist()


def find_most_similar(vector: List[float], candidates: List[List[float]], 
                     top_k: int = 5) -> List[Tuple[int, float]]:
    """Rank candidate vectors by cosine similarity to a query vector.

    Args:
        vector: The query vector.
        candidates: Candidate vectors to score against the query.
        top_k: Maximum number of results to return.

    Returns:
        Up to top_k (index, similarity) pairs, sorted by descending
        similarity; [] for empty input.
    """
    if not vector or not candidates:
        return []

    scores = batch_cosine_similarity(vector, candidates)

    # Pair each candidate index with its score, then rank best-first.
    # sorted() is stable, so ties keep their original candidate order.
    ranked = sorted(enumerate(scores), key=lambda pair: pair[1], reverse=True)
    return ranked[:top_k]


def chunk_text(text: str, chunk_size: int = 1000, overlap: int = 100) -> List[str]:
    """Split long text into overlapping chunks.

    Prefers to break at a Chinese full stop ('。') when one falls in the
    second half of the chunk, so chunks end on sentence boundaries.

    Args:
        text: The text to split.
        chunk_size: Target maximum length of each chunk.
        overlap: Number of characters shared between consecutive chunks.

    Returns:
        The list of chunks; [] for empty input; [text] when it already
        fits in one chunk.
    """
    if not text:
        return []

    if len(text) <= chunk_size:
        return [text]

    chunks = []
    start = 0
    text_len = len(text)

    while start < text_len:
        end = start + chunk_size

        # Not the last chunk: try to end at the nearest sentence boundary,
        # but only if that keeps the chunk at least half of chunk_size.
        if end < text_len:
            last_period = text.rfind('。', start, end)
            if last_period > start + chunk_size // 2:
                end = last_period + 1

        chunks.append(text[start:end])

        # BUG FIX: the original continued looping after the final chunk,
        # which could emit a redundant trailing chunk when
        # overlap > end - len(text). Stop as soon as the text is covered.
        if end >= text_len:
            break

        # BUG FIX: the original set start = end - overlap unconditionally,
        # so whenever overlap >= end - start (large overlap, or a sentence
        # split shortening the chunk) start failed to advance and the loop
        # never terminated. Guarantee forward progress by falling back to
        # a non-overlapping step.
        next_start = end - overlap
        start = next_start if next_start > start else end

    return chunks


def merge_chunk_embeddings(embeddings: List[List[float]], 
                          method: str = "mean") -> List[float]:
    """Pool per-chunk embeddings into a single document embedding.

    Args:
        embeddings: One embedding per chunk, all the same dimension.
        method: Pooling strategy — "mean" (average pooling), "max"
            (element-wise maximum), or "weighted" (weighted average;
            currently uniform weights, a hook for length-based weighting).

    Returns:
        The pooled embedding; [] for empty input; the single embedding
        unchanged when only one chunk is given.

    Raises:
        ValueError: For an unrecognized pooling method.
    """
    if not embeddings:
        return []

    if len(embeddings) == 1:
        return embeddings[0]

    matrix = np.array(embeddings)

    if method == "mean":
        pooled = matrix.mean(axis=0)
    elif method == "max":
        pooled = matrix.max(axis=0)
    elif method == "weighted":
        # Uniform weights for now; replace with chunk-length (or other)
        # weights when a weighting signal becomes available.
        pooled = np.average(matrix, axis=0, weights=np.ones(len(embeddings)))
    else:
        raise ValueError(f"不支持的合并方法: {method}")

    return pooled.tolist()


def validate_vector(vector: List[float]) -> bool:
    """Check that a vector is usable as an embedding.

    A vector is invalid when it is empty, contains any NaN/±inf
    component, or is entirely zero.

    Args:
        vector: The vector to validate.

    Returns:
        True if the vector is valid, False otherwise.
    """
    if not vector:
        return False

    arr = np.asarray(vector, dtype=float)

    # Reject any NaN or infinite component.
    if not np.isfinite(arr).all():
        return False

    # Reject the all-zero vector (carries no direction/information).
    return bool(np.any(arr != 0))


def get_vector_statistics(vectors: List[List[float]]) -> Dict[str, Any]:
    """Summarize a collection of vectors.

    Args:
        vectors: A list of vectors, all the same dimension.

    Returns:
        A dict with the count, the dimensionality, and the mean/std/min/max
        of the per-vector L2 norms. All statistics are 0 for empty input.
    """
    if not vectors:
        return dict(count=0, dimension=0, mean_norm=0.0,
                    std_norm=0.0, min_norm=0.0, max_norm=0.0)

    matrix = np.array(vectors)

    # One L2 norm per row (per vector).
    norms = np.linalg.norm(matrix, axis=1)

    return dict(
        count=len(vectors),
        dimension=matrix.shape[1],
        mean_norm=float(norms.mean()),
        std_norm=float(norms.std()),
        min_norm=float(norms.min()),
        max_norm=float(norms.max()),
    )
