#!/usr/bin/env python3

"""
文本处理工具函数
"""

import hashlib
import json
import re
from datetime import datetime
from typing import Any, Dict, List, Optional


def generate_request_id(text: str, user_id: Optional[str] = None) -> str:
    """Build a short unique identifier for a processing request.

    The id is derived from the first 100 characters of the input text,
    the user id (when given) and the current timestamp, so repeated
    calls produce different ids even for identical text.

    Args:
        text: Input text.
        user_id: Optional user identifier.

    Returns:
        str: A 16-character hexadecimal identifier.
    """
    seed = text[:100] + (user_id or "") + datetime.now().isoformat()
    digest = hashlib.md5(seed.encode()).hexdigest()
    return digest[:16]

def sanitize_text(text: str) -> str:
    """Normalize a piece of text for downstream processing.

    Collapses runs of whitespace into single spaces, converts common
    full-width Chinese punctuation to its ASCII counterpart, and drops
    every character outside the allowed set (CJK, word characters,
    whitespace and a handful of punctuation marks).

    Args:
        text: Raw input text.

    Returns:
        str: The cleaned text.
    """
    # Collapse every whitespace run into a single space.
    collapsed = re.sub(r'\s+', ' ', text.strip())

    # Map full-width punctuation to ASCII equivalents in one pass.
    normalized = collapsed.translate(str.maketrans('，。！？', ',.!?'))

    # Keep CJK, word characters, whitespace and common punctuation only.
    cleaned = re.sub(r'[^\u4e00-\u9fff\w\s.,!?;:\-()（）【】]', '', normalized)

    return cleaned.strip()

def split_text_by_sentences(text: str, max_length: int = 200) -> List[str]:
    """Split text into segments of at most ``max_length`` characters.

    The text is cut at sentence-ending punctuation (both ASCII and
    Chinese); consecutive sentences are packed into one segment as long
    as the segment — including the "。" terminator appended after each
    sentence — stays within ``max_length``.  A single sentence longer
    than ``max_length`` is still emitted as its own (oversized) segment.

    Args:
        text: Input text.
        max_length: Maximum segment length, terminator included.

    Returns:
        List[str]: The segments; empty list for empty input.
    """
    # Split on ASCII and Chinese sentence-ending punctuation.
    sentences = re.split(r'[.!?。！？]', text)

    segments = []
    current_segment = ""

    for sentence in sentences:
        sentence = sentence.strip()
        if not sentence:
            continue

        # +1 accounts for the "。" terminator appended below; the
        # previous check omitted it, so segments could exceed max_length.
        if len(current_segment) + len(sentence) + 1 <= max_length:
            current_segment += sentence + "。"
        else:
            if current_segment:
                segments.append(current_segment.strip())
            current_segment = sentence + "。"

    if current_segment:
        segments.append(current_segment.strip())

    return segments

def calculate_text_similarity(text1: str, text2: str) -> float:
    """Compute a crude character-level Jaccard similarity.

    Both texts are lower-cased and treated as sets of characters; the
    score is |intersection| / |union|.

    Args:
        text1: First text.
        text2: Second text.

    Returns:
        float: Similarity score in [0, 1]; 0.0 when both sets are empty.
    """
    chars_a = set(text1.lower())
    chars_b = set(text2.lower())

    union_size = len(chars_a | chars_b)
    if union_size == 0:
        return 0.0
    return len(chars_a & chars_b) / union_size

def extract_keywords(text: str, max_keywords: int = 10) -> List[str]:
    """Extract the most frequent Chinese words as keywords.

    Runs of CJK characters are counted; single characters and common
    stop words are ignored.  Ties keep first-occurrence order.

    Args:
        text: Input text.
        max_keywords: Upper bound on the number of keywords returned.

    Returns:
        List[str]: Keywords sorted by descending frequency.
    """
    # Minimal Chinese stop-word list.
    stop_words = {
        '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一',
        '个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没',
        '看', '好', '自己', '这', '那', '它', '他', '她', '们', '我们', '你们'
    }

    # Count runs of CJK characters, skipping stop words / single chars.
    frequencies: Dict[str, int] = {}
    for token in re.findall(r'[\u4e00-\u9fff]+', text):
        if len(token) < 2 or token in stop_words:
            continue
        frequencies[token] = frequencies.get(token, 0) + 1

    # sorted() is stable, so equal frequencies keep insertion order —
    # the same tie-breaking the original frequency sort produced.
    ranked = sorted(frequencies, key=frequencies.get, reverse=True)
    return ranked[:max_keywords]

def format_processing_result(result: Dict[str, Any]) -> str:
    """Render a processing-result dict as human-readable text.

    Emits, in order and only when present: the summary block, the
    classification list, the key points and the processing info.

    Args:
        result: Processing-result dictionary.

    Returns:
        str: Newline-joined report text (empty for an empty result).
    """
    lines: List[str] = []

    summary = result.get('summary')
    if summary:
        lines.append("【摘要】")
        lines.append(summary['summary'])
        lines.append("")

    classifications = result.get('classifications')
    if classifications:
        lines.append("【分类结果】")
        for idx, item in enumerate(classifications, 1):
            lines.append(f"{idx}. {item['label']} (置信度: {item['confidence']:.2f})")
            lines.append(f"   内容: {item['text_segment'][:100]}...")
        lines.append("")

    if summary and 'key_points' in summary:
        points = summary['key_points']
        if points:
            lines.append("【关键要点】")
            for idx, point in enumerate(points, 1):
                lines.append(f"{idx}. {point}")
            lines.append("")

    if 'processing_time' in result:
        lines.append("【处理信息】")
        lines.append(f"处理时间: {result['processing_time']:.2f}秒")
        if 'model_versions' in result:
            lines.append(f"使用模型: {result['model_versions']}")

    return "\n".join(lines)

def validate_model_output(output: Any, expected_type: str) -> bool:
    """Check that a model output has the expected structure.

    Args:
        output: Raw model output.
        expected_type: Either 'classification' (a list of dicts carrying
            'label', 'confidence' and 'text_segment') or 'summary' (a
            dict carrying 'summary', 'key_points' and 'word_count').

    Returns:
        bool: True when the structure matches, False otherwise
        (including unknown ``expected_type`` values).
    """
    if expected_type == 'classification':
        required = ('label', 'confidence', 'text_segment')
        return isinstance(output, list) and all(
            all(key in item for key in required) for item in output
        )

    if expected_type == 'summary':
        required = ('summary', 'key_points', 'word_count')
        return isinstance(output, dict) and all(key in output for key in required)

    return False

def create_error_response(error_code: str, message: str, request_id: str = "") -> Dict[str, Any]:
    """Assemble the standard error-response payload.

    Args:
        error_code: Machine-readable error code.
        message: Human-readable error description.
        request_id: Id of the failing request, if known.

    Returns:
        Dict[str, Any]: Error payload with ``success=False`` and an
        ISO-format timestamp.
    """
    response: Dict[str, Any] = {"success": False}
    response["error_code"] = error_code
    response["message"] = message
    response["request_id"] = request_id
    response["timestamp"] = datetime.now().isoformat()
    return response

def log_processing_metrics(
    text_length: int,
    processing_time: float,
    classification_count: int,
    summary_length: int,
    user_id: Optional[str] = None
) -> Dict[str, Any]:
    """Build a metrics record for one processing run.

    Args:
        text_length: Length of the processed text.
        processing_time: Wall-clock processing time in seconds.
        classification_count: Number of classification results.
        summary_length: Length of the generated summary.
        user_id: Optional user identifier.

    Returns:
        Dict[str, Any]: Metrics record, including a chars-per-second
        ``processing_speed`` (0 when ``processing_time`` is not
        positive).
    """
    # Guard against division by zero / non-positive durations.
    speed = text_length / processing_time if processing_time > 0 else 0

    return {
        "timestamp": datetime.now().isoformat(),
        "user_id": user_id,
        "text_length": text_length,
        "processing_time": processing_time,
        "classification_count": classification_count,
        "summary_length": summary_length,
        "processing_speed": speed,
    }
