#!/usr/bin/env python3

import asyncio
import gc
import hashlib
import logging
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

import psutil
from cachetools import TTLCache

from .models import UnifiedTextProcessingManager, get_text_processing_manager
from .schemas import ActionClassification, ProcessingResult, SummaryResult

logger = logging.getLogger(__name__)

class TextProcessingService:
    """
    文本处理服务类

    协调StructBERT和PALM2.0模型的工作流程
    提供统一的文本处理接口
    包含性能优化和内存管理功能
    """

    def __init__(self):
        """Set up caches, counters, concurrency controls and configuration.

        Construction is cheap and performs no I/O; model loading is deferred
        to initialize().
        """
        # Set lazily by initialize() via get_text_processing_manager().
        self.text_processing_manager = None
        self.service_start_time = time.time()
        self.initialized = False

        # Result cache (TTL cache, entries expire after 1 hour).
        self.result_cache = TTLCache(maxsize=1000, ttl=3600)

        # Runtime performance counters.
        self.stats = {
            'requests_processed': 0,
            'cache_hits': 0,
            'cache_misses': 0,
            'average_processing_time': 0,
            'last_gc_time': time.time(),
            'memory_peak': 0
        }

        # Concurrency control.
        self.processing_semaphore = asyncio.Semaphore(10)  # at most 10 concurrent requests
        self.active_requests = 0

        # Service configuration.
        # NOTE(review): 'enable_result_caching' appears unused — code paths
        # check 'cache_enabled' instead; confirm before removing either key.
        self.config = {
            'enable_parallel_processing': True,  # run classification and summary concurrently
            'enable_result_caching': True,       # result-caching feature flag (see NOTE above)
            'max_text_length': 10000,            # maximum accepted text length (chars)
            'min_text_length': 5,                # minimum accepted text length (chars)
            'default_segment_length': 200,       # default text segment length
            'memory_threshold_mb': 1024,         # memory warning threshold (MB)
            'gc_interval': 300,                  # seconds between GC sweeps
            'cache_enabled': True,               # master cache switch
            'max_concurrent_requests': 10,       # maximum concurrent requests
        }

    async def initialize(self) -> bool:
        """Initialise the text-processing service; return True on success.

        Idempotent: once initialised, later calls return True immediately.
        """
        if self.initialized:
            return True

        try:
            logger.info("正在初始化文本处理服务...")
            manager = await get_text_processing_manager()
            self.text_processing_manager = manager

            if manager and manager.is_ready():
                self.initialized = True
                logger.info("文本处理服务初始化成功")
                return True

            logger.error("文本处理模型未就绪")
            return False

        except Exception as e:
            logger.error(f"文本处理服务初始化失败: {e}")
            return False

    def _get_cache_key(self, text: str, enable_classification: bool, enable_summary: bool) -> str:
        """生成缓存键"""
        content = f"{text}:{enable_classification}:{enable_summary}"
        return hashlib.md5(content.encode('utf-8')).hexdigest()

    def _check_memory_usage(self) -> Dict[str, Any]:
        """Sample this process's memory usage and track the peak RSS.

        Returns a dict with resident size in MB, percent of system memory,
        and whether the configured threshold has been exceeded.
        """
        proc = psutil.Process()
        rss_mb = proc.memory_info().rss / (1024 * 1024)

        # Keep the high-water mark for the statistics endpoints.
        self.stats['memory_peak'] = max(self.stats['memory_peak'], rss_mb)

        return {
            'memory_used_mb': rss_mb,
            'memory_percent': proc.memory_percent(),
            'is_above_threshold': rss_mb > self.config['memory_threshold_mb'],
        }

    def _trigger_cleanup(self):
        """Run periodic memory housekeeping (cache purge plus GC sweep).

        No-op unless at least ``config['gc_interval']`` seconds have elapsed
        since the previous sweep.
        """
        now = time.time()
        if now - self.stats['last_gc_time'] <= self.config['gc_interval']:
            return

        logger.info("执行内存清理...")

        # Drop the whole result cache once it grows past 500 entries.
        if len(self.result_cache) > 500:
            self.result_cache.clear()
            logger.info("缓存已清理")

        # Force a full garbage-collection pass and record when it ran.
        collected = gc.collect()
        self.stats['last_gc_time'] = now
        logger.info(f"垃圾回收完成，回收了 {collected} 个对象")

    async def process_text(
        self,
        text: str,
        enable_classification: bool = True,
        enable_summary: bool = True,
        user_id: Optional[str] = None,
        session_id: Optional[str] = None
    ) -> ProcessingResult:
        """
        Main text-processing entry point (performance-optimised).

        Optionally runs StructBERT classification and PALM2.0 summarisation,
        with TTL result caching, input validation, memory monitoring and a
        concurrency limit enforced by the instance semaphore.

        Args:
            text: Input text.
            enable_classification: Whether to run StructBERT classification.
            enable_summary: Whether to run PALM2.0 summarisation.
            user_id: Optional user identifier, recorded in the result.
            session_id: Optional session identifier, recorded in the result.

        Returns:
            ProcessingResult: Combined processing result.

        Raises:
            ValueError: If the text is empty, shorter than
                config['min_text_length'] or longer than
                config['max_text_length'].
        """
        start_time = time.time()

        # Concurrency control (semaphore created in __init__).
        async with self.processing_semaphore:
            self.active_requests += 1

            try:
                # Cache lookup: identical (text, flags) requests are served
                # from the TTL cache. Note a cache hit returns before input
                # validation runs.
                cache_key = None
                if self.config['cache_enabled']:
                    cache_key = self._get_cache_key(text, enable_classification, enable_summary)
                    cached_result = self.result_cache.get(cache_key)
                    if cached_result:
                        self.stats['cache_hits'] += 1
                        logger.info(f"缓存命中: {cache_key[:8]}...")
                        return cached_result
                    else:
                        self.stats['cache_misses'] += 1

                # Input validation.
                if not text or not text.strip():
                    raise ValueError("输入文本不能为空")

                text_length = len(text)
                if text_length < self.config['min_text_length']:
                    raise ValueError(f"文本长度过短，最少需要{self.config['min_text_length']}个字符")

                if text_length > self.config['max_text_length']:
                    raise ValueError(f"文本长度过长，最多支持{self.config['max_text_length']}个字符")

                # Memory check; trigger housekeeping when above threshold.
                memory_status = self._check_memory_usage()
                if memory_status['is_above_threshold']:
                    logger.warning(f"内存使用过高: {memory_status['memory_used_mb']:.1f}MB")
                    self._trigger_cleanup()

                # Run the enabled steps, in parallel where configured.
                tasks = []
                classification_result = None
                summary_result = None

                if enable_classification:
                    if self.config['enable_parallel_processing'] and enable_summary:
                        # Parallel mode: schedule classification alongside summary.
                        classification_task = asyncio.create_task(
                            self._run_classification(text)
                        )
                        tasks.append(('classification', classification_task))
                    else:
                        # Serial classification.
                        classification_result = await self._run_classification(text)

                if enable_summary:
                    if self.config['enable_parallel_processing'] and enable_classification and classification_result is None:
                        # Parallel mode (classification task scheduled above);
                        # the summary must run without the classification result.
                        summary_task = asyncio.create_task(
                            self._run_summary(text, None)  # no classification result available yet
                        )
                        tasks.append(('summary', summary_task))
                    else:
                        # Serial summary (may use the classification result).
                        summary_result = await self._run_summary(text, classification_result)

                # Collect parallel results. A failed task degrades that
                # feature's result to None instead of failing the request.
                if tasks:
                    results = await asyncio.gather(*[task for _, task in tasks], return_exceptions=True)
                    for i, (task_type, _) in enumerate(tasks):
                        result = results[i]
                        if isinstance(result, Exception):
                            logger.error(f"{task_type}任务失败: {result}")
                            if task_type == 'classification':
                                classification_result = None
                            else:
                                summary_result = None
                        else:
                            if task_type == 'classification':
                                classification_result = result
                            else:
                                summary_result = result

                # Parallel mode produced a summary without classification
                # input; regenerate it now that classification is available.
                # NOTE(review): this means the summary model runs twice per
                # request in parallel mode — confirm this trade-off is intended.
                if (enable_summary and summary_result and classification_result and
                    self.config['enable_parallel_processing']):
                    # Regenerate a summary that incorporates the classification.
                    enhanced_summary = await self._run_summary(text, classification_result)
                    if enhanced_summary:
                        summary_result = enhanced_summary

                # Assemble the result payload.
                processing_time = time.time() - start_time
                result = ProcessingResult(
                    text=text,
                    text_length=text_length,
                    classification=classification_result,
                    summary=summary_result,
                    processing_time=processing_time,
                    timestamp=datetime.now(),
                    user_id=user_id,
                    session_id=session_id
                )

                # Cache for subsequent identical requests.
                if self.config['cache_enabled'] and cache_key:
                    self.result_cache[cache_key] = result

                # Update running statistics (incremental mean over all requests).
                self.stats['requests_processed'] += 1
                self.stats['average_processing_time'] = (
                    (self.stats['average_processing_time'] * (self.stats['requests_processed'] - 1) + processing_time) /
                    self.stats['requests_processed']
                )

                logger.info(f"文本处理完成，耗时: {processing_time:.2f}秒")
                return result

            except Exception as e:
                logger.error(f"文本处理失败: {e}")
                raise
            finally:
                self.active_requests -= 1

    async def _run_classification(self, text: str) -> Optional[ActionClassification]:
        """Classify *text* with StructBERT; return None on any failure."""
        try:
            # Lazy initialisation on first use.
            if not self.initialized:
                await self.initialize()

            if not self.text_processing_manager:
                logger.error("文本处理管理器未初始化")
                return None

            manager = self.text_processing_manager.get_structbert_manager()
            if not manager:
                logger.warning("StructBERT管理器不可用")
                return None

            return await manager.classify_text(text)
        except Exception as e:
            logger.error(f"文本分类失败: {e}")
            return None

    async def _run_summary(
        self,
        text: str,
        classification_result: Optional[ActionClassification] = None
    ) -> Optional[SummaryResult]:
        """Summarise *text* with PALM2.0, optionally conditioning on a prior
        classification result; return None on any failure."""
        try:
            # Lazy initialisation on first use.
            if not self.initialized:
                await self.initialize()

            if not self.text_processing_manager:
                logger.error("文本处理管理器未初始化")
                return None

            manager = self.text_processing_manager.get_palm2_manager()
            if not manager:
                logger.warning("PALM2管理器不可用")
                return None

            return await manager.generate_summary(text, classification_result)
        except Exception as e:
            logger.error(f"文本摘要失败: {e}")
            return None

    async def process_batch(
        self,
        texts: List[str],
        enable_classification: bool = True,
        enable_summary: bool = True,
        user_id: Optional[str] = None,
        session_id: Optional[str] = None,
        max_concurrent: int = 5
    ) -> List[ProcessingResult]:
        """
        Process a batch of texts concurrently (performance-optimised).

        Each item is delegated to process_text(); a per-item failure is
        converted into a ProcessingResult carrying the error message rather
        than aborting the whole batch. Results keep the input order.

        Args:
            texts: Texts to process.
            enable_classification: Whether to run classification.
            enable_summary: Whether to run summarisation.
            user_id: Optional user identifier.
            session_id: Optional base session identifier; items get an
                "_<index>" suffix.
            max_concurrent: Requested concurrency, capped by
                config['max_concurrent_requests'].

        Returns:
            List[ProcessingResult]: One result per input text.
        """
        if not texts:
            return []

        # Cap batch concurrency at the service-wide ceiling.
        limit = min(max_concurrent, self.config['max_concurrent_requests'])
        gate = asyncio.Semaphore(limit)

        def _failure_result(text: str, index: int, err: str) -> ProcessingResult:
            # Placeholder result recording a failure for one batch item.
            return ProcessingResult(
                text=text,
                text_length=len(text),
                classification=None,
                summary=None,
                processing_time=0,
                timestamp=datetime.now(),
                user_id=user_id,
                session_id=f"{session_id}_{index}" if session_id else None,
                error=err
            )

        async def _run_one(text: str, index: int) -> ProcessingResult:
            async with gate:
                try:
                    return await self.process_text(
                        text=text,
                        enable_classification=enable_classification,
                        enable_summary=enable_summary,
                        user_id=user_id,
                        session_id=f"{session_id}_{index}" if session_id else None
                    )
                except Exception as e:
                    logger.error(f"批处理第{index}项失败: {e}")
                    return _failure_result(text, index, str(e))

        # Fan out all items and wait for completion.
        outcomes = await asyncio.gather(
            *(_run_one(text, i) for i, text in enumerate(texts)),
            return_exceptions=True
        )

        # Defensive second pass: any exception that escaped the per-item
        # handler is also converted into an error result.
        processed_results: List[ProcessingResult] = []
        for i, outcome in enumerate(outcomes):
            if isinstance(outcome, Exception):
                logger.error(f"批处理任务{i}异常: {outcome}")
                processed_results.append(_failure_result(texts[i], i, str(outcome)))
            else:
                processed_results.append(outcome)

        logger.info(f"批量处理完成，共处理{len(texts)}条文本")
        return processed_results

    async def get_health_status(self) -> Dict[str, Any]:
        """
        Report service health (enhanced version).

        Gathers model readiness, memory usage, uptime, cache statistics and
        request counters. This method is designed never to raise: any error
        is converted into an 'unhealthy' payload describing the failure.

        Returns:
            Dict[str, Any]: Health-status information.
        """
        try:
            # Model status is only available once the manager is initialised.
            structbert_status = None
            palm2_status = None

            if self.text_processing_manager:
                structbert_manager = self.text_processing_manager.get_structbert_manager()
                palm2_manager = self.text_processing_manager.get_palm2_manager()

                if structbert_manager:
                    structbert_status = await structbert_manager.get_model_status()
                if palm2_manager:
                    palm2_status = await palm2_manager.get_model_status()

            # Memory snapshot.
            memory_status = self._check_memory_usage()

            # Service uptime in seconds.
            uptime = time.time() - self.service_start_time

            # Cache statistics; max(1, ...) guards against division by zero.
            cache_stats = {
                'size': len(self.result_cache),
                'maxsize': self.result_cache.maxsize,
                'hits': self.stats['cache_hits'],
                'misses': self.stats['cache_misses'],
                'hit_rate': (self.stats['cache_hits'] /
                           max(1, self.stats['cache_hits'] + self.stats['cache_misses']) * 100)
            }

            # NOTE: 'uptime' duplicates 'uptime_seconds'; both keys are kept
            # for backward compatibility with existing consumers.
            return {
                'status': 'healthy' if self.initialized else 'unhealthy',
                'timestamp': datetime.now().isoformat(),
                'uptime_seconds': uptime,
                'models_loaded': {
                    'structbert': structbert_status is not None,
                    'palm2': palm2_status is not None,
                    'overall': self.initialized
                },
                'memory_usage': memory_status,
                'uptime': uptime,
                'models': {
                    'structbert': structbert_status,
                    'palm2': palm2_status
                },
                'statistics': self.stats,
                'cache': cache_stats,
                'active_requests': self.active_requests,
                'config': self.config
            }

        except Exception as e:
            logger.error(f"健康检查失败: {e}")

            # BUGFIX: the memory probe itself may fail (e.g. psutil errors).
            # A health check must still return a payload rather than raise
            # from inside its own error handler.
            try:
                memory_usage = self._check_memory_usage()
            except Exception:
                memory_usage = {}

            return {
                'status': 'unhealthy',
                'error': str(e),
                'timestamp': datetime.now().isoformat(),
                'models_loaded': {
                    'structbert': False,
                    'palm2': False,
                    'overall': False
                },
                'memory_usage': memory_usage,
                'uptime': time.time() - self.service_start_time
            }

    def clear_cache(self) -> Dict[str, Any]:
        """Empty the result cache and report how many entries were removed."""
        removed = len(self.result_cache)
        self.result_cache.clear()
        logger.info(f"缓存已清理，删除了{removed}个条目")
        return {
            'cleared_items': removed,
            'current_size': len(self.result_cache)
        }

    def update_config(self, new_config: Dict[str, Any]) -> Dict[str, Any]:
        """Merge *new_config* into the service config; return old and new.

        No key validation is performed — unknown keys are stored as-is.
        """
        previous = dict(self.config)
        self.config.update(new_config)
        logger.info(f"配置已更新: {new_config}")
        return {
            'old_config': previous,
            'new_config': self.config
        }

    async def cleanup_resources(self):
        """Release the result cache and model resources, then force a GC pass."""
        logger.info("开始清理资源...")

        # Drop all cached results.
        self.result_cache.clear()

        # Ask each model manager (StructBERT first, then PALM2) to release
        # its resources, if present.
        manager = self.text_processing_manager
        if manager:
            for sub_manager in (manager.get_structbert_manager(), manager.get_palm2_manager()):
                if sub_manager:
                    await sub_manager.cleanup()

        # Final garbage-collection sweep.
        collected = gc.collect()
        logger.info(f"资源清理完成，回收了{collected}个对象")

    def get_performance_metrics(self) -> Dict[str, Any]:
        """Return a snapshot of request, cache, memory and uptime metrics."""
        memory_snapshot = self._check_memory_usage()
        hits = self.stats['cache_hits']
        misses = self.stats['cache_misses']

        return {
            'requests_processed': self.stats['requests_processed'],
            'average_processing_time': self.stats['average_processing_time'],
            # Percentage; max(1, ...) guards against division by zero.
            'cache_hit_rate': hits / max(1, hits + misses) * 100,
            'memory_usage': memory_snapshot,
            'active_requests': self.active_requests,
            'uptime': time.time() - self.service_start_time,
        }
