from typing import Dict, Any, List, Tuple, Optional
from pathlib import Path
from datetime import datetime, timedelta
import json
import tempfile
import shutil
import zipfile

from config import config
from utils import LogManager, file_manager, metrics_collector
from cloud_client import cloud_client

class SampleManager:
    """样本管理器"""
    
    def __init__(self):
        self.logger = LogManager.get_logger("SampleManager")
        
        # 目录路径 - 边侧只维护一个活跃数据集
        self.dataset_dir = Path(config.dataset.datasets_dir)
        self.images_dir = self.dataset_dir / 'images'
        self.labels_dir = self.dataset_dir / 'labels'
        self.confidence_dir = self.dataset_dir / config.dataset.confidence_dir
        
        # 样本上传目录
        self.sample_upload_dir = Path("sample_uploads")
        # 注意：新策略下不再使用 collected_file 跟踪状态
        
        # 确保目录存在
        for dir_path in [self.dataset_dir, self.images_dir, self.labels_dir, 
                        self.confidence_dir, self.sample_upload_dir]:
            dir_path.mkdir(parents=True, exist_ok=True)
    
    def get_sample_statistics(self) -> Dict[str, Any]:
        """获取样本统计信息 - 基于datasets/images目录的简化管理"""
        try:
            # 获取temp目录中未推理的样本
            temp_dir = Path(config.dataset.temp_dir)
            uninferred_samples = self._get_temp_samples()
            
            # 获取datasets/images中的已推理样本（未归集样本）
            uncollected_samples = self._get_images_samples()
            
            # 按难度阈值统计未归集的样本
            difficulty_stats = self._analyze_sample_difficulty_from_files(
                [self.confidence_dir / f"{name}.txt" for name in uncollected_samples if (self.confidence_dir / f"{name}.txt").exists()]
            )
            
            return {
                "total_inferred_samples": len(uncollected_samples),  # 等于未归集样本数
                "collected_samples": 0,  # 归集后即清空，始终为0
                "uncollected_samples": len(uncollected_samples),    # 基于images目录
                "uninferred_samples": len(uninferred_samples),
                "difficulty_distribution": difficulty_stats,
                "task_name": config.edge_node.service_type,
                "timestamp": datetime.now().isoformat(),
                "dataset_active": True  # 边侧只有一个活跃数据集
            }
            
        except Exception as e:
            self.logger.error(f"获取样本统计失败: {str(e)}")
            return {}
    
    def _get_temp_samples(self) -> set:
        """获取temp目录中的样本名称集合"""
        temp_dir = Path(config.dataset.temp_dir)
        if not temp_dir.exists():
            return set()
        
        sample_names = set()
        for ext in ['*.jpg', '*.jpeg', '*.png', '*.bmp']:
            for file_path in temp_dir.glob(ext):
                sample_names.add(file_path.stem)
        
        return sample_names
    
    def select_samples_for_aggregation(self, 
                                     difficulty_threshold: float = None,
                                     time_range: Tuple[datetime, datetime] = None,
                                     max_samples: int = None) -> List[str]:
        """根据条件选择需要聚合的样本"""
        try:
            # 获取所有可用的样本（已推理但未归集）
            available_samples = self._get_available_samples()
            
            if not available_samples:
                self.logger.info("没有可用的样本进行聚合")
                return []
            
            # 应用筛选条件
            filtered_samples = self._apply_filters(
                available_samples, difficulty_threshold, time_range, max_samples
            )
            
            self.logger.info(f"选择了 {len(filtered_samples)} 个样本用于聚合")
            return filtered_samples
            
        except Exception as e:
            self.logger.error(f"选择样本失败: {str(e)}")
            return []
    
    def _get_available_samples(self) -> List[str]:
        """获取可用的（已推理但未归集的）样本列表 - 基于images目录"""
        return self._get_images_samples()
    
    def _get_images_samples(self) -> List[str]:
        """获取datasets/images目录中的样本名称列表"""
        try:
            if not self.images_dir.exists():
                return []
            
            sample_names = []
            for ext in ['*.jpg', '*.jpeg', '*.png', '*.bmp']:
                for file_path in self.images_dir.glob(ext):
                    sample_names.append(file_path.stem)
            
            return sample_names
        except Exception as e:
            self.logger.error(f"获取images目录样本失败: {str(e)}")
            return []
    
    def _apply_filters(self, samples: List[str], 
                      difficulty_threshold: float = None,
                      time_range: Tuple[datetime, datetime] = None,
                      max_samples: int = None) -> List[str]:
        """应用筛选条件"""
        filtered_samples = samples.copy()
        
        # 难度阈值筛选
        if difficulty_threshold is not None:
            filtered_samples = self._filter_by_difficulty(filtered_samples, difficulty_threshold)
        
        # 时间范围筛选 - 基于推理时间而非文件时间
        if time_range is not None:
            filtered_samples = self._filter_by_inference_time(filtered_samples, time_range)
        
        # 最大样本数限制（优先选择困难样本）
        if max_samples is not None and len(filtered_samples) > max_samples:
            filtered_samples = self._select_top_samples(filtered_samples, max_samples)
        
        return filtered_samples
    
    def _filter_by_difficulty(self, samples: List[str], threshold: float) -> List[str]:
        """按难度阈值筛选样本"""
        filtered = []
        for sample_name in samples:
            conf_file = self.confidence_dir / f"{sample_name}.txt"
            try:
                confidence = self._read_confidence_from_file(conf_file)
                # 置信度低于阈值的为难样本
                if confidence <= threshold:
                    filtered.append(sample_name)
            except Exception as e:
                self.logger.warning(f"读取置信度失败: {conf_file}")
                continue
        return filtered
    
    def _filter_by_inference_time(self, samples: List[str], time_range: Tuple[datetime, datetime]) -> List[str]:
        """按推理时间范围筛选样本"""
        start_time, end_time = time_range
        filtered = []
        
        for sample_name in samples:
            conf_file = self.confidence_dir / f"{sample_name}.txt"
            try:
                inference_time = self._read_inference_time_from_file(conf_file)
                if inference_time and start_time <= inference_time <= end_time:
                    filtered.append(sample_name)
            except Exception as e:
                self.logger.warning(f"读取推理时间失败: {conf_file}")
                continue
        
        return filtered
    
    def _read_confidence_from_file(self, conf_file: Path) -> float:
        """从置信度文件读取置信度值"""
        try:
            with open(conf_file, 'r') as f:
                data = f.read().strip()
                
                # 尝试解析JSON格式（新格式）
                try:
                    conf_data = json.loads(data)
                    return float(conf_data.get("confidence", 0.0))
                except json.JSONDecodeError:
                    # 兼容旧格式（纯数字）
                    return float(data)
                    
        except Exception as e:
            self.logger.warning(f"读取置信度文件失败: {conf_file}")
            return 0.0
    
    def _read_inference_time_from_file(self, conf_file: Path) -> Optional[datetime]:
        """从置信度文件读取推理时间"""
        try:
            with open(conf_file, 'r') as f:
                data = f.read().strip()
                
                # 尝试解析JSON格式
                try:
                    conf_data = json.loads(data)
                    time_str = conf_data.get("inference_time")
                    if time_str:
                        return datetime.fromisoformat(time_str.replace('Z', '+00:00'))
                except json.JSONDecodeError:
                    # 旧格式没有时间信息，使用文件修改时间
                    return datetime.fromtimestamp(conf_file.stat().st_mtime)
                    
        except Exception as e:
            self.logger.warning(f"读取推理时间失败: {conf_file}")
            return None
    
    def _select_top_samples(self, samples: List[str], max_count: int) -> List[str]:
        """选择置信度最低的样本（最困难的样本）"""
        samples_with_conf = []
        for sample_name in samples:
            conf_file = self.confidence_dir / f"{sample_name}.txt"
            try:
                confidence = self._read_confidence_from_file(conf_file)
                samples_with_conf.append((sample_name, confidence))
            except:
                samples_with_conf.append((sample_name, 1.0))  # 默认高置信度
        
        # 按置信度升序排序（困难样本优先）
        samples_with_conf.sort(key=lambda x: x[1])
        return [name for name, _ in samples_with_conf[:max_count]]
    
    def package_and_upload_samples(self, sample_names: List[str]) -> Tuple[bool, Dict]:
        """打包并上传样本数据 - 新策略：上传后清空datasets"""
        if not sample_names:
            return True, {"message": "没有样本需要上传"}
        
        try:
            # 打包样本
            zip_path = self._package_samples(sample_names)
            if not zip_path:
                return False, {"error": "打包样本失败"}
            
            self.logger.info(f"样本已打包到: {zip_path}")
            
            # 将压缩包放置在sample_uploads目录，等待云端获取
            # 不立即上传，而是让云端来获取
            self._clear_datasets_after_upload(sample_names)
            
            return True, {
                "message": f"成功打包 {len(sample_names)} 个样本，等待云端获取",
                "samples": sample_names,
                "package_path": str(zip_path),
                "package_name": zip_path.name
            }
                
        except Exception as e:
            self.logger.error(f"打包样本失败: {str(e)}")
            return False, {"error": str(e)}
    
    def _clear_datasets_after_upload(self, sample_names: List[str]):
        """上传后清空datasets目录下的对应文件"""
        try:
            cleared_count = 0
            
            for sample_name in sample_names:
                # 清理图片文件
                image_file = self._find_image_file(sample_name)
                if image_file and image_file.exists():
                    image_file.unlink()
                    cleared_count += 1
                
                # 清理标注文件
                label_file = self.labels_dir / f"{sample_name}.txt"
                if label_file.exists():
                    label_file.unlink()
                
                # 清理置信度文件
                conf_file = self.confidence_dir / f"{sample_name}.txt"
                if conf_file.exists():
                    conf_file.unlink()
            
            self.logger.info(f"已清空datasets目录中的 {cleared_count} 个样本文件")
            
        except Exception as e:
            self.logger.error(f"清空datasets目录失败: {str(e)}")
    
    def _package_samples(self, sample_names: List[str]) -> Optional[Path]:
        """打包样本数据为ZIP文件 - 使用时间戳和服务类型命名"""
        try:
            # 创建ZIP文件 - 使用时间戳和running_service命名
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            service_type = config.edge_node.service_type.replace("（", "_").replace("）", "_").replace(" ", "_")
            zip_filename = f"{timestamp}_{service_type}_{len(sample_names)}_samples.zip"
            zip_path = self.sample_upload_dir / zip_filename
            
            with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
                copied_count = 0
                
                for sample_name in sample_names:
                    try:
                        # 添加图片文件
                        image_file = self._find_image_file(sample_name)
                        if image_file:
                            zipf.write(image_file, f"images/{image_file.name}")
                        else:
                            self.logger.warning(f"图片文件不存在: {sample_name}")
                            continue
                        
                        # 添加标注文件
                        label_file = self.labels_dir / f"{sample_name}.txt"
                        if label_file.exists():
                            zipf.write(label_file, f"labels/{sample_name}.txt")
                        
                        # 添加置信度文件
                        conf_file = self.confidence_dir / f"{sample_name}.txt"
                        if conf_file.exists():
                            zipf.write(conf_file, f"confidence/{sample_name}.txt")
                        
                        copied_count += 1
                        
                    except Exception as e:
                        self.logger.error(f"打包样本失败: {sample_name}, {str(e)}")
                        continue
            
            if copied_count == 0:
                self.logger.error("没有成功打包任何样本文件")
                zip_path.unlink(missing_ok=True)
                return None
            
            self.logger.info(f"成功打包 {copied_count} 个样本到: {zip_path}")
            return zip_path
            
        except Exception as e:
            self.logger.error(f"打包样本失败: {str(e)}")
            return None
    
    def _find_image_file(self, sample_name: str) -> Optional[Path]:
        """查找样本对应的图片文件"""
        for ext in ['.jpg', '.jpeg', '.png', '.bmp']:
            image_file = self.images_dir / f"{sample_name}{ext}"
            if image_file.exists():
                return image_file
        return None
    
        # 注意：新策略下不再需要跟踪已归集状态，因为上传后直接清空datasets目录
    
    def _analyze_sample_difficulty_from_files(self, confidence_files: List[Path]) -> Dict[str, int]:
        """从置信度文件分析样本难度分布"""
        difficulty_counts = {
            "very_easy": 0,    # > 0.9
            "easy": 0,         # 0.7 - 0.9
            "medium": 0,       # 0.5 - 0.7
            "hard": 0,         # 0.3 - 0.5
            "very_hard": 0     # < 0.3
        }
        
        thresholds = config.aggregation.difficulty_thresholds
        
        for conf_file in confidence_files:
            if not conf_file.exists():
                continue
                
            try:
                confidence = self._read_confidence_from_file(conf_file)
                
                if confidence >= thresholds["very_easy"]:
                    difficulty_counts["very_easy"] += 1
                elif confidence >= thresholds["easy"]:
                    difficulty_counts["easy"] += 1
                elif confidence >= thresholds["medium"]:
                    difficulty_counts["medium"] += 1
                elif confidence >= thresholds["hard"]:
                    difficulty_counts["hard"] += 1
                else:
                    difficulty_counts["very_hard"] += 1
                    
            except Exception as e:
                self.logger.warning(f"分析样本难度失败: {conf_file}")
                continue
        
        return difficulty_counts
    
    def cleanup_old_uploads(self, days: int = 7):
        """清理旧的上传文件"""
        try:
            cutoff_time = datetime.now() - timedelta(days=days)
            
            for file_path in self.sample_upload_dir.glob("*.zip"):
                try:
                    file_time = datetime.fromtimestamp(file_path.stat().st_mtime)
                    if file_time < cutoff_time:
                        file_path.unlink()
                        self.logger.debug(f"清理旧上传文件: {file_path}")
                except Exception as e:
                    self.logger.warning(f"清理文件失败: {file_path}, {str(e)}")
                    
        except Exception as e:
            self.logger.debug(f"清理旧上传文件失败: {str(e)}")

# Module-level singleton shared across the application.
sample_manager = SampleManager() 