# utils.py - 实用工具函数
"""
CSV数据集处理的实用工具函数集合
提供文件操作、数据验证、统计等辅助功能
"""

import os
import json
import hashlib
import mimetypes
from typing import List, Dict, Any, Optional, Tuple
from pathlib import Path
import pandas as pd


def get_file_hash(file_path: str, algorithm: str = "md5") -> str:
    """Return the hex digest of a file's contents.

    Args:
        file_path: Path of the file to hash.
        algorithm: Any algorithm name accepted by ``hashlib.new`` (default "md5").

    Returns:
        Hexadecimal digest string.
    """
    digest = hashlib.new(algorithm)
    # Stream the file in fixed-size chunks so large files don't load into memory.
    with open(file_path, 'rb') as handle:
        chunk = handle.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(4096)
    return digest.hexdigest()


def analyze_csv_structure(csv_path: str, delimiter: str = ",", max_rows: int = 1000) -> Dict[str, Any]:
    """Analyze the structure and content statistics of a CSV file.

    Reads at most ``max_rows`` rows as a sample and reports column names,
    dtypes, per-column missing-value counts, a few sample values, and an
    estimated total row count.

    BUG FIX: the previous version computed
    ``avg_bytes_per_row = size / len(df)`` and then
    ``estimated_total_rows = size / avg_bytes_per_row`` — algebraically
    always equal to the sample size.  The estimate is now derived from the
    actual byte length of the sampled lines.  (Note: rows containing quoted
    embedded newlines will skew the line-based estimate slightly.)

    Args:
        csv_path: Path of the CSV file to analyze.
        delimiter: Field delimiter used by the file.
        max_rows: Maximum number of rows to sample.

    Returns:
        Analysis dict; on failure a dict with an "error" key instead.
    """
    try:
        # Sample only the leading rows to keep analysis cheap on huge files.
        df = pd.read_csv(csv_path, delimiter=delimiter, nrows=max_rows)
        file_size = os.path.getsize(csv_path)

        analysis = {
            "file_path": csv_path,
            "file_size_mb": file_size / 1024 / 1024,
            "total_rows_sample": len(df),
            "total_columns": len(df.columns),
            "column_names": list(df.columns),
            "column_types": {},
            "missing_values": {},
            "sample_values": {},
            "estimated_total_rows": None
        }

        # Per-column dtype, null count and up to 3 non-null sample values.
        for col in df.columns:
            analysis["column_types"][col] = str(df[col].dtype)
            analysis["missing_values"][col] = int(df[col].isnull().sum())

            non_null_values = df[col].dropna()
            if len(non_null_values) > 0:
                sample_size = min(3, len(non_null_values))
                analysis["sample_values"][col] = list(non_null_values.head(sample_size).astype(str))
            else:
                analysis["sample_values"][col] = []

        # Estimate the total row count.
        if len(df) < max_rows:
            # The sample covered the whole file, so the count is exact.
            analysis["estimated_total_rows"] = len(df)
        elif len(df) > 0:
            # Measure the bytes consumed by the header plus the sampled data
            # lines, then extrapolate over the remaining file size.
            sampled_bytes = 0
            with open(csv_path, 'rb') as f:
                header_bytes = len(f.readline())
                for line_index, raw_line in enumerate(f):
                    if line_index >= len(df):
                        break
                    sampled_bytes += len(raw_line)
            if sampled_bytes > 0:
                avg_bytes_per_row = sampled_bytes / len(df)
                analysis["estimated_total_rows"] = int((file_size - header_bytes) / avg_bytes_per_row)

        return analysis

    except Exception as e:
        # Best-effort error report; the file may not even exist.
        return {
            "file_path": csv_path,
            "error": str(e),
            "file_size_mb": os.path.getsize(csv_path) / 1024 / 1024 if os.path.exists(csv_path) else 0
        }


def validate_csv_files(csv_files: List[str], delimiter: str = ",") -> Dict[str, Any]:
    """Validate that a set of CSV files is readable and column-consistent.

    Each file is analyzed via ``analyze_csv_structure``.  Column consistency
    is judged against both the union and the intersection of all column
    sets: a file is mismatched when it lacks columns present in other files
    (``missing_columns``) or carries columns not shared by every file
    (``extra_columns``).

    BUG FIXES over the previous version:
    * ``missing_columns`` was computed as ``intersection - columns_set``,
      which is always empty by definition; it is now ``union - columns_set``.
    * ``estimated_total_rows`` may be ``None`` (empty sample) and crashed
      the ``+=`` accumulation; ``None`` is now treated as 0.
    * ``zip(csv_files, all_columns_sets)`` misaligned file names with column
      sets whenever an unreadable file preceded valid ones; column sets are
      now tracked alongside the files they came from.

    Args:
        csv_files: Paths of CSV files to validate.
        delimiter: Field delimiter shared by the files.

    Returns:
        Validation summary dict (see keys below).
    """
    if not csv_files:
        return {"valid": False, "error": "没有提供CSV文件"}

    validation_result = {
        "valid": True,
        "total_files": len(csv_files),
        "valid_files": 0,
        "invalid_files": 0,
        "common_columns": None,
        "column_mismatches": [],
        "file_analyses": [],
        "total_estimated_rows": 0,
        "total_size_mb": 0
    }

    all_columns_sets = []
    analyzed_files = []  # files that parsed successfully, aligned with all_columns_sets

    for csv_file in csv_files:
        analysis = analyze_csv_structure(csv_file, delimiter=delimiter)
        validation_result["file_analyses"].append(analysis)
        validation_result["total_size_mb"] += analysis.get("file_size_mb", 0)

        if "error" in analysis:
            validation_result["invalid_files"] += 1
            validation_result["valid"] = False
        else:
            validation_result["valid_files"] += 1
            # estimated_total_rows may legitimately be None for an empty sample.
            validation_result["total_estimated_rows"] += analysis.get("estimated_total_rows") or 0
            all_columns_sets.append(set(analysis["column_names"]))
            analyzed_files.append(csv_file)

    # Check column-name consistency across the readable files.
    if all_columns_sets:
        common_columns = set.intersection(*all_columns_sets)
        union_columns = set.union(*all_columns_sets)
        validation_result["common_columns"] = sorted(common_columns)

        for csv_file, columns_set in zip(analyzed_files, all_columns_sets):
            missing = union_columns - columns_set   # present in other files, absent here
            extra = columns_set - common_columns    # not shared by every file
            if missing or extra:
                validation_result["column_mismatches"].append({
                    "file": csv_file,
                    "missing_columns": sorted(missing),
                    "extra_columns": sorted(extra)
                })

        if validation_result["column_mismatches"]:
            validation_result["valid"] = False

    return validation_result


def create_processing_report(csv_files: List[str], processing_stats: Dict[str, Any]) -> str:
    """Assemble a human-readable processing report for a CSV dataset run.

    The statistics, dataset-info, error and performance sections are only
    included when the corresponding data is present in ``processing_stats``.

    Args:
        csv_files: Paths of the processed CSV files.
        processing_stats: Accumulated run statistics.

    Returns:
        Multi-line report string.
    """
    divider = "=" * 80
    lines: List[str] = [
        divider,
        "CSV 数据集处理报告",
        divider,
        f"处理时间: {processing_stats.get('timestamp', 'N/A')}",
        "",
    ]

    # File section: count, combined size, then one line per file.
    lines.append("📁 文件信息:")
    lines.append(f"   处理文件数: {len(csv_files)}")
    combined_mb = sum(os.path.getsize(f) / 1024 / 1024 for f in csv_files if os.path.exists(f))
    lines.append(f"   总文件大小: {combined_mb:.2f} MB")
    lines.append("")

    for index, path in enumerate(csv_files, 1):
        size_mb = os.path.getsize(path) / 1024 / 1024 if os.path.exists(path) else 0
        lines.append(f"   {index}. {os.path.basename(path)} ({size_mb:.2f} MB)")
    lines.append("")

    # Processing statistics (skipped entirely when stats dict is empty).
    if processing_stats:
        lines.extend([
            "📊 处理统计:",
            f"   原始记录数: {processing_stats.get('original_records', 'N/A')}",
            f"   最终记录数: {processing_stats.get('final_records', 'N/A')}",
            f"   处理成功率: {processing_stats.get('success_rate', 'N/A')}%",
            f"   图片处理: {processing_stats.get('images_processed', 'N/A')} 张",
            f"   数据类型转换: {processing_stats.get('type_conversions', 'N/A')} 个字段",
            "",
        ])

    # Dataset info, when provided.
    if 'dataset_info' in processing_stats:
        info = processing_stats['dataset_info']
        lines.extend([
            "🗃️ 数据集信息:",
            f"   列数: {len(info.get('columns', []))}",
            f"   列名: {', '.join(info.get('columns', []))}",
            "",
        ])

    # Errors, when any were collected.
    if processing_stats.get('errors'):
        lines.append("⚠️ 处理错误:")
        lines.extend(f"   - {err}" for err in processing_stats['errors'])
        lines.append("")

    # Timing breakdown, when provided.
    if processing_stats.get('performance'):
        perf = processing_stats['performance']
        lines.extend([
            "⏱️ 性能信息:",
            f"   总处理时间: {perf.get('total_time', 'N/A')} 秒",
            f"   平均每条记录: {perf.get('time_per_record', 'N/A')} 毫秒",
            f"   数据加载时间: {perf.get('load_time', 'N/A')} 秒",
            f"   数据处理时间: {perf.get('process_time', 'N/A')} 秒",
            f"   上传时间: {perf.get('upload_time', 'N/A')} 秒",
            "",
        ])

    lines.append(divider)
    return "\n".join(lines)


def save_report(report: str, output_path: str):
    """Write a report string to *output_path* as UTF-8, creating parent dirs.

    BUG FIX: when ``output_path`` is a bare filename, ``os.path.dirname``
    returns ``""`` and ``os.makedirs("")`` raises ``FileNotFoundError``;
    the directory is now only created when there is one.

    Args:
        report: Text content to write.
        output_path: Destination file path.
    """
    parent_dir = os.path.dirname(output_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(report)


def backup_config(config_data: Dict[str, Any], backup_dir: str = "backups") -> str:
    """Dump *config_data* to a timestamped JSON backup file.

    Args:
        config_data: Configuration dict to serialize.
        backup_dir: Directory for backups; created if missing.

    Returns:
        Path of the backup file that was written.
    """
    from datetime import datetime

    os.makedirs(backup_dir, exist_ok=True)

    # One backup per call, distinguished by a second-resolution timestamp.
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    target_path = os.path.join(backup_dir, f"config_backup_{stamp}.json")

    with open(target_path, 'w', encoding='utf-8') as out:
        json.dump(config_data, out, indent=2, ensure_ascii=False)

    return target_path


def detect_csv_delimiter(file_path: str, sample_lines: int = 5) -> str:
    """Auto-detect the delimiter of a CSV file from its first lines.

    Each candidate delimiter is scored by its mean occurrence count per
    line (higher is better) and the variance of that count (lower is
    better — a real delimiter appears a consistent number of times per row).

    BUG FIX: the previous sampling comprehension called ``f.readline()``
    twice per iteration (once in the ``if`` condition, once in the
    expression), so it consumed two lines per loop, dropped the first line,
    and kept only every other one.  Lines are now read one at a time.

    Args:
        file_path: Path of the CSV file.
        sample_lines: Number of leading lines to inspect.

    Returns:
        The best-scoring delimiter, or "," when detection is inconclusive.
    """
    possible_delimiters = [',', '\t', ';', '|', ':']
    delimiter_scores = {}

    sampled = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for _ in range(sample_lines):
            line = f.readline()
            if not line:  # EOF reached before sample_lines lines
                break
            sampled.append(line.strip())

    if not sampled:
        return ','  # empty file: fall back to comma

    for delimiter in possible_delimiters:
        counts = [line.count(delimiter) for line in sampled if line]

        # A good delimiter appears often and a consistent number of times
        # per line, so rank by (mean, -variance).
        if counts and max(counts) > 0:
            mean_count = sum(counts) / len(counts)
            variance = sum((c - mean_count) ** 2 for c in counts) / len(counts)
            delimiter_scores[delimiter] = (mean_count, -variance)

    if delimiter_scores:
        return max(delimiter_scores, key=delimiter_scores.get)

    return ','  # no candidate occurred at all: fall back to comma


def check_image_column_format(csv_path: str, image_column: str = "image",
                             delimiter: str = ",", sample_size: int = 10) -> Dict[str, Any]:
    """Inspect the format of an image column in a CSV sample.

    Classifies each sampled cell as a data-URI ("data:..."), bare base64,
    or invalid, and reports the average string length plus up to five
    distinct data-URI prefixes.

    Args:
        csv_path: Path of the CSV file.
        image_column: Name of the column holding image strings.
        delimiter: Field delimiter of the file.
        sample_size: Maximum number of rows to inspect.

    Returns:
        Analysis dict; ``{"valid": False, "error": ...}`` on failure.
    """
    try:
        sample_df = pd.read_csv(csv_path, delimiter=delimiter, nrows=sample_size)

        if image_column not in sample_df.columns:
            return {
                "valid": False,
                "error": f"未找到图片列 '{image_column}'",
                "available_columns": list(sample_df.columns)
            }

        images = sample_df[image_column].dropna()
        if len(images) == 0:
            return {
                "valid": False,
                "error": "图片列中没有有效数据"
            }

        result = {
            "valid": True,
            "total_images": len(images),
            "has_data_prefix": 0,
            "pure_base64": 0,
            "invalid_format": 0,
            "avg_length": 0,
            "sample_prefixes": []
        }

        lengths = []
        seen_prefixes = set()

        for raw_value in images.head(sample_size):
            text = str(raw_value).strip()
            lengths.append(len(text))

            if text.startswith("data:"):
                # Data-URI: remember the part before the comma as a prefix sample.
                result["has_data_prefix"] += 1
                seen_prefixes.add(text.split(',')[0])
            elif len(text) > 50 and text.replace('+', '').replace('/', '').replace('=', '').isalnum():
                # Long alphanumeric string (after stripping +/=) looks like raw base64.
                result["pure_base64"] += 1
            else:
                result["invalid_format"] += 1

        result["avg_length"] = sum(lengths) / len(lengths) if lengths else 0
        result["sample_prefixes"] = list(seen_prefixes)[:5]  # show at most 5 prefix samples

        return result

    except Exception as e:
        return {
            "valid": False,
            "error": f"分析图片列失败: {str(e)}"
        }


def estimate_processing_time(total_records: int, avg_record_size: float = 1.0) -> Dict[str, float]:
    """Rough, experience-based estimate of per-stage processing time.

    Args:
        total_records: Number of records to process.
        avg_record_size: Average record size; larger records slow things down.

    Returns:
        Dict mapping stage name to estimated seconds, plus a "total" key.
    """
    # Empirical throughput figures (records per second).
    base_processing_rate = 100
    image_processing_rate = 20
    upload_rate = 50

    # Larger records process more slowly; the slowdown factor is capped at 2.0.
    size_factor = min(2.0, avg_record_size / 10.0)

    estimates = {
        "data_loading": total_records / (base_processing_rate * 2),  # loading is comparatively fast
        "image_processing": total_records / (image_processing_rate / size_factor),
        "type_conversion": total_records / (base_processing_rate * 1.5),
        "schema_casting": total_records / (base_processing_rate * 3),
        "upload": total_records / (upload_rate / size_factor),
    }

    # Grand total over all stages, added last so it isn't summed into itself.
    estimates["total"] = sum(estimates.values())

    return estimates


def format_time_duration(seconds: float) -> str:
    """Render a duration in seconds as a human-readable string.

    Args:
        seconds: Duration in seconds.

    Returns:
        "X.X 秒" under a minute, "M 分 S 秒" under an hour,
        otherwise "H 小时 M 分".
    """
    if seconds >= 3600:
        whole_hours = int(seconds // 3600)
        leftover_minutes = int((seconds % 3600) // 60)
        return f"{whole_hours} 小时 {leftover_minutes} 分"
    if seconds >= 60:
        whole_minutes = int(seconds // 60)
        remainder = seconds % 60
        return f"{whole_minutes} 分 {remainder:.0f} 秒"
    return f"{seconds:.1f} 秒"


def create_dataset_preview(dataset, num_samples: int = 3) -> Dict[str, Any]:
    """Build a lightweight preview of a dataset-like object.

    ``dataset`` must support ``len()``, integer indexing that yields a
    record dict, and expose ``column_names`` (e.g. a HuggingFace
    ``Dataset`` — assumed from usage; confirm against the caller).

    The preview includes up to ``num_samples`` sample records (image bytes
    and long strings abbreviated) and per-column null/unique counts over
    the first 100 records; numeric columns additionally get min/max/avg.

    BUG FIX: the per-column statistics used a bare ``except:``, which also
    swallowed ``KeyboardInterrupt``/``SystemExit``; it now catches only
    ``Exception``.

    Args:
        dataset: Dataset-like object as described above.
        num_samples: Maximum number of sample records to include.

    Returns:
        Preview dict, or ``{"empty": True}`` for an empty dataset.
    """
    if len(dataset) == 0:
        return {"empty": True}

    preview = {
        "total_records": len(dataset),
        "columns": list(dataset.column_names),
        "sample_records": [],
        "column_stats": {}
    }

    # Sample records, abbreviating bulky values for display.
    sample_size = min(num_samples, len(dataset))
    for i in range(sample_size):
        record = dataset[i]
        sample_record = {}

        for col, value in record.items():
            if col == "image" and isinstance(value, dict) and "bytes" in value:
                # For image cells, show only the byte length.
                sample_record[col] = f"<Image bytes: {len(value['bytes'])}>"
            elif isinstance(value, str) and len(value) > 100:
                # Truncate long strings for readability.
                sample_record[col] = value[:100] + "..."
            else:
                sample_record[col] = value

        preview["sample_records"].append(sample_record)

    # Column statistics over the first (up to) 100 records.
    for col in dataset.column_names:
        if col == "image":
            continue  # skip the image column

        try:
            values = [dataset[i][col] for i in range(min(100, len(dataset)))]
            non_null_values = [v for v in values if v is not None and v != ""]

            preview["column_stats"][col] = {
                "non_null_count": len(non_null_values),
                "null_count": len(values) - len(non_null_values),
                "unique_count": len(set(str(v) for v in non_null_values)) if non_null_values else 0
            }

            # Numeric columns additionally get min/max/avg.
            if non_null_values and all(isinstance(v, (int, float)) for v in non_null_values):
                preview["column_stats"][col].update({
                    "min": min(non_null_values),
                    "max": max(non_null_values),
                    "avg": sum(non_null_values) / len(non_null_values)
                })
        except Exception:  # was a bare except; keep best-effort behavior
            preview["column_stats"][col] = {"error": "无法计算统计信息"}

    return preview


def cleanup_temp_files(temp_dir: str = "temp", max_age_hours: int = 24):
    """Delete files older than ``max_age_hours`` under ``temp_dir``.

    BUG FIX: the previous version returned ``None`` when ``temp_dir`` did
    not exist but a list otherwise; it now consistently returns a list
    (empty when there is nothing to do).

    Args:
        temp_dir: Directory tree to sweep.
        max_age_hours: Files with an mtime older than this are removed.

    Returns:
        List of paths of the files that were removed.
    """
    if not os.path.exists(temp_dir):
        return []

    import time
    now = time.time()
    max_age_seconds = max_age_hours * 3600

    cleaned_files = []

    for root, _dirs, files in os.walk(temp_dir):
        for name in files:
            file_path = os.path.join(root, name)
            try:
                if now - os.path.getmtime(file_path) > max_age_seconds:
                    os.remove(file_path)
                    cleaned_files.append(file_path)
            except OSError:
                continue  # file vanished or is inaccessible; skip it

    return cleaned_files