# demo.py (for Datasets v3.x)
"""
CSV数据集处理和上传工具 - 增强版
支持单个文件或目录中的多个CSV文件处理
包含结构分析、一致性验证、时间估算等实用功能
"""

import base64
import os
import glob
import time
from datetime import datetime
from typing import List, Dict, Any
from datasets import load_dataset, Features, Value, Image, concatenate_datasets

# 导入配置
from config import (
    CSV_PATH, RECURSIVE_SEARCH, CSV_DELIMITER, CSV_ENCODING, AUTO_DETECT_DELIMITER,
    IMAGE_COLUMN, INTEGER_COLUMNS, FLOAT_COLUMNS, STRING_COLUMNS,
    HUB_REPO, MAX_SHARD_SIZE, PRIVATE_REPO,
    VERBOSE, SHOW_FIRST_IMAGE, SHOW_PROGRESS,
    DATASET_FEATURES, STRIP_DATA_PREFIX, ERROR_HANDLING,
    ANALYZE_CSV_STRUCTURE, VALIDATE_CSV_CONSISTENCY, GENERATE_REPORT,
    REPORT_OUTPUT_PATH, BACKUP_CONFIG, BACKUP_DIR,
    SHOW_DATASET_PREVIEW, PREVIEW_SAMPLE_SIZE,
    ESTIMATE_PROCESSING_TIME, AUTO_CLEANUP_TEMP, TEMP_FILE_MAX_AGE_HOURS, TEMP_DIR,
    NETWORK_MAX_RETRIES, CONNECTION_TIMEOUT, PRE_UPLOAD_NETWORK_CHECK,
    validate_config, get_config_summary
)

# 导入实用工具
from utils import (
    get_file_hash, analyze_csv_structure, validate_csv_files,
    create_processing_report, save_report, backup_config,
    detect_csv_delimiter, check_image_column_format,
    estimate_processing_time as estimate_time, format_time_duration,
    create_dataset_preview, cleanup_temp_files
)

# 导入网络工具
from simple_network_utils import (
    upload_with_network_check, test_basic_connectivity,
    get_simple_recommendations, create_simple_network_test
)

def log(message: str, force: bool = False):
    """Write *message* to stdout; suppressed unless VERBOSE or *force* is set."""
    should_print = VERBOSE or force
    if should_print:
        print(message)

def get_csv_files(path: str, recursive: bool = False) -> List[str]:
    """Return a sorted list of CSV paths for *path*.

    *path* may be a single ``.csv`` file or a directory; with
    ``recursive=True`` a directory is searched including subdirectories.

    Raises ValueError when the path does not exist, is a non-CSV file,
    or the directory contains no CSV files.
    """
    # Single-file case: accept only files with a .csv extension.
    if os.path.isfile(path):
        if not path.lower().endswith('.csv'):
            raise ValueError(f"文件 {path} 不是 CSV 文件")
        return [path]

    # Anything that is neither a file nor a directory is invalid.
    if not os.path.isdir(path):
        raise ValueError(f"路径 {path} 不存在或不是有效的文件/目录")

    # Directory case: one glob pattern covers both search modes.
    if recursive:
        pattern = os.path.join(path, "**", "*.csv")
    else:
        pattern = os.path.join(path, "*.csv")
    matches = glob.glob(pattern, recursive=recursive)

    if not matches:
        search_type = "递归" if recursive else "当前目录"
        raise ValueError(f"在 {path} 的{search_type}中没有找到 CSV 文件")

    # Sorted output keeps the processing order deterministic.
    return sorted(matches)

def get_file_info(file_path: str) -> Dict[str, Any]:
    """Return basic metadata for *file_path*.

    Keys:
        'path'    -- the path as given.
        'size_mb' -- file size in megabytes (float).
        'name'    -- basename of the path.
        'hash'    -- content hash from get_file_hash(), or the literal
                     string "large_file" for files of 100 MB or more
                     (hashing those would be too slow).
    """
    stat = os.stat(file_path)
    # Reuse st_size from the single stat() call; the original issued a
    # redundant os.path.getsize() (a second stat of the same path).
    size_bytes = stat.st_size
    return {
        'path': file_path,
        'size_mb': size_bytes / 1024 / 1024,
        'name': os.path.basename(file_path),
        'hash': get_file_hash(file_path) if size_bytes < 100 * 1024 * 1024 else "large_file",
    }

def load_csv_datasets(csv_files: List[str], delimiter: str = ",") -> Any:
    """Load one or more CSV files and return a single Dataset.

    Each file is loaded as a "train" split via datasets.load_dataset.
    Per-file failures follow ERROR_HANDLING ("strict" raises immediately,
    "warn" logs and continues); failed files are collected and reported.
    Multiple successful datasets are merged with concatenate_datasets; if
    merging fails in non-strict mode, the first dataset is returned as a
    fallback.

    Raises:
        ValueError: when no file loads successfully, on the first load
            failure in strict mode, or on a merge failure in strict mode.
    """
    datasets = []
    failed_files = []

    for i, csv_file in enumerate(csv_files, 1):
        file_info = get_file_info(csv_file)
        log(f"[{i}/{len(csv_files)}] 正在加载: {file_info['name']} ({file_info['size_mb']:.2f} MB)")

        try:
            # Only forward a delimiter when it differs from the default comma.
            load_params = {
                "data_files": {"train": csv_file},
                "split": "train"
            }
            if delimiter != ",":
                load_params["delimiter"] = delimiter

            ds = load_dataset("csv", **load_params)
            datasets.append(ds)
            log(f"    ✓ 成功加载 {len(ds)} 条记录")

        except Exception as e:
            error_msg = f"加载文件 {csv_file} 失败: {e}"
            if ERROR_HANDLING == "strict":
                # Chain the original exception so the root cause is visible.
                raise ValueError(error_msg) from e
            elif ERROR_HANDLING == "warn":
                log(f"    ⚠ 警告: {error_msg}")
            failed_files.append((csv_file, str(e)))
            continue

    if not datasets:
        error_msg = "没有成功加载任何 CSV 文件。失败详情:\n"
        for file, error in failed_files:
            error_msg += f"  - {file}: {error}\n"
        raise ValueError(error_msg)

    if failed_files:
        log(f"注意: {len(failed_files)} 个文件加载失败", force=True)

    if len(datasets) == 1:
        return datasets[0]

    # Merge the multiple datasets into one.
    log(f"正在合并 {len(datasets)} 个数据集...")
    try:
        merged_ds = concatenate_datasets(datasets)
        log(f"✓ 合并完成，总共 {len(merged_ds)} 条记录")
        return merged_ds
    except Exception as e:
        # Most likely cause: the files do not share the same column schema.
        error_msg = f"数据集合并失败，可能是因为列结构不一致: {e}"
        if ERROR_HANDLING == "strict":
            raise ValueError(error_msg) from e
        else:
            # Non-strict mode: degrade gracefully to the first dataset.
            log(f"⚠ 警告: {error_msg}")
            log("将使用第一个数据集继续处理")
            return datasets[0]

def b64_to_bytes(ex: Dict[str, Any]) -> Dict[str, Any]:
    """Replace the base64 string in ex[IMAGE_COLUMN] with {"bytes": decoded}.

    When STRIP_DATA_PREFIX is set, a leading "data:...;base64," URI prefix
    is removed first.  On decode failure: raises ValueError in strict mode,
    otherwise stores empty bytes so processing can continue.
    """
    s = ex.get(IMAGE_COLUMN) or ""
    if STRIP_DATA_PREFIX and s.startswith("data:"):
        # partition() is safe when the "data:" prefix has no comma — it
        # yields "" (decoding to b"").  The previous s.split(",", 1)[1]
        # raised an uncaught IndexError on such malformed values.
        b64 = s.partition(",")[2]
    else:
        b64 = s

    try:
        ex[IMAGE_COLUMN] = {"bytes": base64.b64decode(b64)}
    except Exception as e:
        if ERROR_HANDLING == "strict":
            raise ValueError(f"base64解码失败: {e}") from e
        else:
            # Non-strict mode: substitute empty bytes rather than aborting.
            ex[IMAGE_COLUMN] = {"bytes": b""}

    return ex

def to_types(ex: Dict[str, Any]) -> Dict[str, Any]:
    """Coerce configured columns of *ex* in place; unparseable values become None.

    Columns in INTEGER_COLUMNS are converted to int (with a second chance
    via float for values like "3.0"); columns in FLOAT_COLUMNS to float.
    None, "", "null" and "NULL" all map to None.  String columns are left
    untouched.
    """
    def _to_int(x):
        if x in (None, "", "null", "NULL"):
            return None
        try:
            return int(x)
        except (ValueError, TypeError):
            try:
                # Second chance for numeric strings like "3.0".
                # Narrowed from a bare except: that swallowed
                # KeyboardInterrupt/SystemExit; OverflowError covers
                # int(float("inf")).
                return int(float(x))
            except (ValueError, TypeError, OverflowError):
                return None

    def _to_float(x):
        if x in (None, "", "null", "NULL"):
            return None
        try:
            return float(x)
        except (ValueError, TypeError):
            return None

    # Convert integer columns.
    for col in INTEGER_COLUMNS:
        if col in ex:
            ex[col] = _to_int(ex.get(col))

    # Convert float columns.
    for col in FLOAT_COLUMNS:
        if col in ex:
            ex[col] = _to_float(ex.get(col))

    # String columns pass through unchanged (already strings).
    return ex

def create_features_schema() -> Features:
    """Build a datasets.Features schema from the DATASET_FEATURES config.

    "image" maps to datasets.Image(); every other type string ("int64",
    "float64", "string", ...) is passed straight to datasets.Value().
    The original if/elif chain had three branches identical to its else
    clause, so it collapses to a single expression.
    """
    features = {
        col_name: Image() if col_type == "image" else Value(col_type)
        for col_name, col_type in DATASET_FEATURES.items()
    }
    return Features(features)

def main():
    """Run the full pipeline: find CSVs, analyze, load, transform, upload.

    Returns:
        int: process exit code — 0 on full success, 1 when a fatal step
        fails or any error was recorded during processing.
    """
    start_time = time.time()
    processing_stats = {
        'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        'start_time': start_time,
        'errors': [],
        'performance': {}
    }

    print("=" * 60)
    print("CSV 数据集处理和上传工具 - 增强版")
    print("=" * 60)

    # Clean up stale temporary files (best-effort; failures only logged).
    if AUTO_CLEANUP_TEMP and os.path.exists(TEMP_DIR):
        try:
            cleaned = cleanup_temp_files(TEMP_DIR, TEMP_FILE_MAX_AGE_HOURS)
            if cleaned:
                log(f"🧹 清理了 {len(cleaned)} 个临时文件")
        except Exception as e:
            log(f"⚠ 清理临时文件失败: {e}")

    # Validate configuration: hard errors abort, warnings are just logged.
    config_errors, config_warnings = validate_config()
    if config_errors:
        print("❌ 配置验证失败:")
        for error in config_errors:
            print(f"   - {error}")
        return 1

    if config_warnings:
        log("⚠️ 配置警告:")
        for warning in config_warnings:
            log(f"   - {warning}")

    log("✓ 配置验证通过")

    # Back up the current configuration (best-effort).
    if BACKUP_CONFIG:
        try:
            config_data = get_config_summary()
            backup_path = backup_config(config_data, BACKUP_DIR)
            log(f"✓ 配置已备份到: {backup_path}")
        except Exception as e:
            log(f"⚠ 配置备份失败: {e}")

    # Collect the list of CSV files to process.
    try:
        log("\n📁 正在搜索 CSV 文件...")
        csv_files = get_csv_files(CSV_PATH, recursive=RECURSIVE_SEARCH)

        log(f"✓ 在路径 '{CSV_PATH}' 中找到 {len(csv_files)} 个 CSV 文件:")
        total_size = 0
        for i, file in enumerate(csv_files, 1):
            file_info = get_file_info(file)
            total_size += file_info['size_mb']
            log(f"   {i}. {file_info['name']} ({file_info['size_mb']:.2f} MB)")

        log(f"   总大小: {total_size:.2f} MB")
        processing_stats['total_files'] = len(csv_files)
        processing_stats['total_size_mb'] = total_size

    except Exception as e:
        print(f"❌ 获取 CSV 文件失败: {e}")
        processing_stats['errors'].append(f"获取CSV文件失败: {e}")
        return 1

    # Analyze CSV structure (only the first 3 files, to save time).
    if ANALYZE_CSV_STRUCTURE:
        log(f"\n🔍 正在分析 CSV 文件结构...")
        for csv_file in csv_files[:3]:
            try:
                delimiter = detect_csv_delimiter(csv_file) if AUTO_DETECT_DELIMITER else CSV_DELIMITER
                analysis = analyze_csv_structure(csv_file, delimiter=delimiter)
                if 'error' not in analysis:
                    log(f"   📊 {os.path.basename(csv_file)}:")
                    log(f"      - 列数: {analysis['total_columns']}")
                    log(f"      - 估计行数: {analysis.get('estimated_total_rows', 'N/A')}")
                    log(f"      - 列名: {', '.join(analysis['column_names'][:5])}{'...' if len(analysis['column_names']) > 5 else ''}")
                else:
                    log(f"   ⚠ 分析失败 {os.path.basename(csv_file)}: {analysis['error']}")
            except Exception as e:
                log(f"   ⚠ 分析失败 {os.path.basename(csv_file)}: {e}")

    # Verify that the CSV files share a consistent column structure.
    if VALIDATE_CSV_CONSISTENCY and len(csv_files) > 1:
        log(f"\n🔧 正在验证 CSV 文件一致性...")
        try:
            delimiter = detect_csv_delimiter(csv_files[0]) if AUTO_DETECT_DELIMITER else CSV_DELIMITER
            validation = validate_csv_files(csv_files, delimiter=delimiter)

            if validation['valid']:
                log(f"   ✓ 所有文件结构一致")
                log(f"   ✓ 共同列数: {len(validation['common_columns'])}")
            else:
                log(f"   ⚠ 发现 {len(validation['column_mismatches'])} 个文件结构不一致")
                for mismatch in validation['column_mismatches'][:3]:  # only show the first 3
                    log(f"      - {os.path.basename(mismatch['file'])}")
        except Exception as e:
            log(f"   ⚠ 一致性验证失败: {e}")

    # Spot-check the image column format on the first file.
    if csv_files:
        log(f"\n🖼️  正在检查图片列格式...")
        try:
            delimiter = detect_csv_delimiter(csv_files[0]) if AUTO_DETECT_DELIMITER else CSV_DELIMITER
            img_check = check_image_column_format(csv_files[0], IMAGE_COLUMN, delimiter)

            if img_check['valid']:
                log(f"   ✓ 图片列格式检查通过")
                log(f"   - 带前缀数据: {img_check['has_data_prefix']} 条")
                log(f"   - 纯base64数据: {img_check['pure_base64']} 条")
                log(f"   - 平均长度: {img_check['avg_length']:.0f} 字符")
            else:
                log(f"   ⚠ 图片列检查失败: {img_check['error']}")
        except Exception as e:
            log(f"   ⚠ 图片列检查失败: {e}")

    # Rough estimate of the total processing time.
    if ESTIMATE_PROCESSING_TIME:
        log(f"\n⏱️  正在估算处理时间...")
        try:
            total_estimated_records = sum(
                analyze_csv_structure(f, CSV_DELIMITER).get('estimated_total_rows', 100)
                for f in csv_files
            )
            time_estimates = estimate_time(total_estimated_records, total_size / len(csv_files))

            log(f"   预计总记录数: {total_estimated_records}")
            log(f"   预计处理时间: {format_time_duration(time_estimates['total'])}")
            log(f"   - 数据加载: {format_time_duration(time_estimates['data_loading'])}")
            log(f"   - 图片处理: {format_time_duration(time_estimates['image_processing'])}")
            log(f"   - 数据上传: {format_time_duration(time_estimates['upload'])}")
        except Exception as e:
            log(f"   ⚠ 时间估算失败: {e}")

    # Decide the final CSV delimiter (auto-detected value wins over config).
    final_delimiter = CSV_DELIMITER
    if AUTO_DETECT_DELIMITER and csv_files:
        try:
            detected_delimiter = detect_csv_delimiter(csv_files[0])
            if detected_delimiter != CSV_DELIMITER:
                log(f"\n🔍 自动检测到分隔符: {repr(detected_delimiter)} (配置中为: {repr(CSV_DELIMITER)})")
                final_delimiter = detected_delimiter
        except Exception as e:
            log(f"⚠ 分隔符自动检测失败，使用配置值: {e}")

    # Load the dataset(s).
    load_start_time = time.time()
    try:
        log(f"\n📊 正在加载数据集... (分隔符: {repr(final_delimiter)})")
        ds = load_csv_datasets(csv_files, delimiter=final_delimiter)
        processing_stats['original_records'] = len(ds)
    except Exception as e:
        print(f"❌ 加载数据集失败: {e}")
        processing_stats['errors'].append(f"加载数据集失败: {e}")
        return 1

    processing_stats['performance']['load_time'] = time.time() - load_start_time

    # Decode base64 image data.
    process_start_time = time.time()
    try:
        log(f"\n🖼️  正在处理图片数据 (列: {IMAGE_COLUMN})...")
        if SHOW_PROGRESS:
            ds = ds.map(b64_to_bytes, desc="处理图片")
        else:
            ds = ds.map(b64_to_bytes)
        log("✓ 图片数据处理完成")
        processing_stats['images_processed'] = len(ds)
    except Exception as e:
        print(f"❌ 图片数据处理失败: {e}")
        processing_stats['errors'].append(f"图片数据处理失败: {e}")
        return 1

    # Normalize column data types.
    try:
        log(f"\n🔧 正在清洗数据类型...")
        if SHOW_PROGRESS:
            ds = ds.map(to_types, desc="类型转换")
        else:
            ds = ds.map(to_types)
        log("✓ 数据类型清洗完成")
        processing_stats['type_conversions'] = len(INTEGER_COLUMNS) + len(FLOAT_COLUMNS)
    except Exception as e:
        print(f"❌ 数据类型清洗失败: {e}")
        processing_stats['errors'].append(f"数据类型清洗失败: {e}")
        return 1

    # Cast the dataset to the configured feature schema.
    try:
        log(f"\n📋 正在设置数据集模式...")
        features = create_features_schema()
        ds = ds.cast(features)
        log("✓ 数据集模式设置完成")
    except Exception as e:
        print(f"❌ 数据集模式设置失败: {e}")
        processing_stats['errors'].append(f"数据集模式设置失败: {e}")
        return 1

    processing_stats['performance']['process_time'] = time.time() - process_start_time
    processing_stats['final_records'] = len(ds)
    # Guard against empty input datasets: the unguarded division previously
    # raised ZeroDivisionError when original_records was 0.
    if processing_stats['original_records'] > 0:
        processing_stats['success_rate'] = (processing_stats['final_records'] / processing_stats['original_records']) * 100
    else:
        processing_stats['success_rate'] = 0.0
    processing_stats['dataset_info'] = {
        'columns': list(ds.column_names),
        'features': str(ds.features)
    }

    # Report processing results.
    log(f"\n✅ 数据集处理完成!")
    log(f"   总记录数: {len(ds)}")
    log(f"   数据集列: {list(ds.column_names)}")
    log(f"   处理成功率: {processing_stats['success_rate']:.1f}%")

    # Optional dataset preview.
    if SHOW_DATASET_PREVIEW and len(ds) > 0:
        log(f"\n📋 数据集预览:")
        try:
            preview = create_dataset_preview(ds, PREVIEW_SAMPLE_SIZE)
            log(f"   总记录数: {preview['total_records']}")
            log(f"   列统计:")
            for col, stats in preview['column_stats'].items():
                if 'error' not in stats:
                    log(f"      {col}: {stats['non_null_count']} 非空, {stats['unique_count']} 唯一值")
        except Exception as e:
            log(f"⚠ 无法生成预览: {e}")

    if SHOW_FIRST_IMAGE and len(ds) > 0:
        log(f"\n🔍 第一条记录的图片信息:")
        try:
            first_image = ds[0][IMAGE_COLUMN]
            if isinstance(first_image, dict) and 'bytes' in first_image:
                log(f"   图片字节长度: {len(first_image['bytes'])}")
            else:
                print(first_image)
        except Exception as e:
            log(f"⚠ 无法显示图片信息: {e}")

    # Upload to the Hugging Face Hub.
    upload_start_time = time.time()
    try:
        log(f"\n🚀 准备上传到 Hugging Face Hub: {HUB_REPO}")
        log(f"   最大分片大小: {MAX_SHARD_SIZE}")
        log(f"   私有仓库: {PRIVATE_REPO}")

        # Enhanced upload helper with a built-in network retry mechanism.
        upload_with_network_check(
            dataset=ds,
            repo_id=HUB_REPO,
            max_shard_size=MAX_SHARD_SIZE,
            private=PRIVATE_REPO,
            skip_network_check=not PRE_UPLOAD_NETWORK_CHECK
        )

        processing_stats['performance']['upload_time'] = time.time() - upload_start_time

        print(f"\n🎉 上传完成! 数据集已发布到:")
        print(f"   https://huggingface.co/datasets/{HUB_REPO}")

    except Exception as e:
        processing_stats['performance']['upload_time'] = time.time() - upload_start_time
        print(f"\n❌ 上传到 Hub 失败: {e}")
        processing_stats['errors'].append(f"上传失败: {e}")

        # Run a basic network diagnosis to help the user.
        log(f"\n🔍 进行错误诊断...")
        try:
            connectivity = test_basic_connectivity()
            recommendations = get_simple_recommendations(connectivity)

            log(f"网络诊断结果:")
            for rec in recommendations:
                log(f"   {rec}")

        except Exception as diag_error:
            log(f"⚠ 网络诊断失败: {diag_error}")

        # Drop a standalone network-test script for the user (best-effort).
        try:
            create_simple_network_test()
        except Exception:
            pass

        log(f"\n💡 常见解决方案:")
        log(f"1. 运行 python simple_network_test.py 测试网络")
        log(f"2. 重新登录: huggingface-cli login")
        log(f"3. 检查仓库权限和名称格式")
        log(f"4. 如果使用代理，尝试关闭或更换代理")
        log(f"5. 减小分片大小: MAX_SHARD_SIZE = '50MB'")
        log(f"6. 稍后在网络较好时重试")

    # Final performance statistics.
    total_time = time.time() - start_time
    processing_stats['performance']['total_time'] = total_time

    if processing_stats['final_records'] > 0:
        processing_stats['performance']['time_per_record'] = (total_time / processing_stats['final_records']) * 1000  # milliseconds

    log(f"\n⏱️  性能统计:")
    log(f"   总处理时间: {format_time_duration(total_time)}")
    if processing_stats['final_records'] > 0:
        log(f"   平均每条记录: {processing_stats['performance']['time_per_record']:.2f} 毫秒")

    # Optional processing report.
    if GENERATE_REPORT:
        try:
            log(f"\n📄 正在生成处理报告...")
            report = create_processing_report(csv_files, processing_stats)
            save_report(report, REPORT_OUTPUT_PATH)
            log(f"✓ 报告已保存到: {REPORT_OUTPUT_PATH}")
        except Exception as e:
            log(f"⚠ 报告生成失败: {e}")

    # Exit code: non-zero when any error was recorded.
    return 1 if processing_stats['errors'] else 0

if __name__ == "__main__":
    # raise SystemExit propagates the exit code without relying on the
    # site-provided exit() helper, which is absent under `python -S` or
    # in frozen/embedded interpreters.
    raise SystemExit(main())