import os
import re
from collections import defaultdict
from datetime import datetime

def collect_dat_files(root_dir):
    """Scan a two-level brand/model directory tree for .dat files.

    Expected layout: ``root_dir/<brand>/<model>/*.dat``. Entries that are
    not directories are skipped at both levels; the extension match is
    case-insensitive.

    Args:
        root_dir: path of the root folder to scan.

    Returns:
        (info, message): ``info`` maps brand -> model -> list of file paths
        (a brand/model pair appears only if it holds at least one .dat
        file), or ``None`` with an error message when ``root_dir`` does
        not exist.
    """
    if not os.path.exists(root_dir):
        return None, f"根目录不存在: {root_dir}"

    dat_files_info = defaultdict(lambda: defaultdict(list))

    for brand in os.listdir(root_dir):
        brand_path = os.path.join(root_dir, brand)
        if not os.path.isdir(brand_path):
            continue
        for model in os.listdir(brand_path):
            model_path = os.path.join(brand_path, model)
            if not os.path.isdir(model_path):
                continue
            matches = [
                os.path.join(model_path, entry)
                for entry in os.listdir(model_path)
                if entry.lower().endswith('.dat')
            ]
            # Only touch the defaultdict when there is something to record,
            # so empty models/brands are not auto-created by the lookup.
            if matches:
                dat_files_info[brand][model].extend(matches)

    total_brands = len(dat_files_info)
    total_models = sum(len(models) for models in dat_files_info.values())
    total_files = sum(
        len(paths)
        for models in dat_files_info.values()
        for paths in models.values()
    )

    return dat_files_info, (
        f"成功收集DAT文件信息: {total_brands}个品牌, "
        f"{total_models}个型号, {total_files}个文件"
    )

def extract_ro_product_name(file_path):
    """Extract the ro.product.name value from a DAT file's contents.

    Reads the file as bytes and attempts several decodings. BUGFIX: the
    original list tried 'latin-1' before 'utf-16'; latin-1 accepts every
    byte sequence, so utf-16 was unreachable and UTF-16 encoded files
    decoded to NUL-riddled text that never matched the regex. latin-1 is
    now the last-resort fallback.

    Falls back to ro.product.model when ro.product.name is absent; the
    fallback value is prefixed with "[model_fallback]".

    Args:
        file_path: path of the DAT file to read.

    Returns:
        (value, None) on success, or (None, error message) on failure.
    """
    try:
        # Read the whole file in binary mode; decoding is attempted below.
        with open(file_path, 'rb') as f:
            content = f.read()

        # latin-1 must come last: it decodes any byte sequence, so every
        # encoding listed after it would never be tried.
        encodings = ['utf-8', 'gbk', 'utf-16', 'latin-1']
        decoded_content = None

        for encoding in encodings:
            try:
                decoded_content = content.decode(encoding)
                break  # first successful decode wins
            except UnicodeDecodeError:
                continue

        # Defensive: with latin-1 as fallback this should not trigger.
        if decoded_content is None:
            return None, "无法解码文件内容"

        # Flexible match: optional quotes around key/value, ':' or '='.
        pattern = r'ro\.product\.name["\']?\s*[:=]\s*["\']?([^\s,}]+)'
        match = re.search(pattern, decoded_content)

        if match:
            # Strip any trailing/leading quotes captured by the loose group.
            name = match.group(1).strip('"\'')
            return name, None

        # Fallback: search for ro.product.model instead.
        model_pattern = r'ro\.product\.model["\']?\s*[:=]\s*["\']?([^\s,}]+)'
        model_match = re.search(model_pattern, decoded_content)
        if model_match:
            model_name = model_match.group(1).strip('"\'')
            return f"[model_fallback]{model_name}", None
        return None, "未找到ro.product.name或ro.product.model"

    except Exception as e:
        return None, f"文件处理错误: {str(e)}"

def parse_all_dat_files(dat_files_info):
    """Parse every collected DAT file and extract its product-name key.

    Args:
        dat_files_info: mapping brand -> model -> list of DAT file paths,
            as produced by collect_dat_files.

    Returns:
        A 4-tuple (parse_results, message, total_files, error_count).
        parse_results maps brand -> model -> list of dicts with keys
        "file_path", "key" and "error"; it is None when no input was given.
        BUGFIX: the failure branch now also returns four items — the
        original returned a 2-tuple, which made the caller's 4-value
        unpacking raise ValueError instead of showing the error message.
    """
    if not dat_files_info:
        # Keep the tuple shape identical to the success path.
        return None, "未提供DAT文件信息", 0, 0

    # Parsed entries per brand/model: {brand: {model: [entry, ...]}}
    parse_results = defaultdict(lambda: defaultdict(list))
    error_count = 0
    total_files = 0

    for brand, models in dat_files_info.items():
        for model, file_paths in models.items():
            for file_path in file_paths:
                total_files += 1
                key, error = extract_ro_product_name(file_path)

                parse_results[brand][model].append({
                    "file_path": file_path,
                    "key": key,
                    "error": error
                })

                if error:
                    error_count += 1

    success_rate = (total_files - error_count) / total_files * 100 if total_files > 0 else 0
    message = f"DAT文件解析完成: 总文件{total_files}, 成功{total_files - error_count}, 失败{error_count}, 成功率{success_rate:.2f}%"

    return parse_results, message, total_files, error_count

def mark_duplicates(parse_results):
    """Split parsed files into keepers and duplicates per brand/model.

    Within each brand/model the entries are processed in file-path order;
    the first file seen with a given key is kept, later files with the same
    key are marked as duplicates. Files without a key (decode/parse
    failures) are skipped entirely and counted in neither bucket.

    Args:
        parse_results: mapping brand -> model -> list of entry dicts with
            "file_path" and "key", as produced by parse_all_dat_files.

    Returns:
        A 5-tuple (dedup_results, total_kept, total_duplicates,
        total_files, duplicate_rate). dedup_results maps brand -> model ->
        {"keep": [...], "duplicates": [...]}; it is None when no input was
        given. BUGFIX: the failure branch now also returns five items — the
        original returned a 2-tuple, which made the caller's 5-value
        unpacking raise ValueError.
    """
    if not parse_results:
        # Keep the tuple shape identical to the success path.
        return None, 0, 0, 0, 0.0

    # {brand: {model: {"keep": [...], "duplicates": [...]}}}
    dedup_results = defaultdict(lambda: defaultdict(lambda: {"keep": [], "duplicates": []}))
    total_kept = 0
    total_duplicates = 0

    for brand, models in parse_results.items():
        for model, files in models.items():
            # Sort by path so the "first occurrence wins" rule is stable.
            sorted_files = sorted(files, key=lambda x: x["file_path"])

            seen_keys = {}  # key -> path of the first (kept) file

            for file_info in sorted_files:
                file_path = file_info["file_path"]
                key = file_info["key"]

                # Skip files without a key (failed to decode or parse).
                if not key:
                    continue

                if key in seen_keys:
                    dedup_results[brand][model]["duplicates"].append(file_path)
                    total_duplicates += 1
                else:
                    dedup_results[brand][model]["keep"].append(file_path)
                    seen_keys[key] = file_path
                    total_kept += 1

    total_files = total_kept + total_duplicates
    duplicate_rate = total_duplicates / total_files * 100 if total_files > 0 else 0

    return dedup_results, total_kept, total_duplicates, total_files, duplicate_rate

def delete_duplicate_files(dedup_results):
    """Delete every file marked as a duplicate and report statistics.

    Args:
        dedup_results: mapping brand -> model -> {"keep": [...],
            "duplicates": [...]} as produced by mark_duplicates; only the
            "duplicates" lists are acted on.

    Returns:
        dict with "success" (deleted count), "failed" (failure count) and
        "failed_files" (list of "path (原因: reason)" strings).

    FIX: the original raised a generic ``Exception("文件不存在")`` purely as
    control flow and caught it two lines later with a broad except; we now
    branch directly and catch only OSError from os.remove, producing the
    same stats and console output.
    """
    delete_stats = {
        "success": 0,
        "failed": 0,
        "failed_files": []  # "path (原因: reason)" entries for failures
    }

    print("\n===== 开始删除重复文件 =====")

    for brand, models in dedup_results.items():
        for model, file_lists in models.items():
            for file_path in file_lists["duplicates"]:
                if not os.path.exists(file_path):
                    reason = "文件不存在"
                else:
                    try:
                        os.remove(file_path)
                        delete_stats["success"] += 1
                        print(f"已删除: {file_path}")
                        continue
                    except OSError as e:
                        reason = str(e)
                delete_stats["failed"] += 1
                delete_stats["failed_files"].append(f"{file_path} (原因: {reason})")
                print(f"删除失败: {file_path} (原因: {reason})")

    return delete_stats

def main(root_dir="data_2数据清洗（品牌,型号）"):
    """Run the full DAT de-duplication pipeline and print a summary.

    Pipeline: collect .dat files -> parse ro.product.name keys -> mark
    duplicates per brand/model -> delete the duplicates -> report.

    Args:
        root_dir: root folder laid out as <root>/<brand>/<model>/*.dat.
            Generalized from a hard-coded constant; the default preserves
            the original behavior for existing callers.
    """
    print("===== DAT文件去重工具  =====")
    start_time = datetime.now()

    # 1. Collect DAT files
    print(f"1. 正在收集{root_dir}下的DAT文件...")
    dat_files_info, msg = collect_dat_files(root_dir)
    if not dat_files_info:
        print(f"错误: {msg}")
        return
    print(f"   {msg}")

    # 2. Parse DAT files
    print("2. 正在解析DAT文件")
    parse_results, msg, total_files, error_count = parse_all_dat_files(dat_files_info)
    if not parse_results:
        print(f"错误: {msg}")
        return
    print(f"   {msg}")

    # 3. Mark duplicate files
    print("3. 正在标记重复文件...")
    dedup_results, total_kept, total_duplicates, total_processed, duplicate_rate = mark_duplicates(parse_results)
    if not dedup_results:
        print("错误: 无法完成重复文件标记")
        return

    # 4. Delete duplicates (skip the pass entirely when there are none)
    if total_duplicates > 0:
        delete_stats = delete_duplicate_files(dedup_results)
    else:
        delete_stats = {"success": 0, "failed": 0, "failed_files": []}
        print("\n===== 无重复文件可删除 =====")

    # Final summary
    print("\n===== 去重结果汇总 =====")
    print(f"总处理文件数: {total_processed}")
    print(f"成功解析文件: {total_files - error_count}/{total_files}")
    print(f"保留文件数: {total_kept}")
    print(f"计划删除文件数: {total_duplicates}")
    print(f"实际删除成功: {delete_stats['success']}")
    print(f"删除失败: {delete_stats['failed']}")
    print(f"去重率: {duplicate_rate:.2f}%")

    # List any files that could not be deleted
    if delete_stats["failed"] > 0:
        print("\n===== 删除失败的文件 =====")
        for failed_file in delete_stats["failed_files"]:
            print(failed_file)

    # Elapsed wall-clock time
    end_time = datetime.now()
    elapsed_time = end_time - start_time
    print(f"\n===== 操作完成 =====")
    print(f"耗时: {elapsed_time.total_seconds():.2f}秒")

if __name__ == "__main__":
    main()