#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
JSON到CSV转换工具
合并工作区内的3个JSON翻译文件为一个CSV文件
列为: key, cn, en, tr
"""

import json
import csv
import os
import glob
from typing import Dict, Any, Optional, List, Tuple

def load_json_file(file_path: str) -> Optional[Dict[str, Any]]:
    """Load and parse a JSON file, returning None on any failure.

    Prints a diagnostic message and returns None when the file is
    missing, contains invalid JSON, or cannot be read for any other
    reason; callers therefore never see an exception from this helper.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            parsed = json.load(handle)
    except FileNotFoundError:
        print(f"警告: 文件 {file_path} 不存在")
    except json.JSONDecodeError as e:
        print(f"错误: 文件 {file_path} 不是有效的JSON格式: {e}")
    except Exception as e:
        print(f"错误: 读取文件 {file_path} 时发生错误: {e}")
    else:
        return parsed
    return None

def scan_json_files(directory: str = ".") -> Dict[str, List[str]]:
    """Group the JSON filenames found in *directory* by language prefix.

    The language is identified by the first five characters of the
    filename (e.g. "zh_CN"), so "zh_CN.json" and "zh_CN_admin.json"
    end up in the same bucket. Previously generated output files
    (names starting with "merged_") are skipped.

    Returns a mapping such as:
        {"zh_CN": ["zh_CN.json", "zh_CN_admin.json"], "en_US": ["en_US.json"]}
    """
    grouped: Dict[str, List[str]] = {}

    for path in glob.glob(os.path.join(directory, "*.json")):
        name = os.path.basename(path)

        # Ignore this tool's own output files.
        if name.startswith("merged_"):
            continue

        # A name shorter than 5 characters cannot carry a language code.
        if len(name) < 5:
            continue

        grouped.setdefault(name[:5], []).append(name)

    return grouped

def merge_language_files(file_list: List[str]) -> Dict[str, Any]:
    """Merge several JSON files belonging to one language into one dict.

    Files are applied in list order; on duplicate keys the value from
    the later file wins, after a warning with the conflict count is
    printed. Files that fail to load are silently skipped (the loader
    already printed a diagnostic).

    Args:
        file_list: JSON file paths to load and merge, in order.

    Returns:
        A single dict with the union of all successfully loaded keys.
    """
    merged_data: Dict[str, Any] = {}

    for filename in file_list:
        # BUG FIX: these f-strings had lost their {filename} placeholder
        # and printed a literal "(unknown)" instead of the file name.
        print(f"  正在加载 {filename}...")
        data = load_json_file(filename)
        if data:
            # Warn about keys that this file will overwrite.
            conflicts = set(merged_data.keys()) & set(data.keys())
            if conflicts:
                print(f"    警告: 发现 {len(conflicts)} 个重复键，将使用 {filename} 中的值覆盖")

            merged_data.update(data)
            print(f"    已加载 {len(data)} 个键，当前总计 {len(merged_data)} 个键")

    return merged_data

def merge_json_to_csv(output_file: str = "merged_translations.csv"):
    """Scan the workspace for JSON translation files and merge them into a CSV.

    The CSV has a 'key' column followed by one column per detected
    language. zh_CN/en_US/tr_TR come first (with short headers cn/en/tr);
    any other languages follow alphabetically under their own prefix.
    Rows are sorted by key; missing translations are written as ''.
    Finally, per-language completeness statistics are printed.

    Args:
        output_file: path of the CSV file to create.
    """
    print("正在扫描工作区内的JSON文件...")
    language_files = scan_json_files()

    if not language_files:
        print("错误: 未找到任何JSON文件")
        return

    print(f"发现以下语种文件:")
    for lang, files in language_files.items():
        print(f"  {lang}: {files}")

    # Load and merge the files of each language.
    language_data = {}
    for lang_prefix, file_list in language_files.items():
        print(f"\n正在处理 {lang_prefix} 语种文件...")
        merged_data = merge_language_files(file_list)
        if merged_data:
            language_data[lang_prefix] = merged_data
            print(f"  {lang_prefix} 语种共有 {len(merged_data)} 个翻译键")

    if not language_data:
        print("错误: 没有成功加载任何语种数据")
        return

    # Union of every translation key across all languages, sorted so
    # the output row order is deterministic.
    all_keys = set()
    for data in language_data.values():
        all_keys.update(data.keys())
    sorted_keys = sorted(all_keys)

    # Column layout: priority languages first with short headers, then
    # any remaining languages alphabetically under their raw prefix.
    column_order: List[str] = []
    column_headers = ['key']
    short_names = {'zh_CN': 'cn', 'en_US': 'en', 'tr_TR': 'tr'}
    priority_langs = ['zh_CN', 'en_US', 'tr_TR']
    for lang in priority_langs:
        if lang in language_data:
            column_order.append(lang)
            column_headers.append(short_names.get(lang, lang))
    for lang in sorted(language_data.keys()):
        if lang not in priority_langs:
            column_order.append(lang)
            column_headers.append(lang)

    print(f"\n正在创建CSV文件 {output_file}...")
    print(f"列顺序: {column_headers}")

    with open(output_file, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)

        writer.writerow(column_headers)

        rows_written = 0
        for key in sorted_keys:
            row = [key]

            for lang in column_order:
                value = language_data[lang].get(key, '')

                if value:
                    # BUG FIX: coerce to str before .replace() — JSON values
                    # may be numbers, which would raise AttributeError.
                    # Newlines are flattened to spaces so they cannot break
                    # CSV rows, then the value is trimmed.
                    value = str(value).replace('\r\n', ' ').replace('\n', ' ').replace('\r', ' ').strip()

                row.append(value)

            writer.writerow(row)
            rows_written += 1

    print(f"CSV文件创建完成！共写入 {rows_written} 行数据")

    # Per-language completeness statistics.
    print(f"\n翻译完整性统计:")
    print(f"- 总键数: {len(sorted_keys)}")

    # FIX: walk languages and their headers in lockstep instead of the
    # fragile O(n^2) column_order.index() lookup on every iteration.
    # column_headers[1:] skips the leading 'key' column.
    for lang, lang_name in zip(column_order, column_headers[1:]):
        count = sum(1 for k in sorted_keys if language_data[lang].get(k))
        print(f"- {lang_name} 翻译: {count}")

    # Keys that have a non-empty translation in every language.
    complete_translations = sum(
        1 for key in sorted_keys
        if all(language_data[lang].get(key) for lang in column_order)
    )

    print(f"- 完整翻译（所有语种都有）: {complete_translations}")

if __name__ == "__main__":
    # Run the full scan-and-merge pipeline when executed as a script.
    merge_json_to_csv()