import os
import re
import chardet
from langdetect import detect_langs

MAX_FILE_SIZE_MB = 3  # size cap (in MB) for each merged output file

def detect_encoding(file_path):
    """Detect a file's character encoding with chardet.

    Reads the whole file as bytes and returns chardet's best guess,
    which may be ``None`` when no encoding can be determined.
    """
    with open(file_path, 'rb') as handle:
        guess = chardet.detect(handle.read())
    detected = guess['encoding']
    # chardet can report EUC-TW, which Python's codecs cannot decode;
    # substitute utf-8 in that case.
    return 'utf-8' if detected == 'EUC-TW' else detected

def detect_language(text):
    """Detect the dominant language of *text*, biased toward English.

    Returns ``'en'`` whenever English scores above 50% probability,
    otherwise the top-ranked language code; ``None`` on detection failure.
    """
    try:
        candidates = detect_langs(text)
        # Print the raw detection result to aid debugging.
        print(f"检测语言: {candidates}")

        # Accept the text as English if 'en' exceeds a 50% probability.
        if any(c.lang == 'en' and c.prob > 0.5 for c in candidates):
            return 'en'

        # Fall back to the highest-probability language.
        return candidates[0].lang
    except Exception as e:
        print(f"语言检测失败: {e}")
        return None
def clean_data(input_file):
    """Clean an .srt file: drop promo/credit lines and skip non-English files.

    Args:
        input_file: Path to the .srt subtitle file.

    Returns:
        Cleaned subtitle blocks joined by blank lines, or an empty string
        when the file is not detected as English.
    """
    encoding = detect_encoding(input_file) or 'utf-8'

    try:
        with open(input_file, "r", encoding=encoding) as file:
            content = file.read()
    except UnicodeDecodeError:
        print(f"尝试使用 {encoding} 编码读取 {input_file} 失败，改用 'latin1'")
        # latin1 maps every byte to a code point, so this read cannot fail.
        with open(input_file, "r", encoding='latin1') as file:
            content = file.read()

    # Hoist loop-invariant patterns: compile them once instead of
    # re-evaluating the regex on every subtitle block.
    timestamp_re = re.compile(
        r"\d{2}:\d{2}:\d{2},\d{3} --> \d{2}:\d{2}:\d{2},\d{3}")
    junk_re = re.compile(
        r"(Improved by|Resynced by|Enjoy the movie)", re.IGNORECASE)

    cleaned_blocks = []
    text_parts = []  # subtitle text fragments, joined once for language detection

    for block in re.split(r"\n\s*\n", content.strip()):
        lines = block.split("\n")

        # A valid block is: index line, timestamp line, then text lines.
        if (len(lines) >= 3 and re.match(r"\d+", lines[0])
                and timestamp_re.match(lines[1])):
            cleaned_text = "\n".join(
                line for line in lines[2:] if not junk_re.search(line))

            if cleaned_text.strip():
                text_parts.append(cleaned_text)
                cleaned_blocks.append(
                    "\n".join(lines[:2] + cleaned_text.split("\n")))

    # Join once (O(n)) instead of the quadratic `full_text += ...` loop.
    full_text = " ".join(text_parts) + " " if text_parts else ""

    # Improved language check: keep only files detected as English.
    if detect_language(full_text) != 'en':
        print(f"跳过非英文字幕文件: {input_file}")
        return ""

    return "\n\n".join(cleaned_blocks)


def get_file_size(file_path):
    """Return the size of *file_path* in megabytes, or 0 if it does not exist."""
    if not os.path.exists(file_path):
        return 0
    return os.path.getsize(file_path) / (1024 * 1024)

def process_files_in_directory(directory, output_dir):
    """Clean every .srt file in *directory* and merge results into *output_dir*.

    Output files are named ``subtitle_<n>.txt``; a new one is started
    whenever appending would push the current file past MAX_FILE_SIZE_MB.

    Args:
        directory: Directory containing the source .srt files.
        output_dir: Directory where merged .txt files are written (created
            if missing).
    """
    # exist_ok avoids the check-then-create race of an explicit exists() test.
    os.makedirs(output_dir, exist_ok=True)

    # Sort by filename so files are processed in a deterministic order.
    srt_files = sorted(f for f in os.listdir(directory) if f.endswith('.srt'))

    output_index = 1
    current_output_file = os.path.join(output_dir, f'subtitle_{output_index}.txt')

    for srt_file in srt_files:
        cleaned_content = clean_data(os.path.join(directory, srt_file))

        if not cleaned_content.strip():
            continue  # skip empty results and non-English subtitle files

        # Projected size (MB) of the output once this content is appended.
        new_size = get_file_size(current_output_file) + (
            len(cleaned_content.encode('utf-8')) / (1024 * 1024))

        if new_size > MAX_FILE_SIZE_MB:
            # Size cap exceeded: roll over to a fresh output file.
            output_index += 1
            current_output_file = os.path.join(output_dir, f'subtitle_{output_index}.txt')
            print(f"创建新文件：{current_output_file}")

        # Append the cleaned subtitles to the current output file.
        with open(current_output_file, 'a', encoding='utf-8') as outfile:
            outfile.write(cleaned_content + '\n')

        print(f"已处理 {srt_file}，写入 {current_output_file}")

    print("字幕文件清洗并合并完成。")

def main():
    """Clean and merge subtitle directories 2 through 60 under D:/subtitle/bring_goods."""
    output_directory = r'D:\subtitle\bring_goods\txt3M'  # merged output directory

    for i in range(2, 61):
        directory_to_clean = rf'D:\subtitle\bring_goods\{i}'  # source subtitle directory
        process_files_in_directory(directory_to_clean, output_directory)


# Guard the entry point so importing this module does not start the batch job.
if __name__ == "__main__":
    main()
