"""电影应用任务模块

定义了使用Celery处理电影相关异步任务的函数。
"""

import json

from celery import chain, group, shared_task
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.forms import ValidationError

from movies.services import parse_csv, parse_json


def split_csv_file(file_path: str, chunk_size_mb: int = 1) -> list[str]:
    """
    Split a CSV file into multiple chunk files, each prefixed with the
    header row.

    Args:
        file_path (str): Path of the source file in default storage.
        chunk_size_mb (int): Approximate maximum size of each chunk (MB).

    Returns:
        list[str]: Paths of the saved chunk files (empty for an empty file).
    """
    chunk_paths: list[str] = []
    part = 1
    current_chunk_size = 0
    chunk_lines: list[str] = []
    header = ""
    max_chunk_bytes = chunk_size_mb * 1024 * 1024

    with default_storage.open(file_path, "r") as file:
        for index, line in enumerate(file):
            if index == 0:
                # Keep the header separately: it is prepended to every
                # chunk and must not be counted/stored as a data row.
                header = line
                continue
            line_size = len(line.encode("utf-8"))
            # Flush the current chunk before it would exceed the limit.
            # The `chunk_lines` guard avoids writing an empty chunk when a
            # single row is larger than the limit.
            if current_chunk_size + line_size > max_chunk_bytes and chunk_lines:
                chunk_file_name = f"{file_path}_part_{part}.csv"
                default_storage.save(
                    chunk_file_name, ContentFile(header + "".join(chunk_lines))
                )
                chunk_paths.append(chunk_file_name)
                chunk_lines = []
                current_chunk_size = 0
                part += 1
            chunk_lines.append(line)
            current_chunk_size += line_size

    # Flush the trailing chunk (if any data rows remain).
    if chunk_lines:
        chunk_file_name = f"{file_path}_part_{part}.csv"
        default_storage.save(
            chunk_file_name, ContentFile(header + "".join(chunk_lines))
        )
        chunk_paths.append(chunk_file_name)

    return chunk_paths


def split_json_file(file_path: str, chunk_size_mb: int = 100) -> list[str]:
    """
    Split a JSON file containing an array of objects into multiple
    chunk files, each holding a JSON array.

    Args:
        file_path (str): Path of the source file in default storage.
        chunk_size_mb (int): Approximate maximum size of each chunk (MB).

    Returns:
        list[str]: Paths of the saved chunk files (empty for an empty array).
    """
    chunk_paths: list[str] = []
    part = 1
    current_chunk_size = 0
    chunk_objects: list = []
    max_chunk_bytes = chunk_size_mb * 1024 * 1024

    with default_storage.open(file_path, "r") as file:
        objects = json.load(file)

    for obj in objects:
        obj_size = len(json.dumps(obj).encode("utf-8"))
        # Flush the current chunk before it would exceed the limit.
        # The `chunk_objects` guard avoids writing an empty chunk when a
        # single object is larger than the limit.
        if current_chunk_size + obj_size > max_chunk_bytes and chunk_objects:
            chunk_file_name = f"{file_path}_part_{part}.json"
            default_storage.save(
                chunk_file_name, ContentFile(json.dumps(chunk_objects))
            )
            chunk_paths.append(chunk_file_name)
            chunk_objects = []
            current_chunk_size = 0
            part += 1
        chunk_objects.append(obj)
        current_chunk_size += obj_size

    # Flush the final chunk AFTER the loop — one file per size threshold,
    # not one file per object.
    if chunk_objects:
        chunk_file_name = f"{file_path}_part_{part}.json"
        default_storage.save(
            chunk_file_name, ContentFile(json.dumps(chunk_objects))
        )
        chunk_paths.append(chunk_file_name)

    return chunk_paths


@shared_task
def split_file_task(file_name: str, file_type: str) -> list[str]:
    """
    Celery task that splits an uploaded file into chunk files.

    Args:
        file_name (str): Name/path of the file in default storage.
        file_type (str): MIME type ("text/csv" or "application/json").

    Returns:
        list[str]: Paths of the chunk files produced by the splitter.

    Raises:
        ValidationError: If the MIME type is not supported.
    """
    # Dispatch table: MIME type -> splitter function.
    splitters = {
        "text/csv": split_csv_file,
        "application/json": split_json_file,
    }
    splitter = splitters.get(file_type)
    if splitter is None:
        raise ValidationError("Invalid file type")
    return splitter(file_name)


@shared_task
def process_chunk(chunk_path: str, file_type: str) -> int:
    """
    Celery task that parses a single chunk file.

    Args:
        chunk_path (str): Path of the chunk file in default storage.
        file_type (str): MIME type ("text/csv" or "application/json").

    Returns:
        int: Result of the parser (number of movies processed).

    Raises:
        ValidationError: If the MIME type is not supported.
    """
    # The chunk is opened before the type check, matching the task's
    # contract that a missing chunk surfaces as a storage error.
    with default_storage.open(chunk_path, "r") as chunk_file:
        parser = {
            "text/csv": parse_csv,
            "application/json": parse_json,
        }.get(file_type)
        if parser is None:
            raise ValidationError("Invalid file type")
        parsed = parser(chunk_file)
    return parsed


@shared_task
def process_chunks(chunk_paths: list, file_type: str) -> int:
    """
    Celery task that fans out one subtask per chunk file. Invoked after
    the split step has produced the chunk list.

    Args:
        chunk_paths (list): Paths of the chunk files to process.
        file_type (str): MIME type forwarded to each subtask.

    Returns:
        The AsyncResult of the dispatched task group.
    """
    # One process_chunk signature per chunk, executed in parallel as a group.
    signatures = [process_chunk.s(path, file_type) for path in chunk_paths]
    return group(signatures).apply_async()


@shared_task
def process_file(file_name: str, file_type: str) -> int:
    """
    Celery task that orchestrates splitting a file and processing the
    resulting chunks in parallel.

    Args:
        file_name (str): Name/path of the file in default storage.
        file_type (str): MIME type of the file.

    Returns:
        The AsyncResult of the dispatched workflow.

    Raises:
        ValidationError: If the file does not exist in storage.
    """
    if not default_storage.exists(file_name):
        raise ValidationError("File does not exist in storage.")

    # Chain the split step into the fan-out step: the chunk-path list
    # returned by split_file_task is fed to process_chunks.
    workflow = split_file_task.s(file_name, file_type) | process_chunks.s(file_type)
    return workflow.apply_async()
