#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/7/27
# @Author  : YunZhen
# @File    : upload_movie_data.py
# @Software: PyCharm
"""
解析nfo文件文 python 类
"""
import os
import time

from enum import Enum
from pathlib import Path

from loguru import logger
from tqdm import tqdm  # 导入进度条库
from time import sleep  # 用于模拟处理延迟
from datetime import datetime
from typing import List, Union

from config.settings import BASE_DIR
from django.db import transaction
from django.db import IntegrityError
from django.core.files import File

from utils.parser import DataMapperParser
from utils.loader import FileProcessor
from utils.processor import ListBatchProcessor


class ActorParser(DataMapperParser):
    """Parses loaded file data into Actor model field dicts.

    Each ``ModelMapper`` member maps a model field name to the key used in
    the source data; an empty string means the source file provides no value
    for that field.
    """

    class ModelMapper(Enum):
        # Actor table: model field -> source data key
        ACTOR_FIELD_MAP = {
            'name': 'Name',
            'avatar': '',
            'gender': '',
            'birthday': 'Birthday',
            'country': ''
        }
        # Actor details table: model field -> source data key
        ACTOR_DETAILS_FIELD_MAP = {
            'actor': 'Name',
            'roma': 'Roma',
            'alias': 'Alias',
            'height': 'Height',
            'weight': '',
            'bust': 'Bust',
            'hip': 'Hip',
            'waist': 'Waist',
            'cups': 'Cup',
            'ethnicity': '',
            'birth_place': 'Birthplace',
            'blood_type': '',
            'zodiac_sign': '',
            'hobbies': '',
            'profile': '',
            'experience': '',
        }
        # Company table (no source keys defined yet)
        COMPANY = {
            'id': '',
            'name': '',
            'website': '',
        }
        # Actor <-> company relation table (mapping not yet defined)
        ACTOR_TO_COMPANY = [
        ]

    def parser_data(self, source: Union[Path, str], field_map: ModelMapper):
        """Load *source* with ``FileProcessor`` and map it via *field_map*.

        :param source: path to the data file to load
        :param field_map: ``ModelMapper`` member whose value is the field mapping
        :return: whatever ``DataMapperParser.run_parser`` produces for the data
        """
        source = FileProcessor(source).load()
        return self.run_parser(source, field_map.value)


class FilmParser(DataMapperParser):
    """Parses loaded file data into Film model field dicts and related tables."""

    class ModelMapper(Enum):
        # Film table: model field -> source data key
        FILM_FIELD_MAP = {
            'num': 'dir_name',
            'title': 'title',
            'sort_title': 'sorttitle',
            'original_title': 'originaltitle',
            'plot': 'plot',
            'outline': 'outline',
            'original_plot': 'originalplot',
            'tagline': 'tagline',
            'premiered': 'premiered',
            'release_date': 'releasedate',
            'year': 'year',
            'mpaa': 'mpaa',
            'cover_url': 'cover',
            'trailer_url': 'trailer',
            'website_url': 'website',
            'runtime': 'runtime',
            'rating': 'rating',
            'critic_rating': 'criticrating',
            'votes': 'votes',
        }
        # Actor <-> film relation: model field -> source data key
        ACTOR_FILM_RELATION_MAP = {
            'actor': 'actors',
            'film': 'dir_name',
        }
        # Tag table: model field -> source data key
        TAGS_MAP = {
            'value': 'tags',
        }
        # Film <-> tag relation: film num key -> tag list key
        FILM_TAGS_RELATION_MAP = {
            "dir_name": 'tags'
        }

    def parser_films(self, source: Union[Path, str]):
        """Parse film entries from *source* into FILM_FIELD_MAP records."""
        loaded = FileProcessor(source).load()
        items = list(loaded['items'].values())
        return self.run_parser(items, self.ModelMapper.FILM_FIELD_MAP.value)

    def parser_actor_film_relation(self, source: Union[Path, str]):
        """Parse actor/film relation records; every record gets role=2."""
        loaded = FileProcessor(source).load()
        items = list(loaded['items'].values())
        # ext_fields injects a constant 'role' value into every parsed record
        return self.run_parser(items, self.ModelMapper.ACTOR_FILM_RELATION_MAP.value, ext_fields={'role': 2})

    def parser_tags(self, film_data: Union[Path, str]):
        """Parse tag values from *film_data*.

        Bug fix: the original ran ``run_parser`` twice with the same
        ``TAGS_MAP``, feeding already-mapped ``{'value': ...}`` records back
        through a mapper that looks for a ``'tags'`` key.  A single pass is
        the intended behavior.
        """
        tags = FileProcessor(film_data).load()
        return self.run_parser(tags['tags'], self.ModelMapper.TAGS_MAP.value)

    def parser_film_tags_relation(self, film_data: Union[Path, str]):
        """Parse film -> tags relation records from *film_data*."""
        tags = list(FileProcessor(film_data).load()['items'].values())
        return self.run_parser(tags, self.ModelMapper.FILM_TAGS_RELATION_MAP.value, same_file=True)


class UploadData:
    """Generic batched insert/update helpers for Django models."""

    # number of rows written per batch
    size = 500

    def batch_insert(self, data: List[dict], models, export=False, export_file=None, export_field=None):
        """Insert *data* into *models* in batches of ``self.size``.

        :param data: list of field dicts to insert
        :param models: Django model class to insert into
        :param export: when True, collect a {export_field value: id} map
        :param export_file: destination file for the exported map; defaults to
            a timestamped JSON file under BASE_DIR/data/result
        :param export_field: model field whose values key the exported map
        :return: the {export_field value: id} map (empty when export is False)
        """
        total = len(data)
        # ceiling division; the original int(total/size)+1 produced a trailing
        # empty batch whenever total was an exact multiple of self.size
        batch_count = (total + self.size - 1) // self.size
        start = 0
        insert_result = {}
        for i in range(batch_count):
            start_time = int(time.time())
            end = min(start + self.size, total)
            batch_data = data[start:end]
            logger.info(
                '开始处理 批次_{}, 数量: {}, 数据区间:{}-{} 开始时间: {}',
                i + 1, len(batch_data), start, end, start_time
            )
            obj_list = []
            export_keys = []
            for record in batch_data:
                # drop empty-string fields so model defaults apply
                record = {k: v for k, v in record.items() if v != ""}
                if export:
                    export_keys.append(record[export_field])
                obj_list.append(models(**record))
            with transaction.atomic():
                saved_data = models.objects.bulk_create(obj_list, ignore_conflicts=True)

            logger.info(
                '批次_{}导入成功, 数量: {}; 失败数量: {}; 耗时: {}',
                i + 1, len(batch_data), len(batch_data) - len(saved_data), int(time.time()) - start_time
            )
            if export:
                # re-query: bulk_create(ignore_conflicts=True) does not set ids
                saved_rows = models.objects.filter(
                    **{f'{export_field}__in': export_keys}
                ).values('id', export_field)
                for obj in saved_rows:
                    insert_result[obj[export_field]] = obj['id']
            start = end
        logger.info('数据上传完毕')

        # export the value -> id map, if any rows were collected
        if insert_result:
            export_file = export_file if export_file \
                else BASE_DIR.joinpath('data', 'result',
                                       f'{models.__name__}-{datetime.now().strftime("%Y%m%d%H%M%S")}.json')
            FileProcessor(export_file, model='w').save(insert_result)
        return insert_result

    @classmethod
    def update(cls, models, data: dict, unique_key: dict):
        """Update a single row identified by *unique_key*.

        :param models: Django model class to update
        :param data: field name -> new value (names must match model fields)
        :param unique_key: filter kwargs identifying exactly one row
        :return: True on success, False when the row is missing or save fails
        """
        obj = models.objects.filter(**unique_key).first()
        if not obj:
            logger.warning('数据不存在{}', unique_key)
            return False
        try:
            with transaction.atomic():
                for key, val in data.items():
                    setattr(obj, key, val)
                # save once after all fields are set; the original called
                # save() inside the loop, writing the row once per field
                obj.save()
        except Exception as err:
            logger.error(f'数据{unique_key}-字段{data}更新失败，原因>>>{err}')
            return False
        logger.info(f'数据{unique_key}-字段{data}更新成功')
        return True

    @classmethod
    def insert_picture(cls, models, film_id, file: list[dict]):
        """Bulk-insert picture rows for one film.

        :param models: picture model class (film_id / poster / type fields)
        :param film_id: primary key of the owning film
        :param file: list of {type_code: [path, ...]} mappings
        """
        upload_obj = []
        open_file_obj = []
        failed_insert_file = []
        for item in file:
            for file_type, file_path_list in item.items():
                for f in file_path_list:
                    try:
                        f_stream = open(f, 'rb')
                        upload_obj.append(models(
                            film_id=film_id,
                            poster=File(f_stream, name=os.path.basename(f)),
                            type=file_type
                        ))
                        open_file_obj.append(f_stream)
                    except FileNotFoundError:
                        # loguru uses {} placeholders; the original '%s' was
                        # never interpolated
                        logger.error('文件不存在: {}', item)
                        failed_insert_file.append({'obj': item, 'msg': '文件不存在'})
                    except OSError as e:
                        logger.error('文件读取错误: {} - {}', item, str(e))
                        failed_insert_file.append({'obj': item, 'msg': '文件读取错误'})
                    except TypeError:
                        # e.g. a None path produced by the parser
                        logger.error('文件对象路径有问题: {}', item)
                        failed_insert_file.append({'obj': item, 'msg': '部分文件路径有问题'})
        # the original also caught IntegrityError here, but no database
        # operation happens inside the loop, so that handler was dead code
        try:
            # bulk-insert all picture rows
            models.objects.bulk_create(upload_obj)
        finally:
            # close the streams even if the bulk insert raises
            for f in open_file_obj:
                f.close()


class ActorUploadData(UploadData):
    """Uploads actor, actor-detail and company data."""

    # model classes are expected to be assigned by a subclass or caller
    actor_models = None
    actor_details_models = None
    company_models = None

    def upload_actor_information(self, actor_data: List, export=False):
        """Batch-insert actor rows (no related-table data)."""
        self.batch_insert(actor_data, self.actor_models, export=export, export_field='name')

    def upload_actor_avatar(self, avatar_data: dict):
        """Update actor avatars from local files, with a progress bar.

        :param avatar_data: mapping of actor name -> local avatar file path
        :return: (success, failed) lists of actor names
        """
        success, failed = [], []
        total = len(avatar_data)  # total number of avatars to process

        # progress bar over all avatar entries
        with tqdm(
                total=total,
                desc="更新演员头像",
                unit="avatar",
                bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]"
        ) as pbar:
            for key, value in avatar_data.items():
                try:
                    # 1. derive the file name from the (possibly Windows) path
                    filename = os.path.basename(value)

                    # 2. open the local avatar file
                    with open(value, 'rb') as avatar_stream:
                        # 3. wrap the stream in a Django File object
                        avatar = File(avatar_stream, name=filename)

                        # 4. try updating an existing actor row
                        result = self.update(
                            self.actor_models,
                            {"avatar": avatar},
                            unique_key={'name': key}
                        )

                        # 5. no existing row: create one instead
                        if not result:
                            try:
                                # attempt to create a new actor record
                                result = self.actor_models.objects.create(
                                    name=key,
                                    avatar=avatar
                                )
                            except IntegrityError:
                                # unique-key conflict: fetch and overwrite
                                result = self.actor_models.objects.get(name=key)
                                result.avatar = avatar
                                result.save()

                    # 6. record the outcome
                    if result:
                        logger.info(f'{key}头像更新成功')
                        success.append(key)
                        pbar.set_postfix(status='✅', current=key, refresh=False)
                    else:
                        logger.error(f'{key}头像更新失败')
                        failed.append(key)
                        pbar.set_postfix(status='❌', current=key, refresh=False)

                except Exception as err:
                    logger.error(f'处理{key}发生异常-{err}')
                    failed.append(key)
                    pbar.set_postfix(status='⚠️', current=key, refresh=False)
                    # brief pause so the error status stays visible
                    sleep(0.1)

                finally:
                    # 7. advance the progress bar
                    pbar.update(1)
                    # small delay for smoother bar rendering
                    sleep(0.01)

        # 8. final summary
        logger.info(f'头像更新完成: 成功 {len(success)} 个, 失败 {len(failed)} 个')

        # 9. optionally list the failures
        if failed:
            logger.warning('失败头像列表:')
            for i, name in enumerate(failed, 1):
                logger.warning(f'{i}. {name}')

        return success, failed

    def upload_actor_detail_information(self, actor_data: List):
        """Insert actor detail rows (one-to-one with actors), in batches.

        :param actor_data: parsed detail dicts, each carrying an 'actor' name
        """
        # process the relation data batch by batch
        batch_processor = ListBatchProcessor(actor_data, batch_size=self.size, total_elements=len(actor_data))
        for data, _, _ in batch_processor:
            new_actor_data = {}
            upload_actor_detail_information_obj = []
            logger.info('uploading actor batch {}'.format(batch_processor.current_batch))
            for item in data:
                # index details by actor name; skip records without one
                name = item.pop('actor', None)
                if name is None:
                    continue
                new_actor_data[name] = item
            actor_id = self.actor_models.objects.filter(name__in=new_actor_data.keys()).values('id', 'name')
            # walk the rows the database actually returned
            for actor in actor_id:
                at = new_actor_data.get(actor['name'])
                # skip details whose actor is not in the database
                if not at:
                    continue
                # attach the actor_id foreign-key value
                at.update({'actor_id': actor['id']})
                # build the ActorDetails instance and queue it for this batch
                upload_actor_detail_information_obj.append(self.actor_details_models(**at))
            # bulk-insert the batch
            with transaction.atomic():
                self.actor_details_models.objects.bulk_create(upload_actor_detail_information_obj,
                                                              ignore_conflicts=True)

    def upload_company(self, company_list: List):
        """Insert company rows from a list of company names."""
        company_list = [{'name': company} for company in company_list]
        self.batch_insert(company_list, self.company_models)


class FilmsUploadPresenter(UploadData):
    """Uploads films plus their posters, tags and actor relations."""

    # model classes are expected to be assigned by a subclass or caller
    actor_model = None
    film_models = None
    poster_models = None
    actor_film_models = None
    tags_models = None
    film_to_tags_models = None

    def upload_films(self, films_data: List, export=False):
        """Batch-insert film rows; optionally export a num -> id map."""
        self.batch_insert(data=films_data, models=self.film_models, export=export, export_field='num')

    def upload_poster(self, film_data: Path | str):
        """Insert fanart/poster/thumb images for every film in *film_data*.

        Films whose ``num`` is not in the database are skipped with a warning.
        """
        loaded = FileProcessor(film_data).load()
        for item in loaded['items'].values():  # renamed: original shadowed 'poster'
            film_id = self.film_models.objects.filter(num=item['file']).values('id')
            if not film_id:
                logger.warning('电影不存在, 跳过图片: {}', item['file'])
                continue
            # filter out None paths (resolves the original TODO); type codes
            # assumed: 1=fanart, 2=poster, 3=thumb, 5=extra fanart -- confirm
            # against the poster model definition
            upload_path = [
                {1: [p for p in [item['fanarts']] if p]},
                {2: [p for p in [item['poster']] if p]},
                {3: [p for p in [item['thumb']] if p]},
                {5: [p for p in item['extra_fanarts'] if p]},
            ]
            self.insert_picture(self.poster_models, film_id[0]['id'], upload_path)

    def upload_actor_film_relation(self, films_data: List):
        """Resolve actor names and film nums to ids, then insert relations."""
        # collect every actor name and film num referenced by the relations
        actor_names, film_nums = [], []
        for obj_relation_map in films_data:
            actor_names.extend(obj_relation_map['actor'])
            film_nums.append(obj_relation_map['film'])
        # resolve names/nums to primary keys in two queries
        actors_id = self.actor_model.objects.filter(name__in=set(actor_names)).values('name', 'id')
        films_id = self.film_models.objects.filter(num__in=set(film_nums)).values('num', 'id')
        # build the id-based relation records and insert them
        actor_film_id_map, _ = self.map_to_ids(films_data, actors_id, films_id)
        self.batch_insert(actor_film_id_map, self.actor_film_models)

    @staticmethod
    def map_to_ids(source_data, actors, films):
        """Replace actor names and film nums in *source_data* with their ids.

        :param source_data: relation records [{'actor': [...], 'film': num, 'role': n}, ...]
        :param actors: actor rows [{'name': 'JULIA', 'id': 264366}, ...]
        :param films: film rows [{'num': '428SUKE-148', 'id': 18}, ...]
        :return: (mapped record list, missing-data records)
        """
        # lookup tables: name/num -> id
        actor_map = {actor['name']: actor['id'] for actor in actors}
        film_map = {film['num']: film['id'] for film in films}

        mapped_data = []
        missing_records = []  # records we could not fully resolve

        for item in source_data:
            # resolve the film
            film_id = film_map.get(item['film'])
            if film_id is None:
                missing_records.append({
                    'type': 'film',
                    'value': item['film'],
                    'source_item': item
                })
                logger.warning(f"警告: 未找到电影 '{item['film']}' 的ID")
                continue  # skip records without a film id

            # resolve the actors
            actor_ids = []
            missing_actors = []  # actors in this record with no db id
            for actor_name in item['actor']:
                actor_id = actor_map.get(actor_name)
                if actor_id is None:
                    missing_actors.append(actor_name)
                    logger.warning(f"警告: 未找到演员 '{actor_name}' 的ID")
                else:
                    actor_ids.append(actor_id)

            if missing_actors:
                missing_records.append({
                    'type': 'actor',
                    'value': missing_actors,
                    'source_item': item
                })

            # skip the record entirely when no actor resolved
            if not actor_ids:
                continue

            # emit one relation row per resolved actor
            for actor_id in actor_ids:
                mapped_data.append({
                    'actor_id': actor_id,
                    'film_id': film_id,
                    'role': item['role']
                })
        # message fix: the original said '隐射' (a typo for '映射', mapping)
        logger.info('映射结果: \n成功-{}\n失败-{}', mapped_data, missing_records)
        return mapped_data, missing_records

    def upload_tags(self, film_data: Path | str):
        """Insert tag rows grouped by tag type."""
        tags_data = FileProcessor(film_data).load()
        for tag_type, values in tags_data.items():
            tag_obj = [self.tags_models(value=value, tag_type=tag_type) for value in values]
            with transaction.atomic():
                self.tags_models.objects.bulk_create(tag_obj, ignore_conflicts=True)

    def upload_films_tags_relations(self, film_tags_relations: List[dict]):
        """Attach tags to films via the film model's m2m relation.

        :param film_tags_relations: list of {film num: [tag value, ...]}
        """
        for relations_item in film_tags_relations:
            for film, tags in relations_item.items():
                # bug fix: the original queried film_to_tags_models, but the
                # 'num' field and the 'tags' m2m manager live on the film
                # model; also use filter().first() so a missing film is
                # skipped instead of raising DoesNotExist
                film_obj = self.film_models.objects.filter(num=film).first()
                if film_obj is None:
                    logger.warning('电影不存在, 跳过标签关联: {}', film)
                    continue
                tags_obs_list = self.tags_models.objects.filter(value__in=tags)
                film_obj.tags.add(*tags_obs_list)


if __name__ == '__main__':
    # Quick manual check: print only the filename portion of a sample path.
    sample_actor_file = r'E:\PythonProject\yun-cool-cinema\data\actor\actor.json'
    print(Path(sample_actor_file).name)