import logging
import datetime

from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError

from .celery_task_exec import *

_logger = logging.getLogger(__name__)


class CeleryTask(models.Model):
    """Celery task.

    * Stores the parameters needed to execute a task.
    * Stores the task's execution status and result.
    """
    _name = 'celery.task'
    _description = 'Celery Task'

    name = fields.Char(
        string='任务编号',
        default=lambda self: self.env['ir.sequence'].next_by_code('celery.task')
    )
    task_type = fields.Selection(
        string='任务类型',
        selection=[
            # Collection creation also goes through the update flow: the
            # collection is first created from basic data (mix_id, mix_name,
            # ...) and its details are then fetched by the update.
            ('collection_update', '合集更新'),
            ('collection_reload', '合集重载'),
            ('create_collection', '创建合集'),
            ('collection_merge', '合集合并'),
            ('collection_manual_update', '手动更新合集'),
            ('create_fake_collection', '创建伪合集'),
            ('publish_update', '发布更新')
        ]
    )
    status = fields.Selection(
        string='任务状态',
        selection=[
            ('draft', '未开始'),
            ('ongoing', '执行中'),
            ('finish', '已完成'),
            ('error', '异常')
        ],
        default='draft'
    )
    error_msg = fields.Char(string='异常原因')
    create_time = fields.Datetime(string='创建时间', default=fields.Datetime.now)
    sequence_id = fields.Many2one(comodel_name='celery.task.sequence', string='任务序列')
    finish_time = fields.Datetime(string='完成时间')
    collection_ids = fields.Many2many(comodel_name='douyin.collection', string='合集列表')
    merge_ids = fields.One2many(comodel_name='douyin.merge', inverse_name='celery_task_id', string='合并记录')
    downloaded_episode = fields.Integer(string='已下载集数')
    re_download_episode = fields.Char(string='重新下载集数')
    collection_count = fields.Integer(string='合集数量', compute='_compute_collection_count')
    collection_update_history_ids = fields.One2many(
        comodel_name='douyin.collection.update.history',
        inverse_name='celery_task_id',
        string='关联更新记录'
    )
    published_history_ids = fields.Many2many(
        comodel_name='douyin.collection.update.history',
        string='发布的更新'
    )
    manual_update_type = fields.Selection(
        string='手动更新类型',
        selection=[('range', '选取片段'), ('delete', '清除')],
        default='range'
    )
    manual_update_collection_id = fields.Many2one(comodel_name='douyin.collection', string='手动更新的合集')
    # Total number of videos scraped from the author's home page.
    manual_update_count = fields.Integer(string='手动更新数量')
    # Describes how the scraped video list is processed and finally merged
    # into a usable video list.
    manual_update_info_ids = fields.One2many(
        comodel_name='manual.update.info',
        inverse_name='celery_task_id',
        string='手动合并信息'
    )
    sec_uid = fields.Char(string='Sec UID')

    @api.depends('collection_ids')
    def _compute_collection_count(self):
        """Number of collections linked to the task."""
        for rec in self:
            rec.collection_count = len(rec.collection_ids)

    def action_run_task(self):
        """Dispatch the Celery task for each record.

        Only the data required to *execute* the task is sent to the worker;
        the data needed by the callback stays on the celery.task record.
        """
        for task in self:
            if task.task_type in ('collection_update', 'create_collection', 'collection_reload'):
                task_args = task.collection_ids.mapped(lambda r: {
                    'alias': r.alias,
                    'collection_id': r.id,
                    'mix_id': r.mix_id,
                    'updated_to_episode': r.updated_to_episode
                })
                fetch_update_collection_infos.delay(task_args, task.id)
            if task.task_type == 'collection_merge':
                merge = task.merge_ids.ensure_one()
                videos = merge.video_ids
                download_videos = self.env['douyin.video'].browse()

                if task.re_download_episode:
                    # Re-download only the comma-separated, 1-based episode
                    # numbers listed on the task.
                    for ep in task.re_download_episode.split(','):
                        download_videos |= videos[int(ep) - 1]
                else:
                    # Continue from the last downloaded episode.
                    download_videos = videos[task.downloaded_episode:]

                task_args = {
                    'start': merge.start,
                    'end': merge.end,
                    'merge_id': merge.id,
                    'merge_name': merge.collection_id.alias,
                    'videos': download_videos.mapped(
                        lambda v: (v.current_episode, v.video_url)
                    )
                }
                merge_video.delay(task_args, task.id)
            if task.task_type in ('collection_manual_update', 'create_fake_collection'):
                # Fetching the video info is identical for both task types;
                # only the callback handling differs.
                collection = task.manual_update_collection_id
                sec_uid = ''
                if task.task_type == 'collection_manual_update':
                    sec_uid = collection.author_id.sec_uid
                if task.task_type == 'create_fake_collection':
                    # Fixed duplicated assignment (`sec_uid = sec_uid = ...`).
                    sec_uid = task.sec_uid

                task_args = {
                    'alias': collection.alias,
                    'sec_uid': sec_uid,
                    'manual_update_count': task.manual_update_count
                }
                fetch_author_portal_info.delay(task_args, task.id)
            if task.task_type == 'publish_update':
                # Mapping of channel name -> channel content lines.
                update_channel_content = []
                channel_content_map = {}
                for history in task.published_history_ids:
                    channel_name = history.collection_id.channel_name
                    if not channel_name:
                        raise UserError(f'合集：{history.collection_id.alias} 没有设置频道名称')
                    update_channel_content.append('  『#{0}』更新數量：{1} 集'.format(channel_name, len(history.video_ids)))
                    # Manually updated collections are only announced in the
                    # update-news channel and are not auto-published (the
                    # compilation may already have been produced).
                    if history.task_type == 'collection_manual_update':
                        continue
                    channel_content_map.update({channel_name: history.video_ids.mapped(
                        lambda r: f'  {history.collection_id.start_ep + r.current_episode}集 {r.title}: {r.video_url}'
                    )})
                channel_content_map.update({'更新資訊': update_channel_content})
                publish_update_content.delay(channel_content_map, task.id)
        # Mark the tasks as running.
        self.update({'status': 'ongoing'})

    def action_task_rerun(self):
        """Re-run manual-update / fake-collection tasks.

        Previously created update histories (and their videos) are discarded
        before the task is dispatched again.
        """
        for task in self:
            if task.task_type in ('collection_manual_update', 'create_fake_collection'):
                task.status = 'draft'
                task.collection_update_history_ids.mapped('video_ids').unlink()
                task.collection_update_history_ids.unlink()
                task.action_run_task()

    def celery_callback(self, status, error_msg, callback_args):
        """API gateway through which every task callback is dispatched.

        :param status: final task status ('finish' or 'error')
        :param error_msg: error description, meaningful when status == 'error'
        :param callback_args: task-type-specific payload returned by the worker
        :return: confirmation message for the caller
        """
        # Note: celery_update_collections may also change the task status
        # when an error occurs while updating collections (e.g. episode
        # number inconsistencies).
        self.update({'status': status, 'finish_time': fields.Datetime.now()})
        if status == 'error':
            # On failure only the task state is recorded; no follow-up
            # operations happen (e.g. no update history is created).
            self.update({'error_msg': error_msg})
        if status == 'finish':
            if self.task_type in ('collection_update', 'create_collection', 'collection_reload'):
                self.celery_update_collections(callback_args)
            if self.task_type == 'collection_merge':
                self.merge_ids.update({'state': 'done'})
                # After a merge, unfinished collections are flagged for export
                # to Discord; later single-episode updates are published
                # separately.
                self.merge_ids.mapped('collection_id').filtered(lambda r: not r.is_finished).update({'export_discord_info': True})
            if self.task_type in ('collection_manual_update', 'create_fake_collection'):
                self.celery_author_portal_callback(callback_args)
            if self.task_type == 'publish_update':
                self.published_history_ids.update({'is_export_discord': True})

        return '任务回调执行成功'

    def _handle_author_info(self, author_info):
        """Create the author record if missing, otherwise update it.

        :param author_info: value dict containing at least the 'uid' key
        :return: the douyin.author record
        """
        author_obj = self.env['douyin.author']
        author_rec = author_obj.search([('uid', '=', author_info['uid'])])
        if not author_rec:
            author_rec = author_obj.create(author_info)
        else:
            author_rec.update(author_info)
        return author_rec

    def celery_update_collections(self, updated_collections):
        """Apply the worker's collection-update payload.

        Creates one update-history record per updated/failed collection and
        creates the new video records linked to those histories.

        :param updated_collections: list of dicts with keys 'status',
            'collection_id' and, on success, 'collection_info'
            (or 'error_msg' on failure)
        """
        collection_obj = self.env['douyin.collection']
        history_obj = self.env['douyin.collection.update.history']

        new_video_vals = []
        new_history_vals = []
        # Build the update-history records.
        for updated_collection in updated_collections:
            status = updated_collection['status']
            if status == 'no_update':
                continue
            # A history record is created both for successful updates and for
            # errors.
            collection_id = updated_collection['collection_id']
            history_val = {
                'status': status,
                'collection_id': collection_id,
                'celery_task_id': self.id
            }
            if status == 'update':
                collection_info = updated_collection['collection_info']
                author_info = collection_info['author']
                # Creates the author record or updates it with author_info
                # (no separate update call needed afterwards).
                author_rec = self._handle_author_info(author_info)
                # 1. Prepare the values of the new videos.
                new_video_vals.extend(self._prepare_video_vals(
                    collection_info['videos'], author_rec.id, collection_id))
                # 2. Update the collection. The author field is (re)written
                #    here because collection-creation tasks also go through
                #    this flow and start with an empty author field.
                collection_obj.browse(collection_id).update(dict(
                    **collection_info['mix_info'], author_id=author_rec.id
                ))
            if status == 'error':
                history_val.update({'note': updated_collection['error_msg']})
            new_history_vals.append(history_val)
        history_recs = history_obj.create(new_history_vals)

        # Link every new video to the history record of its collection.
        collection_history_map = dict(history_recs.mapped(lambda r: (r.collection_id.id, r.id)))

        for new_video_val in new_video_vals:
            new_video_val.update({
                'collection_update_history_id': collection_history_map[new_video_val['collection_id']]
            })

        self.env['douyin.video'].create(new_video_vals)

    def _prepare_video_vals(self, videos, author_id, collection_id):
        """Turn the worker's raw video dicts into douyin.video create values.

        ``create_time`` arrives as a unix timestamp and is converted to a
        datetime. The input dicts are left unmodified.

        :param videos: list of raw video dicts from the worker
        :param author_id: douyin.author id to link the videos to
        :param collection_id: douyin.collection id to link the videos to
        :return: list of value dicts ready for douyin.video.create()
        """
        new_video_vals = []
        for video in videos:
            vals = dict(video, author_id=author_id, collection_id=collection_id)
            vals['create_time'] = datetime.datetime.fromtimestamp(vals['create_time'])
            new_video_vals.append(vals)
        return new_video_vals

    def _handle_author_portal_videos(self, videos):
        """Filter/reorder the scraped portal videos per the task's manual
        update configuration.

        * 'range': concatenate the configured [start, end) slices (end == 0
          means "to the end of the list"), each optionally reversed, in
          append_sequence order.
        * 'delete': drop the configured [start, end) index ranges, then
          reverse the remainder.

        :param videos: list of raw video dicts
        :return: processed list of video dicts
        """
        result = []
        if self.manual_update_type == 'range':
            for line in self.manual_update_info_ids.sorted(key=lambda r: r.append_sequence):
                # Indexes are human-reviewed, so they are assumed valid.
                end = line.end
                start = line.start
                if not end:
                    # end == 0: take everything from start to the end of the
                    # list.
                    segment = videos[start:]
                else:
                    segment = videos[start:end]
                if line.reverse:
                    segment.reverse()
                result.extend(segment)
        elif self.manual_update_type == 'delete':
            exclude_episodes = set()
            for line in self.manual_update_info_ids:
                exclude_episodes |= set(range(line.start, line.end))
            for index, video in enumerate(videos):
                if index not in exclude_episodes:
                    result.append(video)
            # Reverse the remaining videos.
            result.reverse()
        return result

    def celery_author_portal_callback(self, callback_args):
        """Handle the author-portal fetch callback.

        For collections the author has not registered, the mix_id may need
        adjusting.

        :param callback_args: ``{'videos': [...], 'author': {...}}``
        """
        collection = self.manual_update_collection_id
        collection_id = collection.id
        author_rec = self._handle_author_info(callback_args['author'])
        # A manual update always counts as an update once the task succeeds;
        # failed tasks never reach this point.
        history_val = {
            'status': 'update',
            'collection_id': collection_id,
            'celery_task_id': self.id
        }
        # Process the scraped video list.
        videos = self._handle_author_portal_videos(callback_args['videos'])
        history_rec = self.env['douyin.collection.update.history'].create(history_val)
        new_video_vals = self._prepare_video_vals(
            videos,
            author_rec.id,
            collection_id
        )
        # Videos not yet merged into a collection carry no episode number;
        # derive it from the collection's current episode count.
        updated_to_episode = collection.updated_to_episode
        for index, new_video_val in enumerate(new_video_vals, start=1):
            new_video_val.update({
                'collection_update_history_id': history_rec.id,
                'current_episode': updated_to_episode + index
            })
        self.env['douyin.video'].create(new_video_vals)

        # Fake-collection handling: 1. attach the author, 2. use the first
        # video's cover as the collection cover (guarded against an empty
        # processed list).
        if self.task_type == 'create_fake_collection' and videos:
            collection.author_id = author_rec.id
            collection.cover_url = videos[0]['cover']
        if self.task_type == 'collection_manual_update' and not collection.is_fake_collection:
            collection.is_manual_update = True