import logging

from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.safe_eval import safe_eval

# Module-level logger, named after this module per Odoo convention.
_logger = logging.getLogger(__name__)

# Selection values for ``collection_type``: technical code -> display label
# (沙雕动画 = meme animation, 漫画解说 = comic narration, 有声小说 = audiobook).
COLLECTION_TYPE = [
    ('sd', '沙雕动画'),
    ('mh', '漫画解说'),
    ('xs', '有声小说')
]


class DouyinCollection(models.Model):
    """Douyin collection (合集).

    Groups the episodes (``douyin.video``) of a series, tracks how many
    episodes have been fetched ("updated") and how many were merged into
    long-form videos (``douyin.merge``), and drives the celery update
    tasks for the collection.
    """
    _name = 'douyin.collection'
    _description = 'Douyin Collection'
    _order = 'create_time desc'
    _rec_name = 'alias'

    # Usage:
    # * lets the collection-update job skip fake collections; a fake
    #   collection (created manually in the Odoo backend, not by the
    #   author) needs manual review before being updated
    is_fake_collection = fields.Boolean(string='伪合集', help='作者未创建的合集，在Odoo后台手动创建')
    # Set after a manual task was run on a real collection; manually
    # updating a fake collection does not set this flag.
    is_manual_update = fields.Boolean(string='执行过手动任务')
    categ_id = fields.Many2one(comodel_name='douyin.collection.categ', string='视频类别')
    collection_type = fields.Selection(
        string='合集类型', selection=COLLECTION_TYPE
    )
    start_ep = fields.Integer(string='开始集数', default=0)
    channel_name = fields.Char(string='Discord频道名称')
    author_url = fields.Char(related='author_id.author_url')
    mix_name = fields.Char(string='合集名称', help='mix_name')
    mix_id = fields.Char(string='合集ID', help='mix_id', index=True)
    next_collection_id = fields.Many2one(comodel_name='douyin.collection', string='下一合集')
    is_finished = fields.Boolean(string='已完结', default=False)
    is_interrupt = fields.Boolean(string='断更', default=False)
    export_discord_info = fields.Boolean(string='导出Discord信息')
    create_time = fields.Datetime(string='创建时间', help='合集记录的创建时间', default=fields.Datetime.now)
    last_update_time = fields.Datetime(
        string='最后更新时间',
        compute='_compute_last_update_time',
        store=True
    )
    # Observed cover-link expiry behaviour (not an API guarantee):
    # visiting a video at 2023-04-01 14:33 yields a cover URL valid for
    # 14 days rounded up to the hour -> expires 2023-04-15 14:00; visiting
    # again at 16:33 moves the expiry to 2023-04-15 16:00. Each visit in a
    # new clock hour refreshes the link's validity.
    cover_url = fields.Char(string='合集封面', help='这个封面连接是有时效性的。伪合集使用的是第一集视频的封面')
    updated_to_episode = fields.Integer(string='当前集数', compute='_compute_updated_to_episode', store=True)
    merged_to_episode = fields.Integer(string='已合并集数', default=0)
    # When a merge range is chosen manually, its start/end must be given.
    manual_merge = fields.Boolean(string='手动指定合集')
    merge_start = fields.Integer(string='合集开始位置')
    merge_end = fields.Integer(string='合集结束位置')
    unmerged_to_episode = fields.Integer(
        string='未合并集数',
        compute='_compute_unmerged_to_episode',
        store=True
    )
    author_id = fields.Many2one(comodel_name='douyin.author', string='作者', ondelete='cascade')
    video_ids = fields.One2many(comodel_name='douyin.video', inverse_name='collection_id', string='视频')
    alias = fields.Char(string='别名')
    merge_ids = fields.One2many(comodel_name='douyin.merge', inverse_name='collection_id', string='合并的视频')
    hours = fields.Float(string='时长(小时)', compute='_compute_hours')
    collection_url = fields.Char(string='合集页面', compute='_compute_collection_url')
    tag_ids = fields.Many2many(comodel_name='douyin.tag', string='标签')
    play_count = fields.Integer(string='播放数量', compute='_compute_statistics', store=True)
    digg_count = fields.Integer(string='点赞数量', compute='_compute_statistics', store=True)
    average_play_count = fields.Integer(string='平均播放', compute='_compute_statistics', store=True)
    average_digg_count = fields.Integer(string='平均点赞', compute='_compute_statistics', store=True)
    celery_task_ids = fields.Many2many(comodel_name='celery.task', string='Celery Task')

    _sql_constraints = [
        ('mix_id_uniq', 'unique (mix_id)', 'Mix ID是唯一的')
    ]

    @api.depends('video_ids')
    def _compute_updated_to_episode(self):
        """Number of fetched episodes; valid for real and fake collections."""
        for rec in self:
            rec.updated_to_episode = len(rec.video_ids)

    @api.depends('video_ids', 'video_ids.play_count', 'video_ids.digg_count')
    def _compute_statistics(self):
        """Totals and per-episode averages of the play/digg counters.

        Every computed field is assigned on every record: the previous
        implementation skipped collections without videos, which makes
        Odoo raise a "compute method failed to assign" error.
        """
        for rec in self:
            video_count = len(rec.video_ids)
            plays = sum(rec.video_ids.mapped('play_count'))
            diggs = sum(rec.video_ids.mapped('digg_count'))
            rec.play_count = plays
            rec.digg_count = diggs
            rec.average_play_count = plays / video_count if video_count else 0
            rec.average_digg_count = diggs / video_count if video_count else 0

    @api.depends('merged_to_episode', 'updated_to_episode')
    def _compute_unmerged_to_episode(self):
        """Episodes fetched but not merged yet."""
        for rec in self:
            rec.unmerged_to_episode = rec.updated_to_episode - rec.merged_to_episode

    @api.depends('video_ids', 'video_ids.create_time')
    def _compute_last_update_time(self):
        """Creation time of the newest video (False when there is none)."""
        for rec in self:
            # Search DB-side so NULL create_time values are handled by the
            # database ordering rather than Python comparisons.
            last_video = self.env['douyin.video'].search(
                [('collection_id', '=', rec.id)], limit=1, order='create_time desc')
            rec.last_update_time = last_video.create_time

    @api.depends('is_fake_collection', 'mix_id', 'updated_to_episode',
                 'author_id', 'author_id.author_url')
    def _compute_collection_url(self):
        """Public page of the collection.

        A fake collection has no mix page on douyin.com, so the author's
        page is used instead. ``is_fake_collection`` was missing from the
        dependency list, which left a stale URL when the flag changed.
        """
        for rec in self:
            if rec.is_fake_collection:
                rec.collection_url = rec.author_id.author_url
            else:
                rec.collection_url = f'https://www.douyin.com/collection/{rec.mix_id}/{rec.updated_to_episode}'

    @api.depends('video_ids', 'video_ids.duration')
    def _compute_hours(self):
        """Total footage in hours; video ``duration`` is in milliseconds.

        Depending on ``video_ids.duration`` (not just the one2many) also
        recomputes the value when an existing video's duration changes.
        """
        for rec in self:
            rec.hours = round(sum(rec.video_ids.mapped('duration')) / 3600000, 2)

    def action_reload_collection(self):
        """Re-fetch the videos of the selected collections.

        Used when the episode ordering of a collection got corrupted:
        * deletes the previously fetched videos,
        * the episode counter falls back to 0 via its compute,
        * only real collections may be reloaded; reloading a fake
          collection requires manual review.

        :raises UserError: if the selection contains a fake collection.
        """
        if self.filtered('is_fake_collection'):
            raise UserError('重载的合集中包含伪合集，请重新选择')
        self.mapped('video_ids').unlink()
        self.with_context(task_type='collection_reload').action_update_collection()

    def action_update_collection(self):
        """Queue celery tasks updating the selected collections.

        Collections are chunked into tasks of at most 20 records; all
        tasks are attached to a single ``celery.task.sequence`` which is
        started at the end. The task type comes from ``task_type`` in the
        context (default ``collection_update``).
        """
        if not self:
            return
        task_type = self.env.context.get('task_type', 'collection_update')
        task_sequence = self.env['celery.task.sequence'].create({
            'sequence_type': task_type
        })
        batch_size = 20  # max collections handled by one celery task
        for offset in range(0, len(self), batch_size):
            batch = self[offset:offset + batch_size]
            self.env['celery.task'].create({
                'task_type': task_type,
                'sequence_id': task_sequence.id,
                'collection_ids': [(6, 0, batch.ids)]
            })
        task_sequence.action_run_sequence()

    def action_generate_merge(self):
        """Create merge records for every selected collection."""
        for rec in self:
            rec._generate_merge()

    def _open_exec_wizard(self, defaults):
        """Return the shared exec-wizard action with extra context defaults.

        :param defaults: dict of ``default_*`` context keys for the wizard
        :return: ``ir.actions.act_window`` description dict
        """
        self.ensure_one()
        action = self.env['ir.actions.act_window']._for_xml_id(
            'douyin_management.douyin_exec_wizard_action')
        action_context = safe_eval(action['context'])
        action_context.update(defaults)
        action['context'] = action_context
        return action

    def action_open_manual_update_wizard(self):
        """Open the manual-update wizard for this collection."""
        return self._open_exec_wizard({
            'default_manual_collection_id': self.id,
            'default_exec_type': 'collection_manual_update'
        })

    def action_export_ep_discord_wizard(self):
        """Open the wizard exporting the given episodes to Discord."""
        return self._open_exec_wizard({
            'default_export_collection_id': self.id,
            'default_exec_type': 'export_collection_ep_discord'
        })

    def action_open_query_video_episode_wizard(self):
        """Open the wizard locating the video a time offset belongs to."""
        return self._open_exec_wizard({
            'default_query_collection_id': self.id,
            'default_exec_type': 'query_video_episode'
        })

    def _create_merge(self, prepare_videos, start):
        """Create one ``douyin.merge`` covering ``prepare_videos``.

        :param prepare_videos: ``douyin.video`` recordset to merge
        :param start: episode number of the first video of the batch
        :return: episode number of the last video of the batch
        """
        end = start + len(prepare_videos) - 1
        merge = self.env['douyin.merge'].create({
            'start': start,
            'end': end,
            'collection_id': self.id
        })
        prepare_videos.update({'merge_id': merge.id})
        return end

    def _generate_merge(self):
        """Split unmerged episodes into merge batches of at most 9 hours.

        In manual mode (``manual_merge``) only the episodes in
        ``merge_start``..``merge_end`` are processed and
        ``merged_to_episode`` is left untouched; otherwise every episode
        past ``merged_to_episode`` is batched and the counter advances.

        :raises UserError: if everything is already merged.
        """
        if self.merged_to_episode == self.updated_to_episode:
            raise UserError(f'合集【{self.alias}】已经全部合并完成')

        if self.manual_merge:
            ready_merged_videos = self.video_ids.filtered(
                lambda r: self.merge_start <= r.current_episode <= self.merge_end)
            start = self.merge_start
        else:
            ready_merged_videos = self.video_ids.filtered(
                lambda r: r.current_episode > self.merged_to_episode)
            start = self.merged_to_episode + 1

        # Bug fix: ``end`` used to be unbound when no video qualified,
        # raising UnboundLocalError at the final assignment below. With
        # this initial value merged_to_episode is simply left unchanged.
        end = start - 1
        prepare_videos = self.env['douyin.video'].browse()

        # A produced merge must not exceed 9 hours of footage.
        max_secs = 32400
        left_secs = max_secs
        for video in ready_merged_videos:
            left_secs -= video.duration / 1000  # duration is in milliseconds
            if left_secs <= 0:
                # Flush the full batch (skip a degenerate empty merge when a
                # single video alone blows the budget) ...
                if prepare_videos:
                    end = self._create_merge(prepare_videos, start)
                    start = end + 1
                # ... and charge the carried-over video against the fresh
                # budget; the previous reset to the full budget let a batch
                # exceed the 9-hour cap by one video's duration.
                left_secs = max_secs - video.duration / 1000
                prepare_videos = self.env['douyin.video'].browse()
            prepare_videos |= video
        if prepare_videos:
            end = self._create_merge(prepare_videos, start)
        if not self.manual_merge:
            self.merged_to_episode = end
