#!/usr/bin/env python
# encoding: utf-8
# author: 04

import copy
import time
import datetime
import json

from django.core.cache import cache
from django.db import close_old_connections, transaction
from django.db.models import F
from django_mysql.utils import index_name
from main import _script_head  # noqa
from main import config
from main.apps.purification import models as pr_models
from main.common.audit_queue import PurifyMainQueue
from main.common.redis.redis_cli import RedisClient, RedisClientOrderContent
from main.common.redis.redis_keys import PURIFY_SUBORDERGEN_INSTANT_KEY
from main.common.utils.utils import model_to_dict_by_fields
from main.models.data_map import (AUDIT_ORDER_INITIAL, AUDIT_ORDER_SPLIT,
                                  AUDIT_TODO, CONTENT_TYPE_CHOICES, ON,
                                  URGENCY_CHOICE)
from main.schedule.async_tasks.update_to_audit_num import \
    get_timeout_auditing_suborder_from_map
from main.service.base import BaseService
from rest_framework import serializers


class PurifyOrderSerializer(serializers.ModelSerializer):
    """Serializes an AuditOrder into the flat payload pushed onto the
    purification main queue, pulling three fields across the
    customer_purifyconfig relation.
    """
    # Flattened from the related customer_purifyconfig record.
    urgency = serializers.IntegerField(source='customer_purifyconfig.urgency')
    need_verify_count = serializers.IntegerField(source='customer_purifyconfig.need_verify_count')
    # Id of the grand-parent PurifyConfig (customer_purifyconfig -> purifyconfig).
    purify_config_id = serializers.IntegerField(source='customer_purifyconfig.purifyconfig.id')

    class Meta:
        model = pr_models.AuditOrder
        fields = (
            'id',
            'urgency',
            'need_verify_count',
            'purify_config_id',
            'create_time',
            'content_type',
        )


class PurifySuborderGen(BaseService):
    """Standalone worker that splits purification audit orders into suborders
    and feeds them into the main Redis audit queue.

    Each ``task()`` iteration:
      1. recovers suborders that timed out while being audited (DB side),
      2. splits newly created orders into suborders,
      3. re-pushes suborders that landed in the recover queue.
    """

    MAX_QUEUE_LENGTH = 100000  # avoid splitting too many orders at once and overloading redis
    FILL_PER_STEP = 3000  # max orders fetched per (config, urgency, content_type) per loop
    loop_step = 5

    # Lock settings guard against concurrent instances outside dev.
    conflict_key = 'PurifySuborderGen' if config.ENV != 'dev' else ''
    lock_multi_duration = 2 if config.ENV != 'dev' else 100
    ENV_PRINT_PER_LOOP = 1

    def __init__(self):
        super().__init__()
        self.main_queue = PurifyMainQueue()
        # Short-lived redis marker used to detect duplicate splits (see do_split).
        self.deal_key_prefix = 'purify_suborder_split:'
        self.deal_key_ttl = 60 * 5
        self.client = RedisClient

    def task(self):
        """One scheduler iteration: recover from DB, split new orders, recover from queue."""
        if config.ENV not in ['test_ci', 'dev']:
            # https://www.jianshu.com/p/d2ba42feee15 — avoid "MySQL server has
            # gone away" after the process has been running for a long time.
            close_old_connections()

        self.recover_from_db()
        self.init_new_order()
        self.recover_from_queue()
        if self.loop_times % self.ENV_PRINT_PER_LOOP == 0:
            info = self.main_queue.len_from_all_data()
            self.logger.info(f'MainQueue: loop {self.loop_times} Now info: {json.dumps(info, indent=2)}')
        time.sleep(30)

    def init_new_order(self):
        def get_to_split_qs(purify_config, urgency_val, content_type_val):
            # Orders already handled by the instant (fast-path) splitter — skip them.
            r_key = PURIFY_SUBORDERGEN_INSTANT_KEY.format(
                purify_config=purify_config.id, urgency_val=urgency_val, content_type_val=content_type_val)
            handled_ids = [int(ii) for ii in RedisClientOrderContent.lrange(r_key, 0, -1)]

            customer_purifyconfig_ids = list(pr_models.CustomerPurifyConfig.objects.filter(
                purifyconfig_id=purify_config.id, urgency=urgency_val,
            ).values_list('id', flat=True))

            # Force the result_sync index: the initial-state scan is the hot query here.
            index_result_sync = index_name(pr_models.AuditOrder, 'result_sync')
            return pr_models.AuditOrder.objects.force_index(index_result_sync).filter(
                result_sync=AUDIT_ORDER_INITIAL,
                customer_purifyconfig_id__in=customer_purifyconfig_ids,
                content_type=content_type_val,
            ).exclude(id__in=handled_ids).select_related(
                'customer_purifyconfig'
            ).order_by('id')[:self.FILL_PER_STEP]

        return self._init_new_order(get_to_split_qs)

    def _init_new_order(self, get_to_split_qs):
        """Collect pending orders for every active config and split them into suborders.

        :param get_to_split_qs: callable(purify_config, urgency_val, content_type_val)
            returning the queryset of orders to split for that combination.
        """
        all_active_config = pr_models.PurifyConfig.objects.filter(status=ON)
        for purify_config in all_active_config:
            # Highest-priority ids pushed via cache are split first, with urgency forced to 4.
            ids = cache.get('MOSST_HIGH_LEVEL_IDS')
            if ids:
                customer_purifyconfig_ids = list(pr_models.CustomerPurifyConfig.objects.filter(
                    purifyconfig_id=purify_config.id,
                ).values_list('id', flat=True))

                order_qs = pr_models.AuditOrder.objects.filter(
                    result_sync=AUDIT_ORDER_INITIAL,
                    id__in=ids,
                    customer_purifyconfig_id__in=customer_purifyconfig_ids,
                ).select_related(
                    'customer_purifyconfig'
                ).order_by('id')[:self.FILL_PER_STEP]
                # Was bare print() debugging; route through the service logger instead.
                self.logger.info('MOSST_HIGH_LEVEL ids[:10]=%s order_qs.count()=%s',
                                 ids[:10], order_qs.count())
                self.split_datas(order_qs, purify_config, urgency_val=4)

            for urgency_val in dict(URGENCY_CHOICE).keys():
                for content_type_val in dict(CONTENT_TYPE_CHOICES).keys():
                    # NOTE: the MAX_QUEUE_LENGTH throttle was deliberately removed
                    # here to support up to 10 million pending audits.
                    order_qs = get_to_split_qs(purify_config, urgency_val, content_type_val)
                    self.split_datas(order_qs, purify_config)

    def split_datas(self, order_qs, purify_config, urgency_val=0):
        """Serialize and split each order in ``order_qs``.

        :param urgency_val: when truthy, overrides the order's urgency
            (used by the highest-priority path with urgency 4).
        """
        order_datas = PurifyOrderSerializer(order_qs, many=True).data
        self.logger.info(
            f'MainQueue: Initializing split {len(order_datas)} orders for config {purify_config.id}...')

        deal_count = 0
        for order in order_datas:
            # Re-read from the DB to pick up the freshest create_time / need_verify_count.
            # NOTE(review): order_obj is assumed non-None (the order was just queried);
            # a concurrent delete would raise here — confirm that is acceptable.
            order_obj = pr_models.AuditOrder.objects.filter(id=order['id']).annotate(
                need_verify_count=F('customer_purifyconfig__need_verify_count')).first()
            order.update(model_to_dict_by_fields(order_obj, fields=['create_time', 'need_verify_count']))
            if urgency_val:
                order['urgency'] = urgency_val
            if self.do_split(order):
                deal_count += 1
        self.logger.info(f'MainQueue: Initialize done[{deal_count}].')

    def do_split(self, order):
        """Create ``need_verify_count`` suborders for one order inside a transaction.

        Duplicate splits were observed in the past; a short-lived redis marker
        is used to detect and skip repeats.
        --- update ---
        The redis key is retained to keep observing the duplication.
        Re-querying after the transaction (in the test environment) never showed
        an unwritten state, so the suspicion was that the heavy update volume
        triggered some bug leaving the row in the initial state.
        --- update ---
        The duplicate-order issue has since been located elsewhere — not caused here.

        :return: True when the order was split and its state persisted.
        """
        redis_key = f"{self.deal_key_prefix}{order['id']}"
        if self.client.get(redis_key):
            # logger.warning replaces the deprecated Logger.warn alias.
            self.logger.warning(f"Found Repeat Split for {order['id']}.")
            return False
        verify_times = order['need_verify_count']
        _order = order
        with transaction.atomic():
            for _ in range(verify_times):
                # Each suborder gets its own copy: push_suborder/push_queue mutate it.
                order = copy.deepcopy(_order)
                suborder = self.push_suborder(order)
                self.push_queue(order, suborder.id)
            pr_models.AuditOrder.objects.filter(id=order['id']).update(result_sync=AUDIT_ORDER_SPLIT)
        self.client.set(redis_key, 1, ex=self.deal_key_ttl)
        # Re-read after the transaction to confirm the state actually hit the DB.
        verify_obj = pr_models.AuditOrder.objects.filter(id=order['id']).first()
        return verify_obj is not None and verify_obj.result_sync == AUDIT_ORDER_SPLIT

    def push_suborder(self, order):
        """Create one UserSubOrderAnswer for the order and return it.

        Also refreshes ``order`` in place with the order's current
        create_time / need_verify_count from the DB.
        """
        instance = pr_models.UserSubOrderAnswer.objects.create(
            order_id=order['id'],
        )
        order_obj = pr_models.AuditOrder.objects.filter(id=order['id']).annotate(
            need_verify_count=F('customer_purifyconfig__need_verify_count')).first()
        order.update(model_to_dict_by_fields(order_obj, fields=['create_time', 'need_verify_count']))

        # The suborder inherits the parent order's create_time.
        instance.create_time = order['create_time']
        instance.save(update_fields=['create_time'])
        return instance

    def push_queue(self, order, suborder_id):
        """Strip DB-only fields from the payload and push it to the main queue."""
        order['suborder_id'] = suborder_id
        del order['need_verify_count']
        del order['create_time']
        self.main_queue.put(order)

    def recover_from_queue(self):
        """Check orders drained from the recover queue and re-push still-pending
        suborders to the head of the main queue, so ignored orders are resent fast.
        """
        recover_data = self.main_queue.flush_recover_queue()
        if not recover_data:
            return
        recover_data_map = {i['suborder_id']: i for i in recover_data}
        try:
            undeal_order_qs = pr_models.UserSubOrderAnswer.objects.filter(
                id__in=recover_data_map.keys(),
                first_result=AUDIT_TODO,
            )
            self.logger.info(f'MainQueue: Recovering from queue {undeal_order_qs.count()} sub orders...')
            # Clear receive_time so payloads in the main queue never carry one.
            undeal_order_qs.update(receive_time=None, user=None)
            undeal_order_ids = undeal_order_qs.values_list('id', flat=True)
        except Exception:
            # Put the drained data back so nothing is lost, then surface the error.
            self.main_queue.rollback_recover_queue(recover_data)
            raise
        undeal_data = [recover_data_map[i] for i in undeal_order_ids]
        # Reversed + put_to_first keeps the original relative ordering at the queue head.
        for one in reversed(undeal_data):
            order_obj = pr_models.AuditOrder.objects.filter(id=one['id']).annotate(
                urgency=F('customer_purifyconfig__urgency'),
                purify_config_id=F('customer_purifyconfig__purifyconfig_id')).first()
            one.update(model_to_dict_by_fields(order_obj, fields=['create_time', 'urgency',
                                                                  'purify_config_id', 'content_type']))
            self.main_queue.put_to_first(one)
        self.logger.info('MainQueue: Recover from queue done.')

    def recover_from_db(self):
        """Re-push suborders stuck in 'to_audit' that were received but not
        answered within the timeout window (10 minutes).
        """
        # Renamed from the misleading `hour_1`: the window is 10 minutes, not 1 hour.
        cutoff = str(datetime.datetime.now() - datetime.timedelta(minutes=10))
        ids = get_timeout_auditing_suborder_from_map(cutoff)  # suborders currently auditing and timed out

        to_recover_qs = pr_models.UserSubOrderAnswer.objects.filter(
            id__in=ids,
            first_result='to_audit').select_related('order').order_by('id')
        # Evaluate COUNT(*) once instead of once per log line / branch.
        recover_count = to_recover_qs.count()
        self.logger.info(f'MainQueue: Recovering from db {recover_count} sub orders...')
        if not recover_count:
            return
        for suborder in to_recover_qs[:1000]:
            audit_data = PurifyOrderSerializer(suborder.order).data
            pr_models.UserSubOrderAnswer.objects.filter(id=suborder.id,
                                                        first_result='to_audit').update(receive_time=None)
            if not self.main_queue.is_order_register(audit_data['id']):
                self.logger.info('push_queue: %s', audit_data)
                self.push_queue(audit_data, suborder.id)
        self.logger.info('MainQueue: Recover from db done.')


if __name__ == "__main__":
    PurifySuborderGen().run()
