#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
新版周报中的qcc源部分维度数据统计
(Weekly-report statistics for selected dimensions of the qcc source data,
used by the new-version weekly report.)
'''

import json
import sys
import subprocess
import os
import re
import psutil
import time
import requests
from copy import deepcopy
from datetime import datetime, timedelta
from pybloom_live import ScalableBloomFilter
from apscheduler.schedulers.blocking import BlockingScheduler

from base import ExportAndTjBase
from utils import Utils
from models import MongoQueryConfig, RobotConfig
from robot import DingtalkRobot

reload(sys)
sys.setdefaultencoding('utf-8')


class ExportAndTj(ExportAndTjBase):
    week_min, week_max = Utils.get_date_range(ndays=7)

    def __init__(self, *args, **kwargs):
        super(ExportAndTj, self).__init__(*args, **kwargs)

    def handle_one_mongo_data(self, record_dic, result_dic=None):
        contact = record_dic.get('contact')
        contactType = record_dic.get('contactType')
        nameId = record_dic.get('nameId')
        update_time = record_dic.get('import_update_time')
        create_time = record_dic.get('create_time')
        result_dic['week_count'] += 1
        must_exists_values = [update_time,create_time]  # 这些字段都必须是非空值
        must_has_one_exists_values = []  # 这些字段必须最少有一个是非空值
        if not self.is_data_legal(must_exists_values, must_has_one_exists_values):
            return result_dic
        else:
            if self.week_min <= create_time <= self.week_max:
                result_dic['week_add_count'] += 1
                # 库内联系方式与映射企业数去重
                if contactType == 0:
                    result_dic['mobile_add_count'] += 1
                    if nameId:
                        _id = '{}{}'.format(nameId, contact)
                        result_dic['mobile_and_nameId_add_count'] += 1
                        result_dic['mobile_and_nameId_add_dup'].add(_id)
                        contact_count = self.contact_col.find_one({'nameId':nameId,'contact':contact})
                        if contact_count:
                            result_dic['into_contact_table_count'] += 1
            elif self.week_min <= update_time <= self.week_max:
                result_dic['week_update_count'] += 1
                # 库内联系方式与映射企业数去重
                if contactType == 0:
                    result_dic['mobile_update_count'] += 1
                    if nameId:
                        _id = '{}{}'.format(nameId, contact)
                        result_dic['mobile_and_nameId_update_count'] += 1
                        result_dic['mobile_and_nameId_update_dup'].add(_id)
                        # contact_count = self.contact_col.find_one({'nameId':nameId,'contact':contact})
                        # if contact_count:
                        #     result_dic['into_contact_table_count'] += 1
        return result_dic


    def run(self):
        # save_result_dic可以放入类变量中，避免传参调用导致逻辑混乱
        save_result_dic = {
            'week_count': 0,  # 入bidding表总条数
            'week_add_count': 0,  # bidding表新增条数
            'week_update_count': 0,  # bidding表更新条数

            'mobile_add_count': 0,  # 新增手机号条数
            'mobile_update_count': 0,  # 更新手机号条数
            'mobile_and_nameId_add_count': 0,  # 有手机和nameID新增总数
            'mobile_and_nameId_update_count': 0,  # 有手机和nameID更新总数
            'mobile_and_nameId_add_dup': Utils.bloom_filter_fac(),  # 有手机和nameID新增去重数
            'mobile_and_nameId_update_dup': Utils.bloom_filter_fac(),  # 有手机和nameID更新去重数
            'into_contact_table_count': 0,  # 入联系方式表数量

        }
        config = MongoQueryConfig.dsf_s4_tj_config(self.week_min,self.week_max)
        result_dic = self.get_data_from_mongo(config, save_result_dic)
        with open('/mnt5/contact_team/dsf/week_data/qcc','w') as f:
            json_res = Utils.serializable_bloom(deepcopy(result_dic))
            json.dump(json_res,f,ensure_ascii=False,indent=4)
        # 机器人相关
        url =  RobotConfig.dsf_s2_web_hook_url
        title = 'qcc联系方式入库监控'
        robot = DingtalkRobot(self.write_robot_f)
        mk_text = self.make_mk_text(result_dic)
        robot.main(url, title=title, mk_text=mk_text)

    def make_mk_text(self, result_dic):
        '''
        通过统计结果dict 拼接出mk_text
        @param result_dic: 统计结果dict
        @return: 发给钉钉的内容  str
        '''
        mk_text = '''  \n****  
                    **入contact_tmp表条数：{}**  
                    **contact_tmp表新增条数：{}**  
                    **contact_tmp表更新条数：{}**  
                    **contact_tmp表新增手机号条数：{}**  
                    **contact_tmp表更新手机号条数：{}**  
                    **contact_tmp表有手机和nameID新增条数：{}**  
                    **contact_tmp表有手机和nameID更新条数：{}**  
                    **contact_tmp表有手机和nameID新增去重数：{}**  
                    **contact_tmp表有手机和nameID更新去重数：{}**  
                    **contact_tmp表入联系方式表手机数：{}**  
                    '''.format(result_dic['week_count'], result_dic['week_add_count'],
                               result_dic['week_update_count'],
                               result_dic['mobile_add_count'],
                               result_dic['mobile_update_count'],
                               result_dic['mobile_and_nameId_add_count'],
                               result_dic['mobile_and_nameId_update_count'],
                               result_dic['mobile_and_nameId_add_dup'].count,
                               result_dic['mobile_and_nameId_update_dup'].count,
                               result_dic['into_contact_table_count'],

                               )

        return mk_text


def run(abs_file_name):
    export_and_tj = ExportAndTj(abs_file_name)
    export_and_tj.main()


def scheduler_run(abs_file_name):
    scheduler = BlockingScheduler()
    scheduler.add_job(func=run, trigger='cron',
                      args=(abs_file_name,),
                      # day_of_week="sun",
                      hour='20', minute='52', id='dd_run')
    scheduler.start()


if __name__ == '__main__':
    file_name = '企查查周报统计'
    # py文件名+file_name+小时时间
    abs_file_name = Utils.generate_abs_path(file_name, current_py_path=__file__,name='dsf')

    run(abs_file_name)
    # scheduler_run(abs_file_name)
