#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
昨天
招聘表新增数据条数
有nameId且岗位描述条数
提取出联系方式条数
去重条数
入库条数

有nameId且recruitingContact条数
联系方式条数
去重条数
入库条数

由于统计与上传之间差了近2天，
所以，我们每天统计当天的数据保存到本地，并将dc上传，
并发送三天前的推送结果。

导出昨天一天入招聘表的数据
'''

import json
import sys
sys.path.append('/mnt/moonbrook/Moonbrook')
from aes import contact_aes, name_aes
import subprocess
import os
import re
import psutil
import time
import requests
from copy import deepcopy
from datetime import datetime, timedelta
from pybloom_live import ScalableBloomFilter

from apscheduler.schedulers.blocking import BlockingScheduler

from base import ExportAndTjBase
from utils import Utils
from models import MongoQueryConfig, RobotConfig
from robot import DingtalkRobot

# Python 2 hack: re-import sys to restore setdefaultencoding (site.py deletes
# it at startup) so implicit str/unicode conversions use UTF-8 instead of ASCII.
reload(sys)
sys.setdefaultencoding('utf-8')

def generate_venom_name():
    '''Build a unique venom data-file path from the current date and epoch seconds.'''
    now = time.time()
    date_part = time.strftime("%Y-%m-%d", time.localtime(now))
    epoch_part = str(now).split('.')[0]
    log_name = '/mnt5/dsf/recruiting/data/1506.log.{}-{}'.format(date_part, epoch_part)
    # Sleep one second so back-to-back calls get a distinct epoch suffix.
    time.sleep(1)
    print(log_name)
    return log_name


class ExportAndTj(ExportAndTjBase):
    '''Export yesterday's new recruiting-table records, extract and
    de-duplicate contact information, write result/history files, and
    push a statistics summary to the Dingtalk robot.
    '''

    # Query window covering the last 2 days (the statistics lag the upload
    # by ~2 days, per the module docstring).
    week_min, week_max = Utils.get_date_range(ndays=2)

    def __init__(self, *args, **kwargs):
        super(ExportAndTj, self).__init__(*args, **kwargs)
        self.annual_report_col = self.db.p_col("enterprise_annual_report")
        # 11 consecutive digits -> candidate mobile number inside free text.
        self.mobile_cmp = re.compile(r"\d{11}")
        self.desc_file_name = '/mnt5/dsf/recruiting/desc_recruiting_nameId_and_contact_{}'.format(self.week_min.strftime("%Y-%m-%d"))
        self.history_file = '/mnt5/dsf/recruiting/recruiting_history_{}'.format(self.week_min.strftime("%Y-%m-%d"))
        self.init_file_v1(self.file_name)

    def init_file_v1(self, abs_file_name, mod='w'):
        '''Open the two output files.

        :param abs_file_name: accepted for interface compatibility but unused;
            the actual paths are ``self.desc_file_name`` / ``self.history_file``
            set in ``__init__``.
        :param mod: open mode, default ``'w'`` (was previously hard-coded).
        '''
        self.desc_recruiting_nameId_and_contact_dc = open(self.desc_file_name, mod)
        self.recruiting_history_file = open(self.history_file, mod)

    def close_file_v1(self):
        '''Close the output files opened by ``init_file_v1``.'''
        self.desc_recruiting_nameId_and_contact_dc.close()
        self.recruiting_history_file.close()

    def handle_one_mongo_data(self, recruiting_record, save_result_dic=None):
        '''Process one recruiting record.

        Validates the required fields, extracts 11-digit mobile numbers from
        the job description, decrypts ``recruitingContact`` entries,
        de-duplicates both through bloom filters, writes output lines and
        updates the counters in ``save_result_dic``.

        :param recruiting_record: one raw document from the recruiting table.
        :param save_result_dic: mutable dict of counters and bloom filters
            (see ``run`` for the expected keys); mutated in place.
        :returns: the (mutated) ``save_result_dic``.
        '''
        nameId = recruiting_record.get('nameId')
        recruitingPageUrl = recruiting_record.get('recruitingPageUrl')
        recruitingDesc = recruiting_record.get('recruitingDesc')
        recruitingContact = recruiting_record.get('recruitingContact')
        recruitingSource = recruiting_record.get('recruitingSource')

        recruitingPageImage = recruiting_record.get('recruitingPageImage')
        update_time = recruiting_record.get('import_update_time')
        create_time = recruiting_record.get('create_time')

        save_result_dic['recruiting_table_count'] += 1
        must_exists_values = [nameId, recruitingPageUrl, recruitingSource]  # all of these must be non-empty
        must_has_one_exists_values = [recruitingContact, recruitingDesc]  # at least one of these must be non-empty
        if not self.is_data_legal(must_exists_values, must_has_one_exists_values):
            return save_result_dic

        has_contact_flag = False
        one_record_dic = {
            'nameId': nameId,
            'recruitingPageUrl': recruitingPageUrl,
            'recruitingPageImage': recruitingPageImage,
            'import_update_time': update_time,
            'create_time': create_time,
            'recruitingContact': recruitingContact,
            'recruitingSource': recruitingSource
        }
        if recruitingDesc:
            save_result_dic['nameId_and_desc'] += 1
            extract_mobile_ls = re.findall(self.mobile_cmp, recruitingDesc)
            if extract_mobile_ls:
                has_contact_flag = True
                one_record_dic.update({'extract_mobile_ls': extract_mobile_ls})
                save_result_dic['extract_desc_contact_count'] += len(extract_mobile_ls)
                for mobile in extract_mobile_ls:
                    # Original comment claimed add() returns True when the key
                    # did NOT exist before.
                    # NOTE(review): pybloom_live's ScalableBloomFilter.add()
                    # returns True when the key was ALREADY present -- if
                    # Utils.bloom_filter_fac() returns a raw filter this
                    # condition is inverted; confirm the wrapper's semantics.
                    is_new = save_result_dic['extract_desc_contact_dup'].add(mobile)
                    if is_new:
                        self.desc_recruiting_nameId_and_contact_dc.write('{}\t{}\n'.format(nameId, mobile))
                        self.desc_recruiting_nameId_and_contact_dc.flush()

        if recruitingContact:
            has_contact_flag = True
            save_result_dic['nameId_and_contact'] += 1
            save_result_dic['extract_contact_count'] += len(recruitingContact)
            for tmp_contact_dic in recruitingContact:
                contact = tmp_contact_dic.get('contact', '')
                contact = contact_aes.decrypt(contact)
                contactType = tmp_contact_dic.get('contactType', '')
                _id = "{}_{}".format(contact, contactType)
                # NOTE(review): same add()-return-value concern as above.
                is_new = save_result_dic['extract_contact_dup'].add(_id)
                if is_new:
                    self.desc_recruiting_nameId_and_contact_dc.write('{}\t{}\n'.format(nameId, contact))
                    # Flush here too, for consistency with the desc branch.
                    self.desc_recruiting_nameId_and_contact_dc.flush()
        if has_contact_flag:
            self.recruiting_history_file.write(json.dumps(Utils.decrypt_one_record(one_record_dic, need_strf_time=True), ensure_ascii=False) + '\n')
        else:
            self.write_error_f.write('this info no contact\n')
        return save_result_dic

    def run(self):
        '''Query the last 2 days of recruiting data, write the result files,
        push the statistics to the robot, then split the history file into
        venom chunks.
        '''
        # Counters / bloom filters threaded through handle_one_mongo_data.
        # (Could live on the instance to avoid parameter threading.)
        save_result_dic = {
            'recruiting_table_count': 0,  # rows inserted into the recruiting table yesterday
            'nameId_and_desc': 0,  # rows with nameId and a job description
            'extract_desc_contact_count': 0,  # contacts extracted from descriptions
            'extract_desc_contact_dup': Utils.bloom_filter_fac(),  # de-dup filter for extracted contacts
            'desc_into_contact_count': 0,  # rows written to storage

            'nameId_and_contact': 0,  # rows with nameId and recruitingContact
            'extract_contact_count': 0,  # contact entries seen
            'extract_contact_dup': Utils.bloom_filter_fac(),  # de-dup filter for contact entries
            'into_contact_count': 0,  # rows written to storage
        }
        config = MongoQueryConfig.dsf_s28_dc_config(self.week_min, self.week_max)
        result_dic = self.get_data_from_mongo(config, save_result_dic)
        self.close_file_v1()

        # Push the statistics summary to the Dingtalk robot.
        url = RobotConfig.dsf_s2_web_hook_url
        title = RobotConfig.dsf_s2_title
        robot = DingtalkRobot(self.write_robot_f)
        mk_text = self.make_mk_text(result_dic)
        robot.main(url, title=title, mk_text=mk_text)

        self._split_history_file()

    def _split_history_file(self):
        '''Split the history file into venom log files of at most 10000 lines each.'''
        with open(self.history_file, 'r') as f:
            ls = []
            for line in f:
                line = line.strip()
                if line:
                    ls.append(line)
                if len(ls) == 10000:
                    log_name = generate_venom_name()
                    with open(log_name, 'w') as _f:
                        _f.write('\n'.join(ls))
                    ls = []
            # Write the final partial chunk, if any.
            if ls:
                log_name = generate_venom_name()
                with open(log_name, 'w') as _f:
                    _f.write('\n'.join(ls))

    def make_mk_text(self, result_dic):
        '''Build the markdown text for the robot push from the statistics dict.'''
        result_dic = Utils.serializable_bloom(result_dic)
        mk_text = ''

        mk_text += '''  
                    **==========**  
                    **昨天招聘表符合条件入库数量：{}**  
                    **有nameId且岗位描述条数：{}**  
                    **提取联系方式数量：{}**  
                    **提取联系方式去重数：{}**  
                    **==========**  
                    '''.format(result_dic['recruiting_table_count'],result_dic['nameId_and_desc'],
                               result_dic['extract_desc_contact_count'],result_dic['extract_desc_contact_dup']
                               )

        return mk_text

def run(abs_file_name):
    '''Instantiate the exporter for the given output path and run it once.'''
    ExportAndTj(abs_file_name).main()


def scheduler_run(abs_file_name):
    '''Block forever, running `run` on a cron schedule: every 2nd day at 01:01.'''
    scheduler = BlockingScheduler()
    scheduler.add_job(
        func=run,
        trigger='cron',
        args=(abs_file_name,),
        day="*/2",
        hour='1',
        minute='1',
        id='dd_run_1',
    )
    scheduler.start()


if __name__ == '__main__':
    file_name = '招聘定时导出昨天入库数据'
    # Log path = py file name + file_name + hour timestamp
    # (presumably assembled by Utils.generate_abs_path -- confirm).
    abs_file_name = Utils.generate_abs_path(file_name, current_py_path=__file__, name='dsf')

    # run(abs_file_name)  # one-off manual run (bypasses the scheduler)
    scheduler_run(abs_file_name)
