# -*- coding: utf-8 -*-
import multiprocessing
import time

import requests
from fake_useragent import UserAgent
from gne import GeneralNewsExtractor  # pip install --upgrade gne
from tqdm import tqdm

from base.base_parser import check_violate_keyword
from db.elastic_search import ES
from db.mongodb import MongoDB
from db.mysqldb import MysqlDB
from db.proxy_db import get_proxy
from utils.confident import site_checker
from utils.log import get_logger
from utils.own_tools import format_date, get_current_date

# Shared logger for the whole module.
# NOTE(review): hard-coded Windows log directory (trailing '\\' yields a
# doubled backslash) — confirm this matches the deployment host.
log = get_logger(name='update.log', path=r"D:\wuzheng\log\\")

# Module-level connection singletons shared by every function below.
es = ES()            # Elasticsearch sink for article documents
db = MongoDB()       # MongoDB source of scraped pages (WEB_search_info / WEB_urls)
mysql_db = MysqlDB() # MySQL source of the monitored-site list
ua = UserAgent()     # random User-Agent generator for re-fetching pages


def _check_proram_update_main(item):
    """Pool worker: run the site checker over one site dict, logging begin/end.

    :param item: dict with at least ``home_page`` and ``host`` keys, as built
                 by ``check_proram_update_main``.
    """
    home_page = item['home_page']
    log.warning('begin:' + home_page)
    site_checker(item, depath=3, fxck_oo=True)
    log.warning('end:' + home_page)


def save_es(host, oid, site_name):
    """Index every unread scraped page of *host* into Elasticsearch.

    For each ``WEB_search_info`` document without an ``es_read`` flag:
    re-download the page when its cached HTML is missing, extract
    title/author/publish-time/content with GNE, run the keyword-violation
    check, and index into ``tab_article_info`` (plus
    ``tab_article_recommend_info`` when a violation keyword was hit).
    Successfully indexed documents are marked ``es_read=1`` and their cached
    HTML is cleared; pages that cannot be fetched or parsed are deleted from
    Mongo so they are not retried forever.

    :param host: site host used to filter Mongo documents
    :param oid: site id, stored as ``SOURCE_ID``
    :param site_name: human-readable site name, stored as ``SOURCE_NAME``
    """
    # Per-site confidence threshold separating articles (0) from videos (1);
    # best-effort lookup — fall back to 200 when no override is configured.
    try:
        min_confidence = db.find('WEB_urls', {"host": host})[0]['min_confidence']
    except Exception:  # no WEB_urls row / missing field — use the default
        min_confidence = 200
    print('min_confidence:', min_confidence)

    todo_item_list = db.find('WEB_search_info', {"es_read": {'$exists': False}, "host": host})
    if not todo_item_list:
        print('no update')
        return

    now_time = get_current_date()

    for item in tqdm(todo_item_list):
        # Search-result listing pages carry no article content: mark read and skip.
        if '搜索结果' in item['title']:
            # BUGFIX: collection name was misspelled ('WEB_searcch_info'), so
            # listing pages were never flagged as read and were reprocessed on
            # every run.
            db.update('WEB_search_info', {"_id": item['_id']}, {"es_read": 1})
            continue

        # Derive a numeric id from two hex slices of the Mongo ObjectId.
        mongo_id = item['_id']
        mongo_id = int(str(mongo_id)[-6:], 16) + int(str(mongo_id)[2:8], 16)
        uuid = str(mongo_id) + '_3'

        # 0 = article, 1 = video (default), decided by the per-site threshold.
        program_type = 0 if item['Confidence'] < int(min_confidence) else 1

        try:
            if not item.get('html'):  # cached HTML missing — re-fetch (up to 30 tries)
                for _ in range(30):
                    try:
                        fake_headers = {'User-Agent': ua.random, 'Content-Type': 'text/html;charset=UTF-8',
                                        "referer": item['url']}
                        html = requests.get(item['url'], headers=fake_headers, proxies=get_proxy(),
                                            timeout=(30.1, 30.1),
                                            verify=False)
                        html.raise_for_status()
                        # ISO-8859-* detection usually means an undeclared Chinese page.
                        html.encoding = 'gbk' if html.apparent_encoding.startswith('ISO') else html.apparent_encoding
                        item['html'] = html.text.replace('\\/', '/')
                    except Exception:  # proxy/network errors are expected; just retry
                        pass
                    else:
                        break
            if not item.get('html'):
                # Explicit raise instead of `assert` (asserts vanish under -O);
                # handled below by deleting the unreachable page.
                raise ValueError('error page')

            result = GeneralNewsExtractor().extract(item['html'], host=item['url'])
        except Exception as e:
            # Unfetchable or unparsable page: drop it from the queue.
            print(e, item)
            db.delete('WEB_search_info', {"_id": item['_id']})

            continue

        # Only trust GNE publish times that look like a 20xx date.
        release_time = ''
        if result['publish_time'].startswith('20'):
            release_time = format_date(result['publish_time']) or ''
        # Known-garbage author values extracted from boilerplate/JS.
        if '微信群' in result['author'] or result['author'] == 'var' or '不存在' in result['author']:
            result['author'] = ''
        # First absolute image URL short enough for an ES keyword field (32766 bytes).
        image_url = ""
        for image_url_i in result['images']:
            if image_url_i.startswith("http") and len(image_url_i) < 32766:
                image_url = image_url_i
                break

        hit_type, violate_str, hit_word = check_violate_keyword(item['title'], result['content'])

        es_content_info = {
            'ID': mongo_id,
            'SOURCE_NAME': site_name,
            'SOURCE_ID': oid,
            'TITLE': item['title'],  # for Weibo TITLE is the body text; normally the headline
            'ARTICLE_URL': item['url'],

            'DATA_TYPE': '全部',  # data category (sports, entertainment, ... module name)
            'PROGRAM_TYPE': program_type,  # article type (0: article, 1: video; default 0)

            'VIDEO_URL': ','.join(item['video_url']) if item['video_url'] else '',
            'VIDEO_LOCAL_URL': 1,  # video storage location
            'UPLOAD_VIDEO_URL': '',

            'AUTHOR': result['author'],  # author
            'IMAGE_URL': image_url,

            'SUMMARY': '',
            'CONTENT': result['content'] if len(result['content']) > 200 else '',

            'RELEASE_TIME': release_time or now_time,
            'RECORD_TIME': now_time,

            'REVIEW_COUNT': -1,  # view count (numeric), default 0
            'COMMENT_COUNT': -1,  # comment count (numeric), default 0
            'UP_COUNT': -1,  # like count, default 0
            'TRANSMIT_COUNT': -1,  # repost count, default 0

            'INFO_TYPE': 3,  # 1-licensed 2-registered 3-unlicensed 4-app 5-weibo 6-wechat 11-douyin 12-kuaishou 13-toutiao
            'READ_STATUS': 0,  # read flag (default 0; 0: unread, 1: read)
            'IMAGE_CODE': 5,  # image-recognition status (default 5: normal)
            'VIOLATE_CHECK_TYPE': -1,
            'CHECK_STATUS': 0,  # violation review type (default -1)
            'CHECK_INFO': '',  # review info, default empty
            'EMOTION_TYPE': 2,  # sentiment, default neutral (2)
            'SITE_WEIGHT_VALUE': 5,  # site weight (default 5)

            'FINANCIAL_MEDIA_STATUS': 0,  # converged-media flag (1: yes, 0: no; per site MAIN_ID > 0)

            'VISIT_STATUS': 1,  # accessibility (1: ok, 2: expired, 3: deleted; default 1)
            'OUT_STATUS': 0  # default 0
        }

        if es.add('tab_article_info', es_content_info, uuid):
            if hit_type:
                # Violation hit: also push into the recommendation index with
                # the keyword-hit metadata attached.
                es_content_info.update({'KEYWORD_WEIGHT_VALUE': hit_word,
                                        'HIT_TYPE': hit_type,
                                        'ARTICLE_WEIGHT': 1,  # article weight (default 1)
                                        'TEXT_SECOND_KNOWLEDGE_ID': violate_str,
                                        # second-level keyword types hit by title/content:
                                        # tab_knowledge_keyword_info KNOWLEDGE_ID ("k<id>k", comma-separated)
                                        'CAPTION_SECOND_KNOWLEDGE_ID': '',
                                        # second-level types hit by caption transcript (default empty)
                                        'AUDIO_SECOND_KNOWLEDGE_ID': '',
                                        # second-level types hit by audio transcript (default empty)
                                        'FACE_STATUS': '',  # face recognition (1: positive, 2: negative, 0: no hit; default empty)
                                        'IMAGE_STATUS': ''  # image recognition status (1: normal, 2: sensitive)
                                        })
                if not es.add('tab_article_recommend_info', es_content_info, uuid):
                    print('error', es_content_info)
                    continue
            # Mark done and drop the cached HTML to keep Mongo small.
            db.update('WEB_search_info', {"_id": item['_id']}, {"es_read": 1, 'html': ''})
        else:
            # ES rejected the write — likely overloaded; back off before the next item.
            print("-----------------------es error-----------------------------------")
            time.sleep(30)


def save_main(exist_item):
    """Refresh confidence overrides, then push each site's pending docs to ES.

    :param exist_item: iterable of (home_page, host, oid, site_name) rows
                       from the MySQL site table.
    """
    set_video_confidence()
    for row in exist_item:
        _, host, oid, site_name = row
        print('save', host)
        save_es(host, oid, site_name)



def check_proram_update_main(exist_item):
    """Fan the per-site update check out over a 4-worker process pool.

    :param exist_item: iterable of (home_page, domain, oid, site_name) rows.
    """
    print('待检查url数量： ', len(exist_item))
    pool = multiprocessing.Pool(4)
    print("-----start-----")
    for home_page, domain, _oid, _site_name in exist_item:
        task = {'home_page': home_page, 'host': domain, 'license': '', 'icp': ''}
        pool.apply_async(_check_proram_update_main, (task,))
    # close()/join() (not `with`, which would terminate()) so queued tasks finish.
    pool.close()
    pool.join()

    print("-----end-----")


def set_video_confidence():
    """Hook for forcing ``Confidence`` on specific video URLs before saving.

    Intentionally a no-op. A previous implementation bulk-updated
    ``WEB_search_info`` documents matching a hard-coded site/URL pattern to
    ``Confidence=200``; re-add such an update here when a site needs it.
    """
    return None


if __name__ == '__main__':
    # Fetch all actively monitored, validated type-3 video sites.
    query = (
        "SELECT URL,DOMAIN,ID,site_name FROM TAB_VIDEO_SITE_INFO "
        "WHERE SITE_TYPE = 3 and DISPLAY_STATUS=1 and CHECK_TYPE=1 and CHECK_STATUS=1 AND MONITOR_STATUS =1 AND IS_VALIDITY =0 "
    )
    sites = mysql_db.find(query)
    print(sites)

    # Flush anything already scraped, re-crawl every site, then flush again.
    save_main(sites)
    check_proram_update_main(sites)
    save_main(sites)
