# coding=utf8
import datetime
import os
import re
import time
import pickle
from io import BytesIO

import jieba
import requests
import zbarlight
from PIL import Image
from bs4 import BeautifulSoup
from bs4.element import Tag
from bson import ObjectId

from libs import config
from libs.dbs import model_redis
import libs.NavieBayes as naiveBayes
from libs.help import str_to_time, get_root_path, time_to_str, content_to_imme, md5sum
from libs.selenium import driver
from models.resource.classify import add_nx_classify, model_res_classify
from models.resource.crawl import model_res_crawl
from models.resource.media import model_res_media

# Root directory for training data and de-dup bookkeeping files.
base_path = get_root_path('data/training')
# Load the pre-trained naive-Bayes model used to detect ad paragraphs.
vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam = naiveBayes.getTrainedModelInfo()
# MD5 digests of images already stored; used to skip duplicate images.
img_md5_coll = set()
with open(base_path + 'ImgCollection.txt', 'r+', encoding='utf8') as f:
    for line in f.read().split("\n"):
        if line != '':
            img_md5_coll.add(line)
with open(base_path + 'ImgCollection.txt', 'w', encoding='utf8') as f:
    f.write("\n".join(img_md5_coll))  # rewrite the file, now de-duplicated

# Grabs the first run of digits (read counts, "N hours ago" offsets).
search_num = re.compile(r'(\d+)')
NUM = 100  # minimum read count for an article to be kept

# todo add time-based filtering
# Redis key templates, parameterized by classify id.
lastest_key = 'WxNews:crawl:sougou:{cid}:lastest_time'
crawl_list_key = 'WxNews:crawl:sougou:{cid}'


def get_more():
    """Click Sogou's "look more" button to load additional list items.

    Returns:
        True when the button was found and clicked (more items may exist),
        False when it is absent (the full list has been loaded).
    """
    print("get more")
    try:
        more_btn = driver.find_element_by_id('look-more')
        more_btn.click()
    # Narrowed from a bare ``except:`` which also swallowed SystemExit /
    # KeyboardInterrupt; the expected failure is selenium's
    # NoSuchElementException when the button is gone.
    except Exception:
        print("获取完毕,开始疯狂的读取文章")
        return False
    return True


def filter_ads(contents):
    """Return True if any of the given text fragments classifies as an ad."""
    for fragment in contents:
        # Tokenize with jieba before feeding the naive-Bayes classifier.
        tokens = list(jieba.cut(fragment))
        if naiveBayes.classify(vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam, tokens):
            print("广告:" + fragment)
            return True
    return False


def filter_img(img):
    """Return True when the image bytes contain a QR code (caller drops it)."""
    image = Image.open(BytesIO(img))
    # scan_codes returns None when no QR code is found.
    return zbarlight.scan_codes('qrcode', image) is not None


def save_to_local(image, image_url, content_type, content_id, idx):
    """Store image bytes under statics/upload_image/<date>/<content_id>/ and
    push the original URL onto the media document's ``Source`` list.

    Args:
        image: raw image bytes.
        image_url: original remote URL of the image.
        content_type: HTTP Content-Type header, used to pick the extension.
        content_id: ObjectId of the media document.
        idx: sequence number of this image inside the article.

    Returns:
        The public URL of the stored image on the image server.
    """
    img_server = config['ImageServer']['host']
    static_path = 'upload_image/' + time_to_str("%Y%m%d") + '/' + str(content_id)
    root_static_path = get_root_path("statics/" + static_path)
    # exist_ok already makes this idempotent; the previous os.path.exists
    # pre-check was redundant and race-prone.
    os.makedirs(root_static_path, exist_ok=True)
    # Record where the image originally came from.
    model_res_media.coll.update_one({'_id': content_id}, {
        '$push': {
            'Source': image_url
        }
    })
    # Extension derived from the HTTP Content-Type.
    file_name = str(idx) + content_to_imme(content_type)
    # NOTE(review): assumes get_root_path returns a path ending in '/' —
    # confirm, otherwise the file name fuses with the directory name.
    with open(root_static_path + file_name, 'wb') as f:
        f.write(image)
    return img_server + '/' + static_path + '/' + file_name


def correct_content(soup):
    """Clean a WeChat article body in place: drop ad paragraphs, QR-code and
    duplicate images, mirror the remaining images locally, and keep only
    Tencent Video (v.qq.com) iframes.

    Args:
        soup: BeautifulSoup tree of the article's js_content div.

    Returns:
        (original_html, cleaned_html) as strings.
    """
    # soup = BeautifulSoup(content, 'lxml')
    source = str(soup)
    content_id = ObjectId()
    # Walk the paragraphs and filter out advertisement text.
    # Compatibility: some pages wrap everything in a single container node.
    if len(soup.contents) < 3:
        body = soup.contents[0].children
    else:
        body = soup.children
    for p in body:
        if not isinstance(p, Tag):
            continue
        # todo improve sentence splitting
        if filter_ads(p.get_text().split()):
            p.decompose()
            continue
    # Process images.
    i = 0
    for img in soup.find_all('img'):
        # WeChat lazy-loads images: the real URL lives in data-src.
        img_url = img['data-src']
        # Compatibility: strip the webp hint, webp is not handled.
        img_url = img_url.replace('&tp=webp', '')
        r_img = requests.get(img_url)
        # Drop images that contain a QR code.
        if filter_img(r_img.content):
            img.decompose()
            continue
        # Skip images already stored (md5 de-dup against img_md5_coll).
        img_md5 = md5sum(BytesIO(r_img.content))
        if img_md5 in img_md5_coll:
            img.decompose()
            continue
        # Store locally; create the media document lazily on the first kept
        # image so empty articles get no document.
        if i == 0:
            model_res_media.coll.insert_one({
                '_id': content_id,
                'Update': time.time()
            })
        img['src'] = save_to_local(r_img.content, img_url, r_img.headers['Content-Type'], content_id, i)
        i += 1
    # Filter iframes: keep only Tencent Video embeds, activating their src.
    for iframe in soup.find_all('iframe'):
        iframe_url = iframe['data-src']
        if 'v.qq.com' not in iframe_url:
            iframe.decompose()
        else:
            iframe['src'] = iframe_url
    return source, str(soup)


def get_content(item):
    """Fetch item['Url'], extract the title and cleaned body, and merge them
    into *item*. Returns the enriched item, or False on any failure."""
    try:
        page = requests.get(item['Url']).text
        soup = BeautifulSoup(page, 'lxml')
        print("爬取:" + item['Url'])
        # title = driver.find_element_by_id('activity-name').text
        # content = driver.find_element_by_id('js_content').get_attribute('innerHTML')
        title = soup.find(id='activity-name').get_text(strip=True)
        source, content = correct_content(soup.find(id='js_content'))
        item.update({
            'Title': title,
            'Content': content,
            'PageSource': source
        })
        return item
    except Exception as e:
        # Best-effort: a failed article is logged and skipped by the caller.
        print(e)
        return False


def get_list(idx):
    """Scrape the article list of classify tab *idx* from the Sogou page.

    Keeps clicking "look more" until the list is fully expanded, then
    collects every article whose read count is at least NUM.

    Returns:
        A list of dicts with Source/Thumb/Url/TimeStr/TimeStamp keys.
    """
    # Expand the list fully before scraping it.
    while True:
        if not get_more():
            break
        time.sleep(1)
    list_div = driver.find_element_by_id('pc_' + str(idx) + '_d')
    articles = []
    for li in list_div.find_elements_by_tag_name('li'):
        thumb = li.find_element_by_class_name('wx-img-box').find_element_by_tag_name('img').get_attribute('src')
        content_div = li.find_element_by_class_name('wx-news-info2')
        content_href = content_div.find_element_by_tag_name('a').get_attribute('href')
        info_div = content_div.find_element_by_class_name("s-p").text.split()
        read_num = int(search_num.search(info_div[-2]).group(0))
        if read_num < NUM:
            continue
        # Bug fix: reset per article. Previously an article with no non-empty
        # <p> inherited the previous iteration's source (or raised NameError
        # on the very first article).
        source = ''
        for p in li.find_element_by_class_name('pos-wxrw').find_elements_by_tag_name('p'):
            if p.text != '':
                source = p.text
                break
        # The page only exposes an hour-granularity age ("N 小时前").
        time_diff = int(search_num.search(info_div[-1]).group(0))
        time_str = (datetime.datetime.now() - datetime.timedelta(hours=time_diff)).strftime("%Y-%m-%d %H:%M")
        time_stamp = str_to_time(time_str, format='%Y-%m-%d %H:%M')
        articles.append({
            'Source': source,
            'Thumb': thumb,
            'Url': content_href,  # todo this url expires
            'TimeStr': time_str,
            'TimeStamp': time_stamp,
        })
    return articles


def get_classify_from_page():
    """Collect the usable classify tabs from the Sogou home page.

    Skips the "热门"/"推荐"/"段子手" tabs, stops at "更多", and returns a
    list of (tab_text, tab_element, tab_index) tuples.
    """
    classify_ul = driver.find_element_by_id('wx-tabbox-ul')
    tabs = []
    for position, anchor in enumerate(classify_ul.find_elements_by_tag_name('a')):
        label = anchor.text
        if label in ("热门", "推荐", "段子手"):
            continue
        if label == '更多':
            break
        tabs.append((label, anchor, position))
    return tabs


def load_article_form_redis(cid):
    """Yield unpickled articles popped from this classify's Redis crawl
    list until the list is empty."""
    key = crawl_list_key.format(cid=cid)
    # iter(callable, sentinel): keep popping until rpop returns None.
    for raw in iter(lambda: model_redis.rpop(key), None):
        # NOTE(review): pickle.loads on queue data — safe only while the
        # queue is written exclusively by main(); confirm.
        yield pickle.loads(raw)


def get_articles():
    """Drain every classify's Redis queue, fetch each article's content and
    persist the enriched items into the crawl collection."""
    for classify in model_res_classify.coll.find():
        cid = str(classify['_id'])
        for queued in load_article_form_redis(cid):
            article = get_content(queued)
            # get_content returns False on failure; skip those items.
            if not article:
                continue
            article.update({
                'ClassifyName': classify['ClassifyName'],
                'ClassifyId': classify['_id']
            })
            model_res_crawl.coll.insert_one(article)
            print("新增成功1个")


def main():
    """Crawl Sogou WeChat news: enumerate classify tabs with selenium and
    queue their article lists in Redis, then fetch the full articles."""
    driver.get("http://weixin.sogou.com")
    # Tabs come in two layers; only the directly clickable first layer is
    # used here (the second layer requires hovering over "more").
    for classify_name, classify_btn, idx in get_classify_from_page():
        print("分类名:" + classify_name)
        classify_id = add_nx_classify(classify_name)
        classify_btn.click()  # load this tab's news list
        print("开始获取list")
        queue_key = crawl_list_key.format(cid=str(classify_id))
        for article in get_list(idx):
            model_redis.lpush(queue_key, pickle.dumps(article))
    print("分类全部读取完毕,开始读取文章")
    driver.close()
    # Switch from selenium to plain requests for the article bodies.
    get_articles()


# Script entry point: run the full selenium + requests crawl pipeline.
if __name__ == '__main__':
    main()
    # get_articles()
