# -*- encoding: utf-8 -*-
"""
@File    :   crawler_util.py
@Time    :   2020/07/13 16:16:16
@Author  :   Song Zewen 
@Version :   1.0
@Contact :   stg1205@163.com
@License :   (C)Copyright 2020-2021, Liugroup-NLPR-CASIA
@Desc    :   Crawler utility module (HTML fetching, tag stripping, content extraction)
"""


import requests
from requests.exceptions import *
import re
import json
import time


# CSS selectors locating the main-content element of each supported site,
# keyed by the short site name passed to content_extractor().
CONTENT_SELECTOR = {
    'github': '#readme',
    'csdn': '#article_content',
    'news163': '#endText',
    'jianshu': '.article',
    'douban': '#link-report'
}


# 统计一个div中的中文占比
def count_chinese(s):
    pattern = re.compile(u'[\u1100-\uFFFDh]+?')
    response = pattern.findall(s)
    chinese_num = len(response)
    fre = chinese_num / len(str(s))
    return chinese_num, fre
    
    
# 过滤一些tag
def filter_tags(htmlstr: str):
    re_doctype = re.compile('<![DOCTYPE|doctype].*>')
    re_nav = re.compile('<nav.+</nav>')
    re_cdata = re.compile('//<!\[CDATA\[.*//\]\]>', re.DOTALL)
    re_script = re.compile('<\s*script[^>]*>.*?<\s*/\s*script\s*>', re.DOTALL | re.I)
    re_style = re.compile('<\s*style[^>]*>.*?<\s*/\s*style\s*>', re.DOTALL | re.I)
    re_textarea = re.compile('<\s*textarea[^>]*>.*?<\s*/\s*textarea\s*>', re.DOTALL | re.I)
    re_br = re.compile('<br\s*?/?>')
    re_h = re.compile('</?\w+.*?>', re.DOTALL)
    re_comment = re.compile('<!--.*?-->', re.DOTALL)
    re_space = re.compile(' +')
    s = re_cdata.sub('', htmlstr)
    s = re_doctype.sub('', s)
    s = re_nav.sub('', s)
    s = re_script.sub('', s)
    s = re_style.sub('', s)
    s = re_textarea.sub('', s)
    s = re_br.sub('', s)
    s = re_h.sub('', s)
    s = re_comment.sub('', s)
    s = re.sub('\\t', '', s)
    s = re_space.sub(' ', s)

    # 转义字符
    s = re.sub('&gt', '>', s)
    s = re.sub('&lt', '<', s)
    s = re.sub('&amp', '&', s)
    s = re.sub('&quot', '"', s)
    s = re.sub('&copy', '©', s)
    s = re.sub('&reg', '®', s)

    return s


def get_html(url):
    """Fetch *url* and return the response body text, or None on failure.

    Retries up to 5 times on non-200 status, connection errors, timeouts,
    or any other error, sleeping 3 seconds between most attempts. Uses a
    desktop-browser user-agent and a 10-second request timeout.

    Returns:
        str: the decoded response body on HTTP 200, else None.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                      '(KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36 Edg/83.0.478.58'
    }

    for _attempt in range(5):
        try:
            response = requests.get(url, headers=headers, timeout=10)
            if response.status_code == 200:
                # encoding_info = chardet.detect(response.content)
                # response.encoding = encoding_info['encoding']
                return response.text
            print('Fail to get page', response.status_code)
            time.sleep(3)
        except ConnectionError:
            print('-------------------Connection error, wait 3 sec---------------------------')
            time.sleep(3)
        except ReadTimeout:
            print('-----------------Time is out!----------------------')
            # no extra sleep: the 10 s request timeout already waited
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Keep the best-effort retry.
            print('---------------------Unknown error!-------------------------')
            time.sleep(3)

    return None


# Fetch a JSON endpoint and return the parsed object (usually a dict).
def get_json(url):
    """Fetch *url* and parse the response body as JSON.

    Returns:
        The parsed JSON value, or None when the fetch failed or the body
        is not valid JSON (previously a malformed body raised
        json.JSONDecodeError and crashed the caller).
    """
    html = get_html(url)
    if not html:
        return None

    try:
        return json.loads(html)
    except json.JSONDecodeError:
        # Treat a non-JSON body the same as a failed fetch.
        return None


# Extract the main article text from a parsed page.
def content_extractor(soup, website):
    """Return the plain-text main content of a parsed page.

    Args:
        soup: a BeautifulSoup-like object supporting ``.select(css)``.
              (Assumed — SOURCE only shows the ``select`` call; confirm
              against callers.)
        website: key into CONTENT_SELECTOR ('github', 'csdn', 'news163',
                 'jianshu', 'douban').

    Returns:
        The first matching element's text with tags stripped via
        filter_tags(), or None when the site is unknown or the selector
        matches nothing. (Previously an unknown *website* raised KeyError.)

    Note: an earlier heuristic — pick the div with >300 Chinese characters
    and a Chinese ratio > 0.15 via count_chinese() — was abandoned as too
    slow; per-site CSS selectors are used instead.
    """
    selector = CONTENT_SELECTOR.get(website)
    if selector is None:
        return None

    matches = soup.select(selector)
    if not matches:
        return None

    return filter_tags(str(matches[0]))