#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2021/3/15 下午1:59
# @Author  : Samge
# html数据清洗
import re
import sys
from urllib.parse import urljoin
from bs4 import BeautifulSoup, Comment

from itkz.resources.utils import angle_brackets_util


def filter_tags(htmlstr):
    """Strip CDATA blocks, noisy tag attributes, HTML comments and <br>
    tags from an HTML string, then decode common character entities.

    :param htmlstr: raw HTML text
    :return: the cleaned text (``<br>`` becomes ``\\n``, consecutive blank
        lines are collapsed, entities such as ``&nbsp;`` are decoded)
    """
    # CDATA blocks of the form //<![CDATA[ ... //]]>  (raw strings fix the
    # invalid-escape DeprecationWarnings the old literals produced)
    re_cdata = re.compile(r'//<!\[CDATA\[[^>]*//\]\]>', re.I)
    s = re_cdata.sub('', htmlstr)

    # Strip whole attributes of the form `name="..."` when preceded by a
    # space.  NOTE(review): the 're_script' entry matches a literal
    # re_script="..." attribute -- it does NOT remove <script> elements and
    # looks like a typo; it is kept as-is to preserve existing behavior.
    for attr in ('re_script', 'style', 'class', 'vurl', 'orisrc', 'vsbhref',
                 'label', 'id', 'title', 'value', 'name'):
        s = re.sub(r'(?<= )%s=".*?"' % attr, '', s, flags=re.I)

    # Convert <br> / <br/> to newlines.
    s = re.sub(r'<br\s*?/?>', '\n', s)
    # Drop HTML comments.
    s = re.sub(r'<!--[^>]*-->', '', s)
    # Collapse runs of newlines into one.
    s = re.sub(r'\n+', '\n', s)
    # Decode character entities such as &nbsp; / &#60;.
    s = replaceCharEntity(s)
    return s


def replaceCharEntity(htmlstr):
    """Repeatedly decode HTML character entities in *htmlstr*.

    Handles a small table of named and numeric entities (``&nbsp;``,
    ``&#60;`` ...); unknown entities are deleted.  The scan restarts after
    every substitution, so nested encodings such as ``&amp;gt;`` are fully
    decoded to ``>``.
    """
    entity_table = {
        'nbsp': ' ', '160': ' ',
        'lt': '<', '60': '<',
        'gt': '>', '62': '>',
        'amp': '&', '38': '&',
        'quot': '"', '34': '"',
    }
    entity_re = re.compile(r'&#?(?P<name>\w+);')

    match = entity_re.search(htmlstr)
    while match is not None:
        # Unknown entity names fall back to the empty string.
        replacement = entity_table.get(match.group('name'), '')
        htmlstr = entity_re.sub(replacement, htmlstr, 1)
        # Restart from the beginning: a replacement may expose a new entity.
        match = entity_re.search(htmlstr)
    return htmlstr


def clean_attrs(soup, whitelist=None):
    """Strip tag attributes, keeping only whitelisted ones per tag name.

    *whitelist* maps a tag name to the list of attribute names to keep;
    a value of ``None`` keeps every attribute of that tag.  Tags whose
    name is absent from the whitelist lose all attributes.  The default
    whitelist keeps link targets and table sizing attributes only.

    :return: the same soup object, modified in place
    """
    if whitelist is None:
        whitelist = {
            'a': ['href'],
            'img': ['src'],
            'link': ['href'],
            'col': ['height', 'width'],
            'tr': ['height', 'width'],
            'td': ['height', 'width'],
        }
    for tag in soup.find_all(True):
        if tag.name not in whitelist:
            # Not whitelisted: drop every attribute.
            tag.attrs = {}
            continue
        allowed = whitelist[tag.name]
        if allowed is None:
            # Explicit None entry: keep all attributes untouched.
            continue
        # Keep only the attributes listed for this tag.
        tag.attrs = {k: v for k, v in tag.attrs.items() if k in allowed}
    return soup


def clean_attrs_with_black(soup, black_list=None):
    """Remove blacklisted attributes from every tag.

    *black_list* lists the attribute names to delete; ``[]`` keeps all
    attributes.  Defaults to removing ``style`` and ``class``.  Attributes
    with a falsy value are always dropped.  Additionally every ``<a>`` tag
    gains ``target="_blank"`` so links open in a new tab.

    :return: the same soup object, modified in place
    """
    if black_list is None:
        black_list = ['style', 'class']
    for tag in soup.find_all(True):
        # Keep attributes that carry a truthy value and are not blacklisted.
        kept = {k: v for k, v in tag.attrs.items()
                if v and k not in black_list}

        # 2021-04-13 requirement: <a> tags open in a new browser tab.
        if tag.name == 'a' and not kept.get('target'):
            kept['target'] = "_blank"

        tag.attrs = kept
    return soup


def clean_css_class(soup, clean_class_names):
    """Remove every element that carries one of the given CSS classes.

    :param soup: BeautifulSoup tree, modified in place
    :param clean_class_names: class names whose elements should be purged;
        a falsy value leaves the tree untouched
    :return: the same soup object
    """
    if not clean_class_names:
        return soup
    for tag in soup.find_all(True):
        if tag.attrs and len(tag.attrs.get('class') or []) > 0:
            _class = tag.attrs.get('class')
            for class_name in _class:
                if class_name in clean_class_names:
                    tag.decompose()
                    # Fix: stop after the first hit -- the original kept
                    # iterating and could call decompose() again on the
                    # already-destroyed tag.
                    break
    return soup


def clean_tags(soup):
    """Unwrap every tag that is not basic text/layout markup.

    Only the whitelisted tags survive; all other tags are replaced by
    their children (``unwrap`` keeps the content, removing the tag).

    :return: the same soup object, modified in place
    """
    keep = ('p', 'br', 'div', 'strong', 'img', 'table', 'thead', 'tbody',
            'tr', 'td', 'th', 'a', 'link', 'span')
    for element in soup.find_all(True):
        if element.name not in keep:
            element.unwrap()
    return soup


def clean_tags_with_black(soup):
    """Unwrap blacklisted tags (``script`` and ``style``).

    NOTE(review): ``unwrap()`` removes only the tag itself and keeps its
    children, so any script/style *text* stays in the document -- contrast
    with clean_extra(), which decompose()s these tags entirely.  Kept as-is
    to preserve existing behavior; confirm which is intended.

    :return: the same soup object, modified in place
    """
    for element in soup.find_all(True):
        if element.name in ('script', 'style'):
            element.unwrap()
    return soup


def clean_extra(soup):
    """Remove leftover junk nodes from the tree.

    Drops ``<img>`` tags without a ``src``, ``<script>``/``<style>``
    elements (content included), HTML comment nodes, and ``<p>``/``<div>``
    tags that contain nothing at all.

    :return: the same soup object, modified in place
    """
    # <img> without a src attribute renders nothing -- remove it.
    for img in soup.find_all('img'):
        if 'src' not in img.attrs:
            img.decompose()

    # Scripts and stylesheets are destroyed entirely, content included.
    for element in soup.find_all(['script', 'style']):
        element.decompose()

    # Strip HTML comment nodes.
    for note in soup.findAll(text=lambda text: isinstance(text, Comment)):
        note.extract()

    # Remove <p>/<div> containers with neither text nor children.
    for container in soup.find_all(['p', 'div']):
        if container.string is None and not container.contents:
            container.decompose()

    return soup


def pre_clean(text):
    """Strip every newline and space from *text* so stray whitespace
    cannot interfere with the tag-cleaning step that follows."""
    without_newlines = re.sub(r'\n', '', text) or ''
    return without_newlines.replace(' ', '')


def clean_text(text):
    """Apply special-case textual cleanup rules.

    (Non-generic, spider-specific rules belong in the spider itself.)
    Collapses repeated ``<br>``s, strips trailing ``<br>`` before ``</p>``,
    removes empty ``<p>``/``<div>`` blocks, and encodes single quotes.
    """
    # Rules are applied strictly in order; each is (pattern, replacement).
    rules = (
        (r'(<br/*>)+', '<br>'),          # collapse runs of <br>/<br/>
        (r'(<br/*>)+</p>', '</p>'),      # trailing <br> before a </p>
        (r'<p>(<br/*>)*</p>', ''),       # paragraph holding only <br>s
        (r'<div>(<br/*>)*</div>', ''),   # div holding only <br>s
        (r'<p>\s*</p>', ''),             # whitespace-only paragraph (\xa0 etc.)
        (r'<div>\s*</div>', ''),         # whitespace-only div
    )
    for pattern, replacement in rules:
        text = re.sub(pattern, replacement, text)

    # Encode single quotes as &apos;.
    return text.replace(u"'", u'&apos;')


def join_url(soup, origin_url, tags=None):
    """Resolve relative ``src``/``href`` links against *origin_url*.

    :param soup: BeautifulSoup tree, modified in place
    :param origin_url: base URL used to resolve relative links
    :param tags: tag names to fix; defaults to img/a/link/script
    :return: the same soup object

    Fix: the original test ``'http' not in url`` wrongly skipped any
    relative URL merely *containing* "http" (e.g. '/docs/http-guide.html').
    We now test the scheme prefix instead; urljoin() leaves already-absolute
    URLs untouched anyway.
    """
    if tags is None:
        tags = ['img', 'a', 'link', 'script']
    for tag in soup.find_all(tags):
        for attr in ('src', 'href'):
            if attr in tag.attrs and not tag[attr].startswith(('http://', 'https://')):
                tag[attr] = urljoin(origin_url, tag[attr])
    return soup


def join_url_by_response(soup, response, tags=None):
    """Resolve relative ``src``/``href`` links via ``response.urljoin``.

    Same contract as join_url(), but uses the scrapy response object to
    resolve links (which handles redirects/base tags better).

    :param soup: BeautifulSoup tree, modified in place
    :param response: scrapy response providing ``urljoin``
    :param tags: tag names to fix; defaults to img/a/link/script
    :return: the same soup object

    Fix: replaced the ``'http' not in url`` substring test (which wrongly
    skipped relative URLs containing "http") with a scheme-prefix check,
    consistent with join_url().
    """
    if tags is None:
        tags = ['img', 'a', 'link', 'script']
    for tag in soup.find_all(tags):
        for attr in ('src', 'href'):
            if attr in tag.attrs and not tag[attr].startswith(('http://', 'https://')):
                tag[attr] = response.urljoin(tag[attr])
    return soup


def clean_script(text):
    """
    Remove <script> elements from an HTML fragment.

    A bare URL (startswith 'http') is returned untouched; on any failure
    the original input is returned unchanged (best-effort cleaning).

    :param text: HTML text, a URL, or None
    :return: the cleaned HTML, or the original input on failure
    """
    try:
        if text and str(text).startswith('http'):
            return text
        soup = BeautifulSoup(text, 'lxml')
        for tag in soup.find_all(['script']):
            tag.decompose()
        # extract() may fail and return None; fall back to the source text.
        return extract(soup) or text
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; parse errors still return
        # the input unchanged.
        return text


def extract(soup):
    """Serialize the extracted soup tree to an HTML string.

    str() on a deep bs4 tree recurses once per node, so the process-wide
    recursion limit is raised first (note: this is a global side effect).

    :param soup: object exposing ``extract()`` (a bs4 tree)
    :return: ``str(soup.extract())``, or None when serialization fails
        (e.g. RecursionError on a pathologically deep tree)
    """
    try:
        # Raise the maximum recursion depth (CPython default is 1000).
        sys.setrecursionlimit(5000)
        return str(soup.extract())
    except Exception:
        # Fix: narrowed from a bare `except:`; the caller falls back to
        # its source text when None is returned.
        return None


def clean_data_default(text, origin_url=None, response=None):
    """
    Default pre-cleaning pipeline for an HTML page; customize separately
    when other cleaning rules are needed.

    The step order matters: angle brackets are protected before any
    whitespace stripping or parsing, and restored only at the very end.

    :param text: the HTML page text to clean
    :param origin_url: source URL, used to repair relative href/src links
    :param response: scrapy response; response.urljoin repairs links better

    :return: the cleaned HTML text, or None when text is None
    """
    if text is None:
        return None

    # Escape stray angle brackets so the parser does not mangle them;
    # they are restored just before returning.
    text = angle_brackets_util.protect_angle_brackets(text)

    text = pre_clean(text)

    # A bare URL is returned untouched -- nothing to clean.
    if text and str(text).startswith('http'):
        return text

    # Tag-level cleaning
    soup = BeautifulSoup(text, 'lxml')
    # 2021-04-12: per review, only blacklisted attributes are removed and
    # all others kept -- switched from clean_attrs to clean_attrs_with_black.
    soup = clean_attrs_with_black(soup)
    soup = clean_extra(soup)
    soup = clean_tags_with_black(soup)  # soup = clean_tags(soup)
    # Restore relative links to absolute ones
    if origin_url:
        soup = join_url(soup, origin_url)
    elif response:
        soup = join_url_by_response(soup, response)

    # Serialize back to text for the remaining string-level rules;
    # extract() may return None, in which case the pre-soup text is kept.
    text = extract(soup) or text
    text = clean_text(text)
    return angle_brackets_util.restore_angle_brackets(text, replace_v='&lt;')


if __name__ == "__main__":
    # Quick manual smoke test for the default cleaning pipeline.
    sample = '<img src="http://www.baiasdsadsadsadu.com">sdfdsffsd</img>'
    cleaned = clean_data_default(sample, 'http://www.baidu.com')
    print(cleaned)
