#!/usr/bin/env python
# -*- coding: utf-8 -*-
import hashlib
import random
import re
import string

from bs4 import BeautifulSoup

from .escape import to_unicode

code_list = list(string.ascii_lowercase + string.digits + string.ascii_uppercase)


def safe_float(value, default=0):
    """Coerce *value* to float, returning *default* when conversion fails.

    Only conversion errors (TypeError/ValueError) are caught; the original
    bare ``except`` also swallowed KeyboardInterrupt and SystemExit.
    The *default* is returned unchanged (not converted to float).
    """
    try:
        return float(value)
    except (TypeError, ValueError):
        return default


def safe_int(value, default=0):
    """Coerce *value* to int, returning *default* when conversion fails.

    Only conversion errors (TypeError/ValueError) are caught; the original
    bare ``except`` also swallowed KeyboardInterrupt and SystemExit.
    The *default* is returned unchanged (not converted to int).
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        return default


def compatmd5(key):
    """Return the hex MD5 digest of *key*.

    Text is UTF-8 encoded before hashing; bytes are hashed as-is; any
    other value (e.g. int) is stringified first, matching the original
    ``str(key)`` behavior.  Works on both Python 2 and 3 — the original
    referenced the Python-2-only ``unicode`` name and raised NameError
    under Python 3.
    """
    text_type = type(u'')  # unicode on py2, str on py3
    if isinstance(key, text_type):
        key = key.encode('utf8')
    elif not isinstance(key, bytes):
        # Non-text values (ints, etc.): stringify, then encode.
        key = text_type(key).encode('utf8')
    return hashlib.md5(key).hexdigest()


def get_random_string(num=5):
    """Return a random alphanumeric string of length *num*.

    Uses ``random.choice`` with replacement, so any length works — the
    original ``random.sample`` capped *num* at 62 (ValueError beyond that)
    and could never repeat a character, which weakened the randomness.

    NOTE: ``random`` is not cryptographically secure; use the ``secrets``
    module for tokens that guard anything sensitive.
    """
    chars = string.ascii_lowercase + string.digits + string.ascii_uppercase
    return ''.join(random.choice(chars) for _ in range(num))


def get_ch_text(text):
    """Extract the Chinese characters (U+4E00–U+9FA5) from *text*.

    Consecutive runs of Chinese characters are joined with single
    spaces.  Returns "" for empty input or when no Chinese characters
    are present.
    """
    if not text:
        return ""
    runs = re.findall(u"[\u4e00-\u9fa5]+", to_unicode(text))
    if not runs:
        return ""
    return " ".join(" ".join(runs).split())


def remove_emoj(text):
    """Strip characters outside the Basic Multilingual Plane (e.g. emoji).

    Keeps U+0000–U+D7FF and U+E000–U+FFFF; everything else (astral-plane
    characters such as most emoji) is deleted.
    """
    bmp_only = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
    return bmp_only.sub(u'', to_unicode(text))


class FancyDict(dict):
    """A dict whose keys are also reachable as attributes.

    ``d.key`` reads, writes and deletes ``d['key']``.  Missing keys
    raise KeyError (not AttributeError), exactly like plain item access.
    """

    def __getattr__(self, key):
        # The original wrapped this in try/except KeyError + bare raise,
        # which is equivalent to letting the KeyError propagate.
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        del self[key]


# Compiled once at import time; the original recompiled both patterns on
# every call.  Pattern text is unchanged (classic detectmobilebrowsers
# heuristic: keyword scan plus 4-character device-prefix table).
_MOBILE_UA_LONG_RE = re.compile(
    r'googlebot-mobile|android|avantgo|blackberry|blazer|elaine|hiptop|ip(hone|od)|kindle|midp|mmp|mobile|o2|opera mini|palm( os)?|pda|plucker|pocket|psp|smartphone|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce; (iemobile|ppc)|xiino|maemo|fennec',
    re.IGNORECASE)
_MOBILE_UA_SHORT_RE = re.compile(
    r'1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|e\-|e\/|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(di|rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|xda(\-|2|g)|yas\-|your|zeto|zte\-',
    re.IGNORECASE)


def check_mobile_request(user_agent):
    """Return True when *user_agent* looks like a mobile browser.

    First scans the whole UA string for well-known mobile keywords; if
    none match, falls back to matching the first four characters against
    the device-prefix table.
    """
    if _MOBILE_UA_LONG_RE.search(user_agent) is not None:
        return True
    return _MOBILE_UA_SHORT_RE.search(user_agent[0:4]) is not None


# Django-style URL validation pattern: http(s) scheme, host (domain name,
# "localhost", or dotted-quad IP), optional port, optional path/query.
# The stray '?' that the original left at the start of the last fragment
# now sits with its port group; the concatenated value is identical.
URL_RE = r'^(?:http)s?://' \
         r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' \
         r'localhost|' \
         r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' \
         r'(?::\d+)?' \
         r'(?:/?|[/?]\S+)$'

# Compiled once at import time; the original recompiled on every call.
_URL_REGEX = re.compile(URL_RE, re.IGNORECASE)


def check_url(url):
    """Return True when *url* is a well-formed http(s) URL, else False.

    Falsy input (None, "") returns False without touching the regex.
    """
    if url and _URL_REGEX.match(url):
        return True
    return False


def sub_string_by_length(content, max_length=None):
    """Truncate *content* to *max_length* characters, appending '...'.

    Content at or under the limit — or with no limit (None/0) — comes
    back unchanged; falsy content yields an empty unicode string.
    """
    if not content:
        return u''
    if not max_length or len(content) <= max_length:
        return content
    return content[:max_length] + '...'

class ClearElement(object):
    """Whitelist-based HTML sanitizer built on top of BeautifulSoup.

    Normalizes arbitrary HTML into a flat structure of <p>, <img>, <span>
    and <a> tags: unknown inline tags become <span>, other unknown tags
    become <p>, almost all attributes are stripped, empty elements are
    removed, and every kept <img> is lifted into its own <p> wrapper.

    NOTE(review): ``BeautifulSoup`` is referenced throughout but never
    imported in this module — confirm ``from bs4 import BeautifulSoup``
    exists before this class is used.
    """

    # Tags kept with their original name; everything else is renamed.
    allow_tag = ['p', 'img', 'span', 'a']
    # Inline-level tags that get renamed to <span>.
    # NOTE(review): 'kdb' looks like a typo for the HTML tag 'kbd'.
    inline_tag = ['b', 'big', 'small', 'tt', 'abbr', 'acronym', 'cite', 'code',
                  'dfn', 'em', 'kdb', 'samp', 'var', 'bdo', 'strong',
                  'sub', 'sup', 'u', 's', 'pre']
    # NOTE(review): block_tag is never referenced by the methods below.
    block_tag = ['fieldset', 'blockquote', 'q', 'aside', 'div', 'article', 'section']
    # class attribute values allowed to remain on <p> tags.
    allow_class = ['imgdesc', 'small_margin_top']

    def __init__(self, content, clear_space=False, clear_a=False):
        # content: raw HTML string to sanitize.
        self.content = content
        # Empty soup kept only as a factory for new tags (new_tag).
        self.b = BeautifulSoup("", "html.parser")
        # When True, collapse all whitespace inside leaf <p> text.
        self.clear_space = clear_space
        # When True, downgrade <a> tags to <span>.
        self.clear_a = clear_a
        # Tags deleted outright, together with their content.
        self.del_tag = [
            'br', 'script', 'object',
            'map', 'input', 'select', 'textarea',
            'style', 'title', 'head',
            'noscript', 'applet'
        ]

    def clearup_element(self, element):
        """Recursively rename/strip the direct children of *element*.

        Processes one level per call, recursing into children that still
        have children or text of their own; empty non-<img> elements are
        removed from the tree.
        """
        for e in element.find_all(True, recursive=False):
            if e.name == 'iframe':
                # Keep only iframes carrying both src and a "video"
                # attribute; remove the rest.
                # NOTE(review): no `continue` after extract(), so a
                # removed iframe still goes through the attribute
                # handling below (harmless but presumably unintended).
                if e.get('src', '') and e.get("video", ""):
                    continue
                else:
                    e.extract()
            # Snapshot of attribute names; names still in this list at
            # the end of the iteration are deleted from the tag.
            _attrs = [_ for _ in e.attrs]
            if e.name not in self.allow_tag:
                if e.name in self.inline_tag:
                    e.name = 'span'
                else:
                    e.name = 'p'

            if e.name == 'a':
                if self.clear_a:
                    e.name = 'span'
                elif 'href' in _attrs:
                    _attrs.remove('href')  # keep href so the link survives
            if e.name == 'p' and 'class' in _attrs:
                e_class = e['class']
                # Keep the class only when it is exactly one of the
                # whitelisted class names.
                if e_class and ''.join(e_class) in self.allow_class:
                    _attrs.remove('class')
            if e.name == 'span' and 'style' in _attrs:
                style = e['style'].strip()
                # Bold styling is the only inline style allowed to stay.
                if style == u'font-weight: bold;':
                    _attrs.remove('style')
            if e.name == "img" and 'src' in _attrs:
                _attrs.remove('src')
            for _ in _attrs:
                del e[_]

            def is_empty():
                # True when the tag holds no visible text.
                # NOTE(review): r'\\UFEFF' matches the literal text
                # "\UFEFF", not the U+FEFF BOM character — u'\ufeff'
                # was probably intended; confirm before relying on this.
                return e.string is None or e.string.strip() == "" or not re.sub(r'\\UFEFF', '', e.string.strip(), flags=re.IGNORECASE)

            if e.find_all(True, recursive=False) == [] and is_empty():
                text = re.sub(r'\\UFEFF', '', ''.join(e.get_text().strip().split()), flags=re.IGNORECASE)
                if e.name != 'img' and not text:
                    e.extract()
            else:
                self.clearup_element(e)

    def merget_element(self, soup):
        """Collapse <span> wrappers and collect the surviving <p> tags.

        Leaf spans get their text whitespace-normalized; spans without
        the bold style are unwrapped; the remaining <p> elements are
        appended to a fresh soup, which is returned.
        NOTE(review): method name looks like a typo for "merge_element".
        """
        _s = BeautifulSoup("", "html.parser")
        _l = ['span']
        # Normalize the text of leaf spans (collapse inner whitespace).
        for e in soup.find_all(_l):
            es = e.find_all(True, recursive=False)
            if es:
                continue
            _ = to_unicode(' '.join(e.get_text().strip().split())).strip()
            if _:
                e.string = _
        # Unwrap every span except bold-styled ones.
        for e in soup.find_all(_l):
            attrs = e.attrs
            if 'style' not in attrs:
                e.unwrap()
                continue
            style = e['style'].strip()
            if style != u'font-weight: bold;':
                e.unwrap()
        # Keep only the paragraph elements, in document order.
        for e in soup.find_all('p'):
            _s.append(e)
        return _s

    def clear_spacing(self, soup):
        """Remove every whitespace character from leaf <p> text."""
        for e in soup.find_all('p'):
            _s = to_unicode(''.join(e.get_text().strip().split()))
            if _s and not e.find_all():  # only replace text when there are no child nodes
                e.string = _s

    def unwrap_img(self, element, soup):
        """Lift *element* (an <img>) out of its enclosing paragraph.

        Splits the parent <p> into three paragraphs — content before
        the image, the image itself, content after — so images never
        sit inside text paragraphs.  Recurses upward until a <p> or
        the document root is reached.
        """
        top_tag = '[document]'
        parent = element.parent
        if parent.name == 'p':
            parent_parent = parent.parent

            parent_index = parent_parent.index(parent)
            my_index = parent.index(element)

            contents_list = parent.contents[:]

            # First part of the split.
            first_tag = self.b.new_tag("p")
            second_tag = self.b.new_tag("p")
            third_tag = self.b.new_tag("p")
            parent.extract()

            # All inserted at the same index, so insert in reverse order
            # to end up with first/second/third in document order.
            parent_parent.insert(parent_index, third_tag)
            parent_parent.insert(parent_index, second_tag)
            parent_parent.insert(parent_index, first_tag)

            for child in contents_list[:my_index]:
                first_tag.append(child)

            # Second part of the split: the img itself.

            second_tag.append(element)

            # Third part of the split: content after the image.

            for child in contents_list[my_index + 1:]:
                third_tag.append(child)

        elif parent.name == top_tag:
            # Image already at document top level: just wrap it in <p>.
            element.wrap(self.b.new_tag("p"))
        else:
            # Walk up until the paragraph (or document) ancestor is found.
            self.unwrap_img(parent, soup)

    def clearup_content(self):
        """Run the full sanitize pipeline and return the cleaned HTML string."""
        soup = BeautifulSoup(self.content, "html.parser")
        # Drop blacklisted tags together with their content.
        for _ in self.del_tag:
            [s.extract() for s in soup(_)]
        self.clearup_element(soup)
        imgs = soup.find_all('img')
        for c in imgs:
            src = c.get('src', '')
            # Keep only absolute http(s) images; lift them out of <p>.
            # NOTE(review): startswith("https") is redundant — already
            # covered by startswith("http").
            if src and (src.startswith("http") or src.startswith("https")):
                self.unwrap_img(c, soup)
            else:
                c.extract()
        _s = self.merget_element(soup)
        self.clearup_element(_s)
        if self.clear_space:
            self.clear_spacing(_s)
        # prettify() may emit html/body wrappers; strip them from output.
        c = _s.prettify() \
            .replace("<html>", '').replace('<body>', '') \
            .replace("</html>", '').replace('</body>', '')
        return c

    def sub_first_p_for_summary(self):
        """Return the text of the first <p> with at least 20 characters.

        Paragraphs shorter than 20 characters are skipped; returns ''
        when no paragraph qualifies.
        """
        _s = BeautifulSoup(self.content,"html.parser")
        p_list = _s.find_all('p')
        summary = ''
        for p in p_list:
            text = p.text.strip() if p.text else ''
            if not text or len(text) < 20:
                continue
            summary = text
            break
        return summary
