# -*- coding:utf-8 -*-
import re
import urllib
import urllib.request

from bs4 import BeautifulSoup


# FIXME add log
def get_html(url):
    """Fetch *url* and return the raw response body as bytes.

    Note: the body is returned undecoded; callers (bs4) handle encoding.
    """
    # Context manager closes the connection even if read() raises —
    # the original leaked the response object.
    with urllib.request.urlopen(url) as page:
        return page.read()


class TextContentExtractor:
    """Reduces a chunk of HTML to plain text with regex passes.

    Usage is fluent: ``extractor.set_html(html).get_text()``.
    """

    REGEX_SCRIPT = re.compile(r"<script[^>]*?>[\s\S]*?</script>", re.IGNORECASE)
    REGEX_STYLE = re.compile(r"<style[^>]*?>[\s\S]*?</style>", re.IGNORECASE)
    REGEX_MARKUP = re.compile(r"<[^>]+>", re.IGNORECASE)
    REGEX_SPACE = re.compile(r"\s+|\t|\r|\n", re.IGNORECASE)

    def __init__(self):
        # Working buffer holding the (progressively cleaned) HTML/text.
        self._string = ''

    def set_html(self, html):
        """Load the raw HTML to clean; returns self for chaining."""
        self._string = html
        return self

    def get_text(self):
        """Run every cleanup pass in order and return the plain text.

        Order matters: <script>/<style> bodies must be stripped before
        generic tag removal, otherwise their contents would survive as
        "text". Whitespace runs collapse to a single space last.
        """
        passes = (
            (self.REGEX_SCRIPT, ''),
            (self.REGEX_STYLE, ''),
            (self.REGEX_MARKUP, ''),
            (self.REGEX_SPACE, ' '),
        )
        for pattern, replacement in passes:
            self._string = pattern.sub(replacement, self._string)
        return self._string


# FIXME add log when Exception happens
def extract(html):
    """Parse an article page into an ordered list of content dicts.

    Returns a list of ``{'type': 'img', 'content': url}`` and
    ``{'type': 'text', 'content': str}`` entries, in document order.
    Best-effort: on an unexpected page layout the partial (possibly
    empty) list collected so far is returned instead of raising.
    """
    soup = BeautifulSoup(html, 'html.parser')
    cont_img_list = []
    text_extractor = TextContentExtractor()

    try:
        # Article body lives in div#js_content; each <p> may carry an
        # image, some text, or both. filter(None, ...) drops empty tags.
        content_list = filter(None, soup.select("div#js_content")[0].find_all('p'))

        for content in content_list:
            try:
                # Images are lazy-loaded, so the real URL sits in data-src.
                img_url = content.select("img")[0].get('data-src')
            except IndexError:
                img_url = ''  # paragraph contains no <img>
            try:
                text = text_extractor.set_html(content.text).get_text()
                # Bug fix: the original tested `.strip() is None`, which is
                # always False (str.strip never returns None). Treat
                # whitespace-only paragraphs as empty instead.
                if not str(text).strip():
                    text = ''
            except Exception:
                text = ''

            if img_url:
                cont_img_list.append({'type': 'img', 'content': img_url})
            if text:
                cont_img_list.append({'type': 'text', 'content': text})

    except Exception:
        # Deliberate best-effort swallow; FIXME: log the failure here.
        pass

    return cont_img_list


def combine_nearby_text_img(cont_img_list):
    """Collapse consecutive entries of the same type into one entry.

    Adjacent text entries become a single ``{'type': 'text', 'content':
    str}`` joined with spaces; adjacent image entries become a single
    ``{'type': 'img', 'content': [url, ...]}``.

    Bug fix: the original only flushed a run when the type switched, so
    the trailing run (e.g. a list ending in text) was silently dropped;
    pending runs are now flushed after the loop.
    """
    out = []
    text_list = []
    img_list = []

    def _flush_text():
        # Emit one combined text entry for the run collected so far.
        if text_list:
            out.append({'type': 'text', 'content': ' '.join(text_list)})
            text_list.clear()

    def _flush_img():
        # Emit one combined img entry; content is a list of URLs in order.
        if img_list:
            out.append({'type': 'img', 'content': list(img_list)})
            img_list.clear()

    for item in cont_img_list:
        if item['type'] == 'text':
            _flush_img()  # a text entry ends any run of images
            text_list.append(item['content'])
        elif item['type'] == 'img':
            _flush_text()  # an image entry ends any run of text
            img_list.append(item['content'])

    # Flush whatever run is still pending (at most one is non-empty).
    _flush_text()
    _flush_img()
    return out


def doc_parser(url):
    """Download the article at *url* and return its content as
    combined text/image blocks (see combine_nearby_text_img)."""
    raw_html = get_html(url)
    blocks = extract(raw_html)
    return combine_nearby_text_img(blocks)


if __name__ == '__main__':
    # Ad-hoc smoke test against a real WeChat article.
    url_test = 'https://mp.weixin.qq.com/s?__biz=MjM5OTM5NjE2MA==&mid=2650217718&idx=1&sn=eef0cf0106135fa754fbfebe1d51c372&chksm=bf3faad9884823cfca28251c1c4721b03515004aabd821833fe08b37a279ed5e0fb0669c9511&mpshare=1&scene=1&srcid=0811OqJscklMPd799Nt9Jk3A#rd'

    # Raw extraction first, then the combined/grouped form.
    raw_blocks = extract(get_html(url_test))
    print(raw_blocks)
    print(doc_parser(url_test))
