#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import logging
import os
import re
import sys

import yaml
import scrapy

# Make the spider's own folder importable so the sibling ``utils`` module is
# found regardless of the current working directory.
try:
    folder = os.path.dirname(os.path.realpath(__file__))
    if folder not in sys.path:
        sys.path.insert(0, folder)

    from utils import get_leveled_config
except ImportError as exc:
    # Chain the original ImportError so the real cause stays visible.
    raise Exception("Cannot find utils") from exc


# RST page template.  The two ``%s`` placeholders are filled in later via the
# ``%`` operator: the (already underlined) page title and the category name.
# Plain string literal on purpose: the previous f-string had no interpolations
# and only forced doubling of every CSS brace below.
RST_HEADER = '''
.. new format in html, using colors, font size..

.. role:: raw-html(raw)
    :format: html

.. raw:: html

    <style>
        .orange {color:orange; }
        .note {color:grey; font-size:24px}
        .grey {color:grey; }
        .red {color:red; }
        .blue {color:blue; }
        .orange {color:orange; }
        .grey1 {color:grey; font-size:24px}
        .red1 {color:red; font-size:24px}
        .blue1 {color:blue; font-size:24px}
        .grey2 {color:grey; font-size:18px}
        .red2 {color:red; font-size:18px}
        .blue2 {color:blue; font-size:18px}
    </style>

.. role:: orange
.. role:: note
.. role:: grey
.. role:: red
.. role:: blue
.. role:: orange
.. role:: grey1
.. role:: red1
.. role:: blue1
.. role:: grey2
.. role:: red2
.. role:: blue2

%s

Category: %s

.. contents:: Table of Contents
    :depth: 3

.. begin

'''

# Appended after the generated content of every page.
RST_TAILER = '''
.. end

注解
~~~~~~~~~~~~~
'''

# 这个函数有问题， 本意是想找到第一个空闲的数字
def _detect_file_index(folder, base_number):
    # assumption: data file name starts with "x\d+"
    current_index = base_number
    re_filename = re.compile("^x(\d+).*\.rst")
    for ele in sorted(os.listdir(folder)):
        res = re_filename.match(ele)
        if res:
            idx = int(res.group(1))
            if idx == (current_index + 1):
                current_index = idx

    return current_index + 1


class WeixinSpider(scrapy.Spider):
    '''Generic page crawler that converts articles into RST files.

    Despite the name it handles arbitrary hosts; mp.weixin.qq.com pages get
    a dedicated handler, everything else goes through the default handler.

    Environment knobs (see also ``start_requests``):

    - CRAWLER_PAGES : how many pages to download, by default is 0, to download all..
      (NOTE(review): not referenced in this file -- presumably consumed
      elsewhere; confirm before relying on it.)
    '''

    name = 'generic'

    # configuration flag (not read in this file -- kept for compatibility)
    CONF_SKIP_FIRST_PARAGRAPH_IMAGE = False

    # Per-url processing state keyed by page url: category, pending image
    # list, first-output/first-image flags, page-end marker.  NOTE: class
    # attribute, so the dict is shared by all instances of this spider.
    status = {}

    def start_requests(self):
        '''Entry point: read the url list and yield one request per url.

        Environment variables used:

        - CRAWLER_FILENAME_IDX : first numeric index for output filenames
        - CRAWLER_FILENAME     : default filename prefix (default ``x``)
        - CRAWLER_URL_LIST     : path of a text file, one url per line
        - CRAWLER_DATA_FOLDER  : base folder for generated files

        Lines in the url file starting with ``#`` are directives or comments
        (``# cfg:baseindex=N``, ``# cfg:category=NAME``, bare ``#`` resets
        the category); lines starting with ``;`` are skipped.
        '''

        configuration = self.load_configurations()
        # hosts with a dedicated handler; everything else falls back to the
        # default handler below.
        host_callbacks = {
            "mp.weixin.qq.com" : self.entry_handler_weixin_pages,
        }

        index = int( os.environ.get("CRAWLER_FILENAME_IDX", "0") )

        # default filename prefix, when the setting does not exist in targets.
        filename_prefix_default = os.environ.get("CRAWLER_FILENAME", "x")

        urls = []
        url_file = os.environ.get("CRAWLER_URL_LIST", None)
        if url_file:
            with open(url_file) as fh:
                urls = [ ele.strip() for ele in fh.readlines() if len(ele.strip()) > 0 ]

        # this is provided by crawl command line now, not really necessary to check it.
        target_folder_base = os.environ.get("CRAWLER_DATA_FOLDER", None)
        if target_folder_base is None:
            # create folder in the level of "crawlers"
            target_folder_list = os.path.dirname(os.path.realpath(__file__)).split(os.sep)[:-2]
            target_folder_list.append("misc.%s" % (self.name))
            target_folder_base = os.sep.join(target_folder_list)

        current_category = None
        for url in urls:
            url = url.strip()
            if url.startswith("#"):
                # in-file directives: adjust file index / current category.
                if url.startswith("# cfg:baseindex="):
                    index = _detect_file_index(target_folder_base ,int(url[ len("# cfg:baseindex="):]))
                elif url.startswith("# cfg:category="):
                    # sanitize the category so it is usable in file names.
                    current_category = url[len("# cfg:category="):].strip().replace(" ", "-").replace("/", "-").replace(":", "-")
                elif url == "#":
                    current_category = None
                continue

            elif url.startswith(";"):
                # commented-out url
                continue

            hostname = self._get_hostname(url)
            callback = host_callbacks.get(hostname, self.entry_handler_pages_default)
            conf = configuration.get(hostname, {})
            filename_prefix = get_leveled_config("_conf/prefix", conf, filename_prefix_default)

            if current_category is None:
                # fall back to the per-host configured category, no suffix.
                category = get_leveled_config("_conf/category", conf, None)
                suffix = None
            else:
                category = current_category
                suffix = current_category

            logging.info(f"start downloading {url} {index}...")
            yield scrapy.Request(url=url,
                                callback = callback,
                                meta={
                                    "root": target_folder_base,
                                    "filename": "%s%04d" % (filename_prefix, index),
                                    "url": url,
                                    "conf" : conf,
                                    "suffix" : suffix,
                                    "category" : category,
                                    "contentpath" : get_leveled_config("_conf/contentpath", conf, None),
                                    "titlepath" : get_leveled_config("_conf/titlepath", conf, "//title/text()"),
                                })

            index = index + 1


    def _auto_detect_base_element(self, response):
        '''Search h2..h9 headings in *response* and return the parent element
        of the first heading level found -- that parent is taken as the root
        element holding the page content.

        h1 is skipped on purpose: it is normally used inside <header>, not
        for the article body.  Returns ``None`` when no heading is present.
        '''
        for idx in range(2, 10):
            res = response.xpath(f"//body//h{idx}/parent::node()")
            if res:
                # Prefer the second match when several parents exist: the
                # first tends to be an outer wrapper rather than the content
                # container.
                if len(res) > 1:
                    return res[1]
                return res[0]
        return None


    def entry_handler_pages_default(self, response, meta = None):
        '''Default page handler: extract title and content, queue image
        downloads, then write the page out as an RST file.

        Parameter meta: optional replacement for ``response.meta`` (used when
        the handler is invoked directly instead of as a scrapy callback).

        This is a generator -- it yields follow-up image requests.
        '''
        if not meta:
            meta = response.meta

        url = meta.get("url", None)
        conf = meta.get("conf", {})
        category = meta.get("category", "misc")

        try:
            title = response.xpath( meta.get("titlepath", "//title/text()") ).get()
            title = title.strip()
        except Exception as exp:
            logging.error(str(exp))
            logging.error(f"{url}: failed reading page title")
            title = "Not known"

        filename = meta.get("filename", None)
        if not filename:
            logging.error(f"{url}: failed getting filename.")
            return

        suffix = meta.get("suffix", None)
        if not suffix:
            suffix = os.environ.get("CRAWLER_FILENAME_SUF",  get_leveled_config(f"{category}/suffix", conf, None) )
        if suffix:
            # Append the suffix to the generated base name.  The previous
            # code dropped the computed filename entirely at this point.
            filename = f"{filename}-{suffix}"

        folder = meta.get("root", None)
        if not folder:
            # guard: os.path.exists(None) would raise TypeError.
            logging.error(f"{url}: no target folder configured.")
            return
        if not os.path.exists(folder):
            os.makedirs(folder)

        # per-url state shared with process_content()
        self.status[url] = {
            "category" : category,
            "imgs": [],
            "1st_image": True,
            "1st_image_paragraph": True,
            "1st_output": True,
            "page_end": False
        }

        content_path = meta.get("contentpath", None)
        if content_path is None:
            # no explicit xpath configured: guess the content root element.
            root_ele = self._auto_detect_base_element(response)
            if not root_ele:
                logging.error(f"{url}: cannot determine content.")
                return

            response = root_ele.getall()
        else:
            response = response.xpath(content_path).getall()

        # process page content.
        # class cannot be used as it normally ends with many space character....
        generated_content = ""
        for content in response:
            ret_text = self.process_content(url, content, 0, folder, filename, first_call=True, configurations=conf)
            if ret_text:
                generated_content = generated_content + f"{ret_text}"

            if self.status[url]["page_end"]:
                break

        for img_url, img_name in self.status[url]["imgs"]:
            if not img_url:
                logging.error(f"{url}: invalid image url {img_url} -- {img_name}")
                continue

            if img_url.startswith("/"):
                # relative image url: prepend scheme and host of the page url.
                prefix, urls = url.split("://")
                urls = urls.split('/')
                if urls[0] not in img_url:
                    prefix = prefix + "://" + urls[0]
                img_url = prefix + img_url

            logging.debug(f"{url}: download image {img_url} -- {img_name}")

            img_name = os.sep.join([folder, img_name])
            if not os.path.exists(img_name):
                yield scrapy.Request(url=img_url,
                                    callback=self.parse_image,
                                    meta={
                                        "name": img_name
                                    })

        # wrap the title as an RST link with a generously padded title block
        # (CJK characters render wider than their len()).
        title = f"`{title} <{url}>`_"
        title_mark = "=" * (len(title)+30)
        title = f"\n{title_mark}\n{title}\n{title_mark}\n\n"

        self._save_content_to_rst(generated_content, folder, filename, header = RST_HEADER % (title, category), url = url)


    def entry_handler_weixin_pages(self, response, meta = None, **kwargs):
        '''Handler for mp.weixin.qq.com article pages.

        The page is written to an RST file after the image urls have been
        rewritten to local image files; this is a generator yielding the
        follow-up image requests.
        '''
        if not meta:
            meta = response.meta

        filename = meta.get("filename", None)
        if not filename:
            logging.error("failed getting filename.")
            return

        try:
            title = response.xpath('//h1[@id="activity-name"]/text()').get()
            title = title.strip()
        except Exception as exp:
            logging.error(str(exp))
            logging.error("(unknown): failed reading page title")
            return

        conf = meta.get("conf", {})
        url = meta.get("url", None)
        category = meta.get("category", None)
        suffix = meta.get("suffix", None)

        if not category:
            try:
                # derive the category from the account name on the page.
                # FIXME(review): category is falsy here, so this renders as
                # "None@<account>" or "@<account>" -- confirm the intended
                # prefix before changing it.
                category = "%s@%s" % (category, response.xpath('//a[@id="js_name"]/text()').get().strip())
            except Exception as exp:
                logging.error(str(exp))
                logging.error(f"{url}: failed reading category")
                return

        if not suffix:
            suffix = os.environ.get("CRAWLER_FILENAME_SUF",  get_leveled_config(f"{category}/suffix", conf, None) )
        if suffix:
            # Append the suffix to the generated base name.  The previous
            # code dropped the computed filename entirely at this point.
            filename = f"{filename}-{suffix}"

        folder = meta.get("root", None)
        if not folder:
            # guard: os.path.exists(None) would raise TypeError.
            logging.error(f"{url}: no target folder configured.")
            return
        if not os.path.exists(folder):
            os.makedirs(folder)

        # per-url state shared with process_content()
        self.status[url] = {
            "imgs": [],
            "category" : category,
            "1st_image": True,
            "1st_image_paragraph": True,
            "1st_output": True,
            "page_end": False
        }

        # process page content.
        # class cannot be used as it normally ends with many space character....
        generated_content = ""
        for content in response.xpath('//div[@id="js_content"]/*').getall():
            ret_text = self.process_content(url, content, 0, folder, filename, first_call=True, configurations=conf)
            if ret_text:
                generated_content = generated_content + f"{ret_text}"

            if self.status[url]["page_end"]:
                break

        for img_url, img_name in self.status[url]["imgs"]:
            img_name = os.sep.join([folder, img_name])
            if img_url and not os.path.exists(img_name):
                yield scrapy.Request(url=img_url,
                                    callback=self.parse_image,
                                    meta={
                                        "name": img_name
                                    })

        # wrap the title as an RST link with a generously padded title block.
        title = f"`{title} <{url}>`_"
        title_mark = "=" * (len(title)+30)
        title = f"\n{title_mark}\n{title}\n{title_mark}\n\n"

        self._save_content_to_rst(generated_content, folder, filename, header = RST_HEADER % (title, category), url = url)


    def process_content(self, url, content, level, folder, filename, first_call=False, configurations = None):
        '''Process one html fragment recursively and return the generated RST text.

        Attention: as the return value is used, yield MUST NOT be used inside
        this function (there will be strange errors).  As a workaround, every
        image to download is appended to ``self.status[url]["imgs"]``.

        Parameter first_call: input, to indicate this is called from upper or from itself.

        The big while-loop below is a dispatch: each branch handles one kind
        of element (header, figure, img, section, code, pre, list, link, ...)
        and ``break``s once the fragment has been consumed.
        '''
        # avoid the shared mutable-default-argument pitfall; behavior for all
        # existing callers (who always pass configurations=...) is unchanged.
        if configurations is None:
            configurations = {}

        content = content.strip()
        if len(content) == 0:
            return ""

        # nothing output yet, always use level 0..
        if self.status[url]["1st_output"]:
            level = 0
        restart_level = False
        ret_text = ""

        try:
            selector = scrapy.Selector(text=content)
        except RecursionError as exp:
            logging.error(f"{url}: recursive call: {content}")
            logging.error(exp)

            return content

        children_element = selector.xpath("/html/body/child::node()").getall()
        logging.debug(f"{level}--> {len(children_element)}, {selector}")
        if len(children_element) > 1:
            # several siblings: recurse into each one separately.
            for ele in children_element:
                ret_text = ret_text + self.process_content(url, ele, level, folder, filename, configurations=configurations)

            if ret_text:
                self.status[url]["1st_output"] = False

            return ret_text

        # there is only one element.
        loop = 0
        while not self.status[url]["page_end"]:

            # FIXME: this loop is debug code, shall be removed later.
            loop = loop + 1
            if loop > 50:
                logging.warning(f"{url}: too many loops")
                break

            # configured end-of-page marker: stop processing the page here.
            ending_rule = get_leveled_config(f"{self.status[url]['category']}/endmark", configurations)
            if ending_rule:
                res = selector.xpath(ending_rule).getall()
                if res:
                    self.status[url]["page_end"] = True
                    break

            # configured header rules: text matching them becomes a level-2
            # section header.
            header_rules = get_leveled_config(f"{self.status[url]['category']}/header", configurations, [])
            if isinstance(header_rules, str):
                header_rules = [header_rules]
            found = False
            for single_rule in header_rules:
                res = selector.xpath(single_rule).getall()
                if res:
                    ret_text = ret_text + self.generate_paragraph_header(2, ''.join([ele.strip() for ele in res]))
                    restart_level = True
                    found = True
                    break
            if found:
                break

            # for h[1234567] here, it must be using // before text() to bypass levels in the middle...
            for idx in range(1,8):
                res = selector.xpath(f"/html/body/h{idx}//text()").getall()
                if res:
                    ret_text = ret_text + self.generate_paragraph_header(idx, ''.join([ele.strip() for ele in res]))
                    restart_level = True
                    break
            if res:
                break


            # figure...
            res = selector.xpath("/html/body/figure").getall()
            if res:
                if get_leveled_config(f"{self.status[url]['category']}/skip_first_image", configurations, False) and self.status[url]["1st_image"]:
                    self.status[url]["1st_image"] = False
                    break

                # lazy-load attributes first, plain src as last resort.
                img_url_formats = [
                    "/html/body/figure//img/@data-src",
                    "/html/body/figure//img/@data-lazy-src",
                    "/html/body/figure//span/@dataurl",
                    "/html/body/figure//img/@src",
                ]

                img_url = None
                for img_url_fmt in img_url_formats:
                    img_url = selector.xpath(img_url_fmt).get()
                    if img_url:
                        break
                if not img_url:
                    break

                img_text = "".join([ele for ele in selector.xpath("/html/body/figure/figcaption").getall()])
                if img_text:
                    img_text = re.sub(r"</?figcaption.*?>", "", img_text)
                    if img_text:
                        img_text = self.process_content(url, img_text, level, folder, filename, configurations=configurations)
                    else:
                        img_text = None

                # to check if image is in list already. img_name has no path.
                img_name = None
                for x_url, x_name in self.status[url]["imgs"]:
                    if x_url == img_url:
                        img_name = x_name
                        break

                if not img_name:
                    img_ext = os.path.splitext(img_url)[-1]
                    img_name = os.sep.join([ "image", "%s-%03d%s"%(filename, len(self.status[url]["imgs"]), img_ext) ])
                    self.status[url]["imgs"].append([img_url, img_name])

                ret_text = ret_text + f"\n\n{'    '*(level)}.. figure:: {img_name}\n"
                if img_text:
                    ret_text = ret_text + f"\n{'    '*(level+1)}{img_text}\n\n"

                break


            # images
            res = selector.xpath("/html/body/img").getall()
            if res:
                if get_leveled_config(f"{self.status[url]['category']}/skip_first_image", configurations, False) and self.status[url]["1st_image"]:
                    self.status[url]["1st_image"] = False
                    break

                img_url_formats = [
                    "/html/body/img/@src",
                    "/html/body/img/@data-src",
                    "/html/body/img/@data-original-src",
                ]

                img_url = None
                for img_url_fmt in img_url_formats:
                    img_url = selector.xpath(img_url_fmt).get()
                    if img_url:
                        break
                if not img_url:
                    break

                # to check if image is in list already. img_name has no path.
                img_name = None
                for x_url, x_name in self.status[url]["imgs"]:
                    if x_url == img_url:
                        img_name = x_name
                        break

                if not img_name:
                    img_ext = selector.xpath("/html/body/img/@data-type").get()
                    if not img_ext:
                        img_ext = "png"

                    img_name = os.sep.join([ "image", "%s-%03d.%s"%(filename, len(self.status[url]["imgs"]), img_ext) ])
                    self.status[url]["imgs"].append([img_url, img_name])

                ret_text = ret_text + f"\n\n{'    '*(level)}.. figure:: {img_name}\n\n"

                break


            # the code below will disable the 2nd section check after.
            feature_flag_check_images_inside_sections = False
            if feature_flag_check_images_inside_sections:
                # this code is because the children detection has_children_tags() is not working well...
                res = selector.xpath("/html/body/*[local-name()='section' or local-name()='p' or local-name()='em']/img").getall()
                if res:
                    if get_leveled_config(f"{self.status[url]['category']}/skip_first_image", configurations, False) and self.status[url]["1st_image"]:
                        self.status[url]["1st_image"] = False
                        break

                    img_url = selector.xpath("/html/body/*[local-name()='section' or local-name()='p' or local-name()='em']/img/@src").get()
                    if not img_url:
                        img_url = selector.xpath("/html/body/*[local-name()='section' or local-name()='p' or local-name()='em']/img/@data-src").get()

                    # to check if image is in list already. img_name has no path.
                    img_name = None
                    for x_url, x_name in self.status[url]["imgs"]:
                        if x_url == img_url:
                            img_name = x_name
                            break

                    if not img_name:
                        img_ext = selector.xpath("/html/body/*[local-name()='section' or local-name()='p' or local-name()='em']/img/@data-type").get()
                        if not img_ext:
                            img_ext = "png"

                        img_name = os.sep.join([ "image", "%s-%03d.%s"%(filename, len(self.status[url]["imgs"]), img_ext) ])
                        self.status[url]["imgs"].append([img_url, img_name])

                    ret_text = ret_text + f"\n\n{'    '*(level)}.. figure:: {img_name}\n\n"

                    break


            # A two-level match cannot be used here: if part of the content
            # matched the condition, the remaining non-matching parts would
            # be silently dropped...
            # # check <p>/<span> first, this may result in a dead loop...
            # res = selector.xpath("/html/body/p/span/child::node()").getall()
            # if res:
            #     for ele in res:
            #         if self.has_children_tags(ele):
            #             temp_text = self.process_content(url, ele, level, folder, filename, configurations=configurations)
            #             ret_text = ret_text.rstrip() + "    "*level + temp_text
            #         else:
            #             ret_text = ret_text + "    "*level + ele

            #     if not ret_text.startswith("\n\n"):
            #         ret_text = f"\n\n{ret_text}"
            #     if not ret_text.endswith("\n"):
            #         ret_text = f"{ret_text}\n"
            #     break

            # 2nd section check.
            # section is container, just like figure, p, ...
            res = selector.xpath("/html/body/*[local-name()='section' or local-name()='span' or local-name()='p' or local-name()='article' or local-name()='center' or local-name()='main']/child::node()").getall()
            if res:
                for ele in res:
                    if self.has_children_tags(ele):
                        try:
                            temp_text = self.process_content(url, ele, level, folder, filename, configurations=configurations)
                        except RecursionError:
                            logging.error(f"{url}: recursive error for {ele}")
                            temp_text = ele
                        ret_text = ret_text.rstrip() + "    "*level + temp_text
                    else:
                        ret_text = ret_text + "    "*level + ele

                if not ret_text.startswith("\n\n"):
                    ret_text = f"\n\n{ret_text}"
                if not ret_text.endswith("\n"):
                    ret_text = f"{ret_text}\n"
                break


            # there are two different styles of code: several words in one line, multiple lines.
            res = selector.xpath('/html/body/pre/code[contains(@style, "overflow-x:")]/child::node()').getall()
            if res:
                ret_text = ret_text + f"\n\n{'    '*(level)}::\n\n{'    '*(level+1)}"
                for ele in res:
                    if "<br" in ele:
                        ret_text = ret_text + f"\n{'    '*(level+1)}"
                    else:
                        if not ret_text.endswith(" "):
                            ret_text = ret_text  + " "
                        ret_text = ret_text + " ".join(scrapy.Selector(text=ele).xpath('/html/body//text()').getall())

                ret_text = ret_text + "\n"
                break

            # single word of <code></code>
            res = selector.xpath("/html/body/code//text()").getall()
            if res:
                res = ''.join([ele.strip() for ele in res])
                ret_text = f"{ret_text} *{res}* "
                break

            # pre: reference block must be a block standalone...
            # FIXME:  some pages contain images or format text inside PRE -> local-name()='pre'
            res = selector.xpath("/html/body/pre//text()").getall()
            if len(res):
                ret_text = ret_text + f"{'    '*level}::\n\n"
                for ele in "".join(res).split("\n"):
                    ret_text = ret_text + f"{'    '*(level+1)}{ele}\n"

                ret_text = f"\n\n{ret_text}\n"
                break

            # FIXME: some format data may exist in strong
            res = selector.xpath('/html/body/*[local-name()="strong" or local-name()="b" or local-name()="em"]//text()').getall()
            if res:
                res = ''.join([ele.strip() for ele in res])
                ret_text = ret_text + f" **{res}** "
                break

            res = selector.xpath('/html/body/*[local-name()="i"]//text()').getall()
            if res:
                res = ''.join([ele.strip() for ele in res])
                ret_text = ret_text + f" *{res}* "
                break


            # TODO: block quote
            # <blockquote><p>在 Unicode 规范到 14.0...</p></blockquote>
            # res = selector.xpath("/html/body/blockquote").getall()
            # if res:
            #     temp_text = "".join([ele.strip() for ele in res if len(ele.strip()) > 0])
            #     temp_text = self.strip_outer_tag(temp_text, "blockquote")
            #     temp_selector = scrapy.Selector(text=temp_text)
            #     # FIXME: cannot detect <a> inside <p> such as 先说一下背景，是一个轻量独立的
            #     children_element = temp_selector.xpath("/html/body/p/child::node()").getall()
            #     if len(children_element) > 1:
            #         for ele in children_element:
            #             # their level shall be decided here
            #             ret_text = ret_text.rstrip() + self.process_content(url, ele, level+1, folder, filename, configurations=configurations)
            #         ret_text = f"\n{ret_text}\n"
            #         break

            #     # only 1 element...
            #     ret_text = ret_text + "    "*(level+1) + "".join([ele.strip() for ele in temp_selector.xpath("/html/body/p/text()").getall()])
            #     break

            # <div> container: strip the wrapper and recurse into children.
            res = selector.xpath("/html/body/div").getall()
            if res:
                temp_text = ""
                for ele in [self.strip_outer_tag(ele, "div") for ele in res if len(ele.strip()) > 0]:
                    if self.has_children_tags(ele):
                        try:
                            temp_text = temp_text + self.process_content(url, ele, level+1, folder, filename, configurations=configurations)
                        except RecursionError:
                            temp_text = temp_text + ele
                    else:
                        temp_text = temp_text + ele

                # only 1 element...
                ret_text = ret_text + "    "*(level+1) + temp_text
                break


            # <blockquote><p>在 Unicode 规范到 14.0...</p></blockquote>
            res = selector.xpath("/html/body/blockquote").getall()
            if res:
                temp_text = ""
                for ele in [self.strip_outer_tag(ele, "blockquote") for ele in res if len(ele.strip()) > 0]:
                    if self.has_children_tags(ele):
                        temp_text = temp_text + self.process_content(url, ele, level+1, folder, filename, configurations=configurations)
                    else:
                        temp_text = temp_text + ele

                # only 1 element...
                ret_text = ret_text + "    "*(level+1) + temp_text
                break

            # FIXME: bulletin can be embedded...
            res = selector.xpath("/html/body/ul/child::node()").getall()
            if res:
                for ele in [self.strip_outer_tag(ele, "li") for ele in res if len(ele.strip()) > 0]:
                    if self.has_children_tags(ele):
                        temp_text = self.process_content(url, ele, level+1, folder, filename, configurations=configurations)
                    else:
                        temp_text = ele

                    ret_text = ret_text + f"- {temp_text.lstrip()}\n"

                ret_text = f"\n\n{ret_text}\n"
                break

            # ordered list -> RST auto-numbered list items.
            res = selector.xpath("/html/body/ol/child::node()").getall()
            if res:
                for ele in [self.strip_outer_tag(ele, "li") for ele in res if len(ele.strip()) > 0]:
                    if self.has_children_tags(ele):
                        temp_text = self.process_content(url, ele, level+1, folder, filename, configurations=configurations)
                    else:
                        temp_text = ele

                    ret_text = ret_text + f"#. {temp_text.lstrip()}\n"

                ret_text = f"\n\n{ret_text}\n"
                break


            res = selector.xpath("/html/body/a").getall()
            if res:
                href = selector.xpath("/html/body/a/@href").get()
                if href:
                    # fix: the previous "href .strip()" discarded its result.
                    href = href.strip()

                    if href.startswith("/"): # without domain name.
                        href = "https://www.scaledagileframework.com" + href

                    text = selector.xpath("/html/body/a/text()").getall()
                    if text:
                        text = ' '.join([ele.strip() for ele in text])
                        ret_text = ret_text + f" `{text} <{href}>`_ "
                    else:
                        ret_text = ret_text + f" `{href} <{href}>`_ "
                    break


            res = selector.xpath("/html/body/br").getall()
            if res:
                ret_text = ret_text + f"\n"
                break


            # table treatment.
            if content.startswith("<table") :
                header = re.findall("<th.*?>(.*?)</th>", content)
                content = content.replace("\r", "").replace("\n", "")    # remove EOL.
                ret_text = ret_text + f"\n\n.. csv-table::\n    :delim: #\n"
                if header:
                    header = ",".join(header)
                    header = re.sub(r"<tr.*?><th.*?>", "", header)
                    ret_text = ret_text + "    :header: " + header + "\n"

                ret_text = ret_text + "\n"
                body = re.findall("<tbody.*?>(.*)</tbody>", content)
                if not body:
                    ret_text = ret_text + f"{content}"
                else:
                    tds = []
                    trs = re.findall("<tr.*?>(.*?)</tr>", body[0])
                    for x in trs:
                        tds = re.findall("<td.*?>(.*?)</td>", x)
                        ret_text = ret_text + "    " + "#".join(tds) + "\n"

                break



            # default handler....
            logging.warning(f"{url}： skip {content[:32]}")
            break

        if ret_text:
            self.status[url]["1st_output"] = False
            if restart_level:
                # a header was emitted: the next fragment starts a new block.
                self.status[url]["1st_output"] = True

            if not self.status[url]["page_end"]:
                # regex-based end-of-page detection on the generated text.
                ending_re = get_leveled_config(f"{self.status[url]['category']}/end_re", configurations)
                if ending_re and re.findall(ending_re, ret_text):
                    self.status[url]["page_end"] = True

        return ret_text

    def parse_image(self, response):
        '''Callback for image requests: store the body under the path passed
        in the request meta ("name").
        '''
        img_name = response.meta.get("name", None)
        if not img_name:
            # The previous call passed ``file=sys.stderr`` (a leftover from
            # print()) which logging.error() rejects with a TypeError, and
            # the message was missing its f-prefix.
            logging.error(f"{response.url} is not recognized.")
            return

        folder, _ = os.path.split(img_name)
        if not os.path.exists(folder):
            os.makedirs(folder)

        with open(img_name, "w+b") as fh:
            # response.body is already bytes; no bytearray copy needed.
            fh.write(response.body)

    def strip_outer_tag(self, data, tag=None):
        '''Remove the outermost ``<tag ...>`` / ``</tag>`` pair from *data*.

        When *tag* is None it is sniffed from the data itself.  The previous
        sniffing pattern was greedy and captured nearly the whole string
        instead of just the tag name, so the auto-detect path never worked;
        the pattern below stops at the end of the tag name, before any
        attributes.
        '''
        data = data.strip()
        if tag is None:
            res = re.match(r"^<([A-Za-z][A-Za-z0-9-]*)", data)
            if res is not None:
                tag = res.group(1)

        if tag is not None:
            data = re.sub(r'^<%s.*?>' % (tag), '', data)
            data = re.sub(r'</%s>$' % (tag), '', data)

        return data

    def get_outer_tag(self, data):
        '''Return the name of the outermost tag in *data*, or None.

        Uses a non-greedy tag-name pattern: the previous one swallowed
        attributes and content as part of the "name".
        '''
        res = re.match(r"^<([A-Za-z][A-Za-z0-9-]*)", data.strip())
        if res is not None:
            return res.group(1)

        return None

    def has_children_tags(self, data):
        '''Report whether the given fragment (e.g. <p>...</p>) still
        contains any child tag.
        '''
        return re.search(r"<([\s\S]+).*?>", data.strip()) is not None

    # Heading state: 0 means "not decided yet"; the first heading level seen
    # becomes the top ("base") level.  NOTE(review): declared on the class
    # but assigned through ``self`` below, which creates a shadowing
    # instance attribute on first use.
    base_level = 0  # or the first met level is the top level
    # RST underline characters, one per nesting depth below the base level.
    level_chars = ['~', '-', '`', '^', '_', '$']

    def generate_paragraph_header(self, level, text):
        # Render *text* as an RST section header underlined per its depth.
        # NOTE(review): a level below base_level indexes level_chars with a
        # negative value, and a depth beyond 5 raises IndexError -- confirm
        # inputs stay within h(base)..h(base+5).
        if self.base_level == 0:
            self.base_level = level

        # Underline padded generously: wide (CJK) characters render wider
        # than len() counts.
        sep = self.level_chars[level - self.base_level] * (len(text)*2 + 10)
        return f"\n\n{text}\n{sep}\n"



    def load_configurations(self):
        '''Load host_*.yaml files from the working directory into one dict.

        Returns a mapping merged over all matching files; later files win on
        duplicate top-level keys.  Unreadable files are skipped silently.
        '''
        result = {}

        pattern = re.compile(r"host_.*\.yaml")

        for ele in os.listdir("."):
            if not os.path.isfile(ele) or not pattern.match(ele):
                continue

            try:
                # ``with`` guarantees the handle gets closed (the previous
                # code leaked it).  FullLoader kept for compatibility; NOTE:
                # safe_load would be preferable if these files were ever
                # untrusted.
                with open(ele, 'r') as yaml_file:
                    yaml_content = yaml.load(yaml_file, Loader=yaml.FullLoader)
            except OSError:
                continue

            # an empty yaml file parses to None, which would crash update().
            if yaml_content:
                result.update(yaml_content)

        return result


    def _get_hostname(self, url):
        '''Return the host[:port] part of *url*.

        Returns an empty string when the url has no "://" separator (the
        previous two-way split raised ValueError there, and also for urls
        containing "://" more than once), so the caller falls back to the
        default handler instead of crashing.
        '''
        _, sep, rest = url.partition("://")
        if not sep:
            return ""
        return rest.split("/")[0]


    def _save_content_to_rst(self, content, folder, filename, url = None, header = ""):
        '''Post-process *content* and write it to <folder>/<filename>.rst.

        Cleanup performed:
        - trailing whitespace is stripped from every line
        - runs of more than two consecutive empty lines are collapsed
        - a single leading space before bold markup (" **") is dropped so
          the line is not rendered as a block quote
        '''
        empty_line = 0
        temp_content = []
        for ele in content.split("\n"):
            ele = ele.rstrip()
            if len(ele) == 0:
                empty_line += 1
                if empty_line > 2:
                    continue
            else:
                empty_line = 0

            if ele.startswith(" **"):
                temp_content.append(ele[1:])
            else:
                temp_content.append(ele)
        content = "\n".join(temp_content)

        content = header + content + RST_TAILER

        # explicit UTF-8: the tailer contains CJK text, so relying on the
        # platform default encoding would fail on non-UTF-8 locales.
        with open("%s.rst"%( os.sep.join([folder, filename])), "w", encoding="utf-8") as handle:
            print(content, file=handle)

        return

