'''
Spider module built on the requests library; responsible for fetching
network resources such as topic-page HTML and image files.
'''

import os
import re
import time
# noinspection PyUnresolvedReferences
from collections import deque, UserDict    # PyCharm is really annoying about this
from datetime import datetime
from typing import Iterable, Optional

import requests

try:
    from bs4 import BeautifulSoup
except ModuleNotFoundError:
    BS4 = False
else:
    BS4 = True

# Original link
# https://tieba.baidu.com/p/1766018024

# Link targeted by the "refresh" button
# http://tieba.baidu.com/mo/q---45E97D5C18EB92F0388E46EFCA0FACD4%3AFG%3D1--1-3-0--2--
# wapp_1576220327765_337/m?kz=1766018024&lp=6000&pn=0

# Reply-list (sub-floor) link
# http://tieba.baidu.com/mo/q---45E97D5C18EB92F0388E46EFCA0FACD4%3AFG%3D1--1-3-0--2--
# wapp_1576220327765_337/flr?pid=22616319749&kz=1766018024&pn=0

# Link targeted by the "next page" button
# http://tieba.baidu.com/mo/q---45E97D5C18EB92F0388E46EFCA0FACD4%3AFG%3D1--1-3-0--2--
# wapp_1576220327765_337/m?kz=1766018024&new_word=&pn=30&lp=6005

# Glossary used in this module:
# topic post (主题帖): topic
# floor / numbered post (楼层): floor
# sub-floor reply (楼中楼): reply

__all__ = ["BasePage", "TopicPage", "FloorPage", "BaseSpider", "TopicSpider",
           "FloorSpider", "TopicInfo"]

# Module-level constants and globals
TIEBA = "https://tieba.baidu.com"
PIC_DIR = "data/pics"
UPDATE_PICS = False


class BasePage:
    '''Common base for topic pages and reply (sub-floor) pages.

    :param str html: raw HTML text of the web page
    :param int pg: page number of this page
    '''

    def __init__(self, html: str, pg: int):
        if not isinstance(html, str):
            raise TypeError("`html` must be a string")
        if not isinstance(pg, int):
            raise TypeError("`pg` must be an integer")
        self.html, self.pg = html, pg

    def page_is_found(self) -> bool:
        '''Whether this page exists (if not, re-fetching it usually works).'''
        # delegates to the module-level helper of the same name
        return page_is_found(self.html)

    @property
    def parent_id(self) -> int:
        '''Id of the topic or floor this page belongs to (subclass hook).'''
        raise NotImplementedError("Method hasn't been implemented yet.")

    @classmethod
    def from_requests_response(
            cls,
            response: requests.Response,
            parent: "BaseSpider",
            pg: int
        ) -> "BasePage":
        '''Build a page object from a requests.Response (subclass hook).

        :type response: requests.Response
        :param parent: the topic or floor spider this page belongs to
        :param int pg: page number of this page
        '''
        raise NotImplementedError("ClassMethod hasn't been implemented yet.")


class TopicPage(BasePage):
    '''A single page inside a topic post.

    :param str html: raw HTML text of the page
    :param parent_topic: the topic spider this page belongs to
    :type parent_topic: TopicSpider
    :param int pg: page number of this page
    '''

    def __init__(self, html: str, parent_topic: "TopicSpider", pg: int):
        super().__init__(html, pg)
        self.parent_topic = parent_topic

    @property
    def parent_id(self) -> int:
        '''Id of the topic this page belongs to.'''
        return self.parent_topic.topic_id

    @classmethod
    def from_requests_response(
            cls,
            response: requests.Response,
            parent_topic: "TopicSpider",
            pg: int
    ) -> Optional["TopicPage"]:
        '''Build a topic-page object from a requests.Response.

        Returns None when `response` is not a requests.Response
        (e.g. the download failed and None was passed in).

        :type response: requests.Response
        :param parent_topic: the topic spider this page belongs to
        :type parent_topic: TopicSpider
        :param int pg: page number of this page
        :rtype: TopicPage or None
        '''
        if not isinstance(response, requests.Response):
            return None
        return cls(response.text, parent_topic, pg)


class FloorPage(BasePage):
    '''A page of sub-floor replies belonging to one floor.

    :param str html: raw HTML text of the page
    :param parent_floor: the floor spider this page belongs to
    :type parent_floor: FloorSpider
    :param int pg: page number of this page
    '''

    def __init__(self, html: str, parent_floor: "FloorSpider", pg: int):
        super().__init__(html, pg)
        self.parent_floor = parent_floor

    @property
    def parent_id(self) -> int:
        '''Id of the floor this page belongs to.'''
        return self.parent_floor.floor_id

    @classmethod
    def from_requests_response(
            cls,
            response: requests.Response,
            parent_floor: "FloorSpider",
            pg: int
    ) -> Optional["FloorPage"]:
        '''Build a reply-page object from a requests.Response.

        Returns None when `response` is not a requests.Response
        (e.g. the download failed and None was passed in).

        :type response: requests.Response
        :param parent_floor: the floor spider this page belongs to
        :type parent_floor: FloorSpider
        :param int pg: page number of this page
        :rtype: FloorPage or None
        '''
        if not isinstance(response, requests.Response):
            return None
        return cls(response.text, parent_floor, pg)


class TopicInfo:
    '''Container for a topic's basic metadata.

    The data is parsed from the topic's first and last pages; after a
    successful `download_topic_info` the instance is frozen (read-only).
    '''
    __slots__ = ["topic_id", "wap_url", "lz", "datetime", "title",
                 "total_replys", "total_floors", "total_pages",
                 "_isfrozen", "_html", "info"]

    def __init__(self, topic_id: int):
        self._isfrozen = False    # must be assigned first: __setattr__ consults it
        self.topic_id = topic_id
        self._html = ""           # becomes (first_page_html, last_page_html) after download
        self.info = {}

        self.wap_url = ""         # WAP-version url prefix of this topic
        self.lz = ""              # username of the original poster
        self.datetime = datetime.fromtimestamp(0)    # when the topic was posted
        self.title = ""
        self.total_replys = 0     # total replies: floors plus sub-floor replies
        self.total_floors = 0
        self.total_pages = 0      # 30 floors per page
        self.generate_info_dict()

    def __getattr__(self, name):
        '''Only gives a more readable error when a slot has not been
        initialized yet; does not affect performance (runs solely on
        failed lookups).'''
        if name in self.__slots__:
            raise AttributeError(f"Attribute `{name}` has not been initialized")
        else:
            return self.__getattribute__(name)

    def __setattr__(self, name, value):
        '''Reject attribute writes once the info has been frozen.'''
        if hasattr(self, "_isfrozen") and self._isfrozen:
            raise AttributeError("Topic info is already frozen now.")
        else:
            super().__setattr__(name, value)

    def __getitem__(self, key):
        '''Dict-style read access to the attributes.'''
        return getattr(self, key)

    def get(self, key, default=None):
        '''dict.get-style read access: `default` when `key` is absent.'''
        return getattr(self, key) if hasattr(self, key) else default

    def parse_wap_url(self):
        '''Extract the WAP-version tieba url of this topic from page 1.'''
        pattern = r"<a\s*href=(\"|')(/mo/q--[^<>]*?)/m\?[^<>]*?\1>\s*刷新\s*</a>"
        self.wap_url = TIEBA + re.search(pattern, self._html[0]).group(2)

    def parse_lz_datetime(self):
        '''Extract the original poster's username and the posting time.'''
        pattern = re.compile(
            r"<div[^>]*>"        # opening div tag
            r"1楼\."             # marker of floor #1
            r".*?"               # other characters
            r"<a href=[^>]*>"    # linked tag marking the start of the username
            r"(.*?)"             # the poster's username
            r"</a>.*?"           # end of username, other characters
            r"((\d+-\d+-\d+)"    # datetime string in one of three forms: year-month-day
            r"|(\d+-\d+\s\d+:\d+)"    # month-day time
            r"|(\d+:\d+))"            # time only
            r"</span>.*?"        # other characters
            r"</div>"            # closing div tag
        )
        match = re.search(pattern, self._html[0])
        self.lz = match.group(1).strip()

        # Complete the partial date with "now": group 3 lacks a time,
        # group 4 lacks the year, group 5 lacks the whole date.
        format_list = ["{} 00:00", r"%Y-{}", r"%Y-%m-%d {}"]
        for i, fmt in enumerate(format_list, 3):
            if match.group(i):
                dtstr = datetime.strftime(datetime.now(), fmt) \
                    .format(match.group(i))
                self.datetime = datetime.strptime(dtstr, r"%Y-%m-%d %H:%M")

    def parse_title_total_replys(self):
        '''Extract the title and the total reply count from page 1.'''
        # <div class="bc p"><strong>直 播，MC 1.3 原版生存</strong>&#160;第1页(共178521贴)<br/>
        pattern = r"<div[^>]*>\s*<strong>(.*?)</strong>.*\(共(\d+)贴\)\s*<br/>"
        match = re.search(pattern, self._html[0])
        self.title = match.group(1).strip()
        self.total_replys = int(match.group(2))

    def parse_total_floors(self):
        '''Extract the total floor count (first floor number on the last page).'''
        pattern = r"<div[^>]*>\s*(\d+)楼\. .*?</div>"
        self.total_floors = int(re.search(pattern, self._html[1]).group(1))

    def parse_total_pages(self):
        '''Extract the total page count of the topic from page 1.'''
        pattern = r"<br/>\s*第\d+/(\d+)页\s*<input"
        self.total_pages = int(re.search(pattern, self._html[0]).group(1))

    def download_first_last_pages(self, retry=2, wait=0.0) -> bool:
        '''Download the topic's first and last page into self._html.

        :param int retry: number of retries after a failed fetch
        :param float wait: seconds to wait between retries
        :return: whether both pages were fetched successfully
        '''
        raw_url = TIEBA + f"/p/{self.topic_id}"
        headers = {"user-agent": "Nokia"}    # mobile UA, presumably to get the lightweight markup -- TODO confirm
        params = {"last": "1"}    # reverse order: serves the last page first

        # fetch the first page, retrying on failure
        for i in range(retry + 1):
            first, successful = get_a_page(raw_url, headers=headers)
            successful = successful and page_is_found(first.text)
            if successful:
                break
            else:
                time.sleep(wait)
        else:
            return False    # all attempts failed

        # fetch the last page, retrying on failure
        for i in range(retry + 1):
            last, successful = get_a_page(raw_url, params, headers)
            successful = successful and page_is_found(last.text)
            if successful:
                break
            else:
                time.sleep(wait)
        else:
            return False    # all attempts failed

        # flatten newlines so the parse_* regexes can match across lines
        self._html = (first.text.replace("\n", " "),
                      last.text.replace("\n", " "))
        return True

    def download_topic_info(self, retry=2, wait=0.0) -> bool:
        '''Crawl once to obtain the topic's basic info, then freeze it.

        :param int retry: number of retries after a failed fetch
        :param float wait: seconds to wait between retries
        :return: whether the info was obtained successfully
        '''
        successful = self.download_first_last_pages(retry, wait)
        if not successful:
            return False

        self.parse_wap_url()
        self.parse_lz_datetime()
        self.parse_title_total_replys()
        self.parse_total_floors()
        self.parse_total_pages()
        self.generate_info_dict()

        self._isfrozen = True    # from now on every attribute write raises
        return True

    def generate_info_dict(self):
        # The info dict seems unnecessary by now; kept only for __str__/__repr__
        self.info = {k: getattr(self, k) for k in self.__slots__
                     if k not in ("_isfrozen", "_html", "info")}

    def pprint(self):
        '''Pretty-print the info dict (debugging helper).'''
        from pprint import pprint
        pprint(self.info)


class BaseSpider:
    '''Abstract base class for the spiders in this module.'''
    # NOTE: this shared base class may no longer be necessary

    def __init__(self, *args, **kwargs):
        raise NotImplementedError("Method hasn't been implemented yet.")


class TopicSpider(BaseSpider):
    '''Spider for one topic post.

    :param int topic_id: id of the topic, i.e. the number at the end of its url
    :param topic_info: optional pre-fetched topic metadata; when both
        arguments are given their topic ids must agree
    :type topic_info: TopicInfo or None
    '''

    def __init__(self, topic_id=0, topic_info: Optional["TopicInfo"] = None):
        if not isinstance(topic_id, int):
            raise TypeError("`topic_id` must be an integer")
        if (not topic_id) and (not topic_info):
            raise ValueError("未传入有效的帖子id信息(topic_id)")

        if isinstance(topic_info, TopicInfo):
            if topic_id and topic_id != topic_info.topic_id:
                raise ValueError("`topic_id`与`topic_info`中的topic_id不一致")
            self._info = topic_info
            self.topic_id = topic_info.topic_id
        else:
            self.topic_id = topic_id
            self._info = TopicInfo(topic_id)

    @property
    def raw_url(self) -> str:
        '''PC-version tieba url derived from the topic id.'''
        return TIEBA + "/p/" + str(self.topic_id)

    @property
    def wap_url(self) -> str:
        '''WAP-version tieba url of this topic (empty until info is fetched).'''
        return self._info.get("wap_url")

    @property
    def total_replys(self) -> int:
        '''Total reply count, floors plus sub-floor replies.'''
        return self._info.get("total_replys")

    @property
    def total_floors(self) -> int:
        '''Total number of floors in this topic.'''
        return self._info.get("total_floors")

    @property
    def total_pages(self) -> int:
        '''Total number of pages (30 floors per page).'''
        return self._info.get("total_pages")

    @property
    def lz(self) -> str:
        '''Username of the original poster.'''
        # BUGFIX: the return annotation used to say `int`; this is a string.
        return self._info.get("lz")

    @property
    def datetime(self) -> datetime:
        '''Datetime when the topic was posted.'''
        # BUGFIX: previously read the nonexistent key "date" and therefore
        # always returned None; TopicInfo stores the field as "datetime".
        return self._info.get("datetime")

    @property
    def title(self) -> str:
        '''Title of the topic.'''
        return self._info.get("title")

    @property
    def info(self) -> "TopicInfo":
        '''The underlying TopicInfo object.'''
        return self._info

    def download_topic_info(self, retry=2, wait=0.0) -> bool:
        '''Crawl once to obtain the topic's basic info (no-op when cached).

        :param int retry: number of retries after a failed fetch
        :param float wait: seconds to wait between retries
        :return: whether the info was obtained successfully
        '''
        if self._info["lz"]:    # lz is non-empty only after a successful fetch
            return True
        topic_info = TopicInfo(self.topic_id)
        if not topic_info.download_topic_info(retry, wait):
            return False
        self._info = topic_info
        return True

    def get_topic_page(self, pages: Iterable[int], retry=3, wait=0.0, see_lz=False):
        '''Crawl the given pages of the topic.

        :param pages: page numbers to crawl (each page holds 30 floors)
        :type pages: Iterable[int]
        :param int retry: number of retries when fetching a page fails
        :param wait: seconds to wait before retrying a failed fetch
        :type wait: float or int
        :param bool see_lz: whether to enable "poster only" mode

        :return: a generator yielding 3-tuples:
            tuple[pg: int, webpage: TopicPage, status_code: int]
            pg : page number of the current page
            webpage : the fetched topic-page object, possibly None
            status_code : status code of the request, -1 for other errors
        '''
        if not isinstance(retry, int):
            raise TypeError("`retry` must be an integer")
        see_lz = bool(see_lz)

        raw_url = self.raw_url
        headers = {"user-agent": "Nokia"}
        params = {"see_lz": str(int(see_lz)),
                  "pn": "0"}

        _get_a_page = get_a_page    # local alias: faster lookup in the loop
        for pg in pages:
            # BUGFIX: was an `assert`, which disappears under `python -O`
            if not (isinstance(pg, int) and pg > 0):
                raise ValueError("page numbers must be positive integers")
            params.update(pn=str(30 * (pg - 1)))    # tieba paginates by floor offset

            webpage, status_code = None, 0
            for _ in range(retry + 1):
                r, successful = _get_a_page(raw_url, params, headers)
                webpage = TopicPage.from_requests_response(r, parent_topic=self, pg=pg)
                status_code = r.status_code if (r is not None) else -1
                if successful and webpage.page_is_found():
                    break
                time.sleep(wait)

            yield pg, webpage, status_code

    def copy(self):
        '''Return a new instance that shares this spider's id and info
        (a shallow reference copy, not a deep copy).
        '''
        # BUGFIX: `self.__class__(0)` raised ValueError because a zero
        # topic_id without topic_info is rejected by __init__; route the
        # shared info through the constructor instead.
        return self.__class__(self.topic_id, self._info)


class FloorSpider(BaseSpider):
    '''Spider for the sub-floor replies of one floor.

    :param int floor_id: id of the floor, the `pid` parameter in the url
    :param parent_topic: topic spider that owns this floor
    :type parent_topic: TopicSpider or None
    :param topic_info: metadata of the owning topic; taken from
        `parent_topic` when omitted (at least one of the two is required)
    :type topic_info: TopicInfo or None
    '''

    def __init__(self, floor_id: int,
                 parent_topic: Optional["TopicSpider"] = None,
                 topic_info: Optional["TopicInfo"] = None):
        if not isinstance(floor_id, int):
            raise TypeError("`floor_id` must be an integer")
        if not isinstance(parent_topic, (TopicSpider, type(None))):
            raise TypeError("`parent_topic` must be a TopicSpider instance")

        if (not topic_info) and (not parent_topic):
            raise ValueError("`topic_info`和`parent_topic`不可同时为空")
        elif topic_info and (not parent_topic):
            pass    # topic_info alone is sufficient
        elif (not topic_info) and parent_topic:
            topic_info = parent_topic.info
        elif topic_info.topic_id != parent_topic.topic_id:
            raise ValueError("`topic_info`和`parent_topic`的topic_id不一致")

        self.topic_info = topic_info
        self.parent_topic = parent_topic    # kept for compatibility; currently unused
        self.floor_id = floor_id

    @property
    def topic_id(self) -> int:
        '''Id of the topic this floor belongs to.'''
        return self.topic_info.topic_id

    @property
    def wap_url(self) -> str:
        '''WAP-version url of the owning topic (empty until info is fetched).'''
        return self.topic_info.wap_url

    @property
    def floor_url(self) -> str:
        '''Url of this floor's reply list, or "" when it cannot be determined.'''
        if self.wap_url == "":
            # the wap url is only known after the topic info has been fetched
            if not self.topic_info.download_topic_info():
                return ""
        return self.wap_url + "/flr"

    def get_floor_page(self, pages: Iterable[int], retry=3, wait=0.0, see_lz=False):
        '''Crawl the given reply pages of this floor.

        :param pages: page numbers to crawl
        :type pages: Iterable[int]
        :param int retry: number of retries when fetching a page fails
        :param wait: seconds to wait before retrying a failed fetch
        :type wait: float or int
        :param bool see_lz: whether to enable "poster only" mode

        :return: a generator yielding 3-tuples:
            tuple[pg: int, webpage: FloorPage, status_code: int]
            pg : page number; 0 means the wap url could not be obtained
            webpage : the fetched reply-page object, possibly None
            status_code : status code of the request, -1 for other errors
        '''
        # hoisted: evaluating the property may trigger a topic-info download,
        # so do it once instead of on every retry
        floor_url = self.floor_url
        if floor_url == "":
            yield 0, None, -1
            return
        params = {"kz": str(self.topic_id),
                  "pid": str(self.floor_id),
                  "see_lz": str(int(see_lz)),
                  "fpn": "1"}    # reply lists use the 1-based page number `fpn`

        # CONSISTENCY: hoisted out of the pages loop, matching
        # TopicSpider.get_topic_page (it was re-bound on every iteration)
        _get_a_page = get_a_page    # local alias: faster lookup in the loop
        for pg in pages:
            # BUGFIX: was an `assert`, which disappears under `python -O`
            if not (isinstance(pg, int) and pg > 0):
                raise ValueError("page numbers must be positive integers")
            params.update(fpn=str(pg))

            webpage, status_code = None, 0
            for _ in range(retry + 1):
                r, successful = _get_a_page(floor_url, params=params)
                webpage = FloorPage.from_requests_response(r, parent_floor=self, pg=pg)
                status_code = r.status_code if (r is not None) else -1
                if successful and webpage.page_is_found():
                    break
                time.sleep(wait)

            yield pg, webpage, status_code


def get_a_page(url, params=None, headers=None, timeout=None):
    '''Fetch a single web page; parameters mean the same as in requests.

    :param timeout: optional timeout in seconds passed through to
        requests.get; None (the default, preserving old behavior) waits
        indefinitely
    :return: tuple of (the Response or None, whether the fetch succeeded)
    :rtype: tuple[requests.Response or None, bool]
    '''
    r = None    # stays None when requests.get itself raises
    try:
        r = requests.get(url, headers=headers, params=params, timeout=timeout)
        r.raise_for_status()
    except requests.RequestException:
        # HTTPError is a subclass of RequestException, so this single
        # handler covers both raise_for_status() and transport failures
        # (the original had two identical except branches)
        successful = False
    else:
        successful = True

    return r, successful


def page_is_found(html: str) -> bool:
    '''Return True when the page exists.

    A missing page ("the post you want to view does not exist") can
    usually be obtained by simply requesting it one more time.
    '''
    flattened = html.replace("\n", " ")
    pattern = r"<div>\s*您要浏览的贴子不存在\s*<br/>.*?</div>"
    return not re.search(pattern, flattened)


def download_pic(url: str, *, root_dir=PIC_DIR, update=UPDATE_PICS) -> bool:
    '''Download one picture and save it under `root_dir`.

    The local path mirrors the url's host and path components, e.g.
    "http://host/a/b.jpg" is saved as "<root_dir>/host/a/b.jpg".

    :param str url: url of the picture
    :param str root_dir: directory the picture tree is saved under
    :param bool update: re-download and overwrite even if the file exists
    :return: True when the file exists on disk afterwards
    '''
    # "scheme://host/a/b.jpg?query" -> ["host", "a", "b.jpg"]
    path_list = url.split("?")[0].split("/")[2:]
    path = os.path.join(root_dir, *path_list[:-1])
    fname = os.path.join(path, path_list[-1])
    if os.path.exists(fname) and not update:
        return True

    # (removed a dead `r = None`: every path below assigns r or returns)
    try:
        r = requests.get(url)
        r.raise_for_status()
    except requests.RequestException:
        return False

    os.makedirs(path, mode=0o755, exist_ok=True)
    with open(fname, "wb") as f:
        f.write(r.content)
    return True


def _test_get_topic_info():
    '''Manual smoke test: download and pretty-print a topic's metadata.'''
    spider = TopicSpider(1766018024)
    print(spider.download_topic_info())
    spider.info.pprint()


def _test_楼中楼():
    '''Manual smoke test: crawl a few reply pages and dump them to html/.'''
    t = TopicSpider(1766018024)
    fl = FloorSpider(22616319749, t)
    for i, webpage, code in fl.get_floor_page(range(1, 4)):
        print(f"楼中楼 第{i}页: state_code = {code}")
        if webpage is None:
            print(f"楼中楼 第{i}页 爬取失败。")
            continue
        html = (BeautifulSoup(webpage.html, "html.parser").prettify()
                if BS4 else webpage.html)
        with open(f"html/{fl.floor_id}_{i}.html", "w", encoding="utf-8") as f:
            f.write(html)


def _test_主题帖():
    '''Manual smoke test: crawl a few topic pages and dump them to html/.'''
    t = TopicSpider(1766018024)
    for i, webpage, code in t.get_topic_page(range(1, 4)):
        print(f"第{i}页: state_code = {code}")
        if webpage is None:
            print(f"第{i}页 爬取失败。")
            continue
        html = (BeautifulSoup(webpage.html, "html.parser").prettify()
                if BS4 else webpage.html)
        with open(f"html/p{i:0>4d}.html", "w", encoding="utf-8") as f:
            f.write(html)


if __name__ == "__main__":
    # Example picture downloads (run manually when needed):
    # download_pic("http://tb2.bdstatic.com/tb/editor/images/face/i_f25.png?t=20140803")
    # download_pic("http://imgsrc.baidu.com/forum/pic/item/0bd162d9f2d3572c1833b74a8a13632763d0c356.jpg")
    _test_get_topic_info()
    _test_主题帖()
    _test_楼中楼()
