from crawler.random_header import RandomFakeHeaders
from util.log import logger
from ippool.redis_ippool import IPPool
import requests
import time
from bs4 import BeautifulSoup
import re
import sys
import random


class CsdnCrawler(object):
    """Crawler for a CSDN blog.

    Fetches pages through random proxies/headers, walks the paginated
    article list, and reads the home page's total page-view counter.
    """

    def __init__(self, home_page="https://blog.csdn.net/TOMOCAT", retry_time=3):
        self.__home_page = home_page  # blog home page URL
        self.__retry_time = retry_time  # retry count for page requests

    @staticmethod
    def visit_csdn(url, retry_time=3):
        """GET ``url`` via a random proxy/header pair, retrying on failure.

        Each attempt draws a fresh proxy from the IP pool and fresh fake
        headers. A non-200 status sleeps 5s and retries; a network error
        also consumes one retry (a bad proxy should not abort the whole
        visit). Returns the successful ``requests.Response`` or None.
        """
        response = None
        for _ in range(retry_time):
            try:
                headers = RandomFakeHeaders().random_headers_for_csdn()
                ip = IPPool().get_random_key()
                proxies = {"http": "http://" + ip}
                logger.debug("visit csdn with proxies:{}, url:{}", proxies, url)
                response = requests.get(url=url, headers=headers, proxies=proxies)

                if response.status_code == 200:
                    break
                # BUG FIX: loguru has `warning`, not `warn`
                logger.warning("visit url {} fail, sleep 5 second to retry...", url)
                time.sleep(5)
            except Exception as e:
                # BUG FIX: a network error used to return None immediately;
                # now it consumes one retry like a bad status code does.
                logger.exception(e)
                logger.error("visit url {} fail, please check the network", url)
        # BUG FIX: `response` is None when every attempt raised — guard
        # before reading status_code.
        if response is None or response.status_code != 200:
            if response is not None:
                logger.error("visit csdn with error resp code:{}", response.status_code)
            return None
        return response

    def get_article_info(self):
        """Collect info for every article of the blog.

        Walks ``<home_page>/article/list/<n>`` pages until an empty page is
        reached, sleeping a random interval between pages to dodge rate
        limits. Returns a list of article-info dicts, or None on a fetch or
        parse failure.
        """
        page_num = 1
        all_article_infos = []

        while True:
            sleep_time = 6 * random.random()
            article_list_url = self.__home_page + "/article/list/{}".format(page_num)
            logger.info("Info: now we are visiting {}...", article_list_url)

            response = self.visit_csdn(article_list_url)
            if response is None:
                logger.error("get article info from {url} fail", url=article_list_url)
                # BUG FIX: used to fall through and crash on response.text
                return None
            article_infos = self.__parse_html_to_article_info(response.text)

            if article_infos is None:
                logger.error("parse html to article info fail, url:[{}]", article_list_url)
                return None
            if len(article_infos) > 0:
                all_article_infos += article_infos
            else:
                # An empty list page means we walked past the last page.
                logger.info("get empty article info from {}, stop", article_list_url)
                break

            logger.success("get article from {} success, article cnt {}", article_list_url, len(article_infos))
            logger.info("sleep for {} seconds to get article info", sleep_time)
            time.sleep(sleep_time)
            page_num += 1

        # Sanity threshold: this blog is known to have >150 articles, so a
        # smaller count suggests the crawl was cut short.
        if len(all_article_infos) <= 150:
            logger.error("get not enough articles, article cnt:[{}]", len(all_article_infos))
        logger.info("get all article succ, article cnt:[{}]", len(all_article_infos))
        return all_article_infos

    @staticmethod
    def __parse_html_to_article_info(html):
        """Parse one article-list HTML page into a list of article dicts.

        Each dict carries:
          id:       unique article identifier
          href:     article URL
          title:    article title (whitespace/newlines stripped)
          date:     publication date text
          read_num: read count as int
        Returns None when the HTML cannot be searched at all; entries that
        fail to parse individually are skipped.
        """
        article_infos = []
        soup = BeautifulSoup(html, "html.parser")
        try:
            # BUG FIX: attrs must be a dict — the original passed a set,
            # which BeautifulSoup treats as a loose class filter.
            contents = soup.find_all("div", {"class": "article-item-box csdn-tracking-statistics"})
        except Exception as e:
            logger.exception(e)
            logger.error("parse html failed, please check the html")
            return None
        for content in contents:  # a sample `content` is kept in .src/crawler/content.xml
            try:
                article_info = {}
                article_info['id'] = content.attrs['data-articleid']
                article_info['href'] = content.a.attrs['href']
                article_info['title'] = re.sub(r"\s+|\n", "", content.a.get_text())
                article_info['date'] = content.find("span", {"class": "date"}).get_text()
                article_info['read_num'] = int(content.find("span", {"class": "read-num"}).get_text())

                # Skip the decoy entry injected by CSDN's anti-crawler strategy.
                # BUG FIX: logical `or` instead of bitwise `|`; `warning` not `warn`.
                if article_info['id'] == '82762601' or article_info['title'] == '原帝都的凛冬':
                    logger.warning("get wrong article, article info:{}", article_info)
                    continue
                article_infos.append(article_info)
            except Exception as e:
                logger.error("parse content fail with unexpected error")
                logger.exception(e)
                continue
        return article_infos

    def get_total_pv(self):
        """Return the total page-view count shown on the blog home page.

        Returns 0 when the page cannot be fetched or the counter element
        is missing.
        """
        response = self.visit_csdn(self.__home_page)
        if response is None:
            # BUG FIX: original referenced the undefined `article_list_url`
            # here, raising NameError on the failure path.
            logger.error("get total pv from {url} fail", url=self.__home_page)
            return 0
        soup = BeautifulSoup(response.text, "html.parser")
        # BUG FIX: dict attrs (was a set); `find` returns None when the
        # element is absent, which the old try/except never covered.
        content = soup.find("div", {"class": "user-profile-statistics-num"})
        if content is None:
            logger.error("parse html failed, please check the html")
            return 0
        pv_text = content.get_text()  # e.g. "5,414,302"
        return int(re.sub(r",", "", pv_text))


if __name__ == "__main__":
    """How to run:
    cd src
    python -m crawler.csdn_crawler
    """
    # Reset logger: emit logs to the console.
    logger.remove()
    logger.add(
        sys.stderr, colorize=True,
        format="[<green>{time:YYYY-MM-DDTHH:mm:ss}</green>]<level>{message}</level>",
        level="DEBUG")

    # get total pv
    logger.info("Test: get total pv")
    logger.info("total pv:{}", CsdnCrawler().get_total_pv())
    # NOTE: everything below is intentionally skipped by this exit; remove
    # the call to run the unit/overall tests.
    sys.exit()

    # unit test
    test = CsdnCrawler()
    article_list_url = "https://blog.csdn.net/TOMOCAT/article/list/1"
    response = test.visit_csdn(article_list_url)
    # BUG FIX: visit_csdn may return None — guard before reading status_code,
    # and bail out instead of crashing on response.text below.
    if response is not None and response.status_code == 200:
        logger.info("Test: get response from url successfully")
    else:
        logger.info("Test: get response fail")
        sys.exit(1)
    soup = BeautifulSoup(response.text, "html.parser")
    # BUG FIX: attrs must be a dict, not a set.
    contents = soup.find_all("div", {"class": "article-item-box csdn-tracking-statistics"})
    # BUG FIX: find_all returns a (possibly empty) list, never None — test
    # truthiness; also fix the "Tets" typo in the log message.
    if contents:
        logger.info("Test: get contents successfully")
    article_infos = []
    try:
        for content in contents:
            article_info = {}
            article_info['id'] = content.attrs['data-articleid']
            article_info['href'] = content.a.attrs['href']
            article_info['title'] = re.sub(r"\s+|\n", "", content.a.get_text())
            article_info['date'] = content.find("span", {"class": "date"}).get_text()
            article_info['read_num'] = int(content.find("span", {"class": "read-num"}).get_text())

            # Skip the decoy entry injected by CSDN's anti-crawler strategy.
            # BUG FIX: logical `or` instead of bitwise `|`.
            if article_info['id'] == '82762601' or article_info['title'] == '原帝都的凛冬':
                continue
            article_infos.append(article_info)
    except Exception as e:
        logger.exception(e)
    logger.info("article_infos:{}", article_infos)

    # overall test
    test = CsdnCrawler()
    all_infos = test.get_article_info()
    logger.info("all_info:{}", all_infos)
