# coding=utf-8
# spider_main.py
"""
Crawl data from CSDN.
:author: antony zhao
:date: 24/03/2020
"""

# Orchestrates the other spider components and retrieves the blog data.
from bs4 import BeautifulSoup, Comment
import logging

from config import conf
from spider.article_details import ArticleDetails
from spider.html_parser import HtmlParser


class SpiderMain(object):
    """Coordinate the crawl: discover every article of the configured CSDN
    blog and persist each one to MySQL."""

    def __init__(self):
        # Shared parser instance; downloads a URL and returns its soup.
        self.html_parser = HtmlParser()

    def get_all_article_info(self):
        """
        Crawl every article of the blog configured in ``conf.csdn_url``.

        The first listing page is fetched once and reused both for article
        extraction and for the page-count calculation, so page 1 is only
        downloaded a single time.
        """
        url = conf.csdn_url
        soup = self.html_parser.parse(url)
        self.get_current_page_article_info(current_url=url, current_soup=soup)
        page_num = self.get_page_count(soup)
        # Listing pages beyond the first live under /article/list/<n>.
        for i in range(2, page_num + 1):
            current_url = url + "/article/list/" + str(i)
            self.get_current_page_article_info(current_url=current_url)

    def get_current_page_article_info(self, current_url=None, current_soup=None):
        """
        Extract and store every article found on one listing page.

        :param current_url: url of the listing page (fetched when no soup given)
        :param current_soup: pre-parsed soup of the page; downloaded from
            ``current_url`` when omitted
        """
        logging.info("ready to craw %s", current_url)
        soup_article_list = current_soup
        if soup_article_list is None:
            soup_article_list = self.html_parser.parse(current_url)
        div_article_list = soup_article_list.find('div', class_='article-list')
        if div_article_list is None:
            # Empty page or changed layout -- skip this page instead of
            # crashing the whole crawl with an AttributeError.
            logging.warning("no article list found on %s", current_url)
            return
        for div_article in div_article_list.find_all(
                "div", class_="article-item-box csdn-tracking-statistics"):
            article_info = ArticleDetails()
            article_info.id = div_article["data-articleid"]
            # Drop the badge <span> inside the title link so only the
            # title text remains.
            div_article.h4.a.span.extract()
            article_info.title = div_article.h4.a.get_text().replace("\n", "")
            article_info.href = div_article.h4.a['href']
            article_info.article_des = div_article.p.get_text()
            article_info.date = div_article.find('span', class_="date").get_text()
            # First "num" span is the view count, second the comment count.
            for i, span_num in enumerate(div_article.find_all('span', class_="num")):
                if i == 0:
                    article_info.page_view = span_num.get_text()
                elif i == 1:
                    article_info.comments_count = span_num.get_text()
            article_info.article_body = self.get_article_body(article_info)
            logging.info("craw article %s successful, ready to store to mysql",
                         article_info.href)
            article_info.save_article_info_to_mysql()
        logging.info("craw %s finished", current_url)

    def get_article_body(self, article_info: "ArticleDetails"):
        """
        Download the article page and return the cleaned body HTML.

        :param article_info: article whose ``href`` points at the detail page
        :return: HTML string of the article body with the toolbox, the
            author box and all HTML comments stripped
        """
        soup_article_body = self.html_parser.parse(article_info.href)
        main_body = soup_article_body.find("article", class_="baidu_pl")
        main_body.find("div", class_="more-toolbox").extract()
        main_body.find("div", class_="person-messagebox").extract()
        # Strip HTML comments left in the markup.
        for element in main_body(text=lambda text: isinstance(text, Comment)):
            element.extract()
        article_body = str(main_body)
        return article_body

    # Backward-compatible alias for the historical (misspelled) name.
    get_atricle_body = get_article_body

    def get_page_count(self, soup: "BeautifulSoup"):
        """
        Return the number of listing pages of the blog.

        :param soup: soup of a listing page; its "data-info" block carries
            the total article count in the <dl> title attribute
        :return: page count as int
        """
        article_num = soup.find(
            "div", class_="data-info d-flex item-tiling").find("dl")["title"]
        return self._page_count_from_total(int(article_num), conf.eve_page_num)

    @staticmethod
    def _page_count_from_total(total_articles, per_page):
        """
        Ceiling division of article count by page size.

        The previous ``total // per_page + 1`` formula over-counted by one
        page whenever the article count was an exact multiple of the page
        size, making the crawler request a non-existent listing page.
        """
        if total_articles <= 0:
            return 0
        return -(-total_articles // per_page)


# just for test
if __name__ == "__main__":
    # NOTSET on the root logger lets every message through, so the crawl
    # progress emitted by SpiderMain is visible on the console.
    logging.basicConfig(level=logging.NOTSET)
    SpiderMain().get_all_article_info()


# url = "https://blog.csdn.net/weixin_41475710"
# html = HtmlDownloader().download(url)
# html_parser = HtmlParser()
# html_parser.parse(html)

# url = "https://blog.csdn.net/baishuiniyaonulia"
# html = HtmlDownloader.download(url)
# soup = BeautifulSoup(html, 'lxml', from_encoding='utf-8')
# print(HtmlParser().get_page_count(soup))
