import json
from lxml import etree
import requests
import re


class BlogSpider:
    """Crawler for sysheng.cn: walks the paginated article list and can
    fetch a single article's body text."""

    def __init__(self):
        self.url = 'https://sysheng.cn/?page={}'
        self.domain = 'https://sysheng.cn/'
        # Strips every non-digit character; used to pull numbers out of
        # strings such as "?page=3" or "阅读 12".
        self.str2num_pattern = re.compile(r'[^0-9]')
        self.page = 1  # number of the next listing page to fetch

    @staticmethod
    def send_request(url):
        """Fetch *url* and return the decoded response body.

        Raises Exception on any non-200 status. A timeout is set so a
        dead or stalled server cannot hang the crawl indefinitely.
        """
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            return response.content.decode()
        else:
            raise Exception('数据出错，暂停抓取')

    @staticmethod
    def get_xpath_element(html):
        """Parse an HTML string into an lxml element tree root."""
        return etree.HTML(html)

    def parse_data(self, element):
        """Extract all articles from a listing page.

        Returns (articles_list, next_page): a list of per-article dicts,
        and True when another page exists (self.page is then advanced by
        get_next_page) or None when this was the last page.
        """
        articles = element.xpath('//li[@class="pad"]')
        articles_list = []
        for a in articles:
            item = dict()
            item['img'] = a.xpath('div/div[1]/a/img/@src')[0]
            item['href'] = self.domain + a.xpath('div/div[1]/a/@href')[0]
            item['title'] = a.xpath('div/div[2]/div/span//text()')[0]
            item['time'] = a.xpath('div/div[2]/div/div[1]/span[1]//text()')[0]
            item['category'] = a.xpath('div/div[2]/div/div[1]/span[2]//text()')[0]
            read = a.xpath('div/div[2]/div/div[1]/span[3]//text()')[0]
            # Counts arrive as text like "阅读 12" — keep digits only.
            item['read'] = int(self.str2num_pattern.sub('', read))
            comment = a.xpath('div/div[2]/div/div[1]/span[4]//text()')[0]
            item['comment'] = int(self.str2num_pattern.sub('', comment))
            articles_list.append(item)
        # Check whether there is a next page (also advances self.page).
        next_page = self.get_next_page(element)
        return articles_list, next_page

    def write_file(self, data, page=None):
        """Append *data* as pretty-printed JSON under a page header.

        *page* is the page number to label the block with. When omitted
        it falls back to the historical behavior (self.page - 1), which
        mislabels the final page because get_next_page never advances
        self.page for it — callers should pass the page they fetched.
        """
        if page is None:
            page = self.page - 1
        with open('myblog.txt', 'a', encoding='utf-8') as f:
            f.write('------第{}页数据------\n'.format(page))
            f.write(json.dumps(data, ensure_ascii=False, indent=4))
            f.write('\n')

    def get_next_page(self, element):
        """Return True and advance self.page when a pagination link to a
        further page exists; return None on the last page."""
        page = element.xpath('//ul[@class ="pagination"]/li[last()]/a/@href')
        if len(page) == 0:
            return None
        else:
            self.page = int(self.str2num_pattern.sub('', page[0]))
            return True

    def get_detail(self, url='https://sysheng.cn/article/15.html'):
        """Fetch one article page and return its body text joined into a
        single string."""
        html_str = self.send_request(url=url)
        element = self.get_xpath_element(html=html_str)
        detail = element.xpath('//section[@class="content"]//div[1]//div[1]//div[1]//div[2]/div[1]//text()')
        detail_str = ''.join(detail)
        return detail_str

    def run(self):
        """Crawl every listing page and write each page's articles to disk.

        Iterates with a while-loop instead of the original per-page
        recursion (which risked RecursionError on large blogs), and
        records the page number that was actually fetched so the final
        page is labeled correctly.
        """
        try:
            while True:
                current_page = self.page
                page_url = self.url.format(current_page)
                html_str = self.send_request(url=page_url)
                element = self.get_xpath_element(html=html_str)
                articles_data, next_page = self.parse_data(element)
                self.write_file(articles_data, page=current_page)
                if next_page is None:
                    print('已经抓取全部文章')
                    break
        except Exception as e:
            print(e)


if __name__ == '__main__':
    spider = BlogSpider()
    # Print the fetched article body — the original discarded the
    # return value, making the call a silent no-op for the user.
    print(spider.get_detail())
