import json
import time

import scrapy


def cmp(item):
    """Sort key for result entries: the page number as an int.

    The page is scraped from the URL as a string; converting to int makes
    the sort numeric (string sort would put "10" before "2" once the page
    count exceeds 9).
    """
    return int(item['page'])


class HwBlog(scrapy.Spider):
    """
    Crawl the blog list pages of a Huawei Cloud community user.

    Fetches `total_url` list pages, collects every blog's title/URL, then
    writes the raw data to raw/hw.json and a markdown index to raw/hw.md.

    Run from the top-level blog directory:
        scrapy crawl hw
    """
    name = "hw"  # scrapy crawl hw
    res_list = []   # accumulated {"title", "url", "page"} dicts across callbacks
    req_count = 0   # number of list pages parsed so far
    total_url = 8   # total number of list pages to fetch

    def start_requests(self):
        """Yield one request per list page (1..total_url)."""
        # Reset both accumulators so re-running the spider on the same
        # instance starts from a clean state (previously only res_list
        # was reset, so a stale req_count triggered the final write early).
        self.res_list = []
        self.req_count = 0
        base_url = 'https://bbs.huaweicloud.com/community/usersnew/id_1606985929124732/page_'
        for page_no in range(1, self.total_url + 1):
            yield scrapy.Request(url=base_url + str(page_no), callback=self.parse)

    def parse(self, response):
        """Collect title/URL pairs from one list page; write the output
        files once after the last page has been parsed."""
        self.req_count += 1
        page = response.url.split("_")[-1]  # page number from the URL suffix

        for item in response.css('div.blog-menu'):
            title_se = item.css("a.common-blog-title")
            blog_url = 'https://bbs.huaweicloud.com' + title_se.css("::attr(href)").extract()[0]
            blog_title = title_se.css("::attr(title)").extract()[0]
            self.res_list.append({"title": blog_title, "url": blog_url, "page": page})

        # Responses arrive in arbitrary order; only write the files once,
        # after every page has been parsed. (Scrapy callbacks all run on a
        # single reactor thread, so the counter needs no locking.)
        if self.req_count < self.total_url:
            return

        # encoding='utf-8' is required: titles and headers contain Chinese
        # text, and the platform default encoding (e.g. on Windows) may not
        # be able to encode them.
        json_res = json.dumps(self.res_list, ensure_ascii=False)
        with open('raw/hw.json', 'w', encoding='utf-8') as f:
            f.write(json_res)

        self.res_list.sort(key=cmp)  # order entries by page number
        with open('raw/hw.md', 'w', encoding='utf-8') as f:
            f.write('## 华为云社区')
            f.write('\n更新日期: ' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            f.write('\n博客数量:' + str(len(self.res_list)) + '\n')
            for d in self.res_list:
                f.write('\n')
                f.write('- [' + d['title'] + '](' + d['url'] + ')')
