"""
爬虫获取36kr首页的新闻列表的文章内容

网址：[36kr](https://36kr.com/)

获取文章字段：
    1. 标题
    2. 标题对应的url地址
    3. 标题对应的图片

最后保存：
    1. 把所有文本保存为json文件
    2. 把图片保存到本地的目录中
"""
from pprint import pprint

import requests
import re
import json
from jsonpath import jsonpath
import os
import pprint


class Spider_36kr:
    """Crawl the news list on the 36kr homepage (https://36kr.com/).

    For every news item the crawler extracts the id, detail-page url, title
    and cover-image url, writes the records to ``json_36kr.json`` and
    downloads each cover image into the ``36kr_images/`` directory.
    """

    def __init__(self):
        """
        Prepare the crawl: target address and request headers.
        """
        self.url = 'https://36kr.com/'
        # Desktop-browser UA so the site serves the regular HTML page.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
        }

    def get_data(self):
        """
        2. Send the request and fetch the homepage HTML.
        :return: decoded response body (str)
        """
        # timeout so a stalled connection cannot hang the crawler forever
        resp = requests.get(self.url, headers=self.headers, timeout=10)
        return resp.content.decode()

    @staticmethod
    def _find_key(obj, key):
        """Depth-first search of nested dicts/lists for the first value
        stored under *key*; returns None when the key is absent.

        Stdlib replacement for the previous jsonpath '$..itemList' lookup
        (same first-match-in-document-order semantics for this payload).
        """
        if isinstance(obj, dict):
            if key in obj:
                return obj[key]
            for value in obj.values():
                found = Spider_36kr._find_key(value, key)
                if found is not None:
                    return found
        elif isinstance(obj, list):
            for value in obj:
                found = Spider_36kr._find_key(value, key)
                if found is not None:
                    return found
        return None

    def parse_data(self, data):
        """
        Extract the news items embedded in the page's initial-state JSON.
        :param data: homepage HTML (str)
        :return: list of dicts with keys ``id``, ``url``, ``title``, ``image``
        :raises ValueError: when the initialState payload or the item list
            cannot be located (previously a bare IndexError / TypeError)
        """
        # Escaped dot: the old pattern's bare '.' matched any character.
        match = re.search(r'<script>window\.initialState=(.*?)</script>', data)
        if match is None:
            raise ValueError('window.initialState payload not found in page')
        dict_data = json.loads(match.group(1))

        news_list = self._find_key(dict_data, 'itemList')
        if news_list is None:
            raise ValueError('itemList not found in initialState JSON')

        results = []
        for news in news_list:
            # Bug fix: a missing 'templateMaterial' used to raise
            # AttributeError inside the very check meant to skip bad records;
            # treat it as an incomplete record instead.
            material = news.get('templateMaterial') or {}
            item = {
                'id': news.get('itemId'),
                'url': news.get('route'),
                'title': material.get('widgetTitle'),
                'image': material.get('widgetImage'),
            }
            # Skip records with any missing field, as before.
            if any(value is None for value in item.values()):
                continue
            results.append(item)

        return results

    def save_data(self, result_list):
        """
        Persist the parsed items: all records to ``json_36kr.json`` and every
        cover image to ``36kr_images/<id>.png``.
        :param result_list: list of item dicts produced by parse_data
        :return: None
        """
        # Save the text. Explicit UTF-8 so the ensure_ascii=False (Chinese)
        # output is written correctly on platforms with another default.
        json_data = json.dumps(result_list, ensure_ascii=False)
        with open('json_36kr.json', 'w', encoding='utf-8') as f:
            f.write(json_data)

        # Save the images. makedirs(exist_ok=True) avoids the
        # check-then-create race of the previous exists()/mkdir() pair.
        os.makedirs('36kr_images', exist_ok=True)

        for news in result_list:
            response = requests.get(news.get('image'),
                                    headers=self.headers, timeout=10)
            with open('36kr_images/' + str(news.get('id')) + '.png', 'wb') as f:
                f.write(response.content)

    def run(self):
        """Entry point: fetch the page, parse the items, save everything."""
        # 1. Crawl preparation happens in __init__ (url, headers)
        # 2. Send the request
        # 3. Extract the data we need
        # 4. Save
        data = self.get_data()
        results = self.parse_data(data)
        self.save_data(results)


if __name__ == '__main__':
    # Run the full crawl: fetch the homepage, parse the news list,
    # then save the JSON records and the cover images.
    Spider_36kr().run()
