# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
from utils.HttpUtils import HttpUtils


requests.packages.urllib3.disable_warnings()  # suppress urllib3 insecure-HTTPS warnings


"""
CNN
https://edition.cnn.com/
40大邮报
https://www.allyoucanread.com/american-newspapers/
"""


class CNN():
    """Scraper for CNN's public search API (search.api.cnn.io).

    Fetches search results for the hard-coded query "COVID-19" one page at
    a time and prints each article's headline, body, publish date and
    canonical URL.
    """

    def __init__(self):
        # CookieJar instance to hold cookies across requests.
        # (Use requests.utils.dict_from_cookiejar(resp.cookies) if a plain
        # dict is ever needed.)
        self.cookie = cookiejar.CookieJar()
        # NOTE(review): the original instantiated fake_useragent.UserAgent
        # here but never used it — the commented-out random-UA header below
        # was its only consumer. The constructor performs a network fetch
        # and can raise, so the dead call was removed; a static UA string
        # is sent instead.
        self.headers = {
            # "User-Agent": ua.random
            'authority': 'search.api.cnn.io',
            'method': 'GET',
            'path': '/content?size=10&q=COVID&page=1',
            'scheme': 'https',
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'dnt': '1',
            'origin': 'https://edition.cnn.com',
            'referer': 'https://edition.cnn.com/search?size=10&q=COVID&page=1',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36'
        }

    def get_contents(self, cursor):
        """Fetch and print one page of COVID-19 search results.

        Args:
            cursor: page number, passed as the API's ``page`` parameter.

        Returns:
            list[dict]: one dict per article with keys ``headline``,
            ``body``, ``lastPublishDate`` and ``path`` (full article URL).
        """
        postData = {
            'size': '10',
            'q': 'COVID-19',  # search keyword
            'from': '10',
            'page': cursor,   # page number
        }
        url = "https://search.api.cnn.io/content"  # was an f-string with no placeholders
        html = HttpUtils.do_request("GET", url, self.headers, postData)
        data_json = json.loads(html.text)
        # NOTE(review): the original bound meta['total'] (commented "total
        # pages") and meta['of'] (commented "total count") to unused locals;
        # the labels looked swapped and neither value was consumed, so both
        # were dropped — confirm field semantics against the API if needed.
        articles = []
        for d in data_json['result']:
            dict_data = dict()
            dict_data['headline'] = d['headline']  # title
            dict_data['body'] = d['body']  # article text
            dict_data['lastPublishDate'] = d['lastPublishDate']  # publish time
            dict_data['path'] = "https://www.cnn.com" + d['path'] + "/index.html"  # article URL
            print(str(dict_data))
            articles.append(dict_data)
        return articles


if __name__ == '__main__':
    # Walk pages 1..99 of the CNN search API, printing a banner per page.
    spider = CNN()
    page = 1
    while page < 100:
        print(f"============{page}=============")
        spider.get_contents(page)
        page += 1