# -*- coding: utf-8 -*-
#@Time: 2021/2/14 11:44
#@Author: 卜白
#@File: 多线程央视新闻APP爬虫.py
#@Software: PyCharm

import re
import time
import json
import parsel
import pymongo
import requests
import datetime
from multiprocessing import Queue   # 引入队列
# ThreadPoolExecutor包 引入线程池
from concurrent.futures import ThreadPoolExecutor

class CCTV_NEWS():
    """Crawler for the CCTV News mobile app.

    Workflow: build a day-by-day time chain, page through the scrolling
    list API for each day, queue the detail-page URLs, then (from worker
    threads) fetch each detail page, parse the article and store it in
    MongoDB.
    """

    def __init__(self):
        # Headers for the list API — mimics the Android app client.
        self.list_headers = {
            "User-Agent": "DT1901A",
            "Host": "api.cportal.cctv.com",
            "Connection": "Keep-Alive",
            "Accept-Encoding": "gzip",
        }
        # Headers for the mobile detail pages (app WebView user agent).
        self.detail_headers = {
            "Host": "m.news.cctv.com",
            "User-Agent": "Mozilla/5.0 (Linux; Android 5.1.1; DT1901A Build/LMY49I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36 CntvNews",
            "Accept-Encoding": "gzip, deflate",

        }
        self.list_url = 'http://api.cportal.cctv.com/api/rest/articleInfo/getScrollList'    # list-page endpoint

        # Queue of detail-page URLs shared with the worker threads.
        self.queue = Queue()

        # One MongoDB client for the whole run (connects lazily); the
        # original opened a fresh client for every inserted document.
        self.mongo_client = pymongo.MongoClient(host='127.0.0.1', port=27017)
        self.mongo_news = self.mongo_client.spider.news     # spider db, news collection

    def get_time_chain(self):
        """Return a list of [timestamp_ms, 'YYYY-MM-DD'] pairs, one per day
        from 2020-01-01 up to and including today.

        Each timestamp carries a fixed 13:54:36 time-of-day — presumably a
        mid-day pubDate cursor the list API accepts; TODO confirm against
        the API's expectations.
        """
        time_chain_list = []
        start_date = datetime.date(2019, 12, 31)
        start_time = datetime.time(13, 54, 36)
        cursor = datetime.datetime.combine(start_date, start_time)
        # Days between the fixed start date and today (direct equivalent of
        # the original time.strftime/split/map round-trip).
        days = (datetime.date.today() - start_date).days
        one_day = datetime.timedelta(days=1)
        for _ in range(days):
            cursor += one_day
            timestamp = int(cursor.timestamp() * 1000)      # millisecond timestamp
            time_chain_list.append([timestamp, cursor.strftime("%Y-%m-%d")])
        return time_chain_list

    def get_params(self, page, str_time):
        """Build the query parameters for the scrolling-list API.

        :param page: 1-based page number.
        :param str_time: millisecond timestamp used as the pubDate cursor.
        :return: dict of request parameters (all values as strings).
        """
        params = {
            "n": "20",                  # page size
            "version": "1",
            "p": str(page),             # page number
            "pubDate": str(str_time),   # timestamp cursor
            "app_version": "808",
        }
        return params

    def get_detail_url(self, time, date):
        """Page through one day's list API and enqueue detail-page URLs.

        :param time: millisecond timestamp for that day (pubDate cursor).
        :param date: 'YYYY-MM-DD' string for the same day.
        """
        total = 20      # items covered so far; current page = total / 20
        while True:
            params = self.get_params(int(total / 20), time)
            response = requests.get(url=self.list_url, headers=self.list_headers, params=params)
            results = json.loads(response.text)
            item_list = results.get('itemList') or []
            if not item_list:
                # BUG FIX: the original printed this and looped forever on
                # an empty/odd response; bail out of the day instead.
                print('数据有误。。')
                break
            totals = results['total']   # total item count reported by the API
            for result in item_list:
                itemID = result['itemID']       # detail-page id
                # The page URL wants the first four id characters upper-cased.
                ids = itemID[:4].upper() + itemID[4:]
                str_time = date.replace('-', '/')
                detail_url = f'http://m.news.cctv.com/{str_time}/{ids}.shtml'
                print(str_time, detail_url)
                # Hand the URL to the worker threads.
                self.queue.put(detail_url)
            # BUG FIX: check coverage *before* advancing; the original did
            # `total += 20` first, so a final partial page (e.g. totals=35)
            # was never fetched.
            if total >= totals:
                break
            total += 20

    def get_news_data(self, href_url):
        """Fetch one detail page, parse the article, store it in MongoDB.

        Runs inside the thread pool; each call handles exactly one URL.
        """
        response = requests.get(url=href_url, headers=self.detail_headers, allow_redirects=False)
        text = response.content.decode('utf-8', 'ignore')
        sele = parsel.Selector(text)
        title = sele.css('div.cnt_bd h1::text').extract_first()     # headline

        # Editor credit, e.g. "（编辑 XXX）" — not every article has one;
        # the original indexed [0] unconditionally and crashed the worker.
        edits = re.findall(r'（编辑 (.*?)）', text)
        edit = edits[0] if edits else None

        # "<source> <date> <time>" — guard against a missing/short info line.
        info_text = sele.css('span.info i::text').extract_first() or ''
        info = info_text.split()
        source = info[0] if info else None                              # source outlet
        pub_time = info[1] + ' ' + info[2] if len(info) >= 3 else None  # publish time

        # Article body: all paragraph/span text under the content div.
        news_content = ''.join(sele.xpath(
            '//div[@class="cnt_bd"]//p/text()|//div[@class="cnt_bd"]//span/text()'
        ).extract())
        news_content = re.sub(r'[\s\n\t]', '', news_content)    # strip all whitespace

        item = {
            "title": title,
            "edit": edit,
            "source": source,
            "pub_time": pub_time,
            "content": news_content,
            "detail_url": href_url
        }
        print(item)
        self.save_mon(item)

    def save_mon(self, item):
        """Insert one news document into the spider.news collection,
        reusing the client created in __init__."""
        self.mongo_news.insert_one(item)


if __name__ == '__main__':
    print('项目爬虫开始采集。。。')
    worker = CCTV_NEWS()

    # Phase 1: walk the day-by-day time chain and fill the URL queue.
    # BUG FIX: the original iterated `for time, date in ...`, rebinding the
    # imported `time` module to an int on the first iteration.
    for pub_timestamp, pub_date in worker.get_time_chain():
        print(pub_timestamp, pub_date)
        worker.get_detail_url(pub_timestamp, pub_date)

    # Phase 2: drain the queue with a thread pool (fetches are I/O-bound).
    # The context manager shuts the pool down and joins all workers —
    # the original never called shutdown().
    with ThreadPoolExecutor(max_workers=15) as pool:
        # worker.queue.qsize() — remaining URLs in the queue
        while worker.queue.qsize() > 0:
            pool.submit(worker.get_news_data, worker.queue.get())