import requests,re
import time
import os
import xlwt
import sys
import ssl
import importlib
import jsonpath
import numpy as np
import pprint
import json
# import http.client

# http.client.HTTPConnection._http_vsn = 10
# http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
ssl._create_default_https_context = ssl._create_unverified_context  # disable TLS certificate verification for the scraped HTTPS endpoints
importlib.reload(sys)  # Python-2-era idiom (for sys.setdefaultencoding); effectively a no-op on Python 3
startTime = time.time()  # record start time so total runtime can be reported at exit
#--------------------------------------------output file setup-----------------------------------------------------
path='中国疫情1.xls'  # Excel file the scraped rows are saved into
wookbook=xlwt.Workbook(encoding='utf-8')  # create a workbook
sheet=wookbook.add_sheet("sheet1",cell_overwrite_ok=True)  # add a sheet; allow cells to be overwritten
col=('文章链接','文章标题', '作者昵称','发布时间', '评论量')  # header row: article URL, title, author nickname, publish time, comment count
for t in range(5):
    sheet.write(0,t,col[t])
#-----------------------------------collect the ID of each topic on the search result pages------------------------------------------
# Module-level accumulators shared by get_title_id()/spider_title()/main().
comments_ID1=[]  # per-page article-URL lists interleaved with references to group_id1 -- see NOTE in get_title_id()
group_id1=[]  # one list of article group ids per result page
def get_title_id():
    """Scrape the first three result pages of Toutiao's search API for the
    keyword "战疫情".

    For each page, extract every article URL, group id and publish datetime
    via jsonpath, write the datetime (col 3) and a URL-derived value (col 0)
    into the module-level sheet, and accumulate results in the module-level
    lists.

    Returns:
        comments_ID1: after 3 pages this is
        [ids_page0, group_id1, ids_page1, group_id1, ids_page2, group_id1],
        where group_id1 is the SAME list object appended once per page (see
        NOTE(review) below) -- spider_title() relies on comments_ID1[1]
        being group_id1.
    """
    for page in range(0,3):
        # Request headers; refresh the Cookie value below if it has expired.
        headers = {
        'Referer': 'https://www.toutiao.com/search/?keyword=%E7%96%AB%E6%83%85',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
        'Accept': 'application/json, text/javascript',
        'Accept-Language': 'zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3',
        'Content-Type': 'application/x-www-form-urlencoded',
        'X-Requested-With': 'XMLHttpRequest',
        'Accept-Encoding': 'gzip, deflate, br',
        'Host': 'www.toutiao.com',
        'Connection': 'Keep-Alive',
        'Cookie': 's_v_web_id=verify_kb90po8l_5VoH8y3w_JYlx_4HK5_8AXd_0nMHIfRMFxoo; ttcid=6095688242c1419b8cd0102d7ff142aa33; WEATHER_CITY=%E5%8C%97%E4%BA%AC; __tasessionId=j0wqkrf6y1591773286181; SLARDAR_WEB_ID=85bdaf29-adac-40e7-b853-e63efaaa7099; csrftoken=5c0d300e216f0dd2de117437340e11fe; tt_webid=6836618306042103303; tt_webid=6836618306042103303; tt_scid=ppdLBUQrMVH6IW9fz4.4XNjlnH7opgdrBXlzqQwf8Vwf5gqz4B8e09P6KLEi6YmF642a'}
        time.sleep(1)  # throttle: at most one request per second
        # Search endpoint discovered via packet capture; results are paged by 20.
        page1=str(page*20)
        api_url = 'https://www.toutiao.com/api/search/content/?aid=24&app_name=web_search&offset={}&format=json&keyword=%E6%88%98%E7%96%AB%E6%83%85&autoload=true&count=20&en_qc=1&cur_tab=1&from=search_tab&pd=synthesis'.format(page1)
        print(api_url)
        rep = requests.get(url=api_url, headers=headers,stream=True)  # fetch the JSON search results
        # Extract the url, group id and datetime fields from the response JSON.
        print(rep.content)
        pprint.pprint(rep.json()['data'])
        # NOTE(review): jsonpath.jsonpath returns False (not a list) when no
        # match is found, so the len() calls below would raise TypeError on
        # an empty page -- confirm the API always returns these fields.
        comments_ID=jsonpath.jsonpath(rep.json()['data'],'$..article_url')
        group_id=jsonpath.jsonpath(rep.json()['data'],'$..group_id')
        text_time=jsonpath.jsonpath(rep.json()['data'],'$..datetime')
        comments_ID1.append(comments_ID)
        group_id1.append(group_id)
        # NOTE(review): this appends the SAME group_id1 list object on every
        # iteration, so comments_ID1 ends up holding repeated references to
        # it; spider_title() depends on comments_ID1[1] being group_id1.
        comments_ID1.append(group_id1)
        print(text_time)
        if len(text_time)==0:
            break
        k1=len(text_time)
        k3=len(comments_ID)

        # Write publish time (col 3) and a URL-derived value (col 0); iterate
        # over the shorter of the two lists to stay in bounds.
        # NOTE(review): comments_ID1[0][page][k] indexes the k-th CHARACTER of
        # one URL from page 0 (comments_ID1[0] is always the page-0 URL list);
        # comments_ID[k] was probably intended -- confirm before changing.
        if k3 >= k1:
            for k in range(k1):
                sheet.write(k+1+k1*page,3,text_time[k])
                sheet.write(k+1+k1*page,0,comments_ID1[0][page][k])
        else:
            for k in range(k3):
                sheet.write(k+1+k3*page,3,text_time[k])
                sheet.write(k+1+k3*page,0,comments_ID1[0][page][k])

    return comments_ID1
#-----------------------------------scrape the detail page of each topic------------------------------------------
# Module-level accumulators filled by spider_title(), one entry per article.
total_number1=[]  # comment counts
title1=[]  # article titles (each entry is a jsonpath result list)
user_name1=[]  # author nicknames (each entry is a jsonpath result list)
def spider_title(comments_ID1):
    """Fetch title, author, comment count and comment texts for every article
    collected by get_title_id().

    Comment author/text pairs are appended to 't.txt'; the title (col 1),
    author nickname (col 2) and comment count (col 4) are written into the
    module-level sheet, which is then saved to `path`.

    :param comments_ID1: nested list from get_title_id(); comments_ID1[1]
        holds the per-page group-id lists (see that function's NOTE).
    """
    filename = 't.txt'
    # `with` guarantees the file handle is closed even if a request raises
    # (the original paired open()/close() by hand and leaked on error).
    with open(filename, 'a', encoding='utf-8') as f:
        for p in range(0, 3):
            h = len(comments_ID1[1][p])
            # The group-id list appears to contain each id twice, hence the
            # stride of 2 -- TODO confirm against the API response.
            for i in range(h // 2):
                # Comment endpoint; group_id and item_id take the same value.
                article_url = 'https://www.toutiao.com/article/v2/tab_comments/?aid=24&app_name=toutiao-web&group_id={}&item_id={}&offset=0&count=5'.format(comments_ID1[1][p][2 * i], comments_ID1[1][p][2 * i])
                print("article_url = ", article_url)
                headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
                           'Host': 'www.toutiao.com',
                           'Connection': 'Keep-Alive',
                           'Cookie': 's_v_web_id=verify_kb90po8l_5VoH8y3w_JYlx_4HK5_8AXd_0nMHIfRMFxoo; ttcid=6095688242c1419b8cd0102d7ff142aa33; WEATHER_CITY=%E5%8C%97%E4%BA%AC; __tasessionId=j0wqkrf6y1591773286181; SLARDAR_WEB_ID=85bdaf29-adac-40e7-b853-e63efaaa7099; csrftoken=5c0d300e216f0dd2de117437340e11fe; tt_webid=6836618306042103303; tt_webid=6836618306042103303; tt_scid=ppdLBUQrMVH6IW9fz4.4XNjlnH7opgdrBXlzqQwf8Vwf5gqz4B8e09P6KLEi6YmF642a'}
                # BUG FIX: the original called requests.get(article_url, headers),
                # passing the dict as the second positional parameter, which
                # requests.get() interprets as `params` (query string) -- the
                # request went out WITHOUT the Cookie/User-Agent headers.
                response = requests.get(article_url, headers=headers)
                data = response.json()

                title = jsonpath.jsonpath(data['repost_params'], '$..title')
                title1.append(title)
                user_name = jsonpath.jsonpath(data['group'], '$..user_name')
                user_name1.append(user_name)
                total_number = data['total_number']
                total_number1.append(total_number)

                # Per-comment author names and texts, written as "name:\ntext\n".
                name1 = jsonpath.jsonpath(data['data'], '$..user_name')
                text1 = jsonpath.jsonpath(data['data'], '$..text')
                for j in range(len(data['data'])):
                    f.write(name1[j] + ':')
                    f.write('\n')
                    f.write(text1[j])
                    f.write('\n')
            print(title1)
            # NOTE(review): g1 = len(title1) keeps growing across pages, so the
            # row index g+1+p*g1 overlaps rows written for earlier pages --
            # confirm the intended sheet layout before changing it.
            g1 = len(title1)
            for g in range(g1):
                sheet.write(g + 1 + p * g1, 1, title1[g])
                sheet.write(g + 1 + p * g1, 2, user_name1[g])
                sheet.write(g + 1 + p * g1, 4, total_number1[g])

    wookbook.save(path)




#-------------------------------------------------主函数---------------------------------------------------
def main():
    """Entry point: collect article ids from the search pages, scrape every
    article's details once, then print a progress line per article found."""
    comments_ID1 = get_title_id()

    # BUG FIX: the original called spider_title() inside the page loop below,
    # scraping every article three times over and appending duplicate entries
    # to the module-level result lists; a single pass covers all pages.
    spider_title(comments_ID1)

    for n in range(0, 3):
        # NOTE(review): comments_ID1[0] is always the page-0 URL list, so this
        # is the len() of a single URL string, not an article count -- the
        # per-page list was probably intended. Kept as in the original.
        count_title = len(comments_ID1[0][n])
        print(count_title)

        for count in range(count_title):
            print ("正在爬取第%s个文章，一共找到个%s文章需要爬取"%(count+1, count_title))
            print ("--------------------------分隔符---------------------------")

if __name__ == '__main__':
    # Run the scraper, then report how long the whole run took and make sure
    # the workbook is persisted to disk.
    main()
    elapsed_minutes = (time.time() - startTime) / 60
    print("该次所获的信息一共使用%s分钟" % elapsed_minutes)
    wookbook.save(path)