import requests,re
import time
import os
import xlwt
import sys
import ssl
import importlib
import jsonpath
import pprint
import json
# import http.client

# (disabled) legacy hack to force HTTP/1.0 — kept for reference
# http.client.HTTPConnection._http_vsn = 10
# http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
# NOTE(review): disables TLS certificate verification globally — insecure,
# presumably to work around certificate errors on this host; confirm needed.
ssl._create_default_https_context = ssl._create_unverified_context
importlib.reload(sys)  # Python 2 encoding idiom; effectively a no-op on Python 3
startTime = time.time() # record script start time (elapsed time printed at exit)
#-------------------------------------------- output file setup -----------------------------------------------------
path='中国疫情1.xls'  # output spreadsheet path
wookbook=xlwt.Workbook(encoding='utf-8')# create a workbook
sheet=wookbook.add_sheet("sheet1",cell_overwrite_ok=True)# add a sheet; allow overwriting cells
# Header row: article url, article title, author nickname, publish time, comment count
col=('文章链接','文章标题', '作者昵称','发布时间', '评论量')
for t in range(5):
    sheet.write(0,t,col[t])
#----------------------------------- scrape each topic ID from the search results page ------------------------------------------
# comments_ID = []  # per-article url : group_id list
comments_ID1=[]  # accumulates [article_urls, group_ids] — filled by get_title_id()

def get_title_id():
    """Fetch the Toutiao search-results feed for keyword '疫情' (epidemic).

    For each result page, extracts article urls, group ids and publish
    times via jsonpath, writes url (col 0) and time (col 3) into the
    module-level `sheet`, and appends the url/id lists to the module-level
    `comments_ID1`.

    Returns:
        list: `comments_ID1`, i.e. [article_url_list, group_id_list, ...]
              (one url-list and one id-list appended per page fetched).
    """
    for page in range(0, 1):  # currently a single page; widen the range for more
        # Request headers; refresh the Cookie if it expires.
        headers = {
        'Referer': 'https://www.toutiao.com/search/?keyword=%E7%96%AB%E6%83%85',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
        'Accept': 'application/json, text/javascript',
        'Accept-Language': 'zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3',
        'Content-Type': 'application/x-www-form-urlencoded',
        'X-Requested-With': 'XMLHttpRequest',
        'Accept-Encoding': 'gzip, deflate, br',
        'Host': 'www.toutiao.com',
        'Connection': 'Keep-Alive',
        'Cookie': 'ttcid=d7cbb8ced35e4a80a3dbeee3314bf2bc22; SLARDAR_WEB_ID=1b4a4093-2dc3-47a4-8605-2e30acba4b79; WEATHER_CITY=%E5%8C%97%E4%BA%AC; csrftoken=f37210ae13a654ba5a9d1468ac888dba; __tasessionId=df5mda08d1591687848612; s_v_web_id=verify_kb7mu4az_pjvFYKJR_LSJH_4BpT_9baG_HNhTyG8nOlEq; tt_webid=6836254315318904327; tt_scid=4S-AkxKaR.g1JnfZ-bwe5fSaCPq5VX6Y7gi.A3DaLh9cV3BU5TeDnCuAfMTZuKzQe045; tt_webid=6836254387535889927'
        }
        time.sleep(1)  # throttle so we don't hammer the endpoint
        # API endpoint discovered via packet capture. BUG FIX: the original
        # built this as a *plain* string containing '{page}', so the literal
        # text '{page}' was sent as the offset instead of the computed value.
        offset = str(page * 20)
        api_url = ('https://www.toutiao.com/api/search/content/?aid=24&app_name=web_search'
                   '&offset=' + offset +
                   '&format=json&keyword=%E7%96%AB%E6%83%85&autoload=true&count=20&en_qc=1'
                   '&cur_tab=1&from=search_tab&pd=synthesis&timestamp=1591606025642'
                   '&_signature=e-sqoAAgEBAMfv7wNJhR23vqK7AACU5Nyd0H78fAO8UHODYF0XpapmhaAgwT-3ndzHKwbXvM57bUZoRajKUXyDlEDEgEk01d72A1TbhDJKbp1KOS-MLCCvppm60amD.SEcM')
        print(api_url)
        rep = requests.get(url=api_url, headers=headers, stream=True)  # fetch the JSON feed
        # Parse once instead of three times; extract fields with jsonpath.
        payload = rep.json()['data']
        comments_ID = jsonpath.jsonpath(payload, '$..article_url')
        group_id = jsonpath.jsonpath(payload, '$..group_id')
        text_time = jsonpath.jsonpath(payload, '$..datetime')
        comments_ID1.append(comments_ID)
        comments_ID1.append(group_id)

        # Write publish time (col 3) and article url (col 0), one row per
        # article; zip guards against the two lists differing in length.
        for row, (stamp, url) in enumerate(zip(text_time, comments_ID), start=1):
            sheet.write(row, 3, stamp)
            sheet.write(row, 0, url)
    return comments_ID1
#-----------------------------------爬取战疫情每个主题的详情页面------------------------------------------
# Accumulators filled by spider_title(), one entry per article fetched:
total_number1=[]  # comment counts
title1=[]         # article titles
user_name1=[]     # author nicknames
def spider_title(comments_ID):
    """Fetch each article's comment feed and record details.

    Appends the comment text of each article to a local text file, fills the
    module-level `title1` / `user_name1` / `total_number1` accumulators, writes
    them into `sheet` (cols 1, 2, 4) and saves the workbook.

    Args:
        comments_ID: result of get_title_id(); index 1 holds the group-id list.
    """
    # Raw string keeps the same path value while avoiding the invalid-escape
    # warning the original '\主...' literal triggers.
    filename = r"D:\主妇空姐模特联系方式.txt"
    h1 = comments_ID[1]
    with open(filename, 'a', encoding='utf-8') as f:  # context manager closes even on error
        # The original iterates every *other* id (h1[2*i]) — presumably because
        # '$..group_id' matches each id twice in the payload; TODO confirm.
        for i in range(len(h1) // 2):
            gid = h1[2 * i]
            article_url = ('https://www.toutiao.com/article/v2/tab_comments/'
                           '?aid=24&app_name=toutiao-web&group_id={}&item_id={}'
                           '&offset=0&count=5').format(gid, gid)
            headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
                    'Host': 'www.toutiao.com',
                    'Connection': 'Keep-Alive',
                    'Cookie': 'ttcid=d7cbb8ced35e4a80a3dbeee3314bf2bc22; SLARDAR_WEB_ID=1b4a4093-2dc3-47a4-8605-2e30acba4b79; WEATHER_CITY=%E5%8C%97%E4%BA%AC; csrftoken=f37210ae13a654ba5a9d1468ac888dba; __tasessionId=df5mda08d1591687848612; s_v_web_id=verify_kb7mu4az_pjvFYKJR_LSJH_4BpT_9baG_HNhTyG8nOlEq; tt_webid=6836254315318904327; tt_scid=4S-AkxKaR.g1JnfZ-bwe5fSaCPq5VX6Y7gi.A3DaLh9cV3BU5TeDnCuAfMTZuKzQe045; tt_webid=6836254387535889927'
            }

            # BUG FIX: the original called requests.get(article_url, headers),
            # which binds the dict to the *params* argument — the headers were
            # never sent. Pass it as the `headers` keyword.
            resp = requests.get(article_url, headers=headers)
            date = resp.json()

            # jsonpath returns a list of matches (or False when nothing
            # matched); keep the first match so xlwt gets a scalar, not a list.
            title = jsonpath.jsonpath(date['repost_params'], '$..title')
            title1.append(title[0] if title else '')
            user_name = jsonpath.jsonpath(date['group'], '$..user_name')
            user_name1.append(user_name[0] if user_name else '')
            total_number1.append(date['total_number'])

            # Dump each commenter's name and comment text, one per line.
            name1 = jsonpath.jsonpath(date['data'], '$..user_name')
            text1 = jsonpath.jsonpath(date['data'], '$..text')
            for commenter, text in zip(name1 or [], text1 or []):
                f.write(commenter)
                f.write('\n')
                f.write(text)
                f.write('\n')

    # Flush accumulated details into the spreadsheet (row 0 is the header).
    for g in range(len(title1)):
        sheet.write(g + 1, 1, title1[g])
        sheet.write(g + 1, 2, user_name1[g])
        sheet.write(g + 1, 4, total_number1[g])
    wookbook.save(path)




#-------------------------------------------------主函数---------------------------------------------------
def main():
    """Drive the scrape: collect article ids, fetch details, report progress."""
    ids = get_title_id()
    article_count = len(ids[0])
    spider_title(ids)

    done = 0
    while done < article_count:
        done += 1
        print ("正在爬取第%s个文章，一共找到个%s文章需要爬取"%(done, article_count))
        print ("--------------------------分隔符---------------------------")

if __name__ == '__main__':
    # Fetch topic IDs and scrape each article's details / comments.
    main()
    # Report total elapsed time in minutes.
    endTime = time.time()
    useTime = (endTime-startTime) / 60
    print("该次所获的信息一共使用%s分钟"%useTime)
    wookbook.save(path)