import requests
import time
import xlwt
import sys
import ssl
import importlib
import jsonpath
from selenium import webdriver
import pprint
import json
import http.client


url1 = 'http://httpbin.org/ip'

# Sample proxy entry (not currently passed to any requests call below).
proxy = {
    'HTTP': '27.43.185.55:9999'
}

# Force requests (through http.client) to send HTTP/1.0 request lines.
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
# Disable HTTPS certificate verification globally.
ssl._create_default_https_context = ssl._create_unverified_context
importlib.reload(sys)  # legacy default-encoding reset (Python 2 habit)
startTime = time.time()  # remember when this run started
# ------------------------------- file storage -------------------------------
path = '中国疫情1.xls'
wookbook = xlwt.Workbook(encoding='utf-8')  # output workbook
sheet = wookbook.add_sheet("sheet1", cell_overwrite_ok=True)  # allow cell overwrite
col = ('文章链接', '文章标题', '作者昵称', '发布时间', '评论量')
for col_idx, heading in enumerate(col):
    sheet.write(0, col_idx, heading)
# ------------- scrape the topic IDs from the epidemic front page -------------

# Module-level accumulators shared by the scraping functions below.
comments_ID1 = []
group_id1 = []
title1 = []
num = []
title2 = []

def get_title_id():
    """Scrape the Toutiao search listing and fill the spreadsheet.

    For each listing page, writes one row per article (URL, title, author,
    publish time, comment count) into the module-level `sheet`, and appends
    that page's comment-count list followed by its group-id list to the
    module-level `comments_ID1`, so the result alternates
    [counts_0, ids_0, counts_1, ids_1, ...].

    Returns:
        list: the populated `comments_ID1`.
    """
    row_offset = 0  # spreadsheet rows already written below the header
    for page in range(0, 1):

        # Request headers; re-capture the Cookie if it has expired.
        headers = {
            'Referer': 'https://www.toutiao.com/search/?keyword=%E7%96%AB%E6%83%85',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',
            'Accept': 'application/json, text/javascript',
            'Accept-Language': 'zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3',
            'Content-Type': 'application/x-www-form-urlencoded',
            'X-Requested-With': 'XMLHttpRequest',
            'Accept-Encoding': 'gzip, deflate, br',
            'Host': 'www.toutiao.com',
            'Cookie': 'ttcid=1081d0f9c3674e6a986833eecf4d9a7c63; SLARDAR_WEB_ID=7007ddac-7da0-474d-a2b9-9b0f72c020c8; tt_webid=6837798181318559246; csrftoken=e1d648c36d53ddc6b26bb09b2d00ef1a; tt_webid=6837798181318559246; WEATHER_CITY=%E5%8C%97%E4%BA%AC; s_v_web_id=verify_kbdkuil5_t62KDoOk_io3F_4DVN_BvJ1_tyMyGJ06uTPl; tt_scid=JnCDJy12i7.nfu08Zmda-mFRD3NOvTDgZkkk0LwGukJH6Ex.qFIlGARKiQk.1bHa44d2; __tasessionId=hhijnikx41592052732507',
            'Connection': 'Keep-Alive'}
        time.sleep(1)  # throttle so we do not hammer the endpoint
        # Search API endpoint (found via packet capture); results are paged by 20.
        api_url = 'https://www.toutiao.com/api/search/content/?aid=24&app_name=web_search&offset={}&format=json&keyword=%E6%88%98%E7%96%AB%E6%83%85&autoload=true&count=20&en_qc=1&cur_tab=1&from=search_tab&pd=synthesis'.format(str(page * 20))
        print(api_url)
        rep = requests.get(url=api_url, headers=headers, stream=True)
        payload = rep.json()['data']  # parse the response once, not per field

        # jsonpath.jsonpath() returns False (not []) when nothing matches,
        # which would crash len() below -- normalize to empty lists.
        comments_ID = jsonpath.jsonpath(payload, '$..article_url') or []
        group_id = jsonpath.jsonpath(payload, '$..group_id') or []
        text_time = jsonpath.jsonpath(payload, '$..datetime') or []
        comments_count = jsonpath.jsonpath(payload, '$..comments_count') or []
        media_name = jsonpath.jsonpath(payload, '$..media_name') or []

        k3 = len(comments_ID)
        for h in range(k3):
            title = jsonpath.jsonpath(payload[h], '$..title')
            title1.append(title)
            # Check the local result, not title1[h]: the global list keeps
            # growing across pages, so its index would drift after page 0.
            if not title:
                title2.append('无标题或者没有爬取到')
            else:
                title2.append(title[0])

        comments_ID1.append(comments_count)
        comments_ID1.append(group_id)
        print(comments_ID1)

        k1 = len(text_time)
        num.append(k3)

        # Write one spreadsheet row per article; the two original branches
        # only differed in which length bounded the loop, i.e. min(k1, k3).
        for k in range(min(k1, k3)):
            sheet.write(k + 1 + row_offset, 0, comments_ID[k])
            sheet.write(k + 1 + row_offset, 1, title2[k + row_offset])
            sheet.write(k + 1 + row_offset, 2, media_name[k])
            sheet.write(k + 1 + row_offset, 3, text_time[k])
            sheet.write(k + 1 + row_offset, 4, comments_count[k])
        row_offset += num[page]

    return comments_ID1
#--------------------- scrape the detail page of each epidemic topic ---------------------
# Accumulators for detail-page data.
total_number1 = []

user_name1 = []

def spider_title(comments_ID1):
    """Fetch the comment feed for every scraped article and log it to disk.

    Args:
        comments_ID1: alternating list built by get_title_id():
            [comment_counts_page0, group_ids_page0, counts_1, ids_1, ...].

    Side effects:
        Appends "<article_url>\\n<user_name>\\t<comment_text>\\n" records
        to 's.txt'.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'
    }
    # `with` guarantees the file is closed even if a request raises.
    with open('s.txt', 'a', encoding='utf-8') as f:
        # Iterate only over the (counts, ids) pairs actually collected;
        # the original hard-coded range(0, 8) raised IndexError whenever
        # fewer than 8 listing pages had been scraped.
        for p in range(len(comments_ID1) // 2):
            counts = comments_ID1[2 * p]
            ids = comments_ID1[2 * p + 1]
            for h3 in range(len(counts)):
                if counts[h3] == 0:
                    print('没有人评论')
                    continue
                # NOTE(review): the id list is indexed with 2*h3, which assumes
                # two group_id matches per article -- confirm against the API.
                article_url = 'https://www.toutiao.com/article/v2/tab_comments/?aid=24&app_name=toutiao-web&group_id={}&item_id={}&offset=0&count=5'.format(ids[2 * h3], ids[2 * h3])
                print(article_url)
                response = requests.get(article_url, headers=headers)
                response.encoding = response.apparent_encoding
                # Parse the JSON body: the original iterated response.text
                # character by character, so i['comment'] could never work.
                comments = response.json()['data']
                print(comments)
                for item in comments:
                    f.write(article_url)  # was the literal string 'article_url'
                    f.write('\n')
                    f.write(item['comment']['user_name'])
                    f.write('\t')
                    f.write(item['comment']['text'])
                    f.write('\n')





#------------------------------------------- main routine -------------------------------------------
def main():
    """Drive the full scrape: listing pages first, then comment feeds."""
    id_pairs = get_title_id()
    for n in range(0, 1):
        # Number of articles found on listing page n.
        article_total = len(id_pairs[2 * n])

        spider_title(id_pairs)

        for idx in range(article_total):
            print ("正在爬取第%s个文章，一共找到个%s文章需要爬取"%(idx+1, article_total))
            print ("--------------------------分隔符---------------------------")

if __name__ == '__main__':
    # Entry point: run the scraper, report timing, persist the workbook.
    main()
    # Elapsed wall-clock time, in minutes.
    useTime = (time.time() - startTime) / 60
    print("该次所获的信息一共使用%s分钟"%useTime)
    wookbook.save(path)