import requests,re
import time
import os
import xlwt
import sys
import ssl
import importlib
import jsonpath
from selenium import webdriver
import numpy as np
import pprint
import json
import http.client
import urllib.request
from bs4 import BeautifulSoup
# Quick IP-echo endpoint for proxy testing (defined but never requested below).
url1='http://httpbin.org/ip'

# Proxy candidate.  NOTE(review): never passed to any requests call, so it is
# currently unused — confirm whether it should be wired into requests.get.
proxy={
    'HTTP':'27.43.185.55:9999'
}
# Force plain HTTP/1.0 on http.client connections (commonly done to avoid
# chunked-transfer/keep-alive issues with some servers).
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
# Globally disable TLS certificate verification for urllib-based HTTPS requests.
ssl._create_default_https_context = ssl._create_unverified_context
# Legacy Python 2 "reload(sys) to reset encoding" idiom; harmless on Python 3.
importlib.reload(sys)
startTime = time.time() # record start time
# ------------------------------- Excel output setup -------------------------------
path='中国疫情1.xls'  # output spreadsheet filename
wookbook=xlwt.Workbook(encoding='utf-8')  # workbook that collects every scraped row
sheet=wookbook.add_sheet("sheet1",cell_overwrite_ok=True)  # allow rewriting cells in place
# Header row: article link, title, author nickname, publish time, comment count.
col=('文章链接','文章标题', '作者昵称','发布时间', '评论量')
for t,header in enumerate(col):
    sheet.write(0,t,header)
# --------------------- globals shared by the scraping functions ---------------------
comments_ID1=[]  # alternating comment-count list / group-id list, one pair per page
group_id1=[]
title1=[]        # raw jsonpath title results (an entry may be False when missing)
num=[]           # number of rows written for each page
title2=[]        # cleaned, display-ready titles

def get_title_id():
    """Scrape Toutiao's search API for '战疫情' articles and log them to the sheet.

    For each result page, extract article URL, group id, publish time,
    comment count, media name and title via jsonpath, write one spreadsheet
    row per article into the module-level ``sheet``, and append the page's
    comment-count list and group-id list to the module-level ``comments_ID1``.

    Returns:
        list: ``comments_ID1`` — alternating [comments_count_list,
        group_id_list, ...], one pair per page fetched.
    """
    row_offset = 0  # rows already written (renamed from `sum`, which shadowed the builtin)
    for page in range(0, 1):

        # Request headers; refresh the Cookie here when it expires.
        headers = {
        'Referer': 'https://www.toutiao.com/search/?keyword=%E7%96%AB%E6%83%85',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',
        'Accept': 'application/json, text/javascript',
        'Accept-Language': 'zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3',
        'Content-Type': 'application/x-www-form-urlencoded',
        'X-Requested-With': 'XMLHttpRequest',
        'Accept-Encoding': 'gzip, deflate, br',
        'Host': 'www.toutiao.com',
        'Connection': 'Keep-Alive',
        'Cookie': 'ttcid=1081d0f9c3674e6a986833eecf4d9a7c63; SLARDAR_WEB_ID=7007ddac-7da0-474d-a2b9-9b0f72c020c8; tt_webid=6837062297140512263; csrftoken=e1d648c36d53ddc6b26bb09b2d00ef1a; tt_webid=6837062297140512263; WEATHER_CITY=%E5%8C%97%E4%BA%AC; s_v_web_id=verify_kbaqu3d5_PtNGlnds_xfMb_4GIN_BFWx_jbjRvqGLMbHB; __tasessionId=i85fuyxyk1591877623846; tt_scid=jppShMSDQP7xw7.vah3jzkUpsaTbIpcDVFcYi1P842ixuXj5dz6MOiQonYzHWUBL1220'}
        time.sleep(1)  # throttle so we don't hammer the API
        # API endpoint discovered via packet capture; results are paged by 20.
        page1 = str(page * 20)
        api_url = 'https://www.toutiao.com/api/search/content/?aid=24&app_name=web_search&offset={}&format=json&keyword=%E6%88%98%E7%96%AB%E6%83%85&autoload=true&count=20&en_qc=1&cur_tab=1&from=search_tab&pd=synthesis'.format(page1)
        print(api_url)
        rep = requests.get(url=api_url, headers=headers, stream=True)
        # Parse the JSON body ONCE (the original re-parsed it for every field).
        data = rep.json()['data']
        # jsonpath.jsonpath returns False (not []) when nothing matches, which
        # would crash len(); normalize every result to a list.
        comments_ID = jsonpath.jsonpath(data, '$..article_url') or []
        group_id = jsonpath.jsonpath(data, '$..group_id') or []
        text_time = jsonpath.jsonpath(data, '$..datetime') or []
        comments_count = jsonpath.jsonpath(data, '$..comments_count') or []
        media_name = jsonpath.jsonpath(data, '$..media_name') or []
        k3 = len(comments_ID)
        for h in range(k3):
            title = jsonpath.jsonpath(data[h], '$..title')
            title1.append(title)
            # Use the just-fetched `title`, not title1[h]: title1 is a module
            # global that persists across calls, so indexing it by h is wrong
            # after the first call.  A missing title comes back as False.
            if not title:
                title2.append('无标题或者没有爬取到')
            else:
                title2.append(title[0])

        comments_ID1.append(comments_count)
        comments_ID1.append(group_id)
        if len(text_time) == 0:
            break  # no timestamps means the API returned no more results
        k1 = len(text_time)
        num.append(k1)

        print(k1, k3)
        # Write one spreadsheet row per article.  The original's k3 < k1
        # branch mixed `k3*page` and `sum` row offsets and never advanced
        # `sum`, so later pages could overwrite earlier rows; writing
        # min(k1, k3) rows at one running offset fixes that.
        rows = min(k1, k3)
        for k in range(rows):
            sheet.write(k + 1 + row_offset, 0, comments_ID[k])
            sheet.write(k + 1 + row_offset, 1, title2[k + row_offset])
            sheet.write(k + 1 + row_offset, 2, media_name[k])
            sheet.write(k + 1 + row_offset, 3, text_time[k])
            sheet.write(k + 1 + row_offset, 4, comments_count[k])
        row_offset = row_offset + rows

    print(comments_ID1)
    return comments_ID1
#-----------------------------------爬取战疫情每个主题的详情页面------------------------------------------
# Accumulators for the detail-page scrape.
# NOTE(review): neither list is written to in this file — confirm they are
# still needed before removing.
total_number1=[]

user_name1=[]

def spider_title(comments_ID1):
    """Fetch the comments for every scraped article and append them to 't.txt'.

    Args:
        comments_ID1: list alternating comment-count lists (even indices) and
            group-id lists (odd indices), one pair per scraped page — the
            value returned by get_title_id().

    For each article that has at least one comment, the Toutiao comment API
    is requested and the article URL, commenter name and comment text are
    written to the output file.
    """
    filename = 't.txt'
    # `with` guarantees the file is closed even if a request or JSON parse
    # raises (the original left the handle open on error).
    with open(filename, 'a', encoding='utf-8') as f:
        # Iterate only over the pages that actually exist: the original
        # hard-coded range(0, 9) and raised IndexError when get_title_id
        # had scraped fewer than 9 pages.
        for p in range(0, min(9, len(comments_ID1) // 2)):
            p1 = len(comments_ID1[2 * p])
            print(comments_ID1[2 * p][1])
            for h3 in range(p1):
                if comments_ID1[2 * p][h3] == 0:
                    print('没有人评论')
                else:
                    # NOTE(review): indexing the group-id list with 2*h3
                    # (not h3) looks suspicious but is kept from the
                    # original — confirm against the API response layout.
                    article_url = 'https://www.toutiao.com/article/v2/tab_comments/?aid=24&app_name=toutiao-web&group_id={}&item_id={}&offset=0&count=5'.format(comments_ID1[2 * p + 1][2 * h3], comments_ID1[2 * p + 1][2 * h3])
                    print(article_url)
                    r = requests.get(article_url).content
                    print(r)
                    a = json.loads(r)
                    b = a['data']
                    for i in b:
                        # BUG FIX: the original wrote the literal string
                        # 'article_url' instead of the URL itself.
                        f.write(article_url)
                        f.write('\n')
                        f.write(i['comment']['user_name'])
                        f.write('\t')
                        f.write(i['comment']['text'])
                        f.write('\n')


#-------------------------------------------------主函数---------------------------------------------------
def main():
    """Drive the scrape: collect article metadata, then fetch all comments."""
    comments_ID1 = get_title_id()

    # BUG FIX: the original called spider_title(comments_ID1) inside the
    # loop below, re-running the entire comment scrape 9 times and
    # appending 9 duplicate copies of every comment to t.txt.  One call
    # covers all pages.
    spider_title(comments_ID1)

    # Progress report, one line per article found on each page.  Guard the
    # range so we never index past the pages actually scraped.
    for n in range(0, min(9, len(comments_ID1) // 2)):
        count_title = len(comments_ID1[2 * n])
        for count in range(count_title):
            print ("正在爬取第%s个文章，一共找到个%s文章需要爬取"%(count+1, count_title))
            print ("--------------------------分隔符---------------------------")

if __name__ == '__main__':
    # Entry point: run the whole scrape, then report elapsed wall-clock
    # time in minutes and persist the workbook to disk.
    main()
    useTime = (time.time() - startTime) / 60  # minutes elapsed since startup
    print("该次所获的信息一共使用%s分钟"%useTime)
    wookbook.save(path)