import requests,re
import time
import os
import csv
import sys
import ssl
import importlib
import jsonpath
import pprint
import urllib.request
import http.client
from  bs4 import BeautifulSoup
# Force HTTP/1.0 on urllib's connections and disable TLS certificate
# verification — works around connection-reuse / certificate problems
# with the target site.
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
ssl._create_default_https_context = ssl._create_unverified_context
importlib.reload(sys)
startTime = time.time() # record the start time of the whole run
#-------------------------------------------- file storage -----------------------------------------------------
path = os.getcwd() + "/toutaioComments.csv"
csvfile = open(path, 'a', newline='', encoding = 'utf-8-sig')
writer = csv.writer(csvfile)
# CSV header row: article URL, title, author nickname, publish date,
# publish time, comment count, commenter nickname, comment text
writer.writerow(('文章链接','文章标题', '作者昵称','发布日期',

                 '发布时间', '评论量','评论者昵称','评论内容'))
#----------------------------------- collect each topic's ID from the search front page ------------------------------------------
comments_ID = []# list of article URLs / group_ids for the individual posts
a=[]  # accumulator filled by get_title_id(): [urls_page0, group_ids_page0, ...]
def get_title_id():
    """Scrape the search-results API and collect article URLs and group IDs.

    For each result page fetched, appends two lists to the module-level
    accumulator ``a``: first the page's article URLs, then its group_ids.

    Returns:
        list: the module-level accumulator ``a``.
    """
    for page in range(0, 1):  # ~20 topics per page; widen the range to crawl more pages
        # Request headers; re-capture the Cookie from the browser if it has expired.
        headers = {
        'Referer': 'https://www.toutiao.com/search/?keyword=%E7%96%AB%E6%83%85',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
        'Accept': 'application/json, text/javascript',
        'Accept-Language': 'zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3',
        'Content-Type': 'application/x-www-form-urlencoded',
        'X-Requested-With': 'XMLHttpRequest',
        'Accept-Encoding': 'gzip, deflate, br',
        'Host': 'www.toutiao.com',
        'Connection': 'Keep-Alive',
        'Cookie': 'ttcid=d7cbb8ced35e4a80a3dbeee3314bf2bc22; SLARDAR_WEB_ID=1b4a4093-2dc3-47a4-8605-2e30acba4b79; WEATHER_CITY=%E5%8C%97%E4%BA%AC; csrftoken=f37210ae13a654ba5a9d1468ac888dba; __tasessionId=df5mda08d1591687848612; s_v_web_id=verify_kb7mu4az_pjvFYKJR_LSJH_4BpT_9baG_HNhTyG8nOlEq; tt_webid=6836254315318904327; tt_scid=4S-AkxKaR.g1JnfZ-bwe5fSaCPq5VX6Y7gi.A3DaLh9cV3BU5TeDnCuAfMTZuKzQe045; tt_webid=6836254387535889927'
        }
        time.sleep(1)  # throttle so we do not hammer the server
        offset = page * 20  # the API pages its results in steps of 20
        # Endpoint captured from the site's XHR traffic.
        # BUG FIX: the original embedded a literal '{page}' inside a plain
        # (non-f) string, so the offset was never interpolated and every
        # iteration requested the same page.
        api_url = ('https://www.toutiao.com/api/search/content/?aid=24&app_name=web_search'
                   '&offset={}&format=json&keyword=%E7%96%AB%E6%83%85&autoload=true&count=20'
                   '&en_qc=1&cur_tab=1&from=search_tab&pd=synthesis&timestamp=1591606025642'
                   '&_signature=e-sqoAAgEBAMfv7wNJhR23vqK7AACU5Nyd0H78fAO8UHODYF0XpapmhaAgwT-3ndzHKwbXvM57bUZoRajKUXyDlEDEgEk01d72A1TbhDJKbp1KOS-MLCCvppm60amD.SEcM').format(offset)
        print(api_url)
        rep = requests.get(url=api_url, headers=headers, stream=True)  # fetch the JSON payload
        data = rep.json()['data']
        pprint.pprint(data)
        # Pull every article_url and group_id out of the nested JSON.
        # (Local names chosen so they no longer shadow the module-level
        # comments_ID list.)
        article_urls = jsonpath.jsonpath(data, '$..article_url')
        group_ids = jsonpath.jsonpath(data, '$..group_id')
        a.append(article_urls)
        a.append(group_ids)
        print(a)

    return a
#----------------------------------- scrape each topic's article detail page ------------------------------------------

def spider_title(comments_ID):
    """Visit every article detail page and write its metadata row to the CSV.

    Args:
        comments_ID: list of article URLs (as collected by get_title_id()).

    Side effects: writes one row per successfully-parsed article to the
    module-level CSV ``writer``.
    """
    # Constant request headers, hoisted out of the loop.
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
            'Host': 'www.toutiao.com',
            'Connection': 'Keep-Alive',
            'Cookie': 'ttcid=d7cbb8ced35e4a80a3dbeee3314bf2bc22; SLARDAR_WEB_ID=1b4a4093-2dc3-47a4-8605-2e30acba4b79; WEATHER_CITY=%E5%8C%97%E4%BA%AC; csrftoken=f37210ae13a654ba5a9d1468ac888dba; __tasessionId=df5mda08d1591687848612; s_v_web_id=verify_kb7mu4az_pjvFYKJR_LSJH_4BpT_9baG_HNhTyG8nOlEq; tt_webid=6836254315318904327; tt_scid=4S-AkxKaR.g1JnfZ-bwe5fSaCPq5VX6Y7gi.A3DaLh9cV3BU5TeDnCuAfMTZuKzQe045; tt_webid=6836254387535889927'
    }
    for article_url in comments_ID:
        print ("article_url = ", article_url)
        # BUG FIX: headers must be passed by keyword; the second positional
        # argument of Request is the POST body, not the header dict.
        req = urllib.request.Request(article_url, headers=headers)
        with urllib.request.urlopen(req) as response:  # close the connection when done
            # BUG FIX: original called '.endoce' (typo) instead of '.decode'.
            html_text = response.read().decode("utf-8")
        # BUG FIX: re.findall needs a string; the original passed a
        # BeautifulSoup object, which raises TypeError.  The regexes now run
        # over the raw HTML directly.
        # Article title
        title_text = re.findall('<h1 class="article-title">(.*?)</h1>', html_text)
        # Author nickname lives in the page's <span> elements
        title_user_NicName = re.findall('<span>(.*?)</span>', html_text)
        print ("title_user_NicName = ", title_user_NicName)
        if len(title_user_NicName) < 2:
            # Layout changed or the request was blocked: skip instead of crashing.
            print("unexpected page layout, skipping", article_url)
            continue
        # Publish timestamp ("YYYY-MM-DD HH:MM") sits in the second <span>.
        data_time = title_user_NicName[1].split(' ')
        title_created_YMD = data_time[0]  # date part
        print ("title_created_YMD = ", title_created_YMD)
        add_title_time = data_time[1] if len(data_time) > 1 else ''  # time part
        print ("add_title_time = ", add_title_time)
        # Comment count from the comment-section header
        counts = re.findall('<div class="c-header"><em>(.*?)&nbsp;</em>条评论</div>', html_text)
        comments_count = counts[0] if counts else ''
        print ("comments_count = ", comments_count)

        position1 = (article_url, title_text, title_user_NicName,
                     title_created_YMD, add_title_time, comments_count, " ", " ")
        writer.writerow(position1)  # append the article row to the CSV


#------------------------------------------------- fetch comment data ---------------------------------------------------
# comments_ID: the accumulator of topic URLs / group_ids built by get_title_id()
def get_page(comments_ID):
    """Fetch the comment list for every collected group_id.

    Args:
        comments_ID: accumulator produced by get_title_id() — alternating
            lists of article URLs (even positions) and group_ids (odd
            positions).

    Returns:
        list: the concatenated ``data`` payloads of every successful
        comment-API response (empty list if nothing succeeded — the
        original could implicitly return None, which crashed write_csv()).
    """
    headers = {
            'Referer': 'https://www.toutiao.com/search/?keyword=%E7%96%AB%E6%83%85',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3',
            'Upgrade-Insecure-Requests': '1',
            'Accept-Encoding': 'gzip, deflate, br',
            'Host': 'www.toutiao.com',
            'Connection': 'Keep-Alive',
            'Cookie': 's_v_web_id=verify_kb61bnsm_kfxvdafq_Iiqv_4aNj_BCCY_kc4nhsjSIlzO; WEATHER_CITY=%E5%8C%97%E4%BA%AC; tt_scid=F90vHSDuOzp6YJoWzXlKCxvkNrrPU-kTvMrW5b7liQiqgo0wlmrDVSu6GHot3KN72b54; SLARDAR_WEB_ID=70177b41-4efb-4d47-b308-df2980256b1c; __ac_nonce=05eddf644001ff57dec09; tt_webid=6835839392033408519; csrftoken=5a1e9e6d98ec3809ddcab6cba95cb7b9; __ac_signature=nx5SvgAgEBDoi4buZJP6kZ8fU6AAMHPlfCrevo-9HNcaCodzey65Mbj0VeBea7Xb24F6I19hYVOkUS7FtMYBsd6nkyDdTBM5y8B0kFFRuEUUmqujh4rXQxrYLWoAh4PIZOa; __tasessionId=y9y6v4v981591604539239; ttcid=ef0990a6461141688564aa484d2103e141; tt_webid=6835889351370638861'
            }
    all_comments = []
    # BUG FIX: the original hard-coded comments_ID[2][i] (IndexError when only
    # one page was scraped) and returned from inside the loop, so at most one
    # response was ever used.  group_id lists sit at the odd positions of the
    # accumulator: [urls_p0, gids_p0, urls_p1, gids_p1, ...].
    for group_ids in comments_ID[1::2]:
        for group_id in group_ids or []:  # jsonpath returns False on no match
            url = 'https://www.toutiao.com/article/v2/tab_comments/?aid=24&app_name=toutiao-web&group_id={}&item_id=6833391629412352519&offset=5&count=20'.format(group_id)
            try:
                r = requests.get(url, headers=headers, stream=True)
                if r.status_code == 200:
                    all_comments.extend(r.json()['data'])
            except requests.ConnectionError as e:
                # Log and keep going with the next group_id rather than aborting.
                print('error', e.args)
    return all_comments
#------------------------------------------------- write comment data to the CSV ---------------------------------------------------
def write_csv(jsondata):
    """Append one CSV row per comment record (nickname + text).

    The first six columns belong to the article rows, so they are padded
    with single spaces here.
    """
    for comment in jsondata:  # renamed from 'json' to avoid shadowing the module name
        nickname = comment['user_name']  # commenter nickname
        text = comment['text']           # comment body
        print(text)
        writer.writerow((" ", " ", " ", " ", " ", " ", nickname, text))
#------------------------------------------------- main driver ---------------------------------------------------
def main():
    """Drive the crawl: collect IDs, scrape article pages, then comments.

    Closes the module-level CSV file when the loop finishes.
    """
    comments_ID = get_title_id()
    count_title = len(comments_ID)
    spider_title(comments_ID)

    for count, comment_ID in enumerate(comments_ID):
        print ("正在爬取第%s个文章，一共找到个%s文章需要爬取"%(count+1, count_title))
        try:
            # Fetch the comment payloads for the collected IDs.
            jsondata = get_page(comments_ID)
        except Exception as e:
            # Network/parse failure: report it and move on to the next item.
            # (The original had a bare except whose handler called
            # write_csv(jsondata) — i.e. comments were only written when
            # get_page *failed*, and then 'jsondata' was unbound.)
            print('error', e)
            continue
        if jsondata:
            write_csv(jsondata)  # BUG FIX: write on success, not in the except branch
        print ("--------------------------分隔符---------------------------")
    csvfile.close()
if __name__ == '__main__':
    # Run the whole crawl (collect topic IDs, scrape articles and comments).
    main()
    # Report the elapsed wall-clock time in minutes.
    endTime = time.time()
    useTime = (endTime-startTime) / 60
    print("该次所获的信息一共使用%s分钟"%useTime)
