# -*- coding:utf-8 -*-
# 开发时间: 2019/10/21 13:29
import csv
import os
import requests
import json
import time
# import random
import hashlib
import pymysql

# 类型
# category=movie
# 电视剧
# category=%E7%94%B5%E8%A7%86%E5%89%A7
# 综艺
# %E7%BB%BC%E8%89%BA
# 八卦
# category=gossip
# 娱乐
# news_entertainment

def get_as_cp():
    """Generate the 'as'/'cp' anti-crawler tokens Toutiao's PC feed API expects.

    Both tokens are derived from the current Unix timestamp: its 8-digit upper
    hex form is interleaved with the head/tail of the timestamp's MD5 digest.

    Returns:
        dict: {'as': <15-char token>, 'cp': <15-char token>}.  A fixed fallback
        pair is returned if the hex timestamp is not exactly 8 digits (cannot
        happen for current dates, but kept for parity with the reference JS).
    """
    timestamp = int(round(time.time()))
    hex_ts = hex(timestamp).upper()[2:]          # 8 upper-hex digits of the timestamp
    digest = hashlib.md5(str(timestamp).encode('utf8')).hexdigest().upper()

    if len(hex_ts) != 8:
        # Hard-coded fallback tokens, same as the reference implementation.
        return {'as': "479BB4B7254C150", 'cp': "7E0AC8874BB0985"}

    head, tail = digest[:5], digest[-5:]
    # 'as' mixes digest head with the first 5 hex digits; 'cp' mixes hex digits
    # 3..7 with the digest tail.
    mixed_head = ''.join(h + t for h, t in zip(head, hex_ts[:5]))
    mixed_tail = ''.join(h + t for h, t in zip(hex_ts[3:8], tail))
    return {
        'as': 'A1' + mixed_head + hex_ts[-3:],
        'cp': hex_ts[:3] + mixed_tail + 'E1',
    }
# The four category feed templates (movie, TV series, gossip, variety shows).
# Each ends with '&' so 'as=...&cp=...' can be appended directly.
url_list = [
    'https://www.toutiao.com/api/pc/feed/?category=movie&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&tadrequire=true&',
    'https://www.toutiao.com/api/pc/feed/?category=%E7%94%B5%E8%A7%86%E5%89%A7&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&tadrequire=true&',
    'https://www.toutiao.com/api/pc/feed/?category=gossip&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&tadrequire=true&',
    'https://www.toutiao.com/api/pc/feed/?category=%E7%BB%BC%E8%89%BA&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&tadrequire=true&',
]
# Seed request: first category template plus a freshly generated token pair.
zz = get_as_cp()
# FIX: the original glued the tokens together as '...as=XXXcp=YYY' with no '&'
# separating the two query parameters (compare the correct form used in
# change_url/E_change_url); the server would see a single malformed 'as' value.
url = url_list[0] + 'as=' + zz['as'] + '&cp=' + zz['cp']

# Two alternating identities (cookie + user-agent) rotated by change_headers().
headers1 = {
    'cookie': 'tt_webid=6750805972391904782',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3730.400 QQBrowser/10.5.3805.400'
}
headers2 = {
 'cookie': 'tt_webid=6749789110511093262',
 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}

headers_list = [headers1, headers2]
# Default header set (same values as headers2); also used by down_image().
headers = {
    'cookie': 'tt_webid=6749789110511093262',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}

def change_headers(headers_list, num, headers):
    """Return the header set to use for page *num*.

    Every 13th page the identity is rotated to the other entry of
    *headers_list*; otherwise the current *headers* is kept as-is.
    """
    if num % 13:
        return headers
    current = headers_list.index(headers)
    return headers_list[(current + 1) % 2]
def change_url(url_list, url, time):
    """Build the follow-up URL for the SAME category feed.

    The current *url* is normalised back to its template form (both behot-time
    params reset to 0, the as/cp tail dropped) so it can be located in
    *url_list*; the next ``max_behot_time`` value (*time*) and a fresh as/cp
    token pair are then substituted in.

    NOTE(review): the ``time`` parameter shadows the ``time`` module inside
    this function (the commented-out history shows this bit the author once).
    """
    tokens = get_as_cp()
    parts = url.split('&')
    # Normalise: zero both behot-time parameters, keep the first six segments.
    template = (parts[0] + '&' + parts[1] + '&' + parts[2] + '&'
                + parts[3].split('=')[0] + '=' + '0' + '&'
                + parts[4][:-1].split('=')[0] + '=' + '0' + '&'
                + parts[5] + '&')
    print(template)
    print('&&&&&&&&&&&&&&&')
    matched = url_list[url_list.index(template)]
    seg = matched.split('&')
    # Re-insert the next behot time into both params and append fresh tokens.
    next_url = (seg[0] + '&' + seg[1] + '&' + seg[2] + '&'
                + seg[3][:-1] + str(time) + '&'
                + seg[4][:-1] + str(time) + '&'
                + seg[5] + '&' + 'as=' + tokens['as'] + '&cp=' + tokens['cp'])
    print(next_url)
    return next_url
def E_change_headers(headers_list, headers):
        """Unconditionally switch to the other header set in *headers_list*."""
        next_index = (headers_list.index(headers) + 1) % 2
        return headers_list[next_index]
def E_change_url(url_list, url):
    """Rotate to the NEXT category template after an empty result page.

    Fixes vs. the original:
    * ``str(time)`` stringified the *time module* (unlike change_url, this
      function has no ``time`` parameter), embedding ``<module 'time' ...>``
      into both behot-time params and producing an invalid URL.  A category
      rollover restarts from ``max_behot_time=0``, which the templates in
      *url_list* already carry — this matches the commented-out intended code.
    * a fresh as/cp pair is generated instead of reusing the stale
      module-level ``zz`` computed once at import time.
    """
    tokens = get_as_cp()
    parts = url.split('&')
    # Normalise the current url back to its template form (behot times -> 0,
    # as/cp tail dropped) so it can be located in url_list.
    template = (parts[0] + '&' + parts[1] + '&' + parts[2] + '&'
                + parts[3].split('=')[0] + '=' + '0' + '&'
                + parts[4][:-1].split('=')[0] + '=' + '0' + '&'
                + parts[5] + '&')
    print(template)
    print('&&&&&&&&&&&&&&&')
    # Advance to the next of the four category templates, wrapping around.
    next_template = url_list[(url_list.index(template) + 1) % 4]
    return next_template + 'as=' + tokens['as'] + '&cp=' + tokens['cp']
# j解析数据
def pares_first(url ,headers,num):
    global headers1
    global headers2
    item ={}
    count = 1
    response = requests.get(url=url, headers=headers)
    # print(response.text)
    result = response.content.decode('utf8')
    resultdata = json.loads(result)
    print(len(resultdata['data']))
    print(resultdata['next']['max_behot_time'])
    next_time = resultdata['next']['max_behot_time']
    if len(resultdata['data']):
        for list in resultdata['data']:
            print('爬取第' + str(num) + '页' + '第' + str(count) + '项数据')
            try:
                # 标题
                # print('标题      ' + list['title'])
                item['title'] = list['title']
                # 超链接 https://www.toutiao.com/a6750106121635103245/
                chao_lsit = 'https://www.toutiao.com/a' + list['item_id'] + '/'
                # chao_lsit='https://www.toutiao.com'+list['source_url']
                # print('超链接      ' + chao_lsit)
                item['super_url'] = chao_lsit
                # 时间戳
                # print('时间戳'       + list['behot_time'])
                # item['time'] = list['behot_time']
                # 图片链接
                # print('图片连接     ' + list['middle_image'])
                item['image_url'] = list['middle_image']
                name = item['title'].split('，')[0]
                # print('图片名字     ' + name)
                image = down_image(item['image_url'])
                # print('开始下载数据')
                # 下载图片
                filepath = write_image(name, image)
                item['image_path'] = filepath
                # write_text(item)
                save_data(item)
                print('爬取第' + str(num) + '页' + '第' + str(count) + '项数据爬取完成')
                count += 1
            except:
                item['image_path'] = ''
                print('部分数据错误，已跳过')
        # 下一个请求的url地址
        # zz = get_as_cp()
        # print(zz['as'], zz['cp'])
        # url_2 = 'https://www.toutiao.com/api/pc/feed/?category=movie&utm_source=toutiao&widen=1&max_behot_time={}&max_behot_time_tmp={}&tadrequire=true&as={}&cp={}'.format(next_time, next_time, zz['as'], zz['cp'])
        time.sleep(1)
        url = change_url(url_list, url, next_time)
        headers = change_headers(headers_list, num, headers)
        print(headers)
        return url, headers
    else:
        headers = E_change_headers(headers_list, headers)
        url = E_change_url(url_list, url)
        print("===========重新刷新爬取数据==========")
        time.sleep(2)
        return url, headers
# 下载图片的url
def down_image(ur):
    return requests.get(url=ur,headers=headers)
# 下载图片
def write_image(name, image):
    dirname = 'image'
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    filepath = dirname + '/' + name + '.jpg'
    with open(filepath, 'wb') as fp:
        fp.write(image.content)
    return filepath
# 存入数据
def write_text(item):
    filename = 'jnri.csv'
    with open(filename, 'a', newline='', encoding='utf8') as fp:
            name2 = ['类型', '标题', '文章链接', '图片url', '图片位置',]
            writer = csv.DictWriter(fp, fieldnames=name2)
            if os.path.getsize(filename) == 0:
                 writer.writeheader()
            writer.writerow({'类型': 'move', '标题': item['title'], '文章链接': item['super_url'], '图片url': item['image_url'], '图片位置': item['image_path']})

def save_data(item):
    """Insert one scraped record into the `list` table of the `spiders` MySQL DB.

    Columns written: title, link, imgurl, createtime (current Unix time),
    imagepath.  Failures roll back and are logged; the connection is always
    closed (the original leaked it if an unexpected error escaped).
    """
    db = pymysql.connect(host="localhost", user='root', password='root', db='spiders')
    try:
        cursor = db.cursor()
        sql = 'insert into list(`title`,`link`,`imgurl`,`createtime`,`imagepath`) values (%s,%s,%s,%s,%s)'
        try:
            cursor.execute(sql, (item['title'], item['super_url'], item['image_url'], time.time(), item['image_path']))
            db.commit()
            print('插入成功')
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C etc. still propagate.
            db.rollback()
            print('插入失败')
    finally:
        db.close()
def main(url, headers, pa):
    """Run *pa* scan passes; each pass crawls a growing number of feed pages.

    Pass j crawls 120 + j + 1 pages, threading the (url, headers) state
    through pares_first().  A KeyError from one page is logged and skipped.
    """
    for scan in range(pa):
        print("开第" + str(scan + 1) + '次扫描')
        total_pages = 120 + scan + 1
        for page in range(total_pages):
            print('开始爬取第' + str(page + 1) + '页数据')
            try:
                url, headers = pares_first(url, headers, page + 1)
            except KeyError:
                print('第' + str(page + 1) + '页数据爬取失败，找不到目标键')
            print('第' + str(page + 1) + '页数据爬取完成')
        print("第" + str(scan + 1) + '次扫描结束')
if __name__ == '__main__':
    page = int(input("请输入需要扫荡的次数(1-5),扫荡一次约1000条数据："))
    # FIX: the original check (page == 0 or page > 5) let negative values
    # through, after which main() silently did nothing; reject anything
    # outside 1..5.
    while page < 1 or page > 5:
        print('输入扫荡次数超过范围，请重新输入\n"请输入需要扫荡的次数(1-5):"')
        page = int(input())
    main(url, headers, page)
# https://www.toutiao.com/api/pc/feed/?category=news_entertainment&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&tadrequire=true&as=A1C5DDBAEDBCD1D&cp=5DAD0CCD014D5E1&_signature=FzvytAAASpoed4ih7ZYn5hc78q
# # https://www.toutiao.com/api/pc/feed/?category=news_entertainment&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&tadrequire=true&as=A1C53DEA0F91D2A&cp=5DAF112D72AA0E1&_signature=FzvytAAASpoed4ih7Zb3Chc78q


