#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :deviantart_util_hsw.py
# @Time      :2023/7/3 
# @Author    :CL
# @email     :1037654919@qq.com

import time

#代理 pigcha
proxies_pigcha = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}
ipidea_list=[{"ip":"49.51.189.174","port":10415},{"ip":"49.51.189.174","port":10467},{"ip":"49.51.189.174","port":10396},{"ip":"49.51.189.174","port":10489},{"ip":"49.51.189.174","port":10399},{"ip":"49.51.189.174","port":10463},{"ip":"49.51.189.174","port":10425},{"ip":"49.51.189.174","port":10471},{"ip":"49.51.189.174","port":10454},{"ip":"49.51.189.174","port":10477},{"ip":"49.51.189.174","port":10475},{"ip":"49.51.189.174","port":10392},{"ip":"49.51.189.174","port":10488},{"ip":"49.51.189.174","port":10398},{"ip":"49.51.189.174","port":10459},{"ip":"49.51.189.174","port":10491},{"ip":"49.51.189.174","port":10455},{"ip":"49.51.189.174","port":10395},{"ip":"49.51.189.174","port":10437},{"ip":"49.51.189.174","port":10394},{"ip":"49.51.189.174","port":10420},{"ip":"49.51.189.174","port":10461},{"ip":"49.51.189.174","port":10430},{"ip":"49.51.189.174","port":10433},{"ip":"49.51.189.174","port":10413},{"ip":"49.51.189.174","port":10431},{"ip":"49.51.189.174","port":10490},{"ip":"49.51.189.174","port":10442},{"ip":"49.51.189.174","port":10460},{"ip":"49.51.189.174","port":10423},{"ip":"49.51.189.174","port":10484},{"ip":"49.51.189.174","port":10426},{"ip":"49.51.189.174","port":10447},{"ip":"49.51.189.174","port":10453},{"ip":"49.51.189.174","port":10476},{"ip":"49.51.189.174","port":10462},{"ip":"49.51.189.174","port":10408},{"ip":"49.51.189.174","port":10486},{"ip":"49.51.189.174","port":10444},{"ip":"49.51.189.174","port":10451},{"ip":"49.51.189.174","port":10403},{"ip":"49.51.189.174","port":10441},{"ip":"49.51.189.174","port":10445},{"ip":"49.51.189.174","port":10446},{"ip":"49.51.189.174","port":10434},{"ip":"49.51.189.174","port":10470},{"ip":"49.51.189.174","port":10448},{"ip":"49.51.189.174","port":10418},{"ip":"49.51.189.174","port":10458},{"ip":"49.51.189.174","port":10472},{"ip":"49.51.189.174","port":10449},{"ip":"49.51.189.174","port":10474},{"ip":"49.51.189.174","port":10402},{"ip":"49.51.189.174","port":10400},{"ip":"49.51.189.174","port":10438},{"ip":"49.51.189.174","port":10410},{"ip":"49.51.189.174","port":10428},{"ip":"49.51.189.174","port":10439},{"ip":"49.51.189.174","port":10452},{"ip":"49.51.189.174","port":10401},{"ip":"49.51.189.174","port":10412},{"ip":"49.51.189.174","port":10457},{"ip":"49.51.189.174","port":10450},{"ip":"49.51.189.174","port":10416},{"ip":"49.51.189.174","port":10417},{"ip":"49.51.189.174","port":10456},{"ip":"49.51.189.174","port":10466},{"ip":"49.51.189.174","port":10479},{"ip":"49.51.189.174","port":10443},{"ip":"49.51.189.174","port":10481},{"ip":"49.51.189.174","port":10469},{"ip":"49.51.189.174","port":10464},{"ip":"49.51.189.174","port":10485},{"ip":"49.51.189.174","port":10421},{"ip":"49.51.189.174","port":10478},{"ip":"49.51.189.174","port":10435},{"ip":"49.51.189.174","port":10419},{"ip":"49.51.189.174","port":10409},{"ip":"49.51.189.174","port":10414},{"ip":"49.51.189.174","port":10424},{"ip":"49.51.189.174","port":10406},{"ip":"49.51.189.174","port":10422},{"ip":"49.51.189.174","port":10487},{"ip":"49.51.189.174","port":10407},{"ip":"49.51.189.174","port":10483},{"ip":"49.51.189.174","port":10468},{"ip":"49.51.189.174","port":10465},{"ip":"49.51.189.174","port":10436},{"ip":"49.51.189.174","port":10482},{"ip":"49.51.189.174","port":10480},{"ip":"49.51.189.174","port":10427},{"ip":"49.51.189.174","port":10432},{"ip":"49.51.189.174","port":10397},{"ip":"49.51.189.174","port":10440},{"ip":"49.51.189.174","port":10405},{"ip":"49.51.189.174","port":10429},{"ip":"49.51.189.174","port":10411},{"ip":"49.51.189.174","port":10404},{"ip":"49.51.189.174","port":10393},{"ip":"49.51.189.174","port":10473}]
import random
ip = random.sample(ipidea_list,1)
proxies_ipidea = {'http':str(ip[0]['ip'])+':'+str(ip[0]['port']),
              'https':str(ip[0]['ip'])+':'+str(ip[0]['port'])}
proxies = proxies_pigcha
import requests
from bs4 import BeautifulSoup
from utils import MongoDBUtil,mongo_manager
from sqlalchemy import create_engine
import pandas as pd
import os
import json
import shutil
from sqlalchemy import create_engine
engine = create_engine("mysql+pymysql://cl007:ChengLei-0711@127.0.0.1:3306/test?charset=utf8")
MongoDBUtil = MongoDBUtil()
images_deviantart =mongo_manager("images_deviantart",db="AI_car_images")

headers = {
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/114.0",
    "Accept": "application/json, text/plain, */*",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
    "Accept-Encoding": "gzip, deflate, br",
    "Referer": "https://www.deviantart.com/search/deviations?q=car",
    "Connection": "keep-alive",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "TE": "trailers"
}

cookies = {
    "userinfo": "__c5feb6763250224f255c%3B%7B%22username%22%3A%22mashaoming%22%2C%22uniqueid%22%3A%224481e293f9d1d47e024ef899a3bc0f81%22%2C%22dvs9-1%22%3A1%2C%22ab%22%3A%22tao-fp4-1-b-7%7Ctao-pcp-1-b-6%7Ctao-aap-1-b-6%7Ctao-stt-1-b-3%7Ctao-ter-1-a-10%22%7D",
    "_pbjs_userid_consent_data": "3524755945110770",
    "_iiq_fdata": "%7B%22pcid%22%3A%2206778380-4e3d-4c06-a2d9-7bc193f3c431%22%7D",
    "cookie": "e2fcdb84-e53a-4383-82f6-860180ec1863",
    "cto_bundle": "cYQwrl9INUQ2TmRZOGRQTHhEdm1sN3FZaU5hOExOQVUlMkJlcTgyYmwyRDNWcHd2VlBrMzFRUDZNNFElMkJQJTJCMmozejF3bzN2WVYyMEdFQnFPSWRiJTJCVnlMeVdXcndVWU9WdUdNQkI1MU9WS3NUalBnZ2RGWCUyQmFXcjBRWkFtbDZyQ0VqWEdBJTJCc1BpclA0VWY5MnZXcVJWdGM0bm9mUkElM0QlM0Q",
    "cto_bidid": "-0Bfk19aRTF6dTZGWlpHZkoydk5zR0pYTDVtdGo3Q2JVJTJGeW9PbWRtMHJJeUJzZEQ5Vk5oUWolMkZMcWJzeHZ0Yno1Z3VpTlJER24yVnFtcXJGWTdMWXdDSTZ2THJPT2hXcXZqQm91Qlh3eTFtZmwxQ0klMkJFVEw2Zm1vRWx1cnM2NzNTTyUyRmxx",
    "__qca": "P0-483085797-1688019575436",
    "cto_dna_bundle": "YxilK19ObEJNZVNBaVB3Z2dvc2RXTDVjS2kwbmtGbktnRk5pJTJGRWRyVFdvMCUyRjVTZzBnMDN6M2xHakkzaCUyRlNyVmRhZlZ4VFNZZmphUjFvMGt2alRqakxTQmlmZyUzRCUzRA",
    "__gads": "ID=820756d59f47e933:T=1688019594:RT=1688347258:S=ALNI_MbPczHwE5QGYtEd85lIKADluQyWMQ",
    "__gpi": "UID=00000c1c75cdebba:T=1688019594:RT=1688347258:S=ALNI_Ma9DwCCijm2vWYjIGqrbShfHd5r_A",
    "vd": "__d4c8e45a5a309bf9976f%3B%22Bkn%2BVw%2CBkomOX%2CA%2CB%2CA%2C%2CB%2CA%2CB%2CBkomOX%2CBkomOX%2CA%2CA%2CA%2CA%2C13%2CA%2CB%2CA%2CA%2CA%2CA%2CB%2CA%2CA%2C%22",
    "FCNEC": "%5B%5B%22AKsRol_HDx9VBXFNg-7L1KFZrIwjNmKrcP608iMdctTh1nAZom1iVgC4opmjVkk30d1qORL8vccyNyQ-sdo7CZxbSw3A6fgZRa6VrD0d8wG9A_5cA3ir7DGtF1qVe9uQm3ViHFliG_8WCFokP_KnNw3QKr_KqzDHTA%3D%3D%22%5D%2Cnull%2C%5B%5D%5D",
    "_px": "Us/3EWv1lww4nlWsYb/3m7OaC+iUT19csoPTefXCY0qrT7YbpzduHHgy6IDytcU1XbJVGukpvHOCbSHMcJyM4w==:1000:hIGJ9D8MQA7MGVg+c2OW/q5xfIrE13AJqDFpNkQ0q0KFzJHrCFbkGae+wR+PAao/Qx+SIl7bzTx3nsSdnBwsMV39WQlrJUvw/ye51sD3xf24wtC+zOzv1+hiGKY+itmnOnxdGCm83Jc4xndIQOl1Gv5EzNx1QtC4i4qQfsb80w2Ap7jYzNqgYDL148ynmCU9uqpft25PGNVymgy/q0HuueeW8FhepmN1gMgVlzN3rXNz5CxVmXpSmPH66Lr9xcUFaIo2mIzXaUYxCgpQXu7qzw==",
    "_pxvid": "288c607f-193f-11ee-98bd-885e84320cf1",
    "g_state": "{\"i_l\":0}",
    "auth": "__41c9a9096263fe9980c0%3B%22f3b018a25f8c795ee4001f28668f0080%22",
    "auth_secure": "__590f0e80e174697a4589%3B%22cfb8452b13ba59edfad72104bd44dc0e%22",
    "td": "6:1044x532%3B7:960%3B12:1040x777.5%3B20:1919",
    "pxcts": "288c7175-193f-11ee-98bd-4b7a6d776254"
}
cookies['vd']='__f9577806da4dd67ef197%3B%22BkouqH%2CBkouqH%2CA%2CB%2CA%2C%2CB%2CA%2CB%2CBkouqH%2CBkouqH%2CA%2CA%2CA%2CA%2C13%2CA%2CB%2CA%2CA%2CA%2CA%2CB%2CA%2CA%2C%22'

def get_json(kw,cursor =None):
    response =None
    headers = {
        "authority": "www.deviantart.com",
        "accept": "application/json, text/plain, */*",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.7,en-GB;q=0.6",
        "referer": "https://www.deviantart.com/search/deviations?q=car",
        "sec-ch-ua": "\"Microsoft Edge\";v=\"113\", \"Chromium\";v=\"113\", \"Not-A.Brand\";v=\"24\"",
        "sec-ch-ua-mobile": "?1",
        "sec-ch-ua-platform": "\"Android\"",
        "sec-ch-viewport-height": "18604",
        "sec-ch-viewport-width": "980",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Mobile Safari/537.36 Edg/113.0.0.0"
    }
    cookies = {
        "pxcts": "392208e8-196f-11ee-93ad-426c486f6574",
        "_pxvid": "392159bb-196f-11ee-93ad-8c24d60e9489",
        "_px": "zxhz+aHcdMXWMjPary1Z3Xz1qZaSflyGsvaz97VM06rDQjjEDCO4K8XBCiesjSE1hPYDcBAEAWEGhgtiCWdVPg==:1000:K/6ZkoLVvw9ME0ukbqdx+NO+mTQaSeZkhX+vORFweFo3BjOxjH+VBrztc7V2p2F0AiWIBx83jR0rcyPfiYCZUlDDlULalJHNOy1n0o+ZksDG8FzIVtPZ7MkH/s9V0KeFcPizar1RuXJqu/LcNmSre7e2B4oHpIgjPVTEA20e4zOuo3bEaRzQPgyEsJCkj3nKJp7GcYrvMPCe5Zhq3Fljn1injCbbuzDyQ0OY69t7/JmSXFQsaPwv7GBa7LI0KFbiey2uoWxKbPZZo73i9lP3sw==",
        "g_state": "{\"i_l\":0}",
        "auth": "__52a29a3c605a65caae04%3B%22fdc7725533f8165ccb8998d9add38efb%22",
        "auth_secure": "__d02f9013986d367bb161%3B%22d89378bd2dba8a769e97fb92baae06e8%22",
        "userinfo": "__8a0b3d4198f00a79c4a1%3B%7B%22username%22%3A%22mashaoming%22%2C%22uniqueid%22%3A%226b16efb77810db5bcefc393a8fddd03f%22%2C%22dvs9-1%22%3A1%2C%22ab%22%3A%22tao-fp4-1-b-10%7Ctao-pcp-1-b-6%7Ctao-aap-1-b-6%7Ctao-stt-1-b-3%7Ctao-ter-1-a-10%7Ctao-chh-1-a-1%22%7D",
        "td": "6:824x1041%3B7:944%3B8:308%3B12:256x4860%3B20:2272"
    }
    results=[]
    url = "https://www.deviantart.com/_napi/da-browse/api/networkbar/search/deviations"
    params = {
        "q":kw,
        "cursor":cursor,
        "csrf_token": "QDP9IhkX40UaJu29.rx7kww.seDWVJGwGg31sV6f9JW05em3FnoI4ZYlgpNIHnjVPyM"
    }

    while True:
        try:
            response = requests.get(url, headers=headers, proxies=proxies, timeout=10, cookies=cookies,
                                    params=params)
            if response:
                break
        except Exception as e:
            time.sleep(10)
            print('获取数据失败，重试:',e)

    if response.json():
        json_data = response.json()
        # print(json_data)
        nextCursor = json_data['nextCursor']
        print(response.status_code)
        print(response.cookies)
        try:
            print(response.cookies.get('vd'))
            cookies['vd'] = response.cookies.get('vd')
        except:
            pass
        deviations= json_data['deviations']
        for data in deviations:
            results.append(data['url'])
        # print(results)
        return nextCursor,results


# 基于关键词搜索获取图片链接
def get_img_url(keywordurl):
    global cursor
    print('begin:',keywordurl)
    try:
        response = requests.get(keywordurl, proxies=proxies, headers=headers, cookies=cookies)
        requests.session().close()
    except :
        print("dont get search data  time.sleep(10)")
        time.sleep(10)
        response = requests.get(keywordurl, proxies=proxies, headers=headers, cookies=cookies)
        requests.session().close()
    # print(response.text)
    soups = BeautifulSoup(response.text, 'lxml')
    # print(soups)
    results = []
    try:
        datas = soups.find_all('div', class_='_2pZkk')
        print(len(datas))
        for data in datas:
            try:
                data2 = data.find_all('div', style=True, class_=False)
                for d in data2:
                    try:
                        href = d.find('a')['href']
                        imgurl = d.find('img')['src']
                        # print(href, imgurl)
                        results.append([href, imgurl])

                        datass = parse_image(href)
                        #datass =[url2, img_src, info_title,info_tag,description]
                        if datass:
                            file = 'deviantart/vehicle/{}/'.format(str(href).split('/')[-1])
                            if os.path.exists(file):
                                print('文件夹已经存在,已经爬取过')
                                continue
                            else:
                                os.mkdir(file)
                            result =down_image(url=datass[1], PWD=file)
                            if result ==2:
                                print('图片下载失败，跳过其他内容,并删除相应文件夹')
                                if os.path.exists(file):
                                    shutil.rmtree(file)
                                continue
                            filename = file + datass[1].split('/')[-1].split('?')[0].split('.')[0] + '.txt'
                            txt_data = str(datass[2])+','+str(datass[4])
                            for tag  in datass[3]:
                                txt_data += ','+str(tag)
                            download_tag(filename=filename, data=txt_data)
                            json_dict = {"page_url": datass[0], "images": [
                                {
                                    "img_file": datass[1].split('/')[-1].split('?')[0],
                                    "label_file": datass[1].split('/')[-1].split('?')[0].split('.')[0] + '.txt',
                                    "tag": datass[3],
                                    "title": datass[2],
                                    "description":datass[4]
                                }
                            ]}
                            get_summary(save_path=file, json_dict=json_dict)
                            global bianlilist
                            bianlilist.append(href)
                    except Exception as e:
                        print('获取href失败，',e)
            except  Exception as e:
                print('data2',e)
    except Exception as e :
        print('解析关键词搜索返回页失败',e)

    cursor = None
    try:
        cursors = soups.find_all('a', class_='_1OGeq')
        for page in cursors:
            info = page.text
            print(info)
            if info == 'Next':
                cursor = page['href']
    except:
        print('not found cursor')
    return cursor

# 解析每张图片的信息
def parse_image(url2):
    time.sleep(2)
    # url2 = 'https://www.deviantart.com/nancorocks/art/Mclaren-720S-custom-widebody-839733985'
    print('begin:', url2)
    img_src =''
    info_title=''
    description=''
    info_tag = []
    try:
        response = requests.get(url2, proxies=proxies, headers=headers, cookies=cookies,timeout=10)
        requests.session().close()
        # print(response.text)
        soups = BeautifulSoup(response.text, 'lxml')
        # print(soups)
        try:
            img = soups.find('img', class_='TZM0T _2NIJr')
            if img == None:
                print('img=',img)
                img = soups.find_all('img', class_=True)[0]
            img_src = img['src']
            # print('img_src:',img_src)

        except Exception as e:
            print('img_src解析失败',e)
        try:
            info_title = soups.find('div',class_='_3UDQj').find('h1',class_=True).text
        except Exception as e:
            print('info_title解析失败', e)
        try:
            aa = soups.find('div',class_ = 'aodIv wT4l_').find_all('a')
            for a in aa:
                info_tag.append(a.text)
        except Exception as e:
            print('info_tag解析失败', e)
        try:
            description = soups.find('div', class_='_2xzib').find('div').text
            # print(description)
        except Exception as e:
            print('description解析失败', e)
    except Exception as e:
        print('parse_image 解析具体的图片网站失败',e)
    return [url2, img_src, info_title,info_tag,description]
# parse_image(url2)
# 保存图片信息

# 下载图片
def down_image(url,PWD):
    name = url.split('/')[-1].split('?')[0]
    # 将图片以最后的数字+后缀命名，方便检查文件是否存在
    filename = PWD + name
    if os.path.isfile(filename):  # 如果文件已爬取，则不再重复爬取
        print("文件存在：", filename)
        return 0
    try:
        response = requests.get(url, proxies=proxies, stream=True, timeout=30)
        with open(filename, 'wb') as fd:
            fd.write(response.content)
        requests.session().close()
        return 1
    except Exception as e:
        print('下载图片失败',e)
        return 2

#下载标签
def download_tag(filename,data):
    with open(filename,'w') as f:
        f.write(data)
    f.close()
def get_summary(save_path,json_dict):
    with open(save_path + 'summary.json', 'w') as f2:
        f2.write(json.dumps(json_dict))
    f2.close()
def save_data(results,basefile):
    for href in results:
        if href in donelist:
            print('已有数据，跳过')
            continue
        datass = parse_image(href)
        # datass =[url2, img_src, info_title,info_tag,description]
        if datass[1]:
            file = basefile+'{}/'.format(str(href).split('/')[-1])
            if os.path.exists(file):
                print('文件夹已经存在,已经爬取过')
                bianlilist.append(href)
                continue
            else:
                os.mkdir(file)
            result = down_image(url=datass[1], PWD=file)
            if result == 2:
                print('图片下载失败，跳过其他内容,并删除相应文件夹')
                if os.path.exists(file):
                    shutil.rmtree(file)
                continue
            filename = file + datass[1].split('/')[-1].split('?')[0].split('.')[0] + '.txt'
            txt_data = str(datass[2]) + ',' + str(datass[4])
            for tag in datass[3]:
                txt_data += ',' + str(tag)
            download_tag(filename=filename, data=txt_data)
            json_dict = {"page_url": datass[0], "images": [
                {
                    "img_file": datass[1].split('/')[-1].split('?')[0],
                    "label_file": datass[1].split('/')[-1].split('?')[0].split('.')[0] + '.txt',
                    "tag": datass[3],
                    "title": datass[2],
                    "description": datass[4]
                }
            ]}
            get_summary(save_path=file, json_dict=json_dict)
            bianlilist.append(href)
def send_email(userlist_add=[], mail_msg_add=' \nthat is all'):
    import smtplib
    from email.mime.text import MIMEText
    from email.utils import formataddr

    my_sender = '1037654919@qq.com'  # 发件人邮箱账号
    my_pass = 'iffxtippimrnbfdj'  # 发件人邮箱密码 （用授权码代替） ,每次使用需要打开qq邮箱网页端授权smtp
    my_user = '1037654919@qq.com'  # 收件人邮箱账号 #大鹏：dapeng.hu@geely.com
    my_user2 = 'chenglei3@geely.com'
    userlist = [my_user, my_user2]
    userlist += userlist_add
    mail_msg = """deviantart爬虫代码运行结束，请留意.

    """
    mail_msg += str(mail_msg_add)
    msg = MIMEText(mail_msg, 'plain', 'utf-8')  # 'html' 网页； 'plain'：文本

    msg['From'] = formataddr(["cl", my_sender])  # 括号里的对应发件人邮箱昵称、发件人邮箱账号
    msg['To'] = formataddr(["cl", my_user])  # 括号里的对应收件人邮箱昵称、收件人邮箱账号
    msg['To'] = formataddr(["cl", my_user2])  # 括号里的对应收件人邮箱昵称、收件人邮箱账号
    msg['Subject'] = "deviantart代码运行结束"  # 邮件的主题，也可以说是标题
    try:
        server = smtplib.SMTP_SSL("smtp.qq.com", 465)  # 发件人邮箱中的SMTP服务器，端口是25
        server.login(my_sender, my_pass)  # 括号中对应的是发件人邮箱账号、邮箱密码
        server.sendmail(my_sender, userlist, msg.as_string())  # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件
        server.quit()  # 关闭连接
    except:
        print('邮件发送失败')
def  create_wenjianjia(basefile):

    if os.path.exists(basefile):
        print('文件夹已经存在')
    else:
        os.mkdir(basefile)
def get_len_done(file = 'deviantart/bianlilist.txt'):
    with open(file) as f:
        for line in f:
            donelist.append(line.strip('\n'))
    f.close()
    print('len(donelist):',len(donelist))
if __name__ =="__main__":
    from pprint import  pprint
    kw= 'motorcar'
    basefile = 'deviantart/{}/'.format(kw)
    create_wenjianjia(basefile)
    bianlilist = []

    #已经爬取列表
    donelist =[]
    get_len_done()

    cursor = None
    page =321
    cursor="MTQwYWI2MjA9MzIyJjU5MGFjYWQwPTMyMTAmNzgwMDk5ODIlNUIwJTVEPTI1NjM2NDg0MCY3ODAwOTk4MiU1QjElNUQ9Nzc0NzMyMjImNzgwMDk5ODIlNUIyJTVEPTg3NDQ1MjUzMCY3ODAwOTk4MiU1QjMlNUQ9Njk2MTM1MzczJjc4MDA5OTgyJTVCNCU1RD0xMzQ0MzMxNzM",

    cursor,results=get_json(kw,cursor)
    print('print(cursor):', cursor)
    save_data(results,basefile)
    # 遍历
    while cursor and page<=500:
        time.sleep(5)
        page+=1
        cursor, results = get_json(kw, cursor)
        print('print(cursor):',page, cursor)
        save_data(results, basefile)


    # 保存bianlilist
    with open (basefile+'bianlilist.txt','a') as f:
        for line in bianlilist:
            f.write(line+'\n')
    f.close()

    # 发送邮件
    send_email(userlist_add=[], mail_msg_add='')


