#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :deviantart_util.py
# @Time      :2023/6/29 
# @Author    :CL
# @email     :1037654919@qq.com
import time

# Local forward proxy (the "pigcha" client) that every scraper request is
# routed through.
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}
import requests
from bs4 import BeautifulSoup
from utils import MongoDBUtil,mongo_manager
from sqlalchemy import create_engine
import pandas as pd
import os
import json
import shutil
from sqlalchemy import create_engine  # NOTE(review): duplicate of the import above
# MySQL engine used by save_data_sql().
# SECURITY(review): credentials are hard-coded; move to env vars / config.
engine = create_engine("mysql+pymysql://cl007:ChengLei-0711@127.0.0.1:3306/test?charset=utf8")
# NOTE(review): this rebinds the imported class name to an instance, shadowing
# the MongoDBUtil class for the rest of the module (used by save_data()).
MongoDBUtil = MongoDBUtil()
# Mongo collection handle for DeviantArt image documents
# (db AI_car_images, collection images_deviantart).
images_deviantart =mongo_manager("images_deviantart",db="AI_car_images")

# Browser-like request headers (Edge/Chromium on Linux) sent with every
# page fetch so requests look like a normal browser session.
headers = {
    "authority": "www.deviantart.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\"",
    "sec-ch-viewport-height": "980",
    "sec-ch-viewport-width": "966",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58"
}
# Cookies captured from a browser session (the "userinfo" value carries an
# empty username, so this appears to be an anonymous session — confirm).
# They include anti-bot tokens (_px*, pxcts) that expire; refresh them when
# requests start getting blocked.
cookies = {
    "userinfo": "__75d4bb36d3f680e7dce6%3B%7B%22username%22%3A%22%22%2C%22uniqueid%22%3A%22cf82b7cf2e5372dc899064b7bb75259f%22%2C%22dvs9-1%22%3A1%2C%22ab%22%3A%22tao-fp4-1-a-4%22%7D",
    "_pbjs_userid_consent_data": "3524755945110770",
    "_iiq_fdata": "%7B%22pcid%22%3A%22ea7377be-a929-48a5-84bd-632634427fc1%22%7D",
    "cookie": "9bbae4ce-b0be-491f-8722-98e9f78c0202",
    "__qca": "P0-1592134825-1687943683376",
    "__gads": "ID=0bbc9f746c835e3e:T=1687943686:RT=1688017193:S=ALNI_MYOdp3Y-GMeeKDqbuDCmduoYAJuOw",
    "__gpi": "UID=00000c1ac34598e0:T=1687943686:RT=1688017193:S=ALNI_Mb8JzwBKSJKTZ5GTfXPYVjzQllk5Q",
    "cto_bundle": "rxTzsl9PazR4THN6dHFTWHpuWDVPSiUyQnlWdVk3OVY5RlRINVZBT2xHaTJUN05jSjB0RE5na1o3bGp4cjFpd3h1cDZKSlg1S3RMJTJCU3RXYWVLTCUyRm11M1c4VURHU0k3ZGVhamtsTHk3WHc5bSUyQjFjcHUwQmdwWnBxQVhDamdSUHZYQUJCcnZv",
    "cto_bidid": "ZQ0uW19hekpBOEJnd1M1bTdyVGlEbXc2UXZSUWt6cDVlUW5Cd2tSVHRzSCUyQmhUczdTN0ZXS1FvZlBXYXBySVJZWlBYZ3FFYlM5dUwzSmslMkI3ejZZcnhhTjVFYVElM0QlM0Q",
    "FCNEC": "%5B%5B%22AKsRol_0t4fLDuWDwKd_o_JN37RFxUWfU5JlrY10PfvCWyF3sSR9kfGR4JXBWO3r49fP7B1bj2lFoAf_26JUKnIUQq6xJ13E7eDcO7kP5RcyMrpY-qkKU539Qc1-4yhkyx9oMLN1b5wQ88XSrmRWBaZ9f0AjuBXznQ%3D%3D%22%5D%2Cnull%2C%5B%5D%5D",
    "td": "6:1222x712%3B7:751%3B12:1778x980",
    "pxcts": "0b9d9c9e-1640-11ee-a6a3-777a4477564d",
    "_pxvid": "0b9d8f7e-1640-11ee-a6a3-199ff04d4663",
    "_px": "6h5RPIxJDGSTbwuCCH2XqviQ6xhmnvpBb3j3yC0YR+AZ1gyqUbYvf8zoU1r6N+ywvW/m7Ej+O0wdgs01xAWK/Q==:1000:X3KtK2fKFW9q45eoAJM8Q5WW6OPIUlk7spiQKxcnqmTFSONRBNnC2Cs+AfW5a5SBh3t+1f8Mvp1Ue3D4Xh+oSbufsZKDXj282yjXq7CuBleGxnvlq5y4IzRVzD62QOxT7IPv/lsDy/NqcnUGoE7Co0vAcAiTZKrjaMd1GddXMota7SB0D3ervvmgjSqjoFFu2rJCuPpuZAPdR6pbPj//RrV9IbBR5pgXYLbs1RwixfyMjgXlcmSzoZYNx1UwaRncgg7PAIpm7uoA1L84ST3/4w==",
    "vd": "__538575f3aa8e94dbcd51%3B%22Bkm%5C%2FoB%2CBknRk1%2CA%2Cc%2CA%2C%2CB%2CA%2CB%2CBknRk1%2CBknRpa%2CA%2CA%2CA%2CA%2C13%2CA%2CB%2CA%2CA%2CA%2CA%2CB%2CA%2CA%2C%22"
}


def get_json(url):
    """Query DeviantArt's internal search API (da-browse networkbar search).

    NOTE(review): the ``url`` argument is immediately overwritten by the
    hard-coded API endpoint below, so it is effectively ignored — confirm
    whether any caller expects it to be used.

    Prints the raw response body and object (original debug behavior) and
    now also returns the ``requests.Response`` so callers can inspect the
    status or parse the JSON; previously the result was discarded.
    """
    import requests  # kept local so the function is self-contained
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/114.0",
        "Accept": "application/json, text/plain, */*",
        "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
        "Accept-Encoding": "gzip, deflate, br",
        "Referer": "https://www.deviantart.com/search/deviations?q=car",
        "Connection": "keep-alive",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "TE": "trailers"
    }
    # Cookies from a logged-in session (auth/auth_secure present); they
    # expire and must be refreshed when the API starts rejecting requests.
    cookies = {
        "userinfo": "__c5feb6763250224f255c%3B%7B%22username%22%3A%22mashaoming%22%2C%22uniqueid%22%3A%224481e293f9d1d47e024ef899a3bc0f81%22%2C%22dvs9-1%22%3A1%2C%22ab%22%3A%22tao-fp4-1-b-7%7Ctao-pcp-1-b-6%7Ctao-aap-1-b-6%7Ctao-stt-1-b-3%7Ctao-ter-1-a-10%22%7D",
        "_pbjs_userid_consent_data": "3524755945110770",
        "_iiq_fdata": "%7B%22pcid%22%3A%2206778380-4e3d-4c06-a2d9-7bc193f3c431%22%7D",
        "cookie": "e2fcdb84-e53a-4383-82f6-860180ec1863",
        "cto_bundle": "cYQwrl9INUQ2TmRZOGRQTHhEdm1sN3FZaU5hOExOQVUlMkJlcTgyYmwyRDNWcHd2VlBrMzFRUDZNNFElMkJQJTJCMmozejF3bzN2WVYyMEdFQnFPSWRiJTJCVnlMeVdXcndVWU9WdUdNQkI1MU9WS3NUalBnZ2RGWCUyQmFXcjBRWkFtbDZyQ0VqWEdBJTJCc1BpclA0VWY5MnZXcVJWdGM0bm9mUkElM0QlM0Q",
        "cto_bidid": "-0Bfk19aRTF6dTZGWlpHZkoydk5zR0pYTDVtdGo3Q2JVJTJGeW9PbWRtMHJJeUJzZEQ5Vk5oUWolMkZMcWJzeHZ0Yno1Z3VpTlJER24yVnFtcXJGWTdMWXdDSTZ2THJPT2hXcXZqQm91Qlh3eTFtZmwxQ0klMkJFVEw2Zm1vRWx1cnM2NzNTTyUyRmxx",
        "__qca": "P0-483085797-1688019575436",
        "cto_dna_bundle": "YxilK19ObEJNZVNBaVB3Z2dvc2RXTDVjS2kwbmtGbktnRk5pJTJGRWRyVFdvMCUyRjVTZzBnMDN6M2xHakkzaCUyRlNyVmRhZlZ4VFNZZmphUjFvMGt2alRqakxTQmlmZyUzRCUzRA",
        "__gads": "ID=820756d59f47e933:T=1688019594:RT=1688347258:S=ALNI_MbPczHwE5QGYtEd85lIKADluQyWMQ",
        "__gpi": "UID=00000c1c75cdebba:T=1688019594:RT=1688347258:S=ALNI_Ma9DwCCijm2vWYjIGqrbShfHd5r_A",
        "vd": "__6c436d9f83ea52ef2a1e%3B%22Bkn%2BVw%2CBkoV4y%2CA%2Cq%2CA%2C%2CB%2CA%2CB%2CBkohs8%2CBkoiK7%2CA%2CA%2CA%2CA%2C13%2CA%2CB%2CA%2CA%2CA%2CA%2CB%2CA%2CA%2C%22",
        "td": "6:1508x774%3B7:1275%3B12:1355.63330078125x1041.8499755859375%3B20:1919",
        "FCNEC": "%5B%5B%22AKsRol_HDx9VBXFNg-7L1KFZrIwjNmKrcP608iMdctTh1nAZom1iVgC4opmjVkk30d1qORL8vccyNyQ-sdo7CZxbSw3A6fgZRa6VrD0d8wG9A_5cA3ir7DGtF1qVe9uQm3ViHFliG_8WCFokP_KnNw3QKr_KqzDHTA%3D%3D%22%5D%2Cnull%2C%5B%5D%5D",
        "_px": "Us/3EWv1lww4nlWsYb/3m7OaC+iUT19csoPTefXCY0qrT7YbpzduHHgy6IDytcU1XbJVGukpvHOCbSHMcJyM4w==:1000:hIGJ9D8MQA7MGVg+c2OW/q5xfIrE13AJqDFpNkQ0q0KFzJHrCFbkGae+wR+PAao/Qx+SIl7bzTx3nsSdnBwsMV39WQlrJUvw/ye51sD3xf24wtC+zOzv1+hiGKY+itmnOnxdGCm83Jc4xndIQOl1Gv5EzNx1QtC4i4qQfsb80w2Ap7jYzNqgYDL148ynmCU9uqpft25PGNVymgy/q0HuueeW8FhepmN1gMgVlzN3rXNz5CxVmXpSmPH66Lr9xcUFaIo2mIzXaUYxCgpQXu7qzw==",
        "_pxvid": "288c607f-193f-11ee-98bd-885e84320cf1",
        "pxcts": "288c7175-193f-11ee-98bd-4b7a6d776254",
        "g_state": "{\"i_l\":0}",
        "auth": "__9407b4ad33577cf9f903%3B%22e7f041eafbf709e22123f8cfede21902%22",
        "auth_secure": "__590f0e80e174697a4589%3B%22cfb8452b13ba59edfad72104bd44dc0e%22"
    }
    # Hard-coded endpoint overrides the argument (see docstring note).
    url = "https://www.deviantart.com/_napi/da-browse/api/networkbar/search/deviations"
    # cursor encodes the pagination position; csrf_token must match the cookies.
    params = {
        "q": "car",
        "cursor": "MTQwYWI2MjA9MiY1OTBhY2FkMD0yNCZkMTc0YjZiYz1OJTJGQSY3ODAwOTk4MiU1QjAlNUQ9ODEzODMwODIwJjc4MDA5OTgyJTVCMSU1RD05MDAxOTgyMTUmNzgwMDk5ODIlNUIyJTVEPTg3ODM1ODc4MCY3ODAwOTk4MiU1QjMlNUQ9NzE2MDI3NzQ4Jjc4MDA5OTgyJTVCNCU1RD0yOTYwMDM3MDU",
        "csrf_token": "EbXKHlwzj_CXWenj.rx754r.PbuT-yGpzFgBsRTgbrwm2JPagJ2f9AKFBMDtmgpU_tU"
    }
    response = requests.get(url, headers=headers, cookies=cookies, params=params)

    print(response.text)
    print(response)
    return response

# Module-level scratch value: a search URL with an explicit pagination
# cursor. __main__ rebuilds its own `url` before use, so this is only a
# saved example/resume point.
url = "https://www.deviantart.com/search?q=car&cursor=MTQwYWI2MjA9MyY1OTBhY2FkMD00OCZkMTc0YjZiYz1OJTJGQSY3ODAwOTk4MiU1QjAlNUQ9ODc2MDgxNDE3Jjc4MDA5OTgyJTVCMSU1RD01ODMxMDExODYmNzgwMDk5ODIlNUIyJTVEPTU0MzgzMjMyOSY3ODAwOTk4MiU1QjMlNUQ9Njg1ODA2NjcxJjc4MDA5OTgyJTVCNCU1RD04NDA4NTYyMDk"

# Fetch one page of keyword-search results and collect image links
def get_img_url(keywordurl):
    """Scrape one DeviantArt search-results page.

    Appends every deviation page URL found to the global ``bianlilist`` and
    sets/returns the module-level ``cursor``: the relative URL of the "Next"
    results page, or None when there is no further page.
    """
    global cursor
    global bianlilist  # hoisted here from mid-loop; appended to per href found
    print('begin:', keywordurl)
    try:
        response = requests.get(keywordurl, proxies=proxies, headers=headers, cookies=cookies)
    except Exception:
        # Was a bare `except:` (would also swallow KeyboardInterrupt);
        # narrowed. One blind retry after a pause; a second failure raises.
        print("dont get search data  time.sleep(10)")
        time.sleep(10)
        response = requests.get(keywordurl, proxies=proxies, headers=headers, cookies=cookies)
    # NOTE: the original called requests.session().close() after each GET.
    # That built and immediately closed a brand-new Session (a no-op), so it
    # was removed.
    soups = BeautifulSoup(response.text, 'lxml')
    results = []
    try:
        # Each search hit lives in a div with the obfuscated class _2pZkk.
        datas = soups.find_all('div', class_='_2pZkk')
        print(len(datas))
        for data in datas:
            try:
                data2 = data.find_all('div', style=True, class_=False)
                for d in data2:
                    try:
                        href = d.find('a')['href']
                        imgurl = d.find('img')['src']
                        results.append([href, imgurl])
                        bianlilist.append(href)
                        continue
                        # --- Everything below is intentionally disabled by the
                        # `continue` above (the per-image download path).
                        # Kept verbatim for easy re-enabling. ---
                        datass = parse_image(href)
                        # datass = [url2, img_src, info_title, info_tag, description]
                        if datass:
                            file = 'deviantart/vehicle/{}/'.format(str(href).split('/')[-1])
                            if os.path.exists(file):
                                print('文件夹已经存在,已经爬取过')
                                continue
                            else:
                                os.mkdir(file)
                            result = down_image(url=datass[1], PWD=file)
                            if result == 2:
                                print('图片下载失败，跳过其他内容,并删除相应文件夹')
                                if os.path.exists(file):
                                    shutil.rmtree(file)
                                continue
                            filename = file + datass[1].split('/')[-1].split('?')[0].split('.')[0] + '.txt'
                            txt_data = str(datass[2]) + ',' + str(datass[4])
                            for tag in datass[3]:
                                txt_data += ',' + str(tag)
                            download_tag(filename=filename, data=txt_data)
                            json_dict = {"page_url": datass[0], "images": [
                                {
                                    "img_file": datass[1].split('/')[-1].split('?')[0],
                                    "label_file": datass[1].split('/')[-1].split('?')[0].split('.')[0] + '.txt',
                                    "tag": datass[3],
                                    "title": datass[2],
                                    "description": datass[4]
                                }
                            ]}
                            get_summary(save_path=file, json_dict=json_dict)
                    except Exception as e:
                        print('获取href失败，', e)
            except Exception as e:
                print('data2', e)
    except Exception as e:
        print('解析关键词搜索返回页失败', e)

    # Locate the "Next" pagination link; cursor stays None when absent.
    cursor = None
    try:
        cursors = soups.find_all('a', class_='_1OGeq')
        for page in cursors:
            info = page.text
            print(info)
            if info == 'Next':
                cursor = page['href']
    except Exception:  # was a bare except
        print('not found cursor')
    return cursor

# Parse one deviation page for its image URL, title, tags and description
def parse_image(url2):
    """Fetch a single deviation page and extract its metadata.

    Returns ``[url2, img_src, info_title, info_tag, description]``.
    Every field is scraped best-effort: anything that fails to parse keeps
    its empty default and the error is printed rather than raised.
    """
    print('begin:', url2)
    img_src = ''
    info_title = ''
    description = ''
    info_tag = []
    try:
        page = requests.get(url2, proxies=proxies, headers=headers, cookies=cookies)
        requests.session().close()
        soup = BeautifulSoup(page.text, 'lxml')
        # Main image: the specifically-classed <img>, falling back to the
        # first classed <img> anywhere on the page.
        try:
            img = soup.find('img', class_='TZM0T _2NIJr')
            if img is None:
                print('img=', img)
                img = soup.find_all('img', class_=True)[0]
            img_src = img['src']
        except Exception as e:
            print('img_src解析失败', e)
        # Title: the <h1> inside the header container.
        try:
            heading = soup.find('div', class_='_3UDQj').find('h1', class_=True)
            info_title = heading.text
        except Exception as e:
            print('info_title解析失败', e)
        # Tags: one <a> per tag inside the tag container.
        try:
            for anchor in soup.find('div', class_='aodIv wT4l_').find_all('a'):
                info_tag.append(anchor.text)
        except Exception as e:
            print('info_tag解析失败', e)
        # Free-text description block.
        try:
            description = soup.find('div', class_='_2xzib').find('div').text
        except Exception as e:
            print('description解析失败', e)
    except Exception as e:
        print('parse_image 解析具体的图片网站失败', e)
    return [url2, img_src, info_title, info_tag, description]
# parse_image(url2)
# Persist image metadata to MongoDB
def save_data(datas):
    """Insert one image-info document into the ``images_deviantart`` collection.

    The document ``_id`` is derived from the image filename embedded in
    ``datas['img_src']`` (last path segment, query string stripped), so
    re-inserting the same image fails as a duplicate key. Any error is
    printed and swallowed — this is a best-effort write.
    """
    try:
        img_name = datas['img_src'].rsplit('/', 1)[-1]
        datas['_id'] = img_name.split('?', 1)[0]
        MongoDBUtil.insert_one('images_deviantart', datas)
    except Exception as e:
        print(e)


def save_data_sql(datas):
    """Append one image-info row to the MySQL table ``deviantart_image_infos``.

    ``datas`` supplies the four columns img_src / info_title / info_tag /
    iamge_url (the last is misspelled in the DB schema and kept as-is).
    Failures are printed and swallowed — best-effort persistence.
    """
    try:
        row = pd.DataFrame(
            data=[datas],
            columns=['img_src', 'info_title', 'info_tag', 'iamge_url'],
        )
        row.to_sql('deviantart_image_infos', engine, if_exists='append', index=False)
    except Exception as e:
        print(e)

# Download one image
def down_image(url, PWD):
    """Download the image at ``url`` into directory ``PWD``.

    The file is named after the last URL path segment (query string
    stripped) so an existing file marks the image as already fetched.

    Returns:
        0 — file already exists (skipped),
        1 — downloaded successfully,
        2 — download failed (any partial file is removed so a retry is
            not skipped by the exists-check).
    """
    name = url.split('/')[-1].split('?')[0]
    filename = PWD + name
    if os.path.isfile(filename):  # already crawled — don't re-download
        print("文件存在：", filename)
        return 0
    try:
        # stream + iter_content actually streams to disk; the original used
        # response.content, which buffered the whole body in memory anyway.
        # The with-block also guarantees the connection is released (the old
        # requests.session().close() created and closed a NEW session: no-op).
        with requests.get(url, proxies=proxies, stream=True, timeout=30) as response:
            # Fail on HTTP errors instead of saving an error page as an image.
            response.raise_for_status()
            with open(filename, 'wb') as fd:
                for chunk in response.iter_content(chunk_size=65536):
                    fd.write(chunk)
        return 1
    except Exception as e:
        print('下载图片失败', e)
        # Remove any partially-written file so the exists-check above
        # doesn't permanently skip this image on the next run.
        if os.path.isfile(filename):
            try:
                os.remove(filename)
            except OSError:
                pass
        return 2

# Write the label/tag sidecar file for a downloaded image
def download_tag(filename, data):
    """Write ``data`` (comma-joined title, description and tags) to ``filename``.

    utf-8 is forced so non-ASCII titles/tags cannot raise UnicodeEncodeError
    on platforms with a narrower default encoding. (The original also called
    f.close() after the with-block — redundant, removed.)
    """
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(data)
def get_summary(save_path, json_dict):
    """Write ``json_dict`` as ``summary.json`` inside ``save_path``.

    ``save_path`` must end with a path separator (callers pass strings like
    'deviantart/vehicle/<name>/'). utf-8 is forced for the same reason as in
    download_tag(); the redundant post-with f2.close() was removed.
    """
    with open(save_path + 'summary.json', 'w', encoding='utf-8') as f2:
        f2.write(json.dumps(json_dict))
def send_email(userlist_add=None, mail_msg_add=' \nthat is all'):
    """Send a completion-notification email via QQ SMTP.

    Args:
        userlist_add: optional extra recipient addresses (appended to the
            built-in recipients). Default changed from a mutable ``[]`` to
            ``None`` — same effective behavior, no shared-default pitfall.
        mail_msg_add: text appended to the notification body.

    Failures are printed and swallowed; sending is best-effort.
    """
    import smtplib
    from email.mime.text import MIMEText
    from email.utils import formataddr

    # SECURITY(review): credentials are hard-coded (QQ SMTP auth code);
    # move them to environment variables / a config file.
    my_sender = '1037654919@qq.com'  # sender account
    my_pass = 'iffxtippimrnbfdj'  # SMTP auth code (QQ webmail must have SMTP enabled)
    my_user = '1037654919@qq.com'  # primary recipient
    my_user2 = 'chenglei3@geely.com'
    userlist = [my_user, my_user2]
    if userlist_add:
        userlist += userlist_add
    mail_msg = """deviantart爬虫代码运行结束，请留意.

    """
    mail_msg += str(mail_msg_add)
    msg = MIMEText(mail_msg, 'plain', 'utf-8')  # 'plain' = text body

    msg['From'] = formataddr(["cl", my_sender])
    # One To header listing both recipients. The original assigned msg['To']
    # twice, which appended two separate To headers (malformed); actual
    # delivery is governed by the sendmail() recipient list either way.
    msg['To'] = ", ".join(formataddr(["cl", u]) for u in (my_user, my_user2))
    msg['Subject'] = "舆情监控代码运行报警"  # mail subject
    try:
        server = smtplib.SMTP_SSL("smtp.qq.com", 465)  # implicit-TLS SMTP
        server.login(my_sender, my_pass)
        server.sendmail(my_sender, userlist, msg.as_string())
        server.quit()
    except Exception:  # was a bare except; narrowed so Ctrl-C still works
        print('邮件发送失败')


if __name__ =="__main__":
    # Driver: crawl DeviantArt search results for `kw` page by page,
    # collecting every deviation URL into the global `bianlilist`, dump the
    # list to a text file, then email a completion notice.
    from pprint import  pprint
    kw=  'automobile'
    # Global accumulator appended to by get_img_url().
    bianlilist=[]
    basefile = 'deviantart/{}/'.format(kw)
    if os.path.exists(basefile):
        print('文件夹已经存在')
    else:
        os.mkdir(basefile)
    # Keyword search URL (first page, no cursor).
    url = 'https://www.deviantart.com/search?q={}'.format(kw)
    # Temporary: uncomment to resume from a saved pagination cursor.
    # url = 'https://www.deviantart.com/search?q=concept+car&cursor=MTQwYWI2MjA9MTQzJjU5MGFjYWQwPTM0MDgmNzgwMDk5ODIlNUIwJTVEPTEzOTkzMjY3OSY3ODAwOTk4MiU1QjElNUQ9ODg0ODg5MjYyJjc4MDA5OTgyJTVCMiU1RD01OTA5OTA0NzEmNzgwMDk5ODIlNUIzJTVEPTY1MTg1ODM5MyY3ODAwOTk4MiU1QjQlNUQ9NzU1MTY2NDQ1'

    cursor=get_img_url(url)
    print('print(cursor):',cursor)

    # Follow the "Next" cursor until the last page (cursor becomes None).
    while cursor:
        time.sleep(5)
        url = 'https://www.deviantart.com{}'.format(cursor)
        cursor = get_img_url(url)
        print('print(cursor):', cursor)


    print('done')
    # Append all collected deviation URLs to bianlilist.txt (append mode, so
    # repeated runs accumulate). NOTE(review): f.close() after the with-block
    # is redundant.
    with open (basefile+'bianlilist.txt','a') as f:
        for line in bianlilist:
            f.write(line+'\n')
    f.close()


    # NOTE(review): the trailing ‘’ characters look like a paste artifact in
    # this message string — confirm before cleaning up.
    mail_msg_add = 'deviantart运行结束‘’'
    send_email(userlist_add=['dapeng.hu@geely.com'], mail_msg_add=mail_msg_add)


