#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :down_url_images.py
# @Time      :2023/7/6 
# @Author    :CL
# @email     :1037654919@qq.com

import time
import shutil
from lxml import html, etree
from bs4 import BeautifulSoup
import requests
import os
import random
import json
from multiprocessing import Pool
from utils import MongoDBUtil, get_html, mongo_manager
MongoDBUtil = MongoDBUtil()
deviantart_images = mongo_manager("deviantart_images", db="car_images")
from get_proxy import get_proxy

# 代理 pigcha
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}
headers = {
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/114.0",
    "Accept": "application/json, text/plain, */*",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
    "Accept-Encoding": "gzip, deflate, br",
    "Referer": "https://www.deviantart.com/search/deviations?q=car",
    "Connection": "keep-alive",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "TE": "trailers"
}

cookies = {
    "userinfo": "__c5feb6763250224f255c%3B%7B%22username%22%3A%22mashaoming%22%2C%22uniqueid%22%3A%224481e293f9d1d47e024ef899a3bc0f81%22%2C%22dvs9-1%22%3A1%2C%22ab%22%3A%22tao-fp4-1-b-7%7Ctao-pcp-1-b-6%7Ctao-aap-1-b-6%7Ctao-stt-1-b-3%7Ctao-ter-1-a-10%22%7D",
    "_pbjs_userid_consent_data": "3524755945110770",
    "_iiq_fdata": "%7B%22pcid%22%3A%2206778380-4e3d-4c06-a2d9-7bc193f3c431%22%7D",
    "cookie": "e2fcdb84-e53a-4383-82f6-860180ec1863",
    "cto_bundle": "cYQwrl9INUQ2TmRZOGRQTHhEdm1sN3FZaU5hOExOQVUlMkJlcTgyYmwyRDNWcHd2VlBrMzFRUDZNNFElMkJQJTJCMmozejF3bzN2WVYyMEdFQnFPSWRiJTJCVnlMeVdXcndVWU9WdUdNQkI1MU9WS3NUalBnZ2RGWCUyQmFXcjBRWkFtbDZyQ0VqWEdBJTJCc1BpclA0VWY5MnZXcVJWdGM0bm9mUkElM0QlM0Q",
    "cto_bidid": "-0Bfk19aRTF6dTZGWlpHZkoydk5zR0pYTDVtdGo3Q2JVJTJGeW9PbWRtMHJJeUJzZEQ5Vk5oUWolMkZMcWJzeHZ0Yno1Z3VpTlJER24yVnFtcXJGWTdMWXdDSTZ2THJPT2hXcXZqQm91Qlh3eTFtZmwxQ0klMkJFVEw2Zm1vRWx1cnM2NzNTTyUyRmxx",
    "__qca": "P0-483085797-1688019575436",
    "cto_dna_bundle": "YxilK19ObEJNZVNBaVB3Z2dvc2RXTDVjS2kwbmtGbktnRk5pJTJGRWRyVFdvMCUyRjVTZzBnMDN6M2xHakkzaCUyRlNyVmRhZlZ4VFNZZmphUjFvMGt2alRqakxTQmlmZyUzRCUzRA",
    "__gads": "ID=820756d59f47e933:T=1688019594:RT=1688347258:S=ALNI_MbPczHwE5QGYtEd85lIKADluQyWMQ",
    "__gpi": "UID=00000c1c75cdebba:T=1688019594:RT=1688347258:S=ALNI_Ma9DwCCijm2vWYjIGqrbShfHd5r_A",
    "vd": "__d4c8e45a5a309bf9976f%3B%22Bkn%2BVw%2CBkomOX%2CA%2CB%2CA%2C%2CB%2CA%2CB%2CBkomOX%2CBkomOX%2CA%2CA%2CA%2CA%2C13%2CA%2CB%2CA%2CA%2CA%2CA%2CB%2CA%2CA%2C%22",
    "FCNEC": "%5B%5B%22AKsRol_HDx9VBXFNg-7L1KFZrIwjNmKrcP608iMdctTh1nAZom1iVgC4opmjVkk30d1qORL8vccyNyQ-sdo7CZxbSw3A6fgZRa6VrD0d8wG9A_5cA3ir7DGtF1qVe9uQm3ViHFliG_8WCFokP_KnNw3QKr_KqzDHTA%3D%3D%22%5D%2Cnull%2C%5B%5D%5D",
    "_px": "Us/3EWv1lww4nlWsYb/3m7OaC+iUT19csoPTefXCY0qrT7YbpzduHHgy6IDytcU1XbJVGukpvHOCbSHMcJyM4w==:1000:hIGJ9D8MQA7MGVg+c2OW/q5xfIrE13AJqDFpNkQ0q0KFzJHrCFbkGae+wR+PAao/Qx+SIl7bzTx3nsSdnBwsMV39WQlrJUvw/ye51sD3xf24wtC+zOzv1+hiGKY+itmnOnxdGCm83Jc4xndIQOl1Gv5EzNx1QtC4i4qQfsb80w2Ap7jYzNqgYDL148ynmCU9uqpft25PGNVymgy/q0HuueeW8FhepmN1gMgVlzN3rXNz5CxVmXpSmPH66Lr9xcUFaIo2mIzXaUYxCgpQXu7qzw==",
    "_pxvid": "288c607f-193f-11ee-98bd-885e84320cf1",
    "g_state": "{\"i_l\":0}",
    "auth": "__41c9a9096263fe9980c0%3B%22f3b018a25f8c795ee4001f28668f0080%22",
    "auth_secure": "__590f0e80e174697a4589%3B%22cfb8452b13ba59edfad72104bd44dc0e%22",
    "td": "6:1044x532%3B7:960%3B12:1040x777.5%3B20:1919",
    "pxcts": "288c7175-193f-11ee-98bd-4b7a6d776254"
}
cookies[
    'vd'] = '__50bc57dcfe82839ed346%3B%22BkovIi%2CBkovIi%2CA%2CB%2CA%2C%2CB%2CA%2CB%2CBkovIi%2CBkovIi%2CA%2CA%2CA%2CA%2C13%2CA%2CB%2CA%2CA%2CA%2CA%2CB%2CA%2CA%2C%22'



def down_image(url, PWD):
    """Download a single image from ``url`` into the directory ``PWD``.

    The file is named after the last URL path segment (query string
    stripped) so repeat runs can detect already-downloaded images.

    Args:
        url: direct image URL.
        PWD: target directory path, expected to end with '/'.

    Returns:
        0 -- file already exists on disk, nothing downloaded
        1 -- downloaded and written successfully
        2 -- download failed (network error or non-2xx response)
    """
    name = url.split('/')[-1].split('?')[0]
    # Name the file after the trailing segment so the existence check below
    # can skip files that were already fetched.
    filename = PWD + name
    if os.path.isfile(filename):
        print(url, "文件已经存在，更新seed")
        return 0
    try:
        response = requests.get(url, proxies=proxies, stream=True, timeout=30)
        # Fail on HTTP errors (403/404/...) instead of silently writing an
        # HTML error page to disk as if it were the image.
        response.raise_for_status()
        with open(filename, 'wb') as fd:
            # stream=True + iter_content: write in chunks so a large image
            # never has to be held in memory in one piece.  (The original
            # read .content, which defeats streaming, and then created and
            # closed a brand-new throwaway session — a no-op.)
            for chunk in response.iter_content(chunk_size=8192):
                fd.write(chunk)
        return 1
    except Exception as e:
        print('下载图片失败', e)
        # Remove a possibly half-written file so the existence check above
        # does not mistake it for a complete download on the next run.
        if os.path.isfile(filename):
            try:
                os.remove(filename)
            except OSError:
                pass
        return 2

#下载标签
# Write the label/tag text file that accompanies a downloaded image.
def download_tag(filename, data):
    """Write the label string ``data`` to ``filename``.

    Args:
        filename: destination path of the .txt label file.
        data: comma-joined title/description/tags string.
    """
    # utf-8 is given explicitly because titles/descriptions/tags may contain
    # non-ASCII characters and the platform default encoding may not.
    # The explicit f.close() after the with-block was redundant and removed:
    # the context manager already closes the file.
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(data)
def get_summary(save_path, json_dict):
    """Serialize ``json_dict`` to ``summary.json`` inside ``save_path``.

    Args:
        save_path: directory path, expected to end with '/'.
        json_dict: metadata dict describing the downloaded image(s).
    """
    # json.dump writes straight to the file handle; the explicit close after
    # the with-block in the original was redundant.  utf-8 is given
    # explicitly so the summary is portable across platforms.
    with open(save_path + 'summary.json', 'w', encoding='utf-8') as f2:
        json.dump(json_dict, f2)

def parse_image(url2):
    """Fetch a DeviantArt artwork page and extract image URL and metadata.

    Retries up to 5 times on network errors or non-200 responses, sleeping
    a random 2-10 s between attempts.

    Args:
        url2: URL of a single artwork page.

    Returns:
        [url2, img_src, info_title, info_tag, description]; fields are left
        as '' / [] when the corresponding section could not be parsed.
    """
    print('begin:', url2)
    img_src = ''
    info_title = ''
    description = ''
    info_tag = []
    for _ in range(5):
        try:
            response = requests.get(url2, proxies=proxies, headers=headers,
                                    cookies=cookies, timeout=10)
            print(url2, response.status_code)
            if response.status_code == 200:
                soups = BeautifulSoup(response.text, 'lxml')
                img = soups.find('img', class_='TZM0T _2NIJr')
                if img is None:
                    # The site's class names are obfuscated and change over
                    # time; fall back to the first <img> with any class.
                    print('img=', img)
                    img = soups.find_all('img', class_=True)[0]
                img_src = img['src']
                # Each metadata section is parsed independently so one
                # missing element does not discard the others.
                try:
                    info_title = soups.find('div', class_='_3UDQj').find('h1', class_=True).text
                except Exception as e:
                    print('info_title解析失败', e)
                try:
                    aa = soups.find('div', class_='aodIv wT4l_').find_all('a')
                    for a in aa:
                        info_tag.append(a.text)
                except Exception as e:
                    print('info_tag解析失败', e)
                try:
                    description = soups.find('div', class_='_2xzib').find('div').text
                except Exception as e:
                    print('description解析失败', e)
                return [url2, img_src, info_title, info_tag, description]
            else:
                print(response.status_code, 'take a rest sleep 5s')
                time.sleep(random.randint(2, 10))
        except Exception as e:
            # The original swallowed every error silently (`pass`) and
            # retried immediately; log the cause and back off instead.
            print('parse_image 请求异常', e)
            time.sleep(random.randint(2, 10))

    print('parse_image 解析具体的图片网站失败', url2)
    return [url2, img_src, info_title, info_tag, description]
def down_images(seed):
    """Process one seed document: download its image plus label and summary.

    Runs inside a multiprocessing worker.  On success the seed is marked
    ``status='success'`` in MongoDB.

    Args:
        seed: Mongo document expected to contain at least '_id', 'url'
            and 'kw' (search keyword, used as the directory name).
    """
    # Each worker opens its own Mongo connection: the module-level handle
    # cannot be shared safely across multiprocessing workers.
    deviantart_images = mongo_manager("deviantart_images", db="car_images")
    try:
        href = seed['url']
        basefile = f"deviantart/{seed['kw']}/"
        # makedirs(exist_ok=True) replaces the exists()/mkdir() pair: it is
        # race-free and also creates a missing parent directory.
        os.makedirs(basefile, exist_ok=True)
        # datass = [url2, img_src, info_title, info_tag, description]
        datass = parse_image(href)
        if datass[1]:
            file = basefile + '{}/'.format(str(href).split('/')[-1])
            os.makedirs(file, exist_ok=True)
            result = down_image(url=datass[1], PWD=file)
            if result == 2:
                print('图片下载失败，跳过其他内容,并删除相应文件夹')
                if os.path.exists(file):
                    shutil.rmtree(file)
                # NOTE: seed is left unmarked so a later run retries it.
                return
            elif result == 1:
                img_name = datass[1].split('/')[-1].split('?')[0]
                stem = img_name.split('.')[0]
                # Label file: "title,description,tag1,tag2,..."
                txt_data = ','.join(
                    [str(datass[2]), str(datass[4])] + [str(tag) for tag in datass[3]])
                download_tag(filename=file + stem + '.txt', data=txt_data)
                json_dict = {"page_url": datass[0], "images": [
                    {
                        "img_file": img_name,
                        "label_file": stem + '.txt',
                        "tag": datass[3],
                        "title": datass[2],
                        "description": datass[4]
                    }
                ]}
                get_summary(save_path=file, json_dict=json_dict)
            seed['status'] = 'success'
            deviantart_images.updateOne({'_id': seed['_id']}, seed)
    finally:
        # The original leaked the connection on the download-failure early
        # return; finally guarantees close() on every path.
        deviantart_images.close()




if __name__ == '__main__':
    # processes=1 keeps the request rate low; raise it to parallelize.
    pool = Pool(processes=1)
    # Materialize the cursor before dispatching to the pool, then close the
    # parent-process connection — each worker opens its own.
    seeds = list(deviantart_images.findAll({"status": None}))
    print(len(seeds))
    deviantart_images.close()
    # run: one seed document per task
    pool.map(down_images, seeds)
    pool.close()  # stop accepting new work
    pool.join()   # wait for all workers to finish