#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :adobe_util.py
# @Time      :2023/6/29
# @Author    :CL
# @email     :1037654919@qq.com

import time
import os
import requests
import time
import shutil
import json
from utils import MongoDBUtil, get_html, mongo_manager
from bs4 import BeautifulSoup
from pprint import pprint
from multiprocessing import Pool

# Shared DB handles. NOTE(review): this rebinds the imported class name
# `MongoDBUtil` to an *instance* — the class is no longer reachable after this.
MongoDBUtil = MongoDBUtil()
adobe_url = mongo_manager("adobe_url", db="car_images")
# Local HTTP/HTTPS proxy (pigcha client) used for all outbound requests.
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}

# Browser-like default headers for page fetches (get_page).
headers = {
    "Referer": "https://www.formtrends.com/rolls-royce-returns-to-coachbuilding-roots-with-new-boat-tail/",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\""
}


def get_page(url):
    """Fetch *url* through the module proxy with up to 5 attempts.

    Returns the requests.Response on HTTP 200, or None after all attempts
    fail. Non-200 responses and network exceptions both pause briefly
    before retrying.
    """
    print('begin get：', url)
    for _ in range(5):
        try:
            response = requests.get(url, headers=headers, proxies=proxies, timeout=5)
            # Dropped the original `requests.session().close()`: it created a
            # brand-new Session and closed it — a no-op that never touched
            # the connection actually used by requests.get().
            if response.status_code == 200:
                return response
            time.sleep(3)
        except Exception as e:
            print(e)
            # Back off on network errors too (the original only slept on
            # non-200 responses, hammering a dead proxy in a tight loop).
            time.sleep(3)

    return None

def parse_main_page(response):
    """Extract article detail-page URLs from a formtrends listing page.

    Returns a (possibly empty) list of href strings. Returns [] when the
    expected listing container is absent — the original crashed with
    AttributeError on error pages or layout changes because
    soup.find(...) returned None.
    """
    soup = BeautifulSoup(response.text, 'lxml')
    container = soup.find('div', class_='block block-61 tipi-flex preview-review-bot')
    if container is None:
        return []
    urls = []
    for article in container.find_all('article'):
        try:
            urls.append(article.find('a', class_='mask-img')['href'])
        except Exception as e:
            # Article card without the expected anchor — log and skip.
            print(e)
    return urls


# print(response.text)
def down_image(file, url):  # image download helper
    """Download *url* to the local path *file*, trying up to 3 times.

    Returns 1 on success, 0 when every attempt fails. Only an HTTP 200
    body is written to disk — the original wrote whatever came back, so
    404/503 error pages were saved as if they were images.
    """
    print("开始下载：", url)
    for _ in range(3):
        try:
            response = requests.get(url, proxies=proxies, timeout=10)
            if response.status_code == 200:
                with open(file, 'wb') as fd:
                    fd.write(response.content)
                return 1
            # Non-200: pause and retry instead of persisting the error body.
            time.sleep(5)
        except Exception:
            time.sleep(5)
    print("下载失败了", url)
    return 0


def save_txt(PWD, txt_filename, data):
    """Write *data* to the file PWD + txt_filename as UTF-8 text.

    PWD is expected to end with a path separator (callers pass 'adobe/.../').
    Explicit encoding matters: titles/tags contain non-ASCII text and the
    platform default encoding is not guaranteed to handle it. The original's
    trailing f.close() after the `with` block was redundant and is dropped.
    """
    with open(PWD + txt_filename, 'w', encoding='utf-8') as f:
        f.write(data)

def save_summary(path, file, jsondata):
    """Serialize *jsondata* as JSON to the file path + file.

    Written as UTF-8 with ensure_ascii=False so non-ASCII tags/titles stay
    human-readable instead of being \\uXXXX-escaped. Uses json.dump directly
    rather than dumps + write; the redundant close() after `with` is dropped.
    """
    with open(path + file, 'w', encoding='utf-8') as f:
        json.dump(jsondata, f, ensure_ascii=False)

def save_data(insert_data):
    """Download one Adobe Stock item (thumbnail + label .txt + summary.json)
    into adobe/<key_word>/<content_id>/ and sync its status to MongoDB.

    insert_data must carry: _id, content_id, key_word, title, desc, tags,
    content_thumb_extra_large_url, content_url. Sets status to 'success'
    on completion; leaves status untouched when the download fails so a
    later pass can retry.
    """
    # One connection per call: save_data runs inside multiprocessing
    # workers, so the module-level handle cannot be shared safely.
    adobe_url = mongo_manager("adobe_url", db="car_images")
    content_id = insert_data['content_id']
    key_word = insert_data['key_word']
    path = f'adobe/{key_word}/{content_id}/'
    images = []
    if os.path.exists(path):
        name = os.listdir(path)
        if len(name) > 2 and 'summary.json' in name:
            # Already crawled (image + txt + summary present): just sync DB.
            print('文件夹已经存在,文件已经爬取，更新数据库')
            insert_data['status'] = 'success'
            adobe_url.updateOne({'_id': insert_data['_id']}, insert_data)
            adobe_url.close()
            return
        else:
            # Partial leftovers from an interrupted run — wipe and redo.
            print('删除文件夹，重跑数据', path)
            shutil.rmtree(path)
            os.makedirs(path)
    else:
        # makedirs (not mkdir) so the 'adobe/<key_word>' parent is created
        # even when save_data is reached without main() having made it.
        os.makedirs(path)
    img_src = insert_data['content_thumb_extra_large_url']
    filename = str(img_src).rsplit('/', 1)[-1]
    result = down_image(file=str(path) + str(filename), url=img_src)
    if result == 0:
        # Download failed: close the DB connection before bailing out
        # (the original returned here with the connection still open).
        adobe_url.close()
        return
    txt_filename = filename.rsplit('.', 1)[0] + '.txt'
    txt_data = str(insert_data['title']) + ',' + str(insert_data['desc'])

    for tag in insert_data['tags']:
        txt_data += ',' + str(tag)
    save_txt(PWD=path, txt_filename=txt_filename, data=txt_data)
    images.append({
        "img_file": filename,
        "label_file": txt_filename,
        "tag": insert_data['tags'],
        "title": insert_data['title'],
        "description": insert_data['desc']
    })

    if len(images) > 0:
        jsondata = {"page_url": insert_data['content_url'], "images": images}
        save_summary(path=path, file='summary.json', jsondata=jsondata)
        # Record the successful crawl in MongoDB.
        insert_data['status'] = 'success'
    else:
        # Unreachable in practice (images always gains one entry above);
        # kept for parity with the original control flow.
        insert_data['status'] = 'unsuccess'
        shutil.rmtree(path)
    adobe_url.updateOne({'_id': insert_data['_id']}, insert_data)
    adobe_url.close()


def parse_json_page(response):
    """Pick the retained fields from one Adobe Stock Ajax/Search response.

    Returns a list with one dict per search hit, in the response's item
    order; each dict carries exactly the six fields the crawler stores.
    """
    wanted = (
        'content_id',
        'title',
        'content_thumb_extra_large_url',
        'comp_file_path',
        'media_type_label',
        'content_url',
    )
    items = response.json()['items']
    return [{field: hit[field] for field in wanted} for hit in items.values()]

def main(key_word):
    """Crawl Adobe Stock search results for *key_word*, page by page.

    Creates adobe/<key_word>/, walks up to 200 result pages (100 items per
    page), inserts each hit into the `adobe_url` collection and immediately
    downloads it via save_data(). Stops early when a page yields fewer than
    100 items (i.e. the last page).

    NOTE(review): os.mkdir does not create parents — this assumes the
    'adobe/' directory already exists; confirm it is created elsewhere.
    """
    path = f'adobe/{key_word}/'
    if os.path.exists(path):
        pass
    else:
        os.mkdir(path)
    maxpage = 200
    for page in range(1, maxpage + 1):
        response = get_json_page(kw=key_word,pageNo=page)
        if response:
            results = parse_json_page(response)
            print(key_word,page, 'len(results)',len(results))
            for result in results:
                # Use content_id as the Mongo _id so re-runs dedupe naturally.
                result['_id'] =  result['content_id']
                result['key_word'] = key_word
                result['tags']=[]
                result['desc'] =''
                try:
                    MongoDBUtil.insert_one('adobe_url', result)
                except Exception as e:
                    # Duplicate _id -> already seen; skip the download.
                    print(e)
                    continue
                # Persist the image + labels and update crawl status.
                save_data(result)
            # Fewer than a full page of 100 hits means this was the last page.
            if len(results)<100:
                break

def get_json_page(kw, pageNo):
    """Request one page of Adobe Stock search results via the Ajax endpoint.

    kw     -- search keyword
    pageNo -- 1-based page index (100 items per page)
    Returns the requests.Response on HTTP 200, or None after 5 failed tries.
    """
    headers = {
        "authority": "stock.adobe.com",
        "accept": "*/*",
        "x-requested-with": "XMLHttpRequest"
    }
    url = "https://stock.adobe.com/hk/Ajax/Search"
    # Keys are given in literal form ("filters[content_type:photo]") and
    # requests percent-encodes them once. The original used pre-encoded keys
    # ("filters%5B...%5D"), which requests re-encoded to "filters%255B...",
    # so the server never received the intended filter parameters.
    params = {
        "filters[content_type:photo]": "1",
        "filters[content_type:illustration]": "1",
        "filters[content_type:zip_vector]": "1",
        "filters[content_type:video]": "1",
        "filters[content_type:template]": "1",
        "filters[content_type:3d]": "1",
        "filters[content_type:audio]": "0",
        "filters[include_stock_enterprise]": "0",
        "filters[is_editorial]": "0",
        "filters[free_collection]": "0",
        "filters[content_type:image]": "1",
        "k": kw,
        "order": "relevance",
        "safe_search": "1",
        "limit": "100",
        "search_page": pageNo,
        "get_facets": "0",
        "search_type": "pagination"
    }
    for _ in range(5):
        try:
            response = requests.get(url, proxies=proxies, headers=headers,
                                    params=params, timeout=10)
            if response.status_code == 200:
                return response
            time.sleep(5)
        except Exception as e:
            # Log timeouts/proxy errors instead of silently swallowing them
            # (original was a bare `except: pass`).
            print(e)
    return None


def mongodb_seed():
    """Re-crawl every `adobe_url` document whose status is 'unsuccess'.

    Materializes the query cursor, closes the connection, then fans the
    seed documents out to a pool of 10 worker processes running get_data().
    """
    adobe_url = mongo_manager("adobe_url", db="car_images")
    seeds = adobe_url.findAll({'status': 'unsuccess'})

    pending = list(seeds)
    print('len(seeds):', len(pending))
    adobe_url.close()

    # Parallel retry across 10 processes.
    workers = Pool(processes=10)
    workers.map(get_data, pending)
    workers.close()  # no new tasks accepted after this point
    workers.join()

def get_data(seed: dict):
    """Worker for mongodb_seed(): retry one previously-unsuccessful seed.

    NOTE(review): this function looks carried over from a different scraper
    (formtrends?) and appears broken as written:
      * parse_json_page() returns a *list* of dicts, so
        datas.update(parse_json_page(response)) raises — dict.update()
        cannot consume a list of dicts.
      * it reads seed['url'] and datas['imgs'], but nothing in this file
        ever writes those keys (main() stores content_url / content_id).
    Confirm the intended parse function before relying on this path.
    """
    datas = seed
    url2 = datas['url']
    response = get_page(url2)
    if response:
        datas.update(parse_json_page(response))
        datas['url'] = url2
        datas['_id'] = url2
        print("datas['imgs']:", datas['imgs'])
        # pprint(datas)
        save_data(datas)


if __name__ == "__main__":

    # NOTE(review): this creates 'formtrends/' but main() writes under
    # 'adobe/<key_word>/' — the 'adobe/' parent is never created here, so
    # os.mkdir inside main() will fail unless 'adobe/' already exists.
    path = f'formtrends/'
    if os.path.exists(path):
        pass
    else:
        os.mkdir(path)

    page_info = {'concept-cars': 42}
    # Keywords: formtrends favors standard spellings — concatenated forms
    # like "conceptcar" return zero results.
    kws = [ 'concept car', 'futuristic car', 'car',
             'sports car','super car',
            'Automotive',
            'fantastic car', 'amazing car',
            'vehicle car', 'vehicle',
            'wonderful car', 'beautiful car',
            'car design'
            ]

    # One worker process per keyword.
    pool = Pool(processes=len(kws))
    pool.map(main, kws)
    pool.close()  # no new tasks accepted after this point
    pool.join()
    # # Retry items marked unsuccessful in the database:
    # mongodb_seed()

    # test()
