#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :behance_crawler.py
# @Time      :2023/7/6 
# @Author    :CL
# @email     :1037654919@qq.com

import time
import shutil
import imageio
import fast_http
import json
import behance_util
import requests
from utils import MongoDBUtil, get_html, mongo_manager
import os
from bs4 import BeautifulSoup
# MongoDB collection handles (database: car_images).
# `images_keywords` holds the keyword seeds; `behance_url` the per-project crawl records.
images_keywords = mongo_manager('images_keywords',db='car_images')
behance_url = mongo_manager('behance_url',db='car_images')
# Local proxy used by `down_image` for all HTTP/HTTPS image downloads.
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}


def save_data(insert_data):
    """Fetch one Behance project page, download its images, and record results.

    For each image on the page this writes the image file plus a JSON sidecar
    (``<name>.json``) into the per-keyword directory, then updates the crawl
    record in the ``behance_url`` MongoDB collection.

    ``insert_data`` must contain at least ``_id``, ``url``, ``key_word`` and
    ``name``; it is mutated in place (``desc``, ``projecttag``, ``alt``,
    ``status``, ``lens``) before being written back.
    """
    # Fetch the single project page.
    url = insert_data['url']
    key_word = insert_data['key_word']
    behance_url = mongo_manager("behance_url", db="car_images")
    path = f'/media/chenglei3/77D014CED257D1411/images/behance/{key_word}/'
    os.makedirs(path, exist_ok=True)
    content = behance_util.get_content_detail(url)
    # Parse the page into structured data and persist it locally.
    if content is None:
        print('no get imageurl data')
        return
    desc = ''
    soups = BeautifulSoup(content, 'lxml')
    try:
        datas = soups.find('div', id='project-modules').find('div', class_='main-text')
        desc = datas.get_text()
    except AttributeError:
        # Description section missing on this project page; keep desc empty.
        pass
    projecttag = []
    try:
        datas = soups.find('div', class_='ProjectInfo-sideBar-Nmz e2e-ProjectInfo-sidebar').find(
            'ul', class_=True).find_all('li')
        for li in datas:
            tag = li.get_text()
            if len(str(tag)):
                projecttag.append(tag)
    except AttributeError:
        # Sidebar tag list missing; leave projecttag empty.
        pass
    insert_data['desc'] = desc
    insert_data['projecttag'] = projecttag
    datas = behance_util.parse_content_detail(content)
    if datas:
        images = 0
        for data in datas:
            if data['src']:
                img_src = data['src']
                if 'alt' in data:
                    insert_data['alt'] = data['alt']
                filename = str(img_src).rsplit('/', 1)[-1]
                # Skip placeholder ("blank") images.
                if 'blank' in filename:
                    continue
                # BUG FIX: the destination path previously used a literal
                # '(unknown)' suffix, so every image of the project overwrote
                # the same file while the JSON sidecar used the real filename.
                image_path = f'{path}{filename}'
                result = down_image(file=image_path, url=img_src)
                if result == 0:
                    continue

                img = imageio.v2.imread(image_path)
                # Image dimensions, recorded in the sidecar's `size` field.
                height, width = img.shape[0], img.shape[1]
                label = str(insert_data['name']) + ',' + str(desc)
                for tag in projecttag:
                    label += ',' + str(tag)

                json_filename = filename.rsplit('.', 1)[0]
                json_data = {"id": filename.split('.')[0], 'key_word': key_word, "image_url": img_src,
                             'size': f'{height},{width}',
                             "label": label, "desc": desc}
                # ensure_ascii=False emits raw UTF-8, so open with an explicit
                # encoding instead of relying on the platform default.
                with open(f'{path}{json_filename}.json', 'w', encoding='utf-8') as f:
                    json.dump(json_data, f, ensure_ascii=False)
                images += 1
        if images > 0:
            # Mark the MongoDB crawl record as successful.
            insert_data['status'] = 'success'
        insert_data['lens'] = images
    behance_url.updateOne({'_id': insert_data['_id']}, insert_data)
    behance_url.close()


class behance_crawler(fast_http.Base_Model):
    """Crawler worker: for one keyword seed, pages through Behance search
    results, records each project URL in MongoDB, and downloads its images
    via :func:`save_data`. Finally marks the seed as done."""

    def crawler(self, seed):
        # `seed` is a document from the `images_keywords` collection;
        # assumes it carries `keyword_en` and `_id` — TODO confirm schema.
        key_word = seed['keyword_en']
        print(f"开始采集关键字 {key_word}")
        path = f'/media/chenglei3/77D014CED257D1411/images/behance/{key_word}/'
        os.makedirs(path, exist_ok=True)
        after = ''
        page = 0
        while True:
            # Pass the `after` cursor returned in the previous JSON response
            # to page forward; the response yields `after` and `nodes`.
            response = behance_util.get_page(key_word, after)
            page += 1
            if page > 10:
                break
            after = response['data']['search']['pageInfo']['endCursor']
            print(after)
            nodes = response['data']['search']['nodes']
            # Stop when the result page carries no nodes.
            if nodes:
                # Record and crawl each individual project page.
                for node in nodes:
                    print(node)
                    print(node['id'],node['name'], node['url'])
                    url =  node['url']
                    name = node['name']

                    # Project URL doubles as the MongoDB _id, so duplicate
                    # projects trigger the insert exception below.
                    insert_data = {'_id': str(url), 'url': url, 'key_word': key_word, 'page': page, 'after': after,
                                    'name': name
                                   }
                    print(insert_data)
                    try:
                        behance_url.insertOne(insert_data)
                    except Exception as e:
                        print(e,'已经有数据，跳过')
                        continue
                    # Parse and persist this single project page.
                    save_data(insert_data)
            else:
                break
            # NOTE(review): this unconditional break limits the crawl to a
            # single results page and makes the `page > 10` guard above
            # unreachable — confirm whether deeper pagination was intended.
            break
        seed['behance'] = 'done'
        images_keywords.updateOne({"_id":seed['_id']},seed)


def down_image(file, url):  # download a single image
    """Download ``url`` to the path ``file`` through the module proxy.

    Retries up to 5 times with a 5-second pause between attempts.
    Returns 1 on success, 0 on failure; on failure any partially written
    file is removed so callers never see a truncated image on disk.
    """
    print("开始下载：", url)
    for i in range(5):
        try:
            response = requests.get(url, proxies=proxies, timeout=20)
            # BUG FIX: treat HTTP errors (4xx/5xx) as failures instead of
            # silently writing an error page to disk and reporting success.
            response.raise_for_status()
            with open(file, 'wb') as fd:
                fd.write(response.content)
            # (Removed `requests.session().close()` — it created a brand-new
            # Session and closed it, a no-op unrelated to the GET above.)
            return 1
        except Exception as e:
            print(e, f"下载失败，重新下载,第{i}次", url)
            time.sleep(5)
    print("下载失败了", url)
    if os.path.exists(file):
        os.remove(file)
    return 0

if __name__ == '__main__':
    # Pull up to 4 keyword seeds that have not yet been crawled on Behance
    # (no `behance` field), then fan them out to the crawler workers.
    # (Removed the unused `keys = []` local.)
    seeds = images_keywords.findAll({'behance': None}).limit(4)
    seed_list = list(seeds)
    # event_size = number of concurrent crawler workers.
    pc = behance_crawler(event_size=2)
    pc.init_seed(seed_list)
    pc.start()
