#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :behance_crawler.py
# @Time      :2023/7/6 
# @Author    :CL
# @email     :1037654919@qq.com

import time
import shutil
import fast_http
from gloab_key_words import key_words
import json
import behance_util
import requests
from utils import MongoDBUtil, get_html, mongo_manager
import os
from bs4 import BeautifulSoup

# NOTE(review): this rebinds the imported class name to an instance, shadowing
# the MongoDBUtil class for the rest of the module — confirm no other code
# needs to construct a second instance.
MongoDBUtil = MongoDBUtil()

# Module-level handle to the "behance_url" collection in the car_images DB.
behance_url = mongo_manager("behance_url", db="car_images")
# Local proxy used for all image downloads (see down_image) —
# presumably a local tunnel on port 15732; verify it is running.
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}


def save_data(insert_data):
    """Fetch one project detail page, save its images and label files locally,
    then record the crawl result back into the ``behance_url`` collection.

    :param insert_data: dict with at least ``_id``, ``url``, ``key_word``,
        ``name`` and ``tags``. Mutated in place: ``desc``, ``projecttag``,
        ``status``, ``lens`` (and possibly ``alt``) are added before the
        document is written to MongoDB.
    :return: None
    """
    url = insert_data['url']
    key_word = insert_data['key_word']
    # Fresh collection handle per call (intentionally shadows the module-level one).
    behance_url = mongo_manager("behance_url", db="car_images")
    PWD = f'behance/{key_word}/'
    # makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair.
    os.makedirs(PWD, exist_ok=True)
    pathname = str(url).rsplit('/', 1)[-1]
    path = f'behance/{key_word}/{pathname}/'
    if os.path.exists(path):
        name = os.listdir(path)
        # A finished crawl leaves more than 2 files plus summary.json:
        # just mark the document as done and skip re-downloading.
        if len(name) > 2 and 'summary.json' in name:
            print('文件夹已经存在,文件已经爬取，更新数据库')
            insert_data['status'] = 'success'
            behance_url.updateOne({'_id': insert_data['_id']}, insert_data)
            behance_url.close()
            return
        # Partial crawl: wipe the directory and redo it from scratch.
        print('删除文件夹，重跑数据', path)
        shutil.rmtree(path)
        os.mkdir(path)
    else:
        os.mkdir(path)
    # Fetch and parse the single project page into structured data.
    content = behance_util.get_content_detail(url)
    if content is None:
        print('no get imageurl data')
        # Fix: the connection was previously leaked on this early return.
        behance_url.close()
        return
    desc = ''
    soups = BeautifulSoup(content, 'lxml')
    # Project description — a missing section simply leaves desc empty.
    try:
        desc = soups.find('div', id='project-modules').find('div', class_='main-text').get_text()
    except AttributeError:  # .find() returned None somewhere in the chain
        pass
    projecttag = []
    # Sidebar tag list — layout class names may change, so this is best-effort.
    try:
        lis = soups.find('div', class_='ProjectInfo-sideBar-Nmz e2e-ProjectInfo-sidebar').find(
            'ul', class_=True).find_all('li')
        for li in lis:
            tag = li.get_text()
            if tag:  # fix: len(str(tag)) was a roundabout non-empty check
                projecttag.append(tag)
    except AttributeError:
        pass
    insert_data['desc'] = desc
    insert_data['projecttag'] = projecttag
    datas = behance_util.parse_content_detail(content)
    if datas:
        images = []
        for data in datas:
            # data carries keys such as src / alt (alt is optional).
            if not data['src']:
                continue
            img_src = data['src']
            # Fix: membership test instead of a bare try/except around the lookup.
            if 'alt' in data:
                insert_data['alt'] = data['alt']
            filename = str(img_src).rsplit('/', 1)[-1]
            # Skip placeholder images.
            if 'blank' in filename:
                continue
            result = down_image(file=str(path) + str(filename), url=img_src)
            if result == 0:
                continue
            # One .txt label file per image: "name,desc,tag1,tag2,...".
            txt_filename = filename.rsplit('.', 1)[0] + '.txt'
            txt_data = str(insert_data['name']) + ',' + str(desc)
            # Prefer the tags scraped from the page; fall back to the search tags.
            tags = projecttag if projecttag else insert_data['tags']
            for tag in tags:
                txt_data += ',' + str(tag)
            save_txt(PWD=path, txt_filename=txt_filename, data=txt_data)
            images.append({
                "img_file": filename,
                "label_file": txt_filename,
                "tag": insert_data['tags'],
                "title": insert_data['name'],
                "description": desc
            })

        if len(images) > 0:
            jsondata = {"page_url": url, "images": images}
            save_summary(path=path, file='summary.json', jsondata=jsondata)
            # Only a crawl that produced at least one image counts as success.
            insert_data['status'] = 'success'
        insert_data['lens'] = len(images)
    behance_url.updateOne({'_id': insert_data['_id']}, insert_data)
    behance_url.close()


class behance_crawler(fast_http.Base_Model):
    """Crawler worker: paginates Behance search results for one keyword and
    hands every project node to :func:`save_data`."""

    def crawler(self, key_word, after='OTgw', page=20):
        """Crawl every result page for one keyword.

        :param key_word: search keyword to crawl.
        :param after: GraphQL pagination cursor to resume from. The default
            ('OTgw') preserves the previous hard-coded "temporary resume"
            state; pass '' to start from the first page.
        :param page: page counter to resume from (default matches the old
            hard-coded resume state; pass 0 for a fresh start).
        :return: None
        """
        print(f"开始采集关键字 {key_word}")
        path = f'behance/{key_word}/'
        # makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair.
        os.makedirs(path, exist_ok=True)
        while True:
            # The search response supplies both the next cursor and the nodes.
            response = behance_util.get_page(key_word, after)
            page += 1
            after = response['data']['search']['pageInfo']['endCursor']
            nodes = response['data']['search']['nodes']
            # An empty node list means we have exhausted the result pages.
            if not nodes:
                break
            for node in nodes:
                print(node['id'], str(node['src']))
                # Ask for the higher-resolution rendition of the cover image.
                src = str(node['src']).replace('disp', 'max_632')
                url = str(node['project']['url'])
                name = str(node['project']['name'])
                tags = node['tags']
                insert_data = {'_id': str(url), 'url': url, 'key_word': key_word, 'page': page, 'after': after,
                               'first_src': src, 'name': name, 'tags': tags
                               }
                try:
                    # _id is the project URL, so a duplicate insert means we
                    # already crawled this project — skip it.
                    MongoDBUtil.insert_one('behance_url', insert_data)
                except Exception as e:
                    print(e)
                    print('已经有数据，跳过')
                    continue
                # Parse and persist the single project page.
                save_data(insert_data)


def down_image(file, url):
    """Download ``url`` to local path ``file`` through the module proxies.

    Retries up to 5 times, sleeping 5s between attempts.

    :param file: destination file path.
    :param url: image URL to fetch.
    :return: 1 on success, 0 if every attempt failed.
    """
    print("开始下载：", url)
    for _ in range(5):
        try:
            response = requests.get(url, proxies=proxies, timeout=10)
            # Fix: without this check, HTTP error pages (404/500 HTML) were
            # silently written to disk as image files.
            response.raise_for_status()
            with open(file, 'wb') as fd:
                fd.write(response.content)
            # Removed `requests.session().close()`: it created a brand-new
            # session and closed it, which never affected this request.
            return 1
        except Exception:
            time.sleep(5)
    print("下载失败了", url)
    return 0


def save_summary(path, file, jsondata):
    """Serialize ``jsondata`` as JSON into ``path + file``.

    :param path: directory prefix (expected to end with a separator).
    :param file: file name, e.g. ``summary.json``.
    :param jsondata: JSON-serializable object.
    """
    # json.dump streams directly to the handle; the old trailing f.close()
    # after the with-block was a no-op and has been removed.
    with open(path + file, 'w', encoding='utf-8') as f:
        json.dump(jsondata, f)


def save_txt(PWD, txt_filename, data):
    """Write the label text ``data`` to ``PWD + txt_filename``.

    :param PWD: directory prefix (expected to end with a separator).
    :param txt_filename: label file name, e.g. ``img123.txt``.
    :param data: comma-joined label string (may contain non-ASCII tags).
    """
    # Fix: explicit UTF-8 — the labels contain Chinese/keyword text, and the
    # platform-default encoding could raise UnicodeEncodeError or garble it.
    # The old trailing f.close() after the with-block was a no-op.
    with open(PWD + txt_filename, 'w', encoding='utf-8') as f:
        f.write(data)


if __name__ == '__main__':
    keys = []
    # NOTE(review): donlist appears unused in this file — confirm before removing.
    donlist = []
    # Flatten the configured keyword dicts into a plain list of keywords.
    for ss in key_words:
        for k, v in ss.items():
            keys.append(v.strip())
    print(keys)
    # NOTE(review): the configured keywords built above are immediately
    # overridden by this hard-coded list — presumably a temporary selection
    # for the current run; verify this is intentional.
    keys = ['cardesign', 'concept car', 'futuristic car',   'conceptcar', 'car',
             'sportscar','supercar',
            'Automotive', 'super car',
            'fantastic car', 'amazing car',
            'vehicle car', 'vehicle',
            'wonderful car', 'beautiful car',

            # 'Automotive', 'conceptcar car',
            #  'supercar  vehicle',
            #  'vehicle car',
            # 'automotive car',
            # 'wonderful car', 'fascinating car',
            # 'colorful car', 'beautiful car',
            ]
    print(keys)
    # One crawler event per keyword; fast_http.Base_Model drives the workers.
    pc = behance_crawler(event_size=len(keys))
    pc.init_seed(keys)
    pc.start()
