#!/usr/bin/python3
# author:szw
# cl
# 2023年06月29日
# Email:1259577135@qq.com
# desc: pinterest 网站采集
import time
import imageio
import fast_http
import json
import pinterest_util
import requests
from utils import mongo_manager
import os

# MongoDB collection handles, all in the "car_images" database:
pinterest_images = mongo_manager("pinterest_images", db="car_images")  # one document per crawled pin id
pinterest_crawler_info = mongo_manager("pinterest_crawler_info", db="car_images")  # per-keyword/page crawl progress
images_keywords = mongo_manager("images_keywords", db="car_images")  # seed keywords consumed by main()
# Local proxy routed through for every image download request (see down_image).
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}


class pinterest_crawler(fast_http.Base_Model):
    """Crawler worker for Pinterest search results.

    For each keyword seed it pages through the listing API, downloads every
    not-yet-seen image to a local directory and records metadata both as a
    sidecar JSON file and in MongoDB.
    """

    def crawler(self, seed):
        """
        Crawl a single keyword.

        :param seed: dict holding at least {'keyword': <search term>}
        :return: None; side effects are downloaded image files, sidecar
                 JSON files, and rows in pinterest_images /
                 pinterest_crawler_info.
        """
        key_word = seed['keyword']
        print(f"开始采集关键字 {key_word}")
        path = f'/media/chenglei3/77D014CED257D1411/images/pinterest/{key_word}/'
        os.makedirs(path, exist_ok=True)
        bookmarks = ""  # empty token requests the first listing page

        page = 1
        while True:
            # Pass the bookmarks token from the previous response to page forward.
            list_content = pinterest_util.get_list(key_word, bookmarks)
            # Parse the listing: pin records plus the next-page token.
            datas, bookmarks = pinterest_util.parse_list(list_content)
            page += 1
            if page > 50:  # hard cap on listing pages per keyword
                break
            data_mon = {'_id': str(key_word) + '_' + str(page),
                        'page': page,
                        'key_word': key_word,
                        'bookmarks': bookmarks,
                        }
            try:
                pinterest_crawler_info.insertOne(data_mon)
            except Exception as e:
                # Duplicate _id -> this page was already crawled; skip it.
                print(e, key_word, page, '已经存在，跳过')
                continue
            if not datas:
                break
            # Per-page connection; renamed so it no longer shadows the
            # module-level `pinterest_images` handle.
            images_coll = mongo_manager("pinterest_images", db="car_images")
            for record in datas:
                pin_id = record['id']  # renamed: `id` shadowed the builtin
                if images_coll.exist({"id": pin_id}):
                    continue
                try:
                    images_coll.insertOne({'_id': pin_id, 'id': pin_id, 'key_word': key_word})
                except Exception as e:
                    print(e)

                content = pinterest_util.get_content_detail(pin_id)
                if content is None:
                    print('no get imageurl data')
                    continue
                # Structured detail; indices used below:
                # data[0]=image url, data[1]=title, data[2]=description, data[3]=tag list
                data = pinterest_util.parse_content_detail(content)
                if not data:
                    continue
                filename = str(data[0]).split('/')[-1]
                suffix = filename.split('.')[-1]
                image_path = f'{path}{pin_id}.{suffix}'  # single source for the file path
                dl_status = down_image(file=image_path, url=data[0])
                if dl_status == 0:  # download failed; move on to the next pin
                    continue

                label = ','.join(data[3])
                img = imageio.v2.imread(image_path)
                height, width = img.shape[0], img.shape[1]
                json_data = {'_id': pin_id, 'image_url': data[0],
                             'size': f'{height},{width}',
                             'label': label, 'desc': data[2]}
                # Explicit utf-8 keeps ensure_ascii=False output valid
                # regardless of the platform's default locale encoding.
                with open(f'{path}{pin_id}.json', 'w', encoding='utf-8') as f:
                    json.dump(json_data, f, ensure_ascii=False)

                # Upsert the full crawl record into MongoDB.
                insert_data = {'_id': pin_id, 'id': pin_id, 'key_word': key_word,
                               'src': data[0], 'title': data[1],
                               'description': data[2],
                               'tag_list': data[3], 'status': 'success'}
                images_coll.updateOne({'_id': insert_data['_id']}, insert_data)
            images_coll.close()


def down_image(file, url):
    """Download *url* to *file*, retrying up to five times.

    :param file: destination path on disk
    :param url: image URL to fetch (via the module-level ``proxies``)
    :return: 1 on success, 0 after five failed attempts (any partial
             file is removed before returning)
    """
    print("开始下载：", url)
    for _ in range(5):
        try:
            response = requests.get(url, proxies=proxies, timeout=10)
            # Treat HTTP error responses as failures instead of saving an
            # error page's HTML body as if it were image bytes.
            response.raise_for_status()
            with open(file, 'wb') as fd:
                fd.write(response.content)
            return 1
        except Exception:
            time.sleep(5)  # back off before the next attempt
    print("下载失败了", url)
    # Remove any partial/garbage file so callers never see a broken image.
    if os.path.exists(file):
        os.remove(file)
    return 0

def main():
    """Collect every keyword not yet crawled on pinterest/freejpg and
    feed the batch to a 5-worker pinterest_crawler."""
    query = {'pinterest': None, 'freejpg': None}
    seed_list = []
    for row in images_keywords.findAll(query):
        print(row)
        seed_list.append(row)
    worker = pinterest_crawler(event_size=5)
    worker.init_seed(seed_list)
    worker.start()
if __name__ == '__main__':
    # Removed a stray debug print() that only emitted a blank line.
    main()



