#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :hippopx.py
# @Time      :2024/2/29 
# @Author    :CL
# @email     :1037654919@qq.com
import os
import time

import imageio
from multiprocessing import Pool
from fanyi import fanyi
import requests
from bs4 import BeautifulSoup
import json
from utils import proxies, mongo_manager

# MongoDB collection handles (project-local wrapper; see utils.mongo_manager).
# hippopx_images: per-image crawl records keyed by the hippopx image id.
hippopx_images = mongo_manager("hippopx_images", db="car_images")
# images_keywords: keyword seeds; a seed's 'hippopx' field marks crawl progress.
images_keywords  =  mongo_manager("images_keywords", db='car_images')

def get_kw_image(kw='cat', pageNo=1):
    """Fetch one page of hippopx search-result HTML for a keyword.

    Args:
        kw: search keyword (the site is queried via its /zh endpoint).
        pageNo: 1-based result page number.

    Returns:
        The raw response HTML as text, or None when the body is empty.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    headers = {
        "authority": "www.hippopx.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "purpose": "prefetch",
        "referer": "https://www.hippopx.com/zh",
        "sec-ch-ua": "\"Not_A Brand\";v=\"8\", \"Chromium\";v=\"120\", \"Google Chrome\";v=\"120\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "no-cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    cookies = {
        "_ga": "GA1.2.199250898.1709189945",
        "_gid": "GA1.2.1961579321.1709189945",
        "_gat": "1",
        "_ga_DXD0KCJXGN": "GS1.2.1709189946.1.0.1709189946.0.0.0",
        "__gads": "ID=2212df6124fd1027:T=1709189947:RT=1709189947:S=ALNI_MYZlWbgDuwJhPap7JFLVvAl_nAVKQ",
        "__gpi": "UID=00000dc4e3f7df47:T=1709189947:RT=1709189947:S=ALNI_Mb1evSZwCU8sO2DGcLMTWq_gLI7sA",
        "__eoi": "ID=8496e1cf3c136ff1:T=1709189947:RT=1709189947:S=AA-Afjasc5apup3uysis9GE0wret",
        "FCNEC": "%5B%5B%22AKsRol95aLt9WTBAOqwDVguuLjfsnYAig_dD97QP8AjDFFhWtxNiE3qgl3jF3E9hanOSUUOSaVmEBp-qjABRmvB5oYRfCA6Put6ZKM7BtXoz92qbKYzpPAUp6UcFH_gYPyQqJqsBRsYsVZGx6D5he0HwIXQlMIddRQ%3D%3D%22%5D%5D"
    }
    url = "https://www.hippopx.com/zh/query"
    params = {
        "q": kw,
        "page": pageNo
    }
    # timeout keeps a stalled request from hanging a pool worker forever.
    # (The old `requests.session().close()` call was removed: it created a
    # brand-new Session and closed it, never touching this request.)
    response = requests.get(url, headers=headers, cookies=cookies, params=params,
                            timeout=30)
    print('response.url', response.url)
    if response.text:
        return response.text
    return None


def down_image(file, url):
    """Download an image URL to a local file, retrying up to 5 times.

    Args:
        file: destination file path.
        url: image URL to download.

    Returns:
        1 on success, 0 after all retries fail (any partial file is removed).
    """
    print("开始下载：", url)
    for _attempt in range(5):
        try:
            response = requests.get(url, proxies=proxies, timeout=10)
            # Without this check an HTTP error page (404/500 HTML) would be
            # saved as the "image" and the call would report success.
            response.raise_for_status()
            with open(file, 'wb') as fd:
                fd.write(response.content)
            return 1
        except Exception:
            # Network/HTTP error: back off briefly, then retry.
            time.sleep(5)
    print("下载失败了", url)
    # Clean up any partially-written file so broken images don't linger.
    if os.path.exists(file):
        os.remove(file)
    return 0


def get_pictrue(seed):
    """Crawl hippopx search results for one keyword seed and store the images.

    For every <figure> on each result page: skip ids already in Mongo,
    download the image, record its size/title as a sidecar JSON file, and
    mark the record as successful. Finally flags the seed as done.

    Args:
        seed: a document from images_keywords with 'keyword', 'keyword_en'
            and '_id' fields (schema per the project's Mongo collections).
    """
    keyword = str(seed['keyword']).replace('图片', '').replace('其他', '')
    trans = seed['keyword_en']
    print(f'kw:{keyword},trans:{trans}')
    path = f'/media/chenglei3/77D014CED257D1411/images/hippopx/{trans}/'
    os.makedirs(path, exist_ok=True)
    # Bug fix: the old loop incremented `page` BEFORE the first fetch, so
    # page 1 was never crawled (it requested pages 2..51). Crawl 1..50.
    for page in range(1, 51):
        response = get_kw_image(trans, page)
        soups = BeautifulSoup(response, 'lxml')
        datas = soups.find_all('figure')
        print('len(datas):', trans, page, len(datas))
        for data in datas:
            # The image id is the last '-' segment of the detail-page href.
            image_id = data.find('a')['href'].split('-')[-1]
            if hippopx_images.exist({"id": image_id}):  # already downloaded
                continue
            try:
                hippopx_images.insertOne({'_id': image_id, 'id': image_id, 'key_word': trans})
            except Exception as e:
                print(e)

            title = data.find('img')['title']
            src = data.find('img')['src']
            ext = src.split('.')[-1]
            image_path = f'{path}{image_id}.{ext}'
            if down_image(file=image_path, url=src) == 0:
                continue  # download failed after retries; skip this image
            img = imageio.v2.imread(image_path)
            height, width = img.shape[0], img.shape[1]
            json_data = {"_id": image_id, 'key_word': trans, "image_url": src, 'size': f'{height},{width}',
                         "label": title, "desc": ''}
            # Explicit utf-8: titles may contain non-ASCII and we dump with
            # ensure_ascii=False, so don't depend on the locale default.
            with open(f'{path}{image_id}.json', 'w', encoding='utf-8') as f:
                json.dump(json_data, f, ensure_ascii=False)
            # NOTE: 'stutus' is a typo, kept as-is — existing DB records
            # were written with this key and queries elsewhere may use it.
            json_data['stutus'] = 'success'
            hippopx_images.updateOne({'_id': json_data['_id']}, json_data)
        if len(datas) < 30:
            break  # short page => last page of results
    seed['hippopx'] = 'done'
    images_keywords.updateOne({'_id': seed['_id']}, seed)
    print(seed)


def main():
    """Collect all un-crawled keyword seeds and fan them out to workers."""
    pending = []
    for seed in images_keywords.findAll({'hippopx': None}):
        pending.append(seed)
        print(seed)
    # Ten worker processes; map() blocks until every seed has been handled.
    pool = Pool(processes=10)
    pool.map(get_pictrue, pending)
    pool.close()  # no more tasks will be submitted
    pool.join()   # wait for the workers to drain


if __name__ == '__main__':
    # Blank line to visually separate the run's output.
    print()
    main()
    # print(get_kw_image())