#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :freejpg.py
# @Time      :2024/3/12 
# @Author    :CL
# @email     :1037654919@qq.com
import json
import os
import re
import time

import imageio
import requests
from bs4 import BeautifulSoup
from utils import proxies, mongo_manager

# MongoDB collection handles (project-local wrapper from utils.mongo_manager).
freejpg_images = mongo_manager("freejpg_images", db="car_images")  # one record per downloaded image
freejpg_categories = mongo_manager("freejpg_categories", db='car_images')  # one record per listing page crawled
images_keywords =  mongo_manager("images_keywords", db='car_images')  # keyword work queue consumed by main2()
# Browser-like request headers for en.freejpg.com.ar (copied from a real Chrome session).
headers = {
    "authority": "en.freejpg.com.ar",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "referer": "https://en.freejpg.com.ar/free/images/?criterio=beauty",
    "sec-ch-ua": "\"Not_A Brand\";v=\"8\", \"Chromium\";v=\"120\", \"Google Chrome\";v=\"120\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\"",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}
# Session cookies captured from a browser session.
# NOTE(review): these look session/time-bound (e.g. panoramaId_expiry) — presumably
# they expire and need refreshing when the site starts blocking; confirm.
cookies = {
    "_ga": "GA1.3.1559610457.1710234399",
    "_gid": "GA1.3.163045312.1710234399",
    "_hjSessionUser_2255515": "eyJpZCI6IjgyODEzYWM5LTRjYTctNWEyZC05YzZmLWI2ZDI4YzFjOTBjNSIsImNyZWF0ZWQiOjE3MTAyMzQ0MTgzNDcsImV4aXN0aW5nIjp0cnVlfQ==",
    "_cc_id": "cbcb686e7404620566aaab7522a34b2d",
    "panoramaId_expiry": "1710320957557",
    "panoramaId": "4ccf35a3485262c1b137dee537e5a9fb927aba4a9c9e615fa38767dfa6d311a7",
    "panoramaIdType": "panoDevice",
    "_hjSession_2255515": "eyJpZCI6ImIyMzEwZGY0LWFhMWItNGNkOS04MzA2LWIyODViMjAyODMwMiIsImMiOjE3MTAyOTM5MTYyNDEsInMiOjAsInIiOjAsInNiIjowLCJzciI6MCwic2UiOjAsImZzIjowLCJzcCI6MX0=",
    "_gat": "1",
    "_ga_1EQTQ7WH8Z": "GS1.3.1710293917.2.1.1710294160.60.0.0",
    "free": "1",
    "__gads": "ID=511e09ec3278d353:T=1710234554:RT=1710294161:S=ALNI_MaIbu9Nsz0IOBubn5ZRwapt6k0WOg",
    "__gpi": "UID=00000dc742727ac2:T=1710234554:RT=1710294161:S=ALNI_MYPYYYtgGD2cphWGFVDX0RO4m6CeA",
    "__eoi": "ID=42ff4812b1e5692a:T=1710234554:RT=1710294161:S=AA-AfjbO5VlevkEn727e8-3LuNXT"
}


def freejpg_Categories(url="https://en.freejpg.com.ar/"):
    """Scrape the site's navigation dropdown and return category names.

    Each returned entry is the first whitespace- or slash-delimited word of
    a category link's text (e.g. "animals").
    """
    response = requests.get(url, headers=headers, cookies=cookies, proxies=proxies)
    print(response.url, response)
    soup = BeautifulSoup(response.text, "lxml")

    category_items = soup.find("li", class_="dropdown").find_all('li')
    names = []
    for item in category_items:
        text = item.text
        link = item.find('a')['href']
        print(text, link)
        # Keep only the leading word of the category label.
        names.append(re.split(r'\s|/', text.strip())[0])
    return names

# Get image-page links for a keyword. Known issue: the site's access limits
# are very strict; not solved yet.
def get_images_url(url="https://en.freejpg.com.ar/free/images/animals", params=None):
    """Return the list of image-detail-page URLs on one listing page.

    ``url`` is a category/keyword listing page; ``params`` carries the page
    number as ``{"pag": "N"}`` (defaults to page 1). The referer is forged to
    look like natural pagination from the previous page.
    """
    if params is None:
        params = {
            "pag": "1"
        }
    page = int(params['pag'])
    if page == 1:
        referer = "https://en.freejpg.com.ar/free/images/"
    else:
        # BUG fix: the site paginates with ``pag`` (see params above), but the
        # referer was built with ``?p=`` — make it consistent.
        referer = f"{url}?pag={page-1}"
    headers = {
        "authority": "en.freejpg.com.ar",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "cache-control": "no-cache",
        "pragma": "no-cache",
        "referer": referer,
        "sec-ch-ua": "\"Not_A Brand\";v=\"8\", \"Chromium\";v=\"120\", \"Google Chrome\";v=\"120\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "same-origin",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    # Fresh cookie set (shadows the module-level one) captured from a later
    # browser session; includes the bot-mitigation ``dmd-*`` cookies.
    cookies = {
        "_ga": "GA1.3.1559610457.1710234399",
        "_gid": "GA1.3.163045312.1710234399",
        "_hjSessionUser_2255515": "eyJpZCI6IjgyODEzYWM5LTRjYTctNWEyZC05YzZmLWI2ZDI4YzFjOTBjNSIsImNyZWF0ZWQiOjE3MTAyMzQ0MTgzNDcsImV4aXN0aW5nIjp0cnVlfQ==",
        "_cc_id": "cbcb686e7404620566aaab7522a34b2d",
        "panoramaId_expiry": "1710320957557",
        "panoramaId": "4ccf35a3485262c1b137dee537e5a9fb927aba4a9c9e615fa38767dfa6d311a7",
        "panoramaIdType": "panoDevice",
        "_hjSession_2255515": "eyJpZCI6ImIyMzEwZGY0LWFhMWItNGNkOS04MzA2LWIyODViMjAyODMwMiIsImMiOjE3MTAyOTM5MTYyNDEsInMiOjAsInIiOjAsInNiIjowLCJzciI6MCwic2UiOjAsImZzIjowLCJzcCI6MX0=",
        "free": "1",
        "MCPopupClosed": "yes",
        "__gads": "ID=511e09ec3278d353:T=1710234554:RT=1710298904:S=ALNI_MaIbu9Nsz0IOBubn5ZRwapt6k0WOg",
        "__gpi": "UID=00000dc742727ac2:T=1710234554:RT=1710298904:S=ALNI_MYPYYYtgGD2cphWGFVDX0RO4m6CeA",
        "__eoi": "ID=42ff4812b1e5692a:T=1710234554:RT=1710298904:S=AA-AfjbO5VlevkEn727e8-3LuNXT",
        "_sess": "cac9b189-c71f-45ce-bc73-aaa50d6ea9b3.ca11b897-678d-481a-b5f0-fbd51ec2aac3.1710298904.1",
        "dmd-vid": "ca11b897-678d-481a-b5f0-fbd51ec2aac3",
        "dmd-sid": "cac9b189-c71f-45ce-bc73-aaa50d6ea9b3",
        "dmd-ahk": "10b2b2bd31",
        "dmd-signal-194-3051-2EAEFDBB-cac9b189-c71f-45ce-bc73-aaa50d6ea9b3": "e30=",
        "_ga_1EQTQ7WH8Z": "GS1.3.1710298906.3.1.1710298911.55.0.0"
    }
    # verify=False: TLS verification deliberately disabled (likely because of
    # the proxy); this is insecure — keep only for scraping through the proxy.
    response = requests.get(url, headers=headers, cookies=cookies, params=params, verify=False, proxies=proxies)
    print(response.url, response)
    soups = BeautifulSoup(response.text, 'lxml')
    # Last <a> in the grid is not an image link, so drop it.
    datas = soups.find('div', class_="rownew33").find_all('a')[:-1]
    lists = [a['href'] for a in datas]
    return lists


def down_image(file, url):  # download one image
    """Download ``url`` to path ``file`` with up to 5 retries.

    Returns 1 on success, 0 on failure; on final failure any partially
    written file is removed.
    """
    print("开始下载：", url)
    for i in range(5):
        try:
            response = requests.get(url, proxies=proxies, timeout=10)
            # BUG fix: without this, an HTTP error page (404/403/...) was
            # written to disk and reported as a successful download.
            response.raise_for_status()
            with open(file, 'wb') as fd:
                fd.write(response.content)
            # (Removed ``requests.session().close()`` — it created a brand-new
            # session and closed it, a no-op.)
            return 1
        except Exception as e:
            # i + 1 so the retry counter reads 1..5 instead of 0..4.
            print(e, f"下载失败，重新下载,第{i + 1}次", url)
            time.sleep(5)
    print("下载失败了", url)
    if os.path.exists(file):
        os.remove(file)
    return 0


# Fetch one image's info: download link, title and labels; download the file
# and persist the metadata.
def get_data(path = '/media/chenglei3/77D014CED257D1411/images/freejpg/test/',url='https://en.freejpg.com.ar/free/info/100038301/cows-in-the-countryside'):
    """Scrape an image detail page, download the image, write a sidecar JSON
    and mark the image as done in Mongo.

    ``path`` is the keyword directory (trailing slash expected); ``url`` is
    the detail page whose second-to-last path segment is the image id.
    """
    response = requests.get(url, headers=headers, cookies=cookies, proxies=proxies)
    print(response.url, response)
    soups = BeautifulSoup(response.text, 'lxml')
    data = soups.find('div', class_="col-md-9")
    href = data.find('img', src=True)['src']
    title = data.find('h1').text
    label = data.find('h4').text
    suffix = href.split('.')[-1]
    image_id = str(url).split('/')[-2]
    result = down_image(file=f'{path}{image_id}.{suffix}', url=href)
    if result == 0:
        return  # download failed after retries; leave no record
    image_path = f'{path}{image_id}.{suffix}'
    img = imageio.v2.imread(image_path)
    # Image dimensions from the decoded pixel array.
    height, width = img.shape[0], img.shape[1]
    json_data = {"id":image_id.split('.')[0] , 'key_word': path.split('/')[-2], "image_url": href, 'size': f'{height},{width}',
                 "label": label, "desc": title}
    # Explicit UTF-8: the JSON is written with ensure_ascii=False.
    with open(f'{path}{image_id}.json', 'w', encoding='utf-8') as f:
        json.dump(json_data, f, ensure_ascii=False)
    # BUG fix: was misspelled 'stutus', so the {'status': 'done'} check in
    # run_keyword never matched and images were re-downloaded every run.
    json_data['status'] = 'done'
    json_data['url'] = url
    freejpg_images.updateOne({'url': json_data['url']}, json_data)

#  Crawl a keyword's listing pages one by one and fetch every image on them.
def run_keyword(keyword = 'beauty'):
    """Crawl up to 100 listing pages for ``keyword``, downloading each image
    not yet marked done. Stops early when a page has fewer than 32 results
    (i.e. the last page)."""
    url = f'https://en.freejpg.com.ar/free/images/{keyword}'
    page = 1
    path = f'/media/chenglei3/77D014CED257D1411/images/freejpg/{keyword}/'
    os.makedirs(path, exist_ok=True)
    while page <= 100:
        params = {
            "pag": str(page)
        }
        urlp = f'{url}?pag={page}'
        print(f'正在爬取{keyword} 第{page}页')
        if freejpg_categories.not_exist({'url': urlp}):
            try:
                images_url = get_images_url(url=url, params=params)
                freejpg_categories.insertOne({'_id': urlp, 'url': urlp, 'images_url': images_url})
                for i in images_url:
                    try:
                        # BUG fix: was ``seed["category"]`` — an undefined name
                        # here whose NameError the bare except silently ate, so
                        # this insert never ran. Use the current keyword.
                        freejpg_images.insertOne(
                            {'_id': i, 'id': str(i).split('/')[-2], 'url': i, 'key_word': keyword})
                    except Exception:
                        pass  # duplicate _id: image already recorded
                    if freejpg_images.not_exist({'url': i, 'status': 'done'}):
                        get_data(path=path, url=i)

                if len(images_url) < 32:
                    break  # short page => last page for this keyword
            except Exception as e:
                # Best-effort: log and move on to the next page.
                print(e)
        page += 1
        # break  # test
# Fetch all categories.
def main1():  # save freejpg_categories
    """Scrape and print the category list (Mongo insertion is disabled)."""
    categories = freejpg_Categories()
    print(categories)
    # for cat in categories:
    #     if cat:
    #         url = 'https://en.freejpg.com.ar/free/images/' + str(cat)
    #         try:
    #             freejpg_categories.insertOne({'_id': url, 'url': url, 'category': cat})
    #         except Exception as e:
    #             print(e)


def main2():
    """Drain the keyword queue: claim each unprocessed keyword, crawl it,
    then mark it done (multi-word keywords are marked invalid)."""
    while True:
        # Atomically claim the next unprocessed keyword.
        seed = images_keywords.find_one_and_update({'freejpg': None}, {'freejpg': 'running'})
        if not seed:
            break  # queue exhausted
        keyword = seed['keyword_en']
        if len(keyword.split()) > 1:
            # The site's URL scheme takes a single word; skip and flag.
            print(seed,'关键词无效')
            images_keywords.updateOne({'_id': seed['_id']}, {'freejpg': 'invalid'})
            continue
        run_keyword(keyword=keyword)
        seed['freejpg']= 'done'
        images_keywords.updateOne({'_id': seed['_id']}, seed)


if __name__ == '__main__':
    # NOTE(review): stray empty print() — looks like leftover debug output.
    print()
    main1()
