#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :cc0_image_util.py
# @Time      :2024/10/31 
# @Author    :CL
# @email     :1037654919@qq.com
# Scraper for CC0-licensed images from https://cc0.cn/image
import os

import requests
from bs4 import BeautifulSoup
from retrying import retry


# Scraper for CC0-licensed images from https://cc0.cn/image
class CC0Image:
    """Collects category labels, tags and picture metadata from cc0.cn.

    Every public method is a generator: it yields tuples parsed from the
    site's HTML, and yields nothing when the request fails or the expected
    markup is missing (instead of crashing with AttributeError).
    """

    # Seconds to wait for each HTTP response.  The original code passed no
    # timeout, so a stalled connection would hang the scraper forever.
    REQUEST_TIMEOUT = 15

    def __init__(self):
        # Browser-like headers copied from a real Chrome session so the
        # site serves the normal HTML pages.
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Referer": "https://cc0.cn/image/ziran/",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
            "sec-ch-ua": "\"Google Chrome\";v=\"129\", \"Not=A?Brand\";v=\"8\", \"Chromium\";v=\"129\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Linux\""
        }
        # Session cookies captured from a browser visit.
        # NOTE(review): these are hard-coded snapshots and will expire;
        # refresh them if the site starts rejecting requests.
        self.cookies = {
            "Hm_lvt_fe455db2d13268de6c3385ee3e23b818": "1730364492",
            "HMACCOUNT": "8A7B5B515FA69801",
            "Hm_lvt_4bf62225c36b73a10df5a8644b66ee11": "1730364528",
            "__gads": "ID=203e48d361693b6e:T=1730364508:RT=1730365977:S=ALNI_MZ2LQefppQkah9iGLVpkezvoegC6w",
            "__gpi": "UID=00000f47b2942032:T=1730364508:RT=1730365977:S=ALNI_MYNX5pVVMLOoLeKuZ3tX7dv-KY8TQ",
            "__eoi": "ID=4e160c4ae0a320a3:T=1730364508:RT=1730365977:S=AA-AfjbExYgCLcDFi8wS4hg4lwUS",
            "Hm_lpvt_4bf62225c36b73a10df5a8644b66ee11": "1730366009",
            "Hm_lpvt_fe455db2d13268de6c3385ee3e23b818": "1730366046"
        }

    def _get_soup(self, url):
        """GET *url* and return the parsed page, or None on a non-200 reply.

        Shared fetch/parse boilerplate extracted from the three GET
        methods below.
        """
        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                timeout=self.REQUEST_TIMEOUT)
        print(response.url, response)
        response.encoding = 'utf-8'
        if response.status_code == 200:
            return BeautifulSoup(response.text, 'html.parser')
        return None

    def get_label(self, url="https://cc0.cn/image/"):
        """Yield (href, label_text) for each top-level image category."""
        soup = self._get_soup(url)
        if soup is None:
            return
        menu = soup.find('ul', class_='sub-menu clearfix')
        if menu is None:  # markup changed or block page served — yield nothing
            return
        for item in menu.findAll('li'):
            yield (item.a['href'], item.a.text)

    def get_tags(self, url='https://cc0.cn/image/shiwu/'):
        """Yield (title, href, text) for each tag link on a category page."""
        soup = self._get_soup(url)
        if soup is None:
            return
        subnav = soup.find('div', class_='subnav clearfix')
        if subnav is None:
            return
        for item in subnav.findAll('a'):
            yield (item['title'], item['href'], item.text)

    def get_pictrue(self, url="https://cc0.cn/tags/kafei/index_2.html"):
        """Yield (img_src, detail_href, text) for each picture on a tag page.

        NOTE(review): method name keeps the original 'pictrue' typo for
        backward compatibility with existing callers.
        """
        soup = self._get_soup(url)
        if soup is None:
            return
        wall = soup.find('div', class_='masonry')
        if wall is None:
            return
        for item in wall.findAll('article'):
            yield (item.find('img')['src'], item.find('a')['href'], item.text)

    # Keyword-based collection.  cc0.cn generates a searchid from the search
    # term via a POST; the id has not been found in the response yet.  The
    # actual result page looks like https://cc0.cn/e/search/result/?searchid=27958
    def post_pictrue_by_keyword(self, keyword='办公室'):
        """POST a keyword search and yield (img_src, detail_href, text) results."""
        url = "https://cc0.cn/e/search/index.php"
        data = {
            "tempid": "1",
            "show": "title",
            "tbname": "news",
            "keyboard": keyword,
            "Submit22": ""
        }
        response = requests.post(url, headers=self.headers, cookies=self.cookies,
                                 data=data, timeout=self.REQUEST_TIMEOUT)
        print(response.url, response)
        response.encoding = 'utf-8'
        if response.status_code != 200:
            return
        soup = BeautifulSoup(response.text, 'html.parser')
        wall = soup.find('div', class_='masonry')
        if wall is None:
            return
        for item in wall.findAll('article'):
            yield (item.find('img')['src'], item.find('a')['href'], item.text)
@retry(stop_max_attempt_number=3)
def download_pic(url, path):
    """Download *url* and write the bytes to *path*, retrying up to 3 times.

    raise_for_status() makes a 4xx/5xx reply raise requests.HTTPError so
    the @retry decorator actually retries — the original silently wrote
    HTML error pages to disk and never triggered a retry.  The timeout
    prevents a stalled connection from hanging a retry attempt forever.
    """
    res = requests.get(url=url, timeout=30)
    res.raise_for_status()
    with open(path, 'wb') as f:
        f.write(res.content)
# TODO: add a database module to track downloads and skip files already fetched

if __name__ == '__main__':
    cc0 = CC0Image()
    # for label in cc0.get_label():
    #     print(label)
    #     for tag in cc0.get_tags(label[0]):
    #         print(tag)
    keyword = '办公室'
    # Destination directory is loop-invariant, so create it once up front
    # (the original re-ran makedirs on every iteration).
    save_dir = f'/media/chenglei3/77D014CED257D1411/images/cc0/{keyword}/'
    os.makedirs(save_dir, exist_ok=True)
    for picture in cc0.post_pictrue_by_keyword(keyword=keyword):
        print(picture)
        # Thumbnail src lacks a scheme and ends in w500.jpg; swap the suffix
        # to fetch the full-resolution original.
        url = 'https:' + picture[0].replace('w500.jpg', 'cc0.cn.jpg')
        download_pic(url=url, path=os.path.join(save_dir, url.split('/')[-1]))