#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :sucai999.py
# @Time      :2024/2/29 
# @Author    :CL
# @email     :1037654919@qq.com
import json
import os
import imageio
import requests
from bs4 import BeautifulSoup
from pypinyin import lazy_pinyin
from retrying import retry

from util import mongo_manager, proxies

# MongoDB collection handles (from project util.mongo_manager):
# sucai_title_code stores category labels/URLs; sucai_image stores per-image records.
sucai_title_code = mongo_manager("sucai_title_code", db='car_images')
sucai_image = mongo_manager("sucai_image", db='car_images')


def get_pinyin(kw):
    """Return *kw* transliterated to a single toneless-pinyin string.

    Uses lazy_pinyin, which yields pinyin without tone marks; if tones
    are needed, use pinyin(kw, style=Style.TONE) instead.
    """
    return ''.join(lazy_pinyin(kw))
class SuCai999():
    """Scraper client for sucai999.com.

    Bundles the browser-like headers/cookies every request needs and
    exposes helpers to: collect category labels, fetch a category's
    paginated image listing, read an image's tag string, and fetch a
    keyword-search listing.
    """

    # Seconds to wait for any single HTTP request before aborting,
    # so a stalled connection cannot hang the crawl forever.
    REQUEST_TIMEOUT = 30

    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none",
            "Sec-Fetch-User": "?1",
            "TE": "trailers"
        }
        self.cookies = {
            "f": "",
            "v": "",
            "sid": "",
            "PHPSESSID": "br5nfprm5tu50b7sd42ose4mes",
            "Hm_lvt_e57c2e187cc1668bba7f86d1784c0298": "1709196174",
            "Hm_lpvt_e57c2e187cc1668bba7f86d1784c0298": "1709196187",
            "shareid": "561292"
        }

    def save_label_code(self, url="https://www.sucai999.com/pic.html"):
        """Collect the site's category labels (tag links) into sucai_title_code.

        Each stored document uses the absolute category URL as ``_id`` so
        re-runs naturally deduplicate via the unique-key insert failure.
        """
        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                timeout=self.REQUEST_TIMEOUT)

        print(url, response)
        soups = BeautifulSoup(response.text, 'lxml')
        datas = soups.find_all('div', class_='tag_item_list')
        for data in datas:
            aa = data.find_all('a')
            for a in aa:
                href = a.get('href')
                title = a.get('title')
                print(href, title)
                try:
                    sucai_title_code.insertOne({'title': title, 'href': href, '_id': 'https://www.sucai999.com' + href})
                except Exception:  # duplicate _id: label already collected
                    print('重复')
        print('save_label_code success')

    def get_label_image(self, page=1, url="https://www.sucai999.com/pic/cate/263_380.html"):
        """Fetch one page of a category's image listing.

        :param page: 1-based page number, passed as the ``page`` query param.
        :param url: category listing URL (example: office pictures).
        :return: raw HTML text of the listing page.
        """
        params = {
            "page": page
        }
        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                params=params, timeout=self.REQUEST_TIMEOUT)

        print(response.url, response)
        return response.text

    def get_image_label(self, url='https://www.sucai999.com/pic/3350330.html'):
        """Return the tag string of a single image detail page.

        Parses the last ``detail_info`` row, takes the text after the
        full-width colon and normalises separators to commas. Returns ''
        when the page has no tags (or the markup differs).
        """
        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                timeout=self.REQUEST_TIMEOUT)
        label_str = ''
        soups = BeautifulSoup(response.text, 'lxml')
        try:
            data = soups.find('div', class_='right_row').findAll('div', class_='detail_info')[-1]
            label_str = str(data.getText()).split('：')[-1].replace('  ', ',').replace('*', '').strip()
        except Exception:  # missing container / empty list => page has no tags
            print('无标签')
        print(url, response, label_str)
        return label_str

    def get_kw_image(self, url="https://www.sucai999.com/searchlist/laoren.html"):
        """Fetch a keyword search-result listing page and return its HTML."""
        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                timeout=self.REQUEST_TIMEOUT)

        print(response.url, response)
        return response.text
# Shared scraper instance used by test(), main() and main2().
sucai = SuCai999()

def test():  # smoke test
    """Download one page of the default category into ./sucai/办公人物/.

    For every <figure> in the listing, saves the image file plus a JSON
    sidecar holding its labels and description.
    """
    response = sucai.get_label_image()
    soups = BeautifulSoup(response, 'lxml')
    datas = soups.find_all('figure')
    print(datas[0])
    print('len(datas):', len(datas))
    path = 'sucai/办公人物/'
    os.makedirs(path, exist_ok=True)
    for data in datas:
        print(data.find('img'))
        title = data.find('img')['alt']
        src = data.find('img')['data-src']
        name = src.split('/')[-1]
        json_data = {'label': title.split(', ') + ['办公人物'], 'desc': title}
        json_name = name.split('.')[0]
        # timeout so a single stalled download cannot hang the run
        with open(f'{path}{name}', 'wb') as f:
            f.write(requests.get(src, timeout=30).content)

        # utf-8 so the Chinese labels (ensure_ascii=False) write portably
        with open(f'{path}{json_name}.json', 'w', encoding='utf-8') as f:
            json.dump(json_data, f, ensure_ascii=False)
# Site-wide crawl by category: walk every stored category URL and collect all images.
def main():
    """Crawl every unprocessed category recorded in sucai_title_code.

    For each category, pages through the listing; each new image is
    downloaded to disk, recorded in the sucai_image collection, and given
    a JSON sidecar with its size, labels and description. Categories and
    images are marked ``status: done`` as they complete.
    """
    seeds = sucai_title_code.findAll({'status': None})
    for seed in seeds:
        # The trailing number in the href distinguishes sub-categories;
        # '0' marks a top-level category whose images repeat in its children.
        code2 = str(seed['href']).split('_')[-1].split('.')[0]
        if code2 == '0':  # skip top-level categories to avoid duplicates
            continue
        print(seed)
        page = 1
        url = seed['_id']
        catelog = seed['title']
        path = f'/media/chenglei3/77D014CED257D1411/images/sucai/{catelog}/'

        os.makedirs(path, exist_ok=True)
        while True:
            if page >= 100:  # temporary safety cap on pages per category
                break
            response = sucai.get_label_image(page, url)
            soups = BeautifulSoup(response, 'lxml')
            datas = soups.find_all('figure')
            print('len(datas):', len(datas))
            if len(datas) == 0:  # empty listing => walked past the last page
                break
            page += 1
            for data in datas:
                sucaiid = data.find('a')['data-sucaiid']
                if sucai_image.not_exist({'_id': sucaiid}):
                    title = data.find('img')['alt']
                    src = data.find('img')['data-src']
                    href = f'https://www.sucai999.com/pic/{sucaiid}.html'
                    label = sucai.get_image_label(url=href)
                    if label == '':
                        label = title  # fall back to alt text when the page has no tags
                    print(title, src)
                    name = src.split('/')[-1]
                    result = {'_id': sucaiid, 'url': href, 'image_url': src, 'catelog': catelog,
                              'label': label, 'desc': title}
                    json_name = name.split('.')[0]
                    try:
                        sucai_image.insertOne(result)
                    except Exception:  # duplicate _id or transient DB failure
                        print('更新失败')
                    # Download only when the image file is not already on disk.
                    if not os.path.exists(f'{path}{name}'):
                        with open(f'{path}{name}', 'wb') as f:
                            f.write(requests.get(src, timeout=30).content)
                    else:
                        print(f"图片 {name} 已存在于 {path}")
                    # Read the saved file back to record its pixel dimensions.
                    image_path = f'{path}{name}'
                    img = imageio.v2.imread(image_path)
                    height, width = img.shape[0], img.shape[1]
                    json_data = {'_id': sucaiid, 'image_url': src, 'size': f'{height},{width}',
                                 'label': label, 'desc': title}

                    # utf-8 so Chinese labels (ensure_ascii=False) round-trip on any platform
                    with open(f'{path}{json_name}.json', 'w', encoding='utf-8') as f:
                        json.dump(json_data, f, ensure_ascii=False)

                    sucai_image.updateOne({'_id': result['_id']}, {'status': 'done', 'size': f'{height},{width}'})

        sucai_title_code.updateOne({'_id': url}, {'status': 'done'})  # mark category finished

# Keyword-based crawl via the site's search listing.
def main2():
    """Crawl search-result images for each keyword in kw_list.

    The search URL is built from the keyword's pinyin. Every JPG result
    is downloaded and a JSON sidecar with its labels is written next to
    it (the sidecar was previously built but never saved — fixed).
    """
    kw_list = ['老人']

    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "cache-control": "no-cache",
        "pragma": "no-cache",
        "priority": "u=0, i",
        "referer": "https://globalimg.sucai999.com/preimg/8AA05E/700/8AA05E/103/6e66bae29ad64f92b821d5a99b1bf9b.jpg?x-oss-process=image/resize,w_320/format,webp",
        "sec-ch-ua": "\"Google Chrome\";v=\"129\", \"Not=A?Brand\";v=\"8\", \"Chromium\";v=\"129\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "same-origin",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
    }
    cookies = {
        "Hm_lvt_e57c2e187cc1668bba7f86d1784c0298": "1730098052,1730185237,1730859020",
        "HMACCOUNT": "8A7B5B515FA69801",
        "Hm_lpvt_e57c2e187cc1668bba7f86d1784c0298": "1730875644"
    }
    for kw in kw_list:
        print(kw)
        kw_pinyin = get_pinyin(kw)
        print(kw_pinyin)
        pages = 1  # number of pages to crawl; 40 images per page
        path = f'/media/chenglei3/77D014CED257D1411/images/sucai/{kw}/'
        os.makedirs(path, exist_ok=True)
        for page in range(1, pages + 1):
            # URL construction logic — adjust the template as needed:
            # base:        https://www.sucai999.com/searchlist/laoren.html
            # page 2:      https://www.sucai999.com/searchlist/laoren-2.html
            # photos:      https://www.sucai999.com/searchlist/laoren-%E6%91%84%E5%BD%B1%E5%9B%BE---all-0-0.html
            # photos p.2:  https://www.sucai999.com/searchlist/laoren-%E6%91%84%E5%BD%B1%E5%9B%BE---all-0-0-2.html
            # png:         https://www.sucai999.com/searchlist/laoren-%E6%91%84%E5%BD%B1%E5%9B%BE-png--all-0-0.html
            url = f'https://www.sucai999.com/searchlist/{kw_pinyin}-%E6%91%84%E5%BD%B1%E5%9B%BE-jpg--all-0-0-{page}.html'
            data = sucai.get_kw_image(url)
            soups = BeautifulSoup(data, 'lxml')
            datas = soups.find_all('figure')
            print('len(datas):', len(datas))
            for data in datas:
                print(data.find('img'))
                title = data.find('img')['alt']
                src = data.find('img')['data-src']
                # drop the query string before taking the file name
                name = src.split('?')[0].split('/')[-1]
                json_data = {'label': title.split(', ') + [kw], 'desc': title}
                json_name = name.split('.')[0]
                res = requests.get(src, headers=headers, cookies=cookies, timeout=30)
                with open(f'{path}{name}', 'wb') as f:
                    f.write(res.content)
                # write the label sidecar (originally built but never saved)
                with open(f'{path}{json_name}.json', 'w', encoding='utf-8') as f:
                    json.dump(json_data, f, ensure_ascii=False)
            if len(datas) < 40:  # short page => last page of results
                break
if __name__ == '__main__':
    print()

    # sucai.save_label_code()  # save category labels; run once first on a fresh database
    # sucai_title_code.updateMany({'status': 'done'}, {'status': None})  # reset crawl status to re-collect
    # NOTE(review): the site appears to have gone commercial — scraping may no longer work
    main() # 采集网站分类采集图片
    main2() # 采集关键词图片