#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :sucai999.py
# @Time      :2024/2/29 
# @Author    :CL
# @email     :1037654919@qq.com
import json
import os
import imageio
import requests
from bs4 import BeautifulSoup
from retrying import retry

from utils import mongo_manager, proxies

# Project MongoDB collection handles (database 'car_images'):
#   sucai_title_code — category seed records ({_id: category URL, title, href, status})
#   sucai_image      — per-image records keyed by the site's sucaiid
# NOTE(review): mongo_manager is a project wrapper; exact API (insertOne /
# findAll / not_exist / updateOne) inferred from usage below — confirm in utils.
sucai_title_code = mongo_manager("sucai_title_code", db='car_images')
sucai_image = mongo_manager("sucai_image", db='car_images')


# Save sucai999.com category titles and codes into MongoDB.
def save_label_code(url="https://www.sucai999.com/pic.html"):
    """Scrape the category index page and store every tag link.

    Each tag anchor inside a ``div.tag_item_list`` becomes one document
    ``{'title', 'href', '_id'}`` where ``_id`` is the absolute category
    URL, so re-runs are idempotent (duplicate inserts are reported and
    skipped instead of aborting the crawl).

    :param url: category index page to scrape.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "?1",
        "TE": "trailers"
    }
    # Session cookies captured from a logged-in browser session.
    cookies = {
        "f": "",
        "v": "",
        "sid": "",
        "PHPSESSID": "br5nfprm5tu50b7sd42ose4mes",
        "Hm_lvt_e57c2e187cc1668bba7f86d1784c0298": "1709196174",
        "Hm_lpvt_e57c2e187cc1668bba7f86d1784c0298": "1709196187",
        "shareid": "561292"
    }

    response = requests.get(url, headers=headers, cookies=cookies)
    print(url, response)

    soups = BeautifulSoup(response.text, 'lxml')
    for data in soups.find_all('div', class_='tag_item_list'):
        for a in data.find_all('a'):
            href = a.get('href')
            title = a.get('title')
            print(href, title)
            try:
                sucai_title_code.insertOne(
                    {'title': title, 'href': href,
                     '_id': 'https://www.sucai999.com' + href})
            except Exception:  # duplicate _id on re-run — skip, don't abort
                print('重复')
    print('save_label_code success')


def get_kw_image(page=1, url="https://www.sucai999.com/pic/cate/263_380.html"):
    """Fetch one page of a category image listing and return the raw HTML.

    :param page: 1-based page number, sent as the ``page`` query parameter.
    :param url: category listing URL (``/pic/cate/<cat>_<sub>.html``).
    :return: response body text (HTML) for the caller to parse.
    """
    # NOTE: the redundant function-local `import requests` was removed;
    # the module-level import already provides it.
    headers = {
        "authority": "www.sucai999.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "referer": "https://www.sucai999.com/pic/cate/263_380.html",
        "sec-ch-ua": "\"Not_A Brand\";v=\"8\", \"Chromium\";v=\"120\", \"Google Chrome\";v=\"120\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "same-origin",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    # Session cookies captured from a logged-in browser session.
    cookies = {
        "f": "",
        "sid": "",
        "shareid": "561292",
        "PHPSESSID": "br5nfprm5tu50b7sd42ose4mes",
        "Hm_lvt_e57c2e187cc1668bba7f86d1784c0298": "1709195190",
        "v": "",
        "Hm_lpvt_e57c2e187cc1668bba7f86d1784c0298": "1709195497"
    }

    params = {
        "page": page
    }
    response = requests.get(url, headers=headers, cookies=cookies, params=params)

    print(response.url, response)
    return response.text


def test():
    """Manual smoke test.

    Downloads page 1 of the default category into ``sucai/办公人物/``,
    writing each image plus a JSON sidecar ``{'label', 'desc'}`` where
    the labels come from the image's alt text plus the category name.
    """
    response = get_kw_image()
    soups = BeautifulSoup(response, 'lxml')
    datas = soups.find_all('figure')
    print('len(datas):', len(datas))
    path = 'sucai/办公人物/'
    os.makedirs(path, exist_ok=True)
    for data in datas:
        print(data.find('img'))
        title = data.find('img')['alt']
        src = data.find('img')['data-src']
        name = src.split('/')[-1]
        # alt text appears to be a ', '-separated tag list; append the category.
        json_data = {'label': title.split(', ') + ['办公人物'], 'desc': title}
        json_name = name.split('.')[0]
        with open(f'{path}{name}', 'wb') as f:
            f.write(requests.get(src).content)

        # Fix: explicit UTF-8 — with ensure_ascii=False the platform-default
        # encoding could raise UnicodeEncodeError for the Chinese labels.
        with open(f'{path}{json_name}.json', 'w', encoding='utf-8') as f:
            json.dump(json_data, f, ensure_ascii=False)


# Fetch an image detail page and extract its tag string.
def get_image_label(url='https://www.sucai999.com/pic/3350330.html'):
    """Return the comma-separated tag string for one image detail page.

    Parses the last ``div.detail_info`` inside ``div.right_row``; the
    text looks like ``标签：tag1  tag2 ...`` so everything after the
    fullwidth colon is kept and double-space separators become commas.

    :param url: image detail page (``/pic/<sucaiid>.html``).
    :return: tag string, or ``''`` when the page has no tag section.
    """
    headers = {
        "authority": "www.sucai999.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "cache-control": "max-age=0",
        "referer": "https://www.sucai999.com/pic/cate/263_264.html",
        "sec-ch-ua": "\"Not_A Brand\";v=\"8\", \"Chromium\";v=\"120\", \"Google Chrome\";v=\"120\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "same-origin",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    cookies = {
        "f": "",
        "sid": "",
        "shareid": "561292",
        "Hm_lvt_e57c2e187cc1668bba7f86d1784c0298": "1709195190",
        "v": "",
        "PHPSESSID": "batkt5chsf87qmdb84ofd607j1",
        "Hm_lpvt_e57c2e187cc1668bba7f86d1784c0298": "1709273303"
    }
    response = requests.get(url, headers=headers, cookies=cookies)
    label_str = ''
    soups = BeautifulSoup(response.text, 'lxml')
    try:
        data = soups.find('div', class_='right_row').findAll('div', class_='detail_info')[-1]
        label_str = str(data.getText()).split('：')[-1].replace('  ', ',').replace('*', '').strip()
    except (AttributeError, IndexError):
        # find() returned None or no detail_info divs — page has no tags.
        # (Was a bare `except:` which also hid real bugs and KeyboardInterrupt.)
        print('无标签')
    print(url, response, label_str)
    return label_str


def main():
    """Crawl every pending category: for each image, save the file and a
    JSON sidecar ``{_id, image_url, size, label, desc}``, and track
    progress in MongoDB.

    Seeds are ``sucai_title_code`` documents with no ``status``; each
    fully crawled category is marked ``status='done'`` so the run can be
    resumed after an interruption.
    """
    seeds = sucai_title_code.findAll({'status': None})
    for seed in seeds:
        # href looks like /pic/cate/<cat>_<sub>.html; sub-code '0' is the
        # parent category page whose images duplicate its children — skip it.
        code2 = str(seed['href']).split('_')[-1].split('.')[0]
        if code2 == '0':
            continue
        print(seed)
        page = 1
        url = seed['_id']
        catelog = seed['title']
        # External drive — the home partition is too small for the images.
        # (Removed the dead first assignment to the /home/... path that was
        # immediately overwritten in the original.)
        path = f'/media/chenglei3/77D014CED257D1411/images/sucai/{catelog}/'
        os.makedirs(path, exist_ok=True)

        while True:
            if page >= 100:  # temporary hard cap on pages per category
                break
            response = get_kw_image(page, url)
            soups = BeautifulSoup(response, 'lxml')
            datas = soups.find_all('figure')
            print('len(datas):', len(datas))
            if len(datas) == 0:  # past the last page — category finished
                break
            page += 1
            for data in datas:
                sucaiid = data.find('a')['data-sucaiid']
                if sucai_image.not_exist({'_id': sucaiid}):
                    title = data.find('img')['alt']
                    src = data.find('img')['data-src']
                    href = f'https://www.sucai999.com/pic/{sucaiid}.html'
                    label = get_image_label(url=href)
                    if label == '':
                        label = title  # fall back to the alt text
                    print(title, src)
                    name = src.split('/')[-1]
                    reslut = {'_id': sucaiid, 'url': href, 'image_url': src, 'catelog': catelog,
                              'label': label, 'desc': title}
                    json_name = name.split('.')[0]
                    try:
                        sucai_image.insertOne(reslut)
                    except Exception:  # e.g. duplicate _id — keep crawling
                        print('更新失败')
                    # Download only when the file is not already on disk.
                    if not os.path.exists(f'{path}{name}'):
                        with open(f'{path}{name}', 'wb') as f:
                            f.write(requests.get(src).content)
                    else:
                        print(f"图片 {name} 已存在于 {path}")
                    # Read the saved image back to record its dimensions.
                    image_path = f'{path}{name}'
                    img = imageio.v2.imread(image_path)
                    height, width = img.shape[0], img.shape[1]
                    json_data = {'_id': sucaiid, 'image_url': src, 'size': f'{height},{width}',
                                 'label': label, 'desc': title}

                    # Fix: explicit UTF-8 — with ensure_ascii=False the
                    # platform-default encoding could fail on Chinese labels.
                    with open(f'{path}{json_name}.json', 'w', encoding='utf-8') as f:
                        json.dump(json_data, f, ensure_ascii=False)

                    sucai_image.updateOne({'_id': reslut['_id']}, {'status': 'done', 'size': f'{height},{width}'})

        sucai_title_code.updateOne({'_id': url}, {'status': 'done'})  # category finished


if __name__ == '__main__':
    print()
    # save_label_code()  # one-time: seed the category titles/codes
    # # sucai_title_code.updateMany({'status': 'done'}, {'status': None})  # reset statuses to re-crawl
    # # quick connectivity check: fetch one listing page and show the first <figure>
    response = get_kw_image()
    soups = BeautifulSoup(response, 'lxml')
    datas = soups.find_all('figure')
    print(datas[0])
    # # end of connectivity check
    main()
