#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :test.py
# @Time      :2024/8/27 
# @Author    :CL
# @email     :1037654919@qq.com
import os
import time

import requests
from bs4 import BeautifulSoup
import logging

from retrying import retry
# NOTE(review): this points at the author's local proxy — change it for your
# environment. The script should also run without a proxy, but then you must
# watch the request rate to avoid being blocked.
proxies = {
    'http': 'http://127.0.0.1:15732',
    'https': 'http://127.0.0.1:15732'
}
# Configure the logging module: each scraped picture href is appended to
# log.txt (is_message_in_log reads this file back to detect duplicates).
logging.basicConfig(
    filename='log.txt',       # log file name
    level=logging.INFO,       # log level
    format='%(asctime)s - %(levelname)s - %(message)s',  # log line format
    filemode='a'              # open the file in append mode
)
# Browser-like request headers copied from a real Chrome session so the
# site serves the normal HTML pages.
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "priority": "u=0, i",
    "referer": "https://car.autohome.com.cn/pic/series-s67423/7650-1.html",
    "sec-ch-ua": "\"Chromium\";v=\"124\", \"Google Chrome\";v=\"124\", \"Not-A.Brand\";v=\"99\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\"",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
}
# Session cookies captured from a browser visit to car.autohome.com.cn.
# NOTE(review): these expire — refresh them from your own browser session
# if requests start getting redirected or blocked.
cookies = {
    "fvlid": "1720686841109aIKiddsFrR",
    "sessionid": "465747BE-A98F-46F4-8768-2BAA47F8E22B%7C%7C2024-07-11+16%3A34%3A01.496%7C%7C0",
    "autoid": "067970021ff472e86851025828df8fc0",
    "__ah_uuid_ng": "c_465747BE-A98F-46F4-8768-2BAA47F8E22B",
    "sessionuid": "465747BE-A98F-46F4-8768-2BAA47F8E22B%7C%7C2024-07-11+16%3A34%3A01.496%7C%7C0",
    "historyseries": "6453%2C6139%2C7010%2C692",
    "ahsids": "5264_7546_6700_6453_6139_7010",
    "Hm_lvt_9924a05a5a75caf05dbbfb51af638b07": "1723016387,1724742532",
    "HMACCOUNT": "8A7B5B515FA69801",
    "_ac": "9c7d04fafc56305c.1724742531",
    "sessionip": "122.225.146.110",
    "sessionvid": "A6D005DA-4A62-4A58-8A9E-345E18810CCB",
    "area": "330299",
    "ahpvno": "8",
    "pvidchain": "2042220,2042220,2042220,2042220,2042220,2042220,2042220,2042220",
    "v_no": "8",
    "visit_info_ad": "465747BE-A98F-46F4-8768-2BAA47F8E22B||A6D005DA-4A62-4A58-8A9E-345E18810CCB||-1||-1||8",
    "Hm_lpvt_9924a05a5a75caf05dbbfb51af638b07": "1724742720",
    "ref": "www.google.com.hk%7C0%7C0%7Cwww.baidu.com%7C2024-08-27+15%3A12%3A02.394%7C2024-07-31+15%3A14%3A29.070",
    "ahrlid": "1724742719387onVBT3KwXi-1724742987109"
}
# Picture-category name -> category id used in the autohome picture URLs
# (e.g. "车身外观" = exterior, "官图" = official press photos).
types = {
    "车身外观": "1",
    "中控方向盘": "10",
    "车厢座椅": "3",
    "其它细节": "12",
    "官图": "53",
    "重要特点": "14"
}
# Paint/interior color name -> color id used in the per-color listing URLs.
colors ={
  "晨雾灰": "12319",
  "流光银": "12322",
  "晚樱粉": "12323",
  "曙光白": "12318",
  "天青绿": "12320",
  "霞光红": "12321",
  "黑色/云初白": "i4219",
  "黑色/薄暮蓝": "i4217",
  "黑色/霓光虹": "i4218"
}
def get_image_html():
    """Yield ``(detail_href, title, typename, colorname)`` for every picture
    thumbnail of series 67423 / spec 7650 on car.autohome.com.cn.

    Categories '官图' and '重要特点' have a single color-independent listing
    page, so they are fetched once from the series-level URL and the color
    loop is abandoned for them (``break``). All other categories are paged
    per color, up to 8 pages each.

    NOTE: the crawl is not crash-proof — if a run is interrupted, resume by
    skipping the (type, color) pairs that already finished (e.g. with a
    ``continue`` at the top of the loop, or the commented-out
    ``is_message_in_log`` check in ``__main__``).
    """
    # NOTE: "type" renamed to type_id — the original shadowed the builtin.
    for typename, type_id in types.items():
        for colorname, color_id in colors.items():
            if typename == '官图' or typename == '重要特点':
                # Spec 7650 is the Galaxy E5. Single listing page for the
                # whole category, not split by color.
                url = f'https://car.autohome.com.cn/pic/series-s67423/7650-{type_id}.html#pvareaid=2042220'
                # time.sleep(10)  # throttle here if you run without a proxy
                response = requests.get(url, headers=headers, cookies=cookies, proxies=proxies)
                print(response.url, response)
                soup = BeautifulSoup(response.text, 'html.parser')
                datas = soup.find('div', class_='uibox-con carpic-list03 border-b-solid').find_all('li')
                print(len(datas))

                for li in datas:
                    logging.info(li.find('a')['href'])
                    yield (li.find('a')['href'], li.find('a')['title'], typename, colorname)
                break  # color-independent category: do not repeat per color
            for page in range(1, 8+1):  # 8 = highest page count observed per category
                url  = f'https://car.autohome.com.cn/pic/spec-67423-{color_id}-{type_id}-{page}.html#pvareaid=2042220'
                print(url)
                # Up to 5 attempts per listing page; response stays None if
                # every attempt fails (replaces the old 0/1 "flag" sentinel).
                response = None
                for __ in range(5):
                    try:
                        response = requests.get(url, headers=headers, cookies=cookies, proxies=proxies)
                        break
                    except requests.RequestException:
                        # Was a bare "except:" — that also swallowed
                        # KeyboardInterrupt/SystemExit. Only retry on
                        # network-level errors.
                        time.sleep(10)
                if response is None:
                    break  # all retries failed: give up on this (type, color)
                print(response.url, response)
                # The site redirects non-existent page numbers back to page 1,
                # so a changed URL means we ran past the last page.
                if url != response.url:
                    break
                soup = BeautifulSoup(response.text, 'html.parser')
                datas = soup.find('div', class_='uibox-con carpic-list03 border-b-solid').find_all('li')
                print(len(datas))

                for li in datas:
                    logging.info(li.find('a')['href'])
                    yield (li.find('a')['href'], li.find('a')['title'], typename, colorname)

                if len(datas) <= 60:
                    break  # a partially filled page is the last one
# 检查日志文件中是否已经包含该信息
def is_message_in_log(message):
    """Return True if *message* already occurs anywhere in log.txt.

    A missing log file counts as "not logged yet" and yields False.
    """
    try:
        with open('log.txt', 'r') as log_file:
            logged = log_file.read()
    except FileNotFoundError:
        return False
    return message in logged
@retry(stop_max_attempt_number=3)
def download_pic(url,path):
    """Download the image at *url* and write its bytes to *path*.

    The ``retrying`` decorator re-runs the whole body up to 3 times on any
    exception. ``raise_for_status`` fixes a real defect: previously a 4xx/5xx
    response's HTML body was silently saved to disk as a ".jpg", and the
    retry never triggered for HTTP errors. The timeout prevents one hung
    connection from stalling the whole crawl indefinitely.
    """
    res = requests.get(url=url, timeout=60)
    res.raise_for_status()  # turn HTTP errors into exceptions so @retry fires
    with open(path, 'wb') as f:
        f.write(res.content)
if __name__ == '__main__':
    for info in get_image_html():
        href, title, typename, colorname = info
        # To resume an interrupted run, uncomment this duplicate check
        # (skips hrefs that were already written to log.txt):
        # if is_message_in_log(href):
        #     print('跳过', href)
        #     continue
        url = 'https://car.autohome.com.cn' + href
        # Up to five attempts per detail page; stop as soon as one succeeds.
        for __ in range(5):
            try:
                response = requests.get(url, headers=headers, cookies=cookies, proxies=proxies)
                print(response.url, response)
                soup = BeautifulSoup(response.text, 'html.parser')
                img = soup.find('div', class_='pic').find('img')
                # File name = trailing numeric id of the detail-page href.
                name = f"{href.split('.html')[0].split('-')[-1].split('/')[-1]}.jpg"
                # Directory layout: <title>/<category>/<color>/
                path = f"{title}/{typename}/{colorname}/"
                os.makedirs(path, exist_ok=True)
                print('https:' + img['src'])
                download_pic(url='https:' + img['src'], path=path + name)
                break
            except Exception as e:
                # Best-effort: report and wait, then retry this page.
                print(e)
                time.sleep(10)