#!/usr/bin/env python  
# -*- coding: utf-8 -*-  
"""  
@author: ઈ一笑ഒ 
@time: 2024/7/19 18:07  
@file: car-paqu.py  
@project: vue-router.js  
@describe: TODO  
"""
import pymysql
import requests
from bs4 import BeautifulSoup
import csv
import random

# MySQL connection settings for the local analysis database.
# NOTE(review): credentials are hardcoded — consider moving them to
# environment variables or a config file before deploying.
db_config = {
    'host': 'localhost',
    'port': 3306,
    'user': 'root',
    'password': '123456',
    'database': 'mysite_db',
    'charset': 'utf8mb4',
}

# Create a module-level MySQL connection and cursor, shared by save_to_mysql().
conn = pymysql.connect(**db_config)
cursor = conn.cursor()


def get_cars():
    """Scrape the yoojia.com car sales ranking and store the top 15 in MySQL.

    Fetches the ranking page through a random proxy from get_proxy(), parses
    each car's name, price, nationwide sales count and image URL, and hands
    every record to save_to_mysql(). Network failures are printed rather than
    raised so the script exits cleanly.
    """
    url = 'https://www.yoojia.com/rank/1-0-0-0-0-0.html'

    # Browser-like headers; the Cookie value was captured from a real session
    # and will eventually expire — refresh it if requests start failing.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Cookie': 'CITY=%7B%22code%22%3A%22131%22%2C%22name%22%3A%22%E5%8C%97%E4%BA%AC%22%7D; BAIDUID=9D23C137298086D96666A51D14C84F7A:FG=1; STOKEN=62c3048312b479c300fb7b599b6d363b0931128aa534671734504a0642746aa7; CITY=%7B%22code%22%3A%22131%22%2C%22name%22%3A%22%E5%8C%97%E4%BA%AC%22%7D; BDUSS=jhHMkEtaWZtaEFDVS1ucThtTThWaTloRHVwTnhhZUNyNjNmeVMzUTZmMTNOTkJtRUFBQUFBJCQAAAAAAQAAAAEAAAC9-X1DAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHenqGZ3p6hme; Hm_lvt_3d2ca9e65ec4a450b97f705740dc51b5=1722239863,1722306350,1722391117,1722408916; HMACCOUNT=F1CFED5070C2FCDC; Hm_lvt_74dc9c641a3e6ae783a2ad1b67e90643=1722239863,1722306351,1722391117,1722408916; Hm_lpvt_3d2ca9e65ec4a450b97f705740dc51b5=1722409489; Hm_lpvt_74dc9c641a3e6ae783a2ad1b67e90643=1722409489',
    }

    try:
        proxy = get_proxy(5)
        # The target URL is https, so the proxy must be registered for the
        # https scheme as well — the original http-only mapping meant the
        # proxy was silently bypassed for this request.
        proxies = {"http": proxy, "https": proxy}
        response = requests.get(url, headers=headers, proxies=proxies, timeout=15)
        # response.raise_for_status()  # uncomment to fail fast on HTTP errors

        soup = BeautifulSoup(response.text, 'html.parser')
        # One <a class="ranks-item"> per ranked car (class name may need
        # adjusting if the site markup changes).
        car_list = soup.find_all('a', class_="ranks-item")

        for i, car in enumerate(car_list, start=1):
            # Only the top 15 entries are stored; stop before doing any work
            # on entry 16 (the original parsed and printed it needlessly).
            if i > 15:
                break

            name_tag = car.find('span', class_="info-name")
            # Strip a trailing "[...]" qualifier from the display name.
            carname = name_tag.get_text(strip=True).split('[')[0] if name_tag else ''

            price_tag = car.find('div', class_="info-price")
            price = price_tag.get_text(strip=True) if price_tag else ''

            # Nationwide sales count lives in the second <span> of the
            # "car-pop" popup. Default to '' so a missing popup cannot leave
            # the variable unbound or reuse the previous car's value — the
            # original code had exactly that bug.
            sales_number = ''
            numbers = car.find('div', class_='car-pop', attrs={'data-v-703f5eae': ''})
            if numbers:
                spans = numbers.find_all('span', attrs={'data-v-703f5eae': ''})
                if len(spans) > 1:
                    sales_number = spans[1].get_text(strip=True)
                    print(sales_number)
                else:
                    print("未找到全国销量的数字")

            # The top-3 cards carry an extra rank-badge <img> first, so the
            # car photo is the second <img>; later cards have the photo as
            # the first <img>. Default to '' so a missing tag cannot leave
            # `image` unbound (NameError in the original on rank 1-3 misses).
            image = ''
            if i <= 3:
                img_tags = car.find_all('img', attrs={'data-v-703f5eae': ''})
                if len(img_tags) > 1:
                    image = img_tags[1]['src']
                else:
                    print("未找到足够的<img>标签")
            else:
                img_tag = car.find('img')
                image = img_tag['src'] if img_tag else ''
            print(image)

            # Assemble the record and persist it.
            message = {
                'name': carname,
                'price': price,
                'numbers': sales_number,
                'images': image,
            }
            save_to_mysql(message)

    except requests.RequestException as e:
        print(f"Error occurred: {e}")
def save_to_mysql(result):
    """Insert one scraped car record into the analysis_car_analysis table.

    Expects ``result`` to carry the keys 'name', 'price', 'numbers' and
    'images'. Commits on success; on any failure the record and the error
    are printed (best effort — the caller keeps going). Uses the
    module-level ``cursor`` and ``conn``.
    """
    table = 'analysis_car_analysis'
    sql = "INSERT INTO {} (name,price,numbers,images) VALUES (%s, %s, %s, %s)".format(table)
    row = (result['name'], result['price'], result['numbers'], result['images'])
    try:
        print("sql语句为:  " + sql)
        cursor.execute(sql, row)
        conn.commit()
        print('存储到MySQL成功: ', result)
    except Exception as e:
        print('存储到MYsql出错: ', result, e)


# 获取代理地址
def get_proxy(pages):
    """Harvest free HTTP proxies from 89ip.cn and return one at random.

    Args:
        pages: number of listing pages to scrape (pages 1..pages).

    Returns:
        A proxy URL string of the form "http://ip:port".

    Raises:
        IndexError: if no proxy rows could be scraped at all.
    """
    ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
    headers = {"User-Agent": ua}
    proxy_ips = []
    for page in range(1, pages + 1):
        # BUG FIX: the original string was missing the f-prefix, so every
        # iteration requested the literal ".../index_{page}.html" instead of
        # the actual page number.
        url = f"https://www.89ip.cn/index_{page}.html"
        res = requests.get(url, headers=headers, timeout=15)
        soup = BeautifulSoup(res.text, "html.parser")
        # The first table with class "layui-table" holds the proxy list.
        table = soup.find_all(class_="layui-table")[0]
        rows = table.find_all("tr")
        # Skip the header row; each data row is [ip, port, ...] columns.
        for row in rows[1:]:
            cells = row.find_all("td")  # hoisted: was queried twice per row
            ip = cells[0].text.strip()
            port = cells[1].text.strip()
            proxy_ips.append(f"http://{ip}:{port}")
    return random.choice(proxy_ips)


# Entry point: run the scraper only when executed directly as a script.
if __name__ == '__main__':
    get_cars()
