# -*- coding:utf-8 -*-
"""

@author: liandyao
@date: 2025/1/7 13:50
"""
import random
import time

import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urlunparse


import pymysql


# Step 3: persist scraped rows into MySQL.
# Database connection settings.
# NOTE(review): credentials are hard-coded in source — consider loading them
# from environment variables or a config file instead.
db_config = {
    'host': '127.0.0.1',
    'user': 'root',
    'password': '123',
    'database': 'mymusic'
}

# Connect to MySQL at import time (module-level side effect: the script
# fails immediately if the database is unreachable).
connection = pymysql.connect(**db_config)
cursor = connection.cursor()

# Create the target table if it does not already exist.
# NOTE(review): the DDL's table comment says '对联表' (couplet table) but the
# table stores image URLs/titles — looks like copy-paste residue; confirm.
cursor.execute('''  
    CREATE TABLE IF NOT EXISTS dati_beautiful_girl (  
        id BIGINT NOT NULL AUTO_INCREMENT COMMENT '主键',  
        bg_url VARCHAR(100) COMMENT '网址',  
        bg_title VARCHAR(100) COMMENT '标题',    
        remark VARCHAR(255) COMMENT '备注',  
        create_time DATETIME COMMENT '创建时间',  
        sort BIGINT DEFAULT 99 COMMENT '排序',   
        tags VARCHAR(255) COMMENT '标签分类',  
        exp VARCHAR(255) COMMENT '扩展字段',  
        PRIMARY KEY (id)  
    ) CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='对联表'  
''')
def insertDb(couplets):
    """Insert scraped image rows into ``dati_beautiful_girl``.

    Each element of *couplets* is a ``(url, title, tags)`` tuple.
    Commits once after all rows are inserted; a no-op for an empty list.
    Relies on the module-level ``cursor`` and ``connection``.
    """
    if not couplets:
        # Nothing to insert — skip the round-trip and the empty commit.
        return
    # executemany batches all rows through the driver instead of issuing
    # one execute() per row (the original looped over execute()).
    cursor.executemany('''  
        INSERT INTO dati_beautiful_girl (bg_url, bg_title,tags)  
        VALUES (%s, %s, %s)  
    ''', couplets)
    connection.commit()




# Browser-like User-Agent so the site serves the scraper like a normal client.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}

def clean_url(url):
    """Return *url* with the query string and fragment removed.

    Keeps scheme, host, path and path params; drops ``?query`` and
    ``#fragment`` so the same image always maps to one canonical URL.
    """
    parts = urlparse(url)
    # Named "parts", not "clean_url": the original shadowed the function's
    # own name with a local variable.
    return urlunparse((parts.scheme, parts.netloc, parts.path, parts.params, '', ''))


def scrape_images(page_number, tags):
    """Scrape one listing page and return ``[(img_url, alt_title, tags), ...]``.

    Raises ``requests.HTTPError`` on a non-2xx response. Sleeps 3-6 s after
    each request to throttle the crawl. Returns ``[]`` when the page has no
    image-card container.
    """
    url = f'https://www.chichi-pui.com/photo/posts/tags/%E7%BE%8E%E5%A5%B3/?p={page_number}'
    # timeout so a stalled connection cannot hang the whole crawl forever
    # (the original had no timeout at all).
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()  # fail loudly on HTTP errors
    time.sleep(random.randint(3, 6))  # polite random delay between fetches
    print(f"正在爬取第{page_number}页...")
    soup = BeautifulSoup(response.text, 'html.parser')
    image_cards = soup.find('div', class_='p-image-cards')
    if not image_cards:
        print(f"没有找到图片卡片 (page {page_number})")
        return []

    images = []
    for item in image_cards.find_all('div', class_='p-image-cards-with-like__item'):
        img_tag = item.find('img')
        # Guard on src: an <img> without a src attribute would have raised
        # KeyError in the original img_tag['src'] lookup.
        if img_tag and img_tag.get('src'):
            img_src = clean_url(img_tag['src'])  # canonicalize (strip query/fragment)
            img_alt = img_tag.get('alt', '')     # alt doubles as the title
            images.append((img_src, img_alt, tags))

    return images


def scrape_all_pages(total_pages):
    """Crawl listing pages 1..*total_pages* and persist each page's images.

    Progress for each page is already printed inside ``scrape_images``;
    the original printed the same message a second time here, so that
    duplicate log line is removed.
    """
    tags = '日本chichi-pui'  # fixed source tag stored with every row
    for page_number in range(1, total_pages + 1):
        images = scrape_images(page_number, tags)
        insertDb(images)



def main():
    """Entry point: crawl a fixed number of listing pages and store results."""
    # Total pages to crawl — tune this to the site's current page count.
    pages_to_crawl = 600
    scrape_all_pages(pages_to_crawl)



# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()