'''

@date 2024年11月20日
@author liandyao
抖音号: liandyao
'''
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import re
import pymysql


# Step 3: persist scraped data to MySQL
# Database connection settings
# NOTE(review): credentials are hard-coded — consider env vars/config; verify
db_config = {
    'host': '127.0.0.1',
    'user': 'root',
    'password': '123',
    'database': 'mymusic'
}

# Connect to MySQL (module-level side effect: runs at import time)
connection = pymysql.connect(**db_config)
cursor = connection.cursor()

# Create the target table if it does not exist yet
cursor.execute('''  
    CREATE TABLE IF NOT EXISTS dati_beautiful_girl (  
        id BIGINT NOT NULL AUTO_INCREMENT COMMENT '主键',  
        bg_url VARCHAR(100) COMMENT '网址',  
        bg_title VARCHAR(100) COMMENT '标题',    
        remark VARCHAR(255) COMMENT '备注',  
        create_time DATETIME COMMENT '创建时间',  
        sort BIGINT DEFAULT 99 COMMENT '排序',   
        tags VARCHAR(255) COMMENT '标签分类',  
        exp VARCHAR(255) COMMENT '扩展字段',  
        PRIMARY KEY (id)  
    ) CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='对联表'  
''')
def insertDb(couplets):
    """Insert scraped records into the dati_beautiful_girl table.

    :param couplets: iterable of dicts, each with keys 'url', 'title'
                     and 'tags' (as produced by ``getUrl``)

    Uses the module-level ``cursor``/``connection`` and commits once
    after all rows are queued.
    """
    # Build the parameter tuples up front so a single executemany call
    # can batch all inserts instead of one round-trip per row.
    rows = [(i['url'], i['title'], i['tags']) for i in couplets]
    cursor.executemany('''  
        INSERT INTO dati_beautiful_girl (bg_url, bg_title,tags)  
        VALUES (%s, %s, %s)  
    ''', rows)
    connection.commit()



# Site root, prepended to relative background-image paths found in styles
base_path = 'http://www.vopox.net'
def getUrl(url, tags):
    """Scrape image URLs and titles from one category page.

    Opens *url* in Chrome, scrolls to the bottom repeatedly so that
    lazily-loaded items render, then extracts each item's
    ``background-image`` URL and its title.

    :param url: category page to open
    :param tags: tag string attached to every scraped item
    :return: list of dicts with keys 'url', 'title', 'tags'
    """
    # Requires chromedriver installed and available on PATH.
    driver = webdriver.Chrome()
    try:
        driver.get(url)
        time.sleep(5)  # wait for the initial page load

        total_articles = set()  # set de-duplicates elements across scroll passes
        for _ in range(10):
            articles = driver.find_elements(
                By.CSS_SELECTOR, ".post-item-list.post.picture.scl")
            total_articles.update(articles)
            print(f"当前获取到的 articles 数量: {len(total_articles)}")

            # Scroll to the bottom to trigger lazy loading, then wait.
            driver.execute_script(
                "window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(5)

        print(f"总共获取到的 articles 数量: {len(total_articles)}")

        urls = []
        # BUG FIX: iterate the full de-duplicated set, not just the last
        # batch returned by find_elements (the original dropped most items).
        for article in total_articles:
            a_tags = article.find_elements(By.TAG_NAME, "a")
            if not a_tags:  # guard: article without any <a> tag
                continue
            style_attr = a_tags[0].get_attribute("style")
            item = {
                "tags": tags
            }
            if style_attr and "background-image" in style_attr:
                # Pull the URL out of style="background-image: url(...)"
                match = re.search(r'url\((.*?)\)', style_attr)
                if match:
                    img_url = match.group(1).replace('"', '')
                    # Only keep relative paths; absolute http(s) URLs are
                    # external and were skipped by the original logic too.
                    if img_url.strip() and not img_url.strip().startswith('http'):
                        item['url'] = base_path + img_url
                        item['title'] = article.find_element(
                            By.CSS_SELECTOR, ".grid-title.gdz").text
                        urls.append(item)

        print("Found URLs:")
        for found in urls:
            print(found)
        return urls
    finally:
        # Always release the browser, even if scrolling/extraction raises
        # (the original leaked the driver on errors during scrolling).
        driver.quit()

def main():
    """Scrape every configured category page and persist results to MySQL.

    Iterates the hard-coded category list, scrapes each with ``getUrl``
    and stores the rows via ``insertDb``. The DB cursor/connection are
    always closed, even on early return or error.
    """
    items = [

        {
            "url": "http://www.vopox.net/siwa/",
            "tags": "丝袜"
        },
        {
            "url": "http://www.vopox.net/qingchun/",
            "tags": "清纯"
        },
        {
            "url": "http://www.vopox.net/mote/",
            "tags": "模特"
        },
        {
            "url": "http://www.vopox.net/yishu/",
            "tags": "艺术"
        },
        {
            "url": "http://www.vopox.net/tstx/",
            "tags": "街拍"
        },
    ]
    try:
        for item in items:
            urls = getUrl(item['url'], item['tags'])
            if urls is None:
                print("未找到符合条件的 URLs。")
                return
            insertDb(urls)
            print("数据存储成功！", item['tags'])
    finally:
        # BUG FIX: the original skipped these on the early return above
        # (and on any exception), leaking the MySQL connection.
        cursor.close()
        connection.close()

# Script entry point: only run the scraper when executed directly.
if __name__ == '__main__':
    main()