import datetime
import os
import random
import re
from urllib.parse import urlparse

import requests
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait






def get_img(url):
    """Open *url* in Chrome, wait for the shop-section element, and return
    every protocol-relative ``//img.*.jpeg`` URL found in its outer HTML.

    Returns an empty list when the element does not appear within 15 s
    (previously this called ``exit()``, killing the whole process from a
    helper function). The browser is always closed, even on error.
    """
    # Requires a chromedriver matching the installed Chrome version.
    driver = webdriver.Chrome()
    try:
        driver.get(url)

        # Explicit wait: the section is rendered dynamically, so presence
        # of the (site-fixed) ID is the readiness signal.
        try:
            element = WebDriverWait(driver, 15).until(
                EC.presence_of_element_located(
                    (By.ID, "shoplaza-section-1539149755240")
                )
            )
        except TimeoutException:
            print("元素加载超时")
            return []

        # Scrape protocol-relative .jpeg URLs out of the element's HTML.
        html = element.get_attribute("outerHTML")
        matches = re.findall(r'//img\..+?\.jpeg', html, flags=re.IGNORECASE)
        print(matches)
        return matches
    finally:
        # try/finally guarantees the browser process is not leaked.
        driver.quit()



# --- Build a unique per-run download directory: <cwd>/<YYYYMMDD>_<4-digit random> ---

# Current working directory is the parent of the download folder.
current_directory = os.getcwd()

# Date stamp, e.g. "20240131".
current_date = datetime.datetime.now().strftime("%Y%m%d")

# Four-digit random suffix to keep same-day runs from colliding.
random_number = random.randint(1000, 9999)

# Folder name and full path.
folder_name = f"{current_date}_{random_number}"
download_folder = os.path.join(current_directory, folder_name)

# exist_ok=True makes creation idempotent; the original code performed the
# same exists-check-and-create twice in a row.
os.makedirs(download_folder, exist_ok=True)

# Both messages are printed, matching the original's observable output
# (the folder always exists by the time the second check ran).
print(f"创建的文件夹路径为：{download_folder}")
print(f"使用已有目录：{download_folder}")


def download_images(url_list):
    """Download every .jpeg URL in *url_list* into the module-level
    ``download_folder``.

    Each URL is normalized to https, streamed to disk in 8 KiB chunks,
    and failures are reported per-item without aborting the batch.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    total = len(url_list)  # hoisted out of the loop

    for index, url in enumerate(url_list, 1):
        try:
            # Normalize scheme: '//host/...' gets 'https:', bare hosts 'https://'.
            if not url.startswith(('http://', 'https://')):
                full_url = f'https:{url}' if url.startswith('//') else f'https://{url}'
            else:
                full_url = url

            # Derive a filename from the URL path. urlparse already strips
            # the query string, so no '?' handling is needed here.
            parsed_url = urlparse(full_url)
            filename = os.path.basename(parsed_url.path)
            if not filename:
                # Path ended in '/'; fall back to a positional name.
                filename = f'image_{index}.jpeg'
            if not filename.lower().endswith('.jpeg'):
                filename += '.jpeg'

            # 'with' closes the streamed response/connection even on error
            # (the original leaked it).
            with requests.get(full_url,
                              headers=headers,
                              timeout=10,
                              stream=True) as response:
                response.raise_for_status()  # surface HTTP errors

                # Stream to disk in chunks to bound memory use.
                save_path = os.path.join(download_folder, filename)
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)

            print(f"已下载 [{index}/{total}]：{save_path}")

        except Exception as e:
            # Best-effort batch: report and continue with the next URL.
            print(f"下载失败 [{index}] {url} | 错误：{str(e)}")



if __name__ == '__main__':
    # Entry point: scrape image URLs from the collection page, then
    # download them into the run's download folder.
    target_url = "https://www.fromsparcoo.com/collections/exercise-mens-shorts?spm=..collection_19c6cebd-c551-4748-9018-fa1327a776dd.header_1.1&spm_prev=..index.header_1.1"
    image_urls = get_img(target_url)
    download_images(image_urls)



# # 方式二：提取内部图片链接（兼容动态生成属性）
# if element:
#     images = element.find_elements(By.TAG_NAME, "img")
#     for img in images:
#         # 优先获取实际内容地址（应对懒加载）
#         img_url = img.get_attribute("data-src") or img.get_attribute("src")
#         print("图片地址:", img_url)

# 4. 关闭浏览器
# driver.quit()
def getimg(url, pages=4):
    """Scrape and download images from *pages* consecutive paginated views
    of *url* (pages 1..pages inclusive).

    Backward compatible: defaults to 4 pages, as the original hard-coded.
    The page parameter is appended with '&' when *url* already has a query
    string (the original's only supported case) and '?' otherwise.
    """
    # Pick the correct separator instead of blindly appending '&'.
    sep = '&' if '?' in url else '?'
    for page in range(1, pages + 1):
        paginated_url = f"{url}{sep}page={page}"
        print(f"正在处理：{paginated_url}")
        download_images(get_img(paginated_url))