import openpyxl
import requests
import parsel
from openpyxl.drawing.image import Image


def download_image(image_url, save_path):
    response = requests.get(image_url, stream=True)
    if response.status_code == 200:
        with open(save_path, 'wb') as f:
            for chunk in response.iter_content(1024):
                f.write(chunk)


def crawl_images_to_excel(url, excel_file, sheet_name):
    # Load the Excel workbook or create a new one if not exists
    try:
        wb = openpyxl.load_workbook(excel_file)
    except FileNotFoundError:
        wb = openpyxl.Workbook()

    # Select the sheet by name
    sheet = wb[sheet_name]

    # Send HTTP request and parse the HTML content
    response = requests.get(url)
    if response.status_code != 200:
        print(f"Failed to crawl images from {url}")
        return

    soup = BeautifulSoup(response.content, 'html.parser')

    # Find all image tags in the HTML content
    image_tags = soup.find_all('img')

    # Find all image titles (assuming they are in a specific tag, e.g., <h2>)
    title_tags = soup.find_all('h2')  # Replace 'h2' with the actual tag containing image titles

    # Insert images and titles into Excel
    for idx, img_tag in enumerate(image_tags):
        image_url = img_tag.get('src')
        title = title_tags[idx].text if idx < len(title_tags) else f"Image {idx + 1}"

        if not image_url:
            continue

        # Download the image to a local file
        image_filename = f"image_{idx}.jpg"
        download_image(image_url, image_filename)

        # Add the title and image to the worksheet
        cell_to_insert = f"A{idx + 1}"
        sheet[cell_to_insert] = title
        img = Image(image_filename)
        sheet.add_image(img, f"B{idx + 1}")

        # Remove the downloaded image file (optional)
        # Comment out this line if you want to keep the images locally
        import os
        os.remove(image_filename)

    # Save the workbook
    wb.save(excel_file)


def qianku_img():
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    }
    # div.clearfix.data-list.dataList.V-maronyV1.Vmarony
    url = 'https://588ku.com/so/23431/?h=bd&sem=1&kw=bd0104344&bd_vid=11386679338203132945'
    # 获取数据
    response = requests.get(url, headers=headers)
    data_html = response.text
    # 解析数据
    select = parsel.Selector(data_html)
    divs = select.css('div.clearfix.data-list.dataList>div')
    for div in divs:
        img_url = div.css('img.lazy::attr(src)').get()
        print(img_url)
    # print(response)


if __name__ == '__main__':
    qianku_img()

# Usage example:
# website_url = "https://example.com"  # Replace with the target website URL
# excel_file = "path/to/your/excel.xlsx"
# sheet_name = "Sheet1"
#
# crawl_images_to_excel(website_url, excel_file, sheet_name)
