import boto3
import os
import requests
import subprocess
# Module-level boto3 session used for S3 uploads.
# SECURITY FIX: credentials were previously hardcoded in this file (a leaked
# key pair that must be rotated). They are now read from the environment; if
# the variables are unset, boto3 falls back to its default credential chain
# (env vars, ~/.aws/credentials, instance profile / IAM role).
SESSION = boto3.Session(
    aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
    aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"),
    region_name=os.environ.get("AWS_REGION", "ap-northeast-2"),  # Optional
)
async def extract_images_from_pdf_url(session, pdf_url: str, output_folder: str = "cache_image"):
    """
    Extract all images from the PDF at *pdf_url* using ``pdfimages``,
    upload them to Amazon S3, and return the list of image URLs.

    NOTE(review): although declared ``async``, every call here (requests,
    subprocess, boto3) is blocking — run it in an executor if the event
    loop must not stall.

    :param session: boto3 Session used to create the S3 client
    :param pdf_url: URL of the PDF file
    :param output_folder: folder where extracted images are written
    :return: list of S3 URLs of the uploaded images
    :raises requests.HTTPError: if the PDF download fails
    :raises subprocess.CalledProcessError: if pdfimages exits non-zero
    """
    # Base name for the extracted images, taken from the URL's file name
    # (e.g. ".../report.pdf" -> "report"); pdfimages appends "-NNN.png".
    file_name = pdf_url.split("/")[-1].split(".")[0]

    # exist_ok avoids the check-then-create race of the old exists()+makedirs().
    os.makedirs(output_folder, exist_ok=True)

    # Download the PDF. Fail loudly on HTTP errors instead of writing an
    # error page to disk, and bound the request time so we never hang forever.
    response = requests.get(pdf_url, timeout=60)
    response.raise_for_status()
    pdf_file = os.path.join(output_folder, "temp.pdf")
    with open(pdf_file, "wb") as f:
        f.write(response.content)

    # Extract images as PNGs named "<file_name>-NNN.png"; check=True turns a
    # pdfimages failure into an exception instead of silently uploading nothing.
    subprocess.run(
        ["pdfimages", "-png", pdf_file, os.path.join(output_folder, file_name)],
        check=True,
    )

    # Remove the temporary PDF before scanning the folder for images.
    os.remove(pdf_file)

    s3_client = session.client('s3')
    bucket_name = 'bx-saas-bucket'
    region_name = 'ap-northeast-2'  # e.g., 'us-west-1'

    image_urls = []

    # Upload only the files produced by THIS run (prefix match on file_name)
    # rather than everything that isn't "temp.pdf" — otherwise leftovers from
    # earlier runs or other PDFs would be re-uploaded and deleted.
    for image_file in os.listdir(output_folder):
        if image_file.startswith(file_name):
            image_path = os.path.join(output_folder, image_file)
            s3_client.upload_file(image_path, bucket_name, image_file)  # , ExtraArgs={'ACL': 'public-read'})

            image_url = f"https://{bucket_name}.s3-{region_name}.amazonaws.com/{image_file}"
            image_urls.append(image_url)

            os.remove(image_path)  # Remove the file after upload

    return image_urls