from weasyprint import HTML
import sys,os,io
import requests
from PIL import Image
from io import BytesIO
from bs4 import BeautifulSoup
from urllib.parse import urljoin

def get_image_width(url):
    """Download the image at *url* and return its PIL size tuple.

    Note: despite the name, this returns ``(width, height)``, not just
    the width — callers index into the tuple.

    Args:
        url: Absolute URL of the image.

    Returns:
        Tuple ``(width, height)`` in pixels.

    Raises:
        requests.HTTPError: if the server answers with an error status.
        PIL.UnidentifiedImageError: if the payload is not a valid image.
    """
    # Bound the request so one dead link cannot hang the whole run, and
    # fail fast on HTTP errors instead of handing an error page to PIL.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    with Image.open(BytesIO(response.content)) as img:
        return img.size

def get_html_snippet(url, start_id, end_id):
    """Fetch *url* and return a standalone HTML document containing the
    content between the elements with ids *start_id* and *end_id*.

    Image tags are rewritten to absolute URLs; portrait images are given
    explicit width/height so they fit a 595x855 pt page.

    Args:
        url: Page to fetch.
        start_id: id attribute of the element where extraction starts.
        end_id: id attribute of the element where extraction stops.

    Returns:
        String of a minimal ``<html><body>...</body></html>`` document.

    Raises:
        ValueError: if the start tag is missing, or the end tag is
            missing for any URL other than the local-copy special case.
    """
    # Fetch the whole page.
    response = requests.get(url)
    # Special case: ebook 10 (KJV Bible) is parsed from a local copy
    # instead of the fetched response.
    if url == 'https://www.gutenberg.org/cache/epub/10/pg10-images.html':
        soup = BeautifulSoup(open("/home/lighthouse/pg10-images.html","r").read().strip(), 'html.parser')
    else:
        soup = BeautifulSoup(response.text, 'html.parser')

    for img in soup.find_all('img'):
        # An <img> without a src cannot be resolved or measured; skip it.
        # (The original unconditionally read img['src'] below and raised
        # KeyError on such tags.)
        if 'src' not in img.attrs:
            continue
        # Make the URL absolute so WeasyPrint can fetch it later.
        img['src'] = urljoin(url, img['src'])
        # Measure the image; if it is unreachable or corrupt, leave it
        # unsized rather than aborting the whole conversion.
        try:
            current_width, current_height = get_image_width(img['src'])
        except Exception:
            img['style'] = "display: flex; justify-content: center; align-items: center;"
            continue
        # Only portrait images (taller than wide) are scaled down.
        if current_width / current_height < 1.0:
            # Fit within 595x855 pt, never upscaling past 1.0.
            scale = min(1.0, 595.0 / current_width, 855.0 / current_height)
            new_width = int(current_width * scale)
            new_height = int(current_height * scale)
            img['width'] = new_width
            img['height'] = new_height
            img['style'] = f"max-width: {new_width}px; max-height: {new_height}px; width: auto; height: auto; display: flex; justify-content: center; align-items: center;"
        else: img['style'] = "display: flex; justify-content: center; align-items: center;"
    start_tag = soup.find(id=start_id)
    end_tag = soup.find(id=end_id)

    if start_tag is None:
        raise ValueError(f"Start tag not found in the document.")
    # The local Bible copy has no end anchor, so a missing end tag is
    # tolerated for that URL only (the walk below then runs to the end).
    if end_tag is None and url != 'https://www.gutenberg.org/cache/epub/10/pg10-images.html':
        raise ValueError(f"End tag not found in the document.")

    # Walk the top-level siblings after start_tag, stopping at end_tag.
    # NOTE(review): this walk pops each head element after re-extending
    # with its own following siblings; the net effect appears to drop the
    # first sibling and produce duplicate references that the append()
    # loop below collapses (append() moves a tag, so repeats reorder, not
    # duplicate).  Kept byte-identical — behavior is order-sensitive.
    content = start_tag.find_next_siblings(recursive=False)
    while content and content[0] != end_tag:
        temp = content[0].find_next_siblings(recursive=False)
        content.extend(temp)
        content.pop(0)
    # Re-parent the extracted tags into a fresh wrapper <div>.
    snippet = soup.new_tag('div')
    for tag in content:
        if tag != end_tag: snippet.append(tag)

    # Wrap the snippet in a minimal standalone HTML document.
    new_html = soup.new_tag('html')
    new_html.append(soup.new_tag('body'))
    new_html.body.append(snippet)

    # Serialize back to markup for WeasyPrint.
    return str(new_html)

def url_to_pdf(url, output_pdf, start_id='pg-header', end_id='pg-footer'):
    """Render the section of *url* between two anchor ids as a PDF file.

    Args:
        url: Page to fetch and convert.
        output_pdf: Path of the PDF file to write.
        start_id: id of the element marking the start of the content.
        end_id: id of the element marking the end of the content.
    """
    # Extract only the book body, then let WeasyPrint lay it out.
    snippet = get_html_snippet(url, start_id, end_id)
    pdf_bytes = HTML(string=snippet).write_pdf()

    # Persist the rendered document.
    with open(output_pdf, 'wb') as pdf_file:
        pdf_file.write(pdf_bytes)

def extract_author_and_title(url):
    """Scrape a Gutenberg ebook landing page for its metadata table.

    Args:
        url: Ebook landing page, e.g. ``https://www.gutenberg.org/ebooks/10``.

    Returns:
        Tuple ``(author, title, language, subjects)`` where *subjects* is
        a list of subject strings and *author* falls back to ``'/'`` when
        no Author row exists.

    Raises:
        Exception: if the page cannot be fetched (non-200 status).
        ValueError: if the Title or Language row is missing.
    """
    print(url)
    # Fetch the page and verify the request succeeded.
    response = requests.get(url)
    if response.status_code != 200:
        raise Exception(f"Failed to load page {url}")
    soup = BeautifulSoup(response.text, 'html.parser')

    author = '/'  # fallback when the page has no Author row
    title = None
    lang = None
    sub = []
    # Each metadata value sits in the <td> adjacent to a labelled <th>.
    # Scan every row once instead of separate find() calls — the original
    # crashed with AttributeError (not the intended ValueError) when a
    # Title/Language row was absent, because find() returned None.
    for th in soup.find_all('th'):
        label = th.get_text(strip=True)
        td = th.find_next_sibling('td')
        if td is None:
            continue
        if label == 'Title':
            title = td.text.strip()
        elif label == 'Language':
            lang = td.text.strip()
        elif 'Subject' in label:
            # A book may list several subjects; collect them all.
            sub.append(td.get_text(strip=True))
        elif 'Author' in label:
            author = td.get_text(strip=True)
    if not title or not lang:
        # Message fixed: the check is on title/language, not author/title.
        raise ValueError("Could not find title or language information")
    return author, title, lang, sub

# Usage example
if __name__ == "__main__":
    # Decide which ebook ids to process: a single id from the command
    # line, or every output_<id>.pdf already present in the directory.
    if len(sys.argv) == 2:
        # Convert to int so the exclusion check below works — the
        # original kept the raw string, and '10'/'100' never matched
        # the integer skip-list.
        nums = [int(sys.argv[1])]
    else:
        # Filter out blank/short lines instead of inserting None
        # placeholders, which previously produced bogus 'None' ids.
        nums = [
            int(line.split("_")[1].split(".")[0])
            for line in os.popen("ls *.pdf").read().split("\n")
            if len(line.strip()) > 2
        ]
    for num0 in nums:
        # Ebooks 100 and 10 are deliberately excluded here.
        if num0 in [100, 10]: continue
        num = str(num0)
        url = "https://www.gutenberg.org/ebooks/{0}".format(num)
        author, title, lang, sub = extract_author_and_title(url)
        # Only English-language books are converted.
        if lang.lower() not in ['english', '英语']:
            print("skip " + str(num0))
            continue
        # Persist the metadata next to the PDF: author/title/language,
        # then subjects joined with '|'.
        with open('output_{0}.txt'.format(num), "w") as f:
            f.write(f"{author}\n{title}\n{lang}\n%s" % ("|".join(sub)))
        url = 'https://www.gutenberg.org/cache/epub/{0}/pg{0}-images.html'.format(num)
        output_pdf_path = 'output_{0}.pdf'.format(num)
        # Ebook 1795 lacks the standard pg-header anchor; start from its
        # first section id instead.
        if num == '1795':
            url_to_pdf(url, output_pdf_path, start_id='id00042')
        else:
            url_to_pdf(url, output_pdf_path)
