import requests
import os
import sys
from bs4 import BeautifulSoup

def fetch_book_metadata(book_id):
    """Scrape title, author, cover URL and download links for one Gutenberg book.

    :param book_id: numeric Project Gutenberg ebook id
    :return: dict with keys 'title', 'author', 'cover', 'urls' on success,
             or None if the request or parsing fails
    """
    url = f'https://www.gutenberg.org/ebooks/{book_id}'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'}

    try:
        # timeout keeps a stalled connection from hanging the whole crawl
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()  # raise HTTPError on non-2xx status

        soup = BeautifulSoup(response.text, 'html.parser')

        # Each element may be absent from the page, so default to "".
        title = ""
        title_dom = soup.find('h1', itemprop='name')
        if title_dom is not None:
            title = title_dom.text.strip()

        author = ""
        author_dom = soup.find('a', typeof='pgterms:agent', itemprop='creator')
        if author_dom is not None:
            author = author_dom.text.strip()

        cover = ""
        cover_dom = soup.find('img', class_='cover-art')
        if cover_dom is not None:
            cover = cover_dom.get('src')

        # Download links live in the 'about' attribute of each file row.
        # Filter out rows without one so callers never see None entries
        # (the original could append None, crashing url.rsplit downstream).
        urls = [tr.get('about')
                for tr in soup.find_all('tr', typeof='pgterms:file')
                if tr.get('about')]

        return {
            'title': title,
            'author': author,
            'cover': cover,
            'urls': urls,
        }
    except Exception as e:
        print(f'Error fetching data: {e}')
        return None

def download_file(url, local_filename=None):
    """Stream a file from *url* to local disk.

    :param url: URL of the file to download
    :param local_filename: local path to save to; defaults to the last
        path segment of the URL
    :return: path of the written local file
    :raises requests.HTTPError: if the server responds with a non-2xx status
    """
    if local_filename is None:
        # Derive a file name from the URL's last path component.
        local_filename = url.split('/')[-1]

    # Stream in chunks so a large book never has to fit in memory;
    # timeout guards against a stalled connection hanging the script.
    with requests.get(url, stream=True, timeout=30) as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:  # filter out keep-alive chunks
                    f.write(chunk)
    return local_filename


def main(argv):
    """Crawl Gutenberg books in [start_id, end_id], download their files,
    and push each book's metadata to the local book service.

    argv layout: [script, start_id, end_id, folder_path]
    """
    if len(argv) < 4:
        print("参数不完整，程序退出")
        sys.exit(1)

    start_id = argv[1]
    end_id = argv[2]
    folder_path = argv[3]
    for book_id in range(int(start_id), int(end_id) + 1):
        metadata = fetch_book_metadata(book_id)
        if metadata:
            print(metadata)
        else:
            print('Failed to fetch metadata.')
            # BUG FIX: the original had this continue commented out, so a
            # single failed fetch crashed the crawl on metadata['urls'].
            continue

        files_dir = folder_path + str(book_id)
        os.makedirs(files_dir, exist_ok=True)
        local_path = []
        for url in metadata['urls']:
            if not url:
                continue  # defensive: skip empty/missing download links
            # e.g. ".../1234.txt.utf-8" -> ['.../1234', 'txt', 'utf-8']
            fileName = url.rsplit('.', 2)
            file = ""
            if "zip" in url:
                file = download_file(url, files_dir + "/book.zip")
            elif len(fileName) < 3:
                # Fewer than two dots: the suffix checks below would
                # IndexError, so skip rather than crash.
                continue
            elif "html" in fileName[1]:
                file = download_file(url, files_dir + "/" + fileName[1] + fileName[2] + ".html")
            elif "epub" in fileName[1]:
                file = download_file(url, files_dir + "/" + fileName[1] + fileName[2] + ".epub")
            elif "kf8" in fileName[1] or "kindle" in fileName[1]:
                file = download_file(url, files_dir + "/" + fileName[1] + fileName[2] + ".mobi")
            elif "txt" in fileName[1]:
                file = download_file(url, files_dir + "/" + fileName[1] + fileName[2] + ".txt")
                with open(file, 'r', encoding='utf-8') as f:
                    # Keep the first 100 characters as a content preview.
                    metadata['content'] = f.read(100)
            # BUG FIX: only record files that were actually downloaded; the
            # original appended (and printed success for) every URL, pushing
            # empty and duplicate paths when no branch matched.
            if file:
                local_path.append(file)
                print(file + " load success.")
        metadata['files'] = local_path

        # BUG FIX: fetch_book_metadata defaults cover to "", which passed the
        # original `is not None` check and triggered download_file("").
        cover_url = metadata['cover']
        if cover_url:
            cover_ext = cover_url.rsplit('.', 1)[-1]
            metadata['cover'] = download_file(cover_url, files_dir + "/cover." + cover_ext)
            print("cover load success.")

        metadata['source'] = f'https://www.gutenberg.org/ebooks/{book_id}'
        print(metadata)
        # Push the collected record to the local book service.
        headers = {'Content-Type': 'application/json'}
        response = requests.post('http://localhost:9088/book/add', json=metadata, headers=headers)
        if response.status_code == 200:
            print("数据推送成功")
            print(response.json())
        else:
            print("数据推送失败")
            print(response.json())


# Entry point: forward the raw CLI arguments (script, start_id, end_id, folder_path).
if __name__ == "__main__":
    main(sys.argv)