# -*- coding: utf-8 -*-

import re, requests, time, json, os
from pathlib import Path
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import landscape
from PIL import Image



"""
爬取book118
url: 
    https://max.book118.com/html/2019/0215/6101202153002010.shtm 6101202153002010这个是文档标识：aid
referer_url: 
    book118文档预览地址，其中v可有可无，aid来自url
    https://max.book118.com/index.php?g=Home&m=NewView&a=index&aid=6101202153002010&v=20200730
png_url: 
    book118会将文档转成png图片的形式展示，filetype、callback可有可无，每次返回6张png即page=1、7、13、...，其余参数来自referer_url页面的script标签
    https://openapi.book118.com/getPreview.html?&project_id=1&aid=206693774&view_token=SRzAztN42jxXvHBnNDa@EJvpKxREJZqO&aid_encode=6101202153002010&page=14&filetype=pdf&callback=jQuery17107533416035535168_1596618148237&_=1596618151387
"""


def _extract_aid(url):
    """Extract the document identifier (aid) from a book118 document URL.

    :param url: e.g. https://max.book118.com/html/2019/0215/6101202153002010.shtm
    :return: the aid string, or None when the URL does not match.
    """
    match = re.match(r"^http.?:.+?(\w+?)\.shtm$", url)
    return match.group(1) if match else None


def _extract_preview_info(html):
    """Scrape the getPreview.html query parameters from the preview page.

    The values live inside an inline <script> block, so they are pulled
    out with regexes rather than an HTML parser.

    :param html: response body of the NewView preview page.
    :return: (params dict for getPreview.html, total preview page count).
    :raises ValueError: when an expected field is missing from the page.
    """
    def grab(pattern):
        # Guard against page-format changes: a missing field used to raise
        # a bare IndexError from findall(...)[0].
        found = re.compile(pattern, re.DOTALL).findall(html)
        if not found:
            raise ValueError("preview page missing field for pattern: " + pattern)
        return found[0]

    params = {
        "project_id": grab(r".*?project_id:.?(\d+?)"),
        "aid": grab(r".*?aid:.+?(\d+)"),                    # may differ from the aid in the URL
        "view_token": grab(r".*?view_token:.+?'(.+?)'"),
        "aid_encode": grab(r".*?aid_encode:.+?'(\d+?)'"),   # same as the URL aid (document id)
    }
    preview_page = int(grab(r".*?preview:.+?'(\d+?)'"))
    return params, preview_page


def crawl_data():
    """Interactively crawl a book118 document's preview PNGs to disk.

    Prompts for the document URL, resolves the preview API parameters from
    the preview page, then downloads every preview image into
    download/<aid>/mgr/ (one PNG per page).
    """
    url = input('input url: ')
    aid = _extract_aid(url)
    if aid is None:
        print("url error, please check url!!")
        return

    referer_url = "https://max.book118.com/index.php?g=Home&m=NewView&a=index&aid={}".format(aid)

    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36'}
    # Reuse one session (connection pooling + shared headers) for all requests.
    session = requests.Session()
    session.headers.update(headers)

    rsp = session.get(referer_url)
    rsp.raise_for_status()  # fail loudly on HTTP errors instead of regex-parsing an error page

    try:
        params, preview_page = _extract_preview_info(rsp.text)
    except ValueError as err:
        print(err)
        return

    png_url = "https://openapi.book118.com/getPreview.html"

    # getPreview.html returns a JSONP-style payload; "data" maps page number -> image URL.
    json_regex = re.compile(r"^.*?\"data\":({.*?}),.*?;$")

    # Destination directory: download/<aid>/mgr
    path = "download/{}/mgr".format(aid)
    os.makedirs(path, exist_ok=True)

    page = 1
    while page <= preview_page:
        params["page"] = page
        time.sleep(2)  # be polite: throttle API requests
        png = session.get(png_url, params=params)
        payload = json_regex.findall(png.text)
        if not payload:
            print("unexpected preview response for page {}, stopping".format(page))
            return

        for page_no, img_ref in json.loads(payload[0]).items():
            # The API returns protocol-relative URLs ("//...").
            img_url = "https:{}".format(img_ref)
            time.sleep(1)
            with open("{}/{}.png".format(path, page_no), "wb") as f:
                f.write(session.get(img_url).content)

        page += 6  # the API serves 6 pages per call


def generate_pdf(aid):
    """Assemble the crawled preview PNGs for *aid* into a single PDF.

    Emits one page per image, page size matched (landscape-oriented) to the
    image dimensions, written to download/<aid>/<aid>.pdf.

    :param aid: book118 document id; images are read from download/<aid>/mgr.
    """
    path = Path("download/{}".format(aid))
    if not path.exists():
        print("先爬数据")
        return

    # Directory holding the crawled PNGs.
    img_dir = path.joinpath("mgr")

    c = canvas.Canvas(str(path.joinpath("{}.pdf".format(aid))))
    # Sort numerically by file stem so page 10 follows page 9, not page 1.
    for img_file in sorted(img_dir.iterdir(), key=lambda p: int(p.stem)):
        # Open only to read the dimensions; close promptly (context manager)
        # instead of leaking an open file handle per image.
        with Image.open(img_file) as img:
            width, height = img.size
        c.setPageSize(landscape((width, height)))
        c.drawImage(str(img_file), 0, 0, width, height)
        c.showPage()

    c.save()


if __name__ == "__main__":
    
    # crawl_data()
    generate_pdf(6101202153002010)


# Drive browsers with selenium
from selenium.webdriver import Chrome, chrome, Firefox, firefox, Edge

def chrome_init():
    """Return a headless Chrome WebDriver.

    Passes the --headless flag via add_argument() instead of setting the
    ``options.headless`` attribute, which was deprecated in Selenium 4 and
    removed in later 4.x releases.
    """
    options = chrome.options.Options()
    options.add_argument("--headless")
    return Chrome(options = options)

def firefox_init():
    """Return a headless Firefox WebDriver.

    Passes the -headless flag via add_argument() instead of setting the
    ``options.headless`` attribute, which was deprecated in Selenium 4 and
    removed in later 4.x releases.
    """
    options = firefox.options.Options()
    options.add_argument("-headless")
    return Firefox(options = options)

def edge_init():
    """Return a Microsoft Edge WebDriver with default options."""
    driver = Edge()
    return driver

