# coding=utf-8
import os
import re
import time
import pdfkit
import requests
from bs4 import BeautifulSoup
import sys

# Wrapper template for the saved page fragment: {content} is replaced with the
# extracted post body.  The stylesheet is loaded from the wiki host so the
# rendered PDF keeps the site's styling.
html_template = """ 
<!DOCTYPE html> 
<html lang="en"> 
<head> 
    <meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="http://192.168.138.19:8090/css/main.css"> 
</head> 
<body> 
{content} 
</body> 
</html> 

"""
path_wk = r'D:\soft\wkhtmltopdf\bin\wkhtmltopdf.exe'  # wkhtmltopdf install location
# pdfkit configuration pointing at the local wkhtmltopdf binary.
config = pdfkit.configuration(wkhtmltopdf=path_wk)

# ----------------------------------------------------------------------


def parse_url_to_html(url, title, name):
    """
    Fetch *url*, extract the post body, wrap it in ``html_template`` and
    save it to *name*.

    :param url: page URL to fetch
    :param title: page title, injected as a centred <h1> above the body
    :param name: file name the HTML is written to
    :return: *name* on success, ``None`` on failure
    """
    try:
        response = requests.get(url)
        # Fail early on 4xx/5xx instead of silently parsing an error page.
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')
        # Article body container used by the wiki theme; the original
        # find_all(...)[0] raised a bare IndexError when it was missing.
        body = soup.find(class_="post-body")
        if body is None:
            raise ValueError("no element with class 'post-body' in %s" % url)

        # Prepend the title, centred, at the very top of the body.
        center_tag = soup.new_tag("center")
        title_tag = soup.new_tag('h1')
        title_tag.string = title
        center_tag.insert(0, title_tag)
        body.insert(0, center_tag)

        html = html_template.format(content=str(body))
        with open(name, 'wb') as f:
            f.write(html.encode("utf-8"))
        return name

    except Exception as e:
        # Best-effort: report the problem and return None so the caller
        # can decide whether to continue.
        print(e)
        return None

# ----------------------------------------------------------------------


def save_pdf(htmls, file_name):
    """
    Render the given HTML file (or list of files) into a single PDF.

    :param htmls: path to an HTML file, or a list of such paths
    :param file_name: output PDF path
    :return: None
    """
    pdf_options = {
        'page-size': 'Letter',
    }
    # Uniform 0.75in margins on all four sides.
    for side in ('top', 'right', 'bottom', 'left'):
        pdf_options['margin-%s' % side] = '0.75in'
    pdf_options['encoding'] = "UTF-8"
    pdf_options['custom-header'] = [
        ('Accept-Encoding', 'gzip')
    ]
    pdf_options['cookie'] = [
        ('cookie-name1', 'cookie-value1'),
        ('cookie-name2', 'cookie-value2'),
    ]
    # Deep outline so nested headings show up in the PDF bookmarks.
    pdf_options['outline-depth'] = 10

    pdfkit.from_file(htmls, file_name, options=pdf_options, configuration=config)

# ----------------------------------------------------------------------


def main(url):
    """
    Fetch *url*, convert it to ``<title>.pdf`` in the current directory and
    delete the intermediate HTML file.

    :param url: wiki page URL to convert
    :return: None
    """
    start = time.time()
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    # Page title; the wiki appends "| TSC'WIKI" to every <title>.
    temptitle = str(soup.title.string)
    title = temptitle.replace(r"| TSC'WIKI", '')
    # Drop characters Windows forbids in file names and trim the whitespace
    # left over from the suffix removal — the original replace() alone left
    # a trailing space (and any ':'/'?'/'"' etc.) in the file name, which is
    # exactly the "file creation failure" the comment meant to prevent.
    title = re.sub(r'[\\/:*?"<>|]', '', title).strip()
    filename = title + ".html"
    pdf_filename = title + ".pdf"
    parse_url_to_html(url, title, filename)
    print(filename + u" 转换完成")

    save_pdf(filename, pdf_filename)
    print(u"输出PDF成功！")

    # Remove the intermediate HTML now that the PDF exists.
    if os.path.exists(filename):
        os.remove(filename)
        print(u"删除临时文件 " + filename)

    total_time = time.time() - start
    print(u"总共耗时：%f 秒" % total_time)

# ----------------------------------------------------------------------


def changeDir(dir_name):
    """
    Create *dir_name* (including missing parent directories) if it does not
    exist, then make it the current working directory.

    :param dir_name: directory path to create/switch to
    :return: None
    """
    if not os.path.exists(dir_name):
        # makedirs (not mkdir) so nested paths like 'a\\b\\c' also work.
        os.makedirs(dir_name)

    os.chdir(dir_name)


# ----------------------------------------------------------------------
if __name__ == '__main__':
    # Directory where the generated HTML/PDF files are written.
    output_dir = 'c:\\12'
    changeDir(output_dir)
    # Page to convert (URL-encoded Chinese article title).
    main(r"http://192.168.138.19:8090/2020/02/22/337%E5%8F%B7%E6%96%87%E6%A3%80%E6%B5%8B%E6%96%B9%E6%B3%95/")
