# 导入库
import datetime

import pdfkit
import os
import requests
from bs4 import BeautifulSoup


# Default directory where the downloaded article HTML and its images are stored.
savepath = "WeChatFiles"
# Directory where the generated PDF files are written.
pdfpath = "pdf"

# wkhtmltopdf options: allow it to read the locally saved image/CSS files
# referenced by relative paths inside the saved HTML.
options = {
    "enable-local-file-access": None
}



def url_to_pdf(url, to_file):
    """Render the web page at *url* straight into a PDF at *to_file*.

    Requires wkhtmltopdf to be installed at the hard-coded path below.
    """
    # Absolute path to wkhtmltopdf.exe.  The original combined a raw string
    # with doubled backslashes (r'D:\\...'), producing literal "\\" in the
    # path; Windows tolerates that, but a plain raw string is the correct form.
    path_wkthmltopdf = r'D:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe'
    config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
    # Generate the PDF; to_file is the output file path.
    pdfkit.from_url(url, to_file, configuration=config)
    print('完成')


def html_to_pdf(html, to_file):
    """Convert a local HTML file *html* into a PDF at *to_file*.

    Passes the module-level ``options`` so wkhtmltopdf may read the locally
    saved images referenced by the page.
    """
    # Absolute path to wkhtmltopdf.exe.  The original combined a raw string
    # with doubled backslashes (r'D:\\...'), producing literal "\\" in the
    # path; Windows tolerates that, but a plain raw string is the correct form.
    path_wkthmltopdf = r'D:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe'
    config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
    # Generate the PDF; to_file is the output file path.
    pdfkit.from_file(html, to_file, configuration=config, options=options)
    print('完成')


# Helper: persist text content to disk.
def SaveFile(fpath, fileContent):
    """Write *fileContent* to *fpath* as UTF-8 text, replacing any existing file."""
    with open(fpath, mode='w', encoding='utf-8') as handle:
        handle.write(fileContent)


# Download the page at *url* and return its HTML text.
def DownLoadHtml(url, timeout=30):
    """Fetch *url* while impersonating a desktop browser.

    Parameters:
        url: page address to download.
        timeout: seconds before giving up; the original call had no timeout
            and could hang forever on a stalled connection.

    Returns the page HTML (str) on HTTP 200, otherwise None.
    """
    # Browser-like headers so the server does not reject the scraper.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Connection': 'keep-alive',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    if response.status_code == 200:  # 200 means a normal response
        return response.text  # page body
    return None


# Download a remote image and save it to a local file.
def DownImg(url, savepath, timeout=30):
    """Fetch the image at *url* and write the raw bytes to *savepath*.

    *timeout* (seconds) is new: the original request had no timeout and could
    hang forever.  NOTE(review): a non-200 response still gets written to the
    file (e.g. an HTML error page saved with a .png name), preserving the
    original best-effort behavior.
    """
    # Browser-like headers so the image CDN serves the scraper.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Connection': 'keep-alive',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'
    }
    r = requests.get(url, headers=headers, timeout=timeout)
    with open(savepath, 'wb') as f:
        f.write(r.content)


def ChangeCssSrc(bs):
    """Rewrite protocol-relative stylesheet links (//host/...) to http:// URLs.

    *bs* is the parsed BeautifulSoup document; it is modified in place.
    """
    for link in bs.findAll("link"):
        # Original indexed attrs["href"] directly, raising KeyError for
        # <link> tags that carry no href attribute.
        href = link.attrs.get("href")
        if href and href.startswith("//"):
            link.attrs["href"] = "http:" + href


def ChangeContent(bs):
    """Clear the inline style on the #js_content element so the article body renders.

    Modifies *bs* in place.  Original crashed with AttributeError when the
    element was absent; now a missing element is a no-op.
    """
    jscontent = bs.find(id="js_content")
    if jscontent is not None:
        jscontent.attrs["style"] = ""


def ChangeImgSrc(htmltxt, savepath):
    """Localize a downloaded WeChat article and convert it to PDF.

    Downloads every image into ``<savepath>/images``, rewrites the <img>
    tags to point at the local copies, normalizes stylesheet links, unhides
    the article body, strips scripts/iframes, saves the result as an HTML
    file under *savepath*, and renders a PDF into the module-level pdfpath.

    Parameters:
        htmltxt: raw HTML of the article page.
        savepath: directory for the saved HTML and its images subfolder.
    """
    saveimgdir = os.path.join(savepath, "images")
    # Original assumed both output directories already existed and failed
    # with FileNotFoundError on a fresh checkout.
    os.makedirs(saveimgdir, exist_ok=True)
    os.makedirs(pdfpath, exist_ok=True)
    print("saveimgdir:" + saveimgdir)
    bs = BeautifulSoup(htmltxt, "lxml")
    imgList = bs.findAll("img")
    titleTag = bs.find(id="activity-name")
    # File name defaults to today's date; the article title is appended when
    # present, with whitespace removed.
    # NOTE(review): titles may still contain characters invalid in Windows
    # file names (e.g. ? | :) — TODO confirm/sanitize.
    filename = datetime.datetime.now().strftime('%Y-%m-%d')
    if titleTag is not None:
        filename = filename + "_" + titleTag.get_text().replace(" ", "").replace("\n", "")
    savepath = os.path.join(savepath, filename + ".html")
    pdfname = os.path.join(pdfpath, filename + ".pdf")
    imgindex = 0
    for img in imgList:
        imgindex += 1
        # WeChat lazy-loads images: the real URL lives in data-src; fall back
        # to src; otherwise the tag has no usable source.
        if "data-src" in img.attrs:
            originalURL = img.attrs['data-src']
        elif "src" in img.attrs:
            originalURL = img.attrs['src']
        else:
            originalURL = ""
        if originalURL.startswith("//"):  # protocol-relative URL needs a scheme
            originalURL = "http:" + originalURL
        if len(originalURL) > 0:
            print("down img", imgindex)
            imgtype = img.attrs.get("data-type", "png")
            imgname = filename + "_" + str(imgindex) + "." + imgtype
            imgsavepath = os.path.join(saveimgdir, imgname)
            DownImg(originalURL, imgsavepath)
            img.attrs["src"] = "images/" + imgname  # relative path inside the saved HTML
            print(img.attrs["src"])
        else:
            img.attrs["src"] = ""
    ChangeCssSrc(bs)  # fix <link> stylesheet hrefs
    ChangeContent(bs)  # clear js_content style so the body is visible
    # Drop scripts and iframes, which would break offline/PDF rendering.
    for tag in bs(["script", "iframe"]):
        tag.extract()
    # Serialize the modified soup back to a string for saving.
    SaveFile(savepath, str(bs))
    html_to_pdf(savepath, pdfname)


def DownLoadHtmlMain(url, savepath):
    """Download *url* and run the full localize-and-convert pipeline.

    On a failed download (non-200 response) the pipeline is skipped; the
    original passed None straight into BeautifulSoup and crashed.
    """
    htmltxt = DownLoadHtml(url)
    if htmltxt is None:
        print("download failed:", url)
        return
    ChangeImgSrc(htmltxt, savepath)


if __name__ == "__main__":
    url = "https://mp.weixin.qq.com/s/9VMEZlh_X_YqCi87q_Ju9Q"

    savepath = "WeChatFiles"


    DownLoadHtmlMain(url,savepath)
    # SaveFile(savepath, htmlstr)

    # html_to_pdf(savepath, 'test4.pdf')
