# coding=utf-8
from urllib import request
from pyquery import PyQuery as pq
import urllib
import re
import os
# Crawler settings: site root (used to absolutize links) and the
# directory where the scraped HTML chapters are written.
config = dict(
    url='http://www.liaoxuefeng.com',
    htmlSavePath='./save/',
)
# 
def getUrlLinks(url):
    """Fetch the tutorial index page and collect the chapter links.

    Parameters:
        url: absolute URL of the index page to scrape.

    Returns:
        (links, title) where *links* is a list of absolute chapter URLs
        taken from the left sidebar and *title* is the page <title> text,
        or False when the page could not be fetched (legacy contract
        kept for existing callers).
    """
    try:
        # `with` guarantees the connection is closed even on error; the
        # original `finally: reqL.close()` raised NameError when
        # urlopen itself failed, because reqL was never bound.
        with request.urlopen(request.Request(url), timeout=30) as resp:
            content = resp.read()
    except (urllib.error.URLError, OSError):
        return False
    doc = pq(content.decode('utf-8'))
    # The <title> must be read from the full document: querying it on
    # the narrowed <a> selection (as the original did) can never match.
    title = doc('title').text()
    links = []
    for anchor in doc('div.x-sidebar-left')('a').items():
        href = anchor.attr('href')
        if href:  # skip anchors without href — concatenating None raises
            links.append(config['url'] + href)
    return links, title
# 
def save2Html(url, savePath):
    """Download one chapter page and save its article body as an HTML file.

    Parameters:
        url: absolute URL of the chapter page.
        savePath: directory (with trailing slash) to write the file into.

    Returns:
        The path of the written HTML file, or False when the page could
        not be fetched (legacy contract kept for existing callers).
    """
    htmlT = """
<!DOCTYPE html>
<html>
<head>
  <title></title>
  <meta charset="utf-8" />
  <style type="text/css">body{width: 960px;margin: 10px auto;}</style>
</head>
<body>
  %s
</body>
</html>
    """
    try:
        # `with` guarantees the connection is closed even on error; the
        # original `finally: reqL.close()` raised NameError when
        # urlopen itself failed, because reqL was never bound.
        with request.urlopen(request.Request(url), timeout=30) as resp:
            content = resp.read()
    except (urllib.error.URLError, OSError):
        return False
    dom = pq(content.decode('utf-8'))
    tit = dom('title').text()
    body = dom('div.x-content')
    # Strip elements that make no sense in an offline/PDF copy.
    body.remove('video')
    body.remove('div.x-display-if-not-signin')
    body.remove('div.x-wiki-info')
    # Rewrite relative links so they still work from the saved file.
    body.make_links_absolute(base_url=config['url'])
    htmlC = htmlT % body.html()
    # Title looks like "Chapter - site"; keep the part before '-', drop
    # its trailing space, and strip '/' so it is a valid file name.
    tit = re.split('-', tit)[0][:-1]
    tit = tit.replace('/', ' ')
    path = savePath + tit + '.html'
    with open(path, 'w', encoding="utf-8") as fp:
        fp.write(htmlC)
    return path
# Convert the saved HTML files into PDF files.
def save_pdf(htmls, file_name):
    """Render an HTML file to PDF with wkhtmltopdf (via pdfkit).

    Parameters:
        htmls: path of the HTML input file (or list of paths).
        file_name: path of the PDF to write.

    Returns:
        True on success; pdfkit raises on conversion failure.
    """
    # Local import fixes a NameError: pdfkit was used but never imported
    # anywhere in this module.
    import pdfkit
    options = {
        'page-size': 'Letter',
        'margin-top': '0.75in',
        'margin-right': '0.75in',
        'margin-bottom': '0.75in',
        'margin-left': '0.75in',
        'encoding': "UTF-8",
        # Placeholder cookies kept from the original; harmless for this
        # site, replace with real values if authentication is needed.
        'cookie': [
            ('cookie-name1', 'cookie-value1'),
            ('cookie-name2', 'cookie-value2'),
        ],
        # Depth of the PDF bookmark/outline tree.
        'outline-depth': 10,
    }
    pdfkit.from_file(htmls, file_name, options=options)
    return True
def main():
    """Crawl the tutorial index, save every chapter as HTML, then as PDF."""
    result = getUrlLinks(
        config['url'] + '/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000')
    # getUrlLinks returns False on network failure; the original code
    # crashed with TypeError on the [0] subscript in that case.
    if not result:
        print('failed to fetch the index page')
        return
    urls = result[0]
    # exist_ok makes the check-then-create race-free; the ./pdf directory
    # was never created by the original code, so save_pdf always failed
    # on a fresh checkout.
    os.makedirs(config['htmlSavePath'], exist_ok=True)
    os.makedirs('./pdf', exist_ok=True)
    hPath = []
    # enumerate avoids the original O(n^2) urls.index() per iteration
    # (which is also wrong when the same URL appears twice).
    for idx, oneurl in enumerate(urls, start=1):
        head = save2Html(oneurl, config['htmlSavePath'])
        if not head:
            continue  # skip pages that failed to download
        li = str(idx) + ' - ' + head + '\n'
        with open('./menu.txt', 'a', encoding='utf-8') as fp:
            fp.write(li)
        hPath.append(head)
        print(li)
    for idx, oneHtml in enumerate(hPath, start=1):
        save_pdf(oneHtml, './pdf/' + str(idx) + '.pdf')


if __name__ == '__main__':
    main()