# coding=utf-8
# Scrape "Liao Xuefeng's official tutorials" into standalone HTML files

from urllib import request
from pyquery import PyQuery as pq
import os

# Scraper configuration.
config = {
    'url'           : 'http://www.liaoxuefeng.com',  # site root; prepended to every relative link
    'htmlSavePath'  : './saveHtml/',                 # output directory for the generated HTML files
    'retime'        : 10,                            # max fetch retries per chapter page
    'kinds': {
        # tutorial name -> index-page path on the site
        'git'       : '/wiki/0013739516305929606dd18361248578c67b8067c8c017b000',
        'Python'    : '/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000',
        'JavaScript': '/wiki/001434446689867b27157e896e74d51a89c25cc8b43bdb3000'
    }
}
# Fetch the chapter-link pool from a tutorial index page.
def getUrlLinks(url):
    """Download *url* and collect the tutorial's chapter links.

    Parameters:
        url: absolute URL of a tutorial index page.

    Returns:
        ``(links, title)`` on success, where *links* is a list of absolute
        chapter URLs harvested from the left sidebar (``div.x-sidebar-left``)
        and *title* is the page's ``<title>`` text; ``False`` on any fetch
        failure, so callers can treat a falsy result as "retry/skip".
    """
    req = request.Request(url)
    try:
        # HTTPResponse is a context manager: the socket is closed even if
        # read() raises — replaces the original try/finally/close dance,
        # which crashed with NameError when urlopen itself failed.
        with request.urlopen(req, timeout=30) as resp:
            content = resp.read()
    except (OSError, ValueError):
        # URLError/timeout are OSError subclasses; ValueError covers bad URLs.
        return False
    dom = pq(content.decode('utf-8'))        # initialize the PyQuery document
    title = dom('title').text()              # page title, returned to the caller
    # Collect every sidebar anchor's href and make it absolute.
    links = [config['url'] + a.attr('href')
             for a in dom('div.x-sidebar-left')('a').items()]
    return links, title


def url2con(url):
    """Download one chapter page and return its cleaned article HTML.

    Parameters:
        url: absolute URL of a chapter page.

    Returns:
        The inner HTML of the ``div.x-content`` node — with video players,
        sign-in banners, wiki metadata and comment areas stripped, and
        relative links rewritten to absolute ones — or ``False`` on any
        fetch/parse failure so the caller's retry loop can try again.
    """
    req = request.Request(url)
    try:
        # Context manager guarantees the response is closed; the original
        # try/finally version raised NameError on reqL when urlopen failed.
        with request.urlopen(req, timeout=30) as resp:
            content = resp.read()
    except (OSError, ValueError):
        return False
    dom = pq(content.decode('utf-8'))
    dom = dom('div.x-content')
    # Strip elements that make no sense in an offline single-file dump.
    for selector in ('video',
                     'div.x-display-if-not-signin',
                     'div.x-wiki-info',
                     'div#x-comment-area',
                     'ul#x-comment-list'):
        dom = dom.remove(selector)
    dom = dom.make_links_absolute(base_url=config['url'])  # relative -> absolute links
    con = dom.html()
    if con is None:
        # Page had no div.x-content (e.g. an error page) — treat as a
        # fetch failure instead of crashing on None.replace below.
        return False
    # The removals above leave the comment-section headings behind; drop them.
    con = con.replace('<h3>Comments</h3>', '')
    con = con.replace('<h3>Make a Comment</h3>', '')
    return con
def main():
    """Scrape every tutorial in config['kinds'] into one standalone HTML file.

    For each tutorial: fetch its index page, then fetch every chapter
    (retrying up to config['retime'] times per chapter) and append the
    cleaned article HTML to ``liaoxuefeng[<kind>].html`` under
    config['htmlSavePath'].
    """
    htmlHead = """
<!DOCTYPE html>
<html>
<head>
  <title></title>
  <meta charset="utf-8" />
  <style type="text/css">body{width: 1080px;margin: 10px auto;}</style>
</head>
<body>
    """
    htmlFoot = """
</body>
</html>
    """
    if not os.path.isdir(config['htmlSavePath']):
        os.makedirs(config['htmlSavePath'])
    for kind in config['kinds']:
        getPath = config['url'] + config['kinds'][kind]
        print("work in [%s] url(%s)" % (kind, getPath))
        # getUrlLinks returns False on failure — guard before indexing,
        # otherwise False[0] raises TypeError.
        result = getUrlLinks(getPath)
        if not result:
            print("failed to fetch index page for [%s], skipping" % kind)
            continue
        urls = result[0]
        # Output file for this tutorial.
        tempHtmlPath = config['htmlSavePath'] + 'liaoxuefeng[' + kind + '].html'
        # Write the HTML header first (truncates any previous run's file).
        with open(tempHtmlPath, 'w', encoding="utf-8") as fp:
            fp.write(htmlHead)
        # enumerate replaces urls.index(oneurl), which was O(n) per chapter
        # and wrong when the same URL appears twice in the sidebar.
        for i, oneurl in enumerate(urls):
            # Retry on timeout/failure, up to config['retime'] attempts.
            con = False
            retries = config['retime']
            while retries and con is False:
                con = url2con(oneurl)
                retries -= 1
            if con is False:
                # All retries exhausted — skip instead of writing False
                # into the file (which raised TypeError before).
                print("giving up on %s" % oneurl)
                continue
            with open(tempHtmlPath, 'a', encoding="utf-8") as fp:
                fp.write(con)
            # Progress as (current/total); the original printed them swapped.
            print('(%d/%d)' % (i + 1, len(urls)))
        # Close the HTML document.
        with open(tempHtmlPath, 'a', encoding="utf-8") as fp:
            fp.write(htmlFoot)
if __name__ == '__main__':
    # Indent with 4 spaces like the rest of the file (the original used a tab).
    main()