# coding=utf-8
# Crawl a CSDN blog article series (blog.csdn.net) into one local HTML file

from urllib import request
from pyquery import PyQuery as pq
import os
import gzip
# Crawler configuration (shared by getUrlLinks / url2con / main).
config = {
    # Index page; its "Go" anchors point at the articles to download.
    'url'           : 'http://blog.csdn.net/rodestillfaraway/article/details/52048175',
    # Directory the combined HTML file is written into (created by main()).
    'htmlSavePath'  : './saveHtml/',
    # NOTE(review): 'retime' is never read anywhere in this file — looks like
    # an intended retry/reconnect count; confirm before removing.
    'retime'        : 10,
    # HTTP request headers sent with every fetch. Accept-Encoding advertises
    # gzip, which is why responses are gunzipped after download.
    'h' :{
        "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0",
        "Accept-Language":"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
        "Accept"   :"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Encoding" :"gzip, deflate"
    },
    # Static HTML written before the scraped article bodies...
    'htmlHead' : """
        <!DOCTYPE html>
        <html>
        <head>
          <title></title>
          <meta charset="utf-8" />
          <link rel="stylesheet" href="http://c.csdnimg.cn/public/common/toolbar/css/index.css">
        </head>
        <body style="width: 1080px;margin: 10px auto;">
            """,
    # ...and after them, closing the document.
    'htmlFoot':"""
        </body>
        </html>
            """
}
# Build the pool of article links from the index page.
def getUrlLinks(url):
    """Fetch *url* and return (links, page_title), or False on network failure.

    Scans the <a> tags inside div.article_content, converts them to absolute
    URLs (relative to config['url']) and keeps only those whose anchor text
    is exactly "Go".
    """
    global config
    req = request.Request(url, headers=config['h'])
    try:
        # Context manager guarantees the connection is closed. The old code
        # used bare `except:` plus a `return False` inside `finally`, which
        # silently swallowed ANY exception — even ones raised on the success
        # path — and masked the real error.
        with request.urlopen(req, timeout=30) as resp:
            content = resp.read()
            content_encoding = resp.info().get('Content-Encoding')
    except OSError:  # URLError / HTTPError / socket timeouts are all OSError
        return False
    # Only gunzip when the server actually gzipped the body; the old code
    # called gzip.decompress unconditionally and crashed on plain responses.
    if content_encoding == 'gzip':
        content = gzip.decompress(content)
    con = content.decode("utf-8")
    dom = pq(con)                                  # initialise pyquery
    title = dom('title').text()                    # page <title> text
    anchors = dom('div.article_content')('a')
    # Turn relative hrefs into absolute ones before collecting them.
    anchors = anchors.make_links_absolute(base_url=config['url'])
    # Keep only the anchors labelled "Go" — those are the article links.
    links = [a.attr('href') for a in anchors.items() if a.text() == "Go"]
    return links, title
# Download one article page and return its body as an HTML fragment.
def url2con(url):
    """Fetch *url* and return (article_html, page_title), or False on network failure.

    article_html is the inner HTML of div.article_content (pyquery .html()),
    which may be None if that div is absent.
    """
    global config
    req = request.Request(url, headers=config['h'])
    try:
        # `with` closes the response even on error; the old try/finally
        # returned False from inside `finally`, swallowing every exception
        # (including ones raised after a successful read).
        with request.urlopen(req, timeout=30) as resp:
            content = resp.read()
            content_encoding = resp.info().get('Content-Encoding')
    except OSError:  # URLError / HTTPError / socket timeouts
        return False
    # Gunzip only when the server says it gzipped the body; unconditional
    # decompression crashed on uncompressed responses.
    if content_encoding == 'gzip':
        content = gzip.decompress(content)
    con = content.decode("utf-8")
    dom = pq(con)
    tit = dom('title').text()                 # page <title> text
    htmlCon = dom('div.article_content').html()   # article body fragment
    return htmlCon, tit
# Script entry point.
def main():
    """Crawl the configured index page and save every linked article
    into a single HTML file under config['htmlSavePath']."""
    if not os.path.isdir(config['htmlSavePath']):
        os.makedirs(config['htmlSavePath'])
    getPath = config['url']
    res = getUrlLinks(getPath)
    if res is False:
        # Index page unreachable — old code crashed unpacking False here.
        print('failed to fetch index page: %s' % getPath)
        return
    urls, title = res
    print('work in 【%s】(url:%s)' % (title, getPath))  # status info
    tempHtmlPath = config['htmlSavePath'] + '蓝桥杯算法.html'
    print("saved %s" % tempHtmlPath)                    # status info
    # One file handle for the whole run instead of reopening per article.
    with open(tempHtmlPath, 'w', encoding="utf-8") as fp:
        fp.write(config["htmlHead"])                    # document head
        # enumerate: O(1) index and correct even with duplicate URLs,
        # unlike the old urls.index(oneurl) (O(n^2), first-match only).
        for i, oneurl in enumerate(urls):
            con = url2con(oneurl)
            if con is False:
                # Old code did False[0] and crashed; skip failed pages.
                print('skip (fetch failed): %s' % oneurl)
                continue
            if con[0]:                                  # .html() can be None
                fp.write(con[0])
            print('(%d/%d)' % (i + 1, len(urls)))       # progress: current/total
        fp.write(config["htmlFoot"])                    # document foot

if __name__ == '__main__':
    main()