#coding:utf-8
import gc
import re
import os
import sys
import thread
import threading
import time
import urllib2
import chardet
import threadpool as threadpool
import line_profiler
import cProfile

# Shared worker pool used by start_type to fan out page fetches (200 threads).
pool=threadpool.ThreadPool(200)
# Names of book directories already created; guards against duplicate mkdir
# across worker threads (see createBoke).
dirs = []
'''
urllib2 helper notes:
      urllib2.urlopen(req)  perform the request for the given url
      read()                read the response body
      detect the charset and transcode:
      charset = chardet.detect(html)
      unicode(html,"gbk").encode("utf-8")
      close()               release the connection
      gc.collect()          force a garbage-collection pass
'''
def gbk2utf8(url):
    """Fetch *url* and return its body as a UTF-8 encoded str.

    The target site normally serves GBK pages; the actual charset is
    detected with chardet and the body is transcoded only when it is
    not already UTF-8.

    Raises urllib2.URLError on network failure.
    """
    req = urllib2.Request(url)
    res = urllib2.urlopen(req)
    try:
        html = res.read()
    finally:
        # Always release the connection, even when read() raises.
        res.close()
    charset = chardet.detect(html)
    # chardet may return None for the encoding; fall back to the site's
    # usual GBK.  The original always decoded as "gbk" regardless of
    # what chardet actually detected.
    encoding = (charset["encoding"] or "gbk").lower()
    if encoding != "utf-8":
        # 'replace' keeps a single bad byte from aborting the page.
        html = unicode(html, encoding, "replace").encode("utf-8")
    return html

'''
    os helper notes:
    os.mkdir(name)        create a directory
    os.close(fd)          close a file descriptor (an fd argument is required!)
    os.pardir             the parent-directory name ("..")
    os.chdir(path)        change the current working directory
    os.path.exists(name)  test whether the path exists
'''
def createBoke(params):
    name = params[0]
    url = params[1]
    try:
        # if os.path.exists(name):
        if name in dirs:
            print name+unicode("__名称已存在","utf-8")
        else:
           os.mkdir(name)
           dirs.append(name)
           os.close()
    except:
        # print name+unicode("___创建失败","utf-8")
        pass
'''
Profiling notes:
    prof = line_profiler.LineProfiler(target_function)
    prof.enable()                 # start collecting line timings
    ... code under analysis ...
    prof.disable()                # stop collecting
    prof.print_stats(sys.stdout)  # dump the report to the console
'''
# Line-level profiler wired to createBoke; enabled/disabled around each
# page batch in getBookName and dumped to stdout there.
prof = line_profiler.LineProfiler(createBoke)
def getBookName(params):
    type_url = params[0]
    pageNo = params[1]
    html = gbk2utf8(type_url)
    bookName = re.compile(r'<ul\sclass="seeWell cf">([\s\S]*?)</ul')
    content = unicode(re.findall(bookName,html)[0],'utf-8')
    name = re.compile(r'title="([\s\S]*?"\shref="[a-zA-z]+://[^\s]*)" ')
    name_url = re.findall(name,content)
    startTime = time.time()
    # 开始性能分析
    prof.enable()
    print '------------------------第'+str(pageNo)+'页开始--------------------------------------------'
    # task_pool = threadpool.ThreadPool(32)
    params = []

    for i in name_url:
        nameAndUrl = i.split('" href="')
        paramlist = []
        paramlist.append(nameAndUrl[0])
        paramlist.append(nameAndUrl[1])
        params.append(paramlist)
        createBoke(paramlist)


    # requests =threadpool.makeRequests(createBoke,params)
    # for req in requests:
    #     task_pool.putRequest(req)
    # task_pool.wait()

    # map(task_pool.putRequest,request_list)
    # task_pool.poll()


    # for i in name_url:
    #     nameAndUrl = i.split('" href="')
    #     # thread.start_new_thread(createBoke,(nameAndUrl[0],nameAndUrl[1],))
    #     createBoke(nameAndUrl[0],nameAndUrl[1])
    endTime = time.time()
    print '------------------------第'+str(pageNo)+'页结束,总耗时:'+str(endTime-startTime)+'------------'
    print '线程名称：'+threading.current_thread().getName()
    prof.disable()  # 停止性能分析
    prof.print_stats(sys.stdout)

# Visit a category's index page to read its total page count, build every
# page url, then extract the book names and links from each page.

'''
Threading notes -- thread-pool usage:
pool = threadpool.ThreadPool(200)                    # create the pool
requests = threadpool.makeRequests(worker, arglist)  # build work requests
for req in requests:
    pool.putRequest(req)                             # queue each request
pool.wait()                                          # block until all finish
'''
def start_type(url):
    # print first_url
    html = gbk2utf8(url)
    pages = re.compile(r'<em\sid="pagestats">1/([0-9]+)</em>')
    page = re.findall(pages,html)
    url = url[0:url.index("_") + 1]
    print "##############################总共" + page[0] + "页########################################"
    params = []
    for pg in range(int(page[0])):
        type_url = url+str(pg+1)+".html"
        # thread.start_new_thread(getBookName,(type_url,))
        paramlist = []
        paramlist.append(type_url)
        paramlist.append(pg+1)
        params.append(paramlist)
        # getBookName(type_url,pg+1)
    requests = threadpool.makeRequests(getBookName, params)
    for req in requests:
        pool.putRequest(req)
    pool.wait()
    # os.mkdir(page[0])
    os.close()
    # 返回当前目录
    os.chdir(os.pardir)


# Fetch all top-level categories, create a directory per category, and
# collect each category's url and name.
def type_page(param):
        url = param[0]
        name = param[1]
        print name
        # 创建目录
        if os.path.exists(name):
           print name
        else:
           os.mkdir(name)
    # finally:
        # 开始访问分类首页
        path = unicode('D:/py/book/','utf-8')+name
        print path
        os.chdir(path)
        start_type(url)

if __name__=="__main__":
    # Entry point: fetch the site front page, extract the category nav
    # list, and process each category sequentially via type_page.
    url = "http://www.quanshuwang.com/"
    req = urllib2.Request(url)
    res = urllib2.urlopen(req)
    html = res.read()
    res.close()  # release the connection (the original leaked it)
    char_type = chardet.detect(html)
    if char_type['encoding'].lower()!="utf-8":
        html = unicode(html, "gbk").encode("utf8")
    # BUG FIX: parsing used to live inside the charset branch above, so
    # a front page already served as UTF-8 was silently skipped.
    nav_re = re.compile(r'<ul\sclass="channel-nav-list">([\s\S]*?)</ul')
    nav_blocks = nav_re.findall(html)
    for nav in nav_blocks:
        # [A-Za-z] fixes the original [a-zA-z] class, which also matched
        # the ASCII punctuation between 'Z' and 'a'.
        a_name = re.findall(r'([A-Za-z]+://[^\s]*)</a', nav)
        for a in a_name:
            # Each match looks like: http://...">CategoryName
            parts = unicode(a,"utf-8").split('">')
            if len(parts) >= 2:
                type_page([parts[0], parts[1]])



