#! /usr/bin/env python
# -*- coding=utf-8 -*-

import urllib2
from bs4 import BeautifulSoup
import urllib
import Queue
import threading
import re
import os
import sys
import json
# Constants used throughout the crawler
rootUrl = "http://www.dmzx.com/manhua/44/"


# HTTP headers sent with every request so the site treats us as a normal browser.
custom_header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) '
                              'AppleWebKit/537.11 (KHTML, like Gecko)'
                              ' Chrome/23.0.1271.64 Safari/537.11',
'Accept':'text/html;q=0.9,*/*;q=0.8',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding':'gzip',
'Connection':'close',
'Referer':None # NOTE: if the site still refuses to serve pages, set this to the target site's host
}
# Socket timeout (seconds) for every urllib2.urlopen call below.
req_timeout = 5



# Model: a tiny attribute bag that knows how to serialize itself to plain
# dicts/lists so the result can be fed straight to json.dumps().
class MyModel():
    # Default (JSON-visible) attributes; callers set these per instance.
    url = ""
    name = ""
    # Attribute names that must never appear in the serialized dict.
    noNeed = ['__doc__', '__module__', 'objToDict', 'noNeed']

    def objToDict(self):
        """Recursively convert this model into a plain dict.

        Nested MyModel instances become dicts, tuples/lists/sets/frozensets
        become lists, and dicts are converted value-by-value.  Returns the
        resulting dict.
        """
        seqs = (tuple, list, set, frozenset)

        def _convert(value):
            # Single recursive dispatcher; replaces the original parallel
            # listToList/dictToDict helpers (one of which shadowed the
            # builtin 'list').
            if isinstance(value, MyModel):
                return value.objToDict()
            if isinstance(value, seqs):
                return [_convert(item) for item in value]
            if isinstance(value, dict):
                return dict((k, _convert(v)) for k, v in value.items())
            return value

        dic = {}
        for attr_name in dir(self):
            # Skip private/dunder names and the exclusion list.  The
            # original did attrList.remove(n) for each noNeed entry, which
            # raises ValueError when a name is absent, and its hard-coded
            # list misses the extra dunders dir() reports on new-style
            # classes.
            if attr_name.startswith('_') or attr_name in self.noNeed:
                continue
            value = getattr(self, attr_name)
            if callable(value):
                # Bound methods are not serializable data.
                continue
            dic[attr_name] = _convert(value)
        return dic
# Thread pool: feeds (callable, args, kwargs) jobs to a fixed set of workers.
class ThreadPoll(object):
    def __init__(self, thead_num=10):
        # NOTE(review): 'thead_num' keeps the original (misspelled) keyword
        # name so existing callers are not broken.
        self.thead_num = thead_num
        self.workQueue = Queue.Queue()    # pending (callable, args, kwargs) jobs
        self.resultQueue = Queue.Queue()  # return values produced by workers
        self.workers = []
        self.saved_urls_lock = threading.Lock()
        self.initWorkThreads(thead_num)

    def initWorkThreads(self, thread_num):
        # Create (but do not start) the worker threads.
        for i in range(thread_num):
            worker = WorkThread(self.workQueue, self.resultQueue)
            self.workers.append(worker)

    # Start all workers; each runs until the work queue drains.
    def start(self):
        for w in self.workers:
            w.start()

    # Block until every worker has finished.
    def waitForComplete(self):
        # The original also checked worker.isAlive() after join(); join()
        # without a timeout only returns once the thread is dead, so that
        # branch was unreachable (and isAlive() was removed in Python 3).
        while self.workers:
            worker = self.workers.pop()
            worker.join()

    # Enqueue one job.  NOTE(review): name kept as 'addJod' (sic) and the
    # 'callable' parameter name kept, both for caller compatibility.
    def addJod(self, callable, *args, **kwds):
        self.workQueue.put((callable, args, kwds))

    def getResult(self, *args, **kwds):
        # Pops one worker result; Queue.get kwargs (block/timeout) pass through.
        return self.resultQueue.get(*args, **kwds)
# Worker thread: drains (callable, args, kwargs) tuples from the work queue
# and pushes each call's return value onto the result queue.
class WorkThread(threading.Thread):
    def __init__(self, workQueue, resultQueue, **kwds):
        threading.Thread.__init__(self, **kwds)
        # Daemon: do not keep the interpreter alive if the main thread exits.
        self.setDaemon(True)
        self.workQueue = workQueue
        self.resultQueue = resultQueue

    def run(self):
        # Non-blocking get: an empty queue means there is no more work,
        # so the thread simply terminates.
        while True:
            try:
                func, pos_args, kw_args = self.workQueue.get(False)
            except Queue.Empty:
                return
            self.resultQueue.put(func(*pos_args, **kw_args))

# Parse the index page: one MyModel (url, name) per chapter link.
def parseRoot():
    """Fetch rootUrl and return a list of MyModel with .url/.name taken
    from each chapter's <a href=... title=...> inside the chapter list."""
    req = urllib2.Request(rootUrl, None, custom_header)
    resp = urllib2.urlopen(req, None, req_timeout)
    try:
        html = resp.read()
        # custom_header advertises 'Accept-Encoding: gzip' but urllib2 does
        # not transparently decompress, so inflate the body when the server
        # actually gzipped it (the original fed raw gzip bytes to the parser).
        if resp.info().get('Content-Encoding') == 'gzip':
            import gzip
            import io
            html = gzip.GzipFile(fileobj=io.BytesIO(html)).read()
    finally:
        resp.close()  # original leaked the connection

    soup = BeautifulSoup(html, "html.parser")
    root_div = soup.find("div", class_="subsrbelist center")
    all_li = root_div.find_all("li")

    model_list = []
    for li in all_li:
        a = li.find('a')
        m = MyModel()
        m.url = a.get('href')
        m.name = a['title']
        model_list.append(m)
    return model_list

#下载图片
def downloadImage(url,filePath,number = 1):
    imageDir = os.path.dirname(filePath)
    if not os.path.exists(imageDir):
        os.makedirs(imageDir)
    imageUrl = url
    reload(sys)  # 2
    sys.setdefaultencoding('utf-8')
    try:
        urllib.urlretrieve(imageUrl, filePath)
    except Exception, e:
        # print '下载%s出错'% imageUrl,e
        if number < 6:
            downloadImage(imageUrl, filePath, number + 1)
        else:
            logFile = os.path.join(imageDir,"log.txt")
            try:
                output = open(logFile, "a")
                output.write(imageUrl + "\n" + filePath + "\n")
                print imageUrl, filePath
            except Exception,e:
                print e,logFile

            return

# Parse one chapter page: pull the picture host and the per-image paths out
# of the inline <script>, and return one MyModel per image (name = index).
def parsePage(m):
    req = urllib2.Request(m, None, custom_header)
    resp = urllib2.urlopen(req, None, req_timeout)
    html = resp.read()
    soup = BeautifulSoup(html, "html.parser")
    # The image list lives in JavaScript, not in the DOM.
    js = soup.find("script").get_text()

    # Host prefix shared by every image URL on the page.
    host = re.findall(re.compile('picHosts = "(.*?)"', re.S), js)[0]
    # Per-image path fragments: picAy[0]="...", picAy[1]="...", ...
    pics = re.findall(re.compile('picAy\[\d*\]="(.*?)"', re.S), js)

    models = []
    for idx, pic in enumerate(pics):
        page = MyModel()
        page.name = str(idx)
        page.url = host + pic
        models.append(page)
    return models


#程序入口
def main():
    dirImags = "~/Desktop/tmp"

    model_list = parseRoot()

    # wpoll = ThreadPoll(thead_num=10)
    data_list = []
    for i in model_list:
        dic_i = i.objToDict()
        page_list = parsePage(i.url)
        arr_i = []
        for j in page_list:
            dic_j = j.objToDict()
            arr_i.append(dic_j)
            # imgUrl = j.url
            # imgPath = os.path.join(dirImags,i.name,j.name + ".jpg")
            # downloadImage(imgUrl,imgPath)
        dic_i["data"] = arr_i
        dic_i["count"] = len(arr_i)
        data_list.append(dic_i)


    json_str = json.dumps(data_list)
    reload(sys)  # 2
    sys.setdefaultencoding('utf-8')
    logFile = os.path.join(dirImags, "log.txt")
    try:
        output = open(logFile, "a")
        output.write(json_str)
    except Exception, e:
        print e, logFile

    return
    # wpoll.start()
    # wpoll.waitForComplete()
# Run the crawler only when executed as a script (not on import).
if __name__ == "__main__":
    main()