#! /usr/bin/env python
# -*- coding=utf-8 -*-
import urllib2
from bs4 import BeautifulSoup
import urllib
import Queue
import threading
import os
import sys
import json
import requests
from requests.adapters import HTTPAdapter
# Constants used by the crawler

projectDir = os.path.dirname(os.path.abspath(__file__))
req_timeout = 80
# First listing page to crawl (inclusive)
fromPage = 1
# Upper listing-page bound (exclusive — see range() in main)
toPage = 100



# Data model

class MyModel():
    """Simple data-holder base class.

    Subclasses/instances carry plain data attributes (``url``, ``name``,
    plus anything callers set); ``objToDict`` serializes the object tree
    into builtin dicts/lists so it can be fed to ``json.dumps``.
    """
    # Default data attributes.
    url = ""
    name = ""
    # Attribute names that must never be serialized; callers may append
    # their own entries (kept for backward compatibility).
    noNeed = ['__doc__', '__module__', 'objToDict', 'noNeed']

    def objToDict(self):
        """Recursively convert this object into a plain dict.

        Nested MyModel instances become dicts; tuples/sets/frozensets/lists
        become lists; dicts are converted value-by-value; everything else is
        copied as-is.

        Fixes over the previous version: no stray ``pass``, no
        ``list.remove`` that raised when a noNeed entry was missing, and the
        dir()-based harvest no longer leaks dunder names or bound methods
        when run under new-style classes / Python 3. Underscore-prefixed
        and callable attributes are treated as non-serializable.
        """
        seqs = (tuple, list, set, frozenset)

        def _convert(value):
            # One recursive converter replaces the old dictToDict/listToList pair.
            if isinstance(value, MyModel):
                return value.objToDict()
            if isinstance(value, seqs):
                return [_convert(item) for item in value]
            if isinstance(value, dict):
                return {key: _convert(val) for key, val in value.items()}
            return value

        dic = {}
        for attr_name in dir(self):
            if attr_name.startswith('_') or attr_name in self.noNeed:
                continue
            attr = getattr(self, attr_name)
            if callable(attr):
                continue
            dic[attr_name] = _convert(attr)
        return dic
# Thread pool
class ThreadPoll(object):
    """Tiny fixed-size thread pool backed by a job queue and a result queue."""

    def __init__(self, thead_num=10):
        self.thead_num = thead_num
        self.workQueue = Queue.Queue()    # pending (callable, args, kwds) jobs
        self.resultQueue = Queue.Queue()  # values returned by the jobs
        self.workers = []
        self.saved_urls_lock = threading.Lock()
        self.initWorkThreads(thead_num)

    def initWorkThreads(self, thread_num):
        # Create (but do not start) the worker threads.
        self.workers.extend(
            WorkThread(self.workQueue, self.resultQueue)
            for _ in range(thread_num))

    def start(self):
        # Launch every worker; jobs should already be queued via addJod().
        for worker in self.workers:
            worker.start()

    def waitForComplete(self):
        # Drain the worker list, joining each thread. A worker that is
        # somehow still alive while jobs remain is pushed back for another
        # join round.
        while self.workers:
            worker = self.workers.pop()
            worker.join()
            if worker.isAlive() and not self.workQueue.empty():
                self.workers.append(worker)

    # NOTE(review): method name 'addJod' (sic) is the public interface used
    # by callers, so it is kept unchanged.
    def addJod(self, callable, *args, **kwds):
        self.workQueue.put((callable, args, kwds))

    def getResult(self, *args, **kwds):
        return self.resultQueue.get(*args, **kwds)
# Worker thread
class WorkThread(threading.Thread):
    """Daemon thread that keeps executing queued jobs until the queue is empty."""

    def __init__(self, workQueue, resultQueue, **kwds):
        threading.Thread.__init__(self, **kwds)
        self.setDaemon(True)  # do not keep the process alive on exit
        self.workQueue = workQueue
        self.resultQueue = resultQueue

    def run(self):
        # Non-blocking get: an empty queue means there is no work left,
        # so the thread terminates.
        while True:
            try:
                job, args, kwds = self.workQueue.get(False)
                self.resultQueue.put(job(*args, **kwds))
            except Queue.Empty:
                break

def PrintMessage(msg):
    """Overwrite the current stderr line with *msg* (in-place progress display).

    Fix: the old py2 ``print >> sys.stderr, '\\r',`` / ``print ... msg,`` pair
    inserted a stray softspace between the carriage return and the message;
    a single explicit write avoids that and flushes so progress is visible.
    """
    sys.stderr.write('\r%s' % msg)
    sys.stderr.flush()

def DownloadFile(url, tofile, CallBackFunction=PrintMessage):
    """Stream *url* to local path *tofile* in 32 KiB chunks.

    CallBackFunction is invoked with progress strings (defaults to
    PrintMessage). Fix: both the HTTP response and the output file were
    leaked; they are now closed in finally blocks even when a read/write
    raises. (urllib2 response objects predate the context-manager protocol,
    hence try/finally instead of ``with``.)
    """
    f = urllib2.urlopen(url)
    try:
        outf = open(tofile, 'wb')
        try:
            c = 0
            CallBackFunction('Download %s to %s' % (url, tofile))
            while True:
                s = f.read(1024 * 32)
                if not s:
                    break
                outf.write(s)
                c += len(s)
                CallBackFunction('Download %d' % (c))
        finally:
            outf.close()
    finally:
        f.close()
# Parse a listing (index) page
def parseRoot(url_str):
    try:
        s = requests.Session()
        s.mount('http://', HTTPAdapter(max_retries=3))
        s.mount('https://', HTTPAdapter(max_retries=3))
        req = s.get(url_str,timeout = req_timeout)
        html = req.text
        soup = BeautifulSoup(html, "html.parser")
        root_div = soup.find("div", id="list_videos_latest_videos_list_items")
        all_li = root_div.find_all("div",class_='item ')

        model_list = []
        for i in all_li:
            a = i.a

            m = {}
            m['title'] = a['title']
            m['url'] = a.get('href')
            m['img'] = a.find('img',class_='lazy-load')['data-original']
            model_list.append(m)
        return model_list
    except Exception,e:
        print '解析列表出错',e
# Download one item (cover image + video)
def download(m,number = 1):

    name = m['title']
    myDir = os.path.join(projectDir,'download',name)
    if not os.path.exists(myDir):
        os.makedirs(myDir)
    imgFile = os.path.join(myDir,'img.jpg')
    mp4File = os.path.join(myDir,name + '.mp4')
    logFile = os.path.join(myDir, "json.text")
    imageUrl = m['img']
    mp4Url = m['download']
    reload(sys)  # 2
    sys.setdefaultencoding('utf-8')
    try:
        urllib.urlretrieve(imageUrl, imgFile)
    except Exception, e:
        print e
        return

    try:
        s = requests.Session()
        s.mount('http://', HTTPAdapter(max_retries=3))
        s.mount('https://', HTTPAdapter(max_retries=3))
        r = s.get(mp4Url, timeout=req_timeout)
        f = open(mp4File, "wb")
        c = 0
        for chunk in r.iter_content(chunk_size=512):
            if chunk:
                f.write(chunk)
                c += len(chunk)
                print name,c
        try:
            json_str = json.dumps(m)
            output = open(logFile, "a")
            output.write(json_str)
        except Exception,e:
            e = ''

    except Exception,e:
        if number < 6:
            download(m, number + 1)
        else:
            print '下载%s出错'% name,e

# Parse a detail page and trigger the download
def parsePage(m):
    """Fetch the detail page at m['url'], enrich *m*, then download it.

    Adds m['download'] (direct video href) and m['info'] (metadata dict),
    then hands the item to download().

    Fix: the tag string used to be stored under '描述' (description),
    silently overwriting the real description scraped just above; it is
    now stored under its own key '标签' (tags) — TODO confirm intended key
    name against consumers of the json.text log.
    """
    s = requests.Session()
    s.mount('http://', HTTPAdapter(max_retries=3))
    s.mount('https://', HTTPAdapter(max_retries=3))
    req = s.get(m['url'], timeout=req_timeout)
    soup = BeautifulSoup(req.text, "html.parser")
    root_div = soup.find("div", id="tab_video_info")
    infodiv = root_div.find("div", class_="info")
    div_list = infodiv.find_all('div')

    # div layout (observed): [0] info spans, [1] description,
    # [2] tag links, ..., [4] download anchor.
    m['download'] = div_list[4].a.get('href')
    infodic = {}
    infoStr = ''
    for span in div_list[0].find_all('span'):
        infoStr = infoStr + '  ' + span.get_text()
    infodic['信息'] = infoStr
    infodic['描述'] = div_list[1].get_text()
    label = ''
    for a in div_list[2].find_all('a'):
        label = label + '  ' + a.get_text()
    infodic['标签'] = label
    m['info'] = infodic
    download(m)
# Program entry point
def main():
    rootUrl = 'http://www.cao0009.com/latest-updates/'

    for i in range(fromPage,toPage,1):
        pageUrl = os.path.join(rootUrl,str(i),'')
        model_list = parseRoot(pageUrl)
        wpoll = ThreadPoll(thead_num=5)
        for j in model_list:
            wpoll.addJod(parsePage,j)
        wpoll.start()
        wpoll.waitForComplete()
        print '完成',pageUrl
if __name__ == "__main__":
    main()