#!/usr/bin/python
#coding=utf-8
import sys
import os
reload(sys)
sys.setdefaultencoding("utf8")
from gevent import monkey
monkey.patch_all()
import requests
import urllib2
import urllib
import gevent
from gevent.pool import Pool
from gevent.queue import Queue
from pyquery import PyQuery as pq
import re
import socket
from threading import Thread

socket.setdefaulttimeout(10)  # global socket timeout so hung connections fail fast
#localDir = '/Users/mac/Pictures/zms/'
localDir = '/Users/mac/Pictures/zgp/'  # destination directory for downloaded files
pool_num = 100  # gevent pool size: max concurrent page fetches
listUrls = []  # tab-page urls to crawl, filled by crawlerZanMeiShi()
errorUrls = {}  # failed urls: key = url, value = failure count
retryMax = 3  # maximum number of retries
downQueue = Queue()  # pending downloads; NOTE(review): no consumer visible in this chunk — confirm a worker drains it

def Schedule(a, b, c):
    """Progress callback in the signature urllib.urlretrieve expects.

    a: number of blocks transferred so far
    b: size of one block in bytes
    c: total size of the remote file in bytes (<= 0 when the server
       does not report a length; urlretrieve passes -1 in that case)
    """
    if c <= 0:
        # Unknown/zero total size: the original divided by c and raised
        # ZeroDivisionError (or printed a negative percentage for -1).
        return
    per = min(100.0 * a * b / c, 100)
    print('%.2f%%' % per)

def downloadImageFile(imgUrl, index, title, ext):
    """Stream imgUrl to '<localDir><index>.<title><ext>', retrying on error.

    Bug fixes vs. the original:
      * the retry message had three '%s' placeholders but only two
        arguments, so the except handler itself raised TypeError;
      * 'break' inside 'finally' cancelled the 'continue' issued by the
        except clause, so no retry ever ran and the success message was
        printed even when the download failed;
      * the response was never closed, leaking the connection;
      * retries are now capped at the module-level retryMax.
    """
    i = 0
    while True:
        try:
            # stream=True so the body is fetched lazily in chunks
            r = requests.get(imgUrl, stream=True, timeout=20)
            try:
                with open('%s%s.%s%s' % (localDir, index, title, ext), 'wb') as f:
                    # 1 MiB chunks; the original's chunk_size=1024000000
                    # (~1 GB) buffered the whole body and defeated streaming
                    for chunk in r.iter_content(chunk_size=1024 * 1024):
                        if chunk:  # filter out keep-alive new chunks
                            f.write(chunk)
            finally:
                r.close()  # release the connection back to the pool
            print('%s:%s下载完成' % (index, title))
            return
        except Exception as e:
            i = i + 1
            print('下载%s:%s超时，进行第%s重试' % (index, title, i))
            if i >= retryMax:
                return


def requestUrl(url):
    """Fetch one detail page, extract the real download link, and queue it.

    Parses the page at *url*, pulls the file link out of the element after
    '.notice', and enqueues (zurl, index, title, ext) on downQueue.  Pages
    with no file link ("plain text") and files already on disk are skipped.
    Network errors are retried up to retryMax times (the original retried
    forever).
    """
    i = 0
    while True:
        try:
            response = requests.get(url, timeout=10.0)
            response.encoding = 'utf-8'
            if response.status_code != 200:
                print('url:%s访问失败,失败原因:%d' % (url, response.status_code))
                return
            doc = pq(response.text)
            zurl = doc(".notice").next().find('a').attr('href')
            index = re.findall(r'\d+', url)[0]  # numeric id embedded in the url
            try:
                ext = zurl[zurl.rfind('.'):]  # file extension, e.g. '.mp3'
            except Exception as e:
                # zurl is None -> page holds plain text, nothing to download
                print('是文本忽略下载')
                return
            title = doc('title').text().split('-')[0].strip()
            filePath = '%s%s.%s%s' % (localDir, index, title, ext)
            if os.path.isfile(filePath):
                print('文件已存在')
                return
            if zurl:
                # BUG FIX: gevent Queue.put_nowait() takes a single item; the
                # original passed four positional args and raised TypeError.
                downQueue.put_nowait((zurl, index, title, ext))
            return
        except Exception as e:
            i = i + 1
            print('url:%s访问超时,正在重试第%s' % (url, i))
            if i >= retryMax:
                return

def crawlerZanMeiShi():
    """Build the tab-page url list and crawl it with a gevent pool."""
    # pages 1 .. 22963; NOTE(review): the author's '#22964' note suggests an
    # intended upper page count of 22964 — confirm whether the last page is
    # deliberately excluded before changing the range.
    listUrls.extend(
        'http://www.zanmeishi.com/tab/%s.html' % page for page in range(1, 22964)
    )
    gpool = Pool(pool_num)
    for pageUrl in listUrls:
        gpool.spawn(requestUrl, pageUrl)
    gpool.join()
    print('finished!')

# Script entry point: kick off the crawl when run directly.
if __name__ == '__main__':
    crawlerZanMeiShi()