#!/usr/bin/python
#coding=utf-8
import sys
import os
reload(sys)
sys.setdefaultencoding("utf8")
from gevent import monkey
monkey.patch_all()
import requests
import urllib2
import urllib
import gevent
from gevent.pool import Pool
from gevent.queue import Queue
from pyquery import PyQuery as pq
import re
localDir = '/Users/mac/Pictures/zms/'  # local directory downloads are written to
#localDir = '/Users/mac/Pictures/zgp/'
successNum = 0  # running count of successfully downloaded files
pool_num = 100  # gevent pool size for concurrent page fetches
listUrls = []  # list-page URLs queued for crawling
errorListUrls = []  # URLs that answered with a non-200 status
abandonListUrls = []  # URLs given up on after repeated failures
errorUrls = {} # failed-URL dict: key is the url, value is the failure count
retryMax = 3 # maximum number of retries
downQueue = Queue()  # NOTE(review): created but apparently unused in this file
resultUrls = []  # download links successfully scraped from list pages
def Schedule(a, b, c):
    """Progress callback in the urllib.urlretrieve reporthook signature.

    a: number of data blocks downloaded so far
    b: size of one data block, in bytes
    c: total size of the remote file, in bytes
    """
    if c <= 0:
        # server did not report a size (or reported 0): avoid ZeroDivisionError
        print('0.00%')
        return
    per = 100.0 * a * b / c
    if per > 100:
        per = 100  # the last block can overshoot the reported total
    print('%.2f%%' % per)

def downloadImageFile(imgUrl, index, title, ext):
    """Stream-download imgUrl into localDir as '<index>.<title><ext>'.

    Retries failed downloads up to retryMax times (the old loop retried
    forever), and increments the global successNum counter on success
    (it was printed as successNum+1 but never actually incremented).
    """
    global successNum
    i = 0
    while True:
        try:
            # stream=True so the body is pulled lazily chunk by chunk
            r = requests.get(imgUrl, stream=True, timeout=10)
            with open('%s%s.%s%s' % (localDir, index, title, ext), 'wb') as f:
                # 1 MiB chunks keep memory bounded; the old chunk_size of
                # 1024000000 (~1 GB) effectively disabled streaming
                for chunk in r.iter_content(chunk_size=1024 * 1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
            successNum = successNum + 1
            print('%s:%s下载完成,成功(%d)' % (index, title, successNum))
            break
        except Exception as e:
            i = i + 1
            print('下载%s:%s超时，进行第%s重试' % (index, title, i))
            if i >= retryMax:
                break  # honor retryMax instead of looping forever

def requestUrl(url):
    """Fetch one zanmeishi.com list page and record its download link.

    Successful links are appended to resultUrls; non-200 responses go to
    errorListUrls; URLs that keep raising are moved to abandonListUrls
    after retryMax retries.
    """
    i = 0
    while True:
        try:
            response = requests.get(url, timeout=10)
            response.encoding = 'utf-8'
            if response.status_code == 200:
                doc = pq(response.text)
                # the download link lives in the element following ".notice"
                zurl = doc(".notice").next().find('a').attr('href')
                index = re.findall(r'\d+', url)[0]  # numeric page index from the URL
                try:
                    ext = zurl[zurl.rfind('.'):]  # file extension
                except Exception as e:
                    # zurl is None (page links to text, not a file): skip it
                    print('是文本忽略下载')
                    break
                if zurl:
                    resultUrls.append(zurl)
                    # multiply by 100 so the printed value is a real percentage
                    # (the old code printed the raw fraction)
                    print('完成%d个,进度%.2f%%' % (len(resultUrls), len(resultUrls) * 100 / 22964.00))
                break
            else:
                print('url:%s访问失败,失败原因:%d' % (url, response.status_code))
                errorListUrls.append(url)
                break
        except Exception as e:
            # i >= retryMax is the same threshold as the old hard-coded i > 2,
            # but tied to the module-level constant
            if i >= retryMax:
                print('遇到错误的url:%s，丢弃' % (url))
                abandonListUrls.append(url)
                print('目前错误url总数:%d' % (len(abandonListUrls)))
                break
            i = i + 1
            print('url:%s访问超时,正在重试第%s' % (url, i))

def crawlerZanMeiShi():
    """Crawl every zanmeishi.com tab page concurrently and dump the results.

    Scraped links are appended to result.txt, non-200 URLs to error.txt,
    and abandoned URLs to abandonHanle.txt.
    """
    # pages 1..22964 inclusive; the old range(1, 22964) missed the last page
    # (both the comment and the 22964.00 progress divisor say 22964 pages)
    for i in range(1, 22965):
        listUrls.append('http://www.zanmeishi.com/tab/%s.html' % (i))
    gpool = Pool(pool_num)  # reuse the module-level pool size constant
    for url in listUrls:
        gpool.spawn(requestUrl, url)
    gpool.join()
    # with-blocks guarantee every handle is closed; resultHanle leaked before
    with open('abandonHanle.txt', 'a') as abandonHanle:
        abandonHanle.writelines([line + '\n' for line in abandonListUrls])
    with open('error.txt', 'a') as errorHanle:
        errorHanle.writelines([line + '\n' for line in errorListUrls])
    with open('result.txt', 'a') as resultHanle:
        resultHanle.writelines([line + '\n' for line in resultUrls])
    print('finished!')


def getContentUrl(url):
    """Fetch one zhaogepu.com list page and download the sheet image it links to."""
    i = 0
    while True:
        try:
            # browser-like headers; presumably the site rejects the default
            # requests user agent — TODO confirm
            headers = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                       'Accept-Encoding':'gzip, deflate, sdch',
                        'Accept-Language':'zh-CN,zh;q=0.8,en;q=0.6',
                        'Connection':'keep-alive',
                        'Host':'www.zhaogepu.com','Referer':'http://www.zhaogepu.com/jianpu/1.html',
                        'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.104 Safari/537.36'
                       }
            res = requests.get(url, headers=headers, timeout=5.0)
            res.encoding = 'utf-8'
            if res.status_code == 200:
                doc = pq(res.text)
                zurl = doc(".images_list").find('a').attr('href')
                title = doc(".images_list").find('a').attr('title')
                index = re.findall(r'\d+', url)[0]  # numeric page index from the URL
                try:
                    ext = zurl[zurl.rfind('.'):]  # file extension
                except Exception as e:
                    # zurl is None: nothing to download. The old code fell
                    # through here, hit a NameError on the unbound ext, and
                    # retried the URL up to 100 times.
                    print('是文本忽略下载')
                    break
                if zurl:
                    downloadImageFile(zurl, index, title, ext)
                break  # done either way; a falsy zurl used to loop forever
            else:
                print('url:%s访问失败,失败原因:%d' % (url, res.status_code))
                break
        except Exception as e:
            if i > 100:
                print('遇到错误的url，丢弃')
                break
            i = i + 1
            print('url:%s访问超时,正在重试第%s' % (url, i))

def crawlerZhaoGePu():
    """Queue zhaogepu.com list pages 1-199 and crawl them concurrently."""
    base = 'http://www.zhaogepu.com/jianpu/%s.html'
    listUrls.extend(base % page for page in range(1, 200))
    worker_pool = Pool(300)
    for target in listUrls:
        worker_pool.spawn(getContentUrl, target)
    worker_pool.join()
    print('finish')

if __name__ == '__main__':
    #crawlerZhaoGePu()
    # Time one full crawl of zanmeishi.com; number=1 runs the statement once.
    # The setup string imports the function into timeit's namespace.
    from timeit import Timer
    t = Timer(stmt="crawlerZanMeiShi()", setup="from __main__ import crawlerZanMeiShi")
    print 'by requests: %s seconds'%t.timeit(number=1)
    #crawlerZanMeiShi()
    #singleDown()
