#!/usr/bin/python
#coding=utf-8
import sys
import os
reload(sys)
sys.setdefaultencoding("utf8")
import requests
from pyquery import PyQuery as pq
import re
from timeit import Timer
import multiprocessing

# Number of URLs per job chunk when partitioning work for the pool.
pNum = 100
# All tab-page URLs to crawl (populated by crawlerZanMeiShi / __main__).
# BUG FIX: the original assigned listUrls = [] twice; the duplicate is removed.
listUrls = []
# URLs that returned a non-200 HTTP status.
errorListUrls = []
# URLs abandoned after repeated network failures.
abandonListUrls = []
# Destination directory for downloaded files.
localDir = '/Users/mac/Pictures/zms/'
# Queue intended for a fetch-while-download pipeline (currently unused;
# see the commented-out consumer in crawlerZanMeiShi).
downQueue = multiprocessing.Queue()
# Successfully extracted file URLs.
resultUrls = []
#def download(url):


def requestUrl(url):
    i = 0
    while (True):
        try:
            response = requests.get(url,timeout=10)
            response.encoding = 'utf-8'
            if response.status_code == 200:
                #text = urllib2.urlopen(url, timeout=10).read()
                doc = pq(response.text)
                zurl = doc(".notice").next().find('a').attr('href')
                index = re.findall(r'\d+',url)[0] #索引
                try:
                    ext = zurl[zurl.rfind('.'):] #文件扩展
                except Exception,e:
                    print '是文本忽略下载'
                    break
                title = doc('title').text().split('-')[0].strip()
                filePath = '%s%s.%s%s' % (localDir,index,title,ext)
                '''if os.path.isfile(filePath) and os.path.getsize(filePath)>0:
                    #print '文件已存在'
                    break'''
                if zurl :
                    resultUrls.append(zurl)
                    #downloadImageFile(zurl,index,title,ext)
                    print '完成%d个,进度%.2f%%' %(len(resultUrls),len(resultUrls)/22964.00)
                    break
                break
            else:
                print 'url:%s访问失败,失败原因:%d' %(url,response.status_code)
                errorListUrls.append(url)
                break
        except Exception,e:
            if i >2 :
                print '遇到错误的url:%s，丢弃' %(url)
                abandonListUrls.append(url)
                print '目前错误url总数:%d' %(len(abandonListUrls))
                break
            i = i+1
            print 'url:%s访问超时,正在重试第%s' %(url,i)


def crawlerZanMeiShi():
    """Crawl every tab page with a 4-worker process pool.

    Builds the URL list for pages 1..22963 and fans requestUrl out over
    a multiprocessing.Pool.

    NOTE(review): range(1, 22964) stops at 22963 -- confirm whether page
    22964 should also be crawled.
    """
    for i in range(1, 22964):
        listUrls.append('http://www.zanmeishi.com/tab/%s.html' % (i))
    pool = multiprocessing.Pool(4)
    # BUG FIX: the original created one pool but called pool.close()/join()
    # INSIDE the per-chunk loop, so apply_async on every chunk after the
    # first raised "Pool not running". Submit everything first, then close
    # and join exactly once. The pool distributes work itself, so the
    # manual pNum-sized chunking is no longer needed.
    for url in listUrls:
        pool.apply_async(requestUrl, args=(url,))
    pool.close()
    pool.join()






if __name__ == '__main__':
    #crawlerZhaoGePu()
    try:
        #crawlerZanMeiShi()
        for i in range(1,22964) :
            listUrls.append('http://www.zanmeishi.com/tab/%s.html' %(i))
        jobs = [listUrls[i:i+pNum] for i in range(0,len(listUrls),pNum)]
        pool = multiprocessing.Pool(4)
        for job in jobs:
            procs = []
        for url in job :
            pool.apply_async(requestUrl,args=(url,))
        pool.close()
        pool.join()
        #t = Timer(stmt="crawlerZanMeiShi()", setup="from __main__ import crawlerZanMeiShi")
        #print 'by requests: %s seconds'%t.timeit(number=1)
    except KeyboardInterrupt:
        errorHanle = open('error.txt','a')
        resultHanle = open('result.txt','a')
        abandonHanle = open('abandonHanle.txt','a')
        abandonHanle.writelines([line+'\n' for line in abandonListUrls])
        errorHanle.writelines([line+'\n' for line in errorListUrls])
        resultHanle.writelines([line+'\n' for line in resultUrls])
        errorHanle.close()
        abandonHanle.close()
    print 'finished!'

    #crawlerZanMeiShi()
    #singleDown()