#!/usr/bin/python
#coding=utf-8
import sys
import gevent
from gevent import monkey
import urllib2
import requests
from pyquery import PyQuery as pq
import grequests

monkey.patch_all()

urls = []

def downloadImageFile(imgUrl, dest_dir="/Users/mac/tmp/img/"):
    """Stream the image at *imgUrl* to disk and return its local file name.

    The destination directory is parameterized (defaulting to the original
    hard-coded path) so the helper is reusable outside this one machine.
    """
    local_filename = imgUrl.split('/')[-1]
    print("Download Image File= %s" % local_filename)
    # stream=True so the body is downloaded chunk by chunk instead of
    # being buffered entirely in memory before writing.
    r = requests.get(imgUrl, stream=True)
    with open(dest_dir + local_filename, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    # The original flushed after every chunk and called f.close() inside
    # the `with` block -- both redundant: the context manager flushes and
    # closes the file on exit.
    return local_filename

def worker(url):
    """Fetch *url* and print the src of the first '.images_list img' element.

    NOTE(review): timeout=1 second is aggressive for a remote page fetch --
    confirm it is intentional before adding retry/error handling around it.
    """
    res = requests.get(url, timeout=1)
    res.encoding = 'utf-8'
    doc = pq(res.text)
    # Removed the debug print of the whole parsed document and a stray
    # triple-quote artifact left in a trailing comment.
    imgUrl = doc('.images_list img').attr('src')
    print(imgUrl)

def getcontent(url):
    """Return the UTF-8 decoded body of *url*.

    The local was renamed from `re` to `resp`: `re` shadowed the
    standard-library regex module name.
    """
    resp = requests.get(url, timeout=10)
    resp.encoding = 'utf-8'
    return resp.text

def by_requests():
    """Fetch every URL in the module-level `urls` list concurrently with
    gevent and print each document parsed by PyQuery.

    Bug fixed: the original iterated the Greenlet objects themselves and
    passed them to pq(); the HTML returned by getcontent() lives in each
    greenlet's `.value` attribute after joinall().
    """
    jobs = [gevent.spawn(getcontent, url) for url in urls]
    gevent.joinall(jobs)
    for job in jobs:
        # job.value is the string returned by getcontent, or None if the
        # greenlet died (e.g. on a timeout).
        if job.value is not None:
            print(pq(job.value))

def actionRes(res, **kwargs):
    """requests response hook: pull the sheet-music link out of *res* and
    append it to the module-level `file` handle opened in __main__.

    Accepts **kwargs because requests dispatches response hooks as
    hook(response, **kwargs); the original one-argument signature would
    raise TypeError as soon as the hook actually fired. Existing direct
    callers passing only a response are unaffected.
    """
    doc = pq(res.text)
    # The link sits in the anchor of the element following '.notice';
    # attr() returns None when the selector matches nothing.
    zurl = doc(".notice").next().find('a').attr('href')
    if zurl:
        file.write(zurl + '\n')

if __name__ == '__main__':
    # NOTE(review): `file` shadows a builtin, but actionRes() reads it as a
    # module-level global, so the name must stay until both change together.
    file = open('zUrls.txt', 'a')
    for i in range(1, 2000):
        urls.append('http://www.zanmeishi.com/tab/%s.html' % (i))
    # Bug fixed: requests only supports the 'response' hook event -- the
    # original key 'res' made Request.register_hook() reject every request,
    # so the hook never ran and nothing was ever written to zUrls.txt.
    reqs = (grequests.get(url, hooks={'response': actionRes}) for url in urls)
    grequests.map(reqs)
    # Close (and flush) the collected URLs; this call was commented out in
    # the original, leaving buffered writes at risk of being lost.
    file.close()
    sys.exit(0)