# _*_ coding: utf-8 _*_
import asyncio
import json
import math
import os
import sys
import time
from queue import Empty, Queue

import aiofiles
import aiohttp
from bs4 import BeautifulSoup

try:
    # uvloop is an optional drop-in accelerator for the asyncio event loop.
    import uvloop
except ImportError:
    # Not installed: silently keep the default asyncio event loop.
    pass
else:
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

class CrawlerAsync:
    """Asynchronous crawler: scrapes avatar listing pages for image URLs
    and downloads each image into ./wxhead/, with a bounded number of
    concurrent worker tasks."""

    def __init__(self):
        # Count of in-flight worker tasks and the concurrency cap.
        self._workers = 0
        self._workers_max = 50
        # Work items are (url, dpath) tuples.  queue.Queue is thread-safe;
        # it is only ever accessed non-blockingly (get(False)) from the
        # event loop, so it never stalls the loop.
        self._queue = Queue()
        self.loop = asyncio.get_event_loop()
        # NOTE(review): constructing a ClientSession outside a coroutine
        # (and the loop= argument) is deprecated in newer aiohttp versions;
        # kept to preserve the original structure and interface.
        self.session = aiohttp.ClientSession(loop=self.loop)

    async def download(self, url, dpath):
        """Stream the image at *url* to ./wxhead/<basename of url>.

        *dpath* is accepted for interface compatibility but is unused.
        """
        async with self.session.get(url) as resp:
            imgpath = './wxhead'
            imgname = url.split('/')[-1]
            # exist_ok avoids the check-then-create race of the original
            # os.path.exists() + makedirs() pair.
            os.makedirs(imgpath, exist_ok=True)
            # Stream in 1 KiB chunks so a large image never has to fit
            # entirely in memory.
            async with aiofiles.open(imgpath + '/' + imgname, 'wb') as fp:
                while True:
                    chunk = await resp.content.read(1024)
                    if not chunk:
                        break
                    await fp.write(chunk)

    async def fetchhtml(self, url):
        """Fetch *url* and return its body decoded as UTF-8 text."""
        async with self.session.get(url) as resp:
            return await resp.text(encoding='utf-8')

    async def gen_url(self, url=None):
        """Scrape listing pages 1-8 and enqueue every image URL found.

        Only the seed call (url=None) does any work; a non-None *url* is a
        no-op, matching the original control flow in process().
        """
        if url is None:
            for page in range(1, 9):
                page_url = 'http://www.duoziwang.com/head/shadiao/list_%s.html' % page
                htm = BeautifulSoup(await self.fetchhtml(page_url), 'lxml')
                for li in htm.select('.pics li'):
                    img = li.select('a img')[0]
                    self._queue.put((img.get('src'), ''))

    async def process(self, url, dpath):
        """Worker task: download *.jpg URLs, re-scan anything else."""
        try:
            if url.endswith('.jpg'):
                await self.download(url, dpath)
            else:
                await self.gen_url(url)
        finally:
            # BUG FIX: the original decremented only on success, so a failed
            # download permanently leaked a worker slot.
            self._workers -= 1

    async def loop_crawl(self):
        """Main driver: seed the queue, fan out workers, then clean up."""
        await self.gen_url()
        while True:
            for _ in range(self._workers_max):
                try:
                    url, dpath = self._queue.get(False)
                except Empty:
                    # BUG FIX: the original caught bare Exception here,
                    # hiding any real scheduling error.  Queue momentarily
                    # empty; give in-flight workers time to refill it.
                    await asyncio.sleep(1)
                    continue
                asyncio.ensure_future(self.process(url, dpath))
                print('crawl:', url)
                self._workers += 1

            if self._queue.empty():
                break

            # BUG FIX: '>' never throttled until the cap was already
            # exceeded; '>=' enforces the cap exactly.
            if self._workers >= self._workers_max:
                print('> workers_max, sleep 3s')
                await asyncio.sleep(3)

        # BUG FIX: the original closed the session immediately, aborting any
        # downloads still in flight; wait for every worker to finish first.
        while self._workers > 0:
            await asyncio.sleep(0.1)
        await self.session.close()

    def run(self):
        """Run the crawl to completion and report wall-clock time."""
        stime = time.time()
        try:
            self.loop.run_until_complete(self.loop_crawl())
        except KeyboardInterrupt:
            print('stop')
        etime = time.time()
        # Fixed the mis-encoded fullwidth colon in the original message.
        print('time used: %ss' % str(etime - stime))

if __name__ == '__main__':
    # Script entry point: build the crawler and run it to completion.
    CrawlerAsync().run()