﻿#!/usr/bin/env python
# -*- coding: utf-8 -*-

# 更新利率表的异步/多线程(线程池)/同步爬虫版本

import re,time,os,math
from collections import deque
import queue,threading
from urllib import request,parse
import asyncio
import aiohttp
import csv

def ppp():
    """Debug helper: print a marker showing this function was called."""
    print("ppp_func...")
def myencode(wcs):
    """Encode *wcs* to GBK bytes when it is a str; pass anything else through unchanged."""
    if not isinstance(wcs, str):
        return wcs
    return wcs.encode('gbk')
    
def getOldDates(ratesfile):     # Collect the dates already present in the rates file
    """Return the list of date strings already stored in *ratesfile*.

    The file is opened in 'a+' mode so a missing file is silently created
    empty instead of raising.  For .csv files the first column of every
    complete (8-field) row is taken, skipping the header row; for plain-text
    files dates are extracted by regex from lines shaped like
    'L  2015-10-24<TAB>...'.
    """
    with open(ratesfile, 'a+') as fold:
        fold.seek(0, 0)  # 'a+' positions at EOF; rewind before reading
        if ratesfile.endswith('.csv'):
            fcsv = csv.reader(fold)
            # BUG FIX: plain next(fcsv) raised StopIteration on a brand-new
            # or empty file; the default turns the header skip into a no-op.
            next(fcsv, None)
            # A valid record is date + 7 rate columns = 8 fields.
            oldList = [row[0] for row in fcsv if len(row) == 8]
        else:
            oldList = re.findall(r'\s*[Ll]?\s*(\d{4}\D+\d{1,2}\D+\d{1,2})', fold.read())
    return oldList
    
def _save(file,dataList):       # 保存(更新)利率文件
    dataList.sort()
    with open(file,'a+') as f:
        f.writelines(dataList)
        
def _dump(date,rates,iscsv=False):          # 字符化一条利率记录
    if not iscsv:
        strRt = ['{:5.2f}'.format(float(r)) for r in rates]
        return 'L  {}\t{}\n'.format(date,'\t'.join(strRt))
    else:
        return '{},{}\n'.format(date,','.join('{0:6.2f}'.format(float(i)) for i in rates)) 
        
class GHshare:  # Shared data/helpers for the target site (ICBC RMB deposit-rate page)
    """Class-level state shared by every crawler variant in this file.

    Holds the target URL, the ASP.NET post-back payload, request headers and
    the compiled regex used to scrape rate values, plus helpers to select the
    queried date and to diff the site's date list against the local file.
    No instances are created; everything is accessed on the class.
    """
    __slots__ = ()  # forbid instance attributes — this class is pure shared state
    # Matches a numeric table cell such as "1.50</td>" and captures the number.
    pat_rates = re.compile(r'(\d+\.?\d*)</td>')
    # WebForms post-back payload; '__VIEWSTATE' is filled in at runtime by
    # dateList_from_net(), the selected date is added by setDate().
    postData={
                '__EVENTTARGET' :'Sel_Date',
                # '__EVENTARGUMENT' : '',
                # '__LASTFOCUS' : '',
                # '__VIEWSTATE' : ''
              }
    # NOTE(review): the UA string begins with 'sMozilla' — looks like a typo
    # for 'Mozilla', but it is a runtime string sent as-is, so left unchanged.
    headers = {"User-Agent":'sMozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) \
                AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.56 Safari/537.17'}
    url = r'http://www.icbc.com.cn/ICBCDynamicSite2/other/rmbdeposit.aspx' 
    firstIndex = 0   # offset into the page past the viewstate blob (set by dateList_from_net)
    ENCODE = 'UTF-8'
    
    @classmethod
    def setDate(cls,date):
        # postData['__EVENTTARGET'] is 'Sel_Date', so this effectively does
        # postData['Sel_Date'] = date — selecting which date the form posts back.
        cls.postData[cls.postData['__EVENTTARGET']] = date
        
    @classmethod
    def dateList_from_net(cls,ratesf):     # Set of dates on the site that are NOT yet in the local rates file
        o_dates = getOldDates(ratesf)
        
        req = request.Request(cls.url,headers=cls.headers)
        with request.urlopen(req) as f:
            res = f.read().decode(cls.ENCODE)
        pat_state = re.compile(r'(?<=__VIEWSTATE" value=").+?(?="\s*?/>)')
        pat_dates = re.compile(r'(?<=option value=")\d[\d-]+(?=")') 
        i = res.find('__VIEWSTATE" value')
        
        # Capture the hidden __VIEWSTATE token required by the POST-back.
        cls.postData['__VIEWSTATE'] = pat_state.search(res,i).group()
        i += len(cls.postData['__VIEWSTATE'])        
        # Only search past the (large) viewstate blob for the date <option>s.
        dates = pat_dates.findall(res,i)
        cls.firstIndex = i  # later scrapes of rate cells also start from here
        return set(dates).difference(o_dates)
        
class Task:     # Cooperative round-robin scheduler for generators — simulated (non-async) concurrency
    """Drive a set of generators step-by-step, one `next()` per turn.

    Note: the run queue is bounded at 5 (deque maxlen semantics), so adding
    more than 5 generators silently evicts the oldest.
    """
    def __init__(self):
        self.q = deque(maxlen=5)

    def add(self, gen):
        """Enqueue a generator for scheduling."""
        self.q.append(gen)

    def run(self):
        """Round-robin until every generator is exhausted."""
        while self.q:
            gen = self.q.popleft()
            try:
                next(gen)
            except StopIteration:
                continue            # finished — drop it
            self.q.append(gen)      # still alive — back of the line
#-----------------------------------------------------------   

class crawler_async:      # Async concurrent crawler (no asyncio.wait(): queue + worker pool)
    """A fixed pool of worker coroutines drains an asyncio.Queue of dates.

    ghlsUpdate() fills the queue, runs the workers until every item is
    processed, then appends the scraped lines to the rates file.
    """
    def __init__(self):
        # FIX: the explicit loop= arguments previously passed to
        # asyncio.Queue and aiohttp.ClientSession were deprecated in 3.8 and
        # removed in 3.10 (TypeError).  Create a dedicated loop, install it,
        # and let both objects pick it up implicitly.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.q = asyncio.Queue()
        self.session = aiohttp.ClientSession(headers=GHshare.headers)

    def close(self):
        """Close the HTTP session inside the loop, give pending callbacks one
        last tick, then shut the loop down."""
        self.loop.run_until_complete(self.session.close())
        self.loop.run_until_complete(asyncio.sleep(0))
        self.loop.close()

    async def _fetch(self, date):
        """POST one date back to the form and record its first 7 rates."""
        GHshare.setDate(date)
        async with self.session.post(GHshare.url, data=GHshare.postData) as resp:
            response = await resp.text(encoding=GHshare.ENCODE)
        rates = GHshare.pat_rates.findall(response, GHshare.firstIndex)
        self.rates.append(_dump(date, rates[:7], self.csv))

    async def _work(self):
        """Worker coroutine: fetch dates until the queue is exhausted."""
        while True:
            try:
                date = self.q.get_nowait()
            except asyncio.QueueEmpty:   # same class as asyncio.queues.QueueEmpty
                break
            try:
                await self._fetch(date)
            except Exception as err:
                # Log and continue, matching crawler_async2._crawl's style.
                print(str(err), date)
            finally:
                # FIX: always account for the taken item; the original skipped
                # task_done() when _fetch raised, deadlocking q.join() below.
                self.q.task_done()

    async def _crawl(self, max_tasks):
        # FIX: direct asyncio.Task(coro) construction is deprecated; schedule
        # on the running loop instead.
        workers = [asyncio.ensure_future(self._work()) for _ in range(max_tasks)]
        await self.q.join()
        for w in workers:
            w.cancel()

    def ghlsUpdate(self, ratesf, max_tasks=12, closed=True):
        """Fetch every missing date and append the results to *ratesf*.

        Returns None (unlike the variants in crawler_async2).
        """
        self.rates = []
        self.csv = ratesf.endswith('.csv')
        for date in GHshare.dateList_from_net(ratesf):
            self.q.put_nowait(date)
        self.loop.run_until_complete(self._crawl(max_tasks))

        if closed:  self.close()
        if self.rates:
            _save(ratesf, self.rates)
# #----------------------------------------------------

class crawler_async2:     # Async concurrent crawler (uses asyncio.wait())
    """Async crawler offering three driving strategies over the same _fetch:

    - ghlsUpdate():  the loop runs forever in a daemon thread; fetches are
      submitted with asyncio.run_coroutine_threadsafe() and awaited
      synchronously through Future.result().
    - ghlsUpdate2(): max_tasks _crawl() workers drain a shared deque, driven
      together by asyncio.wait().
    - ghlsUpdate3(): one _3() coroutine per date, concurrency capped by an
      asyncio.Semaphore; results are printed rather than saved.
    """
    def __init__(self):
        # NOTE(review): session is created outside a running loop (pre-3.10
        # style); aiohttp binds it to the current loop on first use.
        self.loop = asyncio.get_event_loop()
        self.session = aiohttp.ClientSession(headers=GHshare.headers)
        # self.q = asyncio.Queue()
        # self._put(ratesf)
        
    # def _put(self,ratesf):
        # dates = GHshare.dateList_from_net(ratesf)
        # for date in dates:
            # self.q.put_nowait(date)
            
    async def _fetch(self,date): 
        # POST the chosen date back to the form and scrape the rate cells
        # that appear after GHshare.firstIndex.
        GHshare.setDate(date)
        async with self.session.post(GHshare.url,data=GHshare.postData,timeout=aiohttp.ClientTimeout(total=30)) as resp:
            response = await resp.text(encoding=GHshare.ENCODE)
        rates = GHshare.pat_rates.findall(response,GHshare.firstIndex)
        self.rates.append(_dump(date,rates[:7],self.csv))
        # raise TypeError('ERROR...')   # TEST
        
    async def _crawl(self): 
        # Worker coroutine: chain to _fetch here and handle its exceptions
        # with try/except in this (caller) coroutine.
        while self.q:
            date = self.q.popleft()
            # date  = self.q.get_nowait()
            try:
                await self._fetch(date) 
            except Exception as err:
                print(str(err),date)
                
    def close(self):
        # Close the HTTP session inside the loop, give pending callbacks one
        # last tick, then close the loop itself.
        self.loop.run_until_complete(self.session.close())
        self.loop.run_until_complete(asyncio.sleep(0))
        self.loop.close()
        
    def ghlsUpdate(self,ratesf,max_tasks=12,closed=True):
        """Thread-driven variant: run the loop in a daemon thread and submit
        each fetch cross-thread, awaiting every future synchronously."""
        try:
            q = GHshare.dateList_from_net(ratesf)
            self.rates = []
            self.csv = ratesf.endswith('.csv')
            def start(loop):
                loop.run_forever()
            loop = self.loop
            t = threading.Thread(target=start,args=(loop,))
            t.daemon= 1
            t.start()
            tasks = []
            while q:
                date = q.pop()
                tasks.append(asyncio.run_coroutine_threadsafe(self._fetch(date),loop))
            for task in tasks:  
                try:
                    task.result()# block the main thread (synchronous wait)
                except Exception as err:
                    print(str(err))
            loop.call_soon_threadsafe(loop.stop)# important: stop the loop from its own thread
        except:
            raise
        else:
            if self.rates:
                _save(ratesf,self.rates)
        finally:
            if closed:   self.close()
        
    def ghlsUpdate2(self,ratesf,max_tasks=12,closed=True):
        """Standard variant: asyncio.wait() over max_tasks queue-draining
        workers; appends the scraped lines to *ratesf*."""
        # self._put(ratesf)
        try:
            self.q = deque(GHshare.dateList_from_net(ratesf))
            self.rates = []
            self.csv = ratesf.endswith('.csv')
            tasks = [self._crawl() for _ in range(max_tasks)]
            # self.q.join() # for Queue
            self.loop.run_until_complete(asyncio.wait(tasks))
            # asyncio.wait returns a 2-tuple of sets (done, pending — pending
            # is empty here); members are Task objects, and each Task carries
            # its coroutine's return value via task.result().
        except :
            raise
        else:
            if self.rates:  _save(ratesf,self.rates)
        finally:
            if closed:  self.close()
        # async with session :
            # tasks = [self._crawl(session) for _ in range(max_tasks)]
            
    async def _3(self,date):
        # One fetch guarded by the shared semaphore (caps concurrency).
        # with (await self.sema):
        async with self.sema:
            try:
                await self._fetch(date)
            except Exception as err:
                print(str(err),date)
        
    def ghlsUpdate3(self,ratesf,max_tasks=12,closed=True):
        """Semaphore variant: one coroutine per date, at most max_tasks in
        flight; prints the sorted results instead of saving them."""
        self.rates = []
        self.csv = ratesf.endswith('.csv')
        self.sema = asyncio.Semaphore(max_tasks)
        try:
            q = GHshare.dateList_from_net(ratesf)
            cor = asyncio.wait([self._3(date) for date in q])
            self.session.loop.run_until_complete(cor)
        except:
            raise
        else:
            for r in sorted(self.rates):print(r,end='')
        finally:
            if closed:self.close()
        
#------------------------------------------------            
        
class crawler_thread:   # Multi-threaded concurrent crawler
    """Producer/consumer crawler: getHtm threads POST each date and queue the
    open responses; getLS threads parse them into rate lines.

    Both queues are bounded (size 3) to keep producers and consumers in
    lock-step; the shared lock guards GHshare.postData mutation and the
    self.rates result list.
    """
    def __init__(self):
        self.qdate = queue.Queue(3)   # dates waiting to be fetched
        self.qhtml = queue.Queue(3)   # (date, open response) pairs waiting to be parsed
        self.lock = threading.Lock()
        self.req = request.Request(GHshare.url, headers=GHshare.headers)

    def getHtm(self):
        """Worker: take a date, POST it, enqueue the (date, response) pair."""
        while True:
            try:
                date = self.qdate.get()
                with self.lock:  # postData is class-shared: mutate + encode atomically
                    GHshare.setDate(date)
                    data = parse.urlencode(GHshare.postData).encode('utf-8')
                try:
                    f = request.urlopen(self.req, data)
                except Exception as err:
                    # FIX: was a bare except (caught even KeyboardInterrupt)
                    # and dropped the original error; chain it instead.
                    raise IOError('{}: 页面打开失败...'.format(date)) from err
                self.qhtml.put((date, f))
            finally:
                # Always account for the taken item so qdate.join() can finish.
                self.qdate.task_done()

    def getLS(self):
        """Worker: read a queued response and append the formatted record."""
        while True:
            try:
                date, htm = self.qhtml.get()
                with htm:
                    data = htm.read().decode(GHshare.ENCODE)
                lrates = GHshare.pat_rates.findall(data, GHshare.firstIndex)

                with self.lock:  # self.rates is shared across parser threads
                    self.rates.append(_dump(date, lrates[:7], self.csv))
            finally:
                self.qhtml.task_done()

    def ghlsUpdate(self, ratesf, max_tasks=12):
        """Fetch every missing date with thread pools and append to *ratesf*."""
        self.rates = []
        self.csv = ratesf.endswith('.csv')
        # Fetcher pool.
        for _ in range(max_tasks):
            t = threading.Thread(target=self.getHtm)
            t.daemon = True   # FIX: Thread.setDaemon() is deprecated (3.10+)
            t.start()
        # Parser pool — fewer parsers than fetchers, parsing is cheap.
        for _ in range(max_tasks - 5):
            t = threading.Thread(target=self.getLS)
            t.daemon = True
            t.start()
        for date in GHshare.dateList_from_net(ratesf):
            self.qdate.put(date)
            time.sleep(0.01)   # slight stagger between submissions
        self.qdate.join()
        self.qhtml.join()
        if self.rates:
            _save(ratesf, self.rates)

#-------------------------------------------
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import Pool,Queue,Manager
from functools import partial

def ghlsUpdate2(lsfile, max_tasks=12): # Thread-pool crawler
    """Fetch every missing date with a thread pool and append to *lsfile*.

    Parameters: lsfile — path of the rates file (.csv switches the output
    format); max_tasks — number of pool threads.
    """
    lock = threading.Lock()
    req = request.Request(GHshare.url, headers=GHshare.headers)
    dates = GHshare.dateList_from_net(lsfile)
    Csv = lsfile.endswith('.csv')

    def fetch(date):
        # FIX: dropped the pointless `nonlocal lock,req` (both are only read,
        # so the closure sees them anyway).
        # GHshare.postData is shared mutable state: set the date and encode
        # the payload under the lock so concurrent fetches don't interleave.
        with lock:
            GHshare.setDate(date)
            data = parse.urlencode(GHshare.postData)
        with request.urlopen(req, data=data.encode('utf-8'), timeout=30) as htm:
            res = htm.read().decode(GHshare.ENCODE)
        found = GHshare.pat_rates.findall(res, GHshare.firstIndex)
        return _dump(date, found[:7], Csv)

    # FIX: context manager guarantees pool teardown even if a fetch raises;
    # map() blocks until all results are in.  Also removed the leftover
    # debug `print(rates)`.
    with ThreadPool(max_tasks) as pool:
        rates = pool.map(fetch, dates)
    if rates:
        _save(lsfile, rates)
        
# ----------------------------------------------
class crawler_test:     # Test: multiprocessing + coroutines
    """Crawler meant to run inside a worker process (see run_in_coroutine).

    Shared resources (form data, scrape offset) are passed in explicitly
    because child processes do not see GHshare's mutated class state.
    """
    def __init__(self, q, loop):
        self.loop = loop   # keep our own reference; aiohttp's session.loop is deprecated
        self.session = aiohttp.ClientSession(headers=GHshare.headers, loop=loop)
        self.q = q

    async def _fetch(self, date):
        """POST one date using the explicitly-passed form data and record it."""
        self.data['Sel_Date'] = date
        async with self.session.post(GHshare.url, data=self.data, timeout=aiohttp.ClientTimeout(total=60)) as resp:
            response = await resp.text(encoding=GHshare.ENCODE)
        rates = GHshare.pat_rates.findall(response, GHshare.firstIndex)
        self.rates.append(_dump(date, rates[:7], self.csv))

    async def _crawl(self):
        # Drain a multiprocessing queue; queue.Empty (or a fetch error) ends
        # this worker silently — best-effort by design.
        try:
            while 1:
                date = self.q.get(False)
                await self._fetch(date)
        except Exception:
            pass

    async def _crawl2(self):
        # Same, but for an in-process deque; IndexError ends the worker.
        try:
            while 1:
                date = self.q.popleft()
                await self._fetch(date)
        except Exception:
            pass

    def ghlsUpdate(self, data, index, token, max_tasks=12, closed=True, iscsv=False):
        """Run max_tasks workers over the queue passed at construction.

        data/index — post payload and scrape offset handed over from the
        parent process; token — True drains a multiprocessing queue, False an
        in-process deque; iscsv — output format flag (new, defaults to the
        plain-text format).
        """
        self.rates = []
        # BUG FIX: the original read the undefined name `ratesf` here
        # (NameError at runtime); the format is now an explicit parameter.
        self.csv = iscsv
        GHshare.firstIndex = index
        self.data = data
        worker = self._crawl if token else self._crawl2
        tasks = [worker() for _ in range(max_tasks)]
        self.loop.run_until_complete(asyncio.wait(tasks))
        if closed:  self.close()

    def close(self):
        # BUG FIX: close the session *inside* the loop before closing the
        # loop — the original closed the loop first, then called
        # session.close() without awaiting the returned coroutine.
        self.loop.run_until_complete(self.session.close())
        self.loop.close()
        
def run_in_coroutine(q, data, index, token=False):# resources (data/index) are handed to the child process explicitly
    """Entry point executed in a worker process: build a private event loop,
    run an async crawler over *q*, and return the scraped lines."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    if not token:
        q = deque(q)   # token=False: q arrived as a plain list chunk — wrap it locally
    crawler = crawler_test(q, loop)   # build the per-process async crawler
    crawler.ghlsUpdate(data, index, token, 15)
    return crawler.rates
    
def thread_cor2(f):  # More efficient than thread_cor — multiprocess + coroutines
    """Split the missing dates into one chunk per CPU, scrape each chunk in a
    worker process (run_in_coroutine), and append the results to *f*."""
    n = os.cpu_count()
    dates = list(GHshare.dateList_from_net(f))
    if not dates: return
    sep = math.ceil(len(dates) / n)
    chunk = (dates[i:i+sep] for i in range(0, len(dates), sep))
    # run_in_coroutine takes exactly one positional argument per chunk;
    # data/index are baked in since child processes don't share GHshare state.
    func = partial(run_in_coroutine, data=GHshare.postData, index=GHshare.firstIndex)
    with Pool() as pool:
        # BUG FIX: the original tested `if rates:` on the IMapIterator itself,
        # which is always truthy, so the 'empty' branch was unreachable.
        # Materialize (and flatten) the results first, inside the pool's scope.
        res = [r for rs in pool.imap_unordered(func, chunk) for r in rs]
    if res:
        _save(f, res)
        # for r in sorted(res):   print(r,end='')  # test
    else: print('empty')
    
def thread_cor(f):
    """Multiprocess + coroutine updater using a Manager queue shared by all
    worker processes; each worker (run_in_coroutine) returns its rate lines,
    gathered via the apply_async callback.  Unlike thread_cor2(), the
    results are printed sorted, not saved to *f*.
    """
    m = Manager()   # server-process manager — its Queue proxy can be shared with pool workers
    q = m.Queue() 
    # q = Queue()
    '''    Queue对象只能使用继承（inheritance）的方式共享(进程池时)。这是因为Queue
    本身基于unix的Pipe对象实现，而Pipe对象的共享需要通过继承。
    '''
    # (Note above: a plain multiprocessing.Queue can only be shared with a
    # Pool by inheritance, since it is built on a Pipe — hence the Manager.)
    for date in GHshare.dateList_from_net(f):
        q.put(date,False)
    pool=Pool()
    res = []
    def _add(r):
        # Callback runs in the parent process: collect each worker's list.
        nonlocal res
        res.append(r)
    for _ in range(os.cpu_count()):
        # token=True → workers drain the shared Manager queue (_crawl path).
        pool.apply_async(run_in_coroutine,args=(q,GHshare.postData,GHshare.firstIndex,True),callback=_add)
    pool.close()
    pool.join()
    if res:
        # Flatten the per-process lists and print the records sorted by date.
        res = [r for rs in res for r in rs]
        for r in sorted(res):   print(r,end='')            
    else:print('empty')
    # m.shutdown()
#------------------------------------------
if __name__ == '__main__': 
    import _wh_lib as wh   # project-local helper; TimeTest is a timing context manager
    
    # Update 'ls.csv' next to this script, timing the chosen strategy.
    fn = os.path.join(os.path.dirname(__file__),'ls.csv')
    with wh.TimeTest():
        # thread_cor2(fn)      # multiprocess + coroutines (slower than the others)
        # ghlsUpdate2(fn,15)     # thread pool (fast)
        # crawler_async().ghlsUpdate(fn,15)   # async (fairly fast)
        crawler_async2().ghlsUpdate2(fn,15) # standard async (fairly fast)
        # crawler_thread().ghlsUpdate(fn,15)  # multi-threaded (fast)
