'''
wikipedia网页处理模块
'''

import re
import aiohttp
import asyncio
import bs4
from bs4 import BeautifulSoup
from concurrent import futures
from multiprocessing import cpu_count 
import subprocess
import time 

from datastore import datastore
from model.models import modelCategory, modelLink, modelDeletedcategory, modelHtmlText, modelDoneLink
import wordStandardized
import statistics
from log import log
from wordCount import wordCount
from utils import costCount
import settings
from wordTranslate import wordTranslate

# Module-wide logger; writes to settings.WIKI_LOG_FILE at INFO level.
wikipedialog = log(filename=settings.WIKI_LOG_FILE, level=log.INFO)

class wikipedia:
    """Crawler for English Wikipedia starting from the Computer Science category.

    BFS-walks category pages, splitting discovered links into categories
    (to recurse into) and articles (to download).  Article pages are stripped
    of uninteresting tags and their text is normalized for word counting.
    Worker counts and queue watermarks are tuned via the class constants.
    """

    BASE_URL = r'https://en.wikipedia.org'
    START_URL = r'/wiki/Category:Computer_science'
    LINK_SCHEMA = r'.mw-category-generated .mw-content-ltr ul li a '  # bs4 CSS selector locating link anchors
    URL_WORKERS = 4  # number of URL-analysis worker coroutines
    TEXT_WORKERS = 6  # number of HTML-text-analysis worker coroutines
    IGNORE_TAGS = ['pre', 'code', 'i', '.mwe-math-element', 'sub', 'a']  # selectors removed before text extraction
    TEXT_SCHEMA = r'html body .mw-body .mw-body-content .mw-content-ltr .mw-parser-output p'  # bs4 CSS selector locating body paragraphs
    SITE = 'wikipedia'
    SLEEP_TIME = 1  # seconds to back off when a queue is empty or a worker errors
    SCHEDULE_TIME = 10  # seconds between scheduler polls while a worker is paused
    CYCLE_GAP = 0.01  # seconds yielded to the event loop between CPU-bound word batches

    MIN_PREANALYZE_LINK = 200   # resume link workers when pending links drop below this
    MAX_PREANALYZE_LINK = 1000  # pause link workers when pending links exceed this
    MIN_PREANALYZE_TEXT = 200   # resume text workers when pending texts drop below this
    MAX_PREANALYZE_TEXT = 1000  # pause text workers when pending texts exceed this

    def __init__(self):
        self.datastore = datastore()
        self.logging = wikipedialog
        self.wordCount = wordCount()
        self.linkAnalyzeSleeping = False  # True while link workers are paused by the scheduler
        self.textAnalyzeSleeping = False  # True while text workers are paused by the scheduler

    async def downloadHtml(self, session, url):
        """Download *url* with the given aiohttp session and return the raw bytes."""
        self.logging.info('download Html url = {}'.format(url))
        async with session.get(url) as resp:
            return await resp.read()

    async def scheduleLink(self) -> bool:
        """Throttle link workers to keep the pending-link count between
        MIN_PREANALYZE_LINK and MAX_PREANALYZE_LINK.

        Returns the current sleeping flag; while paused it also sleeps
        SCHEDULE_TIME between polls so callers can simply ``continue``.
        """
        if self.linkAnalyzeSleeping:
            if statistics.getwikiPreAnalyzeLinkCount() < wikipedia.MIN_PREANALYZE_LINK:
                self.linkAnalyzeSleeping = False
                self.logging.info('change link schedult to False')
            else:
                await asyncio.sleep(wikipedia.SCHEDULE_TIME)
        else:
            if statistics.getwikiPreAnalyzeLinkCount() > wikipedia.MAX_PREANALYZE_LINK:
                self.linkAnalyzeSleeping = True
                self.logging.info('change link schedult to True')

        return self.linkAnalyzeSleeping

    async def scheduleText(self) -> bool:
        """Throttle text workers to keep the pending-text count between
        MIN_PREANALYZE_TEXT and MAX_PREANALYZE_TEXT (same scheme as scheduleLink).
        """
        if self.textAnalyzeSleeping:
            if statistics.getwikiPreAnalyzeHtmlCount() < wikipedia.MIN_PREANALYZE_TEXT:
                self.textAnalyzeSleeping = False
                self.logging.info('change text schedult to False')
            else:
                await asyncio.sleep(wikipedia.SCHEDULE_TIME)
        else:
            if statistics.getwikiPreAnalyzeHtmlCount() > wikipedia.MAX_PREANALYZE_TEXT:
                self.textAnalyzeSleeping = True
                self.logging.info('change text schedult to True')

        return self.textAnalyzeSleeping

    async def analyzeUrl(self):
        """BFS worker: pop a category page, download it, and store the
        category/article links found on it.

        Fix: the try/except now lives INSIDE the loop, so a transient error
        (e.g. a network failure) is logged and the worker keeps running
        instead of dying permanently as before.
        """
        while True:
            try:
                if await self.scheduleLink():
                    self.logging.info('scheduling url')
                    continue

                cat = self.datastore.top(modelCategory)
                if not cat:
                    self.logging.warning('analyzeUrl no category')
                    await asyncio.sleep(wikipedia.SLEEP_TIME)
                    continue

                curLink = wikipedia.BASE_URL + cat.link
                # de-duplicate so the same category is never analyzed twice
                if self.datastore.queryDeletedCategory(curLink):
                    continue
                self.datastore.add(modelDeletedcategory(link=curLink))

                async with aiohttp.ClientSession() as session:
                    html = await self.downloadHtml(session, curLink)

                self.logging.info('start html for link = {}'.format(curLink))
                self.analyzeHtmlForLink(html)
            except Exception as e:
                self.logging.error('analyzeUrl exception {},{}'.format(type(e), e))
                await asyncio.sleep(wikipedia.SLEEP_TIME)  # avoid a tight error loop

    async def startAnalyzeUrl(self):
        """Spawn URL_WORKERS analyzeUrl worker coroutines and wait for them."""
        try:
            tasks = [self.analyzeUrl() for _ in range(wikipedia.URL_WORKERS)]
            await asyncio.gather(*tasks)
        except Exception as e:
            self.logging.error("startAnalyzeUrl error {}".format(e))

    def initializeCategoryList(self):
        """Seed the category table with the START_URL entry page."""
        initCat = modelCategory(title='Init Cate', link=wikipedia.START_URL)
        self.datastore.add(initCat)

    @costCount
    def categoryInSoup(self, soup):
        """Return ``[{'title': ..., 'link': ...}]`` for category links in *soup*."""
        return [{'title': i.contents[0], 'link': i['href']} for i in soup
                if 'stub' not in i['href']  # skip stub pages
                and i['href'].startswith('/wiki/Category:')]

    @costCount
    def linkInSoup(self, soup):
        """Return ``[{'title': ..., 'link': ...}]`` for article links in *soup*."""
        return [{'title': i.contents[0], 'link': i['href']} for i in soup
                if i.has_attr('title')
                and '/wiki/Portal' not in i['href']  # skip portal front pages, e.g. /wiki/Portal:Computer_science
                and 'stub' not in i['href']  # skip stub pages
                and i['href'].startswith('/wiki/')]

    @costCount
    def analyzeHtmlForLink(self, html):
        """Parse *html* and store its category and article links in the datastore."""
        soup = BeautifulSoup(html, features='lxml')
        res = soup.select(wikipedia.LINK_SCHEMA)

        catList = [modelCategory(title=item['title'], link=item['link'])
                   for item in self.categoryInSoup(res)]
        linkList = [modelLink(title=item['title'], link=item['link'])
                    for item in self.linkInSoup(res)]

        self.logging.info('start batchAdd ')
        self.datastore.batchAdd(catList)
        ret = self.datastore.batchAdd(linkList)
        if ret:
            statistics.addwikiPreAnalyzeLinkCount(len(linkList))

    @costCount
    def deleteSoupTag(self, soup, tags):
        """Remove every element matching a selector in *tags* from *soup* in place."""
        for tag in tags:
            for s in soup.select(tag):
                s.extract()

    async def startAnalyzeHtml(self):
        """Spawn TEXT_WORKERS AnalyzeHtml worker coroutines and wait for them."""
        try:
            tasks = [self.AnalyzeHtml() for _ in range(wikipedia.TEXT_WORKERS)]
            await asyncio.gather(*tasks)
        except Exception as e:
            self.logging.error("startAnalyzeHtml error{}".format(e))

    async def AnalyzeHtml(self):
        """Text worker: pop an article link, download it, and extract its text.

        Fix: the try/except now lives INSIDE the loop, so a transient error
        no longer kills the worker permanently.
        """
        while True:
            try:
                if await self.scheduleText():
                    self.logging.info('scheduling text')
                    continue

                link = self.datastore.top(modelLink)
                if not link:
                    self.logging.warning('AnalyzeHtml no link')
                    await asyncio.sleep(wikipedia.SLEEP_TIME)
                    continue

                statistics.decwikiPreAnalyzeLinkCount(1)

                curLink = wikipedia.BASE_URL + link.link
                async with aiohttp.ClientSession() as session:
                    html = await self.downloadHtml(session, curLink)

                self.logging.info('start html for text = {}'.format(curLink))
                await self.analyzeHtmlForWords(curLink, html)
            except Exception as e:
                self.logging.error('analyzeHtml exception {},{}'.format(type(e), e))
                await asyncio.sleep(wikipedia.SLEEP_TIME)  # avoid a tight error loop

    @costCount
    async def analyzeHtmlForWords(self, link, html):
        """Extract and normalize the English text of an article page, storing
        the result and a done-marker for *link*.

        Fix: paragraph texts are now joined with a space separator; previously
        they were concatenated directly, fusing the last word of one paragraph
        with the first word of the next.
        """
        wordCntPerLoop = 30  # words standardized per scheduling slice

        soup = BeautifulSoup(html, features='lxml')
        self.deleteSoupTag(soup, wikipedia.IGNORE_TAGS)
        res = soup.select(wikipedia.TEXT_SCHEMA)

        strList = ' '.join(' '.join(s.strings) for s in res).split(' ')

        # standardizedWords is CPU bound; process the words in slices of
        # wordCntPerLoop and yield to the event loop between slices so this
        # coroutine does not starve the other workers.
        parts = []
        cycle = (len(strList) // wordCntPerLoop) + 1
        for i in range(cycle):
            subStrList = strList[wordCntPerLoop * i: wordCntPerLoop * (i + 1)]
            parts.append(wordStandardized.standardizedWords(' '.join(subStrList)))
            await asyncio.sleep(wikipedia.CYCLE_GAP)
        result = ''.join(parts)

        self.datastore.add(modelHtmlText(site=wikipedia.SITE, text=result))
        self.datastore.add(modelDoneLink(link=link))
        statistics.addwikiPreAnalyzeHtmlCount(1)

    async def start(self, tasks=None):
        """Deprecated: entry-point management has moved to the task module.

        Starts the wikipedia analysis pipeline:
          initializeCategoryList   -- seed the category table from START_URL
          startAnalyzeUrl          -- BFS the category tree for article links
          startAnalyzeHtml         -- download articles and extract clean text
          statistics.asyncShowstat -- periodic statistics reporting

        :param tasks: optional extra awaitables to run alongside the pipeline.

        Fixes: *tasks* no longer uses a mutable default argument, and the
        synchronous initializeCategoryList is called directly instead of being
        handed to asyncio.gather, which requires awaitables and would have
        raised a TypeError on the None it returns.
        """
        self.initializeCategoryList()
        selfTasks = [self.startAnalyzeUrl(), self.startAnalyzeHtml(),
                     statistics.asyncShowstat()]  # this module's coroutine tasks
        if tasks:
            selfTasks.extend(tasks)

        await asyncio.gather(*selfTasks)
        
    
   
def main():
    """Placeholder CLI entry point; real startup lives in the task module."""


if __name__ == '__main__':
    main()