import asyncio
import urllib3
import aiofile
from lxml import etree
from aiohttp import ClientSession, client_exceptions

try:
    # Prefer uvloop's faster event-loop implementation when it is installed;
    # silently fall back to the default asyncio policy otherwise.
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
    # uvloop is optional — nothing to do without it.
    pass


class BaiduKeywordSearch():
    """Asynchronously search Baidu for a keyword, follow every result link,
    and append one tab-separated line per result to ``result.txt``.
    """

    def __init__(self):
        # Baidu web-search endpoint; query parameters are supplied per request.
        self.search_url = "http://www.baidu.com/s?"
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36', 
        }
        # NOTE(review): never read or written elsewhere in this file; kept for
        # backward compatibility — confirm before removing.
        self.page_stop = False

    async def fetch(self, session, params):
        """GET the search endpoint with *params*.

        Returns the response body as text, or None on any non-200 status —
        callers must handle the None case.
        """
        async with session.get(url=self.search_url, params=params) as resp:
            print(resp.url, resp.status)
            if resp.status == 200:
                return await resp.text()
            return None  # explicit: the request failed, there is no HTML
    
    async def get_pages(self, html):
        """Parse the pager of a result page and return the total page count (>= 1)."""
        last_page = etree.HTML(html).xpath('//*[@id="page"]/div[@class="page-inner"]/a[last()-1]/span[@class="pc"]/text()')
        # No pager element means there is only a single page of results.
        return 1 if len(last_page) == 0 else int(last_page[0])

    async def parser(self, html):
        """Extract result links and titles from a search page, then scrape each link.

        Returns a list of item dicts; entries whose request failed are dropped.
        Accepts None (a failed fetch) and returns an empty list in that case.
        """
        if html is None:    # fetch() returns None on a non-200 response
            return []
        tree = etree.HTML(html)
        links = tree.xpath("//*[@srcid='1599']/h3[@class='t']/a/@href")    # result-entry links
        # Title of each result entry; xpath positional predicates are 1-based.
        titles = [
            ''.join(tree.xpath(f"//*[@srcid='1599'][{i}]/h3[@class='t']/a//text()"))
            for i in range(1, len(links) + 1)
        ]

        # Fetch the details of every entry concurrently.
        async with ClientSession(headers=self.headers) as r_session:
            tasks = [
                asyncio.create_task(self.parse_item(r_session, link, title))
                for link, title in zip(links, titles)
            ]
            items = await asyncio.gather(*tasks)

        return [item for item in items if item is not None]

    async def parse_item(self, session, url, title):
        """Follow one result link and build its metadata dict.

        Returns a dict with keys title/url/domain/host/server, or None when
        the request times out or the connection fails (best-effort scraping).
        """
        try:
            item = {'title': title}
            async with session.get(url, timeout=3, verify_ssl=False) as resp:
                item['url'] = str(resp.url)     # final URL after redirects
                item['domain'] = urllib3.get_host(str(resp.url))[1]     # host part of the URL
                # Resolve the host address without blocking the event loop.
                item['host'] = (await asyncio.get_running_loop().getaddrinfo(host=item['domain'], port='http'))[-1][-1][0]
                item['server'] = resp.headers.get('server', '')     # server header, if present
                try:
                    html = await resp.text(errors='ignore')     # ignore decoding errors
                    # Prefer the page's own <title>, whitespace collapsed.
                    item['title'] = ''.join(etree.HTML(html).xpath('/html/head/title/text()')[0].split())
                except (IndexError, AttributeError):
                    item['title'] = ''
            print(item['url'], item['title'])
            return item
        except (asyncio.TimeoutError, client_exceptions.ClientConnectorError):
            # Best-effort: unreachable or slow hosts are skipped (caller filters None).
            return None

    async def writer(self, items):
        """Append each item as one tab-separated line to result.txt."""
        async with aiofile.async_open('result.txt', 'a') as afp:
            for item in items:
                await afp.write('\t'.join(item.values()) + '\n')

    async def run(self, keyword):
        """Search *keyword* on Baidu, scrape every result page, persist the items."""
        # Step 1: fetch the keyword's first result page.
        async with ClientSession(headers=self.headers) as session:
            params = {
                'wd': keyword, 
                'rn': 50,   # results per page
                'pn': 0,    # result offset: (page - 1) * rn
                'ie': 'utf-8' 
            }
            # The first page is needed both for its items and for the page count.
            html = await self.fetch(session, params)
            if html is None:    # search request failed — nothing more to do
                return
            items = await self.parser(html)
            await self.writer(items)

            # Step 2: fetch the remaining pages concurrently (from page 2 on).
            pages = await self.get_pages(html)
            tasks = []
            for page in range(2, pages + 1):
                page_params = dict(params, pn=(page - 1) * 50)   # only the offset varies
                tasks.append(asyncio.create_task(self.fetch(session, page_params)))
            htmls = await asyncio.gather(*tasks)

            # Step 3: parse each page and write its items sequentially so
            # output stays grouped per page; parser() tolerates failed (None) fetches.
            for page_html in htmls:
                items = await self.parser(page_html)
                await self.writer(items)


def main():
    """Run a sample search for 'python' and write the results to result.txt."""
    async def _main():
        await BaiduKeywordSearch().run(keyword='python')
        # Grace period so aiohttp can close its SSL transports cleanly
        # before the loop shuts down.
        await asyncio.sleep(0.250)

    # asyncio.run() creates, runs, and closes the loop — replaces the
    # deprecated get_event_loop()/run_until_complete() pattern.
    asyncio.run(_main())


if __name__ == '__main__':
    main()
