# -*- coding: utf-8 -*-
"""
@Time ： 2021/12/27 9:18
@Auth ： quanjie
@File ：crawler.py
@IDE ：PyCharm

"""

import asyncio
import logging
import time

import pyppeteer
from pyppeteer import launch

from quanjie_spider.base.basecrawler import BaseCrawler
from quanjie_spider.utils.useragent import getheaders


class Crawler(BaseCrawler):
    """pyppeteer-based Google-search crawler.

    For every (``authfull``, ``inst_name``) row in ``data.csv`` it types the
    query into Google, waits for results, and appends the first result link
    to ``res.csv``.  Finished queries are recorded in ``crawled.txt`` so an
    interrupted run can resume without repeating work.
    """
    logger = logging.getLogger("crawler.request")
    logger.setLevel("DEBUG")

    def run(self):
        """Entry point (BaseCrawler contract): launch the crawl against Google."""
        self.logger.info("google search")
        Crawler.factoryMain("https://www.google.com/")

    @staticmethod
    def _load_crawled(path="crawled.txt"):
        """Return the set of already-crawled queries, one per line of *path*.

        Missing file means a fresh run, so an empty set is returned instead
        of crashing (the original raised FileNotFoundError on first run).
        """
        done = set()
        try:
            with open(path, mode='r', encoding='gbk') as fr:
                for line in fr:
                    done.add(line.strip())
        except FileNotFoundError:
            pass  # first run: nothing crawled yet
        return done

    @staticmethod
    async def main(url, timeout=60, headless=True):
        """Open *url* in Chrome and process every pending query from data.csv.

        :param url: search-engine landing page (Google).
        :param timeout: navigation timeout in seconds.
        :param headless: run the browser without a visible window.  BUG FIX:
            the original unconditionally overwrote this with ``False``, so
            the parameter was dead; the caller's choice is now respected.
        :return: HTML content of the landing page after initial navigation.
        """
        width, height = 1366, 768
        browser = await launch(headless=headless, handleSIGINT=False,
                               handleSIGTERM=False, handleSIGHUP=False,
                               options={'args': ['--no-sandbox', '--log-level=30', '--start-maximized']},
                               args=[f'--window-size={width},{height}'])

        # Fresh tab with a randomized user agent and a real-looking viewport.
        page = await browser.newPage()
        await page.setUserAgent(getheaders())
        await page.setViewport({'width': width, 'height': height})

        try:
            await page.goto(url, {'timeout': 1000 * timeout})
            # Mask the automation flag so navigator.webdriver reads false;
            # reduces the chance Google serves a bot challenge.
            await page.evaluate("""
                        () =>{
                            Object.defineProperties(navigator,{
                                webdriver:{
                                get: () => false
                                }
                            })
                        }
                    """)
        except pyppeteer.errors.TimeoutError:
            # Original logged the opaque marker "293"; say what happened.
            Crawler.logger.info("initial navigation to %s timed out after %ss", url, timeout)

        doc = await page.content()
        crawl_set = Crawler._load_crawled()
        # Local import: pandas is only needed for the CSV bookkeeping below.
        import pandas as pd
        try:
            with open("crawled.txt", mode='a+') as fw:
                pds = pd.read_csv('data.csv', encoding='gbk', header=0)
                for i, j in pds.iterrows():
                    time.sleep(0.001)  # tiny politeness delay between queries
                    query = j['authfull'] + " " + j['inst_name']
                    Crawler.logger.debug("query #%s: %s", i, query)
                    if query in crawl_set:
                        Crawler.logger.debug("已近爬取过")
                        continue

                    try:
                        try:
                            # Click Google's "clear" (X) button to empty the
                            # search box left over from the previous query.
                            await page.click("#tsf > div:nth-child(1) > div.A8SBwf > div.RNNXgb > div.GeTMDd > div > div.dRYYxd > div.BKRPef.M2vV3")
                        except Exception:
                            # Best effort: button absent on the landing page.
                            Crawler.logger.debug("清理输入框失败")
                        await page.type("input[name='q']", query, {'delay': 1})
                        await page.keyboard.press('Enter')
                        await page.waitForXPath('//div[@class="g"]')
                        # First organic result link.
                        elem_list = await page.xpath('//div[@id="search"]//div[@class="g"]//a')
                        href = await (await elem_list[0].getProperty("href")).jsonValue()
                        Crawler.logger.debug("href: %s", href)

                        pd.DataFrame({
                            "no": [i],
                            "authfull": [j['authfull']],
                            "inst_name": [j['inst_name']],
                            "href": [href],
                        }).to_csv("res.csv", mode='a', index=False, header=False)
                        # Persist progress immediately so a crash can resume here.
                        fw.write(query + "\n")
                        fw.flush()
                    except Exception:
                        # Log the traceback instead of silently printing and
                        # moving on (original used a bare except + print).
                        Crawler.logger.exception("跳过 %s", query)
        finally:
            try:
                await browser.close()
            except Exception:
                pass  # browser may already be gone; nothing more to do

        return doc

    @staticmethod
    def factoryMain(url, timeout=60, headless=True):
        """Run :meth:`main` to completion on a fresh event loop.

        A new loop is created per call (safe from non-main threads), and —
        unlike the original — it is closed even when :meth:`main` raises.

        :return: the HTML document returned by :meth:`main`.
        """
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            return loop.run_until_complete(Crawler.main(url, timeout=timeout, headless=headless))
        finally:
            loop.close()


if __name__ == "__main__":
    # Script entry point: build a crawler instance and start the Google crawl.
    crawler = Crawler()
    crawler.run()