# -*- coding: utf-8 -*-
"""
@Time ： 2021/12/27 16:12
@Auth ： quanjie
@File ：crawler2.py
@IDE ：PyCharm

"""
import gzip
import logging
import os
import random
import time
import traceback

import pandas as pd
from bs4 import BeautifulSoup

from quanjie_spider.base.basecrawler import BaseCrawler
from quanjie_spider.base.baserequests import BaseRequests


class Crawler(BaseCrawler, BaseRequests):
    """Single-threaded Google-search crawler.

    Reads ``(authfull, inst_name)`` rows from ``data.csv``, issues one Google
    search per row, appends the first organic-result link to ``res.csv`` and
    records each finished query in ``crawled.txt`` so a rerun resumes where it
    left off.

    NOTE: with no ``__init__`` defined here, Python's MRO resolves the
    inherited constructor from the bases in declaration order (only one runs).
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    def run(self):
        """Iterate over data.csv and crawl every query not yet seen.

        Side effects: appends to ``crawled.txt`` (progress ledger) and, via
        :meth:`crawl`, to ``res.csv``. Failures on a single row are logged
        and the loop continues with the next row.
        """
        crawled_path = os.path.join(self.BASE_DIR, "crawled.txt")

        # Load previously crawled queries so a rerun can skip them.
        # Guard against the first run, when the ledger does not exist yet
        # (the original unconditional open(mode='r') crashed in that case).
        crawl_set = set()
        if os.path.exists(crawled_path):
            with open(crawled_path, mode='r', encoding='utf-8') as fr:
                for line in fr:
                    crawl_set.add(line.strip())

        with open(crawled_path, mode='a+', encoding='utf-8') as fw:
            pds = pd.read_csv(os.path.join(self.BASE_DIR, "data.csv"),
                              encoding='gbk', header=0)
            for count, (i, j) in enumerate(pds.iterrows(), start=1):
                query = j['authfull'] + " " + j['inst_name']

                if query in crawl_set:
                    # Already crawled in a previous run.
                    continue

                # Randomized politeness delay; every 5th row waits longer
                # to reduce the chance of being rate-limited.
                if count % 5 == 0:
                    time.sleep(random.randint(2, 10))
                else:
                    time.sleep(random.randint(1, 5))
                logging.debug((i, j))
                logging.debug(query)

                try:
                    self.crawl(i, j, query)
                    # Record success immediately so a crash loses no progress.
                    fw.write(query + "\n")
                    fw.flush()
                except Exception:
                    # BUG FIX: was `logging.error(traceback.print_exc())`,
                    # which logged the literal string "None" (print_exc()
                    # writes to stderr and returns None). logging.exception
                    # captures the full traceback in the log record.
                    logging.exception("crawl failed for query: %s", query)

    def crawl(self, i, j, query):
        """Search Google for *query* and append the first result to res.csv.

        :param i: row index from the source DataFrame (written as "no").
        :param j: the row itself; ``authfull`` and ``inst_name`` are re-read.
        :param query: "authfull inst_name" search string.
        :raises Exception: on a non-200 response or a page with no result
            item, so the caller does not mark the query as crawled.
        """
        # Desktop user-agent; Google serves the classic HTML layout to it.
        USER_AGENT = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) "
                      "Gecko/20100101 Firefox/65.0")
        query = query.replace(' ', '+')
        URL = f"https://google.com/search?q={query}"
        headers = {
            "user-agent": USER_AGENT,
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "referer": 'https://www.google.com/',
        }
        logging.info(URL)
        status_code, resp = self.get(URL, headers=headers, timeout=60)
        if status_code != 200:
            raise Exception("状态非200")

        # BUG FIX: parse the response object returned by self.get() instead
        # of the self.response side-state it previously ignored (presumably
        # the same object — confirm against BaseRequests.get).
        soup = BeautifulSoup(resp.content, "lxml")
        item = soup.select_one("div#search div.g")
        if item is None:
            # No organic result container — raise a clear error instead of
            # the opaque AttributeError the old code produced here.
            raise Exception("no search result item found")
        logging.debug(item.select_one("a"))
        href = item.select_one("a")['href']
        logging.info(href)
        res = {
            "no": [i],
            "authfull": [j['authfull']],
            "inst_name": [j['inst_name']],
            "href": [href],
        }
        # Append-only CSV; header is written by whoever creates the file.
        pd.DataFrame(res).to_csv(os.path.join(self.BASE_DIR, "res.csv"),
                                 mode='a', index=False, header=False)


if __name__ == "__main__":
    # Script entry point: execute() comes from a base class (BaseCrawler or
    # BaseRequests) — presumably it drives run(); confirm in the base module.
    Crawler().execute()
