# -*- coding: utf-8 -*-
"""
@Time ： 2021/12/27 16:12
@Auth ： quanjie
@File ：crawler2.py
@IDE ：PyCharm

"""
import logging
import os
import queue
import threading
import time
from concurrent.futures.thread import ThreadPoolExecutor
from random import random

import requests
from bs4 import BeautifulSoup
import pandas as pd

from quanjie_spider.base.basecrawler import BaseCrawler
from quanjie_spider.utils.useragent import getheaders


class Crawler(BaseCrawler):
    """Multithreaded Google-search crawler.

    Producer/consumer pipeline:
      * ``run``   reads ``data.csv`` and feeds work items into ``queue_list``;
      * ``crawl`` workers pull items, fetch the first Google result for each
        query, and push a one-row record onto ``queue_res``;
      * ``after`` drains ``queue_res``, appending rows to ``res.csv`` and
        recording finished queries in ``crawled.txt`` so reruns skip them.

    NOTE(review): the worker loops never terminate, so the ThreadPoolExecutor
    in ``run`` blocks forever once the input is exhausted — this mirrors the
    original long-running design and is left unchanged.
    """
    # Directory containing this file; all data/result files live beside it.
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    logger = logging.getLogger()
    # logger.setLevel("DEBUG")
    logger.setLevel("INFO")
    # Work items produced by run() and consumed by the crawl() workers.
    queue_list = queue.Queue(maxsize=100)
    # Parsed results produced by crawl() and consumed by after().
    queue_res = queue.Queue(maxsize=100)

    def proxyIp(self) -> str:
        """Return a random ``host:port`` proxy from a hard-coded pool.

        Currently unused by :meth:`crawl` (the proxied request is commented
        out there); kept for manual proxy experiments.
        """
        ips = [
            '107.152.47.188:3128',
            '5.252.177.148:3128',
            '149.19.224.15:3128',
            '104.168.141.178:8089',
            '139.99.99.165:8000',
            '12.31.246.5:8080',
            '98.12.195.129:443',
            '104.149.146.170:81',
            '5.252.177.149:3128',
            '198.12.65.175:35036',
            '74.208.156.189:80',
            '139.99.99.165:3128',
            '50.251.229.185:80',
            '20.47.108.204:8888',
            '149.19.224.36:3128',
            '51.81.155.78:3128',
            '139.99.99.165:8080',
            '20.81.62.32:3128',
            '5.252.177.199:3128',
            '140.227.211.47:8080',
            '130.185.119.20:3128',
            '169.45.84.215:3128',
            '66.94.120.161:443',
        ]
        # BUG FIX: the module-level ``from random import random`` binds the
        # random() *function* to the name ``random``, so the original
        # ``random.choice(ips)`` raised AttributeError.  Import the module
        # locally under an alias to get choice().
        import random as _random
        return _random.choice(ips)

    def after(self):
        """Result-writer loop: persist each scraped record, mark query done.

        Runs forever, blocking on ``queue_res``.  Each ``line`` is a dict of
        single-element lists (``no``/``authfull``/``inst_name``/``href``)
        built by :meth:`crawl`, i.e. exactly one DataFrame row.
        """
        while True:
            line = self.queue_res.get()
            logging.info(f"after{line}")
            # Rebuild the query key exactly as run() builds it, so the
            # crawled.txt entry matches the dedup check on the next run.
            query = line['authfull'][0] + " " + line['inst_name'][0]
            df = pd.DataFrame(line)
            # Append without header: res.csv accumulates rows across runs.
            df.to_csv(self.BASE_DIR + "/res.csv", mode='a', index=False, header=False)
            with open(self.BASE_DIR + "/crawled.txt", mode='a+', encoding='utf-8') as fw:
                fw.write(query + "\n")
                fw.flush()

    def run(self):
        """Entry point: load the dedup set, start workers, enqueue the work.

        Reads previously-crawled queries from ``crawled.txt``, spawns two
        fetch workers plus one writer, then streams ``data.csv`` rows into
        ``queue_list`` (the bounded queue applies natural back-pressure).
        """
        crawl_set = set()
        logging.info("读取数据")
        try:
            with open(self.BASE_DIR + "/crawled.txt", mode='r', encoding='utf-8') as fr:
                for line in fr:
                    crawl_set.add(line.strip())
        except FileNotFoundError:
            # First run: nothing crawled yet (after() will create the file).
            pass

        with ThreadPoolExecutor(max_workers=10) as executor:
            logging.info("*******************************创建线程***************************")
            # Two fetch workers and one result writer; all loop forever.
            executor.submit(self.crawl)
            executor.submit(self.crawl)
            executor.submit(self.after)

            logging.debug("读取数据51")

            pds = pd.read_csv(self.BASE_DIR + '/data.csv', encoding='gbk', header=0)
            for i, j in pds.iterrows():
                query = j['authfull'] + " " + j['inst_name']
                if query in crawl_set:
                    logging.debug("已近爬取过")
                    continue
                self.queue_list.put({
                    "i": i,
                    "j": j,
                    "query": query,
                })

    def crawl(self):
        """Fetch-worker loop: pull queries and scrape the first Google hit.

        Blocks on ``queue_list``.  Every 10th item sleeps 1.5 s to throttle
        request rate.  A successful scrape pushes a one-row record onto
        ``queue_res``; failures are logged and the loop continues.
        """
        count = 0
        while True:
            count += 1
            if count % 10 == 0:
                # Throttle: brief pause every ten requests.
                logging.info(f"{threading.current_thread().name}-sleep")
                time.sleep(1.5)
            try:
                task = self.queue_list.get()
                i = task['i']
                j = task['j']
                query = task['query']
                logging.info(f"{i}-{query}")
                print(f"{i}-{query}")
                # Build the Google search request.
                query = query.replace(' ', '+')
                URL = f"https://google.com/search?q={query}"
                headers = {"user-agent": getheaders()}
                headers['referer'] = 'https://www.google.com/'
                headers['accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
                # resp = requests.get(URL, headers=headers,proxies=proxyip,verify=False) # proxyip
                resp = requests.get(URL, headers=headers, timeout=60)
                logging.debug(resp.content)
                if resp.status_code == 200:
                    soup = BeautifulSoup(resp.content, "lxml")
                    hit = soup.select_one("div#search div.g")
                    # Google may return a page without the expected result
                    # markup (captcha / layout change); guard against None
                    # instead of crashing on attribute access.
                    if hit is None or hit.select_one("a") is None:
                        logging.warning(f"{URL} 爬取失败")
                        continue
                    href = hit.select_one("a")['href']
                    logging.info(href)
                    res = {
                        "no": [i],
                        "authfull": [j['authfull']],
                        "inst_name": [j['inst_name']],
                        "href": [href],
                    }
                    self.queue_res.put(res)
                else:
                    logging.warning(f"{URL} 爬取失败")

                print()
            except Exception:
                # Was a bare ``except`` that logged the opaque string "159";
                # record the actual traceback so failures are diagnosable.
                logging.exception("crawl worker error")


if __name__ == "__main__":
    # Script entry point: build the crawler and start the pipeline.
    crawler = Crawler()
    crawler.run()
