# -*- coding: utf-8 -*-
# @Time    : 2018/9/7 11:31
import threading
import os
path = os.path.abspath(os.path.dirname(os.getcwd()))
import sys
sys.path.append(path)
import requests
import pymongo,re
import config,log
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from urllib.parse import unquote
from hashlib import md5
from time import sleep
from collections import OrderedDict
from lxml.html import etree
from multiprocessing import Process, Queue, Pool

new_url_google = config.new_li_db_google
li_db_words = config.li_db_words
log_google = log.Log("google")
class BiYing(object):
    """Crawl Google search results for keywords stored in MongoDB.

    Workflow: atomically claim an unprocessed keyword from ``li_db_words``,
    run ``filetype:doc|pdf|ppt`` Google searches for it through a
    Selenium-driven Chrome browser, parse each result page with lxml and
    store every hit in the ``new_url_google`` collection.
    """

    def __init__(self) -> None:
        # self.pro = self.get_pro()  # proxy pool is now fetched lazily in proce()
        self.num = 0        # consecutive zero-result searches (abort at 3)
        self.permit = True  # set to False when the browser cannot be opened

    def get_pro(self):
        """Return the proxy pool as a list of ``{"http": ip}`` dicts read
        from the ``proxies_ip`` collection."""
        proxy_coll = config.con_proxy['proxies_ip']
        return [{"http": doc['ip']} for doc in proxy_coll.find()]

    def open_firefox(self, url, proxy):
        """Open Chrome (method name is historical) and load *url*,
        retrying with exponential back-off on failure.

        *proxy* is currently unused — the proxy-server argument is
        commented out below. On success ``self.browser`` holds the driver;
        after the back-off exceeds ~10 minutes, sets ``self.permit = False``
        and returns without a browser.
        """
        # proxy = {'http': 'http://yx827w:yx827w@123.249.47.16:888'}
        wait_time = 1
        while True:  # keep retrying until the page has been opened
            try:
                profile = webdriver.ChromeOptions()
                prefs = {
                    'profile.default_content_setting_values': {
                        'images': 2,      # 2 = block images (faster page loads)
                        'javascript': 2,  # 2 = block javascript
                        # 'css': 2
                    }}
                profile.add_experimental_option('prefs', prefs)
                # profile.add_argument('--headless')  # run without a visible window
                # profile.add_argument("--proxy-server={}".format(proxy))
                self.browser = webdriver.Chrome(chrome_options=profile)
                # BUG FIX: the original overwrote *url* with a hard-coded
                # Baidu page and clicked Baidu-specific elements (debug
                # leftover), which broke find_one(); load the caller's URL.
                self.browser.get(url)
                break
            except Exception as e:
                print('尝试打开浏览器失败，重新尝试！Error information:', e, '\t[%s]s latter,try connection again!' % wait_time)
                try:
                    self.browser.quit()
                except Exception:
                    pass
                sleep(wait_time)
                wait_time <<= 1  # exponential back-off: 1, 2, 4, 8, ...
                if wait_time >= 628:  # give up after ~10 minutes of retries
                    print('This file can\'t cralw!')
                    self.permit = False
                    return

    def find_one(self, data, proxies, word_type):
        """Search Google for ``data['q']``, parse up to two result pages
        and store every hit via :meth:`insert_mongo_google`.

        data: dict with 'q' (query string) and 'num' (results per page).
        proxies: forwarded to open_firefox() (currently unused there).
        word_type: file-type tag ('doc' / 'pdf' / 'ppt') saved with each hit.

        Exits the whole process when Google serves its /sorry/ captcha page
        or when three consecutive searches came back empty (``self.num``),
        both of which indicate the crawler has been blocked.
        """
        # Build the search URL; the ei/ved tokens were captured from a real
        # browser session.
        url = "https://www.google.com/search?q=" + str(data['q']) + "&num=" + str(data[
            'num']) + "&ei=VjAGXOnVBcuy0PEPiMCM2AQ&start=0&sa=N&ved=0ahUKEwjp2Lb41oXfAhVLGTQIHQggA0s4ZBDw0wMIpAc&biw=852&bih=770"
        self.open_firefox(url, proxies)
        for x in range(2):  # first page, then one "next page" click
            if x == 1:
                try:
                    more_page_btn = self.browser.find_element_by_xpath('//a[@class="pn"]')
                    more_page_btn.click()
                except Exception:
                    return  # no "next" button -> no more pages
            response_url = self.browser.current_url
            # /sorry/ is Google's captcha wall; three empty result pages in
            # a row most likely mean we are blocked -> stop the process.
            if response_url.find("/sorry/index") > -1 or self.num == 3:
                print("结束程序")
                self.browser.quit()
                sys.exit()
            print(response_url)
            con_et = etree.HTML(self.browser.page_source)
            try:
                result_s = con_et.xpath('//div[@id="res"]//div[@class="g"]')
                res_num = len(result_s)
            except Exception:
                res_num = 0
            print(res_num)
            if res_num == 0:
                self.num += 1  # count consecutive empty searches
                log_google.error("find 0 :%s" % (data['q']))
                return
            self.num = 0  # got results -> reset the empty-search counter
            for xiabiao, nr in enumerate(result_s):
                try:
                    new_href = nr.xpath('.//div[@class="r"]/a/@href')[0]
                except Exception:
                    # fall back to the displayed URL when the anchor is missing
                    new_href = nr.xpath('.//cite[@class="iUh30"]/text()')[0]
                title = "".join(nr.xpath('.//h3[@class="LC20lb"]//text()'))
                summary = self.repl("".join(nr.xpath('.//span[@class="st"]//text()')))
                self.insert_mongo_google(new_href, title, summary, word_type, data['q'])
                if xiabiao == (res_num - 1):
                    print(new_href)
            if res_num < 100:  # short page -> nothing behind the next button
                return

    def insert_mongo_google(self, url, title, summary, word_type, q):
        """Store one search hit; duplicate URLs (Mongo E11000 duplicate-key
        errors on url_md5) are silently skipped."""
        save = OrderedDict()
        save['url'] = url
        save['title'] = title
        save['summary'] = summary
        save['state'] = 0                        # 0 = not yet processed downstream
        save['url_md5'] = self.md5_generator(url)
        save['type'] = word_type                 # file type: doc / pdf / ppt
        save['q'] = q                            # the query that produced this hit
        try:
            new_url_google.insert(save)
        except Exception as e:
            if str(e).find("E11000") > -1:  # duplicate key -> already saved
                return
            print("插入失败：%s" % (e))

    def repl(self, text):
        """Strip newline/tab and common full-width / non-breaking space
        characters from *text*; on any failure (e.g. non-string input)
        return the value unchanged."""
        try:
            return re.sub(r"[\n\t\r\u3000\xa0\u2002]", "", text).strip()
        except Exception:
            return text

    def delete_mongo(self, proxies):
        """Worker loop: claim keywords from ``li_db_words`` one at a time
        and crawl each as doc/pdf/ppt filetype searches until none remain."""
        print("stare : %s    :%s" % (os.getpid(), proxies))
        data = {
            'q': 'filetype:pdf α反义寡核苷酸',
            'num': '100',
        }
        while True:
            try:
                # Atomically claim the next unprocessed keyword.
                # BUG FIX: the original $set wrote state_google back to 0,
                # so the same word was claimed forever; mark it 1 (claimed).
                item = li_db_words.find_and_modify({'state_google': 0}, {'$set': {"state_google": 1}})
                if not item:
                    return  # nothing left to crawl
                name = item['word']
                # Run one search per supported file type.
                for word in ['doc', 'pdf', 'ppt']:
                    data['q'] = 'filetype:%s %s' % (word, name)
                    self.find_one(data, proxies, word)
                    self.browser.quit()
            except Exception as e:
                print(e)
                # biying_log.info("error delete_mongo: %s"%(e))

    def insert_url_text(self, href):
        """Append *href* to the shared url log file."""
        with open('../file/google_href.txt', 'a+') as f:
            f.write(href + "\n")  # the with-block closes the file

    def md5_generator(self, url):
        """Return the hex md5 digest of *url* (used as the dedup key)."""
        return md5(url.encode()).hexdigest()

    def proce(self):
        """Spawn crawler worker processes and wait for them to finish."""
        # BUG FIX: self.pro was never initialised (the get_pro() call in
        # __init__ is commented out), so the original raised AttributeError
        # here; fetch the proxy pool lazily instead.
        if not hasattr(self, 'pro'):
            self.pro = self.get_pro()
        trader = []
        for i in range(1):
            proxies = self.pro[i + 24]  # NOTE(review): assumes >= 25 proxies exist
            pr = Process(target=self.delete_mongo, args=(proxies,))
            sleep(0.5)  # stagger worker start-up
            pr.start()
            trader.append(pr)
        for i in trader:
            i.join()
        print('proce this is pid: %s' % os.getpid())


if __name__ == '__main__':
    # BiYing().proce()
    # NOTE(review): debug invocation — "sss" is passed as both the url and
    # the proxy argument, neither of which is a real value; the production
    # entry point is the commented-out multi-process proce() above.
    path = "sss"
    BiYing().open_firefox(path,path)

