# -*- coding: utf-8 -*-
# @Time    : 2018/12/4 14:43
import os
import sys

path = os.path.abspath(os.path.dirname(os.getcwd()))
sys.path.append(path)
import re
from urllib.parse import unquote,quote
from time import sleep
from selenium import webdriver
from lxml.html import etree
class shishi(object):
    """Scrape Google search result pages with Selenium and extract result links.

    NOTE(review): names are pinyin — "shishi" ~ "test/try", "houzhu(i)" ~
    "suffix", "xiabiao" ~ "index", "meiyou" ~ "none". Kept for backward
    compatibility with existing callers.
    """

    def __init__(self) -> None:
        super().__init__()
        # Success flag for open_firefox(); it was previously only assigned on
        # failure, so reading it after a successful open raised AttributeError.
        self.permit = True

    def find_yuan(self, url):
        """Open *url* in Chrome and print result links from up to two pages.

        Assumes a (2018-era) Google results layout: results under
        div#res//div.g. Prints the current URL, the per-page result count and
        the last result's href; stops early when a page has < 100 results or
        the "next page" button is missing.
        """
        self.open_firefox(url)
        for i in range(2):
            if i == 1:
                # Second pass: advance to the next result page; if the
                # pagination button is missing (last page), stop.
                try:
                    more_page_btn = self.browser.find_element_by_xpath('//a[@class="pn"]')
                    more_page_btn.click()
                except Exception:
                    return
            print(self.browser.current_url)
            page_sour = self.browser.page_source
            con_et = etree.HTML(page_sour)
            result_s = con_et.xpath('//div[@id="res"]//div[@class="g"]')
            res_num = len(result_s)
            print(res_num)

            for xiabiao, nr in enumerate(result_s):
                # Primary link lives in div.r/a/@href; fall back to the
                # visible cite text. xpath() returns a list, so a missing
                # node surfaces as IndexError — catch exactly that instead
                # of a bare except.
                try:
                    new_href = nr.xpath('.//div[@class="r"]/a/@href')[0]
                except IndexError:
                    new_href = nr.xpath('.//cite[@class="iUh30"]/text()')[0]
                # Extracted but currently unused downstream — kept as-is;
                # presumably intended for later storage. TODO confirm.
                update_logo = nr.xpath('.//span[@class="sFZIhb b w xsm"]')
                title_s = nr.xpath('.//h3[@class="LC20lb"]//text()')
                title = "".join(title_s)
                content_s = nr.xpath('.//span[@class="st"]//text()')
                summary = self.repl("".join(content_s))
                if xiabiao == (res_num - 1):
                    print(new_href)
            if res_num < 100:
                return

    def open_firefox(self, url):
        """Launch Chrome behind a proxy and load *url*, retrying on failure.

        Retries with exponential backoff (1s, 2s, 4s, ...); once the wait
        would reach >= 628s it gives up and sets ``self.permit = False``.
        (Name says "firefox" but it drives Chrome — kept for compatibility.)
        """
        # Chrome's --proxy-server flag takes a single scheme://host:port
        # string, not a requests-style {'http': ...} mapping.
        proxy = 'http://yx827w:yx827w@123.249.47.16:888'
        wait_time = 1
        while True:  # keep trying until the browser opens or we give up
            try:
                profile = webdriver.ChromeOptions()
                # profile.add_argument('--headless')  # run without a window
                # BUG FIX: original passed "'--proxy-server={dict}" — a stray
                # leading quote plus the whole dict — which Chrome ignores.
                profile.add_argument('--proxy-server={}'.format(proxy))
                self.browser = webdriver.Chrome(chrome_options=profile)
                self.browser.get(url)
                break
            except Exception as e:
                print('尝试打开浏览器失败，重新尝试！Error information:', e, '\t[%s]s latter,try connection again!' % wait_time)
                # Best effort: close any half-started browser before retrying.
                try:
                    self.browser.quit()
                except Exception:
                    pass
                sleep(wait_time)
                wait_time <<= 1  # exponential backoff
                if wait_time >= 628:
                    print('This file can\'t cralw!')
                    self.permit = False
                    return

    def repl(self, text):
        """Remove newline/tab/CR and invisible-space characters from *text*.

        Returns *text* unchanged when it is not a string (re.sub raises
        TypeError for non-str input, e.g. None).
        """
        try:
            new_text = re.sub(r"[\n\t\r\u3000\xa0\u2002]", "", text).strip()
            return new_text
        except TypeError:
            return text

    def find_houzhu(self, url_link):
        """Return the extension-like suffix after the last '.' of *url_link*.

        Returns "meiyou" ("none") when the suffix is longer than 4 chars,
        contains a path separator, or contains "com" (i.e. it is a domain,
        not a file extension).
        """
        houzhu = str(url_link).split(".")[-1]
        if len(houzhu) > 4:
            return "meiyou"
        if houzhu.find("com") > -1 or houzhu.find("/") > -1:
            return "meiyou"
        return houzhu

if __name__ == '__main__':
    # Manual smoke check of suffix extraction against a sample download URL
    # (no extension in the path, so "meiyou" is expected).
    sample_url = ("http://www.bidding.csg.cn/filesrv/srv/file/download/"
                  "1200089413/e720e04f477a45b78bcadf6cc65af9e6")
    print(shishi().find_houzhu(sample_url))
    houzhui = "docx"
    # str.find returns 0 here: "doc" is a prefix of "docx".
    print(houzhui.find("doc"))


