import random
import re
import time
from functools import wraps

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
import validators

import base_settings
from Database import TablesInterface
from Database.init import init as mysql_init
from Operator.init import init as operator_init
from Proxy.Manager import Manager
from Spiders.Instagram import settings
from SMTP.SmtpServer import SmtpServer


class Spider():
    """Google-search driven Instagram outreach crawler.

    Workflow: run a ``site:instagram.com <words>`` Google search, walk the
    result pages, pull e-mail addresses and follower counts out of the
    result snippets, send an outreach mail (via SMTP) to accounts whose
    follower count falls inside the configured window, and persist every
    crawled URL / contacted address / current page number in MySQL so the
    crawl can resume after a restart.
    """

    # Lazily-initialised shared resources (class attributes so a re-created
    # Spider can reuse what is already open).
    driver = None    # selenium Chrome WebDriver
    mysql = None     # DB connection returned by Database.init
    operator = None  # operator record returned by Operator.init
    page = 1         # last Google results page crawled for `search`
    search = None    # full search expression ("site:<site> <words>")

    def __init__(self):
        # ---- start the browser ----
        if self.driver is None:
            # 1. browser configuration
            option = webdriver.ChromeOptions()
            # 1(1) proxy, packaged as a Chrome extension by Proxy.Manager
            proxy = Manager('BotProxy')
            proxy_path = proxy.get()
            print(proxy_path)
            option.add_extension(proxy_path)

            # option.add_argument('--headless')  # headless mode does not work with the extension
            option.add_argument('--profile-directory=Default')
            # Browser profile directory (raw string: '\G' is not a valid escape)
            option.add_argument(r"user-data-dir=" + settings.SPIDER_DRIVER_DATA_DIR + r'Instagram\Google')
            # option.add_argument("blink-settings=imagesEnabled=false")  # skip loading images
            option.add_argument("--start-maximized")
            option.add_argument("--test-type")
            option.add_argument("--ignore-certificate-errors")

            option.add_argument("--disable-popup-blocking")
            option.add_argument("no-sandbox")
            option.add_argument("no-default-browser-check")
            option.add_argument("about:histograms")
            option.add_argument("about:cache")

            # option.add_argument("disable-extensions")
            option.add_argument("disable-glsl-translator")

            option.add_argument("disable-translate")
            option.add_argument("--disable-gpu")  # Chrome docs recommend this to avoid a GPU bug
            # option.add_argument("--disable-dev-shm-usage")
            option.add_argument("--hide-scrollbars")

            # 2. locate chromedriver and launch the browser
            driver_path = base_settings.PB + '/Browser/Drivers/chromedriver_win32/chromedriver.exe'
            self.driver = webdriver.Chrome(executable_path=driver_path, options=option)

        # ---- database connection ----
        if self.mysql is None:
            self.mysql = mysql_init()

        # ---- operator record ----
        if self.operator is None:
            self.operator = operator_init(self.mysql)

    def google_search(self):
        """Run the configured Google search and process the first result page.

        Resumes from the page number stored for this search expression in the
        crawled-words table (inserting a fresh row for a never-seen
        expression), then submits the query and scans the results.
        """
        print('google搜索...')
        self.search = search = r'site:' + settings.SPIDER_SEARCH_SITE + ' ' + settings.SPIDER_SEARCH_WORDS

        print('检测搜索词上次爬取页位置：' + search)
        table = TablesInterface.table_crawled_words_source
        cursor = self.mysql.cursor()
        # Values are bound by the driver (%s placeholders) instead of string
        # formatting; only the table name — which cannot be a parameter — is
        # formatted in.
        # NOTE(review): assumes a DB-API driver with 'format' paramstyle
        # (e.g. PyMySQL / mysqlclient) — confirm against Database.init.
        cursor.execute('select * from `{table}` where `words`=%s'.format(table=table), (search,))
        has_search = cursor.fetchone()
        if has_search is None:
            print('尚未爬取过的搜索词：第一页')
            self.page = 1
            self.mysql.begin()
            try:
                cursor.execute(
                    'insert into `{table}` (`words`,`page`) values(%s,%s)'.format(table=table),
                    (self.search, self.page))
                self.mysql.commit()
            except Exception:
                self.mysql.rollback()
        else:
            # column 2 of the row is the page reached on the previous run
            self.page = has_search[2]
            print('上次爬取位置：第 ' + str(self.page) + ' 页')
        cursor.close()

        print('查询：' + search)
        # self.driver.execute_script('window.open("https://www.google.com","_blank");')  # open a new tab
        # self.driver.switch_to.window(self.driver.window_handles[1])
        self.driver.get(settings.SPIDER_START_URL)
        google_input = WebDriverWait(self.driver, 5).until(
            expected_conditions.presence_of_element_located(
                (By.XPATH, '/html/body/div[1]/div[3]/form/div[1]/div[1]/div[1]/div/div[2]/input')
            )
        )
        google_input.send_keys(search)
        google_input.send_keys(Keys.ENTER)

        self.check_link()
        # NOTE(review): deliberate hard stop after the first page — remove the
        # exit() call to let the crawl continue through self.next().
        exit()
        self.next()

    def next(self):
        """Advance through the remaining Google result pages.

        Each iteration persists the new page number, clicks the pagination
        "next" link, waits for the result container and scans it.
        Implemented as a loop: the previous tail-recursive version would
        exhaust the interpreter stack on a long crawl.
        """
        while True:
            self.page += 1
            # write the page number back so the crawl can resume here
            self.mysql.begin()
            try:
                cursor = self.mysql.cursor()
                cursor.execute(
                    'update `{table}` set `page`=%s where `words`=%s'.format(
                        table=TablesInterface.table_crawled_words_source),
                    (self.page, self.search))
                self.mysql.commit()
                cursor.close()
                print('搜索数据更新成功...')
            except Exception:
                self.mysql.rollback()
                print('搜索数据更新失败...')
            print('进入下一页:' + str(self.page))
            time.sleep(3)
            # click the pagination "next" link
            self.driver.find_element(
                By.XPATH,
                '/html/body/div[7]/div/div[9]/div[1]/div/div[6]/span[1]/table/tbody/tr/td[12]/a').click()
            # randomised wait (to look less bot-like) for the result list
            WebDriverWait(self.driver,
                          random.randint(5, settings.SPIDER_GOOGLE_SEARCH_RAND_WAITE_MAX_TIME_PER_PAGE)).until(
                expected_conditions.presence_of_element_located(
                    (By.XPATH, '/html/body/div[7]/div/div[9]/div[1]/div/div[2]/div[2]/div/div/div/div[1]/div/div')
                )
            )
            self.check_link()

    def check_link(self):
        """Scan the current result page.

        For every not-yet-crawled result link: read the follower count from
        the snippet, extract e-mail addresses, mail each new qualifying
        address and record it in the `celebrity` table.
        """
        # result entries on the current page
        links = self.driver.find_elements(
            By.XPATH, '/html/body/div[7]/div/div[9]/div[1]/div/div[2]/div[2]/div/div/div/div')
        for i, link in enumerate(links):
            # anchor of the i-th result (XPath positions are 1-based)
            link = link.find_element(By.XPATH, '//*[@id="rso"]/div/div[' + str(i + 1) + ']/div/div[1]/a')
            href = link.get_attribute("href")
            # skip URLs already crawled (parameterised: href is untrusted input)
            cursor = self.mysql.cursor()
            cursor.execute('select * from `crawled` where `url`=%s', (href,))
            has_url = cursor.fetchone()
            cursor.close()
            if has_url is not None:
                print('URL已经爬取过：' + href)
                continue
            # record the URL as crawled
            cursor = self.mysql.cursor()
            cursor.execute('insert into `crawled` (`url`) values(%s)', (href,))
            self.mysql.commit()
            cursor.close()
            print('提取新URL:' + href)
            try:
                receiver_nickname = link.text.split('@')[0]
            except Exception:
                receiver_nickname = ''
            # follower count from the snippet text
            try:
                info_span = self.driver.find_element(
                    By.CSS_SELECTOR, '#rso > div > div:nth-child(' + str(i + 1) + ') > div > div.IsZvec')
                fans = info_span.text.split(' Followers')[0]
                # Normalise "1.2k" / "3w" style counts.
                # NOTE(review): stripping '.' before expanding k/w turns
                # "1.5k" into 15000 (arguably should be 1500) — confirm intent.
                fans = fans.replace('.', '')
                fans = fans.replace('k', '000')
                fans = fans.replace('w', '0000')
                if not fans.isdigit():
                    print('找不到粉丝数据:' + fans)
                    continue
                if not (settings.FANS_MIN_NUMBER < int(fans.replace(',', '')) < settings.FANS_MAX_NUMBER):
                    print('粉丝不在筛选范围：' + fans)
                    continue
                print('粉丝合格：' + fans)
                print('开始提取邮箱...')
                # NOTE(review): the domain class skips 'q'/'Q' — looks like a
                # typo for [a-zA-Z0-9-]; kept byte-identical pending review.
                words = re.findall(
                    r'([a-zA-Z0-9_.+-]+@[a-pr-zA-PRZ0-9-]+\.[a-zA-Z0-9-.]+)', info_span.text)
                print('找到账号的邮件数据:')
                print(words)
                for email in words:
                    if not validators.email(email):
                        print('邮箱验证不通过：' + email)
                        continue
                    # skip addresses already contacted
                    cursor = self.mysql.cursor()
                    cursor.execute('select * from `celebrity` where email=%s', (email,))
                    has_email = cursor.fetchone()
                    cursor.close()
                    self.mysql.commit()
                    if has_email is not None:
                        print('邮箱已经发送过：' + email)
                        continue
                    print('正在发送邮件...')
                    smtp = SmtpServer()
                    # NOTE(review): recipient is a fixed test address, not the
                    # scraped `email` — kept for behavioural parity; switch to
                    # `email` for production use.
                    sent_result = smtp.send_mail(settings.EMAIL_SUBJECT,
                                                 settings.EMAIL_SENT_CONTENT,
                                                 '1714255949@qq.com',
                                                 receiver_nickname)
                    if not sent_result:
                        print('发送失败！对方邮件：' + email)
                        continue
                    print('发送成功！对方邮件：' + email)
                    print('数据库录入...')
                    sql = ('INSERT INTO `celebrity` '
                           '(`source_url`,`operator_id`,`source`,`nickname`,`email`,`fans`) '
                           'VALUES(%s,%s,%s,%s,%s,%s)')
                    params = (href, self.operator[0], 'ins', receiver_nickname,
                              '1714255949@qq.com', int(fans.replace(',', '')))
                    print(sql)
                    print('开启事务操作...')
                    self.mysql.begin()
                    try:
                        cursor = self.mysql.cursor()
                        cursor.execute(sql, params)
                        self.mysql.commit()
                        print('成功数据库录入...')
                        cursor.close()
                    except Exception:
                        self.mysql.rollback()
                        print('数据库录入失败，程序已回滚。EMAIL:' + email)
            except Exception:
                print('找不到粉丝数据')


if __name__ == '__main__':
    # FIXME multi-threading not implemented yet
    spider = Spider()
    spider.google_search()
