import time
import re
import requests
from requests.exceptions import ConnectionError

from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import MoveTargetOutOfBoundsException
from selenium.common.exceptions import WebDriverException

from bs4 import BeautifulSoup
from fake_useragent import UserAgent


class SearchMail:

    """ Search for e-mail addresses across a list of websites. """

    # Pre-compiled patterns, hoisted out of the loops.
    # NOTE: the old skip pattern '.jpg$' had an unescaped dot and also
    # matched names like 'ajpg'; this one requires a literal '.'.
    _SKIP_RE = re.compile(r'\.(?:jpg|pdf)$')
    # One combined pattern replaces three near-identical greedy patterns
    # ('[a-zA-Z0-9-_]+?@.+\.ru\b' etc.); the tight domain character class
    # stops '.+' from swallowing whitespace and neighbouring text.
    _EMAIL_RE = re.compile(r'[a-zA-Z0-9._-]+@[a-zA-Z0-9.-]+\.(?:ru|com|org)\b')

    def __init__(self, browser, initial_link_list):
        """
        browser           -- Selenium WebDriver used to render pages.
        initial_link_list -- newline-separated string of start URLs.
        """
        self.browser = browser
        self.initial_link_list = initial_link_list
        self.temporary_email_list = []   # raw "@"-containing texts found so far
        self.ready_list_of_emails = []   # e-mails matching the wanted TLDs

    def get_a_list_of_sites(self):
        """ Return the start URLs as a list (split on newlines). """
        return self.initial_link_list.split('\n')

    def open_a_page(self, link):
        """
        Download *link* with a browser-like User-Agent.

        Returns the page HTML, or the sentinel string 'blocking' when the
        request fails (callers compare against that exact value).
        """
        headers = {'User-Agent': UserAgent().chrome}

        try:
            # timeout prevents a dead host from hanging the whole crawl;
            # RequestException also covers Timeout / TooManyRedirects,
            # which the previous ConnectionError-only handler let crash.
            response = requests.get(link, headers=headers, timeout=30)
        except requests.exceptions.RequestException as error:
            print(error)
            return 'blocking'

        return response.text

    def convert_to_beautifulsoup(self, page):
        """ Parse raw HTML into a BeautifulSoup tree. """
        return BeautifulSoup(page, 'lxml')

    def get_all_links_from_the_page(self, ready_list_of_links, link, site_domain, protocol):
        """
        Collect same-site links from *link* into *ready_list_of_links*
        (mutated in place).

        Returns 'blocking' if the page could not be downloaded, None otherwise.
        """
        page = self.open_a_page(link)
        if page == 'blocking':
            return 'blocking'
        soup = self.convert_to_beautifulsoup(page)

        all_links = []
        for anchor in soup.find_all('a'):
            try:
                all_links.append(anchor['href'])
            except KeyError as error:
                # <a> tag without an href attribute
                print(error)

        for href in all_links:
            # Skip links to binary documents we cannot parse.
            if self._SKIP_RE.search(href):
                print(f'Пропущена страница {href}')
                continue

            # Protocol-relative URL ('//host/path') -> prepend the scheme.
            if href.startswith('//'):
                ready_list_of_links.append(protocol + href[2:])
                continue

            # Site-relative URL. The old pattern r'/.+' was unanchored, so
            # hrefs like 'about/team' wrongly became 'https://host' + 'about/team'.
            if href.startswith('/') and 'http' not in href:
                ready_list_of_links.append(site_domain + href)
                continue

            # Absolute URL on the same site.
            if site_domain in href:
                ready_list_of_links.append(href)

    def work_with_links_on_the_page(self, ready_list_of_links):
        """
        Visit every unique link in the browser, harvest "@"-containing text
        and keep the e-mails with the wanted domains.
        """

        def search_for_mail_on_the_page(link):
            """ Grab the text of every element whose text contains '@'. """
            elements = self.browser.find_elements(
                By.XPATH,
                '//*[contains(text(), "@")]'
            )
            # extend() instead of a side-effect list comprehension.
            self.temporary_email_list.extend(el.text for el in elements)
            print(f'После страницы {link}:\n{self.temporary_email_list}')

        def get_the_desired_type_of_emails():
            """ Filter harvested texts down to .ru/.com/.org e-mails. """
            print(f'found_emails - {self.temporary_email_list}')
            print(f'found_emails - {len(self.temporary_email_list)}')

            for text in set(self.temporary_email_list):
                if not text:
                    continue
                self.ready_list_of_emails.extend(self._EMAIL_RE.findall(text))

        # FUNCTION DEFINITIONS ABOVE

        unique_links = set(ready_list_of_links)

        print(f'ready_list_of_links - {unique_links}')
        print(f'ready_list_of_links - {len(unique_links)}')

        for count, link in enumerate(unique_links):
            try:
                self.browser.get(link)
                # Scroll down so lazily-rendered footers (a common place
                # for contact e-mails) get into the DOM.
                self.browser.execute_script("window.scrollBy(0, 2000)")
                time.sleep(.5)
            except WebDriverException as error:
                print(error)
                continue

            search_for_mail_on_the_page(link)
            get_the_desired_type_of_emails()
            print(f'Проверено страниц {count+1}/{len(unique_links)}')

    def get_site_domain(self, link):
        """
        Split a URL into (scheme://host, scheme://).

        'https://a.com/x/y' -> ('https://a.com', 'https://')
        """
        domain = '/'.join(link.split('/')[:3])
        protocol = link.split('//')[0] + '//'
        return (domain, protocol)

    def start_mail_search(self):
        """
        Crawl every start URL and return the set of harvested e-mails.

        May also return the sentinel strings 'blocking' (request refused) or
        'Сайт не был указан в Яндекс Картах' (placeholder entry), exactly as
        before -- callers compare against these values.
        """
        for link in self.get_a_list_of_sites():

            print(f'Работаем с такой ссылкой - {link}')
            if not link:
                continue
            if link == 'Сайт не был указан в Яндекс Картах':
                return 'Сайт не был указан в Яндекс Картах'

            site_domain, protocol = self.get_site_domain(link)
            ready_list_of_links = [link]

            result = self.get_all_links_from_the_page(
                ready_list_of_links, link, site_domain, protocol
            )
            if result == 'blocking':
                return 'blocking'

            self.work_with_links_on_the_page(ready_list_of_links)

        return set(self.ready_list_of_emails)

