from typing import Any, List, Optional, Tuple, Union
import random
import time
from datetime import datetime
import re

import argparse
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import ElementClickInterceptedException
from selenium.common.exceptions import JavascriptException
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains

from utils import get_list_of_links_of_prouducts_to_parsed
from . import browser as browser_module
from .browser import BrowserProxy
from .utils import (
    ReviewData,
    ProductData,
    ProductInformationData,
    PointsOfSaleData,
    get_list_of_proxies,
    ProxyData
)
from db.main import engine, session
from db.models import Product
from db.utils import add_product_to_db


PRODUCT_CATEGORIES_LINK = 'https://vkusvill.ru/goods/'

def wait(f=2, b=3.5):
    """Sleep for a random duration drawn uniformly from [f, b] seconds.

    Used as jitter between scraper actions to look less bot-like.
    """

    number = random.uniform(f, b)
    # keep the original progress message (runtime output must not change)
    print(f'Ждем {number}\n')
    time.sleep(number)


class ScraperSelenium:
    """Selenium-related scraping helpers: scrolling and element lookup."""

    def scroll_the_page(self, browser, a=.2, b=.5, w=0, h=50):
        """Scroll the page by (w, h) pixels via JS, then pause a..b seconds.

        w -- horizontal scroll offset in pixels
        h -- vertical scroll offset in pixels
        a, b -- bounds for the random pause after scrolling
        """

        browser.execute_script(f"window.scrollBy({w}, {h})")
        wait(a, b)


    def scroll_full_page(self, browser: webdriver.Chrome) -> None:
        """Jump straight to the bottom of the page."""

        browser.execute_script(
            "window.scrollTo(0, document.body.scrollHeight);"
        )


    def get_page_element_wait(
        self,
        browser: webdriver.Chrome,
        xpath: str
    ) -> WebElement:
        """Return the element at *xpath*, polling once a second until it appears.

        NOTE(review): loops forever if the element never shows up; consider
        an explicit timeout (e.g. WebDriverWait) if hangs are observed.
        """

        while True:
            try:
                return browser.find_element(By.XPATH, xpath)
            except NoSuchElementException:
                # not rendered yet — report and keep polling
                print(f'Элемент по {xpath} не был найден, ждем...')
                time.sleep(1)


    def move_and_click_on_element(self, browser: webdriver.Chrome, xpath: str) -> None:
        """Wait for the element at *xpath*, move the cursor to it and click."""

        element = self.get_page_element_wait(browser, xpath)
        actions = ActionChains(browser)
        actions.move_to_element(element)
        actions.perform()
        wait(1, 1.5)
        element.click()
        wait(1, 1.5)


    def get_page_element(
        self,
        browser: webdriver.Chrome,
        xpath: str
    ) -> Optional[WebElement]:
        """Return the element at *xpath*, or None when it is not on the page."""

        try:
            return browser.find_element(By.XPATH, xpath)
        except NoSuchElementException:
            print(f'Элемент по {xpath} не был найден')
            return None


class ScraperProduct:
    """Scraping helpers for collecting a single product's data.

    Designed as a mixin: relies on ScraperSelenium methods
    (``get_page_element_wait``) being available on ``self``.
    """

    def get_date(self) -> datetime:
        """Timestamp of when the product was parsed."""
        return datetime.now()


    def get_name(self, browser: webdriver.Chrome) -> str:
        """Product name taken from the page header."""

        element = self.get_page_element_wait(
            browser,
            '//div[@class="Product__head"]/div/h1'
        )
        return element.text


    def get_raiting(self, browser: webdriver.Chrome) -> float:
        """Average product rating shown on the page."""

        element = self.get_page_element_wait(
            browser,
            '//span[@class="ProductCommentsRating--avg"]'
        )
        return float(element.text)


    def get_number_of_raitings(self, browser: webdriver.Chrome) -> int:
        """Number of ratings: the first integer in the counter text."""

        element = self.get_page_element_wait(
            browser,
            '//span[@class="ProductCommentsRating--cnt-avg"]'
        )
        # raw string: '\d' is an invalid escape sequence in a plain literal
        return int(re.findall(r'\d+', element.text)[0])


    def get_link(self, browser: webdriver.Chrome) -> str:
        """URL of the product page currently open."""
        return browser.current_url


    def get_product_information(
        self,
        browser: webdriver.Chrome
    ) -> ProductInformationData:
        """Collect the general product information block."""

        date = self.get_date()
        name = self.get_name(browser)
        raiting = self.get_raiting(browser)
        number_of_raitings = self.get_number_of_raitings(browser)
        link = self.get_link(browser)
        return ProductInformationData(
            date=date,
            name=name,
            raiting=raiting,
            number_of_raitings=number_of_raitings,
            link=link,
        )


    def get_points_of_sale(
        self,
        browser: webdriver.Chrome
    ) -> PointsOfSaleData:
        """Collect the list of shops where the product is available."""

        def move_to_open_button(browser: webdriver.Chrome) -> WebElement:
            """Move the cursor to the button that opens the shop list.

            The button markup differs between page layouts, so both known
            variants are tried and whichever is present is used.
            """

            button1 = ScraperSelenium().get_page_element(
                browser,
                '//span[contains(text(), "Добавить в список")]/../following::div/div/button'
            )
            button2 = ScraperSelenium().get_page_element(
                browser,
                '//a[contains(text(), "Показать наличие в магазинах")]'
            )
            button = button1 or button2

            actions = ActionChains(browser)
            actions.move_to_element(button)
            actions.perform()
            wait(1, 1.5)
            return button

        def open_list_of_points_of_sale(browser: webdriver.Chrome) -> None:
            """Open the shop list, scrolling past overlays when the click is intercepted."""

            button = move_to_open_button(browser)
            move_to_end_of_list_script = """
                document.querySelector("#js-shops-detail").previousElementSibling.scrollIntoView(false);window.scrollBy(0, 100);
            """

            while True:
                try:
                    button.click()
                    wait(4, 5)
                    browser.execute_script(move_to_end_of_list_script)
                    break
                except ElementClickInterceptedException:
                    # something overlaps the button — scroll a bit and retry
                    ScraperSelenium().scroll_the_page(browser, h=150)


        def get_list_of_points_of_sale(
            browser: webdriver.Chrome
        ) -> List[str]:
            """Texts of all entries in the shop list ([] when the list is absent)."""

            try:
                list_of_elements = browser.find_elements(
                    By.XPATH,
                    '//div[@class="js-shops-list"]/div'
                )
                return [element.text for element in list_of_elements]
            except NoSuchElementException:
                return []


        def scroll_list_of_points_of_sale(
            browser: webdriver.Chrome
        ) -> None:
            """Scroll through the shop list so every entry gets rendered."""

            # wait until the scrollable container exists
            while True:
                try:
                    browser.execute_script(
                        'document.querySelector(".simplebar-scroll-content").scrollBy(0, 1);'
                    )
                    break
                except JavascriptException:
                    print('Ждем появления списка точек продаж')
                    wait(.5, 1)

            # scroll until either the "not available anywhere" notice shows
            # up or the last list entry has rendered its text
            while True:
                shops_are_empty = ScraperSelenium().get_page_element(
                    browser,
                    '//*[contains(text(), "Сожалеем, но данного товара сейчас нет ни в одном магазине. Мы делаем все возможное, чтобы вскоре он появился.")]'
                )
                if shops_are_empty: break

                browser.execute_script(
                    'document.querySelector(".simplebar-scroll-content").scrollBy(0, 40);'
                )
                element = ScraperSelenium().get_page_element(
                    browser,
                    '//div[@class="js-shops-list"]/div[last()]'
                )
                if element.text: break


        # FUNCTION DEFINITIONS ABOVE

        open_list_of_points_of_sale(browser)
        scroll_list_of_points_of_sale(browser)

        # drop entries whose text is empty (placeholders that never rendered)
        list_of_points_of_sale = [
            element for element in get_list_of_points_of_sale(browser) if element
        ]

        return PointsOfSaleData(
            list_of_points_of_sale=list_of_points_of_sale,
        )


    def get_product_data(
        self,
        product_information: ProductInformationData,
        product_reviews: Union[List, List[ReviewData]],
        points_of_sale: PointsOfSaleData
    ) -> ProductData:
        """Assemble the final ProductData record from its parts."""

        return ProductData(
            information=product_information,
            reviews=product_reviews,
            points_of_sale=points_of_sale
        )


    def get_list_of_evaluation_reviews(
        self,
        browser: webdriver.Chrome
    ) -> List[int]:
        """Numeric scores of all loaded reviews."""

        list_of_elements = browser.find_elements(
            By.XPATH,
            '//ul[@id="js-product-api-comments-list"]/li//div[@class="Rating__text"]'
        )
        return [int(element.text) for element in list_of_elements]


    def get_list_of_review_names(
        self,
        browser: webdriver.Chrome
    ) -> List[str]:
        """Reviewer names of all loaded reviews."""

        list_of_elements = browser.find_elements(
            By.XPATH,
            '//ul[@id="js-product-api-comments-list"]/li//span[@class="Comment__userLink"]'
        )
        return [element.get_attribute('title') for element in list_of_elements]


    def get_list_of_review_cards(
        self,
        browser: webdriver.Chrome
    ) -> List[str]:
        """Masked card numbers of all loaded reviews (e.g. ``xxxx1234``)."""

        list_of_elements = browser.find_elements(
            By.XPATH,
            '//ul[@id="js-product-api-comments-list"]/li//span[@class="Comment__userCard"]'
        )
        # raw string: '\d' is an invalid escape sequence in a plain literal
        return [
            re.findall(r'x+\d+', element.text)[0]
            for element in list_of_elements
        ]


    def get_list_of_review_description(
        self,
        browser: webdriver.Chrome
    ) -> List[str]:
        """Body texts of all loaded reviews."""

        list_of_elements = browser.find_elements(
            By.XPATH,
            '//ul[@id="js-product-api-comments-list"]/li//div[@class="Comment__text"]'
        )
        return [element.text for element in list_of_elements]


    def get_list_of_review_is_below_four(
        self,
        list_of_evaluation_reviews: List[int]
    ) -> List[bool]:
        """Per-review flag marking a review as "bad" (score below 4)."""

        return [evaluation < 4 for evaluation in list_of_evaluation_reviews]


    def get_product_reviews(
        self,
        browser: webdriver.Chrome
    ) -> Union[List, List[ReviewData]]:
        """Load and parse all reviews of the product."""

        def open_all_reviews(browser: webdriver.Chrome) -> None:
            """Click "load more" until the button disappears, revealing all reviews."""

            while True:
                try:
                    element = browser.find_element(
                        By.XPATH,
                        '//ul[@class="Comments__list"]/following::div/a[@title="Загрузить больше"]'
                    )
                    actions = ActionChains(browser)
                    actions.move_to_element(element)
                    actions.perform()
                    wait(1, 2)
                    element.click()
                    wait(1, 2)
                except ElementClickInterceptedException:
                    # something overlaps the button — scroll a bit and retry
                    ScraperSelenium().scroll_the_page(browser, h=150)
                except NoSuchElementException:
                    # no "load more" button left: everything is loaded
                    break

        def parse_product_reviews(
            browser: webdriver.Chrome
        ) -> Union[List, List[ReviewData]]:
            """Extract the loaded reviews into ReviewData records."""

            product_reviews = []
            list_of_evaluation_reviews = self.get_list_of_evaluation_reviews(
                browser
            )
            list_of_review_names = self.get_list_of_review_names(browser)
            list_of_review_cards = self.get_list_of_review_cards(browser)
            list_of_review_description = self.get_list_of_review_description(
                browser
            )
            list_of_review_is_below_four = self.get_list_of_review_is_below_four(
                list_of_evaluation_reviews
            )
            data = [
                list_of_evaluation_reviews,
                list_of_review_names,
                list_of_review_cards,
                list_of_review_description,
                list_of_review_is_below_four
            ]

            # zip truncates to the shortest list, so partially-rendered
            # reviews are silently dropped rather than crashing
            for evaluation, name, card, description, is_below_four in zip(*data):
                product_review = ReviewData(
                    evaluation=evaluation,
                    name=name,
                    card_number=card,
                    is_below_four=is_below_four,
                    description=description
                )
                product_reviews.append(product_review)

            return product_reviews


        # FUNCTION DEFINITIONS ABOVE

        open_all_reviews(browser)
        return parse_product_reviews(browser)


class Scraper(browser_module.Browser, ScraperSelenium, ScraperProduct):
    """Top-level scraper: walks vkusvill.ru categories, pages and products."""

    def get_list_of_product_categories(self) -> List[str]:
        """Collect the links of all product categories from the goods page."""

        result = []
        browser = self.get_browser(True)
        browser.get(PRODUCT_CATEGORIES_LINK)

        list_of_product_categories = browser.find_elements(
            By.XPATH,
            '//span[contains(text(), "Категории товаров")]/../following::div/ul/li/a'
        )

        for product_category in list_of_product_categories:
            link = product_category.get_attribute('href')
            # get_attribute may return None — guard before the substring test
            if link and 'vkusvill.ru/goods/' in link:
                result.append(link)

        self.close_browser(browser)
        return result


    def parse_single_product(self, browser: webdriver.Chrome) -> ProductData:
        """Parse the currently open product page into a ProductData record."""

        self.scroll_full_page(browser)
        wait(1, 2)
        product_information = self.get_product_information(browser)
        product_reviews = self.get_product_reviews(browser)
        points_of_sale = self.get_points_of_sale(browser)
        return self.get_product_data(
            product_information,
            product_reviews,
            points_of_sale
        )


    def parsing_products_from_page(self, browser: webdriver.Chrome) -> None:
        """Parse every product listed on the current page and store it in the DB."""

        def get_list_of_product_links(browser: webdriver.Chrome) -> List[str]:
            """Product links found on the page (goods links only)."""

            product_links_from_page = []
            products_from_page = browser.find_elements(
                By.XPATH,
                '//div[@class="ProductCard__imageInner"]/a'
            )
            for product in products_from_page:
                product_link = product.get_attribute('href')
                # get_attribute may return None — guard before the substring test
                if product_link and 'vkusvill.ru/goods/' in product_link:
                    product_links_from_page.append(product_link)

            return product_links_from_page


        # FUNCTION DEFINITIONS ABOVE

        for product_link in get_list_of_product_links(browser):
            browser.get(product_link)
            wait(2, 3)
            product_data = self.parse_single_product(browser)
            add_product_to_db(engine, product_data)


    def open_next_page(
        self,
        browser: webdriver.Chrome,
        last_page_of_products: str
    ) -> Optional[Tuple[webdriver.Chrome, str]]:
        """Open the next page of the category.

        Returns (fresh browser, next page link), or None when there is no
        "next" link, i.e. the category has been fully parsed. In both cases
        the browser passed in is closed.
        """

        try:
            browser.get(last_page_of_products)
            next_page_link = browser.find_element(
                By.XPATH,
                '//footer//span[contains(text(), "Вперёд")]/..'
            ).get_attribute('href')
            # restart the browser for each page to keep sessions short
            self.close_browser(browser)
            browser = self.get_browser(True)
            browser.get(next_page_link)
            return (browser, next_page_link)
        except NoSuchElementException:
            print('Парсинг категории закончен')
            self.close_browser(browser)
            return None


    def parsing_products_from_list_of_pages(
        self,
        browser: webdriver.Chrome,
        product_category: str) -> None:
        """Parse the products of every page of one category."""

        last_page_of_products = product_category

        while True:
            self.scroll_full_page(browser)
            self.parsing_products_from_page(browser)
            result = self.open_next_page(
                browser,
                last_page_of_products
            )
            if not result:
                # no next page: open_next_page already closed the browser
                break
            browser, last_page_of_products = result


    def parse_category_products(
        self,
        list_of_porduct_categories: List[str]
    ) -> None:
        """Parse the products of each given category link.

        NOTE(review): the parameter keeps its historical (misspelled) name
        to stay backward-compatible with keyword callers.
        """

        for product_category in list_of_porduct_categories:
            browser = self.get_browser(True)
            browser.get(product_category)
            self.parsing_products_from_list_of_pages(browser, product_category)


    def parse_all_products(self) -> None:
        """Parse every product of every category on the site."""

        list_of_porduct_categories = self.get_list_of_product_categories()
        self.parse_category_products(list_of_porduct_categories)


    def parse_specified_products(self) -> None:
        """Parse an explicit list of product links, rotating proxies."""

        def get_browser_with_proxy() -> webdriver.Chrome:
            """Open a browser configured with the next proxy in the list."""

            nonlocal count
            list_of_proxies = get_list_of_proxies()
            # wrap around when the proxy list is exhausted
            if count >= len(list_of_proxies):
                count = 0

            proxy_data = list_of_proxies[count]
            return BrowserProxy().get_browser(proxy_data)


        # FUNCTION DEFINITIONS ABOVE

        count = 0
        list_of_links = get_list_of_links_of_prouducts_to_parsed()

        for product_link in list_of_links:
            browser = get_browser_with_proxy()
            browser.get(product_link)
            product_data = self.parse_single_product(browser)
            add_product_to_db(engine, product_data)

            count += 1
            # each iteration already uses a fresh proxied browser, so just
            # close the current one; the original additionally opened a new
            # browser every third iteration only to close it immediately
            self.close_browser(browser)