# -*- coding: utf-8 -*-

import os
import threading

import spider_logging
import spider_thread
import re
import time

from crawler import LinkNode
from model import AmazonProduct

from random import Random

from typing import Callable

from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager

logger = spider_logging.get_logger(__name__)

# Shared Chrome options used by every WebDriver instance the crawler creates.
_options = Options()
_options.add_argument("--lang=en-US")
_options.add_argument("--disable-gpu")
_options.add_argument("--disable-logging")
_options.add_argument("--no-sandbox")
_options.add_argument("--homepage=about:blank")
# Headless by default; disabled only when the env var is explicitly falsy.
# (The previous `bool(os.getenv(...))` treated every non-empty value —
# including "0" and "false" — as True, so headless could never be turned off.)
_options.headless = os.getenv("selenium-headless", "1").strip().lower() not in ("", "0", "false", "no")

# Honour the conventional proxy environment variables; first one set wins.
proxy = os.getenv("https_proxy", os.getenv("HTTPS_PROXY", os.getenv("http_proxy", os.getenv("HTTP_PROXY"))))
if proxy:
    _options.add_argument(f"--proxy-server={proxy}")

# Raw strings: \D and \d are regex escapes, not string escapes; without the
# r-prefix these literals raise SyntaxWarning on modern Python.
# _moneyPattern: group 1 = currency symbol, group 2 = amount with >= 2 digits,
# group 3 = a single-digit/plain-integer amount (alternative branch).
_moneyPattern = re.compile(r'(\D*)(\d.*\d)|(\d+)')
# Any non-digit character; used with .sub('') to keep digits only.
_numPattern = re.compile(r'\D')
# Product detail URLs: "/dp/<asin>" or "/gp/...redirect..." paths.
_productURLPattern = re.compile(r'(/dp/\w+)|(/gp/\w*redirect\w*)', flags=re.IGNORECASE)


class ExtractionError(Exception):
    """Raised when a mandatory product attribute (title or price) cannot be
    extracted from a detail page.

    Attributes:
        msg: Human-readable description of what failed.
        locator: Optional (By, selector) pair that failed to match, kept for
            diagnostics and logging.
    """

    def __init__(self, msg: str, locator: tuple = None):
        # Forward msg to Exception so e.args, repr() and pickling behave
        # normally (the original skipped super().__init__, leaving args empty).
        super().__init__(msg)
        self.msg = msg
        self.locator = locator

    def __str__(self):
        return f"Message: {self.msg}"


class AmazonProductCrawler(object):
    """Crawls amazon.com from a seed URL, discovering list pages and product
    detail pages, and extracting product data from the detail pages via a
    shared pool of worker threads (see spider_thread)."""

    # Guards the shared node dictionaries below, which are mutated
    # concurrently by the worker threads spawned in start().
    lock = threading.Lock()

    def __init__(self, start_url="https://www.amazon.com", key=None, depth=2, thread=3, on_data_extracted=None,
                 on_data_extract_error=None):
        """Initialize an Amazon product crawler instance.

        Args:
            start_url: Seed URL to start crawling from.
            key: Optional keyword; discovered links are skipped unless the
                keyword appears in their href or text (case-insensitive).
            depth: Depth of the link node tree; 0 crawls only the start page.
            thread: Number of threads performing crawl work concurrently.
            on_data_extracted: Callback (node, product) on successful extraction.
            on_data_extract_error: Callback (node) on failed extraction.
        """
        self.depth = depth
        self.thread = thread
        self.key = key

        self.data_extract_callback = on_data_extracted
        self.data_extract_error_callback = on_data_extract_error

        # url -> LinkNode maps, all guarded by AmazonProductCrawler.lock.
        self.all_nodes = {}        # every node discovered so far
        self.parsed_nodes = {}     # nodes whose sub-links have been collected
        self.data_nodes = {}       # nodes identified as product detail pages
        self.extracted_nodes = {}  # data nodes whose extraction was attempted

        self.initial_node = LinkNode(start_url)

    def start(self) -> (int, int, int, int):
        """Start crawling; blocks until the whole crawl has finished.

        Returns:
            See status().
        """
        # Parse the root page on the calling thread first so its sub-nodes
        # exist, then fan each sub-tree out to the worker pool.
        self._process(self.initial_node, sub_node_processing=False)
        for sub_node in self.initial_node.sub_nodes:
            def task(sn=sub_node):  # default arg binds current sub_node (avoids late-binding closure bug)
                self._process(sn)

            spider_thread.submit(task, self.thread)

        try:
            start = time.time()
            # Poll the worker pool, logging progress roughly every 10 seconds.
            while spider_thread.pending_task_count() > 0:
                now = time.time()
                if now - start >= 10:
                    start = now
                    status = self.status()
                    logger.log(spider_logging.PROGRESS,
                               f"爬取进度: 已发现节点数={status[0]}, 已完成解析的节点数={status[1]}, 已发现的数据节点数={status[2]}, 已抽取数据的节点数={status[3]}")
        except KeyboardInterrupt:
            # Ctrl-C: abort the worker pool, then fall through to report.
            spider_thread.abort()

        return self.status()

    def status(self) -> (int, int, int, int):
        """Current crawl progress.

        Returns:
            (discovered nodes, parsed nodes, discovered data nodes,
            data nodes whose extraction was attempted)
        """
        # NOTE(review): read without holding the lock — counts may be slightly
        # stale while workers run, which is fine for progress reporting.
        return len(self.all_nodes), len(self.parsed_nodes), len(self.data_nodes), len(self.extracted_nodes)

    def on_data_extracted(self, callback: Callable[[LinkNode, AmazonProduct], None]):
        """Register the callback invoked when a product is parsed successfully.

        Args:
            callback: Called with (node the product was found on, product data).
        """
        self.data_extract_callback = callback

    def on_data_extract_error(self, callback: Callable[[LinkNode], None]):
        """Register the callback invoked when product parsing fails.

        Args:
            callback: Called with (node the product was found on).
        """
        self.data_extract_error_callback = callback

    def _process(self, node: LinkNode, driver: WebDriver = None, sub_node_processing=True):
        """Process one node: discover its sub-nodes and, if it is a data node,
        extract the product data.

        Args:
            node: Link node to process.
            driver: Reuse this WebDriver when given; otherwise a new Chrome
                instance is created for this call.
            sub_node_processing: When False, sub-nodes are only recorded, not
                recursively processed (used for the root page in start()).
        """
        random = Random()

        if driver:
            # Prefer clicking the real anchor (looks more human); fall back
            # to direct navigation when the element can't be found/clicked.
            try:
                webdriver.ActionChains(driver).click(
                    driver.find_element(By.CSS_SELECTOR, f'[href*="{node.dom_href}"]')
                ).perform()
                logger.log(spider_logging.OPERATION, f"链接点击: url={node.dom_href}, depth={node.depth}")
            except WebDriverException:
                driver.get(node.url)
        else:
            driver = webdriver.Chrome(
                service=Service(ChromeDriverManager(log_level=100).install()),
                options=_options
            )
            driver.implicitly_wait(3)
            driver.get(node.url)

        # Scroll by a few random distances to mimic a human reader.
        for i in range(random.randint(1, 3)):
            distance = random.randint(50, 300)
            driver.execute_script(f'window.scrollBy(0,{distance})')

        logger.log(spider_logging.PAGE, f"页面处理: url={node.url}, depth={node.depth}")

        AmazonProductCrawler.lock.acquire()
        self.all_nodes[node.url] = node
        if node.is_data_node:
            self.data_nodes[node.url] = node

        AmazonProductCrawler.lock.release()

        for url, href, is_data_node in self._find_sub_nodes(driver, node):
            # NOTE(review): this membership check runs outside the lock, so
            # two threads could both pass it and register the same url —
            # confirm whether occasional duplicate sub-nodes are acceptable.
            if url not in self.all_nodes:
                sub_node = node.add_sub_node(url, dom_href=href, is_data_node=is_data_node)

                AmazonProductCrawler.lock.acquire()

                self.all_nodes[url] = sub_node

                if sub_node.is_data_node:
                    logger.log(spider_logging.PAGE, f"找到新详情页: url={url}")
                    self.data_nodes[url] = sub_node
                else:
                    logger.log(spider_logging.PAGE, f"找到新列表页: url={url}")

                AmazonProductCrawler.lock.release()

        AmazonProductCrawler.lock.acquire()
        node.is_parsed = True
        self.parsed_nodes[node.url] = node
        AmazonProductCrawler.lock.release()

        if node.is_data_node:
            try:
                product = self._extract(driver, node)
            except ExtractionError as e:
                logger.log(spider_logging.EXTRACTION_FAIL, f"{e.msg}: locator={e.locator}")
                if self.data_extract_error_callback:
                    self.data_extract_error_callback(node)
            else:
                if self.data_extract_callback:
                    self.data_extract_callback(node, product)
            finally:
                # The node counts as "extracted" (attempted) either way.
                node.is_extracted = True
                AmazonProductCrawler.lock.acquire()
                self.extracted_nodes[node.url] = node
                AmazonProductCrawler.lock.release()

        if sub_node_processing and node.depth < self.depth:
            logger.log(spider_logging.OPERATION,
                       f"处理子节点: url={node.url}, next_depth={node.depth + 1}, depth_limit={self.depth}")
            pending_sub_nodes = (sub_node for sub_node in node.sub_nodes if not sub_node.is_parsed)
            for sub_node in pending_sub_nodes:
                # Depth-first with the same driver so we can click links and
                # navigate back instead of opening new sessions.
                self._process(sub_node, driver)

        if node.parent_node:
            logger.log(spider_logging.OPERATION,
                       f"返回上一页: back_url={node.parent_node.url}, back_depth={node.parent_node.depth}")
            driver.back()
        else:
            # Only the initial node has no parent and quits the driver here.
            # NOTE(review): worker tasks (start()) enter with driver=None for
            # nodes that DO have a parent, so the driver they create takes the
            # back() branch and is never quit; also any exception before this
            # point skips quit() entirely — consider try/finally. Confirm.
            logger.log(spider_logging.OPERATION, f"退出页面: url={node.url}, depth={node.depth}")
            driver.quit()

    # noinspection DuplicatedCode
    def _find_sub_nodes(self, driver: WebDriver, node: LinkNode) -> [(str, str, bool)]:
        """Find this node's sub-nodes. Deliberately selective: only links in
        known page regions are collected, not every link on the page.

        Returns:
            List of (absolute url, raw DOM href, whether it is a data node).
        """
        wait = WebDriverWait(driver, timeout=3)
        pending = []
        # Home-page style card layout.
        try:
            card_layout_links = wait.until(
                ec.presence_of_all_elements_located((By.CSS_SELECTOR, ".gw-card-layout a[href]")))
        except WebDriverException:
            pass  # region absent on this page — not an error
        else:
            logger.log(spider_logging.PAGE, f"查找卡片布局内链接: url={node.url}")
            pending.extend(card_layout_links)

        # Search result listings.
        try:
            search_result_links = wait.until(
                ec.presence_of_all_elements_located((By.CSS_SELECTOR, ".s-search-results a[href]")))
        except WebDriverException:
            pass  # region absent on this page — not an error
        else:
            logger.log(spider_logging.PAGE, f"查找搜索结果内链接: url={node.url}")
            pending.extend(search_result_links)

        # Generic "celwidget" list widgets.
        try:
            cel_widget_links = wait.until(
                ec.presence_of_all_elements_located((By.CSS_SELECTOR, ".celwidget a[href]")))
        except WebDriverException:
            pass  # region absent on this page — not an error
        else:
            logger.log(spider_logging.PAGE, f"查找列表组件内链接: url={node.url}")
            pending.extend(cel_widget_links)

        # Filter the collected links.
        result = []
        for element in pending:
            url = element.get_attribute('href')       # resolved absolute URL
            href = element.get_dom_attribute('href')  # raw attribute as written in the DOM
            text = element.text

            # Keyword filter: keep only links mentioning the key in href/text.
            if self.key and text:
                k = self.key.lower()
                h = href.lower()
                txt = text.lower()
                if k not in h and k not in txt:
                    continue

            # /s and /b paths are list pages; /dp and redirect paths are products.
            if "amazon.com/s" in url or "amazon.com/b" in url:
                result.append((url, href, False))
            elif _productURLPattern.search(url):
                result.append((url, href, True))

        return result

    def _extract(self, driver: WebDriver, node: LinkNode) -> AmazonProduct:
        """Extract product data from the node's page.

        Title and price are mandatory attributes: an ExtractionError is raised
        when either cannot be parsed. The remaining attributes are optional
        and left empty (None) when unavailable.

        Returns:
            AmazonProduct: The extracted product.

        Raises:
            ExtractionError: When a mandatory attribute cannot be extracted.
        """
        wait = WebDriverWait(driver, timeout=5)
        random = Random()

        title_locator = (By.ID, 'productTitle')
        try:
            title_element = wait.until(ec.presence_of_element_located(title_locator))
            title = title_element.text
        except WebDriverException:
            raise ExtractionError("标题解析失败", title_locator)

        price_locator = (By.CSS_SELECTOR, '#corePrice_desktop .apexPriceToPay')
        try:
            price_element = wait.until(ec.presence_of_element_located(price_locator))
            # Regex group 1 = currency symbol, group 2 = numeric amount.
            money_symbol, price = _moneyPattern.search(
                price_element.text).group(1), float(_moneyPattern.search(price_element.text).group(2))
        except Exception:
            # Broad catch is deliberate: search() may return None and group 2
            # may not be a valid float, not just WebDriver failures.
            raise ExtractionError("价格解析失败", price_locator)

        # Optional: review/rating count, digits only.
        rating_count = None
        rating_locator = (By.CSS_SELECTOR, '#acrCustomerReviewText')
        try:
            rating_element = wait.until(ec.presence_of_element_located(rating_locator))
            rating_count = _numPattern.sub('', rating_element.text)
        except WebDriverException as e:
            logger.log(spider_logging.EXTRACTION_FAIL, f"Review解析失败, error={e.msg}, locator={rating_locator}")

        # Optional: main product image. Hovering the wrapper image makes the
        # large #detailImg variant appear; its src is what we record.
        main_img_url = None
        main_img_locator = (By.CSS_SELECTOR, '#imgTagWrapperId img')
        try:
            main_img_element = wait.until(ec.presence_of_element_located(main_img_locator))
            webdriver.ActionChains(driver).move_to_element_with_offset(
                main_img_element, random.randint(1, 10), random.randint(1, 10)
            ).perform()

            main_img_locator = (By.CSS_SELECTOR, '#detailImg')
            main_img_element = wait.until(ec.presence_of_element_located(main_img_locator))

            main_img_url = main_img_element.get_attribute('src')
        except WebDriverException as e:
            logger.log(spider_logging.EXTRACTION_FAIL, f"主图解析失败: error={e.msg}, locator={main_img_locator}")

        # Optional: first Q&A pair. Clicking the teaser link loads the block.
        qa_css_selector = '.askTeaserQuestions'
        question_locator = (By.CSS_SELECTOR, f'{qa_css_selector} > .a-fixed-left-grid > .a-fixed-left-grid-inner > '
                                             f'.a-col-right > div:first-child a')
        answer_locator = (By.CSS_SELECTOR, f'{qa_css_selector} > .a-fixed-left-grid > '
                                           f'.a-fixed-left-grid-inner > .a-col-right > div:nth-child(2) '
                                           f'.a-col-right span')
        question = None
        answer = None
        try:
            webdriver.ActionChains(driver).click(
                wait.until(ec.element_to_be_clickable((By.CSS_SELECTOR, '#ask_feature_div a')))
            ).perform()

            wait.until(ec.presence_of_element_located((By.CSS_SELECTOR, qa_css_selector)))
            question_element = wait.until(ec.presence_of_element_located(question_locator))
            answer_element = wait.until(ec.presence_of_element_located(answer_locator))

            question = question_element.text
            answer = answer_element.text
        except WebDriverException as e:
            logger.log(spider_logging.EXTRACTION_FAIL,
                       f"QA解析失败, error={e.msg}, question_locator={question_locator}, answer_location={answer_locator}")

        # Best effort: expand a truncated answer ("see more") and replace the
        # short text with the full one when available.
        try:
            webdriver.ActionChains(driver).click(
                driver.find_element(By.CSS_SELECTOR, f'{qa_css_selector} > .a-fixed-left-grid > '
                                                     f'.a-fixed-left-grid-inner > '
                                                     f'.a-col-right > div:nth-child(2) '
                                                     f'.a-col-right .askShortText a')
            ).perform()

            long_answer_element = wait.until(
                ec.presence_of_element_located((By.CSS_SELECTOR, f'{qa_css_selector} > .a-fixed-left-grid > '
                                                                 f'.a-fixed-left-grid-inner > .a-col-right > '
                                                                 f'div:nth-child(2) .a-col-right .askLongText'))
            )

            answer = long_answer_element.text
            if answer:
                answer = answer.replace('see less', '')
        except WebDriverException:
            pass  # no expandable answer — keep the short text

        logger.log(spider_logging.EXTRACTION, f"产品数据已提取: url={node.url}")

        return AmazonProduct(title=title,
                             money_symbol=money_symbol,
                             price=price,
                             main_img=main_img_url,
                             rating=rating_count,
                             question=question,
                             answer=answer)
