#!/usr/bin/env python 
# -*- coding: utf-8 -*-
# @Time    : 2023/9/4 19:31
# @Author  : WJQ
# @Site    : 
# @File    : crawler_baidu.py
# @Software: PyCharm

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/3/4 17:49
# @Author  : WJQ
# @Site    :
# @File    : crawl_wx.py
# @Software: PyCharm
from typing import Tuple, List

from browser.driver_kuaiapp import DriverHandlerKApp
from browser.page import PageHandler
from crawler.crawl import Crawler
from crawler.modules.click_handler import ClickHandler
from crawler.modules.form_handler import FormHandler
from crawler.modules.popup_handler import PopupHandler
from utils.common import CrawlerLogger

from models.Graph import graph, CrawlEdge, Component


class CrawlerKuaiapp(Crawler):
    """Crawler for "kuaiapp" quick apps.

    Visits pages, de-duplicates them against the global ``graph``, and records
    each page transition as a Click edge between graph nodes.
    """

    def __init__(self, driver_handler: DriverHandlerKApp):
        super().__init__(driver_handler)
        self.driver_handler = driver_handler
        self.popup_handler = PopupHandler(self.driver_handler)
        self.form_handler = FormHandler(self.driver_handler)
        self.click_handler = ClickHandler(self.driver_handler)
        # Whether a login has been performed during this crawl session.
        self.login_state = False
        # Clickable elements collected from visited pages.
        # NOTE(review): never cleared between pages (the per-page reset in
        # crawl_pages is commented out), so elements accumulate across the
        # whole crawl -- confirm this is intentional.
        self.collect_element_list: List = []
        # Path and query string of the page currently under inspection.
        self.driver_path: str = ""
        self.driver_query: str = ""
        # Graph node id of the page currently under inspection.
        self.node_id: int = 0

    def crawl_pages(self, click_element: Component, previous_node_id: int) -> Tuple[str, int]:
        """Crawl the target page and link it into the graph.

        :param click_element: the element that was clicked to reach this page
        :param previous_node_id: node id of the page we came from
        :return: tuple of (current page path, current page node id)

        The native context is the major context; switch to the webview
        context only when webview elements need handling.
        """

        # self.collect_element_list = []

        # Step 1 (native): dismiss all pop-ups (handler currently disabled).
        CrawlerLogger.info("[+] step 1 :Click all popups")
        # self.popup_handler.handle_native_popups()

        # Step 2 (native): fill form inputs (handler currently disabled).
        CrawlerLogger.info("[+] step 2 :Fill all form by value")
        # self.form_handler.handle_form_elements()

        # Step 3: toggle checkboxes and switches; requires switching into the
        # webview context and back (handlers currently disabled).
        CrawlerLogger.info("[+] step 3 :Click all check and switch")
        # self.driver_handler.switch_to_webview_context()
        # self.click_handler.safe_click_check_button()
        # self.click_handler.safe_click_switch_button()
        # self.driver_handler.switch_to_native_app_context()

        # Login-form handling (currently disabled).
        # CrawlerLogger.info("[+] Handle login form")
        # self.form_handler.handle_login_form()

        # Deduplication: if this page is already in the graph, return its
        # existing node instead of linking it again.
        # TODO: improve the deduplication strategy?
        if self.judge_page_is_repeat():
            CrawlerLogger.info(f"[+] {self.driver_path} is repeat, choose next click element")
            return self.driver_path, self.node_id

        # Step 4: record the transition previous_node --Click--> now_node in
        # the global graph.
        CrawlerLogger.info(f"[+] step 4 :Build graph in {self.driver_path}")

        previous_node = graph.get_node_by_node_id(previous_node_id)
        now_node = graph.get_node_by_node_id(self.node_id)

        graph.connect(previous_node,
                      now_node,
                      CrawlEdge("Click", click_element), None)

        return self.driver_path, self.node_id

    def judge_page_is_repeat(self) -> bool:
        """Register the current page in the graph and report whether it is a repeat.

        Side effects: updates ``driver_path``/``driver_query`` from the driver,
        extends ``collect_element_list`` with this page's clickable elements,
        and sets ``node_id`` to the page's graph node id.

        :return: the duplicate flag from ``graph.add`` -- True when the page
                 was already present in the graph (see the caller's handling
                 in ``crawl_pages``).
        """
        self.driver_path, self.driver_query = self.driver_handler.get_url()
        self.collect_element_list.extend(
            PageHandler.get_all_click_elements(self.driver_handler.driver))
        CrawlerLogger.info(f"[+] collect {len(self.collect_element_list)} elements in {self.driver_path}")
        now_node, flag = graph.add(self.driver_path, self.driver_query, self.collect_element_list)
        self.node_id = now_node.node_id
        return flag
