#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/3/3 10:58
# @Author  : WJQ
# @Site    :
# @File    : task.py
# @Software: PyCharm
import os
import subprocess
import threading
import time
import traceback
from lxml import etree
from collections import deque
from typing import Optional, Set, Union
from appium.webdriver.common.appiumby import AppiumBy

from browser.driver_bd import DriverHandlerBd
from browser.driver_wx import DriverHandlerWx
from browser.driver_zfb import DriverHandlerZfb
from browser.driver_kuaiapp import DriverHandlerKApp
from browser.page import PageHandler

import conf
from conf import WxConfig, ZfbConfig, KAppConfig, BdConfig
from crawler.crawl_wx import CrawlerWx
from crawler.crawl_zfb import CrawlerZfb
from crawler.crawl_kuaiapp import CrawlerKuaiapp
from crawler.crawl_bd import CrawlerBd

from crawler.modules.click_handler import ClickHandler
from crawler.modules.popup_handler import PopupHandler
from models.Graph import graph, Component

from crawler.modules_native.instrument import back_init_page, back_previous_page, capture_screenshot
from utils.common import CrawlerLogger, judge_mini_program_is_run, get_top_activity_name, find_webview_context, \
    switch_right_input, judge_mini_program_is_on_top

from database.mongo_handler import MongoHandler


class Task:
    def __init__(self, applets_name: str) -> None:
        """
        Crawl task for a single mini program: holds the platform driver /
        crawler pair plus all exploration state (visited paths, graph node
        ids, depth counters, timing).

        :param applets_name: name of the mini program (applet) to crawl.
        """
        self.applets_name: str = applets_name
        # The concrete handler/crawler is chosen lazily in run() based on
        # conf.CLIENT_SIDE_CONF (WeChat / Alipay / Baidu / quick-app).
        # NOTE: typing.Optional[] accepts exactly one type argument, so the
        # platform variants must be wrapped in Union[] — the original
        # Optional[A, B, ...] raises TypeError when the annotation is
        # evaluated.
        self.driver_handler: Optional[Union[DriverHandlerWx, DriverHandlerZfb,
                                            DriverHandlerBd, DriverHandlerKApp]] = None
        self.popup_handler: Optional[PopupHandler] = None
        self.click_handler: Optional[ClickHandler] = None

        self.crawler: Optional[Union[CrawlerWx, CrawlerZfb, CrawlerBd, CrawlerKuaiapp]] = None

        # Page-path bookkeeping.
        self.applets_init_path: str = ""
        self.now_visible_path: str = ""
        self.last_path: str = ""
        self.init_path: str = ""
        self.path_pool: Set[str] = set()
        self.js_path_pool: Set[str] = set()
        self.navigation_list: list = []

        # Graph-node bookkeeping.
        self.now_node_id: int = 0
        self.last_node_id: int = 0
        self.init_node_id: int = 0
        self.node_id_pool: Set[int] = set()

        self.start_time = None
        self.end_time = None

        # Exploration limits and counters.
        self.crawl_depth: int = 0
        self.click_number = 0
        self.js_click_component_number = 0
        self.max_depth: int = 10
        self.max_scroll: int = 2
        self.stop_flag: bool = False
        # "page" is the MongoDB database name.
        self.mongo = MongoHandler("page")


    def run(self):
        """
        Main entry point for one crawl task.

        Flow:
          0. Lazily create the platform-specific driver handler and crawler
             (WeChat / Alipay / Baidu / quick-app) and open the mini program.
          1. Initialise session state; start a background thread that
             snapshots page source + screenshots into MongoDB.
          2. Run the native exploration (_run_task) — once per bottom
             navigation tab when a tab bar exists, otherwise a single round.
          3. Run the JS-based pass (visit_all_page) over all declared pages.
        Results are appended to the per-applet record files from the two
        `finally` blocks, so they are written even on error or Ctrl-C.
        """
        try:
            '''
            Step 0: initialisation
            '''
            if self.crawler is None:
                CrawlerLogger.info("[*] Start init miniAPP...")

                # Select the driver/crawler pair matching the configured platform.
                if isinstance(conf.CLIENT_SIDE_CONF, WxConfig):
                    self.driver_handler = DriverHandlerWx.create(self.applets_name)
                    self.crawler = CrawlerWx(self.driver_handler)

                elif isinstance(conf.CLIENT_SIDE_CONF, ZfbConfig):
                    self.driver_handler = DriverHandlerZfb.create(self.applets_name)
                    self.crawler = CrawlerZfb(self.driver_handler)

                elif isinstance(conf.CLIENT_SIDE_CONF, BdConfig):
                    self.driver_handler = DriverHandlerBd.create(self.applets_name)
                    self.crawler = CrawlerBd(self.driver_handler)

                elif isinstance(conf.CLIENT_SIDE_CONF, KAppConfig):
                    self.driver_handler = DriverHandlerKApp.create(self.applets_name)
                    self.crawler = CrawlerKuaiapp(self.driver_handler)

                self.popup_handler = PopupHandler(self.driver_handler)
                self.click_handler = ClickHandler(self.driver_handler)

                self.driver_handler.enter_mini_program(self.applets_name)
                # Fetch the applet's app.json only the first time we see it.
                if not os.path.exists(f'./experiment/app_json/{conf.CLIENT_SIDE_CONF.PLATFORM}/{self.applets_name}.json') :
                    self.driver_handler.get_app_json(self.applets_name)

            # Initialise session configuration (IME, navigation bar, popups...).
            self.initial_configuration()

            # Background thread: refresh the current page URL and dump the UI
            # (page source + screenshot) into MongoDB.
            # NOTE(review): the thread is non-daemon and refreshPageUrl loops
            # forever, so it can keep the process alive after run() returns —
            # confirm this is intended.
            thread = threading.Thread(target=self.refreshPageUrl, args=(self.mongo,))
            thread.start()

            # Record the start time of the native (primary) exploration.
            with open(f"./{conf.LOG_DICTIONARY_NAME}/{conf.CLIENT_SIDE_CONF.PLATFORM}/record/{self.applets_name}.txt", "w") as f :
                f.write(f"native_start_time: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}\n")

            if self.navigation_list:
                # One exploration round per bottom-navigation tab.
                root_node = graph.get_root_node()
                for item in root_node.components:
                    self.click_handler.safe_click_by_location_or_scroll(item, self.now_visible_path)
                    self.click_number += 1
                    # step 1: start task
                    self._run_task(item)
                    # step 2: back to init page
                    CrawlerLogger.info(f"[+] Back to init page {self.applets_init_path}")
                    self.driver_handler.go_to_target_url(self.applets_init_path)
                    self.now_node_id = 1
                    self.crawl_depth = 0
            else:
                # No tab bar: run a single round seeded with a placeholder
                # "noClick" component at the origin.
                position_dict = {
                    'rect': {
                        'x': 0,
                        'y': 0,
                        'width': 0,  # Add the width key-value pair
                        'height': 0  # Add the height key-value pair
                    },
                    'text': "noClick",
                    'tagName': "",
                    "className": ""
                }
                self._run_task(Component(position_dict, "click"))

        #  if user ctrl-c to stop crawling, we will save the path pool
        except KeyboardInterrupt:
            pass
        except Exception as e:
            CrawlerLogger.error(f"[-] {traceback.format_exc()}")
            CrawlerLogger.error(f"[-] Error in run: {e}")
        finally:
            # Persist the results of the native (primary) exploration.
            self.record_primary_explore_result()
            CrawlerLogger.info(f"[*] 切换成正确的输入法...")
            switch_right_input()

        try:
            self.visit_all_page(self.applets_name)
            pass
        except KeyboardInterrupt:
            pass
        except Exception as e:
            CrawlerLogger.error(f"[-] Error in run: {e}")
        finally:
            # Persist the results of the JS exploration and shut down.
            self.record_js_explore_result()

            time.sleep(3)
            self.driver_handler.driver.quit()
            CrawlerLogger.info("[*] Crawler over...")

    def _run_task(self, click_element: Union[Component, dict]):
        """
        Task scheduler for one exploration round. Within the time budget it
        repeatedly: locates the graph node of the visible page, picks the
        next clickable component (BFS or DFS), clicks it, recovers if the
        click left the mini program, crawls the resulting page into the
        graph, and (in BFS mode) returns to the start page.

        :param click_element: the component just clicked to reach the
            current page (a placeholder on the first call).
        :return: None; exits when the time budget expires or no clickable
            node remains (self.stop_flag).
        """
        # NOTE(review): `global click_elements` publishes a loop temporary at
        # module scope; it is only read inside this method — confirm whether
        # it can be a plain local.
        global click_elements
        CrawlerLogger.info(f"[*] Start crawling...")
        self.stop_flag = False
        # Crawl the entry page and seed the bookkeeping pools.
        self.now_visible_path, self.now_node_id = self.crawler.crawl_pages(click_element, self.now_node_id)
        self.path_pool.add(self.now_visible_path)
        self.node_id_pool.add(self.now_node_id)
        self.init_path = self.now_visible_path
        self.init_node_id = self.now_node_id
        self.start_time = time.time()

        # The total exploration time is split evenly across navigation tabs.
        navigation_num = len(self.navigation_list) if self.navigation_list else 1

        while (time.time() - self.start_time) < (conf.CLIENT_SIDE_CONF.EXPLORE_TIME / navigation_num):
            '''
            Step 1: work out which graph node the visible page belongs to.
            '''
            click_elements = PageHandler.get_all_click_elements(self.driver_handler.driver)
            node_id_tmp, title_tmp = graph.get_node_id_and_path(self.driver_handler, click_elements)
            if node_id_tmp:
                self.now_node_id = node_id_tmp
                self.now_visible_path = title_tmp
            else:
                # Unknown page: crawl it into the graph, then restart the loop.
                CrawlerLogger.debug(f"[-] Can not find node id and title in graph, use crawler to get node id and title")
                self.now_visible_path, self.now_node_id = self.crawler.crawl_pages(click_element, self.now_node_id)
                if self.now_visible_path not in self.path_pool :
                    CrawlerLogger.info(f"[+] Add {self.now_visible_path} to title pool...")
                self.path_pool.add(self.now_visible_path)
                self.node_id_pool.add(self.now_node_id)
                if self.last_path == self.now_visible_path :
                    continue
                else :
                    self.crawl_depth += 1
                continue

            CrawlerLogger.info(f"[*] Crawl on {self.now_visible_path} id: {str(self.now_node_id)}, remaining time is {(conf.CLIENT_SIDE_CONF.EXPLORE_TIME / navigation_num) - time.time() + self.start_time}")

            """
            Step 2: select next click node (DFS, BFS...)
            """
            CrawlerLogger.info(f"[+] get next click node...")
            if conf.CLIENT_SIDE_CONF.SELECTION_MODE == 'BFS':
                click_element = self.bfs_from_graph(self.init_path)
            else:
                click_element = self.dfs_from_graph(self.now_node_id)

            if click_element is None:
                if self.stop_flag:
                    CrawlerLogger.info(f"[*] No more clickable node, stop crawling...")
                    break
                else:
                    # Selection produced nothing but exploration isn't done:
                    # make sure we are still inside the mini program.
                    if not judge_mini_program_is_on_top() :
                        self._go_back()
                        CrawlerLogger.info(f"[*] Leave mini program, Back to previous page")
                        if not judge_mini_program_is_on_top() :
                            CrawlerLogger.info(f"[-] Mini program is not running, restart mini program...")
                            self.driver_handler.enter_mini_program(self.applets_name)
                            self.driver_handler.go_to_target_url(self.init_path)
                            self.now_visible_path = self.init_path
                            self.now_node_id = self.init_node_id
                        else :
                            continue
                    continue

            """
            Step 3: click node trigger event or scroll
            """
            self.last_path = self.now_visible_path
            self.last_node_id = self.now_node_id
            page_str = f"{self.now_visible_path + ' node id: ' + str(self.now_node_id)}"
            if not self.click_handler.safe_click_by_location_or_scroll(click_element, page_str):
                continue
            self.click_number += 1

            """
            Step 4: if the click navigated out of the mini program, go back
            (or restart the applet when it was killed).
            """
            if not judge_mini_program_is_on_top():
                self._go_back()
                CrawlerLogger.info(f"[*] {click_element.label} make leave mini program, Back to previous page")
                if not judge_mini_program_is_on_top():
                    CrawlerLogger.info(f"[-] Mini program is not running, restart mini program...")
                    self.driver_handler.enter_mini_program(self.applets_name)
                    self.driver_handler.go_to_target_url(self.init_path)
                    self.now_visible_path = self.init_path
                    self.now_node_id = self.init_node_id
                else:
                    continue

            """
            Step 5: crawl page and build graph
            """
            self.now_visible_path, self.now_node_id = self.crawler.crawl_pages(click_element, self.last_node_id)
            if self.now_visible_path not in self.path_pool:
                CrawlerLogger.info(f"[+] Add {self.now_visible_path} to title pool...")
                self.path_pool.add(self.now_visible_path)
            self.node_id_pool.add(self.now_node_id)
            if self.last_path == self.now_visible_path:
                continue
            else:
                self.crawl_depth += 1

            """
            Step 6: follow-up navigation — BFS mode returns to the start
            page after each click so the next pick starts from a known place.
            """
            if conf.CLIENT_SIDE_CONF.SELECTION_MODE == 'BFS':
                if self.now_visible_path != self.init_path:
                    back_previous_page(self.driver_handler)
                    self.now_visible_path = self.last_path
            else:
                pass
        return

    def initial_configuration( self ):
        """
        Prepare the freshly-opened mini program for crawling.

        Switches the device to Appium's UnicodeIME (so text can be injected
        into inputs), records the top activity name, registers the bottom
        navigation tabs as the graph root's components, optionally switches
        into the webview context, and dismisses startup popups.
        """
        self.now_visible_path = "FIRST_MINI_APP_PAGE_TITLE"
        self.now_node_id = 1
        # Activate Appium's unicode IME via adb.
        # NOTE(review): shell=True with an interpolated device id is fine for
        # trusted conf values, but a list argv with shell=False would be safer.
        cmd_activate_ime = f'adb -s {conf.CLIENT_SIDE_CONF.DEVICE_ID} shell ime set io.appium.settings/.UnicodeIME'
        subprocess.run(cmd_activate_ime, shell=True)
        CrawlerLogger.info(f"[*] 切换到UnicodeIME输入法")

        conf.CLIENT_SIDE_CONF.APPLET_ACTIVITY_NAME = get_top_activity_name()
        # The bottom navigation tabs become the root node's clickable components.
        self.navigation_list = PageHandler.safe_find_elements(self.driver_handler.driver, AppiumBy.ID,
                                                         conf.CLIENT_SIDE_CONF.NAVIGATION_ID)
        graph.add(self.now_visible_path, "FIRST_MINI_APP_PAGE_QUERY", [Component(i) for i in self.navigation_list] or [])

        if conf.WEBVIEW_MODE:
            conf.CLIENT_SIDE_CONF.WX_APP_BRAND = find_webview_context()
            self.driver_handler.switch_to_webview_context()
            self.driver_handler.check_jump()
        # Dismiss startup popups; run twice because closing one popup can
        # reveal another underneath.
        for i in range(2):
            self.popup_handler.handle_webview_popups()
            self.popup_handler.handle_native_popups()
            self.popup_handler.handle_popups()
            self.popup_handler.handle_popups2()

        self.applets_init_path, _ = self.driver_handler.get_url()

    def record_primary_explore_result(self):
        """
        Append the native-exploration statistics (visited paths plus click /
        node / clickable-element counts) to the per-applet record file, then
        log the start of the follow-up JS exploration.

        Output text is byte-identical to the original; only the unidiomatic
        `x.__len__()` / redundant `str()` calls were replaced with plain
        `len()` and direct f-string interpolation.
        """
        record_file = f"./{conf.LOG_DICTIONARY_NAME}/{conf.CLIENT_SIDE_CONF.PLATFORM}/record/{self.applets_name}.txt"
        with open(record_file, "a") as f:
            for title in self.path_pool:
                f.write(title + "\n")
            f.write(f"native title number is {len(self.path_pool)} \n")
            f.write(f"native click number is {self.click_number} \n")
            f.write(f"native node number is {len(self.node_id_pool)} \n")
            f.write(f"native click element number is {graph.get_all_click_element()} \n")

        CrawlerLogger.info(f"[+] All native title number is {len(self.path_pool)}")

        # Record the start time of the JS exploration.
        with open(record_file, "a") as f:
            f.write(f"js_start_time: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}\n")

        CrawlerLogger.info(f"[+] Start js crawl {self.applets_name}")

    def record_js_explore_result(self):
        """
        Append the combined (native + JS) exploration statistics to the
        per-applet record file and dump the final page graph to disk.

        Output text is byte-identical to the original; `x.__len__()` and
        redundant `str()` calls were replaced with `len()` / direct
        f-string interpolation.
        """
        record_file = f"./{conf.LOG_DICTIONARY_NAME}/{conf.CLIENT_SIDE_CONF.PLATFORM}/record/{self.applets_name}.txt"
        with open(record_file, "a") as f:
            for title in self.path_pool:
                f.write(title + "\n")
            f.write(f"all title number is {len(self.path_pool)} \n")
            f.write(f"all click number is {self.click_number} \n")
            f.write(f"all node number is {len(self.node_id_pool)} \n")
            f.write(f"all click element number is {graph.get_all_click_element() + self.js_click_component_number} \n")

        # Dump the final page graph (graph.__str__ renders it).
        with open(f"./{conf.LOG_DICTIONARY_NAME}/{conf.CLIENT_SIDE_CONF.PLATFORM}/graph/{self.applets_name}-graph.txt", "w") as f:
            f.write(f"{graph}")

    def bfs_from_graph(self, init_title: str) -> Union[Component, None]:
        """
        Breadth-first search over the page graph for the next unvisited
        clickable component.

        When the component's page differs from the currently visible one,
        navigates back to the init page and then jumps to that page. The
        returned component is marked visited. When the whole graph has been
        exhausted, sets self.stop_flag and returns None.

        Fixes vs. original: dropped the pointless `global title`; replaced
        `node.visited == False` with `not node.visited`; the visited set was
        built but never consulted, so a cyclic graph caused an infinite
        loop — nodes are now guarded by identity before being expanded.

        :param init_title: path/title of the BFS start (root) page.
        :return: the next Component to click, or None when none remains.
        """
        seen_nodes = set()  # node identities already expanded (cycle guard)
        queue = deque([(init_title, graph.get_root_node())])
        while queue:
            title, node = queue.popleft()
            if id(node) in seen_nodes:
                continue
            seen_nodes.add(id(node))
            if not node.visited and node.state:
                for component in node.components:
                    if not component.visited:
                        if title != self.now_visible_path:
                            # Return to a known page, then jump to the page
                            # that owns this component.
                            back_init_page(self.driver_handler, self.init_path, self.applets_init_path)
                            self.driver_handler.go_to_target_url(title)
                        self.now_visible_path = title
                        component.visited = True
                        return component

            node.visited = True
            for children_node in node.children:
                queue.append((children_node.path, children_node))
        self.stop_flag = True
        return None

    def dfs_from_graph(self, now_node_id: int) -> Union[Component, None]:
        """
        Depth-first selection of the next clickable component, starting at
        the current graph node.

        Order of preference:
          1. give up and go back when max depth is exceeded or the node has
             an empty path;
          2. an unvisited component on the current (unvisited, valid) node;
          3. a synthetic mid-screen "scroll" component while the node still
             has scroll budget (scrolling may reveal more clickables);
          4. mark the node visited and navigate to its parent, or set
             stop_flag when we are already at the root.

        :param now_node_id: graph id of the node we are currently on.
        :return: the next Component to interact with, or None when the
            caller should re-evaluate (we went back, or stop_flag was set).
        """
        node = graph.get_node_by_node_id(now_node_id)

        if self.crawl_depth > self.max_depth or node.path == '':
            CrawlerLogger.info(f"[*] Reach the max depth or the node is empty, go back...")
            node.visited = True
            self._go_back()
            return None

        if not node.visited and node.state:
            for component in node.components:
                if not component.visited:
                    return component

        # The current page is processed, go back to the previous level and set the correct attributes
        if node.scroll_number < self.max_scroll:
            # Synthetic component that scrolls from the screen centre.
            position_dict = {
                'rect': {
                    'x': conf.CLIENT_SIDE_CONF.PHONE_WIDTH / 2,
                    'y': conf.CLIENT_SIDE_CONF.PHONE_HEIGHT / 2,
                    'width': 0,  # Add the width key-value pair
                    'height': 0  # Add the height key-value pair
                },
                'text': "scroll",
                'tagName': "",
                "className": ""
            }
            node.scroll_number += 1
            return Component(position_dict, 'scroll')

        node.visited = True
        # Climb to the parent unless we are already at the root page.
        if node.parent.path != "FIRST_MINI_APP_PAGE_TITLE" and node.parent != node:
            CrawlerLogger.info("[*] No more clickable nodes, going back to the previous page...")
            try:
                self._go_target_node(node.parent.node_id)
            except Exception as e:
                CrawlerLogger.debug(f"[-] go to target {node.parent.path} error: {e}")
                self._go_back()
        else:
            self.stop_flag = True
        return None

    def _go_back(self):
        """
        Navigate one page back and keep crawl bookkeeping in sync.

        In webview mode, re-selects the top window after the back
        navigation; always decrements crawl_depth.
        """
        # NOTE(review): run() (Step 6) calls back_previous_page(self.driver_handler)
        # while this passes the raw driver — one of the two call styles is
        # probably wrong; confirm back_previous_page's expected argument.
        back_previous_page(self.driver_handler.driver)
        if conf.WEBVIEW_MODE:
            self.driver_handler.find_top_window()
        self.crawl_depth -= 1

    def _go_target_node(self, target_node_id):
        '''
        Try to navigate the driver to the page represented by a graph node.

        Strategy: jump straight to the node's URL; if we did not land on the
        target node, replay the recorded click-edge path from wherever we
        actually are to the target. On failure the target node is
        invalidated (state = False) and the node we landed on is re-parented
        onto the target's parent so the DFS can keep climbing.

        :param target_node_id: graph id of the node to reach.
        :return: None; updates self.now_node_id / self.now_visible_path on
            success.
        '''
        target_node = graph.get_node_by_node_id(target_node_id)
        self.driver_handler.go_to_target_url(target_node.path, target_node.query)
        click_elements = PageHandler.get_all_click_elements(self.driver_handler.driver)
        node_id_tmp, path_tmp = graph.get_node_id_and_path(self.driver_handler, click_elements)
        if node_id_tmp == target_node_id:
            # Direct URL jump landed on the target node — done.
            self.now_node_id = node_id_tmp
            self.now_visible_path = path_tmp
            return
        # Replay the recorded click path from the current node to the target.
        click_edge_list = graph.get_click_edge_list(node_id_tmp, target_node_id)
        if click_edge_list:
            for click_edge in click_edge_list:
                self.click_handler.safe_click_by_location_or_scroll(click_edge, path_tmp)
            # Re-identify where the clicks actually took us.
            click_elements = PageHandler.get_all_click_elements(self.driver_handler.driver)
            node_id_tmp, path_tmp = graph.get_node_id_and_path(self.driver_handler, click_elements)
            if node_id_tmp == target_node_id :
                self.now_node_id = node_id_tmp
                self.now_visible_path = path_tmp
                CrawlerLogger.info(f"[+] Go to target node {target_node_id} success")
            else:
                # Landed somewhere else: splice that node onto the target's
                # parent and stop trying to reach the target.
                graph.get_node_by_node_id(node_id_tmp).parent = target_node.parent
                target_node.state = False
                CrawlerLogger.debug(f"[-] Click edge {click_edge_list} failed, go back...")
        else:
            target_node.state = False
            graph.get_node_by_node_id(node_id_tmp).parent = target_node.parent
            CrawlerLogger.debug(f"[-] No click edge from {node_id_tmp} to {target_node_id}")


    def visit_all_page(self, applet_name: str):
        """
        JS-phase exploration: visit every page declared in the applet's
        app.json that the native phase did not already reach, and count the
        clickable elements found there.

        :param applet_name: applet whose app.json page list is loaded.
        """
        file_path = r'experiment/app_json/{}/{}.json'
        file_path = file_path.format(conf.CLIENT_SIDE_CONF.PLATFORM, applet_name)
        all_page = self.driver_handler.get_all_pages(file_path)

        for page in all_page:
            if not judge_mini_program_is_run(self.driver_handler.driver) :
                CrawlerLogger.info(f"[-] Mini program is not running, restart mini program...")
                self.driver_handler.enter_mini_program(self.applets_name)
            CrawlerLogger.info(f"[+] next target url: {page}")
            # Substring match: pool entries may carry query strings, so a
            # plain `page in self.path_pool` would miss them.
            is_part_of_set = any(page in element for element in self.path_pool)
            if is_part_of_set:
                CrawlerLogger.info(f"[+] {page} is added to title pool")
                continue
            try:
                self.driver_handler.go_to_target_url(page)
                # self.popup_handler.handle_native_popups()
            except Exception as e:
                # Navigation via JS failed — restart the applet and retry once.
                CrawlerLogger.info(f"[-] js error, restart mini program")
                self.driver_handler.enter_mini_program(self.applets_name)
                self.driver_handler.go_to_target_url(page)

            tmp_path, _ = self.driver_handler.get_url()
            tmp = len(PageHandler.get_all_click_elements(self.driver_handler.driver))
            if tmp > 0:
                # Only pages with at least one clickable element are recorded.
                if tmp_path not in self.path_pool:
                    self.path_pool.add(tmp_path)
                    self.js_click_component_number += tmp
                    CrawlerLogger.info(f"[+] {tmp_path} adds to title pool")
            else:
                CrawlerLogger.debug(f"[-] {tmp_path} has no clickable element")
    

    def refreshPageUrl(self, mongo):
        """
        Background worker: poll self.now_visible_path every 0.1s and,
        whenever the visible page changes, persist its page source plus a
        screenshot into MongoDB.

        Runs forever; the thread is expected to die with the process.

        :param mongo: MongoHandler used to insert the page snapshots.
        """
        # Initial snapshot of whatever page is on screen at startup.
        page_source = self.driver_handler.driver.page_source
        timestamp = time.time()

        screenshot_path = capture_screenshot(
            self.driver_handler.driver,
            "initial",
            self.applets_name,
            timestamp
        )

        data = self.wrapPageData(page_source, "", timestamp)
        data["screenshot_path"] = screenshot_path
        mongo.insert_data(self.applets_name, data)

        while True:
            try:
                current_page = self.now_visible_path
                # PEP 8: compare to None with `is`, not `==`.
                if current_page is None:
                    continue

                if conf.MINIAPP_CONF.pageUrl.value != current_page:
                    # Publish the new URL so other components can see it.
                    conf.MINIAPP_CONF.pageUrl.value = current_page

                    timestamp = time.time()

                    # Capture the page source and a matching screenshot.
                    page_source = self.driver_handler.driver.page_source
                    screenshot_path = capture_screenshot(
                        self.driver_handler.driver,
                        current_page,
                        self.applets_name,
                        timestamp
                    )

                    # Package and persist the snapshot.
                    data = self.wrapPageData(page_source, current_page, timestamp)
                    data["screenshot_path"] = screenshot_path
                    mongo.insert_data(self.applets_name, data)
            except Exception as e:
                # Best-effort: the page may be mid-transition; log at debug
                # instead of silently swallowing so real faults stay visible.
                CrawlerLogger.debug(f"[-] refreshPageUrl snapshot failed: {e}")
            finally:
                time.sleep(0.1)

    def find_screenshots_with_code(mongo, applets_name, query=None):
        """查询MongoDB中的记录，返回包含截图路径的结果"""
        query = query or {}
        results = mongo.find_data(applets_name, query)
        
        # 检查截图文件是否存在
        for result in results:
            if "screenshot_path" in result and result["screenshot_path"]:
                if os.path.exists(result["screenshot_path"]):
                    result["screenshot_exists"] = True
                else:
                    result["screenshot_exists"] = False
        
        return results



    def wrapPageData(self, page_source, current_page, timestamp=None):
        current_timestamp = timestamp or time.time()
        return {
            "timestamp": current_timestamp,
            "page_source": page_source,
            "page_url": current_page,
            "screenshot_path": None  # 将在后续更新
        }