from collections import deque

from my_tools.df_data_utils import is_none_null
from domain_config import domain_config_dict
import os
import pandas as pd
import requests  # 导入 requests 库
from bs4 import BeautifulSoup  # 导入 BeautifulSoup
from tqdm import tqdm  # 导入 tqdm 库
from playwright.sync_api import sync_playwright
from urllib.parse import urljoin

from my_tools.mydatetime_utils import get_current_datetime
from my_tools.url_utils import get_domain, is_inner_link


class PlayWriteAutomation:
    """BFS web-link crawler.

    Renders pages with Playwright down to ``max_depth - 1`` levels, probes
    leaf-level links with plain ``requests``, and records every visited link
    (status, title, parent, internal/external, error) into Excel files.
    """

    def __init__(self):
        # Playwright handles; created lazily by start_browser()
        self.playwright = None
        self.browser = None
        self.context = None
        self.page = None
        self.executable_path = os.getenv('CHROM_PATH')  # Chrome binary path from env var
        self.visited = set()  # URLs already queued/visited, to avoid re-crawling
        self.results = []  # one dict per crawled link (see crawl_links_bfs)
        self.counter = 0  # number of links processed so far (drives periodic saves)
        self.domain_contain_set = set()  # domains treated as "internal" for this crawl
        self.url = ""  # start URL of the current crawl
        self.domain = ""  # domain of the start URL
        self.page_url_set = set()  # URLs captured from page network requests (listener currently disabled)

        # Browser-like HTTP headers used by the plain `requests` fallback
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Referer': 'https://www.baidu.com/'
        }

    def start_browser(self):
        """Start (or reuse) the Playwright driver and open a fresh page."""
        if self.playwright is None:
            self.playwright = sync_playwright().start()
        self.browser = self.playwright.chromium.launch(
            headless=True,
            executable_path=self.executable_path,
            args=[
                '--ignore-certificate-errors',
                '--disable-blink-features=AutomationControlled'
            ]
        )
        self.context = self.browser.new_context(
            user_agent=self.headers['User-Agent'],
            locale='zh-CN',
            ignore_https_errors=True  # tolerate invalid certificates
        )
        self.page = self.context.new_page()
        self.page_url_set.clear()
        # self.page.on("request", lambda request: self.page_url_set.add(request.url))
        print("浏览器已启动")

    def close_browser(self):
        """Close the browser and stop the Playwright driver.

        Fix: the driver process was previously never stopped (resource leak).
        Handles are reset to None so start_browser() can be called again.
        """
        if self.browser:
            self.browser.close()
            self.browser = None
        if self.playwright:
            self.playwright.stop()
            self.playwright = None
        print("浏览器已关闭")

    def navigate_to_site(self, url):
        """Open ``url`` in the Playwright page.

        Returns a tuple ``(accessible, status_code, title, error_reason)``.
        """
        try:
            response = self.page.goto(url)
            # Fix: goto() may return None (e.g. same-document navigation);
            # the old code dereferenced .status unconditionally.
            status_code = response.status if response is not None else None

            # Let the page settle before reading the title
            try:
                self.page.wait_for_load_state("load")
            except Exception:
                pass
            self.page.wait_for_timeout(1000)  # extra 1s grace period

            title = ""
            try:
                title = self.page.title()
            except Exception:
                pass

            # No status and no title -> treat as unreachable.
            # (The old `status_code == -1` check was dead code: Playwright
            # never reports -1.)
            if status_code is None and title == "":
                return False, None, "", "无法访问"

            return True, status_code, title, ""
        except Exception as e:
            # Dump the stack for diagnosis, then report the failure upward
            import traceback
            traceback.print_exc()
            print(f"访问 {url} 时出错: {e}")
            return False, None, "", f"错误:{e}"

    def get_all_links_via_tags(self, url):
        """Collect URLs from link-carrying attributes of common tags.

        Absolute http(s) URLs are kept as-is; values starting with ``.`` or
        ``/`` are resolved against ``url`` via urljoin.
        """
        # Tag -> attribute that may hold a URL
        tags_with_links = {
            'a': 'href',
            'img': 'src',
            'link': 'href',
            'script': 'src',
            'iframe': 'src',
            'video': 'src',
            'audio': 'src',
            'source': 'src',
            'form': 'action',
            'meta': 'content'  # some meta tags carry URLs, e.g. redirects
        }
        try:
            links = []
            # Make sure the DOM is loaded before querying it
            try:
                self.page.wait_for_load_state("load")
            except Exception:
                pass
            for tag_name, attribute_name in tags_with_links.items():
                tag_links = self.page.eval_on_selector_all(
                    tag_name, f'elements => elements.map(e => e.{attribute_name})')
                for link in tag_links:
                    # Fix: attribute values can be None/empty; the old elif
                    # chain called .startswith on None, raising AttributeError
                    # and aborting the whole collection via the outer except.
                    if not link:
                        continue
                    if link.startswith('http'):
                        links.append(link)
                    elif link.startswith(('.', '/')):
                        links.append(urljoin(url, link))
            return links
        except Exception as e:
            print(f"获取链接时发生错误: {e}")
            return []

    def get_all_links(self, url):
        """Merge tag-extracted links with network-captured links, deduplicated
        and filtered to non-null http(s) URLs."""
        links = []
        links.extend(self.get_all_links_via_tags(url))
        links.extend(self.page_url_set)
        links = list(set(links))
        self.page_url_set.clear()
        links = [item for item in links if not is_none_null(item) and item.startswith("http")]
        print(f"目前链接长度:{len(links)}")
        return links

    def simple_request_check(self, url):
        """Probe ``url`` with a plain HTTP GET (5s timeout, browser headers).

        Returns ``(accessible, status_code, title_or_info, error_reason)``;
        for non-HTML responses the content type is reported instead of a title.
        """
        try:
            response = requests.get(url, timeout=5, headers=self.headers)
            status_code = response.status_code
            content_type = response.headers.get('Content-Type', '').lower()

            if 'text/html' in content_type:
                try:
                    # Use the detected encoding so the title decodes correctly
                    response.encoding = response.apparent_encoding
                    soup = BeautifulSoup(response.content, 'html.parser')
                    title = soup.title.string if soup.title else "无标题"
                    return True, status_code, title, ""
                except Exception:
                    return True, status_code, '获取标题失败', ""
            else:
                print(f"链接 {url} 返回非HTML内容: {content_type}")
                return True, status_code, f"非HTML内容: {content_type}", ""

        except requests.RequestException as e:
            # Network-level failure (DNS, timeout, connection refused, ...)
            print(f"请求 {url} 时出错: {e}")
            return False, None, "", f"错误: {e}"

        except Exception as e:
            # Any other parsing/decoding failure
            print(f"解析 {url} 时发生未知错误: {e}")
            return False, None, "", f"错误: {e}"

    def crawl_links_bfs(self, start_url, max_depth=3):
        """Breadth-first crawl starting at ``start_url``.

        Levels below ``max_depth`` are rendered with Playwright (so their
        child links can be harvested); links at exactly ``max_depth`` are
        only probed with ``requests``, with a Playwright retry on failure.
        """
        queue = deque([(start_url, 1, None)])  # (url, depth, parent_url)
        self.visited.add(start_url)

        while queue:
            current_url, depth, parent_url = queue.popleft()

            if depth <= max_depth:
                # Full render while children still need to be expanded;
                # cheap HTTP probe at the last level
                accessible, status_code, title, error_reason = self.navigate_to_site(current_url) \
                    if depth < max_depth else self.simple_request_check(current_url)

                if not accessible:
                    # The plain request may be blocked; retry with a real browser
                    print(f"Failed to access {current_url}, retrying with Playwright: {error_reason}")
                    accessible, status_code, title, error_reason = self.navigate_to_site(current_url)

                domain = get_domain(current_url)
                is_inner = is_inner_link(current_url, self.domain_contain_set)
                self.results.append({
                    "链接": current_url,
                    "层级": depth,
                    "标题": title,
                    "父链接": parent_url,
                    "是否能访问": accessible,
                    "状态码": status_code,
                    "域名": domain,
                    "是否是内链": is_inner,
                    "错误原因": error_reason,
                    "访问时间": get_current_datetime()
                })

                # Periodic checkpoint so a crash doesn't lose everything
                self.counter += 1
                if self.counter % 100 == 0:
                    self.save_to_excel()

                # Expand children only for internal links above the last level
                if depth < max_depth and is_inner:
                    links = self.get_all_links(current_url)
                    print(f"At depth {depth}, found {len(links)} links.")
                    for link in tqdm(links, desc=f"Depth {depth + 1} progress", unit="link"):
                        if link not in self.visited:
                            self.visited.add(link)
                            queue.append((link, depth + 1, current_url))

    def save_to_excel(self):
        """Write accumulated results to ``<domain>/crawl_results_<domain>_<counter>.xlsx``."""
        df = pd.DataFrame(self.results)
        # Embed the counter in the name so periodic saves don't overwrite each other
        file_name = f"crawl_results_{self.domain}_{self.counter}.xlsx"
        os.makedirs(self.domain, exist_ok=True)
        # Fix: join the directory and file name as separate components instead
        # of passing one pre-joined string to os.path.join
        file_path = os.path.join(self.domain, file_name)
        df.to_excel(file_path, index=False)
        print(f"爬虫结果已保存到 {file_name}")

    def start_crawl(self, url, max_depth=3):
        """Configure the internal-domain set, run the BFS crawl, and save results."""
        self.url = url
        self.domain_contain_set.clear()
        self.domain = get_domain(url)
        self.domain_contain_set.add(self.domain)
        # Extra domains configured as "internal" for this site, if any
        if self.domain in domain_config_dict:
            self.domain_contain_set = self.domain_contain_set | domain_config_dict[self.domain]
        print(self.domain_contain_set)

        self.start_browser()
        self.crawl_links_bfs(url, max_depth)
        self.close_browser()

        # Final save of whatever accumulated since the last checkpoint
        self.save_to_excel()


# Fix: guard the crawl behind __main__ so importing this module no longer
# launches a browser and starts crawling as a side effect.
if __name__ == "__main__":
    playWrightAutomation = PlayWriteAutomation()
    playWrightAutomation.start_crawl("https://www.ccgp-hebei.gov.cn/province/", max_depth=3)