import csv  # 新增导入，用于处理CSV文件
import logging
import sys
import os
import time
from urllib.parse import urljoin, urlparse
from urllib.request import urlopen, URLError, HTTPError
from html.parser import HTMLParser
from collections import deque
import mimetypes  # 新增导入，用于处理文件扩展名
import fnmatch  # 新增导入，用于支持通配符匹配


def save_resource(url, content, output_dir, content_type):
    """
    Save a fetched resource into output_dir, flattening the URL path into a
    single filename (no sub-directories are created).

    :param url: URL the resource was fetched from
    :param content: resource body as bytes
    :param output_dir: directory the file is written into (created if missing)
    :param content_type: Content-Type header value, e.g. "text/html"
    :return: path of the file that was written
    """
    path = urlparse(url).path
    # Root URL: both "/" and an empty path (http://host) map to index.html.
    # (The old code only handled "/", producing a bare-extension filename
    # for an empty path.)
    if path in ('', '/'):
        path = '/index.html'

    # Flatten the URL path into one filename: drop the leading slash and
    # replace the remaining separators with underscores.
    sanitized_path = path.lstrip('/').replace('/', '_')

    base_name, ext = os.path.splitext(sanitized_path)
    if not ext:
        # No extension in the URL: infer one from the Content-Type and
        # default to .html when the type is unknown.
        ext = mimetypes.guess_extension(content_type) or '.html'
        sanitized_path = f"{base_name}{ext}"

    # Ensure the target directory exists before writing (the old code
    # crashed with FileNotFoundError when it did not).
    os.makedirs(output_dir, exist_ok=True)

    filename = os.path.join(output_dir, sanitized_path)
    with open(filename, 'wb') as f:
        f.write(content)
    return filename

class LinkExtractor(HTMLParser):
    """HTML parser that collects link URLs from a, link, script and img tags."""

    # Tag name -> attribute that carries the link for that tag.
    _ATTR_MAP = {
        'a': 'href',
        'link': 'href',
        'script': 'src',
        'img': 'src',
    }

    def __init__(self):
        super().__init__()
        self.links = []  # URLs found so far, in document order

    def handle_starttag(self, tag, attrs):
        """
        Collect the link attribute of relevant start tags.

        :param tag: lowercased tag name
        :param attrs: list of (name, value) attribute pairs
        """
        attr_name = self._ATTR_MAP.get(tag)
        if not attr_name:
            return
        for name, value in attrs:
            if name == attr_name and value:
                # Skip pure in-page fragment links ("#section"). Links to
                # other pages that merely carry a fragment are kept — the
                # old check ('#' in link and not startswith('http')) wrongly
                # dropped relative cross-page links like "page.html#sec".
                if value.startswith('#'):
                    continue
                self.links.append(value)

def save_state(visited, queue, task_dir):
    """
    Persist the crawler state (visited set and pending queue) to disk.

    :param visited: set of already-visited URLs
    :param queue: pending (url, depth, referrer) triples
    :param task_dir: directory the state files are written into
    """
    with open(os.path.join(task_dir, 'visited.txt'), 'w') as out:
        out.writelines(f"{url}\n" for url in visited)

    with open(os.path.join(task_dir, 'queue.txt'), 'w') as out:
        out.writelines(
            f"{url}|{depth}|{referrer}\n" for url, depth, referrer in queue
        )

def load_state(task_dir):
    """
    Load previously saved crawler state from task_dir.

    Missing state files are created empty so later appends succeed.
    Malformed queue lines are skipped instead of aborting the whole load.

    :param task_dir: directory holding the state files
    :return: (visited URL set, deque of (url, depth, referrer) triples)
    """
    visited = set()
    queue = deque()
    visited_file = os.path.join(task_dir, 'visited.txt')
    queue_file = os.path.join(task_dir, 'queue.txt')

    if os.path.exists(visited_file):
        with open(visited_file, 'r') as f:
            visited = set(line.strip() for line in f if line.strip())
            logging.info(f"Loaded {len(visited)} visited URLs from {visited_file}")
    else:
        # Create an empty visited.txt so subsequent saves have a target.
        with open(visited_file, 'w') as f:
            logging.info(f"Created empty visited file: {visited_file}")

    if os.path.exists(queue_file):
        with open(queue_file, 'r') as f:
            for line in f:
                parts = line.strip().split('|')
                if len(parts) == 3:
                    url, depth, referrer = parts
                elif len(parts) == 2:
                    # Legacy format without a referrer column.
                    url, depth = parts
                    referrer = ""
                else:
                    # Blank or corrupted line: the old code unpacked it into
                    # (url, depth) and crashed with ValueError — skip instead.
                    continue
                try:
                    queue.append((url, int(depth), referrer))
                except ValueError:
                    logging.warning(f"Skipping malformed queue line: {line.strip()!r}")
    else:
        # Create an empty queue.txt so subsequent saves have a target.
        with open(queue_file, 'w') as f:
            logging.info(f"Created empty queue file: {queue_file}")

    return visited, queue

def cleanup_state(task_dir):
    """
    Delete the crawler's state files from task_dir; files that do not
    exist are silently skipped.

    :param task_dir: directory holding the state files
    """
    for name in ('visited.txt', 'queue.txt', 'notfound.txt'):
        state_file = os.path.join(task_dir, name)
        if os.path.exists(state_file):
            os.remove(state_file)

def normalize_url(url):
    """
    Strip the scheme from an http/https URL so the two protocols compare
    as the same page.

    :param url: URL to normalize
    :return: the URL without its http:// or https:// prefix
    """
    for scheme in ('http://', 'https://'):
        if url.startswith(scheme):
            return url[len(scheme):]
    return url

def crawl(start_url, max_depth, allowed_prefixes, task_dir, strategy, excluded_prefixes=None):
    """
    Main crawl loop: fetch pages, save them, and enqueue discovered links.

    :param start_url: starting URL (the queue is seeded by the caller; kept for interface)
    :param max_depth: maximum crawl depth, or None for unlimited
    :param allowed_prefixes: URL patterns a link must match to be followed (fnmatch wildcards)
    :param task_dir: directory for state/CSV/notfound files
    :param strategy: "BFS" (FIFO) or "DFS" (LIFO) queue discipline
    :param excluded_prefixes: URL patterns that are never followed (fnmatch wildcards)
    """
    visited, queue = load_state(task_dir)
    csv_file = os.path.join(task_dir, 'crawled_pages.csv')
    notfound_file = os.path.join(task_dir, 'notfound.txt')

    # Keep normalized-URL sets in sync with `visited` and `queue` so each
    # membership test is O(1). The old code rebuilt a normalized list for
    # every lookup, making the crawl O(n^2) overall.
    visited_norm = {normalize_url(u) for u in visited}
    queue_norm = {normalize_url(u) for u, _, _ in queue}

    logging.info(f"Queue size: {len(queue)}")
    while queue:
        # Honor the crawl strategy: DFS pops the newest entry, BFS the
        # oldest. (The old code ignored `strategy` and always did BFS.)
        if strategy == 'DFS':
            current_url, depth, referrer = queue.pop()
        else:
            current_url, depth, referrer = queue.popleft()
        queue_norm.discard(normalize_url(current_url))

        logging.info(f"Processing URL: {current_url} at depth {depth}")
        if normalize_url(current_url) in visited_norm:
            logging.info(f"URL already visited: {current_url}")
            continue

        try:
            # Context manager guarantees the response is closed; the old
            # code leaked the connection on every request.
            with urlopen(current_url) as response:
                if response.getcode() == 404:
                    # Defensive: urlopen normally raises HTTPError for 404,
                    # but record it if a handler returns the response instead.
                    with open(notfound_file, 'a') as f:
                        f.write(f"{current_url}|{depth}|{referrer}\n")
                    logging.error(f"404 Not Found: {current_url}. URL saved to notfound.txt")
                    continue

                content_type = response.getheader('Content-Type', '').split(';')[0]
                content = response.read()

            # Save the resource (into the parent of task_dir) and record it.
            save_resource(current_url, content, os.path.dirname(task_dir), content_type)
            with open(csv_file, 'a', newline='', encoding='utf-8') as f:
                csv.writer(f).writerow([current_url, "已保存"])
            logging.info(f"Resource saved and marked as visited: {current_url}")

            visited.add(current_url)
            visited_norm.add(normalize_url(current_url))
            save_state(visited, queue, task_dir)

            if max_depth is not None and depth >= max_depth:
                logging.info(f"Max depth reached for URL: {current_url}")
                continue

            # Extract links from the page and enqueue the eligible ones.
            extractor = LinkExtractor()
            extractor.feed(content.decode('utf-8', errors='ignore'))
            for link in extractor.links:
                abs_link = urljoin(current_url, link)
                norm_link = normalize_url(abs_link)

                # Skip anything already visited or already queued.
                if norm_link in visited_norm or norm_link in queue_norm:
                    logging.info(f"Duplicate link skipped: {abs_link}")
                    continue

                # Exclusion patterns win over everything (supports * wildcards).
                if excluded_prefixes and any(
                        fnmatch.fnmatch(norm_link, normalize_url(p))
                        for p in excluded_prefixes):
                    logging.info(f"Link excluded due to excluded prefix: {abs_link}")
                    continue

                # When allow-patterns are configured the link must match one;
                # an empty list disables prefix filtering entirely.
                if allowed_prefixes and not any(
                        fnmatch.fnmatch(norm_link, normalize_url(p))
                        for p in allowed_prefixes):
                    status = "前缀排除"
                    logging.info(f"Link excluded due to prefix mismatch: {abs_link}")
                else:
                    status = "未保存"
                    queue.append((abs_link, depth + 1, current_url))
                    queue_norm.add(norm_link)
                    logging.info(f"New link added to queue: {abs_link}")

                # Record the link's status in the CSV report.
                with open(csv_file, 'a', newline='', encoding='utf-8') as f:
                    csv.writer(f).writerow([abs_link, status])

        except (URLError, HTTPError) as e:
            if isinstance(e, HTTPError) and e.code == 404:
                # Record dead links so they can be inspected after the run.
                with open(notfound_file, 'a') as f:
                    f.write(f"{current_url}|{depth}|{referrer}\n")
                logging.error(f"404 Not Found: {current_url}. URL saved to notfound.txt")
            else:
                # Transient network errors: retry later by re-queueing at the tail.
                queue.append((current_url, depth, referrer))
                queue_norm.add(normalize_url(current_url))
                logging.error(f"Network error processing {current_url}: {e}. URL moved to end of queue.")
        except Exception as e:
            logging.error(f"Unexpected error processing {current_url}: {e}")

    cleanup_state(task_dir)
    logging.info("Crawl completed and state cleaned up.")


def main():
    """Program entry point: configure and start the crawler."""
    print("Web Crawler Configuration:")
    start_url = "https://www.spec.org/"  # starting URL
    output_dir = "output"  # directory fetched resources are saved into
    max_depth = None  # maximum crawl depth (None = unlimited)
    # Allow-patterns include the path part so fnmatch can match full URLs.
    allowed_prefixes = ["*.spec.org/*"]  # URL patterns to follow (wildcards allowed)
    strategy = "BFS"  # crawl strategy (BFS/DFS)
    excluded_prefixes = ["https://www.spec.org/*/results/"]  # URL patterns to skip

    # Create the task directory (state files, log and CSV all live here).
    task_dir = os.path.join(output_dir, "task")
    os.makedirs(task_dir, exist_ok=True)
    print(f"Task directory created: {task_dir}")

    # Log both to a file inside the task directory and to stdout.
    log_file = os.path.join(task_dir, 'spider.log')
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        handlers=[
            logging.FileHandler(log_file),
            logging.StreamHandler(sys.stdout)
        ]
    )
    print(f"Log file configured: {log_file}")

    # Create the CSV report. Write the header only when the file does not
    # exist yet: the old code truncated it on every start, wiping the
    # records of a crawl being resumed from saved state.
    csv_file = os.path.join(task_dir, 'crawled_pages.csv')
    if not os.path.exists(csv_file):
        with open(csv_file, 'w', newline='', encoding='utf-8') as f:
            csv.writer(f).writerow(['URL', 'Status'])
        logging.info(f"CSV file created: {csv_file}")

    # Load saved state, or seed the queue with the start URL on a fresh run.
    visited, queue = load_state(task_dir)
    if not queue:
        queue = deque([(start_url, 0, "")])  # depth 0, empty referrer
        logging.info(f"Queue initialized with start URL: {start_url}")
        save_state(visited, queue, task_dir)  # persist the initial state
        logging.info("Initial state saved to disk.")

    # Start the crawl with the task directory and exclusion patterns.
    crawl(start_url, max_depth, allowed_prefixes, task_dir, strategy.upper(), excluded_prefixes)

# Run the crawler only when executed as a script, not on import.
if __name__ == "__main__":
    main()