import csv  # 新增导入，用于处理CSV文件
import logging
import sys
import os
import time
from urllib.parse import urljoin, urlparse
from urllib.request import urlopen, URLError, HTTPError
from html.parser import HTMLParser
from collections import deque
import mimetypes  # 新增导入，用于处理文件扩展名
import fnmatch  # 新增导入，用于支持通配符匹配

# State is persisted in a single unified CSV file; this mechanism is untested.

def save_resource(url, content, output_dir, content_type):
    """
    Save a fetched resource into *output_dir* as a flat file (no subdirectories).

    The URL path is flattened by replacing '/' with '_'. When the path carries
    no file extension, one is guessed from *content_type*, falling back to
    '.html' for unknown types.

    NOTE(review): the query string is discarded (only urlparse().path is used),
    so URLs differing only in their query overwrite the same file.

    :param url: Resource URL
    :param content: Resource body (bytes)
    :param output_dir: Directory to write into (created if missing)
    :param content_type: Media type from the response Content-Type header
    :return: Path of the file written
    """
    path = urlparse(url).path
    # An empty or root path (e.g. "http://host" or "http://host/") has no
    # usable file name; default to index.html. (Previously only '/' was
    # handled, so "http://host" produced a nameless hidden ".html" file.)
    if path in ('', '/'):
        path = '/index.html'

    # Flatten the path: drop the leading slash, replace separators with '_'.
    flat_name = path.lstrip('/').replace('/', '_')

    base_name, ext = os.path.splitext(flat_name)
    if not ext:
        # No extension in the URL: infer one from the Content-Type,
        # defaulting to .html when mimetypes has no mapping.
        ext = mimetypes.guess_extension(content_type) or '.html'
        flat_name = f"{base_name}{ext}"

    filename = os.path.join(output_dir, flat_name)
    os.makedirs(output_dir, exist_ok=True)  # tolerate a missing output dir
    with open(filename, 'wb') as f:
        f.write(content)
    return filename

class LinkExtractor(HTMLParser):
    """Collects candidate crawl links (href/src values) from an HTML document."""

    # Which attribute carries the link for each tag we care about.
    _LINK_ATTRS = {'a': 'href', 'link': 'href', 'script': 'src', 'img': 'src'}

    def __init__(self):
        super().__init__()
        self.links = []  # extracted link strings, in document order

    def handle_starttag(self, tag, attrs):
        """
        Record the link attribute of interesting start tags.

        :param tag: HTML tag name
        :param attrs: list of (name, value) attribute pairs
        """
        wanted = self._LINK_ATTRS.get(tag)
        if wanted is None:
            return
        for name, value in attrs:
            if name != wanted or not value:
                continue
            # Relative links containing '#' are treated as in-page anchors
            # and skipped; absolute http(s) links with fragments are kept.
            if '#' in value and not value.startswith('http'):
                continue
            self.links.append(value)

def save_state(visited, queue, task_dir):
    """
    Persist the crawler state to crawler_state.csv inside *task_dir*.

    Visited URLs are written with status 'visited' (blank depth/referrer);
    queued URLs are written with status 'pending'.

    :param visited: Set of visited URLs
    :param queue: Deque of (url, depth, referrer) tuples still to process
    :param task_dir: Task directory
    """
    rows = [['URL', 'Status', 'Depth', 'Referrer']]
    rows.extend([url, 'visited', '', ''] for url in visited)
    rows.extend([url, 'pending', depth, referrer] for url, depth, referrer in queue)
    state_path = os.path.join(task_dir, 'crawler_state.csv')
    with open(state_path, 'w', newline='', encoding='utf-8') as handle:
        csv.writer(handle).writerows(rows)

def load_state(task_dir):
    """
    Load crawler state from crawler_state.csv in *task_dir*.

    Any non-pending status (visited/notfound/networkerror/excluded) marks the
    URL as already handled; 'pending' rows are restored into the work queue.
    If no state file exists, an empty one (header only) is created.

    :param task_dir: Task directory containing the state file
    :return: (visited set, deque of (url, depth, referrer) pending entries)
    """
    visited = set()
    queue = deque()
    state_file = os.path.join(task_dir, 'crawler_state.csv')

    if os.path.exists(state_file):
        with open(state_file, 'r', newline='', encoding='utf-8') as f:
            reader = csv.reader(f)
            # Skip the header; default of None tolerates a zero-byte file
            # (a bare next() would raise StopIteration here).
            next(reader, None)
            for row in reader:
                # Skip blank or corrupt rows instead of crashing on unpack.
                if len(row) != 4:
                    continue
                url, status, depth, referrer = row
                # Every non-pending status counts as already visited so the
                # crawler does not re-fetch failed/excluded URLs on resume.
                if status in ('visited', 'notfound', 'networkerror', 'excluded'):
                    visited.add(url)
                elif status == 'pending':
                    queue.append((url, int(depth), referrer))
        logging.info(f"Loaded state from {state_file}")
    else:
        # First run: create an empty state file with only the header row.
        with open(state_file, 'w', newline='', encoding='utf-8') as f:
            csv.writer(f).writerow(['URL', 'Status', 'Depth', 'Referrer'])
        logging.info(f"Created empty state file: {state_file}")

    return visited, queue

def cleanup_state(task_dir):
    """
    Remove the crawler state file from *task_dir*, if present.

    :param task_dir: Task directory
    """
    try:
        os.remove(os.path.join(task_dir, 'crawler_state.csv'))
    except FileNotFoundError:
        pass  # nothing to clean up

def normalize_url(url):
    """
    Strip a leading 'http://' or 'https://' so that the two protocols
    compare as the same URL.

    :param url: The URL to normalize
    :return: URL without its http(s) scheme prefix, or unchanged otherwise
    """
    for scheme in ('http://', 'https://'):
        if url.startswith(scheme):
            return url[len(scheme):]
    return url

def _append_state_row(state_file, url, status, depth, referrer):
    """Append one audit row (URL, status, depth, referrer) to the state CSV."""
    with open(state_file, 'a', newline='', encoding='utf-8') as f:
        csv.writer(f).writerow([url, status, depth, referrer])


def _fetch(url):
    """
    Download *url* and return (content_bytes, content_type).

    urlopen raises HTTPError for 4xx/5xx responses, so no explicit status
    check is needed; the response is closed via the context manager.
    """
    with urlopen(url) as response:
        content_type = response.getheader('Content-Type', '').split(';')[0]
        return response.read(), content_type


def crawl(start_url, max_depth, allowed_prefixes, task_dir, strategy, excluded_prefixes=None):
    """
    Main crawling logic.

    :param start_url: Starting URL (queue is seeded by the caller via the state file)
    :param max_depth: Maximum crawl depth, or None for unlimited
    :param allowed_prefixes: List of allowed URL prefixes (supports wildcards)
    :param task_dir: Task directory holding the state file
    :param strategy: 'BFS' (FIFO queue) or 'DFS' (LIFO); previously ignored
    :param excluded_prefixes: List of excluded URL prefixes (supports wildcards)
    """
    visited, queue = load_state(task_dir)
    state_file = os.path.join(task_dir, 'crawler_state.csv')
    retry_queue = deque()  # URLs that hit a transient network error

    # Maintain normalized views of both collections so duplicate checks are
    # O(1) set lookups instead of rebuilding O(n) lists per iteration
    # (the original was quadratic in the number of crawled URLs).
    visited_norm = {normalize_url(u) for u in visited}
    queued_norm = {normalize_url(u) for u, _, _ in queue}

    # BFS consumes the deque FIFO, DFS LIFO. The `strategy` parameter was
    # previously accepted but never used (DFS silently behaved as BFS).
    pop_next = queue.pop if strategy == 'DFS' else queue.popleft

    logging.info(f"Queue size: {len(queue)}")
    while queue:
        current_url, depth, referrer = pop_next()
        queued_norm.discard(normalize_url(current_url))
        logging.info(f"Processing URL: {current_url} at depth {depth}")
        if normalize_url(current_url) in visited_norm:
            logging.info(f"URL already visited: {current_url}")
            continue

        try:
            # NOTE: the old `response.getcode() == 404` branch was dead code;
            # urlopen raises HTTPError for 404, handled below.
            content, content_type = _fetch(current_url)

            # Resources are written flat into the parent of the task dir.
            save_resource(current_url, content, os.path.dirname(task_dir), content_type)
            _append_state_row(state_file, current_url, 'visited', depth, referrer)
            logging.info(f"Resource saved and marked as visited: {current_url}")

            visited.add(current_url)
            visited_norm.add(normalize_url(current_url))
            save_state(visited, queue, task_dir)

            if max_depth is not None and depth >= max_depth:
                logging.info(f"Max depth reached for URL: {current_url}")
                continue

            extractor = LinkExtractor()
            extractor.feed(content.decode('utf-8', errors='ignore'))
            for link in extractor.links:
                abs_link = urljoin(current_url, link)
                norm_link = normalize_url(abs_link)

                if norm_link in visited_norm or norm_link in queued_norm:
                    logging.info(f"Duplicate link skipped: {abs_link}")
                    continue

                if excluded_prefixes and any(
                        fnmatch.fnmatch(norm_link, normalize_url(p))
                        for p in excluded_prefixes):
                    # Explicitly excluded links are not even recorded as rows.
                    logging.info(f"Link excluded due to excluded prefix: {abs_link}")
                    continue

                if allowed_prefixes and not any(
                        fnmatch.fnmatch(norm_link, normalize_url(p))
                        for p in allowed_prefixes):
                    status = "excluded"
                    logging.info(f"Link excluded due to prefix mismatch: {abs_link}")
                else:
                    status = "pending"
                    queue.append((abs_link, depth + 1, current_url))
                    queued_norm.add(norm_link)
                    logging.info(f"New link added to queue: {abs_link}")

                _append_state_row(state_file, abs_link, status, depth + 1, current_url)

        except HTTPError as e:
            if e.code == 404:
                _append_state_row(state_file, current_url, 'notfound', depth, referrer)
                logging.error(f"404 Not Found: {current_url}. URL saved to state file.")
            else:
                retry_queue.append((current_url, depth, referrer))
                _append_state_row(state_file, current_url, 'networkerror', depth, referrer)
                logging.error(f"Network error processing {current_url}: {e}. URL added to retry queue.")
        except URLError as e:
            retry_queue.append((current_url, depth, referrer))
            _append_state_row(state_file, current_url, 'networkerror', depth, referrer)
            logging.error(f"Network error processing {current_url}: {e}. URL added to retry queue.")
        except Exception as e:
            logging.error(f"Unexpected error processing {current_url}: {e}")

    # Single retry pass over URLs that previously hit transient network errors.
    if retry_queue:
        logging.info("Retrying URLs that had network errors...")
        while retry_queue:
            current_url, depth, referrer = retry_queue.popleft()
            try:
                content, content_type = _fetch(current_url)
                save_resource(current_url, content, os.path.dirname(task_dir), content_type)
                _append_state_row(state_file, current_url, 'visited', depth, referrer)
                # Record the retry success in `visited` as well; the original
                # forgot this, leaving the in-memory state inconsistent.
                visited.add(current_url)
                logging.info(f"Resource saved and marked as visited: {current_url}")
            except HTTPError as e:
                if e.code == 404:
                    _append_state_row(state_file, current_url, 'notfound', depth, referrer)
                    logging.error(f"404 Not Found: {current_url}. URL saved to state file.")
                else:
                    _append_state_row(state_file, current_url, 'networkerror', depth, referrer)
                    logging.error(f"Network error processing {current_url}: {e}. URL will not be retried again.")
            except URLError as e:
                _append_state_row(state_file, current_url, 'networkerror', depth, referrer)
                logging.error(f"Network error processing {current_url}: {e}. URL will not be retried again.")
            except Exception as e:
                logging.error(f"Unexpected error processing {current_url}: {e}")

    cleanup_state(task_dir)
    logging.info("Crawl completed and state cleaned up.")


def main():
    """Configure and launch the crawler (script entry point)."""
    print("Web Crawler Configuration:")
    # Crawl configuration.
    start_url = "https://www.spec.org/"        # starting URL
    output_dir = "output"                      # base output directory
    max_depth = None                           # None = unlimited depth
    allowed_prefixes = ["https://*.spec.org/"]  # allowed URL prefixes
    strategy = "BFS"                           # crawl strategy (BFS/DFS)
    excluded_prefixes = ["https://www.spec.org/*/results/"]  # excluded URL prefixes

    # Ensure the task directory exists before anything writes into it.
    task_dir = os.path.join(output_dir, "task")
    os.makedirs(task_dir, exist_ok=True)
    print(f"Task directory created: {task_dir}")

    # Log both to a file inside the task directory and to stdout.
    log_file = os.path.join(task_dir, 'spider.log')
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        handlers=[
            logging.FileHandler(log_file),
            logging.StreamHandler(sys.stdout),
        ],
    )
    print(f"Log file configured: {log_file}")

    # Create the CSV that records crawled pages (header only at this point).
    csv_file = os.path.join(task_dir, 'crawled_pages.csv')
    with open(csv_file, 'w', newline='', encoding='utf-8') as f:
        csv.writer(f).writerow(['URL', 'Status'])
    logging.info(f"CSV file created: {csv_file}")

    # Resume from a previous state, or seed the queue with the start URL.
    visited, queue = load_state(task_dir)
    if not queue:
        queue = deque([(start_url, 0, "")])  # depth 0, empty referrer
        logging.info(f"Queue initialized with start URL: {start_url}")
        save_state(visited, queue, task_dir)
        logging.info("Initial state saved to disk.")

    # Run the crawl with the task directory and the exclusion list.
    crawl(start_url, max_depth, allowed_prefixes, task_dir, strategy.upper(), excluded_prefixes)

# Run the crawler only when executed as a script, not on import.
if __name__ == "__main__":
    main()