import scrapy
from hashlib import md5
from bs4 import BeautifulSoup
import json
import os
import re  # 用于正则表达式匹配

class AiSpider(scrapy.Spider):
    """Crawl outward from a seed URL and keep only AI-related articles.

    For every downloaded page the spider:
      1. skips URLs already seen (md5 fingerprint de-duplication),
      2. keeps the page only if its title, meta tags, or body text matches
         the AI keyword pattern,
      3. extracts meta info and article text and yields them as an item,
      4. follows every absolute link on the page (bounded by DEPTH_LIMIT).

    Scraped items are also accumulated in ``scraped_data`` and written to
    ``data/ai_articles.json`` when the spider closes.
    """

    name = 'ai_spider'
    start_urls = [
        "https://blog.csdn.net/huntershuai/article/details/81571234"
        ]
    custom_settings = {
        'CONCURRENT_REQUESTS': 5,  # maximum number of concurrent requests
        'DOWNLOAD_DELAY': 2,  # delay between requests (seconds)
        'DEPTH_LIMIT': 3,  # cap crawl depth to avoid runaway crawls
        'LOG_LEVEL': 'INFO',
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    }

    # Pre-compiled keyword pattern, matched against lower-cased text only.
    # Fix: \b does NOT delimit CJK words (CJK characters are all word
    # characters, so r'\b人工智能\b' never matches inside running Chinese
    # text) — the Chinese keywords are therefore matched as plain
    # substrings. The word boundary is kept only around the Latin token
    # "ai" so that words like "said" or "rain" do not match. The original
    # upper-case "AI" alternative was dead code (input is lower-cased).
    AI_PATTERN = re.compile(
        r'人工智能|机器学习|深度学习|神经网络|数据科学|机器人学|\bai\b'
    )

    # NOTE(review): the attributes below are *class* attributes shared by
    # all instances; fine for the usual one-spider-per-process setup, but
    # confirm if multiple AiSpider instances are ever run in one process.

    # md5 fingerprints of URLs already crawled (de-duplication).
    visited_urls = set()

    # Crawl statistics.
    total_pages = 0
    unique_pages = 0
    duplicate_pages = 0

    # Items collected during the crawl, dumped to JSON on close.
    scraped_data = []

    @staticmethod
    def _url_fingerprint(url):
        """Return the md5 hex digest of *url*, used as a de-dup key."""
        return md5(url.encode('utf-8')).hexdigest()

    def parse(self, response):
        """Process one page: de-duplicate, filter, extract, follow links.

        Yields one item dict per AI-related page plus follow-up Requests
        for every unvisited absolute link found on the page.
        """
        self.total_pages += 1

        # Skip URLs that were already crawled.
        url_hash = self._url_fingerprint(response.url)
        if url_hash in self.visited_urls:
            self.duplicate_pages += 1
            self.logger.info(f"Duplicate URL: {response.url}")
            return
        self.visited_urls.add(url_hash)
        self.unique_pages += 1

        # Drop pages that do not look AI-related.
        if not self.is_ai_related_content(response):
            self.logger.info(f"Skipped non-AI related URL: {response.url}")
            return

        # Extract meta information (source, update time) for debugging/output.
        meta_info = self.extract_meta_info(response)
        self.logger.info(f"Extracted meta info: {meta_info}")

        # Extract the article body text.
        soup = BeautifulSoup(response.text, 'html.parser')
        article_text = self.extract_article_text(soup)

        # Build the item once; keep a reference for the end-of-crawl dump
        # and yield the same dict through the Scrapy pipeline.
        item = {
            'url': response.url,
            'meta_info': meta_info,
            'content': article_text,
        }
        self.scraped_data.append(item)
        yield item

        # Log running crawl statistics.
        self.print_stats()

        # Follow every absolute link that has not been visited yet.
        for link in soup.find_all('a', href=True):
            next_url = link['href']
            if (next_url.startswith('http')
                    and self._url_fingerprint(next_url) not in self.visited_urls):
                yield scrapy.Request(next_url, callback=self.parse,
                                     errback=self.handle_error)

    def is_ai_related_content(self, response):
        """Return True if the page appears to be about AI.

        Checks, in order of cheapness: the <title>, the description and
        keywords meta tags, and finally the concatenated <p> body text.
        All text is lower-cased before matching against AI_PATTERN.
        """
        # Page title.
        title = response.xpath('//title/text()').get(default='').lower()
        if self.AI_PATTERN.search(title):
            return True

        # Meta description / keywords.
        description = response.xpath('//meta[@name="description"]/@content').get(default='').lower()
        keywords = response.xpath('//meta[@name="keywords"]/@content').get(default='').lower()
        if self.AI_PATTERN.search(description) or self.AI_PATTERN.search(keywords):
            return True

        # Fall back to the article body; reuse the shared extractor
        # instead of duplicating the <p>-joining logic here.
        soup = BeautifulSoup(response.text, 'html.parser')
        body_text = self.extract_article_text(soup).lower()
        return bool(self.AI_PATTERN.search(body_text))

    def extract_meta_info(self, response):
        """Extract meta-tag information (source and update time).

        Falls back through several common tag spellings; each field is the
        string 'Unknown' when nothing could be found.
        """
        meta_info = {
            'source': response.xpath('//meta[@name="source"]/@content').get(default='Unknown'),
            'update_time': response.xpath('//meta[@name="date"]/@content').get(default='Unknown'),
        }

        # Fallback: Open Graph site name.
        if meta_info['source'] == 'Unknown':
            meta_info['source'] = response.xpath('//meta[@property="og:site_name"]/@content').get(default='Unknown')

        # Fallback: article:published_time meta property.
        if meta_info['update_time'] == 'Unknown':
            meta_info['update_time'] = response.xpath('//meta[@property="article:published_time"]/@content').get(default='Unknown')

        # Last resort: a <time class="article-date"> element in the body.
        if meta_info['update_time'] == 'Unknown':
            update_time_from_content = response.xpath('//time[@class="article-date"]/@datetime').get()
            if update_time_from_content:
                meta_info['update_time'] = update_time_from_content

        return meta_info

    def extract_article_text(self, soup):
        """Return the text of all <p> elements in *soup*, space-joined."""
        return " ".join(para.get_text() for para in soup.find_all('p'))

    def print_stats(self):
        """Log the running crawl statistics (was print(); unified on logger)."""
        self.logger.info(f"Total pages crawled: {self.total_pages}")
        self.logger.info(f"Unique pages crawled: {self.unique_pages}")
        self.logger.info(f"Duplicate pages encountered: {self.duplicate_pages}")

    def handle_error(self, failure):
        """Errback for failed requests: log the URL that failed."""
        self.logger.error(f"Request failed: {failure.request.url}")
        # Extend here with retry/record logic if needed.

    def close(self, reason):
        """Hook called when the spider finishes: persist collected data.

        NOTE(review): Scrapy's conventional shutdown hook is
        ``closed(self, reason)``; confirm that overriding ``close`` is
        actually invoked by the Scrapy version in use.
        """
        self.logger.info("Crawl finished.")
        self.save_data()

    def save_data(self):
        """Write the collected items to data/ai_articles.json (UTF-8)."""
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists()/os.makedirs() pair.
        os.makedirs('data', exist_ok=True)

        file_path = os.path.join('data', 'ai_articles.json')
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(self.scraped_data, f, ensure_ascii=False, indent=4)
        self.logger.info(f"Data saved to {file_path}")