"""简单的示例爬虫，用于爬取金融新闻网站的内容"""
from urllib.parse import urljoin, urlparse

import scrapy

from .base_spider import BaseSpider
from .items import FinancialNewsItem


class FinancialNewsSpider(BaseSpider):
    """Spider that collects news articles from Chinese financial news sites.

    For each configured site it crawls listing pages, follows links that
    match per-domain article-URL markers, and extracts title, body,
    publish time, source and keywords via per-domain XPath rules into
    ``FinancialNewsItem`` objects.

    NOTE(review): ``self.crawl_start_time``, ``self.item_count`` and the
    retry-related ``meta`` keys appear to be contracts with ``BaseSpider``
    (not visible here) — confirm against that class before changing them.
    """
    name = "financial_news_spider"

    # Entry points: the front pages of the financial news portals.
    start_urls = [
        'https://finance.sina.com.cn/',
        'https://www.cnstock.com/',
        'https://www.10jqka.com.cn/',
        'https://finance.qq.com/',
        'https://money.163.com/'
    ]

    # Per-site configuration: request headers, timing, domain whitelist,
    # and per-domain XPath extraction rules keyed by netloc.
    website_config = {
        'headers': {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
        },
        'timeout': 60,
        'download_delay': 2.0,
        'allowed_domains': [
            'finance.sina.com.cn',
            'www.cnstock.com',
            'www.10jqka.com.cn',
            'finance.qq.com',
            'money.163.com'
        ],
        # Site-specific extraction rules (XPath), keyed by domain.
        'extraction_rules': {
            'finance.sina.com.cn': {
                'article_links': '//a[contains(@href, ".shtml") and contains(@href, "finance.sina.com.cn")]/@href',
                'title': '//h1[@class="main-title"]/text()',
                'content': '//div[@id="artibody"]//p/text()',
                'publish_time': '//span[@class="date"]/text()',
                'source': '//span[@class="source"]/text()'
            },
            'www.cnstock.com': {
                'article_links': '//a[contains(@href, "/article/")]/@href',
                'title': '//h1[@class="title"]/text()',
                'content': '//div[@class="content"]//p/text()',
                'publish_time': '//span[@class="timer"]/text()',
                'source': '//span[@class="source"]/text()'
            },
            'www.10jqka.com.cn': {
                'article_links': '//a[contains(@href, "/article/")]/@href',
                'title': '//h1[@class="artical-title"]/text()',
                'content': '//div[@class="artical-content"]//p/text()',
                'publish_time': '//div[@class="time"]/text()',
                'source': '//div[@class="source"]/text()'
            },
            'finance.qq.com': {
                'article_links': '//a[contains(@href, "/a/") and contains(@href, "finance.qq.com")]/@href',
                'title': '//h1[@class="title"]/text()',
                'content': '//div[@class="content"]//p/text()',
                'publish_time': '//span[@class="a_time"]/text()',
                'source': '//span[@class="a_source"]/text()'
            },
            'money.163.com': {
                'article_links': '//a[contains(@href, "/article/") and contains(@href, "money.163.com")]/@href',
                'title': '//h1[@class="post_title"]/text()',
                'content': '//div[@class="post_content"]//p/text()',
                'publish_time': '//div[@class="post_info"]/text()',
                'source': '//div[@class="post_info"]/text()'
            }
        }
    }

    # URL substrings that mark an article page on each domain. Shared by
    # _is_article_page and _filter_article_links so the two checks can
    # never drift apart (previously the same rules were duplicated in two
    # parallel if/elif chains).
    ARTICLE_URL_MARKERS = {
        'finance.sina.com.cn': ('/doc/', '/a/'),
        'www.cnstock.com': ('/article/',),
        'www.10jqka.com.cn': ('/article/',),
        'finance.qq.com': ('/a/',),
        'money.163.com': ('/article/',),
    }

    # Politeness delay enforced by Scrapy's downloader. This replaces the
    # previous time.sleep() in parse(), which blocked the Twisted reactor
    # and therefore stalled every concurrent request, not just this one.
    download_delay = website_config['download_delay']

    # Expose the whitelist as the standard Scrapy attribute so the
    # OffsiteMiddleware actually filters requests to foreign domains.
    allowed_domains = website_config['allowed_domains']

    def parse(self, response):
        """Parse a response.

        Article pages yield a ``FinancialNewsItem``; listing pages yield
        follow-up ``scrapy.Request``s for every valid article link.
        """
        # Current domain selects the extraction rule set.
        domain = self._get_domain(response.url)
        rules = self.website_config['extraction_rules'].get(domain, {})

        if not rules:
            self.logger.warning(f"未找到域名 {domain} 的提取规则")
            return

        if self._is_article_page(response, domain):
            # Article page: extract and emit one item.
            yield self._parse_article(response, domain, rules)
            return

        # Listing page: collect, filter and follow article links.
        article_links = response.xpath(rules['article_links']).extract()
        for link in self._filter_article_links(article_links, domain):
            full_url = self._complete_url(link, domain)
            # No sleep here: throttling is handled by the class-level
            # download_delay attribute (blocking calls would freeze the
            # whole event loop).
            yield scrapy.Request(
                url=full_url,
                callback=self.parse,
                headers=self.website_config['headers'],
                meta={'url': full_url, 'domain': domain, 'retry_count': 0},
            )

    def _is_article_page(self, response, domain):
        """Return True if the response URL looks like an article page.

        Heuristic only (URL-substring markers); real projects may need
        content-based detection as well.
        """
        markers = self.ARTICLE_URL_MARKERS.get(domain, ())
        return any(marker in response.url for marker in markers)

    def _parse_article(self, response, domain, rules):
        """Extract one ``FinancialNewsItem`` from an article page.

        Missing fields degrade to empty strings/lists rather than raising,
        since real-world pages frequently miss individual elements.
        """
        title = response.xpath(rules['title']).extract_first(default='').strip()

        # Join body paragraphs, dropping whitespace-only fragments.
        content_paragraphs = response.xpath(rules['content']).extract()
        content = '\n'.join(p.strip() for p in content_paragraphs if p.strip())

        publish_time = response.xpath(rules['publish_time']).extract_first(default='').strip()
        source = response.xpath(rules['source']).extract_first(default='').strip()
        keywords = self._extract_keywords(response, domain)

        item = FinancialNewsItem(
            url=response.url,
            domain=domain,
            title=title,
            content=content,
            publish_time=publish_time,
            source=source,
            keywords=keywords,
            # crawl_start_time is presumably set by BaseSpider — confirm.
            crawled_time=self.crawl_start_time.isoformat()
        )

        self.item_count += 1
        # Progress log every 100 items.
        if self.item_count % 100 == 0:
            self.logger.info(f"已爬取 {self.item_count} 条新闻")

        return item

    def _extract_keywords(self, response, domain):
        """Return a cleaned list of page keywords for *domain*.

        Currently only Sina and cnstock expose keywords (in the standard
        ``<meta name="keywords">`` tag); other domains yield an empty list.
        """
        keywords = []
        if domain in ('finance.sina.com.cn', 'www.cnstock.com'):
            raw = response.xpath('//meta[@name="keywords"]/@content').extract_first(default='')
            keywords = raw.split(',')
        # Strip whitespace and drop empty entries.
        return [kw.strip() for kw in keywords if kw.strip()]

    def _filter_article_links(self, links, domain):
        """Keep links matching the domain's article-URL markers.

        De-duplicates while preserving first-seen order, so the crawl
        order is deterministic (the previous ``list(set(...))`` was not).
        """
        markers = self.ARTICLE_URL_MARKERS.get(domain, ())
        valid = (
            link for link in links
            if link and link.strip() and any(m in link for m in markers)
        )
        return list(dict.fromkeys(valid))

    def _complete_url(self, link, domain):
        """Resolve *link* to an absolute URL on *domain*.

        ``urljoin`` preserves already-absolute links and also handles
        protocol-relative (``//host/x``) and dot-segment (``../x``) links,
        which naive string concatenation mangled.
        """
        return urljoin(f'https://{domain}/', link)

    def _get_domain(self, url):
        """Return the network-location (host) component of *url*."""
        return urlparse(url).netloc
