# Real-time crawler for cnhnb.com agricultural price listings
import scrapy

from fake_useragent import UserAgent
import random
import time
import schedule
import threading
from datetime import datetime
import logging
import re
from scrapy import Request
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError

from scrapy.spiders import Spider  # NOTE: base class changed from RedisSpider to plain Spider

class CnhnbRealtimeSpider(Spider):  # changed from RedisSpider to plain Spider
    """Real-time price spider for cnhnb.com (Huinong) agricultural products.

    Visits market/quote listing pages, extracts product/price/location/date
    fields via several fallback strategies, and yields plain dict items.
    """
    name = 'cnhnb_realtime'
    allowed_domains = ['cnhnb.com']
    # Default entry pages; start_requests() overrides these with a much
    # larger per-product list, so these act only as documentation/fallback.
    start_urls = [
        'https://www.cnhnb.com/hangqing/',
        'https://www.cnhnb.com/hangqing/jiage/',
        'https://www.cnhnb.com/hangqing/baojia/',
    ]

    # Per-spider settings overriding possibly problematic project defaults
    # (conservative anti-ban tuning: slow, single-request crawling).
    custom_settings = {
        'DOWNLOAD_DELAY': 15,  # increase delay to 15 seconds
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'CONCURRENT_REQUESTS': 1,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 1,
        'LOG_LEVEL': 'DEBUG',
        'AUTOTHROTTLE_ENABLED': False,  # disable auto-throttling
        'DOWNLOAD_TIMEOUT': 60,
        'RETRY_TIMES': 2,
        'HTTPCACHE_ENABLED': False,
        'COOKIES_ENABLED': True,
        'REFERER_ENABLED': True,
    }
    
    def __init__(self, *args, **kwargs):
        """Initialize the UA generator, run-state flag and counters."""
        super().__init__(*args, **kwargs)
        self.ua = UserAgent()
        self.is_running = False
        self.price_data_count = 0
        self.successful_extractions = 0
        # Seconds between scheduled crawls; 30 unless overridden below.
        self.crawl_interval = 30

        # NOTE(review): Scrapy normally attaches ``self.settings`` only
        # after __init__ (via from_crawler), so this guard is presumably
        # always False here — confirm; a from_crawler override would be the
        # reliable place to read REALTIME_CONFIG.
        if hasattr(self, 'settings'):
            self.crawl_interval = self.settings.get('REALTIME_CONFIG', {}).get('crawl_interval', 30)
    
    def get_random_delay(self):
        """Return a random inter-request delay in seconds (0.5-2.0)."""
        low, high = 0.5, 2.0  # real-time crawling uses shorter delays
        return random.uniform(low, high)
        
    def get_headers(self):
        """Build request headers with a randomized User-Agent string."""
        headers = {
            'User-Agent': self.ua.random,  # fresh UA per call via fake_useragent
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'no-cache',  # real-time crawling must skip caches
        }
        return headers
    
    def start_requests(self):
        """Seed the crawl: one homepage warm-up request per target page.

        Each request carries its real destination in ``meta['target_url']``;
        ``parse_homepage`` then follows to that page, mimicking a human
        session that enters via the homepage.

        Fixes:
        - ``dont_filter=True``: every seed request points at the *same*
          homepage URL, so without it Scrapy's dupefilter dropped all but
          the first request and only one target page was ever crawled.
        - duplicate target URLs (mangguo, boluo, ganlan, luobo appeared
          twice) are de-duplicated while preserving first-seen order.
        """
        base_urls = [
            # Original generic market pages
            'https://www.cnhnb.com/hangqing/',
            'https://www.cnhnb.com/hangqing/jiage/',
            'https://www.cnhnb.com/hangqing/baojia/',
            'https://www.cnhnb.com/hangqing/yangcong/',
            'https://www.cnhnb.com/hangqing/niurou/',
            'https://www.cnhnb.com/hangqing/putao/',
            'https://www.cnhnb.com/hangqing/huanggua/',
            'https://www.cnhnb.com/hangqing/luobo/',
            'https://www.cnhnb.com/hangqing/xiangjiao/',
            'https://www.cnhnb.com/hangqing/caomei/',
            'https://www.cnhnb.com/hangqing/xigua/',
            'https://www.cnhnb.com/hangqing/mangguo/',
            'https://www.cnhnb.com/hangqing/boluo/',
            'https://www.cnhnb.com/hangqing/youzi/',
            'https://www.cnhnb.com/hangqing/baicai/',
            'https://www.cnhnb.com/hangqing/qiezi/',
            'https://www.cnhnb.com/hangqing/hongshu/',
            'https://www.cnhnb.com/hangqing/huasheng/',
            'https://www.cnhnb.com/hangqing/ganlan/',
            'https://www.cnhnb.com/hangqing/yangmei/',
            # User-supplied product URLs (duplicates removed at iteration time)
            'https://www.cnhnb.com/hangqing/mangguo/',
            'https://www.cnhnb.com/hangqing/lizhi/',
            'https://www.cnhnb.com/hangqing/longyan/',
            'https://www.cnhnb.com/hangqing/boluo/',
            'https://www.cnhnb.com/hangqing/hng/',
            'https://www.cnhnb.com/hangqing/xiangj/',
            'https://www.cnhnb.com/hangqing/liulian/',
            'https://www.cnhnb.com/hangqing/mugua/',
            'https://www.cnhnb.com/hangqing/lianwu/',
            'https://www.cnhnb.com/hangqing/shanzhuo/',
            'https://www.cnhnb.com/hangqing/yezi/',
            'https://www.cnhnb.com/hangqing/ganzhe/',
            'https://www.cnhnb.com/hangqing/blm/',
            'https://www.cnhnb.com/hangqing/nyg/',
            'https://www.cnhnb.com/hangqing/ganlan/',
            'https://www.cnhnb.com/hangqing/yangtao/',
            'https://www.cnhnb.com/hangqing/ningmeng/',
            'https://www.cnhnb.com/hangqing/ganju/',
            'https://www.cnhnb.com/hangqing/jinju/',
            'https://www.cnhnb.com/hangqing/jiuhuang/',
            'https://www.cnhnb.com/hangqing/suanmiao/',
            'https://www.cnhnb.com/hangqing/dasuan/',
            'https://www.cnhnb.com/hangqing/shengjiang/',
            'https://www.cnhnb.com/hangqing/xbh/',
            'https://www.cnhnb.com/hangqing/lusun/',
            'https://www.cnhnb.com/hangqing/luobo/',
            'https://www.cnhnb.com/hangqing/dongsun/',
            'https://www.cnhnb.com/hangqing/bc/',
            'https://www.cnhnb.com/hangqing/shengcai/',
            'https://www.cnhnb.com/hangqing/xianc/',
            'https://www.cnhnb.com/hangqing/mec/',
            'https://www.cnhnb.com/hangqing/xiangcai/',
            'https://www.cnhnb.com/hangqing/kxco/',
            'https://www.cnhnb.com/hangqing/maodou/',
            'https://www.cnhnb.com/hangqing/sijidou/',
            # NOTE(review): 'wandouo' looks like a typo for 'wandou' — verify.
            'https://www.cnhnb.com/hangqing/wandouo/',
        ]
        # dict.fromkeys() de-duplicates while keeping first-seen order.
        for url in dict.fromkeys(base_urls):
            yield Request(
                url='https://www.cnhnb.com/',
                callback=self.parse_homepage,
                headers=self.get_realistic_headers(),
                meta={'target_url': url, 'dont_cache': True, 'dont_retry': False},
                dont_filter=True,  # all seeds share one URL; bypass the dupefilter
                errback=self.errback_httpbin,
            )
    
    def parse_homepage(self, response):
        """After the homepage warm-up response, schedule the real target page.

        Fixes:
        - removed the blocking ``time.sleep(random.uniform(3, 8))``: Scrapy
          runs callbacks on a single-threaded Twisted reactor, so sleeping
          here stalled the *entire* crawler; the 15s randomized
          DOWNLOAD_DELAY in custom_settings already spaces requests out;
        - removed the redundant function-local ``import random`` /
          ``import time`` (both are module-level imports);
        - log through the spider logger instead of print().
        """
        self.logger.info(f"首页访问成功: {response.url} - {response.status}")

        # The real destination was stashed by start_requests().
        target_url = response.meta['target_url']
        self.logger.info(f"准备访问目标页面: {target_url}")

        yield Request(
            url=target_url,
            callback=self.parse,
            headers=self.get_realistic_headers(),
            meta={'dont_cache': True, 'dont_retry': False},
            errback=self.errback_httpbin,
        )
    
    def get_realistic_headers(self):
        """Build a browser-like header set with a random desktop Chrome UA.

        Unlike get_headers(), this variant uses a fixed pool of realistic
        UA strings plus Sec-Fetch/Referer fields to look like a real visit.
        """
        # Pool of current desktop Chrome user agents (Windows/macOS/Linux).
        ua_pool = (
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        )

        headers = {
            'User-Agent': random.choice(ua_pool),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Cache-Control': 'max-age=0',
            'DNT': '1',
            'Referer': 'https://www.cnhnb.com/',
        }
        return headers
    
    def _start_scheduler(self):
        """Spawn a daemon thread that fires _schedule_crawl periodically.

        The loop runs only while ``self.is_running`` is True; the daemon
        flag lets the process exit without joining the thread.
        """
        def _scheduler_loop():
            # Register the periodic job, then pump pending jobs once a second.
            schedule.every(self.crawl_interval).seconds.do(self._schedule_crawl)
            while self.is_running:
                schedule.run_pending()
                time.sleep(1)

        worker = threading.Thread(target=_scheduler_loop, daemon=True)
        worker.start()
        self.logger.info(f"定时调度器已启动，爬取间隔: {self.crawl_interval}秒")
    
    def _schedule_crawl(self):
        """Periodic crawl tick (placeholder — currently only logs).

        Invoked from the scheduler thread while the spider is running.
        """
        if self.is_running:
            self.logger.info(f"执行定时爬取任务: {datetime.now()}")
            # New crawl requests could be triggered here via a Redis queue,
            # or by invoking the crawl method directly.
    
    def _crawl_data(self):
        """Yield requests for the core price-listing pages.

        Helper generator for scheduled re-crawls; dont_filter=True so the
        same URLs can be fetched repeatedly.
        """
        pages = (
            'https://www.cnhnb.com/hangqing/',
            'https://www.cnhnb.com/hangqing/jiage/',   # price quotes
            'https://www.cnhnb.com/hangqing/baojia/',  # offer listings
        )
        for page in pages:
            yield scrapy.Request(
                url=page,
                headers=self.get_headers(),
                callback=self.parse,
                dont_filter=True,
                meta={'crawl_time': datetime.now().isoformat()},
            )
    
    def parse(self, response):
        """Parse a listing page and yield extracted price items.

        Tries four extraction strategies in order of reliability and stops
        at the first one that produces data; pages yielding nothing are
        dumped to disk for debugging.
        """
        self.logger.info(f"收到响应: {response.url} - {response.status}")

        # Guard clause: anything but 200 is logged and skipped.
        if response.status != 200:
            self.logger.warning(f"非200状态码: {response.url} - {response.status}")
            return

        self.logger.info(f"正在解析页面: {response.url}")

        page_title = response.css('title::text').get()
        if page_title:
            self.logger.info(f"页面标题: {page_title}")

        # Extraction strategies, most structured/reliable first.
        strategies = (
            ('CSS选择器', self.extract_with_css_selectors),
            ('正则表达式', self.extract_with_regex),
            ('XPath', self.extract_with_xpath),
            ('文本分析', self.extract_with_text_analysis),
        )

        extracted_data = []
        for label, extractor in strategies:
            price_data = extractor(response)
            if price_data:
                extracted_data.extend(price_data)
                self.logger.info(f"{label}提取到 {len(price_data)} 条价格数据")
                break  # first successful strategy wins

        if extracted_data:
            self.successful_extractions += 1
            for data in extracted_data:
                yield data
                self.price_data_count += 1
        else:
            self.logger.warning(f"未能从页面提取到价格数据: {response.url}")
            # Keep the raw page so selectors can be debugged offline.
            self.save_debug_page(response)
    
    def extract_with_css_selectors(self, response):
        """Extract price rows from the structured li.market-list-item list.

        Falls back to generic price-like selectors when no structured rows
        yield data. Returns a list of price item dicts.
        """
        def text_of(node, selector):
            # Prefer the ::text pseudo-element; otherwise strip tags from
            # the raw node HTML.
            value = node.css(selector + '::text').get()
            if not value:
                raw = node.css(selector).get()
                if raw:
                    value = re.sub(r'<[^>]+>', '', raw)
            return value

        items = []
        rows = response.css('li.market-list-item')
        self.logger.info(f"找到 {len(rows)} 个产品列表项")

        for row in rows:
            try:
                product_name = text_of(row, 'span.product')
                price_text = text_of(row, 'span.price')
                location = text_of(row, 'span.place')
                date_text = text_of(row, 'span.time')

                # Name and price are mandatory; skip incomplete rows.
                if not product_name or not price_text:
                    continue

                product_name = product_name.strip()
                price_text = price_text.strip()
                location = location.strip() if location else '未知'
                date_text = date_text.strip() if date_text else datetime.now().strftime('%Y-%m-%d')

                price_item = self.create_price_item_with_details(
                    price_text, product_name, location, date_text, response.url
                )
                if price_item:
                    items.append(price_item)
                    self.logger.info(f"提取到产品: {product_name}, 价格: {price_text}, 产地: {location}")

            except Exception as e:
                self.logger.error(f"处理列表项时出错: {e}")
                continue

        if not items:
            self.logger.info("使用备用CSS选择器方法")
            items = self.extract_with_fallback_selectors(response)

        return items
    
    def extract_with_fallback_selectors(self, response):
        """Last-ditch CSS extraction: probe generic price-ish selectors.

        Any element whose raw HTML looks like a price (is_price_text) is
        fed to create_price_item. Returns the collected item dicts.
        """
        # Broad net of class-based selectors commonly used for prices.
        candidate_selectors = (
            'span[class*="price"]',
            'div[class*="price"]',
            'span[class*="jiage"]',
            'div[class*="jiage"]',
            'span[class*="money"]',
            'div[class*="money"]',
            '.price',
            '.jiage',
            '.money',
            '[class*="price"]',
            '[class*="jiage"]',
            '[class*="money"]',
        )

        items = []
        for selector in candidate_selectors:
            elements = response.css(selector)
            self.logger.info(f"使用备用选择器 '{selector}' 找到 {len(elements)} 个元素")

            for element in elements:
                raw = element.get()
                if raw and self.is_price_text(raw):
                    candidate = self.create_price_item(raw, response.url)
                    if candidate:
                        items.append(candidate)

        return items
    
    def extract_with_regex(self, response):
        """Extract prices from the raw page text with regex patterns.

        Bug fix: for patterns with an *optional* unit group, re.findall
        returns ``''`` (not a shorter tuple) when the unit is absent, so
        ``match[1] if len(match) > 1 else '斤'`` produced an empty unit and
        strings like "5元/". Empty or missing units now default to '斤'.
        """
        items = []

        # Price patterns; the first three capture an optional unit group.
        price_patterns = [
            r'(\d+\.?\d*)\s*元[\/\s]*(斤|公斤|kg|g|吨|t)?',
            r'(\d+\.?\d*)\s*￥[\/\s]*(斤|公斤|kg|g|吨|t)?',
            r'(\d+\.?\d*)\s*块[\/\s]*(斤|公斤|kg|g|吨|t)?',
            r'价格[：:]\s*(\d+\.?\d*)\s*元',
            r'(\d+\.?\d*)\s*元\/斤',
            r'(\d+\.?\d*)\s*元\/公斤',
        ]

        page_text = response.text

        for pattern in price_patterns:
            for match in re.findall(pattern, page_text, re.IGNORECASE):
                if isinstance(match, tuple):
                    price_value = match[0]
                    # Non-participating optional groups come back as '' —
                    # coalesce both '' and missing to the default unit.
                    unit = (match[1] if len(match) > 1 else '') or '斤'
                else:
                    # Single-group patterns yield plain strings.
                    price_value = match
                    unit = '斤'

                if self.is_valid_price(price_value):
                    item = self.create_price_item(f"{price_value}元/{unit}", response.url)
                    if item:
                        items.append(item)

        return items
    
    def extract_with_xpath(self, response):
        """Extract prices with broad XPath probes (class or text contains)."""
        # Probes ordered from specific to generic.
        probes = (
            "//span[contains(@class, 'price')]",
            "//div[contains(@class, 'price')]",
            "//span[contains(text(), '元')]",
            "//div[contains(text(), '元')]",
            "//*[contains(@class, 'price')]",
            "//*[contains(text(), '元')]",
        )

        items = []
        for probe in probes:
            for element in response.xpath(probe):
                raw = element.get()
                if raw and self.is_price_text(raw):
                    candidate = self.create_price_item(raw, response.url)
                    if candidate:
                        items.append(candidate)

        return items
    
    def extract_with_text_analysis(self, response):
        """Last-resort extraction: scan text nodes containing '元' for prices."""
        items = []

        # Every text node mentioning the yuan character is a candidate.
        for node in response.xpath("//text()[contains(., '元')]"):
            snippet = node.get().strip()
            # Skip empty nodes and long prose blocks (>= 100 chars).
            if not snippet or len(snippet) >= 100:
                continue
            price_match = re.search(r'(\d+\.?\d*)\s*元', snippet)
            if not price_match:
                continue
            price_value = price_match.group(1)
            if self.is_valid_price(price_value):
                candidate = self.create_price_item(f"{price_value}元", response.url)
                if candidate:
                    items.append(candidate)

        return items
    
    def is_price_text(self, text):
        """Return True if *text* contains both a price keyword and a number."""
        if not text:
            return False

        normalized = text.strip().lower()

        # Must mention a currency/price keyword...
        keywords = ('元', '￥', '块', '价格', 'jiage', 'price')
        if not any(keyword in normalized for keyword in keywords):
            return False

        # ...and contain at least one numeric token.
        return re.search(r'\d+\.?\d*', normalized) is not None
    
    def is_valid_price(self, price_str):
        """Return True if *price_str* parses to a float in [0.1, 1000].

        The range reflects plausible produce prices in yuan per jin.
        """
        try:
            value = float(price_str)
        except (ValueError, TypeError):
            return False
        return 0.1 <= value <= 1000
    
    def create_price_item(self, price_text, source_url):
        """Build a price item dict from a raw price string, or None.

        Tries three extraction methods in decreasing precision:
        value+unit, value+'元', then any bare number (after stripping tags).

        Bug fix: the unit pattern has two groups, so
        ``len(price_match.groups()) > 1`` was always True and ``group(2)``
        returned None whenever the optional unit was absent, producing
        ``unit=None`` items. None/empty units now default to '斤'.
        """
        try:
            # Method 1: price with an explicit unit (most precise).
            price_match = re.search(r'(\d+\.?\d*)\s*元[\/\s]*(斤|公斤|kg|g|吨|t)?', price_text)
            if price_match:
                price_value = float(price_match.group(1))
                unit = price_match.group(2) or '斤'  # group(2) is None if unit absent
            else:
                # Method 2: price followed by '元' only.
                price_match = re.search(r'(\d+\.?\d*)\s*元', price_text)
                if price_match:
                    price_value = float(price_match.group(1))
                    unit = '斤'
                else:
                    # Method 3: bare number — strip HTML tags first so we
                    # don't pick up numbers from attribute values.
                    clean_text = re.sub(r'<[^>]+>', '', price_text)
                    price_match = re.search(r'(\d+\.?\d*)', clean_text)
                    if price_match:
                        price_value = float(price_match.group(1))
                        unit = '斤'
                    else:
                        return None

            # Reject values outside the plausible produce-price range.
            if not self.is_valid_price(price_value):
                self.logger.warning(f"价格 {price_value} 不在合理范围内，跳过")
                return None

            # Product name extraction is a placeholder for now.
            product_name = self.extract_product_name(price_text)

            return {
                'name': product_name,
                'price_value': price_value,
                'price_original': price_text,
                'unit': unit,
                'location': '未知',
                'date': datetime.now().strftime('%Y-%m-%d'),
                'timestamp': datetime.now().timestamp(),
                'crawl_timestamp': datetime.now().isoformat(),
                'source_url': source_url,
                'spider_name': self.name,
            }
        except Exception as e:
            self.logger.error(f"创建价格项时出错: {e}")
            return None
    
    def extract_product_name(self, text):
        """Return a product name for *text* (placeholder implementation).

        Currently always returns the generic '农产品'; refine once a
        reliable selector for the name in the page structure is known.
        """
        # Can be refined according to the actual page structure.
        return '农产品'
    
    def save_debug_page(self, response):
        """Dump the response HTML to a timestamped file for offline debugging.

        Bug fix: the success log printed the literal placeholder
        '(unknown)' instead of the actual saved file name.
        """
        try:
            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            filename = f"debug_page_{timestamp}.html"
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(response.text)
            self.logger.info(f"调试页面已保存: {filename}")
        except Exception as e:
            self.logger.error(f"保存调试页面失败: {e}")
    
    def errback_httpbin(self, failure):
        """Request error callback: log the failure by its exception type."""
        if failure.check(HttpError):
            # HttpError carries the (non-2xx) response.
            response = failure.value.response
            self.logger.error(f'HttpError on {response.url}: {response.status}')
            return

        request = failure.request
        if failure.check(DNSLookupError):
            self.logger.error(f'DNSLookupError on {request.url}')
        elif failure.check(TimeoutError, TCPTimedOutError):
            self.logger.error(f'TimeoutError on {request.url}')
        else:
            self.logger.error(f'Unknown error on {request.url}: {failure.value}')
    
    def closed(self, reason):
        """Spider shutdown hook: stop the scheduler loop and log totals."""
        self.is_running = False  # lets the scheduler thread exit its loop
        self.logger.info(f"爬虫已关闭，原因: {reason}")
        self.logger.info(f"总共提取了 {self.price_data_count} 条价格数据")
        self.logger.info(f"成功解析了 {self.successful_extractions} 个页面")
    
    def spider_idle(self, spider):
        """Idle handler: log that the spider is waiting for new work.

        NOTE(review): nothing visible here connects this method to the
        spider_idle signal — confirm it is registered elsewhere, otherwise
        it is never called.
        """
        self.logger.info("爬虫空闲，等待新的任务...")
        # Cleanup work or state checks could be added here.

    def create_price_item_with_details(self, price_text, product_name, location, date_text, source_url):
        """Build a price item dict with caller-supplied detail fields, or None.

        Like create_price_item() but with product name, location and date
        extracted by the caller from structured page rows.

        Bug fix: the unit pattern has two groups, so
        ``len(price_match.groups()) > 1`` was always True and ``group(2)``
        returned None when the optional unit was absent, producing
        ``unit=None`` items. A missing unit now defaults to '斤'.
        """
        try:
            # Preferred: numeric value plus an explicit unit.
            price_match = re.search(r'(\d+\.?\d*)\s*元[\/\s]*(斤|公斤|kg|g|吨|t)?', price_text)
            if price_match:
                price_value = float(price_match.group(1))
                unit = price_match.group(2) or '斤'  # group(2) is None if unit absent
            else:
                # Fallback: value followed by '元' only.
                price_match = re.search(r'(\d+\.?\d*)\s*元', price_text)
                if price_match:
                    price_value = float(price_match.group(1))
                    unit = '斤'
                else:
                    return None

            # Reject values outside the plausible produce-price range.
            if not self.is_valid_price(price_value):
                self.logger.warning(f"价格 {price_value} 不在合理范围内，跳过")
                return None

            return {
                'name': product_name,
                'price_value': price_value,
                'price_original': price_text,
                'unit': unit,
                'location': location,
                'date': date_text,
                'timestamp': datetime.now().timestamp(),
                'crawl_timestamp': datetime.now().isoformat(),
                'source_url': source_url,
                'spider_name': self.name,
            }
        except Exception as e:
            self.logger.error(f"创建详细价格项时出错: {e}")
            return None

class CnhnbSchedulerSpider(CnhnbRealtimeSpider):
    """Scheduler spider — starts and manages the real-time spider."""
    name = 'cnhnb_scheduler'
    # Leftover from the earlier RedisSpider-based version; unused now but
    # kept for backward compatibility with external configuration.
    redis_key = 'cnhnb_scheduler:start_urls'

    def start_requests(self):
        """Log startup; currently yields no requests.

        Bug fix: the old body had no ``yield`` and ended in ``pass``, so it
        returned None and Scrapy raised a TypeError ("start_requests must
        return an iterable"). Making this an empty generator lets the
        spider start cleanly.
        """
        self.logger.info("调度器爬虫启动")
        # Scheduling logic can be added here.
        return
        yield  # unreachable; makes this method a generator