import requests
import random
import time
import re
import json
from datetime import datetime
from kafka import KafkaProducer
from parsel import Selector
import logging

# Kafka configuration
KAFKA_BOOTSTRAP_SERVERS = '192.168.88.130:9092'  # change to your Kafka broker address
KAFKA_TOPIC = 'agri_price_data'

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger('cnhnb_kafka_spider')

# Module-level producer shared by every send in this script.
# Records are serialized as UTF-8 JSON; ensure_ascii=False keeps the
# Chinese field values human-readable inside the topic.
producer = KafkaProducer(
    bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS,
    value_serializer=lambda v: json.dumps(v, ensure_ascii=False).encode('utf-8')
)

# 采集目标URL列表
BASE_URLS = [
    # 原有通用行情页
    'https://www.cnhnb.com/hangqing/',
    'https://www.cnhnb.com/hangqing/jiage/',
    'https://www.cnhnb.com/hangqing/baojia/',
    'https://www.cnhnb.com/hangqing/yangcong/',
    'https://www.cnhnb.com/hangqing/niurou/',
    'https://www.cnhnb.com/hangqing/putao/',
    'https://www.cnhnb.com/hangqing/huanggua/',
    'https://www.cnhnb.com/hangqing/luobo/',
    'https://www.cnhnb.com/hangqing/xiangjiao/',
    'https://www.cnhnb.com/hangqing/caomei/',
    'https://www.cnhnb.com/hangqing/xigua/',
    'https://www.cnhnb.com/hangqing/mangguo/',
    'https://www.cnhnb.com/hangqing/boluo/',
    'https://www.cnhnb.com/hangqing/youzi/',
    'https://www.cnhnb.com/hangqing/baicai/',
    'https://www.cnhnb.com/hangqing/qiezi/',
    'https://www.cnhnb.com/hangqing/hongshu/',
    'https://www.cnhnb.com/hangqing/huasheng/',
    'https://www.cnhnb.com/hangqing/ganlan/',
    'https://www.cnhnb.com/hangqing/yangmei/',
    # 用户补充的品种URL
    'https://www.cnhnb.com/hangqing/mangguo/',
    'https://www.cnhnb.com/hangqing/lizhi/',
    'https://www.cnhnb.com/hangqing/longyan/',
    'https://www.cnhnb.com/hangqing/boluo/',
    'https://www.cnhnb.com/hangqing/hng/',
    'https://www.cnhnb.com/hangqing/xiangj/',
    'https://www.cnhnb.com/hangqing/liulian/',
    'https://www.cnhnb.com/hangqing/mugua/',
    'https://www.cnhnb.com/hangqing/lianwu/',
    'https://www.cnhnb.com/hangqing/shanzhuo/',
    'https://www.cnhnb.com/hangqing/yezi/',
    'https://www.cnhnb.com/hangqing/ganzhe/',
    'https://www.cnhnb.com/hangqing/blm/',
    'https://www.cnhnb.com/hangqing/nyg/',
    'https://www.cnhnb.com/hangqing/ganlan/',
    'https://www.cnhnb.com/hangqing/yangtao/',
    'https://www.cnhnb.com/hangqing/ningmeng/',
    'https://www.cnhnb.com/hangqing/ganju/',
    'https://www.cnhnb.com/hangqing/jinju/',
    'https://www.cnhnb.com/hangqing/jiuhuang/',
    'https://www.cnhnb.com/hangqing/suanmiao/',
    'https://www.cnhnb.com/hangqing/dasuan/',
    'https://www.cnhnb.com/hangqing/shengjiang/',
    'https://www.cnhnb.com/hangqing/xbh/',
    'https://www.cnhnb.com/hangqing/lusun/',
    'https://www.cnhnb.com/hangqing/luobo/',
    'https://www.cnhnb.com/hangqing/dongsun/',
    'https://www.cnhnb.com/hangqing/bc/',
    'https://www.cnhnb.com/hangqing/shengcai/',
    'https://www.cnhnb.com/hangqing/xianc/',
    'https://www.cnhnb.com/hangqing/mec/',
    'https://www.cnhnb.com/hangqing/xiangcai/',
    'https://www.cnhnb.com/hangqing/kxco/',
    'https://www.cnhnb.com/hangqing/maodou/',
    'https://www.cnhnb.com/hangqing/sijidou/',
    'https://www.cnhnb.com/hangqing/wandouo/',
    # 新增用户指定的页面
    'https://www.cnhnb.com/hangqing/mianfen/',
    'https://www.cnhnb.com/hangqing/yumm/',
    'https://www.cnhnb.com/hangqing/huasheng/',
    'https://www.cnhnb.com/hangqing/zhima/',
    'https://www.cnhnb.com/hangqing/dadou/',
    'https://www.cnhnb.com/hangqing/lvdou/',
    'https://www.cnhnb.com/hangqing/dami/',
    'https://www.cnhnb.com/hangqing/xiaomi/',
    'https://www.cnhnb.com/hangqing/damai/',
    'https://www.cnhnb.com/hangqing/heidou/',
]

# Pool of desktop-browser User-Agent strings; one is chosen at random per
# request (see get_realistic_headers) so traffic looks less uniform.
HEADERS_LIST = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
]

# Outbound proxy applied to every request (both schemes forward through the
# same endpoint). NOTE(review): assumes this proxy is reachable from the
# deployment host — confirm before running elsewhere.
proxies = {
    'http': 'http://192.168.88.1:7897',
    'https': 'http://192.168.88.1:7897'
}

def get_realistic_headers():
    """Build a realistic browser header set with a randomly picked User-Agent.

    Returns:
        dict: HTTP headers mimicking a Chrome navigation request.
    """
    chosen_agent = random.choice(HEADERS_LIST)
    # Header order is kept stable because it is part of the request fingerprint.
    return {
        'User-Agent': chosen_agent,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-User': '?1',
        'Cache-Control': 'max-age=0',
        'DNT': '1',
        'Referer': 'https://www.cnhnb.com/',
    }

def robust_request(session, url, max_retries=5):
    """GET `url` with retries and backoff.

    Retries on network errors and on 429/503/403 responses, sleeping a
    randomized, attempt-scaled delay between tries.

    Returns:
        requests.Response on HTTP 200, otherwise None.
    """
    attempt = 0
    while attempt < max_retries:
        headers = get_realistic_headers()
        try:
            response = session.get(url, headers=headers, timeout=30, proxies=proxies)
        except Exception as exc:
            logger.warning(f"请求异常: {exc}，第{attempt+1}次重试 {url}")
            time.sleep(random.uniform(5, 15) * (attempt + 1))
        else:
            if response.status_code == 200:
                return response
            if response.status_code in (429, 503, 403):
                # Throttling / temporary block — back off and retry.
                wait = random.uniform(5, 15) * (attempt + 1)
                logger.warning(f"遇到状态码{response.status_code}，第{attempt+1}次重试，等待{wait:.1f}秒... {url}")
                time.sleep(wait)
            else:
                # Any other status is treated as a hard failure.
                logger.warning(f"请求失败: {url} 状态码: {response.status_code}")
                return None
        attempt += 1
    logger.error(f"多次重试后仍失败: {url}")
    return None

def is_price_text(text):
    """Return True when `text` looks like a price: it contains both a
    price-related keyword and at least one number."""
    if not text:
        return False
    stripped = text.strip()
    lowered = stripped.lower()
    keywords = ('元', '￥', '块', '价格', 'jiage', 'price')
    if not any(word in lowered for word in keywords):
        return False
    return re.search(r'\d+\.?\d*', stripped) is not None

def is_valid_price(price_str):
    """Return True when `price_str` parses as a float within the plausible
    price range [0.1, 1000] (units are yuan per weight unit)."""
    try:
        value = float(price_str)
    except (ValueError, TypeError):
        return False
    return 0.1 <= value <= 1000

def create_price_item_with_details(price_text, product_name, location, date_text, source_url):
    """Parse a raw price string and assemble a Kafka-ready record.

    Args:
        price_text: raw price text, e.g. "3.5元/斤" or "12元".
        product_name: product name to store in the record.
        location: origin / market location.
        date_text: quotation date string.
        source_url: page the price was scraped from.

    Returns:
        dict record, or None when no price can be parsed or the value is
        outside the plausible range accepted by is_valid_price().
    """
    try:
        # Unit group is optional, so this single pattern also covers plain
        # "12元" (the old second-chance regex was unreachable dead code).
        price_match = re.search(r'(\d+\.?\d*)\s*元[\/\s]*(斤|公斤|kg|g|吨|t)?', price_text)
        if not price_match:
            return None
        price_value = float(price_match.group(1))
        # BUG FIX: group(2) is None when the unit is absent; the previous
        # `if len(price_match.groups()) > 1` check was always true and left
        # unit as None instead of the intended default '斤'.
        unit = price_match.group(2) or '斤'
        if not is_valid_price(price_value):
            logger.warning(f"价格 {price_value} 不在合理范围内，跳过")
            return None
        return {
            'name': product_name,
            'price_value': price_value,
            'price_original': price_text,
            'unit': unit,
            'location': location,
            'date': date_text,
            'timestamp': datetime.now().timestamp(),
            'crawl_timestamp': datetime.now().isoformat(),
            'source_url': source_url,
            'spider_name': 'cnhnb_kafka_spider_full',
        }
    except Exception as e:
        logger.error(f"创建详细价格项时出错: {e}")
        return None

def _css_text(node, css_class):
    """Text of the first `span.<css_class>` under `node`; falls back to
    stripping tags from the raw HTML when there is no direct text node.
    Returns None when the span is absent."""
    value = node.css(f'span.{css_class}::text').get()
    if not value:
        value = node.css(f'span.{css_class}').get()
        if value:
            value = re.sub(r'<[^>]+>', '', value)
    return value

def extract_with_css_selectors(sel, url):
    """Primary extraction strategy: parse structured li.market-list-item rows.

    Args:
        sel: parsel Selector over the page HTML.
        url: source page URL, recorded in each item.

    Returns:
        list of price-record dicts (possibly empty).
    """
    items = []
    list_items = sel.css('li.market-list-item')
    logger.info(f"找到 {len(list_items)} 个产品列表项")
    for item in list_items:
        try:
            # The four fields share identical lookup logic — see _css_text.
            product_name = _css_text(item, 'product')
            price_text = _css_text(item, 'price')
            location = _css_text(item, 'place')
            date_text = _css_text(item, 'time')
            if not product_name or not price_text:
                continue
            product_name = product_name.strip()
            price_text = price_text.strip()
            location = location.strip() if location else '未知'
            date_text = date_text.strip() if date_text else datetime.now().strftime('%Y-%m-%d')
            price_item = create_price_item_with_details(
                price_text, product_name, location, date_text, url
            )
            if price_item:
                items.append(price_item)
                logger.info(f"提取到产品: {product_name}, 价格: {price_text}, 产地: {location}")
        except Exception as e:
            logger.error(f"处理列表项时出错: {e}")
            continue
    return items

def extract_with_regex(html, url):
    """Fallback strategy: scan the raw HTML for price-looking patterns.

    The patterns overlap (e.g. "10元/斤" matches both the generic pattern and
    the per-unit-specific ones), which previously produced duplicate records
    for the same quotation; a seen-set on (price, unit) now de-duplicates.
    An unmatched optional unit group comes back from findall as '' (not
    None), so empty units fall back to the default '斤'.

    Returns:
        list of price-record dicts (possibly empty).
    """
    items = []
    seen = set()
    price_patterns = [
        r'(\d+\.?\d*)\s*元[\/\s]*(斤|公斤|kg|g|吨|t)?',
        r'(\d+\.?\d*)\s*￥[\/\s]*(斤|公斤|kg|g|吨|t)?',
        r'(\d+\.?\d*)\s*块[\/\s]*(斤|公斤|kg|g|吨|t)?',
        r'价格[：:]\s*(\d+\.?\d*)\s*元',
        r'(\d+\.?\d*)\s*元\/斤',
        r'(\d+\.?\d*)\s*元\/公斤',
    ]
    for pattern in price_patterns:
        for match in re.findall(pattern, html, re.IGNORECASE):
            if isinstance(match, tuple):
                price_value = match[0]
                unit = match[1] if len(match) > 1 and match[1] else '斤'
            else:
                price_value = match
                unit = '斤'
            if not is_valid_price(price_value):
                continue
            key = (price_value, unit)
            if key in seen:
                continue
            seen.add(key)
            item = create_price_item_with_details(f"{price_value}元/{unit}", '农产品', '未知', datetime.now().strftime('%Y-%m-%d'), url)
            if item:
                items.append(item)
    return items

def extract_with_xpath(sel, url):
    """XPath fallback strategy: collect any element whose class name or text
    content suggests a price, and try to parse each one.

    Returns:
        list of price-record dicts (possibly empty).
    """
    xpath_selectors = (
        "//span[contains(@class, 'price')]",
        "//div[contains(@class, 'price')]",
        "//span[contains(text(), '元')]",
        "//div[contains(text(), '元')]",
        "//*[contains(@class, 'price')]",
        "//*[contains(text(), '元')]",
    )
    items = []
    for xpath in xpath_selectors:
        for node in sel.xpath(xpath):
            raw = node.get()
            if not raw or not is_price_text(raw):
                continue
            item = create_price_item_with_details(raw, '农产品', '未知', datetime.now().strftime('%Y-%m-%d'), url)
            if item:
                items.append(item)
    return items

def extract_with_text_analysis(sel, url):
    """Last-resort strategy: scan raw text nodes containing '元' and parse a
    price out of each short snippet (long snippets are likely not prices).

    Returns:
        list of price-record dicts (possibly empty).
    """
    items = []
    for node in sel.xpath("//text()[contains(., '元')]"):
        snippet = node.get().strip()
        if not snippet or len(snippet) >= 100:
            continue
        found = re.search(r'(\d+\.?\d*)\s*元', snippet)
        if not found:
            continue
        amount = found.group(1)
        if not is_valid_price(amount):
            continue
        item = create_price_item_with_details(f"{amount}元", '农产品', '未知', datetime.now().strftime('%Y-%m-%d'), url)
        if item:
            items.append(item)
    return items

def send_to_kafka(item):
    """Publish one price record to KAFKA_TOPIC and block until delivered.

    NOTE(review): flush() after every single send() forfeits producer
    batching; fine at this crawl rate, but batch per page if volume grows.
    """
    producer.send(KAFKA_TOPIC, item)
    producer.flush()
    logger.info(f"已发送到Kafka: {item}")

def crawl_once():
    """Crawl every URL in BASE_URLS once.

    For each page, extraction strategies are tried in order of reliability
    (structured CSS rows, then raw-HTML regex, then XPath, then plain text
    analysis); the first strategy that yields data wins and its records are
    published to Kafka. A randomized delay between pages throttles the crawl.
    """
    session = requests.Session()
    for url in BASE_URLS:
        try:
            logger.info(f"正在采集: {url}")
            resp = robust_request(session, url)
            if resp and resp.status_code == 200:
                sel = Selector(resp.text)
                # Data-driven dispatch replaces four copy-pasted if-blocks;
                # labels reproduce the original log messages exactly.
                strategies = [
                    (lambda: extract_with_css_selectors(sel, url), 'CSS选择器'),
                    (lambda: extract_with_regex(resp.text, url), '正则表达式'),
                    (lambda: extract_with_xpath(sel, url), 'XPath'),
                    (lambda: extract_with_text_analysis(sel, url), '文本分析'),
                ]
                extracted_data = []
                for run_strategy, label in strategies:
                    price_data = run_strategy()
                    if price_data:
                        extracted_data.extend(price_data)
                        logger.info(f"{label}提取到 {len(price_data)} 条价格数据")
                        break
                if extracted_data:
                    for data in extracted_data:
                        send_to_kafka(data)
                else:
                    logger.warning(f"未能从页面提取到价格数据: {url}")
            elif resp:
                # robust_request currently only returns 200 responses, so this
                # branch is defensive in case that contract changes.
                logger.warning(f"请求失败: {url} 状态码: {resp.status_code}")
        except Exception as e:
            logger.error(f"采集 {url} 出错: {e}")
        time.sleep(random.uniform(3, 8))

def main():
    """Run crawl rounds forever, pausing 30 seconds between rounds."""
    while True:
        crawl_once()
        logger.info("本轮所有页面采集完毕，等待30秒...")
        time.sleep(30)

# Script entry point: loop forever when executed directly.
if __name__ == "__main__":
    main()