import requests
import json
import time
from datetime import datetime
import os
import random
import sys
import logging
import subprocess
import argparse
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from kafka import KafkaProducer
# Configure logging: everything at INFO and above is mirrored to both a
# UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("xinfadi_fetch.log", encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Module-wide logger used by every function below.
logger = logging.getLogger("xinfadi_fetcher")


# Build a requests session that automatically retries transient failures.
def create_session(retries=3, backoff_factor=0.5, status_forcelist=(500, 502, 503, 504)):
    """Create a requests.Session with retry support mounted for HTTP and HTTPS.

    Args:
        retries: total / connect / read retry budget.
        backoff_factor: exponential backoff factor between retry attempts.
        status_forcelist: server status codes that should trigger a retry.

    Returns:
        requests.Session: session with a retrying HTTPAdapter mounted.
    """
    session = requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
        # Fix: urllib3's Retry only retries idempotent methods by default,
        # which excludes POST -- yet every request in this script is a POST,
        # so the status_forcelist retries would never fire. allowed_methods=None
        # makes Retry apply to all HTTP methods (requires urllib3 >= 1.26).
        allowed_methods=None,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session


def generate_user_agent():
    """Return a randomly chosen desktop browser User-Agent string.

    Varies the request fingerprint across Chrome, Firefox, Safari and Edge
    identities to make automated requests look less uniform.
    """
    # Fixed pool of common desktop UA strings.
    _agents = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Safari/605.1.15',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.67',
    )
    return random.choice(_agents)


def fetch_price_data(use_proxy=False, max_retries=3):
    """Fetch Xinfadi market price data and save it as a timestamped JSON file.

    Probes several POST parameter combinations against getPriceData.html until
    one returns a non-empty product list, then pages through the remaining
    results (capped at 50 pages) and writes everything to
    ./output/prices_<timestamp>.json.

    Args:
        use_proxy (bool): route traffic through the hard-coded local proxy
            (127.0.0.1:7890).
        max_retries (int): retry budget handed to create_session().

    Returns:
        tuple: (output_file, count) where output_file is the saved JSON path,
        or None when nothing was fetched or saving failed, and count is the
        number of product records collected.
    """
    url = 'http://www.xinfadi.com.cn/getPriceData.html'

    # Session with automatic retry on transient errors.
    session = create_session(retries=max_retries)

    # Optional proxy setup.
    proxies = None
    if use_proxy:
        # Replace with a real proxy address before relying on this.
        proxies = {
            'http': 'http://127.0.0.1:7890',  # example proxy address
            'https': 'http://127.0.0.1:7890',
        }
        logger.info(f"使用代理: {proxies}")

    # Browser-like headers; the endpoint expects an AJAX form POST.
    headers = {
        'User-Agent': generate_user_agent(),
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'X-Requested-With': 'XMLHttpRequest',
        'Origin': 'http://www.xinfadi.com.cn',
        'Referer': 'http://www.xinfadi.com.cn/priceDetail.html',
        'Connection': 'keep-alive',
        'Cache-Control': 'no-cache'
    }

    # Today's date, used as the publication-date filter.
    today = datetime.now().strftime("%Y-%m-%d")

    # Candidate parameter sets, tried in order until one yields data.
    test_data_sets = [
        {
            'limit': '20',
            'current': '1',
            'pubDateStartTime': today,
            'pubDateEndTime': today,
            'prodPcatid': '',
            'prodCatid': '',
            'prodName': ''
        },
        {
            'limit': '20',
            'current': '1',
            'pubDateStartTime': '',  # try without a date filter
            'pubDateEndTime': '',
            'prodPcatid': '',
            'prodCatid': '',
            'prodName': ''
        },
        {
            'limit': '20',
            'current': '1',
            'pubDateStartTime': '',
            'pubDateEndTime': '',
            'prodPcatid': '1186',  # try the vegetable category id
            'prodCatid': '',
            'prodName': ''
        }
    ]

    all_products = []
    successful_data_set = None
    output_file = None

    # Probe each candidate parameter set.
    for data_set_index, data in enumerate(test_data_sets):
        logger.info(f"尝试数据集 {data_set_index + 1}/{len(test_data_sets)}: {data}")

        try:
            # Issue the POST request.
            response = session.post(
                url,
                headers=headers,
                data=data,
                proxies=proxies,
                timeout=20
            )

            # Non-200: try the next parameter set.
            if response.status_code != 200:
                logger.warning(f"请求失败，状态码: {response.status_code}")
                logger.debug(f"响应内容: {response.text[:200]}...")
                continue

            # Parse the JSON response.
            try:
                json_data = response.json()

                # The API is expected to return a JSON object.
                if not isinstance(json_data, dict):
                    logger.warning(f"响应格式错误，预期字典类型，实际为: {type(json_data)}")
                    continue

                # Records live under the 'list' key.
                if 'list' not in json_data:
                    logger.warning(f"响应中没有'list'字段")
                    logger.debug(f"响应内容: {json_data}")
                    continue

                products = json_data.get('list', [])
                logger.info(f"获取到 {len(products)} 条产品数据")

                if products:
                    # First parameter set that yields data wins.
                    successful_data_set = data
                    all_products.extend(products)
                    break

            except json.JSONDecodeError as e:
                logger.warning(f"JSON解析错误: {e}")
                logger.debug(f"响应内容: {response.text[:200]}...")
                continue

        except requests.exceptions.RequestException as e:
            logger.warning(f"请求异常: {e}")
            continue

    # If a parameter set worked, keep paging for the rest of the data.
    # NOTE: successful_data_set aliases an entry of test_data_sets, so the
    # 'current' mutation below also mutates that list -- harmless here since
    # probing has finished.
    if successful_data_set and all_products:
        logger.info(f"找到有效的数据集: {successful_data_set}")

        current_page = 2  # page 1 was already fetched during probing
        max_pages = 50  # safety cap on pagination

        while current_page <= max_pages:
            # Advance the page number in the POST body.
            successful_data_set['current'] = str(current_page)

            logger.info(f"正在获取第 {current_page} 页数据...")

            # Random delay to avoid hammering the server.
            time.sleep(random.uniform(1.5, 3.0))

            try:
                # Issue the POST request for this page.
                response = session.post(
                    url,
                    headers=headers,
                    data=successful_data_set,
                    proxies=proxies,
                    timeout=20
                )

                # Any HTTP failure stops pagination (data so far is kept).
                if response.status_code != 200:
                    logger.warning(f"请求失败，状态码: {response.status_code}")
                    break

                # Parse the JSON response.
                try:
                    json_data = response.json()

                    # Records live under the 'list' key.
                    if 'list' not in json_data:
                        logger.warning(f"响应中没有'list'字段")
                        break

                    products = json_data.get('list', [])
                    logger.info(f"获取到 {len(products)} 条产品数据")

                    # Accumulate this page.
                    all_products.extend(products)

                    # Stop once the server-reported total has been covered.
                    total = json_data.get('count', 0)
                    if total <= current_page * int(successful_data_set['limit']):
                        logger.info(f"已获取所有数据，总计 {total} 条")
                        break

                    current_page += 1

                except json.JSONDecodeError as e:
                    logger.warning(f"JSON解析错误: {e}")
                    break

            except requests.exceptions.RequestException as e:
                logger.warning(f"请求异常: {e}")
                break

    # Persist whatever was collected.
    if all_products:
        try:
            # Ensure ./output exists next to this script.
            output_dir = os.path.join(os.path.dirname(__file__), 'output')
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            # Timestamped output file name.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output_file = os.path.join(output_dir, f'prices_{timestamp}.json')

            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump({'list': all_products, 'count': len(all_products)}, f, ensure_ascii=False, indent=2)

            logger.info(f'数据已保存到: {output_file}')
            logger.info(f'共获取 {len(all_products)} 条产品数据')

        except Exception as e:
            logger.error(f"保存数据时出错: {e}")
            import traceback
            logger.error(traceback.format_exc())
            output_file = None
    else:
        logger.warning("未获取到任何产品数据")

    return output_file, len(all_products)


# Fallback data generator, used when the API cannot be reached.
def generate_sample_data():
    """Write a small set of hard-coded fallback price records to a JSON file.

    The records mimic the API's schema (prodName/lowPrice/... keys) so that
    downstream consumers can process them unchanged.

    Returns:
        tuple: (output_file, count) -- path of the written JSON file (None on
        write failure) and the number of sample records.
    """
    logger.info("生成示例数据")

    # All sample records share today's publication date.
    today = datetime.now().strftime("%Y-%m-%d")

    # Field names in API order, plus one value row per vegetable.
    field_names = ("prodName", "lowPrice", "avgPrice", "highPrice",
                   "specInfo", "place", "unitInfo")
    rows = [
        ("大白菜", "1.5", "1.8", "2.0", "白菜", "河北", "斤"),
        ("土豆", "2.0", "2.5", "3.0", "马铃薯", "内蒙古", "斤"),
        ("西红柿", "3.5", "4.0", "4.5", "番茄", "山东", "斤"),
        ("黄瓜", "3.0", "3.5", "4.0", "普通黄瓜", "河南", "斤"),
        ("茄子", "4.0", "4.5", "5.0", "紫皮茄子", "河北", "斤"),
    ]
    sample_products = [dict(zip(field_names, row), pubDate=today) for row in rows]

    output_file = None
    try:
        # Ensure ./output exists next to this script.
        output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "output")
        os.makedirs(output_dir, exist_ok=True)

        # Timestamped output file name.
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_file = os.path.join(output_dir, f"xinfadi_sample_{stamp}.json")

        # Same envelope shape as the real fetch: {'list': ..., 'count': ...}.
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump({'list': sample_products, 'count': len(sample_products)}, f, ensure_ascii=False, indent=2)

        logger.info(f"已保存{len(sample_products)}条示例数据到文件: {output_file}")
    except Exception as e:
        logger.error(f"保存示例数据到文件失败: {str(e)}")
        output_file = None

    return output_file, len(sample_products)


def send_to_kafka(json_file):
    """Ship *json_file* to Kafka by running the sibling kafka_producer.py.

    Args:
        json_file (str): path of the JSON file to hand to the producer script.

    Returns:
        bool: True when the child process exits with code 0, False otherwise.
    """
    try:
        here = os.path.dirname(os.path.abspath(__file__))
        # Run the producer with the same interpreter as this script.
        cmd = [sys.executable, os.path.join(here, 'kafka_producer.py'), json_file]
        logger.info(f"执行命令: {' '.join(cmd)}")

        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            logger.error(f"发送数据到Kafka失败: {result.stderr}")
            return False

        logger.info("数据已成功发送到Kafka")
        return True
    except Exception as e:
        logger.error(f"调用kafka_producer.py失败: {str(e)}")
        return False


def send_direct_to_kafka(json_file):
    """Send records from a JSON file straight to Kafka (no subprocess).

    NOTE(review): dead code -- this definition is shadowed by the extended
    re-definition of the same name near the end of this file, which is what
    the module actually binds. Kept byte-identical here; consider deleting
    this earlier copy.
    """
    try:
        # Load the JSON payload.
        with open(json_file, 'r', encoding='utf-8') as f:
            data = json.load(f)

        # Accept either a bare list or an object wrapping it under 'list'.
        records = data.get('list', []) if isinstance(data, dict) else data

        # Build the producer.
        producer = KafkaProducer(
            bootstrap_servers=['192.168.93.201:9092'],
            value_serializer=lambda v: json.dumps(v, ensure_ascii=False).encode('utf-8'),
            acks='all',
            retries=3,
            request_timeout_ms=15000
        )

        success_count = 0
        for record in records:
            try:
                # Map raw API field names onto the documented target schema.
                kafka_record = {
                    'market': '北京新发地',
                    'product': record.get('prodName', ''),
                    'low_price': float(record.get('lowPrice', 0)),
                    'avg_price': float(record.get('avgPrice', 0)),
                    'high_price': float(record.get('highPrice', 0)),
                    'specification': record.get('specInfo', ''),
                    'origin': record.get('place', ''),
                    'unit': record.get('unitInfo', ''),
                    'date': record.get('pubDate', datetime.now().strftime("%Y-%m-%d")),
                    'crawl_time': datetime.now().isoformat()
                }
                producer.send('raw_prices', kafka_record)
                success_count += 1
            except Exception as e:
                logger.error(f"转换记录失败: {str(e)}")

        producer.flush()
        producer.close()
        logger.info(f"直接发送成功: {success_count}/{len(records)} 条数据")
        return True

    except Exception as e:
        logger.error(f"直接发送到Kafka失败: {str(e)}")
        return False


def main():
    """CLI entry point: fetch (or fabricate) price data, optionally push to Kafka.

    Flags:
        --proxy   route HTTP traffic through the local proxy
        --sample  skip the API and generate sample data instead
        --kafka   send the resulting JSON file to Kafka
    """
    try:
        # Parse command-line flags.
        parser = argparse.ArgumentParser(description='新发地价格数据抓取')
        parser.add_argument('--proxy', action='store_true', help='使用代理')
        parser.add_argument('--sample', action='store_true', help='生成示例数据')
        parser.add_argument('--kafka', action='store_true', help='发送数据到Kafka')
        args = parser.parse_args()

        output_file = None
        count = 0

        if args.sample:
            logger.info("生成示例数据模式")
            output_file, count = generate_sample_data()
        else:
            logger.info("开始抓取新发地价格数据")
            output_file, count = fetch_price_data(use_proxy=args.proxy)

            if not output_file:
                logger.warning("API获取失败，生成示例数据作为备用")
                output_file, count = generate_sample_data()
            else:
                # Re-read the saved file to re-derive the record count.
                try:
                    with open(output_file, 'r', encoding='utf-8') as f:
                        data = json.load(f)
                    # Accept both JSON shapes: a bare array, or an object
                    # carrying a 'list' (or at least a 'count') field.
                    if isinstance(data, list):
                        count = len(data)
                    elif isinstance(data, dict) and 'list' in data:
                        count = len(data['list'])
                    elif isinstance(data, dict) and 'count' in data:
                        count = data['count']
                    else:
                        count = 0
                        logger.warning(f"无法确定数据条数，文件格式不符合预期: {output_file}")
                except Exception as e:
                    logger.error(f"读取数据文件失败: {str(e)}")
                    count = 0

        # When --kafka was given, forward the saved file.
        if args.kafka and output_file:
            logger.info(f"准备将数据发送到Kafka: {output_file}")

            # Prefer the subprocess producer; fall back to in-process sending.
            if not send_to_kafka(output_file):
                logger.warning("调用kafka_producer.py失败，尝试直接发送")
                send_direct_to_kafka(output_file)

        logger.info(f"程序执行完成，共获取 {count} 条数据，保存在 {output_file}")

    except KeyboardInterrupt:
        logger.info("程序被用户中断")
    except Exception as e:
        logger.error(f"程序执行出错: {e}")
        import traceback
        logger.error(traceback.format_exc())


# Connectivity smoke test for the Kafka broker.
def test_kafka_connection():
    """Send one test message to the 'raw_prices' topic to verify connectivity.

    Returns:
        bool: True if the broker acknowledged the message within 10s,
        False on any error.
    """
    try:
        # (fix) dropped the redundant function-local
        # `from kafka import KafkaProducer` -- the module already imports
        # KafkaProducer unconditionally at the top of the file.
        producer = KafkaProducer(
            bootstrap_servers=['192.168.93.201:9092'],
            value_serializer=lambda v: json.dumps(v, ensure_ascii=False).encode('utf-8'),
            acks='all',
            retries=3,
            request_timeout_ms=15000
        )

        # One self-describing test payload.
        test_message = {
            'test': True,
            'timestamp': datetime.now().isoformat(),
            'message': 'Kafka连接测试'
        }

        future = producer.send('raw_prices', test_message)
        # Block until the broker acknowledges (or 10s elapse).
        result = future.get(timeout=10)

        producer.flush()
        producer.close()

        logger.info("Kafka连接测试成功")
        logger.info(f"消息发送结果: {result}")
        return True

    except Exception as e:
        logger.error(f"Kafka连接测试失败: {str(e)}")
        return False


# Improved direct-to-Kafka sender (overrides the earlier definition above;
# this is the version the module actually binds).
def send_direct_to_kafka(json_file):
    """Send records from a JSON file straight to Kafka, one message at a time.

    Extends the earlier version with an empty-input guard, producer batching
    options, per-record delivery confirmation, and success/failure counters.

    Args:
        json_file (str): path to a JSON file -- either a bare array of
            records or an object carrying the records under 'list'.

    Returns:
        bool: True if at least one record was delivered, False otherwise.
    """
    try:
        # Load the JSON payload.
        with open(json_file, 'r', encoding='utf-8') as f:
            data = json.load(f)

        # Accept either a bare list or an object wrapping it under 'list'.
        records = data.get('list', []) if isinstance(data, dict) else data

        if not records:
            logger.warning("没有找到要发送的数据记录")
            return False

        # Build the producer.
        producer = KafkaProducer(
            bootstrap_servers=['192.168.93.201:9092'],
            value_serializer=lambda v: json.dumps(v, ensure_ascii=False).encode('utf-8'),
            acks='all',
            retries=3,
            request_timeout_ms=15000,
            # batching / buffering tuning
            batch_size=16384,
            linger_ms=10,
            buffer_memory=33554432
        )

        success_count = 0
        failed_count = 0

        for i, record in enumerate(records):
            try:
                # Map raw API field names onto the documented target schema.
                kafka_record = {
                    'market': '北京新发地',
                    'product': record.get('prodName', ''),
                    'low_price': float(record.get('lowPrice', 0)),
                    'avg_price': float(record.get('avgPrice', 0)),
                    'high_price': float(record.get('highPrice', 0)),
                    'specification': record.get('specInfo', ''),
                    'origin': record.get('place', ''),
                    'unit': record.get('unitInfo', ''),
                    'date': record.get('pubDate', datetime.now().strftime("%Y-%m-%d")),
                    'crawl_time': datetime.now().isoformat(),
                    'record_id': i + 1  # 1-based record id
                }

                # Send, then block until this message is acknowledged.
                future = producer.send('raw_prices', kafka_record)
                # NOTE(review): waiting on every future makes delivery fully
                # synchronous, so the batch_size/linger_ms settings above have
                # little effect -- consider deferring the get() calls.
                result = future.get(timeout=10)
                success_count += 1

                # Progress log every 10 successful sends.
                if success_count % 10 == 0:
                    logger.info(f"已发送 {success_count} 条数据")

            except Exception as e:
                failed_count += 1
                logger.error(f"发送第 {i + 1} 条记录失败: {str(e)}")
                logger.error(f"失败的记录: {record}")

        # Flush any buffered messages before closing.
        producer.flush()
        producer.close()

        logger.info(f"数据发送完成: 成功 {success_count} 条, 失败 {failed_count} 条, 总计 {len(records)} 条")
        return success_count > 0

    except Exception as e:
        logger.error(f"直接发送到Kafka失败: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return False


if __name__ == '__main__':
    # Special-case a Kafka connectivity check. NOTE(review): this flag is
    # parsed by hand rather than through main()'s argparse parser, so it
    # must be the first argument and does not appear in --help.
    if len(sys.argv) > 1 and sys.argv[1] == '--test-kafka':
        test_kafka_connection()
    else:
        main()