import urllib.parse
import requests
import json
import pandas as pd
from datetime import datetime, timedelta
import time
import threading
import os
from queue import Queue
import logging
import sys
from kafka import KafkaProducer
from collections import defaultdict


# Logging configuration
def setup_logging_with_utf8():
    """Configure the root logger with UTF-8 capable handlers.

    Installs a UTF-8 file handler plus a stdout console handler, replacing
    any handlers left over from a previous configuration.
    """

    # Drop any handlers installed by earlier configuration calls so that
    # basicConfig below actually takes effect.
    root = logging.root
    for existing in list(root.handlers):
        root.removeHandler(existing)

    # File handler writes UTF-8 explicitly; the console handler inherits
    # whatever encoding sys.stdout uses.
    file_handler = logging.FileHandler(
        'xinfadi_realtime.log',
        mode='a',
        encoding='utf-8'  # key point: force UTF-8 for the log file
    )
    console_handler = logging.StreamHandler(stream=sys.stdout)

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[file_handler, console_handler],
    )

    # On Windows, switch the console to the UTF-8 code page so Chinese
    # log text renders correctly.
    if sys.platform.startswith('win'):
        os.system('chcp 65001')


# Call this once at import time so every subsequent log record in this
# module goes through the UTF-8 handlers configured above.
setup_logging_with_utf8()


class XinfadiRealTimeCrawler:
    """Real-time crawler for Xinfadi wholesale market price data.

    Periodically fetches price records for a fixed list of products from
    www.xinfadi.com.cn, saves them to timestamped CSV files plus a rolling
    summary CSV, and streams every record (and a per-round summary) to Kafka.
    """

    def __init__(self, interval_seconds=1800):  # default: one crawl every 30 minutes
        self.interval_seconds = interval_seconds
        self.url = 'http://www.xinfadi.com.cn/getPriceData.html'
        # Mimic the site's own AJAX request so the endpoint accepts us.
        self.headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Host': 'www.xinfadi.com.cn',
            'Origin': 'http://www.xinfadi.com.cn',
            'Referer': 'http://www.xinfadi.com.cn/priceDetail.html',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'X-Requested-With': 'XMLHttpRequest',
        }

        # Product names (Chinese) used verbatim as the site's search terms.
        self.products = ['大白菜', '小白菜', '萝卜', '土豆', '番茄', '黄瓜', '茄子', '辣椒',
                         '豆角', '苹果', '梨', '桃子', '葡萄', '香蕉', '西瓜', '胡萝卜',
                         '菠菜', '韭菜', '芹菜', '生菜', '菜花', '豆芽', '蒜苗', '油菜', '茴香']

        self.data_queue = Queue()  # most recent crawl batches; drained by get_latest_data()
        self.is_running = False    # loop flag checked by run_continuously()
        self.crawl_thread = None   # handle of the background crawl thread

        # Create the data storage directory if missing.
        self.data_dir = 'realtime_data'
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir)

        # Running Kafka delivery statistics (see get_kafka_stats()).
        self.kafka_stats = {
            'total_sent': 0,
            'total_failed': 0,
            'last_send_time': None,
            'send_errors': []
        }

        # Set up the Kafka producer (sets self.kafka_producer, possibly None).
        self.setup_kafka_producer()

    def setup_kafka_producer(self):
        """Create the Kafka producer; on failure leave self.kafka_producer as None."""
        try:
            self.kafka_producer = KafkaProducer(
                bootstrap_servers=['192.168.79.138:9092'],  # broker running on the VM
                value_serializer=lambda x: json.dumps(x, ensure_ascii=False).encode('utf-8'),
                key_serializer=lambda x: x if isinstance(x, bytes) else x.encode('utf-8'),
                # Throughput / reliability settings
                api_version=(0, 10, 1),
                retries=3,
                batch_size=16384,        # bytes per send batch
                linger_ms=10,            # small wait to improve throughput
                buffer_memory=33554432,  # producer buffer size in bytes
                max_block_ms=60000,      # max time send() may block
                request_timeout_ms=30000,
                acks='all'               # wait for all replicas to confirm
            )

            self.kafka_topic = 'agriculture_data'
            self.summary_topic = 'agriculture_summary'

            # Verify the broker is reachable before the crawl loop starts.
            self.test_kafka_connection()

        except Exception as e:
            logging.error(f"Kafka生产者初始化失败: {e}")
            self.kafka_producer = None

    def test_kafka_connection(self):
        """Send a synchronous test message; return True on success, False otherwise."""
        try:
            test_message = {
                'event_type': 'connection_test',
                'timestamp': datetime.now().isoformat(),
                'message': 'Kafka连接测试',
                'crawler_version': '2.0'
            }

            # Block up to 10s for broker acknowledgement.
            future = self.kafka_producer.send(self.kafka_topic, value=test_message)
            result = future.get(timeout=10)

            logging.info(f"Kafka连接测试成功: {result}")
            return True

        except Exception as e:
            logging.error(f"Kafka连接测试失败: {e}")
            return False

    def fetch_single_product(self, product):
        """Fetch price records for a single product name.

        Returns a list of record dicts (possibly empty). Never raises: all
        network/parsing errors are logged and yield an empty list.
        """
        end_date = datetime.now()
        start_date = end_date - timedelta(days=180)  # query window: last 180 days

        data = {
            'limit': '1000',
            'current': '1',
            'pubDateStartTime': start_date.strftime('%Y/%m/%d'),
            'pubDateEndTime': end_date.strftime('%Y/%m/%d'),
            'prodPcatid': '',
            'prodCatid': '',
            'prodName': product,
        }

        try:
            response = requests.post(
                self.url,
                headers=self.headers,
                data=urllib.parse.urlencode(data),
                timeout=10
            )

            if response.status_code == 200:
                try:
                    json_data = response.json()

                    if 'list' in json_data and json_data['list']:
                        product_list = json_data['list']

                        # The API sometimes wraps rows in {'data': [...]},
                        # sometimes returns a bare list.
                        if isinstance(product_list, dict) and 'data' in product_list:
                            items = product_list['data']
                        elif isinstance(product_list, list):
                            items = product_list
                        else:
                            items = []

                        product_data = []
                        for item in items:
                            record = {
                                'product_name': item.get('prodName', product),
                                'low_price': float(item.get('lowPrice', 0)),
                                'high_price': float(item.get('highPrice', 0)),
                                'avg_price': float(item.get('avgPrice', 0)),
                                'place': item.get('place', ''),
                                'spec': item.get('specInfo', ''),
                                'unit': item.get('unitInfo', '元/公斤'),
                                'pub_date': item.get('pubDate', ''),
                                'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                                'timestamp': datetime.now().timestamp()
                            }
                            product_data.append(record)

                        if product_data:
                            logging.info(f"{product}: 获取到{len(product_data)}条数据")
                            return product_data
                        else:
                            logging.warning(f"{product}: 未获取到数据")
                            return []

                except json.JSONDecodeError as e:
                    logging.error(f"{product}: JSON解析失败 - {e}")
                    return []
                # BUG FIX: a 200 response without a usable 'list' payload
                # previously fell through and returned None instead of [].
                return []
            else:
                logging.error(f"{product}: 请求失败，状态码: {response.status_code}")
                return []

        except Exception as e:
            logging.error(f"{product}: 请求出错 - {e}")
            return []

    def send_to_kafka(self, data):
        """Send a batch of product records to the main Kafka topic.

        Each record is enriched with send metadata before publishing; results
        are handled asynchronously via callbacks, and stats are updated.
        """
        if not hasattr(self, 'kafka_producer') or self.kafka_producer is None:
            logging.warning("Kafka生产者未初始化，跳过发送")
            return

        try:
            sent_count = 0
            failed_count = 0

            for record in data:
                try:
                    # Enrich the record with delivery metadata.
                    enriched_record = record.copy()
                    enriched_record.update({
                        'kafka_send_time': datetime.now().isoformat(),
                        'data_source': 'xinfadi_realtime_crawler',
                        'batch_id': datetime.now().strftime('%Y%m%d_%H%M%S'),
                        'record_id': f"{record['product_name']}_{record['timestamp']}"
                    })

                    # Key by product name so a product's records land on one partition.
                    future = self.kafka_producer.send(
                        self.kafka_topic,
                        value=enriched_record,
                        key=record['product_name'].encode('utf-8')
                    )

                    # Handle the broker response asynchronously.
                    future.add_callback(self.on_send_success, record['product_name'])
                    future.add_errback(self.on_send_error, record['product_name'])

                    sent_count += 1

                except Exception as record_error:
                    failed_count += 1
                    logging.error(f"记录发送失败: {record['product_name']} - {record_error}")

            # Flush so buffered messages actually leave the producer.
            self.kafka_producer.flush()

            # Update running statistics.
            self.kafka_stats['total_sent'] += sent_count
            self.kafka_stats['total_failed'] += failed_count
            self.kafka_stats['last_send_time'] = datetime.now().isoformat()

            logging.info(f"Kafka发送完成: 成功{sent_count}, 失败{failed_count}")

        except Exception as e:
            logging.error(f"Kafka批量发送异常: {e}")
            self.kafka_stats['send_errors'].append(str(e))

    def send_summary_to_kafka(self, all_data, successful_products):
        """Publish aggregate statistics for one crawl round to the summary topic."""
        try:
            # Round-level aggregate statistics.
            summary_stats = {
                'event_type': 'crawl_summary',
                'timestamp': datetime.now().isoformat(),
                'total_records': len(all_data),
                'successful_products': successful_products,
                'total_products': len(self.products),
                'success_rate': (successful_products / len(self.products)) * 100,
                'avg_price_overall': sum(record['avg_price'] for record in all_data) / len(all_data) if all_data else 0,
                'product_stats': {}
            }

            # Per-product price statistics.
            product_stats = defaultdict(list)
            for record in all_data:
                product_stats[record['product_name']].append(record['avg_price'])

            for product, prices in product_stats.items():
                summary_stats['product_stats'][product] = {
                    'count': len(prices),
                    'avg_price': sum(prices) / len(prices),
                    'min_price': min(prices),
                    'max_price': max(prices)
                }

            # Summaries go to their own dedicated topic.
            future = self.kafka_producer.send(
                self.summary_topic,
                value=summary_stats,
                key='crawl_summary'.encode('utf-8')
            )

            future.get(timeout=10)
            logging.info("汇总统计已发送到Kafka")

        except Exception as e:
            logging.error(f"发送汇总统计失败: {e}")

    def on_send_success(self, product_name, record_metadata):
        """Kafka success callback: log partition/offset at debug level."""
        logging.debug(
            f"✓ {product_name} 发送成功: partition={record_metadata.partition}, offset={record_metadata.offset}")

    def on_send_error(self, product_name, exception):
        """Kafka failure callback: log the delivery error."""
        logging.error(f"✗ {product_name} 发送失败: {exception}")

    def fetch_all_data(self):
        """Crawl every configured product once, persist results, stream to Kafka."""
        all_data = []
        successful_products = 0

        logging.info("开始新一轮数据爬取...")

        for product in self.products:
            product_data = self.fetch_single_product(product)
            if product_data:
                all_data.extend(product_data)
                successful_products += 1

                # Stream each product's records to Kafka as soon as they arrive.
                self.send_to_kafka(product_data)

            # Small delay between products to be polite to the server.
            time.sleep(1)

        if all_data:
            # Make the batch available to get_latest_data().
            self.data_queue.put(all_data)

            # Persist the round to a timestamped CSV file.
            df = pd.DataFrame(all_data)
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = os.path.join(self.data_dir, f'xinfadi_realtime_{timestamp}.csv')
            df.to_csv(filename, index=False, encoding='utf-8-sig')

            # BUG FIX: the log line previously contained no placeholder;
            # include the actual output path.
            logging.info(f"数据保存到 {filename}")
            logging.info(f"本轮爬取完成: {successful_products}/{len(self.products)} 个产品成功，共{len(all_data)}条数据")

            # Merge into the rolling summary file.
            self.save_summary_data(all_data)

            # Publish round-level statistics.
            self.send_summary_to_kafka(all_data, successful_products)

        else:
            logging.warning("本轮未获取到任何数据")

    def save_summary_data(self, new_data):
        """Merge new records into the rolling summary CSV (deduped, last 7 days)."""
        summary_file = os.path.join(self.data_dir, 'xinfadi_summary.csv')

        # If a summary already exists, merge the new batch into it.
        if os.path.exists(summary_file):
            try:
                existing_df = pd.read_csv(summary_file, encoding='utf-8-sig')
                new_df = pd.DataFrame(new_data)
                combined_df = pd.concat([existing_df, new_df], ignore_index=True)

                # Dedupe on (product, publish date, price, place), keeping newest.
                combined_df = combined_df.drop_duplicates(
                    subset=['product_name', 'pub_date', 'avg_price', 'place'],
                    keep='last'
                )

                # Keep only records crawled within the last 7 days.
                seven_days_ago = datetime.now() - timedelta(days=7)
                combined_df['crawl_datetime'] = pd.to_datetime(combined_df['crawl_time'])
                combined_df = combined_df[combined_df['crawl_datetime'] >= seven_days_ago]

                # Drop the temporary helper column.
                combined_df = combined_df.drop('crawl_datetime', axis=1)

            except Exception as e:
                logging.error(f"读取汇总文件失败: {e}")
                combined_df = pd.DataFrame(new_data)
        else:
            combined_df = pd.DataFrame(new_data)

        # Write the merged summary back out.
        combined_df.to_csv(summary_file, index=False, encoding='utf-8-sig')
        logging.info(f"汇总数据已更新，共{len(combined_df)}条记录")

    def run_continuously(self):
        """Crawl loop: fetch, then sleep out the remainder of the interval."""
        self.is_running = True

        while self.is_running:
            try:
                start_time = time.time()

                # One full crawl round.
                self.fetch_all_data()

                # Sleep only for whatever is left of the interval.
                elapsed_time = time.time() - start_time
                sleep_time = max(0, self.interval_seconds - elapsed_time)

                if self.is_running:
                    logging.info(f"等待{sleep_time:.0f}秒后进行下一次爬取...")
                    time.sleep(sleep_time)

            except KeyboardInterrupt:
                logging.info("收到中断信号，停止爬取...")
                break
            except Exception as e:
                logging.error(f"爬取过程中出现错误: {e}")
                time.sleep(60)  # back off one minute after an error

    def start_realtime_crawling(self):
        """Start the background crawl thread; no-op if already running."""
        if self.crawl_thread and self.crawl_thread.is_alive():
            logging.warning("实时爬取已在运行中")
            return

        logging.info(f"启动实时爬取，间隔{self.interval_seconds}秒")
        self.crawl_thread = threading.Thread(target=self.run_continuously, daemon=False)
        self.crawl_thread.start()

        return self.crawl_thread

    def stop_realtime_crawling(self):
        """Stop the crawl loop, close the Kafka producer, and log final stats."""
        self.is_running = False

        # NOTE(review): the loop may be mid-sleep for up to interval_seconds,
        # so this join can time out before the thread actually exits.
        if self.crawl_thread:
            self.crawl_thread.join(timeout=10)

        # Shut down the Kafka producer cleanly.
        if hasattr(self, 'kafka_producer') and self.kafka_producer is not None:
            try:
                self.kafka_producer.flush()  # make sure buffered messages are delivered
                self.kafka_producer.close()
                logging.info("Kafka生产者已关闭")
            except Exception as e:
                logging.error(f"关闭Kafka生产者失败: {e}")

        # Emit final delivery statistics.
        stats = self.get_kafka_stats()
        logging.info(f"Kafka发送统计: 总发送{stats['total_sent']}, 总失败{stats['total_failed']}")

        logging.info("实时爬取已停止")

    def get_kafka_stats(self):
        """Return a shallow copy of the Kafka delivery statistics."""
        return self.kafka_stats.copy()

    def get_latest_data(self, max_items=100):
        """Drain queued crawl batches and return up to max_items newest records."""
        latest_data = []

        # Pull whole batches until the queue is empty or we have enough.
        while not self.data_queue.empty() and len(latest_data) < max_items:
            batch_data = self.data_queue.get()
            latest_data.extend(batch_data)

        return latest_data[-max_items:] if latest_data else []

    def get_price_trend(self, product_name, days=7):
        """Return the recent price trend for one product from the summary CSV.

        Returns a list of {crawl_datetime, avg_price, place, spec} dicts sorted
        by time, or None when no data is available or reading fails.
        """
        summary_file = os.path.join(self.data_dir, 'xinfadi_summary.csv')

        if not os.path.exists(summary_file):
            return None

        try:
            df = pd.read_csv(summary_file, encoding='utf-8-sig')

            # Restrict to the requested product.
            product_df = df[df['product_name'] == product_name].copy()

            if product_df.empty:
                return None

            # Parse crawl timestamps for filtering/sorting.
            product_df['crawl_datetime'] = pd.to_datetime(product_df['crawl_time'])

            # Keep only the last `days` days.
            cutoff_date = datetime.now() - timedelta(days=days)
            product_df = product_df[product_df['crawl_datetime'] >= cutoff_date]

            # Chronological order.
            product_df = product_df.sort_values('crawl_datetime')

            return product_df[['crawl_datetime', 'avg_price', 'place', 'spec']].to_dict('records')

        except Exception as e:
            logging.error(f"获取价格趋势失败: {e}")
            return None


# 使用示例
def main():
    """Entry point: start the crawler and print Kafka stats roughly once a minute."""
    # Crawl every 5 minutes for better freshness.
    crawler = XinfadiRealTimeCrawler(interval_seconds=300)

    try:
        # Kick off the background crawl thread.
        crawler.start_realtime_crawling()

        print("实时爬取已启动，按Ctrl+C停止...")
        print(f"数据将保存到 {crawler.data_dir} 目录")
        print("数据将实时发送到Kafka主题: agriculture_data")

        # Periodically report delivery statistics.
        last_report = time.time()
        while True:
            time.sleep(1)
            now = time.time()
            if now - last_report > 60:
                stats = crawler.get_kafka_stats()
                print(f"Kafka统计: 已发送{stats['total_sent']}条, 失败{stats['total_failed']}条")
                last_report = now

    except KeyboardInterrupt:
        print("\n正在停止实时爬取...")
        crawler.stop_realtime_crawling()
        print("程序已退出")


# BUG FIX: everything after main() here was an accidental duplicate paste of
# the class body at the wrong indentation, which made the file a syntax error.
if __name__ == "__main__":
    main()
