import urllib.parse
import requests
import json
import pandas as pd
from datetime import datetime, timedelta
import time
import os
import logging
import sys
from kafka import KafkaProducer
from collections import defaultdict


# Logging configuration
def setup_logging_with_utf8():
    """Install a UTF-8 log file handler plus a stdout handler on the root logger."""
    # Drop any handlers left over from a previous configuration so we
    # don't log every message twice after repeated calls.
    root = logging.root
    for existing in list(root.handlers):
        root.removeHandler(existing)

    file_handler = logging.FileHandler(
        'xinfadi_pagination.log',
        mode='a',
        encoding='utf-8'
    )
    console_handler = logging.StreamHandler(stream=sys.stdout)

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[file_handler, console_handler],
    )

    # On Windows, switch the console code page to UTF-8 (65001) so that
    # non-ASCII (Chinese) log output renders correctly.
    if sys.platform.startswith('win'):
        os.system('chcp 65001')


setup_logging_with_utf8()


class XinfadiPaginationCrawler:
    """Paginated crawler for Beijing Xinfadi wholesale-market price data.

    Fetches price listings page by page from the site's JSON endpoint,
    saves each page (and a deduplicated summary) as CSV under
    ``pagination_data/``, and optionally streams every record to Kafka.
    """

    def __init__(self, max_pages=100, page_delay=2):
        """
        Initialize the pagination crawler.
        :param max_pages: maximum number of pages to crawl
        :param page_delay: delay between page requests, in seconds
        """
        self.max_pages = max_pages
        self.page_delay = page_delay
        self.url = 'http://www.xinfadi.com.cn/getPriceData.html'

        # Mimic the browser's AJAX request headers so the endpoint accepts us.
        self.headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Host': 'www.xinfadi.com.cn',
            'Origin': 'http://www.xinfadi.com.cn',
            'Referer': 'http://www.xinfadi.com.cn/priceDetail.html',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'X-Requested-With': 'XMLHttpRequest',
        }

        # Output directory for per-page and summary CSV files.
        # exist_ok avoids the check-then-create race of the previous version.
        self.data_dir = 'pagination_data'
        os.makedirs(self.data_dir, exist_ok=True)

        # Crawl statistics, populated by crawl_by_pagination().
        self.stats = {
            'total_pages': 0,
            'total_records': 0,
            'successful_pages': 0,
            'failed_pages': 0,
            'start_time': None,
            'end_time': None
        }

        # Set up the Kafka producer (self.kafka_producer is None on failure).
        self.setup_kafka_producer()

    def setup_kafka_producer(self):
        """Create the Kafka producer; on failure leave ``self.kafka_producer`` as None.

        Failure is non-fatal: the crawler still saves CSV files without Kafka.
        """
        try:
            self.kafka_producer = KafkaProducer(
                bootstrap_servers=['192.168.93.201:9092'],
                value_serializer=lambda x: json.dumps(x, ensure_ascii=False).encode('utf-8'),
                # Keys may arrive as str or bytes; only encode the former.
                key_serializer=lambda x: x if isinstance(x, bytes) else x.encode('utf-8'),
                api_version=(0, 10, 1),
                retries=3,
                batch_size=16384,
                linger_ms=10,
                buffer_memory=33554432,
                max_block_ms=60000,
                request_timeout_ms=30000,
                acks='all'  # wait for all in-sync replicas before acking
            )

            self.kafka_topic = 'agriculture_pagination_data'
            logging.info("Kafka生产者初始化成功")

        except Exception as e:
            logging.error(f"Kafka生产者初始化失败: {e}")
            self.kafka_producer = None

    @staticmethod
    def _safe_float(value, default=0.0):
        """Convert *value* to float, returning *default* for '', None or garbage.

        The API sometimes returns empty strings for price fields; a bare
        ``float()`` call would raise ValueError and drop the whole page.
        """
        try:
            return float(value)
        except (TypeError, ValueError):
            return default

    def fetch_page_data(self, page_number, product_name="", start_date="", end_date=""):
        """
        Fetch one page of price data.
        :param page_number: 1-based page number
        :param product_name: product name filter (optional, empty = all)
        :param start_date: start date 'YYYY/MM/DD' (defaults to 30 days ago)
        :param end_date: end date 'YYYY/MM/DD' (defaults to today)
        :return: list of record dicts (empty list on error or no data)
        """
        # Default date range: the most recent 30 days.
        if not end_date:
            end_date = datetime.now().strftime('%Y/%m/%d')
        if not start_date:
            start_date = (datetime.now() - timedelta(days=30)).strftime('%Y/%m/%d')

        data = {
            'limit': '20',  # 20 records per page
            'current': str(page_number),
            'pubDateStartTime': start_date,
            'pubDateEndTime': end_date,
            'prodPcatid': '',
            'prodCatid': '',
            'prodName': product_name,
        }

        try:
            logging.info(f"正在爬取第{page_number}页数据...")

            response = requests.post(
                self.url,
                headers=self.headers,
                data=urllib.parse.urlencode(data),
                timeout=15
            )

            if response.status_code != 200:
                logging.error(f"第{page_number}页: 请求失败，状态码: {response.status_code}")
                return []

            try:
                json_data = response.json()
            except json.JSONDecodeError as e:
                logging.error(f"第{page_number}页: JSON解析失败 - {e}")
                return []

            if not ('list' in json_data and json_data['list']):
                logging.warning(f"第{page_number}页: 无数据返回")
                return []

            items = json_data['list']

            # The endpoint has been seen returning either a plain list or a
            # dict wrapping the list under 'data'; handle both shapes.
            if isinstance(items, dict) and 'data' in items:
                items = items['data']
            elif not isinstance(items, list):
                logging.warning(f"第{page_number}页: 未知的数据结构")
                return []

            page_data = []
            for item in items:
                record = {
                    'page_number': page_number,
                    'product_name': item.get('prodName', ''),
                    # _safe_float: price fields may be '' or missing.
                    'low_price': self._safe_float(item.get('lowPrice', 0)),
                    'high_price': self._safe_float(item.get('highPrice', 0)),
                    'avg_price': self._safe_float(item.get('avgPrice', 0)),
                    'place': item.get('place', ''),
                    'spec': item.get('specInfo', ''),
                    'unit': item.get('unitInfo', '元/公斤'),
                    'pub_date': item.get('pubDate', ''),
                    'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    'timestamp': datetime.now().timestamp()
                }
                page_data.append(record)

            logging.info(f"第{page_number}页: 成功获取{len(page_data)}条数据")
            return page_data

        except Exception as e:
            logging.error(f"第{page_number}页: 请求异常 - {e}")
            return []

    def send_page_to_kafka(self, page_data, page_number):
        """Send one page of records to Kafka (no-op if the producer is unavailable)."""
        if not self.kafka_producer or not page_data:
            return

        try:
            # Tag every record of this page with a shared batch id.
            batch_id = f"page_{page_number}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"

            for record in page_data:
                enriched_record = record.copy()
                enriched_record.update({
                    'batch_id': batch_id,
                    'kafka_send_time': datetime.now().isoformat(),
                    'data_source': 'xinfadi_pagination_crawler',
                    'record_id': f"page_{page_number}_{record['product_name']}_{record['timestamp']}"
                })

                # Fire-and-forget; delivery errors surface via flush() below.
                self.kafka_producer.send(
                    self.kafka_topic,
                    value=enriched_record,
                    key=f"page_{page_number}".encode('utf-8')
                )

            # Block until all buffered records for this page are delivered.
            self.kafka_producer.flush()
            logging.info(f"第{page_number}页数据已发送到Kafka")

        except Exception as e:
            logging.error(f"第{page_number}页Kafka发送失败: {e}")

    def crawl_by_pagination(self, product_name="", start_page=1, end_page=None):
        """
        Crawl data page by page, stopping early after 3 consecutive empty pages.
        :param product_name: product name filter (empty = all products)
        :param start_page: first page number
        :param end_page: last page number (None = use self.max_pages)
        :return: list of all records collected
        """
        if end_page is None:
            end_page = self.max_pages

        self.stats['start_time'] = datetime.now()
        all_data = []
        consecutive_empty = 0  # empty-page streak; 3 in a row means we ran past the data

        logging.info(f"开始分页爬取: 第{start_page}页到第{end_page}页")
        if product_name:
            logging.info(f"指定产品: {product_name}")

        for page_num in range(start_page, end_page + 1):
            try:
                page_data = self.fetch_page_data(page_num, product_name)

                if page_data:
                    consecutive_empty = 0
                    all_data.extend(page_data)
                    self.stats['successful_pages'] += 1
                    self.stats['total_records'] += len(page_data)

                    # Persist the single page, then stream it to Kafka.
                    self.save_page_data(page_data, page_num)
                    self.send_page_to_kafka(page_data, page_num)

                else:
                    consecutive_empty += 1
                    self.stats['failed_pages'] += 1
                    logging.warning(f"第{page_num}页无数据，可能已到达最后一页")

                    # Stop after 3 consecutive empty pages.
                    if consecutive_empty >= 3:
                        logging.info("连续3页无数据，停止爬取")
                        break

                self.stats['total_pages'] += 1

                # Politeness delay between pages (skipped after the last one).
                if page_num < end_page:
                    logging.info(f"等待{self.page_delay}秒后爬取下一页...")
                    time.sleep(self.page_delay)

            except KeyboardInterrupt:
                logging.info("用户中断爬取")
                break
            except Exception as e:
                logging.error(f"第{page_num}页爬取异常: {e}")
                self.stats['failed_pages'] += 1
                continue

        self.stats['end_time'] = datetime.now()

        # Persist the deduplicated summary of everything collected.
        if all_data:
            self.save_summary_data(all_data, product_name)

        self.print_crawl_stats()

        return all_data

    def save_page_data(self, page_data, page_number):
        """Save a single page of records as a timestamped CSV file."""
        if not page_data:
            return

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = os.path.join(self.data_dir, f'page_{page_number}_{timestamp}.csv')

        df = pd.DataFrame(page_data)
        # utf-8-sig adds a BOM so Excel opens the Chinese text correctly.
        df.to_csv(filename, index=False, encoding='utf-8-sig')
        # Fix: the previous version logged a literal placeholder instead of the path.
        logging.debug(f"第{page_number}页数据已保存到: {filename}")

    def save_summary_data(self, all_data, product_name=""):
        """Save all collected records as one deduplicated summary CSV plus a report."""
        if not all_data:
            return

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        product_suffix = f"_{product_name}" if product_name else "_all"
        filename = os.path.join(self.data_dir, f'summary{product_suffix}_{timestamp}.csv')

        df = pd.DataFrame(all_data)

        # Sort by page number then crawl time for stable output.
        df = df.sort_values(['page_number', 'crawl_time'])

        # Drop duplicate listings, keeping the most recently crawled copy.
        df = df.drop_duplicates(
            subset=['product_name', 'pub_date', 'avg_price', 'place', 'spec'],
            keep='last'
        )

        df.to_csv(filename, index=False, encoding='utf-8-sig')
        # Fix: the previous version logged a literal placeholder instead of the path.
        logging.info(f"汇总数据已保存到: {filename}")

        # Write a companion text report next to the CSV.
        self.generate_analysis_report(df, filename.replace('.csv', '_analysis.txt'))

    def generate_analysis_report(self, df, filename):
        """Write a plain-text analysis report (totals, per-product and per-place stats)."""
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                f.write("=== 新发地菜价爬取分析报告 ===\n\n")
                f.write(f"爬取时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"总记录数: {len(df)}\n")
                f.write(f"产品种类: {df['product_name'].nunique()}\n")
                f.write(f"价格范围: {df['avg_price'].min():.2f} - {df['avg_price'].max():.2f} 元\n")
                f.write(f"平均价格: {df['avg_price'].mean():.2f} 元\n\n")

                # Per-product price statistics.
                f.write("=== 产品价格统计 ===\n")
                product_stats = df.groupby('product_name')['avg_price'].agg(['count', 'mean', 'min', 'max']).round(2)
                f.write(product_stats.to_string())
                f.write("\n\n")

                # Top-10 places of origin by record count.
                f.write("=== 产地统计 ===\n")
                place_stats = df['place'].value_counts().head(10)
                f.write(place_stats.to_string())

            # Fix: the previous version logged a literal placeholder instead of the path.
            logging.info(f"分析报告已生成: {filename}")

        except Exception as e:
            logging.error(f"生成分析报告失败: {e}")

    def print_crawl_stats(self):
        """Log a summary of the last crawl run (requires start/end times to be set)."""
        duration = self.stats['end_time'] - self.stats['start_time']

        logging.info("=== 爬取统计信息 ===")
        logging.info(f"总页数: {self.stats['total_pages']}")
        logging.info(f"成功页数: {self.stats['successful_pages']}")
        logging.info(f"失败页数: {self.stats['failed_pages']}")
        logging.info(f"总记录数: {self.stats['total_records']}")
        logging.info(f"耗时: {duration}")
        # max(1, ...) guards against division by zero when every page failed.
        logging.info(f"平均每页记录数: {self.stats['total_records'] / max(1, self.stats['successful_pages']):.1f}")

    def crawl_specific_products(self, product_list, max_pages_per_product=50):
        """
        Crawl a list of specific products, one full pagination run each.
        :param product_list: list of product names
        :param max_pages_per_product: page cap per product
        :return: dict mapping product name -> list of its records
        """
        all_results = {}
        last_index = len(product_list) - 1

        for index, product in enumerate(product_list):
            logging.info(f"开始爬取产品: {product}")

            product_data = self.crawl_by_pagination(
                product_name=product,
                start_page=1,
                end_page=max_pages_per_product
            )

            all_results[product] = product_data

            # Extra delay between products. Compare by index, not by value:
            # the old name comparison skipped the delay for any product that
            # happened to equal the last entry (e.g. duplicates in the list).
            if index < last_index:
                logging.info(f"等待{self.page_delay * 2}秒后爬取下一个产品...")
                time.sleep(self.page_delay * 2)

        return all_results

    def close(self):
        """Flush and close the Kafka producer, if one was created."""
        if hasattr(self, 'kafka_producer') and self.kafka_producer:
            try:
                self.kafka_producer.flush()
                self.kafka_producer.close()
                logging.info("Kafka生产者已关闭")
            except Exception as e:
                logging.error(f"关闭Kafka生产者失败: {e}")


# Usage example
def main():
    """Interactive entry point: prompt for a crawl mode and run the crawler."""
    # Create the pagination crawler instance.
    crawler = XinfadiPaginationCrawler(max_pages=100, page_delay=2)

    try:
        print("新发地菜价分页爬虫")
        print("1. 爬取所有产品（分页）")
        print("2. 爬取指定产品")
        print("3. 爬取热门蔬菜")

        choice = input("请选择爬取方式 (1-3): ").strip()

        if choice == "1":
            # Crawl all products, up to 100 pages.
            crawler.crawl_by_pagination(start_page=1, end_page=100)

        elif choice == "2":
            product_name = input("请输入产品名称: ").strip()
            raw_pages = input("请输入最大页数 (默认50): ").strip()
            # Fix: a non-numeric entry used to raise ValueError and abort the
            # whole run via the broad except below; fall back to 50 instead.
            try:
                max_pages = int(raw_pages) if raw_pages else 50
            except ValueError:
                logging.warning(f"无效页数输入: {raw_pages}，使用默认值50")
                max_pages = 50
            crawler.crawl_by_pagination(product_name=product_name, end_page=max_pages)

        elif choice == "3":
            # Crawl a fixed list of popular vegetables.
            popular_products = ['大白菜', '小白菜', '萝卜', '土豆', '番茄', '黄瓜', '茄子', '辣椒']
            crawler.crawl_specific_products(popular_products, max_pages_per_product=20)

        else:
            print("无效选择，默认爬取前10页所有数据")
            crawler.crawl_by_pagination(end_page=10)

    except KeyboardInterrupt:
        print("\n用户中断爬取")
    except Exception as e:
        logging.error(f"爬取过程中出现错误: {e}")
    finally:
        # Always release the Kafka producer, even on error/interrupt.
        crawler.close()
        print("爬取完成")


# Run the interactive crawler only when executed as a script, not on import.
if __name__ == "__main__":
    main()