#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import signal
import sys
import time
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
import json


class PaginationDataProcessor:
    """Real-time processor for paginated agriculture-price crawler data.

    Consumes JSON records from a Kafka topic with Spark Structured Streaming,
    deduplicates and cleans them without watermarking, then persists results
    to HDFS (always) and to Hive (best-effort). Designed to run as a
    long-lived service with graceful SIGINT/SIGTERM shutdown.
    """

    def __init__(self):
        # SparkSession handle; populated by setup_spark_session().
        self.spark = None
        # Active StreamingQuery objects, tracked for monitoring and shutdown.
        self.queries = []
        # Main-loop flag; the signal handler clears it to stop the service.
        self.running = True

    def setup_spark_session(self):
        """Initialize the Spark session, tuned for pagination data processing.

        Tries a Hive-enabled session first; on failure falls back to
        setup_spark_session_without_hive().

        Returns:
            bool: True if a session was created, False otherwise.
        """
        try:
            self.spark = SparkSession.builder \
                .appName("PaginationDataProcessor") \
                .master("local[*]") \
                .config("spark.sql.warehouse.dir", "/user/hive/warehouse") \
                .config("spark.sql.adaptive.enabled", "true") \
                .config("spark.sql.adaptive.coalescePartitions.enabled", "true") \
                .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
                .config("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true") \
                .config("spark.jars.packages", "org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2") \
                .config("spark.sql.hive.metastore.version", "2.3.9") \
                .config("spark.sql.hive.metastore.jars", "builtin") \
                .config("spark.sql.hive.metastore.schema.verification", "false") \
                .config("spark.sql.hive.convertMetastoreOrc", "true") \
                .config("spark.sql.hive.convertMetastoreParquet", "true") \
                .config("spark.sql.execution.arrow.pyspark.enabled", "true") \
                .config("spark.sql.streaming.stateStore.maintenanceInterval", "60s") \
                .config("spark.sql.streaming.statefulOperator.checkCorrectness.enabled", "false") \
                .config("spark.sql.adaptive.skewJoin.enabled", "true") \
                .config("spark.sql.adaptive.localShuffleReader.enabled", "true") \
                .enableHiveSupport() \
                .getOrCreate()

            self.spark.sparkContext.setLogLevel("WARN")
            print("Spark会话创建成功（分页数据处理专用配置，无水印模式）")
            return True
        except Exception as e:
            print("Spark会话创建失败: {}".format(e))
            # Hive-enabled session failed; retry without Hive support.
            return self.setup_spark_session_without_hive()

    def setup_spark_session_without_hive(self):
        """Fallback: create a Spark session without Hive support.

        Returns:
            bool: True if the session was created, False otherwise.
        """
        try:
            self.spark = SparkSession.builder \
                .appName("PaginationDataProcessor") \
                .master("local[*]") \
                .config("spark.sql.adaptive.enabled", "true") \
                .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
                .config("spark.jars.packages", "org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2") \
                .config("spark.sql.execution.arrow.pyspark.enabled", "true") \
                .config("spark.sql.streaming.stateStore.maintenanceInterval", "60s") \
                .config("spark.sql.streaming.statefulOperator.checkCorrectness.enabled", "false") \
                .getOrCreate()

            self.spark.sparkContext.setLogLevel("WARN")
            print("Spark会话创建成功（备用模式，分页数据处理，无水印）")
            return True
        except Exception as e:
            print("Spark会话创建失败: {}".format(e))
            return False

    def create_hdfs_directories(self):
        """Create the HDFS directory layout used by this processor.

        Best-effort: a failed directory is reported but does not abort startup.
        """
        directories = [
            "/pagination_data",
            "/pagination_data/cleaned_data",
            "/pagination_data/page_statistics",
            "/pagination_data/price_analysis",
            "/pagination_data/batch_summary",
            "/pagination_data/raw_data",
            "/pagination_data/deduplication_logs"
        ]

        for directory in directories:
            # os.system() does not raise when the command fails, so the old
            # try/except could never fire; inspect the exit status instead.
            exit_status = os.system("hdfs dfs -mkdir -p {}".format(directory))
            if exit_status == 0:
                print("创建HDFS目录: {}".format(directory))
            else:
                print("创建目录失败 {}: {}".format(directory, exit_status))

    def create_hive_tables(self):
        """Create the pagination-specific Hive database and tables.

        Probes Hive availability first; when Hive is unusable the processor
        still runs with HDFS-only storage.

        Returns:
            bool: True when setup succeeded (or Hive is simply unavailable),
            False when table creation itself raised.
        """
        try:
            # Probe Hive support before issuing DDL.
            try:
                databases_df = self.spark.sql("SHOW DATABASES")
                databases = [row[0] for row in databases_df.collect()]
                print("可用数据库: {}".format(databases))
                hive_available = True
            except Exception as test_error:
                print("Hive功能测试失败: {}".format(test_error))
                hive_available = False

            if hive_available:
                # Create the database (idempotent).
                try:
                    self.spark.sql("CREATE DATABASE IF NOT EXISTS pagination_db")
                    print("创建pagination_db数据库")
                except Exception as db_error:
                    print("创建数据库时出现警告: {}".format(db_error))

                # Main data table.
                create_main_table_sql = """
                CREATE TABLE IF NOT EXISTS pagination_db.agriculture_pagination_data (
                    record_id STRING,
                    page_number INT,
                    product_name STRING,
                    low_price DOUBLE,
                    high_price DOUBLE,
                    avg_price DOUBLE,
                    place STRING,
                    spec STRING,
                    unit STRING,
                    pub_date STRING,
                    crawl_time STRING,
                    timestamp DOUBLE,
                    batch_id STRING,
                    kafka_send_time STRING,
                    data_source STRING,
                    processing_timestamp STRING,
                    price_anomaly_flag STRING,
                    data_quality_score INT,
                    is_deduplicated BOOLEAN,
                    page_batch_info STRING
                ) 
                STORED AS PARQUET
                LOCATION '/user/hive/warehouse/pagination_db.db/agriculture_pagination_data'
                """

                # Per-page statistics table.
                create_stats_table_sql = """
                CREATE TABLE IF NOT EXISTS pagination_db.page_statistics (
                    batch_id STRING,
                    page_number INT,
                    total_records INT,
                    unique_products INT,
                    unique_locations INT,
                    avg_price_range DOUBLE,
                    processing_time STRING,
                    data_quality_avg DOUBLE
                ) 
                STORED AS PARQUET
                LOCATION '/user/hive/warehouse/pagination_db.db/page_statistics'
                """

                # Deduplication audit-log table.
                create_dedup_table_sql = """
                CREATE TABLE IF NOT EXISTS pagination_db.deduplication_logs (
                    batch_id STRING,
                    page_number INT,
                    original_count INT,
                    deduplicated_count INT,
                    duplicate_count INT,
                    dedup_rate DOUBLE,
                    processing_timestamp STRING
                ) 
                STORED AS PARQUET
                LOCATION '/user/hive/warehouse/pagination_db.db/deduplication_logs'
                """

                self.spark.sql(create_main_table_sql)
                self.spark.sql(create_stats_table_sql)
                self.spark.sql(create_dedup_table_sql)

                print("分页数据专用Hive表创建成功")

                # Verify the tables actually exist.
                try:
                    tables_df = self.spark.sql("SHOW TABLES IN pagination_db")
                    table_names = [row[1] for row in tables_df.collect()]
                    print("创建的表: {}".format(table_names))
                except Exception as verify_error:
                    print("表验证时出现问题: {}".format(verify_error))

                return True
            else:
                print("Hive不可用，将直接使用HDFS存储")
                return True
        except Exception as e:
            print("Hive表创建失败: {}".format(e))
            # Return False so the caller's warning branch is reachable
            # (the caller continues either way; HDFS writes still work).
            return False

    def setup_kafka_stream(self):
        """Create the streaming DataFrame reading the pagination-crawler topic.

        Returns:
            DataFrame | None: raw Kafka stream, or None on failure.
        """
        try:
            kafka_df = self.spark \
                .readStream \
                .format("kafka") \
                .option("kafka.bootstrap.servers", "192.168.93.201:9092") \
                .option("subscribe", "agriculture_pagination_data") \
                .option("kafka.request.timeout.ms", "60000") \
                .option("kafka.session.timeout.ms", "30000") \
                .option("kafka.max.poll.records", "500") \
                .option("kafka.fetch.max.wait.ms", "5000") \
                .option("startingOffsets", "latest") \
                .option("failOnDataLoss", "false") \
                .load()

            print("Kafka流设置成功（监听分页爬虫数据，服务器：192.168.93.201:9092，无水印模式）")
            return kafka_df
        except Exception as e:
            print("Kafka流设置失败: {}".format(e))
            return None

    def parse_pagination_kafka_data(self, kafka_df):
        """Parse the Kafka value payload (JSON) into typed columns.

        Args:
            kafka_df: raw Kafka streaming DataFrame (key/value/metadata).

        Returns:
            DataFrame with the crawler fields flattened plus Kafka metadata
            columns (kafka_key, kafka_timestamp, kafka_partition, kafka_offset).
        """
        # Schema of the pagination-crawler JSON records.
        schema = StructType([
            StructField("page_number", IntegerType(), True),
            StructField("product_name", StringType(), True),
            StructField("low_price", DoubleType(), True),
            StructField("high_price", DoubleType(), True),
            StructField("avg_price", DoubleType(), True),
            StructField("place", StringType(), True),
            StructField("spec", StringType(), True),
            StructField("unit", StringType(), True),
            StructField("pub_date", StringType(), True),
            StructField("crawl_time", StringType(), True),
            StructField("timestamp", DoubleType(), True),
            StructField("batch_id", StringType(), True),
            StructField("kafka_send_time", StringType(), True),
            StructField("data_source", StringType(), True),
            StructField("record_id", StringType(), True)
        ])

        # Alias Kafka's own "timestamp" before flattening so it does not
        # collide with the payload's "timestamp" field.
        parsed_df = kafka_df.select(
            col("key").cast("string").alias("kafka_key"),
            from_json(col("value").cast("string"), schema).alias("data"),
            col("timestamp").alias("kafka_timestamp"),
            col("partition").alias("kafka_partition"),
            col("offset").alias("kafka_offset")
        ).select(
            col("kafka_key"),
            col("data.*"),
            col("kafka_timestamp"),
            col("kafka_partition"),
            col("kafka_offset")
        )

        return parsed_df

    def clean_and_transform_pagination_data(self, df):
        """Deduplicate, filter and enrich parsed pagination records.

        No watermark is used: dropDuplicates operates on the listed key
        columns only. Adds anomaly flags, a quality score and bookkeeping
        columns.

        Args:
            df: parsed streaming DataFrame from parse_pagination_kafka_data().

        Returns:
            Cleaned and enriched streaming DataFrame.
        """

        print("分页数据处理：无水印模式，使用简化去重策略")

        # Deduplicate directly on key business fields (no watermark).
        deduplicated_df = df.dropDuplicates([
            "product_name",
            "place",
            "pub_date",
            "avg_price"  # price included as part of the dedup key
        ])

        print("分页数据去重策略：产品名称+地区+发布日期+平均价格（无水印）")

        # Drop records missing mandatory fields, then derive analysis columns.
        cleaned_df = deduplicated_df.filter(
            col("product_name").isNotNull() &
            col("avg_price").isNotNull() &
            (col("avg_price") > 0) &
            col("page_number").isNotNull() &
            (col("page_number") > 0)
        ).withColumn(
            # Flag averages that fall outside the reported low/high band.
            "price_anomaly_flag",
            when(col("avg_price") > col("high_price") * 1.3, "VERY_HIGH")
            .when(col("avg_price") > col("high_price") * 1.1, "HIGH")
            .when(col("avg_price") < col("low_price") * 0.7, "VERY_LOW")
            .when(col("avg_price") < col("low_price") * 0.9, "LOW")
            .otherwise("NORMAL")
        ).withColumn(
            # Completeness-based score: more non-null fields -> higher score.
            "data_quality_score",
            when(col("product_name").isNotNull() &
                 col("avg_price").isNotNull() &
                 col("place").isNotNull() &
                 col("spec").isNotNull() &
                 col("page_number").isNotNull(), 100)
            .when(col("product_name").isNotNull() &
                  col("avg_price").isNotNull() &
                  col("place").isNotNull(), 85)
            .when(col("product_name").isNotNull() &
                  col("avg_price").isNotNull(), 70)
            .otherwise(50)
        ).withColumn(
            "processing_timestamp",
            current_timestamp().cast("string")
        ).withColumn(
            "is_deduplicated",
            lit(True)
        ).withColumn(
            "page_batch_info",
            concat(lit("page_"), col("page_number"), lit("_"), col("batch_id"))
        ).withColumn(
            "price_range_width",
            col("high_price") - col("low_price")
        ).withColumn(
            # Relative position of avg within [low, high]; 0.5 for zero width.
            "price_position",
            when(col("price_range_width") > 0,
                 (col("avg_price") - col("low_price")) / col("price_range_width"))
            .otherwise(0.5)
        ).withColumn(
            "processing_mode",
            lit("no_watermark")  # marks records processed without watermark
        )

        return cleaned_df

    def write_pagination_data_to_storage(self, df, batch_id):
        """Persist one micro-batch: stats to console, data to HDFS and Hive.

        Hive writes are best-effort; HDFS is the system of record.

        Args:
            df: cleaned batch DataFrame (cached by the foreachBatch callback).
            batch_id: Spark micro-batch identifier.
        """
        try:
            # Count once and reuse; the old code ran two identical count jobs.
            total_records = df.count()
            if total_records > 0:
                # Collect batch-level statistics.
                unique_products = df.select("product_name").distinct().count()
                unique_locations = df.select("place").distinct().count()
                unique_pages = df.select("page_number").distinct().count()
                avg_price_overall = df.agg(avg("avg_price")).collect()[0][0]
                avg_quality_score = df.agg(avg("data_quality_score")).collect()[0][0]

                print("批次 {} 分页数据统计（无水印模式）:".format(batch_id))
                print("  - 处理记录数: {}".format(total_records))
                print("  - 唯一产品数: {}".format(unique_products))
                print("  - 唯一地区数: {}".format(unique_locations))
                print("  - 涉及页面数: {}".format(unique_pages))
                print("  - 平均价格: {:.2f}".format(avg_price_overall))
                print("  - 平均质量分: {:.1f}".format(avg_quality_score))
                print("  - 处理模式: 无水印实时处理")
                print("  - Kafka服务器: 192.168.93.201:9092")

                # Per-page aggregation.
                page_stats = df.groupBy("page_number").agg(
                    count("*").alias("record_count"),
                    countDistinct("product_name").alias("unique_products"),
                    countDistinct("place").alias("unique_places"),
                    avg("avg_price").alias("avg_price"),
                    avg("data_quality_score").alias("avg_quality")
                ).withColumn("batch_id", lit("batch_{}".format(batch_id))) \
                    .withColumn("processing_time", current_timestamp().cast("string")) \
                    .withColumn("processing_mode", lit("no_watermark"))

                # Null-safe projection in the main Hive table's column order
                # (insertInto matches by position).
                df_processed = df.select(
                    coalesce(col("record_id"), lit("")).alias("record_id"),
                    coalesce(col("page_number"), lit(0)).alias("page_number"),
                    coalesce(col("product_name"), lit("")).alias("product_name"),
                    coalesce(col("low_price"), lit(0.0)).alias("low_price"),
                    coalesce(col("high_price"), lit(0.0)).alias("high_price"),
                    coalesce(col("avg_price"), lit(0.0)).alias("avg_price"),
                    coalesce(col("place"), lit("")).alias("place"),
                    coalesce(col("spec"), lit("")).alias("spec"),
                    coalesce(col("unit"), lit("")).alias("unit"),
                    coalesce(col("pub_date"), lit("")).alias("pub_date"),
                    coalesce(col("crawl_time"), lit("")).alias("crawl_time"),
                    coalesce(col("timestamp"), lit(0.0)).alias("timestamp"),
                    coalesce(col("batch_id"), lit("")).alias("batch_id"),
                    coalesce(col("kafka_send_time"), lit("")).alias("kafka_send_time"),
                    coalesce(col("data_source"), lit("")).alias("data_source"),
                    coalesce(col("processing_timestamp"), current_timestamp().cast("string")).alias(
                        "processing_timestamp"),
                    coalesce(col("price_anomaly_flag"), lit("NORMAL")).alias("price_anomaly_flag"),
                    coalesce(col("data_quality_score"), lit(60)).alias("data_quality_score"),
                    coalesce(col("is_deduplicated"), lit(True)).alias("is_deduplicated"),
                    coalesce(col("page_batch_info"), lit("")).alias("page_batch_info")
                )

                # Write the main data to HDFS.
                hdfs_path = "/pagination_data/cleaned_data/batch_{}".format(batch_id)
                df_processed.write \
                    .mode("append") \
                    .format("parquet") \
                    .save(hdfs_path)
                print("批次 {} 主数据已写入HDFS（无水印模式）".format(batch_id))

                # Write the per-page statistics to HDFS.
                stats_path = "/pagination_data/page_statistics/batch_{}".format(batch_id)
                page_stats.write \
                    .mode("append") \
                    .format("parquet") \
                    .save(stats_path)
                print("批次 {} 页面统计已写入HDFS".format(batch_id))

                # Best-effort sync to the Hive tables.
                try:
                    # Main table: df_processed already matches its column order.
                    df_processed.write \
                        .mode("append") \
                        .insertInto("pagination_db.agriculture_pagination_data")

                    # Stats table: align to page_statistics' positional schema
                    # (batch_id, page_number, total_records, unique_products,
                    # unique_locations, avg_price_range, processing_time,
                    # data_quality_avg). The previous code passed 9 columns in
                    # a different order, so this insert always failed.
                    page_stats.select(
                        col("batch_id"),
                        col("page_number"),
                        col("record_count").alias("total_records"),
                        col("unique_products"),
                        col("unique_places").alias("unique_locations"),
                        # NOTE(review): only the mean price is computed per
                        # page; it is stored in avg_price_range — confirm
                        # whether high-low width was intended instead.
                        col("avg_price").alias("avg_price_range"),
                        col("processing_time"),
                        col("avg_quality").alias("data_quality_avg")
                    ).write \
                        .mode("append") \
                        .insertInto("pagination_db.page_statistics")

                    print("批次 {} 数据已同步到Hive表".format(batch_id))

                except Exception as hive_error:
                    print("Hive同步失败（数据已安全存储在HDFS）: {}".format(hive_error))

            else:
                print("批次 {} 没有数据需要写入".format(batch_id))

        except Exception as e:
            print("数据写入失败: {}".format(e))

    def generate_pagination_summary(self, df, batch_id):
        """Write a one-row batch summary (counts, price and quality stats).

        Args:
            df: cleaned batch DataFrame (cached by the foreachBatch callback,
                so the extra count here is cheap).
            batch_id: Spark micro-batch identifier.
        """
        try:
            if df.count() > 0:
                # Aggregate whole-batch summary statistics.
                summary_stats = df.agg(
                    count("*").alias("total_records"),
                    countDistinct("product_name").alias("unique_products"),
                    countDistinct("place").alias("unique_places"),
                    countDistinct("page_number").alias("total_pages"),
                    min("page_number").alias("min_page"),
                    max("page_number").alias("max_page"),
                    avg("avg_price").alias("overall_avg_price"),
                    min("avg_price").alias("min_price"),
                    max("avg_price").alias("max_price"),
                    avg("data_quality_score").alias("avg_quality_score")
                ).withColumn("batch_id", lit("batch_{}".format(batch_id))) \
                    .withColumn("summary_timestamp", current_timestamp().cast("string")) \
                    .withColumn("processing_mode", lit("no_watermark"))

                # Persist the summary (overwrite: one summary per batch).
                summary_path = "/pagination_data/batch_summary/batch_{}".format(batch_id)
                summary_stats.write \
                    .mode("overwrite") \
                    .format("parquet") \
                    .save(summary_path)

                print("批次 {} 汇总报告已生成（无水印模式）".format(batch_id))

        except Exception as e:
            print("汇总报告生成失败: {}".format(e))

    def setup_signal_handler(self):
        """Install SIGINT/SIGTERM handlers for graceful shutdown."""

        def signal_handler(signum, frame):
            print("\n收到停止信号，正在优雅关闭...")
            self.running = False
            self.graceful_shutdown()

        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGTERM, signal_handler)
        print("已启用优雅关闭机制")

    def graceful_shutdown(self):
        """Stop all streaming queries, close Spark and exit the process."""
        print("正在停止所有流查询...")
        for query in self.queries:
            if query.isActive:
                print("停止查询: {}".format(query.name))
                query.stop()

        print("等待查询完全停止...")
        for query in self.queries:
            try:
                query.awaitTermination(timeout=30)
            except Exception:
                # Best-effort wait; a query may already be terminated.
                pass

        if self.spark:
            print("正在停止Spark会话...")
            self.spark.stop()
            print("Spark已优雅关闭")

        print("分页数据处理系统已安全关闭（无水印模式）")
        sys.exit(0)

    def run(self):
        """Main entry point: wire up Spark, Kafka and the streaming queries,
        then monitor them until shutdown."""
        print("启动分页爬虫数据实时处理系统（无水印模式，Kafka服务器：192.168.93.201）...")

        # Install signal handlers first so startup can be interrupted cleanly.
        self.setup_signal_handler()

        # Initialize Spark.
        if not self.setup_spark_session():
            print("Spark初始化失败，程序退出")
            return

        # Create the HDFS directory layout.
        self.create_hdfs_directories()

        # Create the Hive tables (non-fatal on failure).
        if not self.create_hive_tables():
            print("存储初始化失败，但继续运行...")

        # Connect to Kafka.
        kafka_df = self.setup_kafka_stream()
        if kafka_df is None:
            print("Kafka连接失败，程序退出")
            return

        # Build the parse/clean/transform pipeline.
        print("开始分页数据解析、清洗、转换和去重（无水印模式）...")
        parsed_df = self.parse_pagination_kafka_data(kafka_df)
        cleaned_df = self.clean_and_transform_pagination_data(parsed_df)

        # Start the output streams.
        try:
            def process_batch(batch_df, batch_id):
                # Cache the batch: both handlers trigger many Spark actions
                # (counts, distincts, aggregations, writes) on the same data.
                batch_df.persist()
                try:
                    self.write_pagination_data_to_storage(batch_df, batch_id)
                    self.generate_pagination_summary(batch_df, batch_id)
                finally:
                    batch_df.unpersist()

            # Main storage stream (HDFS + Hive + summary).
            data_storage_query = cleaned_df.writeStream \
                .outputMode("append") \
                .foreachBatch(process_batch) \
                .option("checkpointLocation", "/tmp/checkpoint/pagination_data_no_watermark") \
                .trigger(processingTime='60 seconds') \
                .queryName("pagination_data_storage_no_watermark") \
                .start()

            self.queries.append(data_storage_query)

            # Console output stream (debugging aid).
            console_query = cleaned_df.select(
                "page_number", "product_name", "avg_price", "place",
                "price_anomaly_flag", "data_quality_score", "page_batch_info", "processing_mode"
            ).writeStream \
                .outputMode("append") \
                .format("console") \
                .option("truncate", "false") \
                .option("numRows", 10) \
                .trigger(processingTime='60 seconds') \
                .queryName("pagination_console_debug_no_watermark") \
                .start()

            self.queries.append(console_query)

            print("=" * 80)
            print("分页爬虫数据实时处理系统已启动成功（无水印模式）！")
            print("✓ 专门针对分页爬虫数据结构优化")
            print("✓ 支持页面级别的数据统计和分析")
            print("✓ 简化去重策略：产品+地区+日期+价格（无水印）")
            print("✓ 实时处理模式，无延迟等待")
            print("✓ 自动生成页面统计和批次汇总报告")
            print("✓ 支持价格异常检测和数据质量评分")
            print("✓ 多层次存储：主数据+统计数据+汇总数据")
            print("✓ Hive表结构专门为分页数据设计")
            print("✓ 监听主题：agriculture_pagination_data")
            print("✓ Kafka服务器：192.168.93.201:9092")
            print("✓ 处理间隔：60秒（无水印快速处理）")
            print("✓ 处理模式：无水印实时处理")
            print("✓ 使用 Ctrl+C 进行优雅关闭")
            print("=" * 80)

            # Monitor loop: exit when any query dies or a signal arrives.
            while self.running:
                for query in self.queries:
                    if not query.isActive:
                        print("查询 {} 已停止".format(query.name))
                        self.running = False
                        break

                if self.running:
                    time.sleep(5)

        except Exception as e:
            print("流处理启动失败: {}".format(e))
        finally:
            self.graceful_shutdown()




def _main():
    """Script entry point: run the processor and report how it terminated."""
    try:
        PaginationDataProcessor().run()
    except KeyboardInterrupt:
        print("\n程序被用户中断")
    except Exception as exc:
        print("程序异常退出: {}".format(exc))
    finally:
        print("分页数据处理程序已完全退出（无水印模式）")


if __name__ == "__main__":
    _main()
