#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import signal
import sys
import time
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
import json

class AgricultureDataProcessor:
    """End-to-end streaming pipeline for agricultural price data.

    Reads JSON records from Kafka, parses and cleans them with Spark
    Structured Streaming, deduplicates on (product_name, place, pub_date)
    under a 15-minute watermark, and persists every micro-batch to HDFS as
    Parquet with a best-effort sync into a Hive table.
    """

    def __init__(self):
        # SparkSession; created lazily by setup_spark_session().
        self.spark = None
        # Active StreamingQuery handles, tracked for graceful shutdown.
        self.queries = []
        # Main-loop flag; cleared by the SIGINT/SIGTERM handler.
        self.running = True

    def setup_spark_session(self):
        """Create the SparkSession with Hive support (Hive 2.3.9 compatibility).

        Falls back to a Hive-less session when creation fails.

        Returns:
            bool: True when a usable session exists, False otherwise.
        """
        try:
            self.spark = SparkSession.builder \
                .appName("AgricultureDataProcessor") \
                .master("local[*]") \
                .config("spark.sql.warehouse.dir", "/user/hive/warehouse") \
                .config("spark.sql.adaptive.enabled", "true") \
                .config("spark.sql.adaptive.coalescePartitions.enabled", "true") \
                .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
                .config("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true") \
                .config("spark.jars.packages", "org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2") \
                .config("spark.sql.hive.metastore.version", "2.3.9") \
                .config("spark.sql.hive.metastore.jars", "builtin") \
                .config("spark.sql.hive.metastore.schema.verification", "false") \
                .config("spark.sql.hive.convertMetastoreOrc", "true") \
                .config("spark.sql.hive.convertMetastoreParquet", "true") \
                .config("spark.sql.hive.convertInsertingPartitionedTable", "false") \
                .config("spark.sql.hive.convertInsertingUnpartitionedTable", "false") \
                .config("spark.sql.execution.arrow.pyspark.enabled", "true") \
                .config("spark.sql.streaming.stateStore.maintenanceInterval", "60s") \
                .config("spark.sql.streaming.statefulOperator.checkCorrectness.enabled", "false") \
                .enableHiveSupport() \
                .getOrCreate()

            self.spark.sparkContext.setLogLevel("WARN")
            print("Spark会话创建成功（使用Hive 2.3.9兼容配置，已启用去重优化）")
            return True
        except Exception as e:
            print(f"Spark会话创建失败: {e}")
            # Hive metastore problems are common in dev environments; retry
            # without Hive support rather than aborting outright.
            return self.setup_spark_session_without_hive()

    def setup_spark_session_without_hive(self):
        """Fallback: create a SparkSession without Hive support.

        Returns:
            bool: True on success, False when Spark itself cannot start.
        """
        try:
            self.spark = SparkSession.builder \
                .appName("AgricultureDataProcessor") \
                .master("local[*]") \
                .config("spark.sql.adaptive.enabled", "true") \
                .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
                .config("spark.jars.packages", "org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2") \
                .config("spark.sql.execution.arrow.pyspark.enabled", "true") \
                .config("spark.sql.streaming.stateStore.maintenanceInterval", "60s") \
                .config("spark.sql.streaming.statefulOperator.checkCorrectness.enabled", "false") \
                .getOrCreate()

            self.spark.sparkContext.setLogLevel("WARN")
            print("Spark会话创建成功（备用模式，不使用Hive支持，已启用去重优化）")
            return True
        except Exception as e:
            print(f"Spark会话创建失败: {e}")
            return False

    def create_hdfs_directories(self):
        """Ensure the HDFS directory layout used by the pipeline exists."""
        directories = [
            "/agriculture_data",
            "/agriculture_data/cleaned_data",
            "/agriculture_data/price_statistics",
            "/agriculture_data/price_alerts",
            "/agriculture_data/raw_data"
        ]

        for directory in directories:
            # os.system never raises when the shell command fails, so the
            # previous try/except around it was dead code and failures were
            # reported as successes.  Inspect the exit status instead.
            exit_status = os.system(f"hdfs dfs -mkdir -p {directory}")
            if exit_status == 0:
                print(f"创建HDFS目录: {directory}")
            else:
                print(f"创建目录失败 {directory}: 命令退出状态 {exit_status}")

    def create_hive_table(self):
        """Create the Hive target table when Hive is available.

        Always returns True: Hive is optional because HDFS is the primary
        store; failures here only disable the Hive sync path.
        """
        try:
            # Probe Hive availability first.
            try:
                # Access Row fields by index — column names of SHOW DATABASES
                # differ across Spark/Hive versions.
                databases_df = self.spark.sql("SHOW DATABASES")
                databases = [row[0] for row in databases_df.collect()]
                print(f"可用数据库: {databases}")
                hive_available = True
            except Exception as test_error:
                print(f"Hive功能测试失败: {test_error}")
                hive_available = False

            if hive_available:
                # Make sure the default database exists before creating the table.
                try:
                    self.spark.sql("CREATE DATABASE IF NOT EXISTS default")
                    print("确保default数据库存在")
                except Exception as db_error:
                    print(f"创建default数据库时出现警告: {db_error}")

                # STORED AS (rather than USING) keeps the table readable by
                # plain Hive clients.
                create_table_sql = """
                CREATE TABLE IF NOT EXISTS default.agriculture_cleaned_data (
                    record_id STRING,
                    product_name STRING,
                    product_category STRING,
                    avg_price DOUBLE,
                    low_price DOUBLE,
                    high_price DOUBLE,
                    place STRING,
                    spec STRING,
                    unit STRING,
                    pub_date STRING,
                    crawl_time STRING,
                    price_anomaly_flag STRING,
                    data_quality_score INT,
                    processing_timestamp STRING,
                    batch_id STRING,
                    data_source STRING,
                    is_deduplicated BOOLEAN
                ) 
                STORED AS PARQUET
                LOCATION '/user/hive/warehouse/agriculture_cleaned_data'
                """

                self.spark.sql(create_table_sql)
                print("Hive表创建成功（Parquet格式存储，已添加去重标记字段）")

                # Verify creation; SHOW TABLES rows are (database, tableName,
                # isTemporary), so the table name is at index 1.
                try:
                    tables_df = self.spark.sql("SHOW TABLES IN default")
                    table_names = [row[1] for row in tables_df.collect()]
                    if 'agriculture_cleaned_data' in table_names:
                        print("✓ 表创建验证成功")
                    else:
                        print("⚠ 表创建验证失败，但继续运行")
                except Exception as verify_error:
                    print(f"表验证时出现问题: {verify_error}")

                return True
            else:
                print("Hive不可用，将直接使用HDFS存储")
                return True
        except Exception as e:
            print(f"Hive表创建失败，将使用HDFS存储: {e}")
            return True

    def setup_kafka_stream(self):
        """Build the streaming DataFrame over the 'agriculture_data' topic.

        Returns:
            DataFrame | None: the raw Kafka stream, or None on failure.
        """
        try:
            kafka_df = self.spark \
                .readStream \
                .format("kafka") \
                .option("kafka.bootstrap.servers", "localhost:9092") \
                .option("subscribe", "agriculture_data") \
                .option("kafka.request.timeout.ms", "60000") \
                .option("kafka.session.timeout.ms", "30000") \
                .option("kafka.max.poll.records", "500") \
                .option("kafka.fetch.max.wait.ms", "5000") \
                .option("startingOffsets", "latest") \
                .option("failOnDataLoss", "false") \
                .load()

            print("Kafka流设置成功（已启用去重功能）")
            return kafka_df
        except Exception as e:
            print(f"Kafka流设置失败: {e}")
            return None

    def parse_kafka_data(self, kafka_df):
        """Parse the Kafka value payload (JSON) into typed columns.

        The Kafka message key becomes record_id (defaulting to "unknown"
        when absent); the Kafka ingestion timestamp is kept for watermarking.
        """
        # Expected JSON payload schema; unknown fields are dropped, missing
        # fields come through as NULL (all fields nullable).
        schema = StructType([
            StructField("product_name", StringType(), True),
            StructField("product_category", StringType(), True),
            StructField("avg_price", DoubleType(), True),
            StructField("low_price", DoubleType(), True),
            StructField("high_price", DoubleType(), True),
            StructField("place", StringType(), True),
            StructField("spec", StringType(), True),
            StructField("unit", StringType(), True),
            StructField("pub_date", StringType(), True),
            StructField("crawl_time", StringType(), True)
        ])

        parsed_df = kafka_df.select(
            col("key").cast("string").alias("record_id"),
            from_json(col("value").cast("string"), schema).alias("data"),
            col("timestamp").alias("kafka_timestamp")
        ).select(
            coalesce(col("record_id"), lit("unknown")).alias("record_id"),
            col("data.*"),
            col("kafka_timestamp")
        )

        return parsed_df

    def clean_and_transform_data(self, df):
        """Deduplicate, filter, and enrich the parsed stream.

        Dedup key is (product_name, place, pub_date); the 15-minute watermark
        bounds the dedup state kept by Spark. Rows lacking a product name or
        a positive average price are dropped, then anomaly/quality metadata
        columns are added.
        """
        # Watermark bounds streaming state for dropDuplicates.
        df_with_watermark = df.withWatermark("kafka_timestamp", "15 minutes")

        deduplicated_df = df_with_watermark.dropDuplicates([
            "product_name", 
            "place", 
            "pub_date"
        ])

        print("已启用去重功能：基于产品名称、地区和发布日期去重")

        cleaned_df = deduplicated_df.filter(
            col("product_name").isNotNull() & 
            col("avg_price").isNotNull() & 
            (col("avg_price") > 0)
        ).withColumn(
            # Flag averages that fall outside a ±20% band around the
            # reported high/low prices.
            "price_anomaly_flag",
            when(col("avg_price") > col("high_price") * 1.2, "HIGH")
            .when(col("avg_price") < col("low_price") * 0.8, "LOW")
            .otherwise("NORMAL")
        ).withColumn(
            # Coarse completeness score: 100 / 80 / 60 by field presence.
            "data_quality_score",
            when(col("product_name").isNotNull() & 
                 col("avg_price").isNotNull() & 
                 col("place").isNotNull(), 100)
            .when(col("product_name").isNotNull() & 
                  col("avg_price").isNotNull(), 80)
            .otherwise(60)
        ).withColumn(
            "processing_timestamp", 
            current_timestamp().cast("string")
        ).withColumn(
            # NOTE: this literal is evaluated once when the plan is built
            # (process start time), not per micro-batch; write_to_storage
            # overwrites it with the real per-batch id before persisting.
            "batch_id", 
            lit("batch_" + str(int(time.time())))
        ).withColumn(
            "data_source", 
            lit("kafka_stream")
        ).withColumn(
            "is_deduplicated", 
            lit(True)
        )

        return cleaned_df

    def write_to_storage(self, df, batch_id):
        """foreachBatch sink: write one micro-batch to HDFS, then sync to Hive.

        HDFS (Parquet) is the primary store; the Hive insert is best-effort
        and failures there are only logged.
        """
        try:
            # The batch DataFrame is counted and written several times below;
            # cache it so each action does not recompute the whole batch plan
            # (the original code called df.count() three separate times).
            df.persist()
            try:
                total_records = df.count()
                if total_records > 0:
                    unique_products = df.select("product_name").distinct().count()
                    unique_locations = df.select("place").distinct().count()

                    print(f"批次 {batch_id} 去重统计:")
                    print(f"  - 处理记录数: {total_records}")
                    print(f"  - 唯一产品数: {unique_products}")
                    print(f"  - 唯一地区数: {unique_locations}")

                    # Normalize NULLs and stamp the real per-batch metadata so
                    # the row layout matches the Hive table definition exactly.
                    df_processed = df.select(
                        coalesce(col("record_id"), lit("")).alias("record_id"),
                        coalesce(col("product_name"), lit("")).alias("product_name"),
                        coalesce(col("product_category"), lit("")).alias("product_category"),
                        coalesce(col("avg_price"), lit(0.0)).alias("avg_price"),
                        coalesce(col("low_price"), lit(0.0)).alias("low_price"),
                        coalesce(col("high_price"), lit(0.0)).alias("high_price"),
                        coalesce(col("place"), lit("")).alias("place"),
                        coalesce(col("spec"), lit("")).alias("spec"),
                        coalesce(col("unit"), lit("")).alias("unit"),
                        coalesce(col("pub_date"), lit("")).alias("pub_date"),
                        coalesce(col("crawl_time"), lit("")).alias("crawl_time"),
                        coalesce(col("price_anomaly_flag"), lit("NORMAL")).alias("price_anomaly_flag"),
                        coalesce(col("data_quality_score"), lit(60)).alias("data_quality_score"),
                        coalesce(col("processing_timestamp"), current_timestamp().cast("string")).alias("processing_timestamp"),
                        lit(f"batch_{batch_id}").alias("batch_id"),
                        lit("kafka_stream").alias("data_source"),
                        coalesce(col("is_deduplicated"), lit(True)).alias("is_deduplicated")
                    )

                    # Primary storage: one Parquet directory per batch on HDFS.
                    hdfs_path = f"/agriculture_data/cleaned_data/batch_{batch_id}"
                    df_processed.write \
                        .mode("append") \
                        .format("parquet") \
                        .save(hdfs_path)
                    # df_processed is a projection of df, so its row count
                    # equals total_records — no extra count() action needed.
                    print(f"批次 {batch_id} 去重后数据已写入HDFS，记录数: {total_records}")

                    # Best-effort Hive sync.
                    try:
                        # Cheap availability probe before attempting the insert.
                        self.spark.sql("SHOW TABLES IN default").collect()

                        # insertInto relies on column POSITION matching the
                        # table definition, which df_processed guarantees.
                        df_processed.write \
                            .mode("append") \
                            .insertInto("default.agriculture_cleaned_data")
                        print(f"批次 {batch_id} 去重后数据已同步到Hive表")

                    except Exception as hive_error:
                        print(f"Hive同步失败（数据已安全存储在HDFS）: {hive_error}")

                else:
                    print(f"批次 {batch_id} 没有数据需要写入（可能全部被去重过滤）")
            finally:
                # Release the cached batch regardless of outcome.
                df.unpersist()

        except Exception as e:
            print(f"数据写入失败: {e}")

    def setup_signal_handler(self):
        """Install SIGINT/SIGTERM handlers that trigger graceful shutdown."""
        def signal_handler(signum, frame):
            print("\n收到停止信号，正在优雅关闭...")
            self.running = False
            self.graceful_shutdown()

        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGTERM, signal_handler)
        print("已启用优雅关闭机制")

    def graceful_shutdown(self):
        """Stop all streaming queries and the SparkSession, then exit(0)."""
        print("正在停止所有流查询...")
        for query in self.queries:
            if query.isActive:
                print(f"停止查询: {query.name}")
                query.stop()

        print("等待查询完全停止...")
        for query in self.queries:
            try:
                query.awaitTermination(timeout=30)
            except Exception:
                # Narrowed from a bare `except:` so SystemExit /
                # KeyboardInterrupt are no longer swallowed; termination
                # errors here are expected and safe to ignore.
                pass

        if self.spark:
            print("正在停止Spark会话...")
            self.spark.stop()
            print("Spark已优雅关闭")

        print("系统已安全关闭")
        sys.exit(0)

    def run(self):
        """Entry point: wire up Spark, Kafka, and the output streams, then
        supervise the queries until stopped."""
        print("启动农业数据实时处理系统（已集成去重功能）...")

        # Install signal handlers before anything blocks.
        self.setup_signal_handler()

        if not self.setup_spark_session():
            print("Spark初始化失败，程序退出")
            return

        self.create_hdfs_directories()

        # create_hive_table always returns True today; the guard is kept in
        # case its contract changes.
        if not self.create_hive_table():
            print("存储初始化失败，但继续运行...")

        kafka_df = self.setup_kafka_stream()
        if kafka_df is None:
            print("Kafka连接失败，程序退出")
            return

        print("开始数据解析、清洗、转换和去重...")
        parsed_df = self.parse_kafka_data(kafka_df)
        cleaned_df = self.clean_and_transform_data(parsed_df)

        try:
            # Primary sink: foreachBatch into HDFS/Hive every 60 seconds.
            data_storage_query = cleaned_df.writeStream \
                .outputMode("append") \
                .foreachBatch(self.write_to_storage) \
                .option("checkpointLocation", "/tmp/checkpoint/cleaned_data_dedup") \
                .trigger(processingTime='60 seconds') \
                .queryName("data_storage_with_dedup") \
                .start()

            self.queries.append(data_storage_query)

            # Secondary sink: console output for debugging.
            console_query = cleaned_df.writeStream \
                .outputMode("append") \
                .format("console") \
                .option("truncate", "false") \
                .option("numRows", 5) \
                .trigger(processingTime='60 seconds') \
                .queryName("console_debug_dedup") \
                .start()

            self.queries.append(console_query)

            print("=" * 70)
            print("实时处理系统已启动成功！")
            print("✓ 已修复Hive版本兼容性问题（使用2.3.9）")
            print("✓ 已修复属性访问问题（使用索引访问）")
            print("✓ 已解决Kafka依赖包问题")
            print("✓ 数据清洗和转换功能已启用")
            print("✓ 优化存储策略：HDFS主存储 + Hive表同步")
            print("✓ 使用Parquet格式提升兼容性和性能")
            print("✓ 【新增】智能去重功能：基于产品名称、地区、发布日期")
            print("✓ 【新增】15分钟水印机制，优化状态管理")
            print("✓ 【新增】去重统计监控，实时显示处理效果")
            print("✓ 【新增】去重标记字段，便于数据追踪")
            print("✓ 使用 Ctrl+C 进行优雅关闭")
            print("=" * 70)

            # Supervision loop: stop everything as soon as any query dies.
            while self.running:
                for query in self.queries:
                    if not query.isActive:
                        print(f"查询 {query.name} 已停止")
                        self.running = False
                        break

                if self.running:
                    time.sleep(5)

        except Exception as e:
            print(f"流处理启动失败: {e}")
        finally:
            self.graceful_shutdown()
# Script entry point: build the processor and run it, reporting how the
# program terminated.
if __name__ == "__main__":
    app = AgricultureDataProcessor()
    try:
        app.run()
    except KeyboardInterrupt:
        print("\n程序被用户中断")
    except Exception as exc:
        print(f"程序异常退出: {exc}")
    finally:
        print("程序已完全退出")