from flask import Flask, jsonify, request, send_from_directory
from flask_cors import CORS
from pyspark.sql import SparkSession
import json
import os
from datetime import datetime, timedelta
import logging
import pandas as pd
from sklearn.linear_model import LinearRegression
import numpy as np

# Logging setup: mirror every INFO+ record to both app.log and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('app.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Serve the bundled frontend from ./frontend; CORS allows the browser UI to
# call the /api/* endpoints from a different origin during development.
app = Flask(__name__, static_folder='frontend')
CORS(app)

def create_spark_session():
    """Create the application's SparkSession, tuned for a small VM.

    Tries the YARN cluster first with conservative resource settings; if
    that fails, falls back to local[*] mode. Returns the session, or None
    when both attempts fail, so the web app can still start and report
    errors per-request instead of crashing at import time.

    Returns:
        SparkSession | None
    """

    def _build(master, configs, with_hive):
        # Single builder chain shared by both modes; removes the duplicated
        # builder code the two branches previously carried.
        builder = (SparkSession.builder
                   .appName("AgricultureDataVisualization")
                   .master(master))
        for key, value in configs.items():
            builder = builder.config(key, value)
        if with_hive:
            builder = builder.enableHiveSupport()
        return builder.getOrCreate()

    try:
        spark = _build(
            "yarn",
            {
                "spark.driver.memory": "2g",
                "spark.executor.memory": "2g",
                "spark.executor.cores": "2",
                "spark.yarn.am.memory": "2g",
                "spark.yarn.am.cores": "2",
                # Fixed-size executors: dynamic allocation disabled on this
                # resource-constrained cluster.
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
                "spark.sql.adaptive.enabled": "true",
            },
            with_hive=True,  # the agriculture data lives in Hive tables
        )
        logger.info("Spark会话创建成功")
        return spark
    except Exception as e:
        logger.error(f"Spark会话创建失败: {str(e)}")
        # Fall back to a standalone local session (no Hive metastore needed).
        try:
            logger.info("尝试使用本地模式创建Spark会话")
            spark = _build(
                "local[*]",
                {
                    "spark.driver.memory": "4g",
                    "spark.executor.memory": "4g",
                    "spark.sql.adaptive.enabled": "true",
                    "spark.sql.execution.arrow.pyspark.enabled": "true",
                },
                with_hive=False,
            )
            logger.info("本地模式Spark会话创建成功")
            return spark
        except Exception as e2:
            logger.error(f"本地模式Spark会话创建也失败: {str(e2)}")
            return None

# Module-level Spark session created once at import time. May be None when
# both YARN and local-mode creation fail; every endpoint checks for that.
spark = create_spark_session()

@app.route('/')
def index():
    """Serve the single-page frontend's entry document."""
    try:
        return send_from_directory(app.static_folder, 'index.html')
    except Exception as exc:
        logger.error(f"提供前端页面失败: {str(exc)}")
        return jsonify({"status": "error", "message": "前端页面加载失败"})

@app.route('/<path:path>')
def serve_static(path):
    """Serve any other static asset (JS/CSS/images) from the static folder."""
    try:
        return send_from_directory(app.static_folder, path)
    except Exception as exc:
        logger.error(f"提供静态文件失败: {str(exc)}")
        return jsonify({"status": "error", "message": "静态文件加载失败，可能是刷新了"})

@app.route('/api/price-trends', methods=['GET'])
def get_price_trends():
    """Return per-product average daily prices for the last 30 days.

    Response: {"status": "success", "data": [{pub_date, product_name,
    avg_price}, ...]} on success, or {"status": "error", "message": ...}.
    """
    try:
        if spark is None:
            return jsonify({"status": "error", "message": "Spark会话未初始化"})

        df = spark.sql("""
            SELECT 
                pub_date,
                product_name,
                AVG(avg_price) as avg_price
            FROM pagination_db.agriculture_pagination_data
            WHERE pub_date >= date_sub(current_date(), 30)
            GROUP BY pub_date, product_name
            ORDER BY pub_date
        """)

        pdf = df.toPandas()
        # NULL prices surface as NaN in pandas, and NaN is not a valid JSON
        # token — convert missing values to None so jsonify emits null.
        pdf = pdf.astype(object).where(pdf.notna(), None)
        trends_data = pdf.to_dict('records')
        return jsonify({"status": "success", "data": trends_data})
    except Exception as e:
        logger.error(f"获取价格趋势数据失败: {str(e)}")
        return jsonify({"status": "error", "message": str(e)})

@app.route('/api/product-stats', methods=['GET'])
def get_product_stats():
    """Per-product price aggregates for the top 20 products by record count."""
    # Guard clause: no Spark session means no data source at all.
    if spark is None:
        return jsonify({"status": "error", "message": "Spark会话未初始化"})
    try:
        result = spark.sql("""
            SELECT 
                product_name,
                COUNT(*) as record_count,
                AVG(avg_price) as avg_price,
                MIN(avg_price) as min_price,
                MAX(avg_price) as max_price,
                AVG(data_quality_score) as quality_score
            FROM pagination_db.agriculture_pagination_data
            GROUP BY product_name
            ORDER BY record_count DESC
            LIMIT 20
        """)
        records = result.toPandas().to_dict('records')
        return jsonify({"status": "success", "data": records})
    except Exception as exc:
        logger.error(f"获取产品统计数据失败: {str(exc)}")
        return jsonify({"status": "error", "message": str(exc)})

@app.route('/api/place-analysis', methods=['GET'])
def get_place_analysis():
    """Aggregate record counts, average price and product variety per place
    of origin (top 15 places by record count)."""
    # Guard clause: fail fast when Spark never came up.
    if spark is None:
        return jsonify({"status": "error", "message": "Spark会话未初始化"})
    try:
        result = spark.sql("""
            SELECT 
                place,
                COUNT(*) as record_count,
                AVG(avg_price) as avg_price,
                COUNT(DISTINCT product_name) as product_count
            FROM pagination_db.agriculture_pagination_data
            GROUP BY place
            ORDER BY record_count DESC
            LIMIT 15
        """)
        records = result.toPandas().to_dict('records')
        return jsonify({"status": "success", "data": records})
    except Exception as exc:
        logger.error(f"获取产地分析数据失败: {str(exc)}")
        return jsonify({"status": "error", "message": str(exc)})

@app.route('/api/quality-metrics', methods=['GET'])
def get_quality_metrics():
    """Return dataset-wide quality metrics as a single JSON object.

    Fields: avg_quality, anomaly_count, total_records, unique_products,
    unique_places.
    """
    try:
        if spark is None:
            return jsonify({"status": "error", "message": "Spark会话未初始化"})

        df = spark.sql("""
            SELECT 
                AVG(data_quality_score) as avg_quality,
                COUNT(CASE WHEN price_anomaly_flag != 'NORMAL' THEN 1 END) as anomaly_count,
                COUNT(*) as total_records,
                COUNT(DISTINCT product_name) as unique_products,
                COUNT(DISTINCT place) as unique_places
            FROM pagination_db.agriculture_pagination_data
        """)

        pdf = df.toPandas()
        # AVG over an empty/NULL column yields NaN, which is not valid JSON;
        # map missing values to None so jsonify emits null.
        pdf = pdf.astype(object).where(pdf.notna(), None)
        records = pdf.to_dict('records')
        # A global aggregate always returns one row, but guard the index
        # access anyway instead of raising IndexError on an empty frame.
        metrics_data = records[0] if records else {}
        return jsonify({"status": "success", "data": metrics_data})
    except Exception as e:
        logger.error(f"获取数据质量指标失败: {str(e)}")
        return jsonify({"status": "error", "message": str(e)})

@app.route('/api/price-distribution', methods=['GET'])
def get_price_distribution():
    """Return price distribution stats (quartiles, mean, stddev) for the 10
    highest-priced products that have more than 10 records."""
    try:
        if spark is None:
            return jsonify({"status": "error", "message": "Spark会话未初始化"})

        df = spark.sql("""
            SELECT 
                product_name,
                percentile_approx(avg_price, array(0.25, 0.5, 0.75)) as price_quartiles,
                AVG(avg_price) as mean_price,
                STDDEV(avg_price) as price_stddev
            FROM pagination_db.agriculture_pagination_data
            GROUP BY product_name
            HAVING COUNT(*) > 10
            ORDER BY mean_price DESC
            LIMIT 10
        """)

        pdf = df.toPandas()
        # The ARRAY<DOUBLE> quartile column can round-trip through pandas as
        # a numpy array of numpy floats per cell, which Flask's JSON encoder
        # cannot serialize — coerce each cell to a plain list of floats.
        pdf['price_quartiles'] = pdf['price_quartiles'].apply(
            lambda q: None if q is None else [float(v) for v in q])
        distribution_data = pdf.to_dict('records')
        return jsonify({"status": "success", "data": distribution_data})
    except Exception as e:
        logger.error(f"获取价格分布数据失败: {str(e)}")
        return jsonify({"status": "error", "message": str(e)})

def _forecast_product_prices(product_data, horizon=7):
    """Fit a linear trend to one product's daily prices and extrapolate.

    Args:
        product_data: pandas DataFrame with 'pub_date' and 'avg_price'
            columns, sorted ascending by pub_date.
        horizon: number of future days to predict (default 7, matching the
            original behavior).

    Returns:
        (future_dates, future_prices, last_price) where future_dates are
        'YYYY-MM-DD' strings starting the day after the last observation.

    NOTE(review): observations are indexed 0..n-1, so the fit assumes the
    series is evenly spaced; gaps in pub_date are ignored — confirm this
    is acceptable for the data.
    """
    X = np.arange(len(product_data)).reshape(-1, 1)
    y = product_data['avg_price'].values

    model = LinearRegression()
    model.fit(X, y)

    future_X = np.arange(len(product_data), len(product_data) + horizon).reshape(-1, 1)
    future_prices = model.predict(future_X)

    last_date = pd.to_datetime(product_data['pub_date'].iloc[-1])
    future_dates = [(last_date + timedelta(days=i + 1)).strftime('%Y-%m-%d')
                    for i in range(horizon)]
    return future_dates, future_prices, float(y[-1])


@app.route('/api/price-prediction', methods=['GET'])
def get_price_prediction():
    """Predict the next 7 days of prices per product using linear regression
    over the last 30 days of observed daily averages."""
    try:
        if spark is None:
            return jsonify({"status": "error", "message": "Spark会话未初始化"})

        # Last 30 days of per-product daily average prices.
        df = spark.sql("""
            SELECT 
                pub_date,
                product_name,
                AVG(avg_price) as avg_price
            FROM pagination_db.agriculture_pagination_data
            WHERE pub_date >= date_sub(current_date(), 30)
            GROUP BY pub_date, product_name
            ORDER BY pub_date
        """)

        df_pd = df.toPandas()

        predictions = []
        for product in df_pd['product_name'].unique():
            product_data = df_pd[df_pd['product_name'] == product].sort_values('pub_date')

            # Skip products with too little history for a meaningful fit.
            if len(product_data) <= 5:
                continue

            dates, prices, last_price = _forecast_product_prices(product_data)
            predictions.append({
                'product_name': product,
                'dates': dates,
                'predicted_prices': prices.tolist(),
                'last_price': last_price
            })

        return jsonify({"status": "success", "data": predictions})
    except Exception as e:
        logger.error(f"获取价格预测数据失败: {str(e)}")
        return jsonify({"status": "error", "message": str(e)})

@app.route('/api/health', methods=['GET'])
def health_check():
    """Liveness probe: reports whether the Spark session is available."""
    try:
        if spark is not None:
            return jsonify({"status": "success", "message": "服务正常运行"})
        return jsonify({"status": "error", "message": "Spark会话未初始化"})
    except Exception as e:
        return jsonify({"status": "error", "message": str(e)})

if __name__ == '__main__':
    try:
        # Start the Flask dev server, reachable from other hosts (0.0.0.0).
        app.run(host='0.0.0.0', port=5000, debug=False)
    except Exception as e:
        logger.error(f"应用启动失败: {str(e)}")
    finally:
        # Always release cluster resources on the way out, even after a
        # startup failure or Ctrl-C.
        if spark is not None:
            spark.stop()
            logger.info("Spark会话已关闭")