from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, to_date, mean, lag, date_format, lit, array, expr
from pyspark.sql.window import Window
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DateType
from datetime import datetime, timedelta
from pyspark.sql import functions as F

# Spark session for a local run against the remote Hive metastore.
# Shuffle partitions forced to 1 because each brand's data set is tiny.
builder = (
    SparkSession.builder
    .appName("CarSalesPrediction")
    .master("local")
    .enableHiveSupport()
)
builder = builder.config("hive.metastore.uris", "thrift://master:9083")
builder = builder.config("spark.sql.shuffle.partitions", 1)
spark = builder.getOrCreate()

# Load the cleaned car-sales table from Hive.
df = spark.sql("SELECT * FROM car_sales_clean")

# Output schema for the per-brand monthly forecasts.
result_schema = StructType([
    StructField("brand_name", StringType()),
    StructField("date", StringType()),
    StructField("predicted_sales", IntegerType()),
])

# Accumulator DataFrame; each brand's forecasts are unioned into it below.
all_brands_predictions = spark.createDataFrame([], schema=result_schema)

# Distinct brand names to iterate over.
unique_brands = [r["brand_name"] for r in df.select("brand_name").distinct().collect()]

def _next_month_labels(start_date, n):
    """Return 'YYYY-MM' labels for the n calendar months after start_date.

    Uses true month arithmetic instead of fixed 30-day steps, which drift
    over a 10-month horizon and can repeat or skip a month label.
    """
    year, month = start_date.year, start_date.month
    labels = []
    for _ in range(n):
        month += 1
        if month > 12:
            month = 1
            year += 1
        labels.append(f"{year:04d}-{month:02d}")
    return labels


# Stateless ML stages — hoisted out of the per-brand loop since they are
# identical for every brand.
assembler = VectorAssembler(
    inputCols=["lag_1", "lag_2", "lag_3"],
    outputCol="features")
evaluator = RegressionEvaluator(
    labelCol="avg_count",
    predictionCol="prediction",
    metricName="mse")

for brand in unique_brands:
    try:
        print(f"Processing brand: {brand}")

        # Restrict to the current brand.
        brand_df = df.filter(col("brand_name") == brand)

        # Parse dates and average the sales count per date.
        brand_df = brand_df.withColumn("date", to_date(col("date")))
        avg_sales = brand_df.groupBy("date").agg(mean("count").alias("avg_count"))

        # Unpartitioned window (acceptable: the data is already one brand).
        window_spec = Window.orderBy("date")

        # Autoregressive features: sales from the 1/2/3 preceding dates.
        for k in (1, 2, 3):
            avg_sales = avg_sales.withColumn(
                f"lag_{k}", lag("avg_count", k).over(window_spec))

        # The first 3 rows have null lags; drop them. Cache because this
        # DataFrame is reused for count(), fit() and the last-row lookup.
        clean_df = avg_sales.na.drop().cache()

        if clean_df.count() < 4:  # Need at least 4 points to have 3 lags
            print(f"Not enough data for brand {brand}")
            continue

        # Hold out roughly the last 3 months as a test set.
        split_date = clean_df.select(F.max("date")).first()[0] - timedelta(days=90)
        train_df = clean_df.filter(col("date") <= split_date)
        test_df = clean_df.filter(col("date") > split_date)

        train_data = assembler.transform(train_df)
        test_data = assembler.transform(test_df)

        # Fit the autoregressive linear model on the training window.
        lr = LinearRegression(featuresCol="features", labelCol="avg_count")
        model = lr.fit(train_data)

        # Report test MSE when a hold-out set exists.
        if test_data.count() > 0:
            predictions = model.transform(test_data)
            mse = evaluator.evaluate(predictions)
            print(f"Brand: {brand}, MSE: {mse}")

        # Seed the recursive forecast with the most recent known lags.
        last_row = clean_df.orderBy(col("date").desc()).first()
        current_lags = [last_row.lag_1, last_row.lag_2, last_row.lag_3]

        # Calendar-correct labels for the next 10 months (fixes the old
        # 30-day stepping, which drifted and could duplicate a label).
        future_dates_str = _next_month_labels(last_row.date, 10)

        # Recursive multi-step forecast: each prediction becomes lag_1
        # for the following step.
        predicted_values = []
        for _ in range(10):
            temp_df = spark.createDataFrame([{
                "lag_1": current_lags[0],
                "lag_2": current_lags[1],
                "lag_3": current_lags[2],
            }])
            pred = model.transform(assembler.transform(temp_df)).first().prediction
            # Clamp to zero instead of abs(): a negative forecast means
            # "no sales", not a mirrored positive sales volume.
            predicted_sales = max(0, int(pred))
            predicted_values.append(predicted_sales)
            current_lags = [predicted_sales] + current_lags[:-1]

        # Collect this brand's forecasts and append to the global result.
        brand_predictions = spark.createDataFrame(
            zip([brand] * 10, future_dates_str, predicted_values),
            schema=result_schema
        )
        all_brands_predictions = all_brands_predictions.union(brand_predictions)

        # Release the cached per-brand DataFrame before the next iteration.
        clean_df.unpersist()

    except Exception as e:
        # Best-effort per-brand processing: log the failure and move on.
        print(f"Error processing brand {brand}: {str(e)}")
        continue

# Persist the combined forecasts to MySQL (table is overwritten each run).
if all_brands_predictions.count() == 0:
    print("No predictions were generated")
else:
    print("Writing predictions to MySQL...")
    jdbc_options = {
        "url": "jdbc:mysql://localhost:3306/car_sales",
        "dbtable": "car_prediction",
        "user": "root",
        "password": "123456",  # NOTE(review): hard-coded credential — consider moving to config
    }
    writer = all_brands_predictions.write.format("jdbc").mode("overwrite")
    for opt_key, opt_value in jdbc_options.items():
        writer = writer.option(opt_key, opt_value)
    writer.save()
    print("Predictions successfully saved to MySQL")

spark.stop()
