# storage/data_storage.py
import os
import shutil

from pyspark.sql import SparkSession, DataFrame

from confg import Config  # NOTE(review): module name 'confg' looks like a typo for 'config' — confirm


class DataStorage:
    """Persistence layer for traffic data.

    Saves/loads the raw and processed datasets as Parquet under
    ``Config.DATA_DIR`` (``raw/`` and ``processed/`` subdirectories) and can
    export a DataFrame as a single CSV file for manual inspection.
    """

    def __init__(self, spark: SparkSession):
        self.spark = spark
        self.config = Config()
        # Canonical Parquet locations for the two dataset stages.
        self.raw_data_path = os.path.join(self.config.DATA_DIR, "raw", "traffic_data.parquet")
        self.processed_data_path = os.path.join(self.config.DATA_DIR, "processed", "traffic_data.parquet")

    def save_raw_data(self, df: DataFrame) -> None:
        """Save the raw dataset to its Parquet location, overwriting any old copy."""
        df.write.mode("overwrite").parquet(self.raw_data_path)
        print(f"原始数据已保存至: {self.raw_data_path}")

    def load_raw_data(self) -> DataFrame:
        """Load the raw dataset from its Parquet location."""
        return self.spark.read.parquet(self.raw_data_path)

    def save_processed_data(self, df: DataFrame) -> None:
        """Save the processed dataset to its Parquet location, overwriting any old copy."""
        df.write.mode("overwrite").parquet(self.processed_data_path)
        print(f"处理后的数据已保存至: {self.processed_data_path}")

    def load_processed_data(self) -> DataFrame:
        """Load the processed dataset from its Parquet location."""
        return self.spark.read.parquet(self.processed_data_path)

    def export_to_csv(self, df: DataFrame, file_name: str) -> None:
        """Export *df* as a single CSV file (for viewing).

        Spark writes a *directory* of part-files; this moves the single
        part-file to ``<csv_path>.csv`` and removes the Spark output
        directory (``_SUCCESS`` / ``.crc`` leftovers).

        Raises:
            FileNotFoundError: if Spark produced no CSV part-file.
        """
        csv_path = os.path.join(self.config.DATA_DIR, "exports", file_name)
        (df.coalesce(1)  # single partition so exactly one part-file is written
         .write
         .mode("overwrite")
         .option("header", "true")
         .csv(csv_path))

        # Locate the part-file Spark generated inside its output directory.
        part_files = [f for f in os.listdir(csv_path) if f.endswith('.csv')]
        if not part_files:
            raise FileNotFoundError(f"No CSV part-file found in {csv_path}")
        target = f"{csv_path}.csv"
        if os.path.exists(target):
            # os.rename/shutil.move fail on Windows when the target exists.
            os.remove(target)
        # shutil.move works across filesystems, unlike os.rename.
        shutil.move(os.path.join(csv_path, part_files[0]), target)
        shutil.rmtree(csv_path, ignore_errors=True)  # drop the now-empty Spark dir
        print(f"CSV文件已导出至: {csv_path}.csv")

    def data_exists(self, path: str) -> bool:
        """Return True if *path* is an existing, non-empty directory.

        A Parquet dataset written by Spark is a directory of files, so
        "non-empty directory" is the existence check used here.
        """
        try:
            return len(os.listdir(path)) > 0
        except OSError:  # missing path, not a directory, permission denied, ...
            return False