# 导入必要的包
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import pickle
import os
import glob
from tensorflow.keras.models import Sequential, save_model, load_model
from tensorflow.keras.layers import LSTM, Dense, Dropout, GRU
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import IsolationForest
from sklearn.metrics import mean_squared_error, mean_absolute_error
from datetime import datetime

# Configure matplotlib for Chinese text: use the SimHei font, and keep the
# minus sign rendering correctly alongside it.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],
    'axes.unicode_minus': False,
})


class WeatherForecaster:
    """Changzhou weather forecasting system.

    NOTE(review): this entire class (together with the import block above it)
    is duplicated verbatim later in this file and re-defined there with extra
    visualization/run methods; this first definition is shadowed by the later
    one and is effectively dead code. Consider deleting one of the two copies.

    Pipeline: load a daily weather CSV, flag anomalies, add lag/seasonal
    features, scale, window into sequences, train an LSTM/GRU regressor for
    one target column, and roll forecasts forward day by day.
    """

    def __init__(self, data_file='常州.csv', target_col='Maximum Temperature',
                 time_step=10, model_type='gru', epochs=100, batch_size=32):
        """Initialize the forecaster.

        Args:
            data_file: path of the daily weather CSV to load.
            target_col: name of the column to forecast.
            time_step: number of past days fed to the model per sample.
            model_type: 'lstm' or 'gru' (any other value builds no recurrent layers).
            epochs: maximum number of training epochs.
            batch_size: mini-batch size used during training.
        """
        self.data_file = data_file
        self.target_col = target_col
        self.time_step = time_step
        self.model_type = model_type
        self.epochs = epochs
        self.batch_size = batch_size

        # The data file's base name (no extension) prefixes every artifact
        # (model .h5, info pickle, plots, forecast CSVs).
        self.model_prefix = os.path.splitext(os.path.basename(data_file))[0]
        self.model_path = f'{self.model_prefix}_{target_col.replace(" ", "_").lower()}_model.h5'
        self.info_path = f'{self.model_prefix}_{target_col.replace(" ", "_").lower()}_info.pkl'

        # Pipeline state, filled in by the methods below.
        self.df = None            # loaded / feature-engineered DataFrame
        self.model = None         # compiled Keras model
        self.scaler = None        # MinMaxScaler fitted over all_cols
        self.feature_cols = None  # input feature column names
        self.all_cols = None      # feature_cols plus the target (deduplicated)
        self.target_idx = None    # position of target_col inside all_cols
        self.X = None             # windowed inputs, shape (n, time_step, len(all_cols))
        self.y = None             # scaled next-day target per window

        print(f"初始化常州气象预测系统，目标变量: {target_col}")
        print(f"模型将保存为: {self.model_path}")

    def generate_sample_data(self):
        """Generate synthetic daily weather data for when the CSV fails to load.

        Returns a DataFrame indexed by date (2010-2020) with a yearly
        sinusoidal temperature cycle plus noise, and random rainfall,
        pressure, wind-speed and dew-point columns.
        """
        date_rng = pd.date_range(start='2010-01-01', end='2020-12-31', freq='D')
        n = len(date_rng)
        # Yearly sinusoid around 20 degrees with Gaussian noise.
        temps = 20 + 10 * np.sin(np.arange(n) * (2 * np.pi / 365)) + np.random.normal(0, 2, n)
        rainfall = np.abs(np.random.gamma(1, 5, n))
        pressure = 1010 + np.random.normal(0, 3, n)
        wind_speed = np.abs(np.random.normal(10, 5, n))
        dew_point = temps - 5 + np.random.normal(0, 1, n)

        df = pd.DataFrame({
            'Maximum Temperature': temps,
            'Total Rainfall': rainfall,
            'Mean Pressure': pressure,
            'Mean Wind Speed': wind_speed,
            'Mean Dew Point Temperature': dew_point
        }, index=date_rng)
        return df

    def load_data(self):
        """Load the Changzhou weather CSV into self.df.

        Falls back to generated sample data on any error. NOTE(review): the
        column assignment assumes the CSV has exactly these six columns in
        this order — any other layout raises and silently triggers the
        sample-data fallback.
        """
        try:
            df = pd.read_csv(self.data_file)
            # Overwrite the header with the canonical column names.
            df.columns = [
                'Maximum Temperature',
                'Total Rainfall',
                'Mean Pressure',
                'Mean Wind Speed',
                'Mean Dew Point Temperature',
                'Date'
            ]
            # Parse the date column and use it as the index.
            df['Date'] = pd.to_datetime(df['Date'])
            df.set_index('Date', inplace=True)
            # Keep rows in chronological order.
            df.sort_index(inplace=True)
            self.df = df
            print(f"成功加载数据，共 {len(df)} 条记录")
        except Exception as e:
            print(f"加载数据时出错: {e}")
            print("使用生成的示例数据代替")
            self.df = self.generate_sample_data()

        return self.df

    def detect_anomalies(self, contamination=0.01):
        """Flag outliers in the target column with an IsolationForest.

        Adds an 'anomaly' column (-1 = outlier, 1 = normal); flagged rows
        are kept, not dropped.
        """
        model_iso = IsolationForest(contamination=contamination, random_state=42)
        self.df['anomaly'] = model_iso.fit_predict(self.df[[self.target_col]])
        # Optionally drop the outliers instead of just flagging them:
        # self.df = self.df[self.df['anomaly'] == 1]
        print(f"异常检测完成，标记了 {sum(self.df['anomaly'] == -1)} 条异常记录")
        return self.df

    def add_features(self):
        """Add lag and calendar features to self.df in place."""
        # Lag features: the target value from each of the past 3 days.
        for i in range(1, 4):
            self.df[f'{self.target_col}_Lag_{i}'] = self.df[self.target_col].shift(i)

        # Simple seasonality proxies.
        self.df['Month'] = self.df.index.month
        self.df['Day'] = self.df.index.day

        # Drop the leading rows made NaN by the shifts.
        self.df = self.df.dropna()
        print("特征工程完成，添加了滞后特征和季节特征")

    def preprocess_data(self):
        """Scale the features and window them into supervised sequences.

        Builds self.X (windows of time_step rows over all_cols) and self.y
        (the scaled target value of the row right after each window).
        NOTE(review): this re-fits self.scaler on every call, so calling it
        after load_model() overwrites the scaler restored from the pickle.
        """
        # Input features (for the default target, the target also appears
        # directly as well as via its lag columns).
        self.feature_cols = ['Maximum Temperature', 'Mean Pressure', 'Mean Wind Speed',
                             'Mean Dew Point Temperature',
                             f'{self.target_col}_Lag_1', f'{self.target_col}_Lag_2', f'{self.target_col}_Lag_3',
                             'Month', 'Day']

        # Drop rows with missing values in any used column (target included
        # when it is not already one of the features).
        self.df = self.df.dropna(subset=self.feature_cols + ([self.target_col]
                                if self.target_col not in self.feature_cols else []))

        # Columns to scale: the features plus the target, deduplicated.
        self.all_cols = self.feature_cols.copy()
        if self.target_col not in self.all_cols:
            self.all_cols.append(self.target_col)

        # Remember where the target sits so predictions can be un-scaled later.
        self.target_idx = self.all_cols.index(self.target_col)

        # Scale every column into [0, 1].
        self.scaler = MinMaxScaler(feature_range=(0, 1))
        scaled_data = self.scaler.fit_transform(self.df[self.all_cols])

        # Sliding windows: X[i] covers rows i..i+time_step-1 and y[i] is the
        # scaled target at row i+time_step.
        X, y = [], []
        for i in range(len(scaled_data) - self.time_step):
            X.append(scaled_data[i:(i + self.time_step), :])
            y.append(scaled_data[i + self.time_step, self.target_idx])

        self.X = np.array(X)
        self.y = np.array(y)

        print(f"数据预处理完成，创建了 {len(X)} 个时间序列样本")
        return self.X, self.y

    def build_model(self):
        """Build and compile the recurrent regression model.

        Two stacked LSTM or GRU layers with dropout, then a small dense head
        that outputs a single scaled target value. Compiled with MSE loss
        and the Adam optimizer.
        """
        model = Sequential()

        if self.model_type == 'lstm':
            model.add(LSTM(64, return_sequences=True, input_shape=(self.time_step, len(self.all_cols))))
            model.add(Dropout(0.2))
            model.add(LSTM(32))
        elif self.model_type == 'gru':
            model.add(GRU(64, return_sequences=True, input_shape=(self.time_step, len(self.all_cols))))
            model.add(Dropout(0.2))
            model.add(GRU(32))

        model.add(Dropout(0.2))
        model.add(Dense(16, activation='relu'))
        model.add(Dense(1))
        model.compile(loss='mean_squared_error', optimizer='adam')

        self.model = model
        print(f"构建了 {self.model_type.upper()} 模型")
        return model

    def train_model(self, train_size=0.8):
        """Train the model on a chronological train/test split.

        The last (1 - train_size) fraction of the windows doubles as the
        validation set. The best checkpoint (by val_loss) is reloaded into
        self.model before returning.

        Returns:
            (history, X_test, y_test)
        """
        # Chronological split — no shuffling for time-series data.
        split_idx = int(len(self.X) * train_size)
        X_train, X_test = self.X[:split_idx], self.X[split_idx:]
        y_train, y_test = self.y[:split_idx], self.y[split_idx:]

        # Early stopping plus best-model checkpointing on validation loss.
        checkpoint_path = f'best_{self.model_prefix}_{self.target_col.replace(" ", "_").lower()}_model.h5'
        model_checkpoint = ModelCheckpoint(
            checkpoint_path,
            save_best_only=True,
            monitor='val_loss'
        )
        early_stop = EarlyStopping(
            monitor='val_loss',
            patience=10,
            restore_best_weights=True
        )

        # Fit; the test split is (re)used as the validation data.
        history = self.model.fit(
            X_train, y_train,
            epochs=self.epochs,
            batch_size=self.batch_size,
            validation_data=(X_test, y_test),
            callbacks=[early_stop, model_checkpoint],
            verbose=1
        )

        print(f"模型训练完成，最佳模型保存到 {checkpoint_path}")

        # Reload the best checkpoint from disk.
        self.model = load_model(checkpoint_path)

        return history, X_test, y_test

    def save_model(self):
        """Persist the trained model (.h5) and its preprocessing info (.pkl).

        The pickle stores everything predict-time code needs to reproduce
        the preprocessing: the fitted scaler, column lists, target index,
        window length, last training date and a creation timestamp.
        """
        # This resolves to the keras `save_model` imported at module level
        # (the method name only shadows it as a class attribute).
        save_model(self.model, self.model_path)

        model_info = {
            'scaler': self.scaler,
            'feature_cols': self.feature_cols,
            'target_col': self.target_col,
            'target_idx': self.target_idx,
            'time_step': self.time_step,
            'all_cols': self.all_cols,
            'last_date': self.df.index[-1],
            'model_type': self.model_type,
            'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }

        with open(self.info_path, 'wb') as f:
            pickle.dump(model_info, f)

        print(f"模型已保存到 {self.model_path}")
        print(f"模型信息已保存到 {self.info_path}")

    def load_model(self):
        """Load a previously saved model and its preprocessing info.

        Restores the scaler, column lists and hyper-parameters so inference
        matches the training-time preprocessing.

        Returns:
            True on success, False when the files are missing or unreadable.
        """
        if not os.path.exists(self.model_path) or not os.path.exists(self.info_path):
            print(f"找不到模型文件 {self.model_path} 或信息文件 {self.info_path}")
            return False

        try:
            # The global keras `load_model`, not this method (module scope).
            self.model = load_model(self.model_path)

            with open(self.info_path, 'rb') as f:
                model_info = pickle.load(f)

            # Restore the preprocessing state captured at save time.
            self.scaler = model_info['scaler']
            self.feature_cols = model_info['feature_cols']
            self.target_col = model_info['target_col']
            self.target_idx = model_info['target_idx']
            self.time_step = model_info['time_step']
            self.all_cols = model_info['all_cols']
            self.model_type = model_info['model_type']

            print(f"成功加载模型 {self.model_path}")
            print(f"模型创建于: {model_info.get('timestamp', '未知')}")
            print(f"目标变量: {self.target_col}")

            return True
        except Exception as e:
            print(f"加载模型时出错: {e}")
            return False

    def evaluate_model(self, X_test, y_test):
        """Evaluate on the test windows and report RMSE/MAE in real units.

        Scaled values are un-scaled by writing them into the target column
        of an otherwise-zero matrix and inverse-transforming with the fitted
        MinMaxScaler (per-column scaling makes the zero filler harmless).

        Returns:
            (y_true_unscaled, y_pred_unscaled, rmse, mae)
        """
        predictions = self.model.predict(X_test)

        # Placeholder matrices so inverse_transform can be applied to a
        # single column.
        y_placeholder = np.zeros((len(y_test), len(self.all_cols)))
        y_placeholder[:, self.target_idx] = y_test
        pred_placeholder = np.zeros((len(predictions), len(self.all_cols)))
        pred_placeholder[:, self.target_idx] = predictions.reshape(-1)

        # Back to original units.
        y_inv = self.scaler.inverse_transform(y_placeholder)[:, self.target_idx]
        pred_inv = self.scaler.inverse_transform(pred_placeholder)[:, self.target_idx]

        # Error metrics in the target's own units.
        rmse = np.sqrt(mean_squared_error(y_inv, pred_inv))
        mae = mean_absolute_error(y_inv, pred_inv)

        print(f"测试集RMSE: {rmse:.2f}")
        print(f"测试集MAE: {mae:.2f}")

        return y_inv, pred_inv, rmse, mae

    def predict_future(self, days=7):
        """Recursively forecast the next `days` values of the target.

        Each step feeds the model the latest window, then slides the window
        forward with the prediction appended. NOTE(review): only the target
        column of the new row is updated — every other feature (lags, month,
        day, ...) is recycled via np.roll from old rows, so multi-day
        forecasts degrade.

        Returns:
            DataFrame of un-scaled predictions indexed by date (also written
            to a timestamped CSV), or None when no model is loaded.
        """
        if self.model is None:
            print("模型未加载，无法进行预测")
            return None

        # Start from the most recent window of scaled data.
        last_sequence = self.X[-1]
        future_predictions = []
        future_dates = []

        for i in range(days):
            # Predict the next (scaled) target value.
            next_pred = self.model.predict(last_sequence.reshape(1, self.time_step, len(self.all_cols)))[0]
            future_predictions.append(next_pred)

            # Slide the window: drop the oldest row and write the prediction
            # into the target column of the newest row.
            temp_seq = last_sequence.copy()
            temp_seq = np.roll(temp_seq, -1, axis=0)
            temp_seq[-1, self.target_idx] = next_pred
            last_sequence = temp_seq

            # Calendar date of the predicted day.
            next_date = self.df.index[-1] + pd.Timedelta(days=i + 1)
            future_dates.append(next_date)

        # Un-scale via the usual single-column placeholder trick.
        future_pred_placeholder = np.zeros((len(future_predictions), len(self.all_cols)))
        future_pred_placeholder[:, self.target_idx] = np.array(future_predictions).reshape(-1)
        future_pred_inv = self.scaler.inverse_transform(future_pred_placeholder)[:, self.target_idx]

        # Assemble the forecast frame, indexed by date.
        future_df = pd.DataFrame({
            'Date': future_dates,
            f'Predicted_{self.target_col}': future_pred_inv
        })
        future_df.set_index('Date', inplace=True)

        # Persist the forecast under a timestamped file name.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        csv_path = f'{self.model_prefix}_{self.target_col.replace(" ", "_").lower()}_forecast_{timestamp}.csv'
        future_df.to_csv(csv_path)
        print(f"未来{days}天预测结果已保存到 {csv_path}")

        return future_df

# NOTE(review): everything from here onward re-declares the top of this file —
# the imports, matplotlib font setup, and the WeatherForecaster class are all
# duplicated below (the later definitions shadow the earlier ones, making the
# first copy dead code). Consider removing one of the two copies.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import pickle
import os
import glob
from tensorflow.keras.models import Sequential, save_model, load_model
from tensorflow.keras.layers import LSTM, Dense, Dropout, GRU
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import IsolationForest
from sklearn.metrics import mean_squared_error, mean_absolute_error
from datetime import datetime

# Matplotlib setup for Chinese labels: render text with SimHei while keeping
# the minus sign displaying properly.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],
    'axes.unicode_minus': False,
})


class WeatherForecaster:
    """Changzhou weather forecasting system.

    NOTE(review): a verbatim (partial) copy of this class appears earlier in
    this file; this later definition is the one actually in effect.

    Pipeline: load a daily weather CSV, flag anomalies, add lag/seasonal
    features, scale, window into sequences, train an LSTM/GRU regressor for
    one target column, roll forecasts forward day by day, and plot results.
    """

    def __init__(self, data_file='常州.csv', target_col='Maximum Temperature',
                 time_step=10, model_type='gru', epochs=100, batch_size=32):
        """Initialize the forecaster.

        Args:
            data_file: path of the daily weather CSV to load.
            target_col: name of the column to forecast.
            time_step: number of past days fed to the model per sample.
            model_type: 'lstm' or 'gru' (any other value builds no recurrent layers).
            epochs: maximum number of training epochs.
            batch_size: mini-batch size used during training.
        """
        self.data_file = data_file
        self.target_col = target_col
        self.time_step = time_step
        self.model_type = model_type
        self.epochs = epochs
        self.batch_size = batch_size

        # The data file's base name (no extension) prefixes every artifact
        # (model .h5, info pickle, plots, forecast CSVs).
        self.model_prefix = os.path.splitext(os.path.basename(data_file))[0]
        self.model_path = f'{self.model_prefix}_{target_col.replace(" ", "_").lower()}_model.h5'
        self.info_path = f'{self.model_prefix}_{target_col.replace(" ", "_").lower()}_info.pkl'

        # Pipeline state, filled in by the methods below.
        self.df = None            # loaded / feature-engineered DataFrame
        self.model = None         # compiled Keras model
        self.scaler = None        # MinMaxScaler fitted over all_cols
        self.feature_cols = None  # input feature column names
        self.all_cols = None      # feature_cols plus the target (deduplicated)
        self.target_idx = None    # position of target_col inside all_cols
        self.X = None             # windowed inputs, shape (n, time_step, len(all_cols))
        self.y = None             # scaled next-day target per window

        print(f"初始化常州气象预测系统，目标变量: {target_col}")
        print(f"模型将保存为: {self.model_path}")

    def generate_sample_data(self):
        """Generate synthetic daily weather data for when the CSV fails to load.

        Returns a DataFrame indexed by date (2010-2020) with a yearly
        sinusoidal temperature cycle plus noise, and random rainfall,
        pressure, wind-speed and dew-point columns.
        """
        date_rng = pd.date_range(start='2010-01-01', end='2020-12-31', freq='D')
        n = len(date_rng)
        # Yearly sinusoid around 20 degrees with Gaussian noise.
        temps = 20 + 10 * np.sin(np.arange(n) * (2 * np.pi / 365)) + np.random.normal(0, 2, n)
        rainfall = np.abs(np.random.gamma(1, 5, n))
        pressure = 1010 + np.random.normal(0, 3, n)
        wind_speed = np.abs(np.random.normal(10, 5, n))
        dew_point = temps - 5 + np.random.normal(0, 1, n)

        df = pd.DataFrame({
            'Maximum Temperature': temps,
            'Total Rainfall': rainfall,
            'Mean Pressure': pressure,
            'Mean Wind Speed': wind_speed,
            'Mean Dew Point Temperature': dew_point
        }, index=date_rng)
        return df

    def load_data(self):
        """Load the Changzhou weather CSV into self.df.

        Falls back to generated sample data on any error. NOTE(review): the
        column assignment assumes the CSV has exactly these six columns in
        this order — any other layout raises and silently triggers the
        sample-data fallback.
        """
        try:
            df = pd.read_csv(self.data_file)
            # Overwrite the header with the canonical column names.
            df.columns = [
                'Maximum Temperature',
                'Total Rainfall',
                'Mean Pressure',
                'Mean Wind Speed',
                'Mean Dew Point Temperature',
                'Date'
            ]
            # Parse the date column and use it as the index.
            df['Date'] = pd.to_datetime(df['Date'])
            df.set_index('Date', inplace=True)
            # Keep rows in chronological order.
            df.sort_index(inplace=True)
            self.df = df
            print(f"成功加载数据，共 {len(df)} 条记录")
        except Exception as e:
            print(f"加载数据时出错: {e}")
            print("使用生成的示例数据代替")
            self.df = self.generate_sample_data()

        return self.df

    def detect_anomalies(self, contamination=0.01):
        """Flag outliers in the target column with an IsolationForest.

        Adds an 'anomaly' column (-1 = outlier, 1 = normal); flagged rows
        are kept, not dropped.
        """
        model_iso = IsolationForest(contamination=contamination, random_state=42)
        self.df['anomaly'] = model_iso.fit_predict(self.df[[self.target_col]])
        # Optionally drop the outliers instead of just flagging them:
        # self.df = self.df[self.df['anomaly'] == 1]
        print(f"异常检测完成，标记了 {sum(self.df['anomaly'] == -1)} 条异常记录")
        return self.df

    def add_features(self):
        """Add lag and calendar features to self.df in place."""
        # Lag features: the target value from each of the past 3 days.
        for i in range(1, 4):
            self.df[f'{self.target_col}_Lag_{i}'] = self.df[self.target_col].shift(i)

        # Simple seasonality proxies.
        self.df['Month'] = self.df.index.month
        self.df['Day'] = self.df.index.day

        # Drop the leading rows made NaN by the shifts.
        self.df = self.df.dropna()
        print("特征工程完成，添加了滞后特征和季节特征")

    def preprocess_data(self):
        """Scale the features and window them into supervised sequences.

        Builds self.X (windows of time_step rows over all_cols) and self.y
        (the scaled target value of the row right after each window).
        NOTE(review): this re-fits self.scaler on every call, so calling it
        after load_model() overwrites the scaler restored from the pickle.
        """
        # Input features (for the default target, the target also appears
        # directly as well as via its lag columns).
        self.feature_cols = ['Maximum Temperature', 'Mean Pressure', 'Mean Wind Speed',
                             'Mean Dew Point Temperature',
                             f'{self.target_col}_Lag_1', f'{self.target_col}_Lag_2', f'{self.target_col}_Lag_3',
                             'Month', 'Day']

        # Drop rows with missing values in any used column (target included
        # when it is not already one of the features).
        self.df = self.df.dropna(subset=self.feature_cols + ([self.target_col]
                                if self.target_col not in self.feature_cols else []))

        # Columns to scale: the features plus the target, deduplicated.
        self.all_cols = self.feature_cols.copy()
        if self.target_col not in self.all_cols:
            self.all_cols.append(self.target_col)

        # Remember where the target sits so predictions can be un-scaled later.
        self.target_idx = self.all_cols.index(self.target_col)

        # Scale every column into [0, 1].
        self.scaler = MinMaxScaler(feature_range=(0, 1))
        scaled_data = self.scaler.fit_transform(self.df[self.all_cols])

        # Sliding windows: X[i] covers rows i..i+time_step-1 and y[i] is the
        # scaled target at row i+time_step.
        X, y = [], []
        for i in range(len(scaled_data) - self.time_step):
            X.append(scaled_data[i:(i + self.time_step), :])
            y.append(scaled_data[i + self.time_step, self.target_idx])

        self.X = np.array(X)
        self.y = np.array(y)

        print(f"数据预处理完成，创建了 {len(X)} 个时间序列样本")
        return self.X, self.y

    def build_model(self):
        """Build and compile the recurrent regression model.

        Two stacked LSTM or GRU layers with dropout, then a small dense head
        that outputs a single scaled target value. Compiled with MSE loss
        and the Adam optimizer.
        """
        model = Sequential()

        if self.model_type == 'lstm':
            model.add(LSTM(64, return_sequences=True, input_shape=(self.time_step, len(self.all_cols))))
            model.add(Dropout(0.2))
            model.add(LSTM(32))
        elif self.model_type == 'gru':
            model.add(GRU(64, return_sequences=True, input_shape=(self.time_step, len(self.all_cols))))
            model.add(Dropout(0.2))
            model.add(GRU(32))

        model.add(Dropout(0.2))
        model.add(Dense(16, activation='relu'))
        model.add(Dense(1))
        model.compile(loss='mean_squared_error', optimizer='adam')

        self.model = model
        print(f"构建了 {self.model_type.upper()} 模型")
        return model

    def train_model(self, train_size=0.8):
        """Train the model on a chronological train/test split.

        The last (1 - train_size) fraction of the windows doubles as the
        validation set. The best checkpoint (by val_loss) is reloaded into
        self.model before returning.

        Returns:
            (history, X_test, y_test)
        """
        # Chronological split — no shuffling for time-series data.
        split_idx = int(len(self.X) * train_size)
        X_train, X_test = self.X[:split_idx], self.X[split_idx:]
        y_train, y_test = self.y[:split_idx], self.y[split_idx:]

        # Early stopping plus best-model checkpointing on validation loss.
        checkpoint_path = f'best_{self.model_prefix}_{self.target_col.replace(" ", "_").lower()}_model.h5'
        model_checkpoint = ModelCheckpoint(
            checkpoint_path,
            save_best_only=True,
            monitor='val_loss'
        )
        early_stop = EarlyStopping(
            monitor='val_loss',
            patience=10,
            restore_best_weights=True
        )

        # Fit; the test split is (re)used as the validation data.
        history = self.model.fit(
            X_train, y_train,
            epochs=self.epochs,
            batch_size=self.batch_size,
            validation_data=(X_test, y_test),
            callbacks=[early_stop, model_checkpoint],
            verbose=1
        )

        print(f"模型训练完成，最佳模型保存到 {checkpoint_path}")

        # Reload the best checkpoint from disk.
        self.model = load_model(checkpoint_path)

        return history, X_test, y_test

    def save_model(self):
        """Persist the trained model (.h5) and its preprocessing info (.pkl).

        The pickle stores everything predict-time code needs to reproduce
        the preprocessing: the fitted scaler, column lists, target index,
        window length, last training date and a creation timestamp.
        """
        # This resolves to the keras `save_model` imported at module level
        # (the method name only shadows it as a class attribute).
        save_model(self.model, self.model_path)

        model_info = {
            'scaler': self.scaler,
            'feature_cols': self.feature_cols,
            'target_col': self.target_col,
            'target_idx': self.target_idx,
            'time_step': self.time_step,
            'all_cols': self.all_cols,
            'last_date': self.df.index[-1],
            'model_type': self.model_type,
            'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }

        with open(self.info_path, 'wb') as f:
            pickle.dump(model_info, f)

        print(f"模型已保存到 {self.model_path}")
        print(f"模型信息已保存到 {self.info_path}")

    def load_model(self):
        """Load a previously saved model and its preprocessing info.

        Restores the scaler, column lists and hyper-parameters so inference
        matches the training-time preprocessing.

        Returns:
            True on success, False when the files are missing or unreadable.
        """
        if not os.path.exists(self.model_path) or not os.path.exists(self.info_path):
            print(f"找不到模型文件 {self.model_path} 或信息文件 {self.info_path}")
            return False

        try:
            # The global keras `load_model`, not this method (module scope).
            self.model = load_model(self.model_path)

            with open(self.info_path, 'rb') as f:
                model_info = pickle.load(f)

            # Restore the preprocessing state captured at save time.
            self.scaler = model_info['scaler']
            self.feature_cols = model_info['feature_cols']
            self.target_col = model_info['target_col']
            self.target_idx = model_info['target_idx']
            self.time_step = model_info['time_step']
            self.all_cols = model_info['all_cols']
            self.model_type = model_info['model_type']

            print(f"成功加载模型 {self.model_path}")
            print(f"模型创建于: {model_info.get('timestamp', '未知')}")
            print(f"目标变量: {self.target_col}")

            return True
        except Exception as e:
            print(f"加载模型时出错: {e}")
            return False

    def evaluate_model(self, X_test, y_test):
        """Evaluate on the test windows and report RMSE/MAE in real units.

        Scaled values are un-scaled by writing them into the target column
        of an otherwise-zero matrix and inverse-transforming with the fitted
        MinMaxScaler (per-column scaling makes the zero filler harmless).

        Returns:
            (y_true_unscaled, y_pred_unscaled, rmse, mae)
        """
        predictions = self.model.predict(X_test)

        # Placeholder matrices so inverse_transform can be applied to a
        # single column.
        y_placeholder = np.zeros((len(y_test), len(self.all_cols)))
        y_placeholder[:, self.target_idx] = y_test
        pred_placeholder = np.zeros((len(predictions), len(self.all_cols)))
        pred_placeholder[:, self.target_idx] = predictions.reshape(-1)

        # Back to original units.
        y_inv = self.scaler.inverse_transform(y_placeholder)[:, self.target_idx]
        pred_inv = self.scaler.inverse_transform(pred_placeholder)[:, self.target_idx]

        # Error metrics in the target's own units.
        rmse = np.sqrt(mean_squared_error(y_inv, pred_inv))
        mae = mean_absolute_error(y_inv, pred_inv)

        print(f"测试集RMSE: {rmse:.2f}")
        print(f"测试集MAE: {mae:.2f}")

        return y_inv, pred_inv, rmse, mae

    def predict_future(self, days=7):
        """Recursively forecast the next `days` values of the target.

        Each step feeds the model the latest window, then slides the window
        forward with the prediction appended. NOTE(review): only the target
        column of the new row is updated — every other feature (lags, month,
        day, ...) is recycled via np.roll from old rows, so multi-day
        forecasts degrade.

        Returns:
            DataFrame of un-scaled predictions indexed by date (also written
            to a timestamped CSV), or None when no model is loaded.
        """
        if self.model is None:
            print("模型未加载，无法进行预测")
            return None

        # Start from the most recent window of scaled data.
        last_sequence = self.X[-1]
        future_predictions = []
        future_dates = []

        for i in range(days):
            # Predict the next (scaled) target value.
            next_pred = self.model.predict(last_sequence.reshape(1, self.time_step, len(self.all_cols)))[0]
            future_predictions.append(next_pred)

            # Slide the window: drop the oldest row and write the prediction
            # into the target column of the newest row.
            temp_seq = last_sequence.copy()
            temp_seq = np.roll(temp_seq, -1, axis=0)
            temp_seq[-1, self.target_idx] = next_pred
            last_sequence = temp_seq

            # Calendar date of the predicted day.
            next_date = self.df.index[-1] + pd.Timedelta(days=i + 1)
            future_dates.append(next_date)

        # Un-scale via the usual single-column placeholder trick.
        future_pred_placeholder = np.zeros((len(future_predictions), len(self.all_cols)))
        future_pred_placeholder[:, self.target_idx] = np.array(future_predictions).reshape(-1)
        future_pred_inv = self.scaler.inverse_transform(future_pred_placeholder)[:, self.target_idx]

        # Assemble the forecast frame, indexed by date.
        future_df = pd.DataFrame({
            'Date': future_dates,
            f'Predicted_{self.target_col}': future_pred_inv
        })
        future_df.set_index('Date', inplace=True)

        # Persist the forecast under a timestamped file name.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        csv_path = f'{self.model_prefix}_{self.target_col.replace(" ", "_").lower()}_forecast_{timestamp}.csv'
        future_df.to_csv(csv_path)
        print(f"未来{days}天预测结果已保存到 {csv_path}")

        return future_df

    def get_predictions(self, days_to_predict=7, visualize=True, save_results=True):
        """Convenience wrapper: load data and a saved model, then forecast.

        Args:
            days_to_predict: number of days to forecast.
            visualize: whether to plot the forecast.
            save_results: whether to keep the forecast CSV written by
                predict_future (when False, the newest matching CSV is deleted).

        Returns:
            DataFrame with the forecast, or None when no saved model exists.
        """
        self.load_data()

        # Bail out when no trained model is on disk.
        if not self.load_model():
            print(f"警告: 找不到模型 {self.model_path}，无法进行预测")
            print("使用 train_and_save_model() 方法训练新模型")
            return None

        # Rebuild the training-time features and windows.
        # NOTE(review): preprocess_data re-fits the scaler on the current
        # data, replacing the scaler just restored by load_model() — confirm
        # this is intended.
        self.add_features()

        self.preprocess_data()

        future_df = self.predict_future(days=days_to_predict)

        if visualize:
            self.visualize_forecast(future_df)

        # predict_future always writes a CSV; remove the newest one when the
        # caller does not want it kept.
        if not save_results:
            csv_files = glob.glob(f'{self.model_prefix}_{self.target_col.replace(" ", "_").lower()}_forecast_*.csv')
            if csv_files:
                latest_file = max(csv_files, key=os.path.getctime)
                os.remove(latest_file)
                print(f"预测结果文件已删除: {latest_file}")

        return future_df

    def train_and_save_model(self):
        """Train a new model end-to-end and save it.

        Steps: load -> anomaly flagging -> features -> scaling/windowing ->
        build -> train; then plot the loss curves and test results and save
        the model plus its info pickle. Always returns True.
        """
        print("开始训练新模型...")
        # Load the raw data.
        self.load_data()

        # Flag outliers in the target column.
        self.detect_anomalies()

        # Add lag and calendar features.
        self.add_features()

        # Scale and window the data.
        self.preprocess_data()

        # Build the network.
        self.build_model()

        # Train (also reloads the best checkpoint).
        history, X_test, y_test = self.train_model()

        # Plot the loss curves.
        self.visualize_history(history)

        # Dates of the test labels: each window's label sits time_step rows
        # after the window start, so this slice lines up with y_test.
        # NOTE(review): the hard-coded 0.8 must match train_model's default.
        split_idx = int(len(self.X) * 0.8)
        test_dates = self.df.index[split_idx + self.time_step:]
        y_true, y_pred, rmse, mae = self.evaluate_model(X_test, y_test)

        # Plot predictions vs. actuals on the test span.
        self.visualize_test_results(test_dates, y_true, y_pred, rmse)

        # Persist model + preprocessing info.
        self.save_model()
        print(f"模型训练并保存完成: {self.model_path}")

        return True

    def visualize_history(self, history):
        """Plot training/validation loss curves and save them to a PNG."""
        plt.figure(figsize=(10, 6))
        plt.plot(history.history['loss'], label='训练损失')
        plt.plot(history.history['val_loss'], label='验证损失')
        plt.title(f'常州{self.target_col}预测模型训练历史')
        plt.xlabel('轮次')
        plt.ylabel('均方误差')
        plt.legend()
        plt.grid(True)

        # Timestamped file name so successive runs do not overwrite plots.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        plt.savefig(f'{self.model_prefix}_{self.target_col.replace(" ", "_").lower()}_training_history_{timestamp}.png')
        plt.close()

    def visualize_test_results(self, test_dates, y_true, y_pred, rmse):
        """Plot actual vs. predicted values on the test span, save to a PNG."""
        plt.figure(figsize=(14, 8))
        plt.plot(test_dates, y_true, label=f'实际{self.target_col}', color='blue')
        plt.plot(test_dates, y_pred, label=f'预测{self.target_col}', color='red')
        plt.title(f'常州市{self.target_col}预测 (RMSE: {rmse:.2f})')
        plt.xlabel('日期')
        plt.ylabel(self.target_col)
        plt.legend()
        plt.grid(True)

        # Timestamped file name so successive runs do not overwrite plots.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        plt.savefig(f'{self.model_prefix}_{self.target_col.replace(" ", "_").lower()}_test_results_{timestamp}.png')
        plt.close()

    def visualize_forecast(self, future_df):
        """Plot recent history next to the forecast with per-point labels and
        summary statistics, save a high-resolution PNG, and show the figure."""
        plt.figure(figsize=(14, 8))

        # Recent history for context.
        history_days = 30  # show only the most recent 30 days of history
        plt.plot(self.df.index[-history_days:],
                 self.df[self.target_col].values[-history_days:],
                 'o-', color='royalblue', alpha=0.7,
                 label=f'历史{self.target_col}')

        # Forecasted values.
        plt.plot(future_df.index,
                 future_df[f'Predicted_{self.target_col}'],
                 'o-', color='crimson', alpha=0.8,
                 linewidth=2.5, markersize=8,
                 label=f'预测{self.target_col}')

        # Annotate each forecast point with its value.
        for date, value in zip(future_df.index, future_df[f'Predicted_{self.target_col}']):
            plt.text(date, value + 0.5, f'{value:.1f}',
                     ha='center', va='bottom', color='darkred',
                     fontsize=10, fontweight='bold')

        # Shade the forecast region.
        plt.axvspan(self.df.index[-1], future_df.index[-1],
                    alpha=0.1, color='pink', label='预测区域')

        # Titles, axes, grid and legend.
        plt.title(f'常州市未来{len(future_df)}天{self.target_col}预测',
                  fontsize=18, fontweight='bold', pad=15)
        plt.xlabel('日期', fontsize=14)
        plt.ylabel(f'{self.target_col}', fontsize=14)
        plt.grid(True, linestyle='--', alpha=0.7)
        plt.legend(loc='best', fontsize=12, framealpha=0.8)

        # Compact month-day tick labels, one every 5 days.
        plt.gca().xaxis.set_major_formatter(plt.matplotlib.dates.DateFormatter('%m-%d'))
        plt.gca().xaxis.set_major_locator(plt.matplotlib.dates.DayLocator(interval=5))
        plt.gcf().autofmt_xdate()

        # Summary statistics shown in the corner box.
        avg_history = self.df[self.target_col][-history_days:].mean()
        avg_prediction = future_df[f'Predicted_{self.target_col}'].mean()
        max_prediction = future_df[f'Predicted_{self.target_col}'].max()
        min_prediction = future_df[f'Predicted_{self.target_col}'].min()

        stat_text = f"历史平均: {avg_history:.1f}\n"
        stat_text += f"预测平均: {avg_prediction:.1f}\n"
        stat_text += f"预测最高: {max_prediction:.1f}\n"
        stat_text += f"预测最低: {min_prediction:.1f}"

        plt.annotate(stat_text, xy=(0.97, 0.97), xycoords='axes fraction',
                     fontsize=11, ha='right', va='top',
                     bbox=dict(boxstyle="round,pad=0.5", fc="lightyellow", alpha=0.8))

        plt.tight_layout()

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        plt.savefig(f'{self.model_prefix}_{self.target_col.replace(" ", "_").lower()}_forecast_{timestamp}.png',
                    dpi=300, bbox_inches='tight')
        plt.show()

    def run(self, train_new_model=False, days_to_predict=7):
        """Run the full forecasting workflow.

        Reuses a saved model when one exists (unless train_new_model is
        True); otherwise trains, evaluates and saves a new one. Finally
        forecasts days_to_predict days ahead and plots the result.

        Returns:
            The forecast DataFrame.
        """
        self.load_data()

        # Prefer a previously saved model unless retraining was requested.
        if not train_new_model and self.load_model():
            print("使用已保存的模型进行预测")

            # Rebuild the training-time features...
            self.add_features()

            # ...and the scaled windows (see preprocess_data NOTE about the
            # scaler being re-fitted here).
            self.preprocess_data()
        else:
            print("训练新模型")
            # Flag outliers in the target column.
            self.detect_anomalies()

            # Add lag and calendar features.
            self.add_features()

            # Scale and window the data.
            self.preprocess_data()

            # Build the network.
            self.build_model()

            # Train (also reloads the best checkpoint).
            history, X_test, y_test = self.train_model()

            # Plot the loss curves.
            self.visualize_history(history)

            # Evaluate on the held-out tail (0.8 must match train_model's default).
            split_idx = int(len(self.X) * 0.8)
            test_dates = self.df.index[split_idx + self.time_step:]
            y_true, y_pred, rmse, mae = self.evaluate_model(X_test, y_test)

            # Plot predictions vs. actuals on the test span.
            self.visualize_test_results(test_dates, y_true, y_pred, rmse)

            # Persist model + preprocessing info.
            self.save_model()

        # Forecast ahead and plot it.
        future_df = self.predict_future(days=days_to_predict)
        self.visualize_forecast(future_df)
        return future_df

class CustomWeatherForecaster(WeatherForecaster):
    """Weather forecaster adapted to a custom CSV column layout.

    Overrides load_data to translate the custom column names (and fill in a
    missing dew-point column) before handing off to the parent pipeline.
    """

    def load_data(self):
        """Load a CSV that uses the custom column naming scheme.

        Falls back to generated sample data on any error, exactly like the
        parent implementation.
        """
        try:
            raw = pd.read_csv(self.data_file)

            # Translate the custom column names into the ones the parent
            # class expects. (The 'temperture' key matches the source data's
            # own spelling.)
            renamed = raw.rename(columns={
                'temperture': 'Maximum Temperature',
                '气压': 'Mean Pressure',
                '降雨': 'Total Rainfall',
                '风速': 'Mean Wind Speed',
                'date': 'Date',
            })

            # Supply a dew-point column when the source lacks one, using a
            # crude "temperature minus 5" estimate.
            if 'Mean Dew Point Temperature' not in renamed.columns:
                renamed['Mean Dew Point Temperature'] = renamed['Maximum Temperature'] - 5

            # Parse dates, index by them, and keep rows chronological.
            renamed['Date'] = pd.to_datetime(renamed['Date'])
            renamed = renamed.set_index('Date').sort_index()

            self.df = renamed
            print(f"成功加载数据，共 {len(renamed)} 条记录")
        except Exception as e:
            print(f"加载数据时出错: {e}")
            print("使用生成的示例数据代替")
            self.df = self.generate_sample_data()

        return self.df
# Script entry point.
def main():
    """Train a forecaster on the custom Changzhou CSV, then print a
    7-day maximum-temperature forecast."""
    forecaster = CustomWeatherForecaster(
        data_file='./merged_output/7.csv',
        target_col='Maximum Temperature',
    )
    # Fit and persist a fresh model first...
    forecaster.train_and_save_model()
    # ...then reload it and roll the forecast forward one week.
    predictions = forecaster.get_predictions(days_to_predict=7)
    print("\n未来7天气温预测结果:")
    print(predictions)


if __name__ == "__main__":
    main()