from General import MapLoader
from General.DatabaseReader import DatabaseReader
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
import matplotlib.pyplot as plt
import os
import pickle



class TimeEstimaterV1:
    """Simple delivery-duration (taketime) estimator.

    Uses a small fully connected network over time-of-day / weekday and
    start/end node features; road-segment features are not used.
    """

    def __init__(self):
        # Resolve artifact paths relative to this module so train() and
        # predict() always agree, regardless of the current working
        # directory. (Previously train() saved relative to the CWD while
        # predict() loaded relative to the module directory, so a model
        # trained from another directory could never be found.)
        base_dir = os.path.dirname(os.path.abspath(__file__))
        self.model_path = os.path.join(base_dir, 'model', 'time_estimater_model_v1.h5')
        self.preprocessor_path = os.path.join(base_dir, 'model', 'preprocessor.pkl')

    def plot_taketime_distribution(self, y, title):
        """Show a histogram of taketime values.

        :param y: sequence / pandas Series of taketime values
        :param title: figure title
        """
        plt.figure(figsize=(10, 5))
        plt.hist(y, bins=30, alpha=0.7, color='blue', edgecolor='black')
        plt.title(title)
        plt.xlabel('Taketime')
        plt.ylabel('Frequency')
        plt.grid(axis='y')
        plt.show()

    @staticmethod
    def _densify(matrix):
        """Convert a scipy sparse matrix to a dense ndarray; pass dense through."""
        return matrix.toarray() if hasattr(matrix, 'toarray') else matrix

    def train(self, epochs=1, batch_size=8):
        """Train the model on robot-order data and persist model + preprocessor.

        :param epochs: number of training epochs (default 1, matching the
            previous hard-coded value — likely a debug setting; raise for
            real training)
        :param batch_size: mini-batch size for training
        """
        db_reader = DatabaseReader()
        df = db_reader.read_robot_order()

        # Derive time features from the order creation timestamp.
        df['create_time'] = pd.to_datetime(df['create_time'])
        df['create_hour'] = df['create_time'].dt.hour
        df['create_weekday'] = df['create_time'].dt.weekday

        # Features and regression target.
        X = df[['create_hour', 'create_weekday', 'start_node', 'end_node']]
        y = df['taketime']

        self.plot_taketime_distribution(y, 'Original Taketime Distribution')

        # IQR-based outlier filtering on the target.
        q1 = y.quantile(0.25)
        q3 = y.quantile(0.75)
        iqr = q3 - q1
        mask = (y >= q1 - 1.5 * iqr) & (y <= q3 + 1.5 * iqr)
        X = X[mask]
        y = y[mask]

        self.plot_taketime_distribution(y, 'Filtered Taketime Distribution')

        # One-hot encode the node ids; numeric time features pass through.
        column_transformer = ColumnTransformer([
            ('ohe', OneHotEncoder(handle_unknown='ignore'), ['start_node', 'end_node'])
        ], remainder='passthrough')

        # with_mean=False lets StandardScaler operate on the sparse
        # one-hot output without densifying it.
        pipeline = Pipeline([
            ('transform', column_transformer),
            ('scaler', StandardScaler(with_mean=False)),
        ])

        # Densify only if the pipeline produced a sparse matrix (the
        # one-hot step usually does) — Keras needs a dense array.
        X_transformed = self._densify(pipeline.fit_transform(X))

        X_train, X_test, y_train, y_test = train_test_split(
            X_transformed, y, test_size=0.2, random_state=42)

        # Small fully connected regression network.
        model = Sequential([
            Dense(64, input_dim=X_train.shape[1], activation='relu'),
            Dropout(0.2),
            Dense(32, activation='relu'),
            Dropout(0.2),
            Dense(32, activation='relu'),
            Dropout(0.2),
            Dense(1),  # single linear output: predicted taketime
        ])
        model.compile(optimizer='adam', loss='mse', metrics=['mae'])

        model.fit(X_train, y_train, epochs=epochs,
                  validation_split=0.2, batch_size=batch_size, verbose=1)

        # Held-out evaluation.
        loss, mae = model.evaluate(X_test, y_test, verbose=0)
        print(f"Test MAE: {mae:.2f}")

        predictions = model.predict(X_test)
        print(f"Predictions: {predictions.flatten()}")

        # Persist trained model and fitted preprocessor; make sure the
        # target directory exists (it is not created by model.save()).
        os.makedirs(os.path.dirname(self.model_path), exist_ok=True)
        model.save(self.model_path)
        print(f"Model saved to {self.model_path}")
        with open(self.preprocessor_path, 'wb') as f:
            pickle.dump(pipeline, f)

    def predict(self, X_new):
        """Predict taketime for new samples using the saved artifacts.

        :param X_new: DataFrame with columns create_hour, create_weekday,
            start_node, end_node
        :return: 1-D numpy array of predicted taketime values
        :raises FileNotFoundError: if the model or preprocessor has not
            been trained/saved yet
        """
        if not os.path.exists(self.model_path) or not os.path.exists(self.preprocessor_path):
            raise FileNotFoundError("Model or preprocessor file not found. Please train the model first.")

        # NOTE(review): unpickling is only safe on trusted, locally
        # produced files — never load a preprocessor from untrusted input.
        with open(self.preprocessor_path, 'rb') as f:
            pipeline = pickle.load(f)
        model = tf.keras.models.load_model(self.model_path)

        # Apply the same transform as training; densify sparse output so
        # inference matches the training-time input format.
        X_new_transformed = self._densify(pipeline.transform(X_new))

        predictions = model.predict(X_new_transformed)
        return predictions.flatten()

if __name__ == '__main__':
    estimator = TimeEstimaterV1()
    # estimator.train()
    # One hand-built sample to exercise prediction.
    sample = {
        'create_hour': [14],
        'create_weekday': [2],
        'start_node': ['063$SITE-00966'],
        'end_node': ['049$SITE-00520'],
    }
    estimator.predict(pd.DataFrame(sample))