import numpy as np
import pandas as pd
import logging
import warnings
import matplotlib.pyplot as plt
import pickle
import os
from sklearn.preprocessing import LabelEncoder, StandardScaler, OneHotEncoder
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score

warnings.filterwarnings('ignore')

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Configuration
# Path to the Guangdong weather observations CSV (absolute, machine-specific).
FILE_PATH = r"C:\Users\hp\Desktop\mao\final_model\guangdong.csv"
# Categorical text feature columns (wind direction).
TEXT_FEATURE_COLS = ['风向']
# Numeric feature columns: temperature, precipitation, wind, pressure,
# humidity, air quality, visibility, cloud cover, dew point, radiation.
NUMERIC_FEATURE_COLS = ['温度℃', '降水量(mm)', '风力(级)', '风速(km/h)', '风向角度(度)', '气压(hPa)',
                        '湿度(%)', '空气质量', '能见度(km)', '云量%', '露点℃', '短波辐射W/m²',
                        '直接辐射W/m²', '散射辐射W/m²', '直接正常辐照度W/m²']
# Target column: the weather-condition label to classify.
LABEL_COL = '天气'
# Sliding-window length fed to the sequence model.
TIME_STEP = 8
# Fraction of windows held out (chronologically) as the test set.
TEST_SIZE = 0.2

# Ensure the model/artifact output directory exists.
os.makedirs('model', exist_ok=True)


def save_preprocessing_objects(scalers, encoders, class_mapping, label_encoder):
    """Persist the preprocessing artifacts needed at inference time.

    Writes four pickle files under ``model/``: the per-column numeric
    scalers, the text-feature encoders, the class-index -> label mapping,
    and the fitted label encoder.

    Args:
        scalers: dict mapping numeric column name -> fitted scaler.
        encoders: dict mapping text column name -> fitted encoder.
        class_mapping: dict mapping encoded class index -> original label.
        label_encoder: encoder fitted on the target column.

    Raises:
        Exception: any failure is logged and re-raised.
    """
    # (path, object, success log message) — replaces four copy-pasted
    # dump/log stanzas with one data-driven loop.
    artifacts = [
        ('model/scalers.pkl', scalers, "数值特征标量已保存"),
        ('model/encoders.pkl', encoders, "文本特征编码器已保存"),
        ('model/class_mapping.pkl', class_mapping, "类别映射已保存"),
        ('model/label_encoder.pkl', label_encoder, "标签编码器已保存"),
    ]
    try:
        for path, obj, message in artifacts:
            with open(path, 'wb') as f:
                pickle.dump(obj, f)
            logger.info(message)
    except Exception as e:
        logger.error(f"保存预处理对象失败: {str(e)}")
        raise


def read_csv(file_path):
    """Load the weather CSV into a DataFrame, skipping malformed rows.

    Header names are stripped of stray whitespace. Any read failure is
    logged and re-raised.
    """
    try:
        frame = pd.read_csv(
            file_path,
            encoding='utf-8',
            sep=',',
            on_bad_lines='skip',
            low_memory=False,
        )
        # Normalize header names: drop surrounding whitespace.
        frame.columns = [name.strip() for name in frame.columns]
        logger.info(f"CSV读取完成：形状{frame.shape}")
        return frame
    except Exception as e:
        logger.error(f"CSV读取失败：{str(e)}")
        raise


def drop_unused_cols(df):
    """Remove identifier columns (region, timestamp) not used as features."""
    result = df.drop(columns=['地区', '时间'], errors='ignore')
    logger.info(f"排除后形状：{result.shape}")
    return result


def process_text_features(df, text_cols):
    """Label-encode each text column in place.

    Missing cells become the literal category '未知' before encoding.
    Columns absent from ``df`` are skipped with a warning.

    Returns:
        (df, encoders) where ``encoders`` maps column name -> fitted
        LabelEncoder.
    """
    fitted = {}
    for name in text_cols:
        if name not in df.columns:
            logger.warning(f"列{name}不存在，跳过")
            continue
        values = df[name].fillna('未知').astype(str)
        encoder = LabelEncoder()
        df[name] = encoder.fit_transform(values)
        fitted[name] = encoder
    return df, fitted


def clean_numeric_features(df, numeric_cols):
    """Coerce messy numeric columns (units, stray text) to floats.

    For each listed column: extract the first signed decimal number from
    the string form of every cell, convert to float, and fill missing
    values with the column mean. A column with no valid numbers has a NaN
    mean, so fall back to 0 instead of leaving NaNs behind (the original
    fillna(NaN) silently left the column full of NaNs).

    Args:
        df: input DataFrame, modified in place.
        numeric_cols: column names expected to hold numeric data.

    Returns:
        The same DataFrame with the listed columns numeric and NaN-free.
    """
    number_pattern = r'(-?\d+\.?\d*)'
    for col in numeric_cols:
        if col not in df.columns:
            logger.warning(f"列{col}不存在，跳过")
            continue

        # str.extract returns the first match directly — replaces the
        # findall + apply(first-element) pair with identical semantics.
        extracted = df[col].astype(str).str.extract(number_pattern)[0]
        df[col] = pd.to_numeric(extracted, errors='coerce')

        mean_val = df[col].mean()
        df[col] = df[col].fillna(mean_val if pd.notna(mean_val) else 0)
    return df


def normalize_numeric_features(df, numeric_cols):
    """Standardize each numeric column to zero mean / unit variance.

    Columns absent from ``df`` are silently skipped.

    Returns:
        (df, scalers) where ``scalers`` maps column name -> fitted
        StandardScaler, so the same transform can be replayed later.
    """
    fitted_scalers = {}
    for name in numeric_cols:
        if name not in df.columns:
            continue
        column_scaler = StandardScaler()
        reshaped = df[name].values.reshape(-1, 1)
        df[name] = column_scaler.fit_transform(reshaped)
        fitted_scalers[name] = column_scaler
    return df, fitted_scalers


def create_sequences(data, labels, time_step):
    """Build sliding windows over ``data`` with the label that immediately
    follows each window.

    Returns:
        (X, y) as numpy arrays: X has shape (n_windows, time_step, ...),
        y holds labels[time_step:], aligned with the windows.
    """
    n_windows = len(data) - time_step
    windows = [data[start:start + time_step] for start in range(n_windows)]
    targets = [labels[start + time_step] for start in range(n_windows)]
    return np.array(windows), np.array(targets)


def process_data():
    """End-to-end preprocessing pipeline.

    Reads the CSV, drops identifier columns, cleans/encodes/scales the
    features, label-encodes the target, persists all preprocessing
    artifacts to ``model/``, builds sliding-window sequences, and returns
    a chronological train/test split.

    Returns:
        dict with keys 'train_X', 'train_y', 'test_X', 'test_y',
        'class_mapping', 'numeric_scalers', 'text_encoders'.

    Raises:
        ValueError: if the label column is missing from the CSV.
    """
    df = read_csv(FILE_PATH)
    df = drop_unused_cols(df)
    df = clean_numeric_features(df, NUMERIC_FEATURE_COLS)
    df, text_encoders = process_text_features(df, TEXT_FEATURE_COLS)
    # NOTE(review): scalers/encoders are fit on the FULL dataset before the
    # train/test split below, so test-set statistics leak into training.
    # Consider fitting on the training portion only — confirm intent.
    df, numeric_scalers = normalize_numeric_features(df, NUMERIC_FEATURE_COLS)

    if LABEL_COL not in df.columns:
        raise ValueError(f"标签列{LABEL_COL}不存在")
    labels = df[LABEL_COL].values
    label_le = LabelEncoder()
    labels = label_le.fit_transform(labels)
    # Map encoded class index -> original label string, for reporting.
    class_mapping = dict(zip(label_le.transform(label_le.classes_), label_le.classes_))
    logger.info(f"标签映射：{class_mapping}")

    # Persist preprocessing objects for reuse at inference time.
    save_preprocessing_objects(numeric_scalers, text_encoders, class_mapping, label_le)

    feature_df = df.drop([LABEL_COL], axis=1)
    feature_data = feature_df.values
    X, y = create_sequences(feature_data, labels, TIME_STEP)

    # Chronological split (no shuffle) — appropriate for time-series data.
    split_idx = int(len(X) * (1 - TEST_SIZE))
    train_X, train_y = X[:split_idx], y[:split_idx]
    test_X, test_y = X[split_idx:], y[split_idx:]
    logger.info(f"训练集：{train_X.shape}, 测试集：{test_X.shape}")

    return {
        'train_X': train_X, 'train_y': train_y,
        'test_X': test_X, 'test_y': test_y,
        'class_mapping': class_mapping,
        'numeric_scalers': numeric_scalers,
        'text_encoders': text_encoders
    }


# NOTE(review): this runs the full preprocessing pipeline (CSV read plus
# pickle writes under model/) at import time, not only when executed as a
# script. If this module is ever imported, consider moving it under the
# `if __name__ == "__main__":` guard — the training block below already
# assumes `processed_data` exists.
processed_data = process_data()
print("数据处理完成")
print(f"训练集形状：{processed_data['train_X'].shape}")
print(f"标签映射：{processed_data['class_mapping']}")


# 模型相关部分
def import_tensorflow_and_keras():
    """Lazily import TensorFlow/Keras (and optionally TCN) into globals.

    Returns:
        True when TensorFlow/Keras loaded successfully; False otherwise.

    The optional ``tcn`` package is imported separately: previously a
    missing ``tcn`` failed this whole function, so the CNN fallback branch
    in the model builder (guarded by ``TCN is not None``) was unreachable.
    Now a missing ``tcn`` just sets ``TCN = None`` and the run proceeds.
    """
    try:
        global Adam, tf, Input, Conv1D, MaxPooling1D, GlobalAveragePooling1D
        global MultiHeadAttention, Dropout, LayerNormalization, Add, Dense, concatenate, BatchNormalization
        global Model, ReduceLROnPlateau, EarlyStopping, regularizers, TCN

        from keras.optimizers import Adam
        import tensorflow as tf
        from keras.layers import (Input, Conv1D, MaxPooling1D, GlobalAveragePooling1D,
                                  MultiHeadAttention, Dropout, LayerNormalization,
                                  Add, Dense, concatenate, BatchNormalization)
        from keras.models import Model
        from keras.callbacks import ReduceLROnPlateau, EarlyStopping
        from keras import regularizers
    except ImportError as e:
        logger.error(f"导入 TensorFlow/Keras 失败: {str(e)}")
        return False

    # TCN is optional — fall back to the CNN branch when unavailable.
    try:
        from tcn import TCN
    except ImportError:
        logger.warning("tcn 包不可用，将使用 CNN 替代分支")
        TCN = None
    return True


def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout):
    """One Transformer encoder block with a learned positional encoding.

    Args:
        inputs: (batch, seq_len, features) tensor.
        head_size: key dimension per attention head.
        num_heads: number of attention heads.
        ff_dim: hidden width of the feed-forward sub-layer.
        dropout: dropout rate after attention and after the FFN.

    Returns:
        Tensor with the same shape as ``inputs``.
    """
    # Lazy import, in case TensorFlow has not been loaded yet.
    if 'tf' not in globals():
        if not import_tensorflow_and_keras():
            raise RuntimeError("无法导入 TensorFlow/Keras，无法创建模型")

    seq_len = inputs.shape[1]
    # Learned positional encoding. The trailing singleton dim is the fix:
    # projecting a (1, seq_len, 1) tensor yields a distinct feature vector
    # per position, whereas the previous (1, seq_len) -> Dense produced a
    # single (1, features) vector broadcast identically to every timestep,
    # i.e. it carried no positional information at all.
    pos_encoding = tf.range(start=0, limit=seq_len, delta=1)
    pos_encoding = tf.expand_dims(pos_encoding, axis=0)    # (1, seq_len)
    pos_encoding = tf.expand_dims(pos_encoding, axis=-1)   # (1, seq_len, 1)
    pos_encoding = tf.cast(pos_encoding, dtype=tf.float32)
    pos_encoding = Dense(inputs.shape[-1])(pos_encoding)   # (1, seq_len, features)
    x = inputs + pos_encoding

    # Self-attention sub-layer with residual connection + layer norm.
    attn_output = MultiHeadAttention(num_heads=num_heads, key_dim=head_size)(x, x)
    attn_output = Dropout(dropout)(attn_output)
    out1 = LayerNormalization(epsilon=1e-6)(Add()([x, attn_output]))

    # Position-wise feed-forward sub-layer with residual + layer norm.
    ffn_output = Dense(ff_dim, activation="relu")(out1)
    ffn_output = BatchNormalization()(ffn_output)
    ffn_output = Dense(ff_dim, activation="relu")(ffn_output)
    ffn_output = Dense(inputs.shape[-1])(ffn_output)
    ffn_output = Dropout(dropout)(ffn_output)
    return LayerNormalization(epsilon=1e-6)(Add()([out1, ffn_output]))


def build_cnn_tcn_transformer_classification(input_shape, n_classes, time_steps):
    """Build the three-branch (CNN / TCN / Transformer) sequence classifier.

    All three branches consume the same (time_steps, n_features) input and
    each produce a 64-dim vector; the vectors are fused with a learned
    softmax attention over the three branches, then a dense head outputs
    class probabilities.

    Args:
        input_shape: (time_steps, n_features) tuple — only index 1 is read.
        n_classes: number of output classes.
        time_steps: window length of the input sequences.

    Returns:
        An uncompiled Keras Model mapping (batch, time_steps, n_features)
        to (batch, n_classes) softmax probabilities.
    """
    # Lazy import, in case TensorFlow/Keras has not been loaded yet.
    if 'Input' not in globals():
        if not import_tensorflow_and_keras():
            raise RuntimeError("无法导入 TensorFlow/Keras，无法创建模型")

    shared_input = Input(shape=(time_steps, input_shape[1]))

    # CNN branch: two Conv1D+BN stages, pooled down to a 64-dim vector.
    cnn = Conv1D(filters=32, kernel_size=3, activation='relu',
                 padding='same', kernel_regularizer=regularizers.L2(1e-4))(shared_input)
    cnn = BatchNormalization()(cnn)
    cnn = MaxPooling1D(pool_size=2, padding='same')(cnn)
    cnn = Conv1D(filters=64, kernel_size=3, activation='relu',
                 padding='same', kernel_regularizer=regularizers.L2(1e-4))(cnn)
    cnn = BatchNormalization()(cnn)
    cnn_output = GlobalAveragePooling1D()(cnn)
    cnn_output = Dense(64, activation='relu')(cnn_output)

    # TCN branch (when the optional tcn package is available).
    if 'TCN' in globals() and globals()['TCN'] is not None:
        tcn_layer = TCN(
            nb_filters=32,
            kernel_size=3,
            dilations=(1, 2, 4, 8),
            return_sequences=False,
            padding='causal',
            use_skip_connections=True,
            dropout_rate=0.4,
            use_batch_norm=True,
        )
        tcn_output = tcn_layer(shared_input)
        tcn_output = Dense(64, activation='relu', kernel_regularizer=regularizers.L2(1e-5))(tcn_output)
    else:
        # Fallback: mirror the CNN branch when TCN is unavailable.
        logger.warning("TCN不可用，使用替代CNN层")
        tcn = Conv1D(filters=32, kernel_size=3, activation='relu',
                     padding='same', kernel_regularizer=regularizers.L2(1e-4))(shared_input)
        tcn = BatchNormalization()(tcn)
        tcn = MaxPooling1D(pool_size=2, padding='same')(tcn)
        tcn = Conv1D(filters=64, kernel_size=3, activation='relu',
                     padding='same', kernel_regularizer=regularizers.L2(1e-4))(tcn)
        tcn = BatchNormalization()(tcn)
        tcn_output = GlobalAveragePooling1D()(tcn)
        tcn_output = Dense(64, activation='relu', kernel_regularizer=regularizers.L2(1e-5))(tcn_output)

    # Transformer branch.
    transformer = transformer_encoder(shared_input, head_size=64, num_heads=2, ff_dim=128, dropout=0.4)
    transformer_output = GlobalAveragePooling1D()(transformer)
    transformer_output = Dense(64, activation='relu')(transformer_output)

    # Attention fusion: learn a softmax weight per branch, then sum the
    # weighted branch outputs.
    concat_features = concatenate([cnn_output, tcn_output, transformer_output])
    attention_weights = Dense(3, activation='softmax')(concat_features)
    weighted_cnn = cnn_output * attention_weights[:, 0:1]
    weighted_tcn = tcn_output * attention_weights[:, 1:2]
    weighted_transformer = transformer_output * attention_weights[:, 2:3]
    merged = Add()([weighted_cnn, weighted_tcn, weighted_transformer])

    merged = Dense(64, activation='relu', kernel_regularizer=regularizers.L2(1e-4))(merged)
    merged = Dropout(0.4)(merged)
    output = Dense(n_classes, activation='softmax')(merged)

    return Model(inputs=shared_input, outputs=output)


if __name__ == "__main__":
    # Try to import TensorFlow/Keras; abort the script if unavailable.
    if not import_tensorflow_and_keras():
        print("错误：无法导入 TensorFlow/Keras。请检查您的安装。")
        exit(1)

    train_X, train_y = processed_data['train_X'], processed_data['train_y']
    test_X, test_y = processed_data['test_X'], processed_data['test_y']
    n_classes = len(processed_data['class_mapping'])
    class_names = [processed_data['class_mapping'][i] for i in range(n_classes)]

    # One-hot encode the integer labels for categorical cross-entropy.
    oh_encoder = OneHotEncoder(sparse_output=False)
    train_y_onehot = oh_encoder.fit_transform(train_y.reshape(-1, 1))
    test_y_onehot = oh_encoder.transform(test_y.reshape(-1, 1))

    # Save the one-hot encoder for inference-time use.
    with open('model/onehot_encoder.pkl', 'wb') as f:
        pickle.dump(oh_encoder, f)
    logger.info("独热编码器已保存")

    # Build the model.
    input_shape = (train_X.shape[1], train_X.shape[2])
    time_steps = train_X.shape[1]
    model = build_cnn_tcn_transformer_classification(
        input_shape=input_shape,
        n_classes=n_classes,
        time_steps=time_steps
    )

    # Training configuration: decay the learning rate and stop early when
    # validation accuracy plateaus; best weights are restored at the end.
    optimizer = Adam(learning_rate=0.001, weight_decay=1e-5)
    lr_scheduler = ReduceLROnPlateau(
        monitor='val_accuracy',
        factor=0.7,
        patience=3,
        min_lr=1e-6,
        verbose=1,
        mode='max'
    )
    early_stopping = EarlyStopping(
        monitor='val_accuracy',
        min_delta=0.001,
        patience=10,
        verbose=1,
        mode='max',
        restore_best_weights=True
    )

    # NOTE(review): the test split doubles as the validation set here, so
    # early stopping / LR scheduling peek at the test data — confirm intent.
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(
        train_X, train_y_onehot,
        epochs=100, batch_size=32,
        validation_data=(test_X, test_y_onehot),
        verbose=1,
        callbacks=[lr_scheduler, early_stopping],
        shuffle=True,
    )

    # Save the trained model plus a separate weights file.
    model.save('model/final_model.keras')
    model.save_weights('model/final_model.weights.h5')
    logger.info("模型和权重已保存")

    # Evaluation on the held-out split.
    test_pred_proba = model.predict(test_X)
    test_pred_class = np.argmax(test_pred_proba, axis=1)

    # Print accuracy metrics.
    print(f"\n测试集准确率: {accuracy_score(test_y, test_pred_class):.4f}")
    print("\n分类报告:")
    print(classification_report(test_y, test_pred_class, target_names=class_names))

    # Plot the confusion matrix.
    # NOTE(review): the Chinese axis labels/title need a CJK-capable
    # matplotlib font configured, or they may render as boxes — verify.
    cm = confusion_matrix(test_y, test_pred_class)
    plt.figure(figsize=(10, 8))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('混淆矩阵')
    plt.colorbar()
    tick_marks = np.arange(n_classes)
    plt.xticks(tick_marks, class_names, rotation=45)
    plt.yticks(tick_marks, class_names)

    # Annotate each cell with its count, in a contrast-aware color.
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j, i, format(cm[i, j], 'd'),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('实际类别')
    plt.xlabel('预测类别')
    plt.tight_layout()
    plt.show()