import numpy as np
import pandas as pd
import logging
import warnings
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, StandardScaler, OneHotEncoder
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score

# Silence third-party warnings (pandas/sklearn emit a lot of chatter).
# NOTE(review): a blanket 'ignore' also hides genuinely useful warnings.
warnings.filterwarnings('ignore')

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Configuration: input path, feature/label column names, and windowing params.
FILE_PATH = r"C:\Users\feng1\Desktop\广东-02.csv"
TEXT_FEATURE_COLS = ['风向']  # categorical text feature (wind direction)
NUMERIC_FEATURE_COLS = ['温度℃', '降水量(mm)', '风力(级)', '风速(km/h)', '风向角度(度)', '气压(hPa)',
                        '湿度(%)', '空气质量', '能见度(km)', '云量%', '露点℃', '短波辐射W/m²',
                        '直接辐射W/m²', '散射辐射W/m²', '直接正常辐照度W/m²']
LABEL_COL = '天气'  # classification target (weather condition)
TIME_STEP = 8  # sliding-window length: past observations per sequence sample
TEST_SIZE = 0.2  # fraction of windows held out as the chronological test tail


def read_csv(file_path):
    """Load the raw weather CSV and return it with whitespace-stripped column names.

    Malformed rows are skipped. Any read failure is logged and re-raised.
    """
    try:
        frame = pd.read_csv(file_path, encoding='utf-8', sep=',',
                            on_bad_lines='skip', low_memory=False)
        # Header cells sometimes carry stray whitespace; normalize them.
        frame.columns = [name.strip() for name in frame.columns]
        logger.info(f"CSV读取完成：形状{frame.shape}")
        return frame
    except Exception as exc:
        logger.error(f"CSV读取失败：{str(exc)}")
        raise
def drop_unused_cols(df):
    """Drop identifier columns ('地区', '时间') that carry no predictive signal.

    Missing columns are ignored; returns the pruned DataFrame.
    """
    pruned = df.drop(['地区', '时间'], axis=1, errors='ignore')
    logger.info(f"排除后形状：{pruned.shape}")
    return pruned
def process_text_features(df, text_cols):
    """Label-encode free-text categorical columns in place.

    Missing values are replaced with the literal string '未知' before
    encoding. Columns absent from ``df`` are logged and skipped.

    Returns the DataFrame and a {column: fitted LabelEncoder} mapping.
    """
    encoders = {}
    for name in text_cols:
        if name not in df.columns:
            logger.warning(f"列{name}不存在，跳过")
            continue
        encoder = LabelEncoder()
        df[name] = encoder.fit_transform(df[name].fillna('未知').astype(str))
        encoders[name] = encoder
    return df, encoders
def clean_numeric_features(df, numeric_cols):
    """Coerce messy numeric columns (e.g. '25℃', '3级') to floats in place.

    The first signed decimal token embedded in each cell is kept; cells with
    no numeric token become NaN and are then imputed with the column mean.
    When an entire column fails to parse (mean is NaN), it is imputed with 0
    so no NaNs survive. Columns absent from ``df`` are logged and skipped.

    Returns the (mutated) DataFrame.
    """
    for col in numeric_cols:
        if col not in df.columns:
            logger.warning(f"列{col}不存在，跳过")
            continue

        # str.extract grabs the first signed int/decimal token in one C-level
        # pass — equivalent to the old findall + take-first apply loop.
        extracted = df[col].astype(str).str.extract(r'(-?\d+\.?\d*)', expand=False)
        df[col] = pd.to_numeric(extracted, errors='coerce')

        # Bug fix: after to_numeric the column is always numeric, so mean()
        # never raises TypeError — but an all-NaN column yields mean()=NaN and
        # fillna(NaN) used to leave the NaNs in place. Fall back to 0 instead.
        mean_val = df[col].mean()
        if pd.isna(mean_val):
            mean_val = 0
        df[col] = df[col].fillna(mean_val)
    return df
def normalize_numeric_features(df, numeric_cols):
    """Standardize each numeric column (zero mean, unit variance) in place.

    Columns absent from ``df`` are silently skipped. Returns the DataFrame
    and a {column: fitted StandardScaler} mapping so the same transform can
    be reused or inverted later.
    """
    scalers = {}
    for name in numeric_cols:
        if name not in df.columns:
            continue
        fitted = StandardScaler()
        df[name] = fitted.fit_transform(df[name].values.reshape(-1, 1))
        scalers[name] = fitted
    return df, scalers
def create_sequences(data, labels, time_step):
    """Slice (data, labels) into sliding windows for next-step classification.

    Window i covers rows [i, i + time_step) of ``data`` and is paired with
    ``labels[i + time_step]``. Returns (X, y) as numpy arrays; X has shape
    (n_windows, time_step, n_features).
    """
    n_windows = len(data) - time_step
    windows = [data[start:start + time_step] for start in range(n_windows)]
    targets = [labels[start + time_step] for start in range(n_windows)]
    return np.array(windows), np.array(targets)
def process_data():
    """End-to-end preprocessing pipeline.

    Loads the CSV, drops unused columns, cleans/encodes/scales features,
    label-encodes the target, windows the rows into sequences, and splits
    them chronologically into train/test sets.

    Returns:
        dict with keys 'train_X', 'train_y', 'test_X', 'test_y' and
        'class_mapping' (encoded int -> original label string).

    Raises:
        ValueError: if the label column is missing from the data.
    """
    frame = read_csv(FILE_PATH)
    frame = drop_unused_cols(frame)
    frame = clean_numeric_features(frame, NUMERIC_FEATURE_COLS)
    frame, text_encoders = process_text_features(frame, TEXT_FEATURE_COLS)
    frame, numeric_scalers = normalize_numeric_features(frame, NUMERIC_FEATURE_COLS)

    if LABEL_COL not in frame.columns:
        raise ValueError(f"标签列{LABEL_COL}不存在")

    label_encoder = LabelEncoder()
    encoded_labels = label_encoder.fit_transform(frame[LABEL_COL].values)
    class_mapping = dict(zip(label_encoder.transform(label_encoder.classes_),
                             label_encoder.classes_))
    logger.info(f"标签映射：{class_mapping}")

    feature_matrix = frame.drop([LABEL_COL], axis=1).values
    X, y = create_sequences(feature_matrix, encoded_labels, TIME_STEP)

    # Chronological split: the last TEST_SIZE fraction of windows is held out.
    boundary = int(len(X) * (1 - TEST_SIZE))
    result = {
        'train_X': X[:boundary], 'train_y': y[:boundary],
        'test_X': X[boundary:], 'test_y': y[boundary:],
        'class_mapping': class_mapping
    }
    logger.info(f"训练集：{result['train_X'].shape}, 测试集：{result['test_X'].shape}")
    return result


# NOTE(review): preprocessing runs at module import time (the training section
# below relies on this module-level result); consider moving the call under a
# __main__ guard if this file is ever imported as a library.
processed_data = process_data()
print("数据处理完成")
print(f"训练集形状：{processed_data['train_X'].shape}")
print(f"标签映射：{processed_data['class_mapping']}")

# Model section: deep-learning imports and architecture definitions
from keras.optimizers import Adam
import tensorflow as tf
from keras.layers import (Input, Conv1D, MaxPooling1D, GlobalAveragePooling1D,
                          MultiHeadAttention, Dropout, LayerNormalization,
                          Add, Dense, concatenate, BatchNormalization)
from keras.models import Model
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras import regularizers
from tcn import TCN
def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout):
    """One Transformer encoder block with a learned positional encoding.

    Args:
        inputs: tensor of shape (batch, seq_len, d_model).
        head_size: per-head key dimension of the attention layer.
        num_heads: number of attention heads.
        ff_dim: hidden width of the position-wise feed-forward sub-layer.
        dropout: dropout rate applied after attention and the FFN.

    Returns:
        Tensor of shape (batch, seq_len, d_model).
    """
    seq_len = inputs.shape[1]
    d_model = inputs.shape[-1]

    # Learned positional encoding. Bug fix: the position indices must keep the
    # sequence axis as the batch-like axis — shape (seq_len, 1) -> Dense ->
    # (seq_len, d_model) — so each position gets a distinct vector. The old
    # (1, seq_len) layout collapsed through Dense into a single (1, d_model)
    # vector that broadcast identically to every timestep, i.e. it injected
    # no positional information at all.
    positions = tf.range(start=0, limit=seq_len, delta=1)
    positions = tf.expand_dims(positions, axis=-1)   # (seq_len, 1)
    positions = tf.cast(positions, dtype=tf.float32)
    pos_encoding = Dense(d_model)(positions)         # (seq_len, d_model)
    x = inputs + pos_encoding                        # broadcasts over batch

    # Self-attention sub-layer with residual connection + LayerNorm.
    attn_output = MultiHeadAttention(num_heads=num_heads, key_dim=head_size)(x, x)
    attn_output = Dropout(dropout)(attn_output)
    out1 = LayerNormalization(epsilon=1e-6)(Add()([x, attn_output]))

    # Position-wise feed-forward sub-layer with residual + LayerNorm.
    ffn_output = Dense(ff_dim, activation="relu")(out1)
    ffn_output = BatchNormalization()(ffn_output)
    ffn_output = Dense(ff_dim, activation="relu")(ffn_output)
    ffn_output = Dense(d_model)(ffn_output)
    ffn_output = Dropout(dropout)(ffn_output)
    return LayerNormalization(epsilon=1e-6)(Add()([out1, ffn_output]))
def build_cnn_tcn_transformer_classification(input_shape, n_classes, time_steps):
    """Three-branch (CNN / TCN / Transformer) sequence classifier.

    Each branch compresses the shared (time_steps, n_features) input into a
    64-d vector; a learned softmax gate over the three branches weights them
    before the final dense classification head.

    Args:
        input_shape: (time_steps, n_features) tuple; only n_features is used.
        n_classes: number of output classes.
        time_steps: sequence length of the input windows.

    Returns:
        An uncompiled keras Model mapping (batch, time_steps, n_features)
        to (batch, n_classes) softmax probabilities.
    """
    inputs = Input(shape=(time_steps, input_shape[1]))

    # --- CNN branch: two conv stacks, globally pooled to a 64-d summary ---
    conv = Conv1D(filters=32, kernel_size=3, activation='relu',
                  padding='same', kernel_regularizer=regularizers.L2(1e-4))(inputs)
    conv = BatchNormalization()(conv)
    conv = MaxPooling1D(pool_size=2, padding='same')(conv)
    conv = Conv1D(filters=64, kernel_size=3, activation='relu',
                  padding='same', kernel_regularizer=regularizers.L2(1e-4))(conv)
    conv = BatchNormalization()(conv)
    cnn_vec = Dense(64, activation='relu')(GlobalAveragePooling1D()(conv))

    # --- TCN branch: dilated causal convolutions with skip connections ---
    tcn_vec = TCN(
        nb_filters=32,
        kernel_size=3,
        dilations=(1, 2, 4, 8),
        return_sequences=False,
        padding='causal',
        use_skip_connections=True,
        dropout_rate=0.4,
        use_batch_norm=True,
    )(inputs)
    tcn_vec = Dense(64, activation='relu',
                    kernel_regularizer=regularizers.L2(1e-5))(tcn_vec)

    # --- Transformer branch ---
    encoded = transformer_encoder(inputs, head_size=64, num_heads=2,
                                  ff_dim=128, dropout=0.4)
    trans_vec = Dense(64, activation='relu')(GlobalAveragePooling1D()(encoded))

    # --- Attention fusion: softmax gate over the three branch vectors ---
    gate = Dense(3, activation='softmax')(
        concatenate([cnn_vec, tcn_vec, trans_vec]))
    fused = Add()([cnn_vec * gate[:, 0:1],
                   tcn_vec * gate[:, 1:2],
                   trans_vec * gate[:, 2:3]])

    # Classification head.
    fused = Dense(64, activation='relu',
                  kernel_regularizer=regularizers.L2(1e-4))(fused)
    fused = Dropout(0.4)(fused)
    probs = Dense(n_classes, activation='softmax')(fused)

    return Model(inputs=inputs, outputs=probs)


if __name__ == "__main__":
    # NOTE(review): `processed_data` is produced at module import time above,
    # so this guard does not actually shield the heavy preprocessing on import.
    train_X, train_y = processed_data['train_X'], processed_data['train_y']
    test_X, test_y = processed_data['test_X'], processed_data['test_y']
    n_classes = len(processed_data['class_mapping'])
    class_names = [processed_data['class_mapping'][i] for i in range(n_classes)]

    # One-hot encode the integer labels for categorical cross-entropy.
    oh_encoder = OneHotEncoder(sparse_output=False)
    train_y_onehot = oh_encoder.fit_transform(train_y.reshape(-1, 1))
    test_y_onehot = oh_encoder.transform(test_y.reshape(-1, 1))

    # Build the model.
    input_shape = (train_X.shape[1], train_X.shape[2])
    time_steps = train_X.shape[1]
    model = build_cnn_tcn_transformer_classification(
        input_shape=input_shape,
        n_classes=n_classes,
        time_steps=time_steps
    )

    # Training configuration.
    optimizer = Adam(learning_rate=0.001, weight_decay=1e-5)
    lr_scheduler = ReduceLROnPlateau(
        monitor='val_accuracy',  # track validation accuracy (the metric of interest)
        factor=0.7,  # LR decay factor (gentler than 0.5)
        patience=3,  # epochs without improvement before decaying the LR
        min_lr=1e-6,
        verbose=1,
        mode='max'  # accuracy is to be maximized
    )
    early_stopping = EarlyStopping(
        monitor='val_accuracy',  # same metric as the LR scheduler
        min_delta=0.001,  # minimum change that counts as an improvement
        patience=10,  # generous patience to avoid stopping too early
        verbose=1,
        mode='max',  # accuracy is to be maximized
        restore_best_weights=True
    )

    # NOTE(review): the test split doubles as the validation set below, so early
    # stopping and LR scheduling peek at the test data — the final reported test
    # accuracy is optimistically biased. A separate validation split would give
    # an honest estimate.
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(
        train_X,train_y_onehot,
        epochs=100, batch_size=32,
        validation_data=(test_X, test_y_onehot),
        verbose=1,
        callbacks=[lr_scheduler, early_stopping],
        shuffle=True , # reshuffle the training data every epoch
    )

    # Persist the trained model (the .keras format stores the full model).
    model.save('final_model.keras')  # full model: architecture + weights + optimizer
    model.save_weights('final_model.weights.h5')

    # Evaluate on the held-out windows.
    test_pred_proba = model.predict(test_X)
    test_pred_class = np.argmax(test_pred_proba, axis=1)

    # Print accuracy metrics.
    print(f"\n测试集准确率: {accuracy_score(test_y, test_pred_class):.4f}")
    print("\n分类报告:")
    print(classification_report(test_y, test_pred_class, target_names=class_names))

    # Confusion matrix rendered with plain matplotlib.
    # NOTE(review): titles/labels are Chinese strings; without a CJK-capable
    # font configured (e.g. plt.rcParams['font.sans-serif']) they may render
    # as empty boxes — confirm on the target machine.
    cm = confusion_matrix(test_y, test_pred_class)
    plt.figure(figsize=(10, 8))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('混淆矩阵')
    plt.colorbar()
    tick_marks = np.arange(n_classes)
    plt.xticks(tick_marks, class_names, rotation=45)
    plt.yticks(tick_marks, class_names)

    # Annotate each cell with its count, in a contrast-aware text color.
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j, i, format(cm[i, j], 'd'),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('实际类别')
    plt.xlabel('预测类别')
    plt.tight_layout()
    plt.show()