import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.layers import Input, LSTM, Dense, Dropout, Conv1D, MaxPooling1D, Flatten, concatenate
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
import ta

def preprocess_data(filepath):
    """Load OHLCV data, add technical indicators, and standardize features.

    Parameters
    ----------
    filepath : str
        Path to a CSV with at least the columns Date, Close, Open, High,
        Low, Volume and Adj Close.

    Returns
    -------
    tuple
        (df, df_scaled, scaler) where ``df`` is the cleaned, date-sorted
        DataFrame with indicator columns, ``df_scaled`` holds the
        standardized feature columns, and ``scaler`` is the fitted
        StandardScaler (kept so callers can invert the transform).
    """
    # Single source of truth for the model feature columns (was duplicated).
    feature_cols = ['Close', 'Open', 'High', 'Low', 'Volume', 'Adj Close',
                    'RSI', 'MACD', 'Bollinger_High', 'Bollinger_Low', 'Volatility']

    # Load data; .copy() avoids SettingWithCopyWarning on the assignments below.
    dataframe = pd.read_csv(filepath)
    df = dataframe[['Date', 'Close', 'Open', 'High', 'Low', 'Volume', 'Adj Close']].copy()
    df['Date'] = pd.to_datetime(df['Date'])
    df = df.sort_values('Date')

    def convert_volume(volume):
        # Volume may be numeric or a string with an M/K magnitude suffix.
        if isinstance(volume, str):
            if 'M' in volume:
                return float(volume.replace('M', '')) * 1e6
            elif 'K' in volume:
                return float(volume.replace('K', '')) * 1e3
        return float(volume)

    df['Volume'] = df['Volume'].apply(convert_volume)

    def convert_percentage(value):
        # Some exports encode Adj Close as a percentage string like "1.5%".
        if isinstance(value, str) and '%' in value:
            return float(value.replace('%', '')) / 100
        return float(value)

    df['Adj Close'] = df['Adj Close'].apply(convert_percentage)

    # Compute technical indicators.
    df['RSI'] = ta.momentum.RSIIndicator(df['Close'], window=14).rsi()
    df['MACD'] = ta.trend.MACD(df['Close']).macd_diff()
    bollinger = ta.volatility.BollingerBands(df['Close'], window=20, window_dev=2)
    df['Bollinger_High'] = bollinger.bollinger_hband()
    df['Bollinger_Low'] = bollinger.bollinger_lband()
    df['Volatility'] = df['Close'].rolling(window=10).std()
    # bfill() replaces fillna(method='bfill'), whose `method=` kwarg is
    # deprecated in pandas 2.x and removed in pandas 3.
    df = df.bfill()

    # Standardize all model features with a single scaler.
    scaler = StandardScaler()
    scaled_features = scaler.fit_transform(df[feature_cols])
    df_scaled = pd.DataFrame(scaled_features, columns=feature_cols)
    return df, df_scaled, scaler

def create_sequences(data, seq_size):
    """Build sliding windows for supervised training.

    Each sample is ``seq_size`` consecutive rows of every column except
    the last; its target is the first column's value on the row
    immediately following the window.
    """
    n_samples = len(data) - seq_size
    windows = [data[start:start + seq_size, :-1] for start in range(n_samples)]
    targets = [data[start + seq_size, 0] for start in range(n_samples)]
    return np.array(windows), np.array(targets)

def build_model(seq_size, feature_size, future_steps):
    """Assemble the hybrid CNN+LSTM forecaster.

    A convolutional branch and a recurrent branch both read the same
    (seq_size, feature_size) window; their features are concatenated and
    fed through a dense head that emits ``future_steps`` linear outputs.
    """
    window_input = Input(shape=(seq_size, feature_size))

    # Convolutional branch: two conv/pool stages, then flatten.
    cnn = Conv1D(filters=128, kernel_size=3, activation='relu', padding='same')(window_input)
    cnn = MaxPooling1D(pool_size=2)(cnn)
    cnn = Conv1D(filters=256, kernel_size=3, activation='relu', padding='same')(cnn)
    cnn = MaxPooling1D(pool_size=2)(cnn)
    cnn = Flatten()(cnn)

    # Recurrent branch: stacked LSTMs collapsing to a single vector.
    rnn = LSTM(256, return_sequences=True)(window_input)
    rnn = LSTM(128, return_sequences=False)(rnn)
    rnn = Dropout(0.3)(rnn)

    # Merge both branches and apply the fully connected head.
    merged = concatenate([cnn, rnn])
    head = Dense(256, activation='relu')(merged)
    head = Dropout(0.3)(head)
    head = Dense(128, activation='relu')(head)
    head = Dropout(0.3)(head)

    # Output layer: one linear unit per forecast step.
    predictions = Dense(future_steps)(head)

    model = Model(inputs=window_input, outputs=predictions)
    model.compile(optimizer=Adam(learning_rate=0.0001), loss='huber_loss')
    return model

def train_and_predict(filepath, seq_size, future_steps):
    """Train the CNN+LSTM model and forecast the next ``future_steps`` closes.

    Parameters
    ----------
    filepath : str
        CSV path passed through to preprocess_data.
    seq_size : int
        Rows of history per input window.
    future_steps : int
        Number of future closing prices predicted in one shot.

    Returns
    -------
    np.ndarray
        Predicted closing prices, mapped back to the original price scale.

    Side effects: writes 'best_model.h5' and 'trained_model.h5'.
    """
    df, df_scaled, scaler = preprocess_data(filepath)
    x_data, y_data = create_sequences(df_scaled.values, seq_size)
    # shuffle=False keeps the split chronological (no look-ahead leakage).
    x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, shuffle=False)

    model = build_model(seq_size, x_data.shape[2], future_steps)

    # Train with LR decay, early stopping, and a best-weights checkpoint.
    lr_scheduler = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=1e-6)
    early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
    checkpoint = ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True, verbose=1)

    model.fit(
        x_train, y_train,
        epochs=50,
        batch_size=32,
        validation_data=(x_test, y_test),
        callbacks=[lr_scheduler, early_stopping, checkpoint],
        verbose=1
    )
    model.save('trained_model.h5')

    # Forecast from the most recent window. x_data[-1] stops one row short
    # of the newest observation (create_sequences' last window ends at
    # len-2), so take the window directly from the scaled data, dropping
    # the target-only last column exactly as create_sequences does.
    last_sequence = df_scaled.values[-seq_size:, :-1].reshape(1, seq_size, x_data.shape[2])
    future_predictions = model.predict(last_sequence).flatten()

    # Undo the standardization of the 'Close' column (feature index 0);
    # no need for the intermediate zero-padded matrix.
    future_predictions_original = future_predictions * scaler.scale_[0] + scaler.mean_[0]

    return future_predictions_original

if __name__ == "__main__":
    # Script entry point: forecast 10 future closes from 30-day windows
    # and persist the result for downstream use.
    window_length = 30
    forecast_horizon = 10
    forecast = train_and_predict('BA.csv', window_length, forecast_horizon)
    np.save('predictions.npy', forecast)
