import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import glob
import chardet
import torch


def load_data(data_path):
    """Load every CSV matching *data_path* into one DataFrame indexed by trade_date.

    Each file's encoding is auto-detected with chardet before reading.
    Files that fail to parse are reported and skipped.

    Args:
        data_path: glob pattern (e.g. ``"data/*.csv"``) of CSV files; each file
            must contain a ``trade_date`` column parseable as dates.

    Returns:
        A single DataFrame, concatenated across files, indexed and sorted by
        ``trade_date``.

    Raises:
        FileNotFoundError: if no file matched the pattern or none could be read.
    """
    # sorted() makes the load order deterministic; glob order is filesystem-dependent.
    csv_files = sorted(glob.glob(data_path))
    df_list = []

    for file in csv_files:
        # Detect encoding from a 64 KiB sample instead of the whole file —
        # reading an entire large CSV into memory just for chardet is wasteful.
        with open(file, 'rb') as f:
            result = chardet.detect(f.read(65536))
            encoding = result['encoding']

        try:
            temp_df = pd.read_csv(file, parse_dates=['trade_date'], encoding=encoding)
            df_list.append(temp_df)
        except Exception as e:
            print(f"读取文件 {file} 时出错: {e}")

    if not df_list:
        # pd.concat([]) raises an opaque ValueError; fail with a clear message instead.
        raise FileNotFoundError(f"No readable CSV files matched pattern: {data_path}")

    df = pd.concat(df_list, ignore_index=True)
    df.set_index('trade_date', inplace=True)
    df.sort_index(inplace=True)
    return df


def create_labels(data, forward_period=1, threshold=0.02):
    """Create 3-class trading labels from forward returns.

    Classes: 2 = long (forward return > threshold), 0 = short
    (forward return < -threshold), 1 = hold (small move).

    Args:
        data: DataFrame with a ``close`` column.
        forward_period: how many rows ahead to look for the future price.
        threshold: absolute return beyond which a long/short label is given.

    Returns:
        Nullable-integer Series (dtype ``Int64``) aligned to ``data.index``.
        The last *forward_period* rows have no future price, so their labels
        are ``pd.NA`` rather than a spurious default — callers that mask with
        ``pd.isna(labels)`` will now correctly drop them.
    """
    future_price = data['close'].shift(-forward_period)
    current_price = data['close']

    future_return = (future_price - current_price) / current_price

    labels = np.ones_like(future_return, dtype=float)  # default: hold
    labels[(future_return > threshold).to_numpy()] = 2  # long
    labels[(future_return < -threshold).to_numpy()] = 0  # short
    # Bug fix: NaN forward returns (the trailing rows) previously fell through
    # both comparisons and silently got the "hold" label. Mark them missing.
    labels[future_return.isna().to_numpy()] = np.nan

    # 'Int64' (nullable) keeps integer labels while representing pd.NA.
    return pd.Series(labels, index=data.index).astype('Int64')


def add_advanced_tech_indicators(df):
    """Append technical-indicator feature columns to a price DataFrame.

    Requires ``close`` and ``vol`` columns. Window lengths (in bars):
    short=30, medium=120, long=480.

    Adds: price_change, MA_short/medium/long, volatility, MACD, MACD_signal,
    RSI, volume_ma_short/medium, volume_ratio, momentum_short/medium.

    Returns:
        A new DataFrame (input is not mutated) with indicator columns added.
        Interior NaNs are forward-filled. The leading warm-up rows (where the
        rolling windows have insufficient history) remain NaN: the previous
        ``bfill()`` copied *future* values into those rows, introducing
        look-ahead bias. Callers should drop NaN feature rows instead
        (preprocess_advanced already masks them).
    """
    df = df.copy()

    windows = {'short': 30, 'medium': 120, 'long': 480}

    # Basic price features
    df['price_change'] = df['close'].pct_change()
    df['MA_short'] = df['close'].rolling(window=windows['short']).mean()
    df['MA_medium'] = df['close'].rolling(window=windows['medium']).mean()
    df['MA_long'] = df['close'].rolling(window=windows['long']).mean()
    df['volatility'] = df['close'].pct_change().rolling(window=windows['medium']).std()

    # MACD: fast/slow EMA difference plus a signal line
    exp12 = df['close'].ewm(span=windows['short']).mean()
    exp26 = df['close'].ewm(span=windows['medium']).mean()
    df['MACD'] = exp12 - exp26
    df['MACD_signal'] = df['MACD'].ewm(span=windows['short'] // 2).mean()

    # RSI over the short window
    delta = df['close'].diff()
    gain = (delta.where(delta > 0, 0)).rolling(window=windows['short']).mean()
    loss = (-delta.where(delta < 0, 0)).rolling(window=windows['short']).mean()
    rs = gain / loss
    df['RSI'] = 100 - (100 / (1 + rs))

    # Volume features
    df['volume_ma_short'] = df['vol'].rolling(window=windows['short']).mean()
    df['volume_ma_medium'] = df['vol'].rolling(window=windows['medium']).mean()
    df['volume_ratio'] = df['vol'] / df['volume_ma_medium']

    # Price momentum
    df['momentum_short'] = df['close'].pct_change(windows['short'])
    df['momentum_medium'] = df['close'].pct_change(windows['medium'])

    # Bug fix: forward-fill only. The former .bfill() leaked future data
    # backwards into the warm-up rows.
    return df.ffill()


def preprocess_advanced(data, lookback=30, forward_period=1, split_ratio=0.7, threshold=0.02, return_raw_data=False):
    """Build train/test tensors (or raw frames) for the trading classifier.

    Pipeline: log-transform close/vol → add technical indicators →
    chronological train/test split → per-split labels → drop NaN rows →
    standardize (scaler fit on train only, to avoid test-set leakage) →
    sliding-window sequences → torch tensors.

    Args:
        data: DataFrame with ``close`` and ``vol`` columns, time-sorted.
        lookback: sequence length (number of past rows per sample).
        forward_period: label horizon passed to create_labels.
        split_ratio: fraction of rows used for training (chronological split).
        threshold: label return threshold passed to create_labels.
        return_raw_data: if True, return the cleaned frames instead of tensors.

    Returns:
        ``(train_data, test_data, scaler, feature_columns)`` when
        return_raw_data is True, otherwise
        ``(X_train, X_test, y_train, y_test, n_features)`` where X are float32
        tensors of shape (samples, lookback, n_features) and y are int64.
    """
    data = data.copy()

    # Base log transforms (small epsilon keeps log defined for zero volume)
    data['close_log'] = np.log(data['close'])
    data['vol_log'] = np.log(data['vol'] + 1e-6)

    # Technical indicators
    data = add_advanced_tech_indicators(data)

    feature_columns = [
        'close_log', 'vol_log', 'price_change',
        'MA_short', 'MA_medium', 'MA_long',
        'volatility', 'MACD', 'MACD_signal',
        'RSI', 'volume_ma_short', 'volume_ma_medium',
        'volume_ratio', 'momentum_short', 'momentum_medium'
    ]

    # Chronological split — no shuffling for time series
    split_idx = int(len(data) * split_ratio)
    train_data = data.iloc[:split_idx].copy()
    test_data = data.iloc[split_idx:].copy()

    # Labels are computed per split so test labels never reference train prices
    train_labels = create_labels(train_data, forward_period, threshold)
    test_labels = create_labels(test_data, forward_period, threshold)

    # Drop rows with missing labels or missing features
    valid_mask = ~(pd.isna(train_labels) | train_data[feature_columns].isna().any(axis=1))
    train_data, train_labels = train_data[valid_mask], train_labels[valid_mask]

    valid_mask = ~(pd.isna(test_labels) | test_data[feature_columns].isna().any(axis=1))
    test_data, test_labels = test_data[valid_mask], test_labels[valid_mask]

    # Standardize: fit on train only to prevent information leaking from test
    scaler = StandardScaler()
    X_train = scaler.fit_transform(train_data[feature_columns])
    X_test = scaler.transform(test_data[feature_columns])

    # Early return for the raw-data path: previously the function still built
    # all sequences and tensors and then threw them away.
    if return_raw_data:
        return train_data, test_data, scaler, feature_columns

    def _create_sequences(X, y, lookback):
        # Sliding windows: sample i is X[i-lookback:i] with label y[i].
        X_seq, y_seq = [], []
        for i in range(lookback, len(X)):
            X_seq.append(X[i - lookback:i])
            y_seq.append(y.iloc[i])
        # Explicit dtypes: float32 matches FloatTensor; int64 coerces labels
        # regardless of the label Series' dtype.
        return np.asarray(X_seq, dtype=np.float32), np.asarray(y_seq, dtype=np.int64)

    X_train_seq, y_train_seq = _create_sequences(X_train, train_labels, lookback)
    X_test_seq, y_test_seq = _create_sequences(X_test, test_labels, lookback)

    # Convert to torch tensors
    X_train_tensor = torch.FloatTensor(X_train_seq)
    X_test_tensor = torch.FloatTensor(X_test_seq)
    y_train_tensor = torch.LongTensor(y_train_seq)
    y_test_tensor = torch.LongTensor(y_test_seq)

    return X_train_tensor, X_test_tensor, y_train_tensor, y_test_tensor, len(feature_columns)