import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
import yfinance as yf
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import Dataset, DataLoader
from typing import Tuple
import time
import datetime
import joblib

# 配置参数
class Config:
    """Hyper-parameters for windowing, the three sub-models and training."""
    seq_length = 60         # input sequence length (time steps per sample)
    batch_size = 32         # mini-batch size
    num_epochs = 300        # maximum number of training epochs
    early_stop_patience = 50 # epochs without val-loss improvement before stopping
    lr = 1e-4               # Adam learning rate
    train_ratio = 0.7       # fraction of data used for training
    val_ratio = 0.15        # fraction of data used for validation
    test_ratio = 0.15       # fraction of data used for testing (not read in this file)
    latent_dim = 32         # VAE latent-space dimension
    beta = 0.5              # VAE KL-divergence weight (not referenced in this file — TODO confirm intended use)
    n_heads = 8             # number of Transformer attention heads
    dropout = 0.1           # dropout probability
    lstm_hidden = 128       # LSTM hidden-state dimension

# 技术指标计算模块
def calculate_technical_indicators(df: pd.DataFrame) -> pd.DataFrame:
    """Append a set of standard technical indicators to an OHLCV frame.

    Expects single-level columns 'Open', 'High', 'Low', 'Close', 'Volume'.
    Rows made NaN by the rolling windows (the longest is the 50-day SMA)
    are dropped, so the returned frame is shorter than the input.
    Intermediate helper columns (TR, TP, TP_SMA, TP_MD) are removed.
    """
    # Log returns
    df['log_return'] = np.log(df['Close'] / df['Close'].shift(1))

    # Simple moving averages. SMA_20 is reused by the Bollinger Bands below
    # (the original code recomputed it there redundantly).
    for window in [5, 10, 20, 50]:
        df[f'SMA_{window}'] = df['Close'].rolling(window).mean()

    # RSI (14-day, simple rolling averages of gains and losses)
    delta = df['Close'].diff()
    gain = delta.where(delta > 0, 0)
    loss = -delta.where(delta < 0, 0)
    avg_gain = gain.rolling(14).mean()
    avg_loss = loss.rolling(14).mean()
    rs = avg_gain / avg_loss
    df['RSI'] = 100 - (100 / (1 + rs))

    # MACD (12/26 EMA difference) and its 9-day signal line
    ema12 = df['Close'].ewm(span=12).mean()
    ema26 = df['Close'].ewm(span=26).mean()
    df['MACD'] = ema12 - ema26
    df['Signal'] = df['MACD'].ewm(span=9).mean()

    # Stochastic Oscillator: %K over 14 days, %D = 3-day SMA of %K
    low_min = df['Low'].rolling(14).min()
    high_max = df['High'].rolling(14).max()
    df['%K'] = 100 * (df['Close'] - low_min) / (high_max - low_min)
    df['%D'] = df['%K'].rolling(3).mean()

    # Average True Range: EMA of the elementwise max of the three TR terms
    tr1 = df['High'] - df['Low']
    tr2 = (df['High'] - df['Close'].shift(1)).abs()
    tr3 = (df['Low'] - df['Close'].shift(1)).abs()
    df['TR'] = np.maximum.reduce([tr1, tr2, tr3])
    df['ATR'] = df['TR'].ewm(span=14).mean()

    # Annualised rolling volatility of log returns (14-day window)
    df['Volatility'] = df['log_return'].rolling(14).std() * np.sqrt(252)

    # Bollinger Bands around the 20-day SMA computed above
    std_20 = df['Close'].rolling(20).std()
    df['Upper_Band'] = df['SMA_20'] + (2 * std_20)
    df['Lower_Band'] = df['SMA_20'] - (2 * std_20)

    # Force Index (1-period)
    df['Force_Index'] = (df['Close'] - df['Close'].shift(1)) * df['Volume']

    # On-Balance Volume (OBV)
    obv = (np.sign(df['Close'].diff()) * df['Volume']).fillna(0).cumsum()
    df['OBV'] = obv

    # Commodity Channel Index (CCI, 20-day, mean-absolute-deviation based)
    df['TP'] = (df['High'] + df['Low'] + df['Close']) / 3
    df['TP_SMA'] = df['TP'].rolling(20).mean()
    df['TP_MD'] = df['TP'].rolling(20).apply(lambda x: np.mean(np.abs(x - x.mean())))
    df['CCI'] = (df['TP'] - df['TP_SMA']) / (0.015 * df['TP_MD'])

    # Drop intermediate columns and rows left NaN by the rolling windows
    df = df.drop(['TR', 'TP', 'TP_SMA', 'TP_MD'], axis=1)
    return df.dropna()

# 自定义数据集
class StockDataset(Dataset):
    """Sliding-window dataset over a 2-D array of scaled features.

    Each sample is a window of `seq_length` consecutive rows; the target is
    the `target_col` value of the row immediately after the window
    (next-step prediction).
    """

    def __init__(self, data: np.ndarray, seq_length: int, target_col: int = 0):
        self.data = data
        self.seq_length = seq_length
        self.target_col = target_col

    def __len__(self):
        # One sample per window that still has a "next" row for its target.
        return len(self.data) - self.seq_length

    def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor]:
        end = idx + self.seq_length
        window = self.data[idx:end]
        target = self.data[end, self.target_col]
        return torch.FloatTensor(window), torch.tensor(target, dtype=torch.float32).unsqueeze(0)

# VAE模型
class VAE(nn.Module):
    """Small fully-connected variational autoencoder.

    forward() returns (reconstruction, mu, logvar); the reconstruction has
    the same trailing dimension as the input.
    """

    def __init__(self, input_dim: int, latent_dim: int):
        super().__init__()
        # Shared encoder trunk; two linear heads produce mu and log-variance.
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
        )
        self.fc_mu = nn.Linear(32, latent_dim)
        self.fc_var = nn.Linear(32, latent_dim)
        # Decoder mirrors the encoder back up to the input dimension.
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, 32),
            nn.ReLU(),
            nn.Linear(32, 64),
            nn.ReLU(),
            nn.Linear(64, input_dim),
        )

    def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
        # z = mu + sigma * eps, eps ~ N(0, I): keeps sampling differentiable.
        sigma = (0.5 * logvar).exp()
        noise = torch.randn_like(sigma)
        return noise.mul(sigma).add(mu)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        hidden = self.encoder(x)
        mu = self.fc_mu(hidden)
        logvar = self.fc_var(hidden)
        latent = self.reparameterize(mu, logvar)
        return self.decoder(latent), mu, logvar

# Transformer模型
class StockTransformer(nn.Module):
    """Transformer encoder over the input window; the encoded sequence is
    flattened and mapped to a single regression output per sample."""

    def __init__(self, input_dim: int, seq_len: int, n_heads: int, dropout: float):
        super().__init__()
        layer = nn.TransformerEncoderLayer(
            d_model=input_dim,
            nhead=n_heads,
            dropout=dropout,
            batch_first=True,
        )
        self.encoder_layer = layer
        self.transformer = nn.TransformerEncoder(layer, num_layers=2)
        # Flatten the full (seq_len, input_dim) encoding into one scalar.
        self.fc = nn.Linear(input_dim * seq_len, 1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        encoded = self.transformer(x)
        flat = encoded.reshape(encoded.size(0), -1)
        return self.fc(flat)

# LSTM模型
class StockLSTM(nn.Module):
    """Two-layer bidirectional LSTM; the last time step's output is passed
    through a small MLP head to produce one value per sample."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        self.lstm = nn.LSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=2,
            bidirectional=True,
            dropout=0.2,
            batch_first=True,
        )
        # Bidirectional output is 2 * hidden_dim wide.
        self.fc = nn.Sequential(
            nn.Linear(hidden_dim * 2, 32),
            nn.BatchNorm1d(32),
            nn.Tanh(),
            nn.Linear(32, 1),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        sequence_out, _ = self.lstm(x)
        final_step = sequence_out[:, -1]
        return self.fc(final_step)

# 集成模型
class EnsembleModel(torch.nn.Module):
    """Weighted blend of the VAE reconstruction, Transformer and LSTM
    predictions; the three mixing weights are learned parameters."""

    def __init__(self, vae, transformer, lstm):
        super().__init__()
        self.vae = vae
        self.transformer = transformer
        self.lstm = lstm
        # Learnable mixing weights, initialised to favour the LSTM slightly.
        self.weights = torch.nn.Parameter(torch.tensor([0.3, 0.3, 0.4]))

    def forward(self, x: torch.Tensor):
        reconstruction, _mu, _logvar = self.vae(x)
        # The reconstructed last feature of the last time step acts as the
        # VAE's scalar "prediction" for the window.
        vae_pred = reconstruction[:, -1, -1].unsqueeze(1)
        transformer_pred = self.transformer(x)
        lstm_pred = self.lstm(x)
        w = self.weights
        return w[0] * vae_pred + w[1] * transformer_pred + w[2] * lstm_pred

# 训练函数
def train_model(model, train_loader, val_loader, config):
    """Train `model` with Adam + MSE, early-stopping on validation loss.

    The best weights (lowest summed validation loss) are checkpointed to
    'best_model.pth'. Training stops after `config.early_stop_patience`
    epochs without improvement or after `config.num_epochs` epochs.
    """
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    # Hoisted: the original rebuilt nn.MSELoss() inside every batch and
    # every validation step; one instance is sufficient.
    criterion = nn.MSELoss()
    best_loss = float('inf')
    patience_counter = 0

    for epoch in range(config.num_epochs):
        # --- training pass ---
        model.train()
        train_loss = 0.0
        for X_batch, y_batch in train_loader:
            optimizer.zero_grad()
            outputs = model(X_batch)
            loss = criterion(outputs, y_batch)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        # --- validation pass (no gradients) ---
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for X_val, y_val in val_loader:
                val_loss += criterion(model(X_val), y_val).item()

        # Checkpoint whenever the summed validation loss improves; otherwise
        # count toward the early-stopping patience budget.
        if val_loss < best_loss:
            best_loss = val_loss
            patience_counter = 0
            torch.save(model.state_dict(), 'best_model.pth')
        else:
            patience_counter += 1

        if patience_counter >= config.early_stop_patience:
            print("Early stopping triggered")
            break

        print(f"Epoch {epoch+1}: Train Loss {train_loss/len(train_loader):.4f}, Val Loss {val_loss/len(val_loader):.4f}")

# ----------------------
# 1. 稳健获取数据（含重试机制）
# ----------------------
def safe_yfinance_download(ticker, max_retries=3, delay=5):
    """Download daily OHLCV data for `ticker` from Yahoo Finance with retries.

    Retries up to `max_retries` times, sleeping `delay` seconds between
    attempts, and re-raises the last error if every attempt fails.
    Returns a DataFrame with Open/High/Low/Close/Volume, NaN rows dropped.
    """
    last_error = None
    for attempt in range(1, max_retries + 1):
        try:
            data = yf.download(
                tickers=ticker,
                start="2014-01-01",
                end="2025-04-19",
                progress=False,
                auto_adjust=False,  # keep raw (unadjusted) prices
                threads=False       # single-threaded to reduce request rate
            )
            if data.empty:
                raise ValueError(f"No data found for {ticker}")
            return data[['Open', 'High', 'Low', 'Close', 'Volume']].dropna()
        except Exception as e:
            # BUG FIX: the original re-raised on the first failure, so
            # max_retries/delay were dead parameters and no retry happened.
            last_error = e
            print(f"Error fetching data (attempt {attempt}/{max_retries}): {e}")
            if attempt < max_retries:
                time.sleep(delay)
    raise last_error

# 划分数据集（保持时间顺序）
def time_series_split(data, train_ratio, val_ratio):
    """Split a sequence chronologically into (train, val, test) partitions.

    The test partition receives everything after the train and validation
    slices, so the three parts always cover all of `data` in order.
    """
    n = len(data)
    train_size = int(n * train_ratio)
    val_size = int(n * val_ratio)
    # (Removed the unused `test_size` local; the test slice is simply the rest.)
    train_data = data[:train_size]
    val_data = data[train_size:train_size + val_size]
    test_data = data[train_size + val_size:]

    return train_data, val_data, test_data

# 主程序
def main():
    """Interactive entry point.

    Prompts for a ticker, downloads OHLCV data, engineers features, trains
    the VAE/Transformer/LSTM ensemble with early stopping, evaluates on a
    held-out chronological test split and saves the model + scaler.
    """
    config = Config()

    # Prompt for a ticker symbol (e.g. 002594.SZ); quit words exit cleanly.
    print("请输入股票代码（如：002594.SZ）或输入 'quit'，'exit'，'q' 退出:")
    ticker = input("ticker code : ")
    if ticker.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        exit(0)

    # Download data; abort on failure to avoid cascading errors downstream.
    try:
        df = safe_yfinance_download(ticker)
        print("Data successfully downloaded:")
        print(df.head())
    except Exception as e:
        print(f"Failed to download data: {e}")
        exit(1)

    if df.empty:
        raise ValueError("DataFrame is empty after download.")

    # Feature engineering: technical indicators, then a fixed feature subset.
    df = calculate_technical_indicators(df)
    features = df[['Close', 'log_return', 'SMA_5', 'SMA_10', 'RSI', 'MACD']].values

    # Scale all features to [0, 1]; the fitted scaler is saved for inference.
    scaler = MinMaxScaler()
    scaled_data = scaler.fit_transform(features)
    # (Removed an unused whole-series StockDataset and unused `test_size`.)

    total_samples = len(scaled_data)
    print(f"Total samples: {total_samples}")
    train_size = int(total_samples * config.train_ratio)
    val_size = int(total_samples * config.val_ratio)

    # Chronological split of the raw array (no shuffling across time).
    train_data = scaled_data[:train_size]
    val_data = scaled_data[train_size:train_size + val_size]
    test_data = scaled_data[train_size + val_size:]

    # Independent sliding-window datasets per split; each split must be
    # longer than seq_length to yield at least one sample.
    train_dataset = StockDataset(train_data, config.seq_length)
    val_dataset = StockDataset(val_data, config.seq_length)
    test_dataset = StockDataset(test_data, config.seq_length)
    assert len(train_dataset) > 0, "训练集长度必须大于0"
    assert len(val_dataset) > 0, "验证集长度必须大于0"
    assert len(test_dataset) > 0, "测试集长度必须大于0"

    print(f"数据集划分: 训练集 {len(train_dataset)} 样本, 验证集 {len(val_dataset)} 样本, 测试集 {len(test_dataset)} 样本")

    # Only the training loader shuffles; evaluation keeps time order.
    train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=config.batch_size)
    test_loader = DataLoader(test_dataset, batch_size=config.batch_size)

    input_dim = train_data.shape[1]
    print(f"Input dimension: {input_dim}")

    vae = VAE(input_dim, config.latent_dim)
    transformer = StockTransformer(input_dim, config.seq_length, config.n_heads, config.dropout)
    lstm = StockLSTM(input_dim, config.lstm_hidden)
    model = EnsembleModel(vae, transformer, lstm)

    train_model(model, train_loader, val_loader, config)

    # Reload the best checkpoint written by train_model.
    model.load_state_dict(torch.load('best_model.pth'))

    # Collect test-set predictions without gradients.
    model.eval()
    predictions, actuals = [], []
    with torch.no_grad():
        for X_test, y_test in test_loader:
            outputs = model(X_test)
            predictions.extend(outputs.numpy())
            actuals.extend(y_test.numpy())

    predictions = np.array(predictions).flatten()
    actuals = np.array(actuals).flatten()

    # NOTE(review): these values are min-max scaled prices (all >= 0), so the
    # sign-product "direction accuracy" and the MAPE below are not meaningful
    # in price terms — consider inverse-transforming or using returns first.
    direction_acc = np.mean((predictions[1:] * actuals[1:] > 0).astype(float)) * 100
    mape = np.mean(np.abs((actuals - predictions) / actuals)) * 100
    rmse = np.sqrt(np.mean((actuals - predictions)**2))

    # Persist the final model and scaler with a timestamp for later inference.
    current_time = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    torch.save(model.state_dict(), f'stockmodel_{ticker}_{current_time}.pth')
    joblib.dump(scaler, f'scaler_{ticker}_{current_time}.save')

    print(f"Test Results:\nDirection Accuracy: {direction_acc:.2f}%\nMAPE: {mape:.2f}%\nRMSE: {rmse:.4f}")

if __name__ == "__main__":
    main()