# -*- coding: utf-8 -*-

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import DataLoader, TensorDataset

# 1. 生成虚拟时间序列数据
# 1. Generate a synthetic time series
def generate_synthetic_data(seq_length=1000, noise=0.1, seed=None):
    """Generate a noisy synthetic time series (sine + slower cosine).

    Args:
        seq_length: Number of samples to generate.
        noise: Standard deviation of the additive Gaussian noise;
            0.0 yields a fully deterministic signal. (Previously a
            hard-coded 0.1.)
        seed: Optional seed for reproducible noise; None keeps the
            original non-deterministic behavior.

    Returns:
        1-D numpy array of length ``seq_length``.
    """
    rng = np.random.default_rng(seed)
    t = np.linspace(0, 1000, seq_length)
    return np.sin(t) + np.cos(0.5 * t) + noise * rng.standard_normal(seq_length)

# 2. 数据预处理
def create_sequences(data, seq_length):
    """Slice a 1-D series into overlapping input windows and next-step targets.

    Each window is ``data[i : i + seq_length]`` and its target is the value
    immediately after the window, ``data[i + seq_length]``.

    Returns:
        (X, y) as numpy arrays with shapes (n, seq_length) and (n,).
    """
    n_windows = len(data) - seq_length
    windows = [data[start:start + seq_length] for start in range(n_windows)]
    targets = [data[start + seq_length] for start in range(n_windows)]
    return np.array(windows), np.array(targets)

# --- Data preparation ----------------------------------------------------
data = generate_synthetic_data()

# BUG FIX: the scaler was previously fit on the ENTIRE series before the
# chronological split, leaking the test range's min/max into training.
# Fit on the training portion only, then transform the whole series.
scaler = MinMaxScaler()
train_cutoff = int(len(data) * 0.8)
scaler.fit(data[:train_cutoff].reshape(-1, 1))
data_scaled = scaler.transform(data.reshape(-1, 1)).flatten()

seq_length = 50
X, y = create_sequences(data_scaled, seq_length)
# Reshape to (samples, channels=1, seq_length, 1) to match the Conv2d input.
X = X.reshape(X.shape[0], 1, seq_length, 1)

# Chronological 80/20 split — no shuffling, since this is a time series.
train_size = int(len(X) * 0.8)
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

X_train_tensor = torch.tensor(X_train, dtype=torch.float32).to(device)
y_train_tensor = torch.tensor(y_train, dtype=torch.float32).to(device)
X_test_tensor = torch.tensor(X_test, dtype=torch.float32).to(device)
y_test_tensor = torch.tensor(y_test, dtype=torch.float32).to(device)

train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
test_dataset = TensorDataset(X_test_tensor, y_test_tensor)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

# 3. 定义CNN-LSTM混合模型
# 3. Define the CNN-LSTM hybrid model
class CNNLSTM(nn.Module):
    """Conv2d feature extractor over the time axis, followed by an LSTM.

    Expects input of shape (batch, 1, seq_len, 1); returns (batch, 1).
    """

    def __init__(self):
        super(CNNLSTM, self).__init__()
        # Conv/pool operate along the time dimension only (kernel width 1).
        self.cnn = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=(3, 1), stride=1, padding=(1, 0)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 1)),
        )
        self.lstm = nn.LSTM(16, 50, batch_first=True)
        self.fc = nn.Linear(50, 1)

    def forward(self, x):
        x = self.cnn(x)  # (batch, 16, seq_len//2, 1)
        # BUG FIX: the original `x.view(B, T, -1)` reinterpreted the
        # channel-major memory layout of (B, 16, T, 1), scrambling channel
        # and time values across LSTM steps. permute moves channels to the
        # feature axis correctly: (B, T, 16).
        x = x.squeeze(-1).permute(0, 2, 1).contiguous()
        x, _ = self.lstm(x)
        # Regress from the last time step's hidden state.
        return self.fc(x[:, -1, :])

# --- Model, loss, optimizer ----------------------------------------------
model = CNNLSTM().to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# 4. Train the model
num_epochs = 50
train_losses, test_losses = [], []

for epoch in range(num_epochs):
    # Training phase.
    model.train()
    running_loss = 0.0
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        # FIX: squeeze(-1), not squeeze() — a final batch of size 1 would
        # otherwise collapse (1, 1) to a 0-d tensor and trigger an MSELoss
        # shape-broadcast warning against the (1,) target.
        loss = criterion(outputs.squeeze(-1), targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    train_losses.append(running_loss / len(train_loader))

    # Evaluation phase — no gradient tracking needed.
    model.eval()
    test_loss = 0.0
    with torch.no_grad():
        for inputs, targets in test_loader:
            outputs = model(inputs)
            loss = criterion(outputs.squeeze(-1), targets)
            test_loss += loss.item()
    test_losses.append(test_loss / len(test_loader))

    print(f'Epoch {epoch+1}/{num_epochs}, Train Loss: {train_losses[-1]:.4f}, Test Loss: {test_losses[-1]:.4f}')

# 5. 可视化训练损失
plt.figure(figsize=(10, 5))
plt.plot(train_losses, label='Train Loss', color='blue')
plt.plot(test_losses, label='Test Loss', color='red')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training & Validation Loss Curve')
plt.legend()
plt.show()

# 6. Predict on the test set and invert the scaling
model.eval()
pred_chunks, target_chunks = [], []

with torch.no_grad():
    for batch_x, batch_y in test_loader:
        pred_chunks.append(model(batch_x).cpu().numpy().ravel())
        target_chunks.append(batch_y.cpu().numpy().ravel())

# Undo the min-max scaling so plots are in the original signal units.
true_values = scaler.inverse_transform(
    np.concatenate(target_chunks).reshape(-1, 1)).flatten()
predictions = scaler.inverse_transform(
    np.concatenate(pred_chunks).reshape(-1, 1)).flatten()

# Plot actual vs. predicted series
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(true_values, label='Actual', color='green')
ax.plot(predictions, label='Predicted', linestyle='dashed', color='orange')
ax.set_xlabel('Time')
ax.set_ylabel('Value')
ax.set_title('Actual vs Predicted Time Series')
ax.legend()
plt.show()

# 7. 残差分析
residuals = true_values - predictions
plt.figure(figsize=(10, 5))
plt.hist(residuals, bins=30, color='purple', alpha=0.7)
plt.xlabel('Residual Value')
plt.ylabel('Frequency')
plt.title('Residual Distribution')
plt.show()