import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from datetime import datetime, timedelta
from sqlalchemy import create_engine, Column, Integer, Float, Date
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from config import DB_URL

# Declarative base shared by the ORM models in this module.
# NOTE(review): sqlalchemy.ext.declarative.declarative_base is deprecated in
# SQLAlchemy 1.4+ in favor of sqlalchemy.orm.declarative_base — confirm the
# pinned SQLAlchemy version before migrating.
Base = declarative_base()


class WeatherPrediction(Base):
    """ORM row for a single forecasted day: its high/low temperature and date.

    Populated by ``train_and_predict``, which inserts one row per predicted
    day into the ``weather_predictions`` table.
    """
    __tablename__ = 'weather_predictions'
    id = Column(Integer, primary_key=True)
    day = Column(Integer)            # forecast horizon offset (1 = tomorrow)
    high_temp = Column(Float)        # predicted daily high, same unit as input CSV
    low_temp = Column(Float)         # predicted daily low, same unit as input CSV
    prediction_date = Column(Date)   # calendar date the forecast applies to


class _CNNLSTMModel(nn.Module):
    """1D-CNN feature extractor followed by an LSTM and a linear head.

    Input is (batch, 2, seq_len): the two channels are the scaled high and
    low temperatures. The conv layer keeps the length (padding=1), the max
    pool halves it, then the LSTM consumes the resulting sequence and the
    final hidden step is projected to ``output_size`` values.
    """

    def __init__(self, hidden_size, num_layers, output_size, cnn_channels=32):
        super().__init__()
        self.cnn = nn.Sequential(
            nn.Conv1d(in_channels=2, out_channels=cnn_channels, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2),
        )
        # batch_first so the LSTM takes (batch, seq, features) directly.
        self.lstm = nn.LSTM(input_size=cnn_channels, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.cnn(x)                      # (batch, channels, seq_len // 2)
        x = x.permute(0, 2, 1)               # (batch, seq_len // 2, channels)
        lstm_out, _ = self.lstm(x)
        return self.fc(lstm_out[:, -1, :])   # use only the last time step


def _create_sequences(features, seq_length):
    """Slice ``features`` into supervised pairs.

    Each X is a window of ``seq_length`` consecutive rows and each y is the
    row immediately after the window.  Uses ``len(features) - seq_length``
    windows (the original ``- seq_length - 1`` dropped the last valid sample).
    """
    xs, ys = [], []
    for i in range(len(features) - seq_length):
        xs.append(features[i:i + seq_length])
        ys.append(features[i + seq_length])
    return np.array(xs), np.array(ys)


def _train_one_epoch(model, loader, criterion, optimizer):
    """Run one training epoch; return (mean loss, mean RMSE) over all samples."""
    model.train()
    total_loss = 0.0
    total_rmse = 0.0
    n_samples = 0
    for batch_X, batch_y in loader:
        optimizer.zero_grad()
        outputs = model(batch_X)
        loss = criterion(outputs, batch_y)
        loss.backward()
        optimizer.step()
        batch_size = batch_X.size(0)
        # Weight per-batch metrics by batch size so partial batches count fairly.
        total_loss += loss.item() * batch_size
        total_rmse += np.sqrt(mean_squared_error(batch_y.numpy(), outputs.detach().numpy())) * batch_size
        n_samples += batch_size
    return total_loss / n_samples, total_rmse / n_samples


def _evaluate(model, loader, criterion):
    """Evaluate without gradients; return (mean loss, mean RMSE) over all samples."""
    model.eval()
    total_loss = 0.0
    total_rmse = 0.0
    n_samples = 0
    with torch.no_grad():
        for batch_X, batch_y in loader:
            outputs = model(batch_X)
            loss = criterion(outputs, batch_y)
            batch_size = batch_X.size(0)
            total_loss += loss.item() * batch_size
            total_rmse += np.sqrt(mean_squared_error(batch_y.numpy(), outputs.numpy())) * batch_size
            n_samples += batch_size
    return total_loss / n_samples, total_rmse / n_samples


def _forecast(model, last_sequence, horizon):
    """Autoregressively predict ``horizon`` steps from ``last_sequence`` (1, 2, seq_len).

    Each prediction (scaled high/low pair) is appended to the window and the
    oldest step is dropped, so the model always sees a full-length sequence.
    Returns a list of ``horizon`` numpy arrays of shape (2,).
    """
    model.eval()
    predictions = []
    window = last_sequence
    for _ in range(horizon):
        with torch.no_grad():
            pred = model(window)                         # (1, 2)
        predictions.append(pred.squeeze().numpy())
        # Slide the window: drop the oldest step, append the new prediction.
        window = torch.cat((window[:, :, 1:], pred.unsqueeze(-1)), dim=2)
    return predictions


def _store_predictions(high_temps, low_temps):
    """Persist one WeatherPrediction row per forecast day; return them as dicts.

    Day 1 corresponds to tomorrow relative to the local current date.  The
    session is closed in a ``finally`` so a failed commit does not leak it.
    """
    engine = create_engine(DB_URL)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    start_date = datetime.now().date()
    predictions = []
    try:
        for day, (high, low) in enumerate(zip(high_temps, low_temps), start=1):
            prediction_date = start_date + timedelta(days=day)
            session.add(WeatherPrediction(
                day=day,
                high_temp=high,
                low_temp=low,
                prediction_date=prediction_date,
            ))
            predictions.append({
                'day': day,
                'high_temp': high,
                'low_temp': low,
                'prediction_date': prediction_date,
            })
        session.commit()
    finally:
        session.close()
    return predictions


def train_and_predict(csv_file_path, seq_length=50, num_epochs=100, learning_rate=0.001):
    """Train a CNN-LSTM on daily high/low temperatures and forecast 15 days.

    Reads the CSV at ``csv_file_path`` (expects columns '最高气温' / '最低气温'
    — daily high / low temperature), scales both to [-1, 1], trains on an
    80/20 chronological split, autoregressively forecasts 15 days ahead,
    stores the forecast in the ``weather_predictions`` table, and returns
    the forecast plus per-epoch training/test metrics.

    Args:
        csv_file_path: path to the input CSV.
        seq_length: length of the input window fed to the model.
        num_epochs: number of training epochs.
        learning_rate: Adam learning rate.

    Returns:
        dict with keys 'predictions' (list of per-day dicts), 'train_loss',
        'train_rmse', 'test_loss', 'test_rmse' (per-epoch histories).
    """
    data = pd.read_csv(csv_file_path)

    # Two feature columns: daily high and low temperature.
    features = np.stack((data['最高气温'].values, data['最低气温'].values), axis=-1)

    # Scale both columns jointly to [-1, 1]; the same scaler inverts the forecast.
    scaler_temp = MinMaxScaler(feature_range=(-1, 1))
    features = scaler_temp.fit_transform(features)

    X, y = _create_sequences(features, seq_length)
    X = torch.from_numpy(X).float().permute(0, 2, 1)  # (batch, channels=2, seq_len)
    y = torch.from_numpy(y).float()

    # Chronological 80/20 split (no shuffling across the split boundary).
    train_size = int(len(X) * 0.8)
    train_loader = DataLoader(TensorDataset(X[:train_size], y[:train_size]),
                              batch_size=64, shuffle=True)
    test_loader = DataLoader(TensorDataset(X[train_size:], y[train_size:]),
                             batch_size=64, shuffle=False)

    # hidden_size/num_layers/output_size as in the original configuration.
    model = _CNNLSTMModel(hidden_size=64, num_layers=2, output_size=2)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    train_loss_history, train_rmse_history = [], []
    test_loss_history, test_rmse_history = [], []

    for epoch in range(num_epochs):
        train_loss, train_rmse = _train_one_epoch(model, train_loader, criterion, optimizer)
        test_loss, test_rmse = _evaluate(model, test_loader, criterion)

        train_loss_history.append(train_loss)
        train_rmse_history.append(train_rmse)
        test_loss_history.append(test_loss)
        test_rmse_history.append(test_rmse)

        if (epoch + 1) % 10 == 0:
            print(
                f'Epoch [{epoch + 1}/{num_epochs}], '
                f'Train Loss: {train_loss:.4f}, Train RMSE: {train_rmse:.4f}, '
                f'Test Loss: {test_loss:.4f}, Test RMSE: {test_rmse:.4f}'
            )

    # Forecast 15 days ahead, seeded with the most recent observed window.
    future_predictions = _forecast(model, X[-1].unsqueeze(0), horizon=15)

    # Invert the scaling for all 15 (high, low) pairs in a single call
    # instead of two inverse_transform calls per predicted day.
    future = scaler_temp.inverse_transform(np.array(future_predictions))
    future_high_temp = future[:, 0].tolist()
    future_low_temp = future[:, 1].tolist()

    predictions = _store_predictions(future_high_temp, future_low_temp)

    return {
        'predictions': predictions,
        'train_loss': train_loss_history,
        'train_rmse': train_rmse_history,
        'test_loss': test_loss_history,
        'test_rmse': test_rmse_history,
    }


