import ast
import calendar
import io
import os
import pickle
from datetime import datetime
from uuid import uuid4

import torch
import torch.nn as nn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Text, Boolean, SmallInteger, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker

app = FastAPI()

# BUG FIX: the /train endpoint checks `model is None`, but the global was
# never defined at module level — hitting /train before /initialize_model
# raised NameError (HTTP 500) instead of the intended 400. Define it here.
model = None

# CNN + LSTM forecasting model: per-sequence Conv1d feature extraction,
# adaptive pooling to a fixed temporal length, then an LSTM whose final
# time step is projected down to the output dimension.
class CNN_LSTM_Model(nn.Module):
    def __init__(self, input_dim, cnn_out_channels, lstm_hidden_size, output_dim, fixed_length=30):
        """Args:
            input_dim: number of features per time step.
            cnn_out_channels: channels produced by the 1-D convolution.
            lstm_hidden_size: LSTM hidden-state dimension.
            output_dim: size of the final prediction vector.
            fixed_length: temporal length every sequence is pooled to.
        """
        super().__init__()
        self.fixed_length = fixed_length
        self.conv1d = nn.Conv1d(in_channels=input_dim, out_channels=cnn_out_channels, kernel_size=3, padding=1)
        self.pool = nn.AdaptiveAvgPool1d(output_size=fixed_length)
        self.lstm = nn.LSTM(input_size=cnn_out_channels, hidden_size=lstm_hidden_size, batch_first=True)
        self.fc = nn.Linear(lstm_hidden_size, output_dim)

    def forward(self, x):
        """Map a list of (seq_len, input_dim) tensors to (batch, output_dim)."""
        pooled = []
        for seq in x:
            # Conv1d expects (batch, channels, length): transpose and add a
            # batch axis, convolve, then pool to the fixed temporal length.
            feats = self.conv1d(seq.permute(1, 0).unsqueeze(0))
            pooled.append(self.pool(feats).squeeze(0).permute(1, 0))
        # Every pooled sequence now has identical length, so padding here
        # simply batches them into one (batch, fixed_length, channels) tensor.
        batch = torch.nn.utils.rnn.pad_sequence(pooled, batch_first=True)
        lstm_out, _ = self.lstm(batch)
        # Use only the last LSTM step for the prediction head.
        return self.fc(lstm_out[:, -1, :])

# Training routine
def train_model(model, data, targets, epochs=5, learning_rate=0.001):
    """Train *model* with MSE loss and Adam over full-batch epochs.

    Args:
        model: module mapping a list of (seq_len, input_dim) tensors to a
            (batch, output_dim) prediction tensor.
        data: list of input sequence tensors (variable lengths allowed).
        targets: sequence of per-sample targets (tensors or nested lists).
        epochs: number of full passes over ``data``.
        learning_rate: Adam step size.
    """
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    # BUG FIX: the original rebuilt the target tensor every epoch via
    # torch.tensor(list-of-tensors), which warns, copies each time, and
    # rejects multi-element target tensors. Build it once with stack.
    target_tensor = torch.stack(
        [torch.as_tensor(t, dtype=torch.float32) for t in targets])
    for epoch in range(epochs):
        optimizer.zero_grad()
        outputs = model(data)
        loss = criterion(outputs, target_tensor)
        loss.backward()
        optimizer.step()
        print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")

# Rolling (autoregressive) forecast
def rolling_forecast(model, initial_data, predict_start_date):
    """Predict one value per day from *predict_start_date* to month end.

    Args:
        model: callable taking a one-element list holding a (days, features)
            tensor and returning a prediction tensor with a single element.
        initial_data: history window, nested list convertible to a
            (days, features) float32 tensor.
        predict_start_date: "YYYY-MM-DD" date of the first predicted day.

    Returns:
        List of float predictions, one per remaining day of that month
        (start date inclusive).
    """
    start = datetime.strptime(predict_start_date, "%Y-%m-%d")
    days_in_month = calendar.monthrange(start.year, start.month)[1]
    remaining_days = days_in_month - start.day + 1

    predictions = []
    current_data = torch.tensor(initial_data, dtype=torch.float32)
    # Inference only: disable autograd so the growing window does not
    # accumulate a gradient graph (and memory) across iterations.
    with torch.no_grad():
        for _ in range(remaining_days):
            output = model([current_data]).item()
            predictions.append(output)
            # Feed the prediction back as the next day's row.
            # NOTE(review): the scalar prediction is replicated into EVERY
            # feature column — this assumes all features share the target's
            # scale/meaning. Confirm this is intended.
            next_step = torch.tensor([[output] * current_data.shape[1]], dtype=torch.float32)
            current_data = torch.cat((current_data, next_step), dim=0)
    return predictions

# Database connection
def connected_to_db():
    """Create a SQLAlchemy engine for the energy-prediction MySQL database.

    SECURITY: the original hard-coded the credentials in source. They can
    now be overridden via environment variables; the defaults keep the
    original values so existing deployments are unaffected.

    Returns:
        A lazily-connecting SQLAlchemy Engine (no connection is opened
        until first use).
    """
    db_config = {
        'user': os.environ.get('DB_USER', 'root'),
        'password': os.environ.get('DB_PASSWORD', '3417'),
        'host': os.environ.get('DB_HOST', 'localhost'),
        'port': os.environ.get('DB_PORT', '3306'),
        'database': os.environ.get('DB_NAME', 'day_energy_prediction'),
        'charset': 'utf8mb4',
    }
    connection_url = (
        f"mysql+pymysql://{db_config['user']}:{db_config['password']}@"
        f"{db_config['host']}:{db_config['port']}/{db_config['database']}?"
        f"charset={db_config['charset']}"
    )
    return create_engine(connection_url)

# Declarative base shared by the ORM model classes below
Base = declarative_base()
# ORM mapping for the `model_trained` table: one serialized model per
# (user_type, vol_level) pair.
class ModelTrained(Base):
    __tablename__ = 'model_trained'  # table name in the database

    # BUG FIX: the original used default=str(uuid4()), which evaluates the
    # UUID ONCE at class-definition time, so every row relying on the
    # default received the same primary key. A callable defers generation
    # to insert time, yielding a fresh UUID per row.
    id = Column(String, primary_key=True, default=lambda: str(uuid4()))
    # Stringified pickle bytes of the trained model (repr of a bytes object).
    model_saved = Column(Text)
    user_type = Column(String)
    vol_level = Column(String)  # maps to VARCHAR(50); may also represent CHAR(50)

# FastAPI request schemas
class ModelInitRequest(BaseModel): # parameters for initializing the model
    input_dim: int  # number of input variables, e.g. [history load, temperature, humidity] = 3
    cnn_out_channels: int  # conv channels: projects the variables to a higher/lower-dim space
    lstm_hidden_size: int  # dimension of the LSTM hidden layer
    output_dim: int  # output dimension, e.g. [energy] = 1
    fixed_length: int  # day counts vary, so sequences are pooled to this fixed length; 30 recommended

class TrainRequest(BaseModel):  # parameters for training the model
    user_type: str  # user category
    vol_level: str  # voltage level
    data: list  # model training inputs
    targets: list  # model training targets

class PredictRequest(BaseModel):  # parameters for prediction
    user_type: str  # user category
    vol_level: str  # voltage level
    initial_data: list  # input data supplied at prediction time
    predict_start_date: str  # date of the first day to predict

# FastAPI endpoints
@app.post("/initialize_model")
async def initialize_model(model_params: ModelInitRequest):
    """Build a fresh CNN_LSTM_Model from the request and store it globally."""
    global model
    model = CNN_LSTM_Model(
        model_params.input_dim,
        model_params.cnn_out_channels,
        model_params.lstm_hidden_size,
        model_params.output_dim,
        model_params.fixed_length,
    )
    return {
        "message": "Model initialized successfully",
        "model_parameters": model_params,
    }

@app.post("/train")
async def train_model_endpoint(request: TrainRequest):
    """Train the in-memory model and persist it keyed on (user_type, vol_level).

    Raises:
        HTTPException(400): the global model has not been initialized.
        HTTPException(500): the database write failed.
    """
    # globals().get avoids a NameError when /train is hit before
    # /initialize_model ever ran (the bare name may be undefined).
    if globals().get("model") is None:
        raise HTTPException(status_code=400, detail="Model is not initialized")
    data_tensors = [torch.tensor(seq, dtype=torch.float32) for seq in request.data]
    target_tensors = [torch.tensor(target, dtype=torch.float32) for target in request.targets]
    train_model(model, data_tensors, target_tensors)

    # NOTE(review): str(bytes) stores the repr "b'...'" in a Text column;
    # the predict endpoint must literal_eval it back to bytes. A binary
    # column with raw pickle bytes (or base64) would be cleaner, but the
    # format is kept for compatibility with already-stored rows.
    buffer_nn = io.BytesIO()
    pickle.dump(model, buffer_nn)
    buffer_nn_content = str(buffer_nn.getvalue())

    engine = connected_to_db()
    # Open a short-lived session for this request.
    session = sessionmaker(bind=engine)()
    try:
        existing = session.query(ModelTrained).filter(
            ModelTrained.user_type == request.user_type,
            ModelTrained.vol_level == request.vol_level).first()
        if existing:
            # A model for this key already exists: update it in place.
            existing.model_saved = buffer_nn_content
        else:
            # First model for this key: insert a new row.
            session.add(ModelTrained(
                id=str(uuid4()),
                model_saved=buffer_nn_content,
                user_type=request.user_type,
                vol_level=request.vol_level))
        session.commit()
        return {"message": "模型训练完毕！"}
    except Exception as e:
        session.rollback()  # roll back the transaction on failure
        # Surface the failure instead of printing and implicitly returning
        # None (the original answered HTTP 200 with a null body on error).
        raise HTTPException(status_code=500, detail=f"Model persistence failed: {e}") from e
    finally:
        session.close()  # always release the session

@app.post("/predict")
async def predict(request: PredictRequest):
    """Load the stored model for (user_type, vol_level) and forecast to month end.

    Raises:
        HTTPException(404): no trained model is stored for this key.
        HTTPException(500): lookup, deserialization, or prediction failed.
    """
    engine = connected_to_db()
    # Open a short-lived session for this request.
    session = sessionmaker(bind=engine)()
    try:
        row = session.query(ModelTrained).filter(
            ModelTrained.user_type == request.user_type,
            ModelTrained.vol_level == request.vol_level).first()
        if not row:
            # BUG FIX: the original returned a *set* literal here, which is
            # not a JSON-serializable response body; answer 404 instead.
            raise HTTPException(status_code=404, detail="没有取到对应的模型，请先训练后应用！")
        # BUG FIX: model_saved holds str(bytes) — the repr "b'...'". The
        # original passed that str straight to io.BytesIO, which raises
        # TypeError; literal_eval recovers the actual pickle bytes and
        # stays compatible with rows already written by /train.
        raw = row.model_saved
        if isinstance(raw, str):
            raw = ast.literal_eval(raw)
        # SECURITY: unpickling executes arbitrary code if the table is ever
        # writable by untrusted parties — acceptable only for trusted DBs.
        model_nn = pickle.load(io.BytesIO(raw))
        predictions = rolling_forecast(model_nn, request.initial_data, request.predict_start_date)
        return {"predictions": predictions}
    except HTTPException:
        raise  # pass through deliberate HTTP errors (e.g. the 404 above)
    except Exception as e:
        # Surface the failure instead of printing and returning a null 200.
        raise HTTPException(status_code=500, detail=f"Prediction failed: {e}") from e
    finally:
        session.close()  # always release the session

