import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
from collections import deque
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
import pandas as pd
import math
import torch.nn.functional as F
import os
from torch import nn
import yaml

# Script name without the ".py" suffix (not used in this chunk — presumably
# for logging / output-file naming elsewhere; confirm against the rest of the file).
file_name = os.path.basename(__file__)[:-3]
# Single global device; all models below are expected to be moved here by callers.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

import sys
from vehicle_cargo_environment import TransportMatchingEnv
from utils import *

# Load hyperparameters once at import time; every model class below reads its
# defaults from config['c'] (e.g. c['hidden_size']).
with open('config.yaml', 'r', encoding="utf-8") as f:
    config = yaml.safe_load(f)
c = config['c']


class SelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Projects the input into per-head query/key/value spaces, attends,
    re-merges the heads, layer-normalizes, and applies a final linear
    projection.

    NOTE(review): LayerNorm sits *before* the output projection and there is
    no residual connection — unusual ordering; presumably intentional,
    confirm against training results before "fixing".
    """

    def __init__(self, hidden_size, num_heads):
        super(SelfAttention, self).__init__()
        # The hidden dimension must split evenly across the heads.
        assert hidden_size % num_heads == 0

        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.depth = hidden_size // num_heads

        # Joint projections; the per-head split happens in split_heads().
        self.Wq = nn.Linear(hidden_size, hidden_size)
        self.Wk = nn.Linear(hidden_size, hidden_size)
        self.Wv = nn.Linear(hidden_size, hidden_size)

        self.dense = nn.Linear(hidden_size, hidden_size)

        self.layer_norm = nn.LayerNorm(hidden_size)

    def split_heads(self, x, batch_size):
        # (batch, seq, hidden) -> (batch, heads, seq, depth)
        return x.view(batch_size, -1, self.num_heads, self.depth).permute(0, 2, 1, 3)

    def forward(self, x):
        bsz = x.size(0)

        q = self.split_heads(self.Wq(x), bsz)
        k = self.split_heads(self.Wk(x), bsz)
        v = self.split_heads(self.Wv(x), bsz)

        # Scaled dot-product attention, computed per head.
        attn_logits = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.depth)
        attn_weights = F.softmax(attn_logits, dim=-1)

        ctx = torch.matmul(attn_weights, v)
        # Merge the heads back into a single hidden dimension.
        ctx = ctx.transpose(1, 2).contiguous().view(bsz, -1, self.hidden_size)

        return self.dense(self.layer_norm(ctx))


class LSTMCell(nn.Module):
    """A single LSTM time step, implemented from scratch.

    NOTE(review): every nn.Linear already carries its own bias, so the extra
    b_f/b_i/b_c/b_o parameters are redundant (two additive biases per gate).
    Harmless — the sum of two learned biases is still one learned bias — and
    kept as-is to preserve the checkpoint layout.
    """

    def __init__(self, input_size, hidden_size):
        super(LSTMCell, self).__init__()
        self.hidden_size = hidden_size

        # Forget gate
        self.W_f = nn.Linear(input_size + hidden_size, hidden_size)
        self.b_f = nn.Parameter(torch.zeros(hidden_size))

        # Input gate
        self.W_i = nn.Linear(input_size + hidden_size, hidden_size)
        self.b_i = nn.Parameter(torch.zeros(hidden_size))

        # Candidate cell state
        self.W_c = nn.Linear(input_size + hidden_size, hidden_size)
        self.b_c = nn.Parameter(torch.zeros(hidden_size))

        # Output gate
        self.W_o = nn.Linear(input_size + hidden_size, hidden_size)
        self.b_o = nn.Parameter(torch.zeros(hidden_size))

    def forward(self, x, init_states):
        """Advance the cell by one time step.

        :param x: input tensor of shape (batch_size, input_size).
        :param init_states: tuple (h, c) with the previous hidden and cell
                            states, each (batch_size, hidden_size).
        :return: (h_t, c_t) — the next hidden and cell states.
        """
        prev_h, prev_c = init_states

        # All four gates read the same concatenation of input and previous hidden state.
        z = torch.cat((x, prev_h), dim=1)  # (batch, input_size + hidden_size)

        forget = torch.sigmoid(self.W_f(z) + self.b_f)
        update = torch.sigmoid(self.W_i(z) + self.b_i)
        candidate = torch.tanh(self.W_c(z) + self.b_c)

        # Blend the old cell state with the gated candidate.
        new_c = forget * prev_c + update * candidate

        out_gate = torch.sigmoid(self.W_o(z) + self.b_o)
        new_h = out_gate * torch.tanh(new_c)

        return new_h, new_c


class LSTM(nn.Module):
    """Stack of custom LSTMCells followed by an MLP head.

    Each call processes a single time step: the input runs through
    `num_layers` stacked cells (layer i's hidden state feeds layer i+1),
    then the MLP head maps the final cell state to `output_dim`.

    NOTE(review): the head consumes the *cell* state rather than the hidden
    state (see the commented-out alternative at the bottom of forward) —
    looks deliberate in this experimental code; confirm before changing.
    """

    def __init__(self, input_size, output_dim, hidden_size=c['hidden_size'], num_layers=1):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Layer 0 consumes the raw input; deeper layers consume hidden states.
        self.cells = nn.ModuleList(
            [LSTMCell(input_size if i == 0 else hidden_size, hidden_size) for i in range(num_layers)])
        self.fc = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_dim)
        )

    def forward(self, x, init_states=None):
        """Run one step through the stacked cells.

        :param x: (batch, 1, input_size) or (batch, input_size); a singleton
                  middle axis is squeezed away.
        :param init_states: optional ([h per layer], [c per layer]) lists;
                            zero-initialized on x's device when omitted.
        :return: (batch, output_dim) projection of the last layer's cell state.
        """
        x = x.squeeze(1)
        batch_size, _ = x.size()

        if init_states is None:
            h_t = [torch.zeros(batch_size, self.hidden_size).to(x.device) for _ in range(self.num_layers)]
            c_t = [torch.zeros(batch_size, self.hidden_size).to(x.device) for _ in range(self.num_layers)]
        else:
            h_t, c_t = init_states

        for i, cell in enumerate(self.cells):
            h_t[i], c_t[i] = cell(x, (h_t[i], c_t[i]))
            x = h_t[i]  # input for the next layer up

        # Bug fix: project the LAST layer's cell state. The original used
        # c_t[0], which silently ignores all deeper layers when
        # num_layers > 1; identical for the default num_layers=1.
        return self.fc(c_t[-1])
        # Hidden-state alternative kept from the original for reference:
        # return h_t[-1], (h_t, c_t)


class Encoder(nn.Module):
    """LSTM + multi-head self-attention encoder block.

    NOTE(review): `fc` and `fc2` are constructed but never used in forward,
    and `input_dim` is unused (the internal LSTM runs hidden_dim->hidden_dim).
    All kept as-is to preserve the checkpoint layout.
    """

    def __init__(self, input_dim, output_dim, hidden_dim=c['hidden_size'], lstm_layers=2, num_heads=2):
        super(Encoder, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm_layers = lstm_layers
        # Custom single-step LSTM over the embedded state.
        self.lstm = LSTM(hidden_dim, hidden_dim)
        # Multi-head self-attention over the (length-1) sequence.
        self.attention = SelfAttention(hidden_dim, num_heads)

        self.layer_norm = nn.LayerNorm(hidden_dim)
        self.fc = nn.Linear(hidden_dim, output_dim)  # unused in forward
        self.fc1 = nn.Linear(hidden_dim, hidden_dim)

        self.fc2 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim)
        )  # unused in forward

    def forward(self, x):
        """Encode a batch of flat feature vectors.

        :param x: (batch, hidden_dim) tensor — the caller embeds to
                  hidden_dim first; original comment claimed (64, 222),
                  presumably stale — verify against callers.
        :return: (batch, 1, hidden_dim) encoded representation.
        """
        # Give the LSTM output a singleton sequence axis for attention.
        seq = self.lstm(x).unsqueeze(1)
        attended = self.layer_norm(self.attention(seq))
        return self.fc1(attended)

class Decoder(nn.Module):
    """LSTM + feed-forward decoder block."""

    def __init__(self, input_dim, output_dim, hidden_dim=c['hidden_size'], lstm_layers=2, num_heads=2):
        super(Decoder, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm_layers = lstm_layers
        # NOTE(review): the LSTM's input size is output_dim, not input_dim —
        # the visible caller passes matching dims, but confirm the intent.
        self.lstm = LSTM(output_dim, hidden_dim)
        self.feed_forward = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim)
        )

    def forward(self, x):
        """Decode the encoder's context.

        :param x: (batch, 1, dim) tensor; the internal LSTM squeezes the
                  singleton middle axis itself.
        :return: (batch, hidden_dim) decoded representation.
        """
        return self.feed_forward(self.lstm(x))


class LSTMWithAttention(nn.Module):
    """Full model: linear embedding -> Encoder -> Decoder.

    NOTE(review): `fc2` is constructed but never used in forward (kept for
    checkpoint compatibility). The original wrapped the encoder and decoder
    calls in single-iteration `for i in range(1)` loops; those were
    behavioral no-ops and are flattened here.
    """

    def __init__(self, input_dim, output_dim, hidden_dim=c['hidden_size'], lstm_layers=2, num_heads=2):
        super(LSTMWithAttention, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm_layers = lstm_layers
        self.fc = nn.Linear(input_dim, hidden_dim)
        self.encoder = Encoder(hidden_dim, output_dim, hidden_dim=hidden_dim, lstm_layers=lstm_layers)
        self.decoder = Decoder(hidden_dim, hidden_dim, hidden_dim=hidden_dim, lstm_layers=lstm_layers)
        self.fc2 = nn.Linear(hidden_dim, output_dim)  # unused in forward

    def forward(self, x):
        """Run the encode/decode pipeline.

        :param x: (batch, input_dim) flat state vector, e.g. (64, 462).
        :return: decoder output, (batch, hidden_dim).
        """
        embedded = self.fc(x)            # (batch, hidden_dim)
        encoded = self.encoder(embedded)
        return self.decoder(encoded)
