'''
Defines the dataset wrapper and the neural-network building blocks / model
used for price prediction.
'''
from math import sqrt
import torch
from torch import nn
from torch.utils.data import Dataset
import numpy as np

'''
NOTE: keep the two lines below. Since torch 1.12, `allow_tf32` for matmul
defaults to False; enabling TF32 trades a small amount of matmul precision
for speed on Ampere-and-newer GPUs. (It has no effect on pre-Ampere cards
such as the GTX 1060, which is Pascal architecture.)
'''
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

class PriceDataSet(Dataset):
    def __init__(self,data:torch.Tensor, label:torch.Tensor=None,device='cpu') -> None:
        self.data = data
        self.label = label
        self.len = data.shape[0]
        super().__init__()
    def __getitem__(self, index) -> torch.Tensor | tuple[torch.Tensor,torch.Tensor]:
        if self.label is None:
            return self.data[index]
        else:
            return self.data[index], self.label[index]
    def __len__(self) -> int:
        return self.len
    
class Attemtion(nn.Module):
    '''
    Multi-head scaled dot-product attention with per-head Q/K/V projections.

    (The misspelled class name is kept: external code instantiates it by name.)

    Args:
        input_dim:   width of each input token; must be divisible by `heads`.
        features_ex: total output width; must be divisible by `heads`.
        heads:       number of attention heads.
    '''
    def __init__(self, input_dim:int, features_ex:int,heads:int = 1):
        super().__init__()
        self.input_dim = input_dim
        self.features_ex = features_ex
        self.heads = heads
        assert features_ex % heads == 0 and input_dim % heads == 0, 'not fit'
        self.num_per_features_ex = features_ex // heads
        self.num_per_input_dim = input_dim // heads

        # Per-head projection weights/biases for query, key and value.
        self.w_q = nn.Parameter(torch.randn(heads, self.num_per_input_dim, self.num_per_features_ex))
        self.b_q = nn.Parameter(torch.randn(heads, 1, self.num_per_features_ex))

        self.w_k = nn.Parameter(torch.randn(heads, self.num_per_input_dim, self.num_per_features_ex))
        self.b_k = nn.Parameter(torch.randn(heads, 1, self.num_per_features_ex))

        self.w_v = nn.Parameter(torch.randn(heads, self.num_per_input_dim, self.num_per_features_ex))
        self.b_v = nn.Parameter(torch.randn(heads, 1, self.num_per_features_ex))

    def forward(self, k:torch.Tensor, v:torch.Tensor, q:torch.Tensor) -> torch.Tensor:
        '''
        k, v, q: (batch, num, input_dim) -> returns (batch, num, features_ex).
        '''
        batch, num, dim = k.shape

        # Split the feature axis per head BEFORE projecting: a plain matmul is
        # not separable over the columns of its left operand, so each head must
        # see only its own slice of the input.
        q = torch.matmul(q.reshape(batch, num, self.heads, 1, self.num_per_input_dim), self.w_q) + self.b_q
        k = torch.matmul(k.reshape(batch, num, self.heads, 1, self.num_per_input_dim), self.w_k) + self.b_k
        v = torch.matmul(v.reshape(batch, num, self.heads, 1, self.num_per_input_dim), self.w_v) + self.b_v

        # (batch, heads, num, d_k) layout for the attention matmuls.
        q = q.reshape(batch, num, self.heads, self.num_per_features_ex).transpose(1, 2)
        k = k.reshape(batch, num, self.heads, self.num_per_features_ex).transpose(1, 2)
        v = v.reshape(batch, num, self.heads, self.num_per_features_ex).transpose(1, 2)

        # FIX: scale logits by 1/sqrt(d_k) (standard scaled dot-product
        # attention); the original omitted the scaling, which saturates the
        # softmax for large head dimensions. `sqrt` is already imported.
        dist = torch.matmul(q, k.transpose(2, 3)) / sqrt(self.num_per_features_ex)
        dist = torch.softmax(dist, dim=-1)

        # Merge heads back into one feature axis.
        output = torch.matmul(dist, v).transpose(1, 2).reshape(batch, num, self.features_ex)
        return output

class Encoder(nn.Module):
    '''
    Transformer-style encoder layer built on the Attemtion block:
    self-attention -> linear projection -> residual add -> LayerNorm -> ReLU.
    Input and output shape: (batch, num, input_dim).
    '''
    def __init__(self, input_dim:int, features_ex:int, heads:int = 1) -> None:
        super().__init__()
        self.input_dim = input_dim
        self.attentions = Attemtion(input_dim, features_ex, heads)
        self.linear = nn.Linear(features_ex, input_dim)
        self.norm = nn.LayerNorm(input_dim)

        # self.pool = nn.Dropout(0.2)  # dropout disabled by the original author
        self.activate = nn.ReLU()

    def forward(self, input:torch.Tensor)  -> torch.Tensor:
        '''Run one encoder pass over `input`.'''
        residual = input
        projected = self.linear(self.attentions(input, input, input))
        # output = self.pool(output)
        normed = self.norm(projected + residual)
        return self.activate(normed)

class MHP_RNN(nn.Module):
    '''
    Multi-head "parallel" RNN cell processing one whole batch per call.

    Each of `heads` heads owns its own slice of the input and hidden state.
    A single persistent hidden state (buffer `hiden_state`; misspelled name
    kept for checkpoint compatibility) is shared across calls; during training
    it is replaced by the batch-mean of the new hidden activations.

    Args:
        input_size:  total input width; must be divisible by `heads`.
        hidden_size: total hidden/output width; must be divisible by `heads`.
        heads:       number of independent heads.
        with_deep:   add a residual connection (requires input_size == hidden_size).
    '''
    def __init__(self, input_size,hidden_size,heads=1, with_deep=False) -> None:
        assert input_size % heads == 0 and hidden_size % heads == 0, 'heads not fit!'
        assert not with_deep or input_size == hidden_size, 'input_size and output_size is not fit'
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.heads = heads
        self.with_deep = with_deep
        self.per_head_input_size = input_size // heads
        self.per_head_hidden_size = hidden_size // heads

        # Persistent per-head hidden state, shared by every sample in a batch.
        self.register_buffer('hiden_state', torch.randn(heads, 1, self.per_head_hidden_size))

        self.b_h = nn.Parameter(torch.randn(heads, 1, self.per_head_hidden_size))
        self.w_xh = nn.Parameter(torch.randn(heads, self.per_head_input_size, self.per_head_hidden_size))
        self.w_hh = nn.Parameter(torch.randn(heads, self.per_head_hidden_size, self.per_head_hidden_size))

        self.w_hy = nn.Parameter(torch.randn(heads, self.per_head_hidden_size, self.per_head_hidden_size))
        self.b_y = nn.Parameter(torch.randn(heads, 1, self.per_head_hidden_size))

        # self.pool = nn.Dropout(0.2)
        self.activate = nn.Tanh()

    def forward(self,input_data:torch.Tensor) -> torch.Tensor:
        '''input_data: (batch, input_size) -> returns (batch, hidden_size).'''
        batch, dim = input_data.shape
        # Split the feature axis across heads: (batch, heads, 1, per_head_input).
        input_data = input_data.reshape(batch, self.heads, 1, self.per_head_input_size)

        tmp = torch.matmul(input_data, self.w_xh) + torch.matmul(self.hiden_state, self.w_hh) + self.b_h
        hidden = self.activate(tmp)

        if self.training:
            # Carry the batch-mean hidden state over to the next call (no grad).
            self.hiden_state = torch.mean(hidden.detach(),dim=0)

        # FIX: the output bias was `self.b_h` (the hidden bias) while `self.b_y`
        # was defined but never used — clearly the intended output bias.
        out = torch.matmul(hidden, self.w_hy) + self.b_y
        out = out.reshape(batch, self.hidden_size)
        if self.with_deep:
            out = out + input_data.reshape(batch, dim)
        # out = self.pool(out)
        return out

class Tokenizer(nn.Module):
    '''
    Expand a flat feature vector into per-feature token embeddings.

    Builds the outer product x @ x^T of the input vector, then maps each of its
    rows through a small MLP: (batch, featur_num) -> (batch, featur_num, featur_ex).
    '''
    def __init__(self, featur_num:int, featur_ex:int) -> None:
        super().__init__()
        # Attribute name `weight` and layer order kept for state_dict compatibility.
        self.weight = nn.Sequential(
            nn.Linear(featur_num, featur_num + 2),
            nn.ReLU(),
            nn.Linear(featur_num + 2, featur_ex),
        )

    def forward(self, input:torch.Tensor):
        # Outer product of the feature vector with itself: (..., n) -> (..., n, n).
        column = input.unsqueeze(-1)
        gram = column @ column.transpose(-1, -2)
        return self.weight(gram)

class DLPriceModel(nn.Module):
    '''
    End-to-end price model:
    tokenize -> encoder stack -> Conv2d -> multi-head RNN stack -> linear head.

    Args:
        features_num: number of input features.
        features_ex:  expanded dimension of each feature token.
        at_heads:     number of attention heads per encoder layer.
        en_deepth:    number of stacked Encoder layers.
        kernel_size:  side length of the square Conv2d kernel.
        rnn_deep:     number of stacked residual MHP_RNN layers.
    '''
    def __init__(self, features_num:int, features_ex:int, at_heads:int = 1, en_deepth:int = 6, kernel_size:int=5, rnn_deep:int=1) -> None:
        self.at_heads = at_heads
        self.en_deepth = en_deepth
        self.kernel_size = kernel_size
        self.rnn_deep = rnn_deep

        super().__init__()
        self.tokenizer = Tokenizer(features_num,features_ex)

        # Encoder stack (submodule creation order kept for RNG reproducibility).
        self.en_weights = nn.Sequential(
            *(Encoder(features_ex, features_ex, at_heads) for _ in range(en_deepth))
        )

        # Single-channel conv over the (features_num x features_ex) token map.
        self.con = nn.Conv2d(1, 1, kernel_size=kernel_size)

        # Flattened size after the valid (no-padding) convolution.
        rnn_input_size = (features_ex - kernel_size + 1) * (features_num - kernel_size + 1)
        # Head count chosen so hidden_size = heads**2 divides evenly by heads.
        self.rnn_heads = int(sqrt(sqrt(rnn_input_size) + 1))
        rnn_hidden_size = self.rnn_heads**2

        self.rd = MHP_RNN(input_size=rnn_input_size,hidden_size=rnn_hidden_size)
        self.rnn_weights = nn.Sequential(
            *(MHP_RNN(
                input_size=rnn_hidden_size, hidden_size=rnn_hidden_size,
                heads=self.rnn_heads, with_deep=True)
              for _ in range(rnn_deep))
        )
        self.get_out = nn.Linear(rnn_hidden_size, 1)

    def forward(self, input_data:torch.Tensor) -> torch.Tensor:
        '''(batch, features_num) -> (batch, 1) price prediction.'''
        tokens = self.tokenizer(input_data)              # (batch, features_num, features_ex)
        encoded = self.en_weights(tokens)
        conved = self.con(encoded.unsqueeze(-3))         # add a channel dim for Conv2d
        flat = torch.flatten(conved.squeeze(-3), -2, -1)
        hidden = self.rnn_weights(self.rd(flat))
        return self.get_out(hidden)

    def save_to_onnx(self, input_data:np.ndarray, path:str):
        '''Export the model (moved to CPU, eval mode) to ONNX with a dynamic batch axis.'''
        torch.onnx.export(
            self.to('cpu'),
            torch.tensor(input_data).float(),
            path,
            training=torch.onnx.TrainingMode.EVAL,
            do_constant_folding=False,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes={
                'input':{0:'batch'},
                'output':{0:'batch'}
            }
        )

    def get_name(self) -> str:
        '''Identifier string encoding the model's hyper-parameters.'''
        return f'DLPriceModel-{self.at_heads}-{self.en_deepth}-{self.kernel_size}-{self.rnn_deep}-{self.rnn_heads}'
