'''
Building blocks (dataset wrapper, attention, encoder) and the price models
built from them, plus a training/inference wrapper.
'''
from math import sqrt
import torch
from torch import nn
from torch.optim import Adam
from torch.utils.data import Dataset,DataLoader
from tqdm import tqdm
import numpy as np

class PriceDataSet(Dataset):
    '''
    Minimal in-memory dataset over pre-built tensors.

    When `label` is omitted the dataset yields bare samples (inference
    mode); otherwise it yields (sample, label) pairs (training mode).
    '''
    def __init__(self, data:torch.Tensor, label:torch.Tensor=None) -> None:
        super().__init__()
        self.data = data
        self.label = label
        # Number of samples is the leading dimension of `data`.
        self.len = data.shape[0]

    def __getitem__(self, index):
        sample = self.data[index]
        return sample if self.label is None else (sample, self.label[index])

    def __len__(self):
        return self.len
    
class Attemtion(nn.Module):
    def __init__(self, input_dim:int, features_ex:int,):
        super().__init__()
        self.wq = nn.Linear(input_dim, features_ex)
        self.wk = nn.Linear(input_dim, features_ex)
        self.wv = nn.Linear(input_dim, features_ex)
        self.activate = nn.ReLU()
    
    def forward(self, k:torch.Tensor, v:torch.Tensor, q:torch.Tensor) -> torch.Tensor:
        output = torch.matmul(
            self.__attemtion_op(self.wk(k), self.wv(v)),
            self.wq(q)
        )
        return self.activate(output)
        
    def __attemtion_op(self, k:torch.Tensor, v:torch.Tensor) -> torch.Tensor:
        '''
        注意力操作，用于计算注意力分数
        '''
        attention = torch.matmul(k, v.transpose(1,2))
        # scale?
        # attention /= sqrt(k.shape[-1])
        # score
        output = nn.Softmax(0)(attention)
        return output

class Attemtions(nn.Module):
    '''
    Multi-head attention built from independent per-head `Attemtion` modules.

    The last (feature) dimension of each input is split evenly across
    `heads`; each chunk goes through its own head and the head outputs are
    concatenated back along the feature dimension.

    Raises:
        ValueError: if `input_dim` or `features_ex` is not divisible by `heads`.
    '''
    def __init__(self, input_dim:int, features_ex:int, heads:int = 1) -> None:
        super().__init__()
        # Both dimensions must split evenly across the heads.
        if 0 != features_ex % heads or 0 != input_dim % heads:
            # The original `raise f'...'` raised a plain string, which is a
            # TypeError in Python 3; raise a real exception instead.
            raise ValueError(f'{features_ex}%{heads} or {input_dim}%{heads} is not fit!')

        self.input_dim = input_dim
        self.features_ex = features_ex
        self.heads = heads

        self.input_dim_per_head = input_dim//heads
        self.features_ex_per_head = features_ex//heads

        # One independent attention module per head.
        self.attention_weights = nn.ModuleList()
        for i in range(heads):
            self.attention_weights.append(Attemtion(self.input_dim_per_head, self.features_ex_per_head))
        # Non-linearity applied after the heads are merged.
        self.activate = nn.ReLU()

    def forward(self, k:torch.Tensor, v:torch.Tensor, q:torch.Tensor) -> torch.Tensor:
        '''
        Run every head on its chunk of k/v/q and merge the results.
        For self-attention pass the same tensor as all three arguments.
        '''
        outputs = []
        # Split the feature (last) dimension into one chunk per head.
        k = torch.chunk(k, self.heads, -1)
        v = torch.chunk(v, self.heads, -1)
        q = torch.chunk(q, self.heads, -1)
        for index, attention_weight in enumerate(self.attention_weights):
            outputs.append(attention_weight(k[index], v[index], q[index]))
        # Concatenate the per-head outputs along the feature dimension.
        output = torch.concat(outputs, dim=-1)
        return self.activate(output)
    
class Encoder(nn.Module):
    '''
    Encoder block built on the attention modules above:
    self-attention -> linear projection back to input_dim -> residual add
    -> LayerNorm -> ReLU.
    '''
    def __init__(self, input_dim:int, features_ex:int, heads:int = 1) -> None:
        super().__init__()
        self.attentions = Attemtions(input_dim, features_ex, heads)
        self.linear = nn.Linear(features_ex, input_dim)
        self.norm = nn.LayerNorm(input_dim)
        self.activate = nn.ReLU()

    def forward(self, input:torch.Tensor) -> torch.Tensor:
        '''
        Run one encoder pass; output shape matches the input shape.
        '''
        residual = input
        attn_out = self.attentions(input, input, input)
        # Project back to input_dim and add the residual connection.
        normed = self.norm(self.linear(attn_out) + residual)
        return self.activate(normed)

class Tokenizer(nn.Module):
    def __init__(self, featur_num:int, featur_ex:int) -> None:
        super().__init__()
        self.weight = torch.nn.Linear(featur_num, featur_ex)
    def forward(self, input:torch.Tensor):
        input = torch.matmul(input,input.transpose(1,2))
        return self.weight(input)

class EncoderPriceModel(nn.Module):
    '''
    Price model: Tokenizer followed by a stack of Encoder blocks and two
    linear reductions down to one value per sample.
    '''
    def __init__(self, features_num:int, features_dim:int, features_ex:int, heads:int = 1, deepth:int = 6) -> None:
        '''
        features_num: number of input features (tokens)
        features_dim: dimension of a single feature
        features_ex: dimension of a feature after expansion
        heads: number of attention heads
        deepth: number of stacked encoder blocks
        '''
        super().__init__()
        self.tokenizer = Tokenizer(features_num, features_ex)
        # Stack of `deepth` encoder blocks over the expanded features.
        self.weights = nn.Sequential()
        for i in range(deepth):
            self.weights.append(
                Encoder(features_ex, features_ex, heads)
            )
        # Collapse the expanded feature dimension to one scalar per token...
        self.linear = nn.Linear(features_ex, 1)
        # ...then mix the per-token scalars into the final prediction.
        self.get_out = nn.Linear(features_num, 1)

    def forward(self, input_data:torch.Tensor) -> torch.Tensor:
        '''
        Tokenize, run the encoder stack, and reduce to one value per sample.
        '''
        input_data = self.tokenizer(input_data)
        input_data = self.weights(input_data)
        features = self.linear(input_data)  # trailing dim is 1 after self.linear
        # Squeeze only the trailing singleton dim: the previous bare squeeze()
        # also dropped the batch dimension when the batch size was 1.
        output = torch.squeeze(features, -1)
        output = self.get_out(output)
        return output

class MLPPriceModel(nn.Module):
    '''
    Simple MLP baseline: `deep` Linear+ReLU layers of width `input`,
    followed by a scalar output head.
    '''
    def __init__(self, input, deep) -> None:
        super().__init__()
        self.weight = nn.Sequential()
        for i in range(deep):
            self.weight.append(nn.Linear(input, input))
            self.weight.append(nn.ReLU())
        self.get_out = nn.Linear(input, 1)

    def forward(self, input_data:torch.Tensor):
        # Drop only the trailing singleton dimension: the previous bare
        # squeeze() also removed the batch dimension when batch size was 1,
        # producing a 1-D tensor instead of (1, 1).
        input_data = input_data.squeeze(-1)
        output = self.weight(input_data)
        return self.get_out(output)

class PriceModel:
    '''
    Thin training/inference wrapper around an `nn.Module` price model.
    '''
    def __init__(self, model:nn.Module) -> None:
        self.model = model

    def fit(self, data:np.ndarray, label:np.ndarray, epoches=20, batchsize=1, lr=1e-3, machine='cpu', out_dir='./'):
        '''
        Train the wrapped model with Adam and L1 loss.

        data/label: numpy arrays whose leading dimension is the sample count;
        both get a trailing dim added and are cast to float32.
        epoches: minimum number of epochs before early stopping can trigger.
        machine: torch device string. out_dir: checkpoint directory prefix.

        Raises:
            ValueError: if data and label have different sample counts.
        '''
        if data.shape[0] != label.shape[0]:
            # The original raised a bare string, which is a TypeError in
            # Python 3; raise a real exception instead.
            raise ValueError('The batch size of data is different from that of label')
        self.model.to(machine)
        self.model.train()
        loss_fun = nn.L1Loss()
        data = torch.tensor(data, device=machine).unsqueeze(-1).float()
        label = torch.tensor(label, device=machine).unsqueeze(-1).float()
        optim = Adam(self.model.parameters(), lr)

        loader = DataLoader(PriceDataSet(data, label), batchsize, shuffle=True)

        pre_loss = np.inf
        epoch = 0
        while True:
            # Periodic checkpoint every 10 epochs.
            if (epoch + 1) % 10 == 0:
                torch.save(self.model.state_dict(), out_dir + 'model%d.pth' % (epoch + 1))
            losses = []
            bar = tqdm(loader)
            for batch_data, batch_label in bar:
                output = self.model(batch_data)
                loss = loss_fun(output.squeeze(), batch_label.squeeze())
                loss.backward()
                losses.append(loss.item())
                optim.step()
                optim.zero_grad()
                bar.set_postfix_str(f'epoch[{epoch}:{epoches}]|loss[{np.mean(losses)}]')
            # Stop once the mean epoch loss strictly increases AND at least
            # `epoches` epochs have run.
            # NOTE(review): if the loss plateaus (never strictly increases),
            # this loop never terminates — confirm this open-ended training
            # is intended.
            if np.mean(losses) > pre_loss and epoch > epoches:
                break
            pre_loss = np.mean(losses)
            epoch += 1

    def predict(self, data:np.ndarray, batchsize=1, machine='cpu') -> np.ndarray:
        '''
        Run inference over `data` (same preprocessing as `fit`) and return
        the batched model outputs concatenated along axis 0.
        '''
        self.model.to(machine)
        self.model.eval()
        data = torch.tensor(data, device=machine).unsqueeze(-1).float()
        ans = []
        loader = DataLoader(PriceDataSet(data), batchsize, shuffle=False)
        bar = tqdm(loader)
        with torch.no_grad():
            for batch in bar:
                ans.append(
                    self.model(batch).detach().cpu().numpy()
                    )
        return np.concatenate(ans, 0)
