import torch
import torch.nn as nn
from peft import LoraConfig, TaskType, get_peft_model,PeftConfig,PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
from torch.cuda.amp import GradScaler, autocast
import numpy as np


class RecSys(nn.Module):
    """LLM-backed sequential recommender.

    Frozen SASRec item embeddings are projected into the LLM token-embedding
    space, wrapped between a fixed instruction prefix and response suffix, and
    the last hidden state of the (LoRA-adapted) causal LM is linearly scored
    over the item vocabulary.

    Expected kwargs:
        input_dim: dimensionality of the SASRec item embeddings.
        output_dim: number of items to score (size of the output logits).
        base_model: HF model id / path of the causal-LM backbone.
        model_path: cache directory for model/tokenizer downloads.
        use_pretrained: if truthy, load LoRA adapters from ``lora_path``.
        lora_path: directory holding pretrained LoRA adapters.
        item_embed: tensor of pretrained SASRec item embeddings (kept frozen).
    """

    def __init__(self, **args):
        super(RecSys, self).__init__()
        self.args = args
        self.input_dim, self.output_dim = args['input_dim'], args['output_dim']
        self.base_model = args["base_model"]
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        # LoRA configuration, following
        # https://github.com/QwenLM/Qwen2.5/blob/main/examples/llama-factory/qwen2-7b-lora-sft.yaml
        peft_config = LoraConfig(task_type='CAUSAL_LM', target_modules=[
                                 "q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"], r=16, lora_alpha=16, lora_dropout=0.2)

        # hidden_states are needed by predict(), which reads hidden_states[-1].
        config = AutoConfig.from_pretrained(
            self.args['base_model'], output_hidden_states=True)
        config.upcast_layernorm = True
        # Backbone model and tokenizer.
        self.model = AutoModelForCausalLM.from_pretrained(self.base_model,
                                                          config=config,
                                                          cache_dir=args['model_path'])
        self.tokenizer = AutoTokenizer.from_pretrained(self.base_model,
                                                       cache_dir=args['model_path'])
        self.model.to(self.device)
        # Attach LoRA adapters: either load pretrained ones or create fresh ones.
        # (A previously-unused ``PeftConfig.from_pretrained`` call was removed here:
        # its result was never read and it shadowed the model config variable.)
        if args['use_pretrained']:
            self.model = PeftModel.from_pretrained(self.model, args['lora_path'])
        else:
            self.model = get_peft_model(self.model, peft_config)

        # Tokenize the fixed prompt pieces once. NOTE(review): the ``.values()``
        # unpacking relies on the BatchEncoding insertion order being
        # (input_ids, attention_mask) — confirm against the tokenizer in use.
        instruct = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\nGiven the user’s purchase history, predict next possible item to be purchased.\n\n### Input:\n"
        self.instruct_ids, self.instruct_mask = self.tokenizer(instruct,
                                                               truncation=True, padding=False, return_tensors='pt', add_special_tokens=False).values()
        response = "\n### Response:\n"
        self.response_ids, self.response_mask = self.tokenizer(response,
                                                               truncation=True, padding=False, return_tensors='pt', add_special_tokens=False).values()

        self.embed_tokens = self.model.get_input_embeddings()
        # Frozen item-embedding table taken from a pretrained SASRec model.
        self.item_embed = nn.Embedding.from_pretrained(
            self.args["item_embed"], freeze=True)
        self.item_embed.to(self.device)
        self.embed_tokens.to(self.device)
        # Projects SASRec item embeddings into the LLM hidden space.
        self.item_proj = nn.Linear(
            self.input_dim, self.model.config.hidden_size)
        self.item_proj.to(self.device)

        # Scores the final hidden state over the item vocabulary.
        self.score = nn.Linear(
            self.model.config.hidden_size, self.output_dim, bias=False)
        self.score.to(self.device)

    def predict(self, inputs, inputs_mask):
        """Run the LM over [instruction | item sequence | response] embeddings.

        Args:
            inputs: (batch, seq) tensor of item ids into ``item_embed``.
            inputs_mask: (batch, seq) attention mask for ``inputs``.

        Returns:
            Tuple of (raw model outputs, (batch, output_dim) item logits).
        """
        bs = inputs.shape[0]
        instruct_embeds = self.embed_tokens(
            self.instruct_ids.to(self.device)).expand(bs, -1, -1)
        response_embeds = self.embed_tokens(
            self.response_ids.to(self.device)).expand(bs, -1, -1)
        instruct_mask = self.instruct_mask.to(self.device).expand(bs, -1)
        response_mask = self.response_mask.to(self.device).expand(bs, -1)

        # Project item embeddings into the LLM space, then splice them between
        # the tokenized prompt prefix and suffix.
        inputs = self.item_proj(self.item_embed(inputs))
        inputs = torch.cat([instruct_embeds, inputs, response_embeds], dim=1)
        attention_mask = torch.cat(
            [instruct_mask, inputs_mask, response_mask], dim=1)

        with autocast():  # automatic mixed precision
            outputs = self.model(
                inputs_embeds=inputs, attention_mask=attention_mask, return_dict=True)

        # Score only the last position of the final hidden layer.
        pooled_output = outputs.hidden_states[-1]
        pooled_logits = self.score(pooled_output[:, -1])

        return outputs, pooled_logits.view(-1, self.output_dim)

    def forward(self, inputs, inputs_mask):
        """Return item logits only (see ``predict``)."""
        outputs, pooled_logits = self.predict(inputs, inputs_mask)
        return pooled_logits

    def load_from_checkpoint(self, checkpoint_path):
        """Restore the projection and scoring heads from a checkpoint.

        LoRA weights are expected to be restored separately via ``lora_path``.
        """
        checkpoint = torch.load(checkpoint_path, map_location=self.device)
        self.item_proj.load_state_dict(checkpoint['item_proj'])
        self.score.load_state_dict(checkpoint['score'])
    

class RecSys2(nn.Module):
    """Variant of ``RecSys``: device comes from kwargs, LoRA uses a smaller
    rank (r=4, dropout=0.05), no mixed precision is used, and both
    ``predict`` and ``forward`` additionally return the pooled last hidden
    state (useful for distillation / feature extraction).

    Expected kwargs: input_dim, output_dim, base_model, model_path, device,
    use_pretrained, lora_path, item_embed.
    """

    def __init__(self, **args):
        super(RecSys2, self).__init__()
        self.args = args
        self.input_dim, self.output_dim = args['input_dim'], args['output_dim']
        self.base_model = args["base_model"]
        self.device = args['device']

        # LoRA configuration, following
        # https://github.com/QwenLM/Qwen2.5/blob/main/examples/llama-factory/qwen2-7b-lora-sft.yaml
        peft_config = LoraConfig(task_type='CAUSAL_LM', target_modules=[
                                 "q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"], r=4, lora_alpha=16, lora_dropout=0.05)

        # hidden_states are needed by predict(), which reads hidden_states[-1].
        config = AutoConfig.from_pretrained(
            self.args['base_model'], output_hidden_states=True)
        config.upcast_layernorm = True
        # Backbone model and tokenizer.
        self.model = AutoModelForCausalLM.from_pretrained(self.base_model,
                                                          config=config,
                                                          cache_dir=args['model_path'])
        self.tokenizer = AutoTokenizer.from_pretrained(self.base_model,
                                                       cache_dir=args['model_path'])
        self.model.to(self.device)
        # Attach LoRA adapters: either load pretrained ones or create fresh ones.
        # (A previously-unused ``PeftConfig.from_pretrained`` call was removed here:
        # its result was never read and it shadowed the model config variable.)
        if args['use_pretrained']:
            self.model = PeftModel.from_pretrained(self.model, args['lora_path'])
        else:
            self.model = get_peft_model(self.model, peft_config)

        # Tokenize the fixed prompt pieces once. NOTE(review): the ``.values()``
        # unpacking relies on the BatchEncoding insertion order being
        # (input_ids, attention_mask) — confirm against the tokenizer in use.
        instruct = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\nGiven the user’s purchase history, predict next possible item to be purchased.\n\n### Input:\n"
        self.instruct_ids, self.instruct_mask = self.tokenizer(instruct,
                                                               truncation=True, padding=False, return_tensors='pt', add_special_tokens=False).values()
        response = "\n### Response:\n"
        self.response_ids, self.response_mask = self.tokenizer(response,
                                                               truncation=True, padding=False, return_tensors='pt', add_special_tokens=False).values()

        self.embed_tokens = self.model.get_input_embeddings()
        # Frozen item-embedding table taken from a pretrained SASRec model.
        self.item_embed = nn.Embedding.from_pretrained(
            self.args["item_embed"], freeze=True)
        self.item_embed.to(self.device)
        self.embed_tokens.to(self.device)
        # Projects SASRec item embeddings into the LLM hidden space.
        self.item_proj = nn.Linear(
            self.input_dim, self.model.config.hidden_size)
        self.item_proj.to(self.device)

        # Scores the final hidden state over the item vocabulary.
        self.score = nn.Linear(
            self.model.config.hidden_size, self.output_dim, bias=False)
        self.score.to(self.device)

    def predict(self, inputs, inputs_mask):
        """Run the LM over [instruction | item sequence | response] embeddings.

        Args:
            inputs: (batch, seq) tensor of item ids into ``item_embed``.
            inputs_mask: (batch, seq) attention mask for ``inputs``.

        Returns:
            Tuple of ((batch, hidden_size) last-position hidden state,
            (batch, output_dim) item logits).
        """
        bs = inputs.shape[0]
        instruct_embeds = self.embed_tokens(
            self.instruct_ids.to(self.device)).expand(bs, -1, -1)
        response_embeds = self.embed_tokens(
            self.response_ids.to(self.device)).expand(bs, -1, -1)
        instruct_mask = self.instruct_mask.to(self.device).expand(bs, -1)
        response_mask = self.response_mask.to(self.device).expand(bs, -1)

        # Project item embeddings into the LLM space, then splice them between
        # the tokenized prompt prefix and suffix.
        inputs = self.item_proj(self.item_embed(inputs))
        inputs = torch.cat([instruct_embeds, inputs, response_embeds], dim=1)
        attention_mask = torch.cat(
            [instruct_mask, inputs_mask, response_mask], dim=1)

        outputs = self.model(inputs_embeds=inputs, attention_mask=attention_mask, return_dict=True)
        # Score only the last position of the final hidden layer.
        pooled_output = outputs.hidden_states[-1]
        pooled_logits = self.score(pooled_output[:, -1])

        return pooled_output[:, -1], pooled_logits.view(-1, self.output_dim)

    def forward(self, inputs, inputs_mask):
        """Return (last-position hidden state, item logits); see ``predict``."""
        outputs, pooled_logits = self.predict(inputs, inputs_mask)

        return outputs, pooled_logits

    def load_from_checkpoint(self, checkpoint_path):
        """Restore the projection and scoring heads from a checkpoint.

        LoRA weights are expected to be restored separately via ``lora_path``.
        """
        checkpoint = torch.load(checkpoint_path, map_location=self.device)
        self.item_proj.load_state_dict(checkpoint['item_proj'])
        self.score.load_state_dict(checkpoint['score'])
        

class PointWiseFeedForward(torch.nn.Module):
    """Position-wise two-layer feed-forward block with a residual connection.

    Implemented with kernel-size-1 Conv1d layers, which act as independent
    per-position linear maps over the channel dimension.
    """

    def __init__(self, hidden_units, dropout_rate):
        super(PointWiseFeedForward, self).__init__()
        # Conv1d operates on (N, C, L); both convolutions keep C unchanged.
        self.conv1 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
        self.dropout1 = torch.nn.Dropout(p=dropout_rate)
        self.relu = torch.nn.ReLU()
        self.conv2 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
        self.dropout2 = torch.nn.Dropout(p=dropout_rate)

    def forward(self, inputs):
        # Bring channels before the sequence axis, as Conv1d requires.
        hidden = inputs.transpose(-1, -2)
        hidden = self.relu(self.dropout1(self.conv1(hidden)))
        hidden = self.dropout2(self.conv2(hidden))
        # Back to (N, L, C), then add the residual input.
        return hidden.transpose(-1, -2) + inputs

# Please use the self-made multi-head attention layer from the link below
# in case your PyTorch version is below 1.16, or for other reasons:
# https://github.com/pmixer/TiSASRec.pytorch/blob/master/model.py


class SASRec(torch.nn.Module):
    def __init__(self, user_num, item_num, args):
        super(SASRec, self).__init__()

        self.user_num = user_num
        self.item_num = item_num
        self.dev = args.device

        # TODO: loss += args.l2_emb for regularizing embedding vectors during training
        # https://stackoverflow.com/questions/42704283/adding-l1-l2-regularization-in-pytorch
        self.item_emb = torch.nn.Embedding(
            self.item_num+1, args.hidden_units, padding_idx=0)
        self.pos_emb = torch.nn.Embedding(
            args.maxlen+1, args.hidden_units, padding_idx=0)
        self.emb_dropout = torch.nn.Dropout(p=args.dropout_rate)

        self.attention_layernorms = torch.nn.ModuleList()  # to be Q for self-attention
        self.attention_layers = torch.nn.ModuleList()
        self.forward_layernorms = torch.nn.ModuleList()
        self.forward_layers = torch.nn.ModuleList()

        self.last_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)

        for _ in range(args.num_blocks):
            new_attn_layernorm = torch.nn.LayerNorm(
                args.hidden_units, eps=1e-8)
            self.attention_layernorms.append(new_attn_layernorm)

            new_attn_layer = torch.nn.MultiheadAttention(args.hidden_units,
                                                         args.num_heads,
                                                         args.dropout_rate)
            self.attention_layers.append(new_attn_layer)

            new_fwd_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.forward_layernorms.append(new_fwd_layernorm)

            new_fwd_layer = PointWiseFeedForward(
                args.hidden_units, args.dropout_rate)
            self.forward_layers.append(new_fwd_layer)

            # self.pos_sigmoid = torch.nn.Sigmoid()
            # self.neg_sigmoid = torch.nn.Sigmoid()

    def log2feats(self, log_seqs):  # TODO: fp64 and int64 as default in python, trim?
        seqs = self.item_emb(torch.LongTensor(log_seqs).to(self.dev))
        # print("seqs.shape:",seqs.shape)
        # print("seqs:",seqs)
        seqs *= self.item_emb.embedding_dim ** 0.5
        poss = np.tile(
            np.arange(1, log_seqs.shape[1] + 1), [log_seqs.shape[0], 1])
        # TODO: directly do tensor = torch.arange(1, xxx, device='cuda') to save extra overheads
        poss *= (log_seqs != 0)
        # print("poss.shape:",poss.shape)
        # print("poss:",poss)

        seqs += self.pos_emb(torch.LongTensor(poss).to(self.dev))
        seqs = self.emb_dropout(seqs)

        tl = seqs.shape[1]  # time dim len for enforce causality
        attention_mask = ~torch.tril(torch.ones(
            (tl, tl), dtype=torch.bool, device=self.dev))
        # print("attention_mask:",attention_mask)

        for i in range(len(self.attention_layers)):
            seqs = torch.transpose(seqs, 0, 1)
            Q = self.attention_layernorms[i](seqs)
            mha_outputs, _ = self.attention_layers[i](Q, seqs, seqs,
                                                      attn_mask=attention_mask)
            # need_weights=False) this arg do not work?
            seqs = Q + mha_outputs
            seqs = torch.transpose(seqs, 0, 1)

            seqs = self.forward_layernorms[i](seqs)
            seqs = self.forward_layers[i](seqs)
            # print(f"{i}:")
            # print("Q.shape:",Q.shape)
            # print("Q:",Q)
            # print("seqs.shape:",seqs.shape)
            # print("seqs:",seqs)

        log_feats = self.last_layernorm(seqs)  # (U, T, C) -> (U, -1, C)

        return log_feats

    def forward(self, user_ids, log_seqs, pos_seqs, neg_seqs):  # for training
        # print("log_seqs.shape:", log_seqs.shape)
        # print("log_seqs:", log_seqs)
        log_feats = self.log2feats(log_seqs)  # user_ids hasn't been used yet

        pos_embs = self.item_emb(torch.LongTensor(pos_seqs).to(self.dev))
        neg_embs = self.item_emb(torch.LongTensor(neg_seqs).to(self.dev))

        pos_logits = (log_feats * pos_embs).sum(dim=-1)
        neg_logits = (log_feats * neg_embs).sum(dim=-1)
        # print("log_feats.shape:", log_feats.shape)
        # print("log_feats:", log_feats)
        # print("pos_embs.shape:", pos_embs.shape)
        # print("pos_embs:", pos_embs)
        # print("pos_logits.shape:", pos_logits.shape)
        # print("pos_logits:", pos_logits)

        # pos_pred = self.pos_sigmoid(pos_logits)
        # neg_pred = self.neg_sigmoid(neg_logits)
        item_logits=torch.matmul(log_feats[:,-1,:],self.item_emb.weight.t())# t是转置

        return pos_logits, neg_logits, item_logits  # pos_pred, neg_pred
    
    def forward2(self, user_ids, log_seqs, pos_seqs, neg_seqs):  # for training
        log_feats = self.log2feats(log_seqs)  # user_ids hasn't been used yet

        pos_embs = self.item_emb(torch.LongTensor(pos_seqs).to(self.dev))
        neg_embs = self.item_emb(torch.LongTensor(neg_seqs).to(self.dev))

        pos_logits = (log_feats * pos_embs).sum(dim=-1)
        neg_logits = (log_feats * neg_embs).sum(dim=-1)

        item_logits=torch.matmul(log_feats[:,-1,:],self.item_emb.weight.t())# t是转置

        return pos_logits, neg_logits, item_logits  # pos_pred, neg_pred
    
    def get_logit(self,log_seqs):
        return self.log2feats(log_seqs)

    def predict(self, user_ids, log_seqs, item_indices):  # for inference
        log_feats = self.log2feats(log_seqs)  # user_ids hasn't been used yet

        # only use last QKV classifier, a waste
        final_feat = log_feats[:, -1, :]

        item_embs = self.item_emb(torch.LongTensor(
            item_indices).to(self.dev))  # (U, I, C)

        logits = item_embs.matmul(final_feat.unsqueeze(-1)).squeeze(-1)

        return logits  # preds # (U, I)
    
    def predict2(self, log_seqs):  # for inference
        log_feats = self.log2feats(log_seqs)  # user_ids hasn't been used yet

        # only use last QKV classifier, a waste
        final_feat = log_feats[:, -1, :]

        item_embs = self.item_emb.weight  # (U, I, C)

        item_logits=torch.matmul(log_feats[:,-1,:],self.item_emb.weight.t())# t是转置


        return item_logits  # preds # (U, I)

class SASRecwithClassify(torch.nn.Module):
    """SASRec recommender combined with a Transformer-based confidence head.

    The SASRec branch produces the usual positive/negative/catalog logits;
    a separate Transformer encoder over the raw id sequence, augmented with
    two scalar side features (``eens`` — presumably an entropy feature, and
    the sequence length — TODO confirm with callers), produces a sigmoid
    confidence score.
    """

    def __init__(self, user_num, item_num, maxlen, embed_size, args, dropout=0.4, class_num=1, d_model=128):
        super(SASRecwithClassify, self).__init__()

        # --- confidence-classifier branch ---
        self.maxlen = maxlen
        self.d_model = d_model
        self.embedding = nn.Embedding(embed_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model, maxlen)
        # Transformer encoder layer and stacked encoder.
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=8, dim_feedforward=512)
        self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=4)
        self.dropout = nn.Dropout(dropout)
        # +2 for the two appended scalar side features.
        self.fc = nn.Linear(d_model+2, class_num)
        self.sigmoid = nn.Sigmoid().to(args.device)

        # --- SASRec branch ---
        self.user_num = user_num
        self.item_num = item_num
        self.dev = args.device

        # TODO: loss += args.l2_emb for regularizing embedding vectors during training
        # https://stackoverflow.com/questions/42704283/adding-l1-l2-regularization-in-pytorch
        self.item_emb = torch.nn.Embedding(
            self.item_num+1, args.hidden_units, padding_idx=0)
        self.pos_emb = torch.nn.Embedding(
            args.maxlen+1, args.hidden_units, padding_idx=0)
        self.emb_dropout = torch.nn.Dropout(p=args.dropout_rate)

        self.attention_layernorms = torch.nn.ModuleList()  # to be Q for self-attention
        self.attention_layers = torch.nn.ModuleList()
        self.forward_layernorms = torch.nn.ModuleList()
        self.forward_layers = torch.nn.ModuleList()

        self.last_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)

        for _ in range(args.num_blocks):
            new_attn_layernorm = torch.nn.LayerNorm(
                args.hidden_units, eps=1e-8)
            self.attention_layernorms.append(new_attn_layernorm)

            new_attn_layer = torch.nn.MultiheadAttention(args.hidden_units,
                                                         args.num_heads,
                                                         args.dropout_rate)
            self.attention_layers.append(new_attn_layer)

            new_fwd_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.forward_layernorms.append(new_fwd_layernorm)

            new_fwd_layer = PointWiseFeedForward(
                args.hidden_units, args.dropout_rate)
            self.forward_layers.append(new_fwd_layer)

    def log2feats(self, log_seqs):
        """Encode padded item-id sequences into per-position features.

        Args:
            log_seqs: (batch, seq_len) integer array of item ids, 0 = padding.
                NOTE(review): numpy semantics are assumed by
                ``poss *= (log_seqs != 0)``.

        Returns:
            (batch, seq_len, hidden_units) tensor of sequence features.
        """
        seqs = self.item_emb(torch.LongTensor(log_seqs).to(self.dev))
        seqs *= self.item_emb.embedding_dim ** 0.5  # transformer embedding scaling
        poss = np.tile(
            np.arange(1, log_seqs.shape[1] + 1), [log_seqs.shape[0], 1])
        # Zero out positions that correspond to padding (index 0 is the pad position).
        poss *= (log_seqs != 0)

        seqs += self.pos_emb(torch.LongTensor(poss).to(self.dev))
        seqs = self.emb_dropout(seqs)

        tl = seqs.shape[1]  # time dim len for enforcing causality
        attention_mask = ~torch.tril(torch.ones(
            (tl, tl), dtype=torch.bool, device=self.dev))

        for i in range(len(self.attention_layers)):
            # MultiheadAttention (non batch_first) expects (T, N, C).
            seqs = torch.transpose(seqs, 0, 1)
            Q = self.attention_layernorms[i](seqs)
            mha_outputs, _ = self.attention_layers[i](Q, seqs, seqs,
                                                      attn_mask=attention_mask)
            seqs = Q + mha_outputs  # residual around attention
            seqs = torch.transpose(seqs, 0, 1)

            seqs = self.forward_layernorms[i](seqs)
            seqs = self.forward_layers[i](seqs)

        log_feats = self.last_layernorm(seqs)  # (U, T, C)

        return log_feats

    def forward(self, user_ids, log_seqs, pos_seqs, neg_seqs, eens):  # for training
        """Return (pos_logits, neg_logits, item_logits, confidence).

        ``user_ids`` is accepted for interface compatibility but unused.
        """
        log_feats = self.log2feats(log_seqs)

        pos_embs = self.item_emb(torch.LongTensor(pos_seqs).to(self.dev))
        neg_embs = self.item_emb(torch.LongTensor(neg_seqs).to(self.dev))

        pos_logits = (log_feats * pos_embs).sum(dim=-1)
        neg_logits = (log_feats * neg_embs).sum(dim=-1)

        # Logits over the whole catalog, from the last position's features.
        item_logits = torch.matmul(log_feats[:, -1, :], self.item_emb.weight.t())

        # --- confidence branch ---
        inputs = torch.tensor(log_seqs, dtype=torch.int).to(self.dev)
        valid = (inputs != 0).to(self.dev)
        lengths = valid.sum(dim=1).unsqueeze(dim=1).to(self.dev)
        embeds = self.embedding(inputs)
        embeds = embeds.transpose(0, 1)  # (seq, batch, d_model) for the encoder
        embeds = self.positional_encoding(embeds)
        # BUG FIX: src_key_padding_mask expects True at positions to IGNORE.
        # The previous code passed the validity mask (True = real token)
        # directly, which masked the real tokens and attended to padding.
        features = self.encoder(embeds, src_key_padding_mask=~valid)
        features = self.dropout(features)
        pools = features.transpose(0, 1).mean(dim=1)
        additional_features = torch.cat((eens, lengths), dim=1)
        pools_with_additional = torch.cat((pools, additional_features), dim=1)
        confidence = self.sigmoid(self.fc(pools_with_additional))

        return pos_logits, neg_logits, item_logits, confidence

    def predict(self, user_ids, log_seqs, item_indices):  # for inference
        """Score ``item_indices`` against the last position's features."""
        log_feats = self.log2feats(log_seqs)

        # Only the last position's representation is used at inference time.
        final_feat = log_feats[:, -1, :]

        item_embs = self.item_emb(torch.LongTensor(
            item_indices).to(self.dev))  # (U, I, C)

        logits = item_embs.matmul(final_feat.unsqueeze(-1)).squeeze(-1)

        return logits  # (U, I)


class EnClassify(nn.Module):
    """Transformer classifier over item-id sequences with entropy/length side
    features; also projects a teacher logit vector (LLM hidden size) down to
    SASRec's 50 hidden units for distillation."""

    def __init__(self, maxlen, embed_size, device, dropout=0.4, llm_hidden_size=1536, class_num=1, d_model=128):
        super(EnClassify, self).__init__()
        self.maxlen = maxlen
        self.d_model = d_model
        self.embedding = nn.Embedding(embed_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model, maxlen)
        # One Transformer encoder layer, stacked four deep.
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=8, dim_feedforward=512)
        self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=4)
        self.dropout = nn.Dropout(dropout)
        # +2 for the two appended scalar side features.
        self.fc = nn.Linear(d_model + 2, class_num)
        # 50 is SASRec's hidden-units count; 1536 is Qwen2.5-1.5B-Instruct's
        # hidden size (model.config.hidden_size).
        self.sas_proj = nn.Linear(llm_hidden_size, 50)
        self.sas_proj.to(device)
        self.sigmoid = nn.Sigmoid().to(device)

    def forward(self, inputs, masks, eens, lengths, teacher_logit):
        """Return (sigmoid class scores, projected teacher logits).

        NOTE(review): ``masks`` is forwarded as ``src_key_padding_mask``, where
        True marks positions to ignore — verify the caller's convention.
        """
        # (batch, seq) -> (seq, batch, d_model): the encoder is not batch_first.
        token_embeds = self.embedding(inputs).transpose(0, 1)
        token_embeds = self.positional_encoding(token_embeds)

        encoded = self.encoder(token_embeds, src_key_padding_mask=masks)
        encoded = self.dropout(encoded)

        # Mean-pool over the sequence axis, then append the scalar side features.
        pooled = encoded.transpose(0, 1).mean(dim=1)
        side = torch.cat((eens, lengths), dim=1)
        combined = torch.cat((pooled, side), dim=1)
        class_scores = self.sigmoid(self.fc(combined))

        # Project teacher logits down to SASRec's hidden size.
        return class_scores, self.sas_proj(teacher_logit)


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding for (seq_len, batch, d_model) inputs.

    The encoding table is precomputed once and registered as a buffer; the
    forward pass adds the first ``seq_len`` rows to the input and applies
    dropout.
    """

    def __init__(self, d_model, max_len, dropout=0.2):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder: 10000^(-2i/d_model) for even indices i.
        freqs = torch.exp(torch.arange(
            0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Shape (max_len, 1, d_model) so it broadcasts over the batch axis.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # Add the encoding for the first x.size(0) positions, then dropout.
        return self.dropout(x + self.pe[:x.size(0), :])