import torch
import torch.nn as nn
import numpy as np

class PointWiseFeedForward(torch.nn.Module):
    """Position-wise feed-forward block built from 1x1 convolutions.

    Applies conv1 -> dropout -> ReLU -> conv2 -> dropout independently at
    every sequence position. Input and output are both (N, Length, C);
    the transposes exist only because Conv1d wants (N, C, Length).
    """

    def __init__(self, hidden_units, dropout_rate):
        super(PointWiseFeedForward, self).__init__()

        self.conv1 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
        self.dropout1 = torch.nn.Dropout(p=dropout_rate)
        self.relu = torch.nn.ReLU()
        self.conv2 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
        self.dropout2 = torch.nn.Dropout(p=dropout_rate)

    def forward(self, inputs):
        # Conv1d expects channels-first, so swap the last two dims first.
        hidden = inputs.transpose(-1, -2)
        hidden = self.dropout1(self.conv1(hidden))
        hidden = self.relu(hidden)
        hidden = self.dropout2(self.conv2(hidden))
        # Swap back to (N, Length, C) for the caller.
        return hidden.transpose(-1, -2)
    

class ItemEmb(nn.Module):
    """Item embedding with optional rating-feature fusion.

    When ``args.feat == 1`` a rating embedding is gated into the item-id
    embedding; otherwise (or when no features are supplied) only the plain
    id embedding is returned.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args
        # 1. Item-id embedding (index 0 reserved for padding).
        self.item_emb = nn.Embedding(args.item_num + 1, args.hidden_units, padding_idx=0)
        # 2. Movie-side feature encoding; for now only the rating is encoded.
        if args.feat == 1:
            # Ratings are integers 1..5; index 0 is the padding slot.
            self.score_emb = nn.Embedding(5 + 1, args.hidden_units, padding_idx=0)
            # Fusion MLP over the concatenated [id, rating] embeddings.
            self.feature_fusion = nn.Sequential(
                nn.Linear(args.hidden_units * 2, args.hidden_units),
                nn.ReLU(),
                nn.Dropout(args.dropout_rate),
                nn.LayerNorm(args.hidden_units)
            )
            # Gate controlling how much of the fused feature is mixed in.
            self.feature_gate = nn.Sequential(
                nn.Linear(args.hidden_units * 2, args.hidden_units),
                nn.Sigmoid()
            )

    def forward(self, x, item_feat):
        """Embed item ids and optionally fuse rating features.

        Args:
            x: LongTensor of item ids, shape (seq_len,) or (batch, seq_len).
            item_feat: None, or a list of dicts each carrying a 'rating'
                key — either one dict per batch element, or one per
                sequence position (predict path).

        Returns:
            Tensor of shape (*x.shape, hidden_units).
        """
        feat1 = self.item_emb(x)
        # No usable side features -> plain id embedding.
        if item_feat is None or None in item_feat:
            return feat1
        # Fix: the rating branch only exists when args.feat == 1. Previously
        # calling this with features while feat != 1 raised AttributeError on
        # the missing ``score_emb``; fall back to the id embedding instead.
        if not hasattr(self, 'score_emb'):
            return feat1

        # x may be 1-D (predict helper path) or 2-D (batched training path).
        # NOTE(review): in the 1-D case feat1 is rank-2 while feat2 below is
        # rank-3, so torch.cat would fail — this branch looks dead in
        # practice since predict() passes 2-D indices; confirm before use.
        if len(x.shape) == 1:
            batch_size = 1
            seq_len = x.shape[0]
        else:
            batch_size, seq_len = x.shape

        if isinstance(item_feat, list) and len(item_feat) == batch_size:
            # Batch-level features: broadcast each sample's single rating
            # across every position of its sequence.
            ratings = torch.zeros(batch_size, seq_len, dtype=torch.long, device=self.args.device)
            for i in range(batch_size):
                ratings[i, :] = item_feat[i]['rating']
        elif isinstance(item_feat, list) and len(item_feat) == seq_len:
            # Sequence-level features (predict path): one rating per
            # position, shared across the batch.
            ratings = torch.as_tensor([feat['rating'] for feat in item_feat],
                                      dtype=torch.long, device=self.args.device)
            ratings = ratings.unsqueeze(0).expand(batch_size, -1)
        else:
            # Fallback: assume the maximum rating (5) everywhere.
            ratings = torch.ones(batch_size, seq_len, dtype=torch.long, device=self.args.device) * 5

        feat2 = self.score_emb(ratings)

        # Feature fusion over the concatenated embeddings.
        combined_feat = torch.cat([feat1, feat2], dim=-1)

        # Gate dynamically controls the rating feature's influence.
        gate = self.feature_gate(combined_feat)

        out = self.feature_fusion(combined_feat)
        out = gate * out + (1 - gate) * feat1  # residual toward the id embedding

        return out
    
class UserEmb(nn.Module):
    """User embedding with optional demographic-feature fusion.

    When ``args.feat == 1`` age, occupation and gender embeddings are
    combined with the user-id embedding via attention-based interaction,
    learned importance weights and an MLP fusion; otherwise only the id
    embedding is available.
    """

    def __init__(self, args):
        super().__init__()
        # 1. User-id embedding (index 0 reserved for padding).
        self.user_emb = nn.Embedding(args.user_num + 1, args.hidden_units, padding_idx=0)
        self.device = args.device
        # 2. Categorical feature embeddings: age, occupation, gender.
        #    Table sizes presumably match the dataset's encoded value
        #    ranges (e.g. MovieLens) — TODO confirm against preprocessing.
        if args.feat == 1:
            self.age_emb = nn.Embedding(8, args.hidden_units, padding_idx=0)
            self.occupation_emb = nn.Embedding(22, args.hidden_units, padding_idx=0)
            self.gender_emb = nn.Embedding(3, args.hidden_units, padding_idx=0)

            # Cross-feature attention: lets the four feature embeddings
            # attend to one another.
            self.feature_interaction = nn.MultiheadAttention(
                args.hidden_units, 2, args.dropout_rate, batch_first=True
            )

            # Fusion MLP over the concatenated four feature embeddings.
            self.feature_fusion = nn.Sequential(
                nn.Linear(args.hidden_units * 4, args.hidden_units * 2),
                nn.ReLU(),
                nn.Dropout(args.dropout_rate),
                nn.Linear(args.hidden_units * 2, args.hidden_units)
            )

            # Learned per-feature importance weights (softmax over 4 slots).
            self.feature_importance = nn.Sequential(
                nn.Linear(args.hidden_units * 4, 4),
                nn.Softmax(dim=-1)
            )

    def forward(self, x):
        """Embed a batch of user profiles.

        Args:
            x: list of dicts with integer-encoded 'user_id', 'age',
               'occupation' and 'gender' keys, or None.

        Returns:
            Tensor of shape (batch, hidden_units), or None when ``x`` is
            None / contains None entries.
        """
        if x is None or None in x:
            return None
        user_ids = [d['user_id'] for d in x]
        feat1 = self.user_emb(
            torch.as_tensor(user_ids, dtype=torch.long, device=self.device)
        )
        # Fix: the demographic branch only exists when args.feat == 1.
        # Previously this method crashed on the missing ``age_emb``; fall
        # back to the id embedding instead.
        if not hasattr(self, 'age_emb'):
            return feat1

        # Fix: removed the unused `from utils.util import cat2num` import —
        # it was never called; the values below are fed to nn.Embedding
        # directly and must therefore already be integer-encoded.
        ages = [d['age'] for d in x]
        occupations = [d['occupation'] for d in x]
        genders = [d['gender'] for d in x]

        feat2 = self.age_emb(
            torch.as_tensor(ages, dtype=torch.long, device=self.device)
        )
        feat3 = self.occupation_emb(
            torch.as_tensor(occupations, dtype=torch.long, device=self.device)
        )
        feat4 = self.gender_emb(
            torch.as_tensor(genders, dtype=torch.long, device=self.device)
        )

        # Cross-feature attention over the four stacked feature vectors.
        features = torch.stack([feat1, feat2, feat3, feat4], dim=1)  # [batch_size, 4, hidden_units]
        interacted_features, _ = self.feature_interaction(features, features, features)

        # Learn per-feature importance weights from the raw (pre-attention)
        # features.
        importance_weights = self.feature_importance(features.view(features.shape[0], -1))  # [batch_size, 4]

        # Importance-weighted sum of the attended features.
        weighted_features = (interacted_features * importance_weights.unsqueeze(-1)).sum(dim=1)  # [batch_size, hidden_units]

        # MLP fusion of the concatenated raw features.
        all_features = torch.cat([feat1, feat2, feat3, feat4], dim=-1)
        out = self.feature_fusion(all_features)

        # Residual connection between the two fusion paths.
        out = out + weighted_features

        return out

class SASRec(nn.Module):
    """Self-attentive sequential recommender (SASRec) backbone.

    Stacked causal multi-head self-attention + point-wise feed-forward
    blocks over an item sequence, with optional fusion of user-profile
    features (when ``args.feat == 1``) into the sequence representation.
    """

    def __init__(self, user_num, item_num, args):
        super().__init__()

        self.user_num = user_num
        self.item_num = item_num
        self.dev = args.device
        self.norm_first = args.norm_first
        # Propagate vocabulary sizes onto args so the embedding sub-modules
        # below can size their tables from it.
        args.item_num=item_num
        args.user_num=user_num
        self.maxlen=args.maxlen

        self.item_emb = ItemEmb(args)
        self.user_emb = UserEmb(args)
        # Learnable positional embedding; position 0 is the padding slot.
        self.pos_emb = torch.nn.Embedding(args.maxlen+1, args.hidden_units, padding_idx=0)
        self.emb_dropout = torch.nn.Dropout(p=args.dropout_rate)

        # Components for fusing user features into the item sequence.
        if args.feat == 1:
            # Gate controlling how strongly user features influence the sequence.
            self.user_gate = nn.Sequential(
                nn.Linear(args.hidden_units, args.hidden_units),
                nn.Sigmoid()
            )
            # Projection of the user feature vector (used for modulation).
            self.user_proj = nn.Linear(args.hidden_units, args.hidden_units)
            # User-item cross attention (user as query, sequence as key/value).
            self.user_item_interaction = nn.MultiheadAttention(
                args.hidden_units, args.num_heads, args.dropout_rate, batch_first=True
            )
            # Enhancement MLP over the [sequence, user] concatenation.
            self.user_enhancement = nn.Sequential(
                nn.Linear(args.hidden_units * 2, args.hidden_units),
                nn.ReLU(),
                nn.Dropout(args.dropout_rate)
            )

        self.attention_layernorms = torch.nn.ModuleList() # to be Q for self-attention
        self.attention_layers = torch.nn.ModuleList()
        self.forward_layernorms = torch.nn.ModuleList()
        self.forward_layers = torch.nn.ModuleList()

        self.last_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)

        # One (layernorm, attention, layernorm, FFN) group per block.
        for _ in range(args.num_blocks):
            new_attn_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.attention_layernorms.append(new_attn_layernorm)

            # NOTE: built without batch_first, hence the (T, N, C) transposes
            # in log2feats below.
            new_attn_layer =  torch.nn.MultiheadAttention(args.hidden_units,
                                                            args.num_heads,
                                                            args.dropout_rate)
            self.attention_layers.append(new_attn_layer)

            new_fwd_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.forward_layernorms.append(new_fwd_layernorm)

            new_fwd_layer = PointWiseFeedForward(args.hidden_units, args.dropout_rate)
            self.forward_layers.append(new_fwd_layer)

    def log2feats(self, log_seqs, user_feat=None, item_feat=None):
        """Encode an item-id sequence into per-position hidden states.

        Args:
            log_seqs: item-id sequences, shape (batch, seq_len); 0 = padding.
                Presumably a LongTensor — it is fed straight to nn.Embedding
                and compared against 0 below; confirm against callers.
            user_feat: optional list of user-profile dicts (see UserEmb).
            item_feat: optional item feature list (see ItemEmb).

        Returns:
            Tensor of shape (batch, seq_len, hidden_units).
        """
        seqs = self.item_emb(log_seqs, item_feat)
        user_emb = self.user_emb(user_feat)

        # User feature fusion (only when the feat==1 modules were built).
        if user_emb is not None and hasattr(self, 'user_gate'):
            # 1. Gate: dynamically controls the strength of the user signal.
            user_gate = self.user_gate(user_emb)  # [batch_size, hidden_units]

            # 2. Project the user features (used for modulation in step 6).
            user_proj = self.user_proj(user_emb)  # [batch_size, hidden_units]

            # 3. User-item cross attention: user features as query, item
            #    sequence as key and value.
            user_emb_expanded = user_emb.unsqueeze(1).expand(-1, seqs.shape[1], -1)  # [batch_size, seq_len, hidden_units]
            user_item_attn, _ = self.user_item_interaction(
                user_emb_expanded, seqs, seqs
            )

            # 4. Gated residual injection of the cross-attention output.
            seqs = seqs + user_gate.unsqueeze(1) * user_item_attn

            # 5. Enhancement: concatenate sequence and user features, then
            #    add the MLP output as another residual.
            user_enhanced = self.user_enhancement(
                torch.cat([seqs, user_emb_expanded], dim=-1)
            )
            seqs = seqs + user_enhanced

            # 6. User-aware modulation: scale the sequence by a sigmoid of
            #    the projected user features.
            user_modulation = torch.sigmoid(user_proj.unsqueeze(1))  # [batch_size, 1, hidden_units]
            seqs = seqs * (1 + user_modulation)  # user-feature modulation

        # sqrt(d) scaling as in the Transformer.
        # NOTE(review): in vanilla SASRec this scaling is applied right after
        # the embedding lookup; here it also scales the fused user features
        # above — confirm this is intended.
        seqs *= self.item_emb.item_emb.embedding_dim ** 0.5
        # Positions are 1..seq_len; padded positions are zeroed so they hit
        # pos_emb's padding_idx.
        poss = np.tile(np.arange(1, log_seqs.shape[1] + 1), [log_seqs.shape[0], 1])
        poss = torch.LongTensor(poss).to(self.dev)
        poss *= (log_seqs != 0)

        seqs += self.pos_emb(poss)
        seqs = self.emb_dropout(seqs)

        tl = seqs.shape[1] # time dim len for enforce causality
        # Upper-triangular True mask blocks attention to future positions.
        attention_mask = ~torch.tril(torch.ones((tl, tl), dtype=torch.bool, device=self.dev))

        for i in range(len(self.attention_layers)):
            # MultiheadAttention here is seq-first, so swap to (T, N, C).
            seqs = torch.transpose(seqs, 0, 1)
            if self.norm_first:
                # Pre-norm variant: normalize before attention / FFN.
                x = self.attention_layernorms[i](seqs)
                mha_outputs, _ = self.attention_layers[i](x, x, x,
                                                attn_mask=attention_mask)
                seqs = seqs + mha_outputs
                seqs = torch.transpose(seqs, 0, 1)
                seqs = seqs + self.forward_layers[i](self.forward_layernorms[i](seqs))
            else:
                # Post-norm variant: normalize after the residual add.
                mha_outputs, _ = self.attention_layers[i](seqs, seqs, seqs,
                                                attn_mask=attention_mask)
                seqs = self.attention_layernorms[i](seqs + mha_outputs)
                seqs = torch.transpose(seqs, 0, 1)
                seqs = self.forward_layernorms[i](seqs + self.forward_layers[i](seqs))

        log_feats = self.last_layernorm(seqs) # (U, T, C) -> (U, -1, C)

        return log_feats

    def forward(self, log_seqs, pos_seqs, neg_seqs, user_feat=None, item_feat=None): # for training
        """Return dot-product logits for positive and negative samples.

        Returns:
            (pos_logits, neg_logits), each shaped like ``pos_seqs``.
        """
        log_feats = self.log2feats(log_seqs, user_feat, item_feat) # user_ids hasn't been used yet

        # Positive/negative samples deliberately carry no rating feature.
        pos_embs = self.item_emb(pos_seqs, None)
        neg_embs = self.item_emb(neg_seqs, None)

        pos_logits = (log_feats * pos_embs).sum(dim=-1)
        neg_logits = (log_feats * neg_embs).sum(dim=-1)

        # pos_pred = self.pos_sigmoid(pos_logits)
        # neg_pred = self.neg_sigmoid(neg_logits)

        return pos_logits, neg_logits # pos_pred, neg_pred

    def predict(self, log_seqs, item_indices, user_feat=None, item_feat=None): # for inference
        """Score candidate items for each user's sequence.

        Args:
            log_seqs: item-id sequences, shape (batch, seq_len).
            item_indices: candidate item ids, shared by all users.

        Returns:
            Logits of shape (batch, num_candidates).
        """
        log_feats = self.log2feats(log_seqs, user_feat, item_feat) # user_ids hasn't been used yet

        final_feat = log_feats[:, -1, :] # only use last QKV classifier, a waste

        # Every user scores the same candidate item list.
        batch_size = log_seqs.shape[0]
        item_indices_tensor = torch.LongTensor(item_indices).to(self.dev)  # [num_items]

        # Expand to [batch_size, num_items].
        item_indices_expanded = item_indices_tensor.expand(batch_size, -1)

        # NOTE(review): the real item_feat is discarded here — every
        # candidate gets a hard-coded default rating of 5; confirm intended.
        if item_feat is not None:
            item_feat_expanded = [{'rating': 5} for _ in range(len(item_indices))]
        else:
            item_feat_expanded = None

        item_embs = self.item_emb(item_indices_expanded, item_feat_expanded) # (U, I, C)

        logits = item_embs.matmul(final_feat.unsqueeze(-1)).squeeze(-1)

        # preds = self.pos_sigmoid(logits) # rank same item list for different users

        return logits # preds # (U, I)