from models.Modules import *
from models.MyModleConfig import *

class MyModel_prob(nn.Module):
    """Sequential recommender that embeds behaviors/items as Beta distributions.

    Each feature vector is projected to positive ``(alpha, beta)`` parameters
    of a Beta distribution.  A logical intersection over the behavior sequence
    yields an interest distribution that is scored against positive/negative
    items with a KL-based distance and a BPR loss.  In parallel, the Beta
    means of the sequence are (optionally) compressed into latest/middle/early
    groups and fed through causal transformers to score items directly.
    """

    def __init__(self, config: "MyModelConfig"):
        super().__init__()
        # --- plain config mirrors ---------------------------------------
        self.vocab_size_dic = config.vocab_size_dic
        self.heads = config.heads
        self.q_num = config.q_num
        self.short_time = config.short_time      # length of the "recent" window
        self.split_range = config.split_range    # fractions for middle/early spans
        self.layers1 = config.layers1
        self.layers2 = config.layers2
        self.total_behavior_num = config.total_behavior_num
        self.early_group_num = config.early_group_num
        self.middle_group_num = config.middle_group_num

        # --- embedding / logical components -----------------------------
        self.embedding = Embedding(config.vocab_size_dic,
                                   config.id_embedding_dim,
                                   config.simple_embedding_dim)
        self.reg_loss = RegLoss()
        self.loss_fct = BPRLoss()
        self.embedding_dim = self.embedding.h + self.embedding.c
        # Projects a feature to concatenated (alpha, beta) Beta parameters.
        self.fea2log = nn.Linear(self.embedding_dim, 2 * self.embedding_dim, bias=False)
        self.intersection = Intersection(self.embedding_dim)
        self.sampler = LogicalSampler()
        # Keep Beta parameters inside a valid positive range.
        self.entity_regularizer = Regularizer(1, 0.05, 1e9)
        self.projection_regularizer = Regularizer(0.05, 0.05, 1e9)
        self.token_embedding = nn.Embedding(config.q_num, self.embedding_dim)
        self.q_gen = nn.Linear(config.total_behavior_num, config.q_num, bias=False)

        # --- optional history compression --------------------------------
        self.compressed = config.compressed
        if self.compressed:
            self.middle_series_compression = Series_Compression(config.middle_group_num,
                                                                self.embedding_dim,
                                                                config.heads)
            self.early_series_compression = Series_Compression(config.early_group_num,
                                                               self.embedding_dim,
                                                               config.heads)
            # (latest, middle, early) lengths; middle/early are rounded to
            # multiples of their group sizes, so compressed lengths are exact.
            self.behavior_group_num = self.split_behavior(self.total_behavior_num)
            self.behavior_num = (self.behavior_group_num[0] +
                                 self.behavior_group_num[1] // config.middle_group_num +
                                 self.behavior_group_num[2] // config.early_group_num)
        else:
            self.behavior_num = self.total_behavior_num

        # NOTE(review): when config.use_eta is True no causal_transformers are
        # created, yet forward()/predict() always call them — confirm callers
        # never combine use_eta=True with this class.
        if not config.use_eta:
            self.causal_transformers = nn.Sequential(
                *[Casual_Transformer(self.embedding_dim,
                                     config.heads,
                                     self.behavior_num - self.short_time,
                                     self.short_time,
                                     config.drop_out,
                                     diff=config.diff1)
                  for _ in range(self.layers1)],
            )

        # Maps per-item scores over the recent window to a 2-way probability.
        self.ff = nn.Sequential(
            nn.Linear(config.short_time, config.short_time * 4, bias=False),
            nn.GELU(),
            nn.Linear(config.short_time * 4, config.short_time, bias=False),
            nn.GELU(),
            nn.Linear(config.short_time, 2, bias=False),
            nn.Softmax(dim=-1)
        )

    def logic_forward(self, alphas, betas):
        """Intersect the per-step Beta parameters into a single (alpha, beta)."""
        return self.intersection(alphas, betas)

    @staticmethod
    def _beta_mean(alpha, beta):
        """Mean of Beta(alpha, beta), used as a point embedding."""
        return alpha / (alpha + beta)

    def _compress_series(self, ts):
        """Split the sequence into latest/middle/early spans, compress the two
        older spans, and return the concatenation flipped along the time axis
        (oldest first, most recent last).

        Bug fix: the original narrow() calls used ``start - 1`` offsets, which
        duplicated one boundary timestep into two spans and silently dropped
        the final timestep; the spans below partition the sequence exactly.
        """
        latest_len, middle_len, early_len = self.behavior_group_num
        latest_ts = torch.narrow(ts, dim=-2, start=0, length=latest_len)
        middle_ts = torch.narrow(ts, dim=-2, start=latest_len, length=middle_len)
        early_ts = torch.narrow(ts, dim=-2,
                                start=latest_len + middle_len, length=early_len)
        compressed = torch.concat([
            latest_ts,
            self.middle_series_compression(middle_ts),
            self.early_series_compression(early_ts),
        ], dim=1)
        return torch.flip(compressed, [1])

    def _transform_behaviors(self, ts):
        """Optionally compress the sequence, then run the causal transformers
        and return the transformed recent-window states."""
        if self.compressed:
            ts = self._compress_series(ts)
        early_behaviors = ts[:, :-self.short_time, :]
        latest_behaviors = ts[:, -self.short_time:, :]
        transformed, _, _ = self.causal_transformers((latest_behaviors, early_behaviors))
        return transformed

    def forward(self, behaviors, pos_item, neg_item):
        """Training pass.

        Returns:
            (pos_probs, neg_probs, logic_loss, reg_loss) — the prob tensors
            are softmax outputs of shape (batch, 2).
        """
        pos_embed = self.embedding(pos_item)
        neg_embed = self.embedding(neg_item)
        ts = self.embedding(behaviors)
        alpha_seq, beta_seq = self.feature_to_beta(ts)
        alpha_pos, beta_pos = self.feature_to_beta(pos_embed)
        alpha_neg, beta_neg = self.feature_to_beta(neg_embed)

        # Logical branch: intersect the sequence, score items by KL distance.
        alpha_output, beta_output = self.logic_forward(alpha_seq, beta_seq)
        out_dis = self.vec_to_dis(alpha_output, beta_output)
        pos_dis = self.vec_to_dis(alpha_pos, beta_pos)
        neg_dis = self.vec_to_dis(alpha_neg, beta_neg)
        logic_loss = self.loss_fct(self.distance(pos_dis, out_dis),
                                   self.distance(neg_dis, out_dis))

        # Sequential branch: operate on Beta means (point embeddings).
        transformed = self._transform_behaviors(self._beta_mean(alpha_seq, beta_seq))
        pos_point = self._beta_mean(alpha_pos, beta_pos).unsqueeze(1)
        neg_point = self._beta_mean(alpha_neg, beta_neg).unsqueeze(1)
        pos_y = torch.einsum("bsd,bad->bsa", [transformed, pos_point]).squeeze(-1)
        neg_y = torch.einsum("bsd,bad->bsa", [transformed, neg_point]).squeeze(-1)

        reg_loss = self.reg_loss(self.parameters())
        return self.ff(pos_y), self.ff(neg_y), logic_loss, reg_loss

    def predict(self, behaviors, item):
        """Inference pass: score ``item`` against the behavior sequence.

        Bug fix: this path previously concatenated (alpha, beta) into a
        2*embedding_dim vector while forward() — the trained path — uses the
        Beta mean of width embedding_dim; the concatenated width cannot match
        the causal transformers' dimension.  Now mirrors forward() exactly.
        """
        item_embed = self.embedding(item)
        ts = self.embedding(behaviors)
        alpha_seq, beta_seq = self.feature_to_beta(ts)
        alpha_item, beta_item = self.feature_to_beta(item_embed)

        transformed = self._transform_behaviors(self._beta_mean(alpha_seq, beta_seq))
        item_point = self._beta_mean(alpha_item, beta_item).unsqueeze(1)
        pos_y = torch.einsum("bsd,bad->bsa", [transformed, item_point]).squeeze(-1)
        return self.ff(pos_y)

    def feature_to_beta(self, feature):
        """Project a feature vector to regularized (alpha, beta) parameters."""
        logic_input = self.projection_regularizer(self.fea2log(feature))
        alpha, beta = torch.chunk(logic_input, 2, dim=-1)
        return alpha, beta

    def vec_to_dis(self, alpha, beta):
        """Wrap (alpha, beta) tensors as a torch Beta distribution."""
        return torch.distributions.beta.Beta(alpha, beta)

    def distance(self, dis1, dis2):
        """Negated L1 norm of the elementwise KL divergence (higher = closer)."""
        return -torch.norm(torch.distributions.kl.kl_divergence(dis1, dis2),
                           p=1, dim=-1)

    def split_behavior(self, total_behavior_len):
        """Split the sequence length into (latest, middle, early) span lengths.

        Middle/early lengths are rounded down to multiples of their group
        sizes; if that leaves no positive "latest" span, whole groups are
        shaved off until it is.  Returns ``(latest_num, middle_num, early_num)``.
        """
        early_num = int(total_behavior_len * self.split_range[-1])
        early_num -= early_num % self.early_group_num

        middle_num = int(total_behavior_len * self.split_range[-2])
        middle_num -= middle_num % self.middle_group_num

        latest_num = total_behavior_len - (early_num + middle_num)
        if latest_num > 0:
            return latest_num, middle_num, early_num

        # No room for a latest window: give back whole compression groups.
        over_num = -latest_num
        if self.middle_group_num >= over_num >= 0:
            middle_num -= self.middle_group_num
        elif self.early_group_num >= over_num > self.middle_group_num:
            early_num -= self.early_group_num
        else:
            middle_num -= self.middle_group_num
            early_num -= self.early_group_num

        latest_num = total_behavior_len - (early_num + middle_num)
        return latest_num, middle_num, early_num

if __name__ == '__main__':
    # Smoke test: build random behavior sequences plus positive/negative
    # items, run one training forward pass, and print shapes and losses.
    torch.manual_seed(0)  # reproducible smoke run

    heads = 8
    batch = 10
    seq_len = 500
    vocab_size_dic = {
        'inherent_feature': {'item_id': 100, 'author': 20},
        'cross_feature': {'play_time': 10, 'date': 50}
    }
    # One id column per vocab entry (item_id, author, play_time, date),
    # concatenated along the last axis; highs match the original script.
    vocab_highs = (99, 19, 9, 49)

    def _rand_ids(size):
        """Random id tensor with one column per feature vocabulary."""
        return torch.concat(
            [torch.randint(0, high, size) for high in vocab_highs], dim=-1)

    behaviors = _rand_ids((batch, seq_len, 1))
    pos_item = _rand_ids((batch, 1))
    neg_item = _rand_ids((batch, 1))

    config = MyModelConfig(
        vocab_size_dic,
        heads,
        q_num=2,
        split_range=[0.2, 0.3, 0.5],
        total_behavior_num=seq_len,
        short_time=20,
    )

    myModel = MyModel_prob(config)
    pos_output, neg_output, bpr_loss, reg_loss = myModel(behaviors, pos_item, neg_item)
    print(pos_output.shape)
    print(neg_output.shape)
    print(bpr_loss)
    print(reg_loss)