from models.Modules import *
from models.MyModleConfig import *

class MyModel_Cos(nn.Module):
    """Score a target item against a user's behavior sequence via cosine similarity.

    The behavior history is embedded, optionally compressed (older behaviors are
    pooled into coarser groups by ``Series_Compression``), run through up to two
    stacks of causal transformers, and compared with the target-item embedding.
    The score ``y`` is a 2-column tensor shaped like ``[1 - p, p]``.
    """

    def __init__(self,config:MyModelConfig):
        super().__init__()
        # Copy hyper-parameters off the config object.
        self.vocab_size_dic = config.vocab_size_dic
        self.heads = config.heads
        self.q_num = config.q_num  # number of global-interest vectors produced by q_gen
        self.short_time = config.short_time  # length of the "recent behaviors" window
        self.split_range = config.split_range  # fractions of history per segment; [-1]=early, [-2]=middle
        self.layers1 = config.layers1
        self.layers2 = config.layers2
        self.total_behavior_num = config.total_behavior_num
        self.early_group_num = config.early_group_num  # pooling group size for the oldest segment
        self.middle_group_num = config.middle_group_num  # pooling group size for the middle segment
        self.cos_norm = config.cos_norm
        self.layer = config.layer  # 2 => use the second transformer stack in forward()
        self.use_cos_loss = config.use_cos_loss
        self.embedding = Embedding(config.vocab_size_dic,
                                   config.id_embedding_dim,
                                   config.simple_embedding_dim)
        # Total per-behavior embedding width (sum of the Embedding module's two parts).
        self.embedding_dim = self.embedding.h + self.embedding.c
        self.token_embedding = nn.Embedding(config.q_num,
                                            self.embedding_dim)
        # Projects the time axis (total_behavior_num) down to q_num interest vectors;
        # forward() applies it on the transposed sequence (see q_gen call there).
        self.q_gen = nn.Linear(config.total_behavior_num, config.q_num, bias=False)
        self.compressed = config.compressed
        if self.compressed:
            self.middle_series_compression = Series_Compression(config.middle_group_num,
                                                                self.embedding_dim,
                                                                config.heads)
            self.early_series_compression = Series_Compression(config.early_group_num,
                                                               self.embedding_dim,
                                                               config.heads)
            # (latest, middle, early) raw segment lengths.
            self.behavior_group_num = self.split_behavior(self.total_behavior_num)
            # Effective sequence length after group pooling of middle/early segments.
            self.behavior_num = (self.behavior_group_num[0] +
                                 self.behavior_group_num[1] // config.middle_group_num +
                                 self.behavior_group_num[2] // config.early_group_num)
        else:
            self.behavior_num = self.total_behavior_num

        # NOTE(review): when config.use_eta is True, neither transformer stack is
        # created (the alternative branch below is commented out), so forward()
        # would raise AttributeError — confirm use_eta is always False here.
        if not config.use_eta:
            self.causal_transformers1 = nn.Sequential(
                *[Casual_Transformer(self.embedding_dim,
                                    config.heads,
                                    self.behavior_num - self.short_time,
                                    self.short_time,
                                    config.drop_out,
                                    diff=config.diff1)
                for _ in range(self.layers1)],
            )

            self.causal_transformers2 = nn.Sequential(
                *[Casual_Transformer(self.embedding_dim,
                                    config.heads,
                                    self.short_time,
                                    self.q_num,
                                    config.drop_out,
                                    diff=config.diff2)
                for _ in range(self.layers2)],
            )
        # NOTE(review): commented-out alternatives retained from the original —
        # consider deleting if the use_eta path is dead.
        # else:
        #     self.causal_transformers1 = nn.Sequential(
        #         *[Casual_Transformer(self.embedding_dim,
        #                             config.heads,
        #                             self.behavior_num - self.short_time,
        #                             self.short_time,
        #                             config.drop_out,
        #                             diff=config.diff1)
        #         for _ in range(self.layers1)],
        #     )

        #     self.causal_transformers2 = nn.Sequential(
        #         *[Casual_Transformer(self.embedding_dim,
        #                             config.heads,
        #                             self.short_time,
        #                             self.q_num,
        #                             config.drop_out,
        #                             diff=config.diff2)
        #         for _ in range(self.layers2)],
        #     )

        # self.ff = nn.Sequential(
        #     nn.Linear(config.q_num,
        #               config.q_num * 4,
        #               bias=False),
        #     nn.GELU(),
        #     nn.Linear(config.q_num * 4,
        #               config.q_num ,
        #               bias=False),
        #     nn.GELU(),
        #     nn.Linear(config.q_num ,
        #               2, bias=False),
        #     nn.Softmax(dim=-1)
        # )

    def forward(self, behaviors, global_interest_tokens, target_item, us=None, atte1=None, atte2=None, ts=None):
        """Run one scoring pass.

        Args:
            behaviors: behavior-token ids; embedded when ``us`` is None.
            global_interest_tokens: accepted but never read — the
                ``token_embedding`` lookup is commented out below and the
                interest vectors come from ``q_gen`` instead.
            target_item: token ids of the candidate item to score.
            us, atte1, atte2, ts: optional precomputed states.
                NOTE(review): the ``us is not None`` path looks incomplete — it
                reaches code that uses ``transformed_latest_behaviors`` /
                ``global_interest_embed``, which are only bound inside the
                ``us is None`` branch, so it would raise UnboundLocalError.
                Confirm how callers use these arguments.

        Returns:
            ``(y, atte1, atte2, us, target_embed, ts)`` where ``y`` is (b, 2)
            with ``y[:, 1]`` the max cosine similarity against the target and
            ``y[:, 0] = 1 - y[:, 1]``.
            NOTE(review): ``cos_loss`` is computed below but never returned —
            confirm whether it should be part of the output.
        """
        b = behaviors.shape[0]
        # global_interest_embed = self.token_embedding(global_interest_tokens)
        target_embed = self.embedding(target_item)
        if len(target_embed.shape) == 2:
            # Single target per user: add a sequence axis of length 1.
            target_embed = target_embed.unsqueeze(1)
        if us is None:
            ts = self.embedding(behaviors)
            # Generate q_num interest vectors by mixing along the time axis.
            global_interest_embed = self.q_gen(ts.transpose(1,2)).transpose(1,2)
            if self.compressed:
                # Slice the sequence into (latest, middle, early) segments.
                # NOTE(review): the -1 offsets make middle/early windows overlap
                # the preceding segment by one element — confirm intended.
                early_ts = torch.narrow(ts, dim=-2,
                                        start=self.behavior_group_num[0] + self.behavior_group_num[1] - 1,
                                        length=self.behavior_group_num[2])
                middle_ts = torch.narrow(ts, dim=-2,
                                        start=self.behavior_group_num[0] - 1,
                                        length=self.behavior_group_num[1])
                latest_ts = torch.narrow(ts, dim=-2,
                                        start=0, length=self.behavior_group_num[0])
                compressed_early_ts = self.early_series_compression(early_ts)
                if self.use_cos_loss:
                    # Similarity between each pooled vector and its source group.
                    # NOTE(review): the [0] index averages only batch element 0 —
                    # confirm this should not be a mean over the whole batch.
                    cos_loss = torch.nn.functional.cosine_similarity(compressed_early_ts.unsqueeze(2), 
                                                                     early_ts.reshape(b,-1,self.early_group_num,self.embedding_dim),
                                                                     dim = -1).mean(dim=-1)[0].mean()
                compressed_middle_ts = self.middle_series_compression(middle_ts)
                if self.use_cos_loss:
                    cos_loss += torch.nn.functional.cosine_similarity(compressed_middle_ts.unsqueeze(2), 
                                                                     middle_ts.reshape(b,-1,self.middle_group_num,self.embedding_dim),
                                                                     dim = -1).mean(dim=-1)[0].mean()
                    # Negate so minimizing the loss maximizes similarity.
                    cos_loss = -1 * cos_loss
                else :
                    cos_loss = None
                # cos_loss = 0
                # Concatenate segments and flip the time axis — presumably to
                # restore oldest-first ordering (latest_ts starts at index 0),
                # matching the early/latest slicing below — TODO confirm.
                ts = torch.flip(torch.concat([
                    latest_ts,
                    compressed_middle_ts,
                    compressed_early_ts
                ], dim=1), [1])

            # NOTE(review): this concat is immediately overwritten by the tuple
            # assignment below — dead computation; confirm it can be removed.
            behaviors = torch.concat([ts,
                                    global_interest_embed,
                                    ],
                                    dim=-2)

            # Split into the long-history context and the short recent window.
            early_behaviors = ts[:, :-self.short_time,:]
            latest_behaviors = ts[:, -self.short_time:,:]
            behaviors = (latest_behaviors, early_behaviors)
            transformed_latest_behaviors, _, atte1 = self.causal_transformers1(behaviors)
        if self.layer == 2:
            # Second stack: attend the interest vectors over the recent window.
            behaviors = (global_interest_embed, transformed_latest_behaviors)
            us, _, atte2 = self.causal_transformers2(behaviors)
            # Best cosine match across interest vectors, expanded to [1-p, p].
            y = torch.max(torch.nn.functional.cosine_similarity(us, target_embed, dim=-1), dim = - 1, keepdim=True)[0]
            y = y.repeat(1,2)
            y[:,0] = 1 - y[:,1]
        else:
            # Single-stack variant: match directly against the recent window.
            y = torch.max(torch.nn.functional.cosine_similarity(transformed_latest_behaviors, target_embed, dim=-1), 
                        dim = - 1, keepdim=True)[0]
            y = y.repeat(1,2)
            y[:,0] = 1 - y[:,1]
        return y, atte1,atte2, us, target_embed, ts
    def split_behavior(self, total_behavior_len):
        """Split the history length into (latest, middle, early) segment sizes.

        ``early``/``middle`` sizes come from the ``split_range`` fractions,
        rounded down to whole multiples of their group sizes; ``latest`` takes
        the remainder. If the remainder is not positive, one pooling group is
        shaved off middle and/or early to make room for the latest segment.
        """
        early_num = int(total_behavior_len * self.split_range[-1])
        early_left_num = early_num % self.early_group_num
        if early_left_num > 0:
            # Round down to a whole number of early groups.
            early_num = early_num - early_left_num

        middle_num = int(total_behavior_len * self.split_range[-2])
        middle_left_num = middle_num % self.middle_group_num
        if middle_left_num > 0:
            # Round down to a whole number of middle groups.
            middle_num = middle_num - middle_left_num

        latest_num = total_behavior_len - (early_num + middle_num)

        if latest_num > 0:
            return latest_num, middle_num, early_num

        # Over-allocated: give back one group from whichever segment(s) suffice.
        over_num = -1 * latest_num
        if self.middle_group_num >= over_num >= 0:
            middle_num = middle_num - self.middle_group_num
        elif self.early_group_num >= over_num > self.middle_group_num:
            early_num = early_num - self.early_group_num
        else:
            middle_num = middle_num - self.middle_group_num
            early_num = early_num - self.early_group_num

        latest_num = total_behavior_len - (early_num + middle_num)
        return latest_num, middle_num, early_num

if __name__ == '__main__':
    # Smoke test: build a toy vocabulary/config and run one forward pass on
    # random data, printing the output shapes.
    heads = 8
    vocab_size_dic = {
        'inherent_feature': {'item_id': 100, 'author': 20},
        'cross_feature': {'play_time': 10, 'date': 50}
    }
    # 10 users x 500 behaviors; one column per feature field, concatenated last.
    behaviors = [torch.randint(0, 99, (10, 500, 1)),
                 torch.randint(0, 19, (10, 500, 1)),
                 torch.randint(0, 9, (10, 500, 1)),
                 torch.randint(0, 49, (10, 500, 1))]
    behaviors = torch.concat(behaviors, dim=-1)
    q_num = 2
    global_interest_tokens = torch.tensor(range(q_num)).unsqueeze(0).repeat(10, 1).long()

    # One candidate item per user, same four feature fields.
    target_item = [
        torch.randint(0, 99, (10, 1)),
        torch.randint(0, 19, (10, 1)),
        torch.randint(0, 9, (10, 1)),
        torch.randint(0, 49, (10, 1))
    ]
    target_item = torch.concat(target_item, dim=-1)

    config = MyModelConfig(
        vocab_size_dic,
        heads,
        q_num=q_num,
        split_range=[0.2, 0.3, 0.5],
        total_behavior_num=500,
        short_time=20,
    )

    myModel = MyModel_Cos(config)
    # BUG FIX: forward() returns six values (y, atte1, atte2, us, target_embed,
    # ts); the original 3-way unpack raised "ValueError: too many values to
    # unpack". Unpack all six and keep the original prints.
    output, atte1, atte2, us, target_embed, ts = myModel(behaviors, global_interest_tokens, target_item)
    print(output.shape)
    print(atte1.shape)
    print(atte2.shape)
