from models.Modules import *
from models.config_class import *
import torch

seed = 42  # Fixed random seed for reproducibility

# 1. Seed PyTorch's CPU random number generator
torch.manual_seed(seed)

# 2. When CUDA is in use, the GPU generator must be seeded as well
#    (no-op when CUDA is unavailable; seeds the current device only)
torch.cuda.manual_seed(seed)

class KuaiFormer(nn.Module):
    """KuaiFormer-style retrieval model.

    The behavior sequence (ordered newest-first on input) is split into
    latest / middle / early segments.  The middle and early segments are
    compressed group-wise by ``Series_Compression`` modules, the compressed
    sequence is flipped to chronological order, ``q_num`` learnable interest
    tokens are appended, and the whole thing is run through stacked causal
    transformer layers.  The score is the best cosine similarity between any
    interest-token output and the target item embedding.
    """

    def __init__(self,
                 config: KuaiFormerConfig
                 ):
        super().__init__()
        latest_rate, middle_rate, early_rate = config.split_range
        # Compare the ratio sum with a tolerance instead of exact float
        # equality: ratios such as (0.1, 0.2, 0.7) do not sum to exactly
        # 1.0 in binary floating point and would spuriously fail here.
        assert (
                abs(latest_rate + middle_rate + early_rate - 1) < 1e-6
                and min(config.split_range) > 0
        ), "分配比例有问题"
        self.vocab_size_dic = config.vocab_size_dic
        self.heads = config.heads
        self.q_num = config.q_num
        self.split_range = config.split_range
        self.layers = config.layers
        self.total_behavior_num = config.total_behavior_num
        self.early_group_num = config.early_group_num
        self.middle_group_num = config.middle_group_num
        self.embedding = Embedding(config.vocab_size_dic,
                                   config.id_embedding_dim,
                                   config.simple_embedding_dim)
        # Total per-behavior embedding width (presumably id + simple parts
        # concatenated -- determined by the Embedding module).
        self.embedding_dim = self.embedding.h + self.embedding.c
        self.middle_series_compression = Series_Compression(config.middle_group_num,
                                                            self.embedding_dim,
                                                            config.heads)
        self.early_series_compression = Series_Compression(config.early_group_num,
                                                           self.embedding_dim,
                                                           config.heads)

        # (latest, middle, early) raw lengths; middle/early are multiples of
        # their group sizes so compression divides them evenly.
        self.behavior_group_num = self.split_behavior(self.total_behavior_num)
        # Sequence length after compression: latest kept as-is, middle and
        # early reduced by their respective group factors.
        self.behavior_num = (self.behavior_group_num[0] +
                             self.behavior_group_num[1] // config.middle_group_num +
                             self.behavior_group_num[2] // config.early_group_num)

        # Learnable interest query tokens appended after the behaviors.
        self.token_embedding = nn.Embedding(config.q_num,
                                            self.embedding_dim)

        self.causal_transformers = nn.Sequential(
            *[Casual_Transformer(self.embedding_dim,
                                 config.heads,
                                 self.behavior_num,
                                 config.q_num)
              for _ in range(config.layers)],
        )

    def forward(self, behaviors, target_item):
        """Score ``target_item`` against the user's ``behaviors``.

        Args:
            behaviors: behavior-sequence feature ids, batch-first; the
                sequence axis (dim -2 after embedding) is ordered
                newest-first and has length ``total_behavior_num``.
            target_item: feature ids of the candidate item(s).

        Returns:
            Tensor of shape (batch, 2): column 1 is the max cosine
            similarity mapped to [0, 1]; column 0 is its complement.
        """
        b = behaviors.shape[0]
        ts = self.embedding(behaviors)
        # Interest query tokens, replicated per batch row.
        global_interest_embed = self.token_embedding(
            torch.arange(self.q_num, device=behaviors.device)
        ).unsqueeze(0).repeat(b, 1, 1)
        target_embed = self.embedding(target_item)
        if len(target_embed.shape) == 2:
            target_embed = target_embed.unsqueeze(1)
        # Sequence layout (newest-first): [latest | middle | early].
        # FIX: the segment starts previously subtracted 1, which made the
        # middle window re-use the last latest element, the early window
        # re-use the last middle element, and dropped the oldest behavior.
        latest_len, middle_len, early_len = self.behavior_group_num
        latest_ts = torch.narrow(ts, dim=-2,
                                 start=0, length=latest_len)
        middle_ts = torch.narrow(ts, dim=-2,
                                 start=latest_len, length=middle_len)
        early_ts = torch.narrow(ts, dim=-2,
                                start=latest_len + middle_len, length=early_len)
        compressed_early_ts = self.early_series_compression(early_ts)
        compressed_middle_ts = self.middle_series_compression(middle_ts)
        # Flip to chronological (oldest-first) order for the causal stack.
        ts = torch.flip(torch.concat([
            latest_ts,
            compressed_middle_ts,
            compressed_early_ts
        ], dim=1), [1])

        behaviors = torch.concat([ts, global_interest_embed],
                                 dim=-2)
        us, atte = self.causal_transformers(behaviors)
        # Keep only the outputs at the q_num interest-token positions.
        us = torch.narrow(us, dim=-2, start=self.behavior_num,
                          length=self.q_num)
        # Best cosine similarity over interests, mapped from [-1,1] to [0,1].
        y = torch.max(torch.nn.functional.cosine_similarity(us, target_embed, dim=-1),
                      dim=-1, keepdim=True)[0]
        y = 0.5 * y + 0.5
        y = y.repeat(1, 2)
        y[:, 0] = 1 - y[:, 1]  # two-column output: (1 - p, p)
        return y

    def split_behavior(self, total_behavior_len):
        """Split ``total_behavior_len`` into (latest, middle, early) counts.

        Middle and early counts are truncated down to multiples of their
        group sizes so group-wise compression divides them evenly; whatever
        remains goes to the latest segment.  If that leaves the latest
        segment empty (or negative), one whole compression group is taken
        back from middle and/or early to free up room.
        """
        early_num = int(total_behavior_len * self.split_range[-1])
        early_left_num = early_num % self.early_group_num
        if early_left_num > 0:
            early_num = early_num - early_left_num

        middle_num = int(total_behavior_len * self.split_range[-2])
        middle_left_num = middle_num % self.middle_group_num
        if middle_left_num > 0:
            middle_num = middle_num - middle_left_num

        latest_num = total_behavior_len - (early_num + middle_num)

        if latest_num > 0:
            return latest_num, middle_num, early_num

        # Latest segment is empty: return whole groups until it is positive.
        # NOTE(review): the elif branch assumes early_group_num is larger
        # than middle_group_num -- confirm against the config defaults.
        over_num = -1 * latest_num
        if self.middle_group_num >= over_num >= 0:
            middle_num = middle_num - self.middle_group_num
        elif self.early_group_num >= over_num > self.middle_group_num:
            early_num = early_num - self.early_group_num
        else:
            middle_num = middle_num - self.middle_group_num
            early_num = early_num - self.early_group_num

        latest_num = total_behavior_len - (early_num + middle_num)
        return latest_num, middle_num, early_num


if __name__ == '__main__':
    # Smoke test: random feature ids for a batch of 10 users with a
    # 500-step behavior history over four feature columns.
    heads = 8
    vocab_size_dic = {
        'inherent_feature': {'item_id': 100, 'author': 20},
        'cross_feature': {'play_time': 10, 'date': 50}
    }
    # Exclusive upper bound of each feature column's random ids
    # (item_id, author, play_time, date).
    id_bounds = [99, 19, 9, 49]
    behaviors = torch.concat(
        [torch.randint(0, high, (10, 500, 1)) for high in id_bounds],
        dim=-1)

    target_item = torch.concat(
        [torch.randint(0, high, (10, 1)) for high in id_bounds],
        dim=-1)

    config = KuaiFormerConfig(
        vocab_size_dic,
        heads,
        q_num=4,
        split_range=[0.2, 0.3, 0.5],
        total_behavior_num=200,
    )
    kuaiformer = KuaiFormer(config)
    output = kuaiformer(behaviors, target_item)
    print(output)

