import torch
import torch.nn as nn
from Modules import *
from ReinforceModelConfig import ReinforceModelConfig

class Reinforce_Model(nn.Module):
    """Multi-interest recommendation model with optional behavior compression.

    Pipeline (see ``forward``):
      1. Embed the behavior sequence and the target item.
      2. Optionally compress the early (long-term) part of the sequence into
         ``group_num`` pseudo-behaviors via a compression policy network.
      3. Causal transformer stage 1: the ``short_time`` most recent behaviors
         attend over the (possibly compressed) earlier behaviors.
      4. Optional stage 2 (``layer == 2``): ``interest_num`` interest queries
         attend over the transformed recent behaviors.
      5. Score the target item against the resulting representations with
         either cosine similarity or a small feed-forward head.
    """

    def __init__(self,
                 config:ReinforceModelConfig):
        """Build all sub-modules from ``config``.

        Raises:
            ValueError: if ``config.compress_net`` is not one of
                ``"mlp"``, ``"gru"``, ``"conv"`` while compression is enabled.
        """
        super().__init__()
        self.vocab_size_dic = config.vocab_size_dic
        self.heads = config.heads
        self.interest_num = config.interest_num
        self.short_time = config.short_time
        self.layers1 = config.layers1
        self.layers2 = config.layers2
        self.group_num = config.group_num
        self.total_behavior_num = config.total_behavior_num
        self.layer = config.layer
        self.gradient_gather = config.gradient_gather
        self.embedding = Embedding(config.vocab_size_dic,
                                   config.id_embedding_dim,
                                   config.simple_embedding_dim)
        # Total per-behavior embedding width (id part + simple part).
        self.embedding_dim = self.embedding.h + self.embedding.c
        self.token_embedding = nn.Embedding(config.interest_num,
                                            self.embedding_dim)
        self.compress_net = config.compress_net
        self.random_init_q = config.random_init_q
        if not self.random_init_q:
            # Interest queries are a learned linear mix over the behavior axis.
            self.q_gen = nn.Linear(config.total_behavior_num, config.interest_num, bias=False)
        else:
            # Interest queries come from a free embedding table.
            self.interest_embedding = nn.Embedding(self.interest_num, self.embedding_dim)
        self.compressed = config.compressed
        if self.compressed:
            if self.compress_net == "mlp":
                self.compression = CompressionPolicyNetwork_MLP(self.embedding_dim,
                                                                self.group_num,
                                                                self.total_behavior_num - self.short_time,
                                                                self.gradient_gather)
            elif self.compress_net == "gru":
                self.compression = CompressionPolicyNetwork_GRU(self.embedding_dim,
                                                                self.group_num)
            elif self.compress_net == "conv":
                self.compression = CompressionPolicyNetwork_Conv(self.embedding_dim,
                                                                self.group_num)
            else:
                # FIX: raise instead of print + exit(-1) so callers can
                # catch and report the misconfiguration.
                raise ValueError(f"No such compress net:{self.compress_net}")
            # Early behaviors are squashed to group_num pseudo-behaviors.
            self.behavior_num = self.group_num + self.short_time
        else:
            self.behavior_num = self.total_behavior_num

        # Stage 1: recent behaviors (queries) over early behaviors (keys/values).
        self.causal_transformers1 = nn.Sequential(
            *[Casual_Transformer(self.embedding_dim,
                                config.heads,
                                self.behavior_num - self.short_time,
                                self.short_time,
                                config.drop_out,
                                diff=config.diff1)
            for _ in range(self.layers1)],
        )

        # Stage 2: interest queries over the stage-1 output (used when layer == 2).
        self.causal_transformers2 = nn.Sequential(
            *[Casual_Transformer(self.embedding_dim,
                                config.heads,
                                self.short_time,
                                self.interest_num,
                                config.drop_out,
                                diff=config.diff2)
            for _ in range(self.layers2)],
        )
        self.use_cos = config.use_cos
        if not self.use_cos:
            # Feed-forward scoring head; input width depends on how many
            # query vectors reach the head (interest_num vs short_time).
            if self.layer == 2:
                self.ff = nn.Sequential(
                    nn.Linear(config.interest_num,
                            config.interest_num * 4,
                            bias=False),
                    nn.GELU(),
                    nn.Linear(config.interest_num * 4,
                            config.interest_num,
                            bias=False),
                    nn.GELU(),
                    nn.Linear(config.interest_num,
                            2, bias=False),
                )
            else:
                self.ff = nn.Sequential(
                    nn.Linear(config.short_time,
                            config.short_time * 4,
                            bias=False),
                    nn.GELU(),
                    nn.Linear(config.short_time * 4,
                            config.short_time,
                            bias=False),
                    nn.GELU(),
                    nn.Linear(config.short_time,
                            2, bias=False),
                )

    def _score(self, queries, target_embed, batch_size):
        """Score ``target_embed`` against ``queries``; returns a (batch, 2) tensor.

        Shared head for both the 1-layer and 2-layer variants (the original
        duplicated this logic in both branches of ``forward``).
        """
        if self.use_cos:
            # Best cosine match across queries; column 0 is its complement,
            # so the pair behaves like (negative, positive) scores.
            y = torch.max(
                torch.nn.functional.cosine_similarity(queries, target_embed, dim=-1),
                dim=-1, keepdim=True)[0]
            y = y.repeat(1, 2)
            y[:, 0] = 1 - y[:, 1]
        else:
            # Inner products between each query and the target, flattened
            # into the feed-forward head.
            y = self.ff(torch.einsum("bqd,bad->bqa", [queries, target_embed]).view(batch_size, -1))
        return y

    def forward(self, behaviors, target_item):
        """Run the model.

        Args:
            behaviors: integer id tensor of shape (batch, total_behavior_num, n_features).
            target_item: integer id tensor of shape (batch, n_features).

        Returns:
            Tuple ``(y, compress_loss)`` where ``y`` is a (batch, 2) score
            tensor and ``compress_loss`` is the compression policy loss
            (plain ``0`` when compression is disabled).
        """
        b = behaviors.shape[0]
        target_embed = self.embedding(target_item)
        if target_embed.dim() == 2:
            target_embed = target_embed.unsqueeze(1)  # -> (batch, 1, dim)
        ts = self.embedding(behaviors)
        # Interest queries: projected from the (uncompressed) sequence, or
        # looked up from the learned table.
        if not self.random_init_q:
            interest_embed = self.q_gen(ts.transpose(1, 2)).transpose(1, 2)
        else:
            # FIX: create the index tensor on the input's device so the
            # model also runs on GPU inputs.
            interest_tokens = (torch.arange(self.interest_num, device=behaviors.device)
                               .unsqueeze(0).repeat(b, 1).long())
            interest_embed = self.interest_embedding(interest_tokens)
        if self.compressed:
            # Keep the short_time most recent behaviors verbatim; compress
            # only the earlier part of the sequence.
            short_time_ts = ts[:, -self.short_time:, :]
            ts, compress_loss = self.compression(ts[:, :-self.short_time, :])
            ts = torch.cat([ts, short_time_ts], dim=-2)
        else:
            compress_loss = 0
        # NOTE: the original concatenated ts with interest_embed here and
        # immediately overwrote the result — that dead statement is removed.
        early_behaviors = ts[:, :-self.short_time, :]
        latest_behaviors = ts[:, -self.short_time:, :]
        transformed_latest_behaviors, _, _ = self.causal_transformers1(
            (latest_behaviors, early_behaviors))

        if self.layer == 2:
            us, _, _ = self.causal_transformers2(
                (interest_embed, transformed_latest_behaviors))
            y = self._score(us, target_embed, b)
        else:
            y = self._score(transformed_latest_behaviors, target_embed, b)
        return y, compress_loss
    
if __name__ == "__main__":
    # Smoke test: push random feature ids through the model and inspect outputs.
    num_heads = 8
    interest_num = 2
    vocab_size_dic = {
        'inherent_feature': {'item_id': 100, 'author': 20},
        'cross_feature': {'play_time': 10, 'date': 50}
    }
    # Exclusive upper bounds for the four id columns, in feature order:
    # item_id, author, play_time, date.
    id_bounds = (99, 19, 9, 49)

    # Behavior history: (batch=10, seq=20, 4 feature columns).
    behaviors = torch.concat(
        [torch.randint(0, high, (10, 20, 1)) for high in id_bounds], dim=-1)

    # Target item: (batch=10, 4 feature columns).
    target_item = torch.concat(
        [torch.randint(0, high, (10, 1)) for high in id_bounds], dim=-1)

    config = ReinforceModelConfig(
        vocab_size_dic,
        num_heads,
        interest_num=interest_num,
        group_num=5,
        total_behavior_num=20,
        short_time=3,
        compress_net='mlp'
    )

    model = Reinforce_Model(config)
    scores, compress_loss = model(behaviors, target_item)
    print(scores.shape)
    print(compress_loss)
    print(scores.requires_grad)
    print(compress_loss.requires_grad)
