# the relation consensus module by Bolei
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable


class RelationModule(torch.nn.Module):
    """Single-scale temporal relation head.

    Concatenates the per-frame features of all ``num_frames`` frames and
    classifies the fused vector (here num_frames == num_frames_relation).
    ``num_class`` may be an int (single task) or a tuple of ints
    (one linear head per task; forward then returns a tuple of logits).
    """

    def __init__(self, img_feature_dim, num_frames, num_class):
        super(RelationModule, self).__init__()
        self.num_frames = num_frames
        self.num_class = num_class
        self.img_feature_dim = img_feature_dim
        self.pre_classifier, self.classifiers = self.fc_fusion()

    def fc_fusion(self):
        """Build the shared bottleneck MLP and one linear head per task."""
        bottleneck_dim = 512
        fusion = nn.Sequential(
            nn.ReLU(),
            nn.Linear(self.num_frames * self.img_feature_dim, bottleneck_dim),
            nn.ReLU(),
        )
        # a tuple of class counts means one classification head per task
        if isinstance(self.num_class, tuple):
            heads = nn.ModuleList(
                nn.Linear(bottleneck_dim, n) for n in self.num_class
            )
        else:
            heads = nn.ModuleList([nn.Linear(bottleneck_dim, self.num_class)])
        return fusion, heads

    def forward(self, input):
        # flatten (batch, frames, feat) -> (batch, frames * feat)
        flat = input.view(input.size(0), self.num_frames * self.img_feature_dim)
        fused = self.pre_classifier(flat)
        logits = [head(fused) for head in self.classifiers]
        # single-task callers get a tensor, multi-task callers a tuple
        return logits[0] if len(logits) == 1 else tuple(logits)


class RelationModuleMultiScale(torch.nn.Module):
    """Multi-scale Temporal Relation module.

    Sums logits over [2-frame relation, 3-frame relation, ..., n-frame
    relation]. The largest scale uses all frames (exactly one combination);
    each smaller scale randomly subsamples up to ``subsample_num`` frame
    combinations at every forward pass. ``num_class`` may be an int
    (single task) or a tuple of ints (multi-task: one linear head per task
    at every scale; forward then returns a tuple of logits).
    """

    def __init__(self, img_feature_dim, num_frames, num_class):
        super(RelationModuleMultiScale, self).__init__()
        self.subsample_num = 3  # how many relations selected to sum up
        self.img_feature_dim = img_feature_dim
        # largest scale first: [num_frames, num_frames - 1, ..., 2]
        self.scales = [i for i in range(num_frames, 1, -1)]

        self.relations_scales = []
        self.subsample_scales = []
        for scale in self.scales:
            relations_scale = self.return_relationset(num_frames, scale)
            self.relations_scales.append(relations_scale)
            # how many samples of relation to select in each forward pass
            self.subsample_scales.append(min(self.subsample_num, len(relations_scale)))

        self.num_class = num_class
        self.multi_task = isinstance(num_class, tuple)
        # Nested list, first level is the temporal scale, second level is the task
        self.classifiers = torch.nn.ModuleList()
        self.num_frames = num_frames
        num_bottleneck = 256
        self.fc_fusion_scales = nn.ModuleList()  # one fusion MLP per scale
        for scale in self.scales:
            fc_fusion = nn.Sequential(
                nn.ReLU(),
                nn.Linear(scale * self.img_feature_dim, num_bottleneck),
                nn.ReLU(),
            )

            if self.multi_task:
                task_classifiers = [
                    nn.Linear(num_bottleneck, task_num_class)
                    for task_num_class in num_class
                ]
            else:
                task_classifiers = [nn.Linear(num_bottleneck, num_class)]
            self.classifiers.append(torch.nn.ModuleList(task_classifiers))
            self.fc_fusion_scales += [fc_fusion]

        print(
            "Multi-Scale Temporal Relation Network Module in use",
            ["%d-frame relation" % i for i in self.scales],
        )

    def forward(self, input):
        # the first (largest) scale has exactly one relation: all frames
        act_all = input[:, self.relations_scales[0][0], :]
        act_all = act_all.view(act_all.size(0), self.scales[0] * self.img_feature_dim)
        act_all = self.fc_fusion_scales[0](act_all)
        # per-task running sums of logits
        act_all = [classifier(act_all) for classifier in self.classifiers[0]]

        for scaleID in range(1, len(self.scales)):
            # randomly sample a few frame combinations at this scale
            idx_relations_randomsample = np.random.choice(
                len(self.relations_scales[scaleID]),
                self.subsample_scales[scaleID],
                replace=False,
            )
            # BUG FIX: index the classifiers by scaleID. The old code used an
            # enumerate counter that lagged scaleID by one, so each scale's
            # fused features were fed to the PREVIOUS scale's heads (and the
            # last scale's heads were never used). The shapes matched, so
            # the bug was silent.
            task_classifiers = self.classifiers[scaleID]
            for idx in idx_relations_randomsample:
                act_relation = input[:, self.relations_scales[scaleID][idx], :]
                act_relation = act_relation.view(
                    act_relation.size(0), self.scales[scaleID] * self.img_feature_dim
                )
                act_relation = self.fc_fusion_scales[scaleID](act_relation)
                for task_index, classifier in enumerate(task_classifiers):
                    act_all[task_index] += classifier(act_relation)

        if not self.multi_task:
            return act_all[0]
        return tuple(act_all)

    def return_relationset(self, num_frames, num_frames_relation):
        """Return all frame-index combinations of size num_frames_relation."""
        import itertools

        return list(
            itertools.combinations([i for i in range(num_frames)], num_frames_relation)
        )


class RelationModuleMultiScaleWithClassifier(torch.nn.Module):
    """Multi-scale temporal relation module with one classifier per scale.

    Like the multi-scale relation module, but each scale owns a deeper
    fusion MLP (with dropout) followed by its own linear classifier; the
    per-scale logits are summed into a single output.
    """

    def __init__(self, img_feature_dim, num_frames, num_class):
        super(RelationModuleMultiScaleWithClassifier, self).__init__()
        self.subsample_num = 3  # number of sampled relations summed per scale
        self.img_feature_dim = img_feature_dim
        self.scales = list(range(num_frames, 1, -1))  # largest scale first

        self.relations_scales = []
        self.subsample_scales = []
        for scale in self.scales:
            combos = self.return_relationset(num_frames, scale)
            self.relations_scales.append(combos)
            # never sample more relations than exist at this scale
            self.subsample_scales.append(min(self.subsample_num, len(combos)))

        self.num_class = num_class
        self.num_frames = num_frames
        num_bottleneck = 256
        self.fc_fusion_scales = nn.ModuleList()  # high-tech modulelist
        self.classifier_scales = nn.ModuleList()
        for scale in self.scales:
            self.fc_fusion_scales.append(
                nn.Sequential(
                    nn.ReLU(),
                    nn.Linear(scale * self.img_feature_dim, num_bottleneck),
                    nn.ReLU(),
                    nn.Dropout(p=0.6),  # this is the newly added thing
                    nn.Linear(num_bottleneck, num_bottleneck),
                    nn.ReLU(),
                    nn.Dropout(p=0.6),
                )
            )
            self.classifier_scales.append(nn.Linear(num_bottleneck, self.num_class))
        # maybe we put another fc layer after the summed up results???
        print("Multi-Scale Temporal Relation with classifier in use")
        print(["%d-frame relation" % i for i in self.scales])

    def forward(self, input):
        batch = input.size(0)
        # the single relation at the largest scale covers all frames
        out = input[:, self.relations_scales[0][0], :].view(
            batch, self.scales[0] * self.img_feature_dim
        )
        out = self.classifier_scales[0](self.fc_fusion_scales[0](out))

        for scaleID in range(1, len(self.scales)):
            # sample a random subset of frame combinations at this scale
            sampled = np.random.choice(
                len(self.relations_scales[scaleID]),
                self.subsample_scales[scaleID],
                replace=False,
            )
            width = self.scales[scaleID] * self.img_feature_dim
            for idx in sampled:
                rel = input[:, self.relations_scales[scaleID][idx], :].view(batch, width)
                rel = self.classifier_scales[scaleID](self.fc_fusion_scales[scaleID](rel))
                out += rel
        return out

    def return_relationset(self, num_frames, num_frames_relation):
        """Return all frame-index combinations of size num_frames_relation."""
        import itertools

        return list(itertools.combinations(range(num_frames), num_frames_relation))


def return_TRN(relation_type, img_feature_dim, num_frames, num_class):
    """Factory: build a TRN consensus module by (case-insensitive) name.

    Accepts "TRN" or "TRNmultiscale"; raises ValueError otherwise.
    """
    builders = {
        "trn": RelationModule,
        "trnmultiscale": RelationModuleMultiScale,
    }
    key = relation_type.lower()
    builder = builders.get(key)
    if builder is None:
        # original message reports the lowered name
        raise ValueError("Unknown TRN '{}'".format(key))
    return builder(img_feature_dim, num_frames, num_class)


def __main():
    """Smoke test: run a random batch through the multi-scale module."""
    batch_size = 10
    num_frames = 5
    num_class = 174
    img_feature_dim = 512
    # torch.autograd.Variable is deprecated (a no-op wrapper since
    # PyTorch 0.4) — a plain tensor is all the module needs.
    input_var = torch.randn(batch_size, num_frames, img_feature_dim)
    model = RelationModuleMultiScale(img_feature_dim, num_frames, num_class)
    output = model(input_var)
    print(output)


# run the smoke test only when executed as a script, not on import
if __name__ == "__main__":
    __main()
