import oneflow as torch
import oneflow.nn as nn
import oneflow.nn.functional as F

MAX_FRAMES = 1000  # assumed upper bound on encoder frames; used as a sentinel index when sorting spike positions — TODO confirm inputs never exceed it

class LookAheadSpikerDetector():
    """Splits an utterance into per-token attention chunks keyed on CTC spikes.

    A frame is a "spike" when its non-blank probability exceeds
    ``trigger_threshold`` (only the first frame of a consecutive triggered run
    counts).  Token ``l`` may attend from ``left_context`` frames before its
    spike up to ``look_ahead`` frames after it.
    """

    def __init__(self, trigger_threshold, look_ahead, left_context=-1, blank=0):
        # trigger_threshold: minimum (1 - p_blank) for a frame to trigger.
        # look_ahead: frames of right context granted past each spike.
        # left_context: frames of left context per spike; -1 means "from frame 0".
        # blank: index of the CTC blank symbol in the probability distribution.
        self.trigger_threshold = trigger_threshold
        self.look_ahead = look_ahead
        self.blank = blank
        self.left_context = left_context

    def split_into_chunks_online(self):
        raise NotImplementedError

    def split_into_chunks_mask(self, ctc_probs, memory_mask, max_length):
        """Build per-token encoder attention masks from CTC spike positions.

        Args:
            ctc_probs: [b, t, vocab] CTC posteriors.
            memory_mask: [b, 1, t] bool mask of valid encoder frames.
            max_length: number of decoder steps to produce masks for.

        Returns:
            (batch_chunks_pos, chunk_mask): per-batch lists of [start, end)
            frame ranges, and a [b, max_length, t] bool attention mask.
        """
        spike = self.get_trigger_spike(ctc_probs)
        # Spikes in padded frames are not real; zero them out.
        spike.masked_fill_(~memory_mask.squeeze(1), 0)
        num_spikes = torch.sum(spike, dim=-1)
        num_frames = torch.sum(memory_mask.squeeze(1), dim=-1)

        spike_indices = self.get_spike_indices(spike)

        # mask [b, max_length, t]
        batch_size, _, max_frames = memory_mask.size()
        mask = torch.zeros([batch_size, max_length, max_frames], device=memory_mask.device)
        batch_chunks_pos = []

        for b in range(batch_size):
            predict_length = num_spikes[b].item()
            frame_length = num_frames[b].item()
            chunks_pos = []

            min_length = min(predict_length, max_length)
            for l in range(min_length):
                spike_pos = spike_indices[b][l].item()
                start_pos = 0 if self.left_context == -1 else max(0, spike_pos - self.left_context)
                # Right edge: look_ahead frames past the spike, clipped to the
                # last valid frame.
                end_pos = min(spike_pos + self.look_ahead, frame_length - 1)
                mask[b][l][start_pos:end_pos + 1] = 1
                chunks_pos.append([start_pos, end_pos + 1])

            # Remaining decoder steps (beyond the available spikes) all attend
            # from the last consumed spike's left edge up to the utterance end.
            spike_pos = 0 if predict_length == 0 else spike_indices[b][min_length - 1].item()
            start_pos = 0 if self.left_context == -1 else max(0, spike_pos - self.left_context)
            for l in range(predict_length, max_length):
                mask[b][l][start_pos:frame_length] = 1
                chunks_pos.append([start_pos, frame_length])

            batch_chunks_pos.append(chunks_pos)

        return batch_chunks_pos, mask > 0

    def get_trigger_spike(self, ctc_probs):
        """Return a [b, t] int tensor with 1 at the first frame of each triggered run."""
        blank_probs = ctc_probs[:, :, self.blank]  # [b, t]
        spike = (1 - blank_probs) > self.trigger_threshold
        # Keep only rising edges so a run of consecutive triggered frames
        # yields exactly one spike.
        shift_right_spike = torch.roll(spike.int(), 1, dims=1)
        shift_right_spike[:, 0] = 0  # frame 0 has no predecessor
        unique_spike = (spike.int() - shift_right_spike.int()) > 0
        return unique_spike.int()

    def get_spike_indices(self, spike):
        """Return [b, t] spike frame indices, ascending and packed to the left.

        Non-spike slots are assigned a sentinel index of spike.size(1) (one
        past any valid frame index) so the final ascending sort pushes them to
        the tail.  Using the frame count instead of the hard-coded
        MAX_FRAMES=1000 sentinel makes this correct for utterances longer
        than 1000 frames; the sentinel slots themselves are never read by
        split_into_chunks_mask (all reads are bounded by the spike count).
        """
        sorted_spike, indices = torch.sort(spike, dim=1, descending=True)
        non_spike = sorted_spike < 1
        indices.masked_fill_(non_spike, spike.size(1))
        indices, _ = torch.sort(indices, dim=-1)
        return indices


class DelayedSpikerDetector():
    """Splits an utterance into per-token attention chunks bounded by CTC spikes.

    A frame is a "spike" when its non-blank probability exceeds
    ``trigger_threshold`` (only the first frame of a consecutive triggered run
    counts).  Token ``l`` may attend from the spike ``left_spike`` tokens
    earlier up to (one past) the spike ``right_spike`` tokens later.
    """

    def __init__(self, trigger_threshold, left_spike=1, right_spike=0, blank=0):
        # trigger_threshold: minimum (1 - p_blank) for a frame to trigger.
        # left_spike: how many spikes back the chunk's left edge sits.
        # right_spike: how many spikes ahead the chunk's right edge sits.
        # blank: index of the CTC blank symbol in the probability distribution.
        self.trigger_threshold = trigger_threshold
        self.blank = blank
        self.left_spike = left_spike
        self.right_spike = right_spike

    def split_into_chunks_online(self):
        raise NotImplementedError

    def split_into_chunks_mask(self, ctc_probs, memory_mask, max_length):
        """Build per-token encoder attention masks from CTC spike positions.

        Args:
            ctc_probs: [b, t, vocab] CTC posteriors.
            memory_mask: [b, 1, t] bool mask of valid encoder frames.
            max_length: number of decoder steps to produce masks for.

        Returns:
            (batch_chunks_pos, chunk_mask): per-batch lists of [start, end)
            frame ranges, and a [b, max_length, t] bool attention mask.
        """
        spike = self.get_trigger_spike(ctc_probs)
        # Spikes in padded frames are not real; zero them out.
        spike.masked_fill_(~memory_mask.squeeze(1), 0)
        num_spikes = torch.sum(spike, dim=-1)
        num_frames = torch.sum(memory_mask.squeeze(1), dim=-1)

        spike_indices = self.get_spike_indices(spike)

        # mask [b, max_length, t]
        batch_size, _, max_frames = memory_mask.size()
        mask = torch.zeros([batch_size, max_length, max_frames], device=memory_mask.device)
        batch_chunks_pos = []

        for b in range(batch_size):
            predict_length = num_spikes[b].item()
            frame_length = num_frames[b].item()
            chunks_pos = []

            min_length = min(predict_length, max_length)
            for l in range(min_length):
                # Left edge: the spike left_spike tokens back, or frame 0
                # when there aren't that many earlier spikes.
                if l - self.left_spike < 0:
                    start_pos = 0
                else:
                    start_pos = spike_indices[b][l - self.left_spike].item()

                # Right edge: one past the spike right_spike tokens ahead, or
                # the whole valid utterance when no such spike exists.
                if (l + self.right_spike) > (min_length - 1):
                    end_pos = frame_length
                else:
                    end_pos = spike_indices[b][l + self.right_spike].item() + 1

                mask[b][l][start_pos:end_pos] = 1
                chunks_pos.append([start_pos, end_pos])

            # Left edge shared by every decoder step beyond the available
            # spikes: left_spike tokens back from the last consumed spike.
            if predict_length == 0 or min_length - self.left_spike - 1 < 0:
                start_pos = 0
            else:
                start_pos = spike_indices[b][min_length - self.left_spike - 1].item()

            for l in range(predict_length, max_length):
                mask[b][l][start_pos:frame_length] = 1
                chunks_pos.append([start_pos, frame_length])

            batch_chunks_pos.append(chunks_pos)

        return batch_chunks_pos, mask > 0

    def get_trigger_spike(self, ctc_probs):
        """Return a [b, t] int tensor with 1 at the first frame of each triggered run."""
        blank_probs = ctc_probs[:, :, self.blank]  # [b, t]
        spike = (1 - blank_probs) > self.trigger_threshold
        # Keep only rising edges so a run of consecutive triggered frames
        # yields exactly one spike.
        shift_right_spike = torch.roll(spike.int(), 1, dims=1)
        shift_right_spike[:, 0] = 0  # frame 0 has no predecessor
        unique_spike = (spike.int() - shift_right_spike.int()) > 0
        return unique_spike.int()

    def get_spike_indices(self, spike):
        """Return [b, t] spike frame indices, ascending and packed to the left.

        Non-spike slots are assigned a sentinel index of spike.size(1) (one
        past any valid frame index) so the final ascending sort pushes them to
        the tail.  Using the frame count instead of the hard-coded
        MAX_FRAMES=1000 sentinel makes this correct for utterances longer
        than 1000 frames; the sentinel slots themselves are never read by
        split_into_chunks_mask (all reads are bounded by the spike count).
        """
        sorted_spike, indices = torch.sort(spike, dim=1, descending=True)
        non_spike = sorted_spike < 1
        indices.masked_fill_(non_spike, spike.size(1))
        indices, _ = torch.sort(indices, dim=-1)
        return indices



if __name__ == '__main__':

    # Smoke test: run the delayed-spike splitter on a tiny two-utterance
    # batch (second utterance has its last two frames padded out).
    blank_probs = [
        [0.1, 0.2, 0.6, 0.3, 0.5, 0.7, 0.8, 0.1, 0.2, 0.9],
        [0.1, 0.6, 0.6, 0.5, 0.1, 0.5, 0.1, 0.6, 0.4, 0.3],
    ]
    valid_frames = [[1] * 10, [1] * 8 + [0] * 2]

    probs = torch.FloatTensor(blank_probs).unsqueeze(-1)
    mask = torch.LongTensor(valid_frames).unsqueeze(1) > 0

    detector = DelayedSpikerDetector(0.5, 1, 0)
    chunk_pos, chunk_mask = detector.split_into_chunks_mask(probs, mask, 5)
    print(chunk_pos)
    print(chunk_mask.int())