import os
import torch
import torch.nn as nn
import numpy as np
import pickle
# Some OpenCV wheel layouts expose the module as cv2.cv2; fall back to that.
# Catch only ImportError — a bare `except:` would also swallow
# KeyboardInterrupt/SystemExit raised during import.
try:
    import cv2
except ImportError:
    from cv2 import cv2

from Config.Config import ACTNET200V13_PKL,FRAME_COUNT_PKL
from FeatureExtruct.transforms import *
from PIL import Image

class RGB_Single_Frame_Dataset(object):
    '''
    Frame-level sampler over extracted ActivityNet RGB frames.

    Loads the ground-truth annotation pickle (ACTNET200V13_PKL) and a
    per-video frame-count pickle (FRAME_COUNT_PKL), then serves either:

    * random single-frame training batches (`nextbatch` / `nextbatch_flow`), or
    * exhaustive per-video frame batches for feature extraction
      (`enum_vid_feature` / `enum_all_picture`).

    Frames are split by index: a frame with ``index % MOD`` in ``TRAIN_MD``
    belongs to the training split, one in ``EVAL_MD`` to the extraction
    split.  ``modality='test'`` flips the two splits.
    '''

    def __init__(self, subset='training', modality='train'):
        # subset: one of 'training' / 'validation' / 'testing'
        # modality: 'train' or 'test' — controls the train/eval frame split
        self.subset = subset
        self.modality = modality
        self.init_values()

    def load_picture(self, vid, id, modality='RGB'):
        '''Load a single frame as a PIL image (only the RGB modality is handled).

        `id` is the 1-based frame index within video `vid`.
        Returns None for any other modality (matching the original contract).
        '''
        if modality == 'RGB':
            frame_path = '/mnt/md1/Dataset/ActivityNet/Frames/{}/img_{:05d}.jpg'.format(vid, id)
            img = Image.open(frame_path)
            # Force the decode now: Image.open is lazy and would otherwise keep
            # the file handle open until first use, leaking descriptors when
            # thousands of frames are enumerated.
            img.load()
            return img

    def get_return_data(self, x):
        '''Training-time transform: scale, random 224 crop, stack, normalize.

        `x` is a list of PIL images; returns a stacked torch tensor.
        NOTE(review): `torchvision` is assumed to be re-exported by the
        star import from FeatureExtruct.transforms — confirm.
        '''
        trans = torchvision.transforms.Compose([
            GroupScale(256),
            GroupRandomSizedCrop(224),
            Stack(roll=False),
            ToTorchFormatTensor(),
            GroupNormalize(
                mean=[124. / 255, 117. / 255, 104. / 255],
                std=[1 / (.0167 * 255)] * 3)
        ])

        return trans(x)

    def get_return_data_for_enum(self, x):
        '''Deterministic transform for feature extraction (no random crop).'''
        trans = torchvision.transforms.Compose([
            GroupScale(224),
            Stack(roll=False),
            ToTorchFormatTensor(),
            GroupNormalize(
                mean=[124. / 255, 117. / 255, 104. / 255],
                std=[1 / (.0167 * 255)] * 3)
        ])

        return trans(x)

    def init_values(self):
        '''Load annotations and frame counts; configure the frame split.'''
        with open(ACTNET200V13_PKL, 'rb') as f:
            groundtruth = pickle.load(f)['database']

        # Keep only videos belonging to the requested subset.
        self.groundtruth = {key: item for key, item in groundtruth.items()
                            if item['subset'] == self.subset}
        self.vids = list(self.groundtruth.keys())

        with open(FRAME_COUNT_PKL, 'rb') as f:
            self.frames_count = pickle.load(f)

        # Frames with index % MOD in TRAIN_MD are used for training,
        # those in EVAL_MD for feature extraction; 'test' flips the split.
        self.MOD = 5
        self.TRAIN_MD = [0, 2, 4]
        self.EVAL_MD = [1, 3]

        if self.modality == 'test':
            self.EVAL_MD = [0, 2, 4]
            self.TRAIN_MD = [1, 3]

    def _frame_annotations(self, vid):
        '''Return (frame_count, gts, gt_labels) for one video.

        Segment boundaries are converted from seconds to 1-based frame
        indices (ceil for start, floor for end), clipped to
        [1, frame_count]; degenerate segments (start > end) are dropped.
        '''
        item = self.groundtruth[vid]
        frame_count = self.frames_count[vid]
        dur = item['duration']

        gts = []
        gt_labels = []
        for anno in item['annotations']:
            st, ed = anno['segment']
            st = max(np.ceil(st / dur * frame_count), 1)
            ed = min(np.floor(ed / dur * frame_count), frame_count)
            if st > ed:
                continue
            gts.append([st, ed])
            gt_labels.append(anno['class'])
        return frame_count, gts, gt_labels

    @staticmethod
    def _frame_label(frame_id, gts, gt_labels):
        '''Label of the first gt segment containing frame_id, else 0 (background).'''
        for (st, ed), lb in zip(gts, gt_labels):
            if st <= frame_id <= ed:
                return lb
        return 0

    def train_sample_from_video(self, batch=8, max_pos=6, max_neg=2, max_per_video=2):
        '''Sample up to `batch` single training-split frames across videos.

        Caps per batch: at most `max_pos` foreground frames, `max_neg`
        background frames, and `max_per_video` frames from any one video.
        Returns (x, y): x a (count, 3, 224, 224) tensor, y a numpy array
        of labels (0 = background).  `self.vids` is rotated after a full
        batch so successive calls start from different videos.
        '''
        x = []
        y = np.ones(batch)

        pos_count = 0
        neg_count = 0
        count = 0

        for p, vid in enumerate(self.vids):
            frame_count, gts, gt_labels = self._frame_annotations(vid)

            # Visit candidate frame indices of this video in random order.
            # NOTE(review): arange(1, frame_count) excludes the last frame —
            # presumably a deliberate safety margin; confirm.
            randomidx = np.arange(1, frame_count, 1)
            np.random.shuffle(randomidx)

            taken = 0
            for Id in randomidx:
                if taken >= max_per_video:
                    break
                if Id % self.MOD not in self.TRAIN_MD:
                    continue

                lb = self._frame_label(Id, gts, gt_labels)

                # Respect the per-batch cap for this frame's label kind.
                if lb != 0 and pos_count >= max_pos:
                    continue
                if lb == 0 and neg_count >= max_neg:
                    continue

                x.append(self.load_picture(vid, Id))
                y[count] = lb
                count += 1
                taken += 1
                if lb != 0:
                    pos_count += 1
                else:
                    neg_count += 1

            if count == batch:
                # Rotate the video list so the next call resumes elsewhere.
                self.vids = self.vids[p:] + self.vids[:p]
                break

        # BUG FIX: if the videos ran out before the batch filled, y kept a
        # placeholder tail of ones (a real class id) longer than x; trim it.
        y = y[:count]

        x = self.get_return_data(x)
        # The stacked tensor comes back as (count*3, 224, 224); regroup channels.
        x = x.view(-1, 3, 224, 224)
        return x, y

    def nextbatch(self):
        '''One RGB training batch: 64 frames, up to 48 foreground / 16 background.'''
        return self.train_sample_from_video(batch=64, max_pos=48, max_neg=16, max_per_video=8)

    def nextbatch_flow(self):
        '''One training batch with a balanced 32/32 foreground/background split.'''
        return self.train_sample_from_video(batch=64, max_pos=32, max_neg=32, max_per_video=8)

    def _to_tensor_batch(self, imgs):
        '''Turn a list of PIL frames into a (n, 3, 224, 224) tensor.'''
        t = self.get_return_data_for_enum(imgs)
        return t.view(-1, 3, 224, 224)

    def _iter_vid_batches(self, vid, batchsize):
        '''Yield (vid, imgs, labels) batches of eval-split frames for one video.'''
        frame_count, gts, gt_labels = self._frame_annotations(vid)

        imgs = []
        labels = []
        for i in range(1, frame_count):
            # CONSISTENCY FIX: use the configured eval split instead of a
            # hard-coded [1, 3], so modality='test' flips the split here
            # exactly as init_values intends.
            if i % self.MOD not in self.EVAL_MD:
                continue
            imgs.append(self.load_picture(vid, i))
            labels.append(self._frame_label(i, gts, gt_labels))

            if len(labels) == batchsize:
                yield vid, self._to_tensor_batch(imgs), labels
                imgs = []
                labels = []

        # Flush the final partial batch, if any.
        if labels:
            yield vid, self._to_tensor_batch(imgs), labels

    def enum_vid_feature(self, vid, batchsize=64):
        '''Generator over eval-split frame batches of a single video.

        Raises FileNotFoundError (on first iteration, as this is a
        generator) when the video's frame directory is missing.
        '''
        if not os.path.exists('/mnt/md1/Dataset/ActivityNet/Frames/{}'.format(vid)):
            raise FileNotFoundError
        yield from self._iter_vid_batches(vid, batchsize)

    def _enum_all_pictures(self, batchsize=8):
        '''Generator over eval-split frame batches for every video in the subset.'''
        for vid in self.vids:
            # Skip videos whose frames were never extracted to disk.
            if not os.path.exists('/mnt/md1/Dataset/ActivityNet/Frames/{}'.format(vid)):
                continue
            yield from self._iter_vid_batches(vid, batchsize)

    def enum_all_picture(self):
        '''Convenience wrapper: enumerate every video with batch size 64.'''
        return self._enum_all_pictures(batchsize=64)

if __name__ == '__main__':
    # Smoke test: build the training-subset dataset, pull one batch of
    # eval-split frames, and drop into an interactive shell to inspect it.
    print('here')
    dataset = RGB_Single_Frame_Dataset('training')
    print('done')

    gen = dataset.enum_all_picture()
    vid, imgs, labels = next(gen)

    import IPython;IPython.embed()