import argparse
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from PIL import Image
from torch.autograd import Variable

from modules.sample_generator import gen_samples, SampleGenerator
from net.ade.ade32 import Encoder
from net.fbnet.fbnet import FBNet
from tracking.options import opts
from tracking.gen_config import gen_config
from tracking.run_tracker_backup import forward_samples, RegionExtractor, FBDataProvider


class DFCT():
    """Two-stage tracker: trains FBNet on pos/neg samples drawn around the
    first-frame bounding box (stage one), with a placeholder second stage.

    Args:
        args: argparse namespace understood by ``gen_config`` (sequence
            name / json path / savefig / display flags).
    """

    def __init__(self, args=None):
        # Networks: FBNet scores candidate regions, Encoder extracts features.
        # NOTE(review): .cuda() is unconditional here even though training
        # code below checks opts['use_gpu'] — confirm GPU is required.
        self.fbnet = FBNet().cuda()
        self.encoder = Encoder().cuda()

        self.fb_criterion = nn.MSELoss().cuda()
        self.fb_optimizer = optim.Adam(self.fbnet.parameters(), lr=opts['lr_init'])
        # Generate sequence config
        self.img_list, \
        self.init_bbox, \
        self.gt, \
        self.savefig_dir, \
        self.display, \
        self.result_path = gen_config(args)
        # BUG FIX: was `img_list[0]` — a bare name that only resolved via an
        # accidental module-level global created in __main__; use the
        # instance attribute assigned just above.
        self.first_frame = Image.open(self.img_list[0]).convert('RGB')
        self.img_sz = self.first_frame.size  # (width, height) per PIL

    def train_fbnet(self):
        """Train FBNet on the first frame.

        Draws positive samples (gaussian, near the init bbox, label 1) and
        negative samples (uniform + whole-image, label 0), then optimizes
        MSE between FBNet's scalar output and the binary label.
        """
        # Draw pos/neg samples; append the label as a trailing column.
        pos_rects = gen_samples(SampleGenerator('gaussian', self.img_sz, 0.1, 1.2),
                                self.init_bbox, opts['n_pos_init'], opts['overlap_pos_init'])
        pos_rects = np.hstack((pos_rects, np.ones((pos_rects.shape[0], 1))))
        neg_rects = np.concatenate([
            gen_samples(SampleGenerator('uniform', self.img_sz, 1, 2, 1.1),
                        self.init_bbox, opts['n_neg_init'] // 2, opts['overlap_neg_init']),
            gen_samples(SampleGenerator('whole', self.img_sz, 0, 1.2, 1.1),
                        self.init_bbox, opts['n_neg_init'] // 2, opts['overlap_neg_init'])])
        neg_rects = np.random.permutation(neg_rects)
        neg_rects = np.hstack((neg_rects, np.zeros((neg_rects.shape[0], 1))))

        rects = np.concatenate((neg_rects, pos_rects), axis=0)
        # data provider yields (regions BCHW, labels) mini-batches.
        data = FBDataProvider(self.first_frame, rects, opts['fb_imsz'],
                              opts['padding'], opts['batch_test'], shuffle=True)

        for epoch in range(opts['fbepoch']):
            for i, (regions, labels) in enumerate(data):
                regions = Variable(regions)
                # BUG FIX: labels were moved to GPU unconditionally while
                # regions were gated on opts['use_gpu'] — keep both on the
                # same device so CPU-only runs don't hit a device mismatch.
                if opts['use_gpu']:
                    regions = regions.cuda()
                    labels = labels.cuda()
                pred = self.fbnet(regions)
                # Squeeze the trailing singleton dims so pred matches labels.
                loss = self.fb_criterion(pred.squeeze(1).squeeze(1).squeeze(1), labels)
                self.fbnet.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.fbnet.parameters(), opts['grad_clip'])
                self.fb_optimizer.step()

    def stage_one(self):
        """First training stage: fit FBNet on first-frame samples."""
        self.train_fbnet()

    def stage_two(self):
        """Second training stage — not implemented yet."""
        pass

    def init(self):
        """Run both initialization stages on the first frame."""
        self.stage_one()
        self.stage_two()

    def update(self, frame):
        """Process one frame (work in progress).

        NOTE(review): currently re-loads the first image and re-runs full
        initialization on every call; `frame` itself is unused — confirm
        intended tracking-update logic.
        """
        # BUG FIX: was `img_list[0]` (undefined module global) — use the
        # instance attribute.
        image = Image.open(self.img_list[0]).convert('RGB')
        self.init()


if __name__ == '__main__':
    # Seed all RNGs for reproducibility (reportedly slows things down a bit).
    if opts['seed'] is not None:
        np.random.seed(opts['seed'])
        torch.manual_seed(opts['seed'])
        # ROBUSTNESS: only seed CUDA when a device is actually available.
        if torch.cuda.is_available():
            torch.cuda.manual_seed(opts['seed'])

    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--seq', default='DragonBaby', help='input seq')
    parser.add_argument('-j', '--json', default='', help='input json')
    # NOTE(review): default=True combined with store_true makes -f a no-op
    # (savefig is always True); kept to preserve behavior — confirm intent.
    parser.add_argument('-f', '--savefig', default=True, action='store_true')
    parser.add_argument('-d', '--display', default=False, action='store_true')

    args = parser.parse_args()
    # Explicit validation instead of assert (asserts vanish under `python -O`).
    if args.seq == '' and args.json == '':
        raise ValueError('either --seq or --json must be provided')

    # Generate sequence config.  NOTE(review): this module-level call looks
    # redundant with the one inside DFCT.__init__, but it also creates the
    # global `img_list` the current class implementation reads — keep it.
    img_list, init_bbox, gt, savefig_dir, display, result_path = gen_config(args)
    tracker = DFCT(args=args)
    tracker.init()
