from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os
import argparse
import sys

sys.path.append(".")
import cv2
import torch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

from glob import glob
from pysot.core.config import cfg
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker

# Pin the process to GPU 0 and a single CPU thread for reproducible timing.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
torch.set_num_threads(1)

parser = argparse.ArgumentParser(description='tracking demo')

parser.add_argument('--name', default='Walking2', type=str,
                    help='sequence name; used to name the result .txt file')
parser.add_argument('--video_name',
                    default='/home/xulong/snowstorm/0Dataset/OTB100/Walking2/img1/',
                    type=str,
                    help='video file (.avi/.mp4) or directory of image frames; '
                         'empty string falls back to the webcam')
# Bug fix: this option previously had type=str with a list default, so a
# value given on the command line stayed one raw string and broke the
# numeric indexing done later.  Parse "x,y,w,h" into a list of floats;
# the default list is left untouched by argparse, so defaults behave as before.
parser.add_argument('--first_gt4', default=[100, 69, 22, 80],
                    type=lambda s: [float(v) for v in s.split(',')],
                    help='initial bounding box as comma-separated x,y,w,h')
parser.add_argument('--config',
                    default='experiments/siamrpn_r50_l234_dwxcorr/config.yaml',
                    type=str, help='tracker config file')
parser.add_argument('--snapshot',
                    default='experiments/siamrpn_r50_l234_dwxcorr/model.pth',
                    type=str, help='model snapshot (weights) file')

args = parser.parse_args()


# Shared state for the (currently disabled) matplotlib animation at the
# bottom of the file: gen_dot() reads x_x/y_y, update_dot() appends to
# xdata/ydata and redraws ln.
num_t = 0            # counter; assigned here and never read in this file
x_x = []             # x-coordinates fed to the animation via gen_dot()
y_y = []             # y-values fed to the animation via gen_dot()
fig, ax = plt.subplots()
ln, = ax.plot([], [], 'r.')  # red-dot artist updated by update_dot()
xdata, ydata = [], []        # points accumulated so far by update_dot()



def init():
    """Set fixed axis limits on the shared animation axes."""
    ax.set_ylim(0, 1.2)
    ax.set_xlim(0, 1000)


def gen_dot():
    """Yield successive ``[x, y]`` pairs from the module-level x_x/y_y lists."""
    for idx, x_val in enumerate(x_x):
        yield [x_val, y_y[idx]]


def update_dot(newd):
    """Append the new point to the accumulated data and refresh the artist."""
    x_new = newd[0]
    y_new = newd[1]
    xdata.append(x_new)
    ydata.append(y_new)
    ln.set_data(xdata, ydata)
    return ln,


def get_frames(video_name):
    """Yield BGR frames from a webcam, a video file, or an image directory.

    Args:
        video_name: falsy -> capture from webcam 0; a path ending in
            ``avi``/``mp4`` -> that video file; anything else -> a
            directory of numbered ``*.jpg`` frames (e.g. ``0001.jpg``).

    Yields:
        numpy arrays as returned by OpenCV (``cap.read()``/``cv2.imread``).
    """
    if not video_name:
        cap = cv2.VideoCapture(0)
        # Warm-up: discard the first few grabs, which are often stale/dark.
        for _ in range(5):
            cap.read()
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            yield frame
    elif video_name.endswith('avi') or video_name.endswith('mp4'):
        # Bug fix: previously opened the global args.video_name instead of
        # the function's own argument, so any other path was ignored.
        cap = cv2.VideoCapture(video_name)
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            yield frame
    else:
        images = glob(os.path.join(video_name, '*.jpg*'))
        # Sort numerically by the frame number preceding the first dot;
        # os.path.basename also handles Windows-style separators.
        images = sorted(
            images,
            key=lambda p: int(os.path.basename(p).split('.')[0]))
        for img in images:
            yield cv2.imread(img)



def main():
    """Run the SiamRPN tracker over the configured input and save results.

    Reads all settings from the module-level ``args``.  Shows each tracked
    frame in an OpenCV window and, for box-style tracker outputs, appends
    one ``x,y,w,h`` line per frame to ``<out_dir>/<args.name>.txt``.
    """
    # Load config and choose a device.
    cfg.merge_from_file(args.config)
    cfg.CUDA = torch.cuda.is_available()
    device = torch.device('cuda' if cfg.CUDA else 'cpu')

    # Create the model and load the snapshot (CPU-mapped, then moved).
    model = ModelBuilder()
    model.load_state_dict(torch.load(args.snapshot,
                                     map_location=lambda storage, loc: storage.cpu()))
    model.eval().to(device)

    # Build the tracker around the model.
    tracker = build_tracker(model)

    first_frame = True
    if args.video_name:
        video_name = args.video_name.split('/')[-1].split('.')[0]
    else:
        video_name = 'webcam'
    cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN)

    # Shared tracking state; kept global for the modified
    # tracker.track(frame, fram_order, all_bbox) interface.
    global fram_order, all_bbox, block
    fram_order = 1   # 1-based frame counter
    all_bbox = []    # history of all boxes, passed back into the tracker
    block = 0        # NOTE(review): never read anywhere visible — kept as-is

    for frame in get_frames(args.video_name):
        if first_frame:
            try:
                # Initial target box as [x, y, w, h] from the command line.
                init_rect = args.first_gt4
                # Alternative: draw the first box interactively instead.
                # init_rect = cv2.selectROI(video_name, frame, False, False)
            except Exception:  # was a bare except; narrowed but same exit
                exit()
            tracker.init(frame, init_rect)
            first_frame = False

            print('first', 'bbox{}:'.format(fram_order), init_rect)
            fram_order = fram_order + 1
            all_bbox.append(list(init_rect))
        else:
            outputs = tracker.track(frame, fram_order, all_bbox)
            if 'polygon' in outputs:
                # Mask-style output: draw the polygon and blend the mask in.
                polygon = np.array(outputs['polygon']).astype(np.int32)
                cv2.polylines(frame, [polygon.reshape((-1, 1, 2))],
                              True, (0, 255, 0), 3)
                mask = ((outputs['mask'] > cfg.TRACK.MASK_THERSHOLD) * 255)
                mask = mask.astype(np.uint8)
                mask = np.stack([mask, mask * 255, mask]).transpose(1, 2, 0)
                frame = cv2.addWeighted(frame, 0.77, mask, 0.23, -1)
            else:
                # Box-style output: draw the rectangle and record it.
                # (outputs also carries 'best_score' and 'distance'.)
                bbox = list(map(int, outputs['bbox']))
                cv2.rectangle(frame, (bbox[0], bbox[1]),
                              (bbox[0] + bbox[2], bbox[1] + bbox[3]),
                              (0, 255, 0), 3)

                fram_order = fram_order + 1
                all_bbox.append(bbox)

                # Save the result line.  Bug fix: this used to run even on
                # the polygon branch, where ``bbox`` is unbound (NameError).
                model_path = '/home/xulong/snowstorm/0result/OTB/OTB100_txt_s/'
                if not os.path.isdir(model_path):
                    os.makedirs(model_path)
                result_path = os.path.join(model_path, '{}.txt'.format(args.name))
                # Append mode: lines accumulate across frames (and runs).
                with open(result_path, 'a') as f:
                    f.write(','.join([str(i) for i in bbox]) + '\r\n')

            cv2.imshow(video_name, frame)
            cv2.waitKey(1)


if __name__ == '__main__':
    # The disabled line below would animate the x_x/y_y points via
    # update_dot(); left here as an optional visualization hook.
    # anti = animation.FuncAnimation(fig, update_dot, frames = gen_dot, interval = 0.001, init_func = init)
    main()


