import numpy as np
import cv2
import os
import time
from PIL import Image
import io
import copy
import torch
from models.experimental import attempt_load
from utils.torch_utils import select_device
from utils.general import (
    check_img_size, non_max_suppression, apply_classifier, scale_coords,
    xyxy2xywh, xywh2xyxy, strip_optimizer)
from torchvision import transforms
# import random
import fall_TSM_Module
import random
from torch.nn import functional as F

# Number of frames accumulated before one TSM action-recognition pass.
frame_number = 8
# Initialize
device = select_device('0')  # device id '0' — presumably the first CUDA GPU; confirm availability
# image data list
imagedatalist = []  # sliding window of raw frames awaiting classification

detectFall = 0  # number of windows classified as 'fall' (also used in saved filenames)
detectNum = 0  # total number of windows classified

def plot_action(img, color=None, line_thickness=None, action=None):
    """Draw a filled box with the action label onto ``img`` (modified in place).

    The box is anchored at a fixed position (100, 100); thickness and font
    scale are derived from the image size unless ``line_thickness`` is given.
    A random BGR color is picked when ``color`` is falsy.
    """
    thickness = line_thickness or round(
        0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    font_thickness = max(thickness - 1, 1)
    label_size = cv2.getTextSize(
        action, 0, fontScale=thickness / 3, thickness=font_thickness)[0]
    anchor = (100, 100)
    # c3 = (1, img.shape[0] - 1)
    opposite = (anchor[0] + label_size[0], anchor[1] - label_size[1] - 3)
    cv2.rectangle(img, anchor, opposite, color, -1, cv2.LINE_AA)  # filled
    cv2.putText(img, action, (anchor[0], anchor[1] - 2), 0, thickness / 3,
                [225, 255, 255], thickness=font_thickness, lineType=cv2.LINE_AA)


def fallDetection(img_org):
    """Accumulate frames and classify each full window with the TSM module.

    Appends ``img_org`` to the module-level window. Once ``frame_number``
    frames are buffered, runs ``fall_TSM_Module.alertAction``; if the
    prediction is 'fall', every frame of the window is annotated and saved.
    The window then slides forward by 4 frames (50% overlap).
    """
    global detectFall, detectNum
    imagedatalist.append(img_org)
    # TODO filter condition
    if len(imagedatalist) != frame_number:
        return
    # TODO TSM
    tsmPred, tsmConf = fall_TSM_Module.alertAction(imagedatalist)
    label = f'{tsmPred} {tsmConf:.2f}%'
    if tsmPred == 'fall':
        detectFall += 1
        for idx, windowFrame in enumerate(imagedatalist):
            plot_action(windowFrame, action=label)
            savepath = os.path.join('data/falltest/tsm/', '{0}_{1}_{2}.jpg'.format(
                detectFall, tsmPred, idx))
            cv2.imwrite(savepath, windowFrame)
    detectNum += 1
    # Slide the window: drop the oldest 4 frames, keep the newest 4.
    del imagedatalist[0:4]


def main():
    """Read a video file, run fall detection on every 6th frame, and print
    processing time plus the module-level detection counters."""
    # Clear previous TSM output images. The original used
    # `os.system('rm -rf ...')`, which is Unix/shell-specific and fails
    # silently elsewhere; plain os calls are portable.
    outdir = 'data/falltest/tsm/'
    if os.path.isdir(outdir):
        for name in os.listdir(outdir):
            path = os.path.join(outdir, name)
            if os.path.isfile(path):
                os.remove(path)

    videoPath = 'data/input/luoyan.mp4'
    cap = cv2.VideoCapture(videoPath)
    # cap = cv2.VideoCapture('rtsp://admin:ivlab2019@192.168.104.233//Streaming/Channels/1')
    fNUMS = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    print("FPS is ", fps)
    # saveImageNew = './save/img'
    # saveVideo = './save/mp4'

    frameCount = 0
    print("Starting...")

    start = time.time()
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frameCount += 1
        # Sample every 6th frame to reduce load on the TSM classifier.
        if frameCount % 6 == 0:
            fallDetection(frame)
        # NOTE(review): waitKey only delivers key events while an OpenCV
        # window (cv2.imshow) is open; without one, the 'q' shortcut is
        # inert. Kept as-is — confirm whether a display window is intended.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # Progress report. Guard fNUMS > 0: CAP_PROP_FRAME_COUNT is 0 or
        # negative for live streams (see the RTSP line above), which would
        # make the original division meaningless or raise ZeroDivisionError.
        if frameCount % 1000 == 0 and fNUMS > 0:
            print("Current :", round(frameCount / float(fNUMS) * 100, 1), "%")
    cap.release()
    end = time.time()
    print("all done")
    print("processing time : {0:.1f}, total : {1}, fall : {2}".format(
        float(end - start), detectNum, detectFall))


if __name__ == "__main__":
    main()
