import numpy as np
import cv2
import torch
import glob as glob
from model import create_model
from const import ProjectDir as PROJECT_DIR
from const import DEVICE
import os
from dataset_utils import ICBHI_Dataset, Dataset

# --- Experiment / checkpoint selection -------------------------------------
# Earlier runs kept for reference:
# EXP_ID = 'exp-1689663127'
# EXP_ID = 'exp-1689863241'
# EXP_ID = 'exp-1690331338'
# model_name = 'model20.pth'

# # 300 * 1600
# EXP_ID = 'exp-1690353283'
# model_name = 'model12.pth'

# # Fixed 10 s spectrogram, no backdrop
# EXP_ID = 'exp-1690422579'
# model_name = 'model20.pth'

# Fixed 10 s spectrogram, no backdrop, learning rate 1e-4
EXP_ID = 'exp-1690427584'
model_name = 'model100.pth'


# Directory where the annotated prediction images are written.
SAVE_DIR = f'{PROJECT_DIR}/test_predictions/{EXP_ID}/{model_name}'
# exist_ok=True avoids the check-then-create race of the original
# `if not os.path.exists(...): os.makedirs(...)` pattern.
os.makedirs(SAVE_DIR, exist_ok=True)


# Build the two-class detector and restore the trained weights for the
# experiment selected above. DEVICE comes from const (CPU or CUDA).
model = create_model(num_classes=2).to(DEVICE)
checkpoint_path = os.path.join(PROJECT_DIR, f'temp/{EXP_ID}/{model_name}')
state_dict = torch.load(checkpoint_path, map_location=DEVICE)
model.load_state_dict(state_dict)
model.eval()

# Fold 4 serves as the held-out test split; only the test half is needed.
_, test_audio_list = ICBHI_Dataset().DatasetDivision(test_fold=4)
TestSet = Dataset(test_audio_list)

# Index -> label name for the detector's two classes.
CLASSES = [
    'background', 'breath_cycle'
]

# Detections scoring below this value are discarded when drawing.
# detection_threshold = 0.8
detection_threshold = 0.1


# Run inference over the whole test set and save spectrogram images with
# ground-truth boxes (white) and predicted boxes (red) drawn on them.
for i in range(len(TestSet)):
    # Key of the sample, reused as the output image file name.
    image_name = f'{TestSet.DictKeys[i]}'

    image, ground_truth = TestSet[i]

    # Float tensor in [0, 1] -> uint8 image. transpose(2, 1, 0) moves the
    # channel axis last (and swaps the remaining two axes to match the
    # saved orientation). .copy() makes it contiguous for cv2 drawing.
    orig_image = np.uint8((image.numpy())*255).transpose(2,1,0).copy()

    # Draw ground-truth boxes in white, 1 px thick.
    for g_box in ground_truth['boxes']:
        cv2.rectangle(
            orig_image,
            (int(g_box[0]), int(g_box[1])),
            (int(g_box[2]), int(g_box[3])),
            (255, 255, 255),
            1
        )

    # FIX: use the configured DEVICE instead of hard-coding .cuda() so the
    # script also runs on CPU-only machines (the model itself is placed on
    # DEVICE at load time).
    image = image.to(DEVICE)
    # Add the batch dimension the detector expects.
    image = torch.unsqueeze(image, 0)
    with torch.no_grad():
        outputs = model(image)

    # Move all detections to CPU for numpy post-processing.
    outputs = [{k: v.to('cpu') for k, v in t.items()} for t in outputs]
    # Carry on only if anything was detected.
    if len(outputs[0]['boxes']) != 0:
        boxes = outputs[0]['boxes'].data.numpy()
        scores = outputs[0]['scores'].data.numpy()
        labels = outputs[0]['labels'].data.numpy()
        # Keep only detections at or above `detection_threshold`.
        # FIX: filter boxes and labels with the SAME mask so that
        # draw_boxes[j] and pred_classes[j] stay index-aligned (the
        # original filtered boxes only, misaligning the class names).
        keep = scores >= detection_threshold
        boxes = boxes[keep].astype(np.int32)
        draw_boxes = boxes.copy()
        pred_classes = [CLASSES[label] for label in labels[keep]]
        # Draw predicted boxes in red (BGR (0, 0, 255)), 2 px thick.
        for j, box in enumerate(draw_boxes):
            cv2.rectangle(orig_image,
                        (int(box[0]), int(box[1])),
                        (int(box[2]), int(box[3])),
                        (0, 0, 255), 2)
            # cv2.putText(orig_image, pred_classes[j], 
            #             (int(box[0]), int(box[1]-5)),
            #             cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 
            #             2, lineType=cv2.LINE_AA)
        cv2.imwrite(f"{SAVE_DIR}/{image_name}.jpg", orig_image,)
    print(f"Image {i+1} done...")
    print('-'*50)
print('TEST PREDICTIONS COMPLETE')
