"""
original model with no dropout, and used regression 
"""
import json
import os
import sys

# Make the local "cls" package importable whether the script is launched from
# the repo root (cwd) or from this file's own directory.
sys.path.extend(
    [os.path.join(os.getcwd(), "cls"), os.path.join(os.path.dirname(__file__), "cls")]
)
print(sys.path)


import cv2
import numpy as np
import torch
import utils
from cls.train_utils import CVModule2 as CVModule, slice_tensor, lengths_reg as lengths
from torchvision import transforms
from ultralytics import YOLO
from utils import Timer

# Running tallies accumulated across process_image() calls (one call per image).
cnt = 0
num_front, num_back, num_other = 0, 0, 0
# Detector class-id -> label map; assumed to match the YOLO training labels — TODO confirm.
names = {0: "person", 1: "front", 2: "back", 3: "other"}
# Inference resolution passed to the YOLO detector.
imgsz = 960

# Default checkpoint paths; main() overrides both for local dev runs.
cls_model_path = "/project/train/models/cls/lightning_logs/version_0/last.ckpt"
det_model_path = "/project/train/models/face_det/train7/weights/best.pt"
print("using cls_model: ", cls_model_path)
print("using det_model: ", det_model_path)


# Standard ImageNet evaluation preprocessing (resize 256 -> center-crop 224 ->
# float [0,1] -> normalize). Built once at import time instead of on every call,
# which the original implementation did inside the function body.
_CROP_TRANSFORM = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        utils.Int8ToFloat01(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
)


def crop_and_transf(img, box):
    """Crop `box` out of `img` and preprocess the crop for the classifier.

    Args:
        img: HWC uint8 image array (as produced by cv2.imread).
        box: [x1, y1, x2, y2] coordinates; values are truncated to int.

    Returns:
        A CHW float tensor (3x224x224) normalized with ImageNet statistics.
    """
    # Use an int copy rather than overwriting the caller's list element-by-element
    # (the previous version mutated `box` in place as a side effect).
    int_box = [int(coord) for coord in box]
    cropped_image = utils.crop_by_hand(img, int_box)
    return _CROP_TRANSFORM(utils.hwc2tensor(cropped_image))


def init():
    """Load the face detector and attribute classifier, then warm both up.

    Returns:
        (detector, classifier): a YOLO face detector and an eval-mode
        classification module restored from its Lightning checkpoint.
    """
    print("test script update time 2023-12-02 08:39:19")

    print(f"cls_model_path: {cls_model_path}")
    print(f"det_model_path: {det_model_path}")
    detector = YOLO(det_model_path)
    classifier = CVModule.load_from_checkpoint(cls_model_path).eval()

    # Warm up both models so the first real frame doesn't pay lazy-init cost.
    warmup_frame = np.random.randint(0, 256, (600, 600, 3), dtype=np.uint8)
    detector.predict(warmup_frame, imgsz=imgsz)
    classifier(torch.randn(1, 3, 224, 224).cuda())
    return detector, classifier


@torch.no_grad()
def process_image(model, input_image=None, args=None, **kwargs):
    """Detect persons/heads in one image and classify attributes of 'front' heads.

    Args:
        model: (detector, classifier) pair as returned by init().
        input_image: image array as produced by cv2.imread (BGR uint8) —
            TODO confirm the evaluation harness always passes this format.
        args, kwargs: unused; kept for the harness's expected signature.

    Returns:
        A JSON string with an "algorithm_data" stub and a "model_data" section
        listing detected objects.
    """
    global cnt
    global num_front, num_back, num_other
    cnt += 1
    print(
        f"============================= processing number {cnt} image ================================================="
    )

    detector, classifier = model
    with Timer("detector"):
        # det_results = detector.predict(input_image, imgsz=imgsz)[0].boxes.cpu()
        # Low confidence threshold (0.1): detections are re-interpreted per class below.
        det_results = detector.predict(input_image, imgsz=imgsz, conf=0.1)[0].boxes

    # with Timer("convert to PIL rgb"):
    #     input_image = Image.fromarray(cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB))

    final_result = {}

    # "algorithm_data" is a fixed no-alert stub; only "model_data" gets populated.
    final_result["algorithm_data"] = {
        "is_alert": False,
        "target_count": 0,
        "target_info": [],
    }
    final_result["model_data"] = {"objects": []}
    # Crops of "front" heads are accumulated and classified in one batch afterwards.
    front_crops = []
    xyxys = []
    for i in range(len(det_results.cls)):
        cls = int(det_results.cls[i])
        xywh = det_results.xywh[i].tolist()
        xyxy = det_results.xyxy[i].tolist()
        if "person" in names[cls]:
            # Persons are reported in x/y + width/height form.
            # NOTE(review): ultralytics xywh is center-based — confirm the output
            # schema expects center coordinates rather than top-left.
            final_result["model_data"]["objects"].append(
                {
                    "x": int(xywh[0]),
                    "y": int(xywh[1]),
                    "width": int(xywh[2]),
                    "height": int(xywh[3]),
                    # "confidence":float(conf),
                    "id": "1",
                    "name": names[int(cls)],
                }
            )
        elif "front" in names[cls]:
            # Defer classification: collect the preprocessed crop and its box.
            with Timer("crop"):
                crop_img = crop_and_transf(input_image, xyxy).unsqueeze(0)
                front_crops.append(crop_img)
                xyxys.append(xyxy)
            # with Timer('classifier'):
            #     cls_result = classifier(crop_img).cpu()
            # print(cls_result)
            num_front += 1

        else:  # other and back
            toward = names[int(cls)]
            if det_results.conf[i] <0.35:
                toward = 'back'   # reassign low-confidence "other" heads to "back"
            final_result["model_data"]["objects"].append(
                {
                    "xmin": int(xyxy[0]),
                    "ymin": int(xyxy[1]),
                    "xmax": int(xyxy[2]),
                    "ymax": int(xyxy[3]),
                    "id": "1",
                    "name": "head",
                    "toward": toward,
                    # hardcoded: attributes are not predicted for back/other heads
                    "glasses": "-1",
                    "gender": "-1",
                    "age": "-1",
                    "race": "-1",
                    "emotion": "-1",
                    "mask": "-1",
                    "hat": "-1",
                    "whiskers": "-1",
                }
            )
            if toward == "back":
                num_back += 1
            elif toward == "other":
                num_other += 1
            else:
                raise ValueError("unknown toward: {}".format(toward))
    # classify the accumulated "front" crops as one batch
    if front_crops:
        crop_batch = torch.cat(front_crops).cuda()
        with Timer("classifier"):
            cls_results = classifier(crop_batch)
        for i in range(len(front_crops)):
            # Split the flat output vector into per-attribute logit slices.
            result_dict = slice_tensor(cls_results[i].unsqueeze(0), lengths)
            # lengths = {
            #     'AGE': 102,
            #     'GENDER': 3,
            #     'GLASSES': 4,
            #     'RACE': 5,
            #     'EMOTION': 4,
            #     'MASK': 3,
            #     'HAT': 3,
            #     'WHISKERS': 3
            # }
            xyxy = xyxys[i]
            for k, v in result_dict.items():
                # -1 because it was add 1 in the dataset code
                if k == "AGE":
                    # AGE is a regression head: round the scalar prediction.
                    v = int(torch.round(v).item())
                else:
                    # All other heads are classification: take the argmax class.
                    v = torch.argmax(v).item()
                result_dict[k] = v - 1
            final_result["model_data"]["objects"].append(
                {
                    "xmin": int(xyxy[0]),
                    "ymin": int(xyxy[1]),
                    "xmax": int(xyxy[2]),
                    "ymax": int(xyxy[3]),
                    "id": "1",
                    "name": "head",
                    "toward": "front",
                    "glasses": result_dict["GLASSES"],
                    "gender": result_dict["GENDER"],
                    "age": result_dict["AGE"],
                    # "age": -1,
                    "race": result_dict["RACE"],
                    "emotion": result_dict["EMOTION"],
                    "mask": result_dict["MASK"],
                    "hat": result_dict["HAT"],
                    "whiskers": result_dict["WHISKERS"],
                }
            )

    # 6032 is presumably the size of the evaluation set — TODO confirm.
    if cnt == 6032:
        print("predicted num of towards")
        print(f"{num_front=}")
        print(f"{num_back=}")
        print(f"{num_other=}")

    return json.dumps(final_result, indent=4)


def main():
    """Run the detector/classifier pipeline over a local image directory.

    Dev entry point: picks the data directory from the ``face_dev`` env var,
    overrides the module-level checkpoint paths, then feeds every .jpg/.jpeg
    in the directory through process_image().
    """
    if os.getenv("face_dev"):
        dirname = "/root/code/cvmark/face/data/2795"
    else:
        dirname = "/home/data/2795"

    # Point the module-level paths at the local-test checkpoints BEFORE init()
    # reads them.
    global det_model_path
    global cls_model_path
    cls_model_path = "/project/train/models/cls/lightning_logs/version_10/last.ckpt"
    det_model_path = "/project/train/models/face_det/train/weights/best.pt"
    # det_model_path = glob(
    #     "/project/train/models/face_det/**/last.engine", recursive=True
    # )[0]
    print("using cls_model: ", cls_model_path)
    print("using det_model: ", det_model_path)

    # endswith accepts a tuple of suffixes — one call instead of an `or` chain.
    img_files = [
        os.path.join(dirname, filename)
        for filename in os.listdir(dirname)
        if filename.endswith((".jpg", ".jpeg"))
    ]
    predictor = init()
    for image_name in img_files:
        img_arr = cv2.imread(image_name)
        print(type(img_arr))
        res = process_image(predictor, img_arr)
        print(res)


if __name__ == "__main__":
    with Timer("main"):
        main()
