#!/usr/bin/env python
import os
import time
import argparse
from typing import Optional
import argparse
import cv2
import numpy as np
import onnxruntime as ort
import mindspore_lite as mslite

def parse_args(argv: Optional[list] = None) -> argparse.Namespace:
    """Parse command-line arguments.

    Args:
        argv: Optional explicit argument list (defaults to ``sys.argv[1:]``);
            added for testability, backward compatible with no-arg calls.

    Returns:
        Parsed namespace with the image path, model paths and option flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--img", type=str, required=True, help="Path to input image.")
    # BUG FIX: `required=True` together with `default=...` made the defaults
    # dead code (argparse ignores `default` for required options).  Dropping
    # `required` makes the documented defaults actually take effect.
    parser.add_argument("--onnx_weight_path", type=str, default="./BiRefNet-general-epoch_244.onnx",  help="Path to ONNX model.")
    parser.add_argument("--mindir_weight_name", type=str, default="./birefnet_1024x1024", help="Path to mindir model.")
    parser.add_argument("--viz", action="store_true", help="Whether to visualize the results." )
    parser.add_argument("--convert_weight", action="store_true", help="Whether to convert onnx weight to mindir weight." )
    parser.add_argument('--score_th', type=float, default=None)
    return parser.parse_args(argv)

def sigmoid(x: np.ndarray) -> np.ndarray:
    """Element-wise logistic function: ``1 / (1 + e^-x)``."""
    return np.reciprocal(1.0 + np.exp(-x))

def download_file(
    url: str,
    save_path: str,
    retries: int = 10,
) -> None:
    """Download *url* to *save_path* with a progress bar, retrying on failure.

    Args:
        url: Source URL.
        save_path: Destination file path.
        retries: Number of attempts before the last error is re-raised.

    Raises:
        requests.exceptions.RequestException: If every attempt fails.
    """
    import requests  # type: ignore
    from tqdm import tqdm  # type: ignore

    print('Download:', save_path)

    for attempt in range(retries):
        try:
            response = requests.get(url, stream=True, timeout=10)
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))
            with open(save_path, 'wb') as file, tqdm(
                    total=total_size,
                    unit='B',
                    unit_scale=True,
                    desc=save_path,
            ) as pbar:
                for chunk in response.iter_content(chunk_size=8192):
                    # Skip empty keep-alive chunks instead of writing them.
                    if chunk:
                        file.write(chunk)
                        pbar.update(len(chunk))
            return
        except requests.exceptions.RequestException:
            # Back off briefly and retry; re-raise once attempts are exhausted.
            if attempt < retries - 1:
                time.sleep(5)
            else:
                raise

def convert_onnx_to_mindir(onnx_weight_path, mindir_weight_name):
    """Convert an ONNX weight file into an Ascend-oriented MindIR model file."""
    cvt = mslite.Converter()
    cvt.optimize = "ascend_oriented"
    cvt.save_type = mslite.ModelType.MINDIR
    cvt.convert(
        fmk_type=mslite.FmkType.ONNX,
        model_file=onnx_weight_path,
        output_file=mindir_weight_name,
    )
    print("----------convert successfully!--------------")

class BiRefNetInfer:
    """Runs BiRefNet segmentation with both an ONNX (CPU) session and a
    MindSpore Lite (Ascend) model, compares their outputs, and writes a
    white-background cut-out of the input image to ``debug_image.jpg``.
    """

    def __init__(self, args) -> None:
        """Load the ONNX session and build the MindIR model.

        Args:
            args: Parsed CLI namespace; uses ``onnx_weight_path`` and
                ``mindir_weight_name``.
        """
        self.session = ort.InferenceSession(args.onnx_weight_path, providers=["CPUExecutionProvider"])
        self.input_size = self.session.get_inputs()[0].shape

        context = mslite.Context()
        context.target = ["Ascend"]
        context.cpu.thread_num = 1
        context.cpu.thread_affinity_mode = 2
        # context.ascend.device_id=0
        mindir_path = args.mindir_weight_name + ".mindir"
        self.model = mslite.Model()
        # BUG FIX: the model was never built (the call was commented out), so
        # `infer` crashed on `self.model.get_inputs()`.  Build it here.
        self.model.build_from_file(mindir_path, mslite.ModelType.MINDIR, context)

    def infer(self, args) -> np.ndarray:
        """Segment ``args.img`` and write the composited result to disk.

        Args:
            args: Parsed CLI namespace; uses ``img`` and ``score_th``.

        Returns:
            The debug image (H, W, 3, uint8) at the ORIGINAL image resolution.
        """
        start_time = time.time()
        original_image = cv2.imread(args.img)  # H, W, C (BGR)

        # Fixed network input resolution (matches the exported 1024x1024 model).
        input_width = 1024
        input_height = 1024
        # BUG FIX: removed a leftover `breakpoint()` that halted every run.

        # Pre process: Resize, BGR->RGB, Normalize, Transpose, float32 cast
        resized_image = cv2.resize(original_image, dsize=(input_width, input_height))
        input_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB)
        mean = [0.485, 0.456, 0.406]  # ImageNet normalisation constants
        std = [0.229, 0.224, 0.225]
        input_image = (input_image / 255.0 - mean) / std  # type: ignore
        input_image = input_image.transpose(2, 0, 1)
        input_image = np.expand_dims(input_image, axis=0)
        input_image = input_image.astype('float32')

        # ONNX inference (CPU reference).
        input_name = self.session.get_inputs()[0].name
        result1 = self.session.run(None, {input_name: input_image})

        # MindIR inference (Ascend).
        inputs = self.model.get_inputs()
        inputs[0].set_data_from_numpy(input_image)
        t1 = time.time()
        outputs = self.model.predict(inputs)
        print("npu infer time:", time.time() - t1, flush=True)
        result2 = outputs[0].get_data_to_numpy()
        # BUG FIX: `result1` is a *list* of output arrays — compare against the
        # final output, and compute an actual mean *squared* difference so the
        # value matches the printed label (均方差 = mean squared error).
        print("均方差：", np.mean((result2 - result1[-1]) ** 2))

        # Post process: Squeeze, Sigmoid, (optional threshold), scale to uint8
        mask = np.squeeze(result1[-1])
        mask = sigmoid(mask)
        if args.score_th is not None:
            mask = np.where(mask < args.score_th, 0, 1)
        mask = (mask * 255).astype('uint8')

        elapsed_time = time.time() - start_time

        # BUG FIX: resize the mask back to the ORIGINAL resolution.  The old
        # code overwrote `image` with its 1024x1024 resize, so the "resize
        # back" step was a no-op and the input resolution was lost.
        mask = cv2.resize(
            mask,
            dsize=(original_image.shape[1], original_image.shape[0]),
        )

        # Mask extract: keep foreground pixels, white background elsewhere.
        white_background = np.zeros(original_image.shape, dtype=np.uint8)
        white_background[:] = (255, 255, 255)
        mask = np.stack((mask, ) * 3, axis=-1).astype('uint8')
        debug_image = np.where(mask, original_image, white_background)

        # Overlay the total (pre+infer+post) elapsed time on the output image.
        elapsed_time_text = 'Elapsed time: '
        elapsed_time_text += str(round((elapsed_time * 1000), 1))
        elapsed_time_text += 'ms'
        cv2.putText(debug_image, elapsed_time_text, (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imwrite("debug_image.jpg", debug_image)

        return debug_image

if __name__ == '__main__':
    args = parse_args()

    # Fetch the ONNX weight on first run.
    if not os.path.exists(args.onnx_weight_path):
        weight_url = ('https://github.com/Kazuhito00/BiRefNet-ONNX-Sample/'
                      'releases/download/v0.0.1/birefnet_1024x1024.onnx')
        download_file(weight_url, args.onnx_weight_path)

    # Optionally convert the ONNX weight to a MindIR weight before inference.
    if args.convert_weight:
        convert_onnx_to_mindir(args.onnx_weight_path, args.mindir_weight_name)

    demo = BiRefNetInfer(args)
    res = demo.infer(args)
