from XEdu.hub import Workflow as wf
import numpy as np
import cv2
# Real-time rock/paper/scissors gesture recognition from a webcam.
# Pipeline per frame: detect the hand bounding box -> extract 21 hand
# keypoints -> normalize keypoints relative to the box -> classify with
# a BaseNN ONNX model -> overlay the predicted label on the frame.
det = wf(task="det_hand")
hand = wf(task='hand21')
basenn = wf(task='basenn', checkpoint='checkpoints/basenn/basenn.onnx')

# Loop-invariant settings hoisted out of the capture loop.
label = ['scissors', 'rock', 'paper']  # class index -> gesture name
position = (50, 100)                   # top-left anchor of the overlay text
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1
color = (255, 255, 255)                # BGR value (white)
thickness = 2

cap = cv2.VideoCapture(0)
try:
    while True:
        ret, img = cap.read()
        if not ret:
            # Camera read failed (disconnected/unavailable): stop instead
            # of busy-looping forever as the original did.
            break

        result, img_with_box = det.inference(data=img, img_type='cv2')  # hand detection
        if len(result) == 0:
            print('NO hand')
        else:
            # Width/height of the detected hand box, used to normalize
            # keypoints into box-relative coordinates.
            x_y = [abs(result[0][0] - result[0][2]), abs(result[0][3] - result[0][1])]
            keypoints, img_with_keypoints = hand.inference(data=img, img_type='cv2')
            # Translate keypoints to the box origin, then scale by box size.
            out = (keypoints - [result[0][0], result[0][1]]) / x_y
            out_file = np.concatenate(out).reshape(1, -1)
            res = basenn.inference(data=out_file)
            res_show = basenn.format_output(lang="en")
            # res_show is a list whose first element is a dict holding
            # the predicted class index.
            num = res_show[0]["prediction"]
            text = label[num]  # label text to draw
            # Draw the predicted gesture onto the frame.
            cv2.putText(img, text, position, font, scale, color, thickness)

        # Show the (possibly annotated) frame; ESC (key 27) exits.
        cv2.imshow("Image with Text", img)
        if cv2.waitKey(1) & 0xFF == 27:
            break
finally:
    # Guaranteed cleanup even if inference raises.
    cap.release()
    cv2.destroyAllWindows()  # bug fix: original omitted the call parentheses