import shutil

from ultralytics import YOLO
from PIL import Image
import numpy as np
import cv2
import ctypes as C
import os
import time

class AI_OCR():
    """OCR pipeline for component markings.

    Combines a YOLO text-region detector, a (currently unused) direction
    classifier, and a YOLO character recognizer, plus a native DLL that
    converts proprietary .pot images into temporary .bmp files.
    """

    def __init__(self):
        # Detection finds text regions; recognition reads individual characters.
        self.detection_model = YOLO(r'./AI_OCR/best_det_0112.pt')
        # Direction classifier is loaded but not used by start_ocr (kept for parity).
        self.direction_model = YOLO(r'./AI_OCR/best_ZF.pt')
        self.reconization_model = YOLO(r'./AI_OCR/best_rec_0116_1.pt')
        # Native helper that converts .pot images to .bmp files on disk.
        self.DLL = C.CDLL('./dll/ConvBitMap.dll')
        # Scratch folder where the DLL writes the temporary .bmp files.
        self.temp_pot_folder = r'./AI_OCR/POT'

    def load_pot(self, pot_img_path):
        """Convert a .pot file to a BGR numpy image via the native DLL.

        Args:
            pot_img_path: path to the .pot file.

        Returns:
            BGR image as a numpy array, or None if conversion or loading fails.
            The temporary .bmp is deleted after a successful load.
        """
        basename = os.path.basename(pot_img_path).split('.')[0]
        name = "{}_{}".format(basename, 1)
        # NOTE(review): relies on ctypes' implicit str -> wchar_t* conversion
        # (no argtypes declared); confirm the DLL expects wide strings.
        ret = self.DLL.BSTR_GetPotToBmp(pot_img_path, 1, self.temp_pot_folder, name)
        if ret != 0:
            return None
        bmp_path = os.path.join(self.temp_pot_folder, "{}.bmp".format(name))
        try:
            # Context manager guarantees the file handle is released before
            # os.remove, which matters on Windows.
            with Image.open(bmp_path) as image:
                img_cv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
            os.remove(bmp_path)
        except Exception as e:
            print(e)
            return None
        return img_cv

    def gray_scale(self, image):
        """Stretch a single-channel image's intensity range to [0, 255].

        Args:
            image: single-channel uint8 array.

        Returns:
            uint8 array of the same shape with min mapped to 0 and max to 255.
            A flat (constant) image returns all zeros instead of dividing by
            zero as the original code did.
        """
        lo = int(image.min())
        hi = int(image.max())
        if hi == lo:
            # Uniform channel: no contrast to stretch.
            return np.zeros_like(image, dtype=np.uint8)
        # +0.5 rounds to nearest before the uint8 truncation.
        return np.uint8(255 / (hi - lo) * (image - lo) + 0.5)

    def trans_str(self, res_rec):
        """Assemble recognized characters into a string, left to right.

        Args:
            res_rec: YOLO recognition results (list with one Results object).

        Returns:
            The recognized text, ordered by each box's center-x coordinate.
        """
        result = res_rec[0]
        names = result.names
        xywh_arr = np.asarray(result.boxes.xywh.cpu())
        clss = result.boxes.cls
        # Stable argsort replaces the original list.index() lookup, which
        # returned the first match and mis-ordered characters whose center-x
        # values were identical.
        order = np.argsort(xywh_arr[:, 0], kind='stable')
        return ''.join(names[int(clss[idx])] for idx in order)

    def _recognize(self, crop):
        """Run the recognition model on one BGR crop and return its text."""
        crop_img = Image.fromarray(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB))
        return self.trans_str(self.reconization_model(crop_img))

    def start_ocr(self, datadict, POT_Root):
        """OCR every component described in datadict.

        Args:
            datadict: {key: [jpg_image, date, slavename, angle, REFID]} where
                jpg_image is a BGR fallback image, angle is a string such as
                '90', and REFID is a reference-designator prefix (e.g. 'U').
            POT_Root: root folder containing <slavename>/<date>/<key>.pot.

        Returns:
            {key: [text, ...]} with one string per detected text region
            ('U' parts), or two strings per region — both reading
            orientations — for all other parts.
        """
        ocr_res = {}
        for key, [jpg_image, date, slavename, angle, REFID] in datadict.items():
            start_time = time.time()
            pot_image_path = os.path.join(POT_Root, slavename, date, key + '.pot')
            print(pot_image_path)
            image = None
            if os.path.exists(pot_image_path):
                image = self.load_pot(pot_image_path)
                # BUG FIX: the original flipped before the None check, so a
                # failed .pot load crashed inside cv2.flip and the jpg
                # fallback was unreachable.
                if image is None:
                    print("POT Load Failed!")
                else:
                    image = cv2.flip(image, 0)
            else:
                print("{} not exists!".format(pot_image_path))
            if image is None:
                image = jpg_image

            img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            det_res = self.detection_model(img)
            Txt = []
            for xyxy in det_res[0].boxes.xyxy:
                box_h = xyxy[3] - xyxy[1]
                box_w = xyxy[2] - xyxy[0]
                # Row/column centers; despite the original x/y naming, rows
                # are indexed by the box's y-range and columns by its x-range.
                row_c = (int(xyxy[1]) + int(xyxy[3])) / 2
                col_c = (int(xyxy[0]) + int(xyxy[2])) / 2
                crop = image[int(row_c - box_h / 2):int(row_c + box_h / 2),
                             int(col_c - box_w / 2):int(col_c + box_w / 2), :]
                # Per-channel contrast stretch before recognition.
                crop = cv2.merge([self.gray_scale(ch) for ch in cv2.split(crop)])
                h, w = crop.shape[:2]
                if h > w:
                    # Rotate tall crops so the text runs horizontally.
                    crop = np.rot90(crop, k=-1)
                if REFID in ['U']:
                    # BUG FIX: the original tested `angle == '90' or '180'`,
                    # which is always true (non-empty string), so every 'U'
                    # part was rotated 180 degrees regardless of angle.
                    if angle in ('90', '180'):
                        crop = np.rot90(crop, k=2)
                    Txt.append(self._recognize(crop))
                else:
                    # Orientation unknown: read the crop in both orientations
                    # and keep both candidate strings.  (A redundant third
                    # inference call in the original was removed.)
                    Txt.append(self._recognize(crop))
                    Txt.append(self._recognize(np.rot90(crop, k=2)))
            ocr_res[key] = Txt

            end_time = time.time()
            print('pot cost time:', end_time - start_time)
        print(ocr_res)
        return ocr_res




