import cv2
import re
import numpy as np
import numpy.matlib
import argparse
import time
from PIL import Image, ImageEnhance
import pytesseract

# Image size presets:
#   min: roughly 100*300 - 200*600 pixel labels
# Threshold parameters (per size preset):
#   margin: padding (px) added around each detected character box
#   minWS: minimum character width or height (px)
#   maxWS: maximum character width or height (px)
#   minWA: minimum character area (px^2)
#   maxWA: maximum character area (px^2)
#   delta: max allowed deviation of a character from its fitted text line
# Parsing rules (per barcode layout):
#   regular: regex a line must match to count as the barcode
#   shape: barcode layout -- 'lr': left/right halves, 'ud': stacked top/bottom,
#          'none': layout could not be determined
#   total: total number of text lines expected
#   target: indices of the lines to parse, ordered by coordinate 0, 1, 2...
CFG = {
    'thresh': {
        'min': {'margin': 6, 'minWS': 4, 'maxWS': 30, 'minWA': 40, 'maxWA': 400, 'delta': 60},
    },
    'shape': {
        'ud': {'regular': r'\d{7,}', 'total': 4, 'target': [0, 1]},
        'lr': {'regular': r'\d{7,}', 'total': 4, 'target': [3]},
        'none': {'regular': r'\d{7,}', 'total': -1, 'target': []},
    },
}
# Currently active threshold preset; mutated by eval_text() via `global SIZE`.
SIZE = 'min'

def get_barcode(img):
    """Return a copy of the bottom quarter of *img*, where the barcode lives.

    Works for both grayscale (H, W) and color (H, W, C) arrays.
    """
    height, width = img.shape[0], img.shape[1]
    top = int(height * 3 // 4)
    return img[top:height, 0:width].copy()

def box_filter(img, r):
    """Box-sum filter of radius *r* implemented with cumulative sums.

    Computes the (2r+1)x(2r+1) window sum at every pixel using prefix sums
    (one row pass, one column pass) instead of per-pixel loops, then stretches
    the result to the full 0-255 range.

    Note: the result is normalized by the global maximum, NOT divided by the
    window area, so this is a contrast-stretched box sum rather than a true
    mean filter.

    Parameters:
        img: 2-D (grayscale) array; assumes 2*r+1 <= min(img.shape).
        r: window radius in pixels.
    Returns:
        uint8 array with the same shape as img.
    """
    I = img.copy()
    imdst = np.zeros(I.shape)
    h, w = I.shape

    # Row pass: prefix-sum down axis 0, window sums by differencing.
    I = np.cumsum(I, 0)
    imdst[:r+1, :] = I[r:2*r+1, :]                     # top border: window clipped at row 0
    imdst[r+1:h-r, :] = I[2*r+1:, :] - I[:h-2*r-1, :]  # interior: full windows
    # Bottom border: tile the last cumulative row and subtract.
    # NOTE(review): numpy.matlib is deprecated; np.tile is the modern equivalent.
    imdst[h-r:, :] = np.matlib.repmat(I[h-1,:], r, 1) - I[h-2*r-1:h-r-1, :]
    # Column pass: same trick along axis 1 on the intermediate image.
    I=np.cumsum(imdst, 1)
    imdst[:, :r+1] = I[:, r:2*r+1]
    imdst[:, r+1:w-r] = I[:, 2*r+1:] - I[:, :w-2*r-1]
    imdst[:, w-r:] = np.matlib.repmat(I[:, w-1].reshape(-1, 1), 1, r) - I[:, w-2*r-1:w-r-1]
    # Stretch to 0-255. NOTE(review): divides by the max -- an all-zero input
    # would divide by zero; confirm inputs are never fully black.
    imdst = np.uint8(imdst * 255 / np.max(imdst))
    return imdst

def side_win_box_filter(img, radius, iternum):
    """Side-window box filter (edge-preserving smoothing) -- unimplemented.

    TODO: implement; currently a no-op stub returning None.
    """
    pass
    #r = radius
    #k = np.ones()

def filter(img):
    """Denoise a grayscale image while keeping character edges sharp.

    Applies a bilateral filter (d=7, sigmaColor=sigmaSpace=75).  Median and
    box filtering were tried during development but preserved edges worse.

    NOTE: the name shadows the `filter` builtin; kept for caller compatibility.
    """
    return cv2.bilateralFilter(img, 7, 75, 75)

def threshold(img):
    """Binarize *img* so that dark characters become white foreground.

    Uses Gaussian adaptive thresholding (block size 11, C=2) followed by an
    inversion; morphological erosion was tried and left disabled.
    """
    binarized = cv2.adaptiveThreshold(
        img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    return cv2.bitwise_not(binarized)

def find_boxes(thresh):
    """Find candidate character bounding boxes in a binary image.

    Contours whose bounding rectangle passes the size/area limits configured
    in CFG['thresh'][SIZE] are kept, expanded by `margin` pixels (clamped to
    the image), and deduplicated with non-max suppression.

    Parameters:
        thresh: binary (thresholded) single-channel image.
    Returns:
        int array of [x1, y1, x2, y2] boxes (or [] when nothing matched).
    """
    height, width = thresh.shape[0], thresh.shape[1]
    cfg = CFG['thresh'][SIZE]
    margin = cfg['margin']
    minWS, maxWS = cfg['minWS'], cfg['maxWS']
    minWA, maxWA = cfg['minWA'], cfg['maxWA']
    # cv2.findContours returns (img, contours, hierarchy) on OpenCV 3.x but
    # (contours, hierarchy) on 2.x/4.x -- take the last two so both work.
    res = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours, hier = res[-2], res[-1]
    boxes = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if (minWS <= w <= maxWS and minWS <= h <= maxWS
                and minWA <= w * h <= maxWA):
            boxes.append([max(0, x - margin), max(0, y - margin),
                          min(width, x + w + margin), min(height, y + h + margin)])
    return non_max_suppression_fast(np.array(boxes), 0.5)

def pick_pnts(points):
    """Greedily pick the largest subset of *points* that is roughly collinear.

    Each point is an (x, y, index) tuple.  Every pair of points defines a
    candidate line; other points whose cross-product deviation from that line
    is below CFG's `delta` join the candidate set.  The biggest set (of at
    least 3 points) wins, is printed, removed from *points* in place, and
    returned.  Fewer than 3 remaining points are returned (and cleared) as-is.
    """
    delta = CFG['thresh'][SIZE]['delta']
    if len(points) < 3:
        leftovers = points.copy()
        points.clear()
        return leftovers

    best = []
    count = len(points)
    for a in range(count - 2):
        for b in range(a + 1, count - 1):
            origin, anchor = points[a], points[b]
            base = np.array([anchor[0] - origin[0], anchor[1] - origin[1]])
            candidate = [origin, anchor]
            for c in range(count):
                if a == b or b == c or c == a:
                    continue
                probe = points[c]
                leg = np.array([anchor[0] - probe[0], anchor[1] - probe[1]])
                # |base x leg| ~ (deviation of probe from the line) * |base|
                if abs(np.cross(base, leg)) < delta:
                    candidate.append(probe)
            if len(candidate) >= 3 and len(candidate) > len(best):
                best = candidate

    print(f'Count: {len(best)}', end='')
    for p in best:
        print(f', {p}', end='')
        points.remove(p)
    print('')
    return best

def pick_box(boxes):
    """Group candidate character boxes into rotated text-line rectangles.

    Repeatedly extracts the largest collinear set of box centers via
    pick_pnts() and fits a cv2.minAreaRect around the grouped boxes' corners.
    A vote over rectangle orientation decides the barcode layout: 'ud'
    (lines stacked vertically), 'lr' (side by side) or 'none'.

    Parameters:
        boxes: iterable of [x1, y1, x2, y2] boxes.
    Returns:
        (rects, shape, mean_rotate): the target rectangles configured for the
        detected shape, the shape key, and the weighted mean rotation angle.
        Returns ([], 'none', 0.0) when no text line could be formed.
    """
    points = []
    for i, (bgnX, bgnY, endX, endY) in enumerate(boxes):
        # Track the box center plus its original index for later lookup.
        points.append(((bgnX + endX)//2, (bgnY + endY)//2, i))

    val = 0   # orientation vote: wide rects push +1 (ud), tall push -1 (lr)
    rects = []
    while len(points) > 0:
        line = pick_pnts(points)
        if len(line) <= 1:
            break
        pnts = []
        for _, _, i in line:
            b = boxes[i]
            # All four corners of each grouped box.
            pnts.append([b[0], b[1]])
            pnts.append([b[2], b[3]])
            pnts.append([b[0], b[3]])
            pnts.append([b[2], b[1]])
        if len(pnts) >= 4:
            box2d = cv2.minAreaRect(np.array(pnts))
            # Normalize the (center, size, angle) representation so the rest
            # of the pipeline can assume a consistent angle range.
            if box2d[2] < -45:
                box2d = ((box2d[0][0], box2d[0][1]), (box2d[1][1], box2d[1][0]), 90 + box2d[2])
            if box2d[1][0] > box2d[1][1]:
                val += 1
            elif box2d[1][1] > box2d[1][0]:
                val -= 1
            rects.append(box2d)

    # No line found at all: bail out before np.average divides by an empty
    # weight sum (ZeroDivisionError on empty input).
    if not rects:
        return [], 'none', 0.0

    if val > 0:
        shape = 'ud'
        rects.sort(key=lambda x: x[0][1])        # top-to-bottom
        weights = [int(b[1][0]) for b in rects]  # weight angles by width
    elif val < 0:
        shape = 'lr'
        rects.sort(key=lambda x: x[0][0])        # left-to-right
        weights = [int(b[1][1]) for b in rects]  # weight angles by height
    else:
        shape = 'none'
        weights = [1] * len(rects)
    mean_rotate = np.average([b[2] for b in rects], weights=weights)

    total = CFG['shape'][shape]['total']
    target = CFG['shape'][shape]['target']
    print('total: {}, target: {}, shape: {}, rectnum: {}'.format(total, target, shape, len(rects)))

    # Keep only the configured target lines once enough lines were found.
    if len(rects) >= total:
        rects = [rects[i] for i in target]
    return rects, shape, mean_rotate

def stat(img):
    """Plot column-wise (top) and row-wise (bottom) pixel-sum projections.

    Debug helper: the projection profiles reveal where text rows and columns
    lie in a binary image.  Blocks until the matplotlib window is closed.
    """
    # matplotlib was never imported at module level (the original code raised
    # NameError on `plt`); import lazily since this is a debug-only helper.
    import matplotlib.pyplot as plt

    col_sum = img.sum(axis=0)
    row_sum = img.sum(axis=1)
    plt.subplot(2, 1, 1)
    plt.plot(np.arange(col_sum.shape[0]), col_sum)
    plt.subplot(2, 1, 2)
    plt.plot(np.arange(row_sum.shape[0]), row_sum)
    plt.show()

def non_max_suppression_fast(boxes, overlapThresh):
    """Greedy non-maximum suppression over axis-aligned boxes.

    Parameters:
        boxes: (N, 4) array of [x1, y1, x2, y2] corners.
        overlapThresh: a box is suppressed when the intersection with an
            already-kept box covers more than this fraction of its own area.
    Returns:
        int array of surviving boxes, or [] when *boxes* is empty.
    """
    if len(boxes) == 0:
        return []
    # Work in float to keep the overlap ratios exact.
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")

    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(y2)  # process from the largest bottom edge backwards

    keep = []
    while order.size > 0:
        rest, cur = order[:-1], order[-1]
        keep.append(cur)

        # Intersection of the current box with every remaining box.
        ix1 = np.maximum(x1[cur], x1[rest])
        iy1 = np.maximum(y1[cur], y1[rest])
        ix2 = np.minimum(x2[cur], x2[rest])
        iy2 = np.minimum(y2[cur], y2[rest])
        iw = np.maximum(0, ix2 - ix1 + 1)
        ih = np.maximum(0, iy2 - iy1 + 1)

        # Fraction of each remaining box covered by the current one; keep
        # only those at or below the threshold.
        frac = (iw * ih) / areas[rest]
        order = rest[frac <= overlapThresh]

    return boxes[keep].astype("int")

def eval_text(img, size='min', debug=False):
    """Locate text lines in a label image and OCR them with pytesseract.

    Each detected line rectangle is cropped (deskewed first when the mean
    rotation is large), padded to hide box edges, and OCR'd in both the
    180-degree-flipped and unflipped orientations.  The first orientation
    whose result matches the configured regex locks the orientation for all
    subsequent lines.

    Parameters:
        img: 3-channel color image of the label.
        size: key into CFG['thresh'] selecting the threshold parameter set.
        debug: show intermediate images via cv2.imshow when True.
    Returns:
        the recognized strings of the winning orientation joined by '_',
        or '' when no line matched.
    """
    bgn = time.time()
    global SIZE
    SIZE = size

    # NOTE(review): cv2.imread yields BGR, but COLOR_RGB2GRAY is used here;
    # the grayscale channel weights differ slightly -- confirm intent.
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    barcode = filter(gray)
    if debug: cv2.imshow('barcode', barcode)

    th = threshold(barcode)
    if debug: cv2.imshow('th', th)

    boxes = find_boxes(th)
    # Debug visualization of the raw candidate boxes.
    if True:
        tmp = img.copy()
        points = []
        for (bgnX, bgnY, endX, endY) in boxes:
            tmp = cv2.rectangle(tmp, (bgnX, bgnY), (endX, endY), (0, 0, 255), 1)
        if debug: cv2.imshow('tmp', tmp)

    boxes, shape, mean_rotate = pick_box(boxes)

    im = img.copy()
    # Sharpening (disabled during development):
    #im = Image.fromarray(im)#cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    #im = ImageEnhance.Sharpness(im).enhance(3)
    #im = np.asarray(im)
    #img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

    # Smoothing (disabled during development):
    #im = gray.copy()
    #im = cv2.medianBlur(im, 3)
    #im = cv2.bilateralFilter(im, 5, 30, 30)
    imshow = im.copy()
    regular = CFG['shape'][shape]['regular']
    result = []
    # indx: orientation decided so far -- -1 unknown, 0 means the 180-degree
    # flip matched first, 1 means the unflipped crop matched first.
    indx = -1
    margin = CFG['thresh'][SIZE]['margin']
    for i, b in enumerate(boxes):
        # NOTE(review): np.int0 was removed in numpy 2.0; np.intp is the
        # modern spelling.
        r = np.int0(cv2.boxPoints(b))
        cv2.drawContours(imshow, [r], 0, (255, 255, 255), 1)

        tmp = gray.copy()
        print('box2d: {}, mean_rotate: {}'.format(b, mean_rotate))
        if abs(mean_rotate) < 1:
            # Angle is small: crop straight from the box corners, no rotation.
            x1, y1 = r[1][0], r[1][1]
            x2, y2 = r[3][0], r[3][1]
        else:
            # Larger angle: rotate the whole image around the rect center.
            # NOTE(review): the center is passed as (cy, cx) but
            # cv2.getRotationMatrix2D expects (x, y) -- verify this is not a
            # swapped-coordinate bug.
            rotate = cv2.getRotationMatrix2D((b[0][1], b[0][0]), mean_rotate, 1) # args: center, angle, scale
            tmp = cv2.warpAffine(tmp, rotate, (tmp.shape[1], tmp.shape[0]))
            #cv2.imshow(f'rotate{i}', tmp)
            x1, y1 = int(b[0][0] - b[1][0]/2), int(b[0][1] - b[1][1]/2)
            x2, y2 = int(b[0][0] + b[1][0]/2), int(b[0][1] + b[1][1]/2)

        tmp = tmp[y1:y2, x1:x2]
        # Paint the crop borders light gray so leftover box edges do not
        # confuse the OCR.
        mult = 2
        tmp[:margin//mult, :]  = 190
        tmp[-margin//mult:, :] = 190
        tmp[:, :margin//mult]  = 190
        tmp[:, -margin//mult:] = 190
        if x2 - x1 < y2 -y1:
            # Portrait crop: rotate 90 degrees (transpose + horizontal flip).
            tmp = cv2.transpose(tmp)
            tmp = cv2.flip(tmp, 1)

        print('-'*40)
        str1, str2 = '', ''
        if indx == 0:
            # Orientation already known: OCR the 180-degree flip only.
            tmp = cv2.flip(tmp, -1)
            str1 = pytesseract.image_to_string(tmp)
            str1 = re.sub(r'[^-\w\d]', '', str1)
            if debug: cv2.imshow(f'tmp{i}_180', tmp)
            print(str1)
            result.append((str1, str2))
            continue
        elif indx == 1:
            # Orientation already known: OCR the unflipped crop only.
            str2 = pytesseract.image_to_string(tmp)
            str2 = re.sub(r'[^-\w\d]', '', str2)
            if debug: cv2.imshow(f'tmp{i}_0', tmp)
            print(str2)
            result.append((str1, str2))
            continue
        else:
            # Unknown orientation: try the 180-degree flip first...
            tmp = cv2.flip(tmp, -1)
            str1 = pytesseract.image_to_string(tmp)
            str1 = re.sub(r'[^-\w\d]', '', str1)
            if debug: cv2.imshow(f'tmp{i}_180', tmp)
            print(str1)
            if re.match(regular, str1, re.I):
                indx = 0
                result.append((str1, str2))
                continue

            # ...then flip back and try the original orientation.
            tmp = cv2.flip(tmp, -1)
            str2 = pytesseract.image_to_string(tmp)
            str2 = re.sub(r'[^-\w\d]', '', str2)
            if debug: cv2.imshow(f'tmp{i}_0', tmp)
            print(str2)
            if re.match(regular, str2, re.I):
                indx = 1
                result.append((str1, str2))
                continue

    if debug: cv2.imshow('rects', imshow)
    print(result)

    end = time.time()
    print(f'use time: {end - bgn}s')
    if indx < 0: return ''
    return '_'.join([x[indx] for x in result if len(x[indx]) > 0])

if __name__ == '__main__':
    # Command-line entry point: read an image, crop the barcode strip and OCR it.
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--type', default='slide', help='plate locate type: [slide|paper|waxblock]')
    parser.add_argument('-f', '--file', default='./test.jpg', help='input picture file.')
    opts = parser.parse_args()

    image = cv2.imread(opts.file)

    print(eval_text(get_barcode(image), debug=True))

    # Keep the debug windows open until a key is pressed.
    cv2.waitKey(0)
    cv2.destroyAllWindows()

