# %%
import os
import cv2
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as F
from matplotlib import pyplot as plt

# %%
# Display an image with matplotlib.
def img_show_plt(img):
    """Render *img* in a matplotlib window using the grayscale colormap."""
    plt.imshow(img, cmap="gray")
    plt.show()

# Model definition: two conv+pool stages followed by three fully connected layers.
class CNN(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel (MNIST) digits.

    The attribute names (conv1, pool, conv2, fc1, fc2, fc3) determine the
    checkpoint state_dict keys loaded elsewhere in this file, so they must
    not be renamed.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        # Two 2x2 poolings halve 28 -> 14 -> 7, hence 64*7*7 flat features.
        self.fc1 = nn.Linear(64 * 7 * 7, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 10)

    def forward(self, x):
        """Return raw class scores (logits) with 10 columns."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        # Collapse the feature maps into a (N, 3136) matrix for the FC stack.
        out = out.view(-1, 64 * 7 * 7)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)

# Invert a grayscale image in place (swap black and white) and return it.
def accessPiexl(img):
    """Invert *img* in place (each pixel becomes 255 - pixel) and return it.

    Fix: the original iterated every pixel in a double Python loop, which
    is extremely slow on numpy arrays; a single vectorized assignment is
    equivalent.  The in-place mutation is preserved because callers rely
    on the side effect.
    """
    img[...] = 255 - img
    return img

# Invert an image, thicken strokes, and zero out pixels below a threshold.
def accessBinary(image, threshold=128):
    """Return the inverted, dilated image with weak pixels zeroed.

    NOTE(review): accessPiexl mutates *image* in place, so the caller's
    array is inverted as a side effect even though a new array is returned.
    """
    inverted = accessPiexl(image)
    # Dilate with a 3x3 kernel to thicken edges (optional but helps).
    inverted = cv2.dilate(inverted, np.ones((3, 3), np.uint8), iterations=1)
    # THRESH_TOZERO keeps values above `threshold` and zeroes the rest.
    _, inverted = cv2.threshold(inverted, threshold, 0, cv2.THRESH_TOZERO)
    return inverted

# Scan a 1-D projection and return (start, end) index pairs of runs above a threshold.
def extractPeek(array_vals, min_vals=10, min_rect=20):
    """Find segments of *array_vals* whose values exceed *min_vals*.

    Returns a list of (start, end) index pairs; segments narrower than
    *min_rect* are discarded as noise.  A run still open at the end of
    the array is not reported (original behavior, kept).

    Fix: the original filtered short segments with list.remove() while
    iterating the same list, which skips the element after each removal —
    consecutive short segments survived filtering.  Building a new
    filtered list is correct.
    """
    segments = []
    start = None
    for i, val in enumerate(array_vals):
        if val > min_vals and start is None:
            start = i
        elif val < min_vals and start is not None:
            segments.append((start, i))
            start = None
    # Drop segments narrower than min_rect.
    return [seg for seg in segments if seg[1] - seg[0] >= min_rect]

# Locate bounding boxes via row/column projection histograms; returns the
# top-left and bottom-right corner of each box (rows must be aligned).
def findBorderHistogram(img):
    """Return candidate borders as [(x0, y0), (x1, y1)] pairs."""
    borders = []
    # Row projection: find the vertical extent of each text line.
    row_spans = extractPeek(np.sum(img, axis=1))
    for top, bottom in row_spans:
        # Column projection inside this line: find each character's extent.
        strip = img[top:bottom, :]
        col_spans = extractPeek(np.sum(strip, axis=0), min_rect=0)
        for left, right in col_spans:
            borders.append([(left, top), (right, bottom)])
    return borders

# Draw bounding boxes (and optional predicted labels) onto the image.
def showResults(img, borders, results=None):
    """Annotate *img* in place with red rectangles and, if given, green labels."""
    print(img.shape)
    for idx, (top_left, bottom_right) in enumerate(borders):
        cv2.rectangle(img, top_left, bottom_right, (0, 0, 255))
        # `results` may be None or empty, in which case only boxes are drawn.
        if results:
            cv2.putText(img, str(results[idx]), top_left,
                        cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 255, 0), 1)
    # img_show_plt(img)

# Crop each bounding box from the image and normalize it to an MNIST-style array.
def transMNIST(img, borders, size=(28, 28)):
    """Return a uint8 array of shape (len(borders), height, width).

    *size* follows the cv2.resize convention of (width, height), so each
    resized crop has shape (size[1], size[0]).  Fix: the original
    allocated the output as (size[0], size[0]) — `size[0]` twice — which
    only worked for the default square size.

    NOTE(review): accessBinary also inverts *img* in place as a side effect.
    """
    imgData = np.zeros((len(borders), size[1], size[0]), dtype='uint8')
    img = accessBinary(img)
    for i, border in enumerate(borders):
        borderImg = img[border[0][1]:border[1][1], border[0][0]:border[1][0]]
        # Pad the narrow dimension toward a square, plus a 7-pixel margin
        # all around (MNIST digits have empty borders).  Assumes digits are
        # taller than wide (padding is applied left/right only) — TODO confirm.
        extendPiexl = (max(borderImg.shape) - min(borderImg.shape)) // 2
        targetImg = cv2.copyMakeBorder(borderImg, 7, 7,
                                       extendPiexl + 7, extendPiexl + 7,
                                       cv2.BORDER_CONSTANT)
        imgData[i] = cv2.resize(targetImg, size)
    return imgData


# %%
def process(img_path):
    """Segment handwritten digits in the image at *img_path* and classify each.

    Returns a list of predicted digit labels, one per detected bounding box.
    Side effects: writes each cropped digit to ./cut_dir/test_<i>.jpg and
    loads model weights from ./mnist_net.pth.
    """
    img_raw = cv2.imread(img_path)
    _, img_bin = cv2.threshold(img_raw, 125, 255, cv2.THRESH_BINARY)
    img_bin = cv2.cvtColor(img_bin, cv2.COLOR_BGR2GRAY)
    # NOTE(review): the return value is discarded, but accessBinary inverts
    # img_bin in place, and findBorderHistogram depends on that inversion.
    accessBinary(img_bin)
    model = CNN()
    # Load the trained state_dict onto the CPU.
    model.load_state_dict(torch.load("./mnist_net.pth", map_location=torch.device('cpu')))
    model.eval()
    borders = findBorderHistogram(img_bin)
    # NOTE(review): this second call inverts img_bin in place again (back
    # toward its original polarity) before cropping — kept as-is to
    # preserve the existing behavior.
    img_bin_back = accessBinary(img_bin)
    imgData = transMNIST(img_bin_back, borders)
    # Fix: create the output directory up front; cv2.imwrite fails silently
    # (returns False) when ./cut_dir does not exist.
    os.makedirs('./cut_dir', exist_ok=True)
    label_lst = []
    with torch.no_grad():
        for i, img in enumerate(imgData):
            name = './cut_dir/test_' + str(i) + '.jpg'
            cv2.imwrite(name, img)
            # Add a leading channel axis: (28, 28) -> (1, 28, 28).
            sample = np.expand_dims(img.astype(np.float32), axis=0)
            # Fix: do not shadow the loop index `i` with the input tensor.
            img_tensor = torch.from_numpy(sample)
            _, preds = torch.max(model(img_tensor), dim=1)
            label_lst.append(preds.cpu().numpy()[0])
    return label_lst

