# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import cv2
import torch
from PIL import Image
from torchvision import transforms
import os
import shutil
from data import MyDataset
from resnet2 import ResNet18
# from model import se_resnext50
import torch.nn as nn


def divide_method1(img, w, h, m, n):  # split into (m-1) rows x (n-1) cols
    """Split *img* into a grid of (m-1) x (n-1) blocks.

    ``m`` and ``n`` count grid LINES, so the image is cut into ``m - 1``
    rows and ``n - 1`` columns of blocks.

    :param img: (h, w, 3) uint8 image.
    :param w: image width in pixels.
    :param h: image height in pixels.
    :param m: number of horizontal grid lines (block rows + 1).
    :param n: number of vertical grid lines (block columns + 1).
    :return: uint8 array of shape (m-1, n-1, block_h, block_w, 3); blocks
             that round out smaller than the nominal block size are
             zero-padded at the bottom/right edge.
    """
    gx, gy = np.meshgrid(np.linspace(0, w, n), np.linspace(0, h, m))
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the supported equivalent.
    gx = np.round(gx).astype(int)
    gy = np.round(gy).astype(int)

    # 5-D tensor: the first two dims index the block (row i, col j); the
    # last three hold the block's pixel data, sized by rounding h/(m-1)
    # and w/(n-1) to the nearest integer.
    divide_image = np.zeros([m - 1, n - 1, int(h * 1.0 / (m - 1) + 0.5), int(w * 1.0 / (n - 1) + 0.5), 3],
                            np.uint8)
    for i in range(m - 1):
        for j in range(n - 1):
            # Slice-assign with explicit extents so blocks that come out a
            # pixel smaller/larger than nominal (grid rounding) still fit
            # without an index error; any slack stays zero-filled.
            divide_image[i, j, 0:gy[i + 1][j + 1] - gy[i][j], 0:gx[i + 1][j + 1] - gx[i][j], :] = \
                img[gy[i][j]:gy[i + 1][j + 1], gx[i][j]:gx[i + 1][j + 1], :]
    return divide_image


def display_blocks(divide_image):
    """Save every block to block/<k>.jpg, list the paths in test.txt,
    render the block grid to display_blocks.jpg, then classify the blocks.

    :param divide_image: 5-D array (m, n, block_h, block_w, 3) of RGB blocks.
    :return: the list of anomalous 0-based block indices from prediect().
    """
    m, n = divide_image.shape[0], divide_image.shape[1]
    # BUGFIX: context manager guarantees test.txt is closed (and flushed)
    # even if cv2.imwrite or matplotlib raises mid-loop; the original
    # open()/close() pair leaked the handle on any exception.
    with open('test.txt', 'w') as file:
        for i in range(m):
            for j in range(n):
                plt.subplot(m, n, i * n + j + 1)
                plt.imshow(divide_image[i, j, :])
                # NOTE(review): blocks are RGB at this point but cv2.imwrite
                # expects BGR, so the saved JPEGs have swapped channels --
                # confirm the classifier was trained on the same ordering.
                cv2.imwrite('block/'+str(i * n + j + 1)+'.jpg',divide_image[i, j, :])
                file.write('block/'+str(i * n + j + 1)+'.jpg' + '\n')
                plt.axis('off')
        plt.savefig("display_blocks.jpg")
    # test.txt must be closed before prediect() reads it back.
    e = prediect()
    return e

def image_concat(divide_image, e):
    """Stitch the block grid back into one image, outlining flagged blocks.

    :param divide_image: 5-D array (m, n, grid_h, grid_w, 3) of image blocks.
                         Blocks listed in ``e`` are mutated in place: a red
                         rectangle is drawn on them with cv2.rectangle.
    :param e: ascending list of 0-based row-major block indices (i*n + j)
              to outline, as produced by prediect().
    :return: uint8 array of shape (m*grid_h, n*grid_w, 3) — the restored image.
    """
    m, n, grid_h, grid_w = [divide_image.shape[0], divide_image.shape[1],  # blocks per row/column
                            divide_image.shape[2], divide_image.shape[3]]  # size of each block

    restore_image = np.zeros([m * grid_h, n * grid_w, 3], np.uint8)

    # BUGFIX: removed the dead no-op statement `restore_image[0:grid_h, 0:]`
    # that evaluated a slice and discarded it.
    k = 0  # cursor into e; relies on e being sorted ascending

    for i in range(m):
        for j in range(n):
            if k < len(e) and i * n + j == e[k]:
                k = k + 1
                # NOTE(review): (64, 64) assumes 64x64 blocks — should
                # arguably be (grid_w, grid_h); confirm before generalizing.
                divide_image[i, j, :] = cv2.rectangle(divide_image[i, j, :], (0, 0), (64, 64), (255, 0, 0), 2)

            restore_image[i * grid_h:(i + 1) * grid_h, j * grid_w:(j + 1) * grid_w] = divide_image[i, j, :]
    return restore_image


def prediect():
    """Classify every block listed in test.txt and return the outliers.

    Loads ResNet18 weights from test.pth, predicts a label per block, takes
    the majority label over all blocks, and reports the blocks that disagree.

    :return: list of 0-based dataset indices (== block order in test.txt)
             whose predicted label differs from the majority label.
    """
    predictions = []  # renamed from `list` — don't shadow the builtin
    outliers = []
    transform = transforms.Compose([
        # transforms.RandomCrop(32,padding = 4),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    testset = MyDataset('test.txt', transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2)
    # model = se_resnext50()
    model = ResNet18()
    model = nn.DataParallel(model).cuda()
    # model.load_state_dict(torch.load('net_160.pth'))
    model.load_state_dict(torch.load('test.pth'))
    model.eval()  # set once up front, not on every batch
    # Inference only: no_grad skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        for data in testloader:
            data = data.cuda()
            outputs = model(data)
            # Take the highest-scoring class per sample.
            # NOTE(review): outputs[0] implies the model returns a tuple whose
            # first element holds the class scores — confirm against ResNet18.
            _, predicted = torch.max(outputs[0].data, 1)
            predictions.extend(predicted.cpu().numpy().tolist())
    print(predictions)
    maxlabel = max(predictions, key=predictions.count)
    print(maxlabel)
    for i, val in enumerate(predictions):
        if val != maxlabel:
            outliers.append(i)
    print(outliers)
    return outliers

def setDir(filepath):
    """Reset the pipeline's working state.

    Deletes a stale test.txt (if present) and ensures *filepath* exists as
    an empty directory — creating it if missing, emptying it otherwise.

    :param filepath: directory path to (re)create empty.
    """
    if os.path.exists('test.txt'):
        os.remove('test.txt')
    # Wipe any previous contents, then recreate the directory fresh.
    if os.path.exists(filepath):
        shutil.rmtree(filepath)
    os.mkdir(filepath)

def detect(path):
    """Run the full pipeline on one image.

    Splits the image into 64x64 blocks, classifies each block, and saves and
    shows a reconstruction with the minority-label blocks outlined in red.

    :param path: path to the input image file (readable by cv2.imread).
    """
    setDir('block')
    img = cv2.imread(path)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    print('原始图像形状:', img.shape)
    # Cleanup: the unused fig1/fig2/fig3 locals and the dead pre-crop
    # h, w assignment (overwritten below before any use) were removed.
    plt.figure('原始图像')
    plt.imshow(img)
    plt.axis('off')
    plt.title('Original image')

    # Crop to a multiple of the 64-px block size so the grid divides evenly.
    m = img.shape[0]//64  # block rows
    h = m * 64
    n = img.shape[1]//64  # block columns
    w = n * 64
    img = img[0:h, 0:w]  # crop coordinates are [y0:y1, x0:x1]

    # m+1 / n+1 are grid-LINE counts; m and n are the block counts.
    divide_image1 = divide_method1(img, w, h, m + 1, n + 1)
    plt.figure('分块后的子图像')
    e = display_blocks(divide_image1)

    # Reassemble the blocks with the anomalous ones outlined.
    plt.figure('分块图像的还原')
    restore_image1 = image_concat(divide_image1, e)
    plt.imshow(restore_image1)

    # Convert back to BGR for OpenCV's writer.
    cv2.imwrite('restore_image1.jpg', cv2.cvtColor(restore_image1, cv2.COLOR_RGB2BGR))

    plt.axis('off')
    plt.title('Rounding')
    print('还原后的图像尺寸:')
    print(restore_image1.shape)
    plt.show()

# Guard the script entry point so importing this module (e.g. for reuse of
# divide_method1/image_concat) does not trigger the whole pipeline.
if __name__ == '__main__':
    detect('test/Canon/Ixus70/1/Canon_Ixus70_1_3848.JPG')
    # detect('test/modify/Agfa_DC-504_0_12.png')
    # detect('test/modify/Canon_Ixus55_0_2652.png')