import torch
import  matplotlib.pyplot as plt
import cv2
from torch import nn
import os
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.optim as optim

# Select GPU when available; models and tensors are moved here via .to(device).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def high_pass_filter(img):
    """Apply a fixed 5x5 vertical-difference kernel to *img*.

    The kernel has a single +1 directly above a single -1 at the anchor,
    so each output pixel is the pixel above minus the pixel itself — a
    simple high-pass (edge-emphasising) response.
    """
    kernel = np.zeros((5, 5), dtype=int)
    kernel[1, 2] = 1
    kernel[2, 2] = -1
    return cv2.filter2D(img, -1, kernel)

# rich_img_high_pass = high_pass_filter(rich_img)
# poor_img_high_pass = high_pass_filter(poor_img)

# fig, ax = plt.subplots(2, 1)
# ax[0].imshow(rich_img_high_pass, cmap='gray')
# ax[1].imshow(poor_img_high_pass, cmap='gray')
# plt.show()

# After the high-pass filter, fingerprint features are extracted by
# convolutional and fully connected layers.

# Build the fingerprint extractor with torch

class FingerprintExtractor(nn.Module):
    """Twin CNN branches that extract fingerprint features from the
    texture-rich and texture-poor halves of an image.

    Both branches have identical architectures but independent weights.
    """

    def __init__(self):
        super(FingerprintExtractor, self).__init__()
        # Build rich first, then poor, so parameter-init order (and hence
        # RNG consumption) matches a literal inline construction.
        self.rich_extractor = self._make_branch()
        self.poor_extractor = self._make_branch()

    @staticmethod
    def _make_branch():
        # Conv(1->32) -> ReLU -> BN -> ReLU -> Hardtanh; spatial size preserved.
        return nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Hardtanh(),
        )

    def forward(self, rich, poor):
        """Return (rich_feature, poor_feature), each of shape (N, 32, H, W)."""
        return self.rich_extractor(rich), self.poor_extractor(poor)
    

'''
Use the following network as the classifier
Type Kernel num With BN Activation
Convo. 32 TRUE ReLU
Convo. 32 TRUE ReLU
Convo. 32 TRUE ReLU
Convo. 32 TRUE ReLU
Avg Pooling None None None
Convo. 32 TRUE ReLU
Convo. 32 TRUE ReLU
Avg Pooling None None None
Convo. 32 TRUE ReLU
Convo. 32 TRUE ReLU
Avg Pooling None None None
Convo. 32 TRUE ReLU
Convo. 32 TRUE ReLU
AdpAvgPool None None None
Flatten None None None
FC None FALSE None
'''

class Classifier(nn.Module):
    """CNN classifier over the concatenated 64-channel fingerprint features.

    Architecture (per the table above): four conv blocks, AvgPool, two conv
    blocks, AvgPool, two conv blocks, AvgPool, two conv blocks, adaptive
    average pool, flatten, FC(32 -> 2).

    FIX: the original appended ``nn.Sigmoid()`` after the final Linear, but
    the training loop uses ``nn.CrossEntropyLoss``, which expects raw
    (unnormalised) logits and applies log-softmax internally. Squashing the
    logits through a sigmoid first cripples training, so the layer was
    removed; ``forward`` now returns raw 2-class logits.
    """

    @staticmethod
    def _conv_block(in_ch, out_ch):
        # One table row: 3x3 conv (spatial size preserved) + BN + ReLU.
        return [
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
        ]

    def __init__(self):
        super(Classifier, self).__init__()
        layers = []
        layers += self._conv_block(64, 32)           # entry: 64 -> 32 channels
        for _ in range(3):
            layers += self._conv_block(32, 32)
        layers.append(nn.AvgPool2d(kernel_size=2, stride=2))
        for _ in range(2):
            layers += self._conv_block(32, 32)
        layers.append(nn.AvgPool2d(kernel_size=2, stride=2))
        for _ in range(2):
            layers += self._conv_block(32, 32)
        layers.append(nn.AvgPool2d(kernel_size=2, stride=2))
        for _ in range(2):
            layers += self._conv_block(32, 32)
        layers += [
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(),
            nn.Linear(32, 2),  # raw logits — no activation (CrossEntropyLoss)
        ]
        self.layer = nn.Sequential(*layers)

    def forward(self, x):
        """x: (N, 64, H, W) feature map -> (N, 2) class logits."""
        return self.layer(x)


class Net(nn.Module):
    """End-to-end model: extract fingerprints from both image halves,
    fuse them along the channel axis, and classify."""

    def __init__(self):
        super(Net, self).__init__()
        self.classifier = Classifier()
        self.f = FingerprintExtractor()

    def forward(self, rich, poor):
        features = self.f(rich, poor)
        # Two (N, 32, H, W) maps -> one (N, 64, H, W) classifier input.
        fused = torch.cat(features, dim=1)
        return self.classifier(fused)
        

# rich_img_high_pass = torch.from_numpy(rich_img_high_pass).float().unsqueeze(0).unsqueeze(0)
# poor_img_high_pass = torch.from_numpy(poor_img_high_pass).float().unsqueeze(0).unsqueeze(0)

class CustomDataset(torch.utils.data.Dataset):
    """Dataset yielding ``(rich_img, poor_img, label)`` triples.

    Each grayscale image is resized to 256x256, split into 32x32 blocks,
    and the blocks are ranked by a local-residual (texture) score. The
    high-score ("rich") half and low-score ("poor") half of the blocks are
    each tiled back into an image, high-pass filtered, and returned as two
    1-channel float tensors plus the sample's class label.

    Fixes vs. the original:
      * ``__getitem__`` shadowed its ``idx`` parameter inside the rich/poor
        loops, so the returned label belonged to a block index, not the
        sample — every item could get the wrong label.
      * ``split_pic`` used ``i + M < x`` and silently dropped the last
        row/column of blocks (49 instead of 64 for a 256x256 image).
      * The ``transform`` constructor argument was ignored.
      * ``get_res_sum`` replaced four nested Python loops with vectorised
        numpy (same values, far faster).
    """

    GRID = 8  # tiles per side when re-assembling blocks into one image

    def __init__(self, img_paths, transform=None):
        """img_paths: dataset root in torchvision ``ImageFolder`` layout.

        transform: optional transform applied to the loaded grayscale image;
        defaults to ToTensor + Resize((256, 256)).
        """
        data = datasets.ImageFolder(img_paths)
        self.labels = []
        self.imgs = []
        self.transform = transform if transform is not None else transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize((256, 256)),
        ])
        for path, label in data.imgs:
            self.labels.append(label)
            self.imgs.append(path)

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, idx):
        img = cv2.imread(self.imgs[idx], cv2.IMREAD_GRAYSCALE)
        if self.transform:
            img = self.transform(img)
        img = img.squeeze().numpy()

        blocks = self.split_pic(img, 32)
        scores = self.get_res_sum(blocks)

        # Rank block indices by texture score: low half = poor, high = rich.
        order = sorted(range(len(scores)), key=lambda i: scores[i])
        half = len(order) // 2
        poor_idx, rich_idx = order[:half], order[half:]

        # Tile each half of the blocks back into a single image.
        rich_img = self.concatenate_images([blocks[i] for i in rich_idx])
        poor_img = self.concatenate_images([blocks[i] for i in poor_idx])

        rich_img = self.high_pass_filter(rich_img)
        poor_img = self.high_pass_filter(poor_img)
        rich_img = torch.from_numpy(rich_img).float().unsqueeze(0)
        poor_img = torch.from_numpy(poor_img).float().unsqueeze(0)
        # `idx` is untouched above, so this is the correct sample label.
        return rich_img, poor_img, self.labels[idx]

    def concatenate_images(self, images):
        """Tile equally-sized blocks row-major into a GRID x GRID canvas.

        Blocks beyond the grid are dropped; unfilled cells stay zero.
        """
        if not all(img.size == images[0].size for img in images):
            raise ValueError("All images must be of the same size.")

        height, width = images[0].shape  # numpy shape is (rows, cols)
        grid = self.GRID
        canvas = np.zeros((height * grid, width * grid))
        rows, cols = canvas.shape
        for n, img in enumerate(images):
            r, c = divmod(n, grid)
            top, left = r * height, c * width
            if top + height > rows or left + width > cols:
                continue
            canvas[top:top + height, left:left + width] = img
        return canvas

    def split_pic(self, img, M):
        """Split *img* into non-overlapping MxM blocks, shape (K, M, M).

        Blocks are emitted column-by-column (matching the original loop
        order). The original's ``i + M < x`` off-by-one is fixed so the
        final row/column of full blocks is included.
        """
        x, y = img.shape
        blocks = [img[i:i + M, j:j + M]
                  for j in range(0, y - M + 1, M)
                  for i in range(0, x - M + 1, M)]
        if not blocks:
            # Degenerate case (image smaller than M): keep the original
            # behaviour of returning the single truncated top-left block.
            return img[np.newaxis, 0:M, 0:M]
        return np.stack(blocks)

    def get_res_sum(self, split_arr):
        """Per-block texture score: total absolute difference between each
        pixel and its horizontal, vertical, diagonal and anti-diagonal
        neighbours (vectorised equivalent of the original nested loops)."""
        scores = []
        for pic in split_arr:
            col = np.abs(np.diff(pic, axis=1)).sum()    # horizontal neighbours
            row = np.abs(np.diff(pic, axis=0)).sum()    # vertical neighbours
            diag = np.abs(pic[1:, 1:] - pic[:-1, :-1]).sum()
            adiag = np.abs(pic[1:, :-1] - pic[:-1, 1:]).sum()
            scores.append(col + row + diag + adiag)
        return scores

    def high_pass_filter(self, img):
        """Vertical-difference high-pass filter (same kernel as the
        module-level ``high_pass_filter``)."""
        kernel = np.zeros((5, 5), dtype=int)
        kernel[1, 2] = 1
        kernel[2, 2] = -1
        return cv2.filter2D(img, -1, kernel)



# Build dataset and loader at module level, as before.
# NOTE(review): the hard-coded path assumes the script runs from the
# repository root — confirm before deploying.
cd = CustomDataset("dl-ml/FakePicDetector/datasets/Progan")
dat = DataLoader(cd, batch_size=8, shuffle=True)

# CrossEntropyLoss expects raw logits and integer class labels.
criterion = nn.CrossEntropyLoss()


def train(criterion, epochs=1, lr=0.1):
    """Train a fresh Net on the module-level loader and return it.

    criterion: loss taking (logits, labels).
    epochs: number of passes over the loader (default 1 — the original's
        single-pass behaviour).
    lr: SGD learning rate (default 0.1, unchanged from the original).
    """
    net = Net().to(device)
    net.train()  # ensure BatchNorm uses batch statistics during training
    optimizer = optim.SGD(net.parameters(), lr=lr)
    for epoch in range(epochs):
        for num, (rich, poor, label) in enumerate(dat):
            rich, poor = rich.to(device), poor.to(device)
            label = label.to(device)
            out = net(rich, poor)
            loss = criterion(out, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # .item() prints a plain float rather than a tensor repr.
            print(f"epoch {epoch} step {num}: loss={loss.item():.4f}")
    return net


if __name__ == "__main__":
    # Guarded so importing this module no longer kicks off training.
    train(criterion)
