import torch
from torch import nn
from torch.nn import functional as F


def up_conv(cin, cout):
    """Yield a Conv3x3 -> GroupNorm(1 group) -> ReLU block, in order.

    Args:
        cin: number of input channels for the convolution.
        cout: number of output channels (also GroupNorm channel count).

    Yields:
        The three nn.Module layers making up the block.
    """
    yield from (
        nn.Conv2d(cin, cout, 3, padding=1),
        nn.GroupNorm(1, cout),
        nn.ReLU(inplace=True),
    )
    
def foreground_sign(pred):
    """Infer the polarity of a saliency map from its four image corners.

    Heuristic: corners are assumed to be background.  If fewer than two of
    the four corner values are positive, the map's polarity is kept (+1);
    otherwise it is flipped (-1) so that the foreground ends up positive.

    Args:
        pred: tensor of shape (batch, channels, H, W).

    Returns:
        A (batch, channels, 1, 1) tensor of +1.0 / -1.0 multipliers.
    """
    batch, chans, height, width = pred.shape
    positive = (pred > 0).float()
    # Sum of positivity indicators at the four spatial corners.
    corner_hits = (
        positive[:, :, 0, 0]
        + positive[:, :, 0, width - 1]
        + positive[:, :, height - 1, 0]
        + positive[:, :, height - 1, width - 1]
    )
    # (corner_hits < 2) -> 1.0 -> sign +1; otherwise 0.0 -> sign -1.
    sign = (corner_hits < 2).float().mul(2).sub(1)
    return sign.reshape(batch, chans, 1, 1)


class SE_block(nn.Module):
    """Squeeze-and-excitation style channel gate.

    Global-average-pools the input to 1x1, passes it through a 1x1 conv and
    a sigmoid, and scales the input by the resulting per-channel gate.

    NOTE(review): ``self.gn`` is never used in ``forward`` but is kept so the
    module's state_dict keys still match existing checkpoints.
    """

    def __init__(self, feat):
        super(SE_block, self).__init__()
        self.conv = nn.Conv2d(feat, feat, 1)
        self.gn = nn.GroupNorm(feat // 2, feat)

    def forward(self, x):
        # Spatial mean over H, W is equivalent to adaptive_avg_pool2d(x, (1, 1)).
        squeezed = x.mean(dim=(2, 3), keepdim=True)
        gate = torch.sigmoid(self.conv(squeezed))
        return x * gate

class ada_block(nn.Module):
    """Feature adapter: Conv3x3+GN+ReLU (via ``up_conv``) then SE gating.

    Args:
        config: accepted for signature compatibility; unused in this block.
        feat: input channel count.
        out_feat: output channel count (default 64).
    """

    def __init__(self, config, feat, out_feat=64):
        super(ada_block, self).__init__()
        self.ad0 = nn.Sequential(*up_conv(feat, out_feat))
        self.se = SE_block(out_feat)

    def forward(self, x):
        return self.se(self.ad0(x))

def normalize(x):
    """Zero-center each (batch, channel) feature map by its spatial mean."""
    return x - x.mean(dim=(2, 3), keepdim=True)

class decoder(nn.Module):
    """Fuse encoder stages 2-4 into a single-channel saliency prediction.

    Each of the three deepest encoder features is adapted to ``feat[0]``
    channels, upsampled to the stage-0 resolution, concatenated, fused,
    zero-centered, summed over channels, polarity-corrected via
    ``foreground_sign`` and finally upsampled to the input resolution.

    Args:
        config: forwarded to the ``ada_block`` adapters.
        encoder: accepted for signature compatibility; unused here.
        feat: per-stage channel counts of the encoder features.
    """

    def __init__(self, config, encoder, feat):
        super(decoder, self).__init__()
        # Attribute names must stay fixed: checkpoints key on them.
        self.ad2 = ada_block(config, feat[2], feat[0])
        self.ad3 = ada_block(config, feat[3], feat[0])
        self.ad4 = ada_block(config, feat[4], feat[0])
        self.fusion = ada_block(config, feat[0] * 3, feat[0])

    def forward(self, xs, x_size, phase='test'):
        # Common spatial size: that of the shallowest encoder feature.
        target = xs[0].size()[2:]
        adapted = [self.ad2(xs[2]), self.ad3(xs[3]), self.ad4(xs[4])]
        upsampled = [
            F.interpolate(a, size=target, mode='bilinear', align_corners=True)
            for a in adapted
        ]
        feat = normalize(self.fusion(torch.cat(upsampled, dim=1)))
        # Collapse channels into a single-channel map.
        pred = feat.sum(dim=1, keepdim=True)
        # Flip polarity when the corner heuristic says foreground is negative.
        pred = pred * foreground_sign(pred)
        pred = F.interpolate(pred, size=x_size, mode='bilinear', align_corners=True)
        return {'feat': [feat], 'sal': [pred], 'final': pred}

class Network(nn.Module):
    """Encoder-decoder saliency network.

    Args:
        config: forwarded to the decoder.
        encoder: backbone whose forward returns 5 feature maps
            (see methods/base/model.py).
        feat: channel counts of the encoder features,
            e.g. VGG: [64, 128, 256, 512, 512]; ResNet: [64, 256, 512, 1024, 2048].
    """

    def __init__(self, config, encoder, feat):
        super(Network, self).__init__()
        self.encoder = encoder
        self.decoder = decoder(config, encoder, feat)

    def forward(self, x, phase='test'):
        features = self.encoder(x)
        # Decoder upsamples its prediction back to the input's H x W.
        return self.decoder(features, x.size()[2:], phase)

from a2s_utils.a2s.config import get_config
import numpy as np
from a2s_utils.a2s.resnet import resnet
from a2s_utils.a2s.util import tran_param, transform
from a2s_utils.a2s.loss import Loss as model_loss
import torch.nn as nn
class A2S(torch.nn.Module):
    """Wrapper around the A2S saliency ``Network`` with its training loss.

    On construction it builds a ResNet-backed ``Network`` and loads the
    checkpoint given by ``config['weight']`` (CPU map_location).
    """

    def __init__(self):
        super().__init__()
        config, _ = get_config()
        encoder = resnet(pretrained=config['pretrain'])
        # self.encoder = encoder.to(device)
        # Channel widths of the five ResNet encoder stages.
        fl = [64, 256, 512, 1024, 2048]
        self.net = Network(config, encoder, fl)
        self.net.load_state_dict(torch.load(config['weight'], map_location='cpu'))
        print('Load weights from: {}.'.format(config['weight']))
        self.config = config

    def set_train(self):
        """Switch the wrapped network to training mode."""
        self.net.train()

    def set_val(self):
        """Switch the wrapped network to evaluation mode."""
        self.net.eval()

    def _loss_weights(self):
        """Parse the comma-separated ``config['lrw']`` into three floats.

        Falls back to ``[0.5, 0.05, 1]`` when the entry is missing,
        non-numeric, or does not contain exactly three values.  (The
        previous ``lr_weight is None`` check was dead code: ``np.array``
        never returns None, and a malformed string crashed in ``astype``.)
        """
        try:
            weights = np.array(self.config['lrw'].split(',')).astype(float)
        except (KeyError, ValueError):
            return [0.5, 0.05, 1]
        if len(weights) != 3:
            return [0.5, 0.05, 1]
        return weights

    def forward(self, inp, gt, epoch):
        """Run inference, or compute the self-supervised training losses.

        Args:
            inp: sequence whose first element is the input image batch.
            gt: ground truth passed through to ``model_loss``.
            epoch: current epoch index, forwarded to ``model_loss``.

        Returns:
            (loss0, loss1, loss2, Y) in training mode; (0, 0, 0, Y) in eval.
        """
        if self.net.training:
            img = inp[0].float()
            Y = self.net(img)
            priors = [img]
            # NOTE(review): `transform` presumably applies the augmentation
            # parameterized by config['param'] — confirm in a2s_utils.
            self.config['param'] = tran_param(self.config)
            images_temp = transform(img, False, self.config)
            priors = torch.cat(priors, dim=1)
            priors_temp = transform(priors, False, self.config)

            # Second pass on the transformed images for consistency loss.
            Y_ref = self.net(images_temp, 'train')

            lr_weight = self._loss_weights()

            loss0, loss1, loss2 = model_loss(Y, priors, Y_ref, priors_temp, epoch, lr_weight, self.config, gt)

            return loss0, loss1, loss2, Y
        else:
            img = inp[0]
            Y = self.net(img)
            return 0, 0, 0, Y
# if __name__ == '__main__':
    # img = torch.randn(1,3,512,512)
    # gt = torch.randn(1,1,512,512)
    # loss0, loss1, loss2, loss3 = a2s(img,gt)
    # print(loss3)
    # print(loss1)
    # print(loss2)
    # print(loss0)
    # import cv2
    # import matplotlib.pyplot as plt
    # import torchvision.transforms.functional as TF
    # config,_ = get_config()
    # encoder = resnet(pretrained=config['pretrain'])
    # fl = [64, 256, 512, 1024, 2048]
    # net = Network(config,encoder,fl)
    # print('Load weights from: {}.'.format(config['weight']))
    # net.load_state_dict(torch.load(config['weight'], map_location='cpu'))
    # # img = torch.randn(1,3,512,512)
    # img = cv2.imread("/raid/datasets/ImageForgery/IMD2020/image_all/00015_fake.jpg")
    # # 2. Convert color channels from BGR to RGB
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # plt.imshow(img)
    # plt.show()
    # # 3. Convert the image dtype to float and scale to the [0, 1] range
    # img = img.astype('float32') / 255.0
    #
    # # 4. Convert to a Tensor and reorder dimensions to (C, H, W)
    # img_tensor = torch.from_numpy(img).permute(2, 0, 1)
    #
    # images_norm = TF.normalize(img_tensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # images_norm = images_norm.unsqueeze(0)
    # Y = net(images_norm)
    # # print(Y)
    # image = Y['final'].squeeze(0).permute(1, 2, 0).detach().numpy()
    #
    # # 1. Normalize to [0, 1]
    # image_normalized = (image - image.min()) / (image.max() - image.min())
    #
    # # # 2. 转换为 uint8 类型并缩放到 [0, 255]
    # # image_scaled = (image_normalized * 255).astype(np.uint8)
    # # 可视化
    # plt.imshow(image_normalized.squeeze(), cmap='gray')
    # plt.axis('off')  # 不显示坐标轴
    # plt.title('Tensor Visualization')
    # plt.show()
    # print(type(Y['final']))
    #
    # print(Y['final'].shape)
    #
    # priors = [img]
    # config['param'] = tran_param(config)
    # images_temp = transform(img, False, config)
    # priors = torch.cat(priors, dim=1)
    # priors_temp = transform(priors, False, config)
    #
    # Y_ref = net(images_temp, 'train')
    #
    # lr_weight = np.array(config['lrw'].split(',')).astype(float)
    # if lr_weight is None or len(lr_weight) != 3:
    #     lr_weight = [0.5, 0.05, 1]
    #
    # loss0, loss1, loss2 = model_loss(Y, priors, Y_ref, priors_temp, 1, lr_weight, config,gt)
    # print(loss2)
    # print(loss1)
    # print(loss0)
    # # for key, value in out.items():
    # #     print(f'键: {key}, 值: {value[0].shape}')
    '''
        键: feat, 值: torch.Size([1, 64, 256, 256])
        键: sal, 值: torch.Size([1, 1, 512, 512])
        键: final, 值: torch.Size([1, 512, 512])
    '''

