

import copy
import cv2
import numpy as np

import torch
import torch.nn.functional as F
import torchvision

from torch import nn
from torchvision.ops import MultiScaleRoIAlign
from typing import List, Tuple, Dict

from my_py_toolkit.file.file_toolkit import *
# from torch.optim import Adam

from torchvision.models.detection.anchor_utils import AnchorGenerator
from torchvision.models.detection.rpn import RPNHead, RegionProposalNetwork
# from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.transform import GeneralizedRCNNTransform
# from torchvision.models.detection.backbone_utils import resnet_fpn_backbone, _validate_trainable_layers, mobilenet_backbone
# from torchvision.models.detection import _utils as det_utils
# from torchvision.models.detection.roi_heads import fastrcnn_loss, maskrcnn_loss, maskrcnn_inference, keypointrcnn_loss, keypointrcnn_inference
# from torchvision.ops import boxes as box_ops
# from tqdm import tqdm

from .layer import *

# SRM (Spatial Rich Model) high-pass filter bank used to extract noise
# residuals from an RGB image: two edge detectors (5x5 and 3x3, the latter
# zero-padded to 5x5) and a second-order linear filter, each normalized by
# its quantization factor.  Built once at import time so SRM() does not
# rebuild the weight tensor on every forward pass.
#
# NOTE(review): the SRM literature normally quantizes the 5x5 edge kernel
# by 12 and the 3x3 one by 4; the divisors here are the other way round.
# Preserved as-is to keep behavior identical — confirm intent.
_SRM_EDGE_5X5 = np.asarray(
    [[-1, 2, -2, 2, -1],
     [2, -6, 8, -6, 2],
     [-2, 8, -12, 8, -2],
     [2, -6, 8, -6, 2],
     [-1, 2, -2, 2, -1]], dtype=float) / 4.0
_SRM_EDGE_3X3 = np.asarray(
    [[0, 0, 0, 0, 0],
     [0, -1, 2, -1, 0],
     [0, 2, -4, 2, 0],
     [0, -1, 2, -1, 0],
     [0, 0, 0, 0, 0]], dtype=float) / 12.0
_SRM_LINEAR = np.asarray(
    [[0, 0, 0, 0, 0],
     [0, 0, 1, 0, 0],
     [0, 0, -2, 0, 0],
     [0, 0, 1, 0, 0],
     [0, 0, 0, 0, 0]], dtype=float) / 2.0
# F.conv2d weight layout is (out_channels, in_channels, kH, kW); each output
# channel applies one kernel identically to all three input channels.
_SRM_FILTERS = torch.FloatTensor(np.stack(
    [np.stack([k, k, k])
     for k in (_SRM_EDGE_5X5, _SRM_EDGE_3X3, _SRM_LINEAR)]))


def SRM(imgs):
    """Apply the fixed SRM noise-residual filters to a batch of images.

    Args:
        imgs: float tensor of shape (B, 3, H, W).

    Returns:
        Tensor of shape (B, 3, H, W): one output channel per SRM kernel.
        Spatial size is preserved (5x5 kernels, stride 1, padding 2).
    """
    return F.conv2d(imgs, _SRM_FILTERS.to(imgs.device), stride=1, padding=2)

    
class PSDetector(nn.Module):
    """Two-branch forgery detector: an RGB stream plus an SRM noise stream.

    The RGB backbone drives a torchvision-style RPN; the ROI heads consume
    features from both the RGB branch and a noise branch, which is a deep
    copy of ``backbone`` applied to SRM-filtered input.  Components follow
    torchvision's Faster R-CNN wiring (AnchorGenerator / RPNHead /
    RegionProposalNetwork / MultiScaleRoIAlign / GeneralizedRCNNTransform),
    with project-local TwoMLPHead, FastRCNNPredictor, BilinearPooling and
    RoIHeads from ``.layer``.
    """

    def __init__(self, backbone, out_channels=None, num_classes=None,
                 # transform parameters
                 min_size=200, max_size=500,
                 image_mean=None, image_std=None,
                 # RPN parameters
                 rpn_anchor_generator=None, rpn_head=None,
                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
                 rpn_nms_thresh=0.7,
                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
                 rpn_score_thresh=0.0,
                 # Box parameters
                 box_roi_pool=None, box_head=None, box_predictor=None,
                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
                 box_batch_size_per_image=512, box_positive_fraction=0.25,
                 bbox_reg_weights=None, box_roi_pool_output_size=7):
        super().__init__()

        # ---- RGB branch ----
        self.reg_backbone = backbone

        if rpn_anchor_generator is None:
            sizes = ((32,), (64,), (128,), (256,), (512,))
            ratios = ((0.5, 1.0, 2.0),) * len(sizes)
            rpn_anchor_generator = AnchorGenerator(sizes, ratios)
        if rpn_head is None:
            rpn_head = RPNHead(
                out_channels,
                rpn_anchor_generator.num_anchors_per_location()[0])

        self.rpn = RegionProposalNetwork(
            rpn_anchor_generator, rpn_head,
            rpn_fg_iou_thresh, rpn_bg_iou_thresh,
            rpn_batch_size_per_image, rpn_positive_fraction,
            dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test),
            dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test),
            rpn_nms_thresh,
            score_thresh=rpn_score_thresh)

        if box_roi_pool is None:
            box_roi_pool = MultiScaleRoIAlign(
                featmap_names=['0', '1', '2', '3'],
                output_size=box_roi_pool_output_size,
                sampling_ratio=2)
        if box_head is None:
            box_head = TwoMLPHead(
                out_channels * box_roi_pool.output_size[0] ** 2,
                1024)
        if box_predictor is None:
            box_predictor = FastRCNNPredictor(1024, num_classes)

        # TODO: 256 is hard-coded here; track down which config sets the
        # feature channel width and derive it from there instead.
        bili_pool = BilinearPooling(256 ** 2, 2)

        self.roi_heads = RoIHeads(
            # Box
            box_roi_pool, box_head, box_predictor, bili_pool,
            box_fg_iou_thresh, box_bg_iou_thresh,
            box_batch_size_per_image, box_positive_fraction,
            bbox_reg_weights,
            box_score_thresh, box_nms_thresh, box_detections_per_img)

        if image_mean is None:
            image_mean = [0.485, 0.456, 0.406]
        if image_std is None:
            image_std = [0.229, 0.224, 0.225]
        self.transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)

        # ---- noise branch ----
        self.srm = SRM
        self.noise_backbone = copy.deepcopy(backbone)

    def forward(self, images, targets=None):
        """Run both streams; return a loss dict in training mode,
        otherwise the detections from the ROI heads."""
        # Pre-transform sizes are recorded for parity with torchvision's
        # GeneralizedRCNN; NOTE(review): they are not currently used to
        # rescale boxes back to original resolution.
        original_image_sizes: List[Tuple[int, int]] = []
        for img in images:
            hw = img.shape[-2:]
            assert len(hw) == 2
            original_image_sizes.append((hw[0], hw[1]))

        images, targets = self.transform(images, targets)

        rgb_feats = self.reg_backbone(images.tensors)
        noise_feats = self.noise_backbone(self.srm(images.tensors))

        proposals, proposal_losses = self.rpn(images, rgb_feats, targets)
        reg_detections, detector_losses = self.roi_heads(
            rgb_feats, noise_feats, proposals, images.image_sizes, targets)

        if self.training:
            return {
                'proposal_losses': proposal_losses,
                'detector_losses': detector_losses
            }
        return reg_detections
         

class SAM(torch.optim.Optimizer):
    """Sharpness-Aware Minimization wrapper around a base optimizer.

    Each update does two passes: ``first_step`` perturbs the weights along
    the (normalized) gradient to ``w + e(w)``, the closure recomputes the
    gradient there, and ``second_step`` restores ``w`` and lets the base
    optimizer take the actual step.
    """

    def __init__(self, params, base_optimizer, rho=0.05, **kwargs):
        # rho is the radius of the perturbation neighborhood.
        assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"
        super(SAM, self).__init__(params, dict(rho=rho, **kwargs))
        # Wrap the caller-supplied optimizer class and share param groups
        # so rho lives next to the base optimizer's hyperparameters.
        self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
        self.param_groups = self.base_optimizer.param_groups

    @torch.no_grad()
    def first_step(self, zero_grad=False):
        norm = self._grad_norm()
        for group in self.param_groups:
            # Epsilon keeps the division finite when all gradients vanish.
            step_size = group["rho"] / (norm + 1e-12)
            for param in group["params"]:
                if param.grad is None:
                    continue
                perturb = param.grad * step_size.to(param)
                # Remember the perturbation so second_step can undo it.
                self.state[param]["e_w"] = perturb
                param.add_(perturb)  # ascend to the local maximum "w + e(w)"
        if zero_grad:
            self.zero_grad()

    @torch.no_grad()
    def second_step(self, zero_grad=False):
        for group in self.param_groups:
            for param in group["params"]:
                if param.grad is None:
                    continue
                param.sub_(self.state[param]["e_w"])  # back to "w" from "w + e(w)"
        self.base_optimizer.step()  # the actual "sharpness-aware" update
        if zero_grad:
            self.zero_grad()

    @torch.no_grad()
    def step(self, closure=None):
        assert closure is not None, "Sharpness Aware Minimization requires closure, but it was not provided"
        # The closure must run a full forward-backward pass, so re-enable grad.
        closure = torch.enable_grad()(closure)
        self.first_step(zero_grad=True)
        closure()
        self.second_step()

    def _grad_norm(self):
        """Global L2 norm over all parameter gradients."""
        # Gather per-parameter norms on one device in case the model is
        # split across several (model parallelism).
        device = self.param_groups[0]["params"][0].device
        per_param = [
            p.grad.norm(p=2).to(device)
            for group in self.param_groups
            for p in group["params"]
            if p.grad is not None
        ]
        return torch.norm(torch.stack(per_param), p=2)

class ClsEff(nn.Module):
    """EfficientNet-B4 image classifier trained with the SAM optimizer.

    Args:
        nums_cls: number of target classes.
        pretrained: load ImageNet weights for the EfficientNet-B4 trunk.
        b4out_channel: feature width produced by the trunk (torchvision's
            efficientnet_b4 ends in a 1000-way head, hence the default).
    """

    def __init__(self, nums_cls, pretrained=True, b4out_channel=1000) -> None:
        super().__init__()
        self.b4 = torchvision.models.efficientnet_b4(pretrained)
        self.cls = torch.nn.Linear(b4out_channel, nums_cls)
        self.loss = torch.nn.CrossEntropyLoss()
        # SAM needs two forward/backward passes per update; see training_step.
        self.optimizer = SAM(self.parameters(), torch.optim.SGD, lr=0.001, momentum=0.9)

    def forward(self, inputs, targets=None):
        """Return the mean cross-entropy loss when targets are given,
        otherwise class probabilities of shape (batch, nums_cls)."""
        logits = self.cls(self.b4(inputs))
        if targets is not None:
            # CrossEntropyLoss already reduces to a scalar (mean); the old
            # trailing .sum() was a no-op and has been dropped.
            return self.loss(logits, targets)
        # Explicit dim: relying on the implicit-dim softmax is deprecated.
        return F.softmax(logits, dim=-1)

    def training_step(self, x, target):
        """One SAM update (two forward/backward passes).

        Returns the class probabilities from the first pass.
        """
        pred_first = None
        for i in range(2):
            # BUG FIX: the loss must see raw logits.  The old code fed the
            # no-target forward output (softmax probabilities) into
            # CrossEntropyLoss, which applies log-softmax itself — a
            # double-softmax that flattens gradients.
            logits = self.cls(self.b4(x))
            if i == 0:
                pred_first = F.softmax(logits, dim=-1)
            loss = self.loss(logits, target)
            self.optimizer.zero_grad()
            loss.backward()
            if i == 0:
                self.optimizer.first_step(zero_grad=True)
            else:
                self.optimizer.second_step(zero_grad=True)

        return pred_first
