import cv2

# image_background = cv2.imread("监控点1_市港航局上游_市港航局上游_20220106121417_20220106133009_1585316.mp4_20220113_234921.104.jpg")
# print(image_background.shape)

# class VGG():
#     def __init__(self, features):
#         print("vgg_init")
#         print(features)
#
# def f(x):
#     print("f")
#     return x
#
# model = VGG(f(7))

# def m(x,y):
#     def n(i):
#         return i
#     return n(x) + n(y)
#
# print(m(8,9))

# def get_img_output_length(width, height):
#     def get_output_length(input_length):
#         # input_length += 6
#         filter_sizes = [2, 2, 2, 2, 2]
#         padding = [0, 0, 0, 0, 0]
#         stride = 2
#         for i in range(5):
#             input_length = (input_length + 2 * padding[i] - filter_sizes[i]) // stride + 1
#         return input_length
#     return get_output_length(width) * get_output_length(height)
#
# print(get_img_output_length(1280,720))

# import torch
#
# i = torch.nn.Linear(880, 512)
#
# x = torch.randn(1280, 880)
#
# print(i(x).shape)
#
# class VGG():
#     def __init__(self, features, num_classes=1000):
#         self.features = features
#         print(self.features)
#         print("--")
#
#
# v = VGG(8)
# print(v)

# class Sigmoid(Module):
#
#     def forward(self, input: Tensor) -> Tensor:
#         return torch.sigmoid(input)


import os
import random
from random import shuffle

import cv2
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset

# print(image)

def rand(a=0, b=1):
    """Return a uniform random float in the half-open interval [a, b)."""
    return a + (b - a) * np.random.rand()

def get_random_data(image, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5, flip_signal=False):
    """Apply random data augmentation to an RGB PIL image.

    Pipeline: random aspect-ratio/scale resize, optional horizontal flip,
    random placement on a white canvas of ``input_shape``, optional small
    rotation, then HSV color jitter.

    Args:
        image: input image (PIL.Image, assumed RGB — TODO confirm with callers).
        input_shape: (height, width) of the output canvas.
        jitter: maximum relative aspect-ratio distortion.
        hue: maximum hue shift, as a fraction of the full 360-degree hue circle.
        sat: maximum saturation gain (applied as gain or its reciprocal).
        val: maximum value/brightness gain (applied as gain or its reciprocal).
        flip_signal: when True, allow a 50% horizontal flip.

    Returns:
        Augmented image as a float32 numpy array, RGB order, values in [0, 255].
    """
    h, w = input_shape
    # ------------------------------------------#
    #   Random resize: jitter aspect ratio and scale.
    # ------------------------------------------#
    rand_jit1 = rand(1 - jitter, 1 + jitter)
    rand_jit2 = rand(1 - jitter, 1 + jitter)
    new_ar = w / h * rand_jit1 / rand_jit2

    scale = rand(0.75, 1.25)
    if new_ar < 1:
        nh = int(scale * h)
        nw = int(nh * new_ar)
    else:
        nw = int(scale * w)
        nh = int(nw / new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)  # bicubic interpolation

    # ------------------------------------------#
    #   Random horizontal flip (only when enabled).
    # ------------------------------------------#
    flip = rand() < .5  # flip with 50% probability
    if flip and flip_signal:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # ------------------------------------------#
    #   Place the resized image at a random offset on a white canvas.
    # ------------------------------------------#
    dx = int(rand(0, w - nw))
    dy = int(rand(0, h - nh))
    new_image = Image.new('RGB', (w, h), (255, 255, 255))

    new_image.paste(image, (dx, dy))
    image = new_image

    # ------------------------------------------#
    #   Random small rotation (50% probability, up to ±5 degrees).
    # ------------------------------------------#
    rotate = rand() < .5
    if rotate:
        angle = np.random.randint(-5, 5)
        a, b = w / 2, h / 2
        M = cv2.getRotationMatrix2D((a, b), angle, 1)
        # warpAffine returns a numpy array; white border matches the canvas.
        image = cv2.warpAffine(np.array(image), M, (w, h), borderValue=[255, 255, 255])

    # ------------------------------------------#
    #   HSV color jitter.
    #   NOTE: for float32 input, OpenCV's RGB2HSV yields H in [0, 360]
    #   and S, V in [0, 1].
    # ------------------------------------------#
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
    val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
    x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
    x[..., 0] += hue * 360
    # Wrap hue around the 360-degree circle.  The original wrapped at 1,
    # which is only correct for the [0, 1] hue range of matplotlib's
    # rgb_to_hsv, not OpenCV's [0, 360] float range.
    x[..., 0][x[..., 0] > 360] -= 360
    x[..., 0][x[..., 0] < 0] += 360
    x[..., 1] *= sat
    x[..., 2] *= val
    # Clamp all channels back into valid HSV range.
    x[x[:, :, 0] > 360, 0] = 360
    x[:, :, 1:][x[:, :, 1:] > 1] = 1
    x[x < 0] = 0
    image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) * 255

    return image_data


# img = Image.open("../datasets/images_background105/Alphabet_of_the_Magi/character01/0709_01.png")
# img = np.array(img)
# ishape = img.shape()

# img2 =  cv2.imread("../datasets/images_background/o/character02/1641663362630.jpg")
#
# img =  cv2.imread("../datasets/images_background105/Alphabet_of_the_Magi/character01/0709_01.png")
# ishape = img.shape
# ishape2= img2.shape
#
#
#
# import matplotlib.pyplot as plt
#
# plt.imshow(img)
# plt.show()
# img2 = get_random_data(img,[630,630])
#
# plt.show()

# img = cv2.imread("./监控点1_市港航局上游_市港航局上游_20220106121417_20220106133009_1585316.mp4_20220113_234921.104.jpg")
#
# image = Image.open("../datasets/images_background/24/监控点1_市港航局上游_市港航局上游_20220107110133_20220107121728_2002163.mp4_20220114_094843.039.jpg")
# print(image)

# -*- coding: utf-8 -*-
# import torch
# import torch.utils.data as Data
# torch.manual_seed(1)    # reproducible
# class TensorDataset(Data.Dataset):
#     """Dataset wrapping tensors.
#     Each sample will be retrieved by indexing tensors along the first dimension.
#     Arguments:
#         *tensors (Tensor): tensors that have the same size of the first dimension.
#     """
#     def __init__(self, *tensors):
#         assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
#         self.tensors = tensors
#     def __getitem__(self, index):
#         print("__+__")
#         print('idex',index)
#         a=tuple(tensor[index] for tensor in self.tensors)
#         print('a', a)
#         return tuple(tensor[index] for tensor in self.tensors)
#     def __len__(self):
#         return self.tensors[0].size(0)
# BATCH_SIZE = 5
# x = torch.linspace(1, 10, 10)       # this is x data (torch tensor)
# y = torch.linspace(10, 1, 10)       # this is y data (torch tensor)
# '''先转换成 torch 能识别的 Dataset'''
# torch_dataset =TensorDataset(x,y) #Data.TensorDataset(x, y)
# #print(torch_dataset[0])     #输出(tensor(1.), tensor(10.))
# #print(torch_dataset[1])     #输出(tensor(2.), tensor(9.))
# ''' 把 dataset 放入 DataLoader'''
# loader = Data.DataLoader(
#     dataset=torch_dataset,      # torch TensorDataset format
#     batch_size=BATCH_SIZE,      # mini batch size
#     pin_memory=True               # 要不要打乱数据 (打乱比较好)
#     #num_workers=2,              # subprocesses for loading data
# )
# for epoch in range(2):   # train entire dataset 3 times
#     for step, (batch_x, batch_y) in enumerate(loader):  # for each training step
#         # train your data...
#         print('ok')
#         print('Epoch: ', epoch, '| Step: ', step, '| batch x: ',
#              batch_x.numpy(), '| batch y: ', batch_y.numpy())

# def get_img_output_length(width, height):
#     def get_output_length(input_length):
#         # input_length += 6
#         filter_sizes = [2, 2, 2, 2, 2]
#         padding = [0, 0, 0, 0, 0]
#         stride = 2
#         for i in range(5):
#             input_length = (input_length + 2 * padding[i] - filter_sizes[i]) // stride + 1
#         return input_length
#     return get_output_length(width) * get_output_length(height)
#
# print(get_img_output_length(224,224))

# import torch
# state_dict = torch.load("../logs/ep120-loss0.214-val_loss1.640.pth", map_location=lambda storage, loc: storage)
# print(state_dict)

from torch import nn
import torch
class SPP(nn.Module):
    """Spatial Pyramid Pooling block (YOLOv3-SPP style).

    Applies three parallel max-poolings (kernel sizes 5, 7, 13; stride 1;
    'same' padding) and concatenates their outputs with the input along the
    channel axis, quadrupling the channel count while preserving the
    spatial size.
    """

    def __init__(self):
        super(SPP, self).__init__()
        # padding = kernel // 2 keeps the spatial resolution at stride 1.
        self.pool1 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
        self.pool2 = nn.MaxPool2d(kernel_size=7, stride=1, padding=3)
        self.pool3 = nn.MaxPool2d(kernel_size=13, stride=1, padding=6)

    def forward(self, x):
        pooled = [pool(x) for pool in (self.pool1, self.pool2, self.pool3)]
        return torch.cat([x] + pooled, dim=1)

import math
import torch.nn.functional as F
class SPPLayer(nn.Module):
    """Classic SPP layer: pool over an i x i grid for each pyramid level.

    Level i (1-based, up to ``num_levels``) partitions the feature map into
    an i x i grid and pools each cell with max or average pooling; the
    flattened outputs of all levels are concatenated per sample into a
    fixed-length vector.
    """

    def __init__(self, num_levels, pool_type='max_pool'):
        super(SPPLayer, self).__init__()

        self.num_levels = num_levels
        self.pool_type = pool_type  # 'max_pool' selects max; anything else -> average

    def forward(self, x):
        batch, _, h, w = x.size()
        flat_parts = []
        for level in range(1, self.num_levels + 1):
            # Kernel (== stride) that tiles the map into a level x level grid;
            # padding compensates when h or w is not divisible by level.
            kernel = (math.ceil(h / level), math.ceil(w / level))
            pad = (math.floor((kernel[0] * level - h + 1) / 2),
                   math.floor((kernel[1] * level - w + 1) / 2))

            if self.pool_type == 'max_pool':
                pooled = F.max_pool2d(x, kernel_size=kernel, stride=kernel, padding=pad)
            else:
                pooled = F.avg_pool2d(x, kernel_size=kernel, stride=kernel, padding=pad)

            flat_parts.append(pooled.view(batch, -1))

        return torch.cat(flat_parts, 1)

if __name__ == "__main__":
    # Smoke test: three-level SPP on a random (2, 512, 10, 13) feature map.
    # Guarded so importing this module no longer runs the test as a side effect.
    x = torch.rand((2, 512, 10, 13))
    f = SPPLayer(3)
    print(f(x).shape)