"""
测试编写代码有没有 bug
研究算法可以无视这个文件
write by qianqiainjun
"""

### DatasetUtil.py
# from PrepareData import PrepareData
#
# path="/home/qianqianjun/图片"
# Data=PrepareData(path,include_subdirectory=True)
# train_set,test_set=Data.train_test_split()
# Data.save_list(".",train_set,test_set)

### parameter.py
# from parameter import parameter as parm
#
# parm.setImageInfo()
# parm.setLantentInfo()


import torch
import torch.nn as nn
import numpy as np
from PIL import Image
path="/home/qianqianjun/CODE/DataSets/CelebA/Img/img_align_celeba/168835.jpg"
img=Image.open(path)

class Net(nn.Module):
    """Tiny two-stage conv/pool stack used to sanity-check tensor plumbing.

    Input:  (N, 3, H, W) float tensor.
    Output: (N, 20, H', W') feature map after two conv + maxpool stages.
    """

    def __init__(self):
        super(Net, self).__init__()
        stages = [
            nn.Conv2d(in_channels=3, out_channels=10, kernel_size=3, stride=1),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=10, out_channels=20, kernel_size=3, stride=1),
            nn.MaxPool2d(kernel_size=2),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x: torch.Tensor):
        """Run the conv stack on ``x`` and return the pooled feature map."""
        return self.net(x)

net = Net()

# Convert the PIL image to an NCHW float batch:
# HWC uint8 array -> (1, H, W, C) -> (1, C, H, W) float32.
arr = torch.tensor(np.array(img)).unsqueeze(0)
arr = arr.type(torch.float32)
arr = arr.permute(0, 3, 1, 2)

# BUG FIX: call parameters() on the instance, not as an unbound class method
# (`Net.parameters(net)` works by accident but is not idiomatic).
optimizer = torch.optim.Adam(net.parameters(), lr=0.002)

optimizer.zero_grad()
out = net(arr)

print(out.shape)
# NOTE(review): step() without a preceding loss.backward() applies no update
# (grads are zero after zero_grad()); kept as-is since this is a smoke test.
optimizer.step()


# 下面是 VGG19 的网络结构
from collections import OrderedDict
class VGG(nn.Module):
    """VGG19-style convolutional feature extractor.

    The layer stack mirrors VGG19's conv layers (named so a pretrained
    checkpoint can be mapped onto it), with two deviations from stock VGG19:
    an InstanceNorm2d after conv1_1 and a non-standard conv6 head.
    forward() returns the conv output flattened to (N, features).
    """
    def __init__(self):
        super(VGG, self).__init__()
        self.net=nn.Sequential(OrderedDict([
            ("conv1_1",nn.Conv2d(in_channels=3,out_channels=64,kernel_size=3,stride=1,padding=1)),
            ("norm1_1",nn.InstanceNorm2d(64)),
            ("relu1_1",nn.ReLU(inplace=True)),
            ("conv1_2",nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1)),
            ("relu1_2",nn.ReLU(inplace=True)),
            ("maxpool1",nn.MaxPool2d(kernel_size=2,stride=2)),

            ("conv2_1",nn.Conv2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1)),
            ("relu2_1",nn.ReLU(inplace=True)),
            ("conv2_2",nn.Conv2d(in_channels=128,out_channels=128,kernel_size=3,stride=1,padding=1)),
            ("relu2_2",nn.ReLU(inplace=True)),
            ("maxpool2",nn.MaxPool2d(kernel_size=2,stride=2)),

            ("conv3_1",nn.Conv2d(in_channels=128,out_channels=256,kernel_size=3,stride=1,padding=1)),
            ("relu3_1",nn.ReLU(inplace=True)),
            ("conv3_2",nn.Conv2d(in_channels=256,out_channels=256,kernel_size=3,stride=1,padding=1)),
            ("relu3_2",nn.ReLU(inplace=True)),
            ("conv3_3",nn.Conv2d(in_channels=256,out_channels=256,kernel_size=3,stride=1,padding=1)),
            ("relu3_3",nn.ReLU(True)),
            ("conv3_4", nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1)),
            ("relu3_4", nn.ReLU(True)),
            ("maxpool3",nn.MaxPool2d(kernel_size=2,stride=2)),

            ("conv4_1", nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1)),
            ("relu4_1", nn.ReLU(inplace=True)),
            ("conv4_2", nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)),
            ("relu4_2", nn.ReLU(inplace=True)),
            ("conv4_3", nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)),
            ("relu4_3", nn.ReLU(True)),
            ("conv4_4", nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)),
            ("relu4_4", nn.ReLU(True)),
            ("maxpool4", nn.MaxPool2d(kernel_size=2, stride=2)),

            ("conv5_1", nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)),
            ("relu5_1", nn.ReLU(inplace=True)),
            ("conv5_2", nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)),
            ("relu5_2", nn.ReLU(inplace=True)),
            ("conv5_3", nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)),
            ("relu5_3", nn.ReLU(True)),
            ("conv5_4", nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)),
            ("relu5_4", nn.ReLU(True)),
            ("maxpool5", nn.MaxPool2d(kernel_size=2, stride=2)),

            # Non-standard head: downsampling conv + valid conv, not in VGG19.
            ("conv6_1",nn.Conv2d(512,512,3,stride=2,padding=0)),
            ("relu6_1",nn.LeakyReLU(0.2,True)),
            ("conv6_2",nn.Conv2d(512,512,3,stride=1,padding=0)),
            ("relu6_2",nn.ReLU(True))
        ]))

    def forward(self, x):
        """Return the conv stack's output flattened to (N, features).

        BUG FIX: the original computed `out` and then `return x`, handing the
        untouched input back to the caller and discarding the whole network.
        """
        out=self.net(x)
        out=out.view(out.size(0),-1)
        return out

vgg = VGG()
# Path to a pretrained VGG19 checkpoint (a state-dict .pth file).
path = "/home/qianqianjun/CODE/DataSets/vgg19.pth"

load_dict = torch.load(path)
# Idiom fix: call state_dict() on the instance rather than through the class
# (`nn.Module.state_dict(vgg)` is the same call written unidiomatically).
state_dic = vgg.state_dict()

print(state_dic)
print(load_dict)
print("--------------------------------")

# Copy the first pretrained tensors into our state dict *by position*.
# NOTE(review): this relies on both dicts iterating in matching layer order,
# and the break fires before the entry at index 32 is copied (31 copies
# total) — confirm that offset is intentional.
step = 0
for i, j in zip(state_dic, load_dict):
    print(i)
    print(j)
    print(step)
    step += 1
    if step == 32:
        break
    state_dic[i] = load_dict[j]

# Idiom fix: instance call instead of nn.Module.load_state_dict(vgg, ...).
vgg.load_state_dict(state_dic)


### 测试 VggDAE 代码

###### 测试代码  ###################
from autoencoder.VggDAE import Encoders,Decoders
from autoencoder.parameter import parameter as parm
from PIL import Image
import cv2
import os
import numpy as np
path ="/home/qianqianjun/CODE/DataSets/DaeDatasets"
files=os.listdir(path)
batch=[]
for i in range(1):
    batch.append(cv2.resize(np.array(Image.open(os.path.join(path,files[0]))),(224,224)))
batch_data=torch.tensor(np.array(batch),dtype=torch.float32).permute(0,3,1,2)

"""
vgg=VggEncoder(parm)
conv3_1,conv4_1,conv5_1,out=vgg(batch_data)
flatten=VggFlattener(parm)
out2=flatten(out)
print(conv3_1.shape)
print(conv4_1.shape)
print(conv5_1.shape)
print(out.shape)
print(out2.shape)
"""

# Smoke-test the VggDAE encoder/decoder pair; requires a CUDA device.
encoder=Encoders(parm)
encoder=encoder.cuda()
batch_data=batch_data.cuda()

# Encoder returns three latents. Presumably z is the combined latent and
# zimg / zwarp are the appearance / warp components — TODO confirm in VggDAE.
z,zimg,zwarp=encoder(batch_data)
print(z.shape)
print(zimg.shape)
print(zwarp.shape)
# Drop references we no longer need so GPU memory is freed before the
# decoder is instantiated.
del encoder
del z
del zwarp

decoders=Decoders(parm)

decoders=decoders.cuda()

# Decode the appearance latent reshaped to (N, parm.idim, 1, 1).
out=decoders(zimg.view(-1,parm.idim,1,1))
print(out.shape)