import torch
import torch.nn as nn
import numpy as np
import os
import cv2
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
from python_ai.CV_4.single_respect.darknet19.darknet19_pytorch_master.my_try_pretrained_darknet19 import MyDarknet19

# WEIGHT_PATH = '../../../../../large_data/model/darknet19/darknet19-deepBakSu-e1b3ec1e.pth'
#
# cfg = [32,'M',64,'M',128,64,128,'M',256,128,256,'M',512,256,512,256,512,'M',
#        1024,512,1024,512,1024]
#
# def make_layers(cfg,in_channels=3,batch_norm = True):
#     layers = []
#     in_channels = in_channels
#     flag = True
#     for v in cfg:
#         if v=='M':
#             layers.append(nn.MaxPool2d((2,2),2))
#         else:
#             layers.append(nn.Conv2d(in_channels=in_channels,
#                                     out_channels=v,
#                                     kernel_size=(1,3)[flag],
#                                     stride=1,
#                                     padding=(0,1)[flag],
#                                     bias=False))
#             if batch_norm:
#                 layers.append(nn.BatchNorm2d(v))
#             layers.append(nn.LeakyReLU(0.1,inplace=True))
#             in_channels = v
#         flag = not flag
#     return nn.Sequential(*layers)

# class DrakNet19(nn.Module):
#     def __init__(self,num_classes=1000,in_channels=3,batch_norm = True):
#         super(DrakNet19, self).__init__()
#         self.features = make_layers(cfg,in_channels,batch_norm)
#         self.classifier = nn.Sequential(
#             nn.Conv2d(1024,num_classes,1),
#             nn.AdaptiveAvgPool2d((1,1)),
#             nn.Softmax(dim=0)
#         )
#         self.load_weight()
#     def forward(self,inputs):
#         x = self.features(inputs)
#         x = self.classifier(x)
#         x = torch.squeeze(x)
#         return x
#     def load_weight(self):
#         # weight_file = './weights/darknet19-deepBakSu-e1b3ec1e.pth'
#         weight_file = WEIGHT_PATH
#         dic = {}
#         for now_keys,values in zip(self.state_dict().keys(),torch.load(weight_file).values()):
#             dic[now_keys] = values
#         self.load_state_dict(dic)

# class Conv_BN_LeakyRelu(nn.Module):
#     def __init__(self,in_channels,out_channels,kernel_size,stride=1,padding=0,dilation=1):
#         super(Conv_BN_LeakyRelu, self).__init__()
#         self.conv = nn.Sequential(
#             nn.Conv2d(in_channels,out_channels,kernel_size,stride,padding,dilation),
#             nn.BatchNorm2d(out_channels),
#             nn.LeakyReLU(0.1,inplace=True)
#         )
#     def forward(self,inputs):
#         x = self.conv(inputs)
#         return x

class CNN(nn.Module):
    """Classification head applied on top of Darknet-19 features.

    A 1x1 convolution maps the backbone's final feature channels to
    ``num_classes`` logits, followed by global average pooling.

    Args:
        num_classes: number of output classes (logit channels).
    """

    def __init__(self, num_classes=1000):
        super(CNN, self).__init__()
        # Query the backbone's last conv layer for its output channel count
        # so this head stays in sync with the feature extractor.
        darknet_out_channels = MyDarknet19(in_ch=3, n_cls=1000, is_pretrained=False).features[22].conv.out_channels
        self.conv = nn.Conv2d(darknet_out_channels, num_classes, 1)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, inputs):
        """Map a (N, C, H, W) feature batch to (N, num_classes) logits."""
        x = self.conv(inputs)
        x = self.avgpool(x)  # -> (N, num_classes, 1, 1)
        # Squeeze only the spatial dims: a bare torch.squeeze() would also
        # drop the batch dimension when N == 1, yielding a 1-D tensor that
        # breaks CrossEntropyLoss / argmax(dim=1) downstream.
        return x.squeeze(-1).squeeze(-1)

def readDatas(path):
    """Load an image-classification dataset laid out as path/<class>/<image>.

    Each subdirectory of ``path`` is one class; its index in sorted order
    becomes the integer label. Images are scaled to [0, 1] and resized to
    112x112.

    Args:
        path: root directory containing one subdirectory per class.

    Returns:
        (images, labels): float ndarray of shape (N, 112, 112, 3) and an
        int ndarray of shape (N,).
    """
    img_list = []
    label_list = []
    # sorted() makes the label index assigned to each class deterministic;
    # os.listdir order is otherwise filesystem-dependent.
    for i, label_name in enumerate(sorted(os.listdir(path))):
        label_path = os.path.join(path, label_name)
        for img_name in sorted(os.listdir(label_path)):
            img_path = os.path.join(label_path, img_name)
            img = cv2.imread(img_path)
            if img is None:
                # cv2.imread returns None for unreadable/non-image files;
                # skip them instead of crashing on the arithmetic below.
                continue
            img = cv2.resize(img / 255., (112, 112))
            img_list.append(img)
            label_list.append(i)
    return np.array(img_list), np.array(label_list)

# --- Data preparation (runs at import time) ---
# NOTE(review): this executes on import, not only under __main__ — presumably
# intentional for this experiment script.
DATA_DIR = '../../../../../large_data/DL1/_many_files/zoo'

img_list, label_list = readDatas(DATA_DIR)
# NHWC -> NCHW. The previous transpose(1, 3) also swapped H and W,
# transposing every image; permute keeps the spatial orientation intact.
# (torch.autograd.Variable is a deprecated no-op and has been dropped.)
img_list = torch.Tensor(img_list).permute(0, 3, 1, 2).contiguous()
label_list = torch.LongTensor(label_list)

# 80% train / 10% validation / 10% test.
x_train, x_test, y_train, y_test = train_test_split(img_list, label_list, train_size=0.8)
x_val, x_test, y_val, y_test = train_test_split(x_test, y_test, train_size=0.5)

if __name__ == '__main__':
    # Pretrained Darknet-19 backbone used as a frozen feature extractor.
    net = MyDarknet19(in_ch=3, n_cls=1000, is_pretrained=True).features
    # eval() makes BatchNorm use its pretrained running statistics; in train
    # mode it would use per-batch stats and distort the extracted features.
    net.eval()
    # Extract features once, without building an autograd graph. The original
    # code kept the graph alive across epochs, so the second cost.backward()
    # would fail with "Trying to backward through the graph a second time"
    # (and would pointlessly backprop into the frozen backbone).
    with torch.no_grad():
        x_train = net(x_train)
        x_val = net(x_val)
        x_test = net(x_test)

    model = CNN(2)  # binary classification head on top of the features
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    for epoch in range(10):
        # Full-batch gradient step on the head only.
        optimizer.zero_grad()
        pred_train = model(x_train)
        cost = loss(pred_train, y_train)
        cost.backward()
        optimizer.step()

        acc_train = (torch.argmax(pred_train, 1) == y_train).float().mean()
        # Validation pass needs no gradient bookkeeping.
        with torch.no_grad():
            pred_val = model(x_val)
        acc_val = (torch.argmax(pred_val, 1) == y_val).float().mean()
        print(f'迭代次数:{epoch + 1},训练集损失值:{cost:.3f},训练集准确率:{acc_train:.3f},验证集准确率:{acc_val:.3f}')

    with torch.no_grad():
        pred_test = model(x_test)
    acc_test = (torch.argmax(pred_test, 1) == y_test).float().mean()
    print(f'测试集准确率:{acc_test:.3f}')
