from utils.Dataset import FaceEmbed
from torch.utils.data import DataLoader
import torch.optim as optim
import visdom
import os
import torch
from network.sexual import sexdiscrimination
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import time
from torch.utils.data import random_split, Subset

# visualize
# Visdom dashboard for live training curves; requires a visdom server
# (`python -m visdom.server`) listening on localhost:8097, env 'shibie'.
vis = visdom.Visdom(server='localhost', env='shibie', port=8097)
# Seed a line plot with a single (0, 0) point so later calls could append
# to `win` (the append call in the training loop is currently commented out).
x=0
y=0
win=vis.line(np.array([y]),np.array([x]))
# vis = visdom.Visdom(server='localhost', port=8097)


# %% setting
# train setting
batch_size = 16    # samples per mini-batch (dataloaders use drop_last=True)
lr = 4e-4          # Adam learning rate
max_epoch = 2000   # upper bound on training epochs
begin_epoch = 0    # starting epoch index (useful when resuming)
n_gpus = 1         # number of GPUs handed to DataParallel

save_per_epoch = 2  # save a model checkpoint every N epochs
save_result_per_iter = 2  # bookkeeping knob passed along via record_config

# device used
# device = torch.device('cuda')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# %% file path

base_root = './data'
# Pretrained ArcFace backbone weights (relative to the working directory).
model_path = os.path.join('../Arcface_100.pth')
# Root directory of the labelled face-image dataset.
data_path = '/home/zsy/Desktop/sex/data/img_bust/img_bust2'

save_model_path = os.path.join(base_root, "model")      # checkpoint directory
save_loss_path = os.path.join(base_root, "loss_data")   # loss-curve dumps

# Create output directories up front. exist_ok=True avoids the
# check-then-create race of the original os.path.exists + os.makedirs pair.
os.makedirs(save_model_path, exist_ok=True)
os.makedirs(save_loss_path, exist_ok=True)

# record config: bundles bookkeeping knobs/paths handed to training helpers.
record_config = dict()
# record_config['visdom'] = vis
record_config['save_result_per_iter'] = save_result_per_iter
record_config['save_model_path'] = save_model_path
record_config['save_loss_path'] = save_loss_path


# model
# Gender classifier built on a pretrained ArcFace backbone
# (loaded from model_path inside sexdiscrimination).
arcface = sexdiscrimination(model_path).to(device)

if torch.cuda.device_count() > 1:
    # Multi-GPU: replicate the model across the first n_gpus devices.
    arcface = nn.DataParallel(arcface, device_ids=list(range(n_gpus)))  # load model to every GPU
    arcface.cuda()
else:
    # Use whatever `device` resolved to. The original unconditional
    # arcface.cuda() here crashed on CPU-only machines even though
    # `device` was computed with a CPU fallback.
    arcface = arcface.to(device)

arcface.train()  # training mode (enables dropout / batch-norm updates)


# load dataset
dataset = FaceEmbed([data_path])

dataSet_length = len(dataset)

# 90/10 train/test split. Computing the test size as the remainder guarantees
# the two parts sum to len(dataset); the original int(0.9*n) + int(0.1*n)
# can fall one short (e.g. n=15 -> 13 + 1 = 14), which makes
# random_split raise a ValueError.
train_size = int(0.9 * dataSet_length)
test_size = dataSet_length - train_size
train_dataset, test_dataset = random_split(dataset, [train_size, test_size])

train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=16, drop_last=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=16, drop_last=True)


# optimizer: Adam over all classifier parameters.
opt = optim.Adam(arcface.parameters(), lr=lr)


# Trainer=tainer(arcface)

## compute loss: cross-entropy between predicted class logits and integer labels
criterion=torch.nn.CrossEntropyLoss()

## train and test
for epoch in range(begin_epoch, max_epoch):

    # ---- train ----
    arcface.train()
    correct_counts = []   # per-batch count of correct predictions
    loss_history = []     # per-batch loss values (floats)
    for iteam, data in enumerate(train_dataloader):
        opt.zero_grad()
        print(epoch, "epoch", iteam, '/', len(train_dataloader), 'train')
        Xs = data[0].to(device)
        labels = data[1].to(device)
        # Resize images to the 112x112 input the ArcFace backbone expects.
        logits = arcface(F.interpolate(Xs, (112, 112), mode='bilinear', align_corners=True))

        # view(-1) instead of view(batch_size): correct for any batch size.
        loss = criterion(logits, labels.view(-1))
        loss.backward()
        opt.step()

        loss_value = loss.detach().cpu().item()
        loss_history.append(loss_value)
        print(" loss:  ", loss_value)

        # Vectorized accuracy count (replaces the per-element Python loop).
        preds = logits.argmax(dim=1)
        correct_counts.append((preds == labels.view(-1)).sum().item())

        # Report running accuracy/loss every 100 iterations, then reset.
        if iteam % 100 == 0 and iteam != 0:
            accuracy = sum(correct_counts) / (len(correct_counts) * batch_size)
            print(iteam - 100, "-", iteam, "train accuracy:    ", accuracy)
            print(iteam - 100, "-", iteam, "loss:    ", sum(loss_history) / len(loss_history))
            correct_counts = []
            loss_history = []

    # ---- test ----
    arcface.eval()
    correct_counts = []
    with torch.no_grad():  # no gradients needed during evaluation
        for iteam, data in enumerate(test_dataloader):
            print(epoch, iteam, '/', len(test_dataloader), 'test')
            Xs = data[0].to(device)
            labels = data[1].to(device)
            logits = arcface(F.interpolate(Xs, (112, 112), mode='bilinear', align_corners=True))

            preds = logits.argmax(dim=1)
            z = (preds == labels.view(-1)).sum().item()
            correct_counts.append(z)
            print(z, "/", batch_size)

    # Guard against an empty test loader: drop_last=True can discard every
    # sample when the test split is smaller than one batch.
    total = batch_size * len(correct_counts)
    accuracy = sum(correct_counts) / total if total else 0.0
    print("test accuracy:    ", sum(correct_counts), "/", total, "==", "{}".format(accuracy))

    # Checkpoint every `save_per_epoch` epochs. The original condition
    # `epoch / save_per_epoch == 0` was true only for epoch 0, so a single
    # checkpoint was ever written across the whole run.
    if epoch % save_per_epoch == 0:
        # NOTE(review): ':' in the filename is fine on Linux but invalid on
        # Windows — confirm the deployment target before porting.
        name = time.strftime('%m%d_%H:%M:%S.pth')
        save_path = os.path.join(save_model_path, str(epoch) + '_{}_'.format(accuracy) + name)
        torch.save(arcface.state_dict(), save_path)