

import torch
import matplotlib.pylab as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os

import stylegan2



# Checkpoint locations: cached latents/labels and the pre-trained StyleGAN2 generator.
input_path = 'checkpoint'
latents_path = os.path.join(input_path, 'latents.pth')
generator_path = os.path.join(input_path, 'Gs.pth')
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Set according to your hardware resources (GPU memory).
batch_size = 16

# NOTE(review): defined but never used below — presumably intended for reproducibility.
seed = 0



# Load the cached latent codes, labels and tag names produced by an earlier step.
state = torch.load(latents_path, map_location=device)

qlatents_data = state['qlatents_data']  # not used anywhere below in this file
dlatents_data = state['dlatents_data']  # latents the probe trains on; reshaped to 16*512 later
labels_data = state['labels_data']      # per-sample tag scores, one column per entry of `tags`
tags = state['tags']                    # tag names; indexed via np.where(tags == tag)

G = stylegan2.models.load(generator_path).to(device)

dlatents_data=dlatents_data.to(device=device, dtype=torch.float32)
labels_data=labels_data.to(device=device, dtype=torch.float32)
print("dlatents_data.size()",dlatents_data.size())
print("labels_data.size()",labels_data.size())

# Pair each latent with its label vector, then split 70/20/10 train/valid/test.
zipped = list(zip(dlatents_data, labels_data))

train_size = int(0.7 * len(zipped))
valid_size = int(len(zipped) * 0.2)
test_size = len(zipped) - train_size - valid_size

train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(zipped, [train_size, valid_size, test_size])

# The reference code's num_workers=4 errored; adjust to your environment.
# Note: `test` is kept as a raw dataset (not a DataLoader) — it is iterated sample-by-sample.
datasets = dict(
    train=torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0),
    valid=torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, num_workers=0),
    test=test_dataset,
)




class Net(nn.Module):
    """Single-output linear probe over a flattened (16 * 512,) dlatent vector.

    Produces one score per sample in (0, 1) via a Linear layer followed by
    LeakyReLU and Sigmoid.  NOTE(review): LeakyReLU immediately before Sigmoid
    is unusual, but it is the original architecture and is kept as-is.
    """

    def __init__(self):
        super().__init__()
        # Linear -> LeakyReLU(0.2) -> Sigmoid, applied in sequence.
        self.main = nn.Sequential(
            nn.Linear(16 * 512, 1),
            nn.LeakyReLU(0.2),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Map a (N, 16 * 512) batch to per-sample scores of shape (N, 1)."""
        scores = self.main(x)
        return scores




def train_coeff(tag, total=5):
    """Train a `Net` probe to predict `tag` from flattened dlatents.

    Runs `total` epochs of train/valid phases over the global `datasets`
    loaders and returns the probe's linear-layer weight (the edit direction)
    from the epoch with the lowest validation loss.

    Args:
        tag: tag name; must appear exactly once in the global `tags` array.
        total: number of epochs to train (default 5).

    Returns:
        Detached CPU tensor of shape (16, 512).
    """
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    criterion = nn.BCELoss()
    # Column of this tag in the label matrix; raises if the tag is missing
    # or duplicated (unpacking a 1-element match is intentional).
    [tag_index], = np.where(tags == tag)

    best_val_loss = float('inf')
    best_weight = None
    for epoch in range(1, total + 1):
        for phase in ['train', 'valid']:
            dataset = datasets[phase]
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            running_loss = 0.0
            n_samples = 0

            for dlatents, labels in dataset:
                optimizer.zero_grad()  # zero the gradient buffers
                with torch.set_grad_enabled(phase == 'train'):
                    inputs = dlatents.reshape(-1, 16 * 512).to(device)
                    output = model(inputs)
                    # Vectorized target extraction — replaces the original
                    # per-sample torch.cat loop with a single column slice.
                    targets = labels[:, tag_index].reshape(-1, 1).to(device)

                    loss = criterion(output, targets)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()  # Does the update
                # statistics (BCELoss reduces by mean, so scale back up)
                running_loss += loss.item() * inputs.size(0)
                n_samples += inputs.size(0)

            # BUG FIX: divide by the actual sample count. The original used
            # len(dataset) * batch_size, which over-counts when the final
            # batch is smaller than batch_size.
            epoch_loss = running_loss / max(n_samples, 1)
            print(f'Epoch:{epoch}/{total}, {phase} Loss: {epoch_loss:.4f}')

            # Keep the weights from the best validation epoch.
            if phase == 'valid' and epoch_loss < best_val_loss:
                best_val_loss = epoch_loss
                # BUG FIX: state_dict() tensors alias the live parameters, so
                # without .clone() the "best" weight silently became whatever
                # the weights were after the LAST epoch.
                best_weight = model.state_dict()['main.0.weight'].detach().clone()
                print(f" the best val is:{epoch_loss}")

    return best_weight.cpu().reshape(16, 512)





def generate_image(dlatents, pixel_min=-1, pixel_max=1):
    """Render `dlatents` through the global generator G as an HWC image.

    Rescales the generator output from [pixel_min, pixel_max] to [0, 1]
    (skipped when the range is already [0, 1]), clamps, and returns a
    detached CPU tensor of shape (512, 512, 3).
    """
    img = G(dlatents=dlatents)
    # Rescale unless the generator already emits values in [0, 1].
    if not (pixel_min == 0 and pixel_max == 1):
        img = (img - pixel_min) / (pixel_max - pixel_min)
    img = img.clamp(min=0, max=1)
    return img.detach().cpu().reshape(3, 512, 512).permute(1, 2, 0)




def move_and_show(latent_vector, direction, coeffs):
    """Generate one edited image per coefficient in `coeffs`.

    For each coefficient c, entries [:8] along dim 0 of a clone of
    `latent_vector` are replaced with (latent_vector + c * direction)[:8]
    and the result is rendered via generate_image.

    Returns:
        List of HWC image tensors, one per coefficient.
    """
    images = []
    for coeff in coeffs:
        edited = latent_vector.clone()
        # Shift the latent along `direction`; only the [:8] slice is edited.
        edited[:8] = (latent_vector + coeff * direction)[:8]
        images.append(generate_image(edited))
    return images


"core"


def move_and_show_samples(direction, direction_name, sample=3,
                          coeffs=(-10, -5, -2, 0, 2, 5, 10)):
    """Visualize a latent `direction` on `sample` test-set images and save a PNG.

    Each row shows one test latent edited with every coefficient in `coeffs`,
    stacked horizontally. The figure is saved to ./edit/.

    Args:
        direction: edit-direction tensor (as returned by train_coeff).
        direction_name: tag name used in the title and output filename.
        sample: number of test-set entries to render (must be >= 2, since
            plt.subplots with one row returns a bare Axes, not an array).
        coeffs: edit strengths. BUG FIX: the original default was the mutable
            list [-10, -5, 2, 0, 2, 5, 10] — the duplicated 2 where -2 was
            clearly intended breaks the symmetric sweep; also replaced the
            mutable default with a tuple.
    """
    fig, ax = plt.subplots(sample, 1, figsize=(50, 50), dpi=80)
    direction = direction.to(device)  # hoisted out of the loop (loop-invariant)
    for i, (latents, labels) in enumerate(list(datasets['test'])[:sample]):
        inputs = latents.clone().reshape(1, 16, 512)
        img_list = move_and_show(inputs, direction, coeffs)
        ax[i].imshow(np.hstack(img_list))

    plt.suptitle(f'Edit: {direction_name}', size=16)
    for axis in ax:
        axis.axis('off')  # hide axes/gridlines
    plt.tight_layout()  # let the image grid fill the figure
    save_folder = './edit/'
    os.makedirs(save_folder, exist_ok=True)
    plt.savefig(f"{save_folder}/{direction_name}combine_{sample}.png")



if __name__ == '__main__':

    result = {}
    '''
    训练迭代,一般3次就够了
    '''
    # (Above: "training iterations — 3 is usually enough.")
    # 1 = train directions from scratch and save; 0 = load previously saved ones.
    flag_train=1
    direction_save_path = f'checkpoint/direction.pth'

    if flag_train:
        picked_tags=['black_hair','pink_hair','open_mouth','brown_hair']
        # filter out the real tags
        picked_tags = [tag for tag in picked_tags if tag in tags]
        print(picked_tags)
        for tag in picked_tags:
            print(f'training {tag}')
            result[tag] = train_coeff(tag, 3)


        '''
        保存所有维度
        '''
        # (Above: "save all trained directions.")
        torch.save(result, direction_save_path)

    else:
        result = torch.load(direction_save_path, map_location=device)
        '''
        可视化编辑结果
        '''
        # (Above: "visualize the edit results.")
    # Runs after BOTH branches: render sample edits for every direction in `result`.
    for name in result.keys():
        move_and_show_samples(result[name],name,sample=5)




    # NOTE(review): the triple-quoted block below is commented-out experimentation
    # code (training many color/component tag combinations and character tags),
    # kept verbatim for reference.
    '''
    ## Let's pick some tags and train it!
    
    colors = ['aqua', 'black', 'blue', 'brown', 'green', 'grey', 'lavender', 'light_brown', 'multicolored', 'orange',
              'pink', 'purple', 'red', 'silver', 'white', 'yellow']
    switches = ['open', 'closed', 'covered']
    
    # generate composition of elements
    components = ['eyes', 'hair', 'mouth']
    
    picked_tags = []
    for component in components:
        picked_tags = picked_tags + [f'{color}_{component}' for color in colors]
        picked_tags = picked_tags + [f'{switch}_{component}' for switch in switches]
    
    # filter out the real tags
    picked_tags = [tag for tag in picked_tags if tag in tags]
    print(picked_tags)
    
    
    
    ## Train all these tags!
    for tag in picked_tags:
        print(f'training {tag}')
        result[tag] = train_coeff(tag, 3)
    
    
    
    # try some of them
    move_and_show_samples(result['open_mouth'])
    
    
    
    # play a bit more, training charater specify encoder?
    
    charas = ['hakurei_reimu', 'kirisame_marisa']
    
    
    
    # Let's check out how many samples we got
    for chara in charas:
        [chara_index], = np.where(tags == chara)
        count = [x[chara_index] for x in labels_data if x[chara_index] > 0.5]
        print(f'{chara}: {len(count)}, {(len(count) / len(labels_data)) * 100}%')
        result[chara] = train_coeff(chara, 3)
    
    
    
    # too rare, properly don't work
    move_and_show_samples(result['hakurei_reimu'])
    
    
    
    move_and_show_samples(result['kirisame_marisa'])
    
    
    
    # store the result
    torch.save(result, 'checkpoint/directions.pth')
    '''