import os

from torch import optim
from torchnet import meter
from tqdm import tqdm

from model.GoogLeNet import GoogLeNet
from model.GooleNet import GooLeNet
import torch
from torchvision import datasets,transforms
from torch.utils.data import DataLoader
from utils.visualize import Visualizer
from utils.config import opt

# step1: env
dtype = torch.FloatTensor  # NOTE(review): appears unused in this file — kept for backward compatibility
vis = Visualizer(opt.env)
device = ("cuda:0" if torch.cuda.is_available() else "cpu")

# step2: data
# (removed a duplicate `from utils.config import opt` — `opt` is already imported
# in the file's top-level import block)

# Grayscale 224x224 pipeline; mean/std presumably measured on this dataset —
# TODO confirm the normalization statistics against ./data/train.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.2458], std=[0.0612]),
])
train_data = datasets.ImageFolder('./data/train', transform=transform)




def train():
    """Train a 2-class GoogLeNet (with auxiliary classifiers) on `train_data`.

    Optionally loads pretrained weights from ``opt.load_model_path``, trains for
    ``opt.max_epoch`` epochs, logs smoothed loss and the confusion matrix via
    ``vis``, saves the model each epoch, and decays the learning rate whenever
    the epoch loss stops decreasing.

    Raises:
        FileNotFoundError: if ``opt.load_model_path`` is set but missing on disk.
    """
    # step3: model
    model = GoogLeNet(num_classes=2, aux_logits=True)
    if opt.load_model_path:
        if os.path.exists(opt.load_model_path):
            weights_dict = torch.load(opt.load_model_path, map_location=device)
            # keep only tensors whose element count matches the current model
            # (allows loading checkpoints with a different classifier head)
            load_weights_dict = {k: v for k, v in weights_dict.items()
                                 if model.state_dict()[k].numel() == v.numel()}
            print(model.load_state_dict(load_weights_dict, strict=False))
        else:
            raise FileNotFoundError("not found weights file: {}".format(opt.load_model_path))
    # FIX: the model must live on the same device as the input batches below,
    # otherwise training crashes as soon as CUDA is available.
    model = model.to(device)

    # step4: criterion and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    lr = opt.lr
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=opt.weight_decay)

    # step5: metrics — smoothed (averaged) loss and a 2-class confusion matrix
    loss_meter = meter.AverageValueMeter()
    confusion_matrix = meter.ConfusionMeter(2)
    previous_loss = 1e100

    # FIX: build the DataLoader once instead of re-creating it every epoch
    train_loader = DataLoader(dataset=train_data, batch_size=8, shuffle=True)

    # train
    for epoch in range(opt.max_epoch):
        # FIX: enable train mode each epoch — dropout/batchnorm need it, and
        # aux-logit GoogLeNet variants typically return only the main logits
        # in eval mode, which would break the 3-way unpack below.
        model.train()
        loss_meter.reset()
        confusion_matrix.reset()

        for ii, (data, label) in enumerate(tqdm(train_loader)):
            inputs = data.to(device)  # renamed from `input` to avoid shadowing the builtin
            target = label.to(device)
            optimizer.zero_grad()
            logits, aux_logits2, aux_logits1 = model(inputs)

            # main loss plus the two auxiliary heads, each weighted 0.3
            # (the weighting used in the original GoogLeNet paper)
            loss0 = criterion(logits, target)
            loss1 = criterion(aux_logits2, target)
            loss2 = criterion(aux_logits1, target)
            loss = loss0 + loss1 * 0.3 + loss2 * 0.3

            loss.backward()
            optimizer.step()

            loss_meter.add(loss.item())
            confusion_matrix.add(logits.detach(), target.detach())
            if (ii + 1) % opt.print_freq == 0:
                vis.plot("train-loss", loss_meter.value()[0])
        model.save()  # NOTE(review): assumes the project's GoogLeNet defines .save() — confirm
        vis.log("epoch:{epoch},lr:{lr},loss:{loss},train_cm:{train_cm}".format(
            epoch=epoch, lr=lr, loss=loss_meter.value()[0], train_cm=str(confusion_matrix.value())))

        # update learning rate: decay when the epoch loss stops decreasing
        if loss_meter.value()[0] > previous_loss:
            lr = lr * opt.lr_decay
            optimizer.param_groups[0]['lr'] = lr
        previous_loss = loss_meter.value()[0]

if __name__ == '__main__':
    # FIX: train() already iterates opt.max_epoch epochs internally; the old
    # outer `for epoch in range(100): train()` re-created a fresh model on every
    # iteration and restarted training from scratch 100 times, discarding all
    # progress each time.
    train()