import torch
from googleNet import googleNetDIY
from class8dataset import CLASS_NAMES,cancer5000Dataset,getTrainTestDataset
import time
from pathlib import Path
import os
import platform
import numpy as np
from multiprocessing import cpu_count
import torchvision
from torch.utils.data import DataLoader
from typing import Tuple

# configuration
# Prefer CUDA when available; everything (model, loss, batches) is moved here.
CONFIG_DEVICE: torch.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

DATA_ROOT_FOLDER = 'E:/JohnsonProj/Kather_texture_2016_image_tiles_5000'

WEIGHTS_SAVE_DIR: str = './weights5000-'

# Timestamp suffix gives each run its own checkpoint directory.
suffix: str = time.strftime('%m-%d+%H-%M-%S', time.localtime(time.time()))
WEIGHTS_SAVE_DIR += suffix
# exist_ok avoids the check-then-create race of the old is_dir()/mkdir pair.
Path(WEIGHTS_SAVE_DIR).mkdir(exist_ok=True)

# DataLoader workers: 0 on Windows (spawn-based multiprocessing issues),
# otherwise leave two cores free and cap at 10.
CONFIG_NUM_WORKERS = 0 if platform.system() == 'Windows' else min(max(cpu_count() - 2, 0), 10)

BATCH_SIZE: int = 3

# When True, every loop below breaks after its first batch/epoch.
DEBUG_MODE = True

# How many progress printouts to emit per training epoch.
SAMPLES_PER_EPOCH = 3

# Checkpoint to warm-start from; set to None to train from scratch.
# Raw string so the backslash cannot silently become an escape sequence.
REFERENCE_MODEL_PATH = r'E:\JohnsonProj/vggClassification/weights5000-11-17+22-59-57/googLeNet_epoch0_10000.pth'

print('-----------configuration-----------')
print('Device:', CONFIG_DEVICE)
print('Workers number:', CONFIG_NUM_WORKERS)
print('Reference model: ', end='')
if REFERENCE_MODEL_PATH is None:
    print('x')
else:
    print('√')
print('-----------------------------------')

# neural networks
# Custom GoogLeNet with both auxiliary classifiers enabled.
model = googleNetDIY(len(CLASS_NAMES), aux_logits=True, init_weights=True)
if REFERENCE_MODEL_PATH is not None:
    # Load on CPU first; the whole model is moved to the target device below.
    state_dict = torch.load(REFERENCE_MODEL_PATH, map_location='cpu')
    model.load_state_dict(state_dict)
    print('pre-trained model loading succeed√')
model = model.to(CONFIG_DEVICE)

# loss functions
CE_loss_func = torch.nn.CrossEntropyLoss().to(CONFIG_DEVICE)
optimizer = torch.optim.Adam(model.parameters())

# dataset & loader

# Per-channel normalization statistics (presumably precomputed over this
# dataset's training images — TODO confirm against the stats script).
means = (0.5843984484672546, 0.4723328649997711, 0.6498987674713135,)
stds = (0.267836341731424, 0.3274180198302301, 0.25589914959136034,)

# PIL image -> float tensor in [0, 1] -> normalized tensor.
preprocess = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(means, stds),
])

train_set: cancer5000Dataset
val_set: cancer5000Dataset
train_set, val_set = getTrainTestDataset(DATA_ROOT_FOLDER, raw_transform=preprocess)

# Shuffle only the training split; workers per the platform-aware config.
train_loader = DataLoader(train_set, BATCH_SIZE, shuffle=True, num_workers=CONFIG_NUM_WORKERS)
val_loader = DataLoader(val_set, BATCH_SIZE, shuffle=False, num_workers=CONFIG_NUM_WORKERS)
print('数据集:', len(val_loader.dataset))


def train_iteration(model: googleNetDIY,
        optimizer: torch.optim.Adam,
        raw_imgs: torch.Tensor,
        labels: torch.Tensor) -> Tuple[float, float, float, float]:
    """Run one gradient-descent step on a single mini-batch.

    Uses the module-level ``CE_loss_func`` and ``CONFIG_DEVICE``. The model
    must return ``(main, aux1, aux2)`` logits in training mode; the two
    auxiliary losses are weighted by 0.3 as in the GoogLeNet paper.

    Returns:
        ``(total, main, aux1, aux2)`` loss values as plain floats.
    """
    # Ensure training mode so the auxiliary heads are active.
    if not model.training:
        model.train()
    raw_imgs, labels = raw_imgs.to(CONFIG_DEVICE), labels.to(CONFIG_DEVICE)
    optimizer.zero_grad()

    # forward
    main_pred: torch.Tensor
    aux1_pred: torch.Tensor
    aux2_pred: torch.Tensor
    main_pred, aux1_pred, aux2_pred = model(raw_imgs)

    # calculate loss
    main_loss: torch.Tensor = CE_loss_func(main_pred, labels)
    aux1_loss: torch.Tensor = CE_loss_func(aux1_pred, labels)
    aux2_loss: torch.Tensor = CE_loss_func(aux2_pred, labels)

    # Auxiliary classifiers contribute with a 0.3 discount.
    total_loss: torch.Tensor = main_loss + 0.3 * aux1_loss + 0.3 * aux2_loss

    # backward & update
    total_loss.backward()
    optimizer.step()

    return (total_loss.item(), main_loss.item(), aux1_loss.item(), aux2_loss.item(),)

def validate(model: googleNetDIY, data_loader: DataLoader) -> float:
    """Compute top-1 accuracy of ``model`` over ``data_loader``.

    Switches the model to eval mode (auxiliary heads off, so the forward
    pass returns a single logits tensor) and runs without gradients.
    When the module-level ``DEBUG_MODE`` is set, only the first batch is
    scored and per-batch tensors are printed.

    Returns:
        Fraction of correctly classified samples in [0, 1]; 0.0 if the
        loader yielded no samples.
    """
    if model.training:
        model.eval()

    total_count = 0
    hit_count = 0
    with torch.no_grad():
        for raw_imgs, labels in data_loader:
            raw_imgs: torch.Tensor
            labels: torch.Tensor
            raw_imgs, labels = raw_imgs.to(CONFIG_DEVICE), labels.to(CONFIG_DEVICE)

            probability: torch.Tensor = model(raw_imgs)
            # Predicted class = arg-max over the last (class) dimension.
            pred: torch.Tensor = torch.argmax(probability, dim=-1)
            assert pred.shape == labels.shape
            hit_count += torch.sum(pred == labels).item()
            total_count += len(labels)
            if DEBUG_MODE:
                print('原始输出:', probability)
                print('预测:', pred)
                print('实际:', labels)
                break
    # Guard against an empty loader instead of dividing by zero.
    return hit_count / total_count if total_count else 0.0

if __name__ == '__main__':
    model.train()
    # Print progress roughly SAMPLES_PER_EPOCH times per epoch.
    modulus: int = int(np.ceil(len(train_loader) / SAMPLES_PER_EPOCH))

    for epoch in range(20):
        # Sample-weighted running sums: [total, main, aux1, aux2].
        loss_sums = [0.0, 0.0, 0.0, 0.0]
        count_imgs = 0
        print('-----------epoch {}-----------'.format(epoch))

        # ---- train ----
        print('<=========train========>')
        for i, (raw_imgs, labels) in enumerate(train_loader):
            raw_imgs: torch.Tensor
            labels: torch.Tensor
            batch_count = raw_imgs.shape[0]
            batch_losses = train_iteration(model, optimizer, raw_imgs, labels)
            count_imgs += batch_count
            # Weight each batch's losses by its size for the epoch average.
            for idx, value in enumerate(batch_losses):
                loss_sums[idx] += value * batch_count
            if i % modulus == 0:
                print('progress:', i, ' losses:', *batch_losses)
            if DEBUG_MODE:
                break
        total_loss, main_loss, aux1_loss, aux2_loss = (s / count_imgs for s in loss_sums)
        print('training losses:', total_loss, main_loss, aux1_loss, aux2_loss)

        # ---- validate ----
        print('<==========validate=========>')
        accuracy = validate(model, val_loader)
        print('score:', accuracy)
        # Checkpoint name encodes the epoch and accuracy*10000.
        checkpoint_name = 'googLeNet_epoch{}_{:04d}.pth'.format(epoch, int(accuracy * 10000))
        torch.save(model.state_dict(), os.path.join(WEIGHTS_SAVE_DIR, checkpoint_name))
        if DEBUG_MODE:
            break