import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from configs import DEVICE, EPOCHS, LEARNING_RATE, TRAIN_BATCH_SIZE, IMG_SIZE, IMG_CHANNEL, PATCH_SIZE, EMB_DIM, HEAD_NUM, MLP_RATIO, DEPTH, CLASS_NUM, MOMENTUM, GAMMA, STEP_SIZE, SAVE_STEP, LEARNING_RATE_F, HEAD_DIM
from model import ViTPred, MyViTPred
from dataloader import prepare_train_data
import os
import math

def train_model(model, device, train_loader, optimizer, epoch):
    """Run one training epoch over ``train_loader``.

    Args:
        model: network to train; put into train mode here.
        device: torch device each batch is moved to.
        train_loader: iterable yielding ``(data, label)`` batches.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch number (1-based), used only for logging.
    """
    model.train()
    for batch_index, (data, label) in enumerate(train_loader):
        data, label = data.to(device), label.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, label)
        loss.backward()
        optimizer.step()
        # Batch 0 satisfies `0 % 3000 == 0`, so every epoch logs at least once.
        if batch_index % 3000 == 0:
            # Fixed the stray ']' in the original format string; .item()
            # extracts the Python float from the 0-dim loss tensor so the
            # graph is not kept alive by the log line.
            print('Epoch[{}/{}], Loss: {:.4f}'.format(epoch, EPOCHS, loss.item()))

def train():
    """Build the ViT classifier, train it for EPOCHS epochs, and checkpoint.

    Uses SGD with momentum and a cosine learning-rate schedule that decays
    the LR multiplier from 1.0 down to LEARNING_RATE_F over EPOCHS epochs.
    A checkpoint of ``model.state_dict()`` is written to ``./ckpt/ViT<epoch>.pth``
    every SAVE_STEP epochs.
    """
    # NOTE(review): the stock ViTPred variant was left here as an alternative;
    # swap the two lines to use it instead of the custom implementation.
    #model = ViTPred(img_size=IMG_SIZE, img_channel=IMG_CHANNEL, patch_size=PATCH_SIZE, emb_dim=EMB_DIM, batch_size=TRAIN_BATCH_SIZE, head_num=HEAD_NUM, mlp_ratio=MLP_RATIO, depth=DEPTH,class_num=CLASS_NUM).to(DEVICE)
    model = MyViTPred(img_size=IMG_SIZE, img_channel=IMG_CHANNEL, patch_size=PATCH_SIZE, emb_dim=EMB_DIM, batch_size=TRAIN_BATCH_SIZE, head_num=HEAD_NUM, mlp_ratio=MLP_RATIO, depth=DEPTH, class_num=CLASS_NUM, head_dim=HEAD_DIM).to(DEVICE)

    optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=MOMENTUM)
    # Cosine annealing of the LR *multiplier*: 1.0 at epoch 0 down to
    # LEARNING_RATE_F at epoch EPOCHS (LambdaLR multiplies the base LR by this).
    lf = lambda x: ((1 + math.cos(x * math.pi / EPOCHS)) / 2) * (1 - LEARNING_RATE_F) + LEARNING_RATE_F  # cosine
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

    train_loader = prepare_train_data()
    for epoch in range(1, EPOCHS + 1):
        train_model(model, DEVICE, train_loader, optimizer, epoch)
        if epoch % SAVE_STEP == 0:
            # makedirs(exist_ok=True) is atomic w.r.t. the existence check,
            # unlike the original exists()/mkdir() pair (TOCTOU race).
            os.makedirs('./ckpt/', exist_ok=True)
            torch.save(model.state_dict(), './ckpt/ViT' + str(epoch) + '.pth')
        # Step the scheduler once per epoch, after the epoch's training.
        scheduler.step()

# Guard the entry point so importing this module does not start training.
if __name__ == '__main__':
    train()