import load_save

# Interactively choose a checkpoint epoch to resume from.
# load_checkpoint == 0 means "train from scratch, no checkpoint".
load_checkpoint = 0
while True:
    try:
        load_checkpoint = int(input("Input checkpoint epoch:"))
        if load_save.has_checkpoint(load_checkpoint):
            break
        print("no such checkpoint")
        load_checkpoint = 0
    except ValueError:
        # int() rejected the input; only catch that — a bare except here
        # would also swallow Ctrl-C (KeyboardInterrupt) during input().
        print("not valid integer.")
        load_checkpoint = 0
    finally:
        # Runs after every attempt (including the successful break above,
        # where load_checkpoint != 0 keeps this branch inert): offer to
        # start fresh whenever no valid checkpoint was selected.
        if load_checkpoint == 0 and input("Train without checkpoint?(y/n)") == 'y':
            print("initializing...")
            break

import datasets
from ClassifyModel import ClassifyModel
import configs, data

# Some `datasets` versions do not expose REPOCARD_FILENAME on
# datasets.config; backfill the attribute so later code can rely on it.
try:
    datasets.config.REPOCARD_FILENAME
except AttributeError:
    datasets.config.REPOCARD_FILENAME = 'README.md'

import torch
import torch.nn as nn
import transformers
from transformers import BertModel

print("[train.py] ready")

# init model and optimizer
bert_embed_model: transformers.BertModel = BertModel.from_pretrained(configs.PRETRAINED_BERT_PATH)
model = ClassifyModel(
    bert_embed_model,
    configs.lstm_hidden_size,
    configs.lstm_n_layers,
    configs.stat_n_features,
    configs.fusion_n_out,
    configs.classifier_dropout,
    configs.out_n_classes
)
if load_checkpoint != 0:
    load_save.load_from_state(model, load_checkpoint)
    print("loaded from checkpoint", load_checkpoint)
optimizer = torch.optim.AdamW(model.parameters(), lr=configs.learning_rate)
criterion = nn.CrossEntropyLoss()
print("[data.py:AnsDataset::__init__] inited model")

num_epochs = configs.max_epoch

# --- Training data ---
dataset = data.load()
dataloader = data.getLoader(dataset, configs.batch_size)

epoch = 1     # keeps a valid value if the epoch range below turns out empty
loss = None   # last computed batch loss (None until the first batch runs)
# Training Data Collection: Loss Record
loss_rec = []  # sampled loss history, one entry per loss_record_frequency batches
_count = 0     # running batch counter that drives the loss sampling
# Ceiling division: a trailing partial batch still counts as one batch.
num_batches = (len(dataset) + configs.batch_size - 1) // configs.batch_size
print("Batches Num approximately =", num_batches)
# Epoch numbering is 1-based and resumes just after the loaded checkpoint.
for epoch in range(load_checkpoint + 1, num_epochs + 1):
    for batch in dataloader:

        # unpack one mini-batch (layout fixed by data.getLoader)
        input_ids, attention_mask, stats, labels = batch[0], batch[1], batch[2], batch[3]

        # forward pass
        logits = model(input_ids, attention_mask, stats)
        loss = criterion(logits, labels)

        # Sample the loss every loss_record_frequency batches; the modulo
        # already cycles, so no manual counter reset is needed.
        if _count % configs.loss_record_frequency == 0:
            loss_rec.append(loss.item())
        _count += 1

        # backward pass + parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Periodic checkpoint plus a snapshot of the loss history so far.
    # (The `with` block closes the file; no explicit close() needed.)
    if epoch % configs.save_frequency == 0:
        load_save.save_state(model, epoch)
        with open("loss_rec%d.txt" % epoch, "w") as f:
            f.write(str(loss_rec))

    # Guard: loss stays None when the dataloader yields no batches,
    # which would otherwise crash on loss.item().
    if loss is not None:
        print(f'Epoch [{epoch}/{num_epochs}], Loss: {loss.item()}')

# Final save policy: 1 -> state dict only, 2 -> full save, otherwise skip.
_final_savers = {1: load_save.save_state, 2: load_save.save_all}
_saver = _final_savers.get(configs.save_after_train)
if _saver is not None:
    _saver(model, epoch)

# Persist the complete sampled loss history alongside the per-epoch dumps.
with open("loss_rec.txt", "w") as loss_file:
    loss_file.write(str(loss_rec))
