from xtools.config import parser
from xtools.utils import default_collate
from xtools.data import NerData, get_tokenizer
from xtools.evaluate import decode_bio, SpanF1
from xtools.models import TokenClassification, get_optimizer
from transformers import get_linear_schedule_with_warmup
from tqdm import tqdm
import torch

# Build the tokenizer, dataset, dataloaders, and model from CLI arguments.
args = parser.parse_args()
tokenizer = get_tokenizer(args.model_type)
data = NerData(args, tokenizer, default_collate)

# Each dataloaders entry is indexed with [0] — presumably the split maps to a
# list whose first element is the loader itself (verify against NerData).
dataloaders = data.get_dataloader()
train_data, dev_data = dataloaders['train'][0], dataloaders['dev'][0]

# The model needs the label-set size before it can be constructed.
args.n_class = data.n_class
model = TokenClassification(args)

# Total optimizer steps: one per full accumulation window, for every epoch.
t_total = len(train_data) // args.accumulate_grad_batches * args.max_epochs

# An explicit warmup step count (any value other than -1) takes precedence
# over the proportional warmup derived from the total step budget.
warmup_steps = (args.warmup_steps
                if args.warmup_steps != -1
                else int(args.warmup_proportion * t_total))

optimizer = get_optimizer(model, args.lr, args.weight_decay)

# Linear warmup to the peak LR, then linear decay to zero over t_total steps.
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=warmup_steps,
    num_training_steps=t_total,
)

# Put the model on the target device in training mode, and prepare the
# span-level F1 metric plus a progress bar over all optimizer steps.
model.to(args.device)
model.train()

metrics = SpanF1(id2label=data.id2label)
progress_bar = tqdm(range(t_total))
global_step = 0

for epoch in range(args.max_epochs):
    for step, batch in enumerate(train_data):
        batch = {k: v.to(args.device) for k, v in batch.items()}
        # Scale the loss so gradients accumulated over several micro-batches
        # average to one effective batch before the optimizer step.
        loss = model.train_step(batch) / args.accumulate_grad_batches
        loss.backward()

        # Step the optimizer after every full accumulation window, and also on
        # the final (possibly partial) window of the epoch.
        if (step + 1) % args.accumulate_grad_batches == 0 or step == len(train_data) - 1:
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            global_step += 1
            progress_bar.update(1)
            # Report the unscaled loss of the last micro-batch; the original
            # displayed the value already divided by accumulate_grad_batches.
            progress_bar.set_postfix(loss=loss.item() * args.accumulate_grad_batches)

            # FIX: evaluate once per optimizer step.  The original checked
            # (global_step + 1) % eval_step on *every* micro-batch, so the
            # same global_step triggered repeated evaluations between
            # optimizer updates.
            if epoch >= args.eval_start and (global_step % args.eval_step == 0
                                             or global_step == t_total):
                model.eval()
                for dev_batch in dev_data:
                    dev_batch = {k: v.to(args.device) for k, v in dev_batch.items()}
                    with torch.no_grad():
                        out = model(**dev_batch)
                    yt = dev_batch['labels'].cpu().numpy()
                    yp = out.argmax(-1).cpu().numpy()
                    seq_len = dev_batch['attention_mask'].sum(-1).cpu().numpy()
                    metrics.update(yt, yp, seq_len)

                f1 = metrics.accumulate()
                metrics.reset()
                print(f1)
                # FIX: restore training mode — the original left the model in
                # eval mode (dropout/batchnorm frozen) for the rest of training
                # after the first evaluation.
                model.train()
