from xtools.config import parser
from xtools.utils import paddle_collate
from xtools.data import NerData, get_tokenizer
from xtools.evaluate import decode_bio, SpanF1
from paddlenlp.transformers import LinearDecayWithWarmup
from xtools.models import CRFTokenClassification, get_optimizer
from tqdm import tqdm
import paddle
from xtools.utils import seed_everything

# ---- experiment setup ----------------------------------------------------
args = parser.parse_args()
seed_everything(args.seed)  # fix all RNG state for reproducibility

# Tokenizer + dataset wiring; NerData hands back one dataloader per split.
tokenizer = get_tokenizer(args.model_type)
data = NerData(args, tokenizer, paddle_collate)
loaders = data.get_dataloader()
train_data, dev_data = loaders['train'][0], loaders['dev'][0]

# The classification head needs the label-set size before construction.
args.n_class = data.n_class
model = CRFTokenClassification(args)

# Total number of optimizer steps over the whole run. The training loop
# below steps the optimizer once per accumulation window AND on the final
# (possibly partial) window of each epoch, so the per-epoch step count is
# ceil(batches / accumulate_grad_batches). Plain floor division undercounts
# t_total, which would let the LR scheduler decay to zero (and the progress
# bar run out) before training actually finishes.
steps_per_epoch = (len(train_data) + args.accumulate_grad_batches - 1) \
    // args.accumulate_grad_batches
t_total = steps_per_epoch * args.max_epochs

# Warmup: an explicit step count takes precedence; -1 means "unset", in
# which case warm up for a fixed proportion of the total schedule.
if args.warmup_steps != -1:
    warmup_steps = args.warmup_steps
else:
    warmup_steps = int(args.warmup_proportion * t_total)

scheduler = LinearDecayWithWarmup(args.lr, t_total, warmup_steps)
optimize = get_optimizer(model, scheduler, args)

# ---- training loop -------------------------------------------------------
progress_bar = tqdm(range(t_total))
global_step = 0
model.train()
metrics = SpanF1(id2label=data.id2label)

for epoch in range(args.max_epochs):
    for step, batch in enumerate(train_data):
        # train_step is assumed to compute the loss and run backward
        # internally, so gradients accumulate across batches until we step.
        # TODO(review): confirm train_step calls backward.
        loss = model.train_step(batch)

        # One optimizer step per accumulation window; also flush the final
        # (possibly partial) window at the end of each epoch.
        if (step + 1) % args.accumulate_grad_batches == 0 or step == len(train_data) - 1:
            optimize.step()
            scheduler.step()
            optimize.clear_grad()
            global_step += 1
            progress_bar.update(1)
            progress_bar.set_postfix(loss=loss.item())

            # Evaluate only right after an optimizer step. Previously this
            # check ran on every batch while global_step only changes here,
            # so a single global_step could fire several identical evals;
            # the condition was also duplicated inside itself (copy-paste).
            # Dropping the "+1" matches the post-increment global_step:
            # eval fires at eval_step, 2*eval_step, ... and at the very end.
            if epoch >= args.eval_start and (global_step % args.eval_step == 0
                                             or global_step == t_total):
                f1 = model.evaluate(dev_data, metrics)
                # TODO(review): f1 is currently unused — consider tracking
                # the best score / checkpointing here.
                model.train()  # evaluate() presumably flips to eval mode — restore

progress_bar.close()