# -*- coding: utf-8 -*-
import argparse
from fastNLP import FitlogCallback, RichCallback, prepare_torch_dataloader

from models.bart.generater import SequenceGeneratorModel

from fastNLP import SortedSampler
from models.bart.callbacks import WarmupCallback
from models.bart.sampler import ConstantTokenNumSampler, BucketSampler
from fastNLP import (
    TorchGradClipCallback as GradientClipCallback,
    cache_results,
    EarlyStopCallback,
)
from torch import optim
from pathes import *

# from models.bart.losses import Seq2SeqLoss
from models.bart.metrics import Seq2SeqSpanMetric
from fastNLP import Trainer
import fitlog
from models.bart.bart import BartSeq2SeqModel
from data.bart_pipe import BartNERPipe

# import warnings
import torch

# import os
import sys

sys.path.append("../")
# if 'p' in os.environ:
#     os.environ['CUDA_VISIBLE_DEVICES'] = os.environ['p']
#     # os.environ['CUDA_VISIBLE_DEVICES'] = '7'

# Report GPU visibility up front so a misconfigured CUDA setup shows in the log.
print(torch.cuda.is_available())
print(torch.cuda.device_count())

# fitlog.debug()  # uncomment to disable fitlog tracking while debugging
# All experiment logs/hyper-parameters go under this fitlog directory.
fitlog.set_log_dir("bartner/")

# CLI exposes only the dataset and the target device; every other
# hyper-parameter is hard-coded per dataset below.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--dataset_name", type=str, default="conll2003")
arg_parser.add_argument("--device", type=str, default="cuda:1")
args = arg_parser.parse_args()

dataset_name, device = args.dataset_name, args.device

# Default hyper-parameters, attached to the parsed namespace so that the
# fitlog.add_hyper loop below records them all in one place.
args.length_penalty = 1
args.save_model = 0

# target_type choices:
#   word:     generate the start BPE of each word
#   bpe:      generate every BPE
#   span:     generate start/end per span
#   span_bpe: generate all BPEs of the start token and of the end token
args.target_type = "word"

args.bart_name = "facebook/bart-large"
args.schedule = "linear"
# args.decoder_type = "avg_feature"
args.decoder_type = "none"

args.n_epochs = 30
args.batch_size = 16
args.num_beams = 1
args.use_encoder_mlp = 1
args.lr = 1e-5
args.warmup_ratio = 0.01

# First epoch at which evaluation kicks in (plain variable, not logged).
eval_start_epoch = 15

# Per-dataset hyper-parameter presets (tuned for target_type="word" unless a
# preset overrides target_type).  F1 notes are from the original runs.
# `max_len`/`max_len_a` and `eval_start_epoch` are plain variables; everything
# else is written onto `args` so it gets logged as a hyper-parameter.
_PRESETS = {
    # three runs get 93.18/93.18/93.36 F1
    "conll2003": dict(max_len=10, max_len_a=0.6, batch_size=32),
    # three runs get 90.46/90.4/90.52 F1
    "en-ontonotes": dict(max_len=10, max_len_a=0.8, batch_size=16, lr=5e-6),
    "CADEC": dict(
        max_len=10, max_len_a=1.6, num_beams=4, lr=2e-5, n_epochs=30,
        eval_start_epoch=10,
    ),
    "Share_2013": dict(
        max_len=10, max_len_a=0.6, use_encoder_mlp=0, num_beams=4, lr=2e-5,
        eval_start_epoch=5,
    ),
    "Share_2014": dict(
        max_len=10, max_len_a=0.6, num_beams=4, eval_start_epoch=5, n_epochs=30,
    ),
    # three runs: 79.29/79.13/78.75
    "genia": dict(
        max_len=10, max_len_a=0.5, target_type="span", lr=2e-5, warmup_ratio=0.01,
    ),
    # four runs: 86.84/86.33/87/87.17
    "en_ace04": dict(max_len=50, max_len_a=1.1, lr=4e-5),
    # three runs: 85.39/84.54/84.75
    "en_ace05": dict(
        max_len=50, max_len_a=0.7, lr=3e-5, batch_size=12, num_beams=4,
        warmup_ratio=0.1,
    ),
    # Chinese corpora share one preset and a Chinese BART checkpoint.
    "weibo": dict(
        bart_name="fnlp/bart-base-chinese", max_len=10, max_len_a=0.7, lr=1e-6,
        batch_size=16, num_beams=1, warmup_ratio=0.1,
    ),
}
_PRESETS["msra"] = dict(_PRESETS["weibo"])

if dataset_name in _PRESETS:
    _cfg = dict(_PRESETS[dataset_name])
    # As in the original chain, unknown datasets leave max_len/max_len_a unset.
    max_len = _cfg.pop("max_len")
    max_len_a = _cfg.pop("max_len_a")
    eval_start_epoch = _cfg.pop("eval_start_epoch", eval_start_epoch)
    for _key, _value in _cfg.items():
        setattr(args, _key, _value)

# Pull frequently used hyper-parameters into plain locals.  `save_model` is
# read first and then removed from the namespace so it is not logged below.
save_model = args.save_model
del args.save_model

lr, n_epochs = args.lr, args.n_epochs
batch_size, num_beams = args.batch_size, args.num_beams
length_penalty = args.length_penalty

# The CLI-side sentinel "none" means: no extra decoder feature fusion.
if isinstance(args.decoder_type, str) and args.decoder_type.lower() == "none":
    args.decoder_type = None
decoder_type = args.decoder_type

target_type, bart_name = args.target_type, args.bart_name
schedule = args.schedule
use_encoder_mlp = args.use_encoder_mlp

# print(args)
# fitlog.add_hyper(args)


# Log every hyper-parameter individually; fitlog may reject values of
# unsupported types, in which case the failure is reported and skipped.
for hyper_name, hyper_value in args.__dict__.items():
    try:
        fitlog.add_hyper(hyper_value, name=hyper_name)
    except Exception as exc:
        print(exc, hyper_name, hyper_value)


# Cached preprocessing: the processed bundle is serialized per
# (model, dataset, target_type) combination so repeated runs skip tokenization.
demo = False
_suffix = "_demo" if demo else ""
cache_fn = f"caches/data_{bart_name}_{dataset_name}_{target_type}{_suffix}.pt"


@cache_results(cache_fn, _refresh=False)
def get_data():
    """Build and preprocess the NER data bundle for ``dataset_name``.

    Returns:
        tuple: ``(data_bundle, tokenizer, mapping2id)`` as produced by
        ``BartNERPipe`` (``mapping2id`` maps label strings to token ids).
    """
    pipe = BartNERPipe(
        tokenizer=bart_name, dataset_name=dataset_name, target_type=target_type
    )
    # The Chinese corpora ship as single-path resources and take no demo flag.
    if dataset_name == "weibo":
        bundle = pipe.process_from_file(weibo_ner_path)
    elif dataset_name == "msra":
        bundle = pipe.process_from_file(msra_paths)
    elif dataset_name == "conll2003":
        bundle = pipe.process_from_file(conll03_paths, demo=demo)
    elif dataset_name == "en-ontonotes":
        bundle = pipe.process_from_file(en_ontonotes_paths, demo=demo)
    else:
        # Remaining corpora are expected under a conventional on-disk layout.
        bundle = pipe.process_from_file(
            f"~/datasets/NER/{dataset_name}", demo=demo
        )
    return bundle, pipe.tokenizer, pipe.mapping2id


data_bundle, tokenizer, mapping2id = get_data()

train_data = data_bundle.get_dataset("train")


# Fix: the original f-string printed the literal "18,724" where the actual
# max_len value was clearly intended.
print(f"max_len_a:{max_len_a}, max_len:{max_len}")
print(data_bundle)
print("The number of tokens in tokenizer ", tokenizer.vocab_size)

# bos_token_id = 0
# eos_token_id = 1

bos_token_id = tokenizer.bos_token_id
# NOTE(review): EOS is taken from the *pad* id, and the same id is reused as
# pad_token_id for generation below — presumably the pipe terminates targets
# with the pad id; confirm against BartNERPipe's target encoding.
eos_token_id = tokenizer.pad_token_id


# Ids of the label tokens, in the order given by the pipe's mapping; the model
# needs them to translate generated indices back into entity labels.
label_ids = list(mapping2id.values())

seq2seq = BartSeq2SeqModel.build_model(
    bart_name,
    tokenizer,
    label_ids=label_ids,
    decoder_type=decoder_type,
    use_encoder_mlp=use_encoder_mlp,
)

# Sanity check: the decoder embedding table must cover the full vocabulary.
vocab_size = len(tokenizer)
print(vocab_size, seq2seq.decoder.decoder.embed_tokens.weight.data.size(0))

# Wrap the seq2seq model with greedy/beam-search generation for inference.
model = SequenceGeneratorModel(
    seq2seq,
    bos_token_id=bos_token_id,
    eos_token_id=eos_token_id,
    max_length=max_len,
    max_len_a=max_len_a,
    num_beams=num_beams,
    do_sample=False,
    repetition_penalty=1,
    length_penalty=length_penalty,
    pad_token_id=eos_token_id,
    restricter=None,
)

def _is_backbone(name):
    # Parameters belonging to the pretrained BART encoder/decoder stacks.
    return "bart_encoder" in name or "bart_decoder" in name


def _is_norm(name):
    # Both spellings occur depending on the transformers version.
    return "layernorm" in name or "layer_norm" in name


# Three optimizer groups, all at the same learning rate:
#   1. newly added (non-BART) parameters      — weight decay 1e-2
#   2. BART weights excluding layer norms     — weight decay 1e-2
#   3. BART layer-norm weights                — no weight decay
_named = list(model.named_parameters())
parameters = [
    {
        "lr": lr,
        "weight_decay": 1e-2,
        "params": [p for n, p in _named if not _is_backbone(n)],
    },
    {
        "lr": lr,
        "weight_decay": 1e-2,
        "params": [p for n, p in _named if _is_backbone(n) and not _is_norm(n)],
    },
    {
        "lr": lr,
        "weight_decay": 0,
        "params": [p for n, p in _named if _is_backbone(n) and _is_norm(n)],
    },
]

optimizer = optim.AdamW(parameters)

# Progress printing, early stopping on the span-F1 metric ("f#f"),
# value-based gradient clipping, and linear lr warmup.
callbacks = [
    RichCallback(print_every=30),
    EarlyStopCallback(monitor="f#f"),
    GradientClipCallback(clip_value=5, clip_type="value"),
    WarmupCallback(warmup=args.warmup_ratio, schedule=schedule),
]

# if dataset_name not in ("conll2003", "genia"):
# callbacks.append(
#     FitlogCallback(
#         data_bundle.get_dataset("test"),
#         raise_threshold=0.04,
#         eval_begin_epoch=eval_start_epoch,
#     )
# )  # an F1 below 0.04 almost certainly means the run has gone wrong
# eval_dataset = data_bundle.get_dataset("valid")
if dataset_name == "genia":
    # genia ships without a dev split: carve one out of the training set by
    # taking every 4th sample until 1669 samples are collected; the remainder
    # is written back into the bundle as the new "train" set.
    dev_indices = []
    tr_indices = []
    for i in range(len(data_bundle.get_dataset("train"))):
        if i % 4 == 0 and len(dev_indices) < 1669:
            dev_indices.append(i)
        else:
            tr_indices.append(i)
    # NOTE(review): eval_dataset is never referenced afterwards — the Trainer
    # below evaluates dataloaders["dev"]; confirm the carved split actually
    # reaches evaluation for genia.
    eval_dataset = data_bundle.get_dataset("train")[dev_indices]
    data_bundle.set_dataset(data_bundle.get_dataset("train")[tr_indices], name="train")
    print(data_bundle)
    callbacks.append(
        FitlogCallback(
            data_bundle.get_dataset("test"),
            raise_threshold=0.04,
            eval_begin_epoch=eval_start_epoch,
        )
    )  # an F1 below 0.04 almost certainly means the run has gone wrong
    fitlog.add_other(name="demo", value="split dev")
else:
    callbacks.append(FitlogCallback(monitor="f#f"))

sampler = None


train_dataset = data_bundle.get_dataset("train")

# Pick a batch sampler: token-budget sampling (ConstantTokenNumSampler) keeps
# GPU memory bounded for corpora with long inputs; everything else gets
# length-bucketed batches.
#
# Fix: the Share_2013 branch used to be a separate `if` statement, so the
# ConstantTokenNumSampler it chose was immediately overwritten by the trailing
# `else: BucketSampler(...)` of the second chain.  Chaining everything with
# `elif` preserves the Share_2013 choice; all other datasets are unaffected.
if dataset_name in ("Share_2013",):
    if target_type == "bpe":
        sampler = ConstantTokenNumSampler("src_seq_len", max_token=3500)
    else:
        sampler = ConstantTokenNumSampler("src_seq_len", max_token=4000)
elif dataset_name in ("en_ace04",) and target_type == "bpe":
    sampler = ConstantTokenNumSampler(
        "src_seq_len", max_sentence=batch_size, max_token=2500
    )
elif "large" in bart_name and dataset_name in ("en-ontonotes", "genia"):
    # NOTE(review): unlike the branches above, this one passes the raw length
    # list rather than a field name — confirm ConstantTokenNumSampler accepts
    # both call forms.
    sampler = ConstantTokenNumSampler(
        train_dataset.get_field("src_seq_len").content, max_token=3000
    )
else:
    sampler = BucketSampler(seq_len_field_name="src_seq_len")

# Span-level F1 metric registered under the key "f"; early stopping and
# fitlog monitor it as "f#f".
metric = {
    "f": Seq2SeqSpanMetric(
        eos_token_id, num_labels=len(label_ids), target_type=target_type
    )
}


save_path = "save_models/" if save_model == 1 else None
validate_every = 100000

# One dataloader per split in the bundle.
dataloaders = prepare_torch_dataloader(
    data_bundle, batch_size=batch_size, num_workers=4
)

# NOTE(review): `sampler` is handed to the Trainer, but the dataloaders were
# already built above without it — verify the sampler is actually applied.
trainer = Trainer(
    model=model,
    train_dataloader=dataloaders["train"],
    evaluate_dataloaders=dataloaders["dev"],
    optimizers=optimizer,
    # device=[3, 4, 5],  # a list would enable multi-GPU training
    device=device,
    n_epochs=n_epochs,
    metrics=metric,
    callbacks=callbacks,
    sampler=sampler,
)

trainer.run()
