import argparse

import rocketqa

# Load (and immediately discard) the Chinese DuReader dual-encoder —
# presumably to pre-download/cache the model weights; the instance itself
# is not used. CPU-only (use_cuda=False), so device_id is effectively inert.
de_conf = dict(
    model="zh_dureader_de_v2",
    use_cuda=False,
    device_id=0,
    batch_size=32,
)
dual_encoder = rocketqa.load_model(**de_conf)
del dual_encoder  # release the model right away; only the load side effect matters

# Same pattern for the Chinese DuReader cross-encoder: load once to trigger
# the download/cache side effect, then drop the instance. CPU-only.
ce_conf = dict(
    model="zh_dureader_ce_v2",
    use_cuda=False,
    device_id=0,
    batch_size=32,
)
cross_encoder = rocketqa.load_model(**ce_conf)
del cross_encoder  # release immediately; the instance is never used


""" 
import transformers

model_args = argparse.Namespace(
    do_mlm=None,
    pooler_type="cls",
    temp=0.05,
    mlp_only_train=False,
    init_embeddings_model=None,
    device="cpu",
)
transformers.AutoTokenizer.from_pretrained(
    "silk-road/luotuo-bert", trust_remote_code=True, model_args=model_args
)
transformers.AutoModel.from_pretrained("silk-road/luotuo-bert", trust_remote_code=True, model_args=model_args)

for model_name in [
    "GanymedeNil/text2vec-large-chinese",
    "THUDM/chatglm-6b-int4",
]:
    transformers.AutoTokenizer.from_pretrained(
        model_name, trust_remote_code=True
    )
    transformers.AutoModel.from_pretrained(model_name, trust_remote_code=True)

# download nltk data
import nltk

nltk.download("punkt")
nltk.download("averaged_perceptron_tagger")
nltk.download("cmudict")
"""