#encoding:utf-8

import json
import torch
from BertChineseTextClassificationPytorch.models.bert import Model
import torch.nn.functional as F
import numpy as np

# Newline-separated class-name file: one label per line, line index == label id.
filep = 'BertChineseTextClassificationPytorch/japanData/class.txt'
# NOTE(review): paths below are not referenced in this chunk — presumably loaded
# elsewhere; confirm before removing.
MODEL_PATH_OTHER = './BertChineseTextClassificationPytorch/multi/'
MODEL_PATH ='./BertChineseTextClassificationPytorch/saved_dict/bert.ckpt'

def get_label_dict(path=None):
    '''
    Build the id -> label-name mapping from a class file.

    The file is expected to contain one label per line; the label's id is
    its zero-based line index.

    Args:
        path: optional path to the class file; defaults to the module-level
              ``filep`` so existing callers are unaffected.

    Returns:
        dict mapping int label id to stripped label string.
    '''
    with open(path or filep, 'r', encoding='utf-8') as fr:
        # enumerate replaces the manual counter of the original loop
        return {i: line.strip() for i, line in enumerate(fr)}
def adaptor_embed(model, sign):
    """Fetch the BERT word-embedding matrix from *model*.

    Args:
        model: a model exposing ``bert.embeddings.word_embeddings.weight``.
        sign: 'value' for the weight tensor's data, 'grad' for its gradient.

    Returns:
        The requested tensor, or None for an unrecognized ``sign``.
    """
    # Hoist the deep attribute chain so each branch stays short.
    weight = model.bert.embeddings.word_embeddings.weight
    if sign == 'value':
        return weight.data
    if sign == 'grad':
        return weight.grad
    # Unknown sign: fall through to an implicit None, same as the original.
    return None

def adaptor_model(text, model, tokenizer, device, label=None):
    """Tokenize *text*, run *model*, and return either a prediction or a loss.

    Args:
        text: input string (or batch of strings) for the tokenizer.
        model: the project's BERT classifier; expects a ``context`` list of
               [input_ids, token_type_ids, attention_mask] (+ labels when
               training) — TODO confirm against Model.forward.
        tokenizer: a HuggingFace-style tokenizer returning a BatchEncoding.
        device: torch device the tensors are moved to.
        label: optional gold label(s); when given, a loss is returned instead
               of a prediction.

    Returns:
        If ``label`` is None: (input_ids tensor, [predicted label id]).
        Otherwise: the cross-entropy loss tensor.
    """
    _input = tokenizer(
                  text = text,
                  add_special_tokens=True,
                  return_tensors="pt"
             )
    # NOTE(review): BatchEncoding.to moves its tensors in place and returns
    # self, so discarding the return value here appears intentional — confirm.
    _input.to(device=device)
   
    # Positional ordering matters: the model indexes into this list.
    context = [_input['input_ids'],_input['token_type_ids'],_input['attention_mask']]

    if label is None:
        # Inference path: no gradients needed.
        with torch.no_grad():
            outputs = model(context)

            logits = outputs[0]
            logits = logits.cpu().detach().numpy()
            # argmax over the flattened logits — assumes a single example
            # per call, TODO confirm for batched input.
            label_id = np.argmax(logits)

            # Wrapped in a list to match the caller's expected shape.
            return _input["input_ids"], [label_id]
    else:
        # Training path: the label is both appended to the model input and
        # kept separately for the loss — presumably the model ignores the
        # appended element; verify against Model.forward.
        context.append(torch.tensor(label).to(device, dtype=torch.long))
        label =  torch.tensor(label).to(device)
        outputs = model(context)
        loss = F.cross_entropy(outputs,label)

        return loss