import warnings
from transformers import BertTokenizer
from utils.remove_punc import pretreatment
from utils.config import ModelConfig
from lime.limebase import lasso_lime_base
import torch
import torch.nn.functional as F
from captum.attr import Lime, LimeBase
from captum._utils.models.linear_model import SkLearnLinearRegression, SkLearnLasso
from model.predict import bert_lstm_predict
from modules.embed import gen_emb
from model.Bert_LSTM import bert_lstm
# Silence noisy UserWarnings emitted by transformers/captum during attribution.
warnings.filterwarnings("ignore", category=UserWarning)

# Build the Bert-LSTM classifier from the project config and restore the
# trained weights saved at config.save_path.
config = ModelConfig()
net = bert_lstm(config.bert_path,
                config.hidden_dim,
                config.output_size,
                config.n_layers,
                config.bidirectional)
net.load_state_dict(torch.load(config.save_path))
net.cuda()  # NOTE(review): assumes a CUDA device is available — no CPU fallback
def model(inputs):
    """Embed *inputs* with the module-level `net` via gen_emb."""
    return gen_emb(inputs, net)
def forward_func(text):
    """Forward function handed to LimeBase: return the raw model output
    for `text`, discarding the predicted label."""
    output, _label = bert_lstm_predict(text)
    return output

def exp_embedding_cosine_distance(original_inp, perturbed_inp, _, **kwargs):
    """Similarity kernel for LimeBase.

    Computes exp(-d^2 / 2) where d is the cosine *distance*
    (1 - cosine similarity) between the embeddings of the original and
    perturbed inputs.
    """
    emb_orig = model(original_inp)
    emb_pert = model(perturbed_inp)
    cos_dist = 1 - F.cosine_similarity(emb_orig, emb_pert, dim=1)
    return torch.exp(-cos_dist.pow(2) / 2)

def interp_to_input(interp_sample, original_input, **kwargs):
    """Map a binary interpretable sample back to model space.

    Keeps only the positions of `original_input` where `interp_sample`
    is 1, reshaped back to (batch, -1).
    """
    mask = interp_sample.bool()
    kept = original_input[mask]
    return kept.view(original_input.size(0), -1)

def bernoulli_perturb(text, **kwargs):
    """Draw a random 0/1 mask over `text`: each position is kept with
    probability 0.5 (the interpretable-space perturbation for LimeBase)."""
    mask = torch.bernoulli(torch.ones_like(text) * 0.5)
    return mask.long()

# LIME attribution engine: a Lasso surrogate model fit over Bernoulli-perturbed
# token masks, with sample weights from an RBF kernel on embedding cosine
# distance.
# NOTE(review): this rebinds (shadows) the `lasso_lime_base` imported from
# lime.limebase at the top of the file — that imported name is never used here.
lasso_lime_base = LimeBase(
    forward_func,
    interpretable_model=SkLearnLasso(alpha=0.08),
    similarity_func=exp_embedding_cosine_distance,
    perturb_func=bernoulli_perturb,
    perturb_interpretable_space=True,
    from_interp_rep_transform=interp_to_input,
    to_interp_rep_transform=None
)
def extract_attr(comments, tokenizer_id, label):
    """Run LIME attribution on one tokenized comment and return the
    characters that received positive attribution.

    Args:
        comments: list whose first element is the punctuation-stripped
            comment string.
        tokenizer_id: LongTensor of input ids, shape (1, seq_len) — already
            batched and including the [CLS]/[SEP] special tokens.
        label: predicted class index to attribute against.

    Returns:
        List of characters from comments[0] with attribution > 0.
    """
    attrs = lasso_lime_base.attribute(
        tokenizer_id,  # already carries the batch dimension
        target=label,
        n_samples=1000,
        show_progress=True
    )
    attrs = attrs.squeeze(0)
    print(attrs)
    # attrs[0] / attrs[-1] belong to the [CLS]/[SEP] tokens; skip them.
    # (The previous code instead padded the text with literal "s"/"e"
    # characters, which could leak into the result whenever a special token
    # got positive attribution.)
    # NOTE(review): assumes char-level tokenization (Chinese BERT) so the
    # remaining attributions align 1:1 with the characters of comments[0].
    token_attrs = attrs.tolist()[1:-1]
    return [ch for ch, attr in zip(comments[0], token_attrs) if attr > 0]

if __name__=="__main__":
    # End-to-end demo: predict the sentiment of one comment, then explain
    # the prediction with LIME and print the positively-attributed characters.
    test_comments = ['业务水平高，服务质量好']
    result_comments = pretreatment(test_comments)  # preprocessing: strip punctuation
    # Convert the characters to token ids
    tokenizer = BertTokenizer.from_pretrained(config.bert_path)
    result_comments_id = tokenizer(result_comments,
                                    padding=True,
                                    truncation=True,
                                    max_length=220,
                                    return_tensors='pt')
    tokenizer_id = result_comments_id['input_ids']
    # Predict first; LIME then attributes with respect to that label.
    _,label = bert_lstm_predict(tokenizer_id)
    print("=+++=",label)
    r = extract_attr(result_comments,tokenizer_id,label)
    print(r)
