#!/usr/bin/env python
# coding: utf-8

# In[1]:


# Load the IMDB movie-review dataset (train/test splits) from the Hugging Face hub.
from datasets import load_dataset

imdb = load_dataset("imdb")

# In[2]:


# Load a fresh pretrained checkpoint from the Hugging Face hub.
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

# Binary classification head (num_labels=2). output_attentions=True makes the
# forward pass also return per-layer attention tensors, which the bertviz
# cells below rely on.
model = DistilBertForSequenceClassification.from_pretrained(
    "distilbert-base-uncased",
    num_labels=2,
    output_attentions=True,
)


# In[2]:


# Alternative cell: load tokenizer + model from a locally saved checkpoint
# instead of the hub (run either this cell or the previous one, not both).
import os
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

output_directory = os.path.join("./model", "distilbert")

tokenizer = DistilBertTokenizer.from_pretrained(output_directory)
model = DistilBertForSequenceClassification.from_pretrained(output_directory)


# In[3]:


def preprocess_function(examples):
    """Tokenize a batch of examples, truncating to the model's max length.

    Relies on the module-level ``tokenizer``. Padding is intentionally
    deferred to the data collator, which pads dynamically per batch.
    """
    return tokenizer(examples["text"], truncation=True)


# In[4]:


tokenized_imdb = imdb.map(preprocess_function, batched=True)


# In[5]:


# Collator that pads each batch to the length of its longest sequence,
# using the tokenizer's pad token (dynamic padding).
from transformers import DataCollatorWithPadding
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)


# In[6]:


import numpy as np
from datasets import load_metric

# Accuracy metric loaded from a local metric script.
metric = load_metric("./accuracy.py")


def compute_metrics(eval_pred):
    """Compute accuracy for a Trainer evaluation step.

    Because the model was created with output_attentions=True, the
    ``logits`` entry of ``eval_pred`` is a tuple whose first element holds
    the classification scores — hence the argmax over ``logits[0]``.
    """
    logits, labels = eval_pred
    predictions = np.argmax(logits[0], axis=-1)
    # Compute once and reuse: the original called metric.compute() twice
    # (once to print, once to return), doing the work twice per evaluation.
    result = metric.compute(predictions=predictions, references=labels)
    print(result)
    return result


# In[7]:


def tokenize_function(examples):
    """Tokenize a batch to fixed-length 512-token inputs (static padding)."""
    return tokenizer(
        examples["text"],
        padding="max_length",
        max_length=512,
        truncation=True,
    )


# Second tokenized copy of the corpus, padded to a fixed 512 tokens
# (unlike `tokenized_imdb`, which leaves padding to the data collator).
tokenized_datasets = imdb.map(tokenize_function, batched=True)


# In[65]:


from transformers import TrainingArguments, Trainer
from collections import Counter

training_args = TrainingArguments(
    output_dir='./results',
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=20,
    weight_decay=0.01,
    # Periodically offload accumulated eval predictions; with
    # output_attentions=True the eval outputs are large, so this helps
    # avoid running out of accelerator memory during evaluation.
    eval_accumulation_steps=10,
)

# Small fixed-seed subsets for quick experimentation.
small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(500))
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(500))

# Show the class balance instead of dumping all 500 raw labels to the output.
print("train label counts:", Counter(small_train_dataset["label"]))
print("eval label counts:", Counter(small_eval_dataset["label"]))

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,
    eval_dataset=small_eval_dataset,
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)

trainer.train()


# In[66]:


trainer.evaluate()


# In[44]:





# In[19]:


import torch
import os

# Pick the first CUDA GPU when present; otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')


# In[20]:


from bertviz import model_view
from transformers import DistilBertModel, DistilBertTokenizer, utils


# In[21]:


def show_model_view(model, tokenizer, text):
    """Render a bertviz model view of the attention over ``text``.

    Uses the module-level ``device`` and ``model_view``. Assumes the model
    was built with output_attentions=True, so the last element of the
    output tuple holds the per-layer attention tensors.
    """
    encoded = tokenizer.encode_plus(text, return_tensors='pt', add_special_tokens=True)
    input_ids = encoded['input_ids']
    attention = model(input_ids.to(device))[-1]
    # Convert ids of batch item 0 back to readable tokens for the axes.
    tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist())
    model_view(attention, tokens)


# In[67]:


utils.logging.set_verbosity_error()  # Remove line to see warnings
# Visualize attention for the first 100 characters of the first training review.
text = imdb["train"]['text'][0][0:100]
show_model_view(model, tokenizer, text)


# In[35]:


from transformers import pipeline

# Sentiment pipeline backed by the fine-tuned model. Choose the device
# dynamically: GPU 0 when CUDA is available, CPU (-1) otherwise — the
# original hardcoded device=0, which fails on CPU-only machines.
classifier2 = pipeline(
    "sentiment-analysis",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
)


# In[36]:


# Baseline for comparison: a sentiment-analysis pipeline with its default
# pretrained checkpoint (downloaded on first use).
from transformers import pipeline
classifier = pipeline("sentiment-analysis")


# In[73]:





# In[74]:





# In[72]:





# In[ ]:




