---
language:
  - bn
metrics:
  - code_eval
  - accuracy
  - bertscore
---

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

irony_name = "raquiba/sarcasm-detection-BanglaSARC"
tokenizer_irony = AutoTokenizer.from_pretrained(irony_name)
model_irony = AutoModelForSequenceClassification.from_pretrained(irony_name)
irony_pipeline = pipeline(
    "sentiment-analysis",
    model=model_irony,
    tokenizer=tokenizer_irony,
    device=0,
    max_length=512,
    padding=True,
    truncation=True,
)
```
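
For reference, a minimal usage sketch; the Bangla example sentence is made up for illustration, and the exact label names depend on the model's config:

```python
# Hypothetical input sentence; the pipeline returns a label/score pair per text.
result = irony_pipeline("বাহ, কি চমৎকার পরিষেবা!")
print(result)  # e.g. [{'label': 'LABEL_1', 'score': 0.97}]
```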

# Model Evaluation

```python
from transformers import TrainingArguments, Trainer

tokenizer = AutoTokenizer.from_pretrained(irony_name)
# tokenized_data and df_eval come from the surrounding evaluation script (not shown here)
df_train, df_test = tokenized_data(df_eval)
model_irony = AutoModelForSequenceClassification.from_pretrained(
    irony_name, num_labels=2, ignore_mismatched_sizes=True
).to(device)
# note: the original 'reprocess_input_data' option is not a TrainingArguments field and is dropped here
training_args = TrainingArguments("test-trainer-banglaBERT", evaluation_strategy="epoch")
# trainer_irony was not constructed in the original snippet; a plain Trainer setup is assumed
trainer_irony = Trainer(
    model=model_irony,
    args=training_args,
    train_dataset=df_train,
    eval_dataset=df_test,
    tokenizer=tokenizer,
)
trainer_irony.train()
```
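
The metadata above lists accuracy and BERTScore among the evaluation metrics. Below is a minimal sketch of a `compute_metrics` callback covering the accuracy part; the function and its wiring into the Trainer are assumptions for illustration, not code from the original card:

```python
import numpy as np
import evaluate

accuracy_metric = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    # The Trainer passes (logits, labels); pick the higher-scoring of the two classes.
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return accuracy_metric.compute(predictions=predictions, references=labels)

# Usage: pass compute_metrics=compute_metrics to Trainer, then call trainer_irony.evaluate()
```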