File size: 1,249 Bytes
325ed74
 
 
 
 
5a06c94
d3c9937
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
---
language:
- en
metrics:
- f1
pipeline_tag: text-classification
---


# Initialize tokenizer and model
# NOTE(review): this snippet assumes `import torch` and
# `from transformers import BartTokenizer, BartForConditionalGeneration`
# have been executed, and that `device` is defined
# (e.g. `device = torch.device("cuda" if torch.cuda.is_available() else "cpu")`).
tokenizer = BartTokenizer.from_pretrained('ihgn/paraphrase-detection')
model = BartForConditionalGeneration.from_pretrained("ihgn/paraphrase-detection").to(device)

source_sentence = "This was a series of nested angular standards , so that measurements in azimuth and elevation could be done directly in polar coordinates relative to the ecliptic."
target_paraphrase = "This was a series of nested polar scales , so that measurements in azimuth and elevation could be performed directly in angular coordinates relative to the ecliptic"


def paraphrase_detection(model, tokenizer, source_sentence, target_paraphrase):
    """Print 1 if the model generates the text '1' for the sentence pair, else 0.

    The two sentences are joined with a literal ' <sep> ' marker, fed to the
    seq2seq model, and the generated text is compared against '1'.
    """
    # Tokenize the sentence pair as a single ' <sep> '-joined sequence.
    inputs = tokenizer.encode_plus(source_sentence + ' <sep> ' + target_paraphrase, return_tensors='pt')

    # Generate the label text; no gradients are needed for inference.
    with torch.no_grad():
        outputs = model.generate(inputs['input_ids'].to(device))

    # BUG FIX: `generated_text` was referenced but never defined in the
    # original snippet (NameError). Decode the first generated sequence to
    # obtain the model's textual label.
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Map the generated string to a binary paraphrase label.
    predicted_label = 1 if generated_text == '1' else 0
    print("Predicted Label:", predicted_label)


paraphrase_detection(model, tokenizer, source_sentence, target_paraphrase)