Update README.md
README.md CHANGED
```diff
@@ -15,28 +15,22 @@ The following provides the code to implement the task of detecting personality f
 
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
 import torch
-model = AutoModelForSequenceClassification.from_pretrained("
+model = AutoModelForSequenceClassification.from_pretrained("KevSun/Personality_LM")
 tokenizer = AutoTokenizer.from_pretrained("KevSun/Personality_LM")
 
-
 # Example new text input
 #new_text = "I really enjoy working on complex problems and collaborating with others."
 
-
-# Define the path to your text file
 file_path = 'path/to/your/textfile.txt'
 
-# Read the content of the file
 with open(file_path, 'r', encoding='utf-8') as file:
     new_text = file.read()
 
-
 # Encode the text using the same tokenizer used during training
 encoded_input = tokenizer(new_text, return_tensors='pt', padding=True, truncation=True, max_length=64)
 
-
 # Move the model to the correct device (CPU in this case, or GPU if available)
-model.eval()  # Set the model to evaluation mode
+#model.eval()  # Set the model to evaluation mode
 
 # Perform the prediction
 with torch.no_grad():
@@ -45,7 +39,6 @@ with torch.no_grad():
 # Get the predictions (the output here depends on whether you are doing regression or classification)
 predictions = outputs.logits.squeeze()
 
-
 # Assuming the model is a regression model and outputs raw scores
 predicted_scores = predictions.numpy()  # Convert to numpy array if necessary
 trait_names = ["Agreeableness", "Openness", "Conscientiousness", "Extraversion", "Neuroticism"]
```
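For readers who just want to run the updated snippet, below is a minimal self-contained sketch of how the pieces fit together. The forward pass and the per-trait printout sit outside the visible hunks, so the `outputs = model(**encoded_input)` call and the final loop are assumptions added for illustration; the model name, tokenizer settings, logits handling, and trait list are taken from the diff above.

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

# Load the fine-tuned model and its tokenizer (names taken from the diff above)
model = AutoModelForSequenceClassification.from_pretrained("KevSun/Personality_LM")
tokenizer = AutoTokenizer.from_pretrained("KevSun/Personality_LM")
model.eval()  # evaluation mode; the commit comments this line out, but keeping it is harmless

# Use the README's example sentence instead of reading a file, to keep the sketch self-contained
new_text = "I really enjoy working on complex problems and collaborating with others."

# Encode the text with the same settings used in the README
encoded_input = tokenizer(new_text, return_tensors='pt', padding=True, truncation=True, max_length=64)

# Forward pass without gradient tracking (assumed call; this line is outside the visible hunks)
with torch.no_grad():
    outputs = model(**encoded_input)

# Treat the raw logits as per-trait scores, as the README does for a regression-style head
predicted_scores = outputs.logits.squeeze().numpy()
trait_names = ["Agreeableness", "Openness", "Conscientiousness", "Extraversion", "Neuroticism"]

for trait, score in zip(trait_names, predicted_scores):
    print(f"{trait}: {score:.3f}")
```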