Commit d3da12f committed by owaiskha9654 (1 Parent: 8059fbf)
Personality_detection_Classification_Save/config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "_name_or_path": "bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
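
Note: this config keeps the generic `LABEL_0` .. `LABEL_4` ids that `BertForSequenceClassification` writes by default; `app.py` below remaps them to the five trait names at load time. A minimal loading sketch under that assumption (the directory path comes from this commit, the trait order from `app.py`; everything else is standard `transformers` usage):

```python
from transformers import BertConfig, BertForSequenceClassification, BertTokenizer

save_dir = "./Personality_detection_Classification_Save/"

# Replace the placeholder labels from config.json with the Big-Five trait names
config = BertConfig.from_pretrained(save_dir)
config.id2label = {0: "Extroversion", 1: "Neuroticism", 2: "Agreeableness",
                   3: "Conscientiousness", 4: "Openness"}
config.label2id = {name: idx for idx, name in config.id2label.items()}

model = BertForSequenceClassification.from_pretrained(save_dir, config=config)
tokenizer = BertTokenizer.from_pretrained(save_dir, do_lower_case=True)
```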
Personality_detection_Classification_Save/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2580b51fac872643c2c937da3680f017edd396500861f17605e5b7463e020e7b
+ size 438017141
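
The model weights live in Git LFS; only the pointer above is stored in git history. After fetching the binary (for example with `git lfs pull`), it can be checked against the pointer's size and sha256. A small verification sketch, assuming the file sits at the path from this commit:

```python
import hashlib
import os

path = "Personality_detection_Classification_Save/pytorch_model.bin"

# Hash the downloaded weights in 1 MiB chunks and compare with the LFS pointer
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == 438017141
assert sha.hexdigest() == "2580b51fac872643c2c937da3680f017edd396500861f17605e5b7463e020e7b"
```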
Personality_detection_Classification_Save/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
Personality_detection_Classification_Save/tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "name_or_path": "bert-base-uncased",
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": null,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
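
`model_max_length` is 512 here, matching BERT's `max_position_embeddings` in the config above, so longer essays have to be truncated before classification. A quick encoding sketch under that limit (the sample text is purely illustrative):

```python
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("./Personality_detection_Classification_Save/", do_lower_case=True)

# Encode an essay, truncating to the 512-token limit declared in tokenizer_config.json
enc = tokenizer.encode_plus(
    "Well, here we go with the stream of consciousness essay.",
    max_length=512,
    truncation=True,
    padding=True,
)
print(len(enc["input_ids"]))
```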
Personality_detection_Classification_Save/vocab.txt ADDED
The diff for this file is too large to render.
README.md CHANGED
@@ -1,10 +1,10 @@
  ---
  title: Big Five Personality Traits Detection
- emoji: 🌖
+ emoji: 🌖👀
  colorFrom: pink
  colorTo: yellow
  sdk: gradio
- sdk_version: 3.16.2
+ sdk_version: 3.1.4
  app_file: app.py
  pinned: false
  ---
app.py ADDED
@@ -0,0 +1,87 @@
+ from transformers import BertForSequenceClassification, BertTokenizer
+ import torch
+ from typing import Dict
+ import gradio as gr
+ 
+ 
+ # Load the fine-tuned BERT model and tokenizer saved in this repository
+ model = BertForSequenceClassification.from_pretrained("./Personality_detection_Classification_Save/", num_labels=5)
+ tokenizer = BertTokenizer.from_pretrained("./Personality_detection_Classification_Save/", do_lower_case=True)
+ 
+ # Map the generic LABEL_0 .. LABEL_4 ids from config.json to the Big-Five trait names
+ model.config.label2id = {
+     "Extroversion": 0,
+     "Neuroticism": 1,
+     "Agreeableness": 2,
+     "Conscientiousness": 3,
+     "Openness": 4,
+ }
+ model.config.id2label = {
+     "0": "Extroversion",
+     "1": "Neuroticism",
+     "2": "Agreeableness",
+     "3": "Conscientiousness",
+     "4": "Openness",
+ }
+ 
+ 
+ def Personality_Detection_from_reviews_submitted(model_input: str) -> Dict[str, float]:
+     # Encode the input text; BERT accepts at most 512 tokens, so longer essays are truncated
+     encoded = tokenizer.encode_plus(model_input, max_length=512, padding=True, truncation=True)
+     input_ids = torch.tensor([encoded["input_ids"]])
+     attention_mask = torch.tensor([encoded["attention_mask"]])
+ 
+     # Run the classifier and turn the logits into independent per-trait probabilities
+     outs = model(input_ids, token_type_ids=None, attention_mask=attention_mask)
+     b_logit_pred = outs[0]
+     pred_label = torch.sigmoid(b_logit_pred)
+     return {
+         "Extroversion": float(pred_label[0][0]),
+         "Neuroticism": float(pred_label[0][1]),
+         "Agreeableness": float(pred_label[0][2]),
+         "Conscientiousness": float(pred_label[0][3]),
+         "Openness": float(pred_label[0][4]),
+     }
+ 
+ 
+ model_input = gr.Textbox("Input text here (note: this model is trained to classify essays and is still a work in progress)", show_label=False)
+ model_output = gr.Label("Big-Five personality traits result", num_top_classes=5, show_label=True,
+                         label="Big-Five personality trait scores assigned to this text")
+ examples = [
+     ("Well, here we go with the stream of consciousness essay. I used to do things like this in high school sometimes. "
+      "They were pretty interesting, but I often find myself with a lack of things to say. "
+      "I normally consider myself someone who gets straight to the point. I wonder if I should hit enter any time to send this back to the front. "
+      "Maybe I'll fix it later. My friend is playing guitar in my room now. Sort of playing anyway. "
+      "More like messing with it. He's still learning. There's a drawing on the wall next to me."),
+     ("An open keyboard and buttons to push. The thing finally worked and I need not use periods, commas and all those thinks. "
+      "Double space after a period. We can't help it. I put spaces between my words and I do my happy little assignment of jibber-jabber. "
+      "Babble babble babble for 20 relaxing minutes and I feel silly and grammatically incorrect. I am linked to an unknown reader. "
+      "A graduate student with an absurd job. I type. I jabber and I think about dinoflagellates. About sunflower crosses and about "
+      "the fiberglass that has be added to my lips via clove cigarettes and I think about things that I shouldn't be thinking. "
+      "I know I shouldn't be thinking. or writing let's say/ So I don't. Thoughts don't solidify. They lodge in the back. behind my tongue maybe."),
+ ]
+ 
+ title = "Big Five Personality Traits Detection From Reviews Submitted"
+ description = (
+     "Traditional machine learning models struggle when there is not enough labeled data for the specific task "
+     "or domain we care about to train a reliable model. Transfer learning lets us deal with these scenarios by "
+     "leveraging existing labeled data from a related task or domain: the knowledge gained in solving the source "
+     "task is reused on the problem of interest. In this work, I fine-tune a BERT-base-uncased model on a "
+     "Big-Five personality traits dataset."
+ )
+ text1 = (
+     # "<center> Author: Owais Ahmad Data Scientist at <b> Thoucentric </b> <a href=\"https://www.linkedin.com/in/owaiskhan9654/\">Visit Profile</a> <br></center>"
+     # "<center> Model Trained GOOGLE COLAB Kernel <a href=\"https://colab.research.google.com/drive/1dbr9t9vxF8M80FAH9WoBFEdT0iGaXr1r?usp=sharing\">Link</a> <br></center>"
+     # "<center> Kaggle Profile <a href=\"https://www.kaggle.com/owaiskhan9654\">Link</a> <br> </center>"
+     "<center> HuggingFace Model Deployed Repository <a href=\"\"> Yet to be deployed</a> <br></center>"
+ )
+ 
+ app = gr.Interface(
+     Personality_Detection_from_reviews_submitted,
+     inputs=model_input,
+     outputs=model_output,
+     examples=examples,
+     title=title,
+     description=description,
+     article=text1,
+     allow_flagging="never",
+     analytics_enabled=False,
+ )
+ 
+ app.launch(inline=True, share=True, show_error=True)
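
Once the app is running, a Gradio 3.x interface can also be exercised over its REST endpoint rather than the UI. A rough smoke-test sketch, assuming the default `/api/predict` route and a locally launched app (the URL is a placeholder for the deployed Space):

```python
import requests

# POST the essay text to the running Gradio app and print the Label output
resp = requests.post(
    "http://127.0.0.1:7860/api/predict",
    json={"data": ["Well, here we go with the stream of consciousness essay."]},
    timeout=60,
)
print(resp.json()["data"][0])
```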
requirements..txt ADDED
@@ -0,0 +1 @@
+ transformers