Lum4yx committed on
Commit
2f0a5da
1 Parent(s): 0354728

Create EmCoBench.

EmCo_Basic/basic_ground_truth.json ADDED
The diff for this file is too large to render. See raw diff
 
EmCo_Basic/get_scores.py ADDED
@@ -0,0 +1,58 @@
import json
import re
import argparse

def extract_scores_from_jsonl(file_path):
    scores = []
    happy = []
    sad = []
    angry = []
    excit = []
    # Evaluator replies contain a fraction such as "{score: 2/5}".
    score_pattern = re.compile(r"\{score: (\d+)/(\d+)\}")
    with open(file_path, 'r') as f:
        for line in f:
            data = json.loads(line)
            for path, score_str in data.items():
                match = score_pattern.search(score_str)
                if not match:
                    # Skip records whose score could not be parsed.
                    print(path)
                    continue
                correct, total = int(match.group(1)), int(match.group(2))
                score_value = correct / total
                scores.append(score_value)
                if "Happy" in path:
                    happy.append(score_value)
                elif "sad" in path:
                    sad.append(score_value)
                elif "Angry" in path:
                    angry.append(score_value)
                elif "excit" in path:
                    excit.append(score_value)
    return scores, happy, angry, sad, excit

def calculate_average_score(scores):
    if not scores:
        return 0
    return sum(scores) / len(scores)

def main(file_path):
    scores, happy, angry, sad, excit = extract_scores_from_jsonl(file_path)
    average_score = calculate_average_score(scores)
    happy_score = calculate_average_score(happy)
    angry_score = calculate_average_score(angry)
    sad_score = calculate_average_score(sad)
    excit_score = calculate_average_score(excit)

    print(f"happy Score: {happy_score:.5f}")
    print(f"angry Score: {angry_score:.5f}")
    print(f"sad Score: {sad_score:.5f}")
    print(f"excit Score: {excit_score:.5f}")
    print(f"average Score: {average_score:.5f}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Extract and calculate scores from a JSONL file.")
    parser.add_argument("--file-path", type=str, help="Path to the JSONL file.")
    args = parser.parse_args()
    main(args.file_path)
EmCo_Basic/gpt-eval.py ADDED
@@ -0,0 +1,65 @@
import argparse
import json
from tqdm import tqdm
from openai import OpenAI
import time


NUM_SECONDS_TO_SLEEP = 0.1

def write_to_json(obj_dict, json_file_path):
    with open(json_file_path, 'a') as json_file:
        json_file.write(json.dumps(obj_dict) + '\n')

def ask_chatgpt(prompt, model="gpt-4", temperature=0.1, max_tokens=512):
    client = OpenAI(api_key="YOUR_API_KEY")
    while True:
        try:
            response = client.chat.completions.create(
                model=model,
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer. Only say the content user wanted.'
                }, {
                    'role': 'user',
                    'content': prompt,
                }],
                temperature=temperature,  # TODO: figure out which temperature is best
                max_tokens=max_tokens,
            )
            if "{score" not in response.choices[0].message.content:
                # Retry until the reply contains the "{score: correct/total}" tag.
                raise ValueError("response does not contain a score tag")
            break
        except Exception as e:
            print(e)
            time.sleep(NUM_SECONDS_TO_SLEEP)
    return response.choices[0].message.content


def main(ec_data_file, gt_file, output_file):
    with open(ec_data_file, 'r') as f:
        ec_data = []
        for line in f:
            ec_data.append(json.loads(line))

    with open(gt_file, 'r') as f:
        gt = json.load(f)

    for data in tqdm(ec_data):
        for img_path, data_input in data.items():
            content = "Your task is to assess a record aimed at comprehending an emotion and compare it against the truth label. Determine the number of potential triggers identified correctly versus those missed. Please provide your assessment in the format: {score: correct/total}, e.g. {score: 2/5} for 2 correct and 5 in total from Ground Truth. And include an explanation of your assessment."

            content = content + f" The record is below:\n\nRecord of comprehension:\n{data_input}. Here is the ground truth label:\n\nGround Truth:\n{gt[img_path]}\n\nYour Assessment:"

            output = ask_chatgpt(prompt=content, model="gpt-3.5-turbo")
            write_to_json({f"{img_path}": output}, output_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process emotion comprehension records.")
    parser.add_argument("--ec-data-file", type=str, help="Path to emotion comprehension data file (JSONL).")
    parser.add_argument("--gt-file", type=str, help="Path to ground truth data file (JSON).")
    parser.add_argument("--output-file", type=str, help="Path to output JSONL file.")
    args = parser.parse_args()
    main(args.ec_data_file, args.gt_file, args.output_file)
EmCo_Basic/llama3-eval.py ADDED
@@ -0,0 +1,80 @@
import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import json
from tqdm import tqdm

def write_to_json(obj_dict, json_file_path):
    with open(json_file_path, 'a') as json_file:
        json_file.write(json.dumps(obj_dict) + '\n')

def main(ec_data_file, gt_file, output_file, model_id):
    with open(ec_data_file, 'r') as f:
        ec_data = []
        for line in f:
            ec_data.append(json.loads(line))

    with open(gt_file, 'r') as f:
        gt = json.load(f)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )

    for data in tqdm(ec_data):
        for img_path, data_input in data.items():
            if img_path in gt:
                i = 0
                while True:
                    content = "Your task is to assess a record aimed at comprehending an emotion and compare it against the truth label. Determine the number of potential triggers identified correctly versus those missed. Please provide your assessment in the format: {score: correct/total}, e.g. {score: 2/5} for 2 correct and 5 in total. And include an explanation of your assessment."

                    content = content + f" The record is below:\n\nRecord of comprehension:\n{data_input}. Here is the ground truth label:\n\nGround Truth:\n{gt[img_path]}\n\nYour Assessment:"

                    messages = [
                        {"role": "system", "content": "You are a helpful and precise assistant for checking the quality of the answer. Only say the content user wanted."},
                        {"role": "user", "content": content},
                    ]

                    input_ids = tokenizer.apply_chat_template(
                        messages,
                        add_generation_prompt=True,
                        return_tensors="pt"
                    ).to(model.device)

                    terminators = [
                        tokenizer.eos_token_id,
                        tokenizer.convert_tokens_to_ids("<|eot_id|>")
                    ]

                    outputs = model.generate(
                        input_ids,
                        max_new_tokens=512,
                        eos_token_id=terminators,
                        do_sample=True,
                        temperature=0.1,
                        top_p=0.9,
                    )

                    response = outputs[0][input_ids.shape[-1]:]
                    output = tokenizer.decode(response, skip_special_tokens=True)
                    if "{score: " in output.lower():
                        break
                    if i == 4:
                        # Give up after five attempts and record a zero score.
                        output = "{score: 0/1}"
                        break
                    i += 1
                write_to_json({f"{img_path}": output}, output_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process emotion comprehension records.")
    parser.add_argument("--ec-data-file", type=str, help="Path to emotion comprehension data file (JSONL).")
    parser.add_argument("--gt-file", type=str, help="Path to ground truth data file (JSON).")
    parser.add_argument("--output-file", type=str, help="Path to output JSONL file.")
    parser.add_argument("--model-id", type=str, help="Path to the pretrained model.")
    args = parser.parse_args()

    main(args.ec_data_file, args.gt_file, args.output_file, args.model_id)
EmCo_Basic/long_term_scores.py ADDED
@@ -0,0 +1,53 @@
import spacy
from transformers import BertModel, BertTokenizer
import torch
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import json
from tqdm import tqdm

nlp = spacy.load("en_core_web_sm")


tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased').to("cuda")

def get_sentence_embeddings(sentences):
    inputs = tokenizer(sentences, return_tensors='pt', padding=True, truncation=True).to("cuda")
    with torch.no_grad():
        outputs = model(**inputs)
    embeddings = outputs.last_hidden_state[:, 0, :].cpu().numpy()
    return embeddings

def calculate_coherence(text):
    doc = nlp(text)
    sentences = [sent.text for sent in doc.sents]
    if len(sentences) < 2:
        return None
    embeddings = get_sentence_embeddings(sentences)

    # Coherence is the mean cosine similarity between adjacent sentence embeddings.
    similarities = []
    for i in range(len(embeddings) - 1):
        sim = cosine_similarity([embeddings[i]], [embeddings[i+1]])[0][0]
        similarities.append(sim)
    if not similarities:
        return None
    coherence_score = np.mean(similarities)
    return coherence_score


with open("path/to/ec_data.jsonl", 'r') as f:
    ec_data_jsons = []
    for line in f:
        ec_data_jsons.append(json.loads(line))


coherence_scores = []

for ec_data_json in tqdm(ec_data_jsons):
    for img_path, data_input in ec_data_json.items():
        coherence_score = calculate_coherence(data_input)
        if coherence_score is not None:
            coherence_scores.append(coherence_score)

print(sum(coherence_scores) / len(coherence_scores))
EmCo_Basic/user.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
EmCo_Complex/ec_complex.jsonl ADDED
@@ -0,0 +1,50 @@
+ {"EmoSet-118k/image/anger/anger_00189.jpg": {"What factors might be contributing to the anger of the man in the image?": "1. Staring with wide opening eyes.\n2. Fully aggressive vibe.\n3. Metal claws.\n4. Defending gesture.\n5. Shrunk muscle."}}
+ {"EmoSet-118k/image/anger/anger_00189.jpg": {"What might have caused the man to be happy?": "1. Staring with wide opening eyes.\n2. Metal claws.\n3. Shrunk muscle.\n4. Gesture mimicking superhero.\n5. Pleasure and excitement of receiving new toys."}}
+ {"EmoSet-118k/image/anger/anger_00545.jpg": {"What do you think might have caused the anger or disappointment of the man raising his fist in the image?": "1. Wearing a T-shirt with signs.\n2. Raised fist.\n3. A protest scene.\n4. Serious look on the face.\n5. Disappointed and worried due to the unknown social crises."}}
+ {"EmoSet-118k/image/anger/anger_00545.jpg": {"What might be leading to the information of rouse of the man in the image?": "1. Raised fist.\n2. Standing straightly.\n3. Wide opening eyes.\n4. Posing in front of others.\n5. Community support."}}
+ {"EmoSet-118k/image/anger/anger_00562.jpg": {"What might have caused the guy in blue to be angry?": "1. Got hit by the other guy.\n2. Boxing competition.\n3. Maybe pissed by the person boxing with him.\n4. Annoyance with the situation of losing the score."}}
+ {"EmoSet-118k/image/anger/anger_00562.jpg": {"What do you think is the cause of the excitement of the man in the image?": "1. Boxing competition.\n2. Muscles intense.\n3. Large body movement.\n4. Maybe about to win.\n5. Sense of fulfillment on stage."}}
+ {"EmoSet-118k/image/anger/anger_01009.jpg": {"What factors might be contributing to the anger or displeasure of the lady standing on the right?": "1. Protesting with others.\n2. Holding the board down instead of high up.\n3. Helpless look on face.\n4. Annoyed by possible ignorance by the government."}}
+ {"EmoSet-118k/image/anger/anger_01009.jpg": {"What might be the cause of the happiness of the lady standing on the right in the image?": "1. Slight smile on her face.\n2. Holding the board down instead of high up.\n3. Standing without aggressive gestures.\n 4. Happy and satisfied with the possible reply or result."}}
+ {"EmoSet-118k/image/contentment/contentment_00110.jpg": {"What led to the information of the happiness of the kid in the image?": "1. Smile on the face.\n2. Sense of accompany.\n3. Being with someone famous or with great ability.\n4. Outdoors with bright sunlight.\n5. Showing his own work to another."}}
+ {"EmoSet-118k/image/contentment/contentment_00110.jpg": {"From what aspects can infer to the fear in the kid's face in the image?": "1. Fear of the stranger in front.\n2. Forced smile on his face.\n3. Showing his own work to another.\n4. Nervous or worried about the judgment."}}
+ {"EmoSet-118k/image/contentment/contentment_00110.jpg": {"Why does the man feel happy?": "1. A child listen to him.\n2. Well-dressed and working outdoor.\n3. Smiling in the boy face."}}
+ {"EmoSet-118k/image/contentment/contentment_00110.jpg": {"Why does the man feel sad?": "1. Work pressure.\n2. Well-dressed and working outdoor.\n3. The only person willing to listen is a boy."}}
+ {"EmoSet-118k/image/contentment/contentment_02783.jpg": {"What might have caused the happiness of the woman in the image?": "1. Scene of marriage.\n2. Well-dressed.\n3. Standing right next to the groom.\n4. Beautiful flowers in hand.\n5. Bright sunlight."}}
+ {"EmoSet-118k/image/contentment/contentment_02783.jpg": {"What led to the worries of the woman in the image?": "1. Scene of marriage.\n2. Frowning slightly.\n3. Hand reaching out for something.\n4. Maybe worried about the uncertain future or outcomes."}}
+ {"EmoSet-118k/image/contentment/contentment_02783.jpg": {"What led to the angry of the man in the image?": "1. Scene of marriage.\n2. Frowning slightly.\n3. Unhappy with the situation."}}
+ {"EmoSet-118k/image/contentment/contentment_02886.jpg": {"What do you think might have caused the woman in the image to be happy?": "1. Outdoors scene with bright sunlight.\n2. Rare opportunity to have friends gathered together.\n3. Getting closer to nature by touching the grass.\n4. Delightful conversation with each other.\n5. A glass of drink in her hand."}}
+ {"EmoSet-118k/image/contentment/contentment_02886.jpg": {"Why does the woman in the image seem unhappy?": "1. Frowning sightly.\n2. Having tension with friends at the moment.\n3. Sitting in a defending way.\n4. Holding instead of drinking the glass of drink in her hand."}}
+ {"EmoSet-118k/image/contentment/contentment_03306.jpg": {"What might have caused the man in the image to be enjoyed and relaxed?": "1. Deck chair on the beach.\n2. Resting under the shade of the unseen umbrella.\n3. Lying in a relaxed way or sleeping.\n4. A glass of drink in his hand.\n5. Maybe happy on his vacation."}}
+ {"EmoSet-118k/image/contentment/contentment_03306.jpg": {"What led to the sadness of the man in the image?": "1. Having a sad face.\n2. Unfinished drink in hand.\n3. Resting on a deck chair alone.\n4. Maybe heartbroken due to some reason during vacation."}}
+ {"EmoSet-118k/image/sadness/sadness_01979.jpg": {"Why does the man in the image seem to be upset?": "1. Sad look on his face.\n2. Being in the crowd.\n3. Turning head back.\n4. Maybe leaving an important place to him.\n5. Reluctant of leaving something he is staring at."}}
+ {"EmoSet-118k/image/sadness/sadness_01979.jpg": {"What might have caused the happiness of the man in the image?": "1. Smile on his face.\n2. Being in the crowd.\n3. Joy of returning home or leaving an unpleasant place."}}
+ {"EmoSet-118k/image/sadness/sadness_03640.jpg": {"What might have caused the information of sadness of the image?": "1. Sitting alone.\n2. Holding a skateboard.\n3. Depressing atmosphere of the scene.\n4. Depressed or upset due to the lack of company.\n5. Sitting instead of skating."}}
+ {"EmoSet-118k/image/sadness/sadness_03640.jpg": {"What factors might be contributing to the kid in the image being excited?": "1. Holding a skateboard.\n2. Sitting alone.\n3. Looking forward to his friends' arrival.\n4. Maybe excited because of the coming activity such as a competition.\n5. Maybe resting from a tense skating game."}}
+ {"EmoSet-118k/image/sadness/sadness_03910.jpg": {"What is the cause of the man in the image being sad?": "1. Crying.\n2. Indoors or at night.\n3. Standing next to another person.\n4. Being comforted by another.\n5. Being bullied by another."}}
+ {"EmoSet-118k/image/sadness/sadness_03910.jpg": {"Why does the man in the image seem happy?": "1. Crying.\n2. Standing next to another person.\n3. Sense of reunion.\n4. Maybe happy related to the meeting with an old friend."}}
+ {"EmoSet-118k/image/sadness/sadness_03832.jpg": {"What may caused the little girl upset?": "1. Crying.\n2. Can not making handiwork.\n3. The woman blamed her."}}
+ {"EmoSet-118k/image/sadness/sadness_03832.jpg": {"What may caused the little girl happy?": "1. Crying but the women comfort her.\n2. Can not making handiwork.\n3. Woman help her finishing the work."}}
+ {"EmoSet-118k/image/sadness/sadness_03832.jpg": {"What may cause the woman angry?": "1. The girl is not obedient\n2. The girl can't do handiwork\n3. The girl can't learn no matter how much taught\n4. Step-by-step instruction."}}
+ {"EmoSet-118k/image/excitement/excitement_01785.jpg": {"Why does this man in the picture look exhausted and annoyed?": "1. Maybe lack of Sleep.\n2. Closed-eyes.\n3. Taking care of a young child.\n4. Tired of the child.\n5. Naughty child."}}
+ {"EmoSet-118k/image/excitement/excitement_01785.jpg": {"Why does this man being enjoyment and pleasure?": "1. Enjoying spending time with his child.\n2. Child lying in arms.\n3. Satisfied with the moment.\n4. Sense of company of family.\n5. Engaging in playful activities."}}
+ {"EmoSet-118k/image/excitement/excitement_08734.jpg": {"What factors might be contributing to the anger or the sadness of the teenager in the middle of the image?": "1. A medium, not happy look on his face.\n2. Maybe the event is beyond his will.\n3. Don't like boating.\n4. Physical discomfort or fear of the water.\n5. Being yell by his parent.\n6. Boredom."}}
+ {"EmoSet-118k/image/excitement/excitement_08734.jpg": {"Why does the child in the middle show a smile?": "1. Enjoying the adventure of the rafting trip.\n2. Being with family.\n3. Thrill of the rapids.\n4. Positive and joyful atmosphere."}}
+ {"EmoSet-118k/image/excitement/excitement_00150.jpg": {"Why does the woman look scared?": "1. Inexperienced in horse riding.\n2. Horse unpredictable behavior.\n3. Horse move fast.\n4. Loss of Control."}}
+ {"EmoSet-118k/image/excitement/excitement_00150.jpg": {"Why is the woman excited?": "1. Enjoying the experience.\n2. Thrill of riding a horse.\n3. A sense of accomplishment to manage the ride.\n4. Try something new."}}
+ {"EmoSet-118k/image/excitement/excitement_10520.jpg": {"What led to the information of happiness in the expression of the kid's face in the image?": "1. Holding a bitten fruit.\n2. Maybe play with someone in front of him.\n3. Maybe happy and satisfied eating the fruit."}}
+ {"EmoSet-118k/image/excitement/excitement_10520.jpg": {"What might have caused the sadness of the bay in the image?": "1. Slightly frown.\n2. Tears in the eyes.\n3. Staring at something.\n4. Holding a bitten fruit.\n5. Maybe stimulated by the sour taste of the fruit."}}
+ {"EmoSet-118k/image/excitement/excitement_03302.jpg": {"What might have caused the anger of the kid in the image?": "1. Frowning with an angry face.\n2. Index finger pointing towards something outside of the picture.\n3. Held by a lady.\n4. Maybe unpleasant because he can't get what he wants.\n5. Maybe unhappy due to the ignorance of the lady."}}
+ {"EmoSet-118k/image/excitement/excitement_03302.jpg": {"Why does the kid in the image look excited?": "1. Outdoors with bright sunlight.\n2. Colorful scene.\n3. Index finger pointing towards something outside of the picture.\n4. Maybe attracted by something interesting or exciting.\n5. Maybe roused being in some kind of event."}}
+ {"EmoSet-118k/image/excitement/excitement_11293.jpg": {"What do you think might have caused the kid in the background of the image to be confused?": "1. Head turning back.\n2. Two others acting abnormally.\n3. Two others each holding a stick of corn.\n4. Maybe curious about the event.\n5. Maybe wondering about the motivation for the abnormality."}}
+ {"EmoSet-118k/image/excitement/excitement_11293.jpg": {"Why does the kid in the background seem excited?": "1. Head turning back.\n2. Starring at the two playing with each other on the focus.\n3. Sense of motion from the event.\n4. Maybe excited about the desire to join them."}}
+ {"EmoSet-118k/image/excitement/excitement_11350.jpg": {"What might have caused the kid on the left of the image to be frightened?": "1. Outdoors with grass and trees.\n2. Playing swings.\n3. Staring at another kid.\n4. Defending pose.\n5. Maybe he gets scared because another kid is about to crush on him."}}
+ {"EmoSet-118k/image/excitement/excitement_11350.jpg": {"What led to the excitement and happiness of the kid on the left side of the image?": "1. Outdoors with grass and trees.\n2. Playing swings.\n3. Smile on his face.\n4. Maybe happy and excited enjoying the event."}}
+ {"EmoSet-118k/image/excitement/excitement_09472.jpg": {"Why does the little girl on the right in the image seem to be upset?": "1. A forced smile on her face.\n2. Notes stuck on her face.\n3. Another showing an exaggerated face.\n4. Maybe sad because she is bullied.\n5. Maybe displeased for others laughing at her."}}
+ {"EmoSet-118k/image/excitement/excitement_09472.jpg": {"What might have caused the happiness of the girl on the right side of the image?": "1. Smile on his face.\n2. Another showing an exaggerated face.\n3. Scene in the back of a car.\n4. Notes stuck on her face.\n5. Maybe excited about on a trip.\n6. Maybe happy having fun with others."}}
+ {"EmoSet-118k/image/excitement/excitement_10860.jpg": {"Why does the girl on the right in the image seem to be sad?": "1. A medium or sad look on her face.\n2. Hands protecting her hat.\n3. Others all seem to be facing someone.\n4. Vacant-looking eyes.\n5. Maybe sad that her hat is falling off.\n6. Maybe upset about the absence of someone she looks forward to."}}
+ {"EmoSet-118k/image/excitement/excitement_10860.jpg": {"What factors might be contributing to the kid on the right side of the image being excited?": "1. Decorative hat worn.\n2. Being on her tiptoe.\n3. Sense of motion.\n4. Maybe excited because of the event."}}
+ {"EmoSet-118k/image/fear/fear_04441.jpg": {"Why is this woman very scared?": "1. chainsaw.\n2. The man has a mask.\n3. The man chasing at her.\n4. Maybe Halloween or horror-themed event.\n5. menacing appearance."}}
+ {"EmoSet-118k/image/fear/fear_04441.jpg": {"Why is this woman showing excitement?": "1. Chainsaw.\n2. Playing with the man.\n3. Maybe Halloween or horror-themed event.\n4. menacing appearance."}}
+ {"EmoSet-118k/image/fear/fear_02814.jpg": {"Why does the baby show the fear expression?": "1. The man's scary outfit.\n2. Afraid of the man.\n3. The man's makeup.\n4. Covering mouth with hand."}}
+ {"EmoSet-118k/image/fear/fear_02814.jpg": {"What make the baby surprise and happy?": "1. Shocking face and gesture.\n2. Staring at someone.\n3. Sense of unbelievable.\n4. A man colored in silver on the focus.\n5. Maybe shocked to see something abnormal."}}
EmCo_Complex/get_scores_complex.py ADDED
@@ -0,0 +1,37 @@
import json
import re
import argparse

def extract_scores_from_jsonl(file_path):
    scores = []

    # Evaluator replies contain a fraction such as "{score: 2/5}".
    score_pattern = re.compile(r"\{score: (\d+)/(\d+)\}")
    with open(file_path, 'r') as f:
        for line in f:
            data = json.loads(line)
            for path, score_str in data.items():
                match = score_pattern.search(score_str)
                if match:
                    correct, total = int(match.group(1)), int(match.group(2))
                    score_value = correct / total
                    scores.append(score_value)
                else:
                    # Skip records whose score could not be parsed.
                    print(path)
    return scores

def calculate_average_score(scores):
    if not scores:
        return 0
    return sum(scores) / len(scores)

def main(file_path):
    scores = extract_scores_from_jsonl(file_path)
    average_score = calculate_average_score(scores)
    print(f"average Score: {average_score:.5f}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Extract and calculate scores from a JSONL file.")
    parser.add_argument("--file-path", type=str, help="Path to the JSONL file.")
    args = parser.parse_args()
    main(args.file_path)
EmCo_Complex/gpt-eval-complex.py ADDED
@@ -0,0 +1,68 @@
import argparse
import json
from tqdm import tqdm
from openai import OpenAI
import time


NUM_SECONDS_TO_SLEEP = 0.1

def write_to_json(obj_dict, json_file_path):
    with open(json_file_path, 'a') as json_file:
        json_file.write(json.dumps(obj_dict) + '\n')

def ask_chatgpt(prompt, model="gpt-4", temperature=0.1, max_tokens=512):
    client = OpenAI(api_key="YOUR_API_KEY")
    while True:
        try:
            response = client.chat.completions.create(
                model=model,
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer. Only say the content user wanted.'
                }, {
                    'role': 'user',
                    'content': prompt,
                }],
                temperature=temperature,  # TODO: figure out which temperature is best
                max_tokens=max_tokens,
            )
            if "{score" not in response.choices[0].message.content:
                # Retry until the reply contains the "{score: correct/total}" tag.
                raise ValueError("response does not contain a score tag")
            break
        except Exception as e:
            print(e)
            time.sleep(NUM_SECONDS_TO_SLEEP)
    return response.choices[0].message.content


def main(ec_data_file, gt_file, output_file):
    with open(ec_data_file, 'r') as f:
        ec_data = []
        for line in f:
            ec_data.append(json.loads(line))

    with open(gt_file) as f:
        gt = []
        for line in f:
            gt.append(json.loads(line))

    for i in tqdm(range(len(ec_data))):
        for img_path1, data_input in ec_data[i].items():
            content = "Your task is to assess a record aimed at comprehending an emotion and compare it against the truth label. Determine the number of potential triggers identified correctly versus those missed. Please provide your assessment in the format: {score: correct/total}, e.g. {score: 2/5} for 2 correct and 5 in total from Ground Truth. And include an explanation of your assessment."
            for _, gt_json in gt[i].items():
                for _, label in gt_json.items():
                    content = content + f" The record is below:\n\nRecord of comprehension:\n{data_input}. Here is the ground truth label:\n\nGround Truth:\n{label}\n\nYour Assessment:"
            output = ask_chatgpt(prompt=content, model="gpt-3.5-turbo")
            write_to_json({f"{img_path1}": output}, output_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process emotion comprehension records.")
    parser.add_argument("--ec-data-file", type=str, help="Path to emotion comprehension data file (JSONL).")
    parser.add_argument("--gt-file", type=str, help="Path to ground truth data file (JSONL).")
    parser.add_argument("--output-file", type=str, help="Path to output JSONL file.")
    args = parser.parse_args()

    main(args.ec_data_file, args.gt_file, args.output_file)
    # python gpt-eval-complex.py --ec-data-file experiments/llava7b_complex.jsonl --gt-file ec_complex.jsonl
EmCo_Complex/llama3-eval-complex.py ADDED
@@ -0,0 +1,84 @@
import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import json
from tqdm import tqdm


def write_to_json(obj_dict, json_file_path):
    with open(json_file_path, 'a') as json_file:
        json_file.write(json.dumps(obj_dict) + '\n')

def main(ec_data_file, gt_file, output_file, model_id):
    with open(ec_data_file, 'r') as f:
        ec_data = []
        for line in f:
            ec_data.append(json.loads(line))

    with open(gt_file) as f:
        gt = []
        for line in f:
            gt.append(json.loads(line))

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )

    for j in tqdm(range(len(ec_data))):
        for img_path1, data_input in ec_data[j].items():
            i = 0
            while True:
                content = "Your task is to assess a record aimed at comprehending an emotion and compare it against the truth label. Determine the number of potential triggers identified correctly versus those missed. Please provide your assessment in the format: {score: correct/total}, e.g. {score: 2/5} for 2 correct and 5 in total. And include an explanation of your assessment."
                for img_path2, gt_json in gt[j].items():
                    if img_path1 != img_path2:
                        # Flag ground-truth entries that do not match the record's image path.
                        print(gt_json)
                    for _, label in gt_json.items():
                        content = content + f" The record is below:\n\nRecord of comprehension:\n{data_input}. Here is the ground truth label:\n\nGround Truth:\n{label}\n\nYour Assessment:"
                messages = [
                    {"role": "system", "content": "You are a helpful and precise assistant for checking the quality of the answer. Only say the content user wanted."},
                    {"role": "user", "content": content},
                ]

                input_ids = tokenizer.apply_chat_template(
                    messages,
                    add_generation_prompt=True,
                    return_tensors="pt"
                ).to(model.device)

                terminators = [
                    tokenizer.eos_token_id,
                    tokenizer.convert_tokens_to_ids("<|eot_id|>")
                ]

                outputs = model.generate(
                    input_ids,
                    max_new_tokens=512,
                    eos_token_id=terminators,
                    do_sample=True,
                    temperature=0.1,
                    top_p=0.9,
                )

                response = outputs[0][input_ids.shape[-1]:]
                output = tokenizer.decode(response, skip_special_tokens=True)
                if "{score: " in output.lower():
                    break
                if i == 4:
                    # Give up after five attempts and record a zero score.
                    output = "{score: 0/1}"
                    break
                i += 1
            write_to_json({f"{img_path1}": output}, output_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process emotion comprehension records.")
    parser.add_argument("--ec-data-file", type=str, help="Path to emotion comprehension data file (JSONL).")
    parser.add_argument("--gt-file", type=str, help="Path to ground truth data file (JSONL).")
    parser.add_argument("--output-file", type=str, help="Path to output JSONL file.")
    parser.add_argument("--model-id", type=str, help="Path to the pretrained model.")
    args = parser.parse_args()

    main(args.ec_data_file, args.gt_file, args.output_file, args.model_id)
README.md CHANGED
@@ -1,3 +1,127 @@
- ---
- license: cc-by-4.0
- ---
# EmCoBench: An Extensive Benchmark for General Emotion Comprehension

EmCoBench is a comprehensive benchmark designed for evaluating systems on their ability to understand and identify emotional triggers, rather than just classifying emotions. This is essential for developing more empathetic and human-like AI systems.

***More details about EmCoBench, including a forthcoming paper, will be available soon.***

## Overview

- **Emotion Comprehension Task**: Focuses on identifying emotional triggers, providing a deeper understanding of emotions.
- **EmCoBench Dataset**: Includes 78 fine-grained emotions and 1,655 emotion comprehension samples, with 50 multifaceted complex samples (see the example record below).
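Records in the basic subset (`user.jsonl`) map an image path to a single comprehension question, which the baseline scripts read line by line; records in the complex subset pair each question with its ground-truth trigger list. For illustration, one line of `ec_complex.jsonl` as shipped in this repository:

```json
{"EmoSet-118k/image/contentment/contentment_00110.jpg": {"Why does the man feel happy?": "1. A child listen to him.\n2. Well-dressed and working outdoor.\n3. Smiling in the boy face."}}
```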

## Prerequisites

Download the following datasets before using EmCoBench:
- [EmoSet-118K](https://vcc.tech/EmoSet)
- [CAER-S](https://caer-dataset.github.io/)

Unzip and place them in the `datasets` folder.
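The exact folder layout is not pinned down here; judging from the image paths used in the benchmark files and the `--image-path datasets/` flag in the commands below, something along these lines should work (CAER-S keeps whatever structure its archive ships with):

```
datasets/
├── EmoSet-118k/
│   └── image/
│       ├── anger/
│       ├── contentment/
│       ├── excitement/
│       ├── fear/
│       ├── sadness/
│       └── ...
└── CAER-S/
    └── ...
```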

## Usage

To use the EmCoBench dataset and benchmark in your project:

1. Clone this repository:
```bash
git clone https://github.com/Lum1104/EmCoBench.git
```

2. Navigate to the directory:
```bash
cd EmCoBench
```

3. Run the example baseline code and test your own models.

For each baseline model, please install the required environment as needed:
```bash
# Basic EmCoBench
python EmCoBench/baselines/qwen/qwen_user.py --model-path Qwen/Qwen-VL-Chat --input-json EmCoBench/EmCo_Basic/user.jsonl --output-json EmCoBench/EmCo_Basic/qwen_basic.jsonl --image-path datasets/
# Complex EmCoBench
python EmCoBench/baselines/qwen/qwen_complex.py --model-path Qwen/Qwen-VL-Chat --input-json EmCo_Complex/ec_complex.jsonl --output-json EmCoBench/EmCo_Complex/qwen_complex.jsonl --image-path datasets/
```
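Each baseline script appends one JSON object per line to its output file, mapping the image path to the model's free-form answer. A line of `qwen_basic.jsonl` therefore looks roughly like the following (the answer text here is invented for illustration):

```json
{"EmoSet-118k/image/anger/anger_00189.jpg": "1. Wide, staring eyes.\n2. Tense, defensive posture.\n3. Possibly provoked by a confrontation."}
```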
4. Get evaluation results from LLaMA-3/ChatGPT-3.5

Here is the script for LLaMA-3 evaluation.
```bash
# Basic EmCoBench
cd EmCoBench/EmCo_Basic/
python llama3-eval.py --model-id meta-llama/Meta-Llama-3-8B-Instruct --ec-data-file qwen_basic.jsonl --gt-file basic_ground_truth.json --output-file qwen_basic_llama3_scores.jsonl
python get_scores.py --file-path qwen_basic_llama3_scores.jsonl
# Complex EmCoBench
cd EmCoBench/EmCo_Complex/
python llama3-eval-complex.py --ec-data-file qwen_complex.jsonl --gt-file ec_complex.jsonl --output-file qwen_complex_llama3_scores.jsonl --model-id meta-llama/Meta-Llama-3-8B-Instruct
```

Here is the script for ChatGPT-3.5 evaluation. Set your API key in the `OpenAI(api_key="YOUR_API_KEY")` call inside the evaluation scripts first.
```bash
# Basic EmCoBench
cd EmCoBench/EmCo_Basic/
python gpt-eval.py --ec-data-file qwen_basic.jsonl --gt-file basic_ground_truth.json --output-file qwen_basic_gpt_scores.jsonl
python get_scores.py --file-path qwen_basic_gpt_scores.jsonl
# Complex EmCoBench
cd EmCoBench/EmCo_Complex/
python gpt-eval-complex.py --ec-data-file qwen_complex.jsonl --gt-file ec_complex.jsonl --output-file qwen_complex_gpt_scores.jsonl
```
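Both evaluators likewise write one JSON object per line, pairing each image path with the judge's reply, which must contain a `{score: correct/total}` tag. A hypothetical output line:

```json
{"EmoSet-118k/image/anger/anger_00189.jpg": "{score: 2/5} The record covers two of the five ground-truth triggers."}
```

`get_scores.py` extracts that fraction from every line and prints per-emotion and overall averages of `correct/total`; for the complex subset, the overall average can be obtained with `python get_scores_complex.py --file-path qwen_complex_gpt_scores.jsonl` (or the LLaMA-3 counterpart).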

We also provide evaluation code for Long-term Coherence. Please install the required packages:
```bash
pip install spacy
python -m spacy download en_core_web_sm
```

```python
import spacy
from transformers import BertModel, BertTokenizer
import torch
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import json
from tqdm import tqdm

nlp = spacy.load("en_core_web_sm")


tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased').to("cuda")

def get_sentence_embeddings(sentences):
    inputs = tokenizer(sentences, return_tensors='pt', padding=True, truncation=True).to("cuda")
    with torch.no_grad():
        outputs = model(**inputs)
    embeddings = outputs.last_hidden_state[:, 0, :].cpu().numpy()
    return embeddings

def calculate_coherence(text):
    doc = nlp(text)
    sentences = [sent.text for sent in doc.sents]
    if len(sentences) < 2:
        return None
    embeddings = get_sentence_embeddings(sentences)

    # Coherence is the mean cosine similarity between adjacent sentence embeddings.
    similarities = []
    for i in range(len(embeddings) - 1):
        sim = cosine_similarity([embeddings[i]], [embeddings[i+1]])[0][0]
        similarities.append(sim)
    if not similarities:
        return None
    coherence_score = np.mean(similarities)
    return coherence_score


with open("path/to/ec_data.jsonl", 'r') as f:
    ec_data_jsons = []
    for line in f:
        ec_data_jsons.append(json.loads(line))


coherence_scores = []

for ec_data_json in tqdm(ec_data_jsons):
    for img_path, data_input in ec_data_json.items():
        coherence_score = calculate_coherence(data_input)
        if coherence_score is not None:
            coherence_scores.append(coherence_score)

print(sum(coherence_scores) / len(coherence_scores))
```
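Before running the snippet, point the hard-coded `"path/to/ec_data.jsonl"` at a model output file such as `qwen_basic.jsonl`; the script then prints the mean cosine similarity between adjacent sentence embeddings, averaged over all records, as the Long-term Coherence score.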
qwen/qwen_api_basic.py ADDED
@@ -0,0 +1,47 @@
from http import HTTPStatus
from dashscope import MultiModalConversation
import dashscope
import base64
import json
from tqdm import tqdm

def write_to_json(obj_dict, json_file_path):
    with open(json_file_path, 'a') as json_file:
        json_file.write(json.dumps(obj_dict) + '\n')

def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

def get_response(img_path, content):
    print(img_path, content)
    messages = [{
        'role': 'user',
        'content': [
            {
                'image': "file://" + img_path
            },
            {
                'text': content
            },
        ]
    }]
    response = MultiModalConversation.call(model='qwen-vl-plus', messages=messages)
    print(response)
    return response.output.choices[0].message.content[0]["text"]

if __name__ == '__main__':
    dashscope.api_key = "YOUR_API_KEY"
    with open("user.jsonl", 'r') as f:
        ec_data = []
        for line in f:
            ec_data.append(json.loads(line))

    for data in tqdm(ec_data):
        for img_path, data_input in data.items():
            content = f"You are an expert of emotion understanding. Look at this image, {data_input.lower()}"
            try:
                completion = get_response("path/to/dataset/" + img_path, content)
                write_to_json({img_path: completion}, "api_qwen.jsonl")
            except:
                continue
qwen/qwen_api_complex.py ADDED
@@ -0,0 +1,56 @@
from http import HTTPStatus
from dashscope import MultiModalConversation
import dashscope
import base64
import json
from tqdm import tqdm

def write_to_json(obj_dict, json_file_path):
    with open(json_file_path, 'a') as json_file:
        json_file.write(json.dumps(obj_dict) + '\n')

def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

def get_response(img_path, content):
    print(img_path, content)
    messages = [{
        'role': 'user',
        'content': [
            {
                'image': "file://" + img_path
            },
            {
                'text': content
            },
        ]
    }]
    response = MultiModalConversation.call(model='qwen-vl-plus', messages=messages)
    print(response)
    try:
        return response.output.choices[0].message.content[0]["text"]
    except IndexError:
        return "I don't know"
    except:
        return response.output.choices[0].message.content[1]["text"]

if __name__ == '__main__':
    dashscope.api_key = "YOUR_API_KEY"
    with open("ec_complex.jsonl") as f:
        data_json = []
        for line in f:
            data_json.append(json.loads(line))

    for datas in tqdm(data_json):
        for img_path, data in datas.items():
            for question, gt in data.items():
                content = f"You are an expert of emotion understanding. Look at this image, {question.lower()}."
                while True:
                    try:
                        completion = get_response("images/" + img_path, content)
                        write_to_json({img_path: completion}, "api_qwen_complex.jsonl")
                        break
                    except Exception as e:
                        print(e)
                        continue
qwen/qwen_complex.py ADDED
@@ -0,0 +1,46 @@
import argparse
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import torch
import json
from tqdm import tqdm
torch.manual_seed(1234)

def write_to_json(obj_dict, json_file_path):
    with open(json_file_path, 'a') as json_file:
        json_file.write(json.dumps(obj_dict) + '\n')


def main(args):
    # Note: The default behavior now has injection attack prevention off.
    tokenizer = AutoTokenizer.from_pretrained(args.model_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(args.model_path, device_map="cuda", trust_remote_code=True).eval()

    # Specify hyperparameters for generation
    model.generation_config = GenerationConfig.from_pretrained(args.model_path, trust_remote_code=True)

    with open(args.input_json) as f:
        data_json = []
        for line in f:
            data_json.append(json.loads(line))

    for datas in tqdm(data_json):
        for img_path, data in datas.items():
            for question, _ in data.items():
                query = tokenizer.from_list_format([
                    {'image': args.image_path + img_path},  # Either a local path or an url
                    {'text': f'You are a good expert of emotion understanding. Look at the image, {question}'},
                ])
                response, _ = model.chat(tokenizer, query=query, history=None)
                write_to_json({img_path: response}, args.output_json)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process images and generate responses.")
    parser.add_argument('--model-path', type=str, required=True, help="Path to the model")
    parser.add_argument('--image-path', type=str, required=True, help="Path to the dataset images")
    parser.add_argument('--input-json', type=str, required=True, help="Path to the input JSON file")
    parser.add_argument('--output-json', type=str, required=True, help="Path to the output JSON file")
    args = parser.parse_args()
    main(args)
    # python qwen_complex.py --model-path Qwen/Qwen-VL-Chat --input-json path/to/ec_complex.jsonl --output-json path/to/output.jsonl --image-path path/to/dataset
qwen/qwen_user.py ADDED
@@ -0,0 +1,43 @@
import argparse
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import torch
import json
from tqdm import tqdm
torch.manual_seed(1234)

def write_to_json(obj_dict, json_file_path):
    with open(json_file_path, 'a') as json_file:
        json_file.write(json.dumps(obj_dict) + '\n')

def main(args):
    # Note: The default behavior now has injection attack prevention off.
    tokenizer = AutoTokenizer.from_pretrained(args.model_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(args.model_path, device_map="cuda", trust_remote_code=True).eval()

    # Specify hyperparameters for generation
    model.generation_config = GenerationConfig.from_pretrained(args.model_path, trust_remote_code=True)

    with open(args.input_json) as f:
        data_jsons = []
        for line in f:
            data_jsons.append(json.loads(line))

    for data_json in tqdm(data_jsons):
        for img_path, question in data_json.items():
            query = tokenizer.from_list_format([
                {'image': args.image_path + img_path},  # Either a local path or an url
                {'text': f'You are a good expert of emotion understanding. Look at the image, {question}'},
            ])
            response, _ = model.chat(tokenizer, query=query, history=None)
            write_to_json({img_path: response}, args.output_json)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process images and generate responses.")
    parser.add_argument('--model-path', type=str, required=True, help="Path to the model")
    parser.add_argument('--image-path', type=str, required=True, help="Path to the dataset images")
    parser.add_argument('--input-json', type=str, required=True, help="Path to the input JSON file")
    parser.add_argument('--output-json', type=str, required=True, help="Path to the output JSON file")
    args = parser.parse_args()
    main(args)
    # python qwen_user.py --model-path path/to/model --input-json path/to/user.jsonl --output-json path/to/output.jsonl --image-path path/to/dataset