Wendy-Fly committed on
Commit
bd06209
·
verified ·
1 Parent(s): e6592d0

Upload infer_1.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. infer_1.py +12 -10
infer_1.py CHANGED
@@ -42,7 +42,7 @@ data = read_json('/home/zbz5349/WorkSpace/aigeeks/Qwen2.5-VL/LLaMA-Factory/data/
42
  save_data = []
43
  correct_num = 0
44
  begin = 0
45
- end = len(data)
46
  batch_size = 1
47
  for batch_idx in tqdm(range(begin, end, batch_size)):
48
  batch = data[batch_idx:batch_idx + batch_size]
@@ -50,8 +50,8 @@ for batch_idx in tqdm(range(begin, end, batch_size)):
50
  image_list = []
51
  input_text_list = []
52
  data_list = []
53
- save_data = []
54
-
55
  # while True:
56
  for idx, i in enumerate(batch):
57
  save_ = {
@@ -66,7 +66,8 @@ for batch_idx in tqdm(range(begin, end, batch_size)):
66
  {"type": "image", "image": "file:///path/to/image2.jpg"},
67
  {"type": "text", "text": "Describe this video."},
68
  ],
69
- "answer":""
 
70
  }
71
  messages = {
72
  "role": "user",
@@ -95,8 +96,9 @@ for batch_idx in tqdm(range(begin, end, batch_size)):
95
  save_['content'][1]['image'] = image_path
96
  save_['content'][2]['text'] = question
97
  save_['answer'] = answer
 
98
  data_list.append(messages)
99
- save_data.append(save_)
100
 
101
  text = processor.apply_chat_template(data_list, tokenize=False, add_generation_prompt=True)
102
  image_inputs, video_inputs, video_kwargs = process_vision_info(data_list, return_video_kwargs=True)
@@ -119,11 +121,11 @@ for batch_idx in tqdm(range(begin, end, batch_size)):
119
  output_text = processor.batch_decode(
120
  generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
121
  )
122
- save_["answer"] = output_text
123
- if output_text == answer:
124
- correct_num = correct_num + 1
125
- save_data.append(save_)
126
  print("correct_num", correct_num)
127
- write_json("infer_answer.json",save_data)
128
 
129
 
 
42
  save_data = []
43
  correct_num = 0
44
  begin = 0
45
+ end = 1
46
  batch_size = 1
47
  for batch_idx in tqdm(range(begin, end, batch_size)):
48
  batch = data[batch_idx:batch_idx + batch_size]
 
50
  image_list = []
51
  input_text_list = []
52
  data_list = []
53
+ save_list = []
54
+ sd_ans = []
55
  # while True:
56
  for idx, i in enumerate(batch):
57
  save_ = {
 
66
  {"type": "image", "image": "file:///path/to/image2.jpg"},
67
  {"type": "text", "text": "Describe this video."},
68
  ],
69
+ "answer":"None",
70
+ "result":"None",
71
  }
72
  messages = {
73
  "role": "user",
 
96
  save_['content'][1]['image'] = image_path
97
  save_['content'][2]['text'] = question
98
  save_['answer'] = answer
99
+ sd_ans.append(answer)
100
  data_list.append(messages)
101
+ save_list.append(save_)
102
 
103
  text = processor.apply_chat_template(data_list, tokenize=False, add_generation_prompt=True)
104
  image_inputs, video_inputs, video_kwargs = process_vision_info(data_list, return_video_kwargs=True)
 
121
  output_text = processor.batch_decode(
122
  generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
123
  )
124
+ for idx,x in enumerate(output_text):
125
+ save_list[idx]['result'] = x
126
+ save_data.append(save_list[idx])
127
+
128
  print("correct_num", correct_num)
129
+ write_json("infer_answer_ori.json",save_data)
130
 
131