xuejing2409 committed · verified
Commit ea9670f · 1 Parent(s): 6ae0f67

Add deepseek-ai/Janus-Pro-7B to eval queue

deepseek-ai/Janus-Pro-7B_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1 @@
+ {"model": "deepseek-ai/Janus-Pro-7B", "model_api_url": "", "model_api_key": "", "model_api_name": "", "base_model": "", "revision": "main", "precision": "float16", "private": false, "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-02-11T08:15:48Z", "model_type": "\ud83d\udfe2 : pretrained", "params": 0, "runsh": "#!/bin/bash\ncurrent_file=\"$0\"\ncurrent_dir=\"$(dirname \"$current_file\")\"\nSERVER_IP=$1\nSERVER_PORT=$2\nPYTHONPATH=$current_dir:$PYTHONPATH accelerate launch $current_dir/model_adapter.py --server_ip $SERVER_IP --server_port $SERVER_PORT \"${@:3}\" --cfg $current_dir/meta.json\n", "adapter": "import time\n\nfrom flagevalmm.server import ServerDataset\nfrom flagevalmm.models.base_model_adapter import BaseModelAdapter\nfrom flagevalmm.server.utils import (\n parse_args,\n default_collate_fn,\n process_images_symbol,\n load_pil_image,\n)\nfrom typing import Dict, Any\n\nimport torch\nfrom transformers import AutoModelForCausalLM\nfrom janus.models import MultiModalityCausalLM, VLChatProcessor\nfrom janus.utils.io import load_pil_images\n\n\nclass CustomDataset(ServerDataset):\n def __getitem__(self, index):\n data = self.get_data(index)\n qs, idx = process_images_symbol(\n data[\"question\"], dst_pattern=\"<image_placeholder>\"\n )\n question_id = data[\"question_id\"]\n img_path = data[\"img_path\"]\n image_list, idx = load_pil_image(\n img_path, idx, reqiures_img=True, reduplicate=False\n )\n\n return question_id, qs, image_list\n\n\nclass ModelAdapter(BaseModelAdapter):\n def model_init(self, task_info: Dict):\n ckpt_path = task_info[\"model_path\"]\n\n torch.set_grad_enabled(False)\n with accelerator.main_process_first():\n vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)\n tokenizer = vl_chat_processor.tokenizer\n\n vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(\n model_path, trust_remote_code=True\n )\n vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()\n model = accelerator.prepare_model(vl_gpt, evaluation_mode=True)\n if hasattr(model, \"module\"):\n model = model.module\n\n def build_message(\n self,\n query: str,\n image_paths=[],\n ) -> str:\n messages = [\n {\n \"role\": \"<|User|>\",\n \"content\": f\"<image_placeholder>\\n{question}\",\n \"images\": image_paths,\n },\n {\"role\": \"<|Assistant|>\", \"content\": \"\"},\n ]\n return messages\n\n def run_one_task(self, task_name: str, meta_info: Dict[str, Any]):\n results = []\n cnt = 0\n\n data_loader = self.create_data_loader(\n CustomDataset,\n task_name,\n collate_fn=default_collate_fn,\n batch_size=1,\n num_workers=2,\n )\n for question_id, question, images in data_loader:\n if cnt == 1:\n start_time = time.perf_counter()\n cnt += 1\n messages = self.build_message(question[0], images[0])\n pil_images = load_pil_images(messages)\n prepare_inputs = self.vl_chat_processor(\n conversations=messages, images=pil_images, force_batchify=True\n ).to(self.model.device)\n\n inputs_embeds = self.model.prepare_inputs_embeds(**prepare_inputs)\n\n # run the model to get the response\n outputs = self.model.language_model.generate(\n inputs_embeds=inputs_embeds,\n attention_mask=prepare_inputs.attention_mask,\n pad_token_id=self.tokenizer.eos_token_id,\n bos_token_id=self.tokenizer.bos_token_id,\n eos_token_id=self.tokenizer.eos_token_id,\n max_new_tokens=4096,\n do_sample=False,\n use_cache=True,\n )\n\n response = self.tokenizer.decode(\n outputs[0].cpu().tolist(), skip_special_tokens=True\n )\n\n 
self.accelerator.print(f\"{question[0]}\\n{response}\\n\\n\")\n results.append(\n {\n \"question_id\": question_id[0],\n \"answer\": response.strip(),\n \"prompt\": question[0],\n }\n )\n rank = self.accelerator.state.local_process_index\n\n # save results for the rank\n self.save_result(results, meta_info, rank=rank)\n self.accelerator.wait_for_everyone()\n\n if self.accelerator.is_main_process:\n correct_num = self.collect_results_and_save(meta_info)\n total_time = time.perf_counter() - start_time\n print(\n f\"Total time: {total_time}\\nAverage time:{total_time / cnt}\\nResults_collect number: {correct_num}\"\n )\n\n print(\"rank\", rank, \"finished\")\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n model_adapter = ModelAdapter(\n server_ip=args.server_ip,\n server_port=args.server_port,\n timeout=args.timeout,\n extra_cfg=args.cfg,\n )\n model_adapter.run()\n"}
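
For context, this request bundles everything the evaluation queue needs: the metadata fields (model, revision, precision, status), a `runsh` launcher, and the `adapter` source. A minimal usage sketch, assuming the two embedded scripts are materialized as `run.sh` and `model_adapter.py` next to a `meta.json` task config (the IP and port below are hypothetical placeholders, not part of this commit):

    # run.sh forwards $1/$2 to the adapter as --server_ip/--server_port,
    # passes any extra arguments through, and appends --cfg meta.json
    bash run.sh 127.0.0.1 5000
    # which expands to roughly:
    # PYTHONPATH=.:$PYTHONPATH accelerate launch ./model_adapter.py --server_ip 127.0.0.1 --server_port 5000 --cfg ./meta.json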