## On AutoDL instances, switch pip to a domestic mirror first — AutoDL rents
## Huawei network lines, so the Huawei Cloud PyPI mirror is the fastest choice.
pip config set global.index-url https://repo.huaweicloud.com/repository/pypi/simple 


## Experiment tracking: SwanLab as a wandb alternative — https://swanlab.cn
## (The two lines below were bare text, not comments: executing this file would
## try to run them as commands and fail with "command not found".)
#
# SECURITY(review): an API key was hard-coded here in plain text. It is kept
# only as a comment for reference — rotate it on swanlab.cn and supply it via
# an environment variable or `swanlab login` instead of committing it.
# 6ee6ArUKi6uefH2zQkABA
pip install swanlab

### ModelScope hub: download the base model and the ChartQA eval dataset.
pip install modelscope
# Model goes to ./model-dir; the dataset lands in the default ModelScope cache.
modelscope download --model="Qwen/Qwen2.5-0.5B-Instruct" --local_dir ./model-dir
modelscope download --dataset="lmms-lab/ChartQA"

## Install: create the conda env, install pinned CUDA 12.1 PyTorch wheels
## (hash-pinned URLs), then the three local packages in editable mode.
conda create -n msd python=3.10 -y
# NOTE(review): `conda activate` only works in an interactive shell; in a
# non-interactive script run `source "$(conda info --base)/etc/profile.d/conda.sh"` first.
conda activate msd
pip install https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-linux_x86_64.whl#sha256=b2184b7729ef3b9b10065c074a37c1e603fd99f91e38376e25cb7ed6e1d54696
pip install https://download.pytorch.org/whl/cu121/torchvision-0.16.2%2Bcu121-cp310-cp310-linux_x86_64.whl#sha256=baa7970c6b5437312e5dd0bd0f2571a20b786c3e285bafd6ed3e4f62a5c3c76e
# Guard each cd: if a directory is missing, the old unguarded sequence would
# silently run `pip install -e .` against whatever directory we were still in.
cd LLaVA || exit 1
pip install -e .
cd ../EAGLE || exit 1
pip install -e .
cd ../lmms-eval || exit 1
pip install -e .

########## Generate training data
# Source datasets (text-only ShareGPT and multimodal LLaVA instruction data):
# /root/autodl-tmp/dataset_models/ShareGPT_Vicuna_unfiltered/ShareGPT_V3_unfiltered_cleaned_split.json
# /root/autodl-tmp/dataset_models/LLaVA-Instruct-150K/llava_v1_5_mix665k.json
## Format conversion for llava_v1_5_mix665k
## (presumably produces llava_v1_5_mix665k_fixed.json used below — TODO confirm)
python EAGLE/eagle/ge_data/convert.py
## Data-generation allocation — vision split (llava_v15_v); the text split
## (llava_v15_t) is launched by the next command block.
cd ~/projects/MSD/EAGLE/eagle/ge_data
# Earlier module-style invocation, kept for reference:
# python -m eagle.ge_data.allocation --outdir /root/autodl-tmp/dataset_models/ge_data_500 \
python allocation.py --outdir /root/autodl-tmp/dataset_models/ge_data_lmdb \
    --model_type llava_v15_v \
    --model /root/autodl-tmp/dataset_models/models/llava-v1.5-7b \
    --image_data_path /root/autodl-tmp/dataset_models/image_data \
    --json_data_path /root/autodl-tmp/dataset_models/LLaVA-Instruct-150K/llava_v1_5_mix665k_fixed.json
# (A stray lone ';' followed here; a bare ';' is a bash syntax error, so it was removed.)
# Text-only (ShareGPT) split; same outdir as the vision run above
# (outdir name suggests LMDB output — TODO confirm against allocation.py).
python allocation.py --outdir /root/autodl-tmp/dataset_models/ge_data_lmdb \
    --model_type llava_v15_t \
    --model /root/autodl-tmp/dataset_models/models/llava-v1.5-7b \
    --image_data_path /root/autodl-tmp/dataset_models/image_data \
    --json_data_path /root/autodl-tmp/dataset_models/ShareGPT_Vicuna_unfiltered/ShareGPT_V3_unfiltered_cleaned_split.json

# Single-worker variants, kept for reference:
# python ge_data_all_llava15.py --start=0 --end=100 --index=0 --gpu_index 0 --outdir /root/autodl-tmp/dataset_models/ge_data/llava_v15_t_0_999 --model /root/autodl-tmp/dataset_models/models/llava-v1.5-7b --image_data_path /root/autodl-tmp/dataset_models/image_data --json_data_path /root/autodl-tmp/dataset_models/ShareGPT_Vicuna_unfiltered/ShareGPT_V3_unfiltered_cleaned_split.json
# python ge_data_all_llava15_lmdb.py --start=0 --end=10000 --index=0 --gpu_index 0 --outdir /root/autodl-tmp/dataset_models/ge_data/llava_v15_t_0_999 --model /root/autodl-tmp/dataset_models/models/llava-v1.5-7b --image_data_path /root/autodl-tmp/dataset_models/image_data --json_data_path /root/autodl-tmp/dataset_models/ShareGPT_Vicuna_unfiltered/ShareGPT_V3_unfiltered_cleaned_split.json
## Actually executed command: python ge_data_all_llava15.py --start=0 --end=100 --index=0 --gpu_index 0 --outdir /root/autodl-tmp/dataset_models/ge_data/llava_v15_0_67999 --model /root/autodl-tmp/dataset_models/models/llava-v1.5-7b --image_data_path /root/autodl-tmp/dataset_models/image_data --json_data_path /root/autodl-tmp/dataset_models/LLaVA-Instruct-150K/llava_v1_5_mix665k_fixed.json

### Training
## Keep wandb offline (no network upload from the training box).
export WANDB_MODE=offline
cd ~/projects/MSD/EAGLE/eagle/train
# Single-GPU deepspeed launch. --tmpdir_v / --tmpdir_t point at the generated
# vision / text feature dumps (plain-directory variant, not LMDB).
deepspeed --master_port 29504 --include localhost:0 main_deepspeed.py \
    --deepspeed_config ds_config.json \
    --tmpdir_v /root/autodl-tmp/dataset_models/ge_data/llava_v15_v_0_99/ \
    --tmpdir_t /root/autodl-tmp/dataset_models/ge_data/llava_v15_t_0_99/ \
    --basepath /root/autodl-tmp/dataset_models/models/llava-v1.5-7b \
    --cpdir /root/autodl-tmp/dataset_models/models/MSD-LLaVA1.5-7B_test \
    --config llava_v15_7B_config.json
    # --tmpdir_t /root/autodl-tmp/dataset_models/ge_data/llava_v15_t_0_67/ \

# Same training launch, but reading features from the LMDB files produced by
# the allocation runs above (ge_data_lmdb/.../features.lmdb).
export WANDB_MODE=offline
cd EAGLE/eagle/train
deepspeed --master_port 29504 --include localhost:0 main_deepspeed.py \
    --deepspeed_config ds_config.json \
    --tmpdir_v /root/autodl-tmp/dataset_models/ge_data_lmdb/llava_v15_v_0_99/0/features.lmdb \
    --tmpdir_t /root/autodl-tmp/dataset_models/ge_data_lmdb/llava_v15_t_0_99/0/features.lmdb \
    --basepath /root/autodl-tmp/dataset_models/models/llava-v1.5-7b \
    --cpdir /root/autodl-tmp/dataset_models/models/MSD-LLaVA1.5-7B_test \
    --config llava_v15_7B_config.json

### Eval — lmms-eval on ChartQA with the MSD draft model enabled (--use_msd).
CUDA_VISIBLE_DEVICES=0 accelerate launch --num_processes=1 --main_process_port=29506 -m lmms_eval \
    --model llava_msd \
    --model_args pretrained="/root/autodl-tmp/dataset_models/models/llava-v1.5-7b" \
    --msd_model_path /root/autodl-tmp/dataset_models/models/MSD-LLaVA1.5-7B \
    --tasks chartqa \
    --batch_size 1 \
    --gen_kwargs temperature=0 \
    --use_msd
    # --dataset /root/autodl-tmp/modelscope/hub/datasets/lmms-lab/ChartQA

# Equivalent direct-python invocation (no accelerate), kept for reference:
python /root/projects/MSD/lmms-eval/lmms_eval/__main__.py --model llava_msd --model_args pretrained=/root/autodl-tmp/dataset_models/models/llava-v1.5-7b --msd_model_path /root/autodl-tmp/dataset_models/models/MSD-LLaVA1.5-7B --tasks chartqa --batch_size 1 --gen_kwargs temperature=0 --use_msd 

# IDE debugpy launch line (machine-specific paths/port), kept for reference:
cd /root/projects/MSD/lmms-eval/lmms_eval ; /usr/bin/env /root/miniconda3/envs/msd/bin/python /root/.trae-cn-server/extensions/ms-python.debugpy-2025.6.0/bundled/libs/debugpy/adapter/../../debugpy/launcher 56645 -- __main__.py --model llava_msd --model_args pretrained=/root/autodl-tmp/dataset_models/models/llava-v1.5-7b --msd_model_path /root/autodl-tmp/dataset_models/models/MSD-LLaVA1.5-7B --tasks chartqa --batch_size 1 --gen_kwargs temperature=0 --use_msd 

### Inference — interactive CLI demos.
# MSD checkpoint, 4-bit quantized load:
python -m llava.serve.cli \
    --model-path /root/autodl-tmp/dataset_models/models/MSD-LLaVA1.5-7B \
    --image-file "https://llava-vl.github.io/static/images/view.jpg" \
    --load-4bit

# Baseline LLaVA-1.5-7B for comparison:
python -m llava.serve.cli \
    --model-path /root/autodl-tmp/dataset_models/models/llava-v1.5-7b \
    --image-file "https://llava-vl.github.io/static/images/view.jpg" \
    --load-4bit

# One-shot (non-interactive) query via run_llava.py, pulling the HF model:
cd ~/projects/MSD/LLaVA
python llava/eval/run_llava.py --model-path liuhaotian/llava-v1.5-7b --image-file https://llava-vl.github.io/static/images/view.jpg --query "What are the things I should be cautious about when I visit here?"

