Yiqin committed on
Commit ea7140b
1 Parent(s): e19b610

revert to origin

config/infer.yaml CHANGED
@@ -2,6 +2,7 @@ device: 'cuda'
 
 vicuna:
   model_path: '/home/user/app/vicuna-7b'
+  # model_path: '/mnt/petrelfs/wangyiqin/vid_cap/ChatVID_huggingface/vicuna-7b'
   device: 'cuda'
   num_gpus: 'auto'
   max_gpu_memory: '24Gib'
@@ -11,6 +12,7 @@ vicuna:
   max_new_tokens: 512
   debug: False
   output_path: '/home/user/app/vicuna_out.json'
+  # output_path: '/mnt/petrelfs/wangyiqin/vid_cap/ChatVID_huggingface/vicuna_out.json'
 
 vid2seq:
   enable: False
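For reference, the vicuna block above is plain YAML, so any caller can read the reverted values with PyYAML. The snippet below is a minimal sketch, not the project's actual loader; the file path and key names are taken from the diff header and hunks above.

import yaml

# Read the inference config; path comes from the diff header above.
with open("config/infer.yaml") as f:
    cfg = yaml.safe_load(f)

vicuna_cfg = cfg["vicuna"]
print(vicuna_cfg["model_path"])      # '/home/user/app/vicuna-7b'
print(vicuna_cfg["max_new_tokens"])  # 512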
model/fastchat/serve/compression.py CHANGED
@@ -28,7 +28,7 @@ class CLinear(nn.Module):
     def __init__(self, weight, bias, device):
         super().__init__()
 
-        self.weight = compress(weight.detach().to(device), default_compression_config)
+        self.weight = compress(weight.data.to(device), default_compression_config)
         self.bias = bias
 
     def forward(self, input: Tensor) -> Tensor:
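The only change in this hunk is reverting weight.detach() back to weight.data before compression. Both expressions yield a tensor that shares storage with the parameter but does not track gradients; .detach() is the current API, while .data is the legacy accessor that also bypasses autograd's in-place version checks. A standalone sketch in plain PyTorch (this is an illustration, not the repository's compression code):

import torch.nn as nn

layer = nn.Linear(4, 4)

a = layer.weight.detach()  # new tensor, shares storage, detached from autograd
b = layer.weight.data      # legacy accessor, same effect, skips version tracking

assert not a.requires_grad and not b.requires_grad
# Both alias the original storage, so .to(device), as used in
# compress(weight.data.to(device), ...), produces a fresh copy on that device.
print(a.data_ptr() == layer.weight.data_ptr())  # True
print(b.data_ptr() == layer.weight.data_ptr())  # True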
model/vision/ImageCaptioner.py CHANGED
@@ -15,6 +15,11 @@ class ImageCaptioner:
         self.model = Blip2ForConditionalGeneration.from_pretrained(
             "/home/user/app/pretrained_models/blip2-opt-2.7b",
             torch_dtype=self.data_type, device_map="auto")
+        # self.processor = Blip2Processor.from_pretrained(
+        #     "/mnt/petrelfs/wangyiqin/vid_cap/ChatVID_huggingface/pretrained_models/blip2-opt-2.7b")
+        # self.model = Blip2ForConditionalGeneration.from_pretrained(
+        #     "/mnt/petrelfs/wangyiqin/vid_cap/ChatVID_huggingface/pretrained_models/blip2-opt-2.7b",
+        #     torch_dtype=self.data_type, device_map="auto")
 
     def __call__(self, imgs):
         inputs = self.processor(
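The processor/model pair loaded here follows the standard transformers BLIP-2 captioning flow. Below is a minimal usage sketch; the public checkpoint ID "Salesforce/blip2-opt-2.7b" and the frame file name are assumptions for illustration, not values from this repository (which loads the weights from a local blip2-opt-2.7b directory instead).

import torch
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

# Standard BLIP-2 captioning: processor prepares pixel values, model generates text.
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16, device_map="auto")

image = Image.open("frame.jpg")  # hypothetical sampled video frame
inputs = processor(images=image, return_tensors="pt").to(model.device, torch.float16)
ids = model.generate(**inputs, max_new_tokens=30)
caption = processor.batch_decode(ids, skip_special_tokens=True)[0].strip()
print(caption)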