ynhe committed on
Commit
ec1c755
β€’
1 Parent(s): bc40b84

Update models/videochat.py

Browse files
Files changed (1) hide show
  1. models/videochat.py +12 -1
models/videochat.py CHANGED
@@ -1,6 +1,7 @@
1
  import os
2
  import random
3
- import logging
 
4
 
5
  import torch
6
  from torch.cuda.amp import autocast as autocast
@@ -11,6 +12,8 @@ from .modeling_llama import LlamaForCausalLM
11
  from transformers import LlamaTokenizer, LlamaConfig
12
 
13
 
 
 
14
  class VideoChat(Blip2Base):
15
  """
16
  VideoChat model.
@@ -113,6 +116,14 @@ class VideoChat(Blip2Base):
113
  self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False, use_auth_token=os.environ["HF_TOKEN"])
114
  self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
115
 
 
 
 
 
 
 
 
 
116
  if self.low_resource:
117
  self.llama_model = LlamaForCausalLM.from_pretrained(
118
  llama_model_path,
 
1
  import os
2
  import random
3
+ import
4
+ logging
5
 
6
  import torch
7
  from torch.cuda.amp import autocast as autocast
 
12
  from transformers import LlamaTokenizer, LlamaConfig
13
 
14
 
15
+
16
+
17
  class VideoChat(Blip2Base):
18
  """
19
  VideoChat model.
 
116
  self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False, use_auth_token=os.environ["HF_TOKEN"])
117
  self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
118
 
119
+ import psutil
120
+ import os
121
+ print(u'ε½“ε‰θΏ›η¨‹ηš„ε†…ε­˜δ½Ώη”¨οΌš%.4f GB' % (psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024) )
122
+ info = psutil.virtual_memory()
123
+ print( u'η”΅θ„‘ζ€»ε†…ε­˜οΌš%.4f GB' % (info.total / 1024 / 1024 / 1024) )
124
+ print(u'ε½“ε‰δ½Ώη”¨ηš„ζ€»ε†…ε­˜ε ζ―”οΌš',info.percent)
125
+ print(u'cpuδΈͺζ•°οΌš',psutil.cpu_count())
126
+
127
  if self.low_resource:
128
  self.llama_model = LlamaForCausalLM.from_pretrained(
129
  llama_model_path,