# import packages
import os
import json
import argparse
from datetime import datetime

import torch
import torch.nn.functional as F
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM

# mamba_ssm and rwkv are heavyweight optional dependencies. They are imported
# lazily inside load_mamba()/load_rwkv() below, so a missing install only
# fails when that model type is actually requested.
RWKV4_TOKENIZER_FILE = "./support/20B_tokenizer.json"
def load_list_from_json(file_path):
    """
    Load a list of strings from a JSON file.

    :param file_path: Path of the JSON file to be loaded. The file is
        expected to contain a flat JSON array of strings.
    :return: List of strings loaded from the JSON file.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        return json.load(file)
def calculate_log_sum(logits, target_token_ids):
    """
    Sum the per-token negative log-likelihood of a tokenized sequence.

    :param logits: Tensor of shape (seq_len, vocab_size) for one sequence.
    :param target_token_ids: Tensor of shape (seq_len,) holding the token ids.
    :return: Total negative log-probability of tokens 1..seq_len-1, each
        predicted from the logits at the preceding position.
    """
    # Align predictions with targets: the logits at position i predict token i + 1.
    shifted_logits = logits[:-1, :]
    shifted_targets = target_token_ids[1:]
    log_probs = F.log_softmax(shifted_logits, dim=-1)
    # Negative log-probability of each realized target token.
    target_log_probs = -log_probs.gather(1, shifted_targets.unsqueeze(1)).squeeze()
    log_sum = torch.sum(target_log_probs, dim=-1)
    return log_sum.item()
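
# Sanity-check sketch (hypothetical toy tensors, not part of the pipeline): the
# value returned above should agree with torch's own summed cross-entropy over
# the shifted logits/targets.
#
#   logits = torch.randn(5, 100)           # (seq_len, vocab_size)
#   ids = torch.randint(0, 100, (5,))      # matching token ids
#   ref = F.cross_entropy(logits[:-1], ids[1:], reduction='sum').item()
#   assert abs(calculate_log_sum(logits, ids) - ref) < 1e-3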
def print_model_parameters_in_billions(model):
total_params = sum(p.numel() for p in model.parameters())
total_params_billion = total_params / 1e9
print(f"Model parameters: {total_params_billion:.3f} billion")
def make_log(data_dict, folder_path):
if not os.path.exists(folder_path):
try:
os.makedirs(folder_path)
print(f"Directory created at {folder_path}")
except Exception as e:
print(f"Error creating directory: {e}")
return
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
file_name = f"{timestamp}.json"
file_path = os.path.join(folder_path, file_name)
try:
with open(file_path, 'w') as file:
json.dump(data_dict, file, indent=4)
print(f"Dictionary saved successfully to {file_path}")
except Exception as e:
print(f"Error saving dictionary: {e}")
def load_rwkv(path):
os.environ['RWKV_JIT_ON'] = '1'
os.environ["RWKV_CUDA_ON"] = '1'
from rwkv.model import RWKV
from rwkv.utils import PIPELINE
rwkv_model = RWKV(model=path, strategy='cuda fp16')
rwkv_pipeline = PIPELINE(rwkv_model, r"rwkv_vocab_v20230424")
rwkv_tokenizer = rwkv_pipeline.tokenizer
return rwkv_model, rwkv_tokenizer
def load_rwkv4pile(path):
os.environ['RWKV_JIT_ON'] = '1'
os.environ["RWKV_CUDA_ON"] = '1'
from rwkv.model import RWKV
from rwkv.utils import PIPELINE
rwkv_model = RWKV(model=path, strategy='cuda fp16')
rwkv_pipeline = PIPELINE(rwkv_model, RWKV4_TOKENIZER_FILE)
rwkv_tokenizer = rwkv_pipeline.tokenizer
return rwkv_model, rwkv_tokenizer
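
# Both RWKV loaders use the 'cuda fp16' strategy; the rwkv package also accepts
# other strategy strings such as 'cpu fp32' (an alternative for GPU-less
# machines, not something this script exercises).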
def load_hf_model(path, cache_path):
    hf_tokenizer = AutoTokenizer.from_pretrained(path)
    # cache_dir=None falls back to the default Hugging Face cache, so one call
    # covers both the cached and uncached cases.
    hf_model = AutoModelForCausalLM.from_pretrained(path,
                                                    device_map="cuda",
                                                    trust_remote_code=True,
                                                    cache_dir=cache_path).eval()
    print_model_parameters_in_billions(hf_model)
    return hf_model, hf_tokenizer
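
# Usage sketch (hypothetical checkpoint): load_hf_model('gpt2', None) returns an
# eval-mode causal LM on CUDA plus its tokenizer; any Hub checkpoint with a
# causal-LM head should load the same way.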
def load_mamba(path):
    from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
    # Mamba checkpoints ship without a tokenizer; the reference models were
    # trained with the GPT-NeoX-20B tokenizer.
    mamba_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    mamba_model = MambaLMHeadModel.from_pretrained(path, device="cuda", dtype=torch.float16)
    # MambaLMHeadModel does not expose .device, so set it manually; eval_hf_model
    # relies on it when moving inputs with inputs.to(model.device).
    mamba_model.device = torch.device('cuda')
    print_model_parameters_in_billions(mamba_model)
    return mamba_model, mamba_tokenizer
def eval_rwkv(model, tokenizer, texts, chunk_size, v4pile=False):
    """Run an RWKV model over each text; return the logits of the last sample."""
    # The per-sample NLL accumulation (via calculate_log_sum) is disabled here;
    # this variant simply returns the raw logits for downstream use.
    for idx, sample in tqdm(enumerate(texts), total=len(texts)):
        with torch.no_grad():
            if v4pile:
                # The RWKV-4 Pile pipeline wraps a HF tokenizers object, whose
                # encode() returns an Encoding; the ids live on .ids.
                input_seq = tokenizer.encode(sample).ids
            else:
                input_seq = tokenizer.encode(sample)
            # Score only the first chunk_size tokens of the sample.
            input_chunk = input_seq[:chunk_size]
            logit = model.forward(input_chunk, None, full_output=True)[0]
            if len(input_chunk) == 1:
                # Single-token inputs come back without a sequence dimension.
                logit = logit.unsqueeze(0)
    return logit, input_chunk, tokenizer
def eval_hf_model(model, tokenizer, texts, chunk_size):
    """Run a Hugging Face (or Mamba) model over each text; return the logits of the last sample."""
    # As in eval_rwkv, the NLL accumulation is disabled; only raw logits are returned.
    for idx, sample in tqdm(enumerate(texts), total=len(texts)):
        with torch.no_grad():
            inputs = tokenizer(sample, return_tensors='pt')
            inputs = inputs.to(model.device)
            # Score only the first chunk_size tokens of the sample.
            input_chunk = inputs['input_ids'][:, :chunk_size]
            logit = model.forward(input_ids=input_chunk).logits[0, :, :]
    return logit, input_chunk, tokenizer
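
# Sketch of recovering the per-sample negative log-likelihood that the disabled
# accumulation code used to report (hf/mamba path assumed):
#
#   logit, input_chunk, _ = eval_hf_model(model, tokenizer, texts, chunk_size=1024)
#   nll = calculate_log_sum(logit, input_chunk.squeeze(0))
#   print(f'neg log prob of last sample: {nll:.2f}')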
# Original CLI entry point, kept for reference:
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('--model', type=str, required=True, help='model name or path')
# parser.add_argument('--model_type', choices=['hf', 'rwkv', 'mamba', 'rwkv4pile'], required=True, help='model type')
# parser.add_argument('--data', type=str, required=True, help='data path (json file)')
# parser.add_argument('--log_path', type=str, default='./logs/', help='log file path')
# parser.add_argument('--model_cache', type=str, help='hugging face model cache')
# parser.add_argument('--chunk_size', type=int, default=1024, help='chunk size')
def run_get_loss(args):
# args = parser.parse_args()
# load data
texts = load_list_from_json(args.data)
print(f'data size: {len(texts)}')
# load model
if args.model_type == 'hf':
        model, tokenizer = load_hf_model(args.model, args.model_cache)  # path serves as both model and tokenizer id
elif args.model_type == 'rwkv':
model, tokenizer = load_rwkv(args.model)
elif args.model_type == 'mamba':
model, tokenizer = load_mamba(args.model)
elif args.model_type == 'rwkv4pile':
model, tokenizer = load_rwkv4pile(args.model)
else:
raise NotImplementedError
# eval
if args.model_type in ['hf', 'mamba']:
return eval_hf_model(model=model, tokenizer=tokenizer, texts=texts, chunk_size=args.chunk_size)
elif args.model_type == 'rwkv':
return eval_rwkv(model=model, tokenizer=tokenizer, texts=texts, chunk_size=args.chunk_size)
elif args.model_type == 'rwkv4pile':
return eval_rwkv(model=model, tokenizer=tokenizer, texts=texts, chunk_size=args.chunk_size, v4pile=True)
else:
raise NotImplementedError
# results['model_name_or_path'] = args.model
# results['data_path'] = args.data
# results['chunk_size'] = args.chunk_size
# make_log(results, args.log_path)
# print(json.dumps(results, indent=4, ensure_ascii=False))
from types import SimpleNamespace

if __name__ == '__main__':
    # Minimal smoke test: score one chunk of each text in data.json with phi-2.
    args = SimpleNamespace(model='microsoft/phi-2', model_type='hf',
                           data='data.json', model_cache=None, chunk_size=1024)
    logit, input_chunk, tokenizer = run_get_loss(args)
    print(f'logits shape: {tuple(logit.shape)}')