Update app.py
Browse files
app.py
CHANGED
@@ -11,11 +11,17 @@ model_id = "TheBloke/Yi-34B-200K-Llamafied-GPTQ"
|
|
11 |
|
12 |
gptq_config = GPTQConfig( bits=4, exllama_config={"version": 2})
|
13 |
tokenizer = YiTokenizer.from_pretrained("./")
|
14 |
-
model = AutoModelForCausalLM.from_pretrained( model_id, device_map="
|
15 |
|
16 |
def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
|
17 |
prompt = get_prompt(message, chat_history)
|
18 |
input_ids = tokenizer.encode(prompt, return_tensors='pt')
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
input_ids = input_ids.to(model.device)
|
20 |
response_ids = model.generate(
|
21 |
input_ids,
|
|
|
# --- Module-level model/tokenizer setup (runs once at import) ---

# 4-bit GPTQ quantization; exllama v2 kernels are selected for inference speed.
gptq_config = GPTQConfig(bits=4, exllama_config={"version": 2})

# Tokenizer files are expected next to this script ("./").
tokenizer = YiTokenizer.from_pretrained("./")

# NOTE(review): trust_remote_code=True executes Python shipped in the model
# repo — acceptable only because model_id is pinned above; re-check if the
# repo reference ever changes.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",          # let accelerate shard/place layers automatically
    torch_dtype="auto",         # take the dtype recorded in the checkpoint config
    trust_remote_code=True,
    quantization_config=gptq_config,
)
|
15 |
|
16 |
def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
|
17 |
prompt = get_prompt(message, chat_history)
|
18 |
input_ids = tokenizer.encode(prompt, return_tensors='pt')
|
19 |
+
|
20 |
+
print("Input IDs:", input_ids) # Debug print
|
21 |
+
print("Input IDs shape:", input_ids.shape) # Debug print
|
22 |
+
if input_ids.shape[1] == 0:
|
23 |
+
raise ValueError("The input is empty after tokenization.")
|
24 |
+
|
25 |
input_ids = input_ids.to(model.device)
|
26 |
response_ids = model.generate(
|
27 |
input_ids,
|