TETSU0701 committed on
Commit
0fc23c1
·
verified ·
1 Parent(s): 2afd961

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -49,7 +49,7 @@ def load_models():
49
 
50
  # 2. Load text generation model
51
  # llm_model_name = "Qwen/Qwen3-0.6B"
52
- llm_model_name = "meta-llama/Meta-Llama-3-8B"
53
  tokenizer = AutoTokenizer.from_pretrained(llm_model_name)
54
  llm_model = AutoModelForCausalLM.from_pretrained(
55
  llm_model_name,
@@ -145,7 +145,7 @@ def generate_response(message, chat_history, analysis_results):
145
  model_inputs = tokenizer([text], return_tensors="pt").to(llm_model.device)
146
  generated_ids = llm_model.generate(
147
  **model_inputs,
148
- max_new_tokens=256,
149
  do_sample=True,
150
  temperature=0.7,
151
  )
 
49
 
50
  # 2. Load text generation model
51
  # llm_model_name = "Qwen/Qwen3-0.6B"
52
+ llm_model_name = "Qwen/QwQ-32B"
53
  tokenizer = AutoTokenizer.from_pretrained(llm_model_name)
54
  llm_model = AutoModelForCausalLM.from_pretrained(
55
  llm_model_name,
 
145
  model_inputs = tokenizer([text], return_tensors="pt").to(llm_model.device)
146
  generated_ids = llm_model.generate(
147
  **model_inputs,
148
+ max_new_tokens=32768,
149
  do_sample=True,
150
  temperature=0.7,
151
  )