tomer-shimshi committed
Commit aa81271
1 Parent(s): c4203ca

Upload finale_project_Rav_talk.py

Files changed (1):
  finale_project_Rav_talk.py +6 -6
finale_project_Rav_talk.py CHANGED

@@ -25,7 +25,7 @@ quant_config = BitsAndBytesConfig(
 ### Load Base Model ###
 #######################
 
-base_model_name = "tomer-shimshi/llama2-Rav" #os.path.join(os.getcwd() ,"results_fine_tune_after_shulhan_aruch_no_heb_V3\llama-2")
+base_model_name = "tomer-shimshi/llama2-Rav" #"results_fine_tune_after_shulhan_aruch_no_heb_V4\llama-2" #"tomer-shimshi/llama2-Rav" #os.path.join(os.getcwd() ,"results_fine_tune_after_shulhan_aruch_no_heb_V3\llama-2")
 model = AutoModelForCausalLM.from_pretrained(
     base_model_name,
     quantization_config=quant_config,
@@ -74,7 +74,7 @@ def formatting_prompts_func(examples):
 
 # Replace this with the actual output from your LLM application
 #for i in range(len(test_dataset)):
-question = input('Please enter a question for the Rav \n Enter empty string to quite \n')
+question = input('Please enter a question for the Rav \n Enter empty string to quit \n')
 while len(question)>1:
     ##question = item['question']#test_dataset['quastion'][i]
 
@@ -85,19 +85,19 @@ while len(question)>1:
         #eos_token_id=EOS_TOKEN,
         repetition_penalty = 2.0,
         do_sample = True,
-        max_new_tokens = 400,
-        #top_k=10,
+        max_new_tokens = 600,
+        top_k=10,
         #num_return_sequences=1,
 
     )
     model_prompt = alpaca_prompt.format( question, "")
 
     result = pipe(model_prompt)
-    actual_output = result[0]['generated_text'].split("### Answer:")[1]
+    actual_output = result[0]['generated_text'].split("### Answer:")[1].replace('/r','').replace('\n','')
 
     #append_dict_to_csv(save_dict, save_path_csv_path)
     print(f"The Rav answer is {actual_output} \n \n")
-    question = input('Please enter a question for the Rav \n Enter empty string to quite \n')#item['question']#test_dataset['quastion'][i]
+    question = input('Please enter a question for the Rav \n Enter empty string to quit \n')#item['question']#test_dataset['quastion'][i]
 #"We offer a 30-day full refund at no extra cost."
 
 
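For context, the hunks above reference names defined elsewhere in the script: quant_config, tokenizer, pipe, and alpaca_prompt. Below is a minimal sketch of how those pieces plausibly fit together after this commit, using the values from the + side of the diff. The quantization settings, the pipeline arguments beyond those shown, and the exact prompt template are assumptions (only the "### Answer:" marker is visible in the hunks). Note also that the committed .replace('/r','') looks like a typo for '\r'; the sketch uses '\r'.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

# 4-bit quantization config; the diff only shows the name quant_config,
# so these particular settings are an assumption.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

base_model_name = "tomer-shimshi/llama2-Rav"
model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    quantization_config=quant_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Prompt template assumed from the "### Answer:" split below; the real
# alpaca_prompt is defined outside the shown hunks.
alpaca_prompt = """### Question:
{}

### Answer:
{}"""

# Generation settings taken from the + side of the third hunk.
pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    repetition_penalty=2.0,
    do_sample=True,
    max_new_tokens=600,
    top_k=10,
)

question = input('Please enter a question for the Rav \n Enter empty string to quit \n')
while len(question) > 1:
    model_prompt = alpaca_prompt.format(question, "")
    result = pipe(model_prompt)
    # Keep only the text after the answer marker; '\r' stands in for the
    # committed '/r', which is presumably a typo.
    actual_output = (
        result[0]['generated_text']
        .split("### Answer:")[1]
        .replace('\r', '')
        .replace('\n', '')
    )
    print(f"The Rav answer is {actual_output} \n \n")
    question = input('Please enter a question for the Rav \n Enter empty string to quit \n')

In effect, the commit trades longer, more varied answers (max_new_tokens raised from 400 to 600, top_k=10 uncommented) for more compute per reply, and the added .replace calls flatten the generated answer to a single line for printing.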