typesdigital committed on
Commit
f2ff438
1 Parent(s): 606b3d2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -9
app.py CHANGED
@@ -3,20 +3,23 @@ os.environ['REPLICATE_API_TOKEN'] = "<REDACTED — a live API token was committed here; revoke it immediately and load it from an environment variable or secrets store instead of hard-coding it>"
3
 
4
  import replicate
5
 
6
- #Prompts
7
  pre_prompt = "You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
8
- prompt_input = "What is streamlit"
9
 
10
- #Generate LLM response
11
- output = replicate.run('a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5', #LLM model
12
- input={"prompt": f"{pre_prompt} {prompt_input} Assistant: ", #Prompts
13
- "temperature":0.1, "top_p":0.9, "max_length":124, "repetition_penalty":1}) #Model parameter
14
-
15
- return output
 
 
 
16
 
17
  full_response = ''
18
 
19
  for item in output:
20
- full_response += item
21
 
22
  print(full_response)
 
3
 
4
  import replicate
5
 
6
+ # Prompts
7
  pre_prompt = "You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
8
+ prompt_input = "What is Hugging Face"
9
 
10
+ # Generate LLM response
11
+ output = replicate.run('huggingface/llama-base-125M', # LLM model
12
+ input={
13
+ "prompt": f"{pre_prompt} {prompt_input} Assistant: ", # Prompts
14
+ "temperature": 0.1,
15
+ "top_p": 0.9,
16
+ "max_length": 124,
17
+ "repetition_penalty": 1
18
+ }) # Model parameters
19
 
20
  full_response = ''
21
 
22
  for item in output:
23
+ full_response += item
24
 
25
  print(full_response)