janny127 committed
Commit 6c00582
1 Parent(s): 31e18fd

Create app.py

Files changed (1)
  1. app.py +50 -0
app.py ADDED
@@ -0,0 +1,50 @@
+ from transformers import pipeline, AutoTokenizer
+ import gradio as gr
+ import torch
+
+ # Model repository on the Hugging Face Hub
+ model = "janny127/autotrain-5e45b-p5z66"
+ tokenizer = AutoTokenizer.from_pretrained(model)
+
+ # Text-generation pipeline (half precision, automatic device placement)
+ pipe = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     torch_dtype=torch.float16,
+     device_map="auto",
+ )
+
+ def predict(prompt, history):
+     # Format the prompt with the template the model was fine-tuned on
+     formatted_prompt = f"### Human: {prompt}### Assistant:"
+
+     # Generate the text
+     sequences = pipe(
+         formatted_prompt,
+         do_sample=True,
+         top_k=50,
+         top_p=0.7,
+         num_return_sequences=1,
+         repetition_penalty=1.1,
+         max_new_tokens=500,
+     )
+     generated_text = sequences[0]['generated_text']
+
+     # Keep only the assistant's reply, dropping any follow-up turns
+     final_result = generated_text.split("### Assistant:")[1]
+     if " Human: " in final_result:
+         final_result = final_result.split(" Human: ")[0]
+     if " #" in final_result:
+         final_result = final_result.split(" #")[0]
+
+     return final_result.strip()
+
+ # Launch the chat web interface
+ gr.ChatInterface(
+     predict,
+     title="Tinyllama_chatBot",
+     description="Ask Tiny Llama any questions",
+     examples=['How to cook a fish?', 'Who is the president of the US now?'],
+ ).launch()