awinml committed on
Commit
c4789a1
1 Parent(s): 3d309a7
Files changed (2) hide show
  1. app.py +45 -0
  2. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import urllib.request
3
+ import gradio as gr
4
+ from llama_cpp import Llama
5
+
6
def download_file(file_link, filename):
    """Fetch *file_link* into local *filename*, skipping the download if it already exists."""
    # Guard clause: the cached-model case returns immediately without touching the network.
    if os.path.isfile(filename):
        print("File already exists.")
        return
    urllib.request.urlretrieve(file_link, filename)
    print("File downloaded successfully.")
13
+
14
# Downloading the GGML model from HuggingFace (quantized Vicuna-7B, q4_1 variant per the URL).
ggml_model_path = "https://huggingface.co/CRD716/ggml-vicuna-1.1-quantized/resolve/main/ggml-vicuna-7b-1.1-q4_1.bin"
filename = "ggml-vicuna-7b-1.1-q4_1.bin"

download_file(ggml_model_path, filename)


# Load the local GGML file into llama.cpp. n_ctx/n_batch are llama-cpp-python
# options (context window / prompt batch size — verify against the pinned 0.1.62 API).
llm = Llama(model_path=filename, n_ctx=512, n_batch=126)
22
+
23
def generate_text(prompt):
    """Run *prompt* through the module-level Llama model and return the completion text."""
    # Low temperature/top_p keep sampling near-greedy; generation stops at "#".
    completion = llm(
        prompt,
        max_tokens=256,
        temperature=0.1,
        top_p=0.5,
        echo=False,
        stop=["#"],
    )
    return completion["choices"][0]["text"]
27
+
28
# UI label and sample question/answer pairs shown in the Gradio demo.
description = "Vicuna-7B"

examples = [
    [
        "What is the capital of France? ",
        "The capital of France is Paris.",
    ],
    [
        "Who wrote the novel 'Pride and Prejudice'?",
        "The novel 'Pride and Prejudice' was written by Jane Austen.",
    ],
    [
        "What is the square root of 64?",
        "The square root of 64 is 8.",
    ],
]
35
+
36
# Build and serve the text-in/text-out demo UI.
# Fix: the keyword arguments were missing a comma between `examples=examples`
# and `title=...` (a SyntaxError); also pass the otherwise-unused `description`
# constant through to the interface.
gradio_interface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    examples=examples,
    title="Vicuna-7B",
    description=description,
)
gradio_interface.launch()
44
+
45
+
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ llama-cpp-python==0.1.62