YaTharThShaRma999 committed
Commit 5efa561 (0 parents)

Duplicate from johnwick123forevr/Testtrial1

Files changed (4)
  1. .gitattributes +31 -0
  2. README.md +14 -0
  3. app.py +53 -0
  4. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,31 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: WizardLM
+ emoji: 😁
+ colorFrom: yellow
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: johnwick123forevr/Testtrial1
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,53 @@
+ import gradio as gr
+ import os
+ import wget
+ from llama_cpp import Llama
+ import random
+ url = 'https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGML/resolve/main/WizardLM-7B-uncensored.ggmlv3.q4_0.bin'
+ filename = wget.download(url)
+ llm2 = Llama(model_path=filename, seed=random.randint(1, 2**31))
+ theme = gr.themes.Soft(
+     primary_hue=gr.themes.Color("#ededed", "#fee2e2", "#fecaca", "#fca5a5", "#f87171", "#ef4444", "#dc2626", "#b91c1c", "#991b1b", "#7f1d1d", "#6c1e1e"),
+     neutral_hue="red",
+ )
+ title = """<h1 align="center">Chat with awesome WizardLM 7b model!</h1><br>"""
+ with gr.Blocks(theme=theme) as demo:
+     gr.HTML(title)
+     gr.HTML("This model is awesome for its size! It is only 20th the size of Chatgpt but is around 90% as good as Chatgpt. However, please don't rely on WizardLM to provide 100% true information as it might be wrong sometimes. ")
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox()
+     clear = gr.ClearButton([msg, chatbot])
+     #instruction = gr.Textbox(label="Instruction", placeholder=)
+
+     def user(user_message, history):
+         return gr.update(value="", interactive=True), history + [[user_message, None]]
+
+     def bot(history):
+         #instruction = history[-1][1] or ""
+         user_message = history[-1][0]
+         #token1 = llm.tokenize(b"### Instruction: ")
+         #token2 = llm.tokenize(instruction.encode())
+         #token3 = llm2.tokenize(b"USER: ")
+         tokens3 = llm2.tokenize(user_message.encode())
+         token4 = llm2.tokenize(b"\n\n### Response:")
+         tokens = tokens3 + token4
+         history[-1][1] = ""
+         count = 0
+         output = ""
+         for token in llm2.generate(tokens, top_k=50, top_p=0.73, temp=0.72, repeat_penalty=1.1):
+             text = llm2.detokenize([token])
+             output += text.decode()
+             count += 1
+             if count >= 500 or (token == llm2.token_eos()):
+                 break
+             history[-1][1] += text.decode()
+             yield history
+
+     response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+         bot, chatbot, chatbot
+     )
+     response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
+     gr.HTML("Thanks for checking out this app!")
+
+ demo.queue()
+ demo.launch(debug=True)
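
The heart of app.py is the token-streaming loop around llm2.generate. The sketch below isolates that pattern as a standalone script, assuming the same GGML model file is already present in the working directory; the prompt string and the model-path literal are illustrative and not part of this commit.

```python
# Minimal sketch of the streaming pattern used in app.py (not part of the commit).
from llama_cpp import Llama

# Assumes the GGML file that app.py downloads is already available locally.
llm = Llama(model_path="WizardLM-7B-uncensored.ggmlv3.q4_0.bin")

prompt = "Explain Git LFS in one sentence.\n\n### Response:"
tokens = llm.tokenize(prompt.encode())

generated = b""
count = 0
for token in llm.generate(tokens, top_k=50, top_p=0.73, temp=0.72, repeat_penalty=1.1):
    if token == llm.token_eos():          # stop at end-of-sequence
        break
    generated += llm.detokenize([token])  # keep raw bytes; decode once at the end
    count += 1
    if count >= 500:                      # same hard cap the Space uses
        break

print(generated.decode(errors="replace"))
```

In the Space itself, the same loop appends each decoded piece to history[-1][1] and yields the history back to Gradio, which is what makes the chatbot update incrementally while generation runs.
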
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ llama-cpp-python
+ wget
+ gradio
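
requirements.txt pulls in wget solely for the one-time model download at startup. As an aside, a hedged alternative (an assumption, not what this commit does) is to fetch the file with huggingface_hub, which resolves the same repo and filename referenced by the URL in app.py and reuses its local download cache:

```python
# Alternative model download (an assumption; this commit uses wget instead).
from huggingface_hub import hf_hub_download  # note: not listed in requirements.txt

model_path = hf_hub_download(
    repo_id="TheBloke/WizardLM-7B-uncensored-GGML",
    filename="WizardLM-7B-uncensored.ggmlv3.q4_0.bin",
)
# model_path can then be passed to Llama(model_path=...) exactly as in app.py.
```
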