khanhdhq and mandar100 committed
Commit 662a39e (0 parents)

Duplicate from mandar100/chatbot_bloom3B


Co-authored-by: mandar <mandar100@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +96 -0
  4. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
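
These rules route the usual large binary formats (model weights, serialized arrays, archives, TensorBoard event files) through Git LFS rather than plain Git, as the Hub requires for large files. Tracking a further format takes a line of the same shape, e.g. (hypothetical pattern, not part of this commit):

*.gguf filter=lfs diff=lfs merge=lfs -text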
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Chatbot Bloom3B
+ emoji: 💻
+ colorFrom: gray
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 3.14.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: mandar100/chatbot_bloom3B
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
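
The YAML front matter is the Space's configuration: sdk and sdk_version select the Gradio 3.14.0 runtime, app_file names the script the Space executes, and duplicated_from records the source Space this one was copied from.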
app.py ADDED
@@ -0,0 +1,96 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import gradio as gr
+ import re
+
+
+ def cleaning_history_tuple(history):
+     # Flatten the chatbot history (a list of (user, assistant) tuples) into
+     # one newline-separated transcript, stripping newlines and the <p> tags
+     # Gradio may have wrapped around each message.
+     turns = list(sum(history, ()))
+     s2 = ""
+     for turn in turns:
+         turn = re.sub(r"\n|<p>|</p>", "", turn)
+         s2 = s2 + turn + "\n"
+     return s2
+
+
+ def ai_output(prompt_text, generated_text):
+     # Keep only the model's first assistant turn: drop the echoed prompt,
+     # then truncate at the first "A:" or "User" marker, i.e. the point
+     # where the model starts inventing the next turn of the dialogue.
+     continuation = generated_text[len(prompt_text):]
+     for marker in ("A:", "User"):
+         try:
+             return continuation[:continuation.index(marker)]
+         except ValueError:
+             continue
+     return continuation
+
+
+ model4 = AutoModelForCausalLM.from_pretrained("bigscience/bloom-3b")
+ tokenizer4 = AutoTokenizer.from_pretrained("bigscience/bloom-3b")
+
+
+ def predict(user_input, initial_prompt, temperature=0.7, top_p=1, top_k=5,
+             max_tokens=64, no_repeat_ngram_size=1, num_beams=6,
+             do_sample=True, history=None):
+     history = history or []  # avoid the shared-mutable-default pitfall
+
+     # Rebuild the running transcript and append the new user turn.
+     s = cleaning_history_tuple(history)
+     s = s + "\n" + "User: " + user_input + "\n" + "Assistant: "
+     s2 = initial_prompt + " " + s
+
+     input_ids = tokenizer4.encode(s2, return_tensors="pt")
+     # The sampling knobs arrive as strings from the Textbox components,
+     # hence the explicit casts; bool("False") is True, so do_sample is
+     # parsed from its text instead. num_beams > 1 together with
+     # do_sample=True gives beam-search multinomial sampling.
+     response = model4.generate(input_ids, min_length=10,
+                                max_new_tokens=int(max_tokens),
+                                top_k=int(top_k),
+                                top_p=float(top_p),
+                                temperature=float(temperature),
+                                no_repeat_ngram_size=int(no_repeat_ngram_size),
+                                num_beams=int(num_beams),
+                                do_sample=str(do_sample).strip().lower() == "true",
+                                )
+
+     response2 = tokenizer4.decode(response[0])
+     print("Response after decoding tokenizer: ", response2)
+     response3 = ai_output(s2, response2)
+
+     history.append(("User: " + user_input, "Assistant: " + response3))
+     return history, history
+
+
+ # Default system prompt for the UI. The original referenced an undefined
+ # name `prompt`, so a plausible default is assumed here.
+ prompt = "This is a conversation between a User and a helpful Assistant."
+
+ gr.Interface(inputs=[gr.Textbox(label="input", lines=1, value=""),
+                      gr.Textbox(label="initial_prompt", lines=1, value=prompt),
+                      gr.Textbox(label="temperature", lines=1, value="0.7"),
+                      gr.Textbox(label="top_p", lines=1, value="1"),
+                      gr.Textbox(label="top_k", lines=1, value="5"),
+                      gr.Textbox(label="max_tokens", lines=1, value="64"),
+                      gr.Textbox(label="no_repeat_ngram_size", lines=1, value="1"),
+                      gr.Textbox(label="num_beams", lines=1, value="6"),
+                      gr.Textbox(label="do_sample", lines=1, value="True"), 'state'],
+              # The loaded model is BLOOM-3b; the original title said "OPT-6.7B".
+              fn=predict, title="BLOOM-3b", outputs=["chatbot", 'state']
+              ).launch()
+
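
The prompt-assembly and trimming flow is easiest to see in isolation. Below is a minimal, self-contained sketch of the same steps with hypothetical example strings (not part of the commit): the history tuples are flattened into a transcript, the new turn is appended, and the model's continuation is cut back at the first next-turn marker.

import re

# Hypothetical one-turn state, in the (user, assistant) tuple form that
# Gradio's 'state' carries between calls.
history = [("User: hi", "Assistant: Hello! How can I help?")]

# Flatten, as cleaning_history_tuple does, then append the new user turn.
turns = [re.sub(r"\n|<p>|</p>", "", t) for pair in history for t in pair]
prompt = "\n".join(turns) + "\nUser: what is BLOOM?\nAssistant: "

# A continuation usually runs past one turn; ai_output keeps only the text
# before the first "A:" / "User" marker.
continuation = "BLOOM is an open multilingual language model.\nUser: thanks"
answer = continuation[:continuation.index("User")]
print(answer.strip())  # -> BLOOM is an open multilingual language model.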
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ gradio
+ transformers