sahilverma0696 adxyz committed on
Commit
4088707
·
0 Parent(s):

Duplicate from adxyz/cquip

Browse files

Co-authored-by: Anthony Daniell <adxyz@users.noreply.huggingface.co>

Files changed (4) hide show
  1. .gitattributes +35 -0
  2. README.md +13 -0
  3. app.py +65 -0
  4. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Cquip
3
+ emoji: 📊
4
+ colorFrom: red
5
+ colorTo: yellow
6
+ sdk: gradio
7
+ sdk_version: 3.36.0
8
+ app_file: app.py
9
+ pinned: false
10
+ duplicated_from: adxyz/cquip
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from transformers import pipeline

# Text-generation pipeline that produces the Sheldon-style replies.
# max_new_tokens caps how long each generated continuation can get.
model = pipeline("text-generation", model="PygmalionAI/pygmalion-1.3b", max_new_tokens=80)
# Alternative model/pipeline configurations tried during development:
# model = pipeline("conversational", model="PygmalionAI/pygmalion-6b")
# model = pipeline("conversational", model="PygmalionAI/pygmalion-1.3b", max_new_tokens=40)
# model = pipeline("text-generation", model="PygmalionAI/pygmalion-2.7b")

# Static strings shown in the Gradio UI (title, HTML description, canned examples).
title = "Talk to Sheldon Cooper"
description = """
<img src="https://tse1.mm.bing.net/th?id=OIP.dvsVHZcwIf9k1tA4WUmc5gHaEK&pid=Api&rs=1&c=1&qlt=95&w=169&h=95" width=200px>
You can say things to Sheldon and get a Sheldonian quip in response.
"""
examples = [["Do you like comic books?"],["What is your favorite expression?"]]
def _build_full_prompt(prompt):
    """Assemble the persona-primed conversation prompt ending with the user's turn."""
    full_prompt = ""
    full_prompt += "Sheldon's persona: Sheldon is a narcissistic theoretical physicist. "
    full_prompt += "He lives in an apartment in the city of Pasadena in the state of California. "
    # full_prompt += "He loves comic books and hates sports. "
    # full_prompt += "His best friends are Penny and Leonard. "
    # full_prompt += "His favorite food is Chinese. "
    full_prompt += "His favorite expression is Bazinga!"
    # full_prompt += "\n<START>\n"
    full_prompt += "\n"
    # One seed exchange so the model continues the "You:/Sheldon:" turn format.
    full_prompt += "You: this looks like some serious work. Leonard did you do this?\n"
    full_prompt += "Sheldon: Actually, that is my work.\n"
    full_prompt = full_prompt + "You: " + prompt + "\n"
    return full_prompt


def _extract_reply(completion, prompt):
    """Pull the 'Sheldon:' line that directly follows the latest user turn.

    Returns a canned fallback quip when the generated text cannot be parsed.
    """
    # Default reply, used when the completion does not contain a parseable answer.
    output_sentence = "Sheldon: Bazinga-d!\n"

    parse_sentence = completion.split(sep="\n")
    user_turn = "You: " + prompt
    for i_sent, sentence in enumerate(parse_sentence):
        if not sentence:  # Blank line: treat the remainder as junk and stop.
            break
        print('parsed sentence number [ i_sent ]: ', i_sent)
        print(sentence)
        print('len of parsed sentence = ', len(sentence))
        if sentence == user_turn:
            print('inside parse_sentence-prompt comparison')
            # Check for overruns and that the next line really is Sheldon's reply.
            if i_sent + 1 < len(parse_sentence) and parse_sentence[i_sent + 1].startswith("Sheldon:"):
                output_sentence = parse_sentence[i_sent + 1]
            break
    return output_sentence


def predict(prompt):
    """Generate a Sheldon-style quip in response to the user's message.

    Builds a persona-primed prompt, runs the text-generation model, and
    extracts the "Sheldon:" line that follows the user's latest turn.

    Parameters
    ----------
    prompt : str
        The user's message, as typed into the Gradio text box.

    Returns
    -------
    str
        A single "Sheldon: ..." reply line (or a canned fallback quip).
    """
    full_prompt = _build_full_prompt(prompt)
    completion = model(full_prompt)[0]["generated_text"]

    print('prompt: ')
    print(prompt)
    print('full_prompt: ')
    print(full_prompt)
    print('full response: ')
    print(completion)
    # Guarded debug output: indexing prompt[0] / prompt[-1] raised IndexError
    # (and crashed the request) whenever the user submitted an empty prompt.
    if prompt:
        print('prompt[0] = ', prompt[0])
        print('prompt[-1] = ', prompt[-1])

    # Filter for the output from Sheldon right after latest prompt from user.
    return _extract_reply(completion, prompt)
# Wire the predict() function into a simple text-in / text-out Gradio app
# and start the web server (blocks here while the Space is serving).
iface = gr.Interface(fn=predict, inputs="text", outputs="text",
                     title=title, description=description,
                     examples=examples)
iface.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ torch
2
+ transformers