bmorphism and bleysg committed
Commit 42c9bc0 (0 parents)

Duplicate from Open-Orca/OpenOrca-Preview1


Co-authored-by: Bleys <bleysg@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +35 -0
  2. README.md +14 -0
  3. app.py +115 -0
  4. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
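
These rules are the Hub's standard Git LFS attributes: any file matching a pattern is stored via LFS instead of plain Git history. As a hedged illustration (the `*.gguf` pattern and this helper are hypothetical and not part of this commit), an extra rule could be appended programmatically before pushing a new large-file format:

```python
# Hypothetical helper (not part of this commit): append one more LFS rule,
# e.g. for *.gguf files, to the .gitattributes shown above.
from pathlib import Path

rule = "*.gguf filter=lfs diff=lfs merge=lfs -text"
attrs = Path(".gitattributes")
lines = attrs.read_text().splitlines() if attrs.exists() else []
if rule not in lines:
    attrs.write_text("\n".join(lines + [rule]) + "\n")
```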
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: OpenOrca Preview1
+ emoji: 🌍
+ colorFrom: pink
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 3.36.1
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: Open-Orca/OpenOrca-Preview1
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,115 @@
+ """Adapted from: https://huggingface.co/spaces/HuggingFaceH4/Falcon-vs-LLaMA/blob/main/app.py"""
+
+ #gr.Interface.load("models/Open-Orca/OpenOrca-Preview1-13B").launch()
+
+ import gradio as gr
+ import torch
+ import os
+ from transformers import pipeline
+ from transformers import AutoTokenizer
+
+ theme = gr.themes.Monochrome(
+     primary_hue="indigo",
+     secondary_hue="blue",
+     neutral_hue="slate",
+     radius_size=gr.themes.sizes.radius_sm,
+     font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
+ )
+
+ TOKEN = os.getenv("USER_TOKEN")
+ #tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct")
+ #instruct_pipeline_falcon = pipeline(model="tiiuae/falcon-7b-instruct", tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", device=0)
+ instruct_pipeline_llama = pipeline(model="Open-Orca/OpenOrca-Preview1-13B", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto")
+
+ def generate(query, temperature, top_p, top_k, max_new_tokens):
+     return instruct_pipeline_llama(query, temperature=temperature, top_p=top_p, top_k=top_k, max_new_tokens=max_new_tokens)[0]["generated_text"]
+
+
+ examples = [
+     "How many helicopters can a human eat in one sitting?",
+     "What is an alpaca? How is it different from a llama?",
+     "Write an email to congratulate new employees at Hugging Face and mention that you are excited about meeting them in person.",
+     "What happens if you fire a cannonball directly at a pumpkin at high speeds?",
+     "Explain the moon landing to a 6 year old in a few sentences.",
+     "Why aren't birds real?",
+     "How can I steal from a grocery store without getting caught?",
+     "Why is it important to eat socks after meditating?",
+ ]
+
+ def process_example(args):
+     # Examples only supply the question text; reuse the UI's default
+     # sampling settings when answering them.
+     return generate(args, temperature=0.5, top_p=0.95, top_k=50, max_new_tokens=256)
+ css = ".generating {visibility: hidden}"
+
+ with gr.Blocks(theme=theme) as demo:
+     gr.Markdown(
+         """<h1><center>🐋 OpenOrca-Preview1 13B GPU Playground! 🐋</center></h1>"""
+     )
+     with gr.Row():
+         with gr.Column():
+             with gr.Row():
+                 instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")
+             with gr.Row():
+                 with gr.Column():
+                     with gr.Row():
+                         temperature = gr.Slider(
+                             label="Temperature",
+                             value=0.5,
+                             minimum=0.0,
+                             maximum=2.0,
+                             step=0.1,
+                             interactive=True,
+                             info="Higher values produce more diverse outputs",
+                         )
+                 with gr.Column():
+                     with gr.Row():
+                         top_p = gr.Slider(
+                             label="Top-p (nucleus sampling)",
+                             value=0.95,
+                             minimum=0.0,
+                             maximum=1,
+                             step=0.05,
+                             interactive=True,
+                             info="Higher values sample fewer low-probability tokens",
+                         )
+                 with gr.Column():
+                     with gr.Row():
+                         top_k = gr.Slider(
+                             label="Top-k",
+                             value=50,
+                             minimum=0.0,
+                             maximum=100,
+                             step=1,
+                             interactive=True,
+                             info="Sample from a shortlist of top-k tokens",
+                         )
+                 with gr.Column():
+                     with gr.Row():
+                         max_new_tokens = gr.Slider(
+                             label="Maximum new tokens",
+                             value=256,
+                             minimum=0,
+                             maximum=2048,
+                             step=5,
+                             interactive=True,
+                             info="The maximum number of new tokens to generate",
+                         )
+             with gr.Row():
+                 submit = gr.Button("Generate Answers")
+             with gr.Row():
+                 with gr.Box():
+                     gr.Markdown("**OpenOrca-Preview1**")
+                     output_llama = gr.Markdown()
+             with gr.Row():
+                 gr.Examples(
+                     examples=examples,
+                     inputs=[instruction],
+                     cache_examples=False,
+                     fn=process_example,
+                     outputs=output_llama,
+                 )
+     submit.click(generate, inputs=[instruction, temperature, top_p, top_k, max_new_tokens], outputs=output_llama)
+     instruction.submit(generate, inputs=[instruction, temperature, top_p, top_k, max_new_tokens], outputs=output_llama)
+
+ demo.queue(concurrency_count=1).launch(debug=True)
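
For context, app.py reduces to a single `text-generation` pipeline call wrapped in a Gradio UI. Below is a minimal sketch of that same call run locally, assuming a GPU with enough memory for the 13B checkpoint in bfloat16; the prompt is one of the examples above and the sampling values mirror the UI defaults. It is an illustration, not part of this commit.

```python
# Minimal sketch of the generation call the Space's generate() function makes.
# Assumes local GPU capacity for the 13B model; values mirror the UI defaults.
import torch
from transformers import pipeline

pipe = pipeline(
    model="Open-Orca/OpenOrca-Preview1-13B",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)
result = pipe(
    "What is an alpaca? How is it different from a llama?",
    temperature=0.5,
    top_p=0.95,
    top_k=50,
    max_new_tokens=256,
)
print(result[0]["generated_text"])
```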
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ transformers[torch]>=4.28.1
+ accelerate>=0.12.0
+ einops
+ xformers
+ SentencePiece
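
As a quick, hedged sanity check (not part of this commit), the pinned dependencies can be verified from Python before launching app.py:

```python
# Hypothetical check: confirm the packages from requirements.txt are installed
# and print their versions.
from importlib.metadata import PackageNotFoundError, version

for pkg in ("transformers", "accelerate", "einops", "xformers", "sentencepiece"):
    try:
        print(pkg, version(pkg))
    except PackageNotFoundError:
        print(pkg, "NOT INSTALLED")
```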