tonic committed on
Commit
065cadb
1 Parent(s): 16dd36b

initial commit

Browse files

trying this new model out using spaces !

Files changed (3) hide show
  1. README.md +2 -2
  2. app.py +61 -0
  3. requirements.txt +5 -0
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
  title: Genstruct
3
- emoji: 👀
4
  colorFrom: yellow
5
  colorTo: pink
6
  sdk: gradio
7
  sdk_version: 4.21.0
8
  app_file: app.py
9
- pinned: false
10
  license: mit
11
  ---
12
 
 
1
  ---
2
  title: Genstruct
3
+ emoji: 🧬📏💪🏻
4
  colorFrom: yellow
5
  colorTo: pink
6
  sdk: gradio
7
  sdk_version: 4.21.0
8
  app_file: app.py
9
+ pinned: true
10
  license: mit
11
  ---
12
 
app.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import spaces
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch
from gradio_rich_textbox import RichTextbox

# Markdown header rendered at the top of the Gradio app.
title = """# Welcome to 🌟Tonic's🧬📏💪🏻Genstruct 7B !
🧬📏💪🏻[Genstruct 7B](https://huggingface.co/NousResearch/Genstruct-7B) is an instruction-generation model, designed to create valid instructions given a raw text corpus. This enables the creation of new, partially synthetic instruction finetuning datasets from any raw-text corpus. You can build with this endpoint using🧬📏💪🏻[Genstruct 7B](https://huggingface.co/NousResearch/Genstruct-7B) available here : [NousResearch/Genstruct-7B](https://huggingface.co/NousResearch/Genstruct-7B). You can also use ✨StarCoder by cloning this space. Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/starcoder2?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) Math 🔍 [introspector](https://huggingface.co/introspector) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [SciTonic](https://github.com/Tonic-AI/multitonic)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
"""

# Default example corpus shown in the "WordPhrases" textbox.
# Bug fix: the original assigned this with a plain double-quoted string that
# itself contained unescaped double quotes ("texturized", "dilute"), which is a
# SyntaxError; the inner quotes are now escaped.
examplecofee = "A cortado is a Spanish beverage consisting of espresso mixed with a roughly equal amount of warm milk to reduce the acidity,[1][2] although the exact ratios have considerable regional variation.[3] The milk in a cortado is steamed, but not frothy and \"texturized\" as in many Italian coffee drinks.[4] The cortado is commonly served all over Spain.[5] The word cortado is the past participle of the Spanish verb cortar (to cut), in the sense of \"dilute\", and can refer variously to either coffee or espresso drinks throughout Spanish and Portuguese speaking countries."

model_path = "NousResearch/Genstruct-7B"

tokenizer = AutoTokenizer.from_pretrained(model_path)
# Passing load_in_8bit directly to from_pretrained is deprecated in recent
# transformers releases; the supported form is a BitsAndBytesConfig (the
# original file already had this line sketched out in a comment).
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map='cuda', quantization_config=quantization_config)
23
+
24
@spaces.GPU
def generate_text(prompt, custom_prompt, temperature, max_length):
    """Generate an instruction with Genstruct from a (title, content) pair.

    Args:
        prompt: Raw-text corpus from the "WordPhrases" textbox (the *content*).
        custom_prompt: Short title from the "Title" textbox (the *title*).
        temperature: Sampling temperature from the UI slider.
        max_length: Maximum number of new tokens to generate.

    Returns:
        The decoded generation (prompt text included) as a string.
    """
    # Bug fix: the click handler passes the corpus textbox as `prompt` and the
    # title textbox as `custom_prompt`, but the original code mapped them the
    # other way round ('title': prompt, 'content': custom_prompt).
    msg = [{
        'title': custom_prompt,
        'content': prompt
    }]

    prompt_text = f"title: {msg[0]['title']}\ncontent: {msg[0]['content']}"
    inputs = tokenizer(prompt_text, return_tensors='pt').to('cuda')
    # do_sample=True is required for `temperature` to have any effect;
    # without it, generate() decodes greedily and ignores the slider.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length, do_sample=True, temperature=temperature, pad_token_id=tokenizer.eos_token_id)
    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)

    return generated_text
38
+
39
+
40
def gradio_app():
    """Build the Gradio Blocks UI for Genstruct 7B and launch the server."""
    with gr.Blocks() as demo:
        gr.Markdown(title)
        # "WordPhrases" holds the raw-text corpus; "Title" is the short topic.
        prompt = gr.Textbox(label="WordPhrases", value=examplecofee, lines=5)
        custom_prompt = gr.Textbox(label="Title", value="Cortado", lines=1)
        with gr.Row():
            temperature = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.5, label="Temperature")
            max_length = gr.Slider(minimum=250, maximum=1024, step=10, value=450, label="Generate Length")
        generate_btn = gr.Button("Try 🧬📏💪🏻 Genstruct")
        # Bug fix: gradio has no gr.RichTextbox — the original raised
        # AttributeError at startup. Use the RichTextbox component imported
        # from gradio_rich_textbox (which the file imports but never used).
        # NOTE(review): the `lines=20` kwarg was dropped — confirm whether the
        # custom component accepts it before re-adding.
        output = RichTextbox(label="🧬📏💪🏻Genstruct 7B:")

        generate_btn.click(
            fn=generate_text,
            inputs=[prompt, custom_prompt, temperature, max_length],
            outputs=output
        )

    demo.launch()
58
+
59
# Script entry point: build the UI and start the Gradio server.
if __name__ == "__main__":
    gradio_app()
61
+
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ torch
2
+ transformers
3
+ accelerate
4
+ bitsandbytes
5
+ gradio_rich_textbox