misbah1955 committed on
Commit
49c1d87
•
1 Parent(s): 6bc8e25

Delete app.py

Files changed (1)
  1. app.py +0 -145
app.py DELETED
@@ -1,145 +0,0 @@
- import os
- from threading import Thread
- from typing import Iterator
-
- import gradio as gr
- import spaces
- import torch
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
-
- MAX_MAX_NEW_TOKENS = 2048
- DEFAULT_MAX_NEW_TOKENS = 1024
- MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
-
- DESCRIPTION = """\
- # Llama-2 7B Chat
-
- This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, a Llama 2 model with 7B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
-
- 🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-
- 🔨 Looking for an even more powerful model? Check out the [13B version](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat) or the large [70B model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
- """
-
- LICENSE = """
- <p/>
-
- ---
- As a derivative work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta,
- this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md).
- """
-
- if not torch.cuda.is_available():
-     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
-
-
- if torch.cuda.is_available():
-     model_id = "meta-llama/Llama-2-7b-chat-hf"
-     model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
-     tokenizer = AutoTokenizer.from_pretrained(model_id)
-     tokenizer.use_default_system_prompt = False
-
-
- @spaces.GPU
- def generate(
-     message: str,
-     chat_history: list[tuple[str, str]],
-     system_prompt: str,
-     max_new_tokens: int = 1024,
-     temperature: float = 0.6,
-     top_p: float = 0.9,
-     top_k: int = 50,
-     repetition_penalty: float = 1.2,
- ) -> Iterator[str]:
-     conversation = []
-     if system_prompt:
-         conversation.append({"role": "system", "content": system_prompt})
-     for user, assistant in chat_history:
-         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-     conversation.append({"role": "user", "content": message})
-
-     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
-     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
-         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
-         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
-     input_ids = input_ids.to(model.device)
-
-     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
-     generate_kwargs = dict(
-         {"input_ids": input_ids},
-         streamer=streamer,
-         max_new_tokens=max_new_tokens,
-         do_sample=True,
-         top_p=top_p,
-         top_k=top_k,
-         temperature=temperature,
-         num_beams=1,
-         repetition_penalty=repetition_penalty,
-     )
-     t = Thread(target=model.generate, kwargs=generate_kwargs)
-     t.start()
-
-     outputs = []
-     for text in streamer:
-         outputs.append(text)
-         yield "".join(outputs)
-
-
- chat_interface = gr.ChatInterface(
-     fn=generate,
-     additional_inputs=[
-         gr.Textbox(label="System prompt", lines=6),
-         gr.Slider(
-             label="Max new tokens",
-             minimum=1,
-             maximum=MAX_MAX_NEW_TOKENS,
-             step=1,
-             value=DEFAULT_MAX_NEW_TOKENS,
-         ),
-         gr.Slider(
-             label="Temperature",
-             minimum=0.1,
-             maximum=4.0,
-             step=0.1,
-             value=0.6,
-         ),
-         gr.Slider(
-             label="Top-p (nucleus sampling)",
-             minimum=0.05,
-             maximum=1.0,
-             step=0.05,
-             value=0.9,
-         ),
-         gr.Slider(
-             label="Top-k",
-             minimum=1,
-             maximum=1000,
-             step=1,
-             value=50,
-         ),
-         gr.Slider(
-             label="Repetition penalty",
-             minimum=1.0,
-             maximum=2.0,
-             step=0.05,
-             value=1.2,
-         ),
-     ],
-     stop_btn=None,
-     examples=[
-         ["Hello there! How are you doing?"],
-         ["Can you explain briefly to me what is the Python programming language?"],
-         ["Explain the plot of Cinderella in a sentence."],
-         ["How many hours does it take a man to eat a Helicopter?"],
-         ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
-     ],
- )
-
- with gr.Blocks(css="style.css") as demo:
-     gr.Markdown(DESCRIPTION)
-     gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
-     chat_interface.render()
-     gr.Markdown(LICENSE)
-
- if __name__ == "__main__":
-     demo.queue(max_size=20).launch()