script52 committed
Commit 11b54c5 · verified · 1 Parent(s): 70ec114

Update app.py

Files changed (1):
  1. app.py +181 -47
app.py CHANGED
@@ -1,69 +1,203 @@
+import os
 import gradio as gr
-from huggingface_hub import InferenceClient
+from huggingface_hub import hf_hub_download
+from llama_cpp import Llama
+
+MODEL_REPO = "Jackrong/Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled-GGUF"
+
+# Try the more practical quant files first
+MODEL_CANDIDATES = [
+    "Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled-Q4_K_M.gguf",
+    "Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled-Q4_K_S.gguf",
+    "Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled-Q3_K_M.gguf",
+    "Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled-Q2_K.gguf",
+]
+
+# Loaded lazily on the first request and cached for the process lifetime
+llm = None
+loaded_model_file = None
+
+
+def download_first_available_model(token: str | None):
+    last_error = None
+
+    for filename in MODEL_CANDIDATES:
+        try:
+            model_path = hf_hub_download(
+                repo_id=MODEL_REPO,
+                filename=filename,
+                token=token,
+            )
+            return model_path, filename
+        except Exception as e:
+            last_error = e
+
+    raise RuntimeError(
+        "No suitable GGUF file could be downloaded. "
+        f"Files tried: {', '.join(MODEL_CANDIDATES)}. "
+        f"Last error: {last_error}"
+    )
+
+
+def build_model(model_path: str):
+    cpu_count = os.cpu_count() or 2
+
+    # More conservative settings for a CPU Space
+    n_threads = max(1, min(8, cpu_count))
+
+    return Llama(
+        model_path=model_path,
+        n_ctx=4096,
+        n_threads=n_threads,
+        n_batch=128,
+        n_gpu_layers=0,
+        verbose=False,
+    )
+
+
+def get_model(hf_token: gr.OAuthToken | None):
+    global llm, loaded_model_file
+
+    if llm is not None:
+        return llm
+
+    token = hf_token.token if hf_token is not None else None
+
+    model_path, filename = download_first_available_model(token)
+    llm = build_model(model_path)
+    loaded_model_file = filename
+    return llm
+
+
+def normalize_history(history):
+    # Accept both messages-style dicts and legacy (user, assistant) tuples
+    messages = []
+
+    for item in history or []:
+        if isinstance(item, dict):
+            role = item.get("role")
+            content = item.get("content", "")
+            if role in ("user", "assistant", "system"):
+                messages.append({"role": role, "content": str(content)})
+        elif isinstance(item, (list, tuple)) and len(item) == 2:
+            user_msg, assistant_msg = item
+            if user_msg:
+                messages.append({"role": "user", "content": str(user_msg)})
+            if assistant_msg:
+                messages.append({"role": "assistant", "content": str(assistant_msg)})
+
+    return messages
 
 
 def respond(
     message,
-    history: list[dict[str, str]],
+    history,
     system_message,
     max_tokens,
     temperature,
     top_p,
-    hf_token: gr.OAuthToken,
+    hf_token: gr.OAuthToken | None,
 ):
-    """
-    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-    """
-    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-    messages = [{"role": "system", "content": system_message}]
-    messages.extend(history)
-    messages.append({"role": "user", "content": message})
+    global loaded_model_file
+
+    try:
+        model = get_model(hf_token)
+    except Exception as e:
+        yield (
+            "The model could not be loaded.\n\n"
+            f"Error: {e}\n\n"
+            "Possible causes:\n"
+            "- The Space does not have enough RAM\n"
+            "- The GGUF filename has changed\n"
+            "- The model requires an authorized Hugging Face account\n"
+            "- llama-cpp-python is not installed correctly in this environment"
+        )
+        return
+
+    messages = [{"role": "system", "content": str(system_message)}]
+    messages.extend(normalize_history(history))
+    messages.append({"role": "user", "content": str(message)})
 
     response = ""
+    header = f"[Model: {loaded_model_file}]\n\n"
+
+    try:
+        stream = model.create_chat_completion(
+            messages=messages,
+            max_tokens=int(max_tokens),
+            temperature=float(temperature),
+            top_p=float(top_p),
+            stream=True,
+        )
+
+        for chunk in stream:
+            # Stream chunks are dicts; pull the delta text if present
+            token = ""
+            choices = chunk.get("choices", [])
+            if choices:
+                delta = choices[0].get("delta", {})
+                token = delta.get("content", "") or ""
+
+            if token:
+                response += token
+                yield header + response
+
+        if not response:
+            yield header + "(The model produced no response.)"
+
+    except Exception as e:
+        partial = header + response if response else header
+        yield (
+            partial
+            + "\n\nAn error occurred during generation.\n"
+            f"Error: {e}\n\n"
+            "Try a lower max_tokens value or a smaller quant file."
+        )
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-chatbot = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
 
 with gr.Blocks() as demo:
+    gr.Markdown("# GGUF Chat Demo (Fallback)")
+
     with gr.Sidebar():
         gr.LoginButton()
-    chatbot.render()
+        gr.Markdown(
+            "You may need to log in if the model is private or gated. "
+            "A suitable GGUF file is picked automatically."
+        )
+
+    chatbot = gr.ChatInterface(
+        fn=respond,
+        additional_inputs=[
+            gr.Textbox(
+                value="You are a friendly Chatbot.",
+                label="System message",
+            ),
+            gr.Slider(
+                minimum=1,
+                maximum=1024,
+                value=256,
+                step=1,
+                label="Max new tokens",
+            ),
+            gr.Slider(
+                minimum=0.1,
+                maximum=1.5,
+                value=0.7,
+                step=0.1,
+                label="Temperature",
+            ),
+            gr.Slider(
+                minimum=0.1,
+                maximum=1.0,
+                value=0.9,
+                step=0.05,
+                label="Top-p",
+            ),
+        ],
+    )
+    chatbot.render()
 
 if __name__ == "__main__":
     demo.launch()
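
The commit touches only app.py, but the new imports must also be available in the Space environment. A minimal requirements.txt sketch, with no version pins (the actual dependency file is not part of this commit, so treat it as an assumption):

    gradio
    huggingface_hub
    llama-cpp-python

Note that pip builds llama-cpp-python from source when no prebuilt wheel matches the environment, so the first build on a CPU Space can take a while.

Because respond is a plain generator, it can also be smoke-tested outside Gradio. A sketch, assuming app.py is importable, the model repo is publicly downloadable (hf_token=None), and the machine has enough RAM for the chosen quant; the file name smoke_test.py is hypothetical and not part of the commit:

    # smoke_test.py (hypothetical helper, not part of the commit)
    from app import respond

    last = ""
    # respond yields the accumulated reply after each streamed token
    for partial in respond(
        message="Say hello in one sentence.",
        history=[],
        system_message="You are a friendly Chatbot.",
        max_tokens=64,
        temperature=0.7,
        top_p=0.9,
        hf_token=None,
    ):
        last = partial

    # Final text, prefixed with "[Model: <gguf filename>]"
    print(last)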