Yhhxhfh committed on
Commit e4815ea
1 Parent(s): 04d2414

Create app.py

Files changed (1)
  1. app.py +117 -0
app.py ADDED
@@ -0,0 +1,117 @@
from pydantic import BaseModel
from llama_cpp import Llama
import os
import gradio as gr
from dotenv import load_dotenv
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import hf_hub_download
import spaces
import asyncio
import random
from transformers import pipeline

app = FastAPI()
load_dotenv()

HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

summarizer = pipeline("summarization")

class ModelManager:
    def __init__(self):
        # Parameters passed to the llama.cpp constructor.
        self.model_params = {
            "n_ctx": 2048,
            "n_batch": 512,
            "n_threads": int(os.cpu_count() * 0.75),
            "seed": -1,
        }
        # Parameters reused for every generation call.
        self.generate_params = {
            "repeat_penalty": 1.1,
            "stop": ["</s>"],
        }
        self.unified_model = self.load_unified_model()

    def load_unified_model(self):
        model_configs = [
            {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf", "name": "GPT-2 XL"},
            {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-8B Instruct"},
            {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf", "name": "Gemma 2-9B IT"},
            {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf", "name": "Gemma 2-27B"},
            {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf", "name": "Phi-3 Mini 128K Instruct"},
            {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-q2_k.gguf", "name": "Meta Llama 3.1-8B"},
            {"repo_id": "Ffftdtd5dtft/Qwen2-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-7b-instruct-q2_k.gguf", "name": "Qwen2 7B Instruct"},
            {"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf", "name": "Starcoder2 3B"},
            {"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf", "name": "Qwen2 1.5B Instruct"},
            {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-q2_k.gguf", "name": "Meta Llama 3.1-70B"},
            {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf", "name": "Mistral Nemo Instruct 2407"},
            {"repo_id": "Ffftdtd5dtft/Hermes-3-Llama-3.1-8B-IQ1_S-GGUF", "filename": "hermes-3-llama-3.1-8b-iq1_s-imat.gguf", "name": "Hermes 3 Llama 3.1-8B"},
            {"repo_id": "Ffftdtd5dtft/Phi-3.5-mini-instruct-Q2_K-GGUF", "filename": "phi-3.5-mini-instruct-q2_k.gguf", "name": "Phi 3.5 Mini Instruct"},
            {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-70B Instruct"},
            {"repo_id": "Ffftdtd5dtft/codegemma-2b-IQ1_S-GGUF", "filename": "codegemma-2b-iq1_s-imat.gguf", "name": "Codegemma 2B"},
            {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-IQ2_XXS-GGUF", "filename": "phi-3-mini-128k-instruct-iq2_xxs-imat.gguf", "name": "Phi 3 Mini 128K Instruct XXS"},
            {"repo_id": "Ffftdtd5dtft/TinyLlama-1.1B-Chat-v1.0-IQ1_S-GGUF", "filename": "tinyllama-1.1b-chat-v1.0-iq1_s-imat.gguf", "name": "TinyLlama 1.1B Chat"},
            {"repo_id": "Ffftdtd5dtft/Mistral-NeMo-Minitron-8B-Base-IQ1_S-GGUF", "filename": "mistral-nemo-minitron-8b-base-iq1_s-imat.gguf", "name": "Mistral NeMo Minitron 8B Base"},
            {"repo_id": "eren23/DistiLabelOrca-TinyLLama-1.1B", "filename": "distilabelorca-tinyllama-1.1b.gguf", "name": "DistiLabelOrca-TinyLLama-1.1B"},
            {"repo_id": "DevQuasar/Llama-3.2-1B-GGUF", "filename": "Llama-3.2-1B.Q2_K.gguf", "name": "Llama-3.2-1B-GGUF"},
        ]

        models = []
        for config in model_configs:
            # Download the GGUF file from the Hub; llama.cpp needs a file on disk,
            # not an in-memory buffer.
            model_path = hf_hub_download(
                repo_id=config["repo_id"],
                filename=config["filename"],
                token=HUGGINGFACE_TOKEN,
            )
            models.append(Llama(model_path=model_path, **self.model_params))

        # Only the first loaded model is used as the "unified" model.
        return models[0]

model_manager = ModelManager()

class ChatRequest(BaseModel):
    message: str

@spaces.GPU()
async def generate_streaming_response(inputs):
    # Randomize the sampling settings for each request.
    top_p = round(random.uniform(0.01, 1.00), 2)
    top_k = random.randint(1, 100)
    temperature = round(random.uniform(0.01, 2.00), 2)
    # tokenize() expects bytes; leave room for the prompt inside the context window.
    prompt_tokens = model_manager.unified_model.tokenize(inputs.encode("utf-8"))
    max_tokens = model_manager.model_params["n_ctx"] - len(prompt_tokens)

    async def stream_response():
        # Run the blocking llama.cpp call in a worker thread.
        response = await asyncio.to_thread(
            model_manager.unified_model, inputs,
            top_p=top_p, top_k=top_k, temperature=temperature,
            max_tokens=max_tokens, **model_manager.generate_params,
        )
        full_text = response["choices"][0]["text"]

        # Yield the completion in chunks so it can be streamed to the client.
        if len(full_text) > max_tokens:
            chunks = [full_text[i:i + max_tokens] for i in range(0, len(full_text), max_tokens)]
            for chunk in chunks:
                yield chunk
        else:
            yield full_text

    return StreamingResponse(stream_response())

async def process_message(message):
    inputs = message.strip()
    return await generate_streaming_response(inputs)

@app.post("/generate_multimodel")
async def api_generate_multimodel(request: Request):
    data = await request.json()
    message = data["message"]
    return await process_message(message)
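
For reference, a minimal client-side sketch of how this endpoint could be called once the FastAPI app is being served. The route and JSON shape come from the code above; the host and port are assumptions and would need to match the actual deployment:

import requests

# Assumes the FastAPI app is reachable locally on port 7860; adjust as needed.
resp = requests.post(
    "http://localhost:7860/generate_multimodel",
    json={"message": "Write a haiku about GPUs"},
    stream=True,
)
for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
    print(chunk, end="", flush=True)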

iface = gr.Interface(
    fn=process_message,
    inputs=gr.Textbox(lines=2, placeholder="Enter your message here..."),
    outputs=gr.Markdown(),
    title="Unified Multi-Model API",
    description="Enter a message to get responses from a unified model.",
)

if __name__ == "__main__":
    iface.launch()
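
Note that `iface.launch()` starts only the Gradio UI; the FastAPI routes registered on `app` are not served by it. A minimal sketch of one way to expose both together, assuming Gradio's `mount_gradio_app` helper and uvicorn (neither appears in the original file):

import uvicorn

# Hypothetical alternative entry point: mount the Gradio UI onto the FastAPI app
# so that both the UI and the /generate_multimodel route are served by uvicorn.
app = gr.mount_gradio_app(app, iface, path="/")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)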