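"""Hugging Face Space that queries a set of quantized GGUF models through llama.cpp
and returns their concatenated outputs via a Gradio UI and a FastAPI JSON endpoint."""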
from pydantic import BaseModel
from llama_cpp import Llama
import os
import gradio as gr
from dotenv import load_dotenv
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
import spaces
import asyncio
import random
app = FastAPI()
load_dotenv()
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
class ModelManager:
    """Downloads and holds every GGUF model used to build the combined response."""

    def __init__(self):
        self.models = self.load_models()

    def load_models(self):
        models = []
        model_configs = [
            {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf"},
            {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf"},
            {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf"},
            {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf"},
            {"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf"},
            {"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf"},
            {"repo_id": "Ffftdtd5dtft/codegemma-2b-IQ1_S-GGUF", "filename": "codegemma-2b-iq1_s-imat.gguf"},
            {"repo_id": "Ffftdtd5dtft/Phi-3.5-mini-instruct-Q2_K-GGUF", "filename": "phi-3.5-mini-instruct-q2_k.gguf"},
            {"repo_id": "Ffftdtd5dtft/TinyLlama-1.1B-Chat-v1.0-IQ1_S-GGUF", "filename": "tinyllama-1.1b-chat-v1.0-iq1_s-imat.gguf"},
            {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Minitron-8B-Base-IQ1_S-GGUF", "filename": "mistral-nemo-minitron-8b-base-iq1_s-imat.gguf"},
            {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf"},
        ]
        for config in model_configs:
            # Download (or reuse a cached copy of) each quantized model from the Hub.
            model = Llama.from_pretrained(repo_id=config["repo_id"], filename=config["filename"], use_auth_token=HUGGINGFACE_TOKEN)
            models.append(model)
        return models
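# Load every model once at startup so all requests share the same instances.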
model_manager = ModelManager()
class ChatRequest(BaseModel):
    message: str
@spaces.GPU()
async def generate_combined_response(inputs):
    """Query every loaded model with the same prompt and concatenate their outputs."""
    combined_response = ""
    # Sample one random decoding configuration shared by all models for this request.
    top_p = round(random.uniform(0.01, 1.00), 2)
    top_k = random.randint(1, 100)
    temperature = round(random.uniform(0.01, 2.00), 2)
    # llama_cpp model calls are blocking, so run them in worker threads and
    # await them concurrently with asyncio.gather.
    tasks = [
        asyncio.to_thread(model, inputs, top_p=top_p, top_k=top_k, temperature=temperature)
        for model in model_manager.models
    ]
    responses = await asyncio.gather(*tasks)
    for response in responses:
        combined_response += response["choices"][0]["text"] + "\n"
    return combined_response
async def process_message(message):
    inputs = message.strip()
    combined_response = await generate_combined_response(inputs)
    return combined_response
@app.post("/generate_multimodel")
async def api_generate_multimodel(request: Request):
data = await request.json()
message = data["message"]
formatted_response = await process_message(message)
return JSONResponse({"response": formatted_response})
iface = gr.Interface(
    fn=process_message,
    inputs=gr.Textbox(lines=2, placeholder="Enter your message here..."),
    outputs=gr.Markdown(),
    title="Unified Multi-Model API",
    description="Enter a message to get responses from a unified model.",
)
if __name__ == "__main__":
iface.launch()
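
# Note: iface.launch() serves only the Gradio UI; the /generate_multimodel FastAPI
# route above is not exposed by it. A minimal sketch for serving both together
# (assumes uvicorn is installed and port 7860 is free):
#
#   import uvicorn
#   app = gr.mount_gradio_app(app, iface, path="/")
#   uvicorn.run(app, host="0.0.0.0", port=7860)
#
# The JSON endpoint could then be exercised with, e.g.:
#   curl -X POST http://localhost:7860/generate_multimodel \
#        -H "Content-Type: application/json" -d '{"message": "Hello"}'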