# ayfufugpp.py
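"""Unified multi-model text-generation service.

Loads several small GGUF-quantized models with llama-cpp-python, sends the same
prompt to all of them, and returns the concatenated replies through both a
FastAPI endpoint and a Gradio interface.
"""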
import asyncio
import os
import random

import gradio as gr
import spaces
import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from huggingface_hub import login
from llama_cpp import Llama
from pydantic import BaseModel
app = FastAPI()
load_dotenv()
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
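# Authenticate with the Hugging Face Hub (if a token is configured) so that
# the model downloads below can also access gated or private repositories.
if HUGGINGFACE_TOKEN:
    login(token=HUGGINGFACE_TOKEN)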
class ModelManager:
def __init__(self):
        self.models = self.load_models()
def load_models(self):
models = []
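        # Each entry points at a small GGUF quantization (Q2_K or IQ1_S) hosted
        # on the Hugging Face Hub; the filename selects the exact weights file.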
model_configs = [
{"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf"},
{"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf"},
{"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf"},
{"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf"},
{"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf"},
{"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf"},
{"repo_id": "Ffftdtd5dtft/codegemma-2b-IQ1_S-GGUF", "filename": "codegemma-2b-iq1_s-imat.gguf"},
{"repo_id": "Ffftdtd5dtft/Phi-3.5-mini-instruct-Q2_K-GGUF", "filename": "phi-3.5-mini-instruct-q2_k.gguf"},
{"repo_id": "Ffftdtd5dtft/TinyLlama-1.1B-Chat-v1.0-IQ1_S-GGUF", "filename": "tinyllama-1.1b-chat-v1.0-iq1_s-imat.gguf"},
{"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Minitron-8B-Base-IQ1_S-GGUF", "filename": "mistral-nemo-minitron-8b-base-iq1_s-imat.gguf"},
{"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf"}
]
        for config in model_configs:
            # Download the GGUF file (cached by huggingface_hub) and build a
            # Llama instance for it; authentication comes from the login() call
            # above rather than from a keyword argument.
            model = Llama.from_pretrained(repo_id=config['repo_id'], filename=config['filename'])
            models.append(model)
        return models
model_manager = ModelManager()
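# All models are downloaded and instantiated once at import time, so the first
# startup of the Space can take a while.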
class ChatRequest(BaseModel):
message: str
@spaces.GPU()
async def generate_combined_response(inputs):
    """Query every loaded model with the same prompt and concatenate the replies."""
    combined_response = ""
    # One random set of sampling parameters is drawn per request and shared by all models.
    top_p = round(random.uniform(0.01, 1.00), 2)
    top_k = random.randint(1, 100)
    temperature = round(random.uniform(0.01, 2.00), 2)
    # Llama calls are blocking and not awaitable on their own; wrap each one in
    # asyncio.to_thread so asyncio.gather can run them concurrently.
    tasks = [
        asyncio.to_thread(model, inputs, top_p=top_p, top_k=top_k, temperature=temperature)
        for model in model_manager.models
    ]
    responses = await asyncio.gather(*tasks)
    for response in responses:
        combined_response += response['choices'][0]['text'] + "\n"
    return combined_response
async def process_message(message):
inputs = message.strip()
combined_response = await generate_combined_response(inputs)
return combined_response
@app.post("/generate_multimodel")
async def api_generate_multimodel(request: ChatRequest):
    # FastAPI validates the JSON body against ChatRequest, i.e. {"message": "..."}.
    formatted_response = await process_message(request.message)
    return JSONResponse({"response": formatted_response})
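# Example client call (host and port are illustrative; a Space typically
# serves on port 7860):
#
#   import requests
#   r = requests.post("http://localhost:7860/generate_multimodel",
#                     json={"message": "Hello"})
#   print(r.json()["response"])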
iface = gr.Interface(
fn=process_message,
inputs=gr.Textbox(lines=2, placeholder="Enter your message here..."),
outputs=gr.Markdown(),
title="Unified Multi-Model API",
    description="Enter a message to get the combined responses of all loaded models."
)
if __name__ == "__main__":
    # Mount the Gradio UI on the FastAPI app so the /generate_multimodel
    # endpoint is served alongside it, then run both with Uvicorn on port 7860.
    app = gr.mount_gradio_app(app, iface, path="/")
    uvicorn.run(app, host="0.0.0.0", port=7860)