Spaces:
Sleeping
Sleeping
File size: 1,584 Bytes
51a45e9 2b330e8 17aa59f e226db7 17aa59f caa5775 51a45e9 e226db7 17aa59f e226db7 caa5775 e226db7 2b330e8 17aa59f 2b330e8 17aa59f 2b330e8 17aa59f e226db7 17aa59f ba86ee1 2b330e8 e226db7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 |
from flask import Flask
from flask import request
from groq import Groq
import google.generativeai
import os
# Flask application instance; routes below are registered against it.
app = Flask(__name__)
# Groq API client, authenticated via the GROQ_API_KEY environment variable.
# NOTE(review): os.environ.get returns None when unset — the client is still
# constructed and fails only on the first API call.
groq_client = Groq(
api_key=os.environ.get("GROQ_API_KEY")
)
# Configure the Google Generative AI SDK globally; GenerativeModel instances
# created later (see the /api/google/generate handler) pick up this key.
google.generativeai.configure(
api_key=os.environ.get("GEMINI_API_KEY"))
@app.route("/api/groq/generate", methods=['POST'])
def groq_completion():
    """
    Proxy a chat-completion request to the Groq API.

    Expected JSON body:
    {
        "model": "llama3-70b-8192",
        "prompt": "why is the sky blue?"
    }

    Returns:
        The Groq chat completion serialized via ``to_dict()`` on success,
        or a ``({"error": ...}, 400)`` pair when the body is missing,
        not JSON, or lacks the required keys.
    """
    # silent=True yields None on a missing/invalid JSON body instead of
    # raising, so we can return a clean 400 rather than a 500.
    message = request.get_json(silent=True)
    if not message or 'model' not in message or 'prompt' not in message:
        return {"error": "JSON body with 'model' and 'prompt' is required"}, 400
    chat_completion = groq_client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": message['prompt'],
            }
        ],
        model=message['model'],
    )
    return chat_completion.to_dict()
@app.route("/api/google/generate", methods=['POST'])
def google_completion():
    """
    Proxy a generate-content request to the Google Gemini API.

    Expected JSON body:
    {
        "model": "gemini-1.5-flash",
        "prompt": "why is the sky blue?"
    }

    Returns:
        The Gemini response serialized via ``to_dict()`` on success,
        or a ``({"error": ...}, 400)`` pair when the body is missing,
        not JSON, or lacks the required keys.
    """
    # silent=True yields None on a missing/invalid JSON body instead of
    # raising, so we can return a clean 400 rather than a 500.
    message = request.get_json(silent=True)
    if not message or 'model' not in message or 'prompt' not in message:
        return {"error": "JSON body with 'model' and 'prompt' is required"}, 400
    # Model instances are cheap to construct; the API key was configured
    # globally at module load via google.generativeai.configure().
    llm_model = google.generativeai.GenerativeModel(message['model'])
    chat_completion = llm_model.generate_content(message['prompt'])
    return chat_completion.to_dict()
# curl -v -X POST 'https://robinroy03-fury-bot.hf.space/api/groq/generate' --header 'Content-Type: application/json' --data '{"model": "llama3-70b-8192", "prompt": "why is sky blue?"}'
# curl -v -X POST 'http://127.0.0.1:8000/api/google/generate' --header 'Content-Type: application/json' --data '{"model": "gemini-1.5-flash", "prompt": "why is sky blue?"}'
|