Update TextGen/router.py
TextGen/router.py  +29 -5  CHANGED
@@ -1,6 +1,6 @@
 import os
 import time
-from
+from langchain_core.pydantic_v1 import BaseModel, Field
 from fastapi import FastAPI, HTTPException, Query, Request
 from fastapi.responses import FileResponse
 from fastapi.middleware.cors import CORSMiddleware
@@ -17,6 +17,23 @@ from TextGen import app
 from gradio_client import Client
 from typing import List
 
+class PlayLastMusic(BaseModel):
+    '''plays the lastest created music '''
+    Desicion: str = Field(
+        ..., description="Yes or No"
+    )
+class CreateLyrics(BaseModel):
+    f'''create some Lyrics for a new music'''
+    Desicion: str = Field(
+        ..., description="Yes or No"
+    )
+class CreateNewMusic(BaseModel):
+    f'''create a new music with the Lyrics previously computed'''
+    Name: str = Field(
+        ..., description="tags to describe the new music"
+    )
+
+
 class Message(BaseModel):
     npc: str | None = None
     messages: List[str] | None = None
@@ -63,12 +80,19 @@ def generate_text(messages: List[str], npc:str):
     print(new_messages)
     # Initialize the LLM
     llm = ChatGoogleGenerativeAI(
-        model="gemini-pro",
-        max_output_tokens=
+        model="gemini-1.5-pro-latest",
+        max_output_tokens=300,
+        temperature=1,
         safety_settings={
-
-
+            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
+            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
+            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
+            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE
+        },
     )
+    if npc=="bard":
+        llm = llm.bind_tools([PlayLastMusic,CreateNewMusic,CreateLyrics])
+
     llm_response = llm.invoke(new_messages)
     print(llm_response)
     return Generate(text=llm_response.content)
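
The new hunk references `ChatGoogleGenerativeAI`, `HarmCategory`, and `HarmBlockThreshold`, whose imports sit in the unchanged part of router.py and are not visible in this diff. The following is a minimal, self-contained sketch of the same model configuration; the import path for the safety enums is an assumption (langchain_google_genai re-exports them), not something this commit shows.

```python
# Sketch only: the LLM configuration from this commit, reproduced in isolation.
# Assumption: HarmCategory / HarmBlockThreshold come from langchain_google_genai;
# the real router.py may import them elsewhere (those lines are outside the diff).
from langchain_google_genai import ChatGoogleGenerativeAI, HarmBlockThreshold, HarmCategory

llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro-latest",
    max_output_tokens=300,   # cap each reply at 300 tokens
    temperature=1,           # fairly creative sampling for NPC dialogue
    safety_settings={
        # disable Gemini's content filters for all four harm categories
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
    },
)
print(llm.invoke("Introduce yourself in one sentence.").content)  # needs GOOGLE_API_KEY
```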
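When `npc == "bard"`, the commit binds the three Pydantic schemas as Gemini function-calling tools. A tool-calling reply typically carries the decision in `tool_calls` rather than in `content`, so `Generate(text=llm_response.content)` can come back with empty text for the bard. Below is a hedged sketch of how a caller could recover which tool the model picked; the routing logic is illustrative, not part of this commit, and it reuses the `llm` from the sketch above together with the schema classes added here.

```python
# Sketch only: reading the bard's tool choice back out of the AIMessage.
bard_llm = llm.bind_tools([PlayLastMusic, CreateNewMusic, CreateLyrics])

llm_response = bard_llm.invoke(["You are a tavern bard.", "Play your newest song!"])

if llm_response.tool_calls:                   # standard AIMessage.tool_calls list
    call = llm_response.tool_calls[0]         # {"name": ..., "args": {...}, "id": ...}
    if call["name"] == "PlayLastMusic":
        pass                                  # e.g. replay the latest generated track
    elif call["name"] == "CreateNewMusic":
        tags = call["args"].get("Name", "")   # "tags to describe the new music"
    elif call["name"] == "CreateLyrics":
        pass                                  # e.g. ask the LLM for lyrics next
else:
    reply_text = llm_response.content         # no tool chosen: plain chat answer
```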