"""
noun generator module will take the string and return the nouns list in string.
"""
import os
import openai
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
openai.api_key = os.getenv('OPENAI_API_KEY')
def make_prompt() -> str:
    """
    make_prompt reads the blog/essay prompt template from disk and returns it.

    Return
    ------
    prompt: str
        prompt template used to generate a blog or essay.
    """
    file_path = "./prompt/blog_or_essay_prompt.txt"
    with open(file_path, "r", encoding="utf8") as file:
        prompt = file.read()
    return prompt
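

# A rough sketch of what ./prompt/blog_or_essay_prompt.txt might contain
# (this layout is an assumption; only the {TOPIC}, {WORDS}, {TYPE} and {LINKS}
# placeholders are actually required by generate_blog below):
#
#   Write a {TYPE} of roughly {WORDS} words on the topic "{TOPIC}".
#   Use the following sources where relevant: {LINKS}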
def generate_blog(topic=" ",
                  content_type=" ", link=" ", tone=" ",
                  length=500):
    """
    generate_blog takes topic, content type, link, tone and length as input
    and yields the generated output according to the prompt.

    Parameters
    ----------
    topic: str
        topic of the essay or blog.
    content_type: str
        essay or blog.
    link: str
        link of user sources.
    tone: str
        essay or blog tone.
    length: int
        approximate word count of the essay or blog.

    Return
    ------
    yields the generated blog or essay text as it streams in.
    """
    full_text = ""
    length = str(length)
    prompt = make_prompt()
    prompt = prompt.format(TOPIC=topic, WORDS=length,
                           TYPE=content_type,
                           LINKS=link)
    tone_prompt = f"tone should be {tone}"
    messages = [
        {
            "role": "system",
            "content": prompt
        },
        {
            "role": "user",
            "content": tone_prompt
        }
    ]
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=1,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        stream=True,
        stop=None
    )
    try:
        for chunk in response:
            # The final streamed chunk carries an empty delta, so default to ""
            # to avoid concatenating None onto the running text.
            chunk_message = chunk['choices'][0]['delta'].get("content", "")
            full_text = full_text + chunk_message
            yield full_text
    except Exception as error:
        print("OpenAI response (streaming) error: " + str(error))
        return 503
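

# Minimal usage sketch (an assumption, not part of the module itself): it
# requires OPENAI_API_KEY to be set and the prompt template file to exist.
# Each yielded value is the full text generated so far, so keeping the last
# one gives the complete blog or essay.
if __name__ == "__main__":
    final_text = ""
    for partial in generate_blog(topic="solar energy", content_type="blog",
                                 link="https://example.com", tone="informative",
                                 length=300):
        final_text = partial
    print(final_text)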