Update main.py
main.py
CHANGED
@@ -368,9 +368,11 @@ def get_desc_llm(desc):
 
     # find my description:
     check_desc=f'''
-    Description:
+    Description: you are part of a group which can be described as '{desc}'
     Thought: What do i know about myself?
-    Show observations and Action.
+    Show detailed observations and Action.
+    Thought: What i don't know about myself?
+    Don't entire show verbose, don't mention anything additional
     '''
 
     completion = openai.ChatCompletion.create(
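For illustration only (not part of the commit): with the new template, the check_desc f-string renders like this for a sample group description. The value of desc below is hypothetical.

    # Hypothetical rendering of the updated check_desc prompt; `desc` is a made-up sample.
    desc = "Retired men who enjoy gardening"
    check_desc = f'''
    Description: you are part of a group which can be described as '{desc}'
    Thought: What do i know about myself?
    Show detailed observations and Action.
    Thought: What i don't know about myself?
    Don't entire show verbose, don't mention anything additional
    '''
    print(check_desc)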
@@ -379,7 +381,10 @@ def get_desc_llm(desc):
         messages=[
             {"role": "system", "content": "You are an expert at finding the context fromt the description"},
             {"role": "user", "content": "You are part of a group which can be described as 'Married women with 2 children'"},
-            {"role": "assistant", "content":
+            {"role": "assistant", "content": '''i am a women hence my gender is female, i have 2 children not sure about their age and gender, my relationship status is married.
+            I know about:
+            my gender, children, relationship status only
+            '''},
             {"role": "user", "content": check_desc }
         ]
     )
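A minimal sketch of how the reply from this kind of chat call is typically read back, assuming the pre-1.0 openai SDK that openai.ChatCompletion.create implies; the commit itself does not show this step, and the model name is taken from the other calls in the file. The messages are quoted from the hunk above.

    import openai

    # Sketch only: issue the same style of chat call and read the reply text
    # and token usage from the response object (pre-1.0 openai SDK access style).
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are an expert at finding the context fromt the description"},
            {"role": "user", "content": "You are part of a group which can be described as 'Married women with 2 children'"},
        ],
    )
    print(completion["choices"][0]["message"]["content"])
    print("tokens used:", completion["usage"]["total_tokens"])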
@@ -430,6 +435,20 @@ def filter_questions_chatgpt(questions: dict, decs: str):
     """
     # Set up the chat conversation with OpenAI Chat API
 
+    # prompt = f'''You are Tan. You are described as "{decs}".
+    # Find the relavent questions to you from below list of questions based on your description
+    # {questions}
+    # Return their IDs in below format:
+    # ```["53","39", ...]```
+    # Don't add anything to the response other than above format.
+    # '''
+
+
+    # Return their IDs in below format:
+    # ```["53","39", ...]```
+
+    # Don't add anything to the response other than above format.
+
     prompt = f'''
     Questions:
     {questions}
@@ -441,7 +460,7 @@ def filter_questions_chatgpt(questions: dict, decs: str):
         model="gpt-3.5-turbo",
         temperature=0.3,
         messages=[
-            {"role": "system", "content": "You are an expert at selecting relavent questions from the list of
+            {"role": "system", "content": "You are an expert at selecting relavent questions from the list of qestions for which answers are available in description and returning in list format"},
             {"role": "user", "content": prompt}
         ]
     )
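The commented-out prompt and the new system message both ask the model to return the selected question IDs as a bracketed list such as ["53","39", ...]. A hedged sketch of parsing that reply shape; the helper name and the backtick-fence handling are assumptions, not part of the commit.

    import json

    def parse_id_list(reply: str) -> list:
        # Hypothetical helper (not in the commit): extract a ["53","39", ...]
        # style ID list from a model reply, tolerating ``` fences around it.
        cleaned = reply.strip().strip("`")
        start, end = cleaned.find("["), cleaned.rfind("]")
        if start == -1 or end == -1:
            return []
        return [str(x) for x in json.loads(cleaned[start:end + 1])]

    print(parse_id_list('```["53","39"]```'))  # -> ['53', '39']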
@@ -454,18 +473,22 @@ def filter_questions_chatgpt(questions: dict, decs: str):
 
 def get_answers_llm(cont:str,q_json:dict):
     prompt=f'''
-    You will be provided with a list of
-    If the answer is not available, please respond with "NaN" in the JSON format for that question.
+    You will be provided with a list of questions in JSON format and should respond to all questions based on the observation provided.
     If you are not sure about the answer, please respond with "NeC" in the JSON format for that question.
-
-
+    If the answer is not available, please respond with "NaN" in the JSON format for that question.
+    Context:
+    {cont}
+    Questions:
+    {q_json}
+    Before answering the questions, please make sure you have read the context and questions carefully.
+    check if the answer is available in the context, if not, please remove that question from the JSON response.
     '''
     print(prompt)
     completion = openai.ChatCompletion.create(
-        model="gpt-3.5-turbo
+        model="gpt-3.5-turbo",
         temperature=0.4,
         messages=[
-            {"role": "system", "content": "You are an expert at answering
+            {"role": "system", "content": "You are an expert at answering questions based on the context, observation and action"},
             {"role": "user", "content": "what is your age?"},
             {"role": "assistant", "content": "50"},
             {"role": "user", "content": prompt}
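The rewritten prompt now embeds the context and the question list directly and defines two sentinels: "NaN" when an answer is absent and "NeC" when the model is unsure. A sketch of the kind of q_json input and reply this implies; the exact schema is not pinned down by the commit, so the shapes and sample values below are assumptions (the id 632 example is borrowed from the revalidation prompt later in the file).

    # Assumed shapes only, inferred from the prompt wording in this hunk.
    q_json = {
        "questions": [
            {"id": "632", "question": "What is your relationship status?"},
            {"id": "999", "question": "What is your favourite brand of car?"},  # hypothetical entry
        ]
    }

    # A reply that follows the prompt's conventions could look like this:
    reply = {
        "questions": [
            {"id": "632", "question": "What is your relationship status?", "answer": "Married"},
            {"id": "999", "question": "What is your favourite brand of car?", "answer": "NaN"},  # not in context
        ]
    }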
@@ -480,9 +503,9 @@ def revalidate_final_response_gpt(context:str,
     )->dict:
 
     prompt=f'''
-    You will be provided with a list of
-    If the
-    If
+    You will be provided with a list of questions in JSON format and should select right answer from the answer_options strictly based on the context provided.
+    If the answer is not available, remove that question from the JSON response.
+    If answer is similar to "No", "None", "None of the above","I don't" remove that question from the JSON response.
 
     Context:{context}
     Questions:{json_response}
@@ -495,14 +518,16 @@ def revalidate_final_response_gpt(context:str,
     {{"id": "632", "question": "What is your relationship status?", "answer": "Single, never married"}},
     ]
     }}
-
+
+    Before answering the questions, please make sure you have read the context and questions carefully.
+    check if the answer is available in the context, if not, please remove that question from the JSON response.
     '''
     print(prompt)
 
     response = openai.Completion.create(
         model="text-davinci-003",
         prompt=prompt,
-        temperature=0.
+        temperature=0.0,
         max_tokens=2000,
         top_p=1,
         frequency_penalty=0,
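Pinning temperature to 0.0 makes this validation pass as deterministic as the API allows. A minimal sketch of how the text reply from a call like this is read back and parsed into JSON, assuming the same pre-1.0 openai SDK; the placeholder prompt string and the fallback on unparsable output are assumptions for illustration.

    import json
    import openai

    # Sketch only: a deterministic completion call in the style of this hunk,
    # with the reply text parsed back into a dict (pre-1.0 openai SDK access style).
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt='Return {"questions": []} as JSON.',  # placeholder prompt
        temperature=0.0,
        max_tokens=100,
    )
    text = response["choices"][0]["text"]
    try:
        json_response = json.loads(text)
    except json.JSONDecodeError:
        json_response = {}  # assumption: fall back to an empty dict on bad JSON
    print(json_response)
    print("tokens used:", response["usage"]["total_tokens"])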
@@ -512,7 +537,6 @@ def revalidate_final_response_gpt(context:str,
     print("revalidate_final_response_gpt > Total tokens used:", response["usage"]["total_tokens"])
     return json_response
 
-
 # indxing methods
 
 def data_cleanup(file_path):