File size: 4,291 Bytes
a0d1776
 
ae495a3
a0d1776
 
c9efba3
 
 
a0d1776
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c9efba3
a0d1776
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ae495a3
 
 
 
 
 
 
 
 
 
 
 
c9efba3
ae495a3
 
 
 
 
 
 
 
 
05783f8
ae495a3
 
 
c9efba3
 
 
 
 
 
 
 
 
 
 
 
a0d1776
c9efba3
 
 
 
 
 
 
 
 
 
 
 
 
a0d1776
ae495a3
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
from utils.prompts import generate_paper_prompts, generate_keywords_prompts, generate_experiments_prompts, generate_bg_summary_prompts
from utils.gpt_interaction import get_responses, extract_responses, extract_keywords, extract_json
from utils.figures import generate_random_figures
import time
import os
from utils.prompts import KEYWORDS_SYSTEM
from utils.gpt_interaction import get_gpt_responses
import json

#  Three GPT-based content generators:
#       1. section_generation: generates the main content of the paper.
#       2. keywords_generation: generates a JSON output {key1: output1, key2: output2} for multiple purposes.
#       3. figure_generation: generates sample figures.
#  All generators should return the token usage.


def section_generation_bg(paper, section, save_to_path, model):
    """
    Generate the background-summary version of a section and save it to a .tex file.

    Pipeline:
        1. Generate prompts for the section.
        2. Get responses from the AI assistant.
        3. Store the raw response in ``paper["body"]``.
        4. Save the text to ``<save_to_path>/<section>.tex``, wrapped in an
           ``abstract`` environment or under a ``\\section`` heading.

    :param paper: paper description dict; ``paper["body"][section]`` is updated in place.
    :param section: name of the section to generate (e.g. "abstract", "introduction").
    :param save_to_path: directory where the .tex file is written.
    :param model: model identifier forwarded to ``get_responses``.
    :return: token usage reported by the assistant.
    """
    print(f"Generating {section}...")
    prompts = generate_bg_summary_prompts(paper, section)
    gpt_response, usage = get_responses(prompts, model)
    output = gpt_response  # raw response used as-is (extract_responses post-processing disabled)
    paper["body"][section] = output
    tex_file = os.path.join(save_to_path, f"{section}.tex")
    # Open the file once and write every fragment, instead of re-opening it
    # in append mode for each piece.
    with open(tex_file, "w") as f:
        if section == "abstract":
            f.write(r"\begin{abstract}")
            f.write(output)
            f.write(r"\end{abstract}")
        else:
            # Escaped backslash: the original f"\section..." relied on the
            # invalid escape "\s", which raises a DeprecationWarning.
            f.write(f"\\section{{{section.upper()}}}\n")
            f.write(output)
    time.sleep(5)  # crude rate-limit between successive API calls
    print(f"{section} has been generated. Saved to {tex_file}.")
    return usage


def section_generation(paper, section, save_to_path, model):
    """
    Generate the main content of a section and save it to a .tex file.

    Pipeline:
        1. Generate prompts for the section.
        2. Get responses from the AI assistant.
        3. Store the raw response in ``paper["body"]``.
        4. Save the text to ``<save_to_path>/<section>.tex``.

    :param paper: paper description dict; ``paper["body"][section]`` is updated in place.
    :param section: name of the section to generate.
    :param save_to_path: directory where the .tex file is written.
    :param model: model identifier forwarded to ``get_responses``.
    :return: token usage reported by the assistant.
    """
    print(f"Generating {section}...")
    prompts = generate_paper_prompts(paper, section)
    gpt_response, usage = get_responses(prompts, model)
    output = gpt_response  # raw response used as-is (extract_responses post-processing disabled)
    paper["body"][section] = output
    tex_file = os.path.join(save_to_path, f"{section}.tex")
    # The original branched on section == "abstract" but both branches were
    # identical; a single unconditional write preserves the behavior.
    with open(tex_file, "w") as f:
        f.write(output)
    time.sleep(5)  # crude rate-limit between successive API calls
    print(f"{section} has been generated. Saved to {tex_file}.")
    return usage

# def keywords_generation(input_dict,  model, max_kw_refs = 10):
#     title = input_dict.get("title")
#     description = input_dict.get("description", "")
#     if title is not None:
#         prompts = generate_keywords_prompts(title, description, max_kw_refs)
#         gpt_response, usage = get_responses(prompts, model)
#         keywords = extract_keywords(gpt_response)
#         return keywords, usage
#     else:
#         raise ValueError("`input_dict` must include the key 'title'.")

def keywords_generation(input_dict):
    """
    Ask GPT for a JSON keyword mapping for a paper title, retrying on bad JSON.

    Repeatedly queries the assistant (up to 10 attempts) until the response
    parses as JSON, pausing 20 seconds between attempts.

    :param input_dict: dict that must contain the key ``"title"``.
    :return: tuple ``(output, usage)`` where ``output`` is the parsed JSON
             dict and ``usage`` is the token usage of the successful call.
    :raises ValueError: if ``input_dict`` has no ``"title"`` entry.
    :raises RuntimeError: if every attempt returns malformed JSON.
    """
    title = input_dict.get("title")
    if title is None:
        # Fail fast instead of sending a None title to the API (matches the
        # contract of the previous implementation above).
        raise ValueError("`input_dict` must include the key 'title'.")
    max_attempts = 10
    for attempt in range(max_attempts):
        try:
            keywords, usage = get_gpt_responses(
                KEYWORDS_SYSTEM.format(min_refs_num=3, max_refs_num=5), title,
                model="gpt-3.5-turbo", temperature=0.4)
            print(keywords)
            output = json.loads(keywords)
            return output, usage
        except json.JSONDecodeError:
            # Only wait if another attempt remains; sleeping after the final
            # failure would just delay the RuntimeError by 20 seconds.
            if attempt < max_attempts - 1:
                time.sleep(20)
    raise RuntimeError("Fail to generate keywords.")

def figures_generation(paper, save_to_path, model):
    """
    Produce a sample comparison figure for the paper.

    Asks the assistant for the experiment description, extracts the list of
    methods from its JSON response, and renders a random comparison plot.

    :param paper: paper description dict passed to the prompt generator.
    :param save_to_path: directory where ``comparison.png`` is written.
    :param model: model identifier forwarded to ``get_responses``.
    :return: token usage reported by the assistant.
    """
    experiment_prompts = generate_experiments_prompts(paper)
    response, usage = get_responses(experiment_prompts, model)
    methods = list(extract_json(response))
    figure_path = os.path.join(save_to_path, "comparison.png")
    generate_random_figures(methods, figure_path)
    return usage