from transformers import TextIteratorStreamer
from threading import Thread
from utils.tree_utils import get_docstrings, grab_before_comments

def combine_generation_kwargs(temperature, max_new_tokens, top_p, repetition_penalty):
    """
    Combines the generation kwargs into a single dict.
    """
    return {
        "temperature": temperature,
        "max_new_tokens": max_new_tokens,
        "top_p": top_p,
        "repetition_penalty": repetition_penalty,
    }
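# Example (hypothetical values):
#   combine_generation_kwargs(0.7, 256, 0.95, 1.2)
#   -> {"temperature": 0.7, "max_new_tokens": 256, "top_p": 0.95, "repetition_penalty": 1.2}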


def stream_generation(prompt: str, pipe, gen_kwargs: dict):
    """
    Streams text generation, yielding the accumulated output as it grows.
    Args:
        prompt (str): The context to start generation from.
        pipe (Pipeline): The pipeline to use for generation.
        gen_kwargs (dict): The generation kwargs.
    Yields:
        str: The generated text so far; it grows with every new chunk.
    """
    # Tokenize the prompt and move the tensors to the pipeline's device,
    # so generation also works when the model sits on a GPU.
    model_inputs = pipe.tokenizer(prompt, return_tensors="pt").to(pipe.device)

    # Start generation on a separate thread, so that we don't block the UI. The text is pulled from
    # the streamer in the main thread. The timeout lets the main thread surface exceptions raised
    # in the generation thread instead of waiting on the queue forever.
    streamer = TextIteratorStreamer(pipe.tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15.0)
    generate_kwargs = dict(model_inputs, streamer=streamer, **gen_kwargs)
    t = Thread(target=pipe.model.generate, kwargs=generate_kwargs)
    t.start()

    # Pull the generated text from the streamer and yield the growing model output.
    model_output = ""
    for new_text in streamer:
        model_output += new_text
        yield model_output
    # Make sure the generation thread has finished before the generator is exhausted.
    # (A `return` here would only set StopIteration.value, which plain `for` loops never see.)
    t.join()
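# Usage sketch (assumes `pipe` is an already-loaded text-generation pipeline,
# e.g. pipe = transformers.pipeline("text-generation", model=...); prompt and
# sampling values below are hypothetical):
#
#   gen_kwargs = combine_generation_kwargs(0.8, 160, 0.95, 1.2)
#   for partial in stream_generation("float wave(vec2 p)", pipe, gen_kwargs):
#       print(partial)  # the accumulated output so far, not just the newest chunk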

def construct_model_context(func_node, prompt="") -> str:
    """
    Constructs the model context from a tree-sitter function node.
    Args:
        func_node (Node): The tree-sitter node of the function definition.
        prompt (str): Optional user prompt/title to prepend as a comment.
    Returns:
        str: The assembled context to condition generation on.
    """
    # Function signature: return type plus declarator (name and parameter list).
    model_context = (
        func_node.child_by_field_name("type").text.decode()
        + " "
        + func_node.child_by_field_name("declarator").text.decode()
    )
    docstring = get_docstrings(func_node)  # may be empty
    if docstring:
        model_context = model_context + "\n" + docstring
    model_context = grab_before_comments(func_node) + model_context  # prepend the comments directly above the function
    if prompt:
        model_context = "//Title: " + prompt + "\n" + model_context  # prepend the user prompt/title
        model_context = "//Language: Shadertoy GLSL fragment shader\n" + model_context  # prepend the language hint as a system prompt
    return model_context
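# Example of an assembled context (hypothetical GLSL helper with one leading
# comment, called with prompt="ocean waves"):
#
#   //Language: Shadertoy GLSL fragment shader
#   //Title: ocean waves
#   // height of the water surface at p
#   float wave(vec2 p)
#   <docstring, if any, follows the signature>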