import os

import gradio as gr
import requests as req
from strings import dfs_code, function_code, real_docstring, tree_code, insert_code, display_code, article_string, descr_string

"""
import gradio as gr

gr.Interface.load("models/stmnk/codet5-small-code-summarization-python").launch()
"""

"""
def greet(name):
    return "Hello " + name + "!!"

iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch(
    # share=True  # RuntimeError: Share is not supported when you are in Spaces (!?!?!?)
    # share=False # To create a public link, set `share=True` in `launch()`.
)
"""

code_nl = "function for db connection"  # example NL intent, unused; left over from the earlier code-generation variant

CT5_URL = "https://api-inference.huggingface.co/models/stmnk/codet5-small-code-summarization-python"
CT5_METHOD = 'POST'
API_URL = CT5_URL
API_KEY = os.environ.get("API_KEY")

headers = {"Authorization": f"Bearer {API_KEY}"}

def query(payload):
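    """POST a JSON payload to the Inference API endpoint and return the decoded JSON response."""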
    response = req.post(API_URL, headers=headers, json=payload)
    return response.json()
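
# A minimal sketch of a direct call, for illustration only; the response shape is
# assumed from the Inference API docs, and the example summary is hypothetical:
#   query({"inputs": "def add(a, b):\n    return a + b"})
#   # -> [{"generated_text": "Add two numbers."}] on success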

task_code = f' Summarize Python: {function_code}'
# task_code = f' Summarize Python: {dfs_code}'

def docgen_func(function_code, min_length, max_length, top_k, top_p, temp, repetition_penalty):
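    """Build an Inference API request from the UI slider values and return the generated docstring."""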
    # Normalise the slider values; the UI exposes top_p as 0-100, but the API expects a float in [0, 1].
    m, M, k, p, t, r = int(min_length), int(max_length), int(top_k), float(top_p/100), float(temp), float(repetition_penalty)
    req_data = {
      "inputs": function_code,
      "parameters": {
        "min_length": m,   # (Default: None). Integer to define the minimum length in tokens of the output summary.
        "max_length": M,   # (Default: None). Integer to define the maximum length in tokens of the output summary.
        "top_k": k,        # (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
        "top_p": p,        # (Default: None). Float to define the tokens that are within the sample` operation of text generation. 
                           # Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
        "temperature": t,  # (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 
                           # 1 means regular sampling, 0 means top_k=1, 100.0 is getting closer to uniform probability.
        "repetition_penalty": r, # (Default: None). Float (0.0-100.0). The more a token is used within generation 
                                 # the more it is penalized to not be picked in successive generation passes.
        "max_time": 80,    # (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. 
                           # Network can cause some overhead so it will be a soft limit.
      },
      "options": {
        "use_gpu": False,        # (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least)
        "use_cache": True,       # (Default: true). Boolean. There is a cache layer on the inference API to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
        "wait_for_model": False, # (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
      }
    }
    output = query(req_data)
    if isinstance(output, list):
        # Wrap the generated summary in triple quotes so it reads like a Python docstring.
        return f'"""{output[0]["generated_text"]}"""'
    if isinstance(output, dict) and 'error' in output:
        # Cold-start response, e.g. {'error': 'Model ... is currently loading', 'estimated_time': 20}.
        return f'{output} Please wait for the model to load and try again.'
    return str(output)
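
# A quick smoke test (hypothetical slider values; requires a valid API_KEY secret
# and the hosted model to be loaded):
#   print(docgen_func(task_code, 30, 200, 3, 90, 1, 70))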

iface = gr.Interface(
    # pygen_func,
    docgen_func,
    [
        # gr.inputs.Textbox(lines=7, label="Code Intent (NL)", default=task_code),
        gr.inputs.Textbox(lines=10, label="Enter Task + Code in Python (Programming Language syntax, e.g. a Python function or class)", default=task_code), 
        gr.inputs.Slider(30, 200, default=100, label="Minimum Length (of the output summary, in tokens)"),
        gr.inputs.Slider(200, 500, default=350, label="Maximum Length (of the output summary, in tokens)"),
        gr.inputs.Slider(1, 7, default=3, step=1, label="Top K (tokens considered within the sample operation to create new text)"),
        gr.inputs.Slider(0, 100, default=80, label="Top P (probability threshold for next tokens in sample of new text, cumulative)"),
        gr.inputs.Slider(0, 100, default=1, label="Temperature (of the sampling operation)"),
        gr.inputs.Slider(0, 100, default=70, label="Repetition Penalty (frequently previously used tokens are downsized)"), 
    ],
    # gr.outputs.Textbox(label="Code Generated PL")) 
    gr.outputs.Textbox(label="Docstring Generated (Natural Language, code comment for documentation)"),
    layout="unaligned",
    title='Generate a documentation string for Python code',
    description=descr_string,
    article=article_string,
    theme='grass',
    examples=[[tree_code,50,200,2,70,10,80],[insert_code,100,250,3,90,20,90],[display_code,150,300,5,100,100,95]],
    # verbose=True,
    show_tips=True
)
    
# iface.launch(share=True)  # share=True raises a RuntimeError inside HF Spaces (see the note above)
iface.launch()