gagan3012 committed
Commit 2ffe805
Parent: 7d6b181

Create app.py

Files changed (1)
app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
+ import torch
+ import streamlit as st
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
+
+ st.set_page_config(
+     page_title="AI Leetcode",
+     layout="wide",
+     initial_sidebar_state="expanded")
+
+
+ # Cache generations so identical prompts and settings are not recomputed.
+ @st.cache(suppress_st_warning=True, ttl=1000)
+ def modelgpt(sequence, temp, top_k):
+     tokenizer = GPT2Tokenizer.from_pretrained("gagan3012/project-code-py-small")
+     model = GPT2LMHeadModel.from_pretrained("gagan3012/project-code-py-small")
+     inputs = tokenizer.encode(sequence, return_tensors='pt')
+     outputs = model.generate(inputs, max_length=1024, do_sample=True, temperature=temp, top_k=top_k)
+     text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return text
+
+ def display():
+     st.write('# Using AI to Generate LeetCode Solutions')
+     st.sidebar.markdown(
+         '''
+ # Project-code-py
+
+ ## This is a demo of a GPT-2 model fine-tuned to generate LeetCode answers in Python
+ *For additional questions and inquiries, please contact **Gagan Bhatia** via [LinkedIn](
+ https://www.linkedin.com/in/gbhatia30/) or [Github](https://github.com/gagan3012).*
+ ''')
+     st.sidebar.write('## Options:')
+     # Sidebar controls; the token and sample sliders are displayed but not yet wired into generation.
+     tokens = st.sidebar.slider(label='Number of Tokens', min_value=1, max_value=15, value=3, step=1)
+     samples = st.sidebar.slider(label='Number of Samples', min_value=1, max_value=9, value=9, step=1)
+     top_k = st.sidebar.slider(label='Top k', min_value=0, max_value=40, value=0, step=1)
+     temp = st.sidebar.slider(label='Temperature', min_value=0.1, max_value=1.0, value=1.0, step=0.05)
+     st.sidebar.markdown(
+         '''
+ `Number of Tokens:` number of tokens in the generated text\n
+ `Number of Samples:` total number of samples to return\n
+ `Temperature:` float value controlling randomness via the Boltzmann (softmax) distribution. Lower temperature results in less random completions; as the temperature approaches zero, the model becomes deterministic and repetitive. Higher temperature results in more random completions.\n
+ `Top k:` integer value controlling diversity. 1 means only 1 token is considered at each step, resulting in deterministic completions, while 40 means 40 tokens are considered at each step. 0 (default) is a special setting meaning no restriction; 40 is generally a good value.
+ ''')
+
+     st.write('## Enter a LeetCode question or starting code:')
+     sequence = st.text_area("", value="Given the root of a binary tree, return its maximum depth. A binary tree's maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.", height=150)
+     if st.button("Get Answer"):
+         text = modelgpt(sequence, temp, top_k)
+         st.code(text.encode().decode('unicode_escape'), language='python')
+
+ if __name__ == '__main__':
+     display()
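
For reference, app.py is a thin Streamlit wrapper around a single model.generate call. Below is a minimal standalone sketch of the same generation outside Streamlit, assuming the gagan3012/project-code-py-small checkpoint named in app.py; the prompt and sampling values are illustrative and not part of this commit.

from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the fine-tuned GPT-2 checkpoint referenced in app.py.
tokenizer = GPT2Tokenizer.from_pretrained("gagan3012/project-code-py-small")
model = GPT2LMHeadModel.from_pretrained("gagan3012/project-code-py-small")

# Illustrative prompt; any LeetCode-style question or starting code works.
prompt = "Write a function to reverse a linked list."
inputs = tokenizer.encode(prompt, return_tensors="pt")

# do_sample=True enables sampling; temperature rescales the logits before softmax,
# and top_k keeps only the k most likely tokens at each step (0 = no restriction).
outputs = model.generate(
    inputs,
    max_length=256,
    do_sample=True,
    temperature=0.8,
    top_k=40,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

To try the UI itself, `streamlit run app.py` starts the demo locally.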