ajeetkumar01 committed on
Commit 1efe09a
1 Parent(s): b26efb3

Update app.py

Files changed (1)
  1. app.py +0 -65
app.py CHANGED
@@ -1,68 +1,3 @@
- # import torch
- # from transformers import AutoTokenizer, AutoModelForCausalLM
- # import gradio as gr
-
- # # Load pre-trained GPT-2 model and tokenizer
- # model_name = "gpt2-large"
- # tokenizer = AutoTokenizer.from_pretrained(model_name)
- # model = AutoModelForCausalLM.from_pretrained(model_name)
-
-
- # def generate_text(input_text, max_length=32, num_beams=5, do_sample=False, no_repeat_ngram_size=2):
- #     """
- #     Generate text based on the given input text.
- #     Parameters:
- #     - input_text (str): The input text to start generation from.
- #     - max_length (int): Maximum length of the generated text.
- #     - num_beams (int): Number of beams for beam search.
- #     - do_sample (bool): Whether to use sampling or not.
- #     - no_repeat_ngram_size (int): Size of the n-gram to avoid repetition.
- #     Returns:
- #     - generated_text (str): The generated text.
- #     """
- #     # Encode the input text and move it to the appropriate device
- #     input_ids = tokenizer(input_text, return_tensors='pt')['input_ids']
- #     # Generate text using the model
- #     output = model.generate(input_ids, max_length=max_length, num_beams=num_beams,
- #                             do_sample=do_sample, no_repeat_ngram_size=no_repeat_ngram_size)
- #     # Decode the generated output
- #     generated_text = tokenizer.decode(output[0])
- #     return generated_text
-
-
- # # def generate_text_with_nucleus_search(input_text, max_length=16, do_sample=True, top_p=0.9):
- # #     """
- # #     Generate text with nucleus sampling based on the given input text.
- # #     Parameters:
- # #     - input_text (str): The input text to start generation from.
- # #     - max_length (int): Maximum length of the generated text.
- # #     - do_sample (bool): Whether to use sampling or not.
- # #     - top_p (float): Nucleus sampling parameter.
- # #     Returns:
- # #     - generated_text (str): The generated text.
- # #     """
- # #     # Encode the input text and move it to the appropriate device
- # #     input_ids = tokenizer(input_text, return_tensors='pt')['input_ids']
- # #     # Generate text using nucleus sampling
- # #     output = model.generate(input_ids, max_length=max_length, do_sample=do_sample, top_p=top_p)
- # #     # Decode the generated output
- # #     generated_text = tokenizer.decode(output[0])
- # #     return generated_text
-
-
- # # Create Gradio interface
- # input_text = gr.Textbox(lines=10, label="Input Text", placeholder="Enter text for text generation...")
- # output_text = gr.Textbox(label="Generated Text")
-
-
-
- # gr.Interface(generate_text, input_text, output_text,
- #              title="Text Generation with GPT-2",
- #              description="Generate text using the GPT-2 model.",
- #              theme="default",
- #              allow_flagging="never").launch(share=True)
-
-
  import torch
  from transformers import AutoTokenizer, AutoModelForCausalLM
  import gradio as gr
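
For reference, the 65 deleted lines were an earlier, fully commented-out GPT-2 demo. Uncommented and tidied up, the beam-search path amounts to the sketch below. The checkpoint name and every generate() argument come straight from the removed lines; pad_token_id and skip_special_tokens are small additions of mine, not in the original.

from transformers import AutoTokenizer, AutoModelForCausalLM

# Load pre-trained GPT-2 model and tokenizer (checkpoint from the removed lines)
model_name = "gpt2-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def generate_text(input_text, max_length=32, num_beams=5, do_sample=False, no_repeat_ngram_size=2):
    """Continue input_text with beam search, blocking repeated 2-grams."""
    input_ids = tokenizer(input_text, return_tensors="pt")["input_ids"]
    output = model.generate(
        input_ids,
        max_length=max_length,
        num_beams=num_beams,
        do_sample=do_sample,
        no_repeat_ngram_size=no_repeat_ngram_size,
        pad_token_id=tokenizer.eos_token_id,  # added: GPT-2 has no pad token; this silences a generate() warning
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)  # added: drop <|endoftext|>

print(generate_text("Once upon a time"))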
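The doubly commented-out nucleus-sampling variant reconstructs the same way. It reuses the tokenizer and model above, and the max_length, do_sample, and top_p defaults are those of the removed lines; pad_token_id is again my addition.

def generate_text_with_nucleus_search(input_text, max_length=16, do_sample=True, top_p=0.9):
    """Continue input_text by sampling from the smallest token set whose cumulative probability exceeds top_p."""
    input_ids = tokenizer(input_text, return_tensors="pt")["input_ids"]
    output = model.generate(input_ids, max_length=max_length, do_sample=do_sample,
                            top_p=top_p, pad_token_id=tokenizer.eos_token_id)  # pad_token_id added, not in the original
    return tokenizer.decode(output[0], skip_special_tokens=True)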
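Finally, the removed Gradio wiring, which is roughly what app.py would need again to serve generate_text. Components and Interface arguments are copied from the removed lines; newer Gradio releases deprecate allow_flagging, so treat this as a sketch against whatever Gradio version the Space pins.

import gradio as gr

# Components taken from the removed lines
input_text = gr.Textbox(lines=10, label="Input Text", placeholder="Enter text for text generation...")
output_text = gr.Textbox(label="Generated Text")

gr.Interface(generate_text, input_text, output_text,
             title="Text Generation with GPT-2",
             description="Generate text using the GPT-2 model.",
             theme="default",
             allow_flagging="never").launch(share=True)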