Grady Harwood committed on
Commit
2abe29a
1 Parent(s): 488dbc6

updated model

Browse files
Files changed (2) hide show
  1. .DS_Store +0 -0
  2. app.py +45 -44
.DS_Store ADDED
Binary file (6.15 kB). View file
 
app.py CHANGED
@@ -1,57 +1,58 @@
1
- # # from transformers import pipeline
2
  # from transformers import T5Tokenizer, T5ForConditionalGeneration
3
- # import gradio as gr
4
 
5
- # def pipe(input_text):
6
- # tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
7
- # model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
8
 
9
- # input_text = "reword for clarity" + input_text
10
- # input_ids = tokenizer(input_text, return_tensors="pt").input_ids
11
 
12
- # outputs = model.generate(input_ids)
13
- # return tokenizer.decode(outputs[0])
14
 
15
- # # model = pipeline(
16
- # # task='question-answering',
17
- # # model="google/flan-t5-base",
18
- # # )
19
- # # output = model(
20
- # # question="reword for clarity",
21
- # # context=input_text,
22
- # # )
23
- # # return output["answer"]
 
24
 
25
- # demo = gr.Interface(
26
- # fn=pipe,
27
- # inputs=gr.Textbox(lines=7),
28
- # outputs="text",
29
- # )
30
- # demo.launch()
31
 
32
 
33
- # pip install -q transformers
34
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
35
 
36
- checkpoint = "CohereForAI/aya-101"
37
 
38
- tokenizer = AutoTokenizer.from_pretrained(checkpoint)
39
- aya_model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
40
 
41
- def generator(input_text):
42
- inputs = tokenizer.encode("Translate to English: " + input_text, return_tensors="pt")
43
- outputs = aya_model.generate(inputs, max_new_tokens=128)
44
- return tokenizer.decode(outputs[0])
45
 
46
- # # Turkish to English translation
47
- # tur_inputs = tokenizer.encode("Translate to English: Aya cok dilli bir dil modelidir.", return_tensors="pt")
48
- # tur_outputs = aya_model.generate(tur_inputs, max_new_tokens=128)
49
- # print(tokenizer.decode(tur_outputs[0]))
50
- # # Aya is a multi-lingual language model
51
 
52
- demo = gr.Interface(
53
- fn=generator,
54
- inputs=gr.Textbox(lines=7),
55
- outputs="text",
56
- )
57
- demo.launch()
 
1
+ from transformers import pipeline
2
  # from transformers import T5Tokenizer, T5ForConditionalGeneration
3
+ import gradio as gr
4
 
5
+ def pipe(input_text):
6
+ # tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
7
+ # model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
8
 
9
+ # input_text = "reword for clarity" + input_text
10
+ # input_ids = tokenizer(input_text, return_tensors="pt").input_ids
11
 
12
+ # outputs = model.generate(input_ids)
13
+ # return tokenizer.decode(outputs[0])
14
 
15
+ # Use a pipeline as a high-level helper
16
+ model = pipeline(
17
+ task='question-answering',
18
+ model="mistralai/Mistral-7B-Instruct-v0.3",
19
+ )
20
+ output = model(
21
+ question="reword for clarity",
22
+ context=input_text,
23
+ )
24
+ return output["answer"]
25
 
26
+ demo = gr.Interface(
27
+ fn=pipe,
28
+ inputs=gr.Textbox(lines=7),
29
+ outputs="text",
30
+ )
31
+ demo.launch()
32
 
33
 
34
+ # # pip install -q transformers
35
+ # from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
36
 
37
+ # checkpoint = "CohereForAI/aya-101"
38
 
39
+ # tokenizer = AutoTokenizer.from_pretrained(checkpoint)
40
+ # aya_model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
41
 
42
+ # def generator(input_text):
43
+ # inputs = tokenizer.encode("Translate to English: " + input_text, return_tensors="pt")
44
+ # outputs = aya_model.generate(inputs, max_new_tokens=128)
45
+ # return tokenizer.decode(outputs[0])
46
 
47
+ # # # Turkish to English translation
48
+ # # tur_inputs = tokenizer.encode("Translate to English: Aya cok dilli bir dil modelidir.", return_tensors="pt")
49
+ # # tur_outputs = aya_model.generate(tur_inputs, max_new_tokens=128)
50
+ # # print(tokenizer.decode(tur_outputs[0]))
51
+ # # # Aya is a multi-lingual language model
52
 
53
+ # demo = gr.Interface(
54
+ # fn=generator,
55
+ # inputs=gr.Textbox(lines=7),
56
+ # outputs="text",
57
+ # )
58
+ # demo.launch()