thepolymerguy committed on
Commit
6745420
1 Parent(s): f076e4f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -6
app.py CHANGED
@@ -13,10 +13,83 @@ def bot(history):
13
  history[-1][1] = response
14
  return history
15
 
16
- with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
  gr.Markdown("""
19
- Hey there, genius!
20
 
21
  Welcome to our demo! We've trained Meta's Llama on almost 200k data entries in the question/answer format.
22
 
@@ -33,8 +106,6 @@ with gr.Blocks() as demo:
33
 
34
  None of the outputs should be taken as solid legal advice. If you are an inventor looking to patent an invention, always seek the help of a registered patent attorney.
35
 
36
-
37
-
38
 
39
  """)
40
 
@@ -126,10 +197,10 @@ with gr.Blocks() as demo:
126
  placeholder="Enter text and press enter, or upload an image",
127
  ).style(container=False)
128
  with gr.Column(scale=0.15, min_width=0):
129
- btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
130
 
131
  txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
132
- bot, chatbot, chatbot
133
  )
134
 
135
  demo.launch()
 
13
  history[-1][1] = response
14
  return history
15
 
16
+ """
17
+
18
+ Alpaca model trained: example (n.b. can upload mine as a HF model to load from?)
19
+
20
+ """
21
+ '''
22
+ from peft import PeftModel
23
+ from transformers import LLaMATokenizer, LLaMAForCausalLM, GenerationConfig
24
+
25
+ tokenizer = LLaMATokenizer.from_pretrained("chavinlo/alpaca-native")
26
+
27
+ model = LLaMAForCausalLM.from_pretrained(
28
+ "chavinlo/alpaca-native",
29
+ load_in_8bit=True,
30
+ device_map="auto",
31
+ )
32
+ '''
33
+
34
+
35
+ def generateresponse(history):
36
+ """
37
+ Model definition here:
38
+ """
39
+ '''
40
+ global model
41
+ global tokenizer
42
+
43
+ PROMPT = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
44
+ ### Instruction:
45
+ {user}
46
+ ### Response:"""
47
+
48
+ inputs = tokenizer(
49
+ PROMPT,
50
+ return_tensors="pt",
51
+ )
52
+ input_ids = inputs["input_ids"].cuda()
53
+
54
+ generation_config = GenerationConfig(
55
+ temperature=0.6,
56
+ top_p=0.95,
57
+ repetition_penalty=1.15,
58
+ )
59
+ print("Generating...")
60
+ generation_output = model.generate(
61
+ input_ids=input_ids,
62
+ generation_config=generation_config,
63
+ return_dict_in_generate=True,
64
+ output_scores=True,
65
+ max_new_tokens=256,
66
+ )
67
+ output = []
68
+ for s in generation_output.sequences:
69
+ outputs.append(tokenizer.decode(s))
70
+ print(tokenizer.decode(s))
71
+
72
+ output = (outputs[0].split('### Response:'))[1]
73
+
74
+ '''
75
+
76
+ user = history[-1][0]
77
+
78
+ response = f"you asked: {user}"
79
+ history[-1][1] = response
80
+ print(history)
81
+ return history
82
+
83
+ theme = gr.themes.Base(
84
+ primary_hue="indigo",
85
+ ).set(
86
+ prose_text_size='*text_sm'
87
+ )
88
+
89
+ with gr.Blocks(title='Claimed', theme=theme) as demo:
90
 
91
  gr.Markdown("""
92
+ ### Hey there, genius! 🫡 🫡 🫡
93
 
94
  Welcome to our demo! We've trained Meta's Llama on almost 200k data entries in the question/answer format.
95
 
 
106
 
107
  None of the outputs should be taken as solid legal advice. If you are an inventor looking to patent an invention, always seek the help of a registered patent attorney.
108
 
 
 
109
 
110
  """)
111
 
 
197
  placeholder="Enter text and press enter, or upload an image",
198
  ).style(container=False)
199
  with gr.Column(scale=0.15, min_width=0):
200
+ btn = gr.Button("Submit")
201
 
202
  txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
203
+ generateresponse, chatbot, chatbot
204
  )
205
 
206
  demo.launch()