CountingMstar committed on
Commit
5f6fcc3
•
1 Parent(s): 9dab2e1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -16
app.py CHANGED
@@ -26,22 +26,17 @@ def submit(context, question):
26
  answer = question_answer(context, question)
27
  return answer
28
 
29
- examples_text = [
30
- ["A large language model (LLM) is a type of language model notable for its ability to achieve general-purpose language understanding and generation. LLMs acquire these abilities by using massive amounts of data to learn billions of parameters during training and consuming large computational resources during their training and operation.[1] LLMs are artificial neural networks (mainly transformers[2]) and are (pre-)trained using self-supervised learning and semi-supervised learning.","What is large language model?"],
31
  ["Feature engineering or feature extraction or feature discovery is the process of extracting features (characteristics, properties, attributes) from raw data. Due to deep learning networks, such as convolutional neural networks, that are able to learn features by themselves, domain-specific-based feature engineering has become obsolete for vision and speech processing. Other examples of features in physics include the construction of dimensionless numbers such as Reynolds number in fluid dynamics; then Nusselt number in heat transfer; Archimedes number in sedimentation; construction of first approximations of the solution such as analytical strength of materials solutions in mechanics, etc.", "What is Feature engineering?"],
32
  ["It calculates soft weights for each word, more precisely for its embedding, in the context window. It can do it either in parallel (such as in transformers) or sequentially (such as recurrent neural networks). Soft weights can change during each runtime, in contrast to hard weights, which are (pre-)trained and fine-tuned and remain frozen afterwards. Attention was developed to address the weaknesses of recurrent neural networks, where words in a sentence are slowly processed one at a time. Machine learning-based attention is a mechanism mimicking cognitive attention. Recurrent neural networks favor more recent words at the end of a sentence while earlier words fade away in volatile neural activations. Attention gives all words equal access to any part of a sentence in a faster parallel scheme and no longer suffers the wait time of serial processing. Earlier uses attached this mechanism to a serial recurrent neural network's language translation system (below), but later uses in Transformers large language models removed the recurrent neural network and relied heavily on the faster parallel attention scheme.", "What is Attention mechanism?"]
33
- ]
34
 
35
- input_textbox = gr.Textbox("Context", placeholder="Enter context here")
36
- question_textbox = gr.Textbox("Question", placeholder="Enter question here")
37
 
38
- input_section = gr.Row([input_textbox, question_textbox])
39
 
40
- gr.Markdown(
41
- """
42
  # AI Tutor BERT
43
  ์ด ๋ชจ๋ธ์€ ์ธ๊ณต์ง€๋Šฅ(AI) ๊ด€๋ จ ์šฉ์–ด ๋ฐ ์„ค๋ช…์„ ํŒŒ์ธํŠœ๋‹(fine-tuning)ํ•œ BERT ๋ชจ๋ธ์ž…๋‹ˆ๋‹ค.
44
-
45
  ## Model
46
  https://huggingface.co/bert-base-uncased
47
  ๋ชจ๋ธ์˜ ๊ฒฝ์šฐ ์ž์—ฐ์–ด ์ฒ˜๋ฆฌ ๋ชจ๋ธ ์ค‘ ๊ฐ€์žฅ ์œ ๋ช…ํ•œ Google์—์„œ ๊ฐœ๋ฐœํ•œ BERT๋ฅผ ์‚ฌ์šฉํ–ˆ์Šต๋‹ˆ๋‹ค. ์ž์„ธํ•œ ์„ค๋ช…์€ ์œ„ ์‚ฌ์ดํŠธ๋ฅผ ์ฐธ๊ณ ํ•˜์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค. ์งˆ์˜์‘๋‹ต์ด ์ฃผ์ธ ๊ณผ์™ธ ์„ ์ƒ๋‹˜๋‹ต๊ฒŒ, BERT ์ค‘์—์„œ๋„ ์งˆ์˜์‘๋‹ต์— ํŠนํ™”๋œ Question and Answering ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•˜์˜€์Šต๋‹ˆ๋‹ค.
@@ -54,19 +49,18 @@ gr.Markdown(
54
  ### Adrien Beaulieu
55
  https://product.house/100-ai-glossary-terms-explained-to-the-rest-of-us/
56
  ํ•™์Šต ๋ฐ์ดํ„ฐ์…‹์€ ์ธ๊ณต์ง€๋Šฅ ๊ด€๋ จ ๋ฌธ๋งฅ, ์งˆ๋ฌธ, ๊ทธ๋ฆฌ๊ณ  ์‘๋‹ต ์ด๋ ‡๊ฒŒ 3๊ฐ€์ง€๋กœ ๊ตฌ์„ฑ์ด ๋˜์–ด์žˆ์Šต๋‹ˆ๋‹ค. ์‘๋‹ต(์ •๋‹ต) ๋ฐ์ดํ„ฐ๋Š” ๋ฌธ๋งฅ ๋ฐ์ดํ„ฐ ์•ˆ์— ํฌํ•จ๋˜์–ด ์žˆ๊ณ , ๋ฌธ๋งฅ ๋ฐ์ดํ„ฐ์˜ ๋ฌธ์žฅ ์ˆœ์„œ๋ฅผ ๋ฐ”๊ฟ”์ฃผ์–ด ๋ฐ์ดํ„ฐ๋ฅผ ์ฆ๊ฐ•ํ•˜์˜€์Šต๋‹ˆ๋‹ค. ์งˆ๋ฌธ ๋ฐ์ดํ„ฐ๋Š” ์ฃผ์ œ๊ฐ€ ๋˜๋Š” ์ธ๊ณต์ง€๋Šฅ ์šฉ์–ด๋กœ ์„ค์ •ํ–ˆ์Šต๋‹ˆ๋‹ค. ์œ„์˜ ์˜ˆ์‹œ๋ฅผ ๋ณด์‹œ๋ฉด ์ดํ•ดํ•˜์‹œ๊ธฐ ํŽธํ•˜์‹ค ๊ฒ๋‹ˆ๋‹ค. ์ด ๋ฐ์ดํ„ฐ ์ˆ˜๋Š” 3300์—ฌ ๊ฐœ๋กœ data ํด๋”์— pickle ํŒŒ์ผ ํ˜•ํƒœ๋กœ ์ €์žฅ๋˜์–ด ์žˆ๊ณ , ๋ฐ์ดํ„ฐ๋Š” Wikipedia ๋ฐ ๋‹ค๋ฅธ ์‚ฌ์ดํŠธ๋“ค์„ ์—์„œ html์„ ์ด์šฉํ•˜์—ฌ ์ถ”์ถœ ๋ฐ ๊ฐ€๊ณตํ•˜์—ฌ ์ œ์ž‘ํ•˜์˜€์Šต๋‹ˆ๋‹ค. ํ•ด๋‹น ์ถœ์ฒ˜๋Š” ์œ„์™€ ๊ฐ™์Šต๋‹ˆ๋‹ค.
57
-
58
  ## How to use
59
  ์ž…๋ ฅ ์˜ˆ์ œ๋Š” 'Examples'์— ํ‘œ๊ธฐํ•ด ๋‘์—ˆ์Šต๋‹ˆ๋‹ค.
60
  ๊ด€๋ จ ๋ฌธ์žฅ๊ณผ ์ •์˜๋ฅผ ์•Œ๊ณ  ์‹ถ์€ ๋‹จ์–ด๋ฅผ ๊ฐ๊ฐ 'Contexts', 'Question'์— ์ž…๋ ฅํ•œ ํ›„ 'Submit' ๋ฒ„ํŠผ์„ ๋ˆ„๋ฅด๋ฉด ํ•ด๋‹น ๋‹จ์–ด์— ๋Œ€ํ•œ ์„ค๋ช…์ด ๋‚˜์˜ต๋‹ˆ๋‹ค.
61
- """)
62
-
63
  iface = gr.Interface(
64
  fn=submit,
65
- inputs=input_section,
66
  outputs=gr.Textbox("Answer"),
67
- examples=examples_text,
68
- live=True,
69
- title="BERT Question Answering"
70
  )
71
 
72
  iface.launch()
 
26
  answer = question_answer(context, question)
27
  return answer
28
 
29
+ examples = [
30
+ ["A large language model (LLM) is a type of language model notable for its ability to achieve general-purpose language understanding and generation. LLMs acquire these abilities by using massive amounts of data to learn billions of parameters during training and consuming large computational resources during their training and operation.[1] LLMs are artificial neural networks (mainly transformers[2]) and are (pre-)trained using self-supervised learning and semi-supervised learning.","What is large language model?"],
31
  ["Feature engineering or feature extraction or feature discovery is the process of extracting features (characteristics, properties, attributes) from raw data. Due to deep learning networks, such as convolutional neural networks, that are able to learn features by themselves, domain-specific-based feature engineering has become obsolete for vision and speech processing. Other examples of features in physics include the construction of dimensionless numbers such as Reynolds number in fluid dynamics; then Nusselt number in heat transfer; Archimedes number in sedimentation; construction of first approximations of the solution such as analytical strength of materials solutions in mechanics, etc.", "What is Feature engineering?"],
32
  ["It calculates soft weights for each word, more precisely for its embedding, in the context window. It can do it either in parallel (such as in transformers) or sequentially (such as recurrent neural networks). Soft weights can change during each runtime, in contrast to hard weights, which are (pre-)trained and fine-tuned and remain frozen afterwards. Attention was developed to address the weaknesses of recurrent neural networks, where words in a sentence are slowly processed one at a time. Machine learning-based attention is a mechanism mimicking cognitive attention. Recurrent neural networks favor more recent words at the end of a sentence while earlier words fade away in volatile neural activations. Attention gives all words equal access to any part of a sentence in a faster parallel scheme and no longer suffers the wait time of serial processing. Earlier uses attached this mechanism to a serial recurrent neural network's language translation system (below), but later uses in Transformers large language models removed the recurrent neural network and relied heavily on the faster parallel attention scheme.", "What is Attention mechanism?"]
 
33
 
34
+ ]
 
35
 
 
36
 
37
+ markdown_text = """
 
38
  # AI Tutor BERT
39
  ์ด ๋ชจ๋ธ์€ ์ธ๊ณต์ง€๋Šฅ(AI) ๊ด€๋ จ ์šฉ์–ด ๋ฐ ์„ค๋ช…์„ ํŒŒ์ธํŠœ๋‹(fine-tuning)ํ•œ BERT ๋ชจ๋ธ์ž…๋‹ˆ๋‹ค.
 
40
  ## Model
41
  https://huggingface.co/bert-base-uncased
42
  ๋ชจ๋ธ์˜ ๊ฒฝ์šฐ ์ž์—ฐ์–ด ์ฒ˜๋ฆฌ ๋ชจ๋ธ ์ค‘ ๊ฐ€์žฅ ์œ ๋ช…ํ•œ Google์—์„œ ๊ฐœ๋ฐœํ•œ BERT๋ฅผ ์‚ฌ์šฉํ–ˆ์Šต๋‹ˆ๋‹ค. ์ž์„ธํ•œ ์„ค๋ช…์€ ์œ„ ์‚ฌ์ดํŠธ๋ฅผ ์ฐธ๊ณ ํ•˜์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค. ์งˆ์˜์‘๋‹ต์ด ์ฃผ์ธ ๊ณผ์™ธ ์„ ์ƒ๋‹˜๋‹ต๊ฒŒ, BERT ์ค‘์—์„œ๋„ ์งˆ์˜์‘๋‹ต์— ํŠนํ™”๋œ Question and Answering ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•˜์˜€์Šต๋‹ˆ๋‹ค.
 
49
  ### Adrien Beaulieu
50
  https://product.house/100-ai-glossary-terms-explained-to-the-rest-of-us/
51
  ํ•™์Šต ๋ฐ์ดํ„ฐ์…‹์€ ์ธ๊ณต์ง€๋Šฅ ๊ด€๋ จ ๋ฌธ๋งฅ, ์งˆ๋ฌธ, ๊ทธ๋ฆฌ๊ณ  ์‘๋‹ต ์ด๋ ‡๊ฒŒ 3๊ฐ€์ง€๋กœ ๊ตฌ์„ฑ์ด ๋˜์–ด์žˆ์Šต๋‹ˆ๋‹ค. ์‘๋‹ต(์ •๋‹ต) ๋ฐ์ดํ„ฐ๋Š” ๋ฌธ๋งฅ ๋ฐ์ดํ„ฐ ์•ˆ์— ํฌํ•จ๋˜์–ด ์žˆ๊ณ , ๋ฌธ๋งฅ ๋ฐ์ดํ„ฐ์˜ ๋ฌธ์žฅ ์ˆœ์„œ๋ฅผ ๋ฐ”๊ฟ”์ฃผ์–ด ๋ฐ์ดํ„ฐ๋ฅผ ์ฆ๊ฐ•ํ•˜์˜€์Šต๋‹ˆ๋‹ค. ์งˆ๋ฌธ ๋ฐ์ดํ„ฐ๋Š” ์ฃผ์ œ๊ฐ€ ๋˜๋Š” ์ธ๊ณต์ง€๋Šฅ ์šฉ์–ด๋กœ ์„ค์ •ํ–ˆ์Šต๋‹ˆ๋‹ค. ์œ„์˜ ์˜ˆ์‹œ๋ฅผ ๋ณด์‹œ๋ฉด ์ดํ•ดํ•˜์‹œ๊ธฐ ํŽธํ•˜์‹ค ๊ฒ๋‹ˆ๋‹ค. ์ด ๋ฐ์ดํ„ฐ ์ˆ˜๋Š” 3300์—ฌ ๊ฐœ๋กœ data ํด๋”์— pickle ํŒŒ์ผ ํ˜•ํƒœ๋กœ ์ €์žฅ๋˜์–ด ์žˆ๊ณ , ๋ฐ์ดํ„ฐ๋Š” Wikipedia ๋ฐ ๋‹ค๋ฅธ ์‚ฌ์ดํŠธ๋“ค์„ ์—์„œ html์„ ์ด์šฉํ•˜์—ฌ ์ถ”์ถœ ๋ฐ ๊ฐ€๊ณตํ•˜์—ฌ ์ œ์ž‘ํ•˜์˜€์Šต๋‹ˆ๋‹ค. ํ•ด๋‹น ์ถœ์ฒ˜๋Š” ์œ„์™€ ๊ฐ™์Šต๋‹ˆ๋‹ค.
 
52
  ## How to use
53
  ์ž…๋ ฅ ์˜ˆ์ œ๋Š” 'Examples'์— ํ‘œ๊ธฐํ•ด ๋‘์—ˆ์Šต๋‹ˆ๋‹ค.
54
  ๊ด€๋ จ ๋ฌธ์žฅ๊ณผ ์ •์˜๋ฅผ ์•Œ๊ณ  ์‹ถ์€ ๋‹จ์–ด๋ฅผ ๊ฐ๊ฐ 'Contexts', 'Question'์— ์ž…๋ ฅํ•œ ํ›„ 'Submit' ๋ฒ„ํŠผ์„ ๋ˆ„๋ฅด๋ฉด ํ•ด๋‹น ๋‹จ์–ด์— ๋Œ€ํ•œ ์„ค๋ช…์ด ๋‚˜์˜ต๋‹ˆ๋‹ค.
55
+ """
56
+ input_col = gr.Column([gr.Textbox("Context"), gr.Textbox("Question"), gr.Markdown(markdown_text)])
57
  iface = gr.Interface(
58
  fn=submit,
59
+ inputs=input_col,
60
  outputs=gr.Textbox("Answer"),
61
+ examples=examples,
62
+ live=True, # Set live to True to use the submit button
63
+ title="BERT Question Answering",
64
  )
65
 
66
  iface.launch()