Spaces:
Runtime error
Runtime error
Commit
β’
5515112
1
Parent(s):
7c4c56e
Delete pages
Browse files- pages/beta.py +0 -312
pages/beta.py
DELETED
@@ -1,312 +0,0 @@
|
|
1 |
-
import pandas as pd
|
2 |
-
import requests
|
3 |
-
import streamlit as st
|
4 |
-
from streamlit_lottie import st_lottie
|
5 |
-
import torch
|
6 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM
|
7 |
-
import re
|
8 |
-
|
9 |
-
# Page Config
# NOTE(review): the Korean/emoji string literals in this file were mojibake'd
# when the source was extracted — restore the original UTF-8 text before use.
st.set_page_config(
    page_title="λΈλ κ°μ¬ nνμ Beta",  # garbled; originally "<song lyric n-line poem> Beta"
    page_icon="π",  # garbled emoji
    layout="wide"
)
# st.text(os.listdir(os.curdir))

### Model
# The tokenizer is loaded eagerly at import time; only the model below is
# wrapped in a cached loader.
tokenizer = AutoTokenizer.from_pretrained("wumusill/final_project_kogpt2")
|
19 |
-
|
20 |
-
# NOTE(review): st.cache is deprecated in modern Streamlit (st.cache_resource
# is the successor) — confirm the Streamlit version this Space targets.
@st.cache(show_spinner=False)
def load_model():
    """Load and cache the KoGPT2 causal LM fine-tuned for lyric generation."""
    model = AutoModelForCausalLM.from_pretrained("wumusill/final_project_kogpt2")
    return model

model = load_model()
|
26 |
-
|
27 |
-
@st.cache(show_spinner=False)
def get_word():
    """Load and cache the ballad word list (one word per row, column "0")."""
    # cp949: the CSV was saved with the Korean Windows code page, not UTF-8
    word = pd.read_csv("ballad_word.csv", encoding="cp949")
    return word


word = get_word()
|
34 |
-
|
35 |
-
|
36 |
-
# Smoke test: sample one dataset word starting with a given syllable.
# NOTE(review): the literal below is mojibake'd — originally a Korean syllable.
one = word[word["0"].str.startswith("ν")].sample(1).values[0][0]
# st.header(type(one))
# st.header(one)
|
39 |
-
|
40 |
-
|
41 |
-
# Class: lets a dict hold duplicate "keys" for repeated letters
class poem(object):
    """Hashable wrapper around a single letter.

    Equal plain strings would collide as dict keys, so each acrostic line
    wraps its letter in a fresh object: distinct keys, but still printed
    as the bare letter.
    """

    def __init__(self, letter):
        self.letter = letter

    def __str__(self):
        return self.letter

    def __repr__(self):
        return f"'{self.letter}'"
|
51 |
-
|
52 |
-
|
53 |
-
def beta_poem(input_letter):
    """Build an acrostic ("n-haeng-si") for *input_letter*, beta strategy.

    For each letter, a seed word starting with that letter is sampled from
    the module-level `word` dataset, appended after the lines generated so
    far, and the model continues the combined text; only the newly
    generated tail is kept as that letter's line.

    Returns a dict {"Type": "beta", poem(letter): line, ...} — the poem()
    wrapper keeps duplicate letters as distinct keys.
    """
    # Initial-sound-law (두음법칙) substitutions so a line never has to start
    # with a syllable Korean words don't begin with.
    # NOTE(review): the original literals were mojibake'd in the dump; this
    # table was restored from the standard dooeum mapping — verify.
    dooeum = {"라": "나", "락": "낙", "란": "난", "랄": "날", "람": "남", "랍": "납", "랑": "낭",
              "래": "내", "랭": "냉", "냑": "약", "략": "약", "냥": "양", "량": "양", "녀": "여",
              "려": "여", "녁": "역", "력": "역", "년": "연", "련": "연", "녈": "열", "렬": "열",
              "념": "염", "렴": "염", "렵": "엽", "녕": "영", "령": "영", "녜": "예", "례": "예",
              "로": "노", "록": "녹", "론": "논", "롱": "농", "뢰": "뇌", "뇨": "요", "료": "요",
              "룡": "용", "루": "누", "뉴": "유", "류": "유", "뉵": "육", "륙": "육", "륜": "윤",
              "률": "율", "륭": "융", "륵": "늑", "름": "늠", "릉": "능", "니": "이", "리": "이",
              "린": "인", "림": "임", "립": "입"}

    res_l = []          # one decoded line per input letter
    len_sequence = 0    # running count of kept (non-special) tokens

    for idx, val in enumerate(input_letter):
        # Apply the initial sound law before looking up a seed word.
        if val in dooeum:
            val = dooeum[val]

        # Seed with a dataset word starting with this letter; fall back to
        # the bare letter when the dataset has no match.
        try:
            one = word[word["0"].str.startswith(val)].sample(1).values[0][0]
        except Exception:  # was a bare except; .sample(1) raises ValueError on an empty frame
            one = val

        # Prepend the lines generated so far so the model continues the poem
        # coherently; the prefix is stripped back out after generation.
        link_with_pre_sentence = (" ".join(res_l) + " " + one + " " if idx != 0 else one).strip()

        input_ids = tokenizer.encode(link_with_pre_sentence, add_special_tokens=False, return_tensors="pt")

        output_sequence = model.generate(
            input_ids=input_ids,
            do_sample=True,
            max_length=42,
            min_length=len_sequence + 2,  # force at least 2 new tokens past the prefix
            temperature=0.9,
            repetition_penalty=1.5,
            no_repeat_ngram_size=2)

        # Token-id list; may still carry trailing padding.
        generated_sequence = output_sequence.tolist()[0]

        # Trim at the first pad token; if that would leave fewer than ~3 new
        # tokens, keep 8 tokens past the prefix instead.
        if tokenizer.pad_token_id in generated_sequence:
            check_index = generated_sequence.index(tokenizer.pad_token_id)
            check_index = check_index if check_index - len_sequence > 3 else len_sequence + 8
            generated_sequence = generated_sequence[:check_index]

        # Find the LAST occurrence of the seed word's first token and drop
        # everything before it (i.e. the previously generated lines).
        word_encode = tokenizer.encode(one, add_special_tokens=False, return_tensors="pt").tolist()[0][0]
        split_index = len(generated_sequence) - 1 - generated_sequence[::-1].index(word_encode)
        generated_sequence = generated_sequence[split_index:]

        # Update the kept-token count (special tokens excluded) for the next
        # iteration's min_length / trimming logic.
        len_sequence += len([elem for elem in generated_sequence if elem not in tokenizer.all_special_ids])

        decoded_sequence = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True, skip_special_tokens=True)
        res_l.append(decoded_sequence)

    poem_dict = {"Type": "beta"}

    for letter, res in zip(input_letter, res_l):
        poem_dict[poem(letter)] = res

    return poem_dict
|
130 |
-
|
131 |
-
def alpha_poem(input_letter):
    """Build an acrostic for *input_letter*, alpha strategy.

    Each line is generated directly from the letter itself (no seed word):
    the letter — preceded by the previous line's tokens after the first
    letter — is fed to the model, and generation is retried until the new
    line is longer than one token.

    Returns a dict {"Type": "alpha", poem(letter): line, ...}.
    """
    # Initial-sound-law (두음법칙) substitutions (duplicated in beta_poem).
    # NOTE(review): restored from the standard dooeum table after the
    # original literals were mojibake'd — verify.
    dooeum = {"라": "나", "락": "낙", "란": "난", "랄": "날", "람": "남", "랍": "납", "랑": "낭",
              "래": "내", "랭": "냉", "냑": "약", "략": "약", "냥": "양", "량": "양", "녀": "여",
              "려": "여", "녁": "역", "력": "역", "년": "연", "련": "연", "녈": "열", "렬": "열",
              "념": "염", "렴": "염", "렵": "엽", "녕": "영", "령": "영", "녜": "예", "례": "예",
              "로": "노", "록": "녹", "론": "논", "롱": "농", "뢰": "뇌", "뇨": "요", "료": "요",
              "룡": "용", "루": "누", "뉴": "유", "류": "유", "뉵": "육", "륙": "육", "륜": "윤",
              "률": "율", "륭": "융", "륵": "늑", "름": "늠", "릉": "능", "니": "이", "리": "이",
              "린": "인", "림": "임", "립": "입"}
    res_l = []  # one 1-D tensor of generated token ids per letter

    for idx, val in enumerate(input_letter):
        # Apply the initial sound law.
        if val in dooeum:
            val = dooeum[val]

        # Retry until the freshly generated line has more than one token.
        while True:
            if idx == 0:
                # First letter: generate from the letter alone.
                input_ids = tokenizer.encode(
                    val, add_special_tokens=False, return_tensors="pt")

                output_sequence = model.generate(
                    input_ids=input_ids,
                    do_sample=True,
                    max_length=42,
                    min_length=5,
                    temperature=0.9,
                    repetition_penalty=1.7,
                    no_repeat_ngram_size=2)[0]
            else:
                # Subsequent letters: encode the letter...
                input_ids = tokenizer.encode(
                    val, add_special_tokens=False, return_tensors="pt")

                # ...and prepend the previous line's tokens so the model
                # continues the poem coherently, reshaped to (1, seq_len).
                link_with_pre_sentence = torch.cat((generated_sequence, input_ids[0]), 0)
                link_with_pre_sentence = torch.reshape(link_with_pre_sentence, (1, len(link_with_pre_sentence)))

                output_sequence = model.generate(
                    input_ids=link_with_pre_sentence,
                    do_sample=True,
                    max_length=42,
                    min_length=5,
                    temperature=0.9,
                    repetition_penalty=1.7,
                    no_repeat_ngram_size=2)[0]

            # Generated ids as a plain list; may carry trailing padding.
            generated_sequence = output_sequence.tolist()

            # Strip padding (may be absent, hence the membership check).
            if tokenizer.pad_token_id in generated_sequence:
                generated_sequence = generated_sequence[:generated_sequence.index(tokenizer.pad_token_id)]

            generated_sequence = torch.tensor(generated_sequence)

            if idx != 0:
                # Drop the prefix (previous line) so only the new line remains.
                generated_sequence = generated_sequence[len_sequence:]

            len_sequence = len(generated_sequence)

            # One leftover token means nothing new was generated — retry.
            if len_sequence > 1:
                break

        res_l.append(generated_sequence)

    poem_dict = {"Type": "alpha"}

    for letter, res in zip(input_letter, res_l):
        decode_res = tokenizer.decode(res, clean_up_tokenization_spaces=True, skip_special_tokens=True)
        poem_dict[poem(letter)] = decode_res

    return poem_dict
|
228 |
-
|
229 |
-
# Image(.gif)
@st.cache(show_spinner=False)
def load_lottieurl(url: str):
    """Fetch a Lottie animation JSON from *url*.

    Returns the parsed JSON dict, or None on any non-200 response.
    """
    # timeout added so a dead CDN cannot hang the page load indefinitely
    r = requests.get(url, timeout=10)
    if r.status_code != 200:
        return None
    return r.json()
|
236 |
-
|
237 |
-
lottie_url = "https://assets7.lottiefiles.com/private_files/lf30_fjln45y5.json"

lottie_json = load_lottieurl(lottie_url)
st_lottie(lottie_json, speed=1, height=200, key="initial")


# Title
# NOTE(review): every Korean/emoji literal below was mojibake'd during
# extraction — restore the original UTF-8 text before shipping.
row0_spacer1, row0_1, row0_spacer2, row0_2, row0_spacer3 = st.columns(
    (0.01, 2, 0.05, 0.5, 0.01)
)

with row0_1:
    # Page title and team banner (garbled Korean)
    st.markdown("# νκΈ λΈλ κ°μ¬ nνμβ")
    st.markdown("### π¦λ©μμ΄μ¬μμ²λΌ AIS7π¦ - νμ΄λ νλ‘μ νΈ")

with row0_2:
    st.write("")
    st.write("")
    st.write("")
    # Team name and member list (garbled Korean)
    st.subheader("1μ‘° - ν΄ν리")
    st.write("μ΄μ§ν, μ΅μ§μ, κΆμν¬, λ¬Έμ’ν, ꡬμν, κΉμμ€")

st.write('---')

# Explanation
row1_spacer1, row1_1, row1_spacer2 = st.columns((0.01, 0.01, 0.01))

with row1_1:
    # Usage guide: enter a word (<= 5 syllables), press the button;
    # Alpha generates the first line itself, Beta seeds it from the dataset.
    st.markdown("### nνμ κ°μ΄λλΌμΈ")
    st.markdown("1. νλ¨μ μλ νμ€νΈλ°μ 5μ μ΄ν λ¨μ΄λ₯Ό λ£μ΄μ£ΌμΈμ")
    st.markdown("2. 'nνμ μ μνκΈ°' λ²νΌμ ν΄λ¦ν΄μ£ΌμΈμ")
    st.markdown("* nνμ νμ μ€μ \n"
                " * Alpha ver. : λͺ¨λΈμ΄ 첫 μμ λΆν° μμ±\n"
                " * Beta ver. : 첫 μμ μ λ°μ΄ν°μ μμ μ°Ύκ³ , λ€μ λΆλΆμ μμ±")

st.write('---')

# Model & Input
row2_spacer1, row2_1, row2_spacer2= st.columns((0.01, 0.01, 0.01))

col1, col2 = st.columns(2)

# Word Input
with row2_1:

    with col1:
        # Strategy picker: Alpha or Beta (beta marked as "in test")
        genre = st.radio(
            "nνμ νμ μ ν",
            ('Alpha', 'Beta(testμ€)'))

        # Dispatch on the chosen strategy; both callables share one signature.
        if genre == 'Alpha':
            n_line_poem = alpha_poem

        else:
            n_line_poem = beta_poem

    with col2:
        # Free-text input, capped at 5 characters.
        word_input = st.text_input(
            "nνμμ μ¬μ©ν λ¨μ΄λ₯Ό μ κ³ λ²νΌμ λλ¬μ£ΌμΈμ.(μ΅λ 5μ) π",
            placeholder='νκΈ λ¨μ΄λ₯Ό μλ ₯ν΄μ£ΌμΈμ',
            max_chars=5
        )
        # Keep only Hangul syllables; the character class is the mojibake'd
        # range 가-힣 — restore it or non-Korean input will not be stripped.
        word_input = re.sub("[^κ°-ν£]", "", word_input)

        if st.button('nνμ μ μνκΈ°'):
            # Empty after filtering means the input had no Hangul at all.
            if word_input == "":
                st.error("μ¨μ ν νκΈ λ¨μ΄λ₯Ό μ¬μ©ν΄μ£ΌμΈμ!")

            else:
                st.write("nνμ λ¨μ΄ : ", word_input)
                with st.spinner('μ μ κΈ°λ€λ €μ£ΌμΈμ...'):
                    result = n_line_poem(word_input)
                st.success('μλ£λμ΅λλ€!')
                # result maps poem(letter) -> generated line, plus "Type".
                for r in result:
                    st.write(f'{r} : {result[r]}')
|
312 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|