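"""GiniGEN 🤖 — a Gradio chat app for fantasy storytelling.

Streams replies from a Hugging Face InferenceClient, analyzes uploaded
text/CSV/Parquet files, and persists chat history to a JSON file.
"""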
import os
from dotenv import load_dotenv
import gradio as gr
from huggingface_hub import InferenceClient
import pandas as pd
from typing import List, Tuple
import json
from datetime import datetime
# Load environment variables (reads a local .env file if present)
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
# LLM Models Definition
LLM_MODELS = {
"Cohere c4ai-crp-08-2024": "CohereForAI/c4ai-command-r-plus-08-2024", # Default
"Meta Llama3.3-70B": "meta-llama/Llama-3.3-70B-Instruct" # Backup model
}
class ChatHistory:
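    """Conversation log persisted as JSON in /tmp so history survives app restarts."""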
def __init__(self):
self.history = []
self.history_file = "/tmp/chat_history.json"
self.load_history()
def add_conversation(self, user_msg: str, assistant_msg: str):
conversation = {
"timestamp": datetime.now().isoformat(),
"messages": [
{"role": "user", "content": user_msg},
{"role": "assistant", "content": assistant_msg}
]
}
self.history.append(conversation)
self.save_history()
def format_for_display(self):
        # Convert to the format expected by the Gradio Chatbot component
formatted = []
for conv in self.history:
formatted.append([
conv["messages"][0]["content"], # user message
conv["messages"][1]["content"] # assistant message
])
return formatted
def get_messages_for_api(self):
        # Message format for API calls
messages = []
for conv in self.history:
messages.extend([
{"role": "user", "content": conv["messages"][0]["content"]},
{"role": "assistant", "content": conv["messages"][1]["content"]}
])
return messages
def clear_history(self):
self.history = []
self.save_history()
def save_history(self):
try:
with open(self.history_file, 'w', encoding='utf-8') as f:
json.dump(self.history, f, ensure_ascii=False, indent=2)
except Exception as e:
print(f"νμ€ν 리 μ μ₯ μ€ν¨: {e}")
def load_history(self):
try:
if os.path.exists(self.history_file):
with open(self.history_file, 'r', encoding='utf-8') as f:
self.history = json.load(f)
except Exception as e:
print(f"νμ€ν 리 λ‘λ μ€ν¨: {e}")
self.history = []
# Create a global ChatHistory instance
chat_history = ChatHistory()
def get_client(model_name="Cohere c4ai-crp-08-2024"):
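    """Create an InferenceClient for the chosen model, falling back to the backup model on any error."""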
try:
return InferenceClient(LLM_MODELS[model_name], token=HF_TOKEN)
except Exception:
return InferenceClient(LLM_MODELS["Meta Llama3.3-70B"], token=HF_TOKEN)
def analyze_file_content(content, file_type):
"""Analyze file content and return structural summary"""
if file_type in ['parquet', 'csv']:
try:
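            # content here is a markdown table (see read_uploaded_file), so pipe
            # characters delimit columns and the first lines are the header and separator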
lines = content.split('\n')
header = lines[0]
columns = header.count('|') - 1
rows = len(lines) - 3
return f"π λ°μ΄ν°μ
ꡬ쑰: {columns}κ° μ»¬λΌ, {rows}κ° λ°μ΄ν°"
except:
return "β λ°μ΄ν°μ
ꡬ쑰 λΆμ μ€ν¨"
lines = content.split('\n')
total_lines = len(lines)
non_empty_lines = len([line for line in lines if line.strip()])
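    # Heuristic: treat the content as source code if it contains common code keywords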
if any(keyword in content.lower() for keyword in ['def ', 'class ', 'import ', 'function']):
functions = len([line for line in lines if 'def ' in line])
classes = len([line for line in lines if 'class ' in line])
imports = len([line for line in lines if 'import ' in line or 'from ' in line])
return f"π» μ½λ ꡬ쑰: {total_lines}μ€ (ν¨μ: {functions}, ν΄λμ€: {classes}, μν¬νΈ: {imports})"
paragraphs = content.count('\n\n') + 1
words = len(content.split())
return f"π λ¬Έμ ꡬ쑰: {total_lines}μ€, {paragraphs}λ¨λ½, μ½ {words}λ¨μ΄"
def read_uploaded_file(file):
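    """Read an uploaded file and return (content, file_type), where file_type is 'parquet', 'csv', 'text', or 'error'."""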
if file is None:
return "", ""
try:
file_ext = os.path.splitext(file.name)[1].lower()
if file_ext == '.parquet':
df = pd.read_parquet(file.name, engine='pyarrow')
content = df.head(10).to_markdown(index=False)
return content, "parquet"
elif file_ext == '.csv':
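            # Try UTF-8 first, then common Korean encodings (cp949, euc-kr), then latin1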
encodings = ['utf-8', 'cp949', 'euc-kr', 'latin1']
for encoding in encodings:
try:
df = pd.read_csv(file.name, encoding=encoding)
content = f"π λ°μ΄ν° 미리보기:\n{df.head(10).to_markdown(index=False)}\n\n"
content += f"\nπ λ°μ΄ν° μ 보:\n"
content += f"- μ 체 ν μ: {len(df)}\n"
content += f"- μ 체 μ΄ μ: {len(df.columns)}\n"
content += f"- μ»¬λΌ λͺ©λ‘: {', '.join(df.columns)}\n"
content += f"\nπ μ»¬λΌ λ°μ΄ν° νμ
:\n"
for col, dtype in df.dtypes.items():
content += f"- {col}: {dtype}\n"
null_counts = df.isnull().sum()
if null_counts.any():
content += f"\nβ οΈ κ²°μΈ‘μΉ:\n"
for col, null_count in null_counts[null_counts > 0].items():
content += f"- {col}: {null_count}κ° λλ½\n"
return content, "csv"
except UnicodeDecodeError:
continue
            # UnicodeDecodeError requires five positional args, so raise ValueError instead
            raise ValueError(f"❌ Could not read the file with any supported encoding ({', '.join(encodings)})")
else:
encodings = ['utf-8', 'cp949', 'euc-kr', 'latin1']
for encoding in encodings:
try:
with open(file.name, 'r', encoding=encoding) as f:
content = f.read()
return content, "text"
except UnicodeDecodeError:
continue
            # UnicodeDecodeError requires five positional args, so raise ValueError instead
            raise ValueError(f"❌ Could not read the file with any supported encoding ({', '.join(encodings)})")
except Exception as e:
return f"β νμΌ μ½κΈ° μ€λ₯: {str(e)}", "error"
def chat(message, history, uploaded_file, system_message="", max_tokens=4000, temperature=0.7, top_p=0.9):
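    """Generator used by Gradio: yields ("", updated_history) pairs while streaming the reply."""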
    if not message:
        # this is a generator, so yield (not return) the unchanged state
        yield "", history
        return
    system_prefix = """
You are 'FantasyAI✨', an advanced AI storyteller specialized in creating immersive fantasy narratives. Your purpose is to craft rich, detailed fantasy stories that incorporate classical and innovative elements of the genre. Your responses should start with 'FantasyAI✨:' and focus on creating engaging, imaginative content that brings the story to life. Weave in details that fit the situation to make the storytelling richer and more immersive, and reply in the language the user requests.
[Example openings]
"As the ancient magic awakened, the earth could be heard rumbling..."
"The dragon's breath cut across the sky, setting the clouds ablaze..."
"Mysterious runes glowed as they rose into the air..."
"As the fairies' song rang through the forest, the trees began to dance..."
"The words of prophecy echoed, and the threads of fate began to stir..."
"Light flashing from the wizard's staff cut through the darkness..."
"In an ancient dwarven forge, a legendary sword was being made..."
"The vision of the future within the crystal orb slowly revealed itself..."
"As the sacred barrier shattered, the sealed evil awakened..."
"The hero's footsteps rang out along the path of destiny..."
"""
try:
        # Handle an uploaded file
if uploaded_file:
content, file_type = read_uploaded_file(uploaded_file)
            if file_type == "error":
                error_message = content
                chat_history.add_conversation(message, error_message)
                # yield the error instead of returning it from the generator
                yield "", history + [[message, error_message]]
                return
file_summary = analyze_file_content(content, file_type)
if file_type in ['parquet', 'csv']:
system_message += f"\n\nνμΌ λ΄μ©:\n```markdown\n{content}\n```"
else:
system_message += f"\n\nνμΌ λ΄μ©:\n```\n{content}\n```"
            if message == "Starting file analysis...":
                message = f"""[File structure analysis] {file_summary}

I will help you from the following angles:
1. 📋 Overview of the overall content
2. 💡 Explanation of the key characteristics
3. 🎯 Practical ways to put it to use
4. ✨ Suggestions for improvement
5. 💬 Follow-up questions or points needing explanation
"""
        # Build the message list for the API
messages = [{"role": "system", "content": system_prefix + system_message}]
        # Append prior conversation turns
if history:
for user_msg, assistant_msg in history:
messages.append({"role": "user", "content": user_msg})
messages.append({"role": "assistant", "content": assistant_msg})
messages.append({"role": "user", "content": message})
        # Call the API and stream the response
client = get_client()
partial_message = ""
for msg in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
            # delta is a dict in older huggingface_hub releases and an object in newer ones
            delta = msg.choices[0].delta
            token = delta.get("content") if isinstance(delta, dict) else delta.content
if token:
partial_message += token
current_history = history + [[message, partial_message]]
yield "", current_history
        # Save the completed conversation
chat_history.add_conversation(message, partial_message)
except Exception as e:
        error_msg = f"❌ An error occurred: {str(e)}"
chat_history.add_conversation(message, error_msg)
yield "", history + [[message, error_msg]]
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", title="GiniGEN π€") as demo:
    # Load previously saved history
initial_history = chat_history.format_for_display()
with gr.Row():
with gr.Column(scale=2):
chatbot = gr.Chatbot(
                value=initial_history,  # initialize with the saved history
height=600,
label="λνμ°½ π¬",
show_label=True
)
msg = gr.Textbox(
label="λ©μμ§ μ
λ ₯",
show_label=False,
placeholder="무μμ΄λ λ¬Όμ΄λ³΄μΈμ... π",
container=False
)
with gr.Row():
                clear = gr.ClearButton([msg, chatbot], value="Clear conversation")
                send = gr.Button("Send 📤")
with gr.Column(scale=1):
gr.Markdown("### GiniGEN π€ [νμΌ μ
λ‘λ] π\nμ§μ νμ: ν
μ€νΈ, μ½λ, CSV, Parquet νμΌ")
file_upload = gr.File(
label="νμΌ μ ν",
file_types=["text", ".csv", ".parquet"],
type="filepath"
)
            with gr.Accordion("Advanced settings ⚙️", open=False):
                system_message = gr.Textbox(label="System message 📝", value="")
                max_tokens = gr.Slider(minimum=1, maximum=8000, value=4000, label="Max tokens 📊")
                temperature = gr.Slider(minimum=0, maximum=1, value=0.7, label="Creativity (temperature) 🌡️")
                top_p = gr.Slider(minimum=0, maximum=1, value=0.9, label="Response diversity (top-p) 📈")
            # Example prompts
gr.Examples(
examples=[
["ν₯λ―Έλ‘μ΄ μμ¬ 10κ°μ§λ₯Ό μ μν΄μ€μ π€"],
["λμ± νμμ μ΄κ³ μμΈν λ¬μ¬λ₯Ό μμΈνν΄μ€μ π"],
["μ΄μΈκ²(λ€λ₯Έ μ°¨μμ μΈμ) λ°°κ²½μΌλ‘ ν΄μ€μ π―"],
["μμ§μλ μλλ‘ νμμν μμ¬λ‘ μμ±μ± β¨"],
["κ³μ μ΄μ΄μ μμ±ν΄μ€ π€"],
],
inputs=msg,
)
    # The clear button also resets the persisted history
def clear_chat():
chat_history.clear_history()
return None, None
    # Event bindings
msg.submit(
chat,
inputs=[msg, chatbot, file_upload, system_message, max_tokens, temperature, top_p],
outputs=[msg, chatbot]
)
send.click(
chat,
inputs=[msg, chatbot, file_upload, system_message, max_tokens, temperature, top_p],
outputs=[msg, chatbot]
)
clear.click(
clear_chat,
outputs=[msg, chatbot]
)
    # Automatically analyze a file on upload
file_upload.change(
lambda: "νμΌ λΆμμ μμν©λλ€...",
outputs=msg
).then(
chat,
inputs=[msg, chatbot, file_upload, system_message, max_tokens, temperature, top_p],
outputs=[msg, chatbot]
)
if __name__ == "__main__":
demo.launch() |