Update app.py
app.py CHANGED
@@ -14,6 +14,7 @@ import torch
 # Set up logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
+
 # Install llama-cpp-python with appropriate backend
 try:
     from llama_cpp import Llama
@@ -25,15 +26,18 @@ except ModuleNotFoundError:
     else:
         logger.info("Installing llama-cpp-python without additional flags.")
         subprocess.check_call([sys.executable, "-m", "pip", "install", "llama-cpp-python", "--force-reinstall", "--upgrade", "--no-cache-dir"])
-    from llama_cpp import Llama
+    from llama_cpp import Llama
+
 # Install yfinance if not present (for CAGR calculations)
 try:
     import yfinance as yf
 except ModuleNotFoundError:
     subprocess.check_call([sys.executable, "-m", "pip", "install", "yfinance"])
-    import yfinance as yf
+    import yfinance as yf
+
 # Import pandas for handling DataFrame column structures
 import pandas as pd
+
 # Additional imports for visualization and file handling
 try:
     import matplotlib.pyplot as plt
@@ -41,18 +45,22 @@ try:
     import io
 except ModuleNotFoundError:
     subprocess.check_call([sys.executable, "-m", "pip", "install", "matplotlib", "pillow"])
-    import matplotlib.pyplot as plt
-    from PIL import Image
-    import io
+    import matplotlib.pyplot as plt
+    from PIL import Image
+    import io
+
 MAX_MAX_NEW_TOKENS = 512
 DEFAULT_MAX_NEW_TOKENS = 512
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))
+
 DESCRIPTION = """# FinChat: Investing Q&A (Optimized for Speed)
 This application delivers an interactive chat interface powered by a highly efficient, small AI model adapted for addressing investing and finance inquiries through specialized prompt engineering. It ensures rapid, reasoned responses to user queries. Duplicate this Space for customization or queue-free deployment.
 <p>Running on CPU or GPU if available. Using Phi-2 model for faster inference. Inference is heavily optimized for responses in under 10 seconds for simple queries, with output limited to 250 tokens maximum. For longer responses, increase 'Max New Tokens' in Advanced Settings. Brief delays may occur in free-tier environments due to shared resources, but typical generation speeds are improved with the smaller model.</p>"""
+
 LICENSE = """<p/>
 ---
 This application employs the Phi-2 model, governed by Microsoft's Terms of Use. Refer to the [model card](https://huggingface.co/TheBloke/phi-2-GGUF) for details."""
+
 # Load the model (skip fine-tuning for faster startup)
 try:
     model_path = hf_hub_download(
@@ -63,10 +71,10 @@ try:
     llm = Llama(
         model_path=model_path,
         n_ctx=1024,
-        n_batch=1024,
+        n_batch=1024,  # Increased for faster processing
         n_threads=multiprocessing.cpu_count(),
         n_gpu_layers=n_gpu_layers,
-        chat_format="chatml"
+        chat_format="chatml"  # Phi-2 uses ChatML format in llama.cpp
     )
     logger.info(f"Model loaded successfully with n_gpu_layers={n_gpu_layers}.")
     # Warm up the model for faster initial inference
@@ -75,8 +83,10 @@ try:
 except Exception as e:
     logger.error(f"Error loading model: {str(e)}")
     raise
+
 # Register explicit close for llm to avoid destructor error
 atexit.register(llm.close)
+
 DEFAULT_SYSTEM_PROMPT = """You are FinChat, a knowledgeable AI assistant specializing in investing and finance. Provide accurate, helpful, reasoned, and concise answers to investing questions. Always base responses on reliable information and advise users to consult professionals for personalized advice.
 Always respond exclusively in English. Use bullet points for clarity.
 Example:
@@ -86,315 +96,142 @@ Assistant:
 - Represents average annual return with compounding
 - Past performance not indicative of future results
 - Consult a financial advisor"""
+
 # Company name to ticker mapping (expand as needed)
 COMPANY_TO_TICKER = {
     "opendoor": "OPEN",
     "tesla": "TSLA",
     "apple": "AAPL",
-
-    # Add more mappings for common companies
+    # Add more mappings as necessary
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
-
-if
-
-
-
-            rate = float(compound_match.group(2)) / 100
-            years = int(compound_match.group(3))
-            if principal <= 0 or rate < 0 or years <= 0:
-                yield "Invalid input values: principal, rate, and years must be positive."
-                return
-            balance = principal * (1 + rate) ** years
-            response = (
-                f"- Starting with ${principal:,.2f} at {rate*100:.2f}% annual interest, compounded annually over {years} years.\n"
-                f"- Projected balance in year {years}: ${balance:,.2f}\n"
-                f"- Assumptions: Annual compounding; no additional deposits or withdrawals.\n"
-                f"- This is a projection; actual results may vary. Consult a financial advisor."
-            )
-            logger.info("Compound interest response generated.")
-            yield response
-            return
-        except ValueError as ve:
-            logger.error(f"Error parsing compound interest query: {str(ve)}")
-            yield "Error parsing query: Please ensure amount, rate, and years are valid numbers."
-            return
-    # Check for CAGR/average return queries (use re.search for flexible matching)
-    match = re.search(r'(?:average return|cagr) for ([\w\s,]+(?:and [\w\s,]+)?) between (\d{4}) and (\d{4})', lower_message)
-    if match:
-        tickers_str, start_year, end_year = match.groups()
-        tickers = [t.strip().upper() for t in re.split(r',|\band\b', tickers_str) if t.strip()]
-        # Apply company-to-ticker mapping
-        for i in range(len(tickers)):
-            lower_ticker = tickers[i].lower()
-            if lower_ticker in COMPANY_TO_TICKER:
-                tickers[i] = COMPANY_TO_TICKER[lower_ticker]
-        responses = []
-        if int(end_year) <= int(start_year):
-            yield "The specified time period is invalid (end year must be after start year)."
-            return
-        for ticker in tickers:
-            try:
-                # Download data with adjusted close prices
-                data = yf.download(ticker, start=f"{start_year}-01-01", end=f"{end_year}-12-31", progress=False, auto_adjust=False)
-                # Handle potential MultiIndex columns in newer yfinance versions
-                if isinstance(data.columns, pd.MultiIndex):
-                    data.columns = data.columns.droplevel(1)
-                if not data.empty:
-                    # Check if 'Adj Close' column exists
-                    if 'Adj Close' not in data.columns:
-                        responses.append(f"- {ticker}: Error - Adjusted Close price data not available.")
-                        logger.error(f"No 'Adj Close' column for {ticker}.")
-                        continue
-                    # Ensure data is not MultiIndex for single ticker (already handled)
-                    initial = data['Adj Close'].iloc[0]
-                    final = data['Adj Close'].iloc[-1]
-                    start_date = data.index[0]
-                    end_date = data.index[-1]
-                    days = (end_date - start_date).days
-                    years = days / 365.25
-                    if years > 0 and pd.notna(initial) and pd.notna(final):
-                        cagr = ((final / initial) ** (1 / years) - 1) * 100
-                        responses.append(f"- {ticker}: ~{cagr:.2f}%")
-                    else:
-                        responses.append(f"- {ticker}: Invalid period or missing price data.")
-                else:
-                    responses.append(f"- {ticker}: No historical data available between {start_year} and {end_year}.")
-            except Exception as e:
-                logger.error(f"Error calculating CAGR for {ticker}: {str(e)}")
-                responses.append(f"- {ticker}: Error calculating CAGR - {str(e)}")
-        full_response = f"CAGR for the requested stocks from {start_year} to {end_year}:\n" + "\n".join(responses) + "\n- Represents average annual returns with compounding\n- Past performance not indicative of future results\n- Consult a financial advisor"
-        full_response = re.sub(r'<\|(?:im_start|im_end|system|user|assistant)\|>|</s>|\[END\]', '', full_response).strip()  # Clean any trailing tokens
-        # Estimate token count to ensure response fits within max_new_tokens
-        response_tokens = len(llm.tokenize(full_response.encode("utf-8"), add_bos=False))
-        if response_tokens > max_new_tokens:
-            logger.warning(f"CAGR response tokens ({response_tokens}) exceed max_new_tokens ({max_new_tokens}). Truncating to first complete sentence.")
-            sentence_endings = ['.', '!', '?']
-            first_sentence_end = min([full_response.find(ending) + 1 for ending in sentence_endings if full_response.find(ending) != -1], default=len(full_response))
-            full_response = full_response[:first_sentence_end] if first_sentence_end > 0 else "Response truncated due to length; please increase Max New Tokens."
-        logger.info("CAGR response generated.")
-        yield full_response
-        return
-    # Build conversation messages (limit history to last 3 for speed)
-    conversation = [{"role": "system", "content": system_prompt}]
-    for msg in chat_history[-3:]:  # Reduced from 5 to 3 for faster processing
-        if msg["role"] == "user":
-            conversation.append({"role": "user", "content": msg["content"]})
-        elif msg["role"] == "assistant":
-            conversation.append({"role": "assistant", "content": msg["content"]})
-    conversation.append({"role": "user", "content": message})
-    # Approximate token length check and truncate if necessary
-    prompt_text = "\n".join(d["content"] for d in conversation)
-    input_tokens = llm.tokenize(prompt_text.encode("utf-8"), add_bos=False)
-    while len(input_tokens) > MAX_INPUT_TOKEN_LENGTH:
-        logger.warning(f"Input tokens ({len(input_tokens)}) exceed limit ({MAX_INPUT_TOKEN_LENGTH}). Truncating history.")
-        if len(conversation) > 2:  # Preserve system prompt and current user message
-            conversation.pop(1)  # Remove oldest user/assistant pair
-            prompt_text = "\n".join(d["content"] for d in conversation)
-            input_tokens = llm.tokenize(prompt_text.encode("utf-8"), add_bos=False)
-        else:
-            yield "Error: Input is too long even after truncation. Please shorten your query."
-            return
-    # Generate response with sentence boundary checking and token cleanup
     try:
-        response =
-
-        token_count = 0
-        stream = llm.create_chat_completion(
-            messages=conversation,
+        response = llm(
+            formatted_prompt,
             max_tokens=max_new_tokens,
             temperature=temperature,
-
-
-            repeat_penalty=repetition_penalty,
-            stream=True
+            stop=["<|im_end|>"],
+            echo=False
         )
-
-
-
-
-            # Clean the chunk by removing ChatML tokens or similar
-            cleaned_chunk = re.sub(r'<\|(?:im_start|im_end|system|user|assistant)\|>|</s>|\[END\]', '', delta["content"])
-            if not cleaned_chunk:
-                continue
-            sentence_buffer += cleaned_chunk
-            response += cleaned_chunk
-            # Approximate token count for the chunk
-            chunk_tokens = len(llm.tokenize(cleaned_chunk.encode("utf-8"), add_bos=False))
-            token_count += chunk_tokens
-            # Check for sentence boundary
-            if any(sentence_buffer.strip().endswith(ending) for ending in sentence_endings):
-                yield response
-                sentence_buffer = ""  # Clear buffer after yielding a complete sentence
-            # Removed early truncation to allow full token utilization
-            if chunk["choices"][0]["finish_reason"] is not None:
-                # Yield any remaining complete sentence in the buffer
-                if sentence_buffer.strip():
-                    last_sentence_end = max([sentence_buffer.rfind(ending) for ending in sentence_endings if sentence_buffer.rfind(ending) != -1], default=-1)
-                    if last_sentence_end != -1:
-                        response = response[:response.rfind(sentence_buffer) + last_sentence_end + 1]
-                        yield response
-                    else:
-                        yield response
-                else:
-                    yield response
-                break
-        logger.info("Response generation completed.")
-    except ValueError as ve:
-        if "exceed context window" in str(ve):
-            yield "Error: Prompt too long for context window. Please try a shorter query or clear history."
-        else:
-            logger.error(f"Error during response generation: {str(ve)}")
-            yield f"Error generating response: {str(ve)}"
+        assistant_response = response['choices'][0]['text'].strip()
+        history.append((message, assistant_response))
+        increment_usage()  # Track usage after successful generation
+        return history, ""
     except Exception as e:
-        logger.error(f"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        return "", None
-    total_value_now = sum(v['value'] for v in portfolio.values())
-    allocations = {k: v['value'] / total_value_now for k, v in portfolio.items()} if total_value_now > 0 else {}
-    fig_alloc, ax_alloc = plt.subplots()
-    ax_alloc.pie(allocations.values(), labels=allocations.keys(), autopct='%1.1f%%')
-    ax_alloc.set_title('Portfolio Allocation')
-    buf_alloc = io.BytesIO()
-    fig_alloc.savefig(buf_alloc, format='png')
-    buf_alloc.seek(0)
-    chart_alloc = Image.open(buf_alloc)
-    plt.close(fig_alloc)  # Close the figure to free memory
-    def project_value(value, years, rate):
-        return value * (1 + rate / 100) ** years
-    total_value_1yr = sum(project_value(v['value'], 1, growth_rate) for v in portfolio.values())
-    total_value_2yr = sum(project_value(v['value'], 2, growth_rate) for v in portfolio.values())
-    total_value_5yr = sum(project_value(v['value'], 5, growth_rate) for v in portfolio.values())
-    total_value_10yr = sum(project_value(v['value'], 10, growth_rate) for v in portfolio.values())
-    data_str = (
-        "User portfolio:\n" +
-        "\n".join(f"- {k}: {v['shares']} shares, avg cost {v['cost']}, current price {v['price']}, value ${v['value']:,.2f}" for k, v in portfolio.items()) +
-        f"\nTotal value now: ${total_value_now:,.2f}\nProjected (at {growth_rate}% annual growth):\n" +
-        f"- 1 year: ${total_value_1yr:,.2f}\n- 2 years: ${total_value_2yr:,.2f}\n- 5 years: ${total_value_5yr:,.2f}\n- 10 years: ${total_value_10yr:,.2f}"
-    )
-    return data_str, chart_alloc
-def fetch_current_prices(df):
-    if df is None or len(df) == 0:
-        return df
-    # Convert to DataFrame if needed
-    if not isinstance(df, pd.DataFrame):
-        df = pd.DataFrame(df, columns=["Ticker", "Shares", "Avg Cost", "Current Price"])
-    for i in df.index:
-        ticker = df.at[i, "Ticker"]
-        if pd.notna(ticker) and ticker.strip():
-            try:
-                price = yf.Ticker(ticker.upper()).info.get('currentPrice', None)
-                if price is not None:
-                    df.at[i, "Current Price"] = price
-            except Exception as e:
-                logger.warning(f"Failed to fetch price for {ticker}: {str(e)}")
-    return df
-# Gradio interface setup
-with gr.Blocks(theme=themes.Soft(), css="""#chatbot {height: 800px; overflow: auto;}""") as demo:
+        logger.error(f"Prediction error: {str(e)}")
+        return history, "An error occurred during generation. Please try again."
+
+# Additional functions like calculate_cagr, etc., remain unchanged.
+def calculate_cagr(ticker, start_date, end_date):
+    try:
+        data = yf.download(ticker, start=start_date, end=end_date)
+        if data.empty:
+            return None
+        start_price = data['Adj Close'].iloc[0]
+        end_price = data['Adj Close'].iloc[-1]
+        years = (pd.to_datetime(end_date) - pd.to_datetime(start_date)).days / 365.25
+        cagr = (end_price / start_price) ** (1 / years) - 1
+        return cagr * 100
+    except Exception as e:
+        logger.error(f"CAGR calculation error: {str(e)}")
+        return None
+
+# Main Gradio interface with added usage button
+with gr.Blocks(theme=themes.Soft()) as demo:
     gr.Markdown(DESCRIPTION)
-    chatbot = gr.Chatbot(label="FinChat", type="messages")
-    msg = gr.Textbox(label="Ask a finance question", placeholder="e.g., 'What is CAGR?' or 'Average return for AAPL between 2010 and 2020'", info="Enter your query here. Portfolio data will be appended if provided.")
-    with gr.Row():
-        submit = gr.Button("Submit", variant="primary")
-        clear = gr.Button("Clear")
-    gr.Examples(
-        examples=["What is CAGR?", "Average return for AAPL between 2010 and 2020", "Hi", "Explain compound interest"],
-        inputs=msg,
-        label="Example Queries"
-    )
-    with gr.Accordion("Enter Portfolio for Projections", open=False):
-        portfolio_df = gr.Dataframe(
-            headers=["Ticker", "Shares", "Avg Cost", "Current Price"],
-            datatype=["str", "number", "number", "number"],
-            row_count=3,
-            col_count=(4, "fixed"),
-            label="Portfolio Data",
-            interactive=True
-        )
-        gr.Markdown("Enter your stocks here. You can add more rows by editing the table.")
-        fetch_button = gr.Button("Fetch Current Prices", variant="secondary")
-        fetch_button.click(fetch_current_prices, inputs=portfolio_df, outputs=portfolio_df)
-        growth_rate = gr.Slider(minimum=5, maximum=50, step=5, value=10, label="Annual Growth Rate (%)", interactive=True, info="Select the assumed annual growth rate for projections.")
-        growth_rate_label = gr.Markdown("**Selected Growth Rate: 10%**")
-    with gr.Accordion("Advanced Settings", open=False):
-        system_prompt = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=6, info="Customize the AI's system prompt.")
-        temperature = gr.Slider(label="Temperature", value=0.6, minimum=0.0, maximum=1.0, step=0.05, info="Controls randomness: lower is more deterministic.")
-        top_p = gr.Slider(label="Top P", value=0.9, minimum=0.0, maximum=1.0, step=0.05, info="Nucleus sampling: higher includes more diverse tokens.")
-        top_k = gr.Slider(label="Top K", value=50, minimum=1, maximum=100, step=1, info="Top-K sampling: limits to top K tokens.")
-        repetition_penalty = gr.Slider(label="Repetition Penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, info="Penalizes repeated tokens.")
-        max_new_tokens = gr.Slider(label="Max New Tokens", value=DEFAULT_MAX_NEW_TOKENS, minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, info="Maximum length of generated response.")
     gr.Markdown(LICENSE)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    with gr.Row():
+        with gr.Column(scale=4):
+            chatbot = gr.Chatbot(
+                height=500,
+                show_label=False,
+                avatar_images=(None, None)
+            )
+            msg = gr.Textbox(
+                label="Your Question",
+                placeholder="Ask about stocks, investing...",
+                show_label=False,
+                scale=8
+            )
+            with gr.Row():
+                submit_btn = gr.Button("Submit", variant="primary", scale=1)
+                clear_btn = gr.Button("Clear", scale=1)
+                stats_btn = gr.Button("Usage Stats", size="sm", scale=1)  # New small button
+
+        with gr.Column(scale=1):
+            gr.Markdown("### Advanced Settings")
+            system_prompt = gr.Textbox(
+                value=DEFAULT_SYSTEM_PROMPT,
+                label="System Prompt",
+                lines=6,
+                max_lines=6
+            )
+            max_tokens_slider = gr.Slider(
+                minimum=50,
+                maximum=MAX_MAX_NEW_TOKENS,
+                value=DEFAULT_MAX_NEW_TOKENS,
+                step=1,
+                label="Max New Tokens"
+            )
+            temperature_slider = gr.Slider(
+                minimum=0,
+                maximum=2,
+                value=0.7,
+                step=0.1,
+                label="Temperature"
+            )
+            gr.Markdown("### Usage Stats")
+            stats_output = gr.Textbox(
+                label="Statistics",
+                interactive=False,
+                visible=False
+            )
+
+    # Events
+    msg.submit(predict, [msg, chatbot, system_prompt, max_tokens_slider, temperature_slider], [chatbot, msg])
+    submit_btn.click(predict, [msg, chatbot, system_prompt, max_tokens_slider, temperature_slider], [chatbot, msg])
+    clear_btn.click(lambda: ([], ""), None, chatbot, queue=False)
+    stats_btn.click(
+        lambda: gr.update(visible=True),
+        None,
+        stats_output
+    ).then(
+        get_usage_stats,
+        None,
+        stats_output
     )
-
-
+
+if __name__ == "__main__":
+    demo.launch()
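For reference, the ChatML prompt that the new predict function assembles can be previewed without loading the model. The sketch below is standalone and mirrors the formatted_prompt loop added in this commit; the system prompt and history contents are made-up placeholders, not values from the app. Generation stops at the next <|im_end|>, which is why the new llm(...) call passes stop=["<|im_end|>"].

# Minimal sketch of the ChatML prompt assembly added in this commit.
# No model required; inputs below are hypothetical placeholders.
system_prompt = "You are FinChat, a concise investing assistant."
history = [("What is CAGR?", "- Compound annual growth rate, the smoothed yearly return.")]
message = "How is it calculated?"

messages = []
for user_msg, assistant_msg in history:
    messages.append({"role": "user", "content": user_msg})
    if assistant_msg:
        messages.append({"role": "assistant", "content": assistant_msg})
messages.append({"role": "user", "content": message})

prompt = "<|im_start|>system\n" + system_prompt + "<|im_end|>\n"
for m in messages:
    prompt += f"<|im_start|>{m['role']}\n{m['content']}<|im_end|>\n"
prompt += "<|im_start|>assistant\n"  # the model completes from here until <|im_end|>

print(prompt)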
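The CAGR formula in calculate_cagr can be sanity-checked by hand. A worked example with hypothetical prices (no yfinance download involved): a position growing from $100 to $200 over 10 years compounds at roughly 7.18% per year.

# Worked CAGR example with assumed prices, matching the formula above.
start_price = 100.0  # assumed adjusted close on the start date
end_price = 200.0    # assumed adjusted close on the end date
years = 10.0

cagr = (end_price / start_price) ** (1 / years) - 1
print(f"CAGR: {cagr * 100:.2f}%")  # -> CAGR: 7.18%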