Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -4,12 +4,14 @@ import time
|
|
4 |
import json
|
5 |
import random
|
6 |
import finnhub
|
|
|
7 |
import gradio as gr
|
8 |
import pandas as pd
|
9 |
import yfinance as yf
|
10 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM
|
11 |
from peft import PeftModel
|
|
|
12 |
from datetime import date, datetime, timedelta
|
|
|
13 |
|
14 |
os.environ['HF_HOME'] = '/data/.huggingface'
|
15 |
|
@@ -36,7 +38,6 @@ tokenizer = AutoTokenizer.from_pretrained(
|
|
36 |
token=access_token
|
37 |
)
|
38 |
|
39 |
-
|
40 |
B_INST, E_INST = "[INST]", "[/INST]"
|
41 |
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
|
42 |
|
@@ -208,17 +209,18 @@ def get_all_prompts_online(symbol, data, curday, with_basics=True):
|
|
208 |
return info, prompt
|
209 |
|
210 |
|
211 |
-
def construct_prompt(ticker,
|
212 |
|
213 |
-
curday = get_curday()
|
214 |
steps = [n_weeks_before(curday, n) for n in range(n_weeks + 1)][::-1]
|
215 |
data = get_stock_data(ticker, steps)
|
216 |
data = get_news(ticker, data)
|
217 |
data['Basics'] = [json.dumps({})] * len(data)
|
|
|
218 |
|
219 |
info, prompt = get_all_prompts_online(ticker, data, curday, use_basics)
|
220 |
|
221 |
prompt = B_INST + B_SYS + SYSTEM_PROMPT + E_SYS + prompt + E_INST
|
|
|
222 |
|
223 |
return info, prompt
|
224 |
|
@@ -228,8 +230,7 @@ def predict(ticker, date, n_weeks, use_basics):
|
|
228 |
info, prompt = construct_prompt(ticker, date, n_weeks, use_basics)
|
229 |
|
230 |
inputs = tokenizer(
|
231 |
-
prompt, return_tensors='pt',
|
232 |
-
padding=False, max_length=4096
|
233 |
)
|
234 |
inputs = {key: value.to(model.device) for key, value in inputs.items()}
|
235 |
|
@@ -240,6 +241,8 @@ def predict(ticker, date, n_weeks, use_basics):
|
|
240 |
)
|
241 |
output = tokenizer.decode(res[0], skip_special_tokens=True)
|
242 |
answer = re.sub(r'.*\[/INST\]\s*', '', output, flags=re.DOTALL)
|
|
|
|
|
243 |
|
244 |
return info, answer
|
245 |
|
|
|
4 |
import json
|
5 |
import random
|
6 |
import finnhub
|
7 |
+
import torch
|
8 |
import gradio as gr
|
9 |
import pandas as pd
|
10 |
import yfinance as yf
|
|
|
11 |
from peft import PeftModel
|
12 |
+
from collections import defaultdict
|
13 |
from datetime import date, datetime, timedelta
|
14 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
15 |
|
16 |
os.environ['HF_HOME'] = '/data/.huggingface'
|
17 |
|
|
|
38 |
token=access_token
|
39 |
)
|
40 |
|
|
|
41 |
B_INST, E_INST = "[INST]", "[/INST]"
|
42 |
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
|
43 |
|
|
|
209 |
return info, prompt
|
210 |
|
211 |
|
212 |
+
def construct_prompt(ticker, curday, n_weeks, use_basics):
    """Build the Llama-2 chat prompt for a single stock-forecast request.

    Parameters
    ----------
    ticker : str
        Stock symbol the prompt is built for.
    curday : str
        Anchor date for the forecast; passed to the week-stepping and
        prompt-assembly helpers (presumably 'YYYY-MM-DD' — confirm against
        get_curday()/n_weeks_before).
    n_weeks : int
        Number of weekly look-back windows of price/news data to include.
    use_basics : bool
        Forwarded to get_all_prompts_online; controls whether company-basics
        text is included in the generated prompt.

    Returns
    -------
    tuple
        (info, prompt): the info payload produced by get_all_prompts_online
        and the fully wrapped prompt string ready for the tokenizer.
    """
    # Weekly step dates; reversed so the sequence runs oldest -> newest
    # (n_weeks_before presumably yields dates newest-first — TODO confirm).
    steps = [n_weeks_before(curday, n) for n in range(n_weeks + 1)][::-1]

    data = get_stock_data(ticker, steps)
    data = get_news(ticker, data)
    # Online mode performs no basics lookup: fill the column with empty JSON
    # objects so downstream consumers can parse it unconditionally.
    data['Basics'] = [json.dumps({})] * len(data)

    info, prompt = get_all_prompts_online(ticker, data, curday, use_basics)

    # Wrap in the Llama-2 chat template:
    # [INST] <<SYS>>\n{system}\n<</SYS>>\n\n{user} [/INST]
    # NOTE(review): removed leftover debug print(data)/print(prompt) calls
    # that dumped the full dataframe and prompt to stdout on every request.
    prompt = B_INST + B_SYS + SYSTEM_PROMPT + E_SYS + prompt + E_INST

    return info, prompt
|
226 |
|
|
|
230 |
info, prompt = construct_prompt(ticker, date, n_weeks, use_basics)
|
231 |
|
232 |
inputs = tokenizer(
|
233 |
+
prompt, return_tensors='pt', padding=False
|
|
|
234 |
)
|
235 |
inputs = {key: value.to(model.device) for key, value in inputs.items()}
|
236 |
|
|
|
241 |
)
|
242 |
output = tokenizer.decode(res[0], skip_special_tokens=True)
|
243 |
answer = re.sub(r'.*\[/INST\]\s*', '', output, flags=re.DOTALL)
|
244 |
+
|
245 |
+
torch.cuda.empty_cache()
|
246 |
|
247 |
return info, answer
|
248 |
|