FinTech-Llama-GPT / utils.py
tensorgirl's picture
Update utils.py
a98760a verified
raw
history blame
No virus
4.24 kB
import datetime
from urllib.request import Request, urlopen
from pypdf import PdfReader
from io import StringIO
import io
import pandas as pd
import os
import torch
import time
import json
from openai import OpenAI
# SECURITY FIX: never commit API keys to source control — the original file
# hard-coded a live-looking OpenAI secret key here. Read it from the
# environment instead; the exposed key must be revoked and rotated.
openai_key = os.environ.get("OPENAI_API_KEY")
client = OpenAI(api_key=openai_key)

# Descriptor whitelist, loaded once at import time.
# Column 0 of Descriptor.xlsx holds the allowed announcement descriptors;
# filter() checks membership and summary() uses the row index to pick prompts.
desc = pd.read_excel('Descriptor.xlsx', header=None)
desc_list = desc.iloc[:, 0].to_list()
def filter(input_json):
    """Validate a BSE announcement record and fetch its attachment text.

    NOTE(review): this function shadows the builtin ``filter``; renaming
    would break existing callers, so the name is kept.

    Args:
        input_json: dict with at least the keys 'FileURL', 'symbol',
            'TypeofAnnouncement' and 'Descriptor' (schema inferred from
            usage — confirm against the caller).

    Returns:
        [1, text]  on success, where text is the concatenated text of every
                   page of the attached PDF.
        [0, reason] when a validation check fails; reason is one of
                   "File_URL", "symbol", "Annoucement", "Desc".
    """
    # Symbol whitelist is re-read per call; column 0 holds valid tickers.
    sym = pd.read_excel('symbol.xlsx', header=None)
    sym_list = sym.iloc[:, 0].to_list()

    # Guard clauses: reject records with missing/placeholder fields.
    # (`is None` instead of the original `== None`.)
    if input_json['FileURL'] is None or input_json['FileURL'].lower() == 'null':
        return [0, "File_URL"]
    if input_json['symbol'] == 'null' or input_json['symbol'] not in sym_list:
        return [0, "symbol"]
    # NOTE: "Annoucement" typo kept byte-for-byte — callers may match on it.
    if input_json['TypeofAnnouncement'] not in ['General_Announcements', 'Outcome', 'General']:
        return [0, "Annoucement"]
    if input_json['Descriptor'] not in desc_list:
        return [0, "Desc"]

    # Rebuild the direct attachment URL from the Pname= query fragment.
    url = 'https://www.bseindia.com/xml-data/corpfiling/AttachLive/' + input_json['FileURL'].split('Pname=')[-1]
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    # FIX: close the HTTP response deterministically (original leaked it).
    with urlopen(req) as resp:
        cont = resp.read()

    reader = PdfReader(io.BytesIO(cont))
    # FIX: `extract_text()` can return None on some pypdf versions; coerce to
    # '' so concatenation never raises. join() replaces the quadratic `+=`.
    document = ''.join(page.extract_text() or '' for page in reader.pages)
    return [1, document]
def summary(input_json):
    """Summarize a BSE announcement with GPT-4o.

    Runs filter() first; on rejection returns 0 (callers test for that
    sentinel). Otherwise returns a dict with the BSE link, timestamps,
    ticker, and the model-generated Short Summary / Tag / Headline /
    Sentiment / Long summary fields.

    Args:
        input_json: dict with 'FileURL', 'symbol', 'TypeofAnnouncement',
            'Descriptor' and 'newsdate' (schema inferred from usage).

    Raises:
        json.JSONDecodeError: if the model reply is not the expected
            fenced-JSON payload.
    """
    # Per-descriptor prompt templates: column 1 = short-summary prompt,
    # column 2 = long-summary prompt, rows aligned with desc_list.
    prompt_sheet = pd.read_excel('DescriptorPrompt.xlsx')
    promptShort = prompt_sheet.iloc[:, 1].to_list()
    promptLong = prompt_sheet.iloc[:, 2].to_list()

    output = {}
    filtering_results = filter(input_json)
    if filtering_results[0] == 0:
        return 0

    # `desc_idx` instead of the original `id` (shadowed the builtin).
    desc_idx = desc_list.index(input_json['Descriptor'])

    # Trim and cap the document so the prompt stays within budget.
    long_text = filtering_results[1].strip()[:6000]

    url = 'https://www.bseindia.com/xml-data/corpfiling/AttachLive/' + input_json['FileURL'].split('Pname=')[-1]
    output["Link to BSE website"] = url
    output["Date of time of receiving data from BSE"] = input_json["newsdate"] + "Z"
    output["Stock Ticker"] = input_json['symbol']

    # BUG FIX: utcnow() returns a *naive* datetime, and astimezone() treats a
    # naive value as LOCAL time, so the IST conversion was wrong on any
    # non-UTC host. Use an aware UTC timestamp instead.
    utc_now = datetime.datetime.now(datetime.timezone.utc)
    ist_now = utc_now.astimezone(datetime.timezone(datetime.timedelta(hours=5, minutes=30)))
    # `date_str`/`time_str` avoid shadowing the imported `time` module.
    date_str = ist_now.strftime("%Y-%m-%d")
    time_str = ist_now.strftime("%X")
    # NOTE(review): the trailing "Z" mislabels an IST timestamp as UTC —
    # kept for output compatibility, but confirm with consumers.
    output['Date and time of data delivery from Skylark'] = date_str + "T" + time_str + "Z"

    prompt = """
    Return the output in json format. This is the financial article {}
    Following are the keys of the json.
    1. Short Summary - {} Do not exceed over 400 characters. Make sure it is no more than 80 words
    2. Tag - Provide the main topic of the news article strictly as a tag, using only one or two words, with only the first word capitalized and the rest in lowercase. No additional text or explanation.
    3. Headline - Generate a precise headline for the news article that includes the name of the company. Be very careful about correctly representing any financial figures mentioned in lakhs and crores. Provide only the headline, with no additional text or explanation.
    4. Sentiment - Answer in one word the sentiment of this News out of Positive, Negative or Neutral.
    5. Long summary - {} Write the summary in max 500 words.
    """.format(long_text, promptShort[desc_idx], promptLong[desc_idx])

    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are a financial expert. Help the client with summarizing the financial newsletter. Do not truncate."},
            {"role": "user", "content": "{}".format(prompt)}
        ],
        temperature=0,
        max_tokens=4000,
    )

    # FIX: strip the ```json ... ``` fence robustly instead of the brittle
    # hard-coded slice [8:-3], which corrupted any unfenced reply.
    raw = completion.choices[0].message.content.strip()
    if raw.startswith("```"):
        # Drop the opening fence line (e.g. "```json") and the closing fence.
        raw = raw.split("\n", 1)[-1]
        if raw.endswith("```"):
            raw = raw[:-3]
    answer = json.loads(raw)

    output['Short Summary'] = answer['Short Summary']
    output['Tag'] = answer['Tag']
    output['Headline'] = answer['Headline']
    output['Sentiment'] = answer['Sentiment']
    # TODO(review): dead DALL·E infographic code removed; restore from
    # history if the "Link to Infographic" field is ever needed.
    output['Long summary'] = answer['Long summary']
    return output