# FinTech-Llama-GPT / utils.py
# (Hugging Face page residue removed: uploaded by tensorgirl, commit 2f70f16,
#  "Update utils.py", 4.24 kB — these lines were UI chrome, not code.)
import datetime
from urllib.request import Request, urlopen
from pypdf import PdfReader
from io import StringIO
import io
import pandas as pd
import os
import torch
import time
import json
from openai import OpenAI
# OpenAI API key: read from the environment rather than hard-coding a secret
# in version control. Falls back to the original placeholder so module import
# still succeeds when the variable is unset (calls will then fail at request
# time, same as with the "#" placeholder).
openai_key = os.environ.get("OPENAI_API_KEY", "#")
client = OpenAI(api_key=openai_key)

# Master list of announcement descriptors, loaded once at import time and
# shared by filter() and summary() below. First column of Descriptor.xlsx.
desc = pd.read_excel('Descriptor.xlsx', header=None)
desc_list = desc.iloc[:, 0].to_list()
def filter(input_json):
    """Validate a BSE announcement record and fetch its attachment text.

    Parameters
    ----------
    input_json : dict
        Announcement payload; must provide the keys 'FileURL', 'symbol',
        'TypeofAnnouncement' and 'Descriptor'.

    Returns
    -------
    list
        ``[0, reason]`` when the record is rejected (reason is a short tag
        naming the failing field), or ``[1, document_text]`` with the full
        extracted PDF text on success.
    """
    # Allowed symbols are reloaded per call (the file may change between calls).
    sym = pd.read_excel('symbol.xlsx', header=None)
    sym_list = sym.iloc[:, 0].to_list()
    # "is None" replaces the original "== None" (identity check is the idiom).
    if input_json['FileURL'] is None or input_json['FileURL'].lower() == 'null':
        return [0, "File_URL"]
    if input_json['symbol'] == 'null' or input_json['symbol'] not in sym_list:
        return [0, "symbol"]
    # NOTE: the "Annoucement" spelling is kept — callers may match on it.
    if input_json['TypeofAnnouncement'] not in ['General_Announcements', 'Outcome', 'General']:
        return [0, "Annoucement"]
    if input_json['Descriptor'] not in desc_list:
        return [0, "Desc"]

    # Download the attachment from BSE; a browser User-Agent is required or
    # the server rejects the request.
    url = ('https://www.bseindia.com/xml-data/corpfiling/AttachLive/'
           + input_json['FileURL'].split('Pname=')[-1])
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    # Context manager closes the HTTP response (the original leaked the handle).
    with urlopen(req) as resp:
        cont = resp.read()

    reader = PdfReader(io.BytesIO(cont))
    # Join page texts in one pass instead of quadratic string concatenation.
    document = ''.join(page.extract_text() for page in reader.pages)
    return [1, document]
def callGPT(prompt):
    """Send *prompt* to the gpt-4o chat endpoint and return the reply text.

    The system message fixes the model's role as a financial summarizer.
    Temperature 0 keeps the output deterministic; replies are capped at
    4000 tokens.
    """
    chat_messages = [
        {"role": "system", "content": "You are a financial expert. Help the client with summarizing the financial newsletter. Do not truncate."},
        {"role": "user", "content": "{}".format(prompt)},
    ]
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=chat_messages,
        temperature=0,
        max_tokens=4000,
    )
    return response.choices[0].message.content
def summary(input_json):
    """Build the full summary payload for a BSE announcement.

    Parameters
    ----------
    input_json : dict
        Announcement payload (same schema as filter() expects), plus a
        'newsdate' key used for the receipt timestamp.

    Returns
    -------
    int or dict
        ``0`` when the announcement fails validation in filter(); otherwise
        a dict of links, timestamps and GPT-generated summary fields.
    """
    prompt = pd.read_excel('DescriptorPrompt.xlsx')
    promptShort = prompt.iloc[:, 1].to_list()
    promptLong = prompt.iloc[:, 2].to_list()
    output = {}

    filtering_results = filter(input_json)
    if filtering_results[0] == 0:
        return 0

    # Position of this announcement's descriptor in the shared descriptor
    # list; selects the matching short/long prompt rows. (Renamed from "id",
    # which shadowed the builtin.)
    desc_idx = desc_list.index(input_json['Descriptor'])
    # Trim surrounding whitespace and cap the article at 6000 characters to
    # bound the prompt size sent to the model.
    long_text = filtering_results[1].strip()[:6000]

    url = 'https://www.bseindia.com/xml-data/corpfiling/AttachLive/' + input_json['FileURL'].split('Pname=')[-1]
    output["Link to BSE website"] = url
    output["Date of time of receiving data from BSE"] = input_json["newsdate"] + "Z"
    output["Stock Ticker"] = input_json['symbol']

    # BUG FIX: utcnow() returns a *naive* datetime, and astimezone() on a
    # naive value assumes it is in the server's LOCAL zone — producing a
    # wrong IST timestamp on any non-UTC host. Start from an aware UTC
    # datetime instead, then convert to IST (UTC+05:30).
    utc_now = datetime.datetime.now(datetime.timezone.utc)
    ist_now = utc_now.astimezone(datetime.timezone(datetime.timedelta(hours=5, minutes=30)))
    date_str = ist_now.strftime("%Y-%m-%d")
    time_str = ist_now.strftime("%X")  # renamed: "time" shadowed the imported module
    output['Date and time of data delivery from Skylark'] = date_str + "T" + time_str + "Z"

    output['Short Summary'] = callGPT("This is a financial article {} {} Do not exceed over 400 characters. Make sure it is no more than 80 words".format(long_text, promptShort[desc_idx]))
    output['Tag'] = callGPT("Provide the main topic of the news article strictly as a tag, using only one or two words, with only the first word capitalized and the rest in lowercase. No additional text or explanation. The article is {}".format(long_text))
    output['Headline'] = callGPT("Generate a precise headline for the news article that includes the name of the company. Be very careful about correctly representing any financial figures mentioned in lakhs and crores. Provide only the headline, with no additional text or explanation. The article is {}".format(long_text))
    output['Sentiment'] = callGPT("Answer in one word the sentiment of this News out of Positive, Negative or Neutral. The article is {}".format(long_text))
    output['Long summary'] = callGPT("This is a financial article {} {} Write the summary in max 500 words. Keep it long, should not be smaller than 300 words. Keep spacing and make sure it's pointwise. It should be in a format of a news article".format(long_text, promptLong[desc_idx]))
    # NOTE: a commented-out DALL-E infographic step (with a syntactically
    # broken string literal) previously sat here; removed as dead code.
    return output