rfp_to_story / Functions.py
Darpan07's picture
Upload 4 files
d94942e verified
raw
history blame
5.13 kB
from dotenv import load_dotenv
import os
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import OpenAIEmbeddings
import streamlit as st
from textwrap import dedent
from Prompts_and_Chains import LLMChains
from Templates import json_structure
import json
from Utils import estimate_to_value
class RFPProcessor:
    """Drives the RFP-to-user-story workflow behind the Streamlit UI.

    Responsibilities (all results are written into ``st.session_state``):
      * ingest an RFP PDF, index it in a Chroma vector store, and summarize it;
      * answer ad-hoc questions against that vector store (chat bot);
      * generate user stories from the RFP and effort estimations per story.
    """

    def __init__(self):
        """Load the OpenAI API key from the environment (.env supported) and
        build the LLM chains used by the other methods."""
        load_dotenv()
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.chains_obj = LLMChains()

    @staticmethod
    def _iter_stories(data):
        """Yield ``(epic_name, feature_name, story)`` triples from the nested
        ``epics -> features -> stories`` JSON the LLM chains return.

        Shared by ``generate_estimations`` and ``genrate_user_stories``, which
        previously duplicated this triple-nested traversal.
        """
        for epic in data["epics"]:
            for feature in epic["features"]:
                for story in feature["stories"]:
                    yield epic["name"], feature["name"], story

    def generate_estimations(self, tech_leads, senior_developers, junior_developers):
        """Run the estimation chain over the generated user stories and append
        one row per story to ``st.session_state.estimation_data``.

        Args:
            tech_leads: number of tech leads available for the project.
            senior_developers: number of senior developers available.
            junior_developers: number of junior developers available.

        Raises:
            json.JSONDecodeError: if the chain output is not valid JSON.
        """
        inputs = {
            "project_summary": st.session_state["rfp_summary"],
            "user_stories": st.session_state["user_stories_json"],
            "tech_leads": tech_leads,
            "senior_developers": senior_developers,
            "junior_developers": junior_developers,
        }
        data = self.chains_obj.estimations_chain.run(inputs)
        estimation_json_data = json.loads(data)
        for epic, feature, story in self._iter_stories(estimation_json_data):
            st.session_state.estimation_data.append(
                {
                    "epic": epic,
                    "Feature": feature,
                    "Story Description": story["description"],
                    "Story Estimation": story["estimate"],
                    "Story Effort": story["effort"],
                    # estimate_to_value maps the textual estimate to a numeric
                    # average (see Utils) — TODO confirm exact semantics.
                    "Average Estimate": estimate_to_value(story["estimate"]),
                    "Story Rationale": story["rationale"],
                }
            )
        st.session_state["is_estimation_data_created"] = True

    def process_rfp_data(self, project_name, file):
        """Extract text from the RFP PDF, index it, and summarize it.

        Appends the extracted text to ``st.session_state["rfp_details"]``,
        builds a Chroma vector store over the chunked text, and stores the
        generated summary. No-op when either argument is falsy.

        Args:
            project_name: display name of the project.
            file: file-like object containing the RFP PDF.
        """
        if not (project_name and file):
            return
        reader = PdfReader(file)
        # Collect page texts and join once instead of quadratic string +=;
        # extract_text() may return None/'' for image-only pages — skip those.
        pages = (page.extract_text() for page in reader.pages)
        st.session_state["rfp_details"] += "".join(p for p in pages if p)
        text_splitter = CharacterTextSplitter(
            separator="\n", chunk_size=1000, chunk_overlap=150, length_function=len
        )
        texts = text_splitter.split_text(st.session_state["rfp_details"])
        # Fix: from_texts is a classmethod — call it on the class rather than
        # constructing a throwaway Chroma() instance first.
        st.session_state["vectorstore"] = Chroma.from_texts(
            texts, embedding=OpenAIEmbeddings(openai_api_key=self.openai_api_key)
        )
        st.session_state.project_name = project_name
        st.session_state["rfp_summary"] = self.chains_obj.summary_chain.run(
            {
                "project_name": st.session_state["project_name"],
                "rfp_details": dedent(st.session_state["rfp_details"]),
            }
        )
        st.session_state["is_data_processed"] = True
        st.success("data processed successfully")

    def genrate_bot_result(self):
        """Answer the current chat input against the RFP vector store.

        Reads the question from ``st.session_state["input"]``, retrieves the
        most similar chunk, runs the bot chain, and records the exchange in
        ``past`` / ``generated``. (Method name typo kept for compatibility
        with existing callers.)
        """
        query = st.session_state["input"]
        if not query:
            return
        db = st.session_state["vectorstore"]
        context = db.similarity_search(query)
        output = self.chains_obj.bot_chain.run(
            {
                # Only the top-ranked chunk is used as context.
                "context": context[0].page_content,
                "input": query,
            }
        )
        st.session_state.past.append(query)
        st.session_state.generated.append(output)
        # Clear the input box for the next question.
        st.session_state["input"] = ""

    def genrate_user_stories(self):
        """Generate user stories from the RFP and flatten them into rows.

        Runs the user-story chain, converts its prose output to JSON via the
        json chain, then appends one row per story to
        ``st.session_state.user_stories_data``. (Method name typo kept for
        compatibility with existing callers.)

        Raises:
            json.JSONDecodeError: if the json chain output is not valid JSON.
        """
        output = self.chains_obj.user_story_chain.run(
            {
                "project_name": st.session_state["project_name"],
                "rfp_details": st.session_state["rfp_details"],
            }
        )
        st.session_state["user_stories"] = output
        json_response = self.chains_obj.json_chain.run(
            {
                "user_stories": st.session_state["user_stories"],
                "json_structure": json_structure,
            }
        )
        user_stories_data = json.loads(json_response)
        st.session_state["user_stories_json"] = user_stories_data
        for epic, feature, story in self._iter_stories(user_stories_data):
            st.session_state.user_stories_data.append(
                {
                    "epic": epic,
                    "Feature": feature,
                    "Story Description": story["description"],
                }
            )
        st.session_state["is_user_stories_created"] = True