# Streamlit app: trait-questionnaire-driven personalized bio generator.
# Standard library
import os
import re

# Third-party
import numpy as np
import openai
import pandas as pd
import streamlit as st

# LangChain
import langchain
from langchain import HuggingFaceHub, LLMChain, PromptTemplate
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
# Load the questionnaire sheet; row 0 is a spreadsheet header artifact, so drop it.
trait_content_df = pd.read_csv('AI Personality Chart trait_content (2).csv')
trait_content_df = trait_content_df.drop(0, axis=0)
trait_content_df.rename(
    columns={
        'Column 1': 'Question',
        'Column 2': 'Options',
        'Column 3': 'Traits',
        'Column 4': 'Content',
    },
    inplace=True,
)
# Forward-fill merged-cell gaps so every option row carries its title/question.
# FIX: fillna(method='ffill', inplace=True) on a column selection is deprecated
# in pandas and relies on chained assignment; assign the .ffill() result instead.
trait_content_df['Title'] = trait_content_df['Title'].ffill()
trait_content_df['Question'] = trait_content_df['Question'].ffill()
# Prompt for the bio-writer persona. {history} is effectively always empty
# because the chain below keeps a window of k=0 past turns.
template = """
Imagine you're someone looking to create a unique personalized bio based on your traits and experiences. You've shared some details about your background, and now it's time to craft a bio that stands out. Respond in the second person and avoid using the same sentences for different users. Your response should be concise and conclude within 150 words.
{history}
You: {human_input}
Bot:
[CHARACTER_LIMIT=150]
"""

prompt = PromptTemplate(template=template, input_variables=["history", "human_input"])

# gpt-3.5-turbo at a high temperature for varied phrasing between users;
# verbose=True logs each formatted prompt for debugging.
llm_chain = LLMChain(
    llm=ChatOpenAI(temperature=1.3, model_name='gpt-3.5-turbo'),
    prompt=prompt,
    verbose=True,
    memory=ConversationBufferWindowMemory(k=0),
)
def extract_text_from_html(html):
    """Remove HTML tags from *html* and trim surrounding whitespace."""
    tag_pattern = re.compile('<.*?>')
    return tag_pattern.sub('', html).strip()
def conversational_chat(query, replacement_word=None):
    """Send *query* through the LLM chain and return the tag-stripped reply.

    The query/reply pair is appended to the module-level ``hist_dict`` log.
    When *replacement_word* is given, every standalone occurrence of the
    placeholder name "jack" (any casing) in the reply is swapped for it.
    """
    hist_dict['past'].append(query)
    reply = llm_chain.predict(human_input=query)
    hist_dict['generated'].append(reply)
    if replacement_word is None:
        return extract_text_from_html(reply)
    # Case-insensitive, whole-word substitution of the placeholder name.
    personalized = re.sub(r'\bjack\b', replacement_word, reply, flags=re.IGNORECASE)
    return extract_text_from_html(personalized)
def word_count(text):
    """Return the number of word-character runs (``\\w+``) in *text*."""
    return len(re.findall(r'\w+', text))
# Seed the chat log with one greeting exchange so the UI has something to show.
# NOTE(review): "π€" / "π" look like mojibake for emoji (likely 🤗 / 👋) —
# reproduced verbatim here; confirm the intended characters against the repo.
hist_dict = {
    'generated': ["Hello ! Ask me anything about " + " π€"],
    'past': ["Hey ! π"],
}
# Second, un-filled copy of the questionnaire sheet used by the UI
# (row 0 is a spreadsheet header artifact, hence the drop).
trait_content_df_org = (
    pd.read_csv('AI Personality Chart trait_content (2).csv')
    .drop(0, axis=0)
    .rename(columns={
        'Column 1': 'Question',
        'Column 2': 'Options',
        'Column 3': 'Traits',
        'Column 4': 'Content',
    })
)
def ui():
    """Render the trait questionnaire and generate a personalized bio on submit."""
    # question -> selected option text (Option B overwrites Option A when both
    # boxes are ticked, matching the original precedence).
    responses = {}
    # Rows come in pairs: row i holds Option A, row i+1 holds Option B for the
    # same question, so the cursor advances by two.
    index = 0
    while index < len(trait_content_df_org):
        question = trait_content_df_org.iloc[index]["Question"]
        st.write(question)
        option_a = st.checkbox(f"Option A: {trait_content_df_org.iloc[index]['Options']}", key=f"option_a_{index}")
        # FIX: guard the pair lookup so the last row of an odd-length sheet
        # cannot raise IndexError (the original indexed index+1 unconditionally).
        # NOTE(review): pandas represents blanks as NaN, not None, so the
        # `is not None` test is effectively always True — consider pd.notna;
        # kept as-is to preserve behavior.
        if index + 1 < len(trait_content_df_org) and trait_content_df_org.iloc[index + 1]["Question"] is not None:
            option_b = st.checkbox(f"Option B: {trait_content_df_org.iloc[index + 1]['Options']}", key=f"option_b_{index + 1}")
        else:
            option_b = False
        st.write("")  # spacing between questions
        if option_a:
            responses[question] = f"{trait_content_df_org.iloc[index]['Options']}"
        if option_b:
            responses[question] = f"{trait_content_df_org.iloc[index + 1]['Options']}"
        index += 2  # move to the next question pair

    st.write("Responses:")
    for question, selected_option in responses.items():
        st.write(question)
        st.write(selected_option)

    # Look up the trait/content rows matching each selected option text.
    selected_traits = [responses[question] for question in responses]
    options_list = []
    traits_list = []
    content_list = []
    for trait_str in selected_traits:
        matching_rows = trait_content_df_org[trait_content_df_org["Options"] == trait_str]
        if not matching_rows.empty:
            options_list.append(matching_rows["Options"].values[0])
            traits_list.append(matching_rows["Traits"].values[0])
            content_list.append(matching_rows["Content"].values[0])

    # NOTE(review): this local deliberately shadows the module-level
    # PromptTemplate `prompt`; here it is the plain-text query for the chain.
    prompt = f"The following are Traits {', '.join(traits_list)}, and the content for the options is {', '.join(content_list)}"

    name_input = st.text_input("Enter your name:")
    if st.button("Submit"):
        # BUG FIX: the original created a second st.text_input with the same
        # label inside this branch, which raises Streamlit's DuplicateWidgetID
        # error and discarded the name already typed above. Reuse that value.
        bio = conversational_chat(prompt, name_input)
        bio_word_count = word_count(bio)
        # Regenerate once if the bio overshoots the 250-word ceiling.
        if bio_word_count > 250:
            st.warning("Generated Bio exceeded 250 words. Re-inferencing...")
            bio = conversational_chat(prompt, name_input)
            bio_word_count = word_count(bio)
        st.write(f"Generated Bio Word Count: {bio_word_count}")
        st.write(bio)
# Streamlit runs the script top-to-bottom; the guard also allows direct execution.
if __name__ == '__main__':
    ui()