Spaces:
Runtime error
Runtime error
Commit
·
8bc7dc5
1
Parent(s):
ffff599
Upload 35 files
Browse files- .env +1 -0
- Logo.png +0 -0
- README.md +2 -13
- X_content_creation.py +83 -0
- app.py +434 -0
- article_conversion_to_x_thread.py +15 -0
- article_generation.py +80 -0
- blog_content_creation.py +98 -0
- blog_ideas_description_generation.py +15 -0
- blog_ideas_generation.py +15 -0
- blog_post_generation.py +13 -0
- blog_post_tags_generation.py +15 -0
- email_marketing_campaigns_generation.py +15 -0
- email_writer.py +46 -0
- facebook_ads_generation.py +25 -0
- facebook_campaign.py +33 -0
- facebook_post_generation.py +15 -0
- feather_chat.py +65 -0
- idea_generation.py +104 -0
- insta_image_caption_generation.py +27 -0
- keywords_generation.py +83 -0
- landing_page_generation.py +13 -0
- linkedIn_ads_generation.py +15 -0
- linkedin_post_generation.py +15 -0
- load_model.py +86 -0
- meta_content_creation.py +94 -0
- outline_generation.py +67 -0
- product_description_generation.py +33 -0
- remove_astricks.py +5 -0
- workout_plan_generation.py +26 -0
- x_bio_creation.py +15 -0
- x_campaign_generation.py +15 -0
- x_retweet_commenting_generation.py +15 -0
- x_thread_generation.py +29 -0
- youtube_ideas.py +13 -0
.env
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
OPENAI_API_KEY='<YOUR_OPENAI_API_KEY>'  # SECURITY: a real secret key was committed on this line — it must be revoked/rotated immediately and never stored in version control; load it from the deployment environment instead
|
Logo.png
ADDED
![]() |
README.md
CHANGED
@@ -1,13 +1,2 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: gray
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.29.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
+
# Feather
|
2 |
+
Feather AI
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
X_content_creation.py
ADDED
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
class X_Generator:
    """Generate Twitter/X marketing copy (campaigns, threads, bios,
    retweet comments) by prompting a LangChain-wrapped LLM.

    Every public method builds a task-specific prompt, wraps it in a
    PromptTemplate that sets the model's persona, and runs it through an
    LLMChain via the shared ``_run`` helper.
    """

    def __init__(self, llm):
        # LangChain LLM instance shared by every generation method.
        self.llm = llm

    def _run(self, template, prompt):
        """Run ``prompt`` through an LLMChain built from ``template``.

        Parameters:
            template (str): PromptTemplate text containing ``{text_input}``.
            prompt (str): The concrete request substituted into the template.

        Returns:
            str: Raw model output.
        """
        prompt_template = PromptTemplate(
            input_variables=["text_input"],
            template=template)
        chain = LLMChain(llm=self.llm, prompt=prompt_template)
        return chain.run(prompt)

    def x_camp_gen(self, product_name, product_desc, goal):
        """Return a multi-part Twitter campaign plan for a product.

        Parameters:
            product_name (str): Name of the product being promoted.
            product_desc (str): Short description of the product.
            goal (str): Business goal the campaign should achieve.

        Returns:
            str: Generated campaign copy.
        """
        # NOTE: debug print of the prompt removed.
        x_camp_prompt = (
            f"Design a potent Twitter campaign for my product '{product_name}'. "
            f"{product_name} is {product_desc} aiming to accomplish {goal} through the "
            "meticulous adoption of planning and content creation best practices.\n\n"
            "Campaign will include: Campaign Teaser, Educational Content Series, "
            "Customer Testimonials and Case Studies, Interactive Content, "
            "Limited-Time Offer Announcement, Call-to-Action for Consultation "
            "and Recap and Thank You.")
        return self._run(
            "You are a Specialist in Twitter Copywriting\n\n{text_input}\n\n"
            "Make the tweets engaging, creative and coherent",
            x_camp_prompt)

    def x_retweet_commenting_gen(self, tweet, tone_of_voice):
        """Suggest 5 comments to attach when retweeting ``tweet``.

        Parameters:
            tweet (str): The tweet being retweeted.
            tone_of_voice (str): Desired tone for the comments.

        Returns:
            str: Five suggested retweet comments.
        """
        x_retweet_comment_prompt = (
            f"I'm planning to retweet the following tweet:\n“{tweet}”\n"
            f"Construct 5 varied comments I could append to this retweet. "
            f"Tone should be {tone_of_voice}")
        # Typo fixed: "Specilaized" -> "a Specialized".
        return self._run(
            "You are a Specialized Twitter Copywriter\n{text_input}\n\nRetweet Comment:",
            x_retweet_comment_prompt)

    def X_thread_gen(self, topic, num_tweets, tone_of_voice):
        """Generate a Twitter thread of ``num_tweets`` tweets on ``topic``.

        Parameters:
            topic (str): Subject of the thread.
            num_tweets (int): Number of tweets to produce.
            tone_of_voice (str): Desired tone.

        Returns:
            str: The generated thread.
        """
        # Typos fixed: "Write a an engagging ... consists of" -> below.
        X_thread_prompt = (
            f"Write an engaging Twitter Thread on '{topic}' consisting of "
            f"{num_tweets} tweets. Tone should be {tone_of_voice}")
        return self._run(
            "You are a Twitter Content Creator:\n{text_input}\n\nTwitter Thread:",
            X_thread_prompt)

    def X_thread_gen_intro(self, topic, thread, tone_of_voice):
        """Generate an attention-grabbing head tweet introducing ``thread``.

        Parameters:
            topic (str): Subject of the thread.
            thread (str): The already-generated thread body.
            tone_of_voice (str): Desired tone.

        Returns:
            str: A single introduction tweet.
        """
        X_thread_prompt = (
            f"Write an engaging and attractive Introduction head tweet for my "
            f"twitter thread on {topic}. here is my twitter thread:\n{thread}\n"
            f"Tone should be {tone_of_voice}")
        return self._run(
            "You are a Twitter Content Creator:\n{text_input}\n\nTweet:",
            X_thread_prompt)

    def x_bio_gen(self, info, tone_of_voice):
        """Generate 10 tailored Twitter bios.

        Parameters:
            info (str): What the user's content is about.
            tone_of_voice (str): Desired tone.

        Returns:
            str: Ten suggested bios.
        """
        x_bio_prompt = (
            f"Craft a personalized Twitter bio for me, based on the content I "
            f"create, related to {info}. tone should be {tone_of_voice}, "
            f"Produce 10 tailor-made Twitter bios for me.")
        return self._run(
            "You are a copywriter\n{text_input}\n\nTwitter Bios:",
            x_bio_prompt)

    def article_to_x_thread_gen(self, article):
        """Convert an article into a coherent thread of 5–20 tweets.

        Parameters:
            article (str): Full article text.

        Returns:
            str: The generated thread.
        """
        article_to_x_thread_prompt = (
            f"Transform the ensuing article into a sequence of Twitter posts, "
            f"creating a coherent Twitter thread between 5 to 20 tweets\n"
            f"Article: “{article}.“")
        return self._run(
            "You are a twitter copywriter\n{text_input}\n\nCoherent Twitter Thread:",
            article_to_x_thread_prompt)
|
81 |
+
|
82 |
+
|
83 |
+
|
app.py
ADDED
@@ -0,0 +1,434 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from remove_astricks import remove_ast
|
2 |
+
from load_model import call_palm
|
3 |
+
from keywords_generation import keywords_generator, filter_keywords, process_keywords
|
4 |
+
from idea_generation import ideas_generator, filter_ideas
|
5 |
+
from outline_generation import outlines_generator, filtered_outlines
|
6 |
+
from article_generation import full_article, rephrase
|
7 |
+
from product_description_generation import product_description_gen
|
8 |
+
from x_thread_generation import X_thread_gen, X_thread_gen_intro
|
9 |
+
from linkedin_post_generation import linkedIn_post_gen
|
10 |
+
from insta_image_caption_generation import img2text, generate_InstaCap
|
11 |
+
from facebook_post_generation import facebook_post_gen
|
12 |
+
from facebook_ads_generation import facebook_ads_gen
|
13 |
+
from facebook_campaign import facbook_camp_gen, social_media_camp_gen
|
14 |
+
from linkedIn_ads_generation import linkedIn_ads_gen
|
15 |
+
from email_marketing_campaigns_generation import email_marketing_campaigns_gen
|
16 |
+
from workout_plan_generation import workout_plan_gen
|
17 |
+
from landing_page_generation import landing_page_gen
|
18 |
+
from blog_post_generation import blog_post_gen
|
19 |
+
from x_bio_creation import x_bio_gen
|
20 |
+
from x_retweet_commenting_generation import x_retweet_commenting_gen
|
21 |
+
from article_conversion_to_x_thread import article_to_x_thread_gen
|
22 |
+
from x_campaign_generation import x_camp_gen
|
23 |
+
from blog_ideas_generation import blog_idea_gen
|
24 |
+
from blog_ideas_description_generation import blog_idea_desc_gen
|
25 |
+
from blog_post_tags_generation import blog_tags_gen
|
26 |
+
import X_content_creation
|
27 |
+
import blog_content_creation
|
28 |
+
import meta_content_creation
|
29 |
+
import email_writer
|
30 |
+
|
31 |
+
import streamlit as st
|
32 |
+
from dotenv import load_dotenv, find_dotenv
|
33 |
+
import os
|
34 |
+
import time
|
35 |
+
|
36 |
+
# Load the OpenAI key from .env; never hard-code secrets in the repo.
load_dotenv(find_dotenv())
# Renamed from the misleading `google_api_key`: this is the OPENAI key.
openai_api_key = os.environ['OPENAI_API_KEY']


if __name__ == '__main__':

    llm = call_palm(openai_api_key)

    # Shared option lists reused across templates.
    tone_of_voice_list = ('Excited', 'Professional', 'Encouraging', 'Funny', 'Dramatic', 'Witty', 'Sarcastic', 'Engaging', 'Creative')
    creativity_list = ('Original', 'Creative', 'Visionary')

    with st.sidebar:
        st.image('Logo.png')
        choice = st.radio("Navigation", ["Articles and Blogs", "Product Description", "LinkedIn Post", "LinkedIn Ads", "Gym Workout Plan", "Landing Page", "X Content Creation", "Meta Content", "Emails"])
        st.info("Generate your Content with Feather AI in Seconds..")

    if choice == 'Product Description':
        product_name = st.text_input('Product Name')
        product_desc = st.text_area('Product Description')
        tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

        if product_name and product_desc and tone_of_voice:
            if st.button('Generate'):
                product_description = product_description_gen(product_name, product_desc, tone_of_voice, llm)
                st.markdown(product_description)

    elif choice == 'LinkedIn Post':
        topic = st.text_input("Topic")
        tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

        if topic and tone_of_voice:
            if st.button('Generate'):
                linkedIn_post = linkedIn_post_gen(topic, tone_of_voice, llm)
                st.markdown(linkedIn_post)

    elif choice == "LinkedIn Ads":
        product_name = st.text_input("Product Name")
        product_desc = st.text_area("Product Description")
        target_audience = st.text_input("Target Audience")
        target_keywords = st.text_input("Target Keywords")

        if product_name and product_desc and target_audience and target_keywords:
            if st.button("Generate"):
                linkedIn_ad = linkedIn_ads_gen(product_name, product_desc, target_audience, target_keywords, llm)
                st.markdown(linkedIn_ad)

    elif choice == "Gym Workout Plan":
        fitness_level_list = ('Beginner', 'Intermediate', 'Advanced', 'Elite')
        health_consd_list = ('Cutting Phase', 'Bulking Phase', 'Maintenance Phase', 'Lean Muscle Gain')
        routine_list = ('Bro Split', 'Push Pull Leg', 'Upper and Lower', 'Full Body')

        fitness_level = st.selectbox('Fitness Level', fitness_level_list)
        # Typo fixed in labels: "Avaliable" -> "Available".
        days = st.slider('Available Days Per Week', 1, 7, 4)
        hours = st.slider('Available Hours Per Day', 1, 6, 2)
        health_consd = st.selectbox('Health Considerations', health_consd_list)
        routine = st.selectbox("Preferred Routine", routine_list)
        my_goals = st.text_input('Goals')

        if fitness_level and days and hours and health_consd and routine and my_goals:
            if st.button('Generate'):
                workout_plan = workout_plan_gen(my_goals, fitness_level, days, hours, health_consd, routine, llm)
                st.markdown(workout_plan)

    elif choice == "Landing Page":
        # Template intentionally disabled; landing_page_gen is imported but
        # the UI below was commented out by the author.
        #product_name = st.text_input('Product Name')
        #product_desc = st.text_area('Description')
        #target_audience = st.text_input('Target Audience')
        #goal = st.text_input('Goal')
        st.title('Not Working')
        #if product_name and product_desc and target_audience and goal:
        #    if st.button('Generate'):
        #        landing_page = landing_page_gen(product_name, product_desc, target_audience, goal, llm)
        #        st.markdown(landing_page)

    elif choice == 'X Content Creation':
        X_content = X_content_creation.X_Generator(llm)

        x_choices_list = ('X thread', 'X Retweet Commenting', 'X Campaign', 'X Bio Creation', 'Article Conversion to X Thread')
        x_choices = st.selectbox('Choose X Template', x_choices_list)

        if x_choices == x_choices_list[0]:
            topic = st.text_input('Topic')
            num_tweets = st.slider('Number of Thread tweet', 1, 10, 5)
            tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

            if topic and num_tweets and tone_of_voice:
                if st.button('Generate'):
                    X_thread = X_content.X_thread_gen(topic, num_tweets, tone_of_voice)
                    # The intro tweet is generated after (and from) the thread body.
                    intro = X_content.X_thread_gen_intro(topic, X_thread, tone_of_voice)
                    st.markdown(intro)
                    st.markdown(X_thread)

        elif x_choices == x_choices_list[1]:
            tweet = st.text_area('Tweet')
            tone_of_voice = st.selectbox('Tone of voice', tone_of_voice_list)

            if tweet and tone_of_voice:
                if st.button('Generate'):
                    x_retweet_comment = X_content.x_retweet_commenting_gen(tweet, tone_of_voice)
                    st.markdown(x_retweet_comment)

        elif x_choices == x_choices_list[2]:
            product_name = st.text_input('Product Name')
            product_desc = st.text_area('Product Description')
            goal = st.text_input('Goal')

            if product_name and product_desc and goal:
                if st.button('Generate'):
                    x_camp = X_content.x_camp_gen(product_name, product_desc, goal)
                    st.markdown(x_camp)

        elif x_choices == x_choices_list[3]:
            info = st.text_area('info')
            tone_of_voice = st.selectbox('Tone of voice', tone_of_voice_list)

            if info and tone_of_voice:
                if st.button('Generate'):
                    # BUG FIX: the method signature is x_bio_gen(self, info, tone_of_voice);
                    # the extra `llm` argument previously passed here raised TypeError.
                    x_bio = X_content.x_bio_gen(info, tone_of_voice)
                    st.markdown(x_bio)

        elif x_choices == x_choices_list[4]:
            article = st.text_area('Article')

            if article:
                if st.button('Generate'):
                    article_to_x_thread = X_content.article_to_x_thread_gen(article)
                    st.markdown(article_to_x_thread)

    elif choice == 'Articles and Blogs':
        blog_content = blog_content_creation.blog_content_generation(llm)

        blog_choices_list = ('Article Writer', 'Blog Post', 'Blog Ideas', 'Blog Tags', 'Blog Ideas description')
        blog_choices = st.selectbox('Choose Blog Template', blog_choices_list)

        if blog_choices == blog_choices_list[0]:
            topic = st.text_input('Topic')
            tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

            if topic:
                # Pipeline: keywords -> ideas -> chosen idea -> outline -> article.
                keywords = keywords_generator(topic, llm)
                filtered_keywords = filter_keywords(keywords)
                formatted_keywords = process_keywords(filtered_keywords)

                st.markdown('### Generated Keywords:\n')
                st.markdown(formatted_keywords)

                formatted_keywords = remove_ast(formatted_keywords)

                idea_numbers = st.slider("How many ideas you want?", 1, 10, 5)

                ideas = ideas_generator(topic, formatted_keywords, llm, tone_of_voice, idea_numbers)
                filtered_ideas = filter_ideas(ideas)

                st.markdown("### Generated Ideas:\n")
                for index, idea in enumerate(filtered_ideas, start=1):
                    st.markdown(f"{index} - {idea}")

                st.text('\n')
                num_idea = st.text_input("Choose the idea you want by number")

                # ROBUSTNESS FIX: guard against non-numeric / out-of-range input,
                # which previously crashed on int() or list indexing.
                if num_idea:
                    if num_idea.strip().isdigit() and 1 <= int(num_idea) <= len(filtered_ideas):
                        idea = filtered_ideas[int(num_idea) - 1]
                        idea = remove_ast(idea)

                        outline = outlines_generator(idea, formatted_keywords, llm)
                        st.text('\n')
                        st.markdown("### Generated Outline:\n")
                        st.markdown(outline)
                        outline_list = filtered_outlines(outline)

                        if st.button('Generate'):
                            article = full_article(idea, outline_list, tone_of_voice, llm)

                            st.markdown("# Your Article:\n")
                            st.markdown(f"{idea}\n")
                            st.markdown('\n\n'.join(article))
                            st.text('\n\n')
                            num_art_chars = len('\n'.join(article))
                            num_art_words = len('\n'.join(article).split(' '))

                            st.markdown(f"{num_art_chars} Characters")
                            st.markdown(f"{num_art_words} Words")
                    else:
                        st.warning(f"Please enter a number between 1 and {len(filtered_ideas)}")

        elif blog_choices == blog_choices_list[1]:
            topic = st.text_input('Topic')

            if topic:
                if st.button('Generate'):
                    blog_post = blog_content.blog_post_gen(topic)
                    st.markdown(blog_post)

        elif blog_choices == blog_choices_list[2]:
            topic = st.text_input('Topic')

            if topic:
                if st.button('Generate'):
                    blog_idea = blog_content.blog_idea_gen(topic)
                    st.markdown(blog_idea)

        elif blog_choices == blog_choices_list[4]:
            blog = st.text_area('Blog')

            if blog:
                if st.button('Generate'):
                    blog_idea_desc = blog_content.blog_idea_desc_gen(blog)
                    st.markdown(blog_idea_desc)

        elif blog_choices == blog_choices_list[3]:
            blog = st.text_area('Blog')

            if blog:
                if st.button('Generate'):
                    blog_tags = blog_content.blog_tags_gen(blog)
                    st.markdown(blog_tags)

    elif choice == 'Meta Content':
        meta_content_gen = meta_content_creation.meta_content_generation(llm)

        meta_choices_list = ('Facebook Post', 'Facebook Ads', 'Facebook Campaign', 'Insta Image Captioning')
        meta_choices = st.selectbox('Choose Meta Template', meta_choices_list)

        if meta_choices == meta_choices_list[0]:
            topic = st.text_input("Topic")
            tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

            if topic and tone_of_voice:
                if st.button('Generate'):
                    facebook_post = meta_content_gen.facebook_post_gen(tone_of_voice, topic)
                    st.markdown(facebook_post)

        elif meta_choices == meta_choices_list[1]:
            product_name = st.text_input('Product Name')
            product_desc = st.text_area('Description')
            targeted_audience = st.text_input('Target Audience')
            plans_proms = st.text_input('Plan and Promotions')

            tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)
            # NOTE(review): `creativity` is collected but never passed to
            # facebook_ads_gen — confirm whether the generator should use it.
            creativity = st.selectbox('Creativity', creativity_list)

            if product_name and product_desc and tone_of_voice:
                if st.button('Generate'):
                    face_ad__prom = meta_content_gen.facebook_ads_gen(product_name, product_desc, tone_of_voice, targeted_audience, plans_proms)
                    st.markdown(face_ad__prom)

        elif meta_choices == meta_choices_list[2]:
            product_name = st.text_input('Product Name')
            product_desc = st.text_area('Product Description')
            days = st.selectbox('Days', (10, 15, 20, 25, 30))
            goal = st.text_input('Goal')

            if product_name and product_desc and goal and days:
                if st.button('Generate'):
                    facbook_camp = meta_content_gen.facbook_camp_gen(product_name, product_desc, days, goal)
                    st.markdown(facbook_camp)

        elif meta_choices == meta_choices_list[3]:
            uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

            if uploaded_file is not None:
                # The image-to-text model reads from disk, so persist the upload.
                bytes_data = uploaded_file.getvalue()
                with open(uploaded_file.name, "wb") as file:
                    file.write(bytes_data)

                st.image(uploaded_file, caption="Uploaded image",
                         use_column_width=True, width=500)

                scenario = meta_content_gen.img2text(uploaded_file.name)

                st.subheader("Image Scenario:")
                with st.expander("scenario"):
                    st.write(scenario)

                tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)
                form = st.selectbox('Caption Form', ('Short Form', 'Medium Form', 'Long Form'))

                if st.button('Generate'):
                    st.subheader("Generated Instagram Image according to the scenario:")
                    instaCap = meta_content_gen.generate_InstaCap(scenario, tone_of_voice, form)
                    st.markdown(instaCap)

    elif choice == 'Emails':
        email_content_gen = email_writer.email_writing(llm)

        email_choices_list = ('Email Writer', 'Email Marketing Campaign')
        # Typo fixed: "Choose Emai Template" -> "Choose Email Template".
        email_choices = st.selectbox('Choose Email Template', email_choices_list)

        if email_choices == email_choices_list[0]:
            recipient = st.text_input('Recipient')
            recipient_position = st.text_input('Recipient Position')
            sender_name = st.text_input('Sender Name')
            position_sender = st.text_input('Sender Position')
            description = st.text_area('Description')

            if recipient and recipient_position and sender_name and position_sender and description:
                if st.button('Generate'):
                    email = email_content_gen.email_gen(recipient, recipient_position, sender_name, position_sender, description)
                    email_subject = email_content_gen.email_subject_gen(email)
                    st.markdown(f'Subject: {email_subject}')
                    st.markdown(email)

        elif email_choices == email_choices_list[1]:
            product_name = st.text_input("Product Name")
            product_description = st.text_area("Product Description")
            target_audience = st.text_input("Target Audience")
            goal = st.text_input("Goal")

            if product_name and product_description and target_audience and goal:
                if st.button("Generate"):
                    email_marketing_camp = email_content_gen.email_marketing_campaigns_gen(product_name, product_description, target_audience, goal)
                    st.markdown(email_marketing_camp)
|
433 |
+
|
434 |
+
|
article_conversion_to_x_thread.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def article_to_x_thread_gen(article, llm):
    """Convert a full article into a coherent Twitter thread of 5–20 tweets.

    Parameters:
        article (str): The article text to transform.
        llm: LangChain LLM used for generation.

    Returns:
        str: The generated Twitter thread.
    """
    request = f"Transform the ensuing article into a sequence of Twitter posts, creating a coherent Twitter thread between 5 to 20 tweets\nArticle: “{article}.“"
    thread_template = PromptTemplate(
        input_variables=["text_input"],
        template="You are a twitter copywriter\n{text_input}\n\nCoherent Twitter Thread:")
    thread_chain = LLMChain(llm=llm, prompt=thread_template)
    return thread_chain.run(request)
|
14 |
+
|
15 |
+
|
article_generation.py
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def article_generator(idea, outline, section, llm, tone_of_voice):
    """Generate one paragraph of an article with the given LLM.

    When `outline` is empty the prompt asks for a catchy introduction;
    otherwise it asks for a body paragraph that flows on from the sections
    already covered.

    Parameters:
        idea (str): The main idea or topic of the article.
        outline (str): Sections already covered (empty string for none).
        section (str): The main point this paragraph must cover.
        llm: The LangChain LLM used for generating article content.
        tone_of_voice (str): Intended tone (e.g., professional, persuasive).

    Returns:
        str: The generated paragraph.
    """
    # Empty outline means nothing has been written yet -> introduction prompt.
    if not outline:
        article_prompt = (
            f"Generate Catchy Introduction paragraph for my article on {idea} "
            f"using the following main point: {section}\n"
            f"The tone should be {tone_of_voice}.")
    else:
        article_prompt = (
            f"Generate well-organized paragraph for my article on {idea}. "
            f"I have already covered: {outline} in the outline. "
            f"I need help with the following main point: {section}. "
            f"Please ensure the paragraphs are connected logically and provide "
            f"a smooth transition between main topics. "
            f"The tone should be {tone_of_voice}.")

    article_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Professional content creator and article Writer:\n\n{text_input}\n\nParagraph:")

    # NOTE: debug print of the prompt removed.
    article_extraction_chain = LLMChain(llm=llm, prompt=article_promptTemp)
    return article_extraction_chain.run(article_prompt)
|
36 |
+
|
37 |
+
def full_article(idea, outline_list, tone_of_voice, llm):
    """Generate a full article, one paragraph per outline section.

    Calls `article_generator` for each section, feeding it the sections
    already written so paragraphs connect. Generation is best-effort: if a
    section fails, the paragraphs produced so far are still returned.

    Parameters:
        idea (str): The main idea or topic for the article.
        outline_list (list): Sections/subsections forming the article structure.
        tone_of_voice (str): Desired tone (e.g., professional, conversational).
        llm: The LangChain LLM used for generating article content.

    Returns:
        list: Generated paragraphs, one per successfully processed section.
    """
    article = []
    covered = []

    for section in outline_list:
        try:
            para = article_generator(idea, ' '.join(covered), section, llm, tone_of_voice)
        except Exception:
            # FIX: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit. Keep the original best-effort
            # behavior (stop and return what we have) but only for
            # ordinary errors.
            break
        covered.append(section)
        article.append(para)

    return article
|
68 |
+
|
69 |
+
def rephrase(par, llm):
    """Rephrase a paragraph to make it more unique and exciting.

    Parameters:
        par (str): The paragraph to rephrase.
        llm: LangChain LLM used for generation.

    Returns:
        str: The rephrased paragraph.
    """
    request = f"Rephrase the following paragraph and make it more unique and excited: {par}"
    rewrite_template = PromptTemplate(
        input_variables=["text_input"],
        template="You are a content creator.\n{text_input}")
    rewrite_chain = LLMChain(llm=llm, prompt=rewrite_template)
    return rewrite_chain.run(request)
|
blog_content_creation.py
ADDED
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
class blog_content_generation:
    """Generate blog-related content with a language model (LLM).

    Provides generators for blog post ideas, SEO titles/meta descriptions,
    full blog posts, and SEO tags.

    Attributes:
        llm: The language model instance used by every generation method.
    """

    def __init__(self, llm):
        # Language model shared by all generation methods.
        self.llm = llm

    def _run(self, request, template):
        """Build an LLMChain from `template` and run `request` through it."""
        prompt = PromptTemplate(input_variables=["text_input"], template=template)
        chain = LLMChain(llm=self.llm, prompt=prompt)
        return chain.run(request)

    def blog_idea_desc_gen(self, blog):
        """Suggest SEO-friendly titles and meta descriptions for a blog post.

        Args:
            blog: The title or topic of the blog post.

        Returns:
            str: Suggested titles and meta descriptions.
        """
        request = f"Suggest at least 5 SEO-friendly titles and meta descriptions for the following blog post {blog}. Use a persuasive and intriguing tone."
        return self._run(request, "You are my blogger writter\n\n{text_input}\n\nSEO blog post ideas:")

    def blog_idea_gen(self, topic):
        """Create at least 10 SEO-friendly blog post ideas on a topic.

        Args:
            topic: The subject or theme for the blog post ideas.

        Returns:
            str: The suggested blog post ideas.
        """
        request = f"Create a list of at least 10 blog post ideas on the following topic: {topic}. Suggest SEO-Friendly title and use an emotional and persuasive tone in blog post titles."
        return self._run(request, "You are my blogger writter\n\n{text_input}\n\nSEO blog post ideas:")

    def blog_post_gen(self, topic):
        """Generate a blog post about `topic`.

        Args:
            topic: The subject or theme for the blog post content.

        Returns:
            str: The generated blog post.
        """
        # Bug fix: the original prompt misspelled "Post" as "Plost".
        request = f"Write a blog Post about {topic}."
        return self._run(request, "You are a Professional Content Creator and Blog Writer:\n{text_input}\nBlog Post:")

    def blog_tags_gen(self, blog):
        """Suggest at least 5 tags for the given blog post.

        Args:
            blog: The title or topic of the blog post.

        Returns:
            str: The suggested tags.
        """
        request = f" Suggest at least 5 tags for the following blog post.\n{blog}"
        return self._run(request, "You are my blogger writter\n\n{text_input}\n\nSEO blog post ideas:")
|
blog_ideas_description_generation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def blog_idea_desc_gen(blog, llm):
    """Suggest SEO-friendly titles and meta descriptions for a blog post.

    Args:
        blog: The title or topic of the blog post.
        llm: The language model instance used for generation.

    Returns:
        str: Suggested titles and meta descriptions.
    """
    request = f"Suggest at least 5 SEO-friendly titles and meta descriptions for the following blog post {blog}. Use a persuasive and intriguing tone."
    template = PromptTemplate(
        input_variables=["text_input"],
        template="You are my blogger writter\n\n{text_input}\n\nSEO blog post ideas:")
    return LLMChain(llm=llm, prompt=template).run(request)
|
15 |
+
|
blog_ideas_generation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def blog_idea_gen(topic, llm):
    """Create at least 10 SEO-friendly blog post ideas on a topic.

    Args:
        topic: The subject or theme for which ideas are requested.
        llm: The language model instance used for generation.

    Returns:
        str: The suggested blog post ideas.
    """
    request = f"Create a list of at least 10 blog post ideas on the following topic: {topic}. Suggest SEO-Friendly title and use an emotional and persuasive tone in blog post titles."
    template = PromptTemplate(
        input_variables=["text_input"],
        template="You are my blogger writter\n\n{text_input}\n\nSEO blog post ideas:")
    return LLMChain(llm=llm, prompt=template).run(request)
|
15 |
+
|
blog_post_generation.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def blog_post_gen(topic, llm):
    """Generate a blog post about `topic` using the given LLM.

    Args:
        topic: The subject or theme for the blog post content.
        llm: The language model instance used for generation.

    Returns:
        str: The generated blog post.
    """
    # Bug fix: the original prompt misspelled "Post" as "Plost".
    blog_post_prompt = f"Write a blog Post about {topic}."
    blog_post_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Professional Content Creator and Blog Writer:\n{text_input}\nBlog Post:")
    blog_post_extraction_chain = LLMChain(llm=llm, prompt=blog_post_promptTemp)
    blog_post = blog_post_extraction_chain.run(blog_post_prompt)

    return blog_post
|
blog_post_tags_generation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def blog_tags_gen(blog, llm):
    """Suggest at least 5 tags for the given blog post.

    Args:
        blog: The title or topic of the blog post.
        llm: The language model instance used for generation.

    Returns:
        str: The suggested tags.
    """
    blog_tags_desc_prompt = f" Suggest at least 5 tags for the following blog post.\n{blog}"
    # Bug fix: the template's trailing label was copy-pasted from the idea
    # generator ("SEO blog post ideas:") although this chain produces tags.
    blog_tags_desc_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are my blogger writter\n\n{text_input}\n\nTags:")
    blog_tags_desc_extraction_chain = LLMChain(llm=llm, prompt=blog_tags_desc_promptTemp)
    blog_tags_desc = blog_tags_desc_extraction_chain.run(blog_tags_desc_prompt)

    return blog_tags_desc
|
15 |
+
|
email_marketing_campaigns_generation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def email_marketing_campaigns_gen(product_name, product_description, target_audience, goal, llm):
    """Generate an email marketing campaign for a product.

    The requested campaign includes a welcome email, a nurture sequence,
    and a promotional email.

    Args:
        product_name: Name of the product being marketed.
        product_description: Description of what the product is/does.
        target_audience: Audience the campaign targets.
        goal: Goal of the campaign.
        llm: The language model instance used for generation.

    Returns:
        str: The generated campaign copy.
    """
    request = f"Generate a high-converting email marketing campaign for {product_name}. {product_name} is {product_description}. that is targeted at {target_audience} and has the goal of {goal}. The campaign should include a welcome email, a nurture sequence, and a promotional email."
    template = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Professional Email Marketing Copywriter:\n{text_input}\nEmail Marketing Campaign:")
    return LLMChain(llm=llm, prompt=template).run(request)
|
email_writer.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
class email_writing:
    """Email-generation utilities backed by a language model.

    Attributes:
        llm: The language model instance used by every method.
    """

    def __init__(self, llm):
        # Language model shared by all generation methods.
        self.llm = llm

    def _run(self, request, template):
        """Build an LLMChain from `template` and run `request` through it."""
        prompt = PromptTemplate(input_variables=["text_input"], template=template)
        return LLMChain(llm=self.llm, prompt=prompt).run(request)

    def email_gen(self, recipient, recipient_position, sender_name, position_sender, desc):
        """Write a professional email.

        Args:
            recipient: Name of the recipient.
            recipient_position: The recipient's job position.
            sender_name: Name of the sender.
            position_sender: The sender's job position.
            desc: Description of what the email should be about.

        Returns:
            str: The generated email body.
        """
        request = f"Write a professional and well organized Email on {desc}.\nThe name of the Recipient is {recipient} and the recipient position is {recipient_position}.\nMy Name is {sender_name} and my Position is {position_sender}."
        return self._run(request, "You are a professional email writer:\n{text_input}\nEmail:")

    def email_subject_gen(self, email):
        """Generate a subject line for the given email body.

        Args:
            email: The email text to derive a subject from.

        Returns:
            str: The generated subject line.
        """
        request = f"Generate a subject for the following email:\n{email}\n"
        return self._run(request, "You are a professional email writer:\n{text_input}\nEmail Subject:")

    def email_marketing_campaigns_gen(self, product_name, product_description, target_audience, goal):
        """Generate a campaign with welcome, nurture, and promotional emails.

        Args:
            product_name: Name of the product being marketed.
            product_description: Description of what the product is/does.
            target_audience: Audience the campaign targets.
            goal: Goal of the campaign.

        Returns:
            str: The generated campaign copy.
        """
        request = f"Generate a high-converting email marketing campaign for {product_name}. {product_name} is {product_description}. that is targeted at {target_audience} and has the goal of {goal}. The campaign should include a welcome email, a nurture sequence, and a promotional email."
        return self._run(request, "You are a Professional Email Marketing Copywriter:\n{text_input}\nEmail Marketing Campaign:")
|
facebook_ads_generation.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def facebook_ads_gen(product_name, product_description, llm, tone_of_voice, targeted_audience="", plans_promotions=""):
    """Generate a Facebook ad for a product.

    Args:
        product_name: Name of the product.
        product_description: Description of what the product is/does.
        llm: The language model instance used for generation.
        tone_of_voice: Desired tone of the ad copy.
        targeted_audience: Optional audience description; included only when non-empty.
        plans_promotions: Optional plans/promotions text; included only when non-empty.

    Returns:
        str: The generated ad copy.
    """
    # Build the prompt compositionally instead of enumerating all four
    # audience/promotion combinations. This also fixes the missing space
    # before the tone ("be{tone_of_voice}") in the original
    # audience-only branch and normalizes the sentence endings.
    facebook_ads_prompt = f"Generate a Facebook ad for {product_name} Product. {product_name} is {product_description}."
    if targeted_audience:
        facebook_ads_prompt += f" Our Target Audience is {targeted_audience}."
    if plans_promotions:
        facebook_ads_prompt += f" Our plans and promotions is {plans_promotions}."
    facebook_ads_prompt += f" Tone of the ad should be {tone_of_voice}."

    facebook_ads_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Professional Facebook Ad Copywriter:\n{text_input}\nFacebook Ad:")

    facebook_ad_extraction_chain = LLMChain(llm=llm, prompt=facebook_ads_promptTemp)
    facebook_ad = facebook_ad_extraction_chain.run(facebook_ads_prompt)

    return facebook_ad
|
facebook_campaign.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def facbook_camp_gen(product_name, product_desc, days, goal, llm):
    """Generate a multi-day Facebook campaign calendar (no budgets).

    Args:
        product_name: Name of the product the campaign promotes.
        product_desc: Description of what the product is/does.
        days: Length of the campaign in days.
        goal: Goal of the campaign.
        llm: The language model instance used for generation.

    Returns:
        str: The generated campaign calendar.
    """
    facebook_ads_prompt = f"Generate a {days} days Facebook campaign (no budget included) calendar for our {product_name}. {product_name} is {product_desc}. with the goal to {goal}."
    # Removed a leftover debug print of the prompt.
    facebook_ads_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="""You are a Professional Facebook Digital Marketer:\n{text_input}\nGenerate only the Facebook campaign Calender without any details and don't mention any budgets:\nExample to emulate it:\nWeek 1: Getting Started and Teasers

Day 1-2: Introduction to FAQGenius, share its features and benefits.
Day 3-4: Teaser posts about how FAQGenius can save time and improve customer satisfaction.
Day 5-7: User testimonials and success stories with FAQGenius." and so on..
""")
    facebook_ad_extraction_chain = LLMChain(llm=llm, prompt=facebook_ads_promptTemp)
    facebook_ad = facebook_ad_extraction_chain.run(facebook_ads_prompt)

    return facebook_ad
|
21 |
+
|
22 |
+
def social_media_camp_gen(product_name, product_desc, days, goal, llm):
    """Generate a multi-day social media campaign calendar (no budgets).

    Args:
        product_name: Name of the product the campaign promotes.
        product_desc: Description of what the product is/does.
        days: Length of the campaign in days.
        goal: Goal of the campaign.
        llm: The language model instance used for generation.

    Returns:
        str: The generated campaign plan.
    """
    facebook_ads_prompt = f"Generate a {days} days Social Media campaign (no budget included) calendar for our {product_name}. {product_name} is {product_desc}. with the goal to {goal}."
    # Removed a leftover debug print of the prompt.
    facebook_ads_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Professional Social Media Marketer:\n{text_input}\nSocial Media Campaign:")
    facebook_ad_extraction_chain = LLMChain(llm=llm, prompt=facebook_ads_promptTemp)
    facebook_ad = facebook_ad_extraction_chain.run(facebook_ads_prompt)

    return facebook_ad
|
facebook_post_generation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def facebook_post_gen(tone_of_voice, topic, llm):
    """Generate an attractive Facebook post (text only) on a topic.

    Args:
        tone_of_voice: Desired tone of the post.
        topic: Subject of the post.
        llm: The language model instance used for generation.

    Returns:
        str: The generated post text.
    """
    # Bug fix: the original prompt misspelled "doesn't" as "dosen't".
    productDesc_prompt = f"Write an attractive facebook post on {topic}. Tone should be {tone_of_voice}. Post doesn't include any photos or videos."

    productDesc_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a professional facebook content creator:\n{text_input}\n\nFacebook Post:")

    productDesc_extraction_chain = LLMChain(llm=llm, prompt=productDesc_promptTemp)
    product_desc = productDesc_extraction_chain.run(productDesc_prompt)

    return product_desc
|
feather_chat.py
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Streamlit chat script backed by Google PaLM via LangChain, with a token
# counter that uses the flan-ul2 tokenizer as an approximation.
from transformers import AutoTokenizer
from langchain.chains import ConversationChain
from langchain.llms import GooglePalm
from langchain.memory import ConversationBufferMemory
import os
from dotenv import load_dotenv, find_dotenv
import streamlit as st


# Load GOOGLE_API_KEY from a .env file; raises KeyError if it is missing.
load_dotenv(find_dotenv())
google_api_key = os.environ['GOOGLE_API_KEY']
# Tokenizer used only for counting tokens, not for generation.
tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")

def call_palm(google_api_key, temperature=0.5, max_tokens=8000, top_p=0.95, top_k=40, n_batch=9, repeat_penalty=1.1, n_ctx=8000):
    """Instantiate a GooglePalm LLM with the given sampling parameters.

    Args:
        google_api_key (str): API key for the Google PaLM service.
        temperature (float): Sampling temperature (higher = more random).
        max_tokens (int): Passed as max_output_tokens to the model.
        top_p (float): Nucleus-sampling probability mass.
        top_k (int): Top-k sampling cutoff.
        n_batch (int): Batch size parameter forwarded to the model.
        repeat_penalty (float): Repetition penalty forwarded to the model.
        n_ctx (int): Context length forwarded to the model.

    Returns:
        GooglePalm: The configured LLM instance.
    """
    # NOTE(review): n_batch/repeat_penalty/n_ctx look like llama.cpp options;
    # confirm GooglePalm actually accepts them rather than ignoring/raising.
    google_palm_model = GooglePalm(

        google_api_key=google_api_key,
        temperature=temperature,
        max_output_tokens=max_tokens,
        top_p=top_p,
        top_k=top_k,
        n_batch=n_batch,
        repeat_penalty = repeat_penalty,
        n_ctx = n_ctx
    )

    return google_palm_model

llm = call_palm(google_api_key)
memory = ConversationBufferMemory()


# Running total of tokens sent and received in this conversation.
conversation_total_tokens = 0
new_conversation = ConversationChain(llm=llm,
                                     verbose=False,
                                     memory=memory, )

# NOTE(review): an unbounded `while True` creating a new st.text_input per
# iteration does not fit Streamlit's rerun model — each script run will keep
# adding widgets until break; verify this loop behaves as intended.
current_line_number = 1
while True:

    # A fresh widget key per iteration so Streamlit treats each input as new.
    message = st.text_input('Human', key = str(current_line_number))

    if message=='Exit':

        st.text(f"{conversation_total_tokens} tokens used in total in this conversation.")
        break

    if message:

        # Render the full prompt (history + new input) that will be sent.
        formatted_prompt = new_conversation.prompt.format(input=message, history=new_conversation.memory.buffer)
        st.text(f'formatted_prompt is {formatted_prompt}')

        # Count tokens in the outgoing prompt and add to the running total.
        num_tokens = len(tokenizer.tokenize(formatted_prompt))
        conversation_total_tokens += num_tokens
        st.text(f'tokens sent {num_tokens}')

        response = new_conversation.predict(input=message)
        response_num_tokens = len(tokenizer.tokenize(response))
        conversation_total_tokens += response_num_tokens
        st.text(f"Featherica: {response}")

    current_line_number = current_line_number + 1
|
idea_generation.py
ADDED
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
import re
|
4 |
+
|
5 |
+
def ideas_generator(topic, keywords, llm, tone_of_voice='Professional', num_of_ideas=3):
    """Generate catchy article titles for a topic, guided by keywords.

    Args:
        topic (str): Subject matter of the article.
        keywords (str): Keywords used to steer title generation.
        llm: The language model instance used for generation.
        tone_of_voice (str): Desired tone of the titles. Defaults to 'Professional'.
        num_of_ideas (int): Number of titles to request. Defaults to 3.

    Returns:
        str: Titles numbered with roman numerals.
    """
    if num_of_ideas == 1:
        request = f"Generate only 1 {tone_of_voice} and catchy Innovation title for my article about {topic} topic.\n\nuse this keywords to help you generate {tone_of_voice} catchy title: {keywords}."
    else:
        request = f"Generate only {num_of_ideas} {tone_of_voice} and catchy Innovation titles for my article about {topic} topic.\n\nuse this keywords to help you generate {tone_of_voice} catchy titles: {keywords}."

    template = PromptTemplate(
        input_variables=["text_input"],
        template="You are a professional content creator and Title Generator:\n\n{text_input}\n\n:Titles (number them with roman numerals):")

    return LLMChain(llm=llm, prompt=template).run(request)
|
47 |
+
|
48 |
+
|
49 |
+
def filter_ideas(ideas):
    """Extract titles numbered with roman numerals from a block of text.

    Args:
        ideas (str): Text containing titles prefixed like "I.", "II.", ...

    Returns:
        list[str]: The extracted titles, in order of appearance.
    """
    numbered_title = r'\b[IVXLCDM]+\.\s*(.*?)(?:\n|$)'
    return re.findall(numbered_title, ideas)
|
71 |
+
|
72 |
+
|
73 |
+
def pick_idea(list_ideas):
    """Interactively ask the user to choose one idea from a list.

    Prints a numbered menu of the ideas, reads the chosen number from
    stdin, and returns the corresponding idea.

    Args:
        list_ideas (list[str]): The ideas to choose from.

    Returns:
        str: The idea selected by the user.
    """
    print("Choose One Idea:\n")

    for position, candidate in enumerate(list_ideas, start=1):
        print(f"{position}. {candidate}")

    choice = int(input("Type the number of the idea: "))
    return list_ideas[choice - 1]
|
insta_image_caption_generation.py
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import pipeline
|
2 |
+
from langchain.prompts import PromptTemplate
|
3 |
+
from langchain.chains import LLMChain
|
4 |
+
|
5 |
+
def img2text(url):
    """Caption an image using the Salesforce BLIP image-captioning model.

    Args:
        url: Path or URL of the image to caption.

    Returns:
        str: The generated caption text.
    """
    captioner = pipeline("image-to-text", model='Salesforce/blip-image-captioning-base')
    predictions = captioner(url)
    return predictions[0]['generated_text']
|
14 |
+
|
15 |
+
|
16 |
+
def generate_InstaCap(scenario, tone_of_voice, form, llm):
    """Generate an Instagram caption for an image description.

    Args:
        scenario: Description of the Instagram image (e.g. from img2text).
        tone_of_voice: Desired tone of the caption.
        form: Desired form of the caption (e.g. short, long).
        llm: The language model instance used for generation.

    Returns:
        str: The generated caption.
    """
    instaCap_prompt = f"Craft a {form} Caption on my Instagram Image Here is the description of my Instagram Image: {scenario}.\nThe tone should be {tone_of_voice}"

    # Bug fix: the system-role template misspelled "influencer" as "infulencer".
    instaCap_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are influencer:\n{text_input}\nInstagram Caption:")

    instaCap_extraction_chain = LLMChain(llm=llm, prompt=instaCap_promptTemp)
    instaCap = instaCap_extraction_chain.run(instaCap_prompt)

    return instaCap
|
keywords_generation.py
ADDED
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
import re
|
4 |
+
|
5 |
+
def keywords_generator(topic, llm, num_keywords=10):
    """Generate a list of SEO keywords for a topic using an LLM.

    Args:
        topic (str): The topic for which to generate keywords.
        llm (langchain.llms object): The LLM to use for generating keywords.
        num_keywords (int): The number of keywords to generate. Defaults to 10.

    Returns:
        str: Keywords numbered with roman numerals.
    """
    # Doc fix: the original docstring advertised a `long_tail_keywords`
    # parameter that does not exist in the signature; it has been removed.
    keywords_prompt = f"Identify {num_keywords} SEO keywords related to '{topic}'."

    keywords_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a professional content creator and SEO Keywords Generator:\n\n{text_input}\n\nKeywords (number them with roman numerals):")

    keywords_extraction_chain = LLMChain(llm=llm, prompt=keywords_promptTemp)
    keywords = keywords_extraction_chain.run(keywords_prompt)

    return keywords
|
37 |
+
|
38 |
+
|
39 |
+
def filter_keywords(keywords):
    """Extract keywords numbered with roman numerals from a block of text.

    Args:
        keywords (str): Text containing keywords prefixed like "I.", "II.", ...

    Returns:
        list[str]: The extracted keywords, in order of appearance.
    """
    numbered_entry = r'\b[IVXLCDM]+\.\s*(.*?)(?:\n|$)'
    return re.findall(numbered_entry, keywords)
|
62 |
+
|
63 |
+
|
64 |
+
def process_keywords(list_keywords):
    """Format a list of keywords as a single comma-separated string.

    Args:
        list_keywords (list[str]): The keywords to format.

    Returns:
        str: The keywords joined by ", ".
    """
    return ", ".join(list_keywords)
|
landing_page_generation.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def landing_page_gen(product_name, product_desc, target_audience, goal, llm):
    """Generate landing page content for a product.

    Args:
        product_name: Name of the product.
        product_desc: Description of what the product is/does.
        target_audience: Audience the page targets.
        goal: Goal of the landing page.
        llm: The language model instance used for generation.

    Returns:
        str: The generated landing page content.
    """
    request = f"Generate a landing page content for {product_name}. {product_name} is {product_desc} that is targeted at {target_audience} and has the goal of {goal}. The landing page should be clear, concise, and persuasive. It should also include a call to action."
    template = PromptTemplate(
        input_variables=["text_input"],
        template="{text_input}")
    return LLMChain(llm=llm, prompt=template).run(request)
|
linkedIn_ads_generation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def linkedIn_ads_gen(product_name, product_desc, target_audience, target_keywords, llm):
    """Generate a LinkedIn ad for a product.

    Args:
        product_name: Name of the product.
        product_desc: Description of what the product is/does.
        target_audience: Audience the ad targets.
        target_keywords: Keywords to incorporate into the ad.
        llm: The language model instance used for generation.

    Returns:
        str: The generated ad copy.
    """
    linkedIn_ads_prompt = f"Generate a LinkedIn ad for {product_name}. {product_name} is {product_desc} that is targeted at {target_audience} and uses the keywords {target_keywords}. The ad should be persuasive and engaging, and it should include a call to action."

    # Bug fix: the template's output label was copy-pasted from the Facebook
    # ad generator ("Facebook Ad:") although this chain writes a LinkedIn ad.
    linkedIn_ads_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Professional LinkedIn Ad Copywriter:\n{text_input}\nLinkedIn Ad:")
    linkedIn_ad_extraction_chain = LLMChain(llm=llm, prompt=linkedIn_ads_promptTemp)
    linkedIn_ad = linkedIn_ad_extraction_chain.run(linkedIn_ads_prompt)

    return linkedIn_ad
|
linkedin_post_generation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def linkedIn_post_gen(topic, tone_of_voice, llm):
    """Generate an engaging LinkedIn post on a topic.

    Args:
        topic: Subject of the post.
        tone_of_voice: Desired tone of the post.
        llm: The language model instance used for generation.

    Returns:
        str: The generated post text.
    """
    # Bug fix: the original prompt misspelled "engaging" as "engagging".
    linkedIn_post_prompt = f"Write an engaging LinkedIn Post on {topic}. The tone should be {tone_of_voice}."

    linkedIn_post_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a content creator and LinkedIn Posts writer :\n{text_input}\nLinkedIn Post:")

    linkedIn_post_extraction_chain = LLMChain(llm=llm, prompt=linkedIn_post_promptTemp)
    linkedIn_post = linkedIn_post_extraction_chain.run(linkedIn_post_prompt)

    return linkedIn_post
|
load_model.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.llms import GooglePalm, LlamaCpp
|
2 |
+
|
3 |
+
def call_palm(google_api_key, temperature=0, max_tokens=8000, top_p=0.95, top_k=40, n_batch=9, repeat_penalty=1.1, n_ctx=8000):
    """Instantiate a Google Palm language model via LangChain.

    Args:
        google_api_key (str): Google API key for the Palm model.
        temperature (float): Sampling randomness; higher is more creative.
        max_tokens (int): Maximum number of output tokens to generate
            (mapped to GooglePalm's ``max_output_tokens``).
        top_p (float): Nucleus-sampling probability mass.
        top_k (int): Number of top tokens considered per sampling step.
        n_batch (int): Unused; see note below.
        repeat_penalty (float): Unused; see note below.
        n_ctx (int): Unused; see note below.

    Returns:
        GooglePalm: A LangChain LLM object ready for use in chains.

    Note:
        ``n_batch``, ``repeat_penalty`` and ``n_ctx`` are kept in the
        signature for backward compatibility but are deliberately NOT
        forwarded: LangChain's ``GooglePalm`` model does not define those
        fields, and passing them raises a pydantic validation error at
        construction time (a runtime crash with the original code).
    """
    google_palm_model = GooglePalm(
        google_api_key=google_api_key,
        temperature=temperature,
        max_output_tokens=max_tokens,
        top_p=top_p,
        top_k=top_k,
    )

    return google_palm_model
|
45 |
+
|
46 |
+
def call_llama2(model_path, temperature=0, max_tokens=8192, top_p=0.95, top_k=40, n_batch=9, repeat_penalty=1.1, n_ctx=8192):
    """Instantiate a Meta Llama-2 13B model through LangChain's LlamaCpp.

    Args:
        model_path (str): Path to the Llama model file on disk.
        temperature (float): Sampling randomness; higher is more creative
            and varied, lower is more deterministic.
        max_tokens (int): Maximum number of tokens to generate.
        top_p (float): Nucleus-sampling probability mass; higher gives
            more diverse (but possibly less coherent) text.
        top_k (int): Number of candidate tokens considered per step.
        n_batch (int): Batch size used during generation.
        repeat_penalty (float): Penalty applied to repeated tokens.
        n_ctx (int): Context window length used for generation.

    Returns:
        LlamaCpp: A LangChain LLM object ready for use in chains.
    """
    return LlamaCpp(
        model_path=model_path,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        top_k=top_k,
        n_batch=n_batch,
        repeat_penalty=repeat_penalty,
        n_ctx=n_ctx,
    )
|
meta_content_creation.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
from transformers import pipeline
|
4 |
+
|
5 |
+
class meta_content_generation:
    """Generators for Meta-platform (Facebook / Instagram) marketing content.

    Every generator follows the same pattern: build a task prompt, wrap it
    in a persona PromptTemplate, and run it through an LLMChain backed by
    the shared ``llm``.
    """

    def __init__(self, llm):
        # LangChain-compatible language model shared by all generators below.
        self.llm = llm

    def facebook_ads_gen(self, product_name, product_description, tone_of_voice, targeted_audience="", plans_promotions=""):
        """Generate Facebook ad copy for a product.

        Args:
            product_name (str): Name of the product.
            product_description (str): Short description of the product.
            tone_of_voice (str): Desired tone of the ad.
            targeted_audience (str): Optional audience description; left out
                of the prompt when empty.
            plans_promotions (str): Optional plans/promotions; left out of
                the prompt when empty.

        Returns:
            str: The generated Facebook ad text.
        """
        # Build the prompt compositionally instead of the original four-way
        # if/elif ladder (which also had inconsistent spacing, e.g.
        # "be{tone_of_voice}"); empty optional fields are simply omitted.
        parts = [f"Generate a Facebook ad for {product_name} Product. {product_name} is {product_description}."]
        if targeted_audience:
            parts.append(f"Our Target Audience is {targeted_audience}.")
        if plans_promotions:
            parts.append(f"Our plans and promotions is {plans_promotions}.")
        parts.append(f"Tone of the ad should be {tone_of_voice}.")
        facebook_ads_prompt = " ".join(parts)

        facebook_ads_promptTemp = PromptTemplate(
            input_variables=["text_input"],
            template="You are a Professional Facebook Ad Copywriter:\n{text_input}\nFacebook Ad:")

        facebook_ad_extraction_chain = LLMChain(llm=self.llm, prompt=facebook_ads_promptTemp)
        return facebook_ad_extraction_chain.run(facebook_ads_prompt)

    def facbook_camp_gen(self, product_name, product_desc, days, goal):
        """Generate a multi-day Facebook campaign calendar (no budgets).

        Note: the public method name keeps the original (misspelled)
        spelling ``facbook_camp_gen`` for backward compatibility.

        Args:
            product_name (str): Name of the product.
            product_desc (str): Short description of the product.
            days (int | str): Length of the campaign in days.
            goal (str): Campaign objective.

        Returns:
            str: The generated campaign calendar text.
        """
        facebook_ads_prompt = f"Generate a {days} days Facebook campaign (no budget included) calendar for our {product_name}. {product_name} is {product_desc}. with the goal to {goal}."
        print(facebook_ads_prompt)  # kept: original logs the prompt to stdout
        facebook_ads_promptTemp = PromptTemplate(
            input_variables=["text_input"],
            template="""You are a Professional Facebook Digital Marketer:\n{text_input}\nGenerate only the Facebook campaign Calender without any details and don't mention any budgets:\nExample to emulate it:\nWeek 1: Getting Started and Teasers

Day 1-2: Introduction to FAQGenius, share its features and benefits.
Day 3-4: Teaser posts about how FAQGenius can save time and improve customer satisfaction.
Day 5-7: User testimonials and success stories with FAQGenius." and so on..
""")
        facebook_ad_extraction_chain = LLMChain(llm=self.llm, prompt=facebook_ads_promptTemp)
        return facebook_ad_extraction_chain.run(facebook_ads_prompt)

    def facebook_post_gen(self, tone_of_voice, topic):
        """Generate a text-only Facebook post on a topic.

        Args:
            tone_of_voice (str): Desired tone of the post.
            topic (str): Subject of the post.

        Returns:
            str: The generated Facebook post text.
        """
        # Typo fix in the prompt: "dosen't" -> "doesn't".
        productDesc_prompt = f"Write an attractive facebook post on {topic}. Tone should be {tone_of_voice}. Post doesn't include any photos or videos."

        productDesc_promptTemp = PromptTemplate(
            input_variables=["text_input"],
            template="You are a professional facebook content creator:\n{text_input}\n\nFacebook Post:")

        productDesc_extraction_chain = LLMChain(llm=self.llm, prompt=productDesc_promptTemp)
        return productDesc_extraction_chain.run(productDesc_prompt)

    def img2text(self, url):
        """Caption an image with the BLIP image-captioning model.

        Args:
            url: Image path/URL accepted by the transformers pipeline.

        Returns:
            str: The generated caption.

        NOTE(review): the pipeline (and its model weights) is constructed on
        every call, which is slow; consider caching it — confirm before
        changing, as instantiation may be deliberate to limit memory.
        """
        image_to_text = pipeline("image-to-text", model='Salesforce/blip-image-captioning-base')
        text = image_to_text(url)
        return text[0]['generated_text']

    def generate_InstaCap(self, scenario, tone_of_voice, form):
        """Generate an Instagram caption from an image description.

        Args:
            scenario (str): Description of the Instagram image (e.g. from
                ``img2text``).
            tone_of_voice (str): Desired tone of the caption.
            form (str): Desired caption form (e.g. short, long).

        Returns:
            str: The generated Instagram caption.
        """
        instaCap_prompt = f"Craft a {form} Caption on my Instagram Image Here is the description of my Instagram Image: {scenario}.\nThe tone should be {tone_of_voice}"

        # Typo fix in the persona template: "infulencer" -> "influencer".
        instaCap_promptTemp = PromptTemplate(
            input_variables=["text_input"],
            template="You are influencer:\n{text_input}\nInstagram Caption:")

        instaCap_extraction_chain = LLMChain(llm=self.llm, prompt=instaCap_promptTemp)
        return instaCap_extraction_chain.run(instaCap_prompt)
|
89 |
+
|
90 |
+
|
91 |
+
|
92 |
+
|
93 |
+
|
94 |
+
|
outline_generation.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
import re
|
4 |
+
from remove_astricks import remove_ast
|
5 |
+
|
6 |
+
|
7 |
+
def outlines_generator(idea, keywords, llm):
    """Generate a structured article outline for a given idea and keywords.

    Args:
        idea (str): Main idea or topic of the article.
        keywords (str): Keywords/terms the outline's sections should cover.
        llm: LangChain-compatible language model (e.g. Google Palm, Llama-2)
            used to generate the outline.

    Returns:
        str: The generated table of contents, with main points numbered
        using roman numerals (see ``filtered_outlines`` for parsing).
    """
    # Typo fix in the LLM prompt: "Generate an table" -> "Generate a table".
    outlines_prompt = f"Generate a table of contents with at least 10 main points for an article on {idea}. Include key points related to {keywords}.\nBe creative and innovation in each main topic"

    outlines_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a content creator\n{text_input}\n\nTable of Content (number each main point with roman numerals):")

    outlines_extraction_chain = LLMChain(llm=llm, prompt=outlines_promptTemp)
    outlines = outlines_extraction_chain.run(outlines_prompt)

    return outlines
|
40 |
+
|
41 |
+
|
42 |
+
def filtered_outlines(outline):
    """Split a roman-numeral outline into cleaned, renumbered sections.

    The input is expected to contain section headers formatted with roman
    numerals ("I. ", "II. ", ...). Each section is re-numbered with plain
    integers and stripped of asterisks.

    Args:
        outline (str): Outline text with roman-numeral section headers.

    Returns:
        list[str]: Cleaned sections, each prefixed "1. ", "2. ", ...
    """
    # Drop the text before the first roman-numeral header, then renumber.
    raw_sections = re.split(r'\b[IVXLCDM]+\.\s', outline)[1:]
    return [
        remove_ast(f"{num}. {body}")
        for num, body in enumerate(raw_sections, start=1)
    ]
|
67 |
+
|
product_description_generation.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def product_description_gen(product_name, product_desc, tone_of_voice, llm):
    """Generate an engaging Amazon product description.

    Args:
        product_name (str): Name of the product.
        product_desc (str): Brief description of the product.
        tone_of_voice (str): Intended tone (e.g. professional, friendly,
            persuasive).
        llm: LangChain-compatible language model used to run the chain.

    Returns:
        str: The generated Amazon product description.
    """
    # Typo fix in the LLM prompt: "engagging" -> "engaging".
    productDesc_prompt = f"Write an engaging and {tone_of_voice} Amazon product description of {product_name} Product here is a short description of my product:\n\n{product_desc}\n"

    productDesc_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a content creator and product description writer who helps clients to write their product description on amazon:\n{text_input}\nAmazon Product Description:")

    productDesc_extraction_chain = LLMChain(llm=llm, prompt=productDesc_promptTemp)
    product_desc = productDesc_extraction_chain.run(productDesc_prompt)

    return product_desc
|
remove_astricks.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
|
3 |
+
def remove_ast(text):
    """Return *text* with every asterisk ('*') removed.

    Args:
        text (str): Input string, possibly containing markdown asterisks.

    Returns:
        str: The input with all '*' characters stripped.
    """
    # str.replace is the idiomatic (and faster) choice for removing a
    # single literal character; a regex is unnecessary here.
    return text.replace('*', '')
|
workout_plan_generation.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def workout_plan_gen(my_goals, fitness_level, days, hours, helth_cosnd, routine, llm):
    """Generate a personalized workout plan.

    Args:
        my_goals (str): What the user wants to achieve.
        fitness_level (str): User's current fitness level.
        days (int | str): Training days per week.
        hours (int | str): Hours available per session.
        helth_cosnd (str): Health considerations (original parameter
            spelling preserved for caller compatibility).
        routine (str): Preferred training routine.
        llm: LangChain-compatible language model used to run the chain.

    Returns:
        str: The generated workout plan text.
    """
    workout_plan_prompt = f"""Generate a workout plan with diversity and creative exercises.
    Please note the following details:
    * My Goals: I'm looking to {my_goals}
    * Fitness Level: {fitness_level}.
    * Available Equipment: I have access to a gym with various equipment.
    * Time Commitment: I'm dedicated to working out {days} days a week, and I have {hours} hours a day I can spend during each session.
    * Health Considerations: I'm in the {helth_cosnd}.
    * Preferred Routine: I like to follow {routine} routine.\nWorkout Plan:"""

    workout_plan_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Professional Fitness Trainer:\n{text_input}")
    plan_chain = LLMChain(llm=llm, prompt=workout_plan_promptTemp)
    return plan_chain.run(workout_plan_prompt)
|
x_bio_creation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def x_bio_gen(info, tone_of_voice, llm):
    """Generate ten tailored Twitter/X bio options.

    Args:
        info (str): What the user's content is about.
        tone_of_voice (str): Desired tone of the bios.
        llm: LangChain-compatible language model used to run the chain.

    Returns:
        str: The generated bios as a single text block.
    """
    bio_prompt = f"Craft a personalized Twitter bio for me, based on the content I create, related to {info}. tone should be {tone_of_voice}, Produce 10 tailor-made Twitter bios for me."
    bio_template = PromptTemplate(
        input_variables=["text_input"],
        template="You are a copywriter\n{text_input}\n\nTwitter Bios:")
    bio_chain = LLMChain(llm=llm, prompt=bio_template)
    return bio_chain.run(bio_prompt)
|
14 |
+
|
15 |
+
|
x_campaign_generation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def x_camp_gen(product_name, product_desc, goal, llm):
    """Generate a Twitter/X marketing campaign for a product.

    Args:
        product_name (str): Name of the product.
        product_desc (str): Short description of the product.
        goal (str): Campaign objective.
        llm: LangChain-compatible language model used to run the chain.

    Returns:
        str: The generated campaign text.
    """
    camp_prompt = f"Design a potent Twitter campaign for my product '{product_name}'. {product_name} is {product_desc} aiming to accomplish {goal} through the meticulous adoption of planning and content creation best practices.\n\nCampaign will include: Campaign Teaser, Educational Content Series, Customer Testimonials and Case Studies, Interactive Content, Limited-Time Offer Announcement, Call-to-Action for Consultation and Recap and Thank You."
    print(camp_prompt)  # kept: original logs the prompt to stdout
    camp_template = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Specialist in Twitter Copywriting\n\n{text_input}\n\nMake the tweets engaging, creative and coherent")
    camp_chain = LLMChain(llm=llm, prompt=camp_template)
    return camp_chain.run(camp_prompt)
|
x_retweet_commenting_generation.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def x_retweet_commenting_gen(tweet, tone_of_voice, llm):
    """Generate five comment options for retweeting a given tweet.

    Args:
        tweet (str): The tweet being retweeted.
        tone_of_voice (str): Desired tone of the comments.
        llm: LangChain-compatible language model used to run the chain.

    Returns:
        str: The generated retweet comments as a single text block.
    """
    x_retweet_comment_prompt = f"I'm planning to retweet the following tweet:\n“{tweet}”\nConstruct 5 varied comments I could append to this retweet. tone should be {tone_of_voice}"
    # Typo fix in the persona template: "Specilaized" -> "Specialized".
    x_retweet_comment_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are Specialized Twitter Copywriter\n{text_input}\n\nRetweet Comment:")
    x_retweet_comment_extraction_chain = LLMChain(llm=llm, prompt=x_retweet_comment_promptTemp)
    x_retweet_comment = x_retweet_comment_extraction_chain.run(x_retweet_comment_prompt)

    return x_retweet_comment
|
14 |
+
|
15 |
+
|
x_thread_generation.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def X_thread_gen(topic, num_tweets, tone_of_voice, llm):
    """Generate a Twitter/X thread on a topic.

    Args:
        topic (str): Subject of the thread.
        num_tweets (int | str): Number of tweets the thread should contain.
        tone_of_voice (str): Desired tone of the thread.
        llm: LangChain-compatible language model used to run the chain.

    Returns:
        str: The generated thread text.
    """
    # Typo fix in the LLM prompt: "Write a an engagging" -> "Write an engaging".
    X_thread_prompt = f"Write an engaging Twitter Thread on '{topic}' consists of {num_tweets} tweets. Tone should be {tone_of_voice}"

    X_thread_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Twitter Content Creator:\n{text_input}\n\nTwitter Thread:")

    X_thread_extraction_chain = LLMChain(llm=llm, prompt=X_thread_promptTemp)
    X_thread = X_thread_extraction_chain.run(X_thread_prompt)

    return X_thread
|
16 |
+
|
17 |
+
|
18 |
+
def X_thread_gen_intro(topic, thread, tone_of_voice, llm):
    """Generate an attention-grabbing head tweet for an existing thread.

    Args:
        topic (str): Subject of the thread.
        thread (str): The thread body (e.g. output of ``X_thread_gen``).
        tone_of_voice (str): Desired tone of the intro tweet.
        llm: LangChain-compatible language model used to run the chain.

    Returns:
        str: The generated introduction tweet.
    """
    # Typo fix in the LLM prompt: "Write a an engagging" -> "Write an engaging".
    X_thread_prompt = f"Write an engaging and attractive Introduction head tweet for my twitter thread on {topic}. here is my twitter thread:\n{thread}\nTone should be {tone_of_voice}"

    X_thread_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a Twitter Content Creator:\n{text_input}\n\nTweet:")

    X_thread_extraction_chain = LLMChain(llm=llm, prompt=X_thread_promptTemp)
    X_thread = X_thread_extraction_chain.run(X_thread_prompt)

    return X_thread
|
youtube_ideas.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
|
4 |
+
def landing_page_gen(brand, customer, action, llm):
    """Generate a behind-the-scenes YouTube video idea for a brand.

    NOTE(review): despite the name, this function (in youtube_ideas.py)
    generates a YouTube video idea, not a landing page — confirm the name
    with callers before renaming.

    Args:
        brand (str): The brand to showcase.
        customer (str): The customer segment to persuade.
        action (str): The action the video should drive.
        llm: LangChain-compatible language model used to run the chain.

    Returns:
        str: The generated video idea.
    """
    idea_prompt = f"Generate a YouTube video idea that will provide a behind-the-scenes look at my {brand} and persuade my {customer} to take {action} with a sense of authenticity and relatability."
    idea_template = PromptTemplate(
        input_variables=["text_input"],
        template="{text_input}")
    idea_chain = LLMChain(llm=llm, prompt=idea_template)
    return idea_chain.run(idea_prompt)
|