daniellefranca96 committed on
Commit 1c86ad8
1 Parent(s): bf0a86d

Add application file

Files changed (3)
  1. app.py +35 -0
  2. generate_text.py +78 -0
  3. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,35 @@
+ import os
+
+ import gradio as gr
+ from generate_text import GenerateStyleText
+
+
+ def process(model, key, repo, example, prompt):
+     set_key(key)
+     generate_text = GenerateStyleText(example=example, prompt=prompt)
+     if model == "HuggingFaceHub":
+         model = repo
+     generate_text.set_imp_llm(model)
+     return generate_text.run()
+
+
+
+ def set_key(key):
+     os.environ['OPENAI_API_KEY'] = key
+     os.environ['HUGGINGFACEHUB_API_TOKEN'] = key
+     os.environ['ANTHROPIC_API_KEY'] = key
+
+
+ title = "StyleScribble Demo"
+ description = "This is a demo of StyleScribble, the AI-powered app that generates text in your writing style. " \
+               "You can learn more and sign up for the full launch here: https://stylescribble.fly.dev"
+
+ model_c = gr.Dropdown(choices=["GPT3", "GPT4", "Claude", "HuggingFaceHub"], label="Model", value="GPT3")
+ key_c = gr.Textbox(label="API Key")
+ repo_c = gr.Textbox(label="HF Repo (only used when HuggingFaceHub is selected as the model)")
+ example_c = gr.Textbox(label="Example Writing:", lines=15)
+ prompt_c = gr.Textbox(label="Prompt:", lines=5)
+ output = gr.Textbox(label="Generated Text:", lines=39)
+ demo = gr.Interface(process, [model_c, key_c, repo_c, example_c, prompt_c], output, title=title, description=description)
+
+ demo.launch()
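
Note (not part of the commit): gr.Interface passes the five components to process positionally as (model, key, repo, example, prompt). Below is a minimal, hypothetical sketch of what the callback does when the UI is bypassed; the API key is a placeholder, and generate_text.py is the module added later in this same commit.

# Hypothetical sketch, not part of this commit: what process() does without the Gradio UI.
import os
from generate_text import GenerateStyleText

os.environ["OPENAI_API_KEY"] = "sk-placeholder"  # placeholder value, as set_key() would set it

generator = GenerateStyleText(
    example="A few paragraphs written in the voice to imitate.",   # example_c
    prompt="Write a short post announcing a new coffee blend.",    # prompt_c
)
generator.set_imp_llm("GPT3")  # for HuggingFaceHub, the HF repo id is passed here instead
print(generator.run())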
generate_text.py ADDED
@@ -0,0 +1,78 @@
+ from langchain.base_language import BaseLanguageModel
+ from langchain.chains import LLMChain, SequentialChain
+ from langchain.chat_models import ChatAnthropic
+ from langchain.chat_models import ChatOpenAI
+ from langchain.llms import HuggingFaceHub
+ from langchain.prompts import (
+     PromptTemplate,
+     ChatPromptTemplate,
+     SystemMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+ )
+
+
+ class GenerateStyleText:
+     example: str
+     prompt: str
+     llm: BaseLanguageModel
+
+     def __init__(self, example=None, prompt=None, llm=None):
+         self.example = example
+         self.prompt = prompt
+         self.llm = llm
+
+     def set_imp_llm(self, model):
+         if model == 'GPT3':
+             self.llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k")
+         elif model == "GPT4":
+             self.llm = ChatOpenAI(model_name="gpt-4")
+         elif model == "Claude":
+             self.llm = ChatAnthropic()
+         else:
+             self.llm = HuggingFaceHub(repo_id=model)
+
+     def run(self):
+         return self.process()
+
+     def process(self):
+         seq_chain = SequentialChain(
+             chains=[self.get_extract_tone_chain(), self.get_generate_text_chain(self.prompt),
+                     self.get_apply_style_chain()],
+             input_variables=["text"], verbose=True)
+         result = seq_chain({'text': self.example, "style": ""})
+         return str(result.get('result'))
+
+     def create_chain(self, chat_prompt, output_key):
+         return LLMChain(llm=self.llm,
+                         prompt=chat_prompt, output_key=output_key)
+
+     def get_extract_tone_chain(self):
+         template = """Based on the tone and writing style in the seed text, create a style guide for a blog or
+         publication that captures the essence of the seed's tone. Emphasize engaging techniques that help readers
+         feel connected to the content.
+         """
+         system_message_prompt = SystemMessagePromptTemplate.from_template(template)
+         human_template = "{text}"
+         human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
+         chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
+
+         return self.create_chain(chat_prompt, "style")
+
+     def get_generate_text_chain(self, prompt):
+         template = """Generate a text following the user_request (use the same language as the request):
+         {user_request}
+         """.replace("{user_request}", prompt)
+         return self.create_chain(PromptTemplate.from_template(template),
+                                  "generated_text")
+
+     def get_apply_style_chain(self):
+         template = """STYLE:
+         {style}
+         REWRITE THE TEXT BELOW, APPLYING THE STYLE ABOVE (use the same language as the request).
+         ONLY GENERATE NEW TEXT BASED ON THE STYLE CONTEXT; DO NOT COPY EXACT PARTS OF THE STYLE:
+         {generated_text}
+         """
+
+         prompt = PromptTemplate.from_template(template=template)
+         prompt.partial(style="")
+         return self.create_chain(prompt, "result")
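
Note (not part of the commit): process() wires three LLMChains into a SequentialChain, where the seed text is distilled into a style guide ("style"), the user prompt is expanded into a draft ("generated_text"), and the draft is then rewritten in that style ("result"). Below is a hypothetical sketch that rebuilds the same pipeline but also surfaces the intermediate outputs via SequentialChain's return_all flag; it assumes OPENAI_API_KEY is already set in the environment.

# Hypothetical inspection sketch, not part of this commit.
from langchain.chains import SequentialChain
from generate_text import GenerateStyleText

gen = GenerateStyleText(
    example="A few paragraphs written in the voice to imitate.",
    prompt="Write a short post announcing a new coffee blend.",
)
gen.set_imp_llm("GPT3")  # selects ChatOpenAI(model_name="gpt-3.5-turbo-16k")

pipeline = SequentialChain(
    chains=[gen.get_extract_tone_chain(),
            gen.get_generate_text_chain(gen.prompt),
            gen.get_apply_style_chain()],
    input_variables=["text"],
    return_all=True,   # also return "style" and "generated_text", not just "result"
    verbose=True,
)
outputs = pipeline({"text": gen.example})
print(outputs["style"])           # style guide extracted from the seed text
print(outputs["generated_text"])  # draft generated from the user prompt
print(outputs["result"])          # draft rewritten in the extracted style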
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ gradio==3.36.1
+ langchain==0.0.231