gathnex committed on
Commit
6fa9e8b
•
1 Parent(s): 18c7e6b

Upload 4 files

Browse files
Files changed (4)
  1. Linkedin_post.txt +150 -0
  2. app.txt +65 -0
  3. llm_automation.txt +73 -0
  4. requirements.txt +6 -0
Linkedin_post.txt ADDED
@@ -0,0 +1,150 @@
+ import re, requests, json, os, openai
+ from bs4 import BeautifulSoup
+ from openai import OpenAI
+
+ class LinkedinAutomate:
+     def __init__(self, access_token, medium_url, openai_api):
+         self.access_token = access_token
+         self.openai_api = openai_api
+         #self.yt_url = yt_url
+         self.medium_url = medium_url
+         self.python_group_list = []
+         self.headers = {
+             'Authorization': f'Bearer {self.access_token}'
+         }
+
+     def get_page_content(self):
+         # Fetch the Medium article and strip scripts/styles to keep only the readable text
+         response = requests.get(self.medium_url)
+         soup = BeautifulSoup(response.text, 'html.parser')
+         for script in soup(["script", "style"]):
+             script.extract()
+         text = soup.get_text()
+         text = '\n'.join(line.strip() for line in text.split('\n'))
+         text = '\n'.join(line for line in text.split('\n') if line)
+         return text
+
+     def get_title_description(self):
+         # Ask GPT-3.5 for a "Title: ... Description: ..." block and parse it back out
+         def extract_title_and_description(input_text):
+             title_pattern = r"Title:(.+?)(?=Description:)"
+             description_pattern = r"Description:(.+)"
+             title_match = re.search(title_pattern, input_text, re.DOTALL)
+             description_match = re.search(description_pattern, input_text, re.DOTALL)
+             if title_match and description_match:
+                 title = title_match.group(1).strip()
+                 description = description_match.group(1).strip()
+                 return title, description
+             else:
+                 return None, None
+         x = self.get_page_content()
+         client = OpenAI(api_key=self.openai_api)
+         DEFAULT_SYSTEM_PROMPT = "You are a content title and description generator. Your task is to create compelling and engaging titles for various types of content, such as articles, blogs, videos, and products. Additionally, you are responsible for generating concise and informative descriptions that capture the essence of the content. Focus on creating attention-grabbing titles that pique the interest of the audience and descriptions that provide a clear overview of the content's key points. If additional context is needed, ask for clarification to generate more accurate titles and descriptions. Your goal is to assist users in creating captivating and informative content titles and descriptions."
+         response = client.chat.completions.create(
+             model="gpt-3.5-turbo",
+             messages=[
+                 {"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
+                 {"role": "user", "content": "Content: '" + x + "'. Create one title and description of the content and no other content"},
+             ]
+         )
+         mod_output = response.choices[0].message.content
+         title, description = extract_title_and_description(mod_output)
+         return title, description
+
+     def common_api_call_part(self, feed_type="feed", group_id=None):
+         # Build the LinkedIn UGC post payload shared by feed posts and group posts
+         x, y = self.get_title_description()
+         payload_dict = {
+             "author": f"urn:li:person:{self.user_id}",
+             "lifecycleState": "PUBLISHED",
+             "specificContent": {
+                 "com.linkedin.ugc.ShareContent": {
+                     "shareCommentary": {
+                         "text": y
+                     },
+                     "shareMediaCategory": "ARTICLE",
+                     "media": [
+                         {
+                             "status": "READY",
+                             "description": {
+                                 "text": y
+                             },
+                             "originalUrl": self.medium_url,
+                             "title": {
+                                 "text": x
+                             },
+                             "thumbnails": [
+                                 {
+                                     "url": self.extract_medium_thumbnail()
+                                 }
+                             ]
+                         }
+                     ]
+                 }
+             },
+             "visibility": {
+                 "com.linkedin.ugc.MemberNetworkVisibility": "PUBLIC" if feed_type == "feed" else "CONTAINER"
+             }
+         }
+         if feed_type == "group":
+             payload_dict["containerEntity"] = f"urn:li:group:{group_id}"
+
+         return json.dumps(payload_dict)
+
+     # Extract the thumbnail of a YouTube video
+     # (requires self.yt_url, which is currently commented out in __init__)
+     def extract_thumbnail_url_from_YT_video_url(self):
+         exp = r"^.*((youtu.be\/)|(v\/)|(\/u\/\w\/)|(embed\/)|(watch\?))\??v?=?([^#&?]*).*"
+         s = re.findall(exp, self.yt_url)[0][-1]
+         return f"https://i.ytimg.com/vi/{s}/maxresdefault.jpg"
+
+     # Extract the thumbnail of the Medium blog
+     def fetch_blog_html(self, url):
+         response = requests.get(url)
+         if response.status_code == 200:
+             return response.text
+         else:
+             return None
+
+     def extract_medium_thumbnail(self):
+         # Read the og:image meta tag from the blog's HTML, if present
+         def extract_thumbnail_url_from_medium_blog(blog_html):
+             soup = BeautifulSoup(blog_html, 'html.parser')
+             thumbnail_meta_tag = soup.find('meta', property='og:image')
+             if thumbnail_meta_tag:
+                 thumbnail_url = thumbnail_meta_tag['content']
+                 return thumbnail_url
+             return None
+
+         blog_html = self.fetch_blog_html(self.medium_url)
+         if blog_html:
+             thumbnail_url = extract_thumbnail_url_from_medium_blog(blog_html)
+             return thumbnail_url
+         else:
+             return None
+
+     def get_user_id(self):
+         # Resolve the authenticated member id from LinkedIn's OpenID userinfo endpoint
+         url = "https://api.linkedin.com/v2/userinfo"
+         response = requests.request("GET", url, headers=self.headers)
+         jsonData = json.loads(response.text)
+         return jsonData["sub"]
+
+     def feed_post(self):
+         url = "https://api.linkedin.com/v2/ugcPosts"
+         payload = self.common_api_call_part()
+
+         return requests.request("POST", url, headers=self.headers, data=payload)
+
+     def group_post(self, group_id):
+         url = "https://api.linkedin.com/v2/ugcPosts"
+         payload = self.common_api_call_part(feed_type="group", group_id=group_id)
+
+         return requests.request("POST", url, headers=self.headers, data=payload)
+
+     def main_func(self):
+         self.user_id = self.get_user_id()
+         #print(self.user_id)
+
+         feed_post = self.feed_post()
+         print(feed_post)
+         for group_id in self.python_group_list:
+             print(group_id)
+             group_post = self.group_post(group_id)
+             print(group_post)
+         return str(feed_post)
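
A minimal usage sketch for the class above, assuming the module is saved as Linkedin_post.py and that the access token, article URL, and API key below are hypothetical placeholders you replace with your own:

from Linkedin_post import LinkedinAutomate

automator = LinkedinAutomate(
    access_token="YOUR_LINKEDIN_ACCESS_TOKEN",         # hypothetical placeholder
    medium_url="https://medium.com/@gathnex/example",   # hypothetical placeholder URL
    openai_api="YOUR_OPENAI_API_KEY",                   # hypothetical placeholder
)
# main_func posts to the feed (and any groups in python_group_list) and
# returns the feed response as a string, e.g. "<Response [201]>" on success
print(automator.main_func())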
app.txt ADDED
@@ -0,0 +1,65 @@
+ from Linkedin_post import LinkedinAutomate
+ from llm_automation import llm_auto
+ from openai import OpenAI
+ import gradio as gr
+
+ greety = """
+ As a derivative work of the [Linkedin Automation System](https://medium.com/@gathnex) by Gathnex,
+ follow us on [LinkedIn](https://www.linkedin.com/company/gathnex/) and [GitHub](https://github.com/gathnexadmin). A special thanks to [Gokul](https://www.linkedin.com/in/gokul-raja-1541b8226/), who made a significant contribution to this project.
+ """
+
+
+ def stream(prompt, history, OPENAI_API_KEY, access_token):
+     # Route the prompt: publish to LinkedIn when the intent classifier returns #Post,
+     # otherwise answer as a normal assistant
+     llm = llm_auto(prompt, OPENAI_API_KEY)
+     if llm.intent_indentifier() == "#Post":
+         url = llm.prompt_link_capturer()
+         res = LinkedinAutomate(access_token, url, OPENAI_API_KEY).main_func()
+         return llm.posted_or_not(res)
+     else:
+         return llm.normal_gpt()
+
+
+ css = """
+ h1 {
+   text-align: center;
+ }
+
+ #duplicate-button {
+   margin: auto;
+   color: white;
+   background: #1565c0;
+   border-radius: 100vh;
+ }
+
+ .contain {
+   max-width: 900px;
+   margin: auto;
+   padding-top: 1.5rem;
+ }
+
+ """
+
+ chat_interface = gr.ChatInterface(
+     fn=stream,
+     additional_inputs_accordion_name="Credentials",
+     additional_inputs=[
+         gr.Textbox(label="OpenAI Key", lines=1),
+         gr.Textbox(label="Linkedin Access Token", lines=1),
+     ],
+     stop_btn=None,
+     examples=[
+         ["explain Large language model"],
+         ["what is quantum computing"]
+     ],
+ )
+
+ with gr.Blocks(css=css) as demo:
+     gr.HTML("<h1><center>Gathnex Linkedin Automation using Generative AI</center></h1>")
+     gr.HTML("<h3><center><a href='https://medium.com/@gathnex'>Gathnex AI</a>💬</center></h3>")
+     gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
+     chat_interface.render()
+     gr.Markdown(greety)
+
+ if __name__ == "__main__":
+     demo.queue(max_size=10).launch()
+
llm_automation.txt ADDED
@@ -0,0 +1,73 @@
+ import re
+ from openai import OpenAI
+
+ class llm_auto:
+
+     def __init__(self, prompt, openai_api):
+         self.prompt = prompt
+         self.openai_api = openai_api
+
+
+     def intent_indentifier(self):
+         # Classify whether the prompt asks to publish content on LinkedIn (#Post) or not (#Decline)
+         model = "gpt-3.5-turbo"
+         client = OpenAI(api_key=self.openai_api)
+         DEFAULT_SYSTEM_PROMPT = '''You are a prompt classification assistant. Your role is to recognize prompts where the user intends to create and post content on LinkedIn. If the user clearly indicates the intent to 'post it on LinkedIn with a web URL,' classify it as #Post. If there are no indications of publishing or posting, classify the prompt as #Decline. For all other prompts without publishing intent, classify them as #Decline.
+ Your task is to distinguish prompts based on their intention to publish content on LinkedIn or not.
+ Sample prompts:
+ 1. create a content about this page 'www.xxx.com' - #Decline
+ 2. create a content and post this is web url 'www.xxx.com' - #Post
+ 3. 'www.xxx.com' create a content to post on linkedin - #Decline
+ 4. create and publish the content about in this page 'www.xxx.com' - #Post
+ '''
+         response = client.chat.completions.create(
+             model=model,
+             messages=[
+                 {"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
+                 {"role": "user", "content": "Classify the prompt as either '#Post' or '#Decline': " + self.prompt},
+             ]
+         )
+         return response.choices[0].message.content
+
+
+     def normal_gpt(self):
+         # Regular assistant reply when no LinkedIn posting intent is detected
+         model = "gpt-3.5-turbo"
+         client = OpenAI(api_key=self.openai_api)
+         DEFAULT_SYSTEM_PROMPT = "You are Gathnex, an intelligent assistant dedicated to providing effective solutions. Your responses will include emojis to add a friendly and engaging touch. 😊 Analyze user queries and provide clear and practical answers, incorporating emojis to enhance the user experience. Focus on delivering solutions that are accurate, actionable, and helpful. If additional information is required for a more precise solution, politely ask clarifying questions. Your goal is to assist users by providing effective and reliable solutions to their queries. 🌟"
+         response = client.chat.completions.create(
+             model=model,
+             messages=[
+                 {"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
+                 {"role": "user", "content": self.prompt},
+             ]
+         )
+         return response.choices[0].message.content
+
+
+     def prompt_link_capturer(self):
+         # Return the first URL found in the prompt
+         url_pattern = r'https?://\S+|www\.\S+'
+         urls = re.findall(url_pattern, self.prompt)
+         return urls[0]
+
+     def posted_or_not(self, y):
+         # Report success or failure based on the stringified LinkedIn API response
+         client = OpenAI(api_key=self.openai_api)
+         model = "gpt-3.5-turbo"
+         DEFAULT_SYSTEM_PROMPT = "You are an assistant; just inform the user about the LinkedIn post."
+         if y == "<Response [201]>":
+             response1 = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
+                     {"role": "user", "content": "Tell the user in a friendly manner that the LinkedIn post was successfully posted, with emojis"},
+                 ]
+             )
+             return response1.choices[0].message.content
+         else:
+             response2 = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
+                     {"role": "user", "content": "Tell the user in a friendly manner that the LinkedIn post was not successfully posted and to check the access token and parameters, with sad emojis"},
+                 ]
+             )
+             return response2.choices[0].message.content
+
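
A quick sketch of how the intent routing in llm_auto is used, assuming the module is saved as llm_automation.py; the prompt and API key below are hypothetical placeholders:

from llm_automation import llm_auto

llm = llm_auto("create a content and post this web url https://medium.com/@gathnex/example on linkedin",
               "YOUR_OPENAI_API_KEY")        # hypothetical placeholder key
label = llm.intent_indentifier()             # expected to return "#Post" or "#Decline"
if label == "#Post":
    print(llm.prompt_link_capturer())        # first URL found in the prompt
else:
    print(llm.normal_gpt())                  # regular assistant reply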
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ openai
+ requests
+ uuid
+ gradio
+ beautifulsoup4
+
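
A minimal setup sketch, assuming the three code files above are saved as importable Python modules (Linkedin_post.py, llm_automation.py, app.py) so the imports in app.py resolve:

pip install -r requirements.txt
python app.py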