Update pages/linkedin_extractor.py

pages/linkedin_extractor.py  +237 −172  (CHANGED)
@@ -20,36 +20,56 @@ st.set_page_config(
 )
 
 def get_embeddings():
-    """Initialize embeddings with
+    """Initialize embeddings with better fallback options"""
     try:
-        # Try multiple embedding models
+        # Try multiple embedding models with different approaches
         model_options = [
             "sentence-transformers/all-MiniLM-L6-v2",
-            "sentence-transformers/
-            "
+            "sentence-transformers/all-mpnet-base-v2",
+            "BAAI/bge-small-en-v1.5",
+            "sentence-transformers/paraphrase-MiniLM-L6-v2"
         ]
 
         for model_name in model_options:
             try:
+                st.info(f"Trying to load: {model_name}")
                 embeddings = HuggingFaceEmbeddings(
                     model_name=model_name,
                     model_kwargs={'device': 'cpu'},
                     encode_kwargs={
+                        'normalize_embeddings': True,
+                        'batch_size': 32
+                    }
                 )
+                # Test the embeddings
+                test_text = "Hello world"
+                test_embedding = embeddings.embed_query(test_text)
+                if test_embedding and len(test_embedding) > 0:
+                    st.success(f"✅ Loaded embeddings: {model_name.split('/')[-1]}")
+                    return embeddings
             except Exception as e:
+                st.warning(f"⚠️ Failed to load {model_name}: {str(e)}")
                 continue
 
-        st.error("❌ All embedding models failed to load")
-        return None
+        # If all models fail, try a simpler approach
+        st.warning("Trying fallback embedding method...")
+        try:
+            embeddings = HuggingFaceEmbeddings(
+                model_name="sentence-transformers/all-MiniLM-L6-v2",
+                cache_folder="/tmp/embeddings"
+            )
+            st.success("✅ Loaded fallback embeddings")
+            return embeddings
+        except Exception as e:
+            st.error(f"❌ Fallback also failed: {e}")
+            return None
 
     except Exception as e:
         st.error(f"❌ Embeddings error: {e}")
         return None
 
 def get_llm():
-    """Initialize Mistral 7B LLM
+    """Initialize Mistral 7B LLM with better error handling"""
     try:
         api_key = os.getenv('HUGGINGFACEHUB_API_TOKEN')
         if not api_key:
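Both loaders in this commit follow the same probe-and-fall-through pattern: attempt a model, smoke-test it with a trivial call, and move on if anything raises. A standalone sketch of the embeddings half, outside Streamlit (it assumes the `langchain_community` import path this app uses; the helper name `first_working_embeddings` is illustrative, not part of the file):

```python
# Illustrative sketch: return the first candidate model that can actually
# produce a vector, mirroring the probe loop in get_embeddings() above.
from langchain_community.embeddings import HuggingFaceEmbeddings

def first_working_embeddings(model_names):
    for name in model_names:
        try:
            emb = HuggingFaceEmbeddings(model_name=name,
                                        model_kwargs={"device": "cpu"})
            if emb.embed_query("Hello world"):  # non-empty vector => usable
                return emb
        except Exception:
            continue  # broken download or model: try the next candidate
    return None  # caller decides what "no embeddings" means

if __name__ == "__main__":
    emb = first_working_embeddings([
        "sentence-transformers/all-MiniLM-L6-v2",
        "sentence-transformers/all-mpnet-base-v2",
    ])
    print("loaded" if emb else "all candidates failed")
```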
@@ -65,24 +85,79 @@ def get_llm():
             """)
             return None
 
-        #
-            "
+        # Try multiple models
+        model_options = [
+            "mistralai/Mistral-7B-Instruct-v0.1",
+            "HuggingFaceH4/zephyr-7b-beta",
+            "google/flan-t5-large"
+        ]
+
+        for model_id in model_options:
+            try:
+                st.info(f"Trying to load: {model_id}")
+                llm = HuggingFaceHub(
+                    repo_id=model_id,
+                    huggingfacehub_api_token=api_key,
+                    model_kwargs={
+                        "temperature": 0.7,
+                        "max_length": 2048,
+                        "max_new_tokens": 512,
+                        "top_p": 0.95,
+                        "repetition_penalty": 1.1,
+                        "do_sample": True
+                    }
+                )
+                # Test the model
+                test_response = llm.invoke("Hello")
+                if test_response:
+                    st.success(f"✅ Loaded model: {model_id.split('/')[-1]}")
+                    return llm
+            except Exception as e:
+                st.warning(f"⚠️ Failed to load {model_id}: {str(e)}")
+                continue
+
+        st.error("❌ All AI models failed to load")
+        return None
+
     except Exception as e:
         st.error(f"❌ AI Model error: {e}")
         return None
 
+def simple_chat_analysis(user_input, extracted_data):
+    """Simple chat analysis without embeddings as fallback"""
+    try:
+        if not extracted_data:
+            return "No data available for analysis."
+
+        content_blocks = extracted_data.get('content_blocks', [])
+        page_info = extracted_data.get('page_info', {})
+
+        # Create context from extracted data
+        context = f"Page Title: {page_info.get('title', 'N/A')}\n"
+        context += f"Content Type: {extracted_data.get('data_type', 'N/A')}\n"
+        context += f"Extracted Content:\n"
+
+        for i, block in enumerate(content_blocks[:5]):  # Limit context
+            context += f"Block {i+1}: {block}\n"
+
+        # Simple rule-based responses
+        user_input_lower = user_input.lower()
+
+        if any(word in user_input_lower for word in ['summary', 'summarize', 'overview']):
+            return f"Based on the LinkedIn data, here's a summary:\n\nTitle: {page_info.get('title', 'N/A')}\nContent Type: {extracted_data.get('data_type', 'N/A')}\nTotal Content Blocks: {len(content_blocks)}\nKey Content: {content_blocks[0][:200] if content_blocks else 'No content available'}..."
+
+        elif any(word in user_input_lower for word in ['skills', 'expertise', 'technologies']):
+            return "I can analyze the content for skills and expertise. The extracted data shows professional information that can be reviewed for specific skills mentioned in the content blocks."
+
+        elif any(word in user_input_lower for word in ['experience', 'background', 'career']):
+            return "The LinkedIn data contains professional experience information. I can help you analyze the career background and work history mentioned in the profile."
+
+        else:
+            return f"I've analyzed the LinkedIn data. {page_info.get('title', 'The profile')} contains {len(content_blocks)} content blocks with professional information. You can ask me about summaries, skills, experience, or specific details from the extracted content."
+
+    except Exception as e:
+        return f"Analysis error: {str(e)}"
+
 def extract_linkedin_data(url, data_type):
     """Extract data from LinkedIn URLs"""
     try:
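The new `simple_chat_analysis` is pure keyword routing with no model calls, so it can be exercised directly. A quick check with a stub payload shaped like the extractor's success output (field names taken from the function above; the stub values are made up):

```python
# Hypothetical stub mirroring extract_linkedin_data's success shape.
stub = {
    "status": "success",
    "data_type": "profile",
    "page_info": {"title": "Jane Doe - Data Engineer"},
    "content_blocks": ["Ten years building data pipelines and ML platforms."],
}

print(simple_chat_analysis("Give me a summary", stub))        # summary branch
print(simple_chat_analysis("What skills are listed?", stub))  # skills branch
print(simple_chat_analysis("Anything else?", stub))           # default branch
```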
@@ -154,64 +229,75 @@ def extract_linkedin_data(url, data_type):
         return {"error": f"Extraction error: {str(e)}", "status": "error"}
 
 def process_extracted_data(extracted_data):
-    """Process extracted data for AI analysis"""
+    """Process extracted data for AI analysis with fallbacks"""
     if not extracted_data or extracted_data.get("status") != "success":
         return None, []
 
-    page_info = extracted_data['page_info']
-    content_blocks = extracted_data['content_blocks']
-
-    # Structure the data for AI
-    all_text = f"LINKEDIN DATA ANALYSIS REPORT\n"
-    all_text += "=" * 70 + "\n\n"
-    all_text += f"PAGE INFORMATION:\n"
-    all_text += f"Title: {page_info['title']}\n"
-    all_text += f"URL: {page_info['url']}\n"
-    all_text += f"Type: {extracted_data['data_type'].upper()}\n"
-    all_text += f"Extracted: {extracted_data['extraction_time']}\n"
-    all_text += f"Response Code: {page_info['response_code']}\n"
-    all_text += f"Content Length: {page_info['content_length']} characters\n\n"
-
-    all_text += f"CONTENT ANALYSIS:\n"
-    all_text += f"Total Content Blocks: {len(content_blocks)}\n\n"
-
-    # Add content blocks
-    for i, block in enumerate(content_blocks[:20]):
-        all_text += f"--- CONTENT BLOCK {i+1} ---\n"
-        all_text += f"Words: {len(block.split())} | Characters: {len(block)}\n"
-        all_text += f"Content: {block}\n\n"
-
-    all_text += "=" * 70 + "\n"
-    all_text += "END OF EXTRACTION REPORT"
-
-    # Split into chunks
-    splitter = CharacterTextSplitter(
-        separator="\n",
-        chunk_size=1000,
-        chunk_overlap=200,
-        length_function=len
-    )
-
-    chunks = splitter.split_text(all_text)
-    documents = [Document(page_content=chunk) for chunk in chunks]
-
-    # Create vector store
     try:
+        page_info = extracted_data['page_info']
+        content_blocks = extracted_data['content_blocks']
+
+        # Structure the data for AI
+        all_text = f"LINKEDIN DATA ANALYSIS REPORT\n"
+        all_text += "=" * 70 + "\n\n"
+        all_text += f"PAGE INFORMATION:\n"
+        all_text += f"Title: {page_info['title']}\n"
+        all_text += f"URL: {page_info['url']}\n"
+        all_text += f"Type: {extracted_data['data_type'].upper()}\n"
+        all_text += f"Extracted: {extracted_data['extraction_time']}\n"
+        all_text += f"Response Code: {page_info['response_code']}\n"
+        all_text += f"Content Length: {page_info['content_length']} characters\n\n"
+
+        all_text += f"CONTENT ANALYSIS:\n"
+        all_text += f"Total Content Blocks: {len(content_blocks)}\n\n"
+
+        # Add content blocks
+        for i, block in enumerate(content_blocks[:10]):  # Limit for performance
+            all_text += f"--- CONTENT BLOCK {i+1} ---\n"
+            all_text += f"Words: {len(block.split())} | Characters: {len(block)}\n"
+            all_text += f"Content: {block}\n\n"
+
+        all_text += "=" * 70 + "\n"
+        all_text += "END OF EXTRACTION REPORT"
+
+        # Try to create vector store
         embeddings = get_embeddings()
         if embeddings is None:
+            st.warning("⚠️ Using simple text processing (embeddings unavailable)")
+            # Return simple document structure
+            documents = [Document(page_content=all_text)]
+            return "simple", documents
+
+        # Split into chunks
+        splitter = CharacterTextSplitter(
+            separator="\n",
+            chunk_size=800,  # Smaller for better performance
+            chunk_overlap=100,
+            length_function=len
+        )
+
+        chunks = splitter.split_text(all_text)
+        documents = [Document(page_content=chunk) for chunk in chunks]
+
+        # Create vector store
         vectorstore = FAISS.from_documents(documents, embeddings)
         return vectorstore, chunks
+
     except Exception as e:
-        st.error(f"
+        st.error(f"❌ Processing failed: {e}")
+        # Fallback: return simple structure
+        if extracted_data:
+            simple_doc = Document(page_content=f"LinkedIn Data: {extracted_data['page_info']['title']}")
+            return "simple", [simple_doc]
         return None, []
 
 def create_chatbot(vectorstore):
-    """Create conversational chatbot with
+    """Create conversational chatbot with fallbacks"""
     try:
         llm = get_llm()
         if llm is None:
+            st.warning("⚠️ Using simple chat analysis (AI model unavailable)")
+            return "simple"
 
         memory = ConversationBufferMemory(
             memory_key="chat_history",
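After this change `process_extracted_data` has three outcomes: `(FAISS store, chunks)`, `("simple", [Document, ...])` when embeddings are unavailable, and `(None, [])` on failure. Since a two-tuple is always truthy, callers should unpack and test the first element rather than truth-testing the tuple itself. A hedged caller-side sketch (names from the diff):

```python
# Caller-side handling of the three return shapes of process_extracted_data.
vectorstore, chunks = process_extracted_data(extracted_data)
if vectorstore is None:
    st.error("Processing failed")            # (None, []) failure shape
elif vectorstore == "simple":
    st.warning("Degraded mode: keyword analysis only")  # sentinel shape
else:
    st.write(f"Vector search ready over {len(chunks)} chunks")
```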
@@ -221,22 +307,20 @@ def create_chatbot(vectorstore):
 
         chain = ConversationalRetrievalChain.from_llm(
             llm=llm,
-            retriever=vectorstore.as_retriever(search_kwargs={"k":
+            retriever=vectorstore.as_retriever(search_kwargs={"k": 3}),
             memory=memory,
             return_source_documents=True,
             output_key="answer"
         )
         return chain
     except Exception as e:
-        st.error(f"
-        return
+        st.error(f"❌ Chatbot creation failed: {str(e)}")
+        return "simple"
 
 def clear_chat_history():
     """Clear chat history while keeping extracted data"""
     st.session_state.chat_history = []
     st.success("Chat history cleared! Starting fresh conversation.")
 
 def display_metrics(extracted_data):
     """Display extraction metrics"""
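The `k=3` in `search_kwargs` caps how many chunks the retriever feeds into each prompt. `as_retriever` delegates to the store's similarity search, so the effect can be previewed directly (a sketch; `similarity_search` is the standard LangChain FAISS call):

```python
# Preview what the k=3 retriever will hand the LLM for a given question.
docs = vectorstore.similarity_search("What does this page highlight?", k=3)
for i, doc in enumerate(docs, 1):
    print(f"[{i}] {doc.page_content[:80]}...")
```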
@@ -336,16 +420,24 @@ def main():
                 st.session_state.extracted_data = extracted_data
                 st.session_state.current_url = url_to_use
 
-                # Process for AI
-                if
+                # Process for AI (with fallbacks); (None, []) is returned on
+                # failure, so unpack before testing instead of truth-testing
+                # the tuple (a non-empty tuple is always truthy)
+                vectorstore, chunks = process_extracted_data(extracted_data)
+                if vectorstore:
                     st.session_state.vectorstore = vectorstore
+
+                    # Create chatbot (with fallbacks)
+                    chatbot = create_chatbot(vectorstore)
+                    st.session_state.chatbot = chatbot
                     st.session_state.chat_history = []
+
+                    if chatbot == "simple":
+                        st.warning("⚠️ Using simple chat mode (AI features limited)")
+                    else:
+                        st.success(f"✅ AI analysis ready! Processed {len(chunks) if chunks else 1} content chunks.")
                     st.balloons()
                 else:
-                    st.error("❌ Failed to process data for
+                    st.error("❌ Failed to process data for analysis")
             else:
                 error_msg = extracted_data.get("error", "Unknown error occurred")
                 st.error(f"❌ Extraction failed: {error_msg}")
@@ -359,12 +451,12 @@ def main():
         if st.button("Clear Chat History", type="secondary", use_container_width=True):
             clear_chat_history()
 
         # Debug info
         if st.checkbox("Show Debug Info", False):
             st.markdown("### Debug Information")
             st.write("Extracted Data:", st.session_state.extracted_data is not None)
-            st.write("Vectorstore:", st.session_state.vectorstore
-            st.write("Chatbot:", st.session_state.chatbot
+            st.write("Vectorstore Type:", type(st.session_state.vectorstore).__name__ if st.session_state.vectorstore else "None")
+            st.write("Chatbot Type:", "simple" if st.session_state.chatbot == "simple" else type(st.session_state.chatbot).__name__ if st.session_state.chatbot else "None")
             st.write("Chat History Length:", len(st.session_state.chat_history))
             st.write("Processing:", st.session_state.processing)
@@ -408,8 +500,6 @@ def main():
             st.info("""
             **Welcome to LinkedIn AI Analyzer!**
 
-            **Powered by Mistral 7B AI**
-
             **To get started:**
             1. Select content type
             2. Enter a LinkedIn URL or click a suggested company
@@ -421,132 +511,107 @@ def main():
             - Company Pages
             - Public Posts
 
-            **
-            -
-            -
+            **Features:**
+            - Content extraction
+            - Basic analysis
+            - Interactive chat
             - Data insights
-            - Content summarization
             """)
 
     with col2:
         st.markdown("### AI Chat Analysis")
 
-        # Check if we have everything needed for chat
         has_extracted_data = st.session_state.extracted_data and st.session_state.extracted_data.get("status") == "success"
 
-        # Initialize chatbot if not exists
-        if st.session_state.chatbot is None:
-            with st.spinner("Initializing AI Chat..."):
-                st.session_state.chatbot = create_chatbot(st.session_state.vectorstore)
-                if st.session_state.chatbot:
-                    st.success("✅ AI Chat ready!")
-                else:
-                    st.error("❌ Failed to initialize AI chat")
-
-        # Display chat
-        #
+        if has_extracted_data:
+            st.success("Chat ready! Ask questions about the LinkedIn data.")
+
+            # Display chat history
+            for chat in st.session_state.chat_history:
+                if chat["role"] == "user":
+                    with st.chat_message("user"):
+                        st.write(chat['content'])
+                elif chat["role"] == "assistant":
+                    with st.chat_message("assistant"):
+                        st.write(chat['content'])
+
+            # Chat input
+            user_input = st.chat_input("Ask about the LinkedIn data...")
+
+            if user_input:
+                # Add user message to history
+                st.session_state.chat_history.append({"role": "user", "content": user_input})
 
+                # Generate response based on available capabilities
+                if st.session_state.chatbot == "simple" or st.session_state.chatbot is None:
+                    # Use simple analysis
+                    with st.spinner("Analyzing..."):
+                        response = simple_chat_analysis(user_input, st.session_state.extracted_data)
+                        st.session_state.chat_history.append({"role": "assistant", "content": response})
+                        st.rerun()
+                else:
+                    # Use AI chatbot
+                    with st.spinner("AI is analyzing..."):
                         try:
                             response = st.session_state.chatbot.invoke({"question": user_input})
                             answer = response.get("answer", "I couldn't generate a response based on the available data.")
                             st.session_state.chat_history.append({"role": "assistant", "content": answer})
                             st.rerun()
                         except Exception as e:
-                            error_msg = f"❌ Error
-                            st.session_state.
+                            error_msg = f"❌ AI Error: {str(e)}. Using simple analysis."
+                            simple_response = simple_chat_analysis(user_input, st.session_state.extracted_data)
+                            st.session_state.chat_history.append({"role": "assistant", "content": f"{error_msg}\n\n{simple_response}"})
                             st.rerun()
 
-        # Suggested questions - only show when no chat history
-        if len(st.session_state.chat_history) == 0:
-            st.markdown("#### Try asking:")
-            suggestions = [
-                "Summarize the main information from this LinkedIn page",
-                "What are the key highlights or achievements mentioned?",
-                "Analyze the professional focus and expertise",
-                "What insights can you extract from this content?",
-                "Provide a comprehensive overview of this profile"
-            ]
-
-            for suggestion in suggestions:
-                if st.button(suggestion, key=f"suggest_{suggestion}", use_container_width=True):
-                    st.info(f"Type this in the chat: '{suggestion}'")
-
-        st.
+            # Suggested questions
+            if len(st.session_state.chat_history) == 0:
+                st.markdown("#### Try asking:")
+                suggestions = [
+                    "Summarize the main information",
+                    "What are the key highlights?",
+                    "Analyze the professional focus",
+                    "What insights can you extract?",
+                    "Tell me about the experience"
+                ]
+
+                for suggestion in suggestions:
+                    if st.button(suggestion, key=f"suggest_{suggestion}", use_container_width=True):
+                        st.info(f"Type in chat: '{suggestion}'")
 
         elif st.session_state.processing:
             st.info("Extracting and processing LinkedIn data...")
 
         else:
-            st.info("""
-            **Extract LinkedIn data to enable AI analysis**
-
-            Once data is extracted, you can:
-            - Ask questions about the content
-            - Get summaries and insights
-            - Analyze professional information
-            - Extract key achievements
-            - Discuss career highlights
-            """)
+            st.info("Extract LinkedIn data to enable analysis")
 
     # Features section
     st.markdown("---")
-    st.markdown("### 
+    st.markdown("### Analysis Features")
 
     feature_cols = st.columns(3)
 
     with feature_cols[0]:
         st.markdown("""
-        -
-        -
-        -
+        **Content Extraction**
+        - LinkedIn data scraping
+        - Text processing
+        - Content analysis
         """)
 
     with feature_cols[1]:
         st.markdown("""
         **Smart Chat**
-        -
-        -
-        -
+        - Interactive conversation
+        - Data-driven responses
+        - Context awareness
         """)
 
     with feature_cols[2]:
         st.markdown("""
-        **
+        **Insights**
         - Content summarization
         - Pattern recognition
-        - Professional
+        - Professional analysis
         """)
 
 if __name__ == "__main__":
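End to end, the commit replaces hard failures with a string sentinel: `create_chatbot` returns either a `ConversationalRetrievalChain` or `"simple"`, and the chat loop branches on that before calling `.invoke()`. A condensed sketch of the dispatch contract (function names from the diff; `answer` is an illustrative helper, not part of the file):

```python
# Minimal dispatcher over the "chain or sentinel" contract used in main().
def answer(question, chatbot, extracted_data):
    if chatbot == "simple" or chatbot is None:
        # Degraded path: rule-based analysis over the raw extraction.
        return simple_chat_analysis(question, extracted_data)
    # Full path: retrieval-augmented chain with conversation memory.
    result = chatbot.invoke({"question": question})
    return result.get("answer", "No answer generated.")
```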