import streamlit as st
from requests_html import HTMLSession
import pandas as pd
import langchain as lc  # assumes the legacy (pre-0.1) LangChain API with top-level OpenAI/LLMChain
import openai

st.title("Cybersecurity Vulnerability Scanner & AI Analyzer")

# URL input for scraping
url = st.text_input("Enter the target URL:")

if st.button("Scrape and Scan"):
    if url:
        st.write(f"Processing {url}...")

        # Initialize an HTML session and fetch the page
        session = HTMLSession()
        response = session.get(url)

        # Attempt to render JavaScript, if necessary (requests_html needs Chromium for this;
        # skip gracefully when rendering is unavailable)
        try:
            response.html.render(timeout=20)
        except Exception as exc:
            st.info(f"JavaScript rendering skipped: {exc}")

        # Extract links, JavaScript files, and forms
        links = [link.attrs['href'] for link in response.html.find('a') if 'href' in link.attrs]
        js_files = [script.attrs['src'] for script in response.html.find('script') if 'src' in script.attrs]
        forms = [form.attrs.get('action', 'No action') for form in response.html.find('form')]

        # Persist the results across Streamlit reruns so the AI step below can use them
        st.session_state['scan'] = {'links': links, 'js_files': js_files, 'forms': forms}

        # Display extracted data
        st.success("Scraping and scanning complete!")
        st.write("Links:", links)
        st.write("JavaScript Files:", js_files)
        st.write("Forms:", forms)
    else:
        st.warning("Please enter a valid URL.")

# AI-based analysis of the scraped data
user_prompt = st.text_area("Enter an AI prompt for analysis:")

if st.button("Analyze with AI"):
    if not user_prompt:
        st.warning("Please provide an AI prompt for analysis.")
    elif 'scan' not in st.session_state:
        st.warning("Run 'Scrape and Scan' first so there is data to analyze.")
    else:
        scan = st.session_state['scan']
        # Initialize a LangChain/OpenAI chain for the analysis
        prompt = lc.PromptTemplate(input_variables=["question"], template="{question}")
        chain = lc.LLMChain(llm=lc.OpenAI(), prompt=prompt)
        full_prompt = (
            f"{user_prompt}\n"
            f"Links:\n{scan['links']}\n"
            f"JS Files:\n{scan['js_files']}\n"
            f"Forms:\n{scan['forms']}"
        )
        ai_analysis = chain.run(full_prompt)
        st.write("AI Analysis:")
        st.write(ai_analysis)
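
# How to run (a sketch): the filename "app.py" is an assumption, not taken from the source.
# The LangChain OpenAI wrapper reads the API key from the OPENAI_API_KEY environment variable.
#   pip install streamlit requests-html langchain openai pandas
#   export OPENAI_API_KEY="sk-..."
#   streamlit run app.py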