# self-help / server.py — uploaded by ayush-thakur02 (commit 07aa026, "Upload 9 files")
#!/usr/bin/env python3
"""
Simple HTTP server for serving the documentation hub locally.
This allows proper CORS handling and file serving for the markdown files.
"""
import datetime
import errno
import http.server
import json
import mimetypes
import os
import socketserver
from pathlib import Path
from urllib.parse import urlparse, parse_qs
class DocumentationHandler(http.server.SimpleHTTPRequestHandler):
    """Request handler serving static files plus two JSON APIs.

    Routes:
        /api/blogs   -- list ``*.md`` files in ``./blogs`` with metadata
        /api/search  -- full-text search over the same files (``?q=term``)

    Any other path falls through to ``SimpleHTTPRequestHandler``'s static
    file serving, rooted at this script's directory.
    """

    def __init__(self, *args, **kwargs):
        # Serve files relative to this script, regardless of the CWD the
        # server was launched from.
        super().__init__(*args, directory=str(Path(__file__).parent), **kwargs)

    def end_headers(self):
        # Add permissive CORS headers to every response so the hub's
        # front-end JS can fetch markdown/API data from any origin.
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header('Access-Control-Allow-Headers', 'Content-Type')
        super().end_headers()

    def do_GET(self):
        """Dispatch API routes; everything else is served as a static file."""
        # Match on the parsed path component only.  The previous
        # startswith() check compared against the raw request target, so
        # it also matched paths like /api/blogsanything.
        route = urlparse(self.path).path
        if route == '/api/blogs':
            self.handle_blogs_api()
        elif route == '/api/search':
            self.handle_search_api()
        else:
            super().do_GET()

    # ------------------------------------------------------------------
    # Shared helpers
    # ------------------------------------------------------------------

    def _blogs_dir(self):
        """Return the ``./blogs`` directory, or ``None`` if it is missing."""
        blogs_dir = Path(__file__).parent / 'blogs'
        return blogs_dir if blogs_dir.exists() else None

    def _send_json(self, payload):
        """Serialize *payload* and send it as a 200 JSON response."""
        body = json.dumps(payload, indent=2).encode('utf-8')
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        # Content-Length lets clients know when the body is complete.
        self.send_header('Content-Length', str(len(body)))
        self.end_headers()
        self.wfile.write(body)

    @staticmethod
    def _extract_title(md_file, lines):
        """Title from the first ``# `` header, else a prettified filename.

        Shared by both APIs (the original duplicated this loop in each).
        """
        for line in lines:
            if line.startswith('# '):
                return line[2:].strip()
        return md_file.stem.replace('-', ' ').title()

    @staticmethod
    def _first_paragraph_excerpt(lines, max_chars=150, scan=10):
        """First non-empty, non-header line following the title, truncated.

        Only the *scan* lines after the ``# `` title are examined; returns
        a fixed fallback string when nothing suitable is found.
        """
        for i, line in enumerate(lines):
            if line.startswith('# '):
                for j in range(i + 1, min(i + scan, len(lines))):
                    candidate = lines[j].strip()
                    if candidate and not lines[j].startswith('#'):
                        excerpt = candidate[:max_chars]
                        if len(candidate) > max_chars:
                            excerpt += "..."
                        return excerpt
                break
        return "No description available."

    # ------------------------------------------------------------------
    # API endpoints
    # ------------------------------------------------------------------

    def handle_blogs_api(self):
        """API endpoint to list blog files and their metadata."""
        try:
            blogs_dir = self._blogs_dir()
            if blogs_dir is None:
                self.send_error(404, "Blogs directory not found")
                return

            blogs = []
            for md_file in blogs_dir.glob('*.md'):
                try:
                    stat = md_file.stat()
                    content = md_file.read_text(encoding='utf-8')
                    lines = content.split('\n')
                    blogs.append({
                        'filename': md_file.name,
                        'title': self._extract_title(md_file, lines),
                        'excerpt': self._first_paragraph_excerpt(lines),
                        'size': f"{round(stat.st_size / 1024, 1)} KB",
                        'lastModified': stat.st_mtime,
                    })
                except Exception as e:
                    # Best-effort: one unreadable file must not take down
                    # the whole listing.
                    print(f"Error reading {md_file}: {e}")
                    continue

            # Newest first, then replace the raw mtime with a date string.
            blogs.sort(key=lambda b: b['lastModified'], reverse=True)
            for blog in blogs:
                blog['lastModified'] = datetime.datetime.fromtimestamp(
                    blog['lastModified']
                ).strftime('%Y-%m-%d')

            self._send_json(blogs)
        except Exception as e:
            print(f"Error in blogs API: {e}")
            self.send_error(500, f"Internal server error: {e}")

    def handle_search_api(self):
        """API endpoint to search within blog content."""
        try:
            query_params = parse_qs(urlparse(self.path).query)
            search_query = query_params.get('q', [''])[0].lower().strip()
            if not search_query:
                self.send_error(400, "Missing search query parameter 'q'")
                return

            blogs_dir = self._blogs_dir()
            if blogs_dir is None:
                self.send_error(404, "Blogs directory not found")
                return

            search_results = []
            for md_file in blogs_dir.glob('*.md'):
                try:
                    content = md_file.read_text(encoding='utf-8')
                    lines = content.split('\n')
                    title = self._extract_title(md_file, lines)

                    filename_match = search_query in md_file.name.lower()
                    title_match = search_query in title.lower()
                    content_match = search_query in content.lower()
                    if not (filename_match or title_match or content_match):
                        continue

                    stat = md_file.stat()
                    # Weighted relevance: filename hits outrank title hits,
                    # which outrank body hits (3/2/1, summed).
                    relevance = (3 * filename_match
                                 + 2 * title_match
                                 + 1 * content_match)
                    search_results.append({
                        'filename': md_file.name,
                        'title': title,
                        'excerpt': self.extract_search_excerpt(content, search_query),
                        'size': f"{round(stat.st_size / 1024, 1)} KB",
                        'lastModified': stat.st_mtime,
                        'relevance': relevance,
                        'matches': {
                            'filename': filename_match,
                            'title': title_match,
                            'content': content_match,
                        },
                    })
                except Exception as e:
                    print(f"Error searching {md_file}: {e}")
                    continue

            # Best matches first; ties broken by recency.
            search_results.sort(
                key=lambda r: (r['relevance'], r['lastModified']), reverse=True)
            for result in search_results:
                result['lastModified'] = datetime.datetime.fromtimestamp(
                    result['lastModified']
                ).strftime('%Y-%m-%d')
                # Relevance was only needed for sorting; drop it from the payload.
                del result['relevance']

            self._send_json({
                'query': search_query,
                'results': search_results,
                'total': len(search_results),
            })
        except Exception as e:
            print(f"Error in search API: {e}")
            self.send_error(500, f"Internal server error: {e}")

    def extract_search_excerpt(self, content, search_term, context_chars=150):
        """Excerpt around the first hit of *search_term*, or a fallback.

        If the term does not occur in the body (e.g. it only matched the
        filename), return the first non-empty, non-header line instead.
        """
        content_lower = content.lower()
        search_pos = content_lower.find(search_term)

        if search_pos == -1:
            # Term not in body: fall back to the first paragraph-ish line.
            for line in content.split('\n'):
                stripped = line.strip()
                if stripped and not line.startswith('#'):
                    excerpt = stripped[:context_chars]
                    if len(stripped) > context_chars:
                        excerpt += "..."
                    return excerpt
            return "No description available."

        # Take a window of roughly context_chars centred on the hit ...
        start = max(0, search_pos - context_chars // 2)
        end = min(len(content), search_pos + len(search_term) + context_chars // 2)
        excerpt = content[start:end].strip()

        # ... then trim the partial word at each clipped edge and mark
        # the clipping with ellipses.
        if start > 0:
            space_pos = excerpt.find(' ')
            if space_pos > 0:
                excerpt = excerpt[space_pos:].strip()
            excerpt = "..." + excerpt
        if end < len(content):
            last_space = excerpt.rfind(' ')
            if last_space > 0:
                excerpt = excerpt[:last_space].strip()
            excerpt += "..."
        return excerpt
def run_server(port=8000):
    """Start the documentation server on *port* (default 8000).

    Blocks in ``serve_forever`` until interrupted; Ctrl-C shuts down
    cleanly.  Prints a friendly message instead of a traceback when the
    port is already taken.
    """

    class _ReusableTCPServer(socketserver.TCPServer):
        # Allow immediate restart: without this, rebinding the port fails
        # while the previous socket lingers in TIME_WAIT.
        allow_reuse_address = True

    try:
        with _ReusableTCPServer(("", port), DocumentationHandler) as httpd:
            print(f"🌐 Server running at: http://localhost:{port}")
            print(f"πŸ“ Serving from: {Path(__file__).parent}")
            print(f"πŸ” Open http://localhost:{port} in your browser")
            print()
            httpd.serve_forever()
    except KeyboardInterrupt:
        print("\nπŸ‘‹ Server stopped by user")
    except OSError as e:
        # errno.EADDRINUSE is the portable "address already in use" code
        # (98 on Linux, 48 on macOS); the original hard-coded only the
        # Windows value 10048, kept here as a fallback.
        if e.errno in (errno.EADDRINUSE, 10048):
            print(f"❌ Port {port} is already in use. Try a different port:")
            print(" python server.py --port 8001")
        else:
            print(f"❌ Error starting server: {e}")
if __name__ == "__main__":
    import sys

    # Minimal hand-rolled CLI: accepts --port/-p PORT and --help/-h.
    port = 8000
    argv = sys.argv
    if len(argv) > 1:
        for idx, token in enumerate(argv):
            if token in ('--port', '-p') and idx + 1 < len(argv):
                try:
                    port = int(argv[idx + 1])
                except ValueError:
                    print("❌ Invalid port number")
                    sys.exit(1)
            elif token in ('--help', '-h'):
                print("Usage: python server.py [--port PORT]")
                print("Options:")
                print(" --port, -p PORT Port to run server on (default: 8000)")
                print(" --help, -h Show this help message")
                sys.exit(0)

    run_server(port)